text stringlengths 38 1.54M |
|---|
# URL configuration for the SurfaceApp application.
from django.urls import path
from .views import Index,addWork,UpdateWork,DeleteWork

# Namespace used when reversing these routes (e.g. "SurfaceApp:index").
app_name = "SurfaceApp"

urlpatterns = [
    # Landing page.
    path('',Index,name="index"),
    # Create a new work entry.
    path('add-work-details/',addWork,name="AddWork"),
    # Edit an existing entry, addressed by its slug.
    path('update-work-details/<str:slug>/',UpdateWork,name="UpdateWork"),
    # Remove an entry, addressed by its slug.
    path('delete-work/<str:slug>/',DeleteWork,name="DeleteWork"),
]
# Minimal Django views module.
# NOTE(review): HttpResponse is imported twice (django.http.response and
# django.http expose the same class) -- one of the two imports is redundant.
from django.http.response import HttpResponse
from django.shortcuts import render
from django.http import HttpResponse

# Create your views here.
def sample(request):
    # Smoke-test view: returns a static plain-text body.
    return HttpResponse("hey! im here")
|
"""
this is all the code I have done for my code. I am useing rover number 1.
"""
import RPi.GPIO as GPIO
from time import sleep
import random
GPIO.setmode(GPIO.BCM)
GPIO.setup(24, GPIO.OUT)
GPIO.setup(23, GPIO.OUT)
GPIO.setup(27, GPIO.OUT)
GPIO.setup(17, GPIO.OUT)
GPIO.setup(13, GPIO.IN)
while True:
i=GPIO.input(13)
GPIO.output(24, GPIO.HIGH)
GPIO.output(27, GPIO.HIGH)
if i==1:
print("CLEAR")
elif i==0:
GPIO.output(24, GPIO.LOW)
GPIO.output(27, GPIO.LOW)
GPIO.output(23, GPIO.HIGH)
GPIO.output(17, GPIO.HIGH)
sleep(1)
GPIO.output(17, GPIO.LOW)
GPIO.output(17, GPIO.LOW)
sleep(1)
number=random.random()
number=10*number
if number > 5:
GPIO.output(27, GPIO.HIGH)
sleep(1)
GPIO.output(27, GPIO.LOW)
elif number < 5:
GPIO.output(24, GPIO.HIGH)
sleep(1)
GPIO.output(24, GPIO.LOW)
|
# Small glossary of Python terms, printed one definition per line.
glossary = {
    'string': 'A series of characters',
    'comment': 'A note that the Python interpreter ignores',
    'list': 'A collection of items in a particular order and can be amended',
    'loop': 'Work through a collection of items, one at a time',
    'dictionary': 'A collection of key-value pairs',
}

# Iterate key/value pairs directly instead of re-indexing by key.
for word, meaning in glossary.items():
    print(f'{word.title()}: {meaning}')
|
import sys
from sklearn.datasets import load_iris
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import linear_model
import mysql.connector
DATASET_PATH = "/Users/michaelfilonenko/Downloads/dataset.csv"
def get_db_connection():
    """Open a new connection to the local MySQL server.

    NOTE(review): credentials are hard-coded in source -- consider moving
    them to environment variables or a configuration file.
    """
    return mysql.connector.connect(
        host="localhost",
        user="successProjectDeveloper",
        passwd="Success1_",
        auth_plugin='mysql_native_password'
    )
def form_dataset(connection):
    """Build the averaged per-project answers dataset from the database.

    For each project, the answer values of questions 1-3 are averaged into
    a (row_count, 3) matrix, wrapped in a DataFrame with columns
    Impact / Complexity / Improvement.

    Closes ``connection`` (and an internal helper connection) before
    returning.

    Args:
        connection: an open mysql.connector connection.
    Returns:
        tuple: (list of column names, pandas.DataFrame of averaged answers)
    """
    cursor = connection.cursor()
    cursor.execute("use successProjectDatabase")

    # Number of projects determines the dataset height.
    cursor.execute("select count(*) from Project")
    row_count = cursor.fetchall()[0][0]
    # BUGFIX: the original passed row_count as a second print argument, so
    # the {} placeholder was never substituted.
    print("row count {}".format(row_count))

    # Collect project ids so each answer can be mapped back to a row index.
    ids = np.zeros((row_count, ), dtype=int)
    cursor.execute("select * from Project")
    for i, project in enumerate(cursor):
        ids[i] = project[0]

    cursor.execute("select * from Project_Answers")
    # NOTE: np.float / np.int were removed in NumPy 1.24 -- use builtins.
    dataset = np.zeros((row_count, 3), dtype=float)
    answers_count = np.zeros((row_count, 3), dtype=int)

    # Second connection so we can look up answers while iterating `cursor`.
    tmp_connection = get_db_connection()
    tmp_cursor = tmp_connection.cursor()
    tmp_cursor.execute("use successProjectDatabase")
    for project_answer in cursor:
        project_id = project_answer[1]
        answer_id = project_answer[2]
        index = np.where(ids == project_id)[0][0]
        # Parameterized query instead of string formatting (SQL injection /
        # quoting safety).
        tmp_cursor.execute("select * from Answers where ID = %s", (answer_id,))
        tmp_set = tmp_cursor.fetchall()[0]
        answer_value = tmp_set[1]
        question_id = int(tmp_set[2])
        # Only questions 1-3 contribute; unanswered entries are skipped.
        if question_id < 4 and answer_value is not None:
            dataset[index][question_id - 1] += float(answer_value)
            answers_count[index][question_id - 1] += 1

    # Convert sums to means wherever at least one answer exists.
    for i in range(dataset.shape[0]):
        for j in range(dataset.shape[1]):
            if answers_count[i, j] != 0:
                dataset[i, j] /= answers_count[i, j]
    print(dataset)

    cursor.close()
    connection.close()
    tmp_cursor.close()
    tmp_connection.close()

    columns = ["Impact", "Complexity", "Improvement"]
    train = pd.DataFrame.from_records(dataset, columns=columns)
    return (columns, train)
def process_dataset(questions, dataset):
    """Fit a linear regression predicting the last column from the others.

    Args:
        questions: column names; the final name is the regression target.
        dataset: DataFrame containing all the named columns.
    Returns:
        The fitted sklearn LinearRegression model.
    """
    feature_names = questions[:len(questions) - 1]
    target_name = questions[-1]
    model = linear_model.LinearRegression()
    model.fit(dataset[feature_names], dataset[target_name])
    return model
def main():
    """Train the regression model from the DB, then serve predictions.

    Each stdin line must contain space-separated floats (one per feature);
    the predicted value is printed per line.
    NOTE(review): readline() returns '' at EOF, which makes float('') raise
    ValueError -- the loop has no clean termination condition.
    """
    ds = form_dataset(get_db_connection())
    regr = process_dataset(ds[0], ds[1])
    # print('Intercept: \n', regr.intercept_)
    # print('Coefficients: \n', regr.coef_)
    # for row in range(ds[2].shape[0]):
    # prediction = regr.predict([ds[2][row, :ds[2].shape[1] - 1]])
    # print("Prediction: {}", prediction)
    # Sanity-check prediction with a fixed two-feature input.
    print(regr.predict([(10, 10)]))
    while True:
        line = sys.stdin.readline()
        line = line[:-1]  # drop the trailing newline
        split = [float(s) for s in line.split(' ')]
        prediction = regr.predict([split])
        print(prediction[0])

if __name__ == "__main__":
    main()
|
import ctypes as ct
import logging
from contextlib import suppress
from ctypes.wintypes import *
import PyHook3
import psutil
import pythoncom
from model.base import Rectangle
# Module-level logger; WARNING keeps routine read/enumerate chatter quiet.
log = logging.getLogger('app.process')
log.setLevel(logging.WARNING)

# Win32 API entry points used throughout this module.
kernel32 = ct.windll.kernel32
user32 = ct.windll.user32

# Callback signature required by user32.EnumWindows.
WNDENUMPROC = ct.WINFUNCTYPE(BOOL, HWND, LPARAM)
class KeyboardHook:
    """Global keyboard hook (PyHook3) that fires callbacks on hotkeys.

    A callback registered via set_callback(cb, keys) is invoked on every
    fresh key press made while *all* key IDs in ``keys`` are held down.
    """

    def __init__(self):
        # Registered (callback, key-ids) pairs.  Kept per-instance: the
        # original class-level list would be shared by every hook instance.
        self.callbacks = []
        self.pressed = set()
        self.hook_manager = PyHook3.HookManager()
        self.hook_manager.KeyDown = self.on_key_down
        self.hook_manager.KeyUp = self.on_key_up
        self.hook_manager.HookKeyboard()

    def on_key_down(self, event):
        # Only react to the initial press, not auto-repeat events.
        if event.KeyID not in self.pressed:
            self.pressed.add(event.KeyID)
            self.callback()
        return True  # let the event propagate to other applications

    def on_key_up(self, event):
        # KeyError is the only expected failure here (untracked key);
        # the original suppressed all exceptions, which could hide bugs.
        with suppress(KeyError):
            self.pressed.remove(event.KeyID)
        return True

    def set_callback(self, callback, keys):
        """Register ``callback`` to fire while all ``keys`` are held."""
        self.callbacks.append([callback, keys])

    def callback(self):
        # Fire every registered combination whose keys are all pressed.
        for callback, keys in self.callbacks:
            if all(key in self.pressed for key in keys):
                callback()

    @staticmethod
    def flush():
        # Pump the Windows message loop so hook events get delivered.
        pythoncom.PumpMessages()

    def close(self):
        self.hook_manager.UnhookKeyboard()
        user32.PostQuitMessage(0)
class Memory(object):
    """Thin wrapper around ReadProcessMemory for a given process handle."""

    def __init__(self, process):
        # process: Win32 process handle opened with at least PROCESS_VM_READ.
        self._process = process

    def read(self, result, addr: int):
        """Read sizeof(result) bytes at addr into the given ctypes object
        and return its Python value."""
        kernel32.ReadProcessMemory(self._process, addr, ct.byref(result), ct.sizeof(result), 0)
        log.debug(f'Address: {hex(addr)}, value:{result.value}')
        return result.value

    def read_uint16(self, addr: int):
        return self.read(ct.c_ushort(), addr)

    def read_uint32(self, addr: int):
        return self.read(ct.c_ulong(), addr)

    def read_float(self, addr: int):
        return self.read(ct.c_float(), addr)

    def read_byte(self, addr: int):
        return self.read(ct.c_byte(), addr)

    def read_ptr(self, addr: int, offset=0x0):
        """Read the 32-bit value stored at addr + offset (pointer-sized)."""
        log.debug(f'Base: {hex(addr)}, offset: {hex(offset)}')
        return self.read(ct.c_ulong(), addr + offset)

    def read_ptr_indirect(self, result, addr: int, *args):
        """Follow a chain of pointer offsets and read a value of type
        ``result`` at the final address.

        NOTE(review): with exactly one offset this returns the raw pointer
        value (c_ulong) rather than reading into ``result`` -- possibly
        inconsistent with the multi-offset path; confirm intended.
        """
        base = addr
        if len(args) == 0:
            return self.read(result, addr)
        if len(args) == 1:
            return self.read_ptr(addr, args[0])
        if len(args) > 1:
            # Dereference every offset but the last, warning if the chain
            # collapses to NULL along the way.
            for o in args[:-1]:
                if base != 0x0 and addr == 0x0:
                    log.warning('Address has become 0x0 - maybe incorrect pointer?')
                addr = self.read_ptr(addr, o)
            return self.read(result, addr + args[-1])
class Process(object):
    """Locate a process by executable name and open a Win32 handle to it."""

    # OpenProcess access-right flags.
    PROCESS_VM_READ = 0x0010
    PROCESS_VM_WRITE = 0x0020

    def __init__(self, name: str, open_privileges=None):
        self.name = name
        self.pid = self._pid()   # 0 when no matching process exists
        self.handle = None
        self.memory = None
        if open_privileges is not None:
            self.open(open_privileges)

    def __del__(self):
        # Best-effort handle cleanup when the wrapper is garbage-collected.
        if self.handle is not None:
            self.close()

    def exists(self):
        """Return True when a process with this name is currently running."""
        return self._pid() > 0

    def open(self, privileges):
        """Open the process handle and attach a Memory reader to it."""
        if self.pid == 0:
            log.error('Process could not be opened - pid not found')
            raise RuntimeError("Process could not be opened")
        self.handle = kernel32.OpenProcess(privileges, 0, self.pid)
        self.memory = Memory(self.handle)

    def close(self):
        kernel32.CloseHandle(self.handle)
        self.handle = None

    def _pid(self):
        # First process whose executable name matches wins; the for-else
        # returns 0 when the loop finishes without a match.
        for process in psutil.process_iter():
            if process.name() == self.name:
                return process.pid
        else:
            return 0
class Window(object):
    """Find a top-level window by (sub)title and manipulate its geometry."""

    def __init__(self, name: str):
        self.name = name
        self.hwnd = None   # set by enum_windows() when a match is found
        self.enum_windows()

    def center(self, screen):
        """Center the window on ``screen`` (object with .w/.h attributes)."""
        (_, _, w, h) = self.geometry()
        x = int((screen.w - w) / 2)
        y = int((screen.h - h) / 2)
        self.foreground()
        self.reposition(x, y)

    def geometry(self):
        """Return the window rect as Rectangle(x, y, width, height)."""
        rect = self.window_rect()
        return Rectangle(rect.left, rect.top, rect.right - rect.left, rect.bottom - rect.top)

    def window_rect(self):
        rect = RECT()
        user32.GetWindowRect(self.hwnd, ct.byref(rect))
        return rect

    def foreground(self):
        """Raise the window and give it input focus."""
        user32.BringWindowToTop(self.hwnd)
        user32.SetForegroundWindow(self.hwnd)

    def reposition(self, x, y):
        """Move the window to (x, y), keeping its current size."""
        (_, _, w, h) = self.geometry()
        user32.MoveWindow(self.hwnd, x, y, w, h, False)

    def enum_windows(self):
        """Walk all top-level windows looking for one whose title contains
        self.name; stores the matching handle in self.hwnd."""
        user32.EnumWindows(WNDENUMPROC(self.wnd_enum_proc), 0)

    def wnd_enum_proc(self, hwnd, lParam):
        # EnumWindows callback; returning False stops the enumeration.
        length = user32.GetWindowTextLengthW(hwnd) + 1
        buffer = ct.create_unicode_buffer(length)
        user32.GetWindowTextW(hwnd, buffer, length)
        # NOTE(review): hwnd is cleared on every callback; since enumeration
        # stops at the first match this only leaves hwnd None when no
        # window matches at all.
        self.hwnd = None
        if self.name in buffer.value:
            self.hwnd = hwnd
            return False
        return True
|
import webbrowser
class Movie():
    """Container for basic movie metadata.

    Attributes:
        title: The title of the video.
        storyline: The storyline about the movie.
        poster_image_url: The poster of the movie.
        trailer_youtube_url: The trailer of the movie.
    """

    def __init__(self, movie_title, movie_storyline,
                 poster_image, trailer_youtube):
        self.title = movie_title
        self.storyline = movie_storyline
        self.poster_image_url = poster_image
        self.trailer_youtube_url = trailer_youtube

    def show_trailer(self):
        """Open the movie trailer in the default web browser."""
        webbrowser.open(self.trailer_youtube_url)
|
from pykkar import *
import random as rnd
import time
def generate(r,c,start, finish): # Randomized Prim's algorithm
    """Generate an r x c maze via a randomized Prim-style carve.

    start/finish are [row, col] grid coordinates (odd indices expected).
    Returns (maze string for world creation, grid matrix for A*, end cell).
    """
    # Each queue entry is [cell, passage]; seed with the start cell twice.
    start = [start,start]
    # Neighbourhood used below to hook the finish box up to an open tile.
    tries = [[0,0], [0,1], [0,-1], [1, 0], [-1, 0]]
    lst = []
    # Fill the whole grid with walls.
    for i in range(r):
        temp = []
        for j in range(c):
            temp.append('#')
        lst.append(temp)
    queue = [start]
    while queue:
        # Pick a random frontier entry (this is the Prim-like step).
        node = queue.pop(rnd.randint(0,len(queue)-1))
        if lst[node[0][0]][node[0][1]] == ' ':
            continue
        # Carve the cell and the passage leading to it.
        lst[node[0][0]][node[0][1]] = ' '
        lst[node[1][0]][node[1][1]] = ' '
        nbrs = [[0, 2], [2, 0], [0, -2], [-2, 0]]
        rnd.shuffle(nbrs)
        for i in range(len(nbrs)):
            # NOTE(review): bounds test uses <= r / <= c; this appears safe
            # only because cells sit on odd indices with even r/c -- confirm
            # the expected inputs.
            if 0 <= node[0][0] + nbrs[i][0] <= r and 0 <= node[0][1] + nbrs[i][1] <= c:
                nbr = [node[0][0] + nbrs[i][0], node[0][1] + nbrs[i][1]]
                passage = [node[0][0] + nbrs[i][0]//2, node[0][1] + nbrs[i][1]//2]
            else:
                continue
            if lst[nbr[0]][nbr[1]] == '#':
                queue.append([nbr,passage])
    # Mark the start with the robot facing east.
    lst[start[0][0]][start[0][1]] = '>'
    # Attach the box ('b') to the first open tile around the finish.
    # NOTE(review): if no neighbour is open, ``end`` stays unbound and the
    # return below raises NameError.
    for i in range(len(tries)):
        if lst[finish[0] + tries[i][0]][finish[1] + tries[i][1]] != '#':
            lst[finish[0] + tries[i][0]][finish[1] + tries[i][1]] = 'b'
            end = [finish[0] + tries[i][0], finish[1] + tries[i][1]]
            break
    # Getting the matrix of the map for A* and string for world generation
    maze = ''''''
    for i in range(len(lst)):
        lst[i].append('#')
        maze += "".join(lst[i]) + "\n"
    last_row = ['#' for i in range(c+1)]
    lst.append(last_row)
    maze += (c+1)*"#"
    return maze, lst, end
def left():
    """Turn 90 degrees left by making three right turns."""
    for _ in range(3):
        right()
def turn():
    """Turn 180 degrees via two right turns."""
    for _ in range(2):
        right()
def random_mouse():
    """Random-mouse solver: walk in random open directions until the box."""
    while not is_box():
        clear = []
        for i in range(4): # Gather info
            # i == 2 is the direction we came from; only take it as a
            # last resort (handled below).
            if i != 2:
                if not is_wall() and not is_painted() and not is_cone():
                    clear.append(i)
            right()
        if len(clear) > 0: # Get random path
            go = rnd.choice(clear)
        else:
            go = 2 # If no other possibility, go back
        for i in range(go): # Turn
            right()
        step()
def wall_follow(): # Wall following algorithm
    """Wall follower: keep a wall on one side until the box is reached.

    Two move attempts per loop iteration, re-checking the box in between.
    """
    while not is_box():
        right()
        if is_wall():
            left()
        else:
            step()
        if is_box():
            break
        if is_wall():
            left()
        else:
            step()
def get_dirs():
    """Scan around the robot for Tremaux().

    Returns (number of open non-backward directions, number of right turns
    to take).  Unvisited paths are preferred, then painted ones, and the
    backward direction (2 turns) is the fallback.
    """
    dirs = 0
    clear = []
    painted = []
    for i in range(4): # Gather info
        # i == 2 is the direction we came from; skip it in the scan.
        if i != 2:
            if not is_wall():
                dirs += 1
                if not is_cone():
                    if not is_painted():
                        clear.append(i)
                    else:
                        painted.append(i)
        right()
    if len(clear) > 0: # If possible, go to unvisited path
        go = rnd.choice(clear)
    elif len(painted) > 0:
        go = rnd.choice(painted)
    else:
        go = 2 # If no other possibility, go back
    return dirs, go
def Tremaux(): # Trémaux's algorithm
    """Trémaux solver: paint marks a once-visited path, a cone marks twice.

    ``last`` counts steps since the previous junction, used to decide
    whether the entrance behind the robot should be upgraded to a cone.
    """
    last = 0
    while not is_box():
        dirs, go = get_dirs()
        if dirs >= 2:
            # At a junction: mark the corridor we arrived from.
            turn()
            if not is_painted() and not is_wall(): # Paint the tile behind you
                paint()
            elif is_painted() and (last > 2 or last == 0): # If already painted, put a cone
                put()
            turn()
            for i in range(go): # Turn to the next path
                right()
            if is_painted(): # If the path has been partially explored once, put cone
                put()
            else:
                paint() # Otherwise mark the first time entrance to the path
            last = 0
        else:
            for i in range(go): # Turn to the next path
                right()
            last += 1
        if not is_wall():
            step()
def H(loc, fin): # Heuristic function
    """Manhattan-distance heuristic between grid cells ``loc`` and ``fin``."""
    row_gap = abs(loc[0] - fin[0])
    col_gap = abs(loc[1] - fin[1])
    return row_gap + col_gap
def a_star():
    """A* search over the generated maze, then replay the found path.

    Uses module globals: ``s`` (start cell), ``end`` (goal cell) and
    ``info`` (the grid matrix from generate()).
    """
    node = [H(s, end), s, 0, H(s, end)] # score, location, actual cost, heuristic
    visited = []
    queue = [node]
    dirs = [[0,1], [1,0], [0,-1], [-1,0]]
    paths = {}   # expansion index -> successor cells, used to rebuild the path
    count = 0
    while node[3] > 1:
        visited.append(node[1])
        nextNodes = []
        for i in range(len(dirs)):
            nbr = info[node[1][0] + dirs[i][0]][node[1][1] + dirs[i][1]]
            if nbr == ' ' or nbr == 'b': # Get all next valid nodes, append to queue
                nextNode = [H(node[1], end) + node[3], [node[1][0] + dirs[i][0], node[1][1] + dirs[i][1]], node[2]+1, H(node[1], end)]
                if nextNode[1] not in visited and nextNode not in queue:
                    queue.append(nextNode)
                    nextNodes.append(nextNode[1])
        paths[count] = nextNodes
        # Lowest score first (list comparison looks at the score element).
        queue.sort()
        node = queue.pop(0)
        count += 1
    # Extract path from all visited nodes
    path = []
    last = end
    vals = list(paths.values())
    while True:
        path.append(last)
        if last == s:
            break
        # Find which expansion produced ``last``; its expander is the
        # previous cell on the path.
        for i in range(len(vals)):
            if last in vals[i]:
                nr = i
                break
        last = visited[nr]
    path = path[::-1]
    # Get movement instructions from path
    NWSE = ['N', 'W', 'S', 'E']
    dif = [[-1,0], [0,-1], [1,0], [0,1]]
    # mat[current heading][desired move] = number of right turns needed.
    mat = [[0, 3, 2, 1],
           [1, 0, 3, 2],
           [2, 1, 0, 3],
           [3, 2, 1, 0]]
    for i in range(1,len(path)):
        d = [path[i][0]-path[i-1][0], path[i][1]-path[i-1][1]]
        row = dif.index(d)
        col = NWSE.index(get_direction())
        for i in range(mat[col][row]):
            right()
        step()
# ---------------------------------------------------------------------------
# Interactive driver: main menu for sizing, generating and solving the maze.
# NOTE(review): indentation was reconstructed from a whitespace-stripped
# dump -- the nesting of the trailing break/continue statements should be
# verified against the original source.
# ---------------------------------------------------------------------------
nr = 10  # simulation speed (1 - 10)
while True:
    # -- main menu ----------------------------------------------------------
    while True:
        print("Insert 1 to generate.")
        print("Insert 2 to edit the maze size.")
        print("Insert 3 to edit the start/end coordinates.")
        try:
            choose = int(input("--> "))
        except:
            print("Something went wrong!\n")
            continue
        if 0 < choose < 4:
            break
        else:
            print("Invalid value, try again.\n")
            continue
    if choose == 2:
        # -- maze size ------------------------------------------------------
        while True:
            try:
                rows = int(input("Insert an even number of rows in the maze grid (2 - 20): \n"))
            except:
                print("Something went wrong!\n")
                continue
            if 2 <= rows <= 20 and rows % 2 == 0:
                break
            else:
                print("Invalid value, try again.\n")
        while True:
            try:
                cols = int(input("Insert an even number of collumns in the maze grid (2 - 40): \n"))
            except:
                print("Something went wrong!")
                continue
            # NOTE(review): this re-checks rows % 2 instead of cols % 2.
            if 2 <= cols <= 40 and rows % 2 == 0:
                break
            else:
                print("Invalid value, try again.\n")
        continue
    if choose == 3:
        # -- start / end coordinates ---------------------------------------
        kill = 0
        while True:
            try:
                s = list(input("Enter the start grid coordinates (odd numbers, use comma as delimiter): \n").split(','))
                s = list(map(int, s))
            except:
                print("Something went wrong!\n")
                continue
            try:
                if (0 < s[0] < rows and s[0] % 2 == 1) and (0 < s[1] < cols and s[1] % 2 == 1):
                    break
                else:
                    print("Invalid value, try again.\n")
            except:
                # rows/cols not defined yet -- bail back to the main menu.
                print("Please specify maze size.\n")
                kill = 1
                break
        if kill == 1:
            continue
        while True:
            try:
                f = list(input("Enter the end grid coordinates (odd numbers, use comma as delimiter): \n").split(','))
                f = list(map(int, f))
            except:
                print("Something went wrong!\n")
                continue
            try:
                if (0 < f[0] < rows and f[0] % 2 == 1) and (0 < f[1] < cols and f[1] % 2 == 1):
                    break
                else:
                    print("Invalid value, try again.\n")
            except:
                print("Please specify maze size.\n")
                break
        continue
    if choose == 1:
        # -- generate the maze and run the simulation -----------------------
        while True:
            try:
                maze, info, end = generate(rows,cols,s,f)
                print("Generation successful!\n")
            except:
                print("Some values invalid or missing. \n")
            break
        while True:
            create_world(maze)
            set_speed(nr)
            while True:
                # back / back1 control how far we unwind after bye().
                back = 0
                back1 = 0
                print("Insert 1 to choose the algorithm.")
                print("Insert 2 to choose the simulation speed (default 10).")
                print("Insert 3 to change maze parameters.")
                print("Insert 4 to generate new maze.")
                print("Insert 5 to reset the maze.\n")
                try:
                    choose2 = int(input("--> "))
                except:
                    print("Something went wrong!")
                    continue
                if 0 < choose2 < 6:
                    pass
                else:
                    print("Invalid value, try again.\n")
                    continue
                if choose2 == 1:
                    # -- algorithm selection --------------------------------
                    while True:
                        print("Insert 1 for random mouse algorithm.")
                        print("Insert 2 for wall following algorithm.")
                        print("Insert 3 for Trémaux's algorithm.")
                        print("Insert 4 for A* algorithm.")
                        print("Insert 5 to go back.\n")
                        try:
                            choose3 = int(input("--> "))
                        except:
                            print("Something went wrong!\n")
                            continue
                        if 0 < choose3 < 6:
                            pass
                        else:
                            print("Invalid value, try again.\n")
                            continue
                        try:
                            if choose3 == 1:
                                random_mouse()
                            elif choose3 == 2:
                                wall_follow()
                            elif choose3 == 3:
                                try:
                                    Tremaux()
                                except:
                                    print("Something went wrong!\n")
                            elif choose3 == 4:
                                a_star()
                            else:
                                break
                            break
                        except:
                            print("Something went wrong!\n")
                        break
                if choose2 == 2:
                    # -- simulation speed -----------------------------------
                    while True:
                        print("Insert the simulation speed (1 - 10).\n")
                        nr = int(input("--> "))
                        if 0 < nr < 11:
                            set_speed(nr)
                            break
                        else:
                            print("Invalid value, try again.\n")
                    continue
                if choose2 == 3:
                    # Change maze parameters: unwind to the main menu.
                    back = 1
                    back1 = 1
                    break
                if choose2 == 4:
                    # Generate a new maze: unwind one level.
                    back1 = 1
                    break
                if choose2 == 5:
                    # Reset: re-create the world with the same maze.
                    break
            bye()
            if back1 == 1:
                break
        if back == 1:
            break
|
from flask import request
from flask_accepts import accepts, responds
from flask_restx import Namespace, Resource
from flask.wrappers import Response
from typing import List
from .schema import WhatsitSchema
from .service import WhatsitService
from .model import Whatsit
from .interface import WhatsitInterface
# Flask-RESTX namespace grouping all Whatsit endpoints.
api = Namespace('Whatsit', description='A modular namespace within Other API') # noqa
@api.route('/')
class WhatsitResource(Resource):
    '''Whatsits'''

    @responds(schema=WhatsitSchema, many=True)
    def get(self) -> List[Whatsit]:
        '''Get all Whatsits'''
        return WhatsitService.get_all()

    @accepts(schema=WhatsitSchema, api=api)
    @responds(schema=WhatsitSchema)
    def post(self) -> Whatsit:
        '''Create a Single Whatsit'''
        # The request body is validated and deserialized by @accepts into
        # request.parsed_obj.
        return WhatsitService.create(request.parsed_obj)
@api.route('/<int:whatsitId>')
@api.param('whatsitId', 'Whatsit database ID')
class WhatsitIdResource(Resource):
    """CRUD operations on a single Whatsit, addressed by database id."""

    @responds(schema=WhatsitSchema)
    def get(self, whatsitId: int) -> Whatsit:
        '''Get Single Whatsit'''
        return WhatsitService.get_by_id(whatsitId)

    def delete(self, whatsitId: int) -> Response:
        '''Delete Single Whatsit'''
        from flask import jsonify
        # Renamed local: the original bound the result to ``id``, shadowing
        # the builtin.
        deleted_id = WhatsitService.delete_by_id(whatsitId)
        return jsonify(dict(status='Success', id=deleted_id))

    @accepts(schema=WhatsitSchema, api=api)
    @responds(schema=WhatsitSchema)
    def put(self, whatsitId: int) -> Whatsit:
        '''Update Single Whatsit'''
        changes: WhatsitInterface = request.parsed_obj
        # Lowercase local so the imported ``Whatsit`` model class is not
        # shadowed inside this method (the original rebound the class name).
        whatsit = WhatsitService.get_by_id(whatsitId)
        return WhatsitService.update(whatsit, changes)
|
###################################################################################################
from mpl_toolkits.basemap import Basemap
import matplotlib
import math
from scipy import *
import pylab as P
import numpy as np
import sys, glob
import os
import time
from optparse import OptionParser
import netCDF4
import datetime
from news_e_post_cbook import *
from multiprocessing import Pool
#sys.path.append("/scratch/software/Anaconda2/bin")
###################################################################################################
# run_script is a function that runs a system command
def run_script(cmd):
print "Executing command: " + cmd
os.system(cmd)
print cmd + " is finished...."
return
###################################################################################################
# ---------------------------------------------------------------------------
# Command-line driver (Python 2): watches a summary-file directory and
# launches news_e_dot.py plotting jobs for each forecast timestep.
# NOTE(review): indentation reconstructed from a whitespace-stripped dump.
# ---------------------------------------------------------------------------
parser = OptionParser()
parser.add_option("-d", dest="summary_dir", type="string", default= None, help="Input Directory (of summary files)")
parser.add_option("-a", dest="asos_dir", type="string", default= None, help="Input Directory (of ASOS files)")
parser.add_option("-i", dest="image_dir", type="string", default=None, help="Image Directory")
#parser.add_option("-m", dest="mapname", type="string", default=None, help="Path to Basemap Instance for plotting")
parser.add_option("-e", dest="fcst_nt", type="int", help = "Total number of timesteps in forecast")

(options, args) = parser.parse_args()

# All four options are required; bail out with usage text otherwise.
if ((options.summary_dir == None) or (options.asos_dir == None) or (options.image_dir == None) or (options.fcst_nt == None)):
    print
    parser.print_help()
    print
    sys.exit(1)
else:
    summary_dir = options.summary_dir
    asos_dir = options.asos_dir
    image_dir = options.image_dir
#    mapname = options.mapname
    fcst_nt = options.fcst_nt

#mapname = '/scratch2/patrick.skinner/images/map.pickle'

# Worker pool used to launch plotting commands asynchronously.
pool = Pool(processes=(6)) # set up a queue to run

######### Get Fcst initialization time from summary file directory: #############
# The directory name is expected to end in HHMM/.
init_hour = int(summary_dir[-5:-3])
init_minute = int(summary_dir[-3:-1])
if (init_hour < 10):
    # Early-UTC hours belong to the next calendar day of the cycle.
    init_hour = init_hour + 24
init_time_seconds = init_hour * 3600. + init_minute * 60.

######### Make Dot Plots #########
ens_t = 0
prev_ens_t = 0
iteration = 0

while (ens_t < fcst_nt):
    # get current time:
    current = datetime.datetime.now()
    current_hour = current.hour
    current_minute = current.minute
    if (current_hour < 12): current_hour = current_hour + 24.
    # current_time_seconds = current_hour * 3600. + current_minute * 60. + (3600. * 5.) #convert to UTC from CDT
    current_time_seconds = 20000000. #hack to force plotting in retro mode
    summary_files_temp = os.listdir(summary_dir)
    summary_files_temp.sort()
    for f, file in enumerate(summary_files_temp):
        valid_time_seconds = ens_t * 300. + init_time_seconds
        # Zero-pad the timestep for matching against the filename.
        str_ens_t = str(ens_t)
        if (len(str_ens_t) == 1):
            str_ens_t = '0' + str_ens_t
        # Match the ENV summary file for the current timestep.
        if ((file[-28:-25] == 'ENV') and (file[-24:-22] == str_ens_t)):
            if (current_time_seconds > (valid_time_seconds + 900.)): #If it is 15 minutes later than the valid time of the forecast
                print 'STARTING COMMAND (valid, current, ens_t, iter): ', valid_time_seconds, current_time_seconds, ens_t, iteration
                cmd = "/home/louis.wicker/anaconda2/envs/wof-test/bin/python news_e_dot.py -d %s -a %s -o %s -t %d " % (summary_dir, asos_dir, image_dir, ens_t)
                pool.apply_async(run_script, (cmd,))
                ens_t = ens_t + 1
                iteration = 0
            else:
                time.sleep(30)
    if (ens_t == prev_ens_t):
        # No new file appeared this pass; wait and count the attempts.
        time.sleep(10)
        iteration = iteration + 1
        print 'NOTHING HAPPENED, ', ens_t, iteration
        if (iteration > 60):
            print 'NOTHING HAPPENED FOR 30 MINUTES, GIVING UP'
            break
    prev_ens_t = ens_t

# NOTE(review): placement of this sleep relative to the loop was inferred
# from the stripped dump -- verify against the original.
time.sleep(300)

# Final plot at the last forecast timestep.
cmd = "/home/louis.wicker/anaconda2/envs/wof-test/bin/python news_e_dot.py -d %s -a %s -o %s -t %d " % (summary_dir, asos_dir, image_dir, fcst_nt)
pool.apply_async(run_script, (cmd,))
time.sleep(2)

pool.close()
pool.join()
|
from collections import defaultdict
from chainn.util import Vocabulary
from chainn.util import functions as UF
def strip_split(line):
    """Tokenize ``line`` on whitespace, ignoring surrounding whitespace.

    (str.split() with no separator already skips leading/trailing
    whitespace, so no explicit strip is required.)
    """
    return line.split()
def unsorted_batch(batches, dicts):
    """Pad every sequence in each batch to the batch's maximum length.

    Padding tokens come from the matching vocabulary's stuff_id().
    The batches are mutated in place and also returned.
    """
    for seqs, vocab in zip(batches, dicts):
        longest = max(len(seq) for seq in seqs)
        for seq in seqs:
            while len(seq) < longest:
                seq.append(vocab.stuff_id())
    return batches
def load_train_data(data, SRC, TRG, src_count=None, trg_count=None, x_cut=1, y_cut=1, replace_unknown=False):
    """Map (source, target) token-sequence pairs to vocabulary ids.

    With replace_unknown, tokens absent from the vocabulary map to unk;
    otherwise tokens whose count does not exceed the cut map to unk.
    """
    def to_id(vocab, word, count, cut):
        if replace_unknown:
            return vocab[word] if word in vocab else vocab.unk_id()
        if count is None or count[word] > cut:
            return vocab[word]
        return vocab.unk_id()

    converted = []
    for src_sent, trg_sent in data:
        src_ids = [to_id(SRC, w, src_count, x_cut) for w in src_sent]
        trg_ids = [to_id(TRG, w, trg_count, y_cut) for w in trg_sent]
        converted.append((src_ids, trg_ids))
    return converted
def batch(data, dicts, batch_size=1):
    """Yield padded batches of at most ``batch_size`` items.

    Items are sorted by source length (descending) before batching; each
    yielded batch is column-wise (one list per field) and padded through
    unsorted_batch().
    """
    ordered = sorted(data, key=lambda item: len(item[0]), reverse=True)
    width = len(ordered[0])

    def empty_columns():
        return [[] for _ in range(width)]

    columns = empty_columns()
    filled = 0
    for item in ordered:
        for field in range(len(item)):
            columns[field].append(item[field])
        filled += 1
        if filled >= batch_size:
            yield unsorted_batch(columns, dicts)
            columns = empty_columns()
            filled = 0
    # Flush the final, possibly short, batch.
    if len(columns[0]) != 0:
        yield unsorted_batch(columns, dicts)
def load_test_data(lines, SRC, preprocessing=strip_split):
    """Convert raw test lines into (id sequence, line index) pairs.

    Words missing from the vocabulary are mapped to SRC.unk_id().
    """
    def lookup(word):
        return SRC[word] if word in SRC else SRC.unk_id()

    return [([lookup(w) for w in preprocessing(line)], idx)
            for idx, line in enumerate(lines)]
"""
* POS TAGGER *
"""
def load_pos_train_data(lines, cut_threshold=1):
    """Read word_TAG formatted lines into id sequences for POS tagging.

    Words rarer than cut_threshold are mapped to unk.
    Returns (word vocabulary, tag vocabulary, list of (word-ids, tag-ids)).
    """
    SRC, TRG = Vocabulary(unk=True, eos=True), Vocabulary(unk=False, eos=True)
    data = []
    w_count = defaultdict(lambda: 0)
    # Reading in the data
    for line in lines:
        sent = line.strip().split()
        words, labels = [], []
        for word in sent:
            # Tokens look like "word_TAG".
            word, tag = word.split("_")
            words.append(word)
            labels.append(tag)
            w_count[word] += 1
        data.append((words,labels))
    # Data generator
    data = load_train_data(data, SRC, TRG, \
            src_count=w_count, x_cut=cut_threshold)
    # Return
    return SRC, TRG, data
def load_pos_test_data(lines, SRC):
    """Convert raw test sentences to id sequences using vocabulary SRC."""
    return load_test_data(lines, SRC)
"""
* NMT *
"""
def load_nmt_train_data(src, trg, SRC=None, TRG=None, cut_threshold=1):
    """Build parallel (source, target) id data for NMT training.

    When both vocabularies are supplied, unknown words map to unk;
    otherwise fresh vocabularies are built and words rarer than
    cut_threshold map to unk.  Lines are lowercased and an EOS token is
    appended.  Returns (SRC, TRG, data).
    """
    src_count = defaultdict(lambda:0)
    trg_count = defaultdict(lambda:0)
    # Pre-existing vocabularies mean we replace unknowns instead of cutting
    # by frequency.
    rep_unk = SRC is not None and TRG is not None
    if SRC is None:
        SRC = Vocabulary(unk=True, eos=True)
    if TRG is None:
        TRG = Vocabulary(unk=True, eos=True)
    data = []
    # Reading in data
    for sent_id, (src_line, trg_line) in enumerate(zip(src, trg)):
        src_line = src_line.strip().lower().split() + [SRC.eos()]
        trg_line = trg_line.strip().lower().split() + [TRG.eos()]
        for word in src_line:
            src_count[word] += 1
        for word in trg_line:
            trg_count[word] += 1
        data.append((src_line, trg_line))
    # Data generator
    data = load_train_data(data, SRC, TRG, \
            src_count=src_count, trg_count=trg_count, \
            x_cut=cut_threshold, y_cut=cut_threshold, \
            replace_unknown=rep_unk)
    # Return
    return SRC, TRG, data
def load_nmt_test_data(src, SRC):
    """Convert test source lines to ids, appending SRC's EOS token."""
    def preprocessing(line):
        return line.strip().split() + [SRC.eos()]
    return load_test_data(src, SRC, preprocessing)
"""
* LANGUAGE MODEL *
"""
def load_lm_data(lines, SRC=None, cut_threshold=1):
    """Build (words, next-word) training pairs for a language model.

    Sentences are lowercased and wrapped in <s> ... </s> markers.
    Returns (SRC vocabulary, converted data).
    """
    replace_unk = SRC is not None
    if SRC is None:
        SRC = Vocabulary()
        # Touch the sentence markers so they receive vocabulary ids.
        SRC["<s>"], SRC["</s>"]
    count = defaultdict(lambda:0)
    data = []
    # Reading and counting the data
    for sent_id, line in enumerate(lines):
        sent = ["<s>"] + line.strip().lower().split() + ["</s>"]
        words, next_w = [], []
        for i, tok in enumerate(sent):
            count[tok] += 1
            # Pair every token with its successor.
            if i < len(sent)-1:
                words.append(sent[i])
                next_w.append(sent[i+1])
        data.append((words, next_w))
    # Data generator
    data = load_train_data(data, SRC, SRC, \
            src_count=count, trg_count=count, x_cut=cut_threshold, y_cut=cut_threshold,\
            replace_unknown=replace_unk)
    return SRC, data
def load_lm_gen_data(lines, SRC):
    """Convert lines for generation: literal <s> prefix plus the
    vocabulary's EOS suffix."""
    def preprocessing(line):
        return ["<s>"] + line.strip().split() + [SRC.eos()]
    return load_test_data(lines, SRC, preprocessing)
|
from math import *
def main():
    """Project Euler 39: print the perimeter p <= 1000 admitting the most
    integer right triangles (a, b, c with a + b + c = p)."""
    max_num_of_p = 1000
    # p[i] counts the (a, b) pairs forming a right triangle of perimeter i.
    p = [0] * (max_num_of_p + 1)
    for a in range(max_num_of_p):
        for b in range(max_num_of_p):
            c = calc_hypotenuse(a, b)
            # c grows with b, so once the perimeter is too big we can stop.
            if a + b + c > max_num_of_p:
                break
            if check_if_integer(c):
                p[a + b + int(c)] += 1
    # Find the perimeter with the highest solution count.
    current_max_p = 0
    current_p = 0
    for i in range(len(p)):
        if p[i] > current_max_p:
            current_p = i
            current_max_p = p[i]
    # Parenthesized print works under both Python 2 and Python 3
    # (the original used a Python-2-only print statement).
    print(current_p)
def calc_hypotenuse(a, b):
    """Return the hypotenuse length for legs ``a`` and ``b``."""
    sum_of_squares = a * a + b * b
    return sqrt(sum_of_squares)
def check_if_integer(num):
    """True when ``num`` has no fractional part."""
    return int(num) == num
if __name__ == "__main__":
main(); |
# Demonstrates how print() renders multiple values.
print(1, 2, 3)        # comma-separated arguments print space-separated
print("파" + "이" + "썬")   # explicit + concatenation of string literals
print("파""이""썬")         # adjacent string literals are fused at parse time
print("파", "이", "썬")     # separate arguments: printed with spaces between
print([1, 2, 3])      # a single list argument prints its repr
import numpy as np
import Dataset
from Dataset import OptimizedDataset, OptimizedDatabase
import unittest
import Loss
import time
import random
import Activation
import math
from FullyConnectedLayer import FCLayer
class DeNet:
"""DeNet is similar to LoopyNet. It uses the same algorithms,
but each layer hold its own data and achieves
forward/backward propagation/update by itself. DeNet has been designed
to minimize data allocations, and to be parallelizable."""
def __init__(self):
self.lossF = Loss.Softmax
self.dropOut = 1 # [0-1]. 1 : no dropout
self.epochsStats = []
self.learningRateStep = 5
self.activationF = Activation.LeakyReLU
self.sizes = np.array([784, 100, 30, 10]) # size of each layer
self.L = len(self.sizes) # number of layers
self.layers = [None]
for sizePrevious, sizeCurrent in zip(self.sizes[:-1], self.sizes[1:]):
self.layers.append(FCLayer(sizePrevious,
sizeCurrent,
self.activationF,
self.dropOut))
def guessLabel(self, sample):
"""Return the class evaluated by the network for the
given sample."""
a = sample
for layer in self.layers[1:]:
a = layer.fastForward(a)
return np.argmax(a)
def evaluate(self, dataset):
"""Returns the success rate of the network over the test set
of the given dataset."""
success = 0
for sample, labelVector, label in dataset.tests:
if self.guessLabel(sample) == label:
success += 1
return success / len(dataset.tests)
def forwardBackward(self, sample, label, indexInBatch, ass, bundles):
"""Performs a forward pass and a backward pass over the network with
given sample. Returns the loss relative to the given sample."""
# forward pass
for l, layer in enumerate(self.layers):
if l == 0:
ass[l] = sample
else:
layer.forward(l, ass, bundles[l][indexInBatch])
# compute loss and gradient
loss = self.lossF.f(ass[-1], label)
dErrors = [np.empty_like(a) for a in ass]
dErrorLeftOperand = self.lossF.fprime(ass[-1], label) # left operand of equation BP1
# propagate the error back
for l in reversed(range(1, len(self.layers))):
dErrorLeftOperand = self.layers[l].backward(l, ass, dErrors, bundles[l][indexInBatch], dErrorLeftOperand)
return loss
def batchForwardBackward(self, batch, learningRate, ass, dErrors, bundles):
"""Trains the network over given batch. Called by train(). Returns the mean
loss relative to samples of the batch."""
# reset bundles
for layerIndex in range(1, len(bundles)):
for bundle in bundles[layerIndex]:
self.layers[layerIndex].resetBundle(bundle)
# forwardBackward on each sample, and sum up modifications suggested by the gradients
iterations = 0
sumLoss = 0
for indexInBatch, (sample, label, labelScalar) in enumerate(batch):
iterations += 1
sumLoss += self.forwardBackward(sample, label, indexInBatch, ass, bundles)
meanLoss = sumLoss/iterations
# modify weigths and biases according to previous backpropagations
for layerIndex in range(1, len(bundles)):
self.layers[layerIndex].update(layerIndex, ass, learningRate, bundles[layerIndex])
return meanLoss
def train(self, dataset, epochs, batchSize, learningRate):
"""Trains the network using the training set of
the given dataset, during given amount of epochs, using given
batch size, and beginning with given learning rate."""
datas = [layer.getDataStructures()
for layer in self.layers[1:]]
ass = [a for a, dError in datas]
dErrors = [dError for a, dError in datas]
# dErrors.append(None) # reserve a spot for
bundles = [[layer.createBundle() for bundle in range(batchSize)]
for layer in self.layers[1:]]
ass.insert(0, None) # reserve a spot for the sample as input of the network
dErrors.insert(0, None) # reserve a spot for the sample as input of the network
bundles.insert(0, None) # reserve a spot for the sample as input of the network
print("\nDeNet training with configuration:")
print("\tSize of hidden layers:", [layer.getSize() for layer in self.layers[1:]])
print("\tActivation function:", self.activationF.name())
print("\tLoss function:", self.lossF.name())
print("\tLearning rate:", learningRate)
print("\tLearning rate step:", self.learningRateStep)
print("\tDropout percentage:", self.dropOut, "(From 0 to 1. 1 means 'no dropout')")
print("\tBatch size:", batchSize)
print("\tEpochs:", epochs)
print("\tValidation set size:", len(dataset.valid))
print("\tTraining set size:", len(dataset.train))
print("\tTest set size:", len(dataset.tests))
print()
for epoch in range(1, epochs + 1):
epochBeginTime = time.clock()
print()
print("Beginning of epoch", epoch, ". Time:", "{0:.2f}".format(epochBeginTime/60), "min")
random.shuffle(dataset.train)
batchBeginning = 0
batchEnd = min(len(dataset.train), batchBeginning + batchSize)
iterations = 0
sumLoss = 0
while (batchEnd - batchBeginning) >= 1:
iterations += 1
batchLoss = self.batchForwardBackward(
dataset.train[batchBeginning:batchEnd],
learningRate,
ass, dErrors, bundles
)
sumLoss += batchLoss
if iterations % 100 == 0:
print("Loss of batch", epoch, "-", iterations, ":", batchLoss)
batchBeginning = batchEnd
batchEnd = min(len(dataset.train), batchBeginning + batchSize)
meanLoss = sumLoss / iterations
epochEndTime = time.clock()
epochDuration = epochEndTime - epochBeginTime
print("End of epoch ", epoch, ". Time:", "{0:.2f}".format(epochEndTime/60), "min. Duration:", "{0:.2f}".format(epochDuration), "seconds")
print("Mean loss :", meanLoss)
print("learning rate:", learningRate)
successRate = self.evaluate(dataset)
print("Test success rate:", successRate)
self.epochsStats.append((epochBeginTime, epochDuration, epochEndTime, meanLoss, learningRate, successRate))
learningRate *= 0.97**math.floor(epoch/self.learningRateStep)
class Tests(unittest.TestCase):
    """Smoke tests: run an untrained DeNet against the small dataset."""

    def setUp(self):
        self.data = Dataset.loadSmallPickledData()
        self.net = DeNet()

    def test_evaluateRandom(self):
        guess = self.net.guessLabel(self.data.train[0][0])
        print("random guess:", guess)

    def test_evaluate(self):
        rate = self.net.evaluate(self.data)
        print("success rate:", rate)
def runSmall():
    """Quick run: 2 epochs, batch size 50, on the small pickled dataset."""
    net = DeNet()
    dataset = Dataset.loadSmallPickledData()
    net.train(dataset, 2, 50, 0.1)
def runMedium():
    """Medium run: 30 epochs, batch size 100, on the medium pickled dataset."""
    net = DeNet()
    dataset = Dataset.loadMediumPickledData()
    net.train(dataset, 30, 100, 0.1)
def runBig():
    """Full run: 30 epochs, batch size 100, on the full pickled dataset."""
    net = DeNet()
    dataset = Dataset.loadPickledData()
    net.train(dataset, 30, 100, 0.1)
if __name__ == '__main__':
    # Entry point: uncomment exactly one of the alternatives below.
    # unittest.main()
    # runSmall()
    # runMedium()
    runBig()
|
import argparse  # NOTE(review): imported but never used in this script
import json

# Input: the OpenImages class hierarchy and a raw per-image detection CSV.
# Output: the same detections expanded with ancestor classes (ouput_file)
# plus a flat per-box CSV (ouput_box_file).
hierarchy_file = '/home/chamo/Documents/work/OpenImgChamo/config/bbox_labels_500_hierarchy.json'
result_file = '/home/chamo/Documents/data/UntitledFolder/test.csv'
ouput_file = '/home/chamo/Documents/data/UntitledFolder/expanded_test.csv'
ouput_box_file = '/home/chamo/Documents/data/UntitledFolder/box_test.csv'
# NOTE(review): result_dict is only ever *read* (duplicate-image check) and
# never populated — confirm whether inserts were lost.
result_dict = {}
line_count = 1  # lines read from result_file (header counts as 1)
img_count = 0   # images written to ouput_file
def _update_dict(initial_dict, update):
for key, value_list in update.items():
if key in initial_dict:
initial_dict[key].extend(value_list)
else:
initial_dict[key] = value_list
def _build_plain_hierarchy(hierarchy, skip_root=False):
all_children = []
all_keyed_parent = {}
all_keyed_child = {}
if 'Subcategory' in hierarchy:
for node in hierarchy['Subcategory']:
keyed_parent, keyed_child, children = _build_plain_hierarchy(node)
_update_dict(all_keyed_parent, keyed_parent)
_update_dict(all_keyed_child, keyed_child)
all_children.extend(children)
if not skip_root:
all_keyed_parent[hierarchy['LabelName']] = all_children
all_children = [hierarchy['LabelName']] + all_children
for child, _ in all_keyed_child.items():
all_keyed_child[child].append(hierarchy['LabelName'])
all_keyed_child[hierarchy['LabelName']] = []
return all_keyed_parent, all_keyed_child, all_children
# Load the label hierarchy and flatten it into ancestor/descendant lookups.
# skip_root=True drops the artificial root entity from the maps.
with open(hierarchy_file) as f:
    hierarchy = json.load(f)
all_keyed_parent, all_keyed_child, all_children = _build_plain_hierarchy(hierarchy, skip_root=True)
# Expand every detection with its ancestor classes.  Writes:
#   f_out     — per-image CSV: "ImageID,<space-separated box sextets>"
#   f_box_out — per-box CSV in the OpenImages submission layout
with open(result_file, "r") as f_result, \
        open(ouput_file, "w") as f_out, \
        open(ouput_box_file, "w") as f_box_out:
    line = f_result.readline()  # copy the result file's header through
    f_out.write(line)
    total_add_count = 0
    f_box_out.write(
        'ImageID,Source,LabelName,Score,XMin,XMax,YMin,YMax,IsOccluded,IsTruncated,IsGroupOf,IsDepiction,IsInside')
    # FIX: the header was written without a trailing newline, so the first
    # data row used to be glued onto the header line.
    f_box_out.write('\n')
    while True:
        line = f_result.readline()
        if line == '':
            break
        line_count = line_count + 1
        splited = line.split(",")
        image_name = splited[0]
        if image_name == '':
            break
        if image_name in result_dict.keys():
            print('rep img')
        if len(splited) < 2:
            print('error')
        new_str = []
        if splited[1] != '\n':
            cache_re = splited[1]
            # Detections are space-separated sextets:
            #   class confidence xmin ymin xmax ymax  (repeated per box)
            splited = splited[1].split(' ')
            box_count = 0
            boxes_list = []
            class_list = []
            while True:
                box_info = []
                # Clamp confidence and the four coordinates into [0, 1].
                for cc in range(5):
                    temp_index = box_count * 6 + cc + 1
                    temp_val = float(splited[temp_index])
                    if temp_val > 1:
                        splited[temp_index] = '1.0'
                    if temp_val < 0:
                        splited[temp_index] = '0.0'
                box_info.append(splited[box_count * 6 + 0])  # class name
                box_info.append(splited[box_count * 6 + 1])  # confidence
                box_info.append(splited[box_count * 6 + 2])  # xmin
                box_info.append(splited[box_count * 6 + 3])  # ymin
                box_info.append(splited[box_count * 6 + 4])  # xmax
                box_info.append(splited[box_count * 6 + 5])  # ymax
                box_count = box_count + 1
                # FIX: filter/keep the current box *before* testing for the
                # end of the token list.  The original broke out of the loop
                # first, which silently discarded the last box of every line.
                if float(box_info[1]) < 0.0001:
                    pass  # near-zero confidence: drop the box
                elif float(box_info[4]) - float(box_info[2]) <= 0 or float(box_info[5]) - float(box_info[3]) <= 0:
                    print('zero size box')
                else:
                    boxes_list.append(box_info)
                    class_list.append(box_info[0])
                if len(splited) - 1 <= box_count * 6:
                    break
            for box_info in boxes_list:
                # Emit the box itself ...
                box_box = []
                box_box.append(image_name)
                box_box.append('freeform')
                box_box.append(box_info[0])  # LabelName
                box_box.append(box_info[1])  # Score
                box_box.append(box_info[2])  # XMin
                box_box.append(box_info[4])  # XMax
                box_box.append(box_info[3])  # YMin
                box_box.append(box_info[5])  # YMax
                f_box_out.write(','.join(box_box))
                f_box_out.write('\n')
                result = [' '.join(box_info)]
                assert box_info[0] in all_keyed_child
                # ... then one copy per ancestor class not already detected
                # on this image.
                parent_nodes = all_keyed_child[box_info[0]]
                for parent_node in parent_nodes:
                    if parent_node in class_list:
                        continue
                    total_add_count = total_add_count + 1
                    box_info[0] = parent_node
                    box_box = []
                    box_box.append(image_name)
                    box_box.append('freeform')
                    box_box.append(box_info[0])
                    box_box.append(box_info[1])
                    box_box.append(box_info[2])
                    box_box.append(box_info[4])
                    box_box.append(box_info[3])
                    box_box.append(box_info[5])
                    f_box_out.write(','.join(box_box))
                    f_box_out.write('\n')
                    result.append(' '.join(box_info))
                new_str.append(' '.join(result))
        img_count = img_count + 1
        f_out.write(image_name + ',' + ' '.join(new_str) + '\n')
    print(total_add_count)
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class DongguanItem(scrapy.Item):
    """Scrapy item holding one question/complaint post scraped from the
    Dongguan site."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    qnum = scrapy.Field()      # post number / id
    qtype = scrapy.Field()     # post category
    qtitle = scrapy.Field()    # title
    qcontent = scrapy.Field()  # body text of the question
    qstatus = scrapy.Field()   # processing status
    ifriend = scrapy.Field()   # poster ("netizen") name
    qtime = scrapy.Field()     # post time
    pass
|
# Count 'Subject:' header lines in a user-chosen mail file.
fname = input('Please enter a file name: ')
try:
    fhand = open(fname)
except Exception:
    if fname == 'na na boo boo':
        print('NA NA BOO BOO TO YOU - You have been punk d!')
    else:
        print('File cannot be opened: ', fname)
    # FIX: without exiting here the loop below crashed with a NameError,
    # because fhand was never assigned when open() failed.
    raise SystemExit
count = 0
for line in fhand:
    if line.startswith('Subject:'):
        count += 1
print('There were', count, 'subject line in', fname, '.')
|
__author__ = 'ruben'
__doc__ = 'Query the sqlite database of Buszaki HC database to extract cells, cellType, region. It outputs a txt file'
import sqlite3 as sqlite
tablesToIgnore = ["sqlite_sequence"]
outputFilename = None
def Print(msg):
    """Write *msg* to the global outputFilename (append mode) if one is
    configured, otherwise to stdout.  (Python 2 syntax.)"""
    if (outputFilename != None):
        # Re-open in append mode on every call so output already written
        # survives even if a later query fails.
        outputFile = open(outputFilename, 'a')
        print >> outputFile, msg
        outputFile.close()
    else:
        print msg
def Describe(dbFile):
    """Print a per-table summary (columns, rows, cells) of the SQLite
    database at *dbFile* and return an open (cursor, connection) pair.

    The caller is responsible for closing both handles.
    """
    connection = sqlite.connect(dbFile)
    cursor = connection.cursor()
    Print("TableName\tColumns\tRows\tCells")
    totalTables = 0
    totalColumns = 0
    totalRows = 0
    totalCells = 0
    # Get List of Tables:
    tableListQuery = "SELECT name FROM sqlite_master WHERE type='table' ORDER BY Name"
    cursor.execute(tableListQuery)
    tables = map(lambda t: t[0], cursor.fetchall())
    for table in tables:
        if (table in tablesToIgnore):
            continue
        # Table names come from sqlite_master itself, so the string
        # interpolation below is not fed untrusted input (PRAGMA cannot be
        # parameterized anyway).
        columnsQuery = "PRAGMA table_info(%s)" % table
        cursor.execute(columnsQuery)
        numberOfColumns = len(cursor.fetchall())
        rowsQuery = "SELECT Count() FROM %s" % table
        cursor.execute(rowsQuery)
        numberOfRows = cursor.fetchone()[0]
        numberOfCells = numberOfColumns * numberOfRows
        Print("%s\t%d\t%d\t%d" % (table, numberOfColumns, numberOfRows, numberOfCells))
        totalTables += 1
        totalColumns += numberOfColumns
        totalRows += numberOfRows
        totalCells += numberOfCells
    Print("")
    Print("Number of Tables:\t%d" % totalTables)
    Print("Total Number of Columns:\t%d" % totalColumns)
    Print("Total Number of Rows:\t%d" % totalRows)
    Print("Total Number of Cells:\t%d" % totalCells)
    return cursor, connection
if __name__ == "__main__":
    # Summarize the hc-3 metadata database, then export a per-cell table
    # for one experiment flagging interneurons (cellType == 'i').
    dbFile = '/media/bigdata/hc-3/hc3-metadata-tables/hc3-tables.db'
    cursor, connection = Describe(dbFile)
    experiment = 'ec013.15'
    basep = '/media/bigdata/hc-3/' + experiment
    linear_query = 'SELECT cellType, region, ele, clu FROM cell WHERE topdir={!r}'.format(experiment)
    cursor.execute(linear_query)
    cell_type = cursor.fetchall()
    print '{} Cells found'.format(len(cell_type))
    with open(basep + "/isIntern.txt", "w") as text_f:
        text_f.write("cellId\tisIntern?\tele\tclu\tregion\n")
        for idx, i in enumerate(cell_type):
            # 1 marks an interneuron, 0 anything else.
            t = 1 if i[0] == 'i' else 0
            text_f.write("{}\t{}\t{}\t{}\t{}\n".format(idx + 1, t, i[2], i[3], i[1]))
        print('File saved')
    cursor.close()
    connection.close()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-11-03 11:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated schema (Django 1.10): creates the dag,
    dagEdge, dagVertex and workflow tables.  Generated code — do not edit
    by hand; create a new migration instead."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='dag',
            fields=[
                ('name', models.CharField(default='', max_length=64, primary_key=True, serialize=False)),
                ('user', models.CharField(default='', max_length=64)),
                ('description', models.TextField(default='')),
                ('nvertices', models.IntegerField(default=0)),
                ('root', models.CharField(default='', max_length=64)),
                ('ts_def', models.DateTimeField(blank=True, null=True, verbose_name='ts_def')),
            ],
        ),
        migrations.CreateModel(
            name='dagEdge',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dag', models.CharField(default='', max_length=64)),
                ('name', models.CharField(default='', max_length=64)),
                ('dirpath', models.CharField(default='', max_length=256)),
                ('comment', models.CharField(default='', max_length=256)),
                ('datatype', models.CharField(default='', max_length=64)),
                ('datatag', models.CharField(default='', max_length=64)),
                ('source', models.CharField(default='', max_length=64)),
                ('target', models.CharField(default='', max_length=64)),
            ],
        ),
        migrations.CreateModel(
            name='dagVertex',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', max_length=64)),
                ('dag', models.CharField(default='', max_length=64)),
                ('jobtype', models.CharField(default='', max_length=16)),
                ('payload', models.CharField(default='', max_length=256)),
                ('env', models.TextField(default='{}')),
                ('timelimit', models.PositiveIntegerField(default=1000)),
                ('priority', models.PositiveIntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='workflow',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('uuid', models.CharField(default='', max_length=36)),
                ('user', models.CharField(default='', max_length=64)),
                ('comment', models.TextField(default='')),
                ('rootuuid', models.CharField(default='', max_length=36)),
                ('name', models.CharField(default='', max_length=64)),
                ('description', models.TextField(default='')),
                ('dag', models.CharField(default='', max_length=64)),
                ('state', models.CharField(default='', max_length=16)),
                ('ts_def', models.DateTimeField(blank=True, null=True, verbose_name='ts_def')),
                ('ts_sta', models.DateTimeField(blank=True, null=True, verbose_name='ts_sta')),
                ('ts_sto', models.DateTimeField(blank=True, null=True, verbose_name='ts_sto')),
                ('nvertices', models.IntegerField(default=0)),
                ('ndone', models.IntegerField(default=0)),
            ],
        ),
    ]
|
import numpy as np
import matplotlib.pyplot as plt
# Plot sin(x) over a coarse grid on [0, 0.1).
x = np.arange(0, 0.1, 0.01)
# FIX: the bare Python 2 `print x` statement is a SyntaxError on Python 3;
# print(x) produces the same output on both versions.
print(x)
plt.plot(x, np.sin(x))
plt.show()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 25 10:22:41 2021
@author: holge
"""
import time
import numpy as np
import h5py
import matplotlib.pyplot as plt
import mandelbrot_functions as mf
if __name__ == "__main__":
    # Benchmark several Mandelbrot implementations on the same input mesh.
    # I: maximum iterations, T: divergence threshold, C: complex input mesh.
    I = 100
    T = 2
    C = mf.create_mesh(4096, 4096)
    # C = mf.create_mesh(100, 100)
    numIter = 3  # timing repetitions per implementation
    # If save_data is true, the plots are saved to pdf-files
    # and the input/output data is saved to a HDF-file
    save_data = True

    print('GPU implementation')
    GPU_times = []
    for i in range(numIter):
        start = time.time()
        heatmap_gpu = mf.mandelbrot_gpu(C, T, I)
        GPU_times.append(time.time() - start)
    GPU_mean_time = np.mean(GPU_times)
    print(f'Mean execution time:{GPU_mean_time:.2f} seconds\n')
    plt.imshow(heatmap_gpu, cmap='hot', extent=[-2, 1, -1.5, 1.5])
    plt.title(f'Implementation: GPU, Time: {GPU_mean_time:.2f} seconds')
    if save_data:
        mf.export_figure_matplotlib(heatmap_gpu, 'Mandelbrot_gpu.pdf')
    plt.show()

    print("Naive implementation")
    naive_times = []
    for i in range(numIter):
        start = time.time()
        heatmap_naive = mf.mandelbrot_naive(C, T, I)
        naive_times.append((time.time() - start))
    naive_mean_time = np.mean(naive_times)
    print(f'Execution time:{naive_mean_time:.2f} seconds\n')
    plt.imshow(heatmap_naive, cmap='hot', extent=[-2, 1, -1.5, 1.5])
    plt.title(f'Implementation: Naive, Time: {naive_mean_time:.2f} seconds')
    if save_data:
        mf.export_figure_matplotlib(heatmap_naive, 'Mandelbrot_naive.pdf')
    plt.show()

    print("Vectorized implementation")
    data = [C, T, I]
    vector_times = []
    for i in range(numIter):
        start = time.time()
        heatmap_vector = mf.mandelbrot_vector(data)
        vector_times.append(time.time() - start)
    vector_mean_time = np.mean(vector_times)
    print(f'Execution time:{vector_mean_time:.2f} seconds\n')
    plt.imshow(heatmap_vector, cmap='hot', extent=[-2, 1, -1.5, 1.5])
    plt.title(f'Implementation: Vectorized, Time: {vector_mean_time:.2f} seconds')
    if save_data:
        mf.export_figure_matplotlib(heatmap_vector, 'Mandelbrot_vector.pdf')
    plt.show()

    print("Numba implementation")
    # Run once to compile numba code (JIT warm-up is excluded from timing)
    mf.mandelbrot_numba(C, T, I)
    numba_times = []
    for i in range(numIter):
        start = time.time()
        heatmap_numba = mf.mandelbrot_numba(C, T, I)
        numba_times.append(time.time() - start)
    numba_mean_time = np.mean(numba_times)
    print(f'Execution time:{numba_mean_time:.2f} seconds\n')
    plt.imshow(heatmap_numba, cmap='hot', extent=[-2, 1, -1.5, 1.5])
    plt.title(f'Implementation: Numba, Time: {numba_mean_time:.2f} seconds')
    if save_data:
        mf.export_figure_matplotlib(heatmap_numba, 'Mandelbrot_numba.pdf')
    plt.show()

    print("Cython implementation using naive function")
    cython_naive_times = []
    for i in range(numIter):
        start = time.time()
        heatmap_cython_naive = mf.mandelbrot_naive_cython(C, T, I)
        cython_naive_times.append(time.time() - start)
    cython_naive_mean_time = np.mean(cython_naive_times)
    print(f'Execution time: {cython_naive_mean_time:.2f} seconds\n')
    plt.imshow(heatmap_cython_naive, cmap='hot', extent=[-2, 1, -1.5, 1.5])
    plt.title(f'Implementation: Cython naive, Time: {cython_naive_mean_time:.2f} seconds')
    if save_data:
        mf.export_figure_matplotlib(heatmap_cython_naive, 'Mandelbrot_cython_naive.pdf')
    plt.show()

    print("Cython implementation using vector function")
    cython_vector_times = []
    for i in range(numIter):
        start = time.time()
        heatmap_cython_vector = mf.mandelbrot_vector_cython([C, T, I])
        cython_vector_times.append(time.time() - start)
    cython_vector_mean_time = np.mean(cython_vector_times)
    print(f'Execution time: {cython_vector_mean_time:.2f} seconds\n')
    # FIX: this section previously plotted/exported heatmap_cython_naive,
    # so the cython-vector result was never shown or saved.
    plt.imshow(heatmap_cython_vector, cmap='hot', extent=[-2, 1, -1.5, 1.5])
    plt.title(f'Implementation: Cython vector, Time: {cython_vector_mean_time:.2f} seconds')
    if save_data:
        mf.export_figure_matplotlib(heatmap_cython_vector, 'Mandelbrot_cython_vector.pdf')
    plt.show()

    print("Multiprocessing implementation using vector function")
    processors = 12
    parallel_vector_times = []
    for i in range(numIter):
        start = time.time()
        heatmap_parallel = mf.mandelbrot_parallel_vector(C, T, I, processors, 512, 8)
        # heatmap_parallel = mf.mandelbrot_parallel_vector(C, T, I, processors, 20, 5)
        parallel_vector_times.append(time.time() - start)
    parallel_vector_mean_time = np.mean(parallel_vector_times)
    print(f'Execution time using {processors} cores: {parallel_vector_mean_time:.2f} seconds\n')
    plt.imshow(heatmap_parallel, cmap='hot', extent=[-2, 1, -1.5, 1.5])
    plt.title(f'Implementation: Parallel vectorized, Time: {parallel_vector_mean_time:.2f} seconds')
    if save_data:
        mf.export_figure_matplotlib(heatmap_parallel, 'Mandelbrot_parallel.pdf')
    plt.show()

    print('Distributed vector implementation')
    processors = 12
    distributed_vector_times = []
    for i in range(numIter):
        start = time.time()
        heatmap_dist_vec = mf.mandelbrot_distributed_vector(C, T, I, processors, 512, 8)
        # heatmap_dist_vec = mf.mandelbrot_distributed_vector(C, T, I, processors, 20, 5)
        distributed_vector_times.append(time.time() - start)
    distributed_vector_mean_time = np.mean(distributed_vector_times)
    print(f'Execution time using {processors} cores: {distributed_vector_mean_time:.2f} seconds\n')
    plt.imshow(heatmap_dist_vec, cmap='hot', extent=[-2, 1, -1.5, 1.5])
    plt.title(f'Implementation: Distributed vectorized, Time: {distributed_vector_mean_time:.2f} seconds')
    if save_data:
        mf.export_figure_matplotlib(heatmap_dist_vec, 'Mandelbrot_distributed.pdf')
    plt.show()

    if save_data:
        # FIX: mode 'a' creates the file when missing; 'r+' required it to
        # already exist.  (create_group still fails if the groups already
        # exist from a previous run.)
        f = h5py.File('mandelbrot_data', 'a')
        input_group = f.create_group('input')
        input_group.create_dataset('complex_input_plane', data=C)
        input_group.create_dataset('threshold_value', data=T)
        input_group.create_dataset('maximum_iterations', data=I)
        output_group = f.create_group('outputs')
        output_group.create_dataset('Naive_implementation', data=heatmap_naive)
        output_group.create_dataset('Vectorized_implementation', data=heatmap_vector)
        output_group.create_dataset('Numba_implementation', data=heatmap_numba)
        output_group.create_dataset('Cython_implementation_using_naive_function', data=heatmap_cython_naive)
        # FIX: the vector entry previously stored the *naive* heatmap.
        output_group.create_dataset('Cython_implementation_using_vector_function', data=heatmap_cython_vector)
        output_group.create_dataset('Multiprocessing_implementation_using_vector_function', data=heatmap_parallel)
        output_group.create_dataset('GPU_implementation', data=heatmap_gpu)
        output_group.create_dataset('Distributed_vector_implementation', data=heatmap_dist_vec)
        time_group = f.create_group('times')
        time_group.create_dataset('Naive_implementation', data=naive_times)
        time_group.create_dataset('Vectorized_implementation', data=vector_times)
        time_group.create_dataset('Numba_implementation', data=numba_times)
        time_group.create_dataset('Cython_implementation_using_naive_function', data=cython_naive_times)
        time_group.create_dataset('Cython_implementation_using_vector_function', data=cython_vector_times)
        time_group.create_dataset('Multiprocessing_implementation_using_vector_function', data=parallel_vector_times)
        time_group.create_dataset('GPU_implementation', data=GPU_times)
        time_group.create_dataset('Distributed_vector_implementation', data=distributed_vector_times)
        f.close()
|
import os
import sys
# Plot the scatter of generated test case for (y=x**2)
# For each saved epoch file "<epoch>.txt" of (a, b) pairs, report the
# fraction of "good" generated test cases for y = x**2, then render a
# scatter plot per epoch with an external R script.
epoch_interval = 500
total_epoch = 20000
for i in range(0, total_epoch, epoch_interval):
    # FIX: reset the counters per epoch.  They previously accumulated
    # across files, so the printed "rate of epoch i" was actually the
    # cumulative rate over epochs 0..i.
    total = 0
    good = 0
    with open(str(i) + '.txt', 'r') as f:
        for line in f:
            a, b = line.split(',')
            # Condition of good test case
            if abs(float(b) - float(a) ** 2) <= 0.05:
                good += 1
            total += 1
    print("Good test case rate of epoch " + str(i) + " is: %0.2f" % (good / total))
for i in range(0, total_epoch, epoch_interval):
    cmd = 'Rscript plot.r -i ' + str(i) + '.txt -o ' + str(i) + '.png'
    os.system('/bin/bash -c ' + '\"' + cmd + '\"')
|
def m_hamming_distance(s1, s2):
    """Return the Hamming distance between two equal-length strings.

    NOTE(review): when the lengths differ this function (oddly) returns
    the *shorter string itself* instead of a number — behavior preserved
    for compatibility with existing callers.
    """
    if len(s1) < len(s2):
        return s1
    if len(s1) > len(s2):
        return s2
    return sum(1 for c1, c2 in zip(s1, s2) if c1 != c2)
if __name__ == "__main__":
    # Demo: the first two pairs have equal lengths and print a distance;
    # the last pair has unequal lengths and therefore prints the shorter
    # input string (see m_hamming_distance).
    print(m_hamming_distance("Silvio", "Peroni"))
    print(m_hamming_distance("Silvio", "Silvia"))
    print(m_hamming_distance("Silvio", "Tiziana"))
import torch
from torch import nn
from models.resnet import resnet50
class PCBModel(nn.Module):
    """Part-based Convolutional Baseline (PCB) re-identification model.

    Splits the ResNet-50 feature map into `num_parts` horizontal stripes,
    embeds each stripe with its own bottleneck head, and — when num_class
    is given — classifies each stripe separately during training.
    """

    def __init__(self, num_class=None, num_parts=6, bottleneck_dims=256, pool_type="avg", share_embed=False):
        super(PCBModel, self).__init__()
        assert pool_type in ['max', 'avg']
        self.backbone = resnet50(pretrained=True, last_stride=1)
        # FIX: the two pooling layers were swapped — "max" previously built
        # an AdaptiveAvgPool2d and "avg" an AdaptiveMaxPool2d.
        if pool_type == "max":
            self.part_pool = nn.AdaptiveMaxPool2d((num_parts, 1))
        else:
            self.part_pool = nn.AdaptiveAvgPool2d((num_parts, 1))
        # Per-part embedding heads; with share_embed every part reuses the
        # very same module (shared weights).
        if share_embed:
            embed = nn.Sequential(nn.Linear(2048, bottleneck_dims, bias=False),
                                  nn.BatchNorm1d(num_features=bottleneck_dims),
                                  nn.ReLU(inplace=True))
            self.embed = nn.ModuleList([embed for _ in range(num_parts)])
        else:
            self.embed = nn.ModuleList([nn.Sequential(nn.Linear(2048, bottleneck_dims, bias=False),
                                                      nn.BatchNorm1d(num_features=bottleneck_dims),
                                                      nn.ReLU(inplace=True)) for _ in range(num_parts)])
        self.classifier = None
        if num_class is not None:
            self.classifier = nn.ModuleList(
                [nn.Linear(bottleneck_dims, num_class, bias=False) for _ in range(num_parts)])

    def forward(self, x):
        """Return per-part logits (training) or concatenated part embeddings (eval)."""
        x = self.backbone_forward(x)
        # part pooling: (B, 2048, num_parts, 1) -> (B, 2048, num_parts)
        x = self.part_pool(x)
        x = x.squeeze(-1)
        if not self.training:
            return self.eval_forward(x)
        else:
            return self.train_forward(x)

    def backbone_forward(self, x):
        # Standard ResNet-50 stem + 4 stages (last_stride=1 keeps the final
        # feature map at higher resolution for part pooling).
        x = self.backbone.conv1(x)
        x = self.backbone.bn1(x)
        x = self.backbone.relu(x)
        x = self.backbone.maxpool(x)
        x = self.backbone.layer1(x)
        x = self.backbone.layer2(x)
        x = self.backbone.layer3(x)
        x = self.backbone.layer4(x)
        return x

    def train_forward(self, x):
        # One classifier per part; logits are stacked along the batch dim.
        part_logits = []
        for i in range(x.size(2)):
            embed = self.embed[i](x[:, :, i])
            embed = self.classifier[i](embed)
            part_logits.append(embed)
        return torch.cat(part_logits, dim=0)

    def eval_forward(self, x):
        # Concatenate per-part embeddings into one descriptor per image.
        embeds = []
        for i in range(x.size(2)):
            embed = self.embed[i](x[:, :, i])
            embeds.append(embed)
        return torch.cat(embeds, dim=1)
|
def reverseWord(s):
    """Return the characters of *s* in reverse order."""
    return ''.join(reversed(s))
if __name__ == "__main__":
    # Read t test cases from stdin and echo each string reversed.
    t = int(input())
    while(t>0):
        s = input()
        print(reverseWord(s))
        t = t-1
from configparser import ConfigParser
def getConfig(file_name, encoding='utf-8'):
    """Parse an INI file and flatten it into a dict keyed by the plain
    concatenation of section name and option name.

    NOTE(review): distinct (section, option) pairs can collide under this
    key scheme (e.g. 'ab'+'c' vs 'a'+'bc') — confirm callers rely on it
    before changing.
    """
    parser = ConfigParser()
    parser.read(file_name, encoding=encoding)
    return {
        section + option: parser.get(section, option)
        for section in parser.sections()
        for option in parser.options(section)
    }
import sys
# Make the OpenSG ref-counting test helpers importable.
sys.path.append("/home/hecher/dev/Projekte/OSGExt/Scripting/Example/TestRefCounting")
from MFRefCountTest import (MFRecPtrAccessTest, MFUnrecPtrAccessTest, MFWeakPtrAccessTest)
from SFRefCountTest import (SFRecPtrAccessTest, SFUnrecPtrAccessTest, SFWeakPtrAccessTest, SingleFieldTest)
# Global registry of tests, filled by the add*Tests helpers and run by init().
tests = []
def addMFPtrTests():
    # Register multi-field pointer access tests (2 elements each).
    # NOTE(review): _fieldAccessHandler is not defined in this module; it is
    # presumably injected by the embedding OpenSG scripting host — confirm.
    tests.append(MFRecPtrAccessTest ('mMFRecNodePtr' , 2, _fieldAccessHandler))
    tests.append(MFUnrecPtrAccessTest('mMFUnrecNodePtr', 2, _fieldAccessHandler))
    tests.append(MFWeakPtrAccessTest ('mMFWeakNodePtr' , 2, _fieldAccessHandler))
def addSFPtrTests():
    # Register single-field pointer access tests; the unrec/weak variants
    # are currently disabled.
    tests.append(SFRecPtrAccessTest ('mSFRecNodePtr' , _fieldAccessHandler))
    #tests.append(SFRecPtrAccessTest ('mSFUnrecNodePtr', _fieldAccessHandler))
    #tests.append(SFRecPtrAccessTest ('mSFWeakNodePtr' , _fieldAccessHandler))
def addMFValueTests():
    # Placeholder: multi-field value tests not implemented yet.
    pass
def addSFValueTests():
    # Placeholder: single-field value tests not implemented yet.
    pass
def init():
    """Scripting-host entry point: register all tests, then run each one,
    printing a banner plus a success/failure line per field."""
    addMFPtrTests()
    addSFPtrTests()
    addMFValueTests()
    addSFValueTests()
    for test in tests:
        fieldname = test.get_fieldname()
        print('\n\n-----------------------------------------------------------------')
        print(' STARTING TEST ON "' + fieldname + '"\n')
        if test.do_test():
            print('\n SUCCESSFULLY COMPLETED ON "' + fieldname + '".')
        else:
            print('\n FAILED ON "' + fieldname + '".')
        print('-----------------------------------------------------------------')
    return
def shutdown():
    # Scripting-host hook; nothing to clean up.
    pass
def frame(timeStamp, frameCount):
    # Per-frame scripting-host hook; unused by this test script.
    pass
def changed(whichField, origin, details):
    # Field-change scripting-host hook; unused by this test script.
    pass
def merge_sort(list):
    """Sort *list* in place with merge sort and return it.

    FIX: the original returned None for inputs of length <= 1 while
    returning the sorted list otherwise; the list is now returned in
    every case.  (The parameter keeps its original builtin-shadowing
    name `list` for caller compatibility.)
    """
    if len(list) <= 1:
        return list
    mid = len(list) // 2
    left = list[:mid]
    right = list[mid:]
    # The recursive calls sort the two halves in place.
    merge_sort(left)
    merge_sort(right)
    return merge_two_lists(list, left, right)


def merge_two_lists(list, a, b):
    """Merge the sorted lists *a* and *b* into *list* in place and return it."""
    i = j = k = 0
    # Take the smaller head element until one source is exhausted.
    while i < len(a) and j < len(b):
        if a[i] < b[j]:
            list[k] = a[i]
            i = i + 1
        else:
            list[k] = b[j]
            j = j + 1
        k = k + 1
    # Copy whichever tail remains.
    while i < len(a):
        list[k] = a[i]
        i = i + 1
        k = k + 1
    while j < len(b):
        list[k] = b[j]
        j = j + 1
        k = k + 1
    return list
# Demo: merge_sort sorts in place and returns the list.
# NOTE(review): the name `list` shadows the builtin at module level.
list=[1,6,4,5,3,2,3,5,7,8,9,0]
print(merge_sort(list))
import pytest
from saq.database import Remediation, get_db_connection, User
from saq.remediation import *
# Each case gives the expected (processing, state, css, restore_key) for a
# target whose remediation history is pre-seeded with the listed rows.
@pytest.mark.parametrize('processing, state, css, restore_key, history', [
    (False, 'new', '', None, []),
    (True, 'new', '', None,
        [
            Remediation(
                action = REMEDIATION_ACTION_REMOVE,
                type = 'email',
                key = '<test>|jdoe@site.com',
                successful = True,
                user_id = 1,
                restore_key = None,
            ),
        ],
    ),
    (True, 'removing', 'warning', 'hello',
        [
            Remediation(
                action = REMEDIATION_ACTION_REMOVE,
                type = 'email',
                key = '<test>|jdoe@site.com',
                successful = True, user_id = 1,
                restore_key = 'hello',
                status = REMEDIATION_STATUS_IN_PROGRESS,
            ),
        ],
    ),
    (True, 'removing', 'danger', 'hello',
        [
            Remediation(
                action = REMEDIATION_ACTION_REMOVE,
                type = 'email',
                key = '<test>|jdoe@site.com',
                successful = False,
                user_id = 1,
                restore_key = 'hello',
                status = REMEDIATION_STATUS_IN_PROGRESS,
            ),
        ],
    ),
    (False, 'removed', 'success', 'hello',
        [
            Remediation(
                action = REMEDIATION_ACTION_REMOVE,
                type = 'email',
                key = '<test>|jdoe@site.com',
                successful = True,
                user_id = 1,
                restore_key = 'hello',
                status = REMEDIATION_STATUS_COMPLETED,
            ),
        ],
    ),
    (False, 'remove failed', 'danger', 'hello',
        [
            Remediation(
                action = REMEDIATION_ACTION_REMOVE,
                type = 'email',
                key = '<test>|jdoe@site.com',
                successful = False,
                user_id = 1,
                restore_key = 'hello',
                status = REMEDIATION_STATUS_COMPLETED,
            ),
        ],
    ),
    (False, 'restored', 'success', 'hello',
        [
            Remediation(
                action = REMEDIATION_ACTION_REMOVE,
                type = 'email',
                key = '<test>|jdoe@site.com',
                successful = True,
                user_id = 1,
                restore_key = 'hello',
                status = REMEDIATION_STATUS_COMPLETED,
            ),
            Remediation(
                action = REMEDIATION_ACTION_RESTORE,
                type = 'email',
                key = '<test>|jdoe@site.com',
                successful = True,
                user_id = 1,
                restore_key = None,
                status = REMEDIATION_STATUS_COMPLETED,
            ),
        ],
    ),
    (False, 'restored', 'success', 'world',
        [
            Remediation(
                action = REMEDIATION_ACTION_REMOVE,
                type = 'email',
                key = '<test>|jdoe@site.com',
                successful = True,
                user_id = 1,
                restore_key = 'hello',
                status = REMEDIATION_STATUS_COMPLETED,
            ),
            Remediation(
                action = REMEDIATION_ACTION_RESTORE,
                type = 'email',
                key = '<test>|jdoe@site.com',
                successful = True,
                user_id = 1,
                restore_key = 'world',
                status = REMEDIATION_STATUS_COMPLETED,
            ),
        ],
    ),
])
@pytest.mark.integration
def test_remediation_target(processing, state, css, restore_key, history):
    """A RemediationTarget derives its display state (processing flag,
    human-readable state, CSS class, last restore key) from its stored
    remediation history rows."""
    # add all remediation history
    for remediation in history:
        remediation.user_id = saq.AUTOMATION_USER_ID
        saq.db.add(remediation)
    saq.db.commit()
    # instantiate a remediation target
    target = RemediationTarget('email', '<test>|jdoe@site.com')
    # validate target properties
    assert target.processing == processing
    assert target.state == state
    assert target.css_class == css
    assert target.last_restore_key == restore_key
@pytest.mark.integration
def test_remediation_target_id():
    """A target rebuilt from another target's id refers to the same target."""
    # instantiate a target from the id of another and ensure they are the same target
    target1 = RemediationTarget('email', '<test>|jdoe@site.com')
    target2 = RemediationTarget(id=target1.id)
    assert target2.type == target1.type
    assert target2.value == target1.value
    assert target2.id == target1.id
@pytest.mark.integration
def test_remediation_target_queue():
    """Queueing a removal makes the target visible to the RemediationService
    with the expected row contents and service lock."""
    # fetch targets with Remediation service
    service = RemediationService()
    targets = service.get_targets()
    assert len(targets) == 0
    # queue a remediation of a target
    target = RemediationTarget('email', '<test>|jdoe@site.com')
    target.queue(REMEDIATION_ACTION_REMOVE, saq.AUTOMATION_USER_ID)
    # fetch targets with Remediation service
    targets = service.get_targets()
    assert len(targets) == 1
    assert targets[0].type == target.type
    assert targets[0].key == target.value
    assert targets[0].restore_key is None
    assert targets[0].user_id == saq.AUTOMATION_USER_ID
    assert targets[0].action == REMEDIATION_ACTION_REMOVE
    assert targets[0].status == REMEDIATION_STATUS_IN_PROGRESS
    assert targets[0].successful
    # get_targets() locks fetched rows to this service instance
    assert targets[0].lock == service.uuid
    assert targets[0].lock_time is not None
@pytest.mark.integration
def test_remediation_target_stop_remediation():
    """stop_remediation() marks all pending history rows as completed and
    unsuccessful."""
    # queue a target for removal
    target = RemediationTarget('email', '<test>|jdoe@site.com')
    target.queue(REMEDIATION_ACTION_REMOVE, saq.AUTOMATION_USER_ID)
    # reload target
    target = RemediationTarget('email', '<test>|jdoe@site.com')
    # make sure the target was queued
    assert len(target.history) == 1
    assert target.history[0].status == 'NEW'
    # stop all remediations for the target
    target.stop_remediation()
    # reload target
    target = RemediationTarget('email', '<test>|jdoe@site.com')
    assert target.history[0].status == 'COMPLETED'
    assert target.history[0].successful == False
class MockRemediator(Remediator):
    """Test double for Remediator: remove() always reports a pre-canned result."""
    def __init__(self, config_section, result):
        # Deliberately bypasses Remediator.__init__ — no config loading needed.
        self.name = config_section
        self.config = {}
        self.result = result
    @property
    def type(self):
        # Remediation type this remediator claims to handle.
        return 'email'
    def remove(self, target):
        # Ignore the target; return the configured outcome.
        return self.result
# Each case: outcomes of two remediators and the expected combined
# (status, success, restore_key) recorded on the history row.
@pytest.mark.parametrize('result1, result2, status, success, restore_key', [
    (RemediationSuccess('hello', restore_key='test'), RemediationSuccess('world'), REMEDIATION_STATUS_COMPLETED, True, 'test'),
    (RemediationSuccess('hello'), RemediationSuccess('world'), REMEDIATION_STATUS_COMPLETED, True, None),
    (RemediationSuccess('hello'), RemediationDelay('world'), REMEDIATION_STATUS_IN_PROGRESS, True, None),
    (RemediationSuccess('hello'), RemediationError('world'), REMEDIATION_STATUS_IN_PROGRESS, False, None),
    (RemediationSuccess('hello'), RemediationFailure('world'), REMEDIATION_STATUS_COMPLETED, False, None),
    (RemediationSuccess('hello'), RemediationIgnore('world'), REMEDIATION_STATUS_COMPLETED, True, None),
    (RemediationDelay('hello'), RemediationDelay('world'), REMEDIATION_STATUS_IN_PROGRESS, True, None),
    (RemediationDelay('hello'), RemediationError('world'), REMEDIATION_STATUS_IN_PROGRESS, False, None),
    (RemediationDelay('hello'), RemediationFailure('world'), REMEDIATION_STATUS_IN_PROGRESS, False, None),
    (RemediationDelay('hello'), RemediationIgnore('world'), REMEDIATION_STATUS_IN_PROGRESS, True, None),
    (RemediationError('hello'), RemediationError('world'), REMEDIATION_STATUS_IN_PROGRESS, False, None),
    (RemediationError('hello'), RemediationFailure('world'), REMEDIATION_STATUS_IN_PROGRESS, False, None),
    (RemediationError('hello'), RemediationIgnore('world'), REMEDIATION_STATUS_IN_PROGRESS, False, None),
    (RemediationFailure('hello'), RemediationFailure('world'), REMEDIATION_STATUS_COMPLETED, False, None),
    (RemediationFailure('hello'), RemediationIgnore('world'), REMEDIATION_STATUS_COMPLETED, False, None),
    (RemediationIgnore('hello'), RemediationIgnore('world'), REMEDIATION_STATUS_COMPLETED, False, None),
])
@pytest.mark.integration
def test_remediation(result1, result2, status, success, restore_key):
    """The service combines the results of multiple remediators into a
    single status/success/restore_key on the target's history row."""
    # setup a test remediation service with two canned remediators
    service = RemediationService()
    service.remediators.append(MockRemediator('test1', result1))
    service.remediators.append(MockRemediator('test2', result2))
    # queue target
    RemediationTarget('email', '<test>|jdoe@site.com').queue(REMEDIATION_ACTION_REMOVE, saq.AUTOMATION_USER_ID)
    # remediate target with remediation service
    target = service.get_targets()[0]
    service.remediate(target)
    # verify results
    target = RemediationTarget('email', '<test>|jdoe@site.com')
    assert target.history[0].status == status
    assert target.history[0].successful == success
    assert target.history[0].restore_key == restore_key
# this is an integration test because I don't have a way to mock the email_archive database
@pytest.mark.integration
def test_message_id_remediation_targets():
    """A MessageIDObservable's remediation targets must cover every recipient.

    Seeds the email_archive database with one archived message ("<test>")
    having an env_to and a body_to recipient, plus one prior remediation
    history row for a third address, then asserts all three show up as
    'email|<message_id>|<recipient>' targets.
    """
    from saq.observables import MessageIDObservable
    # clear all tables so the assertions below see only this test's rows
    with get_db_connection("email_archive") as db:
        c = db.cursor()
        c.execute("DELETE FROM archive_search", None)
        c.execute("DELETE FROM archive", None)
        c.execute("DELETE FROM archive_server", None)
        db.commit()
    saq.db.execute(Remediation.__table__.delete())
    saq.db.execute(User.__table__.delete())
    # add a test user (Remediation rows require a valid user_id)
    user = User(username="jsmith", email="john.smith@site.com")
    user.password = 'password'
    saq.db.add(user)
    saq.db.commit()
    user_id = saq.db.query(User).one().id
    # insert some recipients to test
    with get_db_connection("email_archive") as db:
        c = db.cursor()
        c.execute("INSERT INTO archive_server (hostname) VALUES (%s)", ('localhost',))
        c.execute("SELECT server_id FROM archive_server", None)
        server_id = c.fetchone()[0]
        # md5 value is arbitrary bytes - only the row's existence matters here
        c.execute("INSERT INTO archive (server_id, md5) VALUES (%s, %s)", (server_id, b'\x13\0\0\0\x08\0'))
        c.execute("SELECT archive_id FROM archive", None)
        archive_id = c.fetchone()[0]
        sql = "INSERT INTO `archive_search` (archive_id, field, value) VALUES (%s, %s, %s)"
        c.execute(sql, (archive_id, 'message_id', '<test>'))
        c.execute(sql, (archive_id, 'env_to', 'john@site.com'))
        c.execute(sql, (archive_id, 'body_to', 'jane@site.com'))
        db.commit()
    # add some remediation history (a third recipient known only from history)
    history = Remediation(type='email', key="<test>|foo@site.com", action='remove', user_id=user_id)
    saq.db.add(history)
    saq.db.commit()
    # get remediation targets for MessageIDObservable
    message_id = MessageIDObservable("<test>")
    targets = message_id.remediation_targets
    assert len(targets) == 3
    target_strings = []
    for target in targets:
        target_strings.append(f"{target.type}|{target.value}")
    assert 'email|<test>|foo@site.com' in target_strings
    assert 'email|<test>|john@site.com' in target_strings
    assert 'email|<test>|jane@site.com' in target_strings
|
# 3. In an array of random integers, swap the minimum and maximum elements.
import random

# nine values; element i is drawn uniformly from 1..i
a = [random.randint(1, i) for i in range(1, 10)]

# Locate the first positions of the extremes.  Using min/max over the index
# range (instead of the original hard-coded sentinels 10 and 0) works for
# any value range, not just values in 1..9.
min_pos = min(range(len(a)), key=a.__getitem__)
max_pos = max(range(len(a)), key=a.__getitem__)

print(a)
# tuple assignment swaps in one step (also a safe no-op when min_pos == max_pos)
a[min_pos], a[max_pos] = a[max_pos], a[min_pos]
print(a)
def euler581(limit=10**13):
    """Project Euler 581: sum n such that n and n+1 are both 47-smooth.

    Enumerates all 47-smooth numbers up to *limit* by multiplying previously
    found smooth numbers by each prime in turn (the inner ``for`` deliberately
    iterates the list while it grows, so powers of the current prime are
    picked up in the same pass).  For every smooth n with a smooth successor,
    n is added to the running total.

    :param limit: upper bound for generated smooth numbers
                  (default 10**13, the problem's bound; kept for
                  backward compatibility with the original no-arg call)
    :return: the sum of all qualifying n
    """
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]

    def is_smooth(n):
        # divide out every allowed prime; n is smooth iff nothing remains
        for p in primes:
            while n % p == 0:
                n //= p  # integer division keeps n an int (py2 code used /=)
        return n == 1

    seen = {}            # smooth n already counted (avoid double counting)
    smooth_numbers = [1]
    total = 0            # renamed from 'sum' - don't shadow the builtin
    total2 = 0           # sum of triangular numbers T(n), kept from original
    for p in primes:
        print(p, len(smooth_numbers))  # progress output, as in the original
        for n in smooth_numbers:
            if n not in seen and is_smooth(n + 1):
                seen[n] = True
                total += n
                total2 += n * (n + 1) // 2
            if n * p <= limit:
                # extend while iterating: powers of p are processed too
                smooth_numbers.append(n * p)
    print(total2)
    print(len(smooth_numbers))
    return total

if __name__ == '__main__':
    # guarded so importing this module no longer kicks off the full search
    print(euler581())
#_____________________
# simulationsOutput.py
#_____________________
from ...path.modulePath import ModulePath
from ..io.readLists import readFileProcesses
from ..io.readLists import readFileMinValues
from ..io.readLists import readFileLevels
from ..io.readLists import readFileLabels
from ..io.navigate import *
from ..fields.defineFields import defineFields
from ..timeSelection.defaultTSelect import makeSelectXtimesNt
from simulationsConfiguration import SimulationsConfiguration
#__________________________________________________
def buildSimulationsOutput(config):
    """Build a SimulationsOutput from a configuration object.

    :param config: object exposing outputDir, sessionName, workName and
                   (optionally) xTSelect attributes
    :return: a configured SimulationsOutput instance

    If the time-selection function cannot be built (missing or invalid
    config.xTSelect), fall back to no time selection.
    """
    try:
        funTSelect = makeSelectXtimesNt(config.xTSelect)
    except Exception:
        # narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any ordinary failure still means
        # "no time selection"
        funTSelect = None
    return SimulationsOutput(config.outputDir,
                             config.sessionName,
                             config.workName,
                             funTSelect)
#__________________________________________________
class SimulationsOutput:
    """Catalogue of every directory and file path used by a simulation session.

    Builds, from an output directory / session name / work name triple, the
    full tree of configuration files, launcher scripts, statistics, optimal
    transport (OT2D / OTGS) results and figure directories, and loads the
    session's process list, minimum values, levels and labels.

    All *Dir / file* methods are pure string builders: they concatenate path
    fragments and never touch the filesystem.
    """
    def __init__(self, outputDir, sessionName, workName, funTSelect=None):
        self.modulePath = ModulePath()
        self.outputDir = outputDir
        self.sessionName = sessionName
        self.workName = workName
        #_________________________
        # session-level configuration files
        self.configDir = self.outputDir + self.sessionName + 'config/'
        self.fileMinValues = self.configDir + 'min_values.dat'
        self.fileProcesses = self.outputDir + self.sessionName + 'list_processes.dat'
        self.fileLevels = self.configDir + 'levels.dat'
        self.fileLabels = self.outputDir + self.sessionName + 'list_labels.dat'
        #_________________________
        # work-level output directories
        self.workingDir = self.outputDir + self.sessionName + self.workName
        self.statDir = self.workingDir + 'statistics/'
        self.scalingDir = self.workingDir + 'scaling/'
        self.launcherDir = self.workingDir + 'launchers/'
        self.figDir = self.workingDir + 'figures/'
        self.OTDir = self.workingDir + 'optimalTransport/'
        self.OT2DDir = self.OTDir + 'OT2D/'
        self.OTGSDir = self.OTDir + 'OTGS/'
        self.applyOTGSDir = self.OTDir + 'applyOTGS/'
        #_________________________
        # session data loaded from the configuration files above
        self.simConfig = SimulationsConfiguration(self.configDir)
        self.procList = readFileProcesses(self.fileProcesses)
        self.minValues = readFileMinValues(self.fileMinValues)
        self.levels = readFileLevels(self.fileLevels)
        self.labelList = readFileLabels(self.fileLabels)
        self.funTSelect = funTSelect
        self.fieldList = defineFields(self)
        #_________________________
        # preprocessing launcher files
        self.launcherPreprocessRawDataDir = self.launcherDir + 'preprocess/preprocessRawData/'
        self.pythonLauncherPreprocessRawData = self.launcherPreprocessRawDataDir + 'preprocessRawData.py'
        self.bashLauncherPreprocessRawData = self.launcherPreprocessRawDataDir + 'preprocessRawData.sh'
        self.fileProcessesPreprocessRawData = self.launcherPreprocessRawDataDir + 'processesPreprocessRawData.dat'
        self.configFilePreprocessRawData = self.launcherPreprocessRawDataDir + 'preprocessRawData.cfg'
        self.fileLogPreprocessRawData = self.launcherPreprocessRawDataDir + 'logPreprocessRawData'
        self.fileNodesPreprocessRawData = self.launcherPreprocessRawDataDir + 'nodesPreprocessRawData.dat'
        #_________________________
        # statistical-analysis launcher files
        self.launcherStatisticalAnalyseFiles = {}
        self.launcherStatisticalAnalyseFiles['directory'] = self.launcherDir + 'statisticalAnalyse/performStatisticalAnalyse/'
        self.launcherStatisticalAnalyseFiles['pyLauncher'] = self.launcherStatisticalAnalyseFiles['directory'] + 'performStatisticalAnalyse.py'
        self.launcherStatisticalAnalyseFiles['shLauncher'] = self.launcherStatisticalAnalyseFiles['directory'] + 'performStatisticalAnalyse.sh'
        self.launcherStatisticalAnalyseFiles['processes'] = self.launcherStatisticalAnalyseFiles['directory'] + 'processesPerformStatisticalAnalyse.dat'
        self.launcherStatisticalAnalyseFiles['config'] = self.launcherStatisticalAnalyseFiles['directory'] + 'performStatisticalAnalyse.cfg'
        self.launcherStatisticalAnalyseFiles['nodes'] = self.launcherStatisticalAnalyseFiles['directory'] + 'nodesPerformStatisticalAnalyse.dat'
        self.launcherStatisticalAnalyseFiles['log'] = self.launcherStatisticalAnalyseFiles['directory'] + 'logPerformStatisticalAnalyse'
        #_________________________
        # optimal-transport (gray scale) launcher files
        self.launcherOTDir = self.launcherDir + 'optimalTransport/'
        self.launcherOTGSDir = self.launcherOTDir + 'GS/'
        self.launcherInterpolateIntoOTGSResolutionDir = self.launcherOTGSDir + 'interpolateIntoOTGSResolution/'
        self.configFileInterpolateIntoOTGSResolution = self.launcherInterpolateIntoOTGSResolutionDir + 'interpolateIntoOTGSResolution.cfg'
        self.fileProcessesInterpolateIntoOTGSResolution = self.launcherInterpolateIntoOTGSResolutionDir + 'processesInterpolateIntoOTGSResolution.dat'
        self.fileLogInterpolateIntoOTGSResolution = self.launcherInterpolateIntoOTGSResolutionDir + 'logInterpolateIntoOTGSResolution'
        self.fileNodesInterpolateIntoOTGSResolution = self.launcherInterpolateIntoOTGSResolutionDir + 'nodesInterpolateIntoOTGSResolution.dat'
        self.pythonLauncherInterpolateIntoOTGSResolution = self.launcherInterpolateIntoOTGSResolutionDir + 'interpolateIntoOTGSResolution.py'
        self.bashLauncherInterpolateIntoOTGSResolution = self.launcherInterpolateIntoOTGSResolutionDir + 'interpolateIntoOTGSResolution.sh'
        self.launcherMergeOTGSResultsDir = self.launcherOTGSDir + 'mergeOTGSResults/'
        self.configFileMergeOTGSResults = self.launcherMergeOTGSResultsDir + 'mergeOTGSResults.cfg'
        self.fileProcessesMergeOTGSResults = self.launcherMergeOTGSResultsDir + 'processesMergeOTGSResults.dat'
        self.fileLogMergeOTGSResults = self.launcherMergeOTGSResultsDir + 'logMergeOTGSResults'
        self.fileNodesMergeOTGSResults = self.launcherMergeOTGSResultsDir + 'nodesMergeOTGSResults.dat'
        self.pythonLauncherMergeOTGSResults = self.launcherMergeOTGSResultsDir + 'mergeOTGSResults.py'
        self.bashLauncherMergeOTGSResults = self.launcherMergeOTGSResultsDir + 'mergeOTGSResults.sh'
        self.launcherApplyGSTransportFiles = {}
        self.launcherApplyGSTransportFiles['directory'] = self.launcherOTGSDir + 'applyGSTransport/'
        self.launcherApplyGSTransportFiles['pyLauncher'] = self.launcherApplyGSTransportFiles['directory'] + 'applyGSTransport.py'
        self.launcherApplyGSTransportFiles['shLauncher'] = self.launcherApplyGSTransportFiles['directory'] + 'applyGSTransport.sh'
        self.launcherApplyGSTransportFiles['processes'] = self.launcherApplyGSTransportFiles['directory'] + 'processesApplyGSTransport.dat'
        self.launcherApplyGSTransportFiles['config'] = self.launcherApplyGSTransportFiles['directory'] + 'applyGSTransport.cfg'
        self.launcherApplyGSTransportFiles['nodes'] = self.launcherApplyGSTransportFiles['directory'] + 'nodesApplyGSTransport.dat'
        self.launcherApplyGSTransportFiles['log'] = self.launcherApplyGSTransportFiles['directory'] + 'logApplyGSTransport'
        # optimal-transport (2D) launcher files
        self.launcherOT2DDir = self.launcherOTDir + '2D/'
        self.launcherInterpolateIntoOT2DResolutionDir = self.launcherOT2DDir + 'interpolateIntoOT2DResolution/'
        self.configFileInterpolateIntoOT2DResolution = self.launcherInterpolateIntoOT2DResolutionDir + 'interpolateIntoOT2DResolution.cfg'
        self.fileProcessesInterpolateIntoOT2DResolution = self.launcherInterpolateIntoOT2DResolutionDir + 'processesInterpolateIntoOT2DResolution.dat'
        self.fileLogInterpolateIntoOT2DResolution = self.launcherInterpolateIntoOT2DResolutionDir + 'logInterpolateIntoOT2DResolution'
        self.fileNodesInterpolateIntoOT2DResolution = self.launcherInterpolateIntoOT2DResolutionDir + 'nodesInterpolateIntoOT2DResolution.dat'
        self.pythonLauncherInterpolateIntoOT2DResolution = self.launcherInterpolateIntoOT2DResolutionDir + 'interpolateIntoOT2DResolution.py'
        self.bashLauncherInterpolateIntoOT2DResolution = self.launcherInterpolateIntoOT2DResolutionDir + 'interpolateIntoOT2DResolution.sh'
        self.launcherMergeOT2DResultsDir = self.launcherOT2DDir + 'mergeOT2DResults/'
        self.configFileMergeOT2DResults = self.launcherMergeOT2DResultsDir + 'mergeOT2DResults.cfg'
        self.fileProcessesMergeOT2DResults = self.launcherMergeOT2DResultsDir + 'processesMergeOT2DResults.dat'
        self.fileLogMergeOT2DResults = self.launcherMergeOT2DResultsDir + 'logMergeOT2DResults'
        self.fileNodesMergeOT2DResults = self.launcherMergeOT2DResultsDir + 'nodesMergeOT2DResults.dat'
        self.pythonLauncherMergeOT2DResults = self.launcherMergeOT2DResultsDir + 'mergeOT2DResults.py'
        self.bashLauncherMergeOT2DResults = self.launcherMergeOT2DResultsDir + 'mergeOT2DResults.sh'
        #_________________________
        # plotting launcher files (whole simulations)
        self.launcherPlotSimulationDir = self.launcherDir + 'plotting/simulation/'
        self.pythonLauncherPlotSimulation = self.launcherPlotSimulationDir + 'plotSimulation.py'
        self.bashLauncherPlotSimulation = self.launcherPlotSimulationDir + 'plotSimulation.sh'
        self.fileProcessesPlotSimulation = self.launcherPlotSimulationDir + 'processesPlotSimulation.dat'
        self.configFilePlotSimulation = self.launcherPlotSimulationDir + 'plotSimulation.cfg'
        self.fileLogPlotSimulation = self.launcherPlotSimulationDir + 'logPlotSimulation'
        self.fileNodesPlotSimulation = self.launcherPlotSimulationDir + 'nodesPlotSimulation.dat'
        #_________________________
        # plotting launcher files (fields)
        self.launcherPlotFieldsDir = self.launcherDir + 'plotting/fields/'
        self.pythonLauncherPlotFields = self.launcherPlotFieldsDir + 'plotFields.py'
        self.bashLauncherPlotFields = self.launcherPlotFieldsDir + 'plotFields.sh'
        self.fileProcessesPlotFields = self.launcherPlotFieldsDir + 'processesPlotFields.dat'
        self.configFilePlotFields = self.launcherPlotFieldsDir + 'plotFields.cfg'
        self.fileLogPlotFields = self.launcherPlotFieldsDir + 'logPlotFields'
        self.fileNodesPlotFields = self.launcherPlotFieldsDir + 'nodesPlotFields.dat'
        #_________________________
        # plotting launcher files (applied gray-scale transport)
        self.launcherPlotApplyGSTransportFiles = {}
        self.launcherPlotApplyGSTransportFiles['directory'] = self.launcherDir + 'plotting/applyGSTransport/'
        self.launcherPlotApplyGSTransportFiles['pyLauncher'] = self.launcherPlotApplyGSTransportFiles['directory'] + 'plotApplyGSTransport.py'
        self.launcherPlotApplyGSTransportFiles['shLauncher'] = self.launcherPlotApplyGSTransportFiles['directory'] + 'plotApplyGSTransport.sh'
        self.launcherPlotApplyGSTransportFiles['processes'] = self.launcherPlotApplyGSTransportFiles['directory'] + 'processesPlotApplyGSTransport.dat'
        self.launcherPlotApplyGSTransportFiles['config'] = self.launcherPlotApplyGSTransportFiles['directory'] + 'plotApplyGSTransport.cfg'
        self.launcherPlotApplyGSTransportFiles['nodes'] = self.launcherPlotApplyGSTransportFiles['directory'] + 'nodesPlotApplyGSTransport.dat'
        self.launcherPlotApplyGSTransportFiles['log'] = self.launcherPlotApplyGSTransportFiles['directory'] + 'logPlotApplyGSTransport'
        #_________________________
        # figure directories
        self.simulationfigDir = self.figDir + 'simulation/'
        self.fieldfigDir = self.figDir + 'fields/fields/'
        self.fieldAttachGrayScalefigDir = self.figDir + 'fields/fieldsAttachGrayScale/'
        self.OTfigDir = self.figDir + 'optimalTransport/'
        self.OT2DfigDir = self.OTfigDir + 'OT2D/'
        self.OTGSfigDir = self.OTfigDir + 'OTGS/'
        self.applyGSTransportfigDir = self.OTfigDir + 'applyGSTransport/'
    #_________________________
    def procOutputDir(self, proc):
        """Raw output directory of a given process."""
        return ( self.outputDir + self.sessionName + proc + '/' )
    def fileSpeciesBinProc(self, proc, DOW, IOB, speciesBin):
        """Binary data file for a (process, day-of-week, in/out, species bin)."""
        return ( self.procOutputDir(proc) + DOW + IOB + speciesBin + '.bin' )
    #_________________________
    def scalingFieldDir(self, AOG, field, LOL):
        """Scaling directory for a field in linear or log (LOL) space."""
        return ( self.scalingDir + AOG + field.name + '/' + LOL + '/' )
    def fileScalingFieldSpecies(self, AOG, field, LOL, species):
        return ( self.scalingFieldDir(AOG, field, LOL) + species + '.npy' )
    def fileFMScalingFieldSpecies(self, AOG, field, LOL, species):
        return ( self.scalingFieldDir(AOG, field, LOL) + species + '_FM.npy' )
    #_________________________
    def analyseFieldSeciesDir(self, AOG, field, LOL, species):
        # NOTE(review): name keeps the historical 'Secies' typo and ignores
        # the species argument - kept as-is because callers use this name
        return ( self.statDir + AOG + field.name + '/' + LOL + '/' )
    #_________________________
    def procPreprocessedDataDir(self, proc):
        return ( self.workingDir + proc + '/' )
    def procPreprocessedFieldDir(self, proc, AOG, field, LOL):
        return ( self.procPreprocessedDataDir(proc) + 'rawResolution/' + AOG + field.name + '/' + LOL + '/' )
    def fileProcPreprocessedField(self, proc, AOG, field, LOL, species):
        return ( self.procPreprocessedFieldDir(proc, AOG, field, LOL) + species + '.npy' )
    def fileProcPreprocessedFieldGS(self, proc, AOG, field, LOL, species, TS):
        return ( self.procPreprocessedFieldDir(proc, AOG, field, LOL) + species + 'grayScale' + TS + '.npy' )
    #_________________________
    def procPreprocessedFieldOTResolutionDir(self, proc, AOG, field, LOL):
        return ( self.procPreprocessedDataDir(proc) + 'OTResolution/' + AOG + field.name + '/' + LOL + '/' )
    def fileProcPreprocessedFieldOTResolution(self, proc, AOG, field, LOL, species):
        return ( self.procPreprocessedFieldOTResolutionDir(proc, AOG, field, LOL) + species + '.npy' )
    def fileProcPreprocessedFieldGSOTResolution(self, proc, AOG, field, LOL, species, TS):
        return ( self.procPreprocessedFieldOTResolutionDir(proc, AOG, field, LOL) + species + 'grayScale' + TS + '.npy' )
    #_________________________
    def simOutputFieldFigDir(self, AOG, field, LOL, species):
        return ( self.simulationfigDir + AOG + field.name + '/' + LOL + '/' + species + '/' )
    #_________________________
    def fieldFigDir(self, AOG, field, LOL, species):
        return ( self.fieldfigDir + AOG + field.name + '/' + LOL + '/' + species + '/' )
    #_________________________
    def fieldAttachGrayScaleFigDir(self, AOG, field, LOL, species):
        return ( self.fieldAttachGrayScalefigDir + AOG + field.name + '/' + LOL + '/' + species + '/' )
    #_________________________
    def launcherPerformOT2DDir(self, configName):
        return ( self.launcherOT2DDir + configName + '/performOT2D/' )
    def fileProcessesPerformOT2D(self, configName):
        return ( self.launcherPerformOT2DDir(configName) + 'processesPerformOT2D.dat' )
    def fileLogPerformOT2D(self, configName):
        return ( self.launcherPerformOT2DDir(configName) + 'logPerformOT2D' )
    def fileNodesPerformOT2D(self, configName):
        return ( self.launcherPerformOT2DDir(configName) + 'nodesPerformOT2D.dat' )
    def pythonLauncherPerformOT2D(self, configName):
        return ( self.launcherPerformOT2DDir(configName) + 'performOT2D.py' )
    def bashLauncherPerformOT2D(self, configName):
        return ( self.launcherPerformOT2DDir(configName) + 'performOT2D.sh' )
    def performOT2DFieldSpeciesDir(self, AOG, field, LOL, species):
        return ( self.OT2DDir + AOG + field.name + '/' + LOL + '/' + species + '/' )
    def performOT2DP0P1FieldSpeciesDir(self, configName, p0, p1, AOG, field, LOL, species):
        return ( self.performOT2DFieldSpeciesDir(AOG, field, LOL, species) + str(p0) + '-' + str(p1) + '/' + configName + '/' )
    def configFilePerformOT2DP0P1FieldSpecies(self, configName, p0, p1, AOG, field, LOL, species):
        return ( self.performOT2DP0P1FieldSpeciesDir(configName, p0, p1, AOG, field, LOL, species) + configName + '.cfg' )
    def resultsFileOT2DP0P1FieldSpecies(self, configName, p0, p1, AOG, field, LOL, species):
        return ( self.performOT2DP0P1FieldSpeciesDir(configName, p0, p1, AOG, field, LOL, species) + 'result.bin' )
    def mergedResultsFileOT2DFieldSpecies(self, configName, AOG, field, LOL, species):
        # BUG FIX: previously called performOTGSFieldSpeciesDir (wrong OT
        # family and wrong arity - it requires a TS argument), which raised
        # a TypeError; the OT2D results belong under the OT2D directory
        return ( self.performOT2DFieldSpeciesDir(AOG, field, LOL, species) + 'results_'+configName )
    #_________________________
    def launcherPerformOTGSDir(self, configName):
        return ( self.launcherOTGSDir + configName + '/performOTGS/' )
    def fileProcessesPerformOTGS(self, configName):
        return ( self.launcherPerformOTGSDir(configName) + 'processesPerformOTGS.dat' )
    def fileLogPerformOTGS(self, configName):
        return ( self.launcherPerformOTGSDir(configName) + 'logPerformOTGS' )
    def fileNodesPerformOTGS(self, configName):
        return ( self.launcherPerformOTGSDir(configName) + 'nodesPerformOTGS.dat' )
    def pythonLauncherPerformOTGS(self, configName):
        return ( self.launcherPerformOTGSDir(configName) + 'performOTGS.py' )
    def bashLauncherPerformOTGS(self, configName):
        return ( self.launcherPerformOTGSDir(configName) + 'performOTGS.sh' )
    def performOTGSFieldSpeciesDir(self, AOG, field, LOL, species, TS):
        return ( self.OTGSDir + AOG + field.name + '/' + LOL + '/' + species + '/' + TS + '/' )
    def performOTGSP0P1FieldSpeciesDir(self, configName, p0, p1, AOG, field, LOL, species, TS):
        return ( self.performOTGSFieldSpeciesDir(AOG, field, LOL, species, TS) + str(p0) + '-' + str(p1) + '/' + configName + '/' )
    def configFilePerformOTGSP0P1FieldSpecies(self, configName, p0, p1, AOG, field, LOL, species, TS):
        return ( self.performOTGSP0P1FieldSpeciesDir(configName, p0, p1, AOG, field, LOL, species, TS) + configName + '.cfg' )
    def resultsFileOTGSP0P1FieldSpecies(self, configName, p0, p1, AOG, field, LOL, species, TS):
        return ( self.performOTGSP0P1FieldSpeciesDir(configName, p0, p1, AOG, field, LOL, species, TS) + 'result.bin' )
    def TmapFileOTGSP0P1FieldSpecies(self, configName, p0, p1, AOG, field, LOL, species, TS):
        return ( self.performOTGSP0P1FieldSpeciesDir(configName, p0, p1, AOG, field, LOL, species, TS) + 'Tmap.npy' )
    def mergedResultsFileOTGSFieldSpecies(self, configName, AOG, field, LOL, species, TS):
        return ( self.performOTGSFieldSpeciesDir(AOG, field, LOL, species, TS) + 'results_'+configName )
    #_________________________
    def applyOTGSFieldSpeciesDir(self, AOG, field, LOL, species, TS):
        return ( self.applyOTGSDir + AOG + field.name + '/' + LOL + '/' + species + '/' + TS + '/' )
    def applyOTGSP0P1FieldSpeciesDir(self, configName, p0, p1, AOG, field, LOL, species, TS):
        return ( self.applyOTGSFieldSpeciesDir(AOG, field, LOL, species, TS) + str(p0) + '-' + str(p1) + '/' + configName + '/' )
    def applyOTGSForwardP0P1FieldSpecies(self, configName, p0, p1, AOG, field, LOL, species, TS):
        return ( self.applyOTGSP0P1FieldSpeciesDir(configName, p0, p1, AOG, field, LOL, species, TS) + 'forwardTransport.npy' )
    def applyOTGSBackwardP0P1FieldSpecies(self, configName, p0, p1, AOG, field, LOL, species, TS):
        return ( self.applyOTGSP0P1FieldSpeciesDir(configName, p0, p1, AOG, field, LOL, species, TS) + 'backwardTransport.npy' )
    #_________________________
    def launcherPlotOT2DDir(self, configName):
        return ( self.launcherOT2DDir + configName + '/plotOT2D/' )
    def fileProcessesPlotOT2D(self, configName):
        return ( self.launcherPlotOT2DDir(configName) + 'processesPlotOT2D.dat' )
    def fileLogPlotOT2D(self, configName):
        return ( self.launcherPlotOT2DDir(configName) + 'logPlotOT2D' )
    def fileNodesPlotOT2D(self, configName):
        return ( self.launcherPlotOT2DDir(configName) + 'nodesPlotOT2D.dat' )
    def pythonLauncherPlotOT2D(self, configName):
        return ( self.launcherPlotOT2DDir(configName) + 'plotOT2D.py' )
    def bashLauncherPlotOT2D(self, configName):
        return ( self.launcherPlotOT2DDir(configName) + 'plotOT2D.sh' )
    def plotOT2DFieldSpeciesDir(self, AOG, field, LOL, species):
        return ( self.OT2DfigDir + AOG + field.name + '/' + LOL + '/' + species + '/' )
    def plotOT2DP0P1FieldSpeciesDir(self, configName, p0, p1, AOG, field, LOL, species):
        return ( self.plotOT2DFieldSpeciesDir(AOG, field, LOL, species) + str(p0) + '-' + str(p1) + '/' + configName + '/' )
    def configFilePlotOT2DP0P1FieldSpecies(self, configName, p0, p1, AOG, field, LOL, species):
        return ( self.plotOT2DP0P1FieldSpeciesDir(configName, p0, p1, AOG, field, LOL, species) + 'plotting_' + configName + '.cfg' )
    #_________________________
    def launcherPlotOTGSDir(self, configName):
        return ( self.launcherOTGSDir + configName + '/plotOTGS/' )
    def fileProcessesPlotOTGS(self, configName):
        return ( self.launcherPlotOTGSDir(configName) + 'processesPlotOTGS.dat' )
    def fileLogPlotOTGS(self, configName):
        return ( self.launcherPlotOTGSDir(configName) + 'logPlotOTGS' )
    def fileNodesPlotOTGS(self, configName):
        return ( self.launcherPlotOTGSDir(configName) + 'nodesPlotOTGS.dat' )
    def pythonLauncherPlotOTGS(self, configName):
        return ( self.launcherPlotOTGSDir(configName) + 'plotOTGS.py' )
    def bashLauncherPlotOTGS(self, configName):
        return ( self.launcherPlotOTGSDir(configName) + 'plotOTGS.sh' )
    def plotOTGSFieldSpeciesDir(self, AOG, field, LOL, species, TS):
        return ( self.OTGSfigDir + AOG + field.name + '/' + LOL + '/' + species + '/' + TS + '/' )
    def plotOTGSP0P1FieldSpeciesDir(self, configName, p0, p1, AOG, field, LOL, species, TS):
        return ( self.plotOTGSFieldSpeciesDir(AOG, field, LOL, species, TS) + str(p0) + '-' + str(p1) + '/' + configName + '/' )
    def configFilePlotOTGSP0P1FieldSpecies(self, configName, p0, p1, AOG, field, LOL, species, TS):
        return ( self.plotOTGSP0P1FieldSpeciesDir(configName, p0, p1, AOG, field, LOL, species, TS) + 'plotting_' + configName + '.cfg' )
    #_________________________
    def plotApplyGSTransportFieldSpeciesDir(self, AOG, field, LOL, species, TS):
        return ( self.applyGSTransportfigDir + AOG + field.name + '/' + LOL + '/' + species + '/' + TS + '/' )
    def plotApplyGSTransportP0P1FieldSpeciesDir(self, configName, p0, p1, AOG, field, LOL, species, TS):
        return ( self.plotApplyGSTransportFieldSpeciesDir(AOG, field, LOL, species, TS) + str(p0) + '-' + str(p1) + '/' + configName + '/' )
    #_________________________
    def makeProcLabelSuffixListList(self, AOO='all', addSimLabel=True):
        """Return (procListList, labelListList, suffixFigNameList).

        AOO='all' groups every process in one entry (suffix 'allsim');
        AOO='one' yields one single-process entry per process, using the
        process label as figure-name suffix.  When addSimLabel is False,
        the labels are replaced by empty strings.
        """
        if AOO == 'all':
            procListList = [self.procList]
            suffixFigNameList = ['allsim']
            if addSimLabel:
                labelListList = [self.labelList]
            else:
                labelList = []
                for proc in self.procList:
                    labelList.append('')
                labelListList = [labelList]
        elif AOO == 'one':
            procListList = []
            labelListList = []
            suffixFigNameList = []
            # BUG FIX: was iterating self.simOutput.procList / labelList,
            # but there is no 'simOutput' attribute on this class
            for (proc, label) in zip(self.procList, self.labelList):
                procListList.append([proc])
                # BUG FIX: suffixes are plain strings (see 'allsim' above),
                # not single-element lists
                suffixFigNameList.append(label)
                if addSimLabel:
                    labelListList.append([label])
                else:
                    labelListList.append([''])
        else:
            # previously an invalid AOO fell through to an UnboundLocalError
            raise ValueError("AOO must be 'all' or 'one', got %r" % (AOO,))
        return (procListList, labelListList, suffixFigNameList)
    #_________________________
    def fieldLOLList(self, AOG, field=None, LOL=None):
        """Resolve a (field, lin/log) selection; None means 'all'."""
        if field is None:
            fieldList = self.fieldList[AOG]
        else:
            fieldList = []
            for f in self.fieldList[AOG]:
                if field == f.name:
                    fieldList.append(f)
                    break
        if LOL is None:
            LOLList = LinOrLog()
        else:
            LOLList = [LOL]
        return (fieldList, LOLList)
    #_________________________
    def fieldLOLTSList(self, AOG, field=None, LOL=None, TS=None):
        """Same as fieldLOLList plus a threshold (TS) selection."""
        (fieldList, LOLList) = self.fieldLOLList(AOG, field, LOL)
        if TS is None:
            TSList = ThresholdNoThreshold()
        else:
            TSList = [TS]
        return (fieldList, LOLList, TSList)
    #__________________________________________________
|
#!/usr/bin/python
# -*- coding: utf-8
import json
import urllib2
import websocket
import threading
import logging
from abstract_bot import AbstractBot, AbstractMessage
class SlackBot(AbstractBot):
    """Slack RTM bot: fetches a websocket URL via rtm.start, then exchanges
    JSON messages over that websocket in a background thread."""

    # id for the next outgoing message (Slack requires unique message ids)
    _message_counter = 1

    def __init__(self, token):
        super(SlackBot, self).__init__()
        self._websocket_app = None
        # created per-connection in _start_websocket_app; a threading.Thread
        # object can only be started once, so it cannot live here
        self._thread = None
        self._token = token

    def connect(self):
        """Ask Slack for this token's RTM websocket URL and open the socket."""
        url_template = 'https://slack.com/api/rtm.start?simple_latest=true&no_unreads=true&token=%s'
        req = urllib2.Request(url_template % self._token)
        response = urllib2.urlopen(req)
        body = response.read()
        slack_team_info = json.loads(body)
        websocket_url = slack_team_info['url']
        self._start_websocket_app(websocket_url)
        super(SlackBot, self).connect()

    def _start_websocket_app(self, websocket_url):
        self._websocket_app = websocket.WebSocketApp(websocket_url,
                                                     on_message=self._on_message,
                                                     on_error=self._on_error,
                                                     on_close=self._on_close)
        # BUG FIX: build a fresh thread for every (re)connection - reusing a
        # single thread from __init__ made a second connect() raise
        # RuntimeError ("threads can only be started once")
        self._thread = threading.Thread(target=self._run)
        self._thread.start()

    def _run(self):
        # blocks until the websocket closes; runs on self._thread
        self._websocket_app.run_forever()

    def _stop_websocket_app(self):
        self._websocket_app.close()
        self._thread.join()

    def disconnect(self):
        self._stop_timers()
        self._stop_websocket_app()
        self._websocket_app = None

    def is_connected(self):
        return self._websocket_app and self._websocket_app.sock.connected

    def _on_message(self, ws, message):
        """
        Income message format:
        {
            "type": "message",
            "channel": "C0Z87P9QX",
            "user": "U0Z8J9602",
            "text": "привет",
            "ts": "1460149913.000003",
            "team": "T0Z6RJS83"
        }
        """
        logging.debug('websocket message: %s' % message)
        msg = json.loads(message)
        msg_type = msg.get('type', None)
        if msg_type == 'message':
            if 'reply_to' in msg:
                return  # Skip the last sent message (by bot) - don't need to check whether it was successfully sent
            # BUG FIX: fields must come from the parsed dict 'msg', not from
            # the raw JSON string 'message' (string indexing raised TypeError)
            self.handle_message(AbstractMessage(channel_id=msg['channel'], text=msg['text']))
        elif msg_type == 'reconnect_url':
            pass  # experimental

    def _on_error(self, ws, error):
        logging.debug('websocket error: %s' % error)

    def _on_close(self, ws):
        logging.debug('websocket closed')

    def send_message(self, channel_id, text):
        """
        Sends message with the content <text> to the channel with <channel_id>
        """
        response_msg = {
            'id': self._message_counter,
            'type': 'message',
            'channel': channel_id,
            'text': text
        }
        self._websocket_app.sock.send(json.dumps(response_msg))
        # note: += on the class attribute creates/updates an instance attribute
        self._message_counter += 1
|
from rest_framework import serializers
from .models import Equipos
class SongsSerializer(serializers.ModelSerializer):
    """DRF serializer exposing Equipos rows (id, team name, league, coach).

    NOTE(review): the class name says "Songs" but the model is Equipos
    (football teams) - presumably left over from a tutorial; confirm callers
    before renaming, since they import this exact name.
    """
    class Meta:
        model = Equipos
        fields = ('id', 'nombre_equipo', 'liga','tecnico')
#!/usr/bin/env python
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-0.x/blob/master/LICENSE
import unittest
import numpy
from awkward0 import *
class Test(unittest.TestCase):
def runTest(self):
pass
def test_masked_nbytes(self):
assert isinstance(MaskedArray([True, False, True, False, True, False, True, False, True, False], [0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9], maskedwhen=True).nbytes, int)
def test_masked_get(self):
a = MaskedArray([True, False, True, False, True, False, True, False, True, False], [0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9], maskedwhen=True)
assert a.tolist() == [None, 1.1, None, 3.3, None, 5.5, None, 7.7, None, 9.9]
assert a[0] is None
assert not a[1] is None
assert a[5:].tolist() == [5.5, None, 7.7, None, 9.9]
assert not a[5:][0] is None
assert a[5:][1] is None
assert a[[3, 2, 1]].tolist() == [3.3, None, 1.1]
assert a[[True, True, True, True, True, False, False, False, False, False]].tolist() == [None, 1.1, None, 3.3, None]
def test_masked_get_flip(self):
a = MaskedArray([False, True, False, True, False, True, False, True, False, True], [0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9], maskedwhen=False)
assert a.tolist() == [None, 1.1, None, 3.3, None, 5.5, None, 7.7, None, 9.9]
assert a[0] is None
assert not a[1] is None
assert a[5:].tolist() == [5.5, None, 7.7, None, 9.9]
assert not a[5:][0] is None
assert a[5:][1] is None
assert a[[3, 2, 1]].tolist() == [3.3, None, 1.1]
assert a[[True, True, True, True, True, False, False, False, False, False]].tolist() == [None, 1.1, None, 3.3, None]
def test_masked_ufunc(self):
a = MaskedArray([True, False, True, False, True, False, True, False, True, False], [0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9], maskedwhen=True)
b = MaskedArray([True, True, True, True, True, False, False, False, False, False], [0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9], maskedwhen=True)
assert (a + b).tolist() == [None, None, None, None, None, 11.0, None, 15.4, None, 19.8]
assert (a + [0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]).tolist() == [None, 2.2, None, 6.6, None, 11.0, None, 15.4, None, 19.8]
assert (a + numpy.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])).tolist() == [None, 2.2, None, 6.6, None, 11.0, None, 15.4, None, 19.8]
assert (a + IndexedMaskedArray([-1, -1, -1, 1, -1, 2, -1, 4, -1, 3], [0.0, 1.1, 2.2, 3.3, 4.4])).tolist() == [None, None, None, 4.4, None, 7.7, None, 12.100000000000001, None, 13.2]
def test_bitmasked_get(self):
a = BitMaskedArray.fromboolmask([True, False, True, False, True, False, True, False, True, False], [0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9], maskedwhen=True, lsborder=True)
assert a.tolist() == [None, 1.1, None, 3.3, None, 5.5, None, 7.7, None, 9.9]
assert a[0] is None
assert not a[1] is None
assert a[5:].tolist() == [5.5, None, 7.7, None, 9.9]
assert not a[5:][0] is None
assert a[5:][1] is None
assert a[[3, 2, 1]].tolist() == [3.3, None, 1.1]
assert a[[True, True, True, True, True, False, False, False, False, False]].tolist() == [None, 1.1, None, 3.3, None]
a = BitMaskedArray.fromboolmask([True, False, True, False, True, False, True, False, True, False], [0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9], maskedwhen=True, lsborder=False)
assert a.tolist() == [None, 1.1, None, 3.3, None, 5.5, None, 7.7, None, 9.9]
assert a[0] is None
assert not a[1] is None
assert a[5:].tolist() == [5.5, None, 7.7, None, 9.9]
assert not a[5:][0] is None
assert a[5:][1] is None
assert a[[3, 2, 1]].tolist() == [3.3, None, 1.1]
assert a[[True, True, True, True, True, False, False, False, False, False]].tolist() == [None, 1.1, None, 3.3, None]
def test_bitmasked_get_flip(self):
a = BitMaskedArray.fromboolmask([False, True, False, True, False, True, False, True, False, True], [0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9], maskedwhen=False, lsborder=True)
assert a.tolist() == [None, 1.1, None, 3.3, None, 5.5, None, 7.7, None, 9.9]
assert a[0] is None
assert not a[1] is None
assert a[5:].tolist() == [5.5, None, 7.7, None, 9.9]
assert not a[5:][0] is None
assert a[5:][1] is None
assert a[[3, 2, 1]].tolist() == [3.3, None, 1.1]
assert a[[True, True, True, True, True, False, False, False, False, False]].tolist() == [None, 1.1, None, 3.3, None]
a = BitMaskedArray.fromboolmask([False, True, False, True, False, True, False, True, False, True], [0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9], maskedwhen=False, lsborder=False)
assert a.tolist() == [None, 1.1, None, 3.3, None, 5.5, None, 7.7, None, 9.9]
assert a[0] is None
assert not a[1] is None
assert a[5:].tolist() == [5.5, None, 7.7, None, 9.9]
assert not a[5:][0] is None
assert a[5:][1] is None
assert a[[3, 2, 1]].tolist() == [3.3, None, 1.1]
assert a[[True, True, True, True, True, False, False, False, False, False]].tolist() == [None, 1.1, None, 3.3, None]
def test_bitmasked_arrow(self):
    """BitMaskedArray reproduces the Apache Arrow null-bitmap layout.

    Also checks that trailing padding bits in the final mask byte are
    ignored regardless of their value, and that longer (doubled) content
    still decodes correctly.
    """
    # Apache Arrow layout example
    # https://github.com/apache/arrow/blob/master/format/Layout.md#null-bitmaps
    a = BitMaskedArray.fromboolmask([True, True, False, True, False, True], [0, 1, 999, 2, 999, 3], maskedwhen=False, lsborder=True)
    assert a.tolist() == [0, 1, None, 2, None, 3]
    # extra gunk at the end of the array
    a = BitMaskedArray.fromboolmask([True, True, False, True, False, True, True, True], [0, 1, 999, 2, 999, 3], maskedwhen=False, lsborder=True)
    assert a.tolist() == [0, 1, None, 2, None, 3]
    # opposite sign
    a = BitMaskedArray.fromboolmask([True, True, False, True, False, True, False, False], [0, 1, 999, 2, 999, 3], maskedwhen=False, lsborder=True)
    assert a.tolist() == [0, 1, None, 2, None, 3]
    # doubled
    a = BitMaskedArray.fromboolmask([True, True, False, True, False, True, True, True, False, True, False, True], [0, 1, 999, 2, 999, 3, 0, 1, 999, 2, 999, 3], maskedwhen=False, lsborder=True)
    assert a.tolist() == [0, 1, None, 2, None, 3, 0, 1, None, 2, None, 3]
def test_indexedmasked_get(self):
    """IndexedMaskedArray: a -1 index means masked (None); any other index
    looks up the content array, so content may be reused or reordered."""
    a = IndexedMaskedArray([-1, 0, -1, 1, -1, 2, -1, 4, -1, 3], [0.0, 1.1, 2.2, 3.3, 4.4])
    assert a.tolist() == [None, 0.0, None, 1.1, None, 2.2, None, 4.4, None, 3.3]
    assert [a[i] for i in range(len(a))] == [None, 0.0, None, 1.1, None, 2.2, None, 4.4, None, 3.3]
|
from app import app, cache
from app.models import Page, User
#from app.big_brain import Interpreter
from flask import render_template, request, redirect
from werkzeug.exceptions import NotFound
@app.route('/facelift/<notion_url>')
@cache.cached()
def test(notion_url):
    """Render a Notion page through the big_brain Interpreter.

    NOTE(review): the ``Interpreter`` import at the top of this module is
    commented out, so hitting this route raises NameError at runtime —
    restore the import or remove this experimental route.
    """
    i = Interpreter('https://www.notion.so/' + notion_url)
    return render_template('wiki/notion_page.html', page=i.render())
@app.route('/articles/browse')
@cache.cached()
def browse():
    """List every page, pre-filling the search box from the optional
    ``?query=`` argument (empty string when absent)."""
    query = request.args.get('query', "")
    return render_template('wiki/browse_pages.html', query=query, pages=Page.query.all())
@app.route('/articles')
def page():
    """Render a single article selected by its numeric ``?id=`` argument.

    Falls back to the browse page when no id is supplied.

    Bug fix: the original computed ``redirect('/articles/browse')`` on a
    missing ``id`` but never returned it, so the view returned None and
    Flask raised a 500 instead of redirecting.
    """
    try:
        p = Page.query.get(int(request.args['id']))
    except KeyError:
        return redirect('/articles/browse')
    return render_template('wiki/page.html', page=p)
@app.route('/articles/<page_name>')
@cache.cached()
def page_by_name(page_name):
    """Render the article whose title matches ``page_name``; 404 otherwise."""
    found = Page.query.filter_by(title=page_name).first()
    if found is None:
        raise NotFound
    return render_template('wiki/page.html', page=found)
@app.route('/tutors')
def show_tutors():
    """Render the tutors page, listing every user flagged as a tutor."""
    return render_template('wiki/tutors.html', tutors=User.query.filter_by(tutor=True))
|
from math import log2, floor
from torch import nn, cat, add, Tensor
from torch.nn import init, Upsample, Conv2d, ReLU, Sequential
from torch.nn.functional import interpolate
class ScaleLayer(nn.Module):
    """Multiplies its input by a single learnable scalar.

    The scalar starts at ``init_value`` and broadcasts over the whole input.
    """

    def __init__(self, init_value=1e-3):
        super().__init__()
        # One-element learnable tensor; participates in autograd.
        self.scale = nn.Parameter(Tensor([init_value]))

    def forward(self, data):
        scaled = data * self.scale
        return scaled
class Net(nn.Module):
    """EDSR-style single-image super-resolution network.

    Pipeline: input conv -> ``num_residuals`` ResnetBlocks -> mid conv with a
    global skip connection -> log2(upscale_factor) PixelShuffleBlocks (each
    upsamples x2) -> output conv back to ``num_channels``.
    """

    def __init__(self, upscale_factor, num_channels=3, base_channel=256, num_residuals=32):
        super(Net, self).__init__()
        assert log2(upscale_factor).is_integer(), "Upscale factor must be power of two"
        self.input_conv = nn.Conv2d(num_channels, base_channel, kernel_size=3, stride=1, padding=1)
        resnet_blocks = []
        for _ in range(num_residuals):
            resnet_blocks.append(ResnetBlock(base_channel, kernel=3, stride=1, padding=1))
        self.residual_layers = nn.Sequential(*resnet_blocks)
        self.mid_conv = nn.Conv2d(base_channel, base_channel, kernel_size=3, stride=1, padding=1)
        # One x2 pixel-shuffle stage per power of two in the upscale factor.
        upscale_layers = [PixelShuffleBlock(base_channel, base_channel, upscale_factor=2)
                          for _ in range(int(log2(upscale_factor)))]
        self.upscale_layers = Sequential(*upscale_layers)
        self.output_conv = nn.Conv2d(base_channel, num_channels, kernel_size=3, stride=1, padding=1)

    def weight_init(self, mean=0.0, std=0.02):
        """Gaussian-initialize every conv layer in the network.

        Bug fix: the original iterated ``self._modules`` (direct children
        only), so the convs nested inside the Sequential containers
        (ResnetBlock / PixelShuffleBlock layers) were never initialized.
        ``self.modules()`` recurses into all submodules; ``normal_init``
        itself skips anything that is not a conv.
        """
        for m in self.modules():
            normal_init(m, mean, std)

    def forward(self, x):
        """Run the network; returns the upscaled image tensor."""
        x = self.input_conv(x)
        residual = x
        x = self.residual_layers(x)
        x = self.mid_conv(x)
        x += residual  # global residual connection around the resnet body
        x = self.upscale_layers(x)
        x = self.output_conv(x)
        return x
def normal_init(m, mean, std):
    """In-place Gaussian init for conv layers; zeroes the bias when present.

    Modules that are not (transposed) 2-d convolutions are left untouched.
    """
    if not isinstance(m, (nn.ConvTranspose2d, nn.Conv2d)):
        return
    m.weight.data.normal_(mean, std)
    if m.bias is not None:
        m.bias.data.zero_()
class ResnetBlock(nn.Module):
    """Residual block: conv -> ReLU -> conv, with 0.1 residual scaling
    (EDSR-style, batch-norm free).

    Bug fixes relative to the original:
    - ``self.conv2`` was constructed but never called in ``forward`` (the
      commented-out lines showed the intended conv1 -> act -> conv2 path);
    - ``return x * 0.1`` scaled the identity path as well, attenuating the
      signal through every block. Only the residual branch is scaled now.
    """

    def __init__(self, num_channel, kernel=3, stride=1, padding=1):
        super(ResnetBlock, self).__init__()
        self.conv1 = nn.Conv2d(num_channel, num_channel, kernel, stride, padding)
        self.conv2 = nn.Conv2d(num_channel, num_channel, kernel, stride, padding)
        self.activation = nn.ReLU(inplace=True)

    def forward(self, x):
        residual = x
        out = self.activation(self.conv1(x))
        out = self.conv2(out)
        # Scale only the branch, then add back the identity path.
        return residual + out * 0.1
class PixelShuffleBlock(nn.Module):
    """Upsamples spatially by ``upscale_factor`` via conv + pixel shuffle.

    The conv expands channels by ``upscale_factor ** 2``; PixelShuffle then
    rearranges those channels into a larger spatial grid.
    """

    def __init__(self, in_channel, out_channel, upscale_factor, kernel=3, stride=1, padding=1):
        super(PixelShuffleBlock, self).__init__()
        expanded = out_channel * upscale_factor ** 2
        self.conv = nn.Conv2d(in_channel, expanded, kernel, stride, padding)
        self.ps = nn.PixelShuffle(upscale_factor)

    def forward(self, x):
        features = self.conv(x)
        return self.ps(features)
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class ReportBuilderConfig(AppConfig):
    """Django app configuration for the report_builder application.

    NOTE(review): this module imports ``ugettext_lazy``, which was removed
    in Django 4.0 in favour of ``gettext_lazy`` — confirm the project's
    Django version.
    """
    # Dotted module path of the app; verbose_name is the human-readable label.
    name = 'report_builder'
    verbose_name = _('Reports')
from apps.user.models import AgentGroup


def create_agent_group(supplier, name='', desc=''):
    """Create, persist, and return an AgentGroup for the given supplier."""
    group = AgentGroup()
    group.supplier = supplier
    group.name = name
    group.desc = desc
    group.save()
    return group
|
import numpy as np
def max_profit(t, p, d, n):
    '''
    Compute the maximum profit of n jobs with times t[n], profits p[n] and
    deadlines d[n].

    This is a variation of the knapsack problem: jobs are sorted by deadline
    and a 0/1 knapsack is solved with the latest deadline as the time budget.

    Returns (w, best, s): the DP table, the maximum profit, and the schedule
    achieving it ('a1'..'an' refer to the jobs' original positions).
    '''
    order = np.array(d).argsort()
    t = np.array(t)[order]
    p = np.array(p)[order]
    d = np.array(d)[order]
    D = d[n - 1] + 1  # time budget: one past the latest deadline
    # w[i][j] = best profit using only the first i jobs within j time units
    w = [[None for j in range(D)] for i in range(n + 1)]
    for j in range(0, D):
        w[0][j] = 0  # zero jobs -> zero profit
        for i in range(1, n + 1):
            skip = w[i - 1][j]
            if t[i - 1] > j:
                take = 0  # job i does not fit in j time units
            else:
                take = w[i - 1][j - t[i - 1]] + p[i - 1]
            w[i][j] = max(skip, take)
    s = get_schedule(w, t, D - 1, n, order)
    # Bug fix: the optimum over all n jobs is w[n][D-1]; the original
    # returned w[n-1][D-1], silently excluding the last job.
    return w, w[n][D - 1], s


def get_schedule(w, t, D, n, order):
    '''
    Recover the job sequence achieving the maximum profit by walking the DP
    table w backwards from w[n][D] using the job times t[n]. `order` maps
    sorted positions back to the original job numbering.
    '''
    s = [None] * n
    j = D
    k = n - 1
    for i in range(n, 0, -1):
        # Value differs from the row above -> job i was taken.
        if w[i][j] != w[i - 1][j]:
            s[k] = 'a' + str(order[i - 1] + 1)  # +1 for 1-based job labels
            j = j - t[i - 1]
            k = k - 1
    return s[k + 1:n]
def main():
    """Demo: solve a small job-scheduling instance and print the result.

    Modernized from Python 2 ``print`` statements to Python 3 ``print()``
    calls (the rest of the module is version-neutral); the commented-out
    alternative datasets were removed.
    """
    d = [1, 3, 4, 5, 6, 8, 6]
    p = [1, 2, 3, 4, 2, 2, 12]
    t = [1, 2, 3, 2, 2, 3, 5]
    print()
    print('===============================================')
    print('Maximum profit productivity')
    print('===============================================')
    print('p = ', p)
    print('d = ', d)
    print('t = ', t)
    w, mp, s = max_profit(t, p, d, len(d))
    print('w = ')
    print(np.array(w))
    print()
    print('maximum profit = ', mp)
    print('best jobs schedule = ', s)


if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 21 18:40:33 2021

@author: 柏均

Minecraft Pi demo: places a few water blocks around the player, surrounds
the player with TNT, then hollows out a box-shaped structure nearby.
"""
from mcpi.minecraft import Minecraft
import time

mc= Minecraft.create()

# Player's current tile position (block coordinates).
x,y,z = mc.player.getTilePos()
time.sleep(6)
# Block id 8 is water (flowing) — placed at and near the player's position.
mc.setBlock(x,y,z,8)
time.sleep(6)
mc.setBlock(x+5,y,z,8)
time.sleep(6)
mc.setBlock(x,y,z+5,8)

# Fill an 11x3x11 box around the (re-read) player position with TNT (id 46).
x,y,z = mc.player.getTilePos()
mc.setBlocks(x-5, y-1, z-5,x+5,y+1,z+5,46)

# Build a solid 11x11x11 cube next to the player, then carve out its
# interior with air (id 0) to leave a hollow shell.
# NOTE(review): 336 is not a standard vanilla block id — confirm it is
# valid for this server's mcpi block table.
x,y,z = mc.player.getTilePos()
mc.setBlocks(x+3,y,z,x+13,y+10,z+10,336)
mc.setBlocks(x+4,y+1,z+1,x+12,y+9,z+9,0)
|
from django import forms
from typing import Dict, Any
from private.models import *
class UploadForm(forms.ModelForm):
    """ModelForm for uploading a Song, with Bootstrap-styled widgets.

    NOTE(review): both ``fields = "__all__"`` and ``exclude`` are set;
    ``exclude`` takes precedence, so the form exposes every Song field
    except ``post_author`` and ``counter``. Using ``exclude`` alone would
    be clearer.
    """

    class Meta:
        model = Song
        fields = "__all__"
        # Author and play counter are set server-side, not by the uploader.
        exclude = ["post_author", "counter"]
        # Per-field widget attrs: Bootstrap class, element id, placeholder,
        # and a data-error message used by client-side validation.
        widgets = {
            "band": forms.TextInput(
                attrs = {
                    "class": "form-control",
                    "id": "band",
                    "placeholder": "Band",
                    "data-error": "Please enter Band Name",
                }
            ),
            "name": forms.TextInput(
                attrs = {
                    "class": "form-control",
                    "id": "name",
                    "placeholder": "Name of Song",
                    "data-error": "Please enter Name of Song",
                }
            ),
            "album": forms.TextInput(
                attrs = {
                    "class": "form-control",
                    "id": "album",
                    "placeholder": "Name of Album",
                    "data-error": "Please enter Name of Album",
                }
            ),
            "genre": forms.Select(
                attrs = {
                    "class": "form-control",
                    "id": "genre",
                    "placeholder": "Genre",
                    "data-error": "Please enter Genre",
                }
            ),
            "audio": forms.FileInput(
                attrs = {
                    "class": "form-control",
                    "id": "audio",
                    "placeholder": "Audio File",
                    "data-error": "Please upload Audio File",
                }
            ),
            "picture": forms.FileInput(
                attrs = {
                    "class": "form-control",
                    "id": "picture",
                    "placeholder": "Picture File",
                    "data-error": "Please upload Picture File",
                }
            ),
        }
class PlaylistForm(forms.ModelForm):
    """ModelForm for creating a Playlist, with Bootstrap-styled widgets.

    NOTE(review): as in UploadForm, ``fields = "__all__"`` is redundant
    alongside ``exclude``.
    """

    class Meta:
        model = Playlist
        fields = "__all__"
        # The playlist owner is assigned server-side.
        exclude = ["playlist_author"]
        widgets = {
            "name": forms.TextInput(
                attrs = {
                    "class": "form-control",
                    "id": "name",
                    "placeholder": "name",
                    "data-error": "Please enter Playlist Name",
                }
            ),
            "picture": forms.FileInput(
                attrs = {
                    "class": "form-control",
                    "id": "picture",
                    "placeholder": "Picture File",
                    "data-error": "Please upload Picture File",
                }
            ),
        }
class SongPlaylistForm(forms.ModelForm):
    """ModelForm for adding a song to a playlist (the song itself is set
    server-side; the user only picks the target playlist)."""

    class Meta:
        model = SongPlaylist
        fields = "__all__"
        exclude = ["song"]
        widgets = {
            "playlist": forms.Select(
                attrs = {
                    "class": "form-control",
                    "id": "playlist",
                    "placeholder": "Choose playlist",
                    "data-error": "Please enter Playlist",
                }
            ),
        }
from django.urls import path
from . import views

# Site page routes; `name` values are used for reverse()/{% url %} lookups.
urlpatterns = [
    path('',views.index,name = 'index'),
    path('contact',views.contact, name = 'contact'),
    path('about',views.about,name = 'about'),
    path('pricing',views.pricing,name = 'pricing'),
    path('service',views.service,name = 'service'),
    path('blog',views.blog,name = 'blog'),
    # NOTE(review): blog_detail takes no URL parameter — presumably the post
    # is selected via query string or session; confirm against the view.
    path('blog_detail',views.blog_detail,name = 'blog_detail'),
    path('appointment',views.appointment,name = 'appointment'),
]
def print_constraints_board_style(x, y, constraints):
    """Print the 9x9 constraint mask for cell (x, y).

    The cell itself is drawn as 'X'; every other cell shows '#' when it
    constrains (x, y) and '.' when it does not.
    """
    for row in range(9):
        symbols = []
        for col in range(9):
            if col == x and row == y:
                symbols.append("X")
            else:
                symbols.append("#" if constraints[y][x][row][col] else ".")
        print("".join(s + " " for s in symbols))
def print_variables_board_style(variables):
    """Render the board: singleton domains print their value, open cells
    print '.', and empty (contradictory) domains print '#'."""
    for row in variables:
        symbols = []
        for cell in row:
            size = len(cell['domain'])
            if size == 1:
                symbols.append(str(cell['domain'][0]))
            elif size > 1:
                symbols.append(".")
            else:
                symbols.append("#")
        print("".join(s + " " for s in symbols))
def print_remaining_variables_board_style(variables):
    """Print the remaining domain size of every cell, row by row."""
    for row in variables:
        print("".join(str(len(cell['domain'])) + " " for cell in row))
# inputting your profile info:
from datetime import date

birth_year = input('Birth year: ')
print(type(birth_year))  # input() always returns str
# Bug fix: use the actual current year instead of the hard-coded 2020.
age = date.today().year - int(birth_year)
print(type(age))
print(age)

body_weight_lbs = input('your current weight: ')
# 1 lb is roughly 0.45 kg.
body_weight_kg = 0.45 * int(body_weight_lbs)
print(body_weight_kg)
|
import tkinter as tk
from tkinter import ttk
class SidebarFrame(tk.Frame):
    """Frame that manages the sidebar and user input
    """

    # Init and window management
    def __init__(self, parent, submitCallback, *args, **kwargs):
        """Args:
            parent (tk): Tk parent widget
            submitCallback (callable): Invoked when the folder-ID entry is
                submitted (bound to <Return> and used for auto-submit)
            *args: Passthrough
            **kwargs: Passthrough
        """
        # NOTE(review): `parent` is NOT forwarded to tk.Frame.__init__ — the
        # Frame's master comes from *args/**kwargs (or defaults to the root
        # window). Confirm this is intentional.
        tk.Frame.__init__(self, *args, **kwargs)
        self.controller = parent
        self.submit = submitCallback

        # Initialize window
        self.initwindow()

    def reFocusEntry(self):
        # Clear the folder-ID entry and give it keyboard focus.
        self.entry.delete(0, last=tk.END)
        self.entry.focus()

    def initwindow(self):
        """Initialize widgets
        """
        inOrderRow = 0

        def rowInOrder():
            """Helper function to increment in-order elements"""
            nonlocal inOrderRow
            inOrderRow += 1
            return inOrderRow

        # Open / Filter share a row (W and E stickies); same for Prev / Skip.
        btn_open = ttk.Button(self, text="Open", takefocus=False, command=self.controller.openDir)
        btn_open.grid(row=rowInOrder(), sticky=tk.W)
        btn_filter = ttk.Button(self, text="Filter", takefocus=False, command=self.controller.changeMatchGlobs)
        btn_filter.grid(row=inOrderRow, sticky=tk.E)
        btn_ref = ttk.Button(self, text="Refresh", takefocus=False, command=(
            lambda: (self.controller.reloadDirContext(), self.controller.imageUpdate()))
        )
        btn_ref.grid(row=rowInOrder(), sticky="WE")
        btn_back = ttk.Button(self, text="Prev", takefocus=False, command=self.controller.prevImage)
        btn_back.grid(row=rowInOrder(), sticky=tk.W)
        btn_skip = ttk.Button(self, text="Skip", takefocus=False, command=self.controller.nextImage)
        btn_skip.grid(row=inOrderRow, sticky=tk.E)

        def highlightEntry(parent):
            """Quick factory for entries that highlight"""
            return tk.Entry(parent, takefocus=True, highlightthickness=2)

        # Entry text field
        lab_context_label = ttk.Label(self, text="Move to folder ID:")
        lab_context_label.grid(row=rowInOrder())
        self.entry = highlightEntry(self)
        self.entry.bind("<Return>", self.submit)
        self.entry.bind("<KeyRelease>", self.processEntryInput)
        self.entry.grid(row=rowInOrder(), sticky="WE")
        self.reFocusEntry()

        # New folder entry
        lab_newfolder = ttk.Label(self, text="Move to new folder:")
        lab_newfolder.grid(row=rowInOrder())
        self.entry_newfolder = highlightEntry(self)
        self.entry_newfolder.bind("<Return>", self.controller.moveToFolder)
        self.entry_newfolder.grid(row=rowInOrder(), sticky="WE")

        # Rename
        lab_rename = ttk.Label(self, text="Rename")
        lab_rename.grid(row=rowInOrder())
        self.entry_rename = highlightEntry(self)
        self.entry_rename.grid(row=rowInOrder(), sticky="WE")
        self.entry_rename.bind("<Return>", self.controller.dorename)

        # NOTE(review): the prefix-rename entry below rebinds
        # self.entry_rename, so the plain-rename widget above is no longer
        # reachable through that attribute (its <Return> binding still
        # works). Confirm whether a separate attribute name was intended.
        lab_rename = ttk.Label(self, text="Rename Prefix")
        lab_rename.grid(row=rowInOrder())
        self.entry_rename = highlightEntry(self)
        self.entry_rename.grid(row=rowInOrder(), sticky="WE")
        self.entry_rename.bind("<Return>", self.controller.doPrefixRename)

        # context keys
        lab_context_label = ttk.Label(self, text="Folder IDs:")
        lab_context_label.grid(row=rowInOrder())
        # self.str_context = tk.StringVar()
        self.listbox_context = tk.Listbox(
            self, state=tk.DISABLED, takefocus=False, relief=tk.GROOVE)
        self.listbox_context.grid(row=rowInOrder(), sticky="nsew")
        # Let the listbox row/column absorb extra space when resized.
        self.grid_columnconfigure(0, weight=1)
        self.grid_rowconfigure(inOrderRow, weight=1)

        # Settings popup menu: one checkbutton per controller setting plus
        # a couple of one-shot commands.
        settings_popup = tk.Menu(self, tearoff=0)
        for key, (var, label) in self.controller.settings.items():
            settings_popup.add_checkbutton(label=label, variable=var)
        settings_popup.add_separator()
        settings_popup.add_command(label="Add Unsorted to base", command=self.controller.addUnsortedToBase)
        settings_popup.add_command(label="Commit deleted files now", command=self.controller.trash.flush)
        # settings_popup.add_separator()
        btn_settings = ttk.Button(self, text="Settings", takefocus=False)
        btn_settings.bind("<Button-1>", lambda event: settings_popup.tk_popup(event.x_root, event.y_root, 0))
        btn_settings.grid(row=rowInOrder(), sticky=tk.EW)

        # Sort-order picker, seek slider, and progress bar.
        self.combobox_sorter = ttk.Combobox(self, state="readonly", takefocus=False, values=[name for name in self.controller.sortkeys.keys()])
        self.combobox_sorter.bind("<<ComboboxSelected>>", self.on_adjust_sort)
        self.combobox_sorter.grid(row=rowInOrder(), sticky="WE")
        self.var_progbar_seek = tk.IntVar()
        self.progbar_seek = ttk.Scale(self, takefocus=False, variable=self.var_progbar_seek, command=self.on_adjust_seek)
        self.progbar_seek.grid(row=rowInOrder(), sticky="WE")
        self.var_progbar_prog = tk.IntVar()
        self.progbar_prog = ttk.Progressbar(self, variable=self.var_progbar_prog)
        self.progbar_prog.grid(row=rowInOrder(), sticky="WE")
        self.highlightListboxItems([])

    def highlightListboxItems(self, matches):
        """Highlight specific items in the listbox

        Args:
            matches (list): List of indexes to highlight
        """
        # The listbox is kept DISABLED except while updating the selection.
        self.listbox_context.configure(state=tk.NORMAL)
        self.listbox_context.selection_clear(0, tk.END)
        if len(matches) == 0:
            self.listbox_context.configure(state=tk.DISABLED)
            return
        for index in matches:
            self.listbox_context.selection_set(index)
            self.listbox_context.see(index)

    def on_adjust_seek(self, event):
        # Scale callback: jump to the image at the slider position.
        self.controller.gotoImage(event)

    def on_adjust_sort(self, event):
        # Combobox callback: switch sort function, then re-sort the list.
        self.controller.sorter = self.controller.sortkeys[event.widget.get()]
        self.controller.resortImageList()
        # self.config(state=tk.NORMAL)

    def processEntryInput(self, event):
        """Process entry input, handling element styling and possible automatic submission.

        Args:
            event (TYPE): Tk entry event
        """
        GOOD = "#AAFFAA"
        BAD = "#FFAAAA"
        NORMAL = "#FFFFFF"
        query = event.widget.get()
        # NOTE(review): keycode 32 is assumed to be the space bar; keycodes
        # are platform-dependent — confirm on non-X11 platforms.
        if event.keycode == 32:
            query = query[:-1]  # Delete space character
        if query == "":
            event.widget.configure(bg=NORMAL)
            self.highlightListboxItems([])
            self.controller.updateLabelFileName()
            return
        # Use controller folder to "predict" action and highlight
        best_folder_list = self.controller.getBestFolders(query)
        self.highlightListboxItems([ir.index for ir in best_folder_list])
        # Preview target, state
        if len(best_folder_list) == 1:
            best_folder = best_folder_list[0]
            self.controller.str_curfile.set(best_folder.label)
            event.widget.configure(bg=GOOD)
            # Automatically submit if aggressive
            if self.controller.settings["aggressive"][0].get():
                self.submit(entry=query)
        else:
            # Ambiguous (or no) match: show all candidate labels, mark red.
            self.controller.str_curfile.set(
                ", ".join([li.label for li in best_folder_list])
            )
            event.widget.configure(bg=BAD)
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
from azure.iot.device import ProvisioningDeviceClient

# Device Provisioning Service settings come from the environment.
# NOTE(review): os.getenv returns None for unset variables; the SDK will
# then fail at client creation / registration — consider validating first.
provisioning_host = os.getenv("PROVISIONING_HOST")
id_scope = os.getenv("PROVISIONING_IDSCOPE")
registration_id = os.getenv("PROVISIONING_REGISTRATION_ID")
symmetric_key = os.getenv("PROVISIONING_SYMMETRIC_KEY")

# Build a DPS client authenticated with a symmetric key.
provisioning_device_client = ProvisioningDeviceClient.create_from_symmetric_key(
    provisioning_host=provisioning_host,
    registration_id=registration_id,
    id_scope=id_scope,
    symmetric_key=symmetric_key,
)

# Blocking network call: registers the device with the provisioning service.
registration_result = provisioning_device_client.register()

# The result can be directly printed to view the important details.
print(registration_result)

# Individual attributes can be seen as well
print("The request_id was :-")
print(registration_result.request_id)
print("The etag is :-")
print(registration_result.registration_state.etag)
|
thistuple = ("apple", "banana", "cherry")
print(thistuple)
'''the tuple value cannot be added like this because the tuples are ordered nd unchangeable'''
#thistuple[1] = "orange"
#print(thistuple)

# Tuples are iterable.
for x in thistuple:
    print(x)

if "apple" in thistuple:
    print("Yes, 'apple' is in the fruits tuple")

print(len(thistuple))

#del thistuple  # del deletes the tuple object entirely
#print(thistuple)  # so if the tuple no longer exists this raises an error

constTuple = tuple(("apple", "banana", "cherry"))  # note the double round-brackets
print(constTuple)  # the tuple() constructor creates the tuple values

'''SETS'''
thisSet = {"keyboard", "mouse", "monitor"}
print(thisSet)

for x in thisSet:
    print(x)

print("keyboard" in thisSet)  # returns True if the value is inside the set

thisSet.add("printer")  # with add() only one item can be entered in the set
print(thisSet)

# update() adds several items at once; items land in an unordered fashion
# and duplicates ("keyboard") are ignored.
thisSet.update(["DVD", "Speaker", "microphone", "keyboard"])
print(thisSet)

print(len(thisSet))

thisSet.remove("mouse")
print(thisSet)

thisSet.discard("banana")
print(thisSet)  # discard() of a value not in the set does not raise an error

d = thisSet.pop()  # pop() removes and returns an arbitrary element
print(d)
print(thisSet)

thisSet.clear()
print(thisSet)  # clears all values, leaving an empty set
#del thisSet
#print(thisSet)  # would raise an error: the name no longer exists

contSet = set(("apple", "banana", "cherry"))  # note the double round-brackets
print(contSet)
In the context of today's computers and transmission media, data is information converted into binary digital form. "Data" may be treated as either a singular or a plural noun.
|
import os
from contextlib import contextmanager
from typing import (
Iterable,
Iterator,
List,
Optional,
Sequence,
Type,
TypeVar,
Union,
cast,
)
from .objects import ExtResource, GDObject, SubResource
from .sections import (
GDExtResourceSection,
GDNodeSection,
GDSection,
GDSectionHeader,
GDSubResourceSection,
)
from .structure import scene_file
from .util import find_project_root, gdpath_to_filepath
__all__ = ["GDFile", "GDScene", "GDResource"]

# Scene and resource files seem to group the section types together and sort them.
# This is the order I've observed
SCENE_ORDER = [
    "gd_scene",
    "gd_resource",
    "ext_resource",
    "sub_resource",
    "resource",
    "node",
    "connection",
    "editable",
]

# Type variable so the classmethod constructors (parse/load/from_parser)
# are typed as returning the subclass they were invoked on.
GDFileType = TypeVar("GDFileType", bound="GDFile")
class GodotFileException(Exception):
    """ Thrown when there are errors in a Godot file
    (e.g. a reference to an unknown resource id while renumbering) """
class GDFile(object):
    """ Base class representing the contents of a Godot file """

    # Set by load(); required by load_parent_scene() to resolve res:// paths.
    project_root: Optional[str] = None

    def __init__(self, *sections: GDSection) -> None:
        self._sections = list(sections)

    def add_section(self, new_section: GDSection) -> int:
        """ Add a section to the file and return the index of that section """
        new_idx = SCENE_ORDER.index(new_section.header.name)
        # Insert before the first section of a later type so the file keeps
        # the SCENE_ORDER grouping; append if no later type exists.
        for i, section in enumerate(self._sections):
            idx = SCENE_ORDER.index(section.header.name)
            if new_idx < idx:  # type: ignore
                self._sections.insert(i, new_section)
                return i
        self._sections.append(new_section)
        return len(self._sections) - 1

    def remove_section(self, section: GDSection) -> bool:
        """ Remove a section from the file; returns False if not present """
        idx = -1
        for i, s in enumerate(self._sections):
            if section == s:
                idx = i
                break
        if idx == -1:
            return False
        self.remove_at(idx)
        return True

    def remove_at(self, index: int) -> GDSection:
        """ Remove a section at an index """
        return self._sections.pop(index)

    def get_sections(self, name: Optional[str] = None) -> List[GDSection]:
        """ Get all sections, or all sections of a given type

        Note: with no name this returns the live internal list, not a copy.
        """
        if name is None:
            return self._sections
        return [s for s in self._sections if s.header.name == name]

    def get_nodes(self) -> List[GDNodeSection]:
        """ Get all [node] sections """
        return cast(List[GDNodeSection], self.get_sections("node"))

    def get_ext_resources(self) -> List[GDExtResourceSection]:
        """ Get all [ext_resource] sections """
        return cast(List[GDExtResourceSection], self.get_sections("ext_resource"))

    def get_sub_resources(self) -> List[GDSubResourceSection]:
        """ Get all [sub_resource] sections """
        return cast(List[GDSubResourceSection], self.get_sections("sub_resource"))

    def find_node(
        self, property_constraints: Optional[dict] = None, **constraints
    ) -> Optional[GDNodeSection]:
        """ Find first [node] section that matches (see find_section) """
        return cast(
            GDNodeSection,
            self.find_section("node", property_constraints, **constraints),
        )

    def find_ext_resource(
        self, property_constraints: Optional[dict] = None, **constraints
    ) -> Optional[GDExtResourceSection]:
        """ Find first [ext_resource] section that matches (see find_section) """
        return cast(
            GDExtResourceSection,
            self.find_section("ext_resource", property_constraints, **constraints),
        )

    def find_sub_resource(
        self, property_constraints: Optional[dict] = None, **constraints
    ) -> Optional[GDSubResourceSection]:
        """ Find first [sub_resource] section that matches (see find_section) """
        return cast(
            GDSubResourceSection,
            self.find_section("sub_resource", property_constraints, **constraints),
        )

    def find_section(
        self,
        section_name_: Optional[str] = None,
        property_constraints: Optional[dict] = None,
        **constraints
    ) -> Optional[GDSection]:
        """
        Find the first section that matches

        You may pass in a section_name, which will match the header name (e.g. 'node').
        You may also pass in kwargs that act as filters. For example::

            # Find the first node
            scene.find_section('node')
            # Find the first Sprite
            scene.find_section('node', type='Sprite')
            # Find the first ext_resource that references Health.tscn
            scene.find_section('ext_resource', path='Health.tscn')
        """
        for section in self.find_all(
            section_name_, property_constraints=property_constraints, **constraints
        ):
            return section
        return None

    def find_all(
        self,
        section_name_: Optional[str] = None,
        property_constraints: Optional[dict] = None,
        **constraints
    ) -> Iterable[GDSection]:
        """ Same as find_section, but returns all matches """
        for section in self.get_sections(section_name_):
            found = True
            # A kwarg matches if it equals either a section attribute or a
            # header attribute of the same name.
            for k, v in constraints.items():
                if getattr(section, k, None) == v:
                    continue
                if section.header.get(k) == v:
                    continue
                found = False
                break
            if property_constraints is not None:
                for k, v in property_constraints.items():
                    if section.get(k) != v:
                        found = False
                        break
            if found:
                yield section

    def add_ext_resource(self, path: str, type: str) -> GDExtResourceSection:
        """ Add an ext_resource """
        # New id is one past the current maximum (ids start at 1).
        next_id = 1 + max([s.id for s in self.get_ext_resources()] + [0])
        section = GDExtResourceSection(path, type, next_id)
        self.add_section(section)
        return section

    def add_sub_resource(self, type: str, **kwargs) -> GDSubResourceSection:
        """ Add a sub_resource """
        next_id = 1 + max([s.id for s in self.get_sub_resources()] + [0])
        section = GDSubResourceSection(type, next_id, **kwargs)
        self.add_section(section)
        return section

    def add_node(
        self, name: str, type: Optional[str] = None, parent: Optional[str] = None, index: Optional[int] = None
    ) -> GDNodeSection:
        """
        Simple API for adding a node

        For a friendlier, tree-oriented API use use_tree()
        """
        node = GDNodeSection(name, type=type, parent=parent, index=index)
        self.add_section(node)
        return node

    def add_ext_node(
        self, name: str, instance: int, parent: Optional[str] = None, index: Optional[int] = None
    ) -> GDNodeSection:
        """
        Simple API for adding a node that instances an ext_resource

        For a friendlier, tree-oriented API use use_tree()
        """
        node = GDNodeSection.ext_node(name, instance, parent=parent, index=index)
        self.add_section(node)
        return node

    @property
    def is_inherited(self) -> bool:
        # The file is inherited when its root node instances an ext_resource.
        root = self.find_node(parent=None)
        if root is None:
            return False
        return root.instance is not None

    def get_parent_scene(self) -> Optional[str]:
        """ Return the res:// path of the inherited scene, if any """
        root = self.find_node(parent=None)
        if root is None or root.instance is None:
            return None
        parent_res = self.find_ext_resource(id=root.instance)
        if parent_res is None:
            return None
        return parent_res.path

    def load_parent_scene(self) -> "GDScene":
        """ Load the scene this one inherits from

        Raises RuntimeError when project_root is unset, the scene is not
        inherited, or the parent resource cannot be found.
        """
        if self.project_root is None:
            raise RuntimeError(
                "load_parent_scene() requires a project_root on the GDFile"
            )
        root = self.find_node(parent=None)
        if root is None or root.instance is None:
            raise RuntimeError("Cannot load parent scene; scene is not inherited")
        parent_res = self.find_ext_resource(id=root.instance)
        if parent_res is None:
            raise RuntimeError(
                "Could not find parent scene resource id(%d)" % root.instance
            )
        return GDScene.load(gdpath_to_filepath(self.project_root, parent_res.path))

    @contextmanager
    def use_tree(self):
        """
        Helper API for working with the nodes in a tree structure

        This temporarily builds the nodes into a tree, and flattens them back into the
        GD file format when done.

        Example::

            with scene.use_tree() as tree:
                tree.root = Node('MyScene')
                tree.root.add_child(Node('Sensor', type='Area2D'))
                tree.root.add_child(Node('HealthBar', instance=1))
            scene.write("MyScene.tscn")
        """
        from .tree import Tree

        tree = Tree.build(self)
        yield tree
        # Drop all existing [node] sections (iterate backwards so pops don't
        # shift unvisited indexes) and re-add the flattened tree.
        for i in range(len(self._sections) - 1, -1, -1):
            section = self._sections[i]
            if section.header.name == "node":
                self._sections.pop(i)
        nodes = tree.flatten()
        if not nodes:
            return
        # Let's find out where the root node belongs and then bulk add the rest at that
        # index
        i = self.add_section(nodes[0])
        self._sections[i + 1 : i + 1] = nodes[1:]

    def get_node(self, path: str = ".") -> Optional[GDNodeSection]:
        """ Mimics the Godot get_node API """
        with self.use_tree() as tree:
            if tree.root is None:
                return None
            node = tree.root.get_node(path)
            return node.section if node is not None else None

    @classmethod
    def parse(cls: Type[GDFileType], contents: str) -> GDFileType:
        """ Parse the contents of a Godot file """
        return cls.from_parser(scene_file.parseString(contents, parseAll=True))

    @classmethod
    def load(cls: Type[GDFileType], filepath: str) -> GDFileType:
        """ Parse a Godot file from disk, recording its project root """
        with open(filepath, "r") as ifile:
            file = cls.parse(ifile.read())
        file.project_root = find_project_root(filepath)
        return file

    @classmethod
    def from_parser(cls: Type[GDFileType], parse_result):
        """ Build a GDScene/GDResource/GDFile from the pyparsing result,
        choosing the concrete class from the first section's header """
        first_section = parse_result[0]
        if first_section.header.name == "gd_scene":
            # __new__ skips __init__, which would prepend a fresh header.
            scene = GDScene.__new__(GDScene)
            scene._sections = list(parse_result)
            return scene
        elif first_section.header.name == "gd_resource":
            resource = GDResource.__new__(GDResource)
            resource._sections = list(parse_result)
            return resource
        return cls(*parse_result)

    def write(self, filename: str):
        """ Writes this to a file """
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        with open(filename, "w") as ofile:
            ofile.write(str(self))

    def __str__(self) -> str:
        return "\n\n".join([str(s) for s in self._sections]) + "\n"

    def __repr__(self) -> str:
        return "%s(%s)" % (type(self).__name__, self.__str__())

    def __eq__(self, other) -> bool:
        if not isinstance(other, GDFile):
            return False
        return self._sections == other._sections

    def __ne__(self, other) -> bool:
        return not self.__eq__(other)
class GDCommonFile(GDFile):
    """ Base class with common application logic for all Godot file types

    Maintains the ``load_steps`` header attribute (1 for the file itself
    plus one per ext_resource/sub_resource) as sections are added/removed,
    and provides resource cleanup/renumbering helpers.
    """

    def __init__(self, name: str, *sections: GDSection) -> None:
        super().__init__(
            GDSection(GDSectionHeader(name, load_steps=1, format=2)), *sections
        )
        self.load_steps = (
            1 + len(self.get_ext_resources()) + len(self.get_sub_resources())
        )

    @property
    def load_steps(self) -> int:
        return self._sections[0].header["load_steps"]

    @load_steps.setter
    def load_steps(self, steps: int):
        self._sections[0].header["load_steps"] = steps

    def add_section(self, new_section: GDSection) -> int:
        """ Add a section, bumping load_steps for resource sections """
        idx = super().add_section(new_section)
        if new_section.header.name in ["ext_resource", "sub_resource"]:
            self.load_steps += 1
        return idx

    def remove_at(self, index: int) -> GDSection:
        """ Remove a section at an index, keeping load_steps in sync

        Bug fix: the original override discarded the popped section; the
        base class contract is to return it.
        """
        section = self._sections.pop(index)
        if section.header.name in ["ext_resource", "sub_resource"]:
            self.load_steps -= 1
        return section

    def remove_unused_resources(self):
        """ Remove ext/sub resources that no node or resource references """
        self._remove_unused_resources(self.get_ext_resources(), ExtResource)
        self._remove_unused_resources(self.get_sub_resources(), SubResource)

    def _remove_unused_resources(
        self,
        sections: Sequence[Union[GDExtResourceSection, GDSubResourceSection]],
        reference_type: Type[Union[ExtResource, SubResource]],
    ) -> None:
        # Collect the ids actually referenced, then drop the rest.
        seen = set()
        for ref in self._iter_node_resource_references():
            if isinstance(ref, reference_type):
                seen.add(ref.id)
        if len(seen) < len(sections):
            to_remove = [s for s in sections if s.id not in seen]
            for s in to_remove:
                self.remove_section(s)

    def renumber_resource_ids(self):
        """ Refactor all resource IDs to be sequential with no gaps """
        self._renumber_resource_ids(self.get_ext_resources(), ExtResource)
        self._renumber_resource_ids(self.get_sub_resources(), SubResource)

    def _iter_node_resource_references(
        self,
    ) -> Iterator[Union[ExtResource, SubResource]]:
        """ Yield every ExtResource/SubResource reference in node headers,
        node properties, and [resource] section properties """

        def iter_resources(value):
            # Recursively walk lists, dicts, and GDObject args for references.
            if isinstance(value, (ExtResource, SubResource)):
                yield value
            elif isinstance(value, list):
                for v in value:
                    yield from iter_resources(v)
            elif isinstance(value, dict):
                for v in value.values():
                    yield from iter_resources(v)
            elif isinstance(value, GDObject):
                for v in value.args:
                    yield from iter_resources(v)

        for node in self.get_nodes():
            yield from iter_resources(node.header.attributes)
            yield from iter_resources(node.properties)
        for resource in self.get_sections("resource"):
            yield from iter_resources(resource.properties)

    def _renumber_resource_ids(
        self,
        sections: Sequence[Union[GDExtResourceSection, GDSubResourceSection]],
        reference_type: Type[Union[ExtResource, SubResource]],
    ) -> None:
        id_map = {}
        # First we renumber all the resource IDs so there are no gaps
        for i, section in enumerate(sections):
            id_map[section.id] = i + 1
            section.id = i + 1
        # Now we update all references to use the new number
        for ref in self._iter_node_resource_references():
            if isinstance(ref, reference_type):
                try:
                    ref.id = id_map[ref.id]
                except KeyError:
                    raise GodotFileException("Unknown resource ID %d" % ref.id)
class GDScene(GDCommonFile):
    """ A Godot scene file (.tscn): GDCommonFile with a [gd_scene] header """

    def __init__(self, *sections: GDSection) -> None:
        super().__init__("gd_scene", *sections)
class GDResource(GDCommonFile):
    """ A Godot resource file (.tres): GDCommonFile with a [gd_resource] header """

    def __init__(self, *sections: GDSection) -> None:
        super().__init__("gd_resource", *sections)
|
# make_plots.py
# Make plots for other codes
#
# Created 9 Nov 18
# Updated 9 Nov 18
###################################################################
#Backend for python3 on mahler
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import os
import sys
import numpy as np
import math
# Code to make plots
def make_plots(lines, specname, obswvl, obsflux, synthflux, outputname, resids=True, ivar=None, title=None, synthfluxup=None, synthfluxdown=None, synthflux_nomn=None, synthflux_cluster=None, savechisq=None, hires=False):
    """Make plots.

    Inputs:
    lines -- which linelist to use? Options: 'new', 'old'
    specname -- name of star
    obswvl -- observed wavelength array
    obsflux -- observed flux
    synthflux -- synthetic flux
    outputname -- where to output file

    Keywords:
    resids -- plot residuals if 'True' (default); else, don't plot residuals
    ivar -- inverse variance; if 'None' (default), don't plot errorbars
    title -- plot title; if 'None' (default), then plot title = "Star + ID"
    synthfluxup & synthfluxdown -- if not 'None' (default), then plot synthetic spectrum as region between [Mn/H]_best +/- 0.3dex
    synthflux_nomn -- if not 'None' (default), then plot synthetic spectrum with [Mn/H] = -10.0
    synthflux_cluster -- if not 'None' (default), then plot synthetic spectrum with mean [Mn/H] of cluster; in format synthflux_cluster = [mean [Mn/H], spectrum]
    savechisq -- if not 'None' (default), compute & save reduced chi-sq for each line in output file with path savechisq
    hires -- if 'False', plot as normal; else, zoom in a bit to show hi-res spectra
        (NOTE(review): currently unused in the body -- confirm before relying on it)
    """
    # Mn line centers (Angstroms), per-line highlight half-widths, and the
    # subplot grid layout for each supported linelist.
    if lines == 'new':
        linelist = np.array([4739.1, 4754.0, 4761.9, 4765.8, 4783.4,
                             4823.5, 5407.3, 5420.3, 5516.8, 5537.7,
                             6013.3, 6016.6, 6021.8, 6384.7, 6491.7])
        linewidth = np.array([1., 1., 1.5, 1., 1.,
                              1., 1., 1., 1., 1.,
                              1., 1., 1., 1., 1.])
        nrows = 3
        ncols = 5
        figsize = (32, 15)
    elif lines == 'old':
        linelist = np.array([4739., 4783., 4823., 5394., 5432., 5516., 5537., 6013., 6021., 6384., 6491.])
        linewidth = np.ones(len(linelist))
        nrows = 3
        ncols = 4
        figsize = (20, 15)
    # Default title.
    if title is None:
        title = 'Star' + specname
    # Figure 1: spectrum fits.
    plt.figure(num=1, figsize=figsize)
    plt.title(title)
    # Figure 2: residuals (optional).
    if resids:
        plt.figure(num=2, figsize=figsize)
        plt.title(title)
    # Figure 3: inverse variance (optional).
    if ivar is not None:
        plt.figure(num=3, figsize=figsize)
        plt.title(title)
    # Per-line reduced chi-squared accumulators.
    # NOTE(review): the chi-sq branch below also uses ivar, synthfluxup and
    # synthfluxdown, so savechisq is only meaningful when those are given.
    if savechisq is not None:
        chisq = np.zeros(len(linelist))
        chisq_up = np.zeros(len(linelist))
        chisq_down = np.zeros(len(linelist))
    for i in range(len(linelist)):
        # Wavelength window (+/- 10 A) around this line.
        lolim = linelist[i] - 10
        uplim = linelist[i] + 10
        try:
            mask = np.where((obswvl > lolim) & (obswvl < uplim))
            if len(mask[0]) > 0:
                # Convert inverse variance to 1-sigma error bars.
                if ivar is not None:
                    yerr = np.power(ivar[mask], -0.5)
                else:
                    yerr = None
                # Panel in figure 1: observed points vs synthetic spectrum.
                with plt.rc_context({'axes.linewidth': 4, 'axes.edgecolor': '#594F4F', 'xtick.color': '#594F4F', 'ytick.color': '#594F4F'}):
                    plt.figure(1)
                    if i == 0:
                        ax = plt.subplot(nrows, ncols, i + 1)
                    else:
                        plt.subplot(nrows, ncols, i + 1)
                    # Shade the fitted line region.
                    plt.axvspan(linelist[i] - linewidth[i], linelist[i] + linewidth[i], color='green', zorder=1, alpha=0.25)
                    # Synthetic spectrum: band between the +/- abundance
                    # bounds if both are given, otherwise a single curve.
                    if (synthfluxup is not None) and (synthfluxdown is not None):
                        plt.fill_between(obswvl[mask], synthfluxup[mask], synthfluxdown[mask], facecolor='red', edgecolor='red', alpha=0.75, linewidth=0.5, label='Synthetic', zorder=2)
                    else:
                        plt.plot(obswvl[mask], synthflux[mask], color='r', alpha=0.5, linestyle='-', linewidth=2, label='Synthetic', zorder=100)
                    # Optional comparison spectra.
                    if synthflux_nomn is not None:
                        plt.plot(obswvl[mask], synthflux_nomn[mask], 'b-', label='[Mn/H] = -10.0', zorder=2)
                    if synthflux_cluster is not None:
                        plt.plot(obswvl[mask], synthflux_cluster[1][mask], color='purple', linestyle='--', linewidth=2, label='<[Mn/H]>=' + str(synthflux_cluster[0]), zorder=2)
                    # Observed spectrum.
                    plt.errorbar(obswvl[mask], obsflux[mask], yerr=yerr, color='k', fmt='o', markersize=6, label='Observed', zorder=3)
                    plt.yticks(fontsize=10)
                    plt.xlim((lolim, uplim))
                    plt.ylim((0.75, 1.10))
                    if i == 0:
                        leg = plt.legend(fancybox=True, framealpha=0.5, loc='best')
                        for text in leg.get_texts():
                            plt.setp(text, color='#594F4F', fontsize=18)
                # Reduced chi-squared for best fit and the +/- bounds.
                if savechisq is not None:
                    current_chisq = np.sum(np.power(obsflux[mask] - synthflux[mask], 2.) * ivar[mask]) / (len(obsflux[mask]) - 1.)
                    current_chisq_up = np.sum(np.power(obsflux[mask] - synthfluxup[mask], 2.) * ivar[mask]) / (len(obsflux[mask]) - 1.)
                    current_chisq_down = np.sum(np.power(obsflux[mask] - synthfluxdown[mask], 2.) * ivar[mask]) / (len(obsflux[mask]) - 1.)
                    chisq[i] = current_chisq
                    chisq_up[i] = current_chisq_up
                    chisq_down[i] = current_chisq_down
                # Panel in figure 2: residuals (obs - synth).
                if resids:
                    plt.figure(2)
                    plt.subplot(nrows, ncols, i + 1)
                    plt.axvspan(linelist[i] - linewidth[i], linelist[i] + linewidth[i], color='green', alpha=0.25)
                    plt.errorbar(obswvl[mask], obsflux[mask] - synthflux[mask], yerr=yerr, color='k', fmt='o', label='Residuals')
                    plt.axhline(0, color='r', linestyle='solid', label='Zero')
                # Panel in figure 3: inverse variance.
                if ivar is not None:
                    plt.figure(3)
                    plt.subplot(nrows, ncols, i + 1)
                    plt.axvspan(linelist[i] - linewidth[i], linelist[i] + linewidth[i], color='green', alpha=0.25)
                    plt.errorbar(obswvl[mask], ivar[mask], color='k', linestyle='-')
        except Exception:
            # Skip lines whose window falls outside the spectrum (or any
            # other per-line failure) and keep plotting the rest.
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            continue
    # Save figure 1 (fits).
    fig = plt.figure(1)
    plt.savefig(outputname + '/' + specname + 'finalfits.png', bbox_inches='tight')
    plt.close(1)
    # Save figure 2 (residuals).
    if resids:
        fig2 = plt.figure(2)
        fig2.text(0.5, 0.04, 'Wavelength (A)', fontsize=18, ha='center', va='center')
        fig2.text(0.06, 0.5, 'Residuals', fontsize=18, ha='center', va='center', rotation='vertical')
        plt.legend(loc='best')
        plt.savefig(outputname + '/' + specname + 'resids.png', bbox_inches='tight')
        plt.close(2)
    # Save figure 3 (inverse variance).
    if ivar is not None:
        fig3 = plt.figure(3)
        fig3.text(0.5, 0.04, 'Wavelength (A)', fontsize=18, ha='center', va='center')
        fig3.text(0.06, 0.5, 'Inverse variance', fontsize=18, ha='center', va='center', rotation='vertical')
        plt.savefig(outputname + '/' + specname + 'ivar.png', bbox_inches='tight')
        plt.close(3)
    # Append per-line chi-sq rows: star name, line index, best, up, down.
    if savechisq is not None:
        with open(savechisq, 'a') as f:
            for i in range(len(linelist)):
                f.write(specname[:-1] + '\t' + str(i) + '\t' + str(chisq[i]) + '\t' + str(chisq_up[i]) + '\t' + str(chisq_down[i]) + '\n')
    return
from flask import *
from werkzeug.utils import import_string
import requests
import googlmap
import Googlemap2
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def pass_val():
    """Serve the index page; answer POSTs with a JSON greeting.

    BUG FIX: the route previously allowed only GET (Flask's default), so
    the POST branch below was unreachable -- Flask answered 405 before the
    view ever ran. Declaring POST makes the branch live.
    """
    if request.method == 'POST':
        return jsonify('hello')
    return render_template('index.html')
@app.route('/get_ans', methods=['POST', 'GET'])
def get_ans():
    """POST: return the airport list; GET: log the JSON body and reply with a stub."""
    if request.method == 'POST':
        return jsonify(googlmap.get_airports())
    # Only GET can reach here (the route permits just POST and GET).
    payload = request.get_json()
    print(payload)
    return jsonify("helloooo")
@app.route('/get_airport', methods=['POST', 'GET'])
def get_airport():
    """POST: look up airport details for the posted JSON payload."""
    if request.method == 'POST':
        req = request.get_json()
        return jsonify(Googlemap2.getdetail(req))
    # BUG FIX: GET was declared in `methods` but the view had no return for
    # it, so Flask raised "view function did not return a valid response"
    # (HTTP 500). Respond explicitly instead.
    return jsonify("use POST"), 405
if __name__ == '__main__':
    # Development entry point only; debug=True must not be used in production.
    app.run(debug=True)
def maior_numero():
    """Read five integers from the user and return the largest one."""
    lista = []
    for _ in range(5):
        num = int(input("Informe um numero: "))
        lista.append(num)
    # BUG FIX: the original contained a bare no-op expression statement
    # (`lista`) and sorted the whole list (O(n log n)) just to take the
    # last element; max() does the same in one O(n) pass.
    return max(lista)
# Script entry: prompt for five numbers and print the largest.
print(maior_numero())
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
''' Marínez García Mariana Yasmin
316112840
Taller de Herramientas Computacionales
Class 10 exercise: print a Celsius-to-Fahrenheit conversion table in
three different ways (while loop, for loop, while over a list). '''
# Version 1: while loop counting C from -20 to 40 in steps of iC.
S= '==============================================================='
C = -20
iC = 5
print ' C F'
while C <= 40:
    F = (9.0/5)*C + 32
    print '%5d %5.1f' % (C,F)
    C = C +iC  # assignment: the right-hand side is evaluated and stored in "C"
    # this can also be written as "C += iC"
print S
# Version 2: for loop over an explicit list of Celsius values.
gradosC = [-20, -15, -10, -5, 0, 5, 10, 15, 20, 25, 30, 35, 40]
print ' C F'
for grado in gradosC:
    F = (9.0/5)*grado + 32
    print '%5d %5.2f' % (grado, F)  # 5 = field width; .2 = two decimal places
print S
# Version 3: while loop indexing into the same list.
indice = 0
print ' C F'
while indice < len(gradosC):
    C = gradosC[indice]
    F = (9.0/5)*C + 32
    print '%5d %5.1f' % (C,F)
    indice += 1
print S
# Build the Celsius list with range(): -20 up to (not including) 45, step 5.
gradosC = []
for C in range(-20,45,5):
    gradosC.append(C)
print gradosC
# range is a function and append is a method.
# A shorter way to write this: gradosC = range(-20,45,5)
print S
# Same list but with a fractional step (2.5), which range() cannot do.
gradosC = []
for i in range(0,21):
    C = -20 + i*2.5
    gradosC.append(C)
print gradosC
|
#!/usr/bin/python
import os
import sys
#a few things to make it easier for me
import func
# Absolute directory this script lives in; all channel paths hang off it.
basepath=os.path.dirname(os.path.realpath(__file__))+'/'
if len(sys.argv)!=2:
    print "Usage: director <channel dir>"
    exit()
# Configuration values, filled in from <channel>/channel.conf below.
channelName=""
channelPath=""
ffmpegname=""
ffmpegpath=""
ffmpegcmd=""
channelFullName=""
indexpath=""
showspath=""
feedurl=""
dyntext1=""
dyntext2=""
dyntext3=""
inphone=""
dynfont1=""
dynfont2=""
dynfont3=""
phonefont=""
# many voodoo here to set up paths and commandlines and
# all that shit. If you don't know what it does,
# don't change it. Or at least make backups first.
name = sys.argv[1]
channelName=name
channelPath=basepath+channelName+'/'
schedulebase=channelPath+"schedule"
# Write our PID to the channel's director.pid so other tools can kill
# this director later.
pid=os.getpid()
f=open(channelPath+'director.pid','w')
f.write(str(pid))
f.write('\n')
f.close()
# Parse channel.conf: one "key = value" per line; unknown keys are ignored.
f = open(channelPath+'channel.conf')
lines=f.readlines()
f.close()
for b in lines:
    c=b.strip()
    line=c.split('=')
    if line[0].strip() == 'ffmpeg':
        ffmpegname=line[1].strip()
    if line[0].strip() == 'ffmpegpath':
        ffmpegpath=line[1].strip()
    if line[0].strip() == 'channelfullname':
        channelFullName=line[1].strip()
    if line[0].strip() == 'showspath':
        showspath=line[1].strip()
    if line[0].strip() == 'feedurl':
        feedurl=line[1].strip()
    if line[0].strip() == 'dyntext1':
        dyntext1=line[1].strip()
    if line[0].strip() == 'dynfont1':
        dynfont1=line[1].strip()
    if line[0].strip() == 'dyntext2':
        dyntext2=line[1].strip()
    if line[0].strip() == 'dynfont2':
        dynfont2=line[1].strip()
    if line[0].strip() == 'dyntext3':
        dyntext3=line[1].strip()
    if line[0].strip() == 'dynfont3':
        dynfont3=line[1].strip()
    if line[0].strip() == 'inphone':
        inphone=line[1].strip()
    if line[0].strip() == 'phonefont':
        phonefont=line[1].strip()
# Derived paths and the full ffmpeg command path.
ffmpegcmd=ffmpegpath+ffmpegname
indexpath=channelPath+'index/'
fontpath=basepath+'Font/'
logopath=basepath+'Logo/'
# config voodoo area clear
#
#what time is it, yo?
#DOW 0=Monday ... 6=Sunday
DOW=func.getDayOfWeek()
NowHour=func.getCurrentHour()
NowMinute=func.getCurrentMinute()
NowSecond=func.getCurrentSecond()
TODMin=func.getTODMin()
schedulefile=schedulebase
scheduledshow=""
ShowSchedule=False
offset=0
#check if dayofweek override schedule is preset, if so,
#use that instead of default.
if os.path.isfile(schedulefile+'.'+str(DOW)):
    schedulefile=schedulefile+'.'+str(DOW)
#read schedule
f = open(schedulefile,"r")
schedulelines = f.readlines()
f.close()
knownstarttime=0
#get last known show start time (written at the previous show change)
if os.path.isfile(channelPath+'current.start'):
    f = open(channelPath+'current.start','r')
    tmp=f.readlines()
    f.close()
    knownstarttime=int(tmp[0].strip())
FullSet=False
ShowStart=False
#determine what show should be running at this minute
#much voodoo
# Schedule line format appears to be HH:MM:LEN:showfile -- TODO confirm.
for x in range(0,len(schedulelines)):
    s=schedulelines[x].split(':')
    if len(s)>3: #valid schedule entry
        start=int(s[1])+(int(s[0])*60) #start minute of show
        end=int(s[2])+start #end minute of show
        if TODMin >= start: #if it's at or later than start minute
            if TODMin < end: #and less than end minute
                #this is the current show that's supposed to be playing.
                cshow=s[3].strip() #here is the show filename
                scheduledshow=cshow
                offset=((TODMin-start)*60)+NowSecond #how far in the show are we supposed to be (in seconds)
                scheduledstart=start #we were supposed to start this show at this time
                print start
                if start != knownstarttime:
                    ShowStart=True
                    FullSet=True
                if TODMin >= (end-5): #make a note if it's time to show the schedule onscreen
                    ShowSchedule=True
                if TODMin == start: #assume SHOWTIME!
                    offset=0
                    ShowStart=True
#scheduledshow contains what show we're supposed to be showing
#scheduledstart contains when current show was supposed to have started
#ShowSchedule is True or False, show schedule if true, but only
#if we didn't already do it.
#offset contains number of seconds into the show we're supposed to be.
#if offset = 0, it's beginning of show. Period.
if ShowStart:
    #here's where we run the showfile
    #showinfo needs to be reworked for subdirectory channels (mark3)
    #tweak to allow for 1 schoolhouse rock after every show
    if os.path.isfile(channelPath+"filler.first"):
        os.remove(channelPath+"filler.first")
    #write startfile, to indicate when current show was supposed to start
    #this is so we can determine if we need to start a new show, or
    #continue, in case we weren't running for a while
    #also for check in case of back-to-back shows of the same name
    f = open(channelPath+'current.start','w')
    f.write(str(scheduledstart)+'\n\n')
    f.close()
    # clear onscreen text, just like how cleartext does it.
    f = open(dyntext2,"r+")
    f.seek(0)
    f.truncate(0)
    f.write('\n\n')
    f.close()
    f = open(dyntext3,"r+")
    f.seek(0)
    f.truncate(0)
    f.write('\n\n')
    f.close()
# NOTE(review): this second `if ShowStart:` continues the same show-start
# handling and could be merged with the block above.
if ShowStart:
    print "Start Show"
    #this is where we call the current Show file
    #Show file args: <channel> </path/to/queueshow/> </path/to/channel/index/>
    if (offset == 0) or FullSet:
        cmd=showspath+scheduledshow+' '+channelName+' "'+basepath+'" "'+indexpath+'"'
        os.system(cmd)
        #call cameraman: /path/to/cameraman <channel> <videofullpath> <offset>
        f = open(channelPath+'current.episode','r')
        tmp=f.readlines()
        f.close()
        vfp=tmp[0].strip()
        cmd=basepath+'cameraman '+channelName+' "'+vfp+'" '+str(offset)
        os.system(cmd)
        #that should work.
    else:
        print "Restart @ "+str(offset)+" seconds."
        if func.isRunning(ffmpegname):
            print "We ok."
        else:
            print ffmpegname+" is not running."
            #call cameraman: /path/to/cameraman <channel> <videofullpath> <offset>
            f = open(channelPath+'current.episode','r')
            tmp=f.readlines()
            f.close()
            vfp=tmp[0].strip()
            cmd=basepath+'cameraman '+channelName+' "'+vfp+'" '+str(offset)
            os.system(cmd)
if ShowSchedule:
    # Near the end of the show: overlay the upcoming schedule on screen.
    cmd=basepath+'showschedule '+channelName
    os.system(cmd)
|
# coding: utf-8
from django.shortcuts import render_to_response, redirect, get_object_or_404
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from skcrm.models import ExpenseConceptSubType
from skcrm.tables import ExpenseConceptSubTypeTable
from skcrm.forms import SearchExpenseConceptSubTypeForm, ExpenseConceptSubTypeForm
from django.db.models import Q
from django.core.urlresolvers import reverse
from django.contrib import messages
@login_required
def ls(request, id=None):
    """List ExpenseConceptSubType rows, optionally filtered by name."""
    qs = ExpenseConceptSubType.objects.all()
    search = SearchExpenseConceptSubTypeForm()
    if request.method == 'POST':
        # Search form submitted: narrow the queryset by name substring.
        search = SearchExpenseConceptSubTypeForm(request.POST, request.FILES)
        if search.is_valid():
            qs = qs.filter(name__icontains=search.cleaned_data['name'])
    table = ExpenseConceptSubTypeTable(qs, order_by=request.GET.get('sort'))
    table.paginate(page=request.GET.get('page', 1), per_page=25)
    return render_to_response('sub_concept_type_list.html',
                              {'form': search, 'table': table},
                              context_instance=RequestContext(request))
@login_required
def edit(request, id=None):
    """Create (no id) or update (id given) an ExpenseConceptSubType."""
    if id:
        concept_type = get_object_or_404(ExpenseConceptSubType, pk=id)
    else:
        concept_type = ExpenseConceptSubType()
    form = ExpenseConceptSubTypeForm(instance=concept_type)
    # Non-empty POST means the edit form was submitted.
    if request.POST:
        form = ExpenseConceptSubTypeForm(request.POST, instance=concept_type)
        if form.is_valid():
            form.save()
            messages.success(request, "Cambios guardados correctamente.")
            return redirect('sub_concept_type_edit', id=concept_type.id)
    return render_to_response('sub_concept_type_edit.html',
                              {'form': form, 'obj': concept_type},
                              context_instance=RequestContext(request))
@login_required
def delete(request, id=None):
    """Delete the ExpenseConceptSubType with primary key *id* and go back to the list."""
    get_object_or_404(ExpenseConceptSubType, pk=id).delete()
    return redirect('sub_concept_type_list')
|
# Generated by Django 2.0.7 on 2018-11-10 05:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add ApplySubsidy.sec_elig, a FK to subsidy_elig.SubsidyEligibility."""

    dependencies = [
        ('subsidy_elig', '0004_subsidyeligibility_subsidy_ftype'),
        ('apply_subsidy', '0017_auto_20181110_1023'),
    ]

    operations = [
        migrations.AddField(
            model_name='applysubsidy',
            name='sec_elig',
            # default=8 backfills existing rows; preserve_default=False
            # drops the default again after the migration runs.
            field=models.ForeignKey(default=8, on_delete=django.db.models.deletion.CASCADE, related_name='elig_sec', to='subsidy_elig.SubsidyEligibility'),
        ),
    ]
|
def task1():
    """Print each number 0-9 alongside the previous number and their sum."""
    print("Task1")
    prev = 0
    for cur in range(10):
        print(f"Current no. {cur}\t Previous no. {prev}\t Sum {cur+prev}")
        prev = cur
def task2():
    """Print True when the first and last items of the sample list are equal."""
    nums = [10, 20, 30, 40, 10]
    print("\nTask 2 ")
    if nums[0] == nums[-1]:
        print("True\n")
def task3():
    """Append 7000 to the innermost sub-list of a nested sample list and print it."""
    nested = [10, 20, [300, 400, [5000, 6000], 500], 30, 40]
    # insert at index 2 of a 2-item list == append at the end
    nested[2][2].insert(2, 7000)
    print("Task 3")
    print(nested)
def task4():
    """Zip two parallel lists into a {key: value} dict and print it."""
    keys = ['Ten', 'Twenty', 'Thirty']
    values = [10, 20, 30]
    print("\nTask 4")
    # BUG FIX (perf/cleanliness): the original rebuilt the identical
    # dict(zip(...)) once per key inside a loop; a single pass suffices
    # and the printed result is unchanged.
    newdict = dict(zip(keys, values))
    print(newdict)
def task5():
    """Print the 'history' mark from a nested sample dictionary."""
    sample = {
        "class": {
            "student": {
                "name": "Mike",
                "marks": {
                    "physics": 70,
                    "history": 80,
                },
            },
        },
    }
    print("\nTask 5")
    print(sample['class']['student']['marks']['history'])
def file_handling():
    """Read "key:value" lines from a fixed file and print them as a list of
    single-entry dicts."""
    records = []
    with open("C:\\Users\\Dell\\Downloads\\file1.txt", "r") as fh:
        for line in fh.readlines():
            key, value = line.split(":")
            records.append({key: value.strip()})
    print(records)
def file_handlingtask():
try:
f = open('C:\\Users\\Dell\\Downloads\\file1.txt','r')
file = f.readlines()
l=list()
for x in file:
(key, value) = x.split(':')
a = {
key : value.strip()
}
l.append(a)
print(l)
except NameError:
print("Exception occurs")
# Script entry: run every exercise in order. The last two read a fixed
# local file; file_handling() will raise if that file is missing, while
# file_handlingtask() wraps the read in a try/except.
task1()
task2()
task3()
task4()
task5()
file_handling()
file_handlingtask()
# Generated by Django 2.1.2 on 2018-12-01 11:45
from django.db import migrations
class Migration(migrations.Migration):
    """Set custom view/add/edit/delete instance permissions on the user model."""

    dependencies = [
        ('accounts', '0012_auto_20181112_2232'),
    ]

    operations = [
        # AlterModelOptions replaces the model's Meta.permissions wholesale.
        migrations.AlterModelOptions(
            name='user',
            options={'permissions': (('view_user_instance', 'View User Instance'), ('add_user_instance', 'Add User Instance'), ('edit_user_instance', 'Edit User Instance'), ('delete_user_instance', 'Delete User Instance'))},
        ),
    ]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
class Migration(migrations.Migration):
    """Create the Event model (Title, date, venue, content, image)."""

    dependencies = [
        ('portalapp', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('Title', models.CharField(max_length=256)),
                # Callable default: evaluated at row-creation time, not at import time.
                ('date', models.DateTimeField(default=datetime.datetime.now)),
                ('venue', models.CharField(max_length=512)),
                ('content', models.TextField()),
                ('image', models.ImageField(help_text='Maximum resolution 800x600. Larger images will be resized.', upload_to='eventphotos')),
            ],
        ),
    ]
|
# Python 2 exercise: echo the input converted to the case (upper/lower)
# that already dominates it; ties go to lowercase.
string=raw_input()
# In Python 2, filter() returns a list, so len() works directly.
lno= len(filter(lambda x: x.islower(),list(string)))
uno= len(filter(lambda x: x.isupper(),list(string)))
if lno<uno:
    print string.upper()
else:
    print string.lower()
|
""" Handling of OCaml bytecode files ending with .byte extension. """
import logging
from .code import load_code
from .marshall import read_value
logger = logging.getLogger("ocaml")
class ByteCodeReader:
    """Reader for OCaml bytecode files.

    The file ends with a 16-byte trailer (section count + magic string);
    the section descriptor table sits immediately before the trailer, and
    the section payloads are laid out back-to-back before the table.
    """

    # Magic value for bytecode produced by compiler version X023.
    MAGIC_V023 = "Caml1999X023"

    def __init__(self, reader):
        # `reader` wraps the open file (at `reader.f`) and provides
        # read_u32()/read_bytes() primitives.
        self.reader = reader

    def read(self):
        """Parse the trailer and every section; return {name: parsed value}."""
        num_sections = self.read_trailer()
        section_size = 8  # per descriptor: 4-byte name + 4-byte length
        section_header_pos = -16 - num_sections * section_size
        self.reader.f.seek(section_header_pos, 2)  # whence=2: from EOF
        sections = self.read_section_descriptions(num_sections)
        all_sections_size = sum(s[1] for s in sections)
        # Rewind to the first section payload, just before the table.
        self.reader.f.seek(section_header_pos - all_sections_size, 2)
        return self.read_sections(sections)

    def read_sections(self, sections):
        """Read each (name, length) section, decoding the ones we support."""
        fn_map = {
            "CODE": self.read_code_section,
            "DATA": self.process_data_section,
        }
        result = {}
        for name, length in sections:
            data = self.reader.read_bytes(length)
            if name in fn_map:
                logger.info("Processing: %s", name)
                value = fn_map[name](data)
                result[name] = value
            else:
                logger.error("TODO: %s", name)
        return result

    def read_trailer(self):
        """Read and validate the trailer; return the number of sections."""
        self.reader.f.seek(-16, 2)
        num_sections = self.reader.read_u32()
        magic_len = len(self.MAGIC_V023)
        magic = self.reader.read_bytes(magic_len)
        magic = magic.decode("ascii")
        if magic != self.MAGIC_V023:
            raise ValueError("Unexpected magic value {}".format(magic))
        return num_sections

    def read_section_descriptions(self, num_sections):
        """Return the [(name, byte_length), ...] section table."""
        sections = []
        for _ in range(num_sections):
            name = self.reader.read_bytes(4).decode("ascii")
            length = self.reader.read_u32()
            logger.debug("section %s with %s bytes", name, length)
            sections.append((name, length))
        return sections

    def read_code_section(self, data):
        """Decode the CODE section via load_code()."""
        if len(data) % 4 != 0:
            raise ValueError("Code must be a multiple of 4 bytes")
        return load_code(data)

    def process_data_section(self, data):
        """Unmarshal the DATA section.

        BUG FIX: the original discarded read_value()'s result (implicitly
        returning None), so read() always reported result["DATA"] = None;
        the parsed value is now returned.
        """
        return read_value(data)
|
# Generated by Django 2.2.7 on 2019-11-19 07:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a required image field to Book, backfilling existing rows with ''."""

    dependencies = [
        ('api', '0003_auto_20191118_1333'),
    ]

    operations = [
        migrations.AddField(
            model_name='book',
            name='image',
            # default='' is only used to populate existing rows;
            # preserve_default=False removes it from the field afterwards.
            field=models.ImageField(default='', upload_to='photos'),
            preserve_default=False,
        ),
    ]
|
import unittest
from pitcoin_modules.settings import *
from pitcoin_modules.transaction import Transaction, Input, Output
from pitcoin_modules.storage_handlers.utxo_pool import UTXOStorage
class TestUTXOPool(unittest.TestCase):
    """Tests for UTXOStorage: applying transactions, membership checks,
    and per-address filtering of unspent outputs."""

    # Dedicated test pool file so these tests don't touch the real pool.
    storage_filepath = PROJECT_ROOT + "/storage/.utxo_pool_test.txt"

    def test_updating_utxo_pool_with_tx(self):
        """Applying a tx removes its spent input's output and adds its new outputs."""
        storage = UTXOStorage(self.storage_filepath)
        storage.delete_all_otputs()
        # NOTE(review): outp0's positional arguments ("", 10) are ordered
        # differently from outp1/outp2 below (amount, script) -- confirm the
        # intended Output(...) signature.
        outp0 = Output("", 10, txid="07c0efe33946c5f81b5a86d79eda89e47979d4796d5ec675a9fccde7c31c4f50", vout=1)
        storage.add_new_output(outp0)
        outp1 = Output(
            900,
            "76a914cabf271134a5f9228132598c8b4e6ad4586532f888ac",
            txid="1423215db125380dd21051c0d22f31fd4be2a25794b8789796343f4015c1baff",
            vout=1
        )
        outp2 = Output(
            4999999100,
            "76a914e8e4b375038b0a1a1dc70543eab7ea6ce279df4388ac",
            txid="1423215db125380dd21051c0d22f31fd4be2a25794b8789796343f4015c1baff",
            vout=2
        )
        # The tx spends outp0 (same txid/vout as its input) and creates outp1/outp2.
        tx = Transaction(
            [
                Input(
                    "07c0efe33946c5f81b5a86d79eda89e47979d4796d5ec675a9fccde7c31c4f50",
                    1,
                    "404bb493aa8509356c1295c65acd3a44c339729d865422a47cb15631cda545ee3fc2eb86b418a5bb90202040430b723fdbf8429ff232bfa521c25da09539644093410450e829ca678c60031a11b990fea865e03ba35d0579aa62750b918b98c4b935d803ecc57a4bb2fc2ab1193a87fca5386d71516aca89df267fc907bcb3b84d396a"
                )
            ],
            [outp1, outp2],
            0
        )
        storage.update_with_new_transaction(tx)
        outp_list = storage.get_all_outputs()
        # Spent output gone, fresh outputs present.
        self.assertFalse(outp0 in outp_list)
        self.assertTrue(outp1 in outp_list)
        self.assertTrue(outp2 in outp_list)

    def test_contains(self):
        """contains_output() flips from False to True once the output is added."""
        storage = UTXOStorage(self.storage_filepath)
        storage.delete_all_otputs()
        outp0 = Output("", 10, txid="07c0efe33946c5f81b5a86d79eda89e47979d4796d5ec675a9fccde7c31c4f50", vout=1)
        self.assertFalse(storage.contains_output(outp0))
        storage.add_new_output(outp0)
        self.assertTrue(storage.contains_output(outp0))

    def test_get_all_unspent_outputs_for_address(self):
        """Address filtering returns only outputs whose script pays that address."""
        storage = UTXOStorage(self.storage_filepath)
        storage.delete_all_otputs()
        outp1 = Output(
            900,
            "76a914cabf271134a5f9228132598c8b4e6ad4586532f888ac",
            txid="1423215db125380dd21051c0d22f31fd4be2a25794b8789796343f4015c1baff",
            vout=1
        )
        outp2 = Output(
            4999999100,
            "76a914e8e4b375038b0a1a1dc70543eab7ea6ce279df4388ac",
            txid="1423215db125380dd21051c0d22f31fd4be2a25794b8789796343f4015c1baff",
            vout=2
        )
        storage.add_new_output(outp1)
        storage.add_new_output(outp2)
        outp_list = storage.get_all_unspent_outputs_for_address("1KV2VGQiTB1B5KPEyyEPvifcqfS6PUxdxj")
        self.assertTrue(outp1 in outp_list)
        self.assertFalse(outp2 in outp_list)
|
from django.urls import path
from application.consumers import ApplicationConsumer
from application.dashboardConsumer import DashboardConsumer
# WebSocket routes (Django Channels): one consumer per application
# workspace, plus a dashboard consumer.
ws_urlpatterns = [
    path('application/<app_code>/workspace/', ApplicationConsumer.as_asgi()),
    path('dashboard/', DashboardConsumer.as_asgi())
]
|
#!/usr/bin/python
""" Main Class for the Bot """
__author__ = 'BiohZn'
import sys
sys.dont_write_bytecode = True
from irccon import irc
class main:
    """Minimal IRC bot driver (Python 2): owns the connection and feeds
    complete CRLF-terminated lines to the irc parser."""

    def __init__(self):
        self.buffer = ''  # unparsed tail of the incoming socket stream
        self.irc = irc()

    def connect(self):
        # Connection setup is delegated entirely to the irc helper.
        self.irc.connect()

    def read(self):
        # Python 2: socket.recv() returns str, so += on a str buffer works.
        self.buffer += self.irc.socket.recv(8192)
        # Peel off every complete line; keep any partial line buffered.
        while self.buffer.find('\r\n') != -1:
            currentline = self.buffer.split('\r\n')[0]
            self.buffer = self.buffer[len(currentline)+2:]
            self.irc.parse(currentline)
if __name__ == '__main__':
    bot = main()
    bot.connect()
    # NOTE(review): busy-wait -- while the connection is down this loop
    # spins at full CPU; recv() only blocks while connected.
    while 1:
        if bot.irc.connected:
            bot.read()
|
from Parser import Parser
from Code import Code
from SymbolTable import SymbolTable
import sys
import os
import pdb
# NOTE(review): successive reassignments -- only the LAST value
# (.../pong/Pong.asm) is ever used; the earlier lines look like leftovers
# from testing other programs and could be deleted or commented out.
DEFAULTPATH='/Users/pengfeigao/git/assembler/test/add/Add.asm'
DEFAULTPATH='/Users/pengfeigao/git/assembler/test/max/MaxL.asm'
DEFAULTPATH='/Users/pengfeigao/git/assembler/test/pong/PongL.asm'
DEFAULTPATH='/Users/pengfeigao/git/assembler/test/Rect/RectL.asm'
DEFAULTPATH='/Users/pengfeigao/git/assembler/test/max/Max.asm'
DEFAULTPATH='/Users/pengfeigao/git/assembler/test/Rect/Rect.asm'
DEFAULTPATH='/Users/pengfeigao/git/assembler/test/pong/Pong.asm'
class Assembler:
    """Translate a Hack .asm file into a .hack binary file (nand2tetris)."""

    def __init__(self, inputPath):
        """Load the source lines and derive the output path (<name>.hack)."""
        with open(inputPath, 'r') as f:
            self.content = f.readlines()
        fileName = inputPath.split('/')[-1][:-4]
        outputName = fileName + '.hack'
        self.outputPath = inputPath.replace(fileName + '.asm', outputName)
        self.hackCmds = []

    def run(self, isWrite=True):
        """Two-pass assembly: pass 1 records label addresses, pass 2 resolves
        symbols and emits 16-bit binary words (written out when isWrite)."""
        parser = Parser(self.content)
        symbolTable = SymbolTable()
        hackCmds = self.hackCmds
        asmCmds = parser.asmCmds
        print(asmCmds)
        # Pass 1: a label (L command) maps to the address of the NEXT real
        # instruction, so only A/C commands advance the line counter.
        lineNum = 0
        for asmCmd in asmCmds:
            commandType = parser.commandType(asmCmd)
            if commandType == 'L':
                symbol = parser.symbol(asmCmd)
                symbolTable.addEntry(symbol, lineNum)
            else:
                lineNum += 1
        # Pass 2: resolve symbols (new variables are allocated from RAM 16
        # upward) and emit the binary encoding for each A/C command.
        ramNo = 16
        for asmCmd in asmCmds:
            commandType = parser.commandType(asmCmd)
            if commandType in ['A', 'L']:
                symbol = parser.symbol(asmCmd)
                if not symbol.isdigit():
                    if symbolTable.contains(symbol):
                        address = symbolTable.getAddress(symbol)
                    else:
                        symbolTable.addEntry(symbol, ramNo)
                        address = ramNo
                        ramNo += 1
                else:
                    address = symbol
                # Labels produce no output; only A commands emit a word.
                if commandType == 'A':
                    hackCmds += ['0' + decimalToBinary15(address) + '\n']
            elif commandType == 'C':
                dest = parser.dest(asmCmd)
                print('dest:', dest)
                print('code dest', Code.dest(dest))
                comp = parser.comp(asmCmd)
                print('comp:', comp)
                print('code comp', Code.comp(comp))
                jump = parser.jump(asmCmd)
                print('jump:', jump)
                # BUG FIX: this debug line printed Code.dest(jump) while the
                # emitted instruction below correctly uses Code.jump(jump).
                print('code jump', Code.jump(jump))
                hackCmds += ['111' + Code.comp(comp) + Code.dest(dest) + Code.jump(jump) + '\n']
        print(hackCmds)
        print(self.outputPath)
        if isWrite:
            with open(self.outputPath, 'w') as f:
                f.write(''.join(hackCmds))
def decimalToBinary15(decimal):
    """Return *decimal* (an int or a numeric string) as a binary string,
    zero-padded on the left to 15 digits."""
    value = int(decimal)
    bits = []
    # Peel off the least significant bit until nothing remains.
    while value:
        bits.append(str(value % 2))
        value = int(value / 2)
    padding = ['0'] * (15 - len(bits))
    return ''.join(padding + bits[::-1])
if __name__ == '__main__':
    # CLI: optional first argument is the .asm path; otherwise assemble
    # the DEFAULTPATH file defined above.
    args = sys.argv
    if len(args) < 2: inputPath = DEFAULTPATH
    else: inputPath = sys.argv[1]
    assembler = Assembler(inputPath)
    assembler.run()
|
#!/usr/bin/env python
from view import *
from model import *
# Wire the view's App to the Model and start the event loop.
# NOTE(review): App comes from `view` and appears to be a Tk-style widget
# (it has mainloop()); Model.connect() presumably opens the data source --
# confirm in view.py / model.py.
app = App()
app.model = Model()
app.model.connect()
app.mainloop()
# -*- coding: utf-8 -*-
'''Data augmentation with fastai (module docstring; originally in Chinese).'''
# (Original notes, kept verbatim below: five ways to avoid overfitting --
# more data, data augmentation, generalized architectures, regularization,
# lower network complexity.)
'''
五个步骤避免过拟合:
获取更多数据、数据增广、generalized architectures、正则化、降低网络复杂度
'''
# (fastai ships three basic transform groups -- transforms_basic
# (RandomRotate, RandomLighting), transforms_side_on (+RandomFlip),
# transforms_top_down (+RandomDihedral) -- defined in transforms.py.)
'''
fastai中有三种基本的变化:transforms_basic, transforms_side_on 和 transforms_top_down,这三种变化由fastai源码中的transforms.py中的三个独立的类定义。
transforms_basic包括RandomRotate、RandomLighting;
transforms_side_on包括transforms_basic、RandomFlip;
transforms_top_down包括transforms_basic、RandomDihedral
'''
from matplotlib.pyplot import plot
import fastai
from fastai import vision

# Load a sample image from the working directory and display it.
im = vision.open_image("cluo.jpg")
im.show(figsize=(10,5))
|
# -*- coding:utf-8 -*-
import sys
import logging
def get_level(le=None):
    """Coerce *le* (None, str, bytes, or int) to a stdlib logging level int.

    - None or "noset" (any case)      -> logging.DEBUG
    - a level name ("info", b"ERROR") -> the matching logging constant
    - a level number (10 or "10")     -> the matching logging constant
    Raises KeyError for names/numbers logging does not recognize.
    """
    # BUG FIX: the original called le.lower() unconditionally, so passing an
    # int raised AttributeError before the int branch below was reached.
    if le is None or (isinstance(le, str) and le.lower() == "noset"):
        return logging.DEBUG
    if isinstance(le, bytes):
        le = le.decode()
    if isinstance(le, str):
        try:
            le = int(le)
        except ValueError:  # not numeric -> treat as a level name (was bare except)
            if le.upper() in logging._nameToLevel.keys():
                return getattr(logging, le.upper())
            else:
                raise KeyError("please input ('CRITICAL:50','ERROR':40,'WARN','WARNING':30,'INFO':20,'DEBUG':10)")
    if isinstance(le, int):
        if le in logging._levelToName.keys():
            l = logging._levelToName[le]
        else:
            raise KeyError("please input ('CRITICAL:50','ERROR':40,'WARNING':30,'INFO':20,'DEBUG':10)")
    return getattr(logging, l)
class LoggerBase(object):
    """Singleton logger wrapper: always logs to stdout, optionally to a file."""

    def __new__(cls, *args, **kwargs):
        # Classic singleton: every instantiation returns the same object.
        if not hasattr(cls, "_instance"):
            org = super(LoggerBase, cls)
            cls._instance = org.__new__(cls)
        return cls._instance

    def __init__(self, logger_name="", file_name=None, level=None):
        # __init__ runs on every call; handlers are attached only the first
        # time (the singleton's __dict__ is empty only then). Later calls
        # just adjust the level.
        if not self.__dict__:
            self.loggername = logger_name
            # BUG FIX: the attribute was stored as `self.fielname` but read
            # back as `self.filename`, so passing file_name crashed with
            # AttributeError before a FileHandler could be attached.
            self.filename = file_name
            self.level = get_level(level)
            self.logger = logging.getLogger(self.loggername)
            self.logger.setLevel(self.level)
            formater = logging.Formatter(fmt="[ %(asctime)s ] %(name)s - %(levelname)s : %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
            if self.filename is not None:
                fs = logging.FileHandler(self.filename)
                fs.setFormatter(formater)
                self.logger.addHandler(fs)
            cs = logging.StreamHandler(sys.stdout)
            cs.setFormatter(formater)
            self.logger.addHandler(cs)
        else:
            self.level = get_level(level)
            self.logger.setLevel(self.level)

    def info(self, mes=None):
        """Log *mes* at this wrapper's configured level (despite the name)."""
        if mes is None:
            raise ValueError("mes not is None")
        if self.level == 50:
            self.logger.critical(mes)
        elif self.level == 40:
            self.logger.error(mes)
        elif self.level == 30:
            self.logger.warning(mes)
        elif self.level == 20:
            self.logger.info(mes)
        elif self.level == 10 or self.level == 0:
            self.logger.debug(mes)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
"""
This module defines the main application classes for the decks.
This module defines the following classes:
- Deck
"""
# ------------------------------------------------------------------------------
# Standard modules
# ------------------------------------------------------------------------------
from random import shuffle
from xml.etree.ElementTree import parse
# from __future__ import print_function
# ------------------------------------------------------------------------------
# Application modules
# ------------------------------------------------------------------------------
from main import __xml__
from module.debug import *
from Class.Investigator import Investigator
from Class.CommonItem import CommonItem
from Class.Monster import Monster
# ------------------------------------------------------------------------------
# Deck container
# ------------------------------------------------------------------------------
class Deck:
    """
    Class gathering all the informations about a deck
    """
    def __init__(self, xml_file, expansion_list):
        """
        Initializes all the information about the deck.

        Loads <__xml__>/<xml_file>.xml and keeps only cards whose
        'expansion' tag is in *expansion_list*. Items with a 'count'
        attribute are appended that many times (the same object is reused).
        """
        _xml_file = __xml__ + xml_file + ".xml"
        tree = parse(_xml_file)
        root = tree.getroot()
        self.remaining_cards, self.discarding_cards = [], []
        # Element tag = file name minus the trailing "_list" suffix,
        # e.g. "monster_list" -> findall("monster").
        cut = xml_file.find('_list') - len(xml_file) - 1
        for _elt in root.findall(xml_file[:cut]):
            for exp in expansion_list:
                if _elt.find('expansion').text == exp:
                    if xml_file.find("investigator") > -1:
                        self.name = "investigators' deck"
                        self.remaining_cards.append(Investigator(_elt))
                    elif xml_file.find("common_item") > -1:
                        self.name = "common items' deck"
                        new_item = CommonItem(_elt)
                        for _iel in range(0, new_item.count):
                            self.remaining_cards.append(new_item)
                    elif xml_file.find("monster") > -1:
                        self.name = "monsters' bag"
                        new_item = Monster(_elt)
                        for _iel in range(0, new_item.count):
                            self.remaining_cards.append(new_item)
        # shuffle(self.remaining_cards)
        self.cards_number = len(self.remaining_cards)
        # NOTE(review): `_` appears to be a gettext-style translation
        # function installed globally elsewhere -- confirm before reuse.
        print(_("We put ") + str(self.cards_number)+ _(" cards")
            + _(" in the ") + self.name + ".")

    def draw_card(self):
        """
        Draws the first card of the deck (and remove it from the deck)
        """
        card = self.remaining_cards.pop(0)
        return card

    def discard_card(self, card):
        """
        Discards the card on the discard deck
        """
        self.discarding_cards.append(card)

    def mix(self):
        """
        Mixes the discard deck with the remaining cards
        """
        for _iel in self.discarding_cards:
            self.remaining_cards.append(_iel)
        self.discarding_cards = []
        shuffle(self.remaining_cards)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
|
from flask import Flask
# BUG FIX: the flask.ext.* import namespace was deprecated and then removed
# in Flask 1.0; extensions must be imported from their own package.
from flask_sqlalchemy import SQLAlchemy
import os

app = Flask(__name__)
# Database connection string comes from the environment (Heroku-style).
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']
db = SQLAlchemy(app)

# Imported at the bottom so views/models can import `app` and `db` above
# without a circular-import failure (standard Flask tutorial pattern).
from app import views, models
|
from urllib.request import urlopen
url="http://www.baidu.com"
# Send the GET request.
response=urlopen(url)
# Read the body and decode it as UTF-8 text.
info=response.read().decode('utf-8')
# Print the page content.
print(info)
# Print the HTTP status code.
print(response.getcode())
# Print the final URL (after any redirects).
print(response.geturl())
# Print the response headers.
print(response.info())
import numpy as np
# Twenty evenly spaced samples on [0.5, 10] (endpoints included),
# laid out as a 4x5 grid.
a = np.linspace(0.5, 10, 20).reshape(4, 5)
print(a)
|
# Calendar, Graphical calendar applet with novel interface
#
# test.py
#
# Copyright (c) 2010, Brandon Lewis <brandon_lewis@berkeley.edu>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import cal
import weekview
import gobject
import gtk
import traceback
import sys
import datetime
import os
import recurrence
class TestApp(cal.App):
    # Test double: behaves like the production app but persists its data
    # to a throwaway file that Tester deletes after each run.
    path = "test.data"
class Tester:
    """Drives one generator-based test case inside the GTK main loop."""

    def __init__(self, test_case):
        self.app = TestApp()
        # Start the test case one second after the app comes up.
        gobject.timeout_add(1000, self.run_test_case, test_case(self.app))
        self.app.run()
        # Remove the data file TestApp persisted during the run.
        os.unlink("test.data")

    def run_test_case(self, iterator):
        """Advance the test-case generator by one step.

        Each yielded value is a scheduler object (Sleep / WaitForSignal)
        that decides when the next step runs.  Returning False tells
        gobject not to repeat any timeout that invoked us.
        """
        print "Tick"
        try:
            scheduler = iterator.next()
        except StopIteration:
            # Generator exhausted without raising: the test passed.
            print "Test Case Finished Successfully"
            self.app.quit()
            return False
        except Exception, e:
            print "An error occured"
            self.app.quit()
            traceback.print_exc()
            return False
        scheduler.schedule(self, iterator)
        return False
class Sleep(object):
    """Scheduler: resume the test case after a fixed delay (milliseconds)."""

    def __init__(self, timeout=1000):
        self.timeout = timeout

    def schedule(self, tester, iterator):
        # Ask the main loop to advance the generator after the delay.
        gobject.timeout_add(self.timeout, tester.run_test_case, iterator)
class WaitForSignal(object):
    """Scheduler: resume the test case when *obj* emits *signame*."""

    def __init__(self, obj, signame):
        self.obj = obj
        self.signame = signame
        self.iterator = None
        self.sigid = None

    def schedule(self, tester, iterator):
        # Connect once; the handler advances the test and disconnects itself.
        self.sigid = self.obj.connect(self.signame, self._handler)
        self.iterator = iterator
        self.tester = tester

    def _handler(self, *args):
        self.tester.run_test_case(self.iterator)
        self.obj.disconnect(self.sigid)
def basic_test(app):
    # Smoke test: start the app and let it idle briefly.
    yield Sleep(100)
def test_select_area(app):
    yield Sleep(100)
    # Drag out a one-hour selection starting at canvas point (100, 100).
    cmd = weekview.SelectArea(app.weekview.timed, [100, 100])
    cmd.update((100, 100 + app.weekview.timed.hour_height),
               (0, app.weekview.timed.hour_height),
               True)
    yield Sleep()
    # The drag should materialize as a weekly recurrence period.
    assert type(app.info.selection_recurrence) == recurrence.Period
    r = app.info.selection_recurrence
    yield Sleep()
    # Undo clears the selection; redo restores the same recurrence.
    cmd.undo()
    assert app.info.selection_recurrence == None
    yield Sleep()
    cmd.do()
    assert app.info.selection_recurrence == r
    yield Sleep()
def test_new_event(app):
    yield Sleep(100)
    s = datetime.datetime.today()
    e = s + datetime.timedelta(hours=1)
    # NOTE(review): s and e are computed but never used below - presumably
    # left over from an earlier version of this test.
    app.info.selection_recurrence = r = recurrence.Period(
        recurrence.Weekly(0), datetime.time(12, 00), datetime.time(13, 00))
    # Creating an event consumes the selection; undo restores it.
    cmd = cal.NewEvent(app)
    cmd.do()
    assert app.info.selection_recurrence == None
    cmd.undo()
    assert app.info.selection_recurrence == r
    cmd.do()
    assert app.info.selection_recurrence == None
def test_select_and_delete_event(app):
    # Select an area, turn it into an event, then click-select and delete it.
    cmd = weekview.SelectArea(app.weekview.timed, (100, 100))
    cmd.update((100, 100 + app.weekview.timed.hour_height),
               (0, app.weekview.timed.hour_height))
    r = app.info.selection_recurrence
    cmd = cal.NewEvent(app)
    cmd.do()
    yield Sleep()
    event = cmd.event
    # Clicking inside the new event should select it (ordinal 0).
    cmd = weekview.SelectPoint(app.weekview.timed, 110, 110)
    cmd.do()
    yield Sleep()
    assert app.info.selected == (event, 0)
    # Deleting clears the selection; undo brings the event back.
    cmd = cal.DelEvent(app)
    cmd.do()
    assert app.weekview.timed.selected == None
    cmd.undo()
# Run each test case in its own Tester; each spins up a fresh TestApp
# and blocks inside the GTK main loop until the case finishes.
for test in [basic_test,
             test_select_area,
             test_new_event,
             test_select_and_delete_event]:
    Tester(test)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import photo.models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds Photo.thumbnail and re-declares
    # existing field definitions.  Once applied in production, make changes
    # via a new migration rather than editing this one.

    dependencies = [
        ('photo', '0002_auto_20150304_2023'),
    ]

    operations = [
        migrations.AddField(
            model_name='photo',
            name='thumbnail',
            field=models.ImageField(max_length=500, blank=True, upload_to=photo.models.get_upload_file_name_thumb, null=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='photo',
            name='image',
            field=models.ImageField(upload_to=photo.models.get_upload_file_name),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='photocategory',
            name='category',
            field=models.CharField(max_length=100),
            preserve_default=True,
        ),
    ]
|
import os
import sys
import unittest
from glob import glob
from operator import itemgetter
def module_name_to_class(module_name):
    """Convert a snake_case module name to its CamelCase class name.

    e.g. 'test_user_login' -> 'TestUserLogin'.
    """
    words = module_name.split('_')
    return ''.join(word.title() for word in words)
def get_test_cases(directory):
    """Discover test_*.py modules in *directory*, import them, and return
    their test-case classes sorted by ascending `priority`.

    Each module test_foo_bar.py is expected to define a class TestFooBar,
    and the modules must already be importable (directory on sys.path).
    Classes without a `priority` attribute get a default of 1000.
    """
    # BUG FIX: the original compared strings with `is`, which tests object
    # identity, not equality, and only worked by CPython interning accident.
    directory = directory[:-1] if directory.endswith('/') else directory
    tests = glob(directory + '/test_*.py')
    test_list = []
    for module_path in tests:
        module_name = os.path.basename(module_path).replace('.py', '')
        class_name = module_name_to_class(module_name)
        mod = __import__(module_name, fromlist=[class_name])
        klass = getattr(mod, class_name)
        # add a default priority so sorting never fails
        if not hasattr(klass, 'priority'):
            klass.priority = 1000
        test_list.append(klass)
    # lower priority number ... the sooner it gets loaded
    return sorted(test_list, key=lambda k: k.priority)
def run_tests(directory="./"):
    """Load every discovered test case into one suite and run it verbosely.

    Returns the unittest result object so callers can inspect success.
    """
    loader = unittest.TestLoader()
    suite = unittest.TestSuite(
        loader.loadTestsFromTestCase(case) for case in get_test_cases(directory)
    )
    return unittest.TextTestRunner(verbosity=9).run(suite)
if __name__ == '__main__':
    # Run the suite from this file's own directory so discovery is
    # independent of the caller's working directory.
    tests_dir = os.path.dirname(os.path.realpath(__file__))
    # Include modules from parent directory
    sys.path.append("{}/..".format(tests_dir))
    # Shell convention: exit status 0 on success, 1 on failure.
    sys.exit(run_tests(tests_dir).wasSuccessful() is False)
|
import os
import numpy as np
from data import dictClass
import pandas as pd
import MLI_getElem as getElem
from copy import deepcopy as copy
import re
def run(nCore=None):
    """Run the mli.x executable, logging its output to mli.log.

    nCore : int or None
        Number of MPI ranks; None runs the serial executable directly.
    """
    # PEP 8 fix: compare against None with `is`, not `==`.
    if nCore is None:
        os.system('time mli.x > mli.log')
    else:
        # nCore is interpolated into a shell command; callers must pass an int.
        os.system('time mpirun -n '+str(nCore)+' mli.x > mli.log')
def elem2str(elemList):
    """Render an MLI element (or iterable of elements/strings) as text.

    Accepts either a single object exposing .str(), or an iterable mixing
    plain strings with such objects.  Items that render neither way are
    reported and skipped.
    """
    f = '\n'
    try:
        # Single element object exposing .str().
        f = f + elemList.str()
    except (AttributeError, TypeError):
        # Not a renderable object: treat as an iterable.
        # (Original used a bare `except:`, which also hid real errors.)
        for item in elemList:
            if isinstance(item, str):
                f = f + item + ' \n'
            else:
                try:
                    f = f + item.str() + ' \n'
                except (AttributeError, TypeError):
                    # Original caught only TypeError, so a missing .str()
                    # crashed instead of printing this diagnostic.
                    print('following is not MLI command')
                    print(item)
    return f
class buildMenu(dictClass):
    """Container for the '#menu' section of an MLI input file."""

    def __init__(self, elemList=None, latticeList=None):
        # BUG FIX: mutable default arguments ([]) are shared across calls;
        # default to fresh lists instead.
        self.elemList = [] if elemList is None else elemList
        self.latticeList = [] if latticeList is None else latticeList

    def str(self):
        """Render the menu section (elements, then lattices) as MLI text."""
        return '#menu \n' + elem2str(self.elemList) + elem2str(self.latticeList)
class buildLabor(dictClass):
    """Container for the '#labor' section: the ordered list of commands
    (and lattice elements) MLI should execute."""

    def __init__(self, elemList=None):
        # BUG FIX: mutable default argument ([]) is shared across calls.
        self.list = [] if elemList is None else elemList

    def str(self):
        """Render the labor section; ints become repeat prefixes ('n*')."""
        f = '\n#labor \n'
        for item in self.list:
            if isinstance(item, str):
                f = f + item + ' \n'
            elif isinstance(item, int):
                f = f + str(item) + '*'
            else:
                try:
                    f = f + item.name + ' \n'
                except (AttributeError, TypeError):
                    # BUG FIX: the original caught only TypeError, so an
                    # object without a .name crashed instead of being reported.
                    print('following is not MLI command')
                    print(item)
        return f
#%%============================================================================
# read outputs
#==============================================================================
def readTransferMap(fname='mli.out'):
    """Parse an MLI output file into transfer-map pieces.

    Two layouts are supported: 'fort.*' dumps (bare index/value columns)
    and the human-readable 'mli.out' report (section headers followed by
    fixed-width blocks).

    Returns (M, G, T, U):
      M - 6x6 first-order matrix as a DataFrame with 1-based labels
      G - generating-polynomial coefficients
      T - second-order matrix elements (mli.out layout only)
      U - third-order matrix elements (mli.out layout only)

    NOTE(review): G, T and U are only bound when the matching section is
    present in the file, so the final `return M,G,T,U` can raise
    UnboundLocalError - confirm expected file contents before relying on
    all four results.
    """
    Ind1 = []
    Ind2 = []
    Val = []
    M = np.zeros([6,6])
    # Section start indices; -1 means "section not found".
    iM = -1
    iG = -1
    iT = -1
    iU = -1
    with open(fname) as f:
        lines = f.readlines()
    if fname[:4]=='fort':
        # fort.* layout: 3-column rows are matrix entries, 2-column rows
        # are generating-polynomial entries; find where each run starts.
        flagM = False
        flagG = False
        for i,line in enumerate(lines):
            line = line.strip().split()
            lines[i] = line
            if len(line) == 3 and not flagM:
                iM = i
                flagM = True
                flagG = False
            if len(line) == 2 and not flagG:
                iG = i
                flagM = False
                flagG = True
        # --- get Matrix ---
        if iM != -1:
            for line in lines[iM:iG]:
                # 1-based (row, col, value) triplets.
                M[int(line[0])-1,int(line[1])-1]=float(line[2])
            M = pd.DataFrame(M,index=range(1,7),columns=range(1,7))
        # --- get generating polynomial ---
        if iG != -1:
            for line in lines[iG:]:
                Ind1.append(int(line[0]))
                Val.append(float(line[1]))
            G = pd.DataFrame({'GP':Val},index=Ind1)
            Ind1 = []
            Ind2 = []
            Val = []
    elif fname=='mli.out':
        # Locate each section by its header text.
        for i,line in enumerate(lines):
            if 'nonzero matrix elements in full precision:' in line:
                iM = i
            if 'nonzero elements in generating polynomial are :' in line:
                iG = i
            if 'nonzero elements in second order matrix are :' in line:
                iT = i
            if 'nonzero elements in third order matrix are :' in line:
                iU = i
        # --- get Matrix ---
        if iM != -1:
            for line in lines[iM+1:]:
                if line=='\n':
                    break
                items = line.strip().split()
                M[int(items[0])-1,int(items[1])-1]=float(items[2])
            M = pd.DataFrame(M,index=range(1,7),columns=range(1,7))
        # --- get generating polynomial ---
        # The fixed column slices below match MLI's report formatting.
        if iG != -1:
            # NOTE(review): this re-open is unused; the file was already
            # read into `lines` above.
            with open(fname) as f:
                for line in lines[iG+2:]:
                    if line=='\n':
                        break
                    #Ind1.append(line[2:22])
                    Ind1.append(int(line[4:7]))
                    Ind2.append(line[9:22])
                    Val.append(float(line[23:-1]))
                G = pd.DataFrame({'exponents':Ind2,'GP':Val},index=Ind1)
                Ind1 = []
                Ind2 = []
                Val = []
        if iT != -1:
            with open(fname) as f:
                for line in lines[iT+2:]:
                    if line=='\n':
                        break
                    #Ind1.append(line[2:22])
                    Ind1.append(line[10:12])
                    Ind2.append(line[12:24])
                    Val.append(float(line[25:-1]))
                T = pd.DataFrame({'target':Ind1,'poly':Ind2,'val':Val})
                Ind1 = []
                Ind2 = []
                Val = []
        if iU != -1:
            with open(fname) as f:
                for line in lines[iU+2:]:
                    if line=='\n':
                        break
                    #Ind1.append(line[2:22])
                    Ind1.append(line[10:12])
                    Ind2.append(line[12:24])
                    Val.append(float(line[25:-1]))
                U = pd.DataFrame({'target':Ind1,'poly':Ind2,'val':Val})
                Ind1 = []
                Ind2 = []
                Val = []
    else:
        raise ValueError('fname')
    return M,G,T,U
def getTBT(npt, nturn, fname='rays.out'):
    """Load turn-by-turn particle data and reshape it to (npt, nturn, 6).

    npt   -- particles tracked per turn
    nturn -- number of turns recorded
    fname -- whitespace-delimited text file produced by the tracker

    A one-dimensional file yields a single flat record with its first and
    last entries stripped; otherwise the leading index column (present when
    the file has more than 7 columns) and the trailing column are dropped
    before reshaping.
    """
    raw = np.loadtxt(fname)
    if raw.ndim == 1:
        # Single record: strip the leading and trailing bookkeeping entries.
        return raw[1:-1]
    n_cols = raw.shape[1]
    if n_cols > 7:
        # Wide files carry a particle-index column up front; drop it.
        coords = raw[:npt * nturn, 1:-1]
    else:
        coords = raw[:npt * nturn, :-1]
    out = np.zeros([npt, nturn, 6])
    for turn in range(nturn):
        out[:, turn, :] = coords[turn * npt:(turn + 1) * npt, :].reshape([npt, 6])
    return out
#%%============================================================================
# write inputs
#==============================================================================
def writeInputfile(elemList, latticeList, labor, fname='mli.in'):
    """Write a complete MLI input file: '#menu' section, then '#labor'.

    labor may be a buildLabor object (rendered via .str()) or a plain
    list of command-name strings.
    """
    menu = buildMenu(elemList, latticeList)
    with open(fname, 'w') as f:
        f.write(menu.str())
        try:
            f.write(labor.str())
        except (AttributeError, TypeError):
            # Plain list of command names.  (Original used a bare
            # `except:`, which also masked unrelated errors.)
            laborStr = '#labor\n'
            for item in labor:
                laborStr = laborStr + item + '\n'
            f.write(laborStr)
#%%============================================================================
# read inputs
#==============================================================================
def _isint(s):
try:
int(s)
return True
except ValueError:
return False
def _isfloat(s):
try:
float(s.replace('D','E',1).replace('d','e',1))
return True
except ValueError:
return False
def _isvariable(s,variables):
if s in variables.keys():
return True
else:
return False
def _str2val(s, variables):
    """Convert the string *s* to a numeric value.

    Resolution order: int literal, float literal (Fortran 'D' exponent
    accepted), previously-defined variable name, and finally an arithmetic
    expression in which known variable names are substituted and the
    result evaluated.  Returns the original string when nothing parses.
    """
    if _isint(s):
        return int(s)
    elif _isfloat(s):
        return float(s.replace('D','E',1).replace('d','e',1))
    elif _isvariable(s,variables):
        return variables[s]
    else:
        # Substitute longer variable names first so a variable named 'a'
        # cannot clobber part of one named 'ab'.
        for var in sorted(variables, key=len, reverse=True):
            s=s.replace(var,str(variables[var]))
        try:
            s=s.replace('D','E').replace('d','e')
            # Names made available to the evaluated expression.
            pi = np.pi
            tan = np.tan
            sin = np.sin
            cos = np.cos
            exp = np.exp
            # SECURITY: eval() on file-supplied text executes arbitrary
            # code; only use this with trusted MLI input files.
            return eval(s)
        except:
            return s
def _file2rawlines(fname):
    """Read an MLI input file into a list of cleaned, lower-cased lines.

    Comment lines ('!'), blank lines and '>>>' markers are dropped, inline
    comments are stripped, multi-line 'line=( ... )' lattice definitions
    are joined into a single entry (with ':' turned into ','), and spaces
    are removed from all other lines.
    """
    lines = []
    tmp = ''
    # NOTE(review): `variables` is never used in this function.
    variables = {}
    with open(fname) as f:
        flagLine = False
        for line in f:
            line=line.replace('\n','').lower()
            line=line.lstrip()
            # Skip full-line comments, blanks and '>>>' markers.
            if line.find('!')==0 or line=='' or line[:3]=='>>>':
                continue
            # Strip a trailing inline comment.
            i = line.find('!')
            if i!=-1:
                line = line[:i]
            # A lattice definition starts: accumulate until ')' is seen.
            if 'line' in line and '(' in line:
                line=line.replace(':',',')
                flagLine = True
            if flagLine:
                # '&' is the Fortran-style continuation marker.
                line=line.replace('&','')
                i=line.find(')')
                if i!=-1:
                    flagLine = False
                    tmp = tmp + line
                    lines.append(copy(tmp))
                    tmp = ''
                else:
                    tmp = tmp + line
            else:
                line=line.replace(' ','')
                lines.append(line)
    return lines
def _rawlines2var(raw):
var = {}
raw2 = raw.copy()
k=0
for i,line in enumerate(raw):
icomma = line.find(',')
icolon = line.find(':')
iequal = line.find('=')
if icomma==-1 and icolon==-1 and iequal!=-1:
var[line[:iequal]]=_str2val(line[iequal+1:],var)
del raw2[i-k]
k=k+1
return var,raw2
def _rawlines2elem(raw, variables):
    """Parse element-definition lines ('name: type, attr=val, ...') into
    getElem objects.

    Lines containing ':' are consumed (removed from the returned copy of
    *raw*); lattice 'line' entries and anything else are left for
    _rawlines2lattice.  Attribute values are resolved with _str2val.

    Returns (element_objects, remaining_lines).
    """
    # Element keywords, each matching a factory of the same name in getElem.
    # This replaces the original 27-branch if/elif chain with one dispatch.
    known = {
        'beam', 'units', 'globaldefaults', 'autotrack', 'particledump',
        'iden', 'end', 'raytrace', 'ptm', 'tasm', 'stm', 'gtm', 'tmo',
        'tmi', 'vary', 'aim', 'nlinsert', 'monitor', 'marker', 'drift',
        'quadrupole', 'vkicker', 'sextupole', 'thlm', 'sbend', 'dipedge',
        'srfc',
    }
    elems = []
    raw2 = raw.copy()
    # k tracks how many lines we have deleted so far, so i-k stays the
    # current line's index inside the shrinking raw2.
    k = 0
    for i, line in enumerate(raw):
        icolon = line.find(':')
        if icolon != -1:
            # Element definitions carry a colon; consume them from raw2.
            del raw2[i - k]
            k = k + 1
        istart = line.find('{')
        if istart == -1:
            line = re.split(':|,|=', line)
        else:
            # Keep a '{...}' group (e.g. an array literal) as one token.
            iend = line.find('}')
            line = re.split(':|,|=', line[:istart]) + [line[istart:iend + 1]] + re.split(':|,|=', line[iend + 1:])
        line = list(filter(('').__ne__, line))
        name = line[0].replace(' ', '')
        elem = line[1].replace(' ', '')
        if elem == 'line':
            # Lattice definitions are handled by _rawlines2lattice.
            continue
        if elem not in known:
            print(elem + ' is not recognized. skipping...')
            continue
        # getElem exposes a factory function named after each keyword.
        f = getattr(getElem, elem)(name=name)
        if len(line) > 2:
            # Remaining tokens alternate attribute-name / attribute-value.
            for j in range(2, len(line), 2):
                f[line[j]] = _str2val(line[j + 1], variables)
        f.update()
        elems.append(f)
    return elems, raw2
def _rawlines2lattice(raw, elems):
    """Build getElem.line lattice objects from the remaining raw tokens.

    raw   -- lines left over after variables and elements were consumed;
             each lattice definition reads 'name line ( item ... )' once
             the punctuation below is blanked out.
    elems -- parsed element objects, used to resolve element-name tokens.
    """
    raw2 = []
    for line in raw:
        # Blank out structural punctuation so definitions split into tokens.
        line = line.replace('&', ' ')
        line = line.replace('(', ' ')
        line = line.replace(')', ' ')
        line = line.replace('=', ' ')
        line = line.replace(',', ' ')
        raw2 = raw2 + line.split()
    iElems = [i for i in range(len(raw2))]
    ilattices = []
    # NOTE(review): elemList and k below are assigned but never used.
    elemList = raw2.copy()
    k = 0
    for i, item in enumerate(raw2):
        if item == 'line':
            # Drop the 'line' keyword and the preceding lattice name from
            # the element-token indices; remember where each body starts.
            iElems.remove(i)
            iElems.remove(i-1)
            ilattices.append(i)
    # Resolve element-name tokens to their element objects in place.
    for i in iElems:
        for elem in elems:
            if elem.name == raw2[i]:
                raw2[i] = elem
    nlattices = len(ilattices)
    lattices = [0]*nlattices
    # i = -1 makes the trailing assignment below handle the single-lattice
    # case: the loop body never runs, so i stays -1 and i+1 == 0.
    i = -1
    for i in range(nlattices-1):
        lattices[i] = getElem.line(name=raw2[ilattices[i]-1], elemList=raw2[ilattices[i]+1:ilattices[i+1]-1])
    # The last lattice's body extends to the end of the token stream.
    lattices[i+1] = getElem.line(name=raw2[ilattices[i+1]-1], elemList=raw2[ilattices[i+1]+1:])
    return lattices
def _unroll_nested_lines(latticeList, elemList):
    """Expand repeat counts ('n*name') and references to earlier lattices
    in place, so every lattice list ends up holding only primitive
    element names.

    Only lattices defined *before* the current one (latticeList[:a]) are
    substituted, matching the file-order dependency of MLI line defs.
    elemList is unused here but kept for a uniform helper signature.
    """
    for a, lat_a in enumerate(latticeList):
        line = lat_a.list
        i = 0
        while(True):
            flag = True
            # 'n*name' -> n copies of 'name', spliced in at position i.
            iStar = line[i].find('*')
            if iStar > 0:
                tmp = [line[i][iStar+1:]]*int(line[i][:iStar])
                line = line[:i] + copy(tmp) + line[i+1:]
                lat_a.list = line
            # Replace a previously-defined lattice's name with its body.
            for lat_b in latticeList[:a]:
                if line[i] == lat_b.name:
                    flag = False
                    line = line[:i] + copy(lat_b.list) + line[i+1:]
                    lat_a.list = line
                    # Skip past the freshly spliced-in items.
                    i = i + len(lat_b.list)
                    if i == len(line):
                        break
            # No substitution happened: advance to the next token.
            if flag:
                i = i+1
            if i == len(line):
                break
def _rawlines_to_elemList_n_latticeList(rawlines):
    """Run the full parsing pipeline on cleaned input lines.

    Order matters: variables feed element parsing, elements feed lattice
    resolution, and nested lattices are flattened last.

    Returns (elemList, latticeList).
    """
    variables, rest = _rawlines2var(rawlines)
    elemList, rest = _rawlines2elem(rest, variables)
    latticeList = _rawlines2lattice(rest, elemList)
    _unroll_nested_lines(latticeList, elemList)
    return elemList, latticeList
def readInputfile(fname='mli.in'):
    """Read an MLI input file and return (elemList, latticeList, labor).

    The '#menu' section is parsed into element and lattice objects; the
    '#labor' section is returned verbatim as a list of command names.
    """
    rawlines = _file2rawlines(fname)
    i_menu = -1
    for i_labor, line in enumerate(rawlines):
        if line == '#menu':
            i_menu = i_labor
        if line == '#labor':
            break
    # Everything between the two markers is the menu body.
    menu_lines = rawlines[i_menu + 1:i_labor]
    elemList, latticeList = _rawlines_to_elemList_n_latticeList(menu_lines)
    labor = rawlines[i_labor + 1:]
    return elemList, latticeList, labor
#%%============================================================================
# Lattice Menipulate
#==============================================================================
def sext2thin(elemList, latticeList, brho=None):
    """Replace every thick sextupole by drift + thin multipole + drift.

    Each sextupole of length L becomes two L/2 drifts around a thlm whose
    integrated strength is k2*L (or g2*L*2/brho when only g2 is given).
    Lattice element-name lists are expanded in place to match.

    brho -- magnetic rigidity, required only to convert g2 to k2.

    Returns (newElemList, newLatticeList).
    """
    newList = []
    sextupoles = {}
    for item in elemList:
        if item.elem == 'sextupole':
            # Half-length drift placed on each side of the thin kick.
            thin_drift = getElem.drift(name='drift_thin_'+item.name, l=0.5*item.l)
            if 'k2' in item:
                thin_multi = getElem.thlm(name=item.name, k2l=item.l*item.k2)
            elif 'g2' in item:
                if brho != None:
                    thin_multi = getElem.thlm(name=item.name, k2l=item.l*item.g2*2.0/brho)
                else:
                    raise ValueError('argument brho is needed to convert g2 to k2')
            else:
                # NOTE(review): UserWarning(...) builds the warning object
                # but never raises/emits it - probably warnings.warn() was
                # intended.
                UserWarning('the sextupole '+item.name+' does not have g2 or k2 defined.')
                thin_multi = getElem.thlm(name=item.name, k2l=0.0)
            tmp = [thin_drift, thin_multi].copy()
            # Replacement name sequence to splice into lattices below.
            sextupoles[item.name] = [thin_drift.name, item.name, thin_drift.name]
        else:
            tmp = [item].copy()
        newList = newList+tmp
    newLatticeList = latticeList.copy()
    for lattice in newLatticeList:
        tmpLattice = lattice.list.copy()
        # iSext tracks how far indices have shifted after each insertion.
        iSext = 0
        for i, item in enumerate(tmpLattice):
            if item in sextupoles.keys():
                lattice.list[i+iSext] = sextupoles[item][0]
                lattice.list.insert(i+iSext+1, sextupoles[item][1])
                lattice.list.insert(i+iSext+2, sextupoles[item][2])
                iSext = iSext+2
    return newList, newLatticeList
def removeElems(elem, elemList, latticeList):
    """Drop every element whose .elem equals *elem* from elemList, and
    strip the matching names from each lattice's element list.

    The returned lattice list is a shallow copy: the lattice objects are
    shared with the input and their .list members are edited in place.

    Returns (newElemList, newLatticeList).
    """
    removed_names = set()
    kept = []
    for item in elemList:
        if item.elem == elem:
            removed_names.add(item.name)
        else:
            kept.append(item)
    newLatticeList = latticeList.copy()
    for lattice in newLatticeList:
        # Slice-assign so other references to this list object see the edit.
        lattice.list[:] = [name for name in lattice.list if name not in removed_names]
    return kept, newLatticeList
|
# -*- coding: utf-8 -*-
__author__ = 'Liu'
from sys import argv
# 插入的exists模块的作用的是检测该文件是否存在
from os.path import exists
script, from_file, to_file = argv

print("Copying for %s to %s" % (from_file, to_file))

# BUG FIX: the original did `open(from_file).read()` and then called
# .read() again on the resulting str, which raises AttributeError.
# Open the file once, read its contents, and keep the handle to close.
in_file = open(from_file)
indata = in_file.read()

print("This input file is %d bytes long" % len(indata))
print("Does the output file exist?---->%r" % exists(to_file))
print("Ready,hit return to continue,ctrl+c to abort")
input()

out_file = open(to_file, 'w')
# Write the source contents into the destination file.
out_file.write(indata)

print("All right,Well done")
out_file.close()
in_file.close()
|
from validate_docbr import CNPJ
class Cnpj:
    """Value object wrapping a validated CNPJ (Brazilian company registry
    number), formatted on demand via validate_docbr."""

    def __init__(self, documento):
        """Coerce *documento* to str, validate it, and store it.

        Raises ValueError when the digits fail the CNPJ check.
        """
        documento = str(documento)
        if self.validador_cnpj(documento):
            # NOTE(review): attribute is spelled `cpnj` (transposed letters)
            # consistently in this class; renaming could break external
            # readers, so it is only flagged here.
            self.cpnj = documento
        else:
            raise ValueError('=- CNPJ invalido!! -=')

    def validador_cnpj(self, documento):
        """Return the validator's verdict for a 14-digit string.

        Raises ValueError (rather than returning False) when the input is
        not exactly 14 characters long.
        """
        if len(documento) == 14:
            validador = CNPJ()
            return validador.validate(documento)
        else:
            raise ValueError('=- Quantidade de digitos invalido!! -=')

    def formatar_cnpj(self):
        """Return the CNPJ with the standard mask (00.000.000/0000-00)."""
        mascara = CNPJ()
        return mascara.mask(self.cpnj)

    def __str__(self):
        return self.formatar_cnpj()
from abaqusConstants import *
from .Area import Area
from .AreaStyle import AreaStyle
from .Axis import Axis
from .Legend import Legend
from .LineStyle import LineStyle
from .TextStyle import TextStyle
class DefaultChartOptions:
    """The DefaultChartOptions object is used to hold on default chart and axis attributes. The
    DefaultChartOptions object attributes are used whenever Chart or Axis are created. A
    DefaultChartOptions object is automatically created when opening a session.

    Notes
    -----
    This object can be accessed by:

    .. code-block:: python

        import visualization
        session.defaultChartOptions
    """

    # Default display properties applied to the chart area of new charts.
    areaStyle: AreaStyle = AreaStyle()

    def setValues(
        self,
        areaStyle: AreaStyle = AreaStyle(),
        aspectRatio: float = None,
        defaultAxis1Options: Axis = Axis(),
        defaultAxis2Options: Axis = Axis(),
        gridArea: Area = Area(),
        legend: Legend = Legend(),
        majorAxis1GridStyle: LineStyle = LineStyle(),
        majorAxis2GridStyle: LineStyle = LineStyle(),
        minorAxis1GridStyle: LineStyle = LineStyle(),
        minorAxis2GridStyle: LineStyle = LineStyle(),
        tagAreaStyle: AreaStyle = AreaStyle(),
        tagBorder: LineStyle = LineStyle(),
        tagTextStyle: TextStyle = TextStyle(),
        useQuantityType: Boolean = ON,
    ):
        """This method modifies the DefaultChartOptions object.

        Parameters
        ----------
        areaStyle
            An AreaStyle object specifying an AreaStyle used to hold on to the default display
            properties for the chart area.
        aspectRatio
            A Float specifying the default aspect ratio of the grid area. A value of -1 specifies
            that the gridArea will take up all available space. The default value is -1.
        defaultAxis1Options
            An Axis object specifying an Axis object used to hold on to the default properties for
            direction 1 axes - the abscissa for a Cartesian chart.
        defaultAxis2Options
            An Axis object specifying an Axis object used to hold on to the default properties for
            direction 2 axes - the ordinate for a Cartesian chart.
        gridArea
            An Area object specifying how to display the grid area by default.
        legend
            A Legend object specifying the default attributes for the legend of the chart.
        majorAxis1GridStyle
            A LineStyle object specifying the default line properties to be used when drawing major
            gridlines along axis 1.
        majorAxis2GridStyle
            A LineStyle object specifying the default line properties to be used when drawing major
            gridlines along axis 2.
        minorAxis1GridStyle
            A LineStyle object specifying the default line properties to be used when drawing minor
            gridlines along axis 1.
        minorAxis2GridStyle
            A LineStyle object specifying the default line properties to be used when drawing minor
            gridlines along axis 2.
        tagAreaStyle
            An AreaStyle object specifying the default area properties to be used when creating
            tags.
        tagBorder
            A LineStyle object specifying the default tag area border properties to be used when
            creating tags.
        tagTextStyle
            A TextStyle object specifying the default text properties to be used when creating tags.
        useQuantityType
            A Boolean specifying whether to use the QuantityType to associate curves with axes. The
            default value is ON.
        """
        # API stub: the real implementation lives in the Abaqus kernel.
        pass
|
from SavingsGoal import SavingsGoal
def main():
    """Create a sample SavingsGoal and print its name."""
    car = SavingsGoal("Accord", 8000)
    # getName()'s result is passed to print(), which applies str() itself,
    # so the original's explicit str() wrapper was redundant.
    print(car.getName())
    # BUG FIX: the original ended with a bare `exit` expression, which
    # references the builtin without calling it (a no-op); simply falling
    # off the end of main() is the correct way to finish.


if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: moddemod
# datetime: 2019/12/25 下午9:54
# ide: PyCharm
import binascii
import base64
s = '636A56355279427363446C4A49454A7154534230526D684356445A31614342354E326C4B4946467A5769426961453067'
r = binascii.unhexlify(s)
print(r)
r1 = base64.b64decode(r)
print(r1)
# b'cjV5RyBscDlJIEJqTSB0RmhCVDZ1aCB5N2lKIFFzWiBiaE0g'
# b'r5yG lp9I BjM tFhBT6uh y7iJ QsZ bhM '
|
#%% Imports and Declarations
import torch
import numpy as np
import cv2, time
class data_encoder_pytorch:
    """Encode bounding boxes into a YOLO-style SxS grid and back (torch)."""

    def __init__(self, grid_dim):
        '''
        grid_dim = [Height, Width] or [Y,X] or [Rows, Columns]
        '''
        self.grid_dim = grid_dim

    def __cornered_to_centered__(self, batch):
        '''
        In-place: [x1, y1, x2, y2] -> [x_center, y_center, width, height].
        Batch Shape: (batch_size, 4)
        Replace torch by numpy to make it independent from torch
        '''
        batch[..., 2] = batch[..., 2] - batch[..., 0] # Width
        batch[..., 3] = batch[..., 3] - batch[..., 1] # Height
        batch[..., 0] = batch[..., 0] + (batch[..., 2]/2.0) # X center
        batch[..., 1] = batch[..., 1] + (batch[..., 3]/2.0) # Y center
        return batch

    def __centered_to_cornered__(self, batch):
        '''
        In-place: [x, y, w, h] -> [x1, y1, x2, y2].
        Batch Shape: (batch_size, 4)
        NOTE(review): this treats (x, y) as the top-left corner, not the
        center (no half-width shift) - confirm intended convention.
        '''
        batch[:, 2] = batch[:, 0] + batch[:, 2]
        batch[:, 3] = batch[:, 1] + batch[:, 3]
        return batch

    def annotaion_encoding(self, bbox, image_size, box_format='cornered'):
        '''
        Encode absolute pixel boxes into grid-relative YOLO targets.
        (Method-name typo 'annotaion' kept for API compatibility.)
        Inputs:
            bbox : (batch_size, 4) in torch.FloatTensor
                   [x1, y1, x2, y2]'cornered' or [x, y, w, h]'centered'
            image_size = (batch_size, 2) [height, width] torch.FloatTensor
            box_format = 'cornered' / 'centered'
        output:
            Xgrid, Ygrid, Xrel_c, Yrel_c, Xrel_w, Yrel_w
        '''
        centered_batch = bbox
        if box_format == 'cornered':
            # NOTE(review): converts in place, so the caller's bbox mutates.
            centered_batch = self.__cornered_to_centered__(bbox)
        # Image and Grid Basic Parameters
        grid_dim = self.grid_dim
        image_width = image_size[:, 1]
        image_height = image_size[:, 0]
        gridx_count = float(grid_dim[1])
        gridy_count = float(grid_dim[0])
        Xcenter = centered_batch[:, 0]
        Ycenter = centered_batch[:, 1]
        Xwidth = centered_batch[:, 2]
        Yheight = centered_batch[:, 3]
        # Grid Cell Height and Width
        grid_width = image_width / gridx_count
        grid_height = image_height / gridy_count
        # Grid coordinates where center of annotated bounding box lies
        Xgrid = torch.floor(Xcenter / grid_width)
        Ygrid = torch.floor(Ycenter / grid_height)
        # Center coordinates relative to the owning grid cell (0..1)
        Xrel_c = (Xcenter - (Xgrid*grid_width)) / grid_width
        Yrel_c = (Ycenter - (Ygrid*grid_height)) / grid_height
        # Box width/height relative to the full image size
        Xrel_w = Xwidth/image_width
        Yrel_w = Yheight/image_height
        return Xgrid, Ygrid, Xrel_c, Yrel_c, Xrel_w, Yrel_w

    def decode_grid(self, image_size, grid_coords, rel_center, rel_width):
        '''
        Inverse of annotaion_encoding.
        Input:
            Image Size : (batch_size, 2) [Y, X]
            grid_coord : (batch_size, 2) [X, Y]
            Relative Center: (batch_size, 2) [X, Y]
            Relative Width : (batch_size, 2) [X, Y]
        Output:
            bounding box coordinates [x1, y1, x2, y2] 'Cornered' format
        '''
        grid_dim = self.grid_dim
        # NOTE(review): debug prints left in; remove for production use.
        print(image_size)
        image_width = image_size[:, 1]
        image_height = image_size[:, 0]
        gridx_count = float(grid_dim[1])
        gridy_count = float(grid_dim[0])
        Xgrid = grid_coords[:, 0]
        Ygrid = grid_coords[:, 1]
        # Grid Cell Height and Width
        grid_width = image_width / gridx_count
        grid_height = image_height / gridy_count
        # Grid leftmost point
        grid_X_coord = (grid_width * Xgrid)
        grid_Y_coord = (grid_height * Ygrid)
        centerx_offset = (grid_width * rel_center[:, 0])
        centery_offset = (grid_height * rel_center[:, 1])
        width = (image_width * rel_width[:, 0])
        height = (image_height * rel_width[:, 1])
        print("Decode:", image_size, image_width, grid_width, grid_X_coord, centerx_offset, width)
        X1 = torch.floor(grid_X_coord + centerx_offset - width/2).unsqueeze(1)
        Y1 = torch.floor(grid_Y_coord + centery_offset - height/2).unsqueeze(1)
        X2 = torch.floor(grid_X_coord + centerx_offset + width/2).unsqueeze(1)
        Y2 = torch.floor(grid_Y_coord + centery_offset + height/2).unsqueeze(1)
        stacked = torch.hstack((X1, Y1, X2, Y2))
        return stacked

    # Only supports one detection per image, to be implemented for multiple detections per image
    def to_grids(self, grid_coords, rel_center, rel_width, classes, class_count, grids=None):
        '''
        Write encoded targets into S x S grid tensors.
        Call annotaion_encoding first to obtain the parameters.
        Input:
            grid_coord : (batch_size, 2) [X, Y]
            Relative Center: (batch_size, 2) [X, Y]
            Relative Width : (batch_size, 2) [X, Y]
            classes : (batch_size, 1) [C]
            class_count : number of classes C
            grids : (batch_size, S, S, C + B*5) pre-existing grid, or None
        Output:
            grids with one-hot class, objectness and box targets filled in
        '''
        batch_size = grid_coords.shape[0]
        if grids is None:
            grids = torch.zeros((batch_size, self.grid_dim[0], self.grid_dim[1], class_count + 5))
        for i in range(batch_size):
            gridx = grid_coords[i, 0].long() # long not required as numpy
            gridy = grid_coords[i, 1].long()
            cx = rel_center[i, 0]
            cy = rel_center[i, 1]
            dx = rel_width[i, 0]
            dy = rel_width[i, 1]
            target = classes[i].long() # Location of Class
            grids[i, gridy, gridx, target] = 1.0 # Class is present
            grids[i, gridy, gridx, class_count + 0] = 1.0 # Presence of Object
            grids[i, gridy, gridx, class_count + 1] = cx # Center X
            grids[i, gridy, gridx, class_count + 2] = cy # Center Y
            grids[i, gridy, gridx, class_count + 3] = dx # Width
            grids[i, gridy, gridx, class_count + 4] = dy # Height
        return grids

    def show_image(self, dimentions, image, title='Image Display', wait=True,
                   color=(255,0,0), thickness=1):
        '''
        Draw a rectangle on *image* and show it in an OpenCV window.
        dimentions: [x1, y1, x2, y2]
        image : OpenCV image
        NOTE(review): when wait is False, `key` is never assigned and the
        final `return key` raises NameError - confirm intended usage.
        '''
        xstart = int(dimentions[0])
        ystart = int(dimentions[1])
        xend = int(dimentions[2])
        yend = int(dimentions[3])
        cv2.rectangle(image,
                      (xstart, ystart),
                      (xend, yend),
                      color,
                      thickness)
        cv2.imshow(title, image)
        if(wait == True):
            key = cv2.waitKey(0)
            cv2.destroyAllWindows()
        return key

    def resize_bb_coord(self, actual_im_size, target_im_size, bbox, format='cornered'):
        '''
        Scale box coordinates from the original image size to the target.
        actual_im_size : (batch_size, 2) [Height, Width]
        target_im_size : [Height, Width]
        bbox : (batch_size, 4), [x1, y1, x2, y2] or [x, y, w, h] :: cornered / centered
        NOTE(review): the final bbox.astype(np.int32) implies bbox is a
        numpy array even though the ratios are computed with torch -
        confirm expected input type.
        '''
        if format == 'centered':
            bbox = self.__centered_to_cornered__(bbox)
        actual_im_size = torch.FloatTensor(actual_im_size)
        target_im_size = torch.FloatTensor([ [target_im_size[0], target_im_size[1]] for _ in range(actual_im_size.shape[0])])
        ratio_width = (target_im_size[:, 1]/actual_im_size[:, 1]).detach().cpu().numpy()
        ratio_height = (target_im_size[:, 0]/actual_im_size[:, 0]).detach().cpu().numpy()
        bbox[:, 0] = bbox[:, 0] * ratio_width
        bbox[:, 1] = bbox[:, 1] * ratio_height
        bbox[:, 2] = bbox[:, 2] * ratio_width
        bbox[:, 3] = bbox[:, 3] * ratio_height
        return bbox.astype(np.int32)

    def find_box(self, prediction, grid_dim, classes, boxes, conf_threshold=0.8):
        '''
        Pick the single highest-confidence cell per image and return its
        grid coordinates plus the stored relative center and size.
        NOTE(review): `row` divides by grid_dim[1] while `col` takes the
        remainder modulo grid_dim[0]; for non-square grids one of these
        looks transposed - confirm.  conf_threshold is currently unused.
        '''
        prediction = prediction.view(-1, grid_dim[0], grid_dim[1],classes+boxes*5)
        confidence_scores = prediction[..., classes]
        max_indexes = []
        # return box with max confidence (only one box)
        for cs in confidence_scores:
            max_indexes.append(torch.argmax(cs))
        max_indexes = torch.Tensor(max_indexes)
        row = torch.div(max_indexes, grid_dim[1], rounding_mode='trunc')
        col = max_indexes % grid_dim[0] # Remainder
        grid_coords = []
        rel_center = []
        rel_width = []
        for index, r in enumerate(row):
            grid_coords.append((r.long(), col[index].long()))
            rel_center.append(prediction[index, r.long(), col[index].long(), classes + 1: classes + 3])
            rel_width.append(prediction[index, r.long(), col[index].long(), classes + 3: classes + 5])
        grid_coords = torch.FloatTensor(grid_coords)
        rel_center = torch.vstack(rel_center)
        rel_width = torch.vstack(rel_width)
        return grid_coords, rel_center, rel_width
def intersection_over_union(boxes_preds, boxes_labels, box_format="midpoint"):
    """
    Calculates intersection over union.

    Parameters:
        boxes_preds (tensor): Predictions of Bounding Boxes (BATCH_SIZE, 4)
        boxes_labels (tensor): Correct labels of Bounding Boxes (BATCH_SIZE, 4)
        box_format (str): midpoint/corners, if boxes (x,y,w,h) or (x1,y1,x2,y2)

    Returns:
        tensor: Intersection over union for all examples

    Raises:
        ValueError: if box_format is not "midpoint" or "corners".
    """
    if box_format == "midpoint":
        # Convert (cx, cy, w, h) into corner coordinates.
        box1_x1 = boxes_preds[..., 0:1] - boxes_preds[..., 2:3] / 2
        box1_y1 = boxes_preds[..., 1:2] - boxes_preds[..., 3:4] / 2
        box1_x2 = boxes_preds[..., 0:1] + boxes_preds[..., 2:3] / 2
        box1_y2 = boxes_preds[..., 1:2] + boxes_preds[..., 3:4] / 2
        box2_x1 = boxes_labels[..., 0:1] - boxes_labels[..., 2:3] / 2
        box2_y1 = boxes_labels[..., 1:2] - boxes_labels[..., 3:4] / 2
        box2_x2 = boxes_labels[..., 0:1] + boxes_labels[..., 2:3] / 2
        box2_y2 = boxes_labels[..., 1:2] + boxes_labels[..., 3:4] / 2
    elif box_format == "corners":
        # Boxes are already (x1, y1, x2, y2); the 0:1 slicing keeps (N, 1).
        box1_x1 = boxes_preds[..., 0:1]
        box1_y1 = boxes_preds[..., 1:2]
        box1_x2 = boxes_preds[..., 2:3]
        box1_y2 = boxes_preds[..., 3:4]  # (N, 1)
        box2_x1 = boxes_labels[..., 0:1]
        box2_y1 = boxes_labels[..., 1:2]
        box2_x2 = boxes_labels[..., 2:3]
        box2_y2 = boxes_labels[..., 3:4]
    else:
        # Previously an unknown format fell through both (non-exclusive) ifs
        # and crashed with a NameError on box1_x1; fail fast instead.
        raise ValueError(f"unknown box_format: {box_format!r}")
    # Intersection rectangle.
    x1 = torch.max(box1_x1, box2_x1)
    y1 = torch.max(box1_y1, box2_y1)
    x2 = torch.min(box1_x2, box2_x2)
    y2 = torch.min(box1_y2, box2_y2)
    # .clamp(0) is for the case when they do not intersect
    intersection = (x2 - x1).clamp(0) * (y2 - y1).clamp(0)
    box1_area = abs((box1_x2 - box1_x1) * (box1_y2 - box1_y1))
    box2_area = abs((box2_x2 - box2_x1) * (box2_y2 - box2_y1))
    # 1e-6 guards against division by zero for degenerate boxes.
    return intersection / (box1_area + box2_area - intersection + 1e-6)
class data_encoder:
    """Encode/decode bounding boxes to and from a YOLO-style grid
    representation, plus small resize/draw helpers.

    NOTE(review): methods use `torch`, `np` and `cv2`, which must be
    imported at module level.
    """

    def __init__(self, grid_dim):
        '''
        grid_dim = [Height, Width] or [Y,X] or [Rows, Columns]
        '''
        self.grid_dim = grid_dim

    def __cornered_to_centered__(self, batch):
        '''
        Batch Shape: (batch_size, 4)
        [x1, y1, x2, y2]
        Replace torch by numpy to make it independent from torch
        '''
        # In-place: width/height first, then the center is derived from the
        # already-overwritten width/height columns.
        batch[..., 2] = batch[..., 2] - batch[..., 0]  # Width
        batch[..., 3] = batch[..., 3] - batch[..., 1]  # Height
        batch[..., 0] = batch[..., 0] + (batch[..., 2]/2.0)  # X center
        batch[..., 1] = batch[..., 1] + (batch[..., 3]/2.0)  # Y center
        return batch

    def __centered_to_cornered__(self, batch):
        '''
        Batch Shape: (batch_size, 4)
        [x, y, w, h]
        Replace torch by numpy to make it independent from torch
        '''
        # NOTE(review): treats (x, y) as the TOP-LEFT corner (x2 = x + w),
        # not the box center -- confirm against callers.
        batch[:, 2] = batch[:, 0] + batch[:, 2]
        batch[:, 3] = batch[:, 1] + batch[:, 3]
        return batch

    def annotaion_encoding(self, batch, image_size, box_format='cornered'):
        '''
        Inputs:
            batch : (batch_size, 4) in torch.FloatTensor
                    [x1, y1, x2, y2]'cornered' or [x, y, w, h]'centered'
            image_size = [height, width]
            box_format = 'cornered' / 'centered'
        output:
            batch : [batch_size, 6]
                    [gridx, gridy, centerx, centery, rel_width, rel_height]
        '''
        print(type(batch))  # NOTE(review): leftover debug print -- consider removing
        centered_batch = batch
        if box_format == 'cornered':
            centered_batch = self.__cornered_to_centered__(batch)
        # Image and Grid Basic Parameters
        grid_dim = self.grid_dim
        image_width = float(image_size[1])
        image_height = float(image_size[0])
        gridx_count = float(grid_dim[1])
        gridy_count = float(grid_dim[0])
        Xcenter = centered_batch[:, 0]
        Ycenter = centered_batch[:, 1]
        Xwidth = centered_batch[:, 2]
        Yheight = centered_batch[:, 3]
        # Grid Cell Height and Width
        grid_width = image_width / gridx_count
        grid_height = image_height / gridy_count
        # Grid coordinates where center of annotated bounding box lies
        Xgrid = torch.floor(Xcenter / grid_width)
        Ygrid = torch.floor(Ycenter / grid_height)
        # Relative coordinates of center of Bounding box wrt to grid cell size
        Xrel_c = (Xcenter - (Xgrid*grid_width)) / grid_width
        Yrel_c = (Ycenter - (Ygrid*grid_height)) / grid_height
        # Relative width and height of bounding box wrt to grid cell size
        Xrel_w = Xwidth/grid_width
        Yrel_w = Yheight/grid_height
        return Xgrid, Ygrid, Xrel_c, Yrel_c, Xrel_w, Yrel_w

    def annotation_encoding_(self, annot_coord, image_size):
        '''
        Single-annotation (non-batched) version of annotaion_encoding.

        INPUTS:
            annot_coord = [[X1, Y1], [X2, Y2]]
                X1, Y1 = Top Left coordinate
                X2, Y2 = Bottom Right coordinate
            image_size = [height, width]
        OUTPUTS:
            grid_coord = [grid X, grid Y] # This shows the grid's location
            relative_center = [X center, Y center] # This shows the relative
                              center point of annotation box wrt width and height
                              of each grid.
            relative_width = [Width, Height] # This shows the relative width and height
                             of annotation box wrt width and height of each grid.
        '''
        # Image and Grid Basic Parameters
        grid_dim = self.grid_dim
        image_width = image_size[1]
        image_height = image_size[0]
        gridx_count = grid_dim[1]
        gridy_count = grid_dim[0]
        # Annotation Parameters basic and inferred
        Xstart = annot_coord[0][0]
        Ystart = annot_coord[0][1]
        Xend = annot_coord[1][0]
        Yend = annot_coord[1][1]
        Xwidth = Xend - Xstart
        Yheight = Yend - Ystart
        Xcenter = (Xstart + Xend) / 2
        Ycenter = (Ystart + Yend) / 2
        # Grid Cell Height and Width
        grid_width = image_width / gridx_count
        grid_height = image_height / gridy_count
        # Grid coordinates where center of annotated bounding box lies
        Xgrid = np.floor(Xcenter / grid_width)
        Ygrid = np.floor(Ycenter / grid_height)
        grid_coord = [int(Xgrid), int(Ygrid)]
        # Relative coordinates of center of Bounding box wrt to grid cell size
        Xrel_c = (Xcenter - (Xgrid*grid_width)) / grid_width
        Yrel_c = (Ycenter - (Ygrid*grid_height)) / grid_height
        relative_center = [Xrel_c, Yrel_c]
        # Relative width and height of bounding box wrt to grid cell size
        Xrel_w = (Xend - Xstart)/grid_width
        Yrel_w = (Yend - Ystart)/grid_height
        relative_width = [Xrel_w, Yrel_w]
        return grid_coord, relative_center, relative_width

    def decode_grid(self, image_size, grid_coords, rel_center, rel_width):
        '''
        Input:
            Image Size : [Y, X]
            grid_coord : (batch_size, 2) [X, Y]
            Relative Center: (batch_size, 2) [X, Y]
            Relative Width : (batch_size, 2) [X, Y]
        Output:
            bounding box coordinates [[xstart, ystart], [xend, yend]] 'Cornered' format
        '''
        grid_dim = self.grid_dim
        image_width = float(image_size[1])
        image_height = float(image_size[0])
        gridx_count = float(grid_dim[1])
        gridy_count = float(grid_dim[0])
        Xgrid = grid_coords[:, 0]
        Ygrid = grid_coords[:, 1]
        # Grid Cell Height and Width
        grid_width = image_width / gridx_count
        grid_height = image_height / gridy_count
        # Grid leftmost point
        grid_X_coord = (grid_width * Xgrid)
        grid_Y_coord = (grid_height * Ygrid)
        # Absolute center offset and box size inside the cell.
        centerx_offset = (grid_width * rel_center[:, 0])
        centery_offset = (grid_height * rel_center[:, 1])
        width = (grid_width * rel_width[:, 0])
        height = (grid_height * rel_width[:, 1])
        # Corner coordinates, floored, stacked as (batch, 4) = (x1, y1, x2, y2).
        X1 = torch.floor(grid_X_coord + centerx_offset - width/2).unsqueeze(1)
        Y1 = torch.floor(grid_Y_coord + centery_offset - height/2).unsqueeze(1)
        X2 = torch.floor(grid_X_coord + centerx_offset + width/2).unsqueeze(1)
        Y2 = torch.floor(grid_Y_coord + centery_offset + height/2).unsqueeze(1)
        stacked = torch.hstack((X1, Y1, X2, Y2))
        return stacked

    def decode_grid_(self, image_size, grid_coord, rel_center, rel_width):
        '''
        Single-box (non-batched) version of decode_grid.

        Input:
            Image Size : [Y, X]
            grid_coord : [Y, X]
            Relative Center: [X, Y]
            Relative Width : [X, Y]
        Output:
            bounding box coordinates [[xstart, ystart], [xend, yend]]
        '''
        grid_dim = self.grid_dim
        image_width = image_size[1]
        image_height = image_size[0]
        gridx_count = grid_dim[1]
        gridy_count = grid_dim[0]
        Xgrid = grid_coord[1]
        Ygrid = grid_coord[0]
        # Grid Cell Height and Width
        grid_width = image_width / gridx_count
        grid_height = image_height / gridy_count
        # Grid leftmost point
        grid_X_coord = (grid_width * Xgrid)
        grid_Y_coord = (grid_height * Ygrid)
        centerx_offset = (grid_width * rel_center[0])
        centery_offset = (grid_height * rel_center[1])
        width = (grid_width * rel_width[0])
        height = (grid_height * rel_width[1])
        X1 = int(grid_X_coord + centerx_offset - width/2)
        Y1 = int(grid_Y_coord + centery_offset - height/2)
        X2 = int(grid_X_coord + centerx_offset + width/2)
        Y2 = int(grid_Y_coord + centery_offset + height/2)
        return [X1,Y1], [X2,Y2]

    def resize_bb_coord(self, actual_im_size, target_im_size, bbox, format='cornered'):
        '''
        Scale a batch of boxes from one image size to another (in place).

        actual_im_size : [Height, Width]
        target_im_size : [Height, Width]
        bbox : [x1, y1, x2, y2] or [x, y, w, h] :: cornered / centered
        '''
        if format == 'centered':
            bbox = self.__centered_to_cornered__(bbox)
        ratio_width = float(target_im_size[1])/float(actual_im_size[1])
        ratio_height = float(target_im_size[0])/float(actual_im_size[0])
        bbox[:, 0] = bbox[:, 0] * ratio_width
        bbox[:, 1] = bbox[:, 1] * ratio_height
        bbox[:, 2] = bbox[:, 2] * ratio_width
        bbox[:, 3] = bbox[:, 3] * ratio_height
        return bbox

    def resize_bb_coord_(self, actual_im_size, target_im_size, bbox):
        '''
        Single-box version of resize_bb_coord; returns int corner pairs.

        actual_im_size : [Height, Width]
        target_im_size : [Height, Width]
        bbox : [[Top Left], [bottom Right]]
        '''
        xstart = bbox[0][0]
        ystart = bbox[0][1]
        xend = bbox[1][0]
        yend = bbox[1][1]
        ratio_width = target_im_size[1]/actual_im_size[1]
        ratio_height = target_im_size[0]/actual_im_size[0]
        xs = int(xstart * ratio_width)
        ys = int(ystart * ratio_height)
        xe = int(xend * ratio_width)
        ye = int(yend * ratio_height)
        dimentions = [[xs, ys], [xe, ye]]
        return dimentions

    def bb_presence_grid_coordinates(self, prediction, threshold):
        '''
        Collect every grid cell whose confidence channel passes `threshold`.

        target: Single matrix output from model [Y, X, Channels]
        threshold: Minimum confidence value (in double)
        '''
        bb_details = []
        height, width, channels = prediction.shape
        for y in range(height):
            for x in range(width):
                if(prediction[y][x][0] >= threshold):  # Channel 0 marks bb presence
                    grid_coordinate = [y, x]
                    relative_center = [prediction[y][x][1], prediction[y][x][2]]  # [X, Y]
                    relative_dimention = [prediction[y][x][3], prediction[y][x][4]]  # [X, Y]
                    bb_details.append([grid_coordinate, relative_center, relative_dimention])
        return np.array(bb_details)

    def show_image(self, dimentions, image, title='Image Display', wait=True,
                   color=(255,0,0), thickness=1):
        '''
        Draw a rectangle on `image` and display it; returns the pressed key
        when `wait` is True.

        dimentions: [[xstart, ystart], [xend, yend]]
        image : OpenCV image
        '''
        xstart = dimentions[0][0]
        ystart = dimentions[0][1]
        xend = dimentions[1][0]
        yend = dimentions[1][1]
        cv2.rectangle(image,
                      (xstart, ystart),
                      (xend, yend),
                      color,
                      thickness)
        cv2.imshow(title, image)
        if(wait == True):
            key = cv2.waitKey(0)
        return key  # NOTE(review): `key` is unbound when wait is False
if __name__ == '__main__':
    # Smoke test: encode two boxes (one per image size) and decode them back.
    # NOTE(review): relies on data_encoder_pytorch and `time`, both of which
    # must be defined/imported earlier in this file.
    encoder = data_encoder_pytorch((7,7))
    image_size = torch.FloatTensor([[400,600],
                                    [100, 100]])
    coordinates = [[333, 72, 425, 158],
                   [30.0, 30.0, 60.0, 60.0]]
    coordinates = torch.FloatTensor(coordinates)
    start_time = time.time()
    Xgrid, Ygrid, Xrel_c, Yrel_c, Xrel_w, Yrel_w = encoder.annotaion_encoding(coordinates, image_size)
    # Re-pack the per-axis outputs into (batch, 2) tensors for decoding.
    grid_coord = torch.hstack((Xgrid.unsqueeze(1), Ygrid.unsqueeze(1)))
    rel_center = torch.hstack((Xrel_c.unsqueeze(1), Yrel_c.unsqueeze(1)))
    rel_width = torch.hstack((Xrel_w.unsqueeze(1), Yrel_w.unsqueeze(1)))
    print(Xgrid)
    print(Ygrid)
    print(Xrel_c)
    print(Yrel_c)
    coords = encoder.decode_grid(image_size, grid_coord, rel_center, rel_width)
    print(coords)
    print("Time Taken:", time.time() - start_time)
    # %%
|
class Group(object):
    """One `{...}` group in the stream; tracks its depth and nested groups."""

    def __init__(self, parent=None):
        self.parent = parent
        self.children = set()
        if parent:
            self.depth = parent.depth + 1
            parent.children.add(self)
        else:
            self.depth = 1

    def score_with_children(self):
        """Score of this group (its depth) plus all descendant scores."""
        return self.depth + sum(child.score_with_children() for child in self.children)


def run(_in):
    """Parse a group/garbage character stream (AoC 2017 day 9 format).

    '{'/'}' open and close groups, '<...>' is garbage, and '!' cancels the
    following character.  Returns a tuple of (total score of the last
    top-level group tree, number of non-cancelled garbage characters).
    """
    current = None
    top = None
    in_garbage = False
    skip_next = False
    garbage_count = 0
    for char in _in:
        if skip_next:
            skip_next = False
            continue
        if in_garbage:
            if char == '!':
                skip_next = True
            elif char == '>':
                in_garbage = False
            else:
                garbage_count += 1
        elif char == '<':
            in_garbage = True
        elif char == '{':
            if current is None:
                current = top = Group()
            else:
                current = Group(current)
        elif char == '}':
            current = current.parent
    return (top.score_with_children(), garbage_count)
if __name__ == '__main__':
    # Read the puzzle input and print (total score, garbage character count).
    with open('09.input') as f:
        print(run(f.read().strip()))
|
'''
Created on Mar 7, 2014
@author: huunguye
References:
1. Markov Chains and Monte Carlo Methods (Ioana)
'''
import random
#######################################################
# Gibbs sampler for bivariate normal (p.49 [1])
#
def sample_bivariate_normal():
    """Gibbs-sample a bivariate normal and estimate P(X1 >= 0, X2 >= 0).

    Alternately draws X1 | X2 and X2 | X1 from their conditional normal
    distributions (p.49 of [1]) and returns the fraction of samples that
    land in the first quadrant (also printed, as before).
    """
    mu1 = 0.0
    mu2 = 0.0
    sig1 = 1.0      # sigma_1^2 (variance of X1)
    sig2 = 1.0      # sigma_2^2 (variance of X2)
    sig12 = 0.3     # sigma_12 (covariance)
    MAX_T = 10000
    X1 = [0.0 for i in range(MAX_T+1)]
    X2 = [0.0 for i in range(MAX_T+1)]
    for t in range(1, MAX_T+1):
        # Conditional of X1 | X2=x2: mean mu1 + (s12/s22)(x2 - mu2),
        # variance s11 - s12^2/s22.  random.gauss takes the STANDARD
        # DEVIATION, so pass the square root of the conditional variance
        # (the original passed the variance itself -- a bug).
        X1[t] = random.gauss(mu1 + sig12/sig2*(X2[t-1]-mu2), (sig1 - sig12*sig12/sig2) ** 0.5)
        X2[t] = random.gauss(mu2 + sig12/sig1*(X1[t]-mu1), (sig2 - sig12*sig12/sig1) ** 0.5)
    count = 0
    for t in range(1, MAX_T+1):
        if X1[t] >= 0 and X2[t] >= 0:
            count += 1
    frac = float(count)/MAX_T
    # Single-argument print() works in both Python 2 and 3 (the original
    # used a Python-2-only print statement).
    print("% area in first quadrant = " + str(frac))
    return frac
#######################################################
if __name__ == '__main__':
    # Run the Gibbs sampler demo as a script.
    sample_bivariate_normal()
from setuptools import setup, Extension
# C extension built from the bundled yespower-1.0.1 reference sources.
resistance_yespower_module = Extension('resistance_yespower',
    sources = ['yespower.c',
               'yespower-platform.c',
               'yespower-opt.c',
               'yespower-ref.c',
               'sha256.c'
               ],
    # Speed-oriented optimization flags for the hashing code.
    extra_compile_args=['-O2', '-funroll-loops', '-fomit-frame-pointer'],
    include_dirs=['.'])

setup (name = 'resistance_yespower',
    version = '1.0.1',
    author_email = 'developer@resistance.io',
    author = 'Resistance Developers',
    url = 'https://github.com/ResistancePlatform/resistance_yespower_python3',
    description = 'Bindings for yespower-1.0.1 proof of work used by Resistance',
    ext_modules = [resistance_yespower_module])
|
class Solution:
    def findPerm(self, s, n):
        """Build a permutation of 1..n matching the 'I'/'D' pattern in s.

        s[i-1] == 'I' means result[i-1] < result[i]; 'D' means a decrease
        (len(s) == n - 1).  Classic stack technique: push consecutive
        numbers and flush the stack on every 'I' (and once at the end) so
        each run of 'D's comes out in decreasing order.

        Returns a list of n ints.
        """
        # Removed the original's unused `max`/`min` locals, which also
        # shadowed the builtins.
        result = [None] * n
        stack = []
        j = 0
        for i in range(1, n):
            stack.append(i)
            if s[i - 1] == "I":
                # An increase ends the current decreasing run: emit it reversed.
                while stack:
                    result[j] = stack.pop()
                    j += 1
        stack.append(n)
        while stack:
            result[j] = stack.pop()
            j += 1
        return result
# Earlier two-pointer variant, kept commented out for reference:
#             max -=1
#         else:
#             res.append(min)
#             min+=1
#     res.append(max)
#     res.reverse()
#     return res
# Quick manual check of findPerm.
g = Solution()
print(g.findPerm("DDIIIID",8))
|
# Read integers from stdin until a 0 sentinel; collect them (0 not stored).
lista = []
while True:
    a = int(input())
    if a == 0:
        break
    lista.append(a)
def inverte(lista):
    """Return a copy of `lista` with the element order reversed."""
    invertida = lista[::-1]
    return invertida
# Print the collected numbers in reverse input order, one per line.
for i in inverte(lista):
    print(i)
from django.http import HttpResponse
import datetime
from django.template import Template, Context
#from django.template import loader, get_template#no tan simplificado como el de abajo
from django.template.loader import get_template #---> MAS DIRECTO EL PROCESO
from django.shortcuts import render
class taco_special(object):
    # Simple value holder for two taco ingredients, used by the menu view.
    def __init__(self, pastor, queso):
        self.pastor = pastor
        self.queso = queso
def menu(request):  # example view: passes Python values into the HTML template via a context
    """Render the menu page (vista1.html) with the taco menu in context.

    Template loading is simplified with ``render`` from django.shortcuts:
    it replaces the manual open/read/close/Template/Context dance shown in
    the commented-out lines below and returns the HttpResponse directly.
    """
    ts = taco_special("pastor", "queso")
    menu_gringas = ["suadero", "pastor", "campechana", "tripa", "longamuerdes"]
    hoy = datetime.datetime.now()
    # Manual alternative, kept for reference:
    #doc_externo = open("C:/Users/dancr/Desktop/pag/pag/templates/vista1.html")  # open the page by hand
    #doc_externo=get_template('vista1.html')  # simplified loading (template dirs configured in settings)
    #plt = Template(doc_externo.read())  # read the template
    #doc_externo.close()  # close the template file
    #ctx = Context({"menu_gringas":menu_gringas})  # bind the dict to the template
    #muestramenu = doc_externo.render({"gringas":menu_gringas, "fecha":hoy, "ingrediente_1":ts.pastor, "ingrediente_2":ts.queso})  # simplified get_template path
    ####return HttpResponse(muestramenu)
    return render(request, "vista1.html", {"gringas":menu_gringas, "fecha":hoy, "ingrediente_1":ts.pastor, "ingrediente_2":ts.queso})
    # `render` (django.shortcuts) builds and returns the response without
    # storing the intermediate objects in extra variables.
|
# -*- coding:utf-8 -*-
class ListNode:
    """Singly linked list node."""

    def __init__(self, x=None):
        # Bug fix: the original assigned to ListNode.val / ListNode.next
        # (class attributes shared by EVERY node) instead of the instance,
        # so all nodes reported the most recently constructed value.
        self.val = x
        self.next = None


class Solution:
    def deleteDuplication(self, pHead):
        """Remove every value that appears more than once in a sorted list.

        Duplicated nodes are dropped entirely, e.g.
        1->2->3->3->4->4->5 becomes 1->2->5.

        :param pHead: head node of a sorted linked list (or None)
        :return: head of the filtered list (or None)
        """
        first = ListNode(-1)   # dummy head so the real head can be dropped too
        first.next = pHead
        last = first           # last node known to be unique
        while pHead and pHead.next:
            if pHead.val == pHead.next.val:
                # Skip the entire run of equal values.
                val = pHead.val
                while pHead and val == pHead.val:
                    pHead = pHead.next
                last.next = pHead
            else:
                last = pHead
                pHead = pHead.next
        return first.next
s = Solution()
singleNode = ListNode()
node1 = ListNode(1)
node2 = ListNode(2)
node3 = ListNode(3)
node4 = ListNode(3)
node5 = ListNode(5)
node1.next = node2
node2.next = node3
node3.next = node4
node4.next = node5
print(s.deleteDuplication(singleNode))
print(s.deleteDuplication(node1))
|
#coding: utf-8
#---------------------------------------------------------
# A program that reads two grades and computes the average.
#---------------------------------------------------------
# Arithmetic Mean - Exercise #007
#---------------------------------------------------------
n1 = float(input('Digite sua primeira nota: '))  # first grade
n2 = float(input('Digite sua outra nota: '))     # second grade
print('---' *22)
# \033[35m ... \033[m wrap the result in magenta ANSI color codes.
print('\033[35mSua média total foi: {}\033[m'.format((n1+n2)/2))
print('---' *22)
|
#!/usr/bin/python
"""
Starter code for exploring the Enron dataset (emails + finances);
loads up the dataset (pickled dict of dicts).
The dataset has the form:
enron_data["LASTNAME FIRSTNAME MIDDLEINITIAL"] = { features_dict }
{features_dict} is a dictionary of features associated with that person.
You should explore features_dict as part of the mini-project,
but here's an example to get you started:
enron_data["SKILLING JEFFREY K"]["bonus"] = 5600000
"""
import pickle
# NOTE(review): Python 2 script (print statements, dict.values() indexing,
# text-mode "r" pickle read).  Missing values are encoded as the string "NaN".
enron_data = pickle.load(open("../final_project/final_project_dataset.pkl", "r"))
print "Size Of The Enron DataSet: {0}".format(len(enron_data))
print "Features In The Enron DataSet: {0}".format(len(enron_data.values()[0]))
# Persons of interest are flagged by the boolean 'poi' feature.
pois = [key for key, value in enron_data.items() if value['poi']]
print "Number Of POIs: {0}".format(len(pois))
print "Stock Held By James Prentice: {0}".format(enron_data["PRENTICE JAMES"]["total_stock_value"])
# Compare total payments among the three executives.
names = ["LAY KENNETH L", "SKILLING JEFFREY K", "FASTOW ANDREW S"]
payments = [(name, enron_data[name]["total_payments"]) for name in names]
print "{0} took most money.".format(sorted(payments, key=lambda x: x[1], reverse=True)[0][0])
salaries = [key for key, value in enron_data.items() if value["salary"] != "NaN"]
print "Number Of Folks Having A Quantified Salary: {0}".format(len(salaries))
emails = [key for key, value in enron_data.items() if value["email_address"] != "NaN"]
print "Number Of Folks Having A Known E-Mail Address: {0}".format(len(emails))
NaNPayments = [key for key, value in enron_data.items() if value["total_payments"] == "NaN"]
print "Number And Percentage Of Folks Having A 'NaN' For Total Payments: {0}, {1}%".format(len(NaNPayments), round(100.0 * len(NaNPayments) / len(enron_data), 3))
|
"""
Copyright (C) 2010 Laszlo Simon
This file is part of Treemap.
Treemap is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Treemap is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Treemap. If not, see <http://www.gnu.org/licenses/>.
Author : Laszlo Simon <laszlo.simon@gmail.com>
"""
import sys
import random
def load(input):
    """Parse a tab-separated treemap box file into a list of dicts.

    The first line (header) is skipped; each remaining line must contain
    name, left, top, width and height separated by tabs.

    :param input: path of the file to read
    :return: list of {'name', 'left', 'top', 'width', 'height'} dicts
    """
    data = []
    # `with` guarantees the file is closed even if a line fails to parse
    # (the original used a bare open/close pair).
    with open(input, 'r') as f:
        f.readline()  # skip the header line
        for line in f:
            line = line.rstrip()
            array = line.split('\t')
            data.append({
                'name' : array[0].rstrip(),
                'left' : float(array[1]),
                'top'  : float(array[2]),
                'width' : float(array[3]),
                'height': float(array[4])
            })
    return(data)
def save(output, data, w, h):
    # Write a standalone LaTeX/PSTricks document that draws one shaded,
    # labelled rectangle per treemap box.  Python 2 only (`print >>f`).
    #   output : path of the .tex file to create
    #   data   : list of box dicts as returned by load()
    #   w, h   : picture width/height in PSTricks units
    f = open(output, 'w')
    print >>f, '''
\\documentclass[]{article}
\\RequirePackage{pstricks}
\\usepackage{pst-all}
\\renewcommand\\familydefault{\\sfdefault}
\\begin{document}
\\noindent
\\thispagestyle{empty}
\\begin{center}
\\begin{pspicture}(%(w)d,%(h)d)
''' % { 'w': w, 'h': h}
    for box in data:
        name = box['name']
        texname = name.replace("_","\\_")
        # PSTricks' y axis points up, so top/bottom are flipped via h - y.
        L = box['left']
        T = h - box['top']
        R = box['left'] + box['width']
        B = h - (box['top'] + box['height'])
        # Random light-blue gradient pair per box.
        rnd = random.random()
        color1 = 0.5 + 0.5 * rnd
        color2 = 0.4 + 0.4 * rnd
        # Label horizontally in wide boxes, rotated 90 degrees in tall ones.
        if box['width'] > box['height']:
            TX = L + 0.1
            TY = T - 0.1
            rot = 0
        else:
            TX = L + 0.1
            TY = B + 0.1
            rot = 90
        print >>f, '''
\\newrgbcolor{color1%(name)s}{%(color1)f %(color1)f 1}
\\newrgbcolor{color2%(name)s}{%(color2)f %(color2)f 1}
\\psframe[linewidth=0.5pt,
fillstyle=gradient,
gradangle=0,
gradbegin=color1%(name)s,
gradend=color2%(name)s](%(L)f,%(T)f)(%(R)f,%(B)f)
\\psclip{\\psframe[linestyle=none](%(L)f,%(T)f)(%(R)f,%(B)f)}
\\rput[tl]{%(rot)f}(%(TX)f,%(TY)f){\\tiny %(texname)s}
\\endpsclip
''' % { 'name': name,
        'texname': texname,
        'color1': color1,
        'color2': color2,
        'L': L, 'T': T,
        'R': R, 'B': B,
        'TX': TX, 'TY': TY,
        'rot': rot}
    print >>f, """
\\end{pspicture}
\\end{center}
\\end{document}
"""
    f.close()
#
# M A I N
#
# Usage: script.py <input.tsv> <output.tex> <width> <height>
input = sys.argv[1]
output = sys.argv[2]
w = float(sys.argv[3])
h = float(sys.argv[4])
# Example values for manual testing:
#input = 'test.csv'
#output = 'test.tex'
#w = 14
#h = 14
data = load(input)
save(output, data, w, h)
|
from django.shortcuts import get_object_or_404
from decimal import Decimal
from django.conf import settings
from products.models import Product
def cart_contents(request):
    """Context processor: build cart line items and totals from the session.

    The session cart maps item_id to one of three value shapes:
      * int                        -> plain quantity
      * dict without 'product_ram' -> {ram_option: {power: quantity}}
      * dict with 'product_ram'    -> {'product_ram': {ram: quantity}}
    Returns a template context with items, totals and the delivery charge.
    """
    cart_items = []
    total = 0
    product_count = 0
    cart = request.session.get('cart', {})
    for item_id, item_data in cart.items():
        if isinstance(item_data, int):
            # Simple product: the stored value is just the quantity.
            product = get_object_or_404(Product, pk=item_id)
            total += item_data * product.price
            sub_total = item_data * product.price
            product_count += item_data
            cart_items.append({
                'item_id': item_id,
                'quantity': item_data,
                'product': product,
                'sub_total': sub_total
            })
        elif 'product_ram' not in item_data.keys():
            # Nested options: {ram_option: {power: quantity}}.
            product = get_object_or_404(Product, pk=item_id)
            ram_options = list(item_data.keys())
            for opt in ram_options:
                for power, quantity in item_data[opt].items():
                    total += quantity * product.price
                    sub_total = quantity * product.price
                    product_count += quantity
                    cart_items.append({
                        'item_id': item_id,
                        'quantity': quantity,
                        'product': product,
                        'ram': opt,
                        'power': power,
                        'sub_total': sub_total
                    })
        else:
            # RAM-only variants: {'product_ram': {ram: quantity}}.
            product = get_object_or_404(Product, pk=item_id)
            for ram, quantity in item_data['product_ram'].items():
                total += quantity * product.price
                sub_total = quantity * product.price
                product_count += quantity
                cart_items.append({
                    'item_id': item_id,
                    'quantity': quantity,
                    'product': product,
                    'ram': ram,
                    'sub_total': sub_total
                })
    # Percentage-based delivery charge below the free-delivery threshold.
    if total < settings.FREE_DELIVERY:
        delivery = total * Decimal(settings.DELIVERY_PERCENT / 100)
        free_delivery_delta = settings.FREE_DELIVERY - total
    else:
        delivery = 0
        free_delivery_delta = 0
    grand_total = delivery + total
    context = {
        'cart_items': cart_items,
        'total': total,
        'product_count': product_count,
        'delivery': delivery,
        'free_delivery_delta': free_delivery_delta,
        # NOTE(review): this exposes DELIVERY_PERCENT as the "threshold",
        # but the threshold compared above is settings.FREE_DELIVERY --
        # confirm which value templates expect.
        'free_delivery_threshold': settings.DELIVERY_PERCENT,
        'grand_total': grand_total,
    }
    return context
|
#####################################################
# Python script that reads betaV images which are
# FITS images that has pixel values of the flux ratios
# between V = g-0.59(g-r)-0.01 (Jester+05) and Spitzer
# IRAC 1 band and analyze
# written by Duho Kim (2/19/18)
######################################################
from pyraf import iraf
from astropy.io import fits
from astropy.io import ascii
import numpy as np
import numpy.ma as ma
from astropy.wcs import WCS
from astropy.table import Table, vstack
from astropy.visualization import make_lupton_rgb
from astropy.cosmology import Planck15 as cosmo
import os
import os.path
import sys
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from matplotlib import colors
import matplotlib.image as mpimg
from mpl_toolkits.axes_grid1 import make_axes_locatable
from astropy.coordinates import SkyCoord
import math
from shutil import copyfile
from pandas import DataFrame, read_csv
import pandas as pd
from operator import itemgetter
import copy
# Run-mode switches and plotting/measurement constants.
AV_fit = False
bv_fit = False
cmap = 'gist_rainbow'  ### color map for betaV images
cmap2 = colors.ListedColormap(['white','cyan','gray'])
pscale = 0.6  ### pixel scale ["]
snr = 3  ### signal to noise ratio that we constrain
#bv0s = [0.645,0.64,0.695,0.74,0.895,1.075,1.41]  ### beta_V,0 for E0,S0(SFH3),Sa,Sb,Sbc(SFH4),Sc,Sd(SFH5) from Table 5 Kim+17
bv0s=[ [1.9409089,1.9044532,1.3527486,1.1198820,0.82968015,0.58503551],  # BetaVzero values for SFH3,4,5 and Z=.0001,.0004,.004,.008,.02,.05
    [1.9860456,1.9576220,1.4390702,1.2023316,0.91737698,0.65453906],
    [2.3880801,2.3478914,1.6838646,1.4124115,1.1048444,0.77272439]]
htypes = ['E0','S0','S0/a','Sa','Sab','Sb','Sbc','Sc','Scd','Sd','Sdm','Sm','Im','?']  # Hubble types corresponding Nair+10 T-Type
agntypes = ['SF','transition/mixed AGN','Seyfert','LINER']  # Kauffmann+03 from Nair+10
# Input/output locations (machine-specific absolute paths).
work_dir = '/Users/dhk/work/data/NGC_IC/'
cat_dir = '/Users/dhk/work/cat/NGC_IC/'
# NED query results, split across five files and stacked below.
ned1=ascii.read("/Users/dhk/work/cat/NGC_IC/ned_result_1.txt")
ned2=ascii.read("/Users/dhk/work/cat/NGC_IC/ned_result_2.txt")
ned3=ascii.read("/Users/dhk/work/cat/NGC_IC/ned_result_3.txt")
ned4=ascii.read("/Users/dhk/work/cat/NGC_IC/ned_result_4.txt")
ned5=ascii.read("/Users/dhk/work/cat/NGC_IC/ned_result_5.txt")
# row | input object name | object name | RA | Dec | Gal. Ext. Burstein & Heiles A_B mag | Object Type
# (1) |       (2)         |    (3)      | (4)| (5) |              (6)                    |    (7)
# Redshift | Redshift Uncertainty | Mag./Filter | Major Diam | Minor Diam | Morph. | Ref.|
#   (8)    |        (9)           |    (10)     |   (11)     |    (12)    |  (13)  | (14)|
ned_tot=vstack([ned1,ned2,ned3,ned4,ned5])  # Total NED output catalog
#sha_cat=ascii.read(cat_dir+'sha_quarry_batch_257_without_prefix.txt')
sha_cat=ascii.read(cat_dir+'sha_ned_match_off_lt_1_SDSS_100psfs_b_over_a_gt_05_from_Tom_2_erase_Tamura_match_segmentation.csv')  # Sample NGC/IC numbers
sha2fig=np.load('/Users/dhk/work/data/NGC_IC/pdf/sha2fig.npy')  # load figure numbers for each galaxies
rc3=ascii.read("/Users/dhk/work/cat/NGC_IC/RC3/myrc3.dat")
rosa_AV=np.genfromtxt("/Users/dhk/work/data/rosa/AV.txt")  # read A_V values for different Hubble types from CALIFA paper
file=r'/Users/dhk/work/cat/NGC_IC/Steinicke/NI2018.xls'  # read NGC/IC catalogue
df = pd.read_excel(file)
file=r'/Users/dhk/work/cat/NGC_IC/ttype.xls'  # read T-type
ttype = pd.read_excel(file)
####################### Jarrett+03 ########################################################
# Modified logarithmic visualization method P' = sqrt( log(1+P/(n*sig)) ), where P is the pixel intensity value, sig is the image rms "noise",
# and n is a threshold throttle (w/ satisfactory values between 5 and 10
def Jarrett(pix_values, sigma):
    """Modified-log display scaling P' = sqrt(log10(1 + P/(n*sigma))) (Jarrett+03)."""
    threshold_throttle = 7.5  # satisfactory values lie between 5 and 10
    scaled = pix_values / (threshold_throttle * sigma)
    return np.sqrt(np.log10(1 + scaled))
############################################################################################
# Per-galaxy accumulators for the 257-object sample.
gal_type=[]
betav_med=[]
betav_257=np.zeros(len(sha_cat))
betav_257_std=np.zeros(len(sha_cat))
ttype_257=np.zeros(len(sha_cat))
ttype_257_err=np.zeros(len(sha_cat))
betav_257_center=np.zeros(len(sha_cat))
betav_257_center_std=np.zeros(len(sha_cat))
######################### SURFACE BRIGHTNESS MEASURE #######################################
def isophote(fits,cx,cy,pa,boa,rad_min,rad_max,rad_num):
    # Measure mean/median/std in log-spaced elliptical annuli around (cx, cy).
    #   fits            : 2-D image array (MODIFIED IN PLACE)
    #   cx, cy          : ellipse center in pixels
    #   pa              : position angle [deg]; boa : axis ratio b/a
    #   rad_min/max/num : inner/outer radius and number of annuli
    # Returns (fits, radii, iso_mean, iso_med, iso_std, tot_mean, tot_med).
    # NOTE(review): the first statement overwrites the whole image with NaN
    # before any pixel is read, so the returned statistics are all NaN and
    # `fits` becomes a pure annulus-mask image -- confirm this is intended.
    fits[:] = float('nan')
    yy,xx = np.ogrid[cy-rad_max:cy+rad_max,cx-rad_max:cx+rad_max]  # opposite convention between FITS & Python array indexing
    r = np.logspace(np.log10(rad_min),np.log10(rad_max),rad_num+1)
    iso_mean = np.zeros(rad_num)
    iso_med = np.zeros(rad_num)
    iso_std = np.zeros(rad_num)
    tot_mean = np.zeros(rad_num)
    tot_med = np.zeros(rad_num)
    pa_rad = math.radians(-pa)  # convert PA convention to functional form with units
    for ii in range(1,rad_num+1):
        # General Equation of an Ellipse : (x*cos(A) + y*sin(A))^2/a^2 + (x*sin(A)-y*cos(A))^2/b^2 = 1
        # ind1/ind2 select pixels outside the inner ellipse and inside the
        # outer one (with a 1-px guard), i.e. one elliptical annulus.
        ind1 = 1 < ((yy-cy)*math.cos(pa_rad)+(xx-cx)*math.sin(pa_rad))**2/(r[ii-1]+1)**2 + ((yy-cy)*math.sin(pa_rad)-(xx-cx)*math.cos(pa_rad))**2/((r[ii-1]+1)*boa)**2
        ind2 = 1 > ((yy-cy)*math.cos(pa_rad)+(xx-cx)*math.sin(pa_rad))**2/(r[ii]-1)**2 + ((yy-cy)*math.sin(pa_rad)-(xx-cx)*math.cos(pa_rad))**2/((r[ii]-1)*boa)**2
        ind = ind1 * ind2
        anul = fits[ind]
        iso_mean[ii-1] = np.nanmean(anul)
        iso_med[ii-1] = np.nanmedian(anul)
        iso_std[ii-1] = np.nanstd(anul)
        fits[ind] = ii*10  # paint the annulus id into the (mask) image
        if ii==1:
            tot_mean[0] = iso_mean[ii-1]*np.sum(ind)
            tot_med[0] = iso_med[ii-1]*np.sum(ind)
        else:
            # Cumulative totals: previous sum plus annulus value x pixel count.
            tot_mean[ii-1] = tot_mean[ii-2] + iso_mean[ii-1]*np.sum(ind)
            tot_med[ii-1] = tot_med[ii-2] + iso_med[ii-1]*np.sum(ind)
    return fits, r[1:], iso_mean, iso_med, iso_std, tot_mean, tot_med
#############################################################################################
#for x in range(0,len(sha_cat)):
for x in range(0,5):  # NOTE(review): truncated run (first 5 galaxies); the full-catalogue loop is commented out above
    # ---- resolve NGC/IC identifiers used for the cross-catalogue matches ----
    name = sha_cat['col1'][x] # NGC/IC name
    ###### NGC match ####
    if name[0]=='n':
        galnum = name[3:].strip()
        # match on the integer ID *before* zero-padding the string form below
        ngc_match = df.loc[(df['N']=='N') & (df['NI']==int(galnum))]
        if len(galnum) == 3:
            galnum='0'+galnum
        elif len(galnum) ==2:
            galnum='00'+galnum
        elif len(galnum) ==1:
            galnum='000'+galnum
        rc3name = 'NGC'+galnum
        table_name = 'NGC '+galnum
    elif name[0]=='i':
        galnum = name[2:].strip()
        ngc_match = df.loc[(df['N']=='I') & (df['NI']==int(galnum))]
        if len(galnum) == 3:
            galnum='0'+galnum
        elif len(galnum)==2:
            galnum='00'+galnum
        elif len(galnum)==1:
            galnum='000'+galnum
        rc3name = 'IC'+galnum
        table_name = 'IC '+galnum
    # NOTE(review): ngc_match is unbound if `name` starts with neither 'n' nor 'i' -- verify catalogue only contains those
    pa = ngc_match.iloc[0,21]  # position angle [deg] -- presumably column 21 of the NGC table; confirm
    if math.isnan(pa):
        pa=0
    elif pa > 90:
        pa=pa-180.0  # fold PA into (-90, 90]
    #### RC3 catalogue #########
    rc3_match = rc3[[j for j,s in enumerate(rc3['name']) if s.strip() == rc3name]]
    if len(rc3_match) != 1:
        print('rc3 match is not one')
        ttype_257_err[x] = 1
    elif rc3_match['eT'][0] == '*':
        ttype_257_err[x] = 1  # '*' marks a missing T-type uncertainty in RC3
    else:
        ttype_257_err[x] = rc3_match['eT'][0]
    ###### T-type match ####
    ttype_match = ttype.loc[ttype['name']==table_name]
    T = ttype_match.iloc[0,5]
    ttype_257[x] = T
    # Pick the dust-free betaV zero point (bv0), its 6-metallicity set (bv0Zs) and
    # the reference A_V profile for this morphological T-type bin.
    if T < -3:
        bv0 = bv0s[0][5] # for Dust profile on Figure
        bv0Zs = bv0s[0] # for Dust profiles 6 Zs
        AV_prof=rosa_AV[0][2:]
    if T < 0 and T >= -3:
        bv0 = bv0s[0][5]
        bv0Zs = bv0s[0]
        AV_prof=rosa_AV[2][2:]
    if T < 2 and T >=0:
        bv0 = bv0s[1][4]
        bv0Zs = bv0s[1]
        AV_prof=rosa_AV[4][2:]
    if T < 4 and T >=2:
        bv0 = bv0s[1][4]
        bv0Zs = bv0s[1]
        AV_prof=rosa_AV[6][2:]
    if T < 5 and T >=4:
        bv0 = bv0s[1][3]
        bv0Zs = bv0s[1]
        AV_prof=rosa_AV[8][2:]
    if T < 7 and T >=5:
        bv0 = bv0s[2][3]
        bv0Zs = bv0s[2]
        AV_prof=rosa_AV[10][2:]
    if T < 9 and T >=7:
        bv0 = bv0s[2][3]
        bv0Zs = bv0s[2]
        AV_prof=rosa_AV[12][2:]
    if T>=9:
        bv0 = -1 # no BetaVzero is available for these types
    ###### NED match ####
    ned_match = ned_tot[ned_tot['col2']==name]
    # NOTE(review): the x == 90 special case can never trigger with the truncated range(0,5) above
    if x == 90:
        GalExtFactor=10.0**(0.072/2.5) # NGC 4561, A_V=0.072 (NED)
    else:
        GalExtFactor=10.0**(ned_match['col6'][0]/2.5) # read Galactic extinction from NED [Vint/Vobs]
    gal_type.append(ned_match['col13'][0])
    semi = ned_match['col11'][0] # read semi-major axis from NED [']
    thumb = int(semi*100) # a half size of figure size -- presumably 100 px per arcmin; confirm pixel scale
    z = ned_match['col8'][0] # read redshift from NED
    boa = ned_match['col12'][0] / ned_match['col11'][0]  # axis ratio b/a from NED minor/major
    ##### WCS read #####
    w = WCS(work_dir+'SHA/SHA_NGC_IC_LONG_mean/'+name+'.fits')
    i,j = w.all_world2pix(sha_cat['col2'][x],sha_cat['col3'][x],1)  # (i,j) = (x,y) pixel centre of the galaxy
    ##### READ FITS #####
    hdu = fits.open(work_dir+'SDSS/g/'+name+'-gir.fits')
    gir = hdu[0].data
    hdu = fits.open(work_dir+'SDSS/gir_seg/'+name+'-gir.seg.fits')
    gir_seg = hdu[0].data
    hdu = fits.open(work_dir+'SDSS/r/'+name+'-rir.fits')
    rir = hdu[0].data
    hdu = fits.open(work_dir+'SDSS/rir_seg/'+name+'-rir.seg.fits')
    rir_seg = hdu[0].data
    hdu = fits.open(work_dir+'SHA/SHA_NGC_IC_LONG_mean/'+name+'.fits')
    i1 = hdu[0].data
    hdu = fits.open(work_dir+'SHA/SHA_NGC_IC_LONG_mean/'+name+'.std.fits')
    i1u = hdu[0].data  # per-pixel uncertainty of the IRAC 3.6um mosaic
    ###### Calculate V-band ##########
    V = 10**(np.log10(gir)+0.59*(np.log10(rir/gir))+0.004) # Jester+05 All stars R-I < 1.15
    ###### Calculate betaV, betag ####
    betav_raw = V/(i1*2.330451129)*GalExtFactor # correct for zero point difference between SDSS & Spitzer
    betag_raw = gir/(i1*2.330451129)*GalExtFactor # and Galactic Extinction
    ###### READ SE SEGMENTATION MAP ####
    hdu = fits.open(work_dir+'SHA/segmap/'+name+'.fits')
    i1_seg = hdu[0].data
    ####### SEGMENATION & select SNR > 3 pixels #################
    betav_seg = V/(i1*2.330451129)*GalExtFactor
    betav_seg[np.where(i1_seg!=1)] = float('nan')  # keep only pixels SExtractor assigned to the galaxy (ID 1)
    V_seg = copy.deepcopy(V)
    V_seg[np.where(i1_seg!=1)] = float('nan')
    gir_cent = gir[int(j)-500:int(j)+500,int(i)-500:int(i)+500] # only use central region due to stripe regions after registering
    gir_seg_cent = gir_seg[int(j)-500:int(j)+500,int(i)-500:int(i)+500]
    rir_cent = rir[int(j)-500:int(j)+500,int(i)-500:int(i)+500]
    rir_seg_cent = rir_seg[int(j)-500:int(j)+500,int(i)-500:int(i)+500]
    gerr = np.nanstd(gir_cent[np.where(gir_seg_cent==0)])  # sky noise from non-source pixels
    rerr = np.nanstd(rir_cent[np.where(rir_seg_cent==0)])
    betav_seg[(i1/i1u < snr) | (gir/gerr < snr) | (rir/rerr < snr)] = float('nan')  # S/N cut in all three bands
    betav_257[x] = np.nanmean(betav_seg)
    betav_257_std[x] = np.nanstd(betav_seg)
    betav_257_center[x] = np.nanmean(betav_seg[int(j)-1:int(j)+2,int(i)-1:int(i)+2])  # central 3x3 pixels
    betav_257_center_std[x] = np.nanstd(betav_seg[int(j)-1:int(j)+2,int(i)-1:int(i)+2])
    ####### PLOT FIGURES for each sample ###############
    fig = plt.figure(figsize=(12,7.8))
    h11 = fig.add_axes([0.0,0.5,0.315,0.5])    # RGB composite
    h12 = fig.add_axes([0.315,0.5,0.315,0.5])  # betaV map
    h13 = fig.add_axes([0.63,0.5,0.331,0.5])   # betag map
    h21 = fig.add_axes([0.0,0.0,0.315,0.5])    # segmentation map
    h22 = fig.add_axes([0.315,0.0,0.315,0.5])  # S/N-cut betaV map
    minsize = min([min(i1.shape),min(rir.shape),min(gir.shape)])
    if thumb*2 > minsize:
        thumb=int(minsize/2)  # clamp thumbnail to the smallest image
    img11 = make_lupton_rgb ( i1[int(j)-thumb:int(j)+thumb,int(i)-thumb:int(i)+thumb]*2.33, \
            rir[int(j)-thumb:int(j)+thumb,int(i)-thumb:int(i)+thumb], \
            gir[int(j)-thumb:int(j)+thumb,int(i)-thumb:int(i)+thumb] , Q=10, stretch=0.5)
    h11.imshow(img11,origin='lower')
    h11.get_xaxis().set_visible(False)
    h11.get_yaxis().set_visible(False)
    h11.text(0.02,0.95,r'R : IRAC 3.6$\mu$m',color='white',fontsize=15,transform=h11.transAxes,fontweight='bold')
    h11.text(0.02,0.90,'G : SDSS r',color='white',fontsize=15,transform=h11.transAxes,fontweight='bold')
    h11.text(0.02,0.85,'B : SDSS g',color='white',fontsize=15,transform=h11.transAxes,fontweight='bold')
    h11.text(0.6,0.95,table_name,color='white',fontsize=15,transform=h11.transAxes,fontweight='bold')
    h11.text(0.3,0.05,ned_match['col13'][0],color='white',fontsize=15,transform=h11.transAxes,fontweight='bold')
    kpc_arcmin = cosmo.kpc_proper_per_arcmin(z) # read xxx kpc / arcmin
    arcmin_5kpc = 5.0/kpc_arcmin # calculate xxx arcmin / 5 kpc
    frac_5kpc = arcmin_5kpc*100.0/(2*thumb) # calculate fraction of a length of 5 kpc in fig size
    h11.plot([0.05,0.05+frac_5kpc.value],[0.02,0.02],color='white',transform=h11.transAxes)
    h11.text(0.02,0.05,'5kpc, '+'{:4.2f}'.format(arcmin_5kpc.value)+'\'',color='white',fontsize=12,transform=h11.transAxes)
    img1=h12.imshow(betav_raw[int(j)-thumb:int(j)+thumb,int(i)-thumb:int(i)+thumb],cmap=cmap,clim=[0,2],origin='lower')
    h12.get_xaxis().set_visible(False)
    h12.get_yaxis().set_visible(False)
    h12.text(0.02,0.9,r'$\beta_{V}$ (V/3.6$\mu$m)',color='black',fontsize=18,transform=h12.transAxes,fontweight='bold')
    img2=h13.imshow(betag_raw[int(j)-thumb:int(j)+thumb,int(i)-thumb:int(i)+thumb],cmap=cmap,clim=[0,2],origin='lower')
    img2.axes.figure.colorbar(img2,cax=make_axes_locatable(img2.axes).append_axes("right",size="5%",pad=0.0))
    h13.get_xaxis().set_visible(False)
    h13.get_yaxis().set_visible(False)
    h13.text(0.02,0.9,r'$\beta_{g}$ (g/3.6$\mu$m)',color='black',fontsize=18,transform=h13.transAxes,fontweight='bold')
    i1_seg[np.where(i1_seg > 2)]=2  # collapse all other source IDs for display
    h21.imshow(i1_seg[int(j)-thumb:int(j)+thumb,int(i)-thumb:int(i)+thumb],cmap=cmap2,origin='lower')
    h21.get_xaxis().set_visible(False)
    h21.get_yaxis().set_visible(False)
    h21.text(0.02,0.95,'Segmentation',fontsize=15,transform=h21.transAxes,fontweight='bold')
    h21.text(0.02,0.90,'& Mask out',fontsize=15,transform=h21.transAxes,fontweight='bold')
    h22.imshow(betav_seg[int(j)-thumb:int(j)+thumb,int(i)-thumb:int(i)+thumb],cmap=cmap,clim=[0,2],origin='lower')
    h22.get_xaxis().set_visible(False)
    h22.get_yaxis().set_visible(False)
    h22.text(0.02,0.95,'S/N > 3',fontsize=15,transform=h22.transAxes,fontweight='bold')
    # NOTE(review): when bv0 == -1 (T>=9) the log10 below is of a negative number -> NaNs/warnings;
    # only the plotting further down is guarded by `if bv0 != -1`.
    dust_map = 2.5*np.log10(bv0/betav_seg)
    dust_map[np.where(betav_seg > bv0)] = 0.0 # assign A_V = 0 for bv > bv0
    ########## surface profile measure #############
    # NOTE(review): isophote() mutates the slice it is given in place (the view becomes an annulus-ID map).
    fits_anul, rads, iso_mean, iso_med, iso_std, tot_mean, tot_med = isophote(V_seg[int(j)-thumb:int(j)+thumb,int(i)-thumb:int(i)+thumb],int(i),int(j),pa,boa,2,thumb,11)
    half_light = max(tot_med)/2.0
    hlr_idx = min(enumerate(np.abs(tot_med-half_light)),key=itemgetter(1))[0] # find half-light radius
    hlr3_idx = min(enumerate(np.abs(rads[hlr_idx]*3.0-rads)),key=itemgetter(1))[0] # find half-light radius*3.0
    h25 = fig.add_axes([0.843-0.3,0.09,0.1,0.1])  # inset: curve of growth + HLR marker
    h25.plot(rads,tot_med)
    h25.plot([rads[hlr_idx],rads[hlr_idx]],[0,max(tot_med)])
    if bv0 != -1:
        h23 = fig.add_axes([0.63,0.0,0.331,0.5])
        img3=h23.imshow(fits_anul,origin='lower')
        # img3.axes.figure.colorbar(img3,cax=make_axes_locatable(img3.axes).append_axes("right",size="5%",pad=0.0))
        # h23.text(0.02,0.95,'DUST MAP (Av)',fontsize=15,transform=h23.transAxes,fontweight='bold')
        # h23.text(0.02,0.90,r'2.5$\times$log('+'{:5.3f}'.format(bv0)+r'/$\beta_{V}$)',fontsize=15,transform=h23.transAxes,fontweight='bold')
        h23.get_xaxis().set_visible(False)
        h23.get_yaxis().set_visible(False)
        ellipse1 = Ellipse((thumb,thumb),rads[hlr_idx],rads[hlr_idx]*boa,pa+90,fill=False)
        ellipse2 = Ellipse((thumb,thumb),rads[hlr3_idx],rads[hlr3_idx]*boa,pa+90,fill=False)
        h23.add_artist(ellipse1)
        h23.add_artist(ellipse2)
        ######## surface betaV & A_V profile measure ##########
        if bv_fit:
            # NOTE(review): isophote() returns 7 values but only 6 names are unpacked here
            # (and in the AV_fit branch below) -- this raises ValueError; presumably the
            # leading annulus-map return should be discarded. TODO confirm and fix.
            rads, iso_mean, iso_med, iso_std, tot_mean, tot_med = isophote(betav_seg[int(j)-thumb:int(j)+thumb,int(i)-thumb:int(i)+thumb],int(i),int(j),pa,boa,2,thumb,11)
            np.savetxt(work_dir+'ellipse/scratch/'+name+'_bv.txt',(rads,iso_mean,iso_med,iso_std))
        if AV_fit:
            rads, iso_mean_map, iso_med_map, iso_std_map, tot_mean, tot_med = isophote(dust_map[int(j)-thumb:int(j)+thumb,int(i)-thumb:int(i)+thumb],int(i),int(j),pa,boa,2,thumb,11)
            # NOTE(review): this saves iso_mean/iso_med/iso_std from the *previous* call,
            # not the iso_*_map arrays just computed -- presumably a copy/paste slip.
            np.savetxt(work_dir+'ellipse/scratch/'+name+'_AV.txt',(rads,iso_mean,iso_med,iso_std))
            # A_V profiles for each of the 6 metallicity-dependent betaV zero points.
            rads, iso_mean, iso_med, iso_std, tot_mean, tot_med = isophote(2.5*np.log10(bv0Zs[0]/betav_seg[int(j)-thumb:int(j)+thumb,int(i)-thumb:int(i)+thumb]),int(i),int(j),pa,boa,2,thumb,11)
            np.savetxt(work_dir+'ellipse/scratch/'+name+'_AV1.txt',(rads,iso_mean,iso_med,iso_std))
            rads, iso_mean, iso_med, iso_std, tot_mean, tot_med = isophote(2.5*np.log10(bv0Zs[1]/betav_seg[int(j)-thumb:int(j)+thumb,int(i)-thumb:int(i)+thumb]),int(i),int(j),pa,boa,2,thumb,11)
            np.savetxt(work_dir+'ellipse/scratch/'+name+'_AV2.txt',(rads,iso_mean,iso_med,iso_std))
            rads, iso_mean, iso_med, iso_std, tot_mean, tot_med = isophote(2.5*np.log10(bv0Zs[2]/betav_seg[int(j)-thumb:int(j)+thumb,int(i)-thumb:int(i)+thumb]),int(i),int(j),pa,boa,2,thumb,11)
            np.savetxt(work_dir+'ellipse/scratch/'+name+'_AV3.txt',(rads,iso_mean,iso_med,iso_std))
            rads, iso_mean, iso_med, iso_std, tot_mean, tot_med = isophote(2.5*np.log10(bv0Zs[3]/betav_seg[int(j)-thumb:int(j)+thumb,int(i)-thumb:int(i)+thumb]),int(i),int(j),pa,boa,2,thumb,11)
            np.savetxt(work_dir+'ellipse/scratch/'+name+'_AV4.txt',(rads,iso_mean,iso_med,iso_std))
            rads, iso_mean, iso_med, iso_std, tot_mean, tot_med = isophote(2.5*np.log10(bv0Zs[4]/betav_seg[int(j)-thumb:int(j)+thumb,int(i)-thumb:int(i)+thumb]),int(i),int(j),pa,boa,2,thumb,11)
            np.savetxt(work_dir+'ellipse/scratch/'+name+'_AV5.txt',(rads,iso_mean,iso_med,iso_std))
            rads, iso_mean, iso_med, iso_std, tot_mean, tot_med = isophote(2.5*np.log10(bv0Zs[5]/betav_seg[int(j)-thumb:int(j)+thumb,int(i)-thumb:int(i)+thumb]),int(i),int(j),pa,boa,2,thumb,11)
            np.savetxt(work_dir+'ellipse/scratch/'+name+'_AV6.txt',(rads,iso_mean,iso_med,iso_std))
            h24 = fig.add_axes([0.843,0.39,0.1,0.1])  # inset: measured vs reference A_V profile
            h24.plot(rads/rads[hlr_idx],iso_med_map,color='red')
            h24.fill_between(rads/rads[hlr_idx],iso_med_map-iso_std_map,iso_med_map+iso_std_map,color='red',alpha=0.3)
            h24.plot(np.arange(0.,3.,0.1),AV_prof[1:],color='black',alpha=0.5)
            h24.fill_between(np.arange(0.,3.,0.1),AV_prof[1:]-AV_prof[0],AV_prof[1:]+AV_prof[0],alpha=0.2,color='black')
            plt.xticks([1,3])
            h24.set(xlabel='HLR',ylabel='Av')
    else:
        # No betaV zero point for this T-type: placeholder panel instead of a dust map.
        h23 = fig.add_axes([0.63,0.0075,0.331,0.485])
        h23.get_yaxis().set_visible(False)
        h23.get_xaxis().set_visible(False)
        h23.text(0.02,0.9,'No '+r'$\beta_{V,0}$ is available for this T-type',size=15)
    fig.savefig(work_dir+'pdf/'+name+'.tex.pdf')
    plt.close(fig)
    # Copy into the publication figure set: odd panels -> 'b', even -> 'a'.
    if int((sha2fig[x]-1)%2):
        copyfile(work_dir+'pdf/'+name+'.tex.pdf','/Users/dhk/Documents/publish/ngcic/figset2_'+str(int((sha2fig[x]-1)/2))+'b_test.pdf')
    else:
        copyfile(work_dir+'pdf/'+name+'.tex.pdf','/Users/dhk/Documents/publish/ngcic/figset2_'+str(int((sha2fig[x]-1)/2))+'a_test.pdf')
#np.save('betav_ttype.npy',[betav_257,betav_257_std,ttype_257,ttype_257_err,betav_257_center,betav_257_center_std])
|
import torch
import torchvision
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
from nvidia.dali.plugin.pytorch import DALIClassificationIterator, LastBatchPolicy
import nvidia.dali.types as types
class HybridPipelineTrain(Pipeline):
    """GPU-accelerated DALI training pipeline.

    Reads labelled JPEGs from ``images_directory``, decodes them on the GPU
    ("mixed" decoding), resizes to ``output_size``, applies a random
    horizontal flip, and normalizes with mean/std 0.5 in HWC layout.
    """

    def __init__(self, batch_size, output_size, num_threads, device_id, images_directory):
        super(HybridPipelineTrain, self).__init__(batch_size, num_threads, device_id, seed=12)
        # Shuffled file reader; initial_fill bounds the shuffle buffer size.
        self.input = ops.FileReader(file_root=images_directory, random_shuffle=True, initial_fill=21)
        # "mixed" = CPU bitstream parsing + GPU decoding.
        self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
        self.cmn = ops.CropMirrorNormalize(
            device="gpu",
            dtype=types.FLOAT,
            mean=[0.5, 0.5, 0.5],
            std=[0.5, 0.5, 0.5],
            output_layout="HWC",
        )
        self.coin = ops.random.CoinFlip(probability=0.5)
        self.flip = ops.Flip(device="gpu")
        self.rsz = ops.Resize(resize_x=output_size[0], resize_y=output_size[1], device="gpu")

    def define_graph(self):
        files, labels = self.input(name="Reader")
        decoded = self.decode(files)
        resized = self.rsz(decoded)
        flipped = self.flip(resized, horizontal=self.coin())
        normalized = self.cmn(flipped)
        # The image tensors live on the GPU at this point.
        return (normalized, labels)
# Create the dataloader
def DataLoader(DATA_SIZE, batch_size, output_size, train_data_path, nvidiadali=False):
    """Build a training data loader over an ImageFolder-style directory.

    Parameters
    ----------
    DATA_SIZE : int
        Total number of training samples.  Currently unused; kept for
        backward compatibility with existing callers.
    batch_size : int
        Samples per batch.
    output_size : (int, int)
        Target (width, height) images are resized to.
    train_data_path : str
        Root directory with one sub-folder per class.
    nvidiadali : bool, optional
        If True, use the GPU NVIDIA-DALI pipeline; otherwise a plain
        torchvision/torch DataLoader.

    Returns
    -------
    An iterable yielding (images, labels) batches.
    """
    if nvidiadali:
        pipe = HybridPipelineTrain(batch_size=batch_size, output_size=output_size, num_threads=2, device_id=0, images_directory=train_data_path)
        pipe.build()
        train_data_loader = DALIClassificationIterator([pipe], reader_name="Reader", last_batch_policy=LastBatchPolicy.PARTIAL, auto_reset=True)
    else:
        img_transforms = torchvision.transforms.Compose([
            torchvision.transforms.Resize(output_size),
            torchvision.transforms.RandomHorizontalFlip(),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])
        dataset = torchvision.datasets.ImageFolder(root=train_data_path, transform=img_transforms)
        # BUGFIX: shuffle=True for parity with the DALI branch's random_shuffle=True;
        # without it the torch path iterated the training set in a fixed order.
        train_data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=2)
    return train_data_loader
__author__ = 'chira'


def module_function():
    """Demo helper: print a message identifying its source module."""
    message = 'this is a function from Module_import.py'
    print(message)
import abc
import numpy as np
import tensorflow as tf
from tf_v2_support import placeholder
from trainer import tf_module
class ExplainPredictor:
    """Token-level explanation heads built on top of a sequence encoding.

    One head per tag is created under variable scopes "explain"/"ex_{i}".
    ``modeling_option == "co"`` builds single-logit heads whose raw logits
    serve as scores; any other value builds 2-way softmax heads whose
    class-1 probabilities serve as scores.
    """

    def __init__(self, num_tags, sequence_output, modeling_option="default"):
        self.num_tags = num_tags
        # assumed shape: [batch, seq_len, hidden] -- TODO confirm against encoder
        self.sequence_output = sequence_output
        if modeling_option == "co":
            self.modeling_co()
        else:
            self.modeling_ce()

    def modeling_ce(self):
        """2-way softmax head; score = P(token belongs to class 1)."""
        self.ex_probs = []
        self.ex_logits = []
        with tf.variable_scope("explain"):
            for idx in range(self.num_tags):
                scope = "ex_{}".format(idx)
                with tf.variable_scope(scope):
                    logits = tf.layers.dense(self.sequence_output, 2, name=scope)
                    self.ex_logits.append(logits)
                    self.ex_probs.append(tf.nn.softmax(logits)[:, :, 1])

    def modeling_co(self):
        """Single-logit head; the squeezed raw logit is the score."""
        self.ex_probs = []
        self.ex_logits = []
        with tf.variable_scope("explain"):
            for idx in range(self.num_tags):
                scope = "ex_{}".format(idx)
                with tf.variable_scope(scope):
                    logits = tf.layers.dense(self.sequence_output, 1, name=scope)
                    squeezed = tf.squeeze(logits, axis=2)
                    self.ex_logits.append(squeezed)
                    self.ex_probs.append(squeezed)

    def get_score(self):
        """Return the list of per-tag score tensors."""
        return self.ex_probs
class ExplainModelingInterface(abc.ABC):
    """Contract for explanation-modeling implementations."""

    @abc.abstractmethod
    def get_losses(self):
        """Return one scalar loss tensor per tag."""
        ...

    @abc.abstractmethod
    def get_per_inst_losses(self):
        """Return per-instance (unreduced) loss tensors, one per tag."""
        ...

    @abc.abstractmethod
    def batch2feed_dict(self, labels):
        """Convert a batch (inputs + explanation labels) into a feed dict."""
        ...

    @abc.abstractmethod
    def get_scores(self):
        """Return the per-tag explanation score tensors."""
        ...

    @staticmethod
    @abc.abstractmethod
    def action_to_label(good_action):
        """Turn a reward/action indicator into (label array, validity flag)."""
        ...
class ExplainModeling(ExplainModelingInterface):
    """Base class that builds one explanation head per tag.

    Subclasses implement ``model_tag`` and return, per tag, a dict holding
    the placeholders ('labels', 'mask'), the logits, the score tensor and
    the (per-instance and reduced) losses.
    """

    def __init__(self, sequence_output, seq_max, num_tags, origin_batch2feed_dict):
        self.tag_info = []
        with tf.variable_scope("explain"):
            for tag_idx in range(num_tags):
                info = self.model_tag(sequence_output, seq_max, "ex_{}".format(tag_idx))
                self.tag_info.append(info)
        self.origin_batch2feed_dict = origin_batch2feed_dict

    @abc.abstractmethod
    def model_tag(self, sequence_output, seq_max, var_name):
        """Build one head; return its labels/mask/ex_logits/score/losses/loss dict."""
        ...

    def feed_ex_batch(self, labels):
        """Map the flat [labels0, mask0, labels1, mask1, ...] list onto placeholders."""
        feed = {}
        for idx, info in enumerate(self.tag_info):
            feed[info['labels']] = labels[2 * idx]
            feed[info['mask']] = labels[2 * idx + 1]
        return feed

    def get_losses(self):
        return [info['loss'] for info in self.tag_info]

    # return : List[ [300], [300], [300] ] -- presumably per-token scores; confirm shape
    def get_scores(self):
        return [info['score'] for info in self.tag_info]

    def get_per_inst_losses(self):
        return [info['losses'] for info in self.tag_info]

    def get_loss(self):
        """Mean of the per-tag losses."""
        return tf.reduce_mean(self.get_losses())

    def batch2feed_dict(self, batch):
        # First four entries are the original model inputs; the rest are
        # the explanation labels/masks.
        x0, x1, x2, y = batch[:4]
        ex_labels = batch[4:]
        feed_dict = self.origin_batch2feed_dict((x0, x1, x2, y))
        feed_dict.update(self.feed_ex_batch(ex_labels))
        return feed_dict

    def get_ex_scores(self, label_idx):
        """Score tensor for a single tag."""
        return self.get_scores()[label_idx]
class CorrelationModeling(ExplainModeling):
    """Explanation head trained with a correlation-coefficient loss."""

    def __init__(self, sequence_output, seq_max, num_tags, origin_batch2feed_dict):
        super(CorrelationModeling, self).__init__(
            sequence_output, seq_max, num_tags, origin_batch2feed_dict)

    def model_tag(self, sequence_output, seq_max, var_name):
        """One single-logit head; raw logits double as the score."""
        ex_labels = placeholder(tf.float32, [None, seq_max])
        valid_mask = placeholder(tf.float32, [None, 1])
        with tf.variable_scope(var_name):
            raw = tf.layers.dense(sequence_output, 1, name=var_name)
            ex_logits = tf.reshape(raw, [-1, seq_max])
            # Binarize labels, then train logits to anti-correlate with them.
            binary = tf.cast(tf.greater(ex_labels, 0), tf.float32)
            losses = tf_module.correlation_coefficient_loss(ex_logits, -binary)
            losses = valid_mask * losses
            loss = tf.reduce_mean(losses)
        return {
            'labels': ex_labels,
            'mask': valid_mask,
            'ex_logits': ex_logits,
            'score': ex_logits,
            'losses': losses,
            'loss': loss,
        }

    @staticmethod
    def action_to_label(good_action):
        """Rewarded tokens get weight -1 (+0.1 baseline); mark sample valid."""
        indicator = np.int_(good_action)
        loss_mask = np.ones_like(indicator) * 0.1 - indicator
        return loss_mask, [1]

    @staticmethod
    def get_null_label(any_action):
        """Placeholder label with validity flag 0 (ignored by the mask)."""
        return np.int_(any_action), [0]
class CrossEntropyModeling(ExplainModeling):
    """Explanation head trained with per-token softmax cross-entropy."""

    def __init__(self, sequence_output, seq_max, num_tags, origin_batch2feed_dict):
        super(CrossEntropyModeling, self).__init__(
            sequence_output, seq_max, num_tags, origin_batch2feed_dict)

    def model_tag(self, sequence_output, seq_max, var_name):
        """One 2-way head; score is the class-1 probability per token."""
        ex_label = placeholder(tf.int32, [None, seq_max])
        valid_mask = placeholder(tf.float32, [None, 1])
        with tf.variable_scope(var_name):
            ex_logits = tf.layers.dense(sequence_output, 2, name=var_name)
            ex_prob = tf.nn.softmax(ex_logits)[:, :, 1]
            ce = tf.losses.softmax_cross_entropy(
                onehot_labels=tf.one_hot(ex_label, 2), logits=ex_logits)
            losses = valid_mask * ce
            loss = tf.reduce_mean(losses)
        return {
            'labels': ex_label,
            'mask': valid_mask,
            'ex_logits': ex_logits,
            'score': ex_prob,
            'losses': losses,
            'loss': loss,
        }

    @staticmethod
    def action_to_label(good_action):
        """Rewarded tokens become positive labels; mark sample valid."""
        return np.int_(good_action), [1]

    @staticmethod
    def get_null_label(any_action):
        """Placeholder label with validity flag 0 (ignored by the mask)."""
        return np.int_(any_action), [0]
def Articles():
    """Return a hard-coded list of dummy article records.

    Each record is a dict with 'id', 'title', 'body', 'author' and
    'create_date' keys; all three share the same creation date.
    """
    shared_date = '14-12-2018'

    def _article(article_id, title, body, author):
        # Assemble one article record with the shared creation date.
        return {
            'id': article_id,
            'title': title,
            'body': body,
            'author': author,
            'create_date': shared_date,
        }

    return [
        _article(1, 'Article One', 'This article is about the Ashes 2005.', 'Chiranth'),
        _article(2, 'Article Two', 'This article is about the Wimbeldon 2003.', 'Vishak'),
        _article(3, 'Article Three', 'This article is about the FIFA WC 2010.', 'Tejas'),
    ]
from django.test import TestCase
from rest_framework.authtoken.models import Token
from django.test import Client
# self.user = Usuario.objects.create_user(
# nome='test',
# email='test@email.com',
# password='test',
# )
# token, created = Token.objects.get_or_create(user=self.user)
# self.client = Client(HTTP_AUTHORIZATION='Token ' + token.key) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.