text string | size int64 | token_count int64 |
|---|---|---|
from lib.core.exceptions import AppException
class PathError(AppException):
    """
    Raised when user-supplied path input is invalid.
    """
class EmptyOutputError(AppException):
    """
    Raised when an operation produced no output to return.
    """
| 194 | 57 |
from zpy.api.resource import ZResource, HTTP_METHODS
class GreetingResource(ZResource):
    """REST resource returning a static greeting; only GET is permitted."""

    # Every verb except GET is rejected by the framework.
    blocked_methods = [
        HTTP_METHODS.POST,
        HTTP_METHODS.DELETE,
        HTTP_METHODS.PATCH,
        HTTP_METHODS.PUT,
    ]

    def __init__(self, **kwargs) -> None:
        super().__init__()

    def get(self):
        """Handle GET by returning a fixed greeting payload."""
        op_logger, op_context = super().new_operation()
        try:
            return self.success({"greeting": "hello world!"}, logger=op_logger)
        except Exception as exc:
            return self.handle_exceptions(exc, op_logger, op_context)
| 527 | 178 |
# coding: utf-8
import os
class PyMorphy(object):
    """Mixin exposing pymorphy dictionary locations below ``self.ROOT_PATH``.

    NOTE(review): ``ROOT_PATH`` is expected to be provided by the host
    class — confirm against the consumer.
    """

    @property
    def PYMORPHY_DICTS(self):
        # Build the mapping on access so ROOT_PATH may be set late.
        ru_dir = os.path.join(self.ROOT_PATH, 'data/pymorphy')
        return {'ru': {'dir': ru_dir}}
import re
def separa_palavras(frase):
    """Split *frase* on whitespace and return the list of words.

    The docstring of the original promised a returned list but the function
    only printed it (returning None); it now returns the list as documented.
    The console output is kept for backward compatibility.

    Args:
        frase: sentence to split.

    Returns:
        list[str]: the whitespace-separated words of *frase*.
    """
    palavras = frase.split()
    print('lista de palavras: ', palavras)
    return palavras
| 172 | 60 |
import logging
import random
from .base.camp import Camp
from .base.formation import Formation
from .game.janggi_game import JanggiGame
from .game.game_log import GameLog
from .ui.game_player import GamePlayer
from .ui.replay_viewer import ReplayViewer
from .proto import log_pb2
# Configure root logging for the package; DEBUG level is intentionally
# verbose so game/replay activity is fully traced.
logging.basicConfig()
logging.root.setLevel(logging.DEBUG)
def replay(filepath: str):
    """
    Replay a game by parsing the log file at the given path.

    Args:
        filepath (str): Path of the proto-serialized log file.
    """
    # Use a context manager so the file handle is closed even if parsing
    # raises (the original opened the file and never closed it).
    with open(filepath, "rb") as log_file:
        raw = log_file.read()
    log_proto = log_pb2.Log()
    log_proto.ParseFromString(raw)
    game_log = GameLog.from_proto(log_proto)
    game_log.generate_board_log()
    replay_viewer = ReplayViewer(game_log)
    replay_viewer.run()
def play(game: JanggiGame):
    """
    Run an interactive session of the given game via GamePlayer.

    Args:
        game (JanggiGame): Pre-initialized game to play.
    """
    GamePlayer(game).run()
def generate_random_game():
    """Build a JanggiGame with a random starting camp and random formations."""
    starting_camp = Camp(random.choice((-1, 1)))
    cho = Formation(random.randint(1, 4))
    han = Formation(random.randint(1, 4))
    return JanggiGame(starting_camp, cho, han)
| 1,253 | 423 |
from data_interface import Dataset, Data_Interface
from utils import functions as ufunc
import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np
import os
import rasterio as rio
import rasterio.mask as riom
import shapely
from IPython import embed
import sys
sys.path.append('/home/seba/Projects/swisssmartfarming')
# NOTE(review): machine-specific absolute paths below — adjust before reuse.
# Input rasters: RGB orthomosaic and NIR reflectance mosaic of the same flight.
rgb_path = ('/media/seba/Samsung_2TB/forest-project/qgis/gubler/rgb/'
    '20200626_flight2_blackfly_rgb_transparent_mosaic_group1.tif')
ms_path = ('/media/seba/Samsung_2TB/forest-project/qgis/gubler/nir/'
    '20200626_flight2_photonfocus_nir_transparent_reflectance_group1.tif')
# Shapefiles: per-tree mask polygons and the overall area boundary.
masks_path = ('/media/seba/Samsung_2TB/forest-project/qgis/gubler/shapes/'
    'trees.shp')
boundary_path = ('/media/seba/Samsung_2TB/forest-project/qgis/gubler/shapes/'
    'boundary.shp')
# Open the RGB raster and load the tree mask geometries.
dataset = rio.open(rgb_path)
shapefile = gpd.read_file(masks_path)
shapes = shapefile.geometry
# (img_mask, transf_mask) = riom.mask(dataset, shapes)
# img_mask = np.swapaxes(img_mask, 0, 2)
# plt.imshow(img_mask[:,:,0:3])
boundary = gpd.read_file(boundary_path)
tree_masks = gpd.read_file(masks_path)
# Wrap everything into the project's Dataset abstraction (note: rebinds the
# 'dataset' name that previously held the rasterio handle).
dataset = Dataset(
    name='gubler',
    date='20200626',
    rgb_path=rgb_path,
    ms_path=ms_path,
    mask_shapefile=tree_masks,
    outer_shapefile=boundary,
    rgb_bands_to_read=[0, 1, 2],
    ms_bands_to_read=None,
)
dataset = [dataset]
# Build the training interface with the label map and export image/mask pairs.
di_train = Data_Interface(dataset, {'tree': 1, 'car': 2})
img, msk = di_train.get_pair()
# plt.imshow(msk)
save_path = '/media/seba/Samsung_2TB/forest-project/qgis/gubler/train'
di_train.save(save_path=save_path)
# x1003_path = '/media/seba/Samsung_2TB/forest-project/qgis/gubler/train/masks/x1003_y1009.png'
# x1003 = ufunc.read_img2array(x1003_path)
| 1,762 | 755 |
from rest_framework.views import APIView
from rest_framework.response import Response
class HelloApiView(APIView):
    """Test API view demonstrating APIView features."""

    def get(self, request, format=None):
        """Return a list of APIView features.

        Bug fix: the original list was missing commas between the string
        literals, so Python's implicit string concatenation collapsed the
        four feature descriptions into a single element.
        """
        an_apiview = [
            'Uses HTTP methods as function (get,post, put, delete, patch)',
            'Is Similar to a traditional Django View',
            'Gives you the most control over your application logic',
            'Is mapped manually to the URLs',
        ]
        return Response({'message': 'Hello', 'an_apiview': an_apiview})
| 574 | 157 |
# "Database code" for the DB Forum.
import psycopg2
import bleach
# Name of the PostgreSQL database holding the forum posts.
DNAME = "forum"
#POSTS = [("This is the first post.", datetime.datetime.now())]
def get_posts():
    """Return all posts from the database, most recent first.

    Returns:
        list[tuple]: (content, time) rows ordered by time descending.
    """
    db = psycopg2.connect(database=DNAME)
    try:
        c = db.cursor()
        c.execute("select content, time from posts order by time desc")
        rows = c.fetchall()
    finally:
        # Ensure the connection is released even if the query raises
        # (the original leaked it on error).
        db.close()
    return rows
#def get_posts():
#"""Return all posts from the 'database', most recent first."""
#return reversed(POSTS)
def add_post(content):
    """Insert a post into the database, sanitizing it first.

    Args:
        content (str): Raw post body; HTML is cleaned with bleach before
            insertion. The timestamp column is assumed to default in the
            schema — TODO confirm.
    """
    db = psycopg2.connect(database=DNAME)
    try:
        c = db.cursor()
        # Parameterized query + bleach.clean guards against SQL injection
        # and stored XSS from untrusted input.
        c.execute("INSERT INTO posts values (%s) ", (bleach.clean(content),))
        db.commit()
    finally:
        # Close even when execute/commit raises (original leaked on error).
        db.close()
#def add_post(content):
#"""Add a post to the 'database' with the current timestamp."""
#POSTS.append((content, datetime.datetime.now()))
| 911 | 310 |
#!/usr/bin/env python3
#
# IP: HILICS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import Tkinter as tk
from Tkinter.font import Font
except ImportError:
import tkinter as tk
from tkinter.font import Font
from PIL import Image, ImageTk
import threading
import time
from sims.DoorSim import DoorSim
from widgets.AlarmCircle import AlarmCircle
class Door(tk.Frame):
    """Tk UI simulating an industrial door: buttons, limit switches, motor
    indicators and alarm circles, all driven by a DoorSim backend.

    Two daemon threads run beside the Tk mainloop: sim_thread steps the
    simulator and worker_thread refreshes the canvas items from its state.
    """
    def __init__(self, master=None, default_bg=None, default_fg=None, width=800, height=480):
        self.master = master
        super().__init__(master)
        self.default_bg = default_bg
        self.default_fg = default_fg
        # Palette: orange = asserted/high signal, blue = deasserted/low.
        self.high_color = '#EC7600'
        self.low_color = '#678CB1'
        self.alarm_color = '#C00000'
        self.door_color = '#152020'
        self.green_color = '#93C763'
        self.default_width = width
        self.default_height = height
        self.door_pos = 0
        # Canvas rectangles drawn for the door slats (created lazily).
        self.panels = []
        self.doorsim = DoorSim()
        self.pack()
        self.create_widgets()
        self.running = True
        # UI refresh thread.
        self.thread = threading.Thread(target=self.worker_thread)
        self.thread.setDaemon(True)
        self.thread.start()
        # Simulation stepping thread.
        self.thread2 = threading.Thread(target=self.sim_thread)
        self.thread2.setDaemon(True)
        self.thread2.start()
    def clean_up(self):
        """Stop both threads, close the simulator and destroy the window."""
        self.running = False
        self.thread.join(1.0)
        self.thread2.join(1.0)
        self.doorsim.close()
        self.master.destroy()
    def config_bg(self, wid):
        # Apply the shared background colour when one was provided.
        if not self.default_bg is None:
            wid['bg'] = self.default_bg
    def config_fg(self, wid):
        # Apply the shared foreground colour when one was provided.
        if not self.default_fg is None:
            wid['fg'] = self.default_fg
    def config_frame(self, frame):
        """Apply the common frame border/propagation settings."""
        frame['borderwidth'] = 1
        frame['relief'] = tk.RIDGE
        frame.pack_propagate(0)
        frame.grid_propagate(0)
        self.config_bg(frame)
    def config_btn(self, btn):
        """Apply the common button font/size/colour settings."""
        btn['font'] = Font(root=self.master, family='Helvetica', size=18)
        btn['width'] = 8
        btn['height'] = 2
        btn['activebackground'] = self.default_bg
        btn['activeforeground'] = self.default_fg
        btn['bd'] = 0
        btn['highlightthickness'] = 1
        btn['relief'] = 'ridge'
        self.config_bg(btn)
        self.config_fg(btn)
    def config_label(self, lab):
        self.config_bg(lab)
        self.config_fg(lab)
    def sim_thread(self):
        """Step the door simulator ~100x/s until self.running is cleared."""
        while self.running:
            try:
                self.doorsim.update()
                time.sleep(0.01)
            except Exception as e:
                print(e)
    def worker_thread(self):
        """Refresh all canvas widgets from simulator state ~100x/s."""
        while self.running:
            try:
                self.update_buttons()
                self.update_indicators()
                self.update_motor()
                self.update_alarms()
                self.update_door(int(self.doorsim.doorpos))
                self.update_switches()
                time.sleep(0.01)
            except Exception as e:
                print(e)
    def update_alarms(self):
        # Push the three alarm flags into their blink circles.
        self.top_crash_alarm.update(self.doorsim.top_alarm)
        self.btm_crash_alarm.update(self.doorsim.btm_alarm)
        self.motor_alarm.update(self.doorsim.motor_alarm)
    def update_switches(self):
        """Recolour the limit/prox/impact switch markers; show/hide the car
        and explosion images to match the prox and impact inputs."""
        if self.doorsim.open_switch:
            self.canvas.itemconfig(self.open_switch, fill=self.high_color, outline=self.high_color)
        else:
            self.canvas.itemconfig(self.open_switch, fill=self.low_color , outline=self.low_color )
        if self.doorsim.closed_switch:
            self.canvas.itemconfig(self.close_switch, fill=self.high_color, outline=self.high_color)
        else:
            self.canvas.itemconfig(self.close_switch, fill=self.low_color , outline=self.low_color)
        if self.doorsim.prox_switch:
            self.canvas.itemconfig(self.prox_switch, fill=self.high_color, outline=self.high_color)
            self.canvas.itemconfig(self.car, state='normal')
        else:
            self.canvas.itemconfig(self.prox_switch, fill=self.low_color , outline=self.low_color)
            self.canvas.itemconfig(self.car, state='hidden')
        if self.doorsim.impact_switch:
            self.canvas.itemconfig(self.impact_switch, fill=self.high_color, outline=self.high_color)
            self.canvas.itemconfig(self.explosion, state='normal')
        else:
            self.canvas.itemconfig(self.impact_switch, fill=self.low_color , outline=self.low_color)
            self.canvas.itemconfig(self.explosion, state='hidden')
    def update_motor(self):
        # Highlight whichever motor direction output is energized.
        if self.doorsim.motor_up:
            self.canvas.itemconfig(self.motor_up, fill=self.high_color, outline=self.high_color)
        else:
            self.canvas.itemconfig(self.motor_up, fill=self.low_color , outline=self.low_color )
        if self.doorsim.motor_down:
            self.canvas.itemconfig(self.motor_down, fill=self.high_color, outline=self.high_color)
        else:
            self.canvas.itemconfig(self.motor_down, fill=self.low_color , outline=self.low_color )
    def update_buttons(self):
        # Recolour the three push buttons to show their latched state.
        if self.doorsim.open_btn:
            self.canvas.itemconfig(self.open_btn, fill=self.high_color, outline=self.high_color)
        else:
            self.canvas.itemconfig(self.open_btn, fill=self.low_color , outline=self.low_color )
        if self.doorsim.close_btn:
            self.canvas.itemconfig(self.close_btn, fill=self.high_color, outline=self.high_color)
        else:
            self.canvas.itemconfig(self.close_btn, fill=self.low_color , outline=self.low_color )
        if self.doorsim.stop_btn:
            self.canvas.itemconfig(self.stop_btn, fill=self.high_color, outline=self.high_color)
        else:
            self.canvas.itemconfig(self.stop_btn, fill=self.low_color , outline=self.low_color )
    def update_indicators(self):
        # Recolour the OPEN/CLOSE/AJAR indicator lamps.
        if self.doorsim.open_ind:
            self.canvas.itemconfig(self.open_ind, fill=self.high_color, outline=self.high_color)
        else:
            self.canvas.itemconfig(self.open_ind, fill=self.low_color , outline=self.low_color )
        if self.doorsim.closed_ind:
            self.canvas.itemconfig(self.closed_ind, fill=self.high_color, outline=self.high_color)
        else:
            self.canvas.itemconfig(self.closed_ind, fill=self.low_color , outline=self.low_color )
        if self.doorsim.ajar_ind:
            self.canvas.itemconfig(self.ajar_ind, fill=self.high_color, outline=self.high_color)
        else:
            self.canvas.itemconfig(self.ajar_ind, fill=self.low_color , outline=self.low_color )
    def update_door(self, pos):
        """Redraw the door slats for position *pos* (0=open .. 100=closed,
        judging by the drawing code — TODO confirm orientation)."""
        # Dead zone at top of door (stops from opening completely)
        #if pos < 10:
        #    pos = 10
        if not self.door_pos == pos:
            self.door_pos = pos # 0 - 100
            panel_height = 75 # pixels
            # Rescale so the drawn range is 10..100 (keeps a visible slat).
            pos = int(pos * 0.9) + 10.0
            coords = self.canvas.coords(self.door_rect)
            startx = coords[0] + 2
            starty = coords[1] + 2
            endx = coords[2] - 2
            endy = coords[3] - 2
            # Lazily create the slat rectangles the first time through.
            if len(self.panels) < 1:
                self.canvas.tag_raise(self.car)
                num_panels = 1 + int((endy - starty) / panel_height)
                for __ in range(0, num_panels):
                    p = self.canvas.create_rectangle(0, 0, 1, 1, outline=self.door_color, fill=self.door_color, state='hidden')
                    self.panels.append(p)
                self.canvas.tag_raise(self.explosion)
                #print(self.panels)
            # Stack slats upward from the current door edge; hide the rest.
            next_panel_endy = starty + int(((endy - starty) * pos) / 100)
            for p in self.panels:
                if next_panel_endy < starty:
                    self.canvas.itemconfig(p, state='hidden')
                else:
                    sy = next_panel_endy - panel_height
                    ey = next_panel_endy
                    next_panel_endy = sy - 2
                    if sy < starty:
                        sy = starty
                    self.canvas.coords(p, startx, sy, endx, ey)
                    self.canvas.itemconfig(p, state='normal')
            #self.canvas.pack()
    def open_btn_click(self, event):
        # Toggle the latched OPEN input on the simulator.
        self.doorsim.open_btn = not self.doorsim.open_btn
    def close_btn_click(self, event):
        # Toggle the latched CLOSE input on the simulator.
        self.doorsim.close_btn = not self.doorsim.close_btn
    def stop_btn_click(self, event):
        # Toggle the latched STOP input on the simulator.
        self.doorsim.stop_btn = not self.doorsim.stop_btn
    def round_rectangle(self, canvas, x1, y1, x2, y2, radius=25, **kwargs):
        """Draw a rounded rectangle as a smoothed polygon; the duplicated
        points pin the straight segments so only the corners curve."""
        points = [x1+radius, y1,
                  x1+radius, y1,
                  x2-radius, y1,
                  x2-radius, y1,
                  x2, y1,
                  x2, y1+radius,
                  x2, y1+radius,
                  x2, y2-radius,
                  x2, y2-radius,
                  x2, y2,
                  x2-radius, y2,
                  x2-radius, y2,
                  x1+radius, y2,
                  x1+radius, y2,
                  x1, y2,
                  x1, y2-radius,
                  x1, y2-radius,
                  x1, y1+radius,
                  x1, y1+radius,
                  x1, y1]
        # kwargs is passed positionally as Tk's 'cnf' option dictionary.
        return canvas.create_polygon(points, kwargs, smooth=True)
    def setup_frame1(self):
        """Build the main canvas: door frame, switches, motor, button and
        indicator panels, car trigger button."""
        frame = tk.Frame(self, width=800, height=400)
        self.config_frame(frame)
        frame.grid(row = 0, column=0, columnspan=1, rowspan=1)
        # lab = tk.Label(frame, text='Door', font=("Helvetica", 16))
        # self.config_label(lab)
        # lab.grid(column=1, row=0, columnspan=1, pady=10)
        # frame.grid_columnconfigure(0, weight=1)
        # frame.grid_columnconfigure(2, weight=1)
        self.canvas = tk.Canvas(frame, width=800, height=400, bd=0, highlightthickness=0, relief='ridge')
        self.config_bg(self.canvas)
        ########## Door Frame ##########
        width = 300
        height = 300
        sx = (800 - width) / 2
        sy = (400 - height) / 2
        ex = sx + width
        ey = sy + height
        coords = [sx, sy, ex, ey]
        self.door_rect = self.canvas.create_rectangle(coords[0], coords[1], coords[2], coords[3], outline=self.default_fg, fill=self.default_fg)
        font = 'Helvetica 16 bold'
        r = 15
        self.btm_crash_alarm = AlarmCircle(self.canvas, sx + 100, ey + 25, r, self.alarm_color, self.default_bg, 'CRASH!', font)
        self.top_crash_alarm = AlarmCircle(self.canvas, sx + 100, sy - 25, r, self.alarm_color, self.default_bg, 'CRASH!', font)
        x = sx + (ex - sx) / 2
        y = ey - 10
        # Explosion/car images anchored at the door's bottom centre; kept as
        # attributes so Tk's PhotoImage is not garbage-collected.
        img = Image.open('images/explosion.png')
        img.thumbnail((250, 250), Image.ANTIALIAS)
        self.explosion_img_junk = ImageTk.PhotoImage(img)
        self.explosion = self.canvas.create_image(x, y, anchor='s', image=self.explosion_img_junk)
        img = Image.open('images/Car1.png')
        img.thumbnail((250, 250), Image.ANTIALIAS)
        self.car_img_junk = ImageTk.PhotoImage(img)
        self.car = self.canvas.create_image(x, y, anchor='s', image=self.car_img_junk)
        ########## Limit Switches ##########
        sx = coords[0] - 30
        sy = coords[1] + 20
        ex = sx + 20
        ey = sy + 20
        self.open_switch = self.canvas.create_rectangle(sx, sy, ex, ey, outline=self.default_fg, fill=self.low_color)
        self.canvas.create_text(sx+10, sy-8, anchor='c', text = 'I:0/0', fill=self.default_fg)
        self.canvas.create_text(sx-5, sy+10, anchor='e', text = 'Limit', font=("Helvetica", 10), fill=self.default_fg)
        sx = coords[0] - 30
        sy = coords[3] - 40
        ex = sx + 20
        ey = sy + 20
        self.close_switch = self.canvas.create_rectangle(sx, sy, ex, ey, outline=self.default_fg, fill=self.low_color)
        self.canvas.create_text(sx+10, sy-8, anchor='c', text = 'I:0/1', fill=self.default_fg)
        self.canvas.create_text(sx-5, sy+10, anchor='e', text = 'Limit', font=("Helvetica", 10), fill=self.default_fg)
        sx = coords[2] + 10
        sy = coords[3] - (height / 2) - 10
        ex = sx + 20
        ey = sy + 20
        self.impact_switch = self.canvas.create_rectangle(sx, sy, ex, ey, outline=self.low_color, fill=self.low_color)
        self.canvas.create_text(sx+10, sy-8, anchor='c', text = 'I:0/6', fill=self.default_fg)
        self.canvas.create_text(ex+5, sy+10, anchor='w', text = 'Impact', font=("Helvetica", 10), fill=self.default_fg)
        sx = coords[2] + 10
        sy = coords[3] - 40
        ex = sx + 20
        ey = sy + 20
        self.prox_switch = self.canvas.create_rectangle(sx, sy, ex, ey, outline=self.low_color, fill=self.low_color)
        self.canvas.create_text(sx+10, sy-8, anchor='c', text = 'I:0/5', fill=self.default_fg)
        self.canvas.create_text(ex+5, sy+10, anchor='w', text = 'Proximity', font=("Helvetica", 10), fill=self.default_fg)
        ########## Motor Indicators ##########
        sx = coords[2] + 75
        sy = coords[1] + 10
        ex = sx + 50
        ey = sy + 40
        self.motor_up = self.canvas.create_rectangle(sx, sy, ex, ey, outline=self.default_fg, fill=self.low_color)
        self.motor_alarm = AlarmCircle(self.canvas, sx, ey + 25, r, self.alarm_color, self.default_bg, 'ALARM!', font)
        m = sx + (ex - sx) / 2
        offset = 9
        # Chevron glyphs: up-arrow on motor_up, down-arrow on motor_down.
        self.canvas.create_line(sx+offset, ey-offset, m, sy+offset, fill=self.default_bg, width=5)
        self.canvas.create_line(m, sy+offset, ex-offset, ey-offset, fill=self.default_bg, width=5)
        self.canvas.create_text(sx-5, sy+20, anchor='e', text = 'O:0/0', fill=self.default_fg)
        sx = sx + 52
        ex = sx + 50
        ey = sy + 40
        self.motor_down = self.canvas.create_rectangle(sx, sy, ex, ey, outline=self.default_fg, fill=self.low_color)
        m = sx + (ex - sx) / 2
        self.canvas.create_line(sx+offset, sy+offset, m, ey-offset, fill=self.default_bg, width=5)
        self.canvas.create_line(m, ey-offset, ex-offset, sy+offset, fill=self.default_bg, width=5)
        self.canvas.create_text(ex+5, sy+20, anchor='w', text = 'O:0/1', fill=self.default_fg)
        self.canvas.create_text(sx-1, sy-15, anchor='c', text = 'Motor', font=("Helvetica", 14), fill=self.default_fg)
        ########## Button Panel ##########
        ht = 200
        wd = 125
        sx = 10
        sy = 380 - ht
        ex = sx + wd
        ey = sy + ht
        #rect = self.canvas.create_rectangle(sx, sy, ex, ey, outline=self.default_fg, fill=self.default_fg)
        self.round_rectangle(self.canvas, sx, sy, ex, ey, radius=50, outline=self.default_fg, fill=self.default_fg)
        x = sx + ((ex - sx) / 2)
        y = sy + 20
        self.canvas.create_text(x, y, anchor='c', text = 'Buttons', font=("Helvetica", 14), fill=self.default_bg)
        r = 20
        x = sx + ((ex - sx) / 2)
        y = sy + 15 + (1 * (ey - sy) / 4)
        self.open_btn = self.round_rectangle(self.canvas, x-r, y-r, x+r, y+r, radius=20, outline=self.default_fg, fill=self.low_color)
        #self.open_btn = self.canvas.create_oval(x-r, y-r, x+r, y+r, outline=self.default_fg, fill=self.low_color)
        self.canvas.tag_bind(self.open_btn, '<Button-1>', self.open_btn_click)
        self.canvas.create_text(x - r - 2, y, anchor='e', text = 'I:0/2', fill=self.default_bg)
        self.canvas.create_text(x + r + 2, y, anchor='w', text = 'OPEN', fill=self.default_bg)
        x = sx + ((ex - sx) / 2)
        y = sy + 15 + (2 * (ey - sy) / 4)
        self.close_btn = self.round_rectangle(self.canvas, x-r, y-r, x+r, y+r, radius=20, outline=self.default_fg, fill=self.low_color)
        self.canvas.tag_bind(self.close_btn, '<Button-1>', self.close_btn_click)
        self.canvas.create_text(x - r - 2, y, anchor='e', text = 'I:0/3', fill=self.default_bg)
        self.canvas.create_text(x + r + 2, y, anchor='w', text = 'CLOSE', fill=self.default_bg)
        x = sx + ((ex - sx) / 2)
        y = sy + 15 + (3 * (ey - sy) / 4)
        self.stop_btn = self.round_rectangle(self.canvas, x-r, y-r, x+r, y+r, radius=20, outline=self.default_fg, fill=self.low_color)
        self.canvas.tag_bind(self.stop_btn, '<Button-1>', self.stop_btn_click)
        self.canvas.create_text(x - r - 2, y, anchor='e', text = 'I:0/4', fill=self.default_bg)
        self.canvas.create_text(x + r + 2, y, anchor='w', text = 'STOP', fill=self.default_bg)
        ########## Car Button ##########
        w = 65
        h = 40
        x = (sx + ((ex - sx) / 2)) - w/2
        y = sy - h - h
        # Green "Car" button triggers the simulated car approach.
        rect = self.round_rectangle(self.canvas, x, y, x+w, y+h, radius=20, outline=self.green_color, fill=self.green_color)
        lab = self.canvas.create_text(x + w/2, y+h/2, anchor='c', text = 'Car', fill=self.default_bg, font='Helvetica 12 bold')
        self.canvas.tag_bind(rect, '<Button-1>', self.doorsim.begin_car)
        self.canvas.tag_bind(lab, '<Button-1>', self.doorsim.begin_car)
        ########## Indicator Panel ##########
        ht = 200
        wd = 125
        sx = 790 - wd
        sy = 380 - ht
        ex = sx + wd
        ey = sy + ht
        #rect = self.canvas.create_rectangle(sx, sy, ex, ey, outline=self.default_fg, fill=self.default_fg)
        self.round_rectangle(self.canvas, sx, sy, ex, ey, radius=50, outline=self.default_fg, fill=self.default_fg)
        x = sx + ((ex - sx) / 2)
        y = sy + 20
        self.canvas.create_text(x, y, anchor='c', text = 'Indicators', font=("Helvetica", 14), fill=self.default_bg)
        r = 20
        x = sx + ((ex - sx) / 2)
        y = sy + 15 + (1 * (ey - sy) / 4)
        self.open_ind = self.canvas.create_oval(x-r, y-r, x+r, y+r, outline=self.default_fg, fill=self.low_color)
        self.canvas.create_text(x - r - 2, y, anchor='e', text = 'O:0/2', fill=self.default_bg)
        self.canvas.create_text(x + r + 2, y, anchor='w', text = 'OPEN', fill=self.default_bg)
        x = sx + ((ex - sx) / 2)
        y = sy + 15 + (2 * (ey - sy) / 4)
        self.closed_ind = self.canvas.create_oval(x-r, y-r, x+r, y+r, outline=self.default_fg, fill=self.low_color)
        self.canvas.create_text(x - r - 2, y, anchor='e', text = 'O:0/3', fill=self.default_bg)
        self.canvas.create_text(x + r + 2, y, anchor='w', text = 'CLOSE', fill=self.default_bg)
        x = sx + ((ex - sx) / 2)
        y = sy + 15 + (3 * (ey - sy) / 4)
        self.ajar_ind = self.canvas.create_oval(x-r, y-r, x+r, y+r, outline=self.default_fg, fill=self.low_color)
        self.canvas.create_text(x - r - 2, y, anchor='e', text = 'O:0/4', fill=self.default_bg)
        self.canvas.create_text(x + r + 2, y, anchor='w', text = 'AJAR', fill=self.default_bg)
        self.canvas.pack()
    def normal_speed_clk(self):
        self.doorsim.time_scale = 1.0
    def double_speed_clk(self):
        self.doorsim.time_scale = 2.0
    def quad_speed_clk(self):
        self.doorsim.time_scale = 4.0
    def setup_bottom_frame(self):
        """Build the bottom bar: speed buttons, logos and the Back button."""
        frame = tk.Frame(self, width=self.default_width, height=80)
        self.config_frame(frame)
        frame.grid(row = 1, column=0, columnspan=1, rowspan=1)
        self.normal_speed = tk.Button(frame, text='x1 Speed', command=self.normal_speed_clk)
        self.config_btn(self.normal_speed)
        self.normal_speed.place(relx=0.10, rely=0.5, anchor=tk.CENTER)
        self.quad_speed = tk.Button(frame, text='x4 Speed', command=self.quad_speed_clk)
        self.config_btn(self.quad_speed)
        self.quad_speed.place(relx=0.280, rely=0.5, anchor=tk.CENTER)
        self.ccrCanvas = tk.Canvas(frame, bg=self.default_bg, width=77,height=77, bd=0, highlightthickness=0, relief='ridge')
        self.ccrCanvas.place(relx=0.5, rely=0.5, anchor=tk.CENTER)
        img = Image.open('./images/ccr_logo.png').resize((77, 77), Image.ANTIALIAS)
        self.ccrImage = ImageTk.PhotoImage(img)
        self.ccrCanvas.create_image(0,0,image=self.ccrImage,anchor="nw")
        self.logoCanvas = tk.Canvas(frame, bg=self.default_bg, width=180,height=77, bd=0, highlightthickness=0, relief='ridge')
        self.logoCanvas.place(relx=0.680, rely=0.5, anchor=tk.CENTER)
        self.logoImage = ImageTk.PhotoImage(file='./images/afit_logo.png')
        self.logoCanvas.create_image(0,0,image=self.logoImage,anchor="nw")
        self.quit = tk.Button(frame, text='Back', command=self.clean_up)
        self.config_btn(self.quit)
        self.quit.place(relx=0.9, rely=0.5, anchor=tk.CENTER)
    def create_widgets(self):
        """Fix the window size and build both frames."""
        self.master.minsize(width=self.default_width, height=self.default_height)
        self.master.maxsize(width=self.default_width, height=self.default_height)
        self.setup_frame1()
        self.setup_bottom_frame()
| 19,553 | 9,036 |
#!/usr/bin/env python
# Path of the jar produced by the gradle build; consumed by build() and run().
output_name = './build/libs/java.jar'
def setup():
    """Verify the Java toolchain once, recording the result in setup.log.

    The presence of setup.log acts as a "setup already done" marker; when
    any tool check fails the marker is removed so setup runs again next time.
    """
    import os, datetime, subprocess
    marker = os.path.join(os.getcwd(), "setup.log")
    if os.path.exists(marker):
        print("'setup.log' exists. Java implementation setup correctly")
        return
    print("Watch for Errors - Requires Java SDK and Runtime")
    try:
        with open('setup.log', 'w') as log:
            log.write("# This is an autogenerated file made by 'run.py' on {}\n".format(datetime.datetime.now()))
            log.write("# => DO NOT DELETE THIS FILE OR SETUP WILL BE CALLED AGAIN\n")
            log.flush()
            # Probe each required tool; check=True raises on failure.
            for probe in (["javac", "-version"], ["gradle", "-v"], ["java", "-version"]):
                subprocess.run(probe, stdout=log, stderr=log, check=True)
            log.flush()
            log.write("\n# Setup completed on {}".format(datetime.datetime.now()))
    except Exception as e:
        print(e)
        # Remove the half-written marker so setup retries on the next run.
        if os.path.exists('setup.log'):
            os.remove('setup.log')
def build():
    """Build the jar via gradle; raise AssertionError when the build fails."""
    import os, subprocess
    # '--no-daemon' avoids leaving a background gradle daemon running that
    # could interfere with our tests.
    if subprocess.call(["gradle", "fullBuild", "--no-daemon"]) != 0:
        raise AssertionError("Build failed")
    print("Built Java implementation as {}".format(output_name))
def run(cmd_args):
    """Execute the built jar with *cmd_args*; raise on a non-zero exit."""
    import subprocess
    exit_code = subprocess.call(["java", "-jar", output_name] + cmd_args)
    if exit_code != 0:
        raise RuntimeError("Program run returned non-zero exit code")
if __name__=="__main__":
    import sys, os
    setup()
    build()
    # Forward CLI args only when this file is the script actually invoked
    # (skips the run step when executed indirectly under another name).
    if os.path.basename(sys.argv[0]) == os.path.basename(__file__):
        run(sys.argv[1:])
# end main
| 1,925 | 600 |
"""
Tags
http://developer.pardot.com/kb/api-version-4/tags/
http://developer.pardot.com/kb/object-field-references/#tag
"""
# Column definitions (name/type) for the Pardot "tag" object schema.
tag = [
    {'name': 'id', 'type': 'integer'},
    {'name': 'name', 'type': 'varchar(512)'},
    {'name': 'created_at', 'type': 'timestamp'},
    {'name': 'updated_at', 'type': 'timestamp'},
]
| 353 | 128 |
import mykde
class Action(mykde.BaseAction):
    """Install action for the VLC media player package."""
    name = "VLC"
    description = "VLC Media Player"
    packages = ['vlc']
| 125 | 46 |
Hello everyone —
keep fighting!
| 26 | 10 |
import sys
from .sitimeunit import SITimeUnit
# Interpreter capability flags: running on Python 3.x, and on 3.6+.
isPython3Compat = (sys.version_info.major == 3)
isPython36Compat = (isPython3Compat and (sys.version_info.minor >= 6))
def _extract_frac_seconds(obj, label):
    """Return (frac_seconds, frac_seconds_exponent) for *obj*.

    Handles the singular "frac_second" naming used by DateTime/datetime,
    the plural "frac_seconds" used by TimeDelta/timedelta, and plain
    datetime/timedelta objects that only expose microsecond(s).

    Args:
        obj: object carrying fractional-second state.
        label: name used in the error message ("a" or "b").

    Raises:
        TypeError: if *obj* exposes none of the known attribute pairs.
    """
    if hasattr(obj, "frac_second") and hasattr(obj, "frac_second_exponent"):
        return obj.frac_second, obj.frac_second_exponent
    if hasattr(obj, "frac_seconds") and hasattr(obj, "frac_seconds_exponent"):
        return obj.frac_seconds, obj.frac_seconds_exponent
    if hasattr(obj, "microsecond"):
        return obj.microsecond, SITimeUnit.MICROSECONDS
    if hasattr(obj, "microseconds"):
        return obj.microseconds, SITimeUnit.MICROSECONDS
    raise TypeError("invalid type for %s: %s" % (label, type(obj)))


def normalize_frac_seconds(a, b):
    """Returns 3-tuple containing (normalized frac_seconds for a, normalized
    frac_seconds for b, most precise (smallest) frac_seconds_exponent between
    both), where "normalized" is the frac_seconds multiplied to be equivalent
    under the more precise frac_seconds_exponent.

    Ex. a.frac_seconds = 10
        a.frac_seconds_exponent = -1
        b.frac_seconds = 12
        b.frac_seconds_exponent = -2
        returns: (100, 12, -2)
    """
    # The duplicated attribute-sniffing for a and b is factored into
    # _extract_frac_seconds; behavior (including error messages) is kept.
    a_frac_seconds, a_frac_seconds_exponent = _extract_frac_seconds(a, "a")
    b_frac_seconds, b_frac_seconds_exponent = _extract_frac_seconds(b, "b")
    if a_frac_seconds_exponent == b_frac_seconds_exponent:
        return (a_frac_seconds, b_frac_seconds,
                a_frac_seconds_exponent)
    multiplier = 10 ** (abs(a_frac_seconds_exponent -
                            b_frac_seconds_exponent))
    # a is more precise, multiply b
    if a_frac_seconds_exponent < b_frac_seconds_exponent:
        return (a_frac_seconds, b_frac_seconds * multiplier,
                a_frac_seconds_exponent)
    # b is more precise, multiply a
    return (a_frac_seconds * multiplier, b_frac_seconds,
            b_frac_seconds_exponent)
def get_subsecond_component(frac_seconds, frac_seconds_exponent,
                            subsec_component_exponent, upper_exponent_limit):
    """Extract one subsecond component from a fractional-seconds value.

    The fractional value is frac_seconds * (10**frac_seconds_exponent);
    the returned integer is its component at subsec_component_exponent,
    truncated so it does not exceed upper_exponent_limit.

    For example:
    If frac_seconds*(10**frac_seconds_exponent) is 0.1234567,
    upper_exponent_limit is SITimeUnit.SECONDS, and subsec_component_exponent is
    SITimeUnit.MICROSECONDS, 123456 would be returned.
    If frac_seconds*(10**frac_seconds_exponent) is 0.123456789,
    upper_exponent_limit is SITimeUnit.MICROSECONDS, and
    subsec_component_exponent is SITimeUnit.NANOSECONDS, 789 would be returned.
    Same example as above, but with upper_exponent_limit = SITimeUnit.SECONDS,
    123456789 would be returned.
    """
    # Rescale to the requested component's units, truncating any finer digits.
    scale = 10 ** (frac_seconds_exponent - subsec_component_exponent)
    total_subsecs = int(frac_seconds * scale)
    # Drop digits at or above the upper limit.
    modulus = 10 ** abs(subsec_component_exponent - upper_exponent_limit)
    return total_subsecs % modulus
| 3,914 | 1,317 |
"""
An agent representing the (retail) customer behavior following a Poisson distribution for demand.
"""
import networkx as nx
from scse.api.module import Agent
import numpy as np
import logging
# Module-level logger following the standard per-module logging pattern.
logger = logging.getLogger(__name__)
class PoissonCustomerOrder(Agent):
    """Retail-customer agent whose per-ASIN demand follows a Poisson draw."""

    _DEFAULT_MAX_MEAN = 10

    def __init__(self, run_parameters):
        # Seed the RNG from the run so simulations are reproducible.
        simulation_seed = run_parameters['simulation_seed']
        self._rng = np.random.RandomState(simulation_seed)
        self._max_mean = run_parameters.get('customer_max_mean',
                                            self._DEFAULT_MAX_MEAN)
        self._DEFAULT_NEWSVENDOR_CUSTOMER = 'Customer'

    def get_name(self):
        return 'order_generator'

    def reset(self, context, state):
        # Cache the ASIN universe for this episode.
        self._asin_list = context['asin_list']

    def compute_actions(self, state):
        # There are two modes of operation: (a) simulates the ASIN selection itself, (b) simulates
        # for a requested set of ASINs. This is defined in the context.
        orders = []
        for asin in self._asin_list:
            # Draw a mean uniformly from [0, max_mean), then sample demand
            # from a Poisson with that mean, clamped to at least one unit.
            mean_demand = self._rng.rand() * self._max_mean
            quantity = round(max(1, self._rng.poisson(mean_demand)))
            order = {
                'type': 'customer_order',
                'asin': asin,
                'origin': None,
                'destination': self._DEFAULT_NEWSVENDOR_CUSTOMER,
                'quantity': quantity,
                'schedule': state['clock']
            }
            logger.debug("{} bought {} units of {}.".format(
                self._DEFAULT_NEWSVENDOR_CUSTOMER, quantity, asin))
            orders.append(order)
        return orders
| 1,784 | 501 |
import numpy as np
import os
from utils import MINERL_DATA_ROOT, CUMULATIVE_REWARDS
import sys
import pandas
def time_to_rewards(data_set, trajectory):
    """
    Takes a data_set and a trajectory, and returns times (in ticks) to achieve each cumulative reward (from the last
    cumulative reward, not from start).
    :param data_set: data set name (for example: 'MineRLObtainDiamond-v0')
    :param trajectory: trajectory path
    :return: a list of times to achieve cumulative rewards
    """
    doc = os.path.join(MINERL_DATA_ROOT, data_set, trajectory, 'rendered.npz')
    f = np.load(doc)
    rewards = list(f['reward'])
    times = []
    c = 0
    sum_rew = 0
    for i in range(len(rewards)):
        # Bound c so a trajectory that reaches the final cumulative reward
        # does not index past the end of CUMULATIVE_REWARDS (IndexError in
        # the original).
        while (c < len(CUMULATIVE_REWARDS)
               and rewards[i] + sum_rew >= CUMULATIVE_REWARDS[c]):
            times.append(i)
            c += 1
        sum_rew += rewards[i]
    # Convert absolute tick indices into deltas between successive rewards.
    time_periods = [times[i] - times[i - 1] for i in range(1, len(times))]
    return time_periods
def main():
    """Aggregate per-reward timing statistics across all trajectories of a
    data set (name from argv[1], default 'MineRLObtainDiamond-v0') and print
    mean/median ticks plus trajectory counts per cumulative reward."""
    if len(sys.argv) > 1:
        data_set = sys.argv[1]
    else:
        data_set = 'MineRLObtainDiamond-v0'
    path = os.path.join(MINERL_DATA_ROOT, data_set)
    trajectories = os.listdir(path)
    trajectories.sort()
    trajectory_times = []
    for trajectory in trajectories:
        time_periods = time_to_rewards(data_set, trajectory)
        trajectory_times.append(time_periods)
    # Bucket the i-th reward interval of every trajectory together.
    reward_times = [[] for _ in range(len(CUMULATIVE_REWARDS[1:-1]))]
    for times in trajectory_times:
        for i in range(len(times)):
            reward_times[i].append(times[i])
    reward_times = [sorted(i) for i in reward_times]
    # Leading 0 aligns the stats with CUMULATIVE_REWARDS[0] (the start).
    mean = [0] + [sum(i) // len(i) for i in reward_times if len(i) > 0]
    median = [0] + [i[len(i) // 2] for i in reward_times if len(i) > 0]
    counts = [len(trajectories)] + [len(i) for i in reward_times if len(i) > 0]
    d = {'mean': {}, 'median': {}, 'counts': {}}
    for i in range(len(mean)):
        d['mean'][CUMULATIVE_REWARDS[i]] = mean[i]
        d['median'][CUMULATIVE_REWARDS[i]] = median[i]
        d['counts'][CUMULATIVE_REWARDS[i]] = counts[i]
    print('\ntimes to achieve cumulative rewards(in ticks) and number of trajectories that achieve them')
    print(pandas.DataFrame.from_dict(d, orient='index').to_string())
if __name__ == "__main__":
    main()
| 2,340 | 896 |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.document
import typing
from abc import abstractmethod
from ..uno.x_interface import XInterface as XInterface_8f010a43
if typing.TYPE_CHECKING:
from ..container.x_index_access import XIndexAccess as XIndexAccess_f0910d6d
class XViewDataSupplier(XInterface_8f010a43):
    """
    Gives access to properties describing all open views of a document.

    Each view is described by a sequence< .com.sun.star.beans.PropertyValue >. Through this interface the state of all open views can be retrieved and restored later. These states can also be made persistent so that a document loader can create all views of the correct types and restore their state to the state when the document was saved.

    See Also:
        `API XViewDataSupplier <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1document_1_1XViewDataSupplier.html>`_
    """
    # Auto-generated UNO type metadata identifying this Python class as the
    # com.sun.star.document.XViewDataSupplier interface.
    __ooo_ns__: str = 'com.sun.star.document'
    __ooo_full_ns__: str = 'com.sun.star.document.XViewDataSupplier'
    __ooo_type_name__: str = 'interface'
    __pyunointerface__: str = 'com.sun.star.document.XViewDataSupplier'

    @abstractmethod
    def getViewData(self) -> 'XIndexAccess_f0910d6d':
        """
        Retrieve information about the currently opened views so it can be
        restored later.
        """

    @abstractmethod
    def setViewData(self, Data: 'XIndexAccess_f0910d6d') -> None:
        """
        Restore all views represented by the given data argument.
        """

__all__ = ['XViewDataSupplier']
| 2,224 | 679 |
import typing as t
from ....extensions import ExtensionMixin
from ...flarum.core.discussions import DiscussionFromBulk
class PreventNecrobumpingDiscussionMixin(DiscussionFromBulk):
    @property
    def fof_prevent_necrobumping(self) -> t.Optional[int]:
        """
        Value of the discussion's ``fof-prevent-necrobumping`` attribute,
        or ``None`` when the attribute is absent.
        """
        return self.attributes.get("fof-prevent-necrobumping")
class PreventNecrobumpingExtension(ExtensionMixin):
    """Registers the necrobumping mixin onto discussion objects."""
    # Extension identity; together these name the "fof/prevent-necrobumping"
    # Flarum extension.
    AUTHOR = 'fof'
    NAME = 'prevent-necrobumping'

    @classmethod
    def mixin(cls):
        # Inject PreventNecrobumpingDiscussionMixin into DiscussionFromBulk
        # through the ExtensionMixin machinery.
        super().mixin(DiscussionFromBulk, PreventNecrobumpingDiscussionMixin)
| 634 | 209 |
import os
import json
from copy import copy
from subprocess import call, Popen, PIPE, STDOUT
import time
import numpy as np
import pandas as pd
from pyproj import Transformer
import rasterio
import fiona
from affine import Affine
from shapely.geometry import shape
from scipy.ndimage.morphology import binary_erosion
from pandas.plotting import register_matplotlib_converters
import matplotlib
import matplotlib.pyplot as plt
import flopy
from flopy.utils import GridIntersect
import richdem as rd
from gsflow.builder import GenerateFishnet, FlowAccumulation, PrmsBuilder, ControlFileBuilder
from gsflow.builder.builder_defaults import ControlFileDefaults
from gsflow.builder import builder_utils as bu
from gsflow.prms.prms_parameter import ParameterRecord
from gsflow.prms import PrmsData, PrmsParameters
from gsflow.control import ControlFile
from gsflow.output import StatVar
from model_config import PRMSConfig
from gsflow_prep import PRMS_NOT_REQ
from datafile import write_basin_datafile
# Register date converters so pandas datetime columns plot cleanly.
register_matplotlib_converters()
# Silence pandas SettingWithCopyWarning for slicing done below.
pd.options.mode.chained_assignment = None

# RichDEM flow-direction coordinate system:
# 234
# 105
# 876
# Map RichDEM D8 codes onto the power-of-two D8 convention used downstream
# (5->1, 6->2, ...); appears to match the ESRI-style encoding — confirm.
d8_map = {5: 1, 6: 2, 7: 4, 8: 8, 1: 16, 2: 32, 3: 64, 4: 128}
class StandardPrmsBuild(object):
def __init__(self, config):
    """Load the PRMS configuration and create the project folder structure.

    Args:
        config (str): path to an .ini file readable by PRMSConfig.
    """
    self.cfg = PRMSConfig(config)
    self.res = float(self.cfg.hru_cellsize)
    # e.g. "<project>_<cellsize>" — used to name all generated files.
    self.proj_name_res = '{}_{}'.format(self.cfg.project_name,
                                        self.cfg.hru_cellsize)
    # Re-root each configured folder under project_folder/<proj_name_res>
    # and create it if missing.
    for folder in ['hru_folder', 'parameter_folder', 'control_folder', 'data_folder', 'output_folder']:
        folder_path = os.path.join(self.cfg.project_folder,
                                   self.proj_name_res,
                                   getattr(self.cfg, folder))
        setattr(self.cfg, folder, folder_path)
        if not os.path.isdir(folder_path):
            os.makedirs(folder_path, exist_ok=True)
    # Populated later by the build methods.
    self.parameters = None
    self.control = None
    self.data = None
    self.zeros = None
    # Basin geometry is the first feature of the study-area shapefile.
    with fiona.open(self.cfg.study_area_path, 'r') as src:
        self.raster_meta = src.meta
        self.basin_geo = [shape(f['geometry']) for f in src][0]
    # Projection file assumed to sit next to the shapefile.
    self.prj = self.cfg.study_area_path.replace('.shp', '.prj')
    self.control_file = os.path.join(self.cfg.control_folder,
                                     '{}.control'.format(self.proj_name_res))
    self.parameter_file = os.path.join(self.cfg.parameter_folder,
                                       '{}.params'.format(self.proj_name_res))
    self.data_file = os.path.join(self.cfg.data_folder, '{}.data'.format(self.proj_name_res))
def write_parameter_file(self):
    """Build the PRMS parameter object and write it to self.parameter_file.

    Consumes self.streams, self.cascades, self.dem, self.hru_lakeless and
    self.data_params, so _build_grid() and write_datafile() must run first.
    """
    builder = PrmsBuilder(
        self.streams,
        self.cascades,
        self.modelgrid,
        self.dem.ravel(),
        hru_type=self.hru_lakeless.ravel(),
        hru_subbasin=self.hru_lakeless.ravel())
    self.parameters = builder.build()
    self.parameters.hru_lat = self.lat
    self.parameters.hru_lon = self.lon
    # Projected cell-center coordinates as nhru-length records.
    self.parameters.add_record_object(ParameterRecord('hru_x',
                                                      np.array(self.modelgrid.xcellcenters.ravel(),
                                                               dtype=float).ravel(),
                                                      dimensions=[['nhru', len(self.lon)]],
                                                      datatype=2))
    self.parameters.add_record_object(ParameterRecord('hru_y',
                                                      np.array(self.modelgrid.ycellcenters.ravel(),
                                                               dtype=float).ravel(),
                                                      dimensions=[['nhru', len(self.lat)]],
                                                      datatype=2))
    # Uniform HRU area; value computed in _build_grid (m^2 * 0.000247105 -> acres).
    areas = np.ones_like(self.lat) * self.hru_area
    self.parameters.add_record_object(ParameterRecord('hru_area',
                                                      np.array(areas, dtype=float).ravel(),
                                                      dimensions=[['nhru', len(self.lat)]],
                                                      datatype=2))
    # self.build_lakes()
    self._build_veg_params()
    self._build_soil_params()
    # Merge in the station records built by write_datafile(), then strip
    # records PRMS does not require.
    [self.parameters.add_record_object(rec) for rec in self.data_params]
    [self.parameters.remove_record(rec) for rec in PRMS_NOT_REQ]
    self.parameters.write(self.parameter_file)
def write_control_file(self):
    """Build the PRMS control object and write it to self.control_file.

    Configures module selection, units, run window, I/O paths, statvar
    output, and runtime display variables.

    Fixes vs. original: removed the redundant second assignment of
    subbasin_flag and the duplicate add_record of 'statVar_names'.
    """
    controlbuild = ControlFileBuilder(ControlFileDefaults())
    self.control = controlbuild.build(name='{}.control'.format(self.proj_name_res),
                                      parameter_obj=self.parameters)
    # Module selection for a PRMS-only (no GSFLOW coupling) run.
    self.control.model_mode = ['PRMS']
    self.control.executable_desc = ['PRMS Model']
    self.control.executable_model = [self.cfg.prms_exe]
    self.control.cascadegw_flag = [0]
    self.control.et_module = ['potet_jh']
    self.control.precip_module = ['xyz_dist']
    self.control.temp_module = ['xyz_dist']
    self.control.solrad_module = ['ccsolrad']
    self.control.rpt_days = [7]
    self.control.snarea_curve_flag = [0]
    self.control.soilzone_aet_flag = [0]
    self.control.srunoff_module = ['srunoff_smidx']
    # 0: standard; 1: SI/metric
    units = 0
    self.control.add_record('elev_units', [units])
    self.control.add_record('precip_units', [units])
    self.control.add_record('temp_units', [units])
    self.control.add_record('runoff_units', [units])
    self.control.start_time = [int(d) for d in self.cfg.start_time.split(',')] + [0, 0, 0]
    self.control.subbasin_flag = [0]
    self.control.transp_module = ['transp_tindex']
    self.control.csv_output_file = [os.path.join(self.cfg.output_folder, 'output.csv')]
    self.control.param_file = [self.parameter_file]
    self.control.parameter_check_flag = [0, ]
    self.control.add_record('end_time', [int(d) for d in self.cfg.end_time.split(',')] + [0, 0, 0])
    self.control.add_record('model_output_file', [os.path.join(self.cfg.output_folder, 'output.model')],
                            datatype=4)
    self.control.add_record('var_init_file', [os.path.join(self.cfg.output_folder, 'init.csv')],
                            datatype=4)
    self.control.add_record('data_file', [self.data_file], datatype=4)
    # Basin-scale variables written to the statvar file each timestep.
    stat_vars = ['runoff',
                 'basin_tmin',
                 'basin_tmax',
                 'basin_ppt',
                 'basin_rain',
                 'basin_snow',
                 'basin_potsw',
                 'basin_potet',
                 'basin_net_ppt',
                 'basin_intcp_stor',
                 'basin_pweqv',
                 'basin_snowmelt',
                 'basin_snowcov',
                 'basin_sroff',
                 'basin_hortonian',
                 'basin_infil',
                 'basin_soil_moist',
                 'basin_recharge',
                 'basin_actet',
                 'basin_gwstor',
                 'basin_gwflow',
                 'basin_gwsink',
                 'basin_cfs',
                 'basin_ssflow',
                 'basin_imperv_stor',
                 'basin_lake_stor',
                 'basin_ssstor']
    self.control.add_record('statsON_OFF', values=[1], datatype=1)
    self.control.add_record('nstatVars', values=[len(stat_vars)], datatype=1)
    self.control.add_record('statVar_element', values=['1' for _ in stat_vars], datatype=4)
    self.control.add_record('statVar_names', values=stat_vars, datatype=4)
    self.control.add_record('stat_var_file', [os.path.join(self.cfg.output_folder, 'statvar.out')],
                            datatype=4)
    # (variable name, plot panel index) pairs for the PRMS runtime display.
    disp_vars = [('basin_cfs', '1'),
                 ('runoff', '1'),
                 ('basin_gwflow', '2'),
                 ('basin_sroff', '2'),
                 ('basin_ssflow', '2'),
                 ('basin_actet', '3'),
                 ('basin_potet', '3'),
                 ('basin_perv_et', '3'),
                 ('basin_pweqv', '4'),
                 ('basin_snow', '4'),
                 ('basin_snowdepth', '4'),
                 ('basin_snowmelt', '4')]
    self.control.add_record('dispVar_plot', values=[e[1] for e in disp_vars], datatype=4)
    self.control.add_record('dispVar_element', values=['1' for _ in disp_vars], datatype=4)
    self.control.add_record('gwr_swale_flag', [1])
    # remove gsflow control objects
    self.control.remove_record('gsflow_output_file')
    self.control.write(self.control_file)
def write_datafile(self, units='metric'):
    """Build climate/gage station parameter records and write the PRMS datafile.

    Reads station metadata (JSON), builds xyz_dist station records
    (coordinates, elevations, use flags), the outlet gage record, and
    rain/snow temperature thresholds; writes the datafile from GHCN data
    only when it does not already exist.

    Args:
        units (str): 'metric' or anything else for standard/imperial;
            controls elevation conversion and threshold values.
    """
    self.nmonths = 12
    ghcn = self.cfg.prms_data_ghcn
    stations = self.cfg.prms_data_stations
    gages = self.cfg.prms_data_gages
    with open(stations, 'r') as js:
        sta_meta = json.load(js)
    # Stations ordered by their 'zone' key for a stable station index.
    sta_iter = sorted([(v['zone'], v) for k, v in sta_meta.items()], key=lambda x: x[0])
    tsta_elev, tsta_nuse, tsta_x, tsta_y, psta_elev = [], [], [], [], []
    for _, val in sta_iter:
        if units != 'metric':
            # meters -> feet
            elev = val['elev'] / 0.3048
        else:
            elev = val['elev']
        tsta_elev.append(elev)
        tsta_nuse.append(1)
        # NOTE(review): proj_coords appears to be stored (y, x) — index 1
        # feeds x and index 0 feeds y; confirm against the station JSON.
        tsta_x.append(val['proj_coords'][1])
        tsta_y.append(val['proj_coords'][0])
        psta_elev.append(elev)
    # Precip (psta_*) and temperature (tsta_*) stations are the same set here.
    self.data_params = [ParameterRecord('nrain', values=[len(tsta_x)], datatype=1),
                        ParameterRecord('ntemp', values=[len(tsta_x)], datatype=1),
                        ParameterRecord('psta_elev', np.array(psta_elev, dtype=float).ravel(),
                                        dimensions=[['nrain', len(psta_elev)]], datatype=2),
                        ParameterRecord('psta_nuse', np.array(tsta_nuse, dtype=int).ravel(),
                                        dimensions=[['nrain', len(tsta_nuse)]], datatype=1),
                        ParameterRecord(name='ndist_psta', values=[len(tsta_nuse), ], datatype=1),
                        ParameterRecord('psta_x', np.array(tsta_x, dtype=float).ravel(),
                                        dimensions=[['nrain', len(tsta_x)]], datatype=2),
                        ParameterRecord('psta_y', np.array(tsta_y, dtype=float).ravel(),
                                        dimensions=[['nrain', len(tsta_y)]], datatype=2),
                        ParameterRecord('tsta_elev', np.array(tsta_elev, dtype=float).ravel(),
                                        dimensions=[['ntemp', len(tsta_elev)]], datatype=2),
                        ParameterRecord('tsta_nuse', np.array(tsta_nuse, dtype=int).ravel(),
                                        dimensions=[['ntemp', len(tsta_nuse)]], datatype=1),
                        ParameterRecord(name='ndist_tsta', values=[len(tsta_nuse), ], datatype=1),
                        ParameterRecord('tsta_x', np.array(tsta_x, dtype=float).ravel(),
                                        dimensions=[['ntemp', len(tsta_x)]], datatype=2),
                        ParameterRecord('tsta_y', np.array(tsta_y, dtype=float).ravel(),
                                        dimensions=[['ntemp', len(tsta_y)]], datatype=2),
                        bu.tmax_adj(self.nhru),
                        bu.tmin_adj(self.nhru),
                        ParameterRecord(name='nobs', values=[1, ], datatype=1),
                        ]
    # Grid node of the basin outlet; +1 converts to PRMS 1-based numbering.
    outlet_sta = self.modelgrid.intersect(self.pour_pt[0][0], self.pour_pt[0][1])
    outlet_sta = self.modelgrid.get_node([(0,) + outlet_sta])
    self.data_params.append(ParameterRecord('outlet_sta',
                                            values=[outlet_sta[0] + 1, ],
                                            dimensions=[['one', 1]],
                                            datatype=1))
    # Rain/snow partition thresholds (deg C in metric, deg F otherwise).
    if units == 'metric':
        allrain_max = np.ones((self.nhru * self.nmonths)) * 3.3
        tmax_allrain = np.ones((self.nhru * self.nmonths)) * 3.3
        tmax_allsnow = np.ones((self.nhru * self.nmonths)) * 0.0
    else:
        allrain_max = np.ones((self.nhru * self.nmonths)) * 38.0
        tmax_allrain = np.ones((self.nhru * self.nmonths)) * 38.0
        tmax_allsnow = np.ones((self.nhru * self.nmonths)) * 32.0
    self.data_params.append(ParameterRecord('tmax_allrain_sta', allrain_max,
                                            dimensions=[['nhru', self.nhru], ['nmonths', self.nmonths]],
                                            datatype=2))
    self.data_params.append(ParameterRecord('tmax_allrain', tmax_allrain,
                                            dimensions=[['nhru', self.nhru], ['nmonths', self.nmonths]],
                                            datatype=2))
    self.data_params.append(ParameterRecord('tmax_allsnow', tmax_allsnow,
                                            dimensions=[['nhru', self.nhru], ['nmonths', self.nmonths]],
                                            datatype=2))
    self.data_params.append(ParameterRecord('snowpack_init',
                                            np.ones_like(self.ksat).ravel(),
                                            dimensions=[['nhru', self.nhru]],
                                            datatype=2))
    # Write the datafile from GHCN records only if not already cached.
    if not os.path.isfile(self.data_file):
        write_basin_datafile(station_json=stations,
                             gage_json=gages,
                             ghcn_data=ghcn,
                             out_csv=None,
                             data_file=self.data_file,
                             units=units)
    self.data = PrmsData.load_from_file(self.data_file)
def build_model_files(self):
    """Run the full build: grid/terrain first, then data, parameter, and
    control files — order matters, later steps consume earlier products."""
    self._build_grid()
    self.write_datafile(units='standard')
    self.write_parameter_file()
    self.write_control_file()
def write_raster_params(self, name, values=None):
    """Write a parameter grid as a GeoTIFF into the resampled-raster folder.

    Args:
        name: parameter name; also used as the output file stem.
        values: optional array to write; when not an ndarray, values are
            pulled from self.parameters and reshaped to the model grid.
    """
    if not isinstance(values, np.ndarray):
        grid_shape = (self.modelgrid.nrow, self.modelgrid.ncol)
        values = self.parameters.get_values(name).reshape(grid_shape)
    raster_dir = os.path.join(self.cfg.raster_folder, 'resamples', self.cfg.hru_cellsize)
    raster_path = os.path.join(raster_dir, '{}.tif'.format(name))
    with rasterio.open(raster_path, 'w', **self.raster_meta) as dst:
        dst.write(values, 1)
def _build_grid(self):
    """Generate the model fishnet grid and derived geometry, then build
    domain and terrain parameters."""
    with fiona.open(self.cfg.study_area_path, 'r') as domain:
        geo = [f['geometry'] for f in domain][0]
        geo = shape(geo)
    self.bounds = geo.bounds
    # The fishnet covers the full extent of the elevation raster.
    self.modelgrid = GenerateFishnet(bbox=self.cfg.elevation,
                                     xcellsize=float(self.cfg.hru_cellsize),
                                     ycellsize=float(self.cfg.hru_cellsize))
    self.fishnet_file = os.path.join(self.cfg.hru_folder, 'fishnet.shp')
    self.modelgrid.write_shapefile(self.fishnet_file, prj=self.prj)
    self._prepare_rasters()
    x = self.modelgrid.xcellcenters.ravel()
    y = self.modelgrid.ycellcenters.ravel()
    # One HRU per grid cell.
    self.nhru = (x * y).size
    # Cell area in acres (m^2 * 0.000247105).
    self.hru_area = (float(self.cfg.hru_cellsize) ** 2) * 0.000247105
    # Cell centers to lon/lat; grid coordinates assumed EPSG:5071 — confirm.
    trans = Transformer.from_proj('epsg:{}'.format(5071), 'epsg:4326', always_xy=True)
    self.lon, self.lat = trans.transform(x, y)
    self.zeros = np.zeros((self.modelgrid.nrow, self.modelgrid.ncol))
    self.nnodes = self.zeros.size
    self._build_domain_params()
    self._build_terrain_params(mode='richdem')
def _build_terrain_params(self, mode='pygsflow'):
    """Compute flow direction/accumulation and derived stream, cascade,
    slope, and aspect products.

    This method computes flow accumulation/direction rasters for both
    RichDEM and PyGSFLOW. RichDEM seems to fill depressions more
    effectively and is fast.

    Args:
        mode (str): 'richdem' or 'pygsflow' — library used for the flow
            direction/accumulation grids.
    """
    self.dem = rd.LoadGDAL(self.cfg.elevation, no_data=0.0)
    # Fill nodata (0.0) cells by linear interpolation along each row.
    # NOTE(review): a fully-nodata row would make idx[d > 0.0] empty and
    # np.interp raise — assumed not to occur for valid DEMs.
    if np.any(self.dem == 0.0):
        for r in range(self.dem.shape[0]):
            d = self.dem[r, :].ravel()
            idx = np.arange(len(d))
            self.dem[r, :] = np.interp(idx, idx[d > 0.0], d[d > 0.0])
    if mode == 'richdem':
        # RichDEM flow accumulation and direction
        rd.FillDepressions(self.dem, epsilon=0.0001, in_place=True)
        self.dem = rd.rdarray(self.dem, no_data=0, dtype=float)
        rd_flow_accumulation = rd.FlowAccumulation(self.dem, method='D8')
        props = rd.FlowProportions(dem=self.dem, method='D8')
        # remap directions to pygsflow nomenclature
        dirs = np.ones_like(rd_flow_accumulation)
        for i in range(1, 9):
            dirs = np.where(props[:, :, i] == 1, np.ones_like(dirs) * i, dirs)
        rd_flow_directions = copy(dirs)
        for k, v in d8_map.items():
            rd_flow_directions[dirs == k] = v
        # manually flow corners and edges inward
        rd_flow_directions[0, 0] = 2
        rd_flow_directions[0, -1] = 8
        rd_flow_directions[-1, 0] = 128
        rd_flow_directions[-1, -1] = 32
        rd_flow_directions[0, 1:-1] = 4
        rd_flow_directions[1:-1, 0] = 1
        rd_flow_directions[1:-1, -1] = 16
        rd_flow_directions[-1, 1:-1] = 64
        self.flow_direction = rd_flow_directions
        self.flow_accumulation = rd_flow_accumulation
    elif mode == 'pygsflow':
        # pygsflow flow accumulation and direction
        fa = FlowAccumulation(self.dem,
                              self.modelgrid.xcellcenters,
                              self.modelgrid.ycellcenters,
                              verbose=False)
        self.flow_direction = fa.flow_directions(dijkstra=True, breach=0.001)
        self.flow_accumulation = fa.flow_accumulation()
    else:
        raise NotImplementedError('Must choose between "pygsflow" and "richdem" for '
                                  'flow calculations')
    # Watershed, streams, cascades, and HRU slope/aspect are always derived
    # with pygsflow from whichever direction grid was built above.
    fa = FlowAccumulation(
        self.dem,
        self.modelgrid.xcellcenters,
        self.modelgrid.ycellcenters,
        hru_type=self.hru_lakeless,
        flow_dir_array=self.flow_direction,
        verbose=False)
    self.watershed = fa.define_watershed(self.pour_pt,
                                         self.modelgrid,
                                         fmt='xy')
    self.streams = fa.make_streams(self.flow_direction,
                                   self.flow_accumulation,
                                   threshold=100,
                                   min_stream_len=10)
    self.cascades = fa.get_cascades(streams=self.streams,
                                    pour_point=self.pour_pt, fmt='xy',
                                    modelgrid=self.modelgrid)
    self.hru_aspect = bu.d8_to_hru_aspect(self.flow_direction)
    self.hru_slope = bu.d8_to_hru_slope(self.flow_direction,
                                        self.dem,
                                        self.modelgrid.xcellcenters,
                                        self.modelgrid.ycellcenters)
def _build_domain_params(self):
    """Rasterize outlet, lake, and study-area shapefiles onto the model grid.

    Sets self.pour_pt, self.border, self.hru_lakeless, and the rasterized
    attributes (outlet, lake_id, hru_type), writing each as a text grid.
    NOTE: iteration order matters — lake_id must be built before hru_type,
    which reads self.lake_id.
    """
    ix = GridIntersect(self.modelgrid, method='vertex', rtree=True)
    shape_input = [('outlet', 'model_outlet_path'),
                   ('lake_id', 'lake_path'),
                   ('hru_type', 'study_area_path')]
    for param, path in shape_input:
        shp_file = getattr(self.cfg, path)
        feats = features(shp_file)
        data = copy(self.zeros)
        # Burn the 1-based feature index into every intersecting cell.
        for i, f in enumerate(feats, start=1):
            geo = shape(f['geometry'])
            idx = ix.intersects(geo)
            for x in idx:
                data[x[0]] = i
        outfile = os.path.join(self.cfg.hru_folder, '{}.txt'.format(param))
        if param == 'outlet':
            # 'geo' is the last (typically only) outlet feature's geometry.
            setattr(self, 'pour_pt', [[geo.x, geo.y]])
        if param == 'hru_type':
            # Mark the one-cell-wide rim of the active area with code 3 in
            # the lake-free grid; lakes get code 2 in hru_type.
            erode = binary_erosion(data)
            border = erode < data
            setattr(self, 'border', border)
            lakeless = np.where(border, self.zeros + 3, data)
            setattr(self, 'hru_lakeless', lakeless)
            data = np.where(self.lake_id > 0, self.zeros + 2, data)
        setattr(self, param, data)
        np.savetxt(outfile, data, delimiter=' ')
def _build_lakes(self):
    """Add lake parameter records (currently unused; the call site in
    write_parameter_file is commented out)."""
    lakes = bu.lake_hru_id(self.lake_id)
    # NOTE(review): values=[np.unique(self.lake_id)] wraps an ARRAY in a
    # one-element list; nlake is probably meant to be a scalar count such
    # as np.unique(self.lake_id).size — confirm before enabling this method.
    nlake = ParameterRecord(
        name='nlake', values=[np.unique(self.lake_id)], datatype=1, file_name=None
    )
    nlake_hrus = ParameterRecord(
        name='nlake_hrus', values=[np.count_nonzero(self.lake_id)], datatype=1, file_name=None
    )
    [self.parameters.add_record_object(l) for l in [lakes, nlake, nlake_hrus]]
def _build_veg_params(self):
    """Build vegetation parameter records from LANDFIRE data and the remap
    lookup tables, add them to self.parameters, and cache root depth for the
    subsequent soil-parameter build."""
    self._prepare_lookups()
    covtype = bu.covtype(self.landfire_type, self.covtype_lut)
    covden_sum = bu.covden_sum(self.landfire_cover, self.covdensum_lut)
    covden_win = bu.covden_win(covtype.values, self.covdenwin_lut)
    rad_trncf = bu.rad_trncf(covden_win.values)
    snow_intcp = bu.snow_intcp(self.landfire_type, self.snow_intcp_lut)
    srain_intcp = bu.srain_intcp(self.landfire_type, self.srain_intcp_lut)
    # Bug fix: winter-rain interception previously used snow_intcp_lut even
    # though _prepare_lookups loads a dedicated wrain_intcp.rmp table.
    wrain_intcp = bu.wrain_intcp(self.landfire_type, self.wrain_intcp_lut)
    vars_ = [covtype, covden_sum, covden_win, rad_trncf, snow_intcp, srain_intcp,
             wrain_intcp]
    for v in vars_:
        self.parameters.add_record_object(v)
    # Kept for _build_soil_params.
    self.root_depth = bu.root_depth(self.landfire_type, self.rtdepth_lut)
def _build_soil_params(self):
    """Derive soil/runoff parameter records from the soils rasters and add
    them to self.parameters. Requires _build_veg_params (root_depth)."""
    cellsize = int(self.cfg.hru_cellsize)
    soil_type = bu.soil_type(self.clay, self.sand)
    # awc meters to inches
    self.awc = self.awc * 1000 / 25.4
    soil_moist_max = bu.soil_moist_max(self.awc, self.root_depth)
    soil_moist_init = bu.soil_moist_init(soil_moist_max.values)
    soil_rech_max = bu.soil_rech_max(self.awc, self.root_depth)
    soil_rech_init = bu.soil_rech_init(soil_rech_max.values)
    # ksat micrometers/sec to inches/day (1 um/s ~= 3.4 in/day); the extra
    # /1000 implies a scaled source raster — TODO confirm units.
    self.ksat = self.ksat * 3.4 / 1000
    ssr2gw_rate = bu.ssr2gw_rate(self.ksat, self.sand, soil_moist_max.values)
    ssr2gw_sq = bu.ssr2gw_exp(self.nnodes)
    slowcoef_lin = bu.slowcoef_lin(self.ksat, self.hru_aspect.values, cellsize, cellsize)
    slowcoef_sq = bu.slowcoef_sq(self.ksat, self.hru_aspect.values, self.sand,
                                 soil_moist_max.values, cellsize, cellsize)
    # parameterize this — sat_threshold is currently a constant of 1.0.
    sat_threshold = ParameterRecord('sat_threshold',
                                    np.ones_like(self.ksat).ravel(),
                                    dimensions=[['nhru', self.nhru]],
                                    datatype=2)
    # NLCD impervious/carea values are percents; convert to fractions.
    hru_percent_imperv = bu.hru_percent_imperv(self.nlcd)
    hru_percent_imperv.values /= 100
    carea_max = bu.carea_max(self.nlcd) / 100
    vars_ = [soil_type, soil_moist_max, soil_moist_init, soil_rech_max, soil_rech_init,
             ssr2gw_rate, ssr2gw_sq, slowcoef_lin, slowcoef_sq, hru_percent_imperv, carea_max,
             self.hru_aspect, self.hru_slope, sat_threshold]
    for v in vars_:
        self.parameters.add_record_object(v)
def _prepare_rasters(self):
    """Resample all input rasters to the model cellsize and load them.

    gdal warp is > 10x faster for nearest; we resample a single raster with
    flopy to establish the target geometry, then use gdalwarp for the rest.
    Resampled arrays are cached as .tif/.txt pairs; percent soils rasters
    are rescaled to fractions.

    Fixes vs. original: gdalwarp '-wo' now receives a bare NAME=VALUE option
    (was the malformed '-wo', '-wo NUM_THREADS=8'); the target size comes
    from self.raster_meta instead of 'array', which was unbound whenever the
    first raster was satisfied from cache; the dead post-warp branch read a
    nonexistent 'src.raster_meta' attribute (now src.meta); the devnull
    handle is closed.
    """
    _int = ['landfire_cover', 'landfire_type', 'nlcd']
    _float = ['elevation', 'sand', 'clay', 'loam', 'awc', 'ksat']
    rasters = _int + _float
    first = True
    modelgrid = GenerateFishnet(self.cfg.elevation, xcellsize=1000, ycellsize=1000)
    for raster in rasters:
        in_path = getattr(self.cfg, raster)
        out_dir = os.path.join(self.cfg.raster_folder, 'resamples', self.cfg.hru_cellsize)
        if not os.path.isdir(out_dir):
            os.makedirs(out_dir)
        out_path = os.path.join(out_dir, '{}.tif'.format(raster))
        setattr(self.cfg, raster, out_path)
        txt = out_path.replace('.tif', '.txt')
        # Cached result from a previous run: load it and continue.
        if os.path.exists(out_path) and os.path.exists(txt):
            with rasterio.open(out_path, 'r') as src:
                a = src.read(1)
                if raster in ['sand', 'clay', 'loam', 'ksat', 'awc']:
                    a /= 100.
                if first:
                    self.raster_meta = src.meta
                    first = False
                setattr(self, raster, a)
            continue
        if raster in _float:
            rsample, _dtype = 'min', 'Float32'
        else:
            rsample, _dtype = 'nearest', 'UInt16'
        if first:
            # First uncached raster establishes target shape/transform.
            robj = flopy.utils.Raster.load(in_path)
            array = robj.resample_to_grid(modelgrid, robj.bands[0], method=rsample, thread_pool=8)
            example_raster = os.path.join(out_dir, 'flopy_raster.tif')
            self.raster_meta = robj._meta
            sa = copy(self.raster_meta['transform'])
            transform = Affine(1000., sa[1], sa[2], sa[3], -1000., sa[5])
            self.raster_meta.update({'height': array.shape[0],
                                     'width': array.shape[1],
                                     'transform': transform})
            with rasterio.open(example_raster, 'w', **self.raster_meta) as ex:
                ex.write(array, 1)
            first = False
        s = time.time()
        b = self.bounds
        warp = [self.cfg.gdal_warp_exe, in_path, out_path,
                '-te', str(b[0]), str(b[1]), str(b[2] + self.res), str(b[3]),
                '-ts', str(self.raster_meta['width']), str(self.raster_meta['height']),
                '-multi', '-wo', 'NUM_THREADS=8',
                '-ot', _dtype, '-r', rsample,
                '-dstnodata', '0', '-srcnodata', '0', '-overwrite']
        with open(os.devnull, 'wb') as devnull:
            call(warp, stdout=devnull)
        print('gdalwarp {} on {}: {} sec\n'.format(rsample, raster, time.time() - s))
        with rasterio.open(out_path, 'r') as src:
            a = src.read(1)
            if raster in ['sand', 'clay', 'loam', 'ksat', 'awc']:
                a /= 100.
            if first:
                # Defensive; unreachable in the current flow since 'first'
                # is cleared by whichever branch above ran.
                self.raster_meta = src.meta
                first = False
            setattr(self, raster, a)
        np.savetxt(txt, a)
def _prepare_lookups(self):
    """Load each required .rmp remap table and attach it as self.<stem>_lut."""
    required = ['covtype.rmp', 'covdenwin.rmp', 'srain_intcp.rmp',
                'snow_intcp.rmp', 'rtdepth.rmp', 'covdensum.rmp',
                'wrain_intcp.rmp']
    for remap_name in required:
        stem = remap_name.split('.')[0]
        table = bu.build_lut(os.path.join(self.cfg.remap_folder, remap_name))
        setattr(self, '{}_lut'.format(stem), table)
class MontanaPrmsModel:
    """Thin wrapper around a PRMS run: loads control, parameter, and data
    files, executes the model, and exposes statvar output."""

    def __init__(self, control_file, parameter_file, data_file):
        self.control_file = control_file
        self.parameter_file = parameter_file
        self.data_file = data_file
        self.control = ControlFile.load_from_file(control_file)
        self.parameters = PrmsParameters.load_from_file(parameter_file)
        self.data = PrmsData.load_from_file(data_file)
        # Populated by get_statvar().
        self.statvar = None

    def run_model(self, stdout=None):
        """Run the PRMS executable, echoing and capturing its output.

        Args:
            stdout (str): optional path; when given, captured output lines
                are also written to this file.

        Returns:
            tuple: (success, buff) — success is True only when a line of
            process output contains 'normal termination'; buff is the list
            of captured lines.
        """
        for obj_, var_ in [(self.control, 'control'),
                           (self.parameters, 'parameters'),
                           (self.data, 'data')]:
            if not obj_:
                raise TypeError('{} is not set, run "write_{}_file()"'.format(var_, var_))
        buff = []
        # Bug fixes: 'success' was previously unbound when the message never
        # appeared (NameError at return), and the success message was a bare
        # string, so the loop matched single characters of it instead of the
        # whole phrase.
        success = False
        normal_msg = ['normal termination']
        report, silent = True, False
        argv = [self.control.get_values('executable_model')[0], self.control_file]
        model_ws = os.path.dirname(self.control_file)
        proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
        while True:
            line = proc.stdout.readline()
            c = line.decode('utf-8')
            if c != '':
                for msg in normal_msg:
                    if msg in c.lower():
                        success = True
                        break
                c = c.rstrip('\r\n')
                if not silent:
                    print('{}'.format(c))
                if report:
                    buff.append(c)
            else:
                # Empty read means the process closed its output stream.
                break
        if stdout:
            with open(stdout, 'w') as fp:
                if report:
                    for line in buff:
                        fp.write(line + '\n')
        return success, buff

    def get_statvar(self):
        """Load statvar output and return it as a DataFrame without the
        sub-daily time columns."""
        self.statvar = StatVar.load_from_control_object(self.control)
        df = self.statvar.stat_df.drop(columns=['Hour', 'Minute', 'Second'])
        return df
def features(shp):
    """Return every feature of the shapefile at *shp* as a list."""
    with fiona.open(shp, 'r') as src:
        return list(src)
def plot_stats(stats):
    """Plot simulated vs. measured streamflow from a statvar DataFrame."""
    figure, axes = plt.subplots(figsize=(16, 6))
    axes.plot(stats.Date, stats.basin_cfs_1, color='r', linewidth=2.2, label="simulated")
    axes.plot(stats.Date, stats.runoff_1, color='b', linewidth=1.5, label="measured")
    axes.legend(bbox_to_anchor=(0.25, 0.65))
    axes.set_xlabel("Date")
    axes.set_ylabel("Streamflow, in cfs")
    # axes.set_ylim([0, 2000])
    # plt.savefig('/home/dgketchum/Downloads/hydrograph.png')
    plt.show()
    plt.close()
if __name__ == '__main__':
    matplotlib.use('TkAgg')
    # Build all PRMS input files from the config, run the model, then plot
    # the simulated vs. observed hydrograph.
    conf = './model_files/uyws_parameters.ini'
    stdout_ = '/media/research/IrrigationGIS/Montana/upper_yellowstone/gsflow_prep/uyws_carter_1000/out.txt'
    prms_build = StandardPrmsBuild(conf)
    prms_build.build_model_files()
    prms = MontanaPrmsModel(prms_build.control_file,
                            prms_build.parameter_file,
                            prms_build.data_file)
    prms.run_model(stdout_)
    stats = prms.get_statvar()
    plot_stats(stats)
    pass
# ========================= EOF ====================================================================
| 30,988 | 9,942 |
# Importing libraries
import imaplib, email
# NOTE(review): hard-coded account credentials are checked into source —
# move these to environment variables or a secrets store.
user = 'vsjtestmail@gmail.com'
password = 'TestMa1lPass'
imap_url = 'imap.gmail.com'
# Function to get the content (body) part of an email message
def get_body(msg):
    """Return the decoded payload of *msg*, descending into the first part
    of multipart messages."""
    if not msg.is_multipart():
        return msg.get_payload(None, True)
    return get_body(msg.get_payload(0))
# Function to search for a key value pair
def search(key, value, con):
    """Run an IMAP SEARCH for *key*/*value* on connection *con* and return
    the raw response data (the value is sent quoted)."""
    quoted = '"{}"'.format(value)
    _, data = con.search(None, key, quoted)
    return data
# Function to get the list of emails under this label
def get_emails(result_bytes, connection=None):
    """Fetch the full RFC822 message for every id in an IMAP SEARCH result.

    Args:
        result_bytes: data list as returned by search(); its first element
            is a whitespace-separated bytestring of message ids.
        connection: IMAP connection to fetch from; defaults to the
            module-level ``con`` for backward compatibility.

    Returns:
        list: raw fetch data, one entry per message id.
    """
    # Previously this function silently depended on the module-global `con`;
    # the optional parameter makes the dependency explicit and testable.
    imap = connection if connection is not None else con
    msgs = []  # all the email data are pushed inside an array
    for num in result_bytes[0].split():
        typ, data = imap.fetch(num, '(RFC822)')
        msgs.append(data)
    return msgs
# this is done to make SSL connection with GMAIL
con = imaplib.IMAP4_SSL(imap_url)
# logging the user in
con.login(user, password)
# calling function to check for email under this label
con.select('Inbox')
# fetching emails from this user "tu**h*****1@gmail.com"
msgs = get_emails(search('FROM', 'champaksworld@gmail.com', con))
# Uncomment this to see what actually comes as data
print(msgs)
print(type(msgs))
print(len(msgs))
| 1,147 | 408 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.rbac_policies \
import forms as rbac_policy_forms
from openstack_dashboard.dashboards.admin.rbac_policies \
import tables as rbac_policy_tables
from openstack_dashboard.dashboards.admin.rbac_policies \
import tabs as rbac_policy_tabs
class IndexView(tables.DataTableView):
    """Admin table listing Neutron RBAC policies with human-readable names."""
    table_class = rbac_policy_tables.RBACPoliciesTable
    page_title = _("RBAC Policies")

    @memoized.memoized_method
    def _get_tenants(self):
        """Return an OrderedDict mapping project id -> project name."""
        try:
            tenants, has_more = api.keystone.tenant_list(self.request)
        except Exception:
            tenants = []
            msg = _("Unable to retrieve information about the "
                    "policies' projects.")
            exceptions.handle(self.request, msg)
        return OrderedDict((t.id, t.name) for t in tenants)

    def _get_networks(self):
        """Return a dict mapping network id -> network name."""
        try:
            networks = api.neutron.network_list(self.request)
        except Exception:
            networks = []
            msg = _("Unable to retrieve information about the "
                    "policies' networks.")
            exceptions.handle(self.request, msg)
        return {n.id: n.name for n in networks}

    def _get_qos_policies(self):
        """Return a dict mapping QoS policy id -> name (empty if the 'qos'
        extension is unsupported)."""
        qos_policies = []
        try:
            if api.neutron.is_extension_supported(self.request,
                                                  extension_alias='qos'):
                qos_policies = api.neutron.policy_list(self.request)
        except Exception:
            msg = _("Unable to retrieve information about the "
                    "policies' qos policies.")
            exceptions.handle(self.request, msg)
        return {q.id: q.name for q in qos_policies}

    def get_data(self):
        """Fetch RBAC policies and annotate each with tenant/object names."""
        try:
            rbac_policies = api.neutron.rbac_policy_list(self.request)
        except Exception:
            rbac_policies = []
            messages.error(self.request,
                           _("Unable to retrieve RBAC policies."))
        if rbac_policies:
            tenant_dict = self._get_tenants()
            network_dict = self._get_networks()
            qos_policy_dict = self._get_qos_policies()
            for policy in rbac_policies:
                # Fall back to the raw id whenever the name is unknown.
                policy.tenant_name = tenant_dict.get(policy.tenant_id,
                                                     policy.tenant_id)
                policy.target_tenant_name = tenant_dict.get(policy.target_tenant,
                                                            policy.target_tenant)
                if policy.object_type == "network":
                    policy.object_name = network_dict.get(policy.object_id,
                                                          policy.object_id)
                elif policy.object_type == "qos_policy":
                    policy.object_name = qos_policy_dict.get(policy.object_id,
                                                             policy.object_id)
        return rbac_policies
class CreateView(forms.ModalFormView):
    """Modal form view for creating a new RBAC policy."""
    template_name = 'admin/rbac_policies/create.html'
    form_id = "create_rbac_policy_form"
    form_class = rbac_policy_forms.CreatePolicyForm
    submit_label = _("Create RBAC Policy")
    submit_url = reverse_lazy("horizon:admin:rbac_policies:create")
    success_url = reverse_lazy("horizon:admin:rbac_policies:index")
    page_title = _("Create A RBAC Policy")
class UpdateView(forms.ModalFormView):
    """Modal form view for editing an existing RBAC policy's target project."""
    context_object_name = 'rbac_policies'
    template_name = 'admin/rbac_policies/update.html'
    form_class = rbac_policy_forms.UpdatePolicyForm
    form_id = "update_rbac_policy_form"
    submit_label = _("Save Changes")
    submit_url = 'horizon:admin:rbac_policies:update'
    success_url = reverse_lazy('horizon:admin:rbac_policies:index')
    page_title = _("Update RBAC Policy")

    def get_context_data(self, **kwargs):
        """Inject the policy id and the resolved submit URL into the context."""
        context = super().get_context_data(**kwargs)
        policy_id = self.kwargs['rbac_policy_id']
        context["rbac_policy_id"] = policy_id
        context["submit_url"] = reverse(self.submit_url, args=(policy_id,))
        return context

    @memoized.memoized_method
    def _get_object(self, *args, **kwargs):
        """Fetch the RBAC policy, redirecting to the index on failure."""
        policy_id = self.kwargs['rbac_policy_id']
        try:
            return api.neutron.rbac_policy_get(self.request, policy_id)
        except Exception:
            msg = _('Unable to retrieve rbac policy details.')
            exceptions.handle(self.request, msg, redirect=self.success_url)

    def get_initial(self):
        """Seed the form with the current policy id and target project."""
        rbac_policy = self._get_object()
        return {'rbac_policy_id': rbac_policy['id'],
                'target_tenant': rbac_policy['target_tenant']}
class DetailView(tabs.TabView):
    """Tabbed detail view for a single RBAC policy; title shows its id."""
    tab_group_class = rbac_policy_tabs.RBACDetailsTabs
    template_name = 'horizon/common/_detail.html'
    page_title = "{{ rbac_policy.id }}"
| 5,695 | 1,746 |
from dataclasses import dataclass
@dataclass
class Config:
    """Blockchain configs"""
    # NOTE(review): these attributes carry no type annotations, so
    # @dataclass generates no fields or __init__ parameters — they behave
    # as plain class-level constants.
    # persistence configs
    BLOCKS_FILE_NAME = "data/blocks.json"
    MEMPOOL_FILE_NAME = "data/mempool.json"
    WALLET_BALANCE_FILE_NAME = "data/balance.json"
    SEED_WORDS_FILE_NAME = "data/seed_word_list.txt"
    NODE_LIST_FILE_NAME = "data/node_list.json"
    # transaction configs
    # NOTE(review): monetary values as binary floats — consider Decimal.
    TRANSACTION_FEE = 0.02
    MINING_REWARD_SENDER = "THE BLOCKCHAIN"
    MINING_REWARD_RECIPIENT = "MINING REWARD"
    MINING_REWARD = 50
| 522 | 217 |
import logging
import os
import argparse
from collections import defaultdict
# Module-level logger for training progress messages.
logger = logging.getLogger("paddle")
logger.setLevel(logging.INFO)
def parse_train_cmd():
    """Parse command-line arguments for the text-classification trainer.

    Returns:
        argparse.Namespace: parsed options — network type, data/dictionary
        paths, batch size, number of passes, and the model save directory.
    """
    parser = argparse.ArgumentParser(
        description="PaddlePaddle text classification example.")
    parser.add_argument(
        "--nn_type",
        type=str,
        help=("A flag that defines which type of network to use, "
              "available: [dnn, cnn]."),
        default="dnn")
    parser.add_argument(
        "--train_data_dir",
        type=str,
        required=False,
        help=("The path of training dataset (default: None). If this parameter "
              "is not set, paddle.dataset.imdb will be used."),
        default=None)
    parser.add_argument(
        "--test_data_dir",
        type=str,
        required=False,
        help=("The path of testing dataset (default: None). If this parameter "
              "is not set, paddle.dataset.imdb will be used."),
        default=None)
    parser.add_argument(
        "--word_dict",
        type=str,
        required=False,
        help=("The path of word dictionary (default: None). If this parameter "
              "is not set, paddle.dataset.imdb will be used. If this parameter "
              "is set, but the file does not exist, word dictionay "
              "will be built from the training data automatically."),
        default=None)
    parser.add_argument(
        "--label_dict",
        type=str,
        required=False,
        help=("The path of label dictionay (default: None).If this parameter "
              "is not set, paddle.dataset.imdb will be used. If this parameter "
              "is set, but the file does not exist, word dictionay "
              "will be built from the training data automatically."),
        default=None)
    parser.add_argument(
        "--batch_size",
        type=int,
        default=32,
        help="The number of training examples in one forward/backward pass.")
    parser.add_argument(
        "--num_passes",
        type=int,
        default=10,
        help="The number of passes to train the model.")
    parser.add_argument(
        "--model_save_dir",
        type=str,
        required=False,
        help=("The path to save the trained models."),
        default="models")
    return parser.parse_args()
def build_dict(data_dir,
               save_path,
               use_col=0,
               cutoff_fre=0,
               insert_extra_words=None):
    """Build a frequency-sorted vocabulary file from a directory of TSV data.

    Every regular file in ``data_dir`` is scanned; column ``use_col`` of each
    tab-separated line is whitespace-tokenized and token counts accumulated.
    The vocabulary is written to ``save_path`` as ``word<TAB>count`` lines in
    descending count order.

    Args:
        data_dir: directory whose regular files are scanned.
        save_path: output path of the dictionary file.
        use_col: index of the tab-separated column to tokenize.
        cutoff_fre: words whose count is below this value are dropped.
        insert_extra_words: words written first with a sentinel count of -1
            (e.g. '<unk>'); defaults to no extra words.
    """
    if insert_extra_words is None:  # avoid the shared mutable-default pitfall
        insert_extra_words = []
    values = defaultdict(int)
    for file_name in os.listdir(data_dir):
        file_path = os.path.join(data_dir, file_name)
        if not os.path.isfile(file_path):
            continue
        with open(file_path, "r") as fdata:
            for line in fdata:
                line_splits = line.strip().split("\t")
                # <= (not <) so a line with too few columns cannot raise IndexError
                if len(line_splits) <= use_col:
                    continue
                for w in line_splits[use_col].split():
                    values[w] += 1
    with open(save_path, "w") as f:
        for w in insert_extra_words:
            f.write("%s\t-1\n" % (w))
        # items() (not the Python-2-only iteritems()) keeps this runnable on Python 3
        for v, count in sorted(
                values.items(), key=lambda x: x[1], reverse=True):
            if count < cutoff_fre:
                break
            f.write("%s\t%d\n" % (v, count))
def load_dict(dict_path):
    """Return a word -> line-index mapping read from ``dict_path``.

    Each line is expected to be ``word<TAB>count``; only the word is used.
    Uses a context manager so the file handle is always closed (the original
    left the handle open).
    """
    with open(dict_path, "r") as f:
        return dict((line.strip().split("\t")[0], idx)
                    for idx, line in enumerate(f))
def load_reverse_dict(dict_path):
    """Return a line-index -> word mapping read from ``dict_path``.

    Inverse of load_dict; each line is ``word<TAB>count`` and only the word
    is used.  Uses a context manager so the file handle is always closed.
    """
    with open(dict_path, "r") as f:
        return dict((idx, line.strip().split("\t")[0])
                    for idx, line in enumerate(f))
| 3,585 | 1,041 |
from .models import Card
from .helpers import fetch_unidentified, populate_db
from django.shortcuts import render, redirect
from django.http import Http404, HttpResponse
import json
def index(request):
    """Send the user to the next unidentified card, or to 'done' if none are left."""
    pending = fetch_unidentified()
    if not pending:
        return redirect('done')
    return redirect('card', card_num=pending)
def done(request):
    """Return every card's (num, color, rank) triple as a JSON array."""
    cards = Card.objects.all()
    # renamed from 'list' so the builtin list() is not shadowed
    card_rows = [(card.num, card.color, card.rank) for card in cards]
    return HttpResponse(json.dumps(card_rows))
def reset(request):
    """Render a confirmation form (GET) or repopulate the database (POST).

    The POST branch only resets when the submitted 'confirmation' field is
    the word RESET (case-insensitive).
    """
    if request.method == 'GET':
        return render(request, 'reset.html')
    elif request.method == 'POST':
        confirmation_text = request.POST.get('confirmation')
        # Guard against a missing field: POST.get() returns None and
        # None.upper() would raise AttributeError.
        if confirmation_text and confirmation_text.upper() == "RESET":
            populate_db()
            return HttpResponse("reset")
        else:
            return HttpResponse("not reset")
def card(request, card_num):
    """Show a single training-data card (GET) or save its identification (POST).

    GET: renders card_done.html when the card already has a color recorded,
         otherwise card.html so the user can identify it.
    POST: either marks the card as trash ('not_card') or stores the submitted
          color/rank, then redirects back to the index.
    Raises Http404 when no Card with the given num exists.
    """
    if request.method == 'GET':
        try:
            card = Card.objects.get(num=card_num)
        except Card.DoesNotExist:
            raise Http404("Card does not exist")
        image_path = 'training_data/' + str(card_num) + '.png'
        if card.color != '':
            # already identified: show it and link to the following card
            next_num = card_num + 1  # NOTE(review): assumes card_num arrives as int (e.g. <int:card_num> URL converter) — confirm
            return render(request, 'cards/card_done.html',
                          {'identified_card': str(card), 'path': image_path,
                           'next': next_num })
        return render(request, 'cards/card.html',
                      {'path': image_path, 'card_num': card_num})
    elif request.method == 'POST':
        try:
            card = Card.objects.get(num=card_num)
            trash = request.POST.get('trash', None)
            color = request.POST.get('color', None)
            rank = request.POST.get('rank', None)
            if trash == 'true':
                # sentinel color marks images that are not cards at all
                card.color = 'not_card'
                card.save()
            elif color and rank:
                card.color = color
                card.rank = rank
                card.save()
        except Card.DoesNotExist:
            raise Http404("Card does not exist")
        return redirect('index')
| 2,188 | 616 |
"""Calculate opinion perplexity for different numbers of topics
Calculate opinion perplexity for the test set as described in [Fang et al.
2012] section 5.1.1.
This script should be run after experiment_number_of_topics.py.
Usage: python cptm/experiment_calculate_perplexity.py /path/to/experiment.json.
"""
import pandas as pd
import logging
from multiprocessing import Pool
import argparse
from cptm.utils.experiment import load_config, get_corpus, get_sampler
def calculate_perplexity(config, corpus, nPerplexity, nTopics):
    """Compute topic-word and opinion-word perplexity for one topic count.

    Returns a list of (nTopics, sample_index, tw_perplexity, ow_perplexity)
    tuples, one per entry in nPerplexity.
    """
    sampler = get_sampler(config, corpus, nTopics, initialize=False)
    perplexities = []
    for sample_idx in nPerplexity:
        logger.info('doing perplexity calculation ({}, {})'.format(nTopics, sample_idx))
        tw_perp, ow_perp = sampler.perplexity(index=sample_idx)
        perplexities.append((nTopics, sample_idx, tw_perp, ow_perp))
    logger.info('finished perplexity calculation for {} topics'.
                format(nTopics))
    return perplexities
# Script body: parse the experiment config, fan perplexity calculations out
# over a process pool, then aggregate the results into two CSV files.
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logging.getLogger('gensim').setLevel(logging.ERROR)
logging.getLogger('CPTCorpus').setLevel(logging.ERROR)
logging.getLogger('CPT_Gibbs').setLevel(logging.ERROR)
parser = argparse.ArgumentParser()
parser.add_argument('json', help='json file containing experiment '
                    'configuration.')
args = parser.parse_args()
config = load_config(args.json)
corpus = get_corpus(config)
nTopics = config.get('expNumTopics')
# list() is required on Python 3, where range() is lazy and cannot be
# concatenated to a list.
nPerplexity = [0] + list(range(9, config.get('nIter')+1, 10))
# calculate perplexity
pool = Pool(processes=config.get('nProcesses'))
results = [pool.apply_async(calculate_perplexity, args=(config, corpus,
                                                        nPerplexity, n))
           # reverse list, so longest calculation is started first
           for n in nTopics[::-1]]
pool.close()
pool.join()
# aggregate and save results
data = [p.get() for p in results]
topic_perp = pd.DataFrame(columns=nTopics, index=nPerplexity)
opinion_perp = pd.DataFrame(columns=nTopics, index=nPerplexity)
for result in data:
    for n, s, tw_perp, ow_perp in result:
        # .at[] replaces DataFrame.set_value(), which was removed in pandas 1.0
        topic_perp.at[s, n] = tw_perp
        opinion_perp.at[s, n] = ow_perp
outDir = config.get('outDir')
logger.info('writing perplexity results to {}'.format(outDir.format('')))
topic_perp.to_csv(outDir.format('perplexity_topic.csv'))
opinion_perp.to_csv(outDir.format('perplexity_opinion.csv'))
| 2,439 | 810 |
from flask_restful import Resource
import app
from app.services.healthcheck import HealthApi
class HealthApiV1(HealthApi):
    """Version-1 healthcheck resource; behavior is inherited unchanged from HealthApi."""
    pass
| 133 | 39 |
#Eulers method
import numpy as np
def dy(ynew, xnew, y, x, h):
    """Derivative f(x, y) for the ODE y' = y - x (edit per problem).

    ynew, xnew and h are accepted for interface uniformity but unused here.
    """
    return y - x
#Note: change the derivative function based on question!!!!!! Example: y-x
# --- inputs (hard-coded; see the commented input() calls for interactive use) ---
y0 = 0.5 #float(input"what is the y(0)?")
h = 0.1 #float(input"h?")
x_final = 0.3 #float(input"x_final")
#initiating input variables
x = 0
y = y0
# remember to change yn+1 and xn+1 values if you already know them!!!
ynew = 0
xnew = 0
i = 0
#####################################################
# NOTE(review): 'iterations' is computed but never used below.
iterations = x_final/h
# Euler stepping: y_{n+1} = y_n + (x_{n+1} - x_n) * f(x_n, y_n).
# NOTE(review): float accumulation in 'x' can run one step more or fewer than
# expected near x_final — confirm acceptable.
while x <= x_final:
    derivative_of_y = dy(ynew,xnew,y,x,h)
    xnew = x + h
    ynew = y + (xnew - x)*(derivative_of_y)
    print("iteration: ____ ")
    print(i)
    print("\n")
    print("x = ")
    print(xnew)
    print("\n")
    print("y = ")
    print(ynew)
    x = xnew
    y = ynew
    i+=1
| 819 | 330 |
"""
Contains Net and NetDictionary class for creating a random collection of CNN structures
or loading a previously created collection.
"""
from __future__ import division, print_function
from random import random
import os.path
import torch
from torch import nn
from torch import optim
from torch.nn import functional as F
import numpy as np
from numpy.random import randint as r_i
from tqdm import tqdm
DEBUG = False #prints tensor size after each network layer during network creation
# Run on the first GPU when CUDA is available, otherwise fall back to CPU.
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.cuda.is_available():
    torch.cuda.set_device(0)  # pin all CUDA work to device 0
else:
    print('**** CUDA not available - continuing with CPU ****')
#global classes
class Net(nn.Module):
    """
    Build a pytorch module using eval() on an incoming model tensor and lists of
    eval strings for layers and params.

    The constructor traces a test tensor through the requested functional layers
    to infer per-layer input sizes, then stores (a) nn.Module layers in
    self.lyrs and (b) eval strings in self.fwdlyrs that forward() executes.
    """
    def __init__(self, modelinputtensor, layerlist, layerparams, **kwargs):
        """
        Args:
            modelinputtensor: example model input tensor (including an arbitrary batch dimension)
            layerlist: list of pytorch nn functions as their 'F' namespace equivalents.
                Example: 'nn.MaxPool2d' should be supplied as 'F.max_pool2d'
            layerparams: list of _independent_ params in their nn form, each passed
                as a tuple.  The first conv2d layer has 3 params
                (in_channels, out_channels, kernel_size); subsequent conv2d layers
                have 2 (out_channels, kernel_size) since in_channels comes from the
                previous layer.  Pooling layers take (x, y) window sizes.  Linear
                layers take a single out-features value.
        Kwargs:
            activations: list of activation eval strings for forward layers; must
                match layerlist in length even though entries for pooling layers
                are ignored and the final layer's activation is always replaced
                (forward() applies sigmoid to the last output).
        """
        super(Net, self).__init__()
        # default every layer to relu when no activations are supplied
        self.activations = kwargs.get('activations', ['F.relu' for layer in layerlist])
        self.lyrs, self.fwdlyrs = self.get_layers(modelinputtensor, layerlist, layerparams, self.activations, DEBUG)
    def forward(self, x):
        """Execute the pre-built eval-string layers in order; output squashed by sigmoid."""
        for f in self.fwdlyrs:
            x = eval(f)
        return torch.sigmoid(x)
    def get_layers(self, testtensor, funcs, params, activations, debug):
        """
        Build network layers from supplied test tensor, funcs, and param eval strings.

        Runs the test tensor through each functional layer (with random weights)
        so each layer's input size can be read off the traced tensor.  Linear
        layers whose out-features exceed their in-features are eliminated.
        Returns (nn.ModuleList of layers, list of forward eval strings).
        """
        initlayers = nn.ModuleList()
        fwdlayers = list()
        if debug == 1:
            print(testtensor.size())
        # NOTE(review): the next assignment is dead — it is immediately
        # overwritten by None (None marks "first layer, params given in full").
        lastsize = testtensor.size()
        lastsize = None
        lyr = 0
        with torch.no_grad():
            for fn, pa in zip(funcs, params):
                if lastsize is not None:
                    # prepend the inferred in-size for non-first layers
                    if fn.__name__ == 'conv2d':
                        pa = (lastsize[1], pa[0], pa[1])
                    elif fn.__name__ == 'linear':
                        if not testtensor.ndim == 2:
                            # flatten conv output before the first linear layer
                            testtensor = testtensor.view(-1, self.num_flat_features(testtensor))
                            fwdlayers.append("x.view(-1,self.num_flat_features(x))")
                            lastsize = testtensor.size()
                        pa = (lastsize[1], pa)
                # build an eval string for a random weight tensor of the right shape
                if fn.__name__ == 'conv2d':
                    paeval = ",".join(tuple(map(str, (pa[1], pa[0], pa[2], pa[2]))))
                    paeval = "torch.tensor(np.random.rand(" + paeval + "), dtype=torch.float32)"
                elif fn.__name__ == 'max_pool2d':
                    paeval = ",".join(tuple(map(str, pa)))
                elif fn.__name__ == 'linear':
                    paeval = ",".join(tuple(map(str, (pa[1], pa[0]))))
                    paeval = "torch.tensor(np.random.rand(" + paeval + "),dtype=torch.float32)"
                # keep the layer unless it is a linear layer that would expand features
                if not fn.__name__ == 'linear' or pa[0] > pa[1]:
                    testtensor = fn(testtensor, eval(paeval))
                    lastsize = testtensor.size()
                    initlayers.append(eval(self.__get_init_equivalent(fn.__name__, pa)))
                    fwdlayers.append(self.__get_fwd_equivalent(fn.__name__, lyr))
                    lyr += 1
                    if debug == 1:
                        print(testtensor.size())
                elif debug == 1:
                    print('NetDictionary: Eliminating linear layer - out features > previous layer')
        # strip the activation from the last layer; forward() applies sigmoid instead
        fwdlayers[-1] = 'self.lyrs[' + str(lyr - 1) + '](x)'
        return initlayers, fwdlayers
    def num_flat_features(self, x):
        """
        Calculate number of flat features in a given net layer.
        Useful for transitioning between conv and linear layers.
        """
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
    def __get_init_equivalent(self, funcname, initparams):
        """
        Construct an eval string (e.g. 'nn.Conv2d(3,8,5)') for the nn.Module
        constructor that matches a functional layer name.
        """
        return 'nn.' + ''.join([val.capitalize()
                                for val in funcname.split('_')
                                ]) + '(' + ",".join(tuple(map(str, initparams))) + ')'
    def __get_fwd_equivalent(self, funcname, lyrnum):
        """
        Construct the eval string executed in forward() for layer lyrnum,
        wrapping it in its activation unless it is a pooling layer.
        """
        if not funcname == 'max_pool2d':
            return self.activations[lyrnum] + '(self.lyrs[' + str(lyrnum) + '](x))'
        else:
            return 'self.lyrs[' + str(lyrnum) + '](x)'
class NetDictionary(dict):
    """
    Holds a dictionary of Net with functions to build a model tensor and
    random layer and param lists.

    Keys are '<net_number>-<optimizer label>'; each value is a dict holding the
    Net instance plus everything needed to train, export and reload it.
    """
    def __init__(self, network_count, test_tensor, total_labels, import_export_filename, **kwargs):
        """
        Initialize a dictionary of randomly structured CNNs to test various network configurations.

        Args:
            network_count: number of networks to generate
            test_tensor: a tensor that can be used to construct network layers
            total_labels: the number of labels being predicted for the networks
            import_export_filename: if this file exists on initialization, the
                information in the file is used to reconstruct a prior network set.
        Kwargs:
            optimizers: list of tuples of form (eval string for optimizer creation, label);
                default=[("optim.SGD(d['net'].parameters(), lr=0.0001, momentum=0.9)", "SGD"),
                         ("optim.Adam(d['net'].parameters(), lr=0.0001)", "Adam"),
                         ("optim.Adam(d['net'].parameters(), lr=0.00001)", "Adam1")]
            force_rebuild: recreate networks even if import_export_filename exists (default False)
            force_training: when False, training of imported networks is bypassed (default True)
            conv_layer_activation: activation used by conv layers (default 'F.relu')
            pooling_probability: approximate fraction of random networks that get a
                pooling layer (default 0.5)
            first_conv_layer_depth (4), max_conv_layers (5), min_conv_layers (1),
            max_kernel_size (7), min_kernel_size (3), max_out_channels (12),
            min_out_channels (4), linear_layer_activation ('F.relu'),
            init_linear_out_features (1000), linear_feature_deadband (20),
            max_layer_divisor (20), min_layer_divisor (4)
        """
        super(NetDictionary, self).__init__()
        self.net_count = network_count
        self.label_count = total_labels
        self.import_export_filename = import_export_filename
        self.__test_tensor = test_tensor
        self._trained = False
        self.force_rebuild = kwargs.get('force_rebuild', False)
        self.force_training = kwargs.get('force_training', True)
        self.pooling_probability = kwargs.get('pooling_probability', 0.5)
        # Reload from disk only when the file exists and a rebuild was not forced.
        self.init_from_file = os.path.exists(import_export_filename) and not self.force_rebuild
        # NOTE(review): 'not self.force_rebuild' is already folded into
        # init_from_file, so the second check below is redundant (harmless).
        if self.init_from_file and not self.force_rebuild:
            self.__import_networks()
        else:
            self.__build_networks(**kwargs)
    def __import_networks(self):
        """
        Read layer info and net state dicts from disk and rebuild each Net.
        """
        net_info = torch.load(self.import_export_filename)
        self.__options = net_info['options'].copy()
        self.optimizers = self.__options['optimizers']
        for n_key, n_dict in net_info['state_dicts'].items():
            d = dict()
            d['net_number'] = net_info['net_numbers'][n_key]
            d['func_list'] = net_info['func_lists'][n_key]
            d['params'] = net_info['params'][n_key]
            d['activations'] = net_info['activations'][n_key]
            # func_list holds eval strings like 'F.conv2d'; resolve to callables
            funcs = [eval(f) for f in d['func_list']]
            d['net'] = Net(self.__test_tensor, funcs, d['params'],activations=d['activations'])
            d['net'].load_state_dict(n_dict)
            d['optimizer_type'] = net_info['optimizer_types'][n_key]
            d['criterion'] = nn.BCELoss()
            # recreate the optimizer whose label matches the stored type
            d['optimizer'] = eval([optim[0] for optim in self.optimizers if optim[1] == d['optimizer_type']][0])
            d['loss_dictionary'] = net_info['loss_dictionaries'][n_key]
            self.__setitem__(n_key, d)
        self._trained = True
    def __build_networks(self, **kwargs):
        """
        build a new set of randomized networks
        """
        self.__options = {
            'optimizers': kwargs.get('optimizers',
                                     [("optim.SGD(d['net'].parameters(), lr=0.0001, momentum=0.9)",
                                       "SGD"),
                                      ("optim.Adam(d['net'].parameters(), lr=0.0001)", "Adam"),
                                      ("optim.Adam(d['net'].parameters(), lr=0.00001)", "Adam1"),
                                      ]),
            'convolution_layer_options': {
                'activation' : kwargs.get('conv_layer_activation', 'F.relu'),
                'first_layer_depth' : kwargs.get('first_conv_layer_depth', 4),
                'max_layers' : kwargs.get('max_conv_layers', 5),
                'min_layers' : kwargs.get('min_conv_layers', 1),
                'max_kernel_size' : kwargs.get('max_kernel_size', 7),
                'min_kernel_size' : kwargs.get('min_kernel_size', 3),
                'max_out_channels' : kwargs.get('max_out_channels', 12),
                'min_out_channels' : kwargs.get('min_out_channels', 4),
            },
            'linear_layer_options': {
                'activation' : kwargs.get('linear_layer_activation', 'F.relu'),
                'init_out_features' : kwargs.get('init_linear_out_features', 1000),
                'feature_deadband' : kwargs.get('linear_feature_deadband', 20),
                'max_layer_divisor' : kwargs.get('max_layer_divisor', 20),
                'min_layer_divisor' : kwargs.get('min_layer_divisor', 4),
            },
        }
        self.optimizers = self.__options['optimizers']
        for i in tqdm(range(self.net_count)):
            cfs, cps = self.__get_convolution_layers(self.__options['convolution_layer_options'])
            lfs, lps = self.__get_linear_layers(self.__options['linear_layer_options'])
            funcs = cfs
            params = cps
            activations = [self.__options['convolution_layer_options']['activation'] for f in cfs]
            # randomly insert one pooling layer between the conv and linear stacks
            if (random() < self.pooling_probability):
                funcs.extend([F.max_pool2d])
                pool_size = np.random.randint(2,4)
                activations.extend(['F.relu'])
                params.extend([(pool_size, pool_size)])
            funcs.extend(lfs)
            activations.extend([self.__options['linear_layer_options']['activation'] for f in lfs])
            func_list = ['F.' + f.__name__ for f in funcs]
            params.extend(lps)
            # one entry per optimizer so every structure is trained with each optimizer
            for opt in self.optimizers:
                d = dict()
                d['net'] = Net(self.__test_tensor, funcs, params, activations=activations)
                d['net_number'] = i
                d['func_list'] = func_list
                d['params'] = params
                d['activations'] = activations
                d['optimizer_type'] = opt[1]
                d['criterion'] = nn.BCELoss()
                d['optimizer'] = eval(opt[0])
                self.__setitem__(str(i) + '-' + opt[1], d)
    def __get_convolution_layers(self, c):
        """
        Dynamically create a list of convolution layers. Parameters are used to manage the size,
        complexity, and structure of each layer. NEEDS IMPROVEMENT.
        """
        fncs, parms = list(),list()
        # first conv layer carries an explicit input depth; later layers infer it
        fncs.append(F.conv2d)
        parms.append((c['first_layer_depth'],
                      r_i(c['min_out_channels'], c['max_out_channels'] + 1),
                      r_i(c['min_kernel_size'], c['max_kernel_size']+1)))
        for i in range(r_i(c['min_layers']-1,c['max_layers'])):
            fncs.append(F.conv2d)
            parms.append((r_i(c['min_out_channels'],c['max_out_channels'] + 1),
                          r_i(c['min_kernel_size'], c['max_kernel_size']+1)))
        return fncs, parms
    def __get_linear_layers(self, d):
        """
        Dynamically create a list of linear layers.
        Parameters are used to manage the size of each layer.
        NEEDS IMPROVEMENT.
        """
        fncs, parms = list(), list()
        fncs.append(F.linear)
        parms.append(d['init_out_features'])
        # shrink the feature count by a random divisor until it nears the label count
        nextoutfeatures = int(d['init_out_features']/r_i(d['min_layer_divisor'],
                                                         d['max_layer_divisor'] + 1))
        while nextoutfeatures > self.label_count + d['feature_deadband']:
            fncs.append(F.linear)
            parms.append(nextoutfeatures)
            nextoutfeatures = int(nextoutfeatures/r_i(d['min_layer_divisor'],
                                                      d['max_layer_divisor'] + 1))
        # final layer always maps to the label count
        fncs.append(F.linear)
        parms.append(self.label_count)
        return fncs,parms
    def train_validate_networks(self, train_data, validation_images, validation_labels, loss_recording_rate):
        """
        Train every net on train_data, then measure its validation loss.

        Losses are recorded every loss_recording_rate batches into each net's
        'loss_dictionary'.  Training and validation are skipped together for
        networks reloaded from disk unless force_training is set.
        """
        for k, d in self.items():
            net = d['net']
            net.to(DEVICE)
            net.train()
            criterion = d['criterion']
            optimizer = d['optimizer']
            train_losses = []
            validation_losses = []
            last_loss = 0.0
            running_loss = 0.0
            if self.force_training or not self.init_from_file:
                pbar = tqdm(enumerate(train_data), total=len(train_data))
                for i, data in pbar:
                    #get the inputs; data is a list of [inputs, labels]
                    inputs, labels = data
                    inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
                    #zero the parameter gradients
                    optimizer.zero_grad()
                    #forward + backward + optimize
                    outputs = net(inputs)
                    loss = criterion(outputs, labels)
                    loss.backward()
                    optimizer.step()
                    running_loss += loss.item()
                    # record the mean loss over the last loss_recording_rate batches
                    if i % loss_recording_rate == loss_recording_rate - 1:
                        train_losses.append((i + 1, (running_loss - last_loss)/loss_recording_rate))
                        pbar.set_description(desc='net name: %s; loss: %.3f' % (k, running_loss/(i + 1)))
                        pbar.update()
                        last_loss = running_loss
                last_loss = 0.0
                valid_loss = 0.0
                net.eval()
                with torch.no_grad():
                    pbar = tqdm(enumerate(zip(validation_images,validation_labels)),total=len(validation_labels))
                    for j, (v_in, v_lab) in pbar:
                        v_in, v_lab = v_in.to(DEVICE), v_lab.to(DEVICE)
                        outputs = net(v_in)
                        loss = criterion(outputs, v_lab)
                        valid_loss += loss.item()
                        if j % loss_recording_rate == loss_recording_rate - 1:
                            validation_losses.append((j + 1, (valid_loss - last_loss)/loss_recording_rate))
                            last_loss = valid_loss
                            pbar.set_description(desc='net name: %s; loss: %.3f; validation loss: %.3f'
                                                 % (k, running_loss/len(train_data), valid_loss/(j + 1)))
                            pbar.update()
                self[k]['loss_dictionary'] = {'train_losses':train_losses,
                                              'validation_losses': validation_losses,
                                              }
            net.cpu()
        self._trained = True
    def export_networks(self):
        """
        Write info required to reconstruct this NetDictionary to disk.
        """
        state_dicts = {key : d['net'].state_dict() for key, d in self.items()}
        net_numbers = {key : d['net_number'] for key, d in self.items()}
        func_lists = {key : d['func_list'] for key, d in self.items()}
        params = {key : d['params'] for key, d in self.items()}
        activations = {key : d['activations'] for key, d in self.items()}
        optimizer_types = {key : d['optimizer_type'] for key, d in self.items()}
        loss_dictionaries = {key : d['loss_dictionary'] for key, d in self.items()}
        torch.save({'state_dicts':state_dicts,
                    'net_numbers':net_numbers,
                    'func_lists':func_lists,
                    'params':params,
                    'activations':activations,
                    'optimizer_types':optimizer_types,
                    'options':self.__options,
                    'test_tensor':self.__test_tensor,
                    'loss_dictionaries':loss_dictionaries,
                    }, self.import_export_filename)
import asyncio
import logging
import os
from vmshepherd.drivers import Drivers
from vmshepherd.http import WebServer
from vmshepherd.utils import gen_id, prefix_logging
from vmshepherd.worker import Worker
class VmShepherd:
    """Application root: wires runtime/preset drivers, the worker loop and the optional HTTP server."""
    def __init__(self, config):
        self.config = config
        self.root_dir = os.path.dirname(__file__)
        # short random id; setup_logging() prefixes log records with it
        self.instance_id = gen_id(rnd_length=5)
        self.setup_logging()
        # driver managing the VM runtime backend
        self.runtime_manager = Drivers.get(
            'runtime', self.config['runtime'],
            instance_id=self.instance_id
        )
        # driver providing preset definitions, merged with global defaults
        self.preset_manager = Drivers.get(
            'presets', self.config['presets'],
            runtime=self.runtime_manager,
            defaults=self.config.get('defaults', {})
        )
        self.worker = Worker(
            runtime=self.runtime_manager, presets=self.preset_manager,
            interval=int(self.config.get('worker_interval', 5)),
            autostart=self.config.get('autostart', True)
        )
        http_conf = self.config.get('http', None)
        if http_conf:
            # start the web server in the background on the running event loop
            self.web = WebServer(self, http_conf)
            asyncio.ensure_future(self.web.start())
    async def run(self, run_once=False):
        """Run the worker loop a single time (run_once=True) or forever."""
        if run_once:
            await self.worker.run_once()
        else:
            await self.worker.run_forever()
    def setup_logging(self):
        """Set the root logger level from config and prefix records with the instance id."""
        logger = logging.getLogger()
        log_level = self.config.get('log_level', 'info').upper()
        logger.setLevel(log_level)
        if logger.getEffectiveLevel() == logging.DEBUG:
            logging.debug('DEBUG mode enabled')
        prefix_logging(self.instance_id)
    def reload(self, with_config=None):
        """Apply a new (or re-read) config to the live drivers and flush cached drivers."""
        self.config = with_config or self.config
        self.runtime_manager.reconfigure(self.config.get('runtime'))
        self.preset_manager.reconfigure(self.config.get('presets'), self.config.get('defaults'))
        Drivers.flush()
| 1,917 | 583 |
"""Classes to describe different kinds of Slack specific event."""
import json
from opsdroid.events import Message
class Blocks(Message):
    """A Slack blocks message.

    Slack uses blocks to add advanced interactivity and formatting to
    messages (https://api.slack.com/messaging/interactivity).  Blocks are
    provided to Slack as JSON, which it renders.

    Args:
        blocks (string or dict): String or dict of json for blocks.
        room (string, optional): Name of the room or chat channel the
            message was sent in.
        connector (Connector, optional): Connector object used to interact
            with the given chat service.
        raw_event (dict, optional): Raw message as provided by the chat
            service.  None by default.

    Attributes:
        created: Local date and time that the message object was created.
        user: Name of the user sending the message.
        room: Name of the room or chat channel the message was sent in.
        connector: Connector object used to interact with the chat service.
        blocks: Blocks JSON as a string.
        raw_event: Raw message provided by the chat service.
        raw_match: Match object for the search the message was matched
            against, e.g. a regular expression or natural language intent.
        responded_to: Boolean, False initially; True once the event has been
            responded to.
    """
    def __init__(self, blocks, *args, **kwargs):
        """Create object with minimum properties."""
        super().__init__("", *args, **kwargs)
        # Slack expects a JSON string, so serialise list payloads up front.
        self.blocks = json.dumps(blocks) if isinstance(blocks, list) else blocks
| 1,762 | 415 |
from .object_tracker import ObjectTracker
| 42 | 12 |
from os.path import join
import argparse
import pickle
import warnings
import pandas as pd
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.models import load_model
import utils
from malconv import Malconv
from preprocess import preprocess
warnings.filterwarnings("ignore")
# Command-line interface for the Malconv training script.
# Fixed misspellings in user-visible help text: 'gpy'->'gpu', 'legnth'->'length'.
parser = argparse.ArgumentParser(description='Malconv-keras classifier training')
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--verbose', type=int, default=1)
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--limit', type=float, default=0., help="limit gpu memory percentage")
parser.add_argument('--max_len', type=int, default=200000, help="model input length")
parser.add_argument('--win_size', type=int, default=500)
parser.add_argument('--val_size', type=float, default=0.1, help="validation percentage")
parser.add_argument('--save_path', type=str, default='../saved/', help='Directory to save model and log')
parser.add_argument('--model_path', type=str, default='../saved/malconv.h5', help="model to resume")
parser.add_argument('--save_best', action='store_true', help="Save model with best validation accuracy")
parser.add_argument('--resume', action='store_true')
parser.add_argument('csv', type=str)  # CSV with one (sample, label) pair per row
def train(model, max_len=200000, batch_size=64, verbose=True, epochs=100, save_path='../saved/', save_best=True):
    """Train ``model`` with early stopping and checkpointing; return the Keras History.

    NOTE(review): this function reads x_train/y_train/x_test/y_test from
    module-level globals (set in the __main__ block) rather than taking them
    as parameters — confirm before reusing it outside this script.
    """
    # callbacks
    ear = EarlyStopping(monitor='val_acc', patience=5)
    mcp = ModelCheckpoint(join(save_path, 'malconv.h5'),
                          monitor="val_acc",
                          save_best_only=save_best,
                          save_weights_only=False)
    print("[*] x_train length: ", len(x_train))
    print("[*] y_train length: ", len(y_train))
    print("[*] x_test length: ", len(x_test))
    print("[*] y_test length: ", len(y_test))
    # NOTE(review): this generator is only used for the debug print below;
    # fit_generator is given a fresh generator of its own.
    validation_data=utils.data_generator(x_test, y_test, max_len, batch_size)
    print("[*] validation_data: ", validation_data)
    history = model.fit_generator(
        utils.data_generator(x_train, y_train, max_len, batch_size, shuffle=True),
        steps_per_epoch=len(x_train)//batch_size + 1,
        epochs=epochs,
        verbose=verbose,
        callbacks=[ear, mcp],
        validation_data=utils.data_generator(x_test, y_test, max_len, batch_size),
        validation_steps=len(x_test)//batch_size + 1)
    return history
if __name__ == '__main__':
    args = parser.parse_args()
    # limit gpu memory
    if args.limit > 0:
        utils.limit_gpu_memory(args.limit)
    print("[*] Flag0")
    # prepare model: resume from a saved model or build and compile a fresh one
    if args.resume:
        model = load_model(args.model_path)
    else:
        model = Malconv(args.max_len, args.win_size)
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
    print("[*] Flag1")
    # prepare data
    # preprocess is handled in utils.data_generator
    df = pd.read_csv(args.csv, header=None)
    data, label = df[0].values, df[1].values
    x_train, x_test, y_train, y_test = utils.train_test_split(data, label, args.val_size)
    print('Train on %d data, test on %d data' % (len(x_train), len(x_test)))
    print("[*] Flag2")
    history = train(model, args.max_len, args.batch_size, args.verbose, args.epochs, args.save_path, args.save_best)
    print("[*] Flag3")
    # persist per-epoch metrics for later inspection/plotting
    with open(join(args.save_path, 'history.pkl'), 'wb') as f:
        pickle.dump(history.history, f)
| 3,449 | 1,179 |
'''We will test all routegetter methods in this test suite'''
from os.path import join, abspath, sep
import unittest
import logging
import routesparser
from faker import Faker
LOG_FILE = join(sep.join(sep.split(abspath(__file__))[:-1]), 'log', 'testing', 'testing.log')
class RoutesGetterTests(unittest.TestCase):
    """Tests for routesparser.RouteGetter, hitting the live CyRide site."""
    @classmethod
    def setUpClass(cls):
        # one shared logger and getter for the whole class; Faker makes random URLs
        cls.log = logging.getLogger('RouteGetterTests')
        cls.log.setLevel(logging.DEBUG)
        cls.routegetter = routesparser.RouteGetter(url='http://www.cyride.com/index.aspx'
                                                   , payload={'page':1212})
        cls.data_generator = Faker()
    def setUp(self):
        # fresh random (almost certainly unreachable) URL for each test
        self.bad_url = self.data_generator.url()
    def test_cyride_request(self):
        '''we want to test that our request succeeds at cyride'''
        log = self.log.getChild('test_cyride_request')
        request = self.routegetter.request
        self.assertNotEqual(request.status_code, 404)
        log.debug('%s, %s', request.url, request)
    @unittest.expectedFailure
    def test_bad_url(self):
        # expected to fail: a random URL should not return 404 reliably
        log = self.log.getChild('test_bad_url')
        request = routesparser.get_request(self.bad_url)
        self.assertEqual(request.status_code, 404)
        log.debug(request.url, request)
class RoutesParserTests(unittest.TestCase):
    """Tests for routesparser.RouteParser's souped HTML output."""
    @classmethod
    def setUpClass(cls):
        cls.log = logging.getLogger('RouteParserTests')
        cls.log.setLevel(logging.DEBUG)
        cls.routeparser = routesparser.RouteParser()
    def test_souped_data(self):
        # pretty_html should be a parsed document (exposes .title.string)
        log = self.log.getChild('test_souped_data')
        pretty_html = self.routeparser.pretty_html
        self.assertIsNotNone(self.routeparser.pretty_html)
        log.info(pretty_html.title.string)
if __name__ == '__main__':
    # log to LOG_FILE (overwritten each run) and hand control to unittest
    logging.basicConfig(filename=LOG_FILE, filemode='w')
    unittest.main()
| 1,870 | 603 |
def parse(o, prefix=""):
    """Flatten a nested JSON-like value into (PREFIX, string) pairs.

    Scalars become a single (prefix, str_value) tuple (bool -> "1"/"0",
    None -> "").  Lists and dicts recurse, building upper-cased
    underscore-joined prefixes from indices/keys, and return a flat list of
    tuples.  Raises ValueError for unsupported types.
    """
    def _flatten(items):
        flat = []
        for entry in items:
            if isinstance(entry, list):
                flat.extend(_flatten(entry))
            else:
                flat.append(entry)
        return flat

    kind = type(o).__name__
    if kind == "str":
        return (prefix, o)
    if kind in ("int", "float"):
        return parse(str(o), prefix=prefix)
    if kind == "bool":
        return parse(1 if o else 0, prefix=prefix)
    if kind == "NoneType":
        return parse("", prefix=prefix)
    joiner = "_" if prefix else ""
    if kind == "list":
        return _flatten([parse(item, "{}{}{}".format(prefix, joiner, idx).upper())
                         for idx, item in enumerate(o)])
    if kind == "dict":
        return _flatten([parse(item, "{}{}{}".format(prefix, joiner, key).upper())
                         for key, item in o.items()])
    raise ValueError("type '{}' not supported".format(kind))
| 961 | 294 |
from setuptools import setup
VERSION = '0.0.4'  # package version; bump on each release
DESCRIPTION = 'Hello world checking'
# Setting up
setup(
    name="hello_world",
    version=VERSION,
    author="Kishan Tongrao",
    author_email="kishan.tongs@gmail.com",
    description=DESCRIPTION,
    # NOTE(review): the content type is declared but no long_description is
    # supplied — confirm whether a README should be wired in here.
    long_description_content_type="text/markdown",
    packages=['hello_world'],
    include_package_data=True,
    install_requires=[],  # no runtime dependencies
    classifiers=[
        "Development Status :: 1 - Planning",
        "Intended Audience :: Developers",
        "Programming Language :: Python :: 3",
        "Operating System :: Unix",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows",
    ]
)
import hashlib
def str_hash(s):
    """Deterministic non-negative integer hash of a string, below 2**62 - 1.

    Unlike the builtin hash(), this is stable across processes because it is
    derived from the SHA-224 digest of the UTF-8 encoded string.
    """
    digest_hex = hashlib.sha224(s.encode('utf-8')).hexdigest()
    return int(digest_hex, 16) % ((1 << 62) - 1)
| 123 | 59 |
# -*- coding:utf-8 -*-
import certifi
import pycurl
import requests
import os
import json
import uuid
from StringIO import StringIO
def byteify(input_data):
    """Recursively convert unicode strings in a parsed-JSON structure to UTF-8 str.

    Python 2 only: relies on dict.iteritems() and the unicode builtin.
    Non-string leaves are returned unchanged.
    """
    # convert json to list
    if isinstance(input_data, dict):
        return {byteify(key): byteify(value) for key, value in input_data.iteritems()}
    elif isinstance(input_data, list):
        return [byteify(element) for element in input_data]
    elif isinstance(input_data, unicode):
        return input_data.encode('utf-8')
    else:
        return input_data
def read_in_chunks(file_object, chunk_size=1024):
    """Yield successive chunks (default 1 KiB) read from an open file object."""
    while True:
        chunk = file_object.read(chunk_size)
        if not chunk:
            return
        yield chunk
class Authentication:
    """Obtain an OAuth access token for the Bing speech service via pycurl (Python 2)."""
    # get token
    def __init__(self, client_id, client_secret):
        """POST the client credentials to the token endpoint and cache the parsed reply."""
        self.AccessUrl = "https://oxford-speech.cloudapp.net/token/issueToken"
        self._clientId = client_id
        # provided by MS
        self._clientSecret = client_secret
        # provided by MS
        self.request_data = 'grant_type=client_credentials&client_id=' + self._clientId+'&client_secret='
        self.request_data += self._clientSecret + '&scope=https://speech.platform.bing.com'
        # opt must be a string object not a unicode object
        data_l = len(self.request_data)
        http_header = [
            "Content-Type:application/x-www-form-urlencoded"
        ]
        storage = StringIO()
        # the way to get response and print it
        c = pycurl.Curl()
        # NOTE(review): certifi.old_where() has been removed from recent certifi
        # releases — confirm the pinned version still provides it.
        c.setopt(pycurl.CAINFO, certifi.old_where())
        c.setopt(pycurl.URL, self.AccessUrl)
        c.setopt(c.HTTPHEADER, http_header)
        c.setopt(c.POST, 1)
        c.setopt(c.POSTFIELDSIZE, data_l)
        c.setopt(c.CONNECTTIMEOUT, 30)
        c.setopt(c.TIMEOUT, 30)
        c.setopt(c.POSTFIELDS, self.request_data)
        # --------------------------------------------
        c.setopt(c.WRITEFUNCTION, storage.write)
        # --------------------------------------------
        c.perform()
        c.close()
        body = storage.getvalue()
        storage.close()
        # the token reply is JSON; byteify converts its unicode strings to utf-8 str
        self._token = byteify(json.loads(body.decode()))
    def getAccessToken(self):
        """Return the raw access_token string from the cached token reply."""
        return self._token["access_token"]
class MsSpeechRequest:
    """Send a WAV file to the Bing speech recognition endpoint (Python 2).

    NOTE(review): the appid and instanceid query parameters are left empty
    and must be filled in before the request can succeed — confirm.
    """
    def __init__(self, audiofile, audioSamplerate=16000, clientid='', clientsecret='', locale='zh-CN', deviceOS='Rasbian'):
        """Build the recognition URL and fetch an access token.

        audiofile is assumed to be a path to a PCM WAV file — confirm; the
        declared samplerate only affects the Content-Type header.
        """
        if audiofile == None:
            print 'audio input wrong'
            return
        try:
            self._auth = Authentication(clientid, clientsecret)
        except Exception as e:
            print 'failed get access token.details:%s',e.__str__()
        self._RequestUri = "https://speech.platform.bing.com/recognize"
        self._RequestUri += "?scenarios=smd"
        self._RequestUri += "&appid="
        # input appid
        self._RequestUri += "&locale="+locale
        self._RequestUri += "&device.os="+deviceOS
        self._RequestUri += "&version=3.0"
        self._RequestUri += "&format=json"
        self._RequestUri += "&instanceid="
        # input instance id
        self._RequestUri += "&requestid="+str(uuid.uuid4())
        self._audioFile = audiofile
        self._audioSamplerate = audioSamplerate.__str__()
        self._token = self._auth.getAccessToken()
        # print self._token
        self._response = ''
    def post_request(self):
        """Stream the audio file to the service and store the (byteified) response text."""
        headers = {}
        headers['Accept'] = 'application/json;text/xml'
        headers['Content-Type'] = 'audio/wav; codec=\"audio/pcm\"; samplerate='+self._audioSamplerate
        headers['Authorization'] = 'Bearer '+'%s' % self._token
        try:
            with open(self._audioFile,'rb') as f:
                # chunked upload so large files are never read fully into memory
                r=requests.post(self._RequestUri, data=read_in_chunks(f), headers=headers, stream=True)
                print r
                self._response = byteify(r.text)
                print self._response
        except Exception as e:
            print 'failed get request response. Details:%s',e.__str__()
    def returnResult(self):
        """Fire the request and return the raw response text ('' on failure)."""
        self.post_request()
        return self._response
#---------------------
#作者:艾木的星辰
#来源:CSDN
#原文:https://blog.csdn.net/joyjun_1/article/details/52563713
#版权声明:本文为博主原创文章,转载请附上博文链接! | 4,354 | 1,397 |
# CannyStill.py
import cv2
import numpy as np
import os
###################################################################################################
def main():
    """Load image.jpg, show it next to its Canny edge map, wait for a key.

    Fix: the original used Python-2-only ``print`` statements; single-argument
    ``print(...)`` calls work identically on Python 2 and 3.
    """
    imgOriginal = cv2.imread("image.jpg")               # open image
    if imgOriginal is None:                             # imread returns None when the read fails
        print("error: image not read from file \n\n")   # print error message to std out
        os.system("pause")                              # pause so user can see error message (Windows-only)
        return                                          # and exit function (which exits program)

    imgGrayscale = cv2.cvtColor(imgOriginal, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    imgBlurred = cv2.GaussianBlur(imgGrayscale, (5, 5), 0)        # blur
    imgCanny = cv2.Canny(imgBlurred, 100, 200)                    # get Canny edges

    cv2.namedWindow("imgOriginal", cv2.WINDOW_AUTOSIZE)  # create windows; WINDOW_AUTOSIZE = fixed size
    cv2.namedWindow("imgCanny", cv2.WINDOW_AUTOSIZE)     # (WINDOW_NORMAL would allow resizing)
    cv2.imshow("imgOriginal", imgOriginal)               # show windows
    cv2.imshow("imgCanny", imgCanny)

    cv2.waitKey()            # hold windows open until user presses a key
    cv2.destroyAllWindows()  # remove windows from memory
    return

###################################################################################################
if __name__ == "__main__":
    main()
| 1,565 | 428 |
"""
Timeseries from DataFrame
=========================
"""
import seaborn as sns
sns.set(style="darkgrid")
# Example dataset bundled with seaborn (repeated-measure style timecourses).
gammas = sns.load_dataset("gammas")
# NOTE(review): tsplot was deprecated in seaborn 0.8 and removed in 0.12;
# modern seaborn would use lineplot. Verify the positional argument meaning
# against the installed seaborn version before relying on this example.
sns.tsplot(gammas, "timepoint", "subject", "ROI", "BOLD signal")
| 212 | 82 |
from datetime import date
import boundaries
# Register the 2011 British Columbia electoral-district shapefile set with
# the boundary loader. 'edname'/'edabbr' presumably name attributes in the
# Elections BC shapefile — confirm against the downloaded data.
boundaries.register('British Columbia electoral districts',
    domain='British Columbia',
    last_updated=date(2011, 12, 12),
    name_func=boundaries.attr('edname'),
    id_func=boundaries.attr('edabbr'),
    authority='Elections BC',
    source_url='http://www.elections.bc.ca/index.php/voting/electoral-maps-profiles/geographic-information-system-spatial-data-files-2011/',
    data_url='http://www.elections.bc.ca/docs/map/redis11/GIS/ED_Province.exe',
    encoding='iso-8859-1',
)
import logging
import magic
import os
from cms.medias.utils import get_file_type_size
from django.conf import settings
from django.core.files.uploadedfile import InMemoryUploadedFile
from . import settings as app_settings
from . utils import to_webp
logger = logging.getLogger(__name__)
FILETYPE_IMAGE = getattr(settings, 'FILETYPE_IMAGE',
app_settings.FILETYPE_IMAGE)
def set_file_meta(media_object):
    """Populate file_size and file_type on *media_object* from its stored file."""
    meta = get_file_type_size(media_object)
    media_object.file_size = meta['file_size']
    media_object.file_type = meta['mime_type']
def webp_image_optimizer(media_object):
    """Convert the object's freshly uploaded image to WebP, in place.

    Looks at the first populated field among ('file', 'image'); if its bytes
    sniff as one of FILETYPE_IMAGE, the upload is replaced with an in-memory
    WebP version and the object's size/type metadata is updated.
    Returns True on conversion, None otherwise.
    """
    # Pick whichever upload field this model actually uses.
    for field_name in ('file', 'image'):
        field = getattr(media_object, field_name, None)
        if field:
            break
    # No in-memory upload attached -> nothing to convert.
    if not getattr(field, '_file', None): # pragma: no cover
        return
    # Sniff the real content type from the bytes, not the file extension.
    mimetype = magic.Magic(mime=True).from_buffer(field._file.file.read())
    if mimetype in FILETYPE_IMAGE:
        field._file.seek(0)
        byte_io = to_webp(field._file)
        # Measure the converted payload size.
        byte_io.seek(0, os.SEEK_END)
        content_size = byte_io.tell()
        byte_io.seek(0)
        # Swap the extension for .webp and rebuild the uploaded-file wrapper.
        fname = '.'.join(field.name.split('.')[:-1]) + '.webp'
        field._file = InMemoryUploadedFile(file = byte_io,
                                           name = fname,
                                           content_type = 'image/webp',
                                           size = content_size,
                                           charset='utf-8',
                                           field_name = field_name)
        field._file._name = fname
        field.name = fname
        field._file.size = content_size
        field._file.content_type = 'image/webp'
        # Update model metadata too (otherwise nothing happens to the model).
        media_object.file_size = content_size
        media_object.file_type = 'image/webp'
        logger.info(f'Image {fname} converted from {mimetype} to {media_object.file_type}')
        return True
def remove_file(media_object):
    """Delete the media object's file from disk; failures are logged, not raised."""
    path = media_object.file.path
    try:
        os.remove(path)
    except Exception as exc:  # pragma: no cover
        logger.error('Media Hook remove_file: {} cannot be removed: {}'.format(path, exc))
| 2,228 | 674 |
from django_fsu import url
from . import views
# URL routes for the auth/profile views; `url` here comes from django_fsu,
# which presumably accepts flask-style rule strings — verify its docs.
urlpatterns = [
    url('login/', views.login, name='login'),
    url('logout/', views.logout, name='logout'),
    # NOTE(review): no trailing slash after <int:pk>, unlike the other
    # routes — confirm this is intentional.
    url('profile/<int:pk>', views.profile, name='profile'),
]
| 222 | 77 |
"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
APP = ['PocLibrary.py']      # main script to bundle into the .app
APP_NAME = "PocLibrary"
DATA_FILES = []              # no extra data files outside 'resources'
# py2app options: app icon, Info.plist metadata, packages to bundle whole,
# and the 'Library' directory copied into the bundle's Resources.
OPTIONS = {
    'iconfile': 'logo.icns',
    'plist': {
        'CFBundleName': APP_NAME,
        'CFBundleDisplayName': APP_NAME,
        'CFBundleGetInfoString': "Personal Poc Library",
        'CFBundleVersion': "1.0",
        'CFBundleShortVersionString': "1.0",
        'NSHumanReadableCopyright': u"Copyright © 2020, Coldsnap, All Rights Reserved"
    },
    'packages': ['wx','pyperclip'],
    'resources': 'Library'
}
setup(
    name=APP_NAME,
    app=APP,
    data_files=DATA_FILES,
    options={'py2app': OPTIONS},
    setup_requires=['py2app'],
)
| 747 | 273 |
# encoding: utf-8
import base64
import hashlib
import hmac
import re
import six
from six.moves.urllib.parse import quote
from Crypto.Cipher import AES
class Url(object):
    """Parses and serialises thumbor-style image URL option paths.

    A URL path is an ordered sequence of optional segments
    (debug/meta/trim/crop/fit-in/dimensions/alignment/smart/filters)
    followed by the image reference itself.

    Fix: the regex fragments containing ``\\d`` were plain strings, which is a
    deprecated invalid escape sequence in Python 3; they are raw strings now.
    """

    # Regex fragments for the individual URL segments; order matters in regex().
    unsafe_or_hash = r'(?:(?:(?P<unsafe>unsafe)|(?P<hash>[^/]{28,}?))/)?'
    debug = r'(?:(?P<debug>debug)/)?'
    meta = r'(?:(?P<meta>meta)/)?'
    trim = r'(?:(?P<trim>trim(?::(?:top-left|bottom-right))?(?::\d+)?)/)?'
    crop = r'(?:(?P<crop_left>\d+)x(?P<crop_top>\d+):(?P<crop_right>\d+)x(?P<crop_bottom>\d+)/)?'
    fit_in = r'(?:(?P<adaptive>adaptive-)?(?P<full>full-)?(?P<fit_in>fit-in)/)?'
    dimensions = r'(?:(?P<horizontal_flip>-)?(?P<width>(?:\d+|orig))?x(?P<vertical_flip>-)?(?P<height>(?:\d+|orig))?/)?'
    halign = r'(?:(?P<halign>left|right|center)/)?'
    valign = r'(?:(?P<valign>top|bottom|middle)/)?'
    smart = r'(?:(?P<smart>smart)/)?'
    filters = r'(?:filters:(?P<filters>.+?\))/)?'
    image = r'(?P<image>.+)'

    compiled_regex = None  # cache for the compiled unsigned-URL regex

    @classmethod
    def regex(cls, has_unsafe_or_hash=True):
        """Assemble the full URL regex; segment order is significant."""
        reg = ['/?']

        if has_unsafe_or_hash:
            reg.append(cls.unsafe_or_hash)
        reg.append(cls.debug)
        reg.append(cls.meta)
        reg.append(cls.trim)
        reg.append(cls.crop)
        reg.append(cls.fit_in)
        reg.append(cls.dimensions)
        reg.append(cls.halign)
        reg.append(cls.valign)
        reg.append(cls.smart)
        reg.append(cls.filters)
        reg.append(cls.image)

        return ''.join(reg)

    @classmethod
    def parse_decrypted(cls, url):
        """Parse a decrypted URL path into an options dict.

        Returns None when *url* does not match the expected layout.
        """
        if cls.compiled_regex:
            reg = cls.compiled_regex
        else:
            reg = cls.compiled_regex = re.compile(cls.regex(has_unsafe_or_hash=False))

        result = reg.match(url)

        if not result:
            return None

        result = result.groupdict()

        int_or_0 = lambda value: 0 if value is None else int(value)

        values = {
            'debug': result['debug'] == 'debug',
            'meta': result['meta'] == 'meta',
            'trim': result['trim'],
            'crop': {
                'left': int_or_0(result['crop_left']),
                'top': int_or_0(result['crop_top']),
                'right': int_or_0(result['crop_right']),
                'bottom': int_or_0(result['crop_bottom'])
            },
            # NOTE(review): the named groups capture 'adaptive-'/'full-'
            # (with the trailing hyphen), so these comparisons look
            # always-False; kept as-is to preserve behaviour — confirm
            # against upstream libthumbor before changing.
            'adaptive': result['adaptive'] == 'adaptive',
            'full': result['full'] == 'full',
            'fit_in': result['fit_in'] == 'fit-in',
            # 'orig' is kept literally; anything else becomes an int (0 when absent).
            'width': 'orig' if result['width'] == 'orig' else int_or_0(result['width']),
            'height': 'orig' if result['height'] == 'orig' else int_or_0(result['height']),
            'horizontal_flip': result['horizontal_flip'] == '-',
            'vertical_flip': result['vertical_flip'] == '-',
            'halign': result['halign'] or 'center',
            'valign': result['valign'] or 'middle',
            'smart': result['smart'] == 'smart',
            'filters': result['filters'] or '',
            'image': 'image' in result and result['image'] or None
        }

        return values

    @classmethod
    def generate_options(cls,
                         debug=False,
                         width=0,
                         height=0,
                         smart=False,
                         meta=False,
                         trim=None,
                         adaptive=False,
                         full=False,
                         fit_in=False,
                         horizontal_flip=False,
                         vertical_flip=False,
                         halign='center',
                         valign='middle',
                         crop_left=None,
                         crop_top=None,
                         crop_right=None,
                         crop_bottom=None,
                         filters=None):
        """Serialise option values into a URL path fragment (no image part).

        Default alignments ('center'/'middle') and zero dimensions are
        omitted from the output.
        """
        url = []

        if debug:
            url.append('debug')
        if meta:
            url.append('meta')

        if trim:
            # trim may be a bare boolean or a detailed spec string.
            if isinstance(trim, bool):
                url.append('trim')
            else:
                url.append('trim:%s' % trim)

        crop = crop_left or crop_top or crop_right or crop_bottom
        if crop:
            url.append('%sx%s:%sx%s' % (
                crop_left,
                crop_top,
                crop_right,
                crop_bottom
            ))

        if fit_in:
            fit_ops = []
            if adaptive:
                fit_ops.append('adaptive')
            if full:
                fit_ops.append('full')
            fit_ops.append('fit-in')
            url.append('-'.join(fit_ops))

        # Flips are encoded as a '-' prefix on the corresponding dimension.
        if horizontal_flip:
            width = '-%s' % width
        if vertical_flip:
            height = '-%s' % height

        if width or height:
            url.append('%sx%s' % (width, height))

        if halign != 'center':
            url.append(halign)
        if valign != 'middle':
            url.append(valign)

        if smart:
            url.append('smart')

        if filters:
            url.append('filters:%s' % filters)

        return '/'.join(url)

    @classmethod
    def encode_url(kls, url):
        """Percent-encode *url*, leaving thumbor's reserved characters intact."""
        return quote(url, '/:?%=&()~",\'')
class Cryptor(object):
    """Encrypts/decrypts whole thumbor option URLs with AES-128 (legacy scheme).

    Fix: ``AES.new`` is now called with an explicit ``AES.MODE_ECB``. ECB was
    PyCrypto's implicit default, and pycryptodome (the maintained fork)
    requires the mode argument — the old call raises TypeError there.
    """

    def __init__(self, security_key):
        if isinstance(security_key, six.string_types):
            security_key = security_key.encode('utf-8')
        # Repeat and truncate the key so it is exactly 16 bytes (AES-128).
        self.security_key = (security_key * 16)[:16]

    def encrypt(self,
                width,
                height,
                smart,
                adaptive,
                full,
                fit_in,
                flip_horizontal,
                flip_vertical,
                halign,
                valign,
                trim,
                crop_left,
                crop_top,
                crop_right,
                crop_bottom,
                filters,
                image):
        """Return the urlsafe-base64 AES-encrypted options path for *image*."""
        generated_url = Url.generate_options(
            width=width,
            height=height,
            smart=smart,
            meta=False,
            adaptive=adaptive,
            full=full,
            fit_in=fit_in,
            horizontal_flip=flip_horizontal,
            vertical_flip=flip_vertical,
            halign=halign,
            valign=valign,
            trim=trim,
            crop_left=crop_left,
            crop_top=crop_top,
            crop_right=crop_right,
            crop_bottom=crop_bottom,
            filters=filters
        )

        # Append md5(image path) so decryption can later be validated.
        url = "%s/%s" % (generated_url, hashlib.md5(image.encode('utf-8')).hexdigest())

        # Pad with '{' up to a multiple of the AES block size (16 bytes).
        pad = lambda b: b + (16 - len(b) % 16) * b"{"
        cipher = AES.new(self.security_key, AES.MODE_ECB)
        encrypted = base64.urlsafe_b64encode(cipher.encrypt(pad(url.encode('utf-8'))))

        return encrypted.decode('utf-8')

    def get_options(self, encrypted_url_part, image_url):
        """Decrypt an options path and validate it against *image_url*.

        Returns the options dict (with 'image' and 'hash' filled in) or None
        when decryption fails or the embedded md5 does not match.
        """
        try:
            opt = self.decrypt(encrypted_url_part)
        except ValueError:
            opt = None

        # NOTE(review): self.context and self.storage are never assigned in
        # this class; this fallback only works when a caller attaches them
        # (thumbor-style context/storage). Left unchanged.
        if not opt and not self.security_key and self.context.config.STORES_CRYPTO_KEY_FOR_EACH_IMAGE:
            security_key = self.storage.get_crypto(image_url)

            if security_key is not None:
                cr = Cryptor(security_key)
                try:
                    opt = cr.decrypt(encrypted_url_part)
                except ValueError:
                    opt = None

        if opt is None:
            return None

        # Reject URLs whose embedded hash does not match the requested image.
        image_hash = opt and opt.get('image_hash')
        image_hash = image_hash[1:] if image_hash and image_hash.startswith('/') else image_hash

        path_hash = hashlib.md5(image_url.encode('utf-8')).hexdigest()

        if not image_hash or image_hash != path_hash:
            return None

        opt['image'] = image_url
        opt['hash'] = opt['image_hash']
        del opt['image_hash']

        return opt

    def decrypt(self, encrypted):
        """Decrypt a urlsafe-base64 AES payload back into an options dict.

        Raises ValueError on malformed base64 (handled by get_options).
        """
        cipher = AES.new(self.security_key, AES.MODE_ECB)

        debased = base64.urlsafe_b64decode(encrypted.encode('utf-8'))
        decrypted = cipher.decrypt(debased).rstrip(b'{').decode('utf-8')

        result = Url.parse_decrypted('/%s' % decrypted)

        # Rename: the parsed 'image' slot actually holds the embedded md5.
        result['image_hash'] = result['image']
        del result['image']

        return result
class Signer:
    """Signs URLs with HMAC-SHA1 and checks signatures for validity."""

    def __init__(self, security_key):
        key = security_key
        if isinstance(key, six.string_types):
            key = key.encode('utf-8')
        self.security_key = key

    def validate(self, actual_signature, url):
        """True when *actual_signature* matches the signature of *url*."""
        return self.signature(url) == actual_signature

    def signature(self, url):
        """Return the urlsafe-base64 HMAC-SHA1 signature of *url* as text."""
        digest = hmac.new(self.security_key, url.encode('utf-8'), hashlib.sha1).digest()
        return base64.urlsafe_b64encode(digest).decode()
| 8,840 | 2,782 |
""" Config class for training the InvNet """
import argparse
from dp_layer.graph_layer.edge_functions import edge_f_dict as d
def get_parser(name):
    """Create an ArgumentParser named *name* that shows defaults in --help.

    :param name: program name for the parser
    :return: the configured argparse.ArgumentParser
    """
    return argparse.ArgumentParser(
        name, formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
class MNISTConfig(argparse.Namespace):
    """CLI configuration namespace for training InvNet on MNIST.

    Fixes: numeric options now declare ``type=`` (previously a value passed
    on the command line stayed a string); ``--make_pos`` no longer uses
    ``type=bool``, which treats every non-empty string — including "False" —
    as True.
    """

    def build_parser(self):
        """Define all MNIST training options; returns the parser."""
        parser = get_parser("InvNet config")
        parser.add_argument('--dataset', default='mnist', help='circle / polycrystalline')
        parser.add_argument('--lr', type=float, default=1e-04, help='learning rate')
        parser.add_argument('--output_path', default='./output_dir', help='output directory')
        parser.add_argument('--data_dir', default='/data/MNIST')
        parser.add_argument('--gpu', default=1, type=int, help='Selecting the gpu')
        parser.add_argument('--data_size', default=64, type=int)
        parser.add_argument('--batch_size', default=32, type=int, help='Batch size for training')
        parser.add_argument('--hidden_size', default=32, type=int, help='Hidden size used for generator and discriminator')
        parser.add_argument('--critic_iter', default=5, type=int, help='Number of iter for descriminator')
        parser.add_argument('--proj_iter', default=3, type=int, help='Number of iteration for projection update.')
        parser.add_argument('--end_iter', default=30000, type=int, help='How many iterations to train for.')
        parser.add_argument('--lambda_gp', default=10, type=float, help='gradient penalty hyperparameter')
        # NOTE(review): restore_mode is truthy for any non-empty CLI string.
        parser.add_argument('--restore_mode', default=False,
                            help='If True, it will load saved model from OUT_PATH and continue to train')
        parser.add_argument('--max_op', default=False)
        parser.add_argument('--edge_fn', default='diff_exp')
        # bool('False') is True, so parse booleans explicitly.
        parser.add_argument('--make_pos', type=lambda s: str(s).lower() in ('1', 'true', 'yes'),
                            default=True)
        parser.add_argument('--proj_lambda', type=float, default=1)
        parser.add_argument('--include_dp', type=int, default=True)
        parser.add_argument('--top2bottom', dest='top2bottom', action='store_true')
        parser.add_argument('--no-top2bottom', dest='top2bottom', action='store_false')
        parser.set_defaults(top2bottom=False)
        return parser

    def __init__(self):
        """Parse sys.argv and expose every option as a namespace attribute."""
        parser = self.build_parser()
        args = parser.parse_args()
        super().__init__(**vars(args))
class MicroStructureConfig(argparse.Namespace):
    """CLI configuration namespace for training InvNet on microstructures.

    Fixes mirror MNISTConfig: ``type=`` on numeric options and an explicit
    boolean parser for ``--make_pos`` (``type=bool`` treats "False" as True).
    """

    def build_parser(self):
        """Define all microstructure training options; returns the parser."""
        parser = get_parser("MicroConfig config")
        parser.add_argument('--lr', type=float, default=1e-04, help='learning rate')
        parser.add_argument('--output_path', default='./output_dir', help='output directory')
        parser.add_argument('--data_dir', default='/data/datasets/two_phase_morph/')
        parser.add_argument('--gpu', default=1, type=int, help='Selecting the gpu')
        parser.add_argument('--data_size', default=64, type=int)
        parser.add_argument('--batch_size', default=32, type=int, help='Batch size for training')
        parser.add_argument('--hidden_size', default=32, type=int, help='Hidden size used for generator and discriminator')
        parser.add_argument('--critic_iter', default=5, type=int, help='Number of iter for descriminator')
        parser.add_argument('--proj_iter', default=1, type=int, help='Number of iteration for projection update.')
        parser.add_argument('--end_iter', default=50000, type=int, help='How many iterations to train for.')
        parser.add_argument('--lambda_gp', default=10, type=float, help='gradient penalty hyperparameter')
        # NOTE(review): restore_mode is truthy for any non-empty CLI string.
        parser.add_argument('--restore_mode', default=False,
                            help='If True, it will load saved model from OUT_PATH and continue to train')
        parser.add_argument('--max_op', default=False)
        parser.add_argument('--edge_fn', choices=list(d.keys()), default='diff_exp')
        # bool('False') is True, so parse booleans explicitly.
        parser.add_argument('--make_pos', type=lambda s: str(s).lower() in ('1', 'true', 'yes'),
                            default=False)
        parser.add_argument('--proj_lambda', type=float, default=1)
        parser.add_argument('--include_dp', type=int, default=True)
        parser.add_argument('--top2bottom', dest='top2bottom', action='store_true')
        parser.add_argument('--no-top2bottom', dest='top2bottom', action='store_false')
        parser.set_defaults(top2bottom=False)
        return parser

    def __init__(self):
        """Parse sys.argv and expose every option as a namespace attribute."""
        parser = self.build_parser()
        args = parser.parse_args()
        super().__init__(**vars(args))
| 4,420 | 1,314 |
# -*- coding: utf-8 -*-
"""
flask-rst.modules.tags
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011 by Christoph Heer.
:license: BSD, see LICENSE for more details.
"""
from math import log
from flask import Blueprint, render_template
from jinja2 import Markup
from flaskrst.modules.blog import posts
def get_tags():
    """Return {tag: occurrence count} across all blog posts.

    Tags are lowercased; every occurrence counts, including repeats within
    one post. The manual if/else counting was replaced with dict.get().
    """
    tags = {}
    for post in posts:
        for tag in (t.lower() for t in post.config.get('tags', [])):
            tags[tag] = tags.get(tag, 0) + 1
    return tags
def get_posts_by_tag(name):
    """Collect all posts tagged (case-insensitively) with *name*, in order."""
    matched = []
    for post in posts:
        lowered = [t.lower() for t in post.config.get('tags', [])]
        if name in lowered and post not in matched:
            matched.append(post)
    return matched
def template_tags(doc):
    """Render a document's (lowercased) tag list as markup for post pages."""
    lowered = [t.lower() for t in doc.config.get('tags', [])]
    return Markup(render_template('tags_inside_post.html', tags=lowered))
tags = Blueprint('tags', __name__)


@tags.route("/tags/")
def cloud():
    """Tag-cloud page: font size grows logarithmically with tag frequency."""
    weighted = get_tags()
    for key in weighted:
        weighted[key] = 100 + log(weighted[key] or 1) * 20
    return render_template('tags_cloud.html', tags=weighted)
@tags.route("/tags/<tag>/")
def tag(tag):
blog_posts = get_posts_by_tag(tag)
return render_template('tags_taged_with.html',
tag=tag,
blog_posts=blog_posts
)
def setup(app, cfg):
    """flask-rst module entry point: expose the tag helper to Jinja templates
    and mount the /tags/ views on the app."""
    app.jinja_env.globals['tags'] = template_tags
    app.register_blueprint(tags)
# coding: utf-8
from __future__ import print_function, unicode_literals
import re
import pytest
import sqlitefts as fts
from sqlitefts import fts5, fts5_aux
apsw = pytest.importorskip("apsw")
class SimpleTokenizer(fts.Tokenizer):
    """Minimal word tokenizer used to exercise the FTS3/4 bindings."""

    _p = re.compile(r"\w+", re.UNICODE)

    def tokenize(self, text):
        # Yield (token, byte_start, byte_end); offsets are UTF-8 byte positions.
        for match in self._p.finditer(text):
            start, end = match.span()
            token = text[start:end]
            byte_start = len(text[:start].encode("utf-8"))
            yield token, byte_start, byte_start + len(token.encode("utf-8"))
class SimpleFTS5Tokenizer(fts5.FTS5Tokenizer):
    """FTS5 variant of SimpleTokenizer; *flags* is accepted but unused."""

    _p = re.compile(r"\w+", re.UNICODE)

    def tokenize(self, text, flags):
        # Yield (token, byte_start, byte_end); offsets are UTF-8 byte positions.
        for match in self._p.finditer(text):
            start, end = match.span()
            token = text[start:end]
            byte_start = len(text[:start].encode("utf-8"))
            yield token, byte_start, byte_start + len(token.encode("utf-8"))
def test_createtable():
    """A registered custom tokenizer can be named in CREATE VIRTUAL TABLE."""
    conn = apsw.Connection(":memory:")
    name = "simple"
    sql = "CREATE VIRTUAL TABLE fts USING FTS4(tokenize={})".format(name)
    fts.register_tokenizer(conn, name, fts.make_tokenizer_module(SimpleTokenizer()))
    conn.cursor().execute(sql)
    row = (
        conn.cursor()
        .execute(
            "SELECT type, name, tbl_name, sql FROM sqlite_master WHERE type='table' AND name='fts'"
        )
        .fetchone()
    )
    assert row == ("table", "fts", "fts", sql)
    conn.close()
def test_insert():
    """A row inserted through the FTS4 table round-trips unchanged."""
    conn = apsw.Connection(":memory:")
    name = "simple"
    content = "これは日本語で書かれています"
    fts.register_tokenizer(conn, name, fts.make_tokenizer_module(SimpleTokenizer()))
    conn.cursor().execute("CREATE VIRTUAL TABLE fts USING FTS4(tokenize={})".format(name))
    conn.cursor().execute("INSERT INTO fts VALUES(?)", (content,))
    assert conn.changes() == 1
    row = conn.cursor().execute("SELECT content FROM fts").fetchone()
    assert row[0] == content
    conn.close()
def test_match():
    """MATCH finds rows by token for both ASCII and Japanese content."""
    c = apsw.Connection(":memory:")
    name = "simple"
    contents = [("abc def",), ("abc xyz",), ("あいうえお かきくけこ",), ("あいうえお らりるれろ",)]
    fts.register_tokenizer(c, name, fts.make_tokenizer_module(SimpleTokenizer()))
    c.cursor().execute("CREATE VIRTUAL TABLE fts USING FTS4(tokenize={})".format(name))
    r = c.cursor().executemany("INSERT INTO fts VALUES(?)", contents)
    r = c.cursor().execute("SELECT * FROM fts").fetchall()
    assert len(r) == 4
    # ASCII terms: a shared token matches both rows, unique tokens one each.
    r = c.cursor().execute("SELECT * FROM fts WHERE fts MATCH 'abc'").fetchall()
    assert len(r) == 2
    r = c.cursor().execute("SELECT content FROM fts WHERE fts MATCH 'def'").fetchall()
    assert len(r) == 1 and r[0][0] == contents[0][0]
    r = c.cursor().execute("SELECT content FROM fts WHERE fts MATCH 'xyz'").fetchall()
    assert len(r) == 1 and r[0][0] == contents[1][0]
    r = c.cursor().execute("SELECT * FROM fts WHERE fts MATCH 'zzz'").fetchall()
    assert len(r) == 0
    # Same pattern with Japanese tokens.
    r = c.cursor().execute("SELECT * FROM fts WHERE fts MATCH 'あいうえお'").fetchall()
    assert len(r) == 2
    r = c.cursor().execute("SELECT content FROM fts WHERE fts MATCH 'かきくけこ'").fetchall()
    assert len(r) == 1 and r[0][0] == contents[2][0]
    r = c.cursor().execute("SELECT content FROM fts WHERE fts MATCH 'らりるれろ'").fetchall()
    assert len(r) == 1 and r[0][0] == contents[3][0]
    r = c.cursor().execute("SELECT * FROM fts WHERE fts MATCH 'まみむめも'").fetchall()
    assert len(r) == 0
    c.close()
def test_full_text_index_queries():
    """End-to-end FTS4 query matrix over ASCII and Japanese documents:
    plain terms, column-scoped matches, prefixes, phrases, and NEAR."""
    name = "simple"
    docs = [
        (
            "README",
            "sqlitefts-python provides binding for tokenizer of SQLite Full-Text search(FTS3/4). It allows you to write tokenizers in Python.",
        ),
        (
            "LICENSE",
            """Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:""",
        ),
        ("日本語", "あいうえお かきくけこ さしすせそ たちつてと なにぬねの"),
    ]
    with apsw.Connection(":memory:") as c:
        fts.register_tokenizer(c, name, fts.make_tokenizer_module(SimpleTokenizer()))
        c.cursor().execute(
            "CREATE VIRTUAL TABLE docs USING FTS4(title, body, tokenize={})".format(
                name
            )
        )
        c.cursor().executemany("INSERT INTO docs(title, body) VALUES(?, ?)", docs)
        # --- whole-table term queries (no stemming: 'bind' must not match) ---
        r = (
            c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH 'Python'")
            .fetchall()
        )
        assert len(r) == 1
        r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'bind'").fetchall()
        assert len(r) == 0
        r = (
            c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH 'binding'")
            .fetchall()
        )
        assert len(r) == 1
        r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'to'").fetchall()
        assert len(r) == 2
        r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'あいうえお'").fetchall()
        assert len(r) == 1
        r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'らりるれろ'").fetchall()
        assert len(r) == 0
        # --- column-scoped matches: 'col MATCH t' == 'docs MATCH col:t' ---
        assert (
            c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH 'binding'")
            .fetchall()[0]
            == c.cursor()
            .execute("SELECT * FROM docs WHERE body MATCH 'binding'")
            .fetchall()[0]
        )
        assert (
            c.cursor()
            .execute("SELECT * FROM docs WHERE body MATCH 'binding'")
            .fetchall()[0]
            == c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH 'body:binding'")
            .fetchall()[0]
        )
        assert (
            c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH 'あいうえお'")
            .fetchall()[0]
            == c.cursor()
            .execute("SELECT * FROM docs WHERE body MATCH 'あいうえお'")
            .fetchall()[0]
        )
        assert (
            c.cursor()
            .execute("SELECT * FROM docs WHERE body MATCH 'かきくけこ'")
            .fetchall()[0]
            == c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH 'body:かきくけこ'")
            .fetchall()[0]
        )
        r = (
            c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH 'title:bind'")
            .fetchall()
        )
        assert len(r) == 0
        r = (
            c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH 'title:README'")
            .fetchall()
        )
        assert len(r) == 1
        r = (
            c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH 'title:日本語'")
            .fetchall()
        )
        assert len(r) == 1
        r = c.cursor().execute("SELECT * FROM docs WHERE title MATCH 'bind'").fetchall()
        assert len(r) == 0
        r = (
            c.cursor()
            .execute("SELECT * FROM docs WHERE title MATCH 'README'")
            .fetchall()
        )
        assert len(r) == 1
        r = c.cursor().execute("SELECT * FROM docs WHERE title MATCH '日本語'").fetchall()
        assert len(r) == 1
        # --- implicit AND of terms, and '*' prefix queries ---
        r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'to in'").fetchall()
        assert len(r) == 2
        r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'Py*'").fetchall()
        assert len(r) == 1
        r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'Z*'").fetchall()
        assert len(r) == 0
        r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'あ*'").fetchall()
        assert len(r) == 1
        r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'ん*'").fetchall()
        assert len(r) == 0
        # --- quoted phrases require adjacency; prefixes allowed inside ---
        r = (
            c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH 'tokenizer SQLite'")
            .fetchall()
        )
        assert len(r) == 1
        r = (
            c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH '\"tokenizer SQLite\"'")
            .fetchall()
        )
        assert len(r) == 0
        r = (
            c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH 'あいうえお たちつてと'")
            .fetchall()
        )
        assert len(r) == 1
        r = (
            c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH '\"あいうえお たちつてと\"'")
            .fetchall()
        )
        assert len(r) == 0
        r = (
            c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH '\"tok* SQL*\"'")
            .fetchall()
        )
        assert len(r) == 0
        r = (
            c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH '\"tok* of SQL*\"'")
            .fetchall()
        )
        assert len(r) == 1
        r = (
            c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH '\"あ* さ*\"'")
            .fetchall()
        )
        assert len(r) == 0
        r = (
            c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH '\"あ* かきくけこ さ*\"'")
            .fetchall()
        )
        assert len(r) == 1
        # --- NEAR and NEAR/N proximity queries ---
        r = (
            c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH 'tokenizer NEAR SQLite'")
            .fetchall()
        )
        assert len(r) == 1
        r = (
            c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH 'binding NEAR/2 SQLite'")
            .fetchall()
        )
        assert len(r) == 0
        r = (
            c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH 'binding NEAR/3 SQLite'")
            .fetchall()
        )
        assert len(r) == 1
        r = (
            c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH 'あいうえお NEAR たちつてと'")
            .fetchall()
        )
        assert len(r) == 1
        r = (
            c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH 'あいうえお NEAR/2 たちつてと'")
            .fetchall()
        )
        assert len(r) == 1
        r = (
            c.cursor()
            .execute("SELECT * FROM docs WHERE docs MATCH 'あいうえお NEAR/3 たちつてと'")
            .fetchall()
        )
        assert len(r) == 1
def test_tokenizer_output():
    """The fts3tokenize virtual table exposes the tokenizer's tokens/offsets."""
    name = "simple"
    with apsw.Connection(":memory:") as c:
        fts.register_tokenizer(c, name, fts.make_tokenizer_module(SimpleTokenizer()))
        c.cursor().execute(
            "CREATE VIRTUAL TABLE tok1 USING fts3tokenize({})".format(name)
        )
        # ASCII input: byte offsets equal character positions.
        expect = [
            ("This", 0, 4, 0),
            ("is", 5, 7, 1),
            ("a", 8, 9, 2),
            ("test", 10, 14, 3),
            ("sentence", 15, 23, 4),
        ]
        for a, e in zip(
            c.cursor().execute(
                "SELECT token, start, end, position "
                "FROM tok1 WHERE input='This is a test sentence.'"
            ),
            expect,
        ):
            assert e == a
        # Multi-byte input: expected offsets are accumulated in UTF-8 bytes;
        # the (None, 0, -1, 0) seed tuple is discarded after the loop.
        s = "これ は テスト の 文 です"
        expect = [(None, 0, -1, 0)]
        for i, t in enumerate(s.split()):
            expect.append(
                (t, expect[-1][2] + 1, expect[-1][2] + 1 + len(t.encode("utf-8")), i)
            )
        expect = expect[1:]
        for a, e in zip(
            c.cursor().execute(
                "SELECT token, start, end, position " "FROM tok1 WHERE input=?", [s]
            ),
            expect,
        ):
            assert e == a
@pytest.mark.xfail(
    apsw.using_amalgamation, reason="FTS5 with APSW+Amalgamation not supported"
)
def test_fts5_api_from_db():
    """The fts5_api pointer can be fetched from a connection (API version 2)."""
    with apsw.Connection(":memory:") as c:
        fts5api = fts5.fts5_api_from_db(c)
        assert fts5api.iVersion == 2
        assert fts5api.xCreateTokenizer
@pytest.mark.xfail(
    apsw.using_amalgamation,
    reason="FTS5 with APSW+Amalgamation not supported",
    raises=fts.Error,
)
def test_aux_and_tokenize():
    """A registered FTS5 auxiliary function can re-emit each row's tokens."""
    c = apsw.Connection(":memory:")
    try:
        fts5_aux.register_aux_function(c, "tokenize", fts5_aux.aux_tokenize)
        cur = c.cursor()
        cur.execute("CREATE VIRTUAL TABLE fts USING FTS5(content)")
        cur.executemany("INSERT INTO fts VALUES(?)", (["hello world"], ["こんにちは 世界"]))
        cur.execute("SELECT COUNT(*) FROM fts")
        assert 2 == cur.fetchone()[0]
        # Expected: column 0's tokens joined with ", " for each row.
        cur.execute("SELECT tokenize(fts, 0) FROM fts")
        assert [x[0] for x in cur.fetchall()] == ["hello, world", "こんにちは, 世界"]
    finally:
        c.close()
| 12,560 | 4,152 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Random token generator (original Russian comment: "генератор случайных
чисел", i.e. random generator).

Fix: uses the ``secrets`` module instead of ``random`` — ``random`` is not
cryptographically secure and must not be used for passwords/tokens. Also
replaces the Python-2-only ``print`` statement.
"""
import secrets

# Same alphabet as the original: digits 1-9 plus ASCII letters (no '0').
ALPHABET = '123456789qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM'
print(''.join(secrets.choice(ALPHABET) for _ in range(12)))
import mxnet as mx
from mxnet import nd
from mxnet import gluon
from mxnet.gluon import nn, rnn
from config import config
nJoints = config.NETWORK.nJoints
class MyLSTM(gluon.Block):
    """Sequence-to-sequence LSTM: encodes an input sequence, then
    autoregressively decodes seqLength frames of 3*nJoints values each.
    """

    def __init__(self, cfg, **kwargs):
        super(MyLSTM, self).__init__(**kwargs)
        self.hidden_dim = cfg.NETWORK.hidden_dim
        with self.name_scope():
            self.drop1 = nn.Dropout(cfg.NETWORK.dropout1)  # applied to cell inputs
            self.drop2 = nn.Dropout(cfg.NETWORK.dropout2)  # applied before the output layer
            self.encoder = rnn.LSTMCell(hidden_size=self.hidden_dim, prefix='encoder_')
            self.decoder = rnn.LSTMCell(hidden_size=self.hidden_dim, prefix='decoder_')
            self.output_layer = nn.Dense(3*nJoints)

    def forward(self, inputs, init_state, start_token):
        # Encoder: fold the whole input sequence into the LSTM state.
        state = init_state
        for item in inputs:
            mid_hidden, state = self.encoder(self.drop1(item), state)
        # Decoder: start from start_token and feed each prediction back in.
        # The Dense output is added to the previous frame (residual step).
        ins = start_token
        pred = [] # seqLength entries; per the original note presumably 64x(3x16) each — TODO confirm shapes
        for i in range(config.DATASET.seqLength):
            hidden_state, state = self.decoder(self.drop1(ins), state)
            output = self.output_layer(self.drop2(hidden_state)) + ins
            ins = output
            pred.append(output)
        return pred
def get_net(cfg):
    """Factory used by the training script to build the model from a config."""
    return MyLSTM(cfg)
from math import ceil, floor
def k_multiply(a, b):
    """Multiply two non-negative integers with Karatsuba's algorithm.

    Splits each operand at 10**(n//2) where n is the longer decimal length,
    and recombines with three recursive products.

    Bug fix: the recombination exponent was ``10**(2*n//2)`` which equals
    ``10**n``; the correct factor is ``10**(2*(n//2))``. For odd digit
    counts these differ, e.g. the original returned 542088 for 123*456
    instead of 56088.
    """
    # Base case: single-digit operand -> plain multiplication.
    if len(str(a)) == 1 or len(str(b)) == 1:
        return int(a) * int(b)
    n = max(len(str(a)), len(str(b)))
    base = 10 ** (n // 2)
    al, ar = divmod(a, base)
    bl, br = divmod(b, base)
    p1 = k_multiply(al, bl)
    p2 = k_multiply(ar, br)
    p3 = k_multiply(al + ar, bl + br)  # (al+ar)(bl+br) = p1 + cross + p2
    return base * base * p1 + base * (p3 - p1 - p2) + p2
if __name__ == "__main__":
print(k_multiply(2104, 2421))
print(k_multiply(21, 24))
print(k_multiply(1, 4))
| 536 | 280 |
import sys
def print_lights(lights):
    """Render the light grid as '#'/'.' text once it is short enough.

    Returns the rendered string when the vertical span is under 18 rows,
    otherwise False.
    """
    xs = [p[0] for p in lights]
    ys = [p[1] for p in lights]
    minx, maxx = min(xs), max(xs)
    miny, maxy = min(ys), max(ys)
    if maxy - miny >= 18:
        return False
    rows = []
    for row_y in range(miny, maxy + 1):
        row = ''.join('#' if (col_x, row_y) in lights else '.'
                      for col_x in range(minx, maxx + 1))
        rows.append(row + '\n')
    return ''.join(rows)
def step(lights):
    """Advance every light one tick along its velocity; velocities unchanged."""
    moved = {}
    for (x, y), (vx, vy) in lights.items():
        moved[(x + vx, y + vy)] = (vx, vy)
    return moved
def parse(line):
    """Extract (posx, posy, velx, vely) from one 'position=<..> velocity=<..>' line."""
    pos_part, vel_part = line[10:].strip().split("> velocity=<")
    posx, posy = (int(v) for v in pos_part.split(", "))
    velx, vely = (int(v) for v in vel_part[:-1].split(", "))
    return posx, posy, velx, vely
def main():
    """Read light definitions from stdin, simulate until the grid is small
    enough to print, then print it along with the tick number."""
    lights = {}
    for line in sys.stdin.readlines():
        px, py, vx, vy = parse(line)
        lights[px, py] = (vx, vy)
    for tick in range(25000):
        rendered = print_lights(lights)
        if rendered:
            print(rendered)
            print(tick)
            break
        lights = step(lights)


if __name__ == '__main__':
    main()
| 1,138 | 437 |
import re
import random
import string
import os
supported_types = ['a', 'n', 's']
# Kept for backward compatibility only. Previously the duplicate-type check
# mutated this module-level list, so state leaked between calls and a second
# call reusing any type code spuriously returned None. Counting is per-call now.
count_types = []


def fuzzyfy(types, length):
    """Return a random string of *length* characters drawn from the requested
    character classes, or None for invalid arguments.

    types: string of class codes — 'a' letters, 'n' digits, 's' punctuation.
           Each code may appear at most once.
    length: desired result length; values not coercible to int, or < 1,
            yield None.
    """
    # Validate length (mirrors the original int() probe, plus coercion so
    # string numbers like '5' no longer crash the comparison below).
    try:
        length = int(length)
    except (TypeError, ValueError):
        return None
    if not types or length < 1:
        return None
    # Every requested type must be supported...
    for code in types:
        if code not in supported_types:
            return None
    # ...and may occur at most once (local state: safe across calls).
    seen = set()
    for code in types:
        if code in seen:
            return None
        seen.add(code)
    # Build the fuzzy string, one randomly-typed character at a time.
    fuzzystr = ''.join(_type_to_char(random.choice(types)) for _ in range(length))
    return fuzzystr if len(fuzzystr) == length else None


def _type_to_char(type):
    """Return one random character of the given class ('' for unknown codes)."""
    if type == "a":
        return random.choice(string.ascii_lowercase + string.ascii_uppercase)
    elif type == "n":
        return random.choice(string.digits)
    elif type == "s":
        return random.choice(string.punctuation)
    else:
        return ""
def test():
    """Exercise fuzzyfy with a mix of valid and invalid argument combinations."""
    cases = [
        ('ab', 2),
        ('ans', 2),
        ('', 0),
        ('ans', 5),
        ('xxansxx', 99),
        ('ansans', 9),
        ('ans', 'a'),
        ('an', -1),
        ('ans', 11),
        ('an', 1),
        ('san', 5),
    ]
    for types, length in cases:
        print(fuzzyfy(types, length))
if __name__ == '__main__':
    # Ad-hoc demo: print one 10-character string mixing all three classes.
    s = fuzzyfy('ans', 10)
    print(s)
| 1,850 | 649 |
import pytest
# This function is based upon the example of how to
# "[make] test result information available in fixtures" at:
# https://pytest.org/latest/example/simple.html#making-test-result-information-available-in-fixtures
# and:
# https://github.com/pytest-dev/pytest/issues/288
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """Attach each phase's report to the test item as item.rep_<phase>,
    so fixtures can inspect whether the test passed/failed."""
    # execute all other hooks to obtain the report object
    outcome = yield
    rep = outcome.get_result()
    # set a report attribute for each phase of a call, which can
    # be "setup", "call", "teardown"
    setattr(item, "rep_" + rep.when, rep)
def pytest_configure(config):
    """Register the custom 'slow_test' marker so pytest does not warn on it."""
    config.addinivalue_line("markers",
                            "slow_test: slow tests aren't run under Valgrind")
| 787 | 252 |
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for working with the current thread context."""
import threading
class _ThreadData(threading.local):
    """Thread-local storage: each thread sees its own attribute values."""
    def __init__(self):
        # The current thread's BenchmarkSpec; None until explicitly set.
        self.benchmark_spec = None
# Single shared instance; attributes are still per-thread because the
# class derives from threading.local.
_thread_local = _ThreadData()
def SetThreadBenchmarkSpec(benchmark_spec):
    """Sets the current thread's BenchmarkSpec object."""
    # Assignment on a threading.local subclass only affects this thread.
    _thread_local.benchmark_spec = benchmark_spec
def GetThreadBenchmarkSpec():
    """Gets the current thread's BenchmarkSpec object.

    If SetThreadBenchmarkSpec() has not been called in either the current thread
    or in an ancestor, then this method will return None by default.
    """
    return _thread_local.benchmark_spec
| 1,236 | 362 |
import arcade
arcade.open_window(500, 750, "Rainbow")
arcade.set_background_color(arcade.color.SKY_BLUE)
arcade.start_render()

# Seven nested arcs; each inner band starts 25px further right and is
# 30px narrower and 20px shorter than the one outside it.
RAINBOW_COLORS = (
    arcade.color.RED,
    arcade.color.ORANGE,
    arcade.color.YELLOW,
    arcade.color.GREEN,
    arcade.color.BLUE,
    arcade.color.INDIGO,
    arcade.color.VIOLET,
)
for band, color in enumerate(RAINBOW_COLORS):
    arcade.draw_parabola_filled(
        25 + 25 * band, 80, 500 - 30 * band, 300 - 20 * band, color, 0)

arcade.finish_render()
arcade.run()
| 615 | 330 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2019-02-13 18:49
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Drops the ``nome_abreviado`` column from the ``professor`` table.

    dependencies = [
        ('portal', '0016_auto_20190213_1645'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='professor',
            name='nome_abreviado',
        ),
    ]
| 403 | 154 |
#!/usr/bin/env python
''' simple bottle drop module'''
import time
mpstate = None        # MAVProxy state handle; assigned by init()
hold_pwm = 983        # servo PWM for the hold position (not referenced in this chunk)
release_pwm = 1776    # servo PWM applied to release the bottle (see cmd_drop)
drop_channel = 5      # RC override channel driving the drop servo
drop_time = 2.0       # seconds to keep the release PWM applied
class drop_state(object):
    """Tracks whether a drop is in progress and when it started."""
    def __init__(self):
        self.waiting = False   # True while waiting for the release interval to elapse
        self.start_drop = 0    # time.time() when the drop was commanded
def name():
    '''Short identifier under which this module is known.'''
    module_name = "drop"
    return module_name
def description():
    '''One-line human-readable summary of this module.'''
    summary = "bottle drop control"
    return summary
def cmd_drop(args):
    '''Command the bottle release: start the timer and override the servo.'''
    state = mpstate.drop_state
    state.start_drop = time.time()
    state.waiting = True
    mpstate.status.override[drop_channel - 1] = release_pwm
    mpstate.override_period.force()
    print("started drop")
def check_drop(m):
    '''Clear the servo override once the drop interval has elapsed.'''
    state = mpstate.drop_state
    if not state.waiting:
        return
    if time.time() <= state.start_drop + drop_time:
        return
    mpstate.status.override[drop_channel - 1] = 0
    state.waiting = False
    mpstate.override_period.force()
    print("drop complete")
def init(_mpstate):
    '''initialise module: stash the MAVProxy state handle, attach the
    per-module drop_state, and register the "drop" console command.'''
    global mpstate
    mpstate = _mpstate
    mpstate.drop_state = drop_state()
    # Maps the console command to (handler, help text).
    mpstate.command_map['drop'] = (cmd_drop, "drop bottle")
    print("drop initialised")
def mavlink_packet(m):
    '''Handle an incoming MAVLink packet relevant to the drop module.'''
    packet_type = m.get_type()
    if packet_type == 'RC_CHANNELS_RAW':
        # RC traffic doubles as our periodic tick for timing the drop.
        check_drop(m)
    if packet_type == 'PARAM_VALUE':
        if str(m.param_id) == 'RC5_FUNCTION' and m.param_value != 1.0:
            print("DROP WARNING: RC5_FUNCTION=%u" % m.param_value)
| 1,510 | 525 |
import pandas as pd
import numpy as np
from numpy.random import randn
from craft_ai.pandas import MISSING_VALUE, OPTIONAL_VALUE
from random import random, randint
# Sizes of the generated operation histories used by the fixtures below.
NB_OPERATIONS = 300
NB_MANY_OPERATIONS = 1000
# Boosting agent: enum output "a" from continuous b-d plus a timezone column.
SIMPLE_AGENT_BOOSTING_CONFIGURATION = {
    "model_type": "boosting",
    "context": {
        "a": {"type": "enum"},
        "b": {"type": "continuous"},
        "c": {"type": "continuous"},
        "d": {"type": "continuous"},
        "e": {"type": "timezone"},
    },
    "output": ["a"],
    "min_samples_per_leaf": 1,
    "operations_as_events": True,
    "tree_max_operations": 50000,
    "num_iterations": 20,
    "learning_rate": 0.5,
}
# Boosting agent whose day_of_week feature is generated from the index.
AGENT_BOOSTING_CONFIGURATION_WITHOUT_TIMEZONE = {
    "model_type": "boosting",
    "context": {
        "a": {"type": "enum"},
        "b": {"type": "continuous"},
        "c": {"type": "day_of_week", "is_generated": True},
        "d": {"type": "timezone"},
    },
    "output": ["a"],
    "min_samples_per_leaf": 1,
    "operations_as_events": True,
    "tree_max_operations": 50000,
    "num_iterations": 20,
    "learning_rate": 0.5,
}
# Boosting agent mixing plain, generated (day_of_week) and calendar types.
SIMPLE_AGENT_BOOSTING_CONFIGURATION_WITH_GEN_TYPE = {
    "model_type": "boosting",
    "context": {
        "a": {"type": "enum"},
        "b": {"type": "continuous"},
        "c": {"type": "continuous"},
        "d": {"type": "continuous"},
        "e": {"type": "timezone"},
        "f": {"type": "day_of_week", "is_generated": True},
        "g": {"type": "month_of_year"},
    },
    "output": ["a"],
    "min_samples_per_leaf": 1,
    "operations_as_events": True,
    "tree_max_operations": 50000,
    "num_iterations": 20,
    "learning_rate": 0.5,
}
# Plain (non-boosting) agent over five continuous features.
SIMPLE_AGENT_CONFIGURATION = {
    "context": {
        "a": {"type": "continuous"},
        "b": {"type": "continuous"},
        "c": {"type": "continuous"},
        "d": {"type": "continuous"},
        "e": {"type": "continuous"},
    },
    "output": ["a"],
    "time_quantum": 100,
    "min_samples_per_leaf": 1,
}
# Random per-minute histories indexed on a Europe/Paris datetime range.
# Values are drawn fresh at import time, so the exact data differs per run.
AGENT_BOOSTING_WITHOUT_TIMEZONE_DATA = pd.DataFrame(
    [[str(randint(1, 3)), random()] for i in range(NB_OPERATIONS)],
    columns=["a", "b"],
    index=pd.date_range("20200101", periods=NB_OPERATIONS, freq="T").tz_localize(
        "Europe/Paris"
    ),
)
SIMPLE_AGENT_BOOSTING_DATA = pd.DataFrame(
    [
        [str(randint(1, 3)), random(), random(), random(), "+01:00"]
        for i in range(NB_OPERATIONS)
    ],
    columns=["a", "b", "c", "d", "e"],
    index=pd.date_range("20200101", periods=NB_OPERATIONS, freq="T").tz_localize(
        "Europe/Paris"
    ),
)
SIMPLE_AGENT_BOOSTING_MANY_DATA = pd.DataFrame(
    [
        [str(randint(1, 3)), random(), random(), random(), "+01:00"]
        for i in range(NB_MANY_OPERATIONS)
    ],
    columns=["a", "b", "c", "d", "e"],
    index=pd.date_range("20200101", periods=NB_MANY_OPERATIONS, freq="T").tz_localize(
        "Europe/Paris"
    ),
)
SIMPLE_AGENT_DATA = pd.DataFrame(
    randn(NB_OPERATIONS, 5),
    columns=["a", "b", "c", "d", "e"],
    index=pd.date_range("20200101", periods=NB_OPERATIONS, freq="T").tz_localize(
        "Europe/Paris"
    ),
)
SIMPLE_AGENT_MANY_DATA = pd.DataFrame(
    randn(NB_MANY_OPERATIONS, 5),
    columns=["a", "b", "c", "d", "e"],
    index=pd.date_range("20200101", periods=NB_MANY_OPERATIONS, freq="T").tz_localize(
        "Europe/Paris"
    ),
)
# Fixed operations payload in the raw API shape; some contexts omit keys.
SIMPLE_AGENT_DATA_DICT = [
    {
        "timestamp": 1558741230,
        "context": {"a": 10, "b": 10, "c": 10, "d": 10, "e": 10},
    },
    {"timestamp": 1558741331, "context": {"a": 10, "b": 11, "c": 12, "e": 13}},
    {"timestamp": 1558741432, "context": {"a": 13, "b": 44, "c": 33, "d": 22}},
    {"timestamp": 1558741533, "context": {"a": 11, "d": 55, "e": 55}},
    {"timestamp": 1558741634, "context": {"a": 33, "c": 66, "d": 22, "e": 44}},
    {"timestamp": 1558741735, "context": {"a": 1, "b": 33, "c": 33, "d": 44}},
]
# Event-based agents over a continuous value, an enum and a timezone; the
# two variants differ only in which property is the output.
COMPLEX_AGENT_CONFIGURATION = {
    "context": {
        "a": {"type": "continuous"},
        "b": {"type": "enum"},
        "tz": {"type": "timezone"},
    },
    "output": ["b"],
    "time_quantum": 100,
    "min_samples_per_leaf": 1,
    "operations_as_events": True,
    "learning_period": 3600 * 24 * 365,
    "tree_max_operations": 50000,
}
COMPLEX_AGENT_CONFIGURATION_2 = {
    "context": {
        "a": {"type": "continuous"},
        "b": {"type": "enum"},
        "tz": {"type": "timezone"},
    },
    "output": ["a"],
    "time_quantum": 100,
    "min_samples_per_leaf": 1,
    "operations_as_events": True,
    "learning_period": 3600 * 24 * 365,
    "tree_max_operations": 50000,
}
# Daily history with ragged rows: short rows leave trailing columns as NaN.
COMPLEX_AGENT_DATA = pd.DataFrame(
    [
        [1, "Pierre", "+02:00"],
        [2, "Paul"],
        [3],
        [4],
        [5, "Jacques"],
        [6],
        [7],
        [8, np.nan, "+01:00"],
        [9],
        [10],
    ],
    columns=["a", "b", "tz"],
    index=pd.date_range("20200101", periods=10, freq="D").tz_localize("Europe/Paris"),
)
# Same as above plus a list-valued column ("arrays").
COMPLEX_AGENT_DATA_2 = pd.DataFrame(
    [
        [1, "Pierre", "+02:00", [8, 9]],
        [2, "Paul"],
        [3],
        [4],
        [5, "Jacques"],
        [6],
        [7],
        [8, np.nan, "+01:00", [1, 2, 3]],
        [9],
        [10],
    ],
    columns=["a", "b", "tz", "arrays"],
    index=pd.date_range("20200101", periods=10, freq="D").tz_localize("Europe/Paris"),
)
# Agent exercising the datetime-derived types (time_of_day + timezone).
DATETIME_AGENT_CONFIGURATION = {
    "context": {
        "a": {"type": "continuous"},
        "b": {"type": "enum"},
        "myTimeOfDay": {"type": "time_of_day"},
        "myCoolTimezone": {"type": "timezone"},
    },
    "output": ["b"],
    "time_quantum": 3600,
    "min_samples_per_leaf": 1,
}
# Hourly UTC history where the timezone column changes sporadically.
DATETIME_AGENT_DATA = pd.DataFrame(
    [
        [1, "Pierre", "+02:00"],
        [2, "Paul"],
        [3, np.nan, "+04:00"],
        [4],
        [5, "Jacques", "UTC"],
        [6],
        [7, np.nan, "+08:00"],
        [8],
        [9],
        [10, np.nan, "+10:00"],
    ],
    columns=["a", "b", "myCoolTimezone"],
    index=pd.date_range("20200101 00:00:00", periods=10, freq="H").tz_localize("UTC"),
)
# Fixtures for the MISSING_VALUE / OPTIONAL_VALUE enum sentinels.
MISSING_AGENT_CONFIGURATION = {
    "context": {
        "a": {"type": "continuous"},
        "b": {"type": "enum"},
        "tz": {"type": "timezone"},
    },
    "output": ["a"],
    "time_quantum": 100,
    "min_samples_per_leaf": 1,
}
MISSING_AGENT_DATA = pd.DataFrame(
    [
        [1, MISSING_VALUE, "+02:00"],
        [2, "Paul"],
        [3, OPTIONAL_VALUE],
        [4],
        [5, "Jacques"],
        [6],
        [np.nan, OPTIONAL_VALUE],
        [8, None, "+01:00"],
        [9],
        [10],
    ],
    columns=["a", "b", "tz"],
    index=pd.date_range("20200101", periods=10, freq="D").tz_localize("Europe/Paris"),
)
# Two-row decision input carrying both sentinel values.
MISSING_AGENT_DATA_DECISION = pd.DataFrame(
    [[1, MISSING_VALUE, "+02:00"], [3, OPTIONAL_VALUE]],
    columns=["a", "b", "tz"],
    index=pd.date_range("20200101", periods=2, freq="D").tz_localize("Europe/Paris"),
)
# Property names that are NOT valid Python identifiers ("1_b", "None", "_c"),
# used to test identifier sanitisation.
INVALID_PYTHON_IDENTIFIER_CONFIGURATION = {
    "context": {
        "a": {"type": "continuous"},
        "1_b": {"type": "enum"},
        "None": {"type": "enum"},
        "_c": {"type": "enum"},
        "tz": {"type": "timezone"},
    },
    "output": ["a"],
    "time_quantum": 100,
    "min_samples_per_leaf": 1,
}
INVALID_PYTHON_IDENTIFIER_DATA = pd.DataFrame(
    [
        [1, "Pierre", "Mignon", "Toto", "+02:00"],
        [2, "Paul"],
        [3],
        [4, "Tata", "Tutu"],
        [5, "Jacques"],
        [6],
        [7],
        [8, np.nan, np.nan, np.nan, "+01:00"],
        [9],
        [10],
    ],
    columns=["a", "1_b", "None", "_c", "tz"],
    index=pd.date_range("20200101", periods=10, freq="D").tz_localize("Europe/Paris"),
)
INVALID_PYTHON_IDENTIFIER_DECISION = pd.DataFrame(
    [
        [1, "Pierre", "Mignon", "Toto", "+02:00"],
        [2, "Paul", "Mignon", "Toto", "+02:00"],
        [3, "Tata", "Tutu", "Toto", "+02:00"],
    ],
    columns=["a", "1_b", "None", "_c", "tz"],
    index=pd.date_range("20200101", periods=3, freq="D").tz_localize("Europe/Paris"),
)
# Minimal v2 decision tree with no samples, as returned for an empty agent.
EMPTY_TREE = {
    "_version": "2.0.0",
    "configuration": {
        "context": {
            "a": {"type": "continuous"},
            "b": {"type": "enum"},
            "tz": {"type": "timezone"},
        },
        "output": ["b"],
        "time_quantum": 100,
        "min_samples_per_leaf": 1,
    },
    "trees": {
        "b": {"output_values": [], "prediction": {"confidence": 0, "nb_samples": 0}}
    },
}
# Generator configurations: like agents but with an agent "filter" list.
VALID_GENERATOR_CONFIGURATION = {
    "context": {
        "a": {"type": "continuous"},
        "b": {"type": "continuous"},
        "c": {"type": "continuous"},
        "d": {"type": "continuous"},
        "e": {"type": "continuous"},
    },
    "output": ["a"],
    "time_quantum": 100,
    "operations_as_events": True,
    "learning_period": 6000000,
    "tree_max_operations": 50000,
    "filter": ["test_filter"],
}
VALID_COMPLEX_GENERATOR_CONFIGURATION = {
    "context": {
        "a": {"type": "continuous"},
        "b": {"type": "enum"},
        "tz": {"type": "timezone"},
    },
    "output": ["b"],
    "time_quantum": 100,
    "operations_as_events": True,
    "learning_period": 6000000,
    "tree_max_operations": 50000,
    "filter": ["test_filter"],
}
# Reference timestamps used when requesting trees/decisions in tests.
VALID_TIMESTAMP = 1577833200
VALID_LAST_TIMESTAMP = 1577847600
| 9,232 | 3,950 |
import tornado.web
from content import PAGES
def page_controller(handler_instance, path):
    """Write the serialized page registered under *path*, or a 404 payload."""
    if path not in PAGES:
        handler_instance.set_status(404)
        handler_instance.write({
            'message': 'A resource was not found for this path.'
        })
        return
    handler_instance.write(PAGES[path].serialize())
| 305 | 99 |
#!/usr/bin/python3
# Copyright (C) 2020 Sam Steele
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
import xml.etree.ElementTree as ET
from datetime import datetime
from influxdb import InfluxDBClient
from influxdb.exceptions import InfluxDBClientError
# InfluxDB connection settings and the database that receives play sessions.
INFLUXDB_HOST = 'localhost'
INFLUXDB_PORT = 8086
INFLUXDB_USERNAME = 'root'
INFLUXDB_PASSWORD = 'root'
GAMING_DATABASE = 'gaming'
# Parse the session hand-off file written by the RetroPie hooks.
# Line order: start epoch, platform, emulator, ROM path, end epoch.
# Fix: use a context manager so the file handle is closed even if a
# line fails to parse (the original leaked the handle on error).
with open('/run/shm/influx-retropie', 'r') as f:
    start = datetime.utcfromtimestamp(int(f.readline().strip()))
    platform = f.readline().strip()
    emulator = f.readline().strip()
    rom = name = os.path.basename(f.readline().strip())
    end = datetime.utcfromtimestamp(int(f.readline().strip()))
# Session length in seconds (note: .seconds, so sessions are assumed < 1 day).
duration = (end - start).seconds
# No ROM line means a native Linux app was launched directly.
if not rom:
    rom = name = emulator
    platform = "Linux"
# Ignore games played less than 60 seconds
if duration < 60:
    print("Ignoring '" + emulator + ": " + name +"' played less than 60 seconds")
    sys.exit()
# Ignore non-games and the Macintosh platform, which doesn't provide game names
if platform == "macintosh" or rom.startswith("+") or rom == "Desktop.sh" or rom == "Kodi.sh" or rom == "Steam Link.sh":
    print("Ignoring non-game: '" + emulator + ": " + name +"'")
    sys.exit()
# Resolve the ROM filename to its display name via the EmulationStation
# gamelist for this platform, when one exists.
gamelist = os.path.expanduser('~/.emulationstation/gamelists/' + platform + '/gamelist.xml')
if os.path.exists(gamelist):
    root = ET.parse(gamelist).getroot()
    for game in root.findall('game'):
        # Robustness fix: skip malformed entries missing <path>/<name>
        # instead of crashing with AttributeError on .text.
        path_el = game.find('path')
        if path_el is None or path_el.text is None:
            continue
        if os.path.basename(path_el.text) == name:
            name_el = game.find('name')
            if name_el is not None and name_el.text is not None:
                name = name_el.text
            break
# Map EmulationStation platform ids to display names; anything not listed
# simply gets capitalised.
_PLATFORM_NAMES = {
    "nes": "NES",
    "snes": "SNES",
    "gba": "Game Boy Advance",
    "gbc": "Game Boy Color",
    "megadrive": "Sega Genesis",
    "genesis": "Sega Genesis",
    "sega32x": "Sega 32X",
    "segacd": "Sega CD",
    "pc": "MS-DOS",
    "scummvm": "ScummVM",
    "mame-libretro": "Arcade",
    "mastersystem": "Sega MasterSystem",
}
platform = _PLATFORM_NAMES.get(platform, platform.capitalize())
# Optional per-title metadata; only populated for known titles.
url = ""
image = ""
if name == "openttd":
    name = "OpenTTD"
    url = "https://www.openttd.org"
    image = "https://www.openttd.org/static/img/layout/openttd-128.gif"
# Build the single InfluxDB point for this session. The image/url fields
# are attached only when both are known; building the fields dict once
# removes the duplicated point literal the original carried.
fields = {
    "value": duration
}
if url and image:
    fields["image"] = image
    fields["url"] = url
points = [{
    "measurement": "time",
    "time": start,
    "tags": {
        "application_id": rom,
        "platform": platform,
        "title": name,
    },
    "fields": fields
}]
# Connect, ensure the database exists, then write the point. Each step
# exits with a message on InfluxDB client errors.
try:
    client = InfluxDBClient(host=INFLUXDB_HOST, port=INFLUXDB_PORT, username=INFLUXDB_USERNAME, password=INFLUXDB_PASSWORD)
    client.create_database(GAMING_DATABASE)
except InfluxDBClientError as err:
    print("InfluxDB connection failed: %s" % (err))
    sys.exit()
try:
    client.switch_database(GAMING_DATABASE)
    client.write_points(points)
except InfluxDBClientError as err:
    print("Unable to write points to InfluxDB: %s" % (err))
    sys.exit()
print("Successfully wrote %s data points to InfluxDB" % (len(points)))
| 3,767 | 1,392 |
# Molecule
#
# This program takes in a molecular formula and creates a Lewis diagram and a 3D
# model of the molecule as the output.
#
# Author: Ved Pradhan
# Since: December 31, 2021
import json
import matplotlib.pyplot as plt
import sys
import math
# Opens the periodic-table JSON file (element records) for use.
with open("elements.json", "r", encoding="utf8") as file:
    data = json.load(file)
# Gets the formula and charge from the user.
formula = input("\n\n\nWelcome to Molecule! Please enter a molecular formula "
                + "(case sensitive): ")
temp = input("What is the charge of the molecule? Enter an integer (0 for no "
             + "charge): ")
try:
    charge = int(temp)
except ValueError:
    print("Error: '" + temp + "' is not a valid charge.\n\n\n")
    sys.exit()
# A list to store each individual atom in the molecule.
atoms = []
# A dictionary to store each type of element and its frequency.
element_frequency = {}
# A list to store the bonds between Atom objects as (atom1, atom2, type).
bonds = []
# Class to represent each individual atom in the molecule.
class Atom:
    def __init__(self, symbol):
        """Create an atom from its element symbol and initialise counters."""
        self.symbol = symbol
        # Element record from elements.json, or False when the symbol is unknown.
        self.element = get_element(symbol)
        # NOTE(review): grouping reconstructed — both attributes below
        # dereference self.element, so they must sit behind the guard.
        if self.element != False:
            self.enegativity = self.element["electronegativity_pauling"]
            self.expected_ve = self.get_valence_electrons()
        self.loose_ve = 0       # non-bonding (lone) electrons
        self.sigma_bonds = 0
        self.pi_bonds = 0
        self.formal_charge = 0
        self.total_ve = 0
        # Coordinates in the Lewis diagram (assigned by draw_lewis).
        self.lewis_x = 0
        self.lewis_y = 0

    # Returns the number of valence electrons the atom is expected to have.
    def get_valence_electrons(self):
        if self.symbol == "He":
            return 2
        elif 9 <= self.element["ypos"] <= 10:
            # Rows 9-10 of the layout — presumably lanthanides/actinides;
            # confirm against elements.json.
            return 2
        elif 2 <= self.element["xpos"] <= 12:
            # Columns 2-12: alkaline earths and transition metals.
            return 2
        else:
            # Main-group columns 13-18 map to 3-8; column 1 maps to 1.
            return self.element["xpos"] % 10

    # Updates the formal charge of the atom.
    def update_formal_charge(self):
        self.formal_charge = self.expected_ve - self.loose_ve - self.sigma_bonds - self.pi_bonds

    # Updates the total number of valence electrons, including shared ones.
    def update_total_ve(self):
        self.total_ve = self.loose_ve + 2 * (self.sigma_bonds + self.pi_bonds)

    # Returns essential information about the atom as a string.
    def __str__(self):
        return (self.element["name"] + ": " + str(self.loose_ve) + " loose, "
                + str(self.sigma_bonds) + " sigma, " + str(self.pi_bonds) + " pi")
# Retrieves the element corresponding to the given symbol.
def get_element(symbol):
    '''Return the element record for *symbol*, or False (after printing
    an error) when the symbol is not present in the data set.'''
    match = next((e for e in data["elements"] if e["symbol"] == symbol), None)
    if match is not None:
        return match
    print("Error: Element '" + symbol + "' not found.\n\n\n")
    return False
# Parses through the inputted formula, splitting it into elements and frequencies.
def parse(form):
    # i: index where the next element token starts (the next capital letter).
    i = 1
    while i < len(form) and not(ord('A') <= ord(form[i]) <= ord('Z')):
        i += 1
    # j: walk back over trailing digits to find the end of the current symbol.
    j = i - 1
    while j >= 0 and ord('0') <= ord(form[j]) <= ord('9'):
        j -= 1
    if j < 0:
        print("Error: The formula cannot start with a number.\n\n\n")
        sys.exit()
    symbol_part = form[:j+1]   # element symbol, e.g. "Cl"
    number_part = form[j+1:i]  # optional count digits, e.g. "2"
    rest = form[i:]            # unparsed remainder of the formula
    # Validates the symbol (prints an error for unknown elements).
    ele = get_element(symbol_part)
    if number_part == "":
        number = 1
    else:
        number = int(number_part)
    element_frequency[symbol_part] = number
    for i in range(number):
        atoms.append(Atom(symbol_part))
    # Recurse on the remainder until the formula is consumed.
    if len(rest) > 0:
        parse(rest)
# Prints a "not supported" message and quits the program.
def noSupport():
    '''Report that the molecule is unsupported and abort via sys.exit().'''
    message = "Sorry, this molecule is not supported yet.\n\n\n"
    print(message)
    sys.exit()
# Checks if the molecule is supported.
def check():
    # Only binary compounds (exactly two element types) are supported.
    if len(element_frequency) != 2:
        noSupport()
    symb1 = list(element_frequency)[0]
    symb2 = list(element_frequency)[1]
    global center
    global outer
    # Hydrogen is always terminal; otherwise the less electronegative
    # element becomes the central atom. Equal electronegativity bails out.
    if symb1 == "H":
        center = symb2
        outer = symb1
    elif symb2 == "H":
        center = symb1
        outer = symb2
    elif get_element(symb1)["electronegativity_pauling"] < get_element(symb2)["electronegativity_pauling"]:
        center = symb1
        outer = symb2
    elif get_element(symb1)["electronegativity_pauling"] > get_element(symb2)["electronegativity_pauling"]:
        center = symb2
        outer = symb1
    else:
        noSupport()
    # Exactly one central atom is supported.
    if element_frequency[center] != 1:
        noSupport()
# Bonds two atoms together; updates in the object and the data structure.
def bond(atom1, atom2, type):
    '''Record a bond of the given type ("sigma" or "pi") between two atoms
    and bump the matching bond counter on both of them.'''
    bonds.append((atom1, atom2, type))
    if type == "sigma":
        atom1.sigma_bonds += 1
        atom2.sigma_bonds += 1
    elif type == "pi":
        atom1.pi_bonds += 1
        atom2.pi_bonds += 1
# Distributes the valence electrons as loose ones or through bonds.
def distribute():
    # Total electrons to place: expected valence electrons minus the charge.
    total_ve = 0
    for a in atoms:
        total_ve += a.expected_ve
    total_ve -= charge
    left_ve = total_ve
    global centerAtom
    centerAtom = -1
    global outerAtoms
    outerAtoms = []
    for a in atoms:
        if a.symbol == center:
            centerAtom = a
        elif a.symbol == outer:
            outerAtoms.append(a)
    # One sigma bond from the center to every outer atom (2 electrons each).
    for o in outerAtoms:
        bond(centerAtom, o, "sigma")
        left_ve -= 2
    # Outer atoms want 6 loose electrons (octet with the bond pair);
    # H and He want none.
    want_ve = -1
    if outer == "H" or outer == "He":
        want_ve = 0
    else:
        want_ve = 6
    if left_ve // len(outerAtoms) >= want_ve:
        for o in outerAtoms:
            o.loose_ve += want_ve
            left_ve -= want_ve
    # Any remainder sits on the central atom; a negative balance means the
    # molecule cannot be distributed this way and is unsupported.
    if left_ve >= 0:
        centerAtom.loose_ve += left_ve
    else:
        noSupport()
# Draws the lewis diagram using matplotlib.
def draw_lewis():
    centerAtom.lewis_x = 0
    centerAtom.lewis_y = 0
    plt.style.use('_mpl-gallery')
    fig, ax = plt.subplots()
    fig.suptitle(formula, fontsize=14, fontweight='bold')
    ax.text(0, 0, centerAtom.symbol, verticalalignment='center', horizontalalignment='center')
    # Place the outer atoms evenly on a unit circle around the center.
    for i in range(len(outerAtoms)):
        o = outerAtoms[i]
        o.lewis_x = math.cos(2 * i * math.pi / len(outerAtoms))
        o.lewis_y = math.sin(2 * i * math.pi / len(outerAtoms))
        ax.text(o.lewis_x, o.lewis_y, o.symbol, verticalalignment='center', horizontalalignment='center')
    # Draw each bond as the middle third of the segment between its atoms,
    # leaving space for the element symbols at the endpoints.
    for b in bonds:
        x1 = (2 * b[0].lewis_x + b[1].lewis_x) / 3
        x2 = (b[0].lewis_x + 2 * b[1].lewis_x) / 3
        y1 = (2 * b[0].lewis_y + b[1].lewis_y) / 3
        y2 = (b[0].lewis_y + 2 * b[1].lewis_y) / 3
        plt.plot([x1, x2], [y1, y2], color='gray')
    # Scatter loose electrons in pairs on the four sides of each symbol:
    # electrons 0-1 left, 2-3 below, 4-5 right, 6-7 above; the second set
    # of ifs nudges each dot so the members of a pair don't overlap.
    for a in atoms:
        x_shift = 0
        y_shift = 0
        for i in range(a.loose_ve):
            if 0 <= i <= 1:
                x_shift = -0.2
            elif 2 <= i <= 3:
                y_shift = -0.2
            elif 4 <= i <= 5:
                x_shift = 0.2
            elif 6 <= i <= 7:
                y_shift = 0.2
            if i == 0 or i == 5:
                y_shift = 0.05
            elif i == 1 or i == 4:
                y_shift = -0.05
            elif i == 2 or i == 7:
                x_shift = -0.05
            elif i == 3 or i == 6:
                x_shift = 0.05
            ax.scatter(x = a.lewis_x + x_shift, y = a.lewis_y + y_shift + 0.03,
                       s = 4, color='black')
    axes = plt.gca()
    axes.set_aspect(1)
    plt.xlim([-1.75, 1.75])
    plt.ylim([-1.7, 1.8])
    axes.axes.xaxis.set_visible(False)
    axes.axes.yaxis.set_visible(False)
    plt.show()
# Build the molecule from the formula, print a summary, then draw it.
parse(formula)
check()
distribute()
print(element_frequency)
for a in atoms:
    print(a)
draw_lewis()
print("\n\n\n")
| 7,470 | 2,649 |
from flask import Blueprint
from flask import render_template, redirect, url_for, request, session, jsonify
from flask_login import login_user, logout_user, current_user
from app.transaction import bp
from app.transaction.model_att import Attendence, AttendenceSchema , CompanySchema
from app.employee.model import Employee
from app.master.model import Company
from app import db, ma
from datetime import datetime
import json
@bp.route('/attendence/', methods=['GET'])
def show_attendence():
    # Render the attendance entry page.
    return render_template('transaction/attendence.html')
@bp.route('/attendence/get', methods=['POST'])
def get_attendence():
    """Return attendance records for one company and month as JSON.

    Expects a JSON body {'date': 'YYYY-MM-...', 'company': <id>};
    records are stored against the first day of the month.
    """
    if request.method != "POST":
        return jsonify({'message': 'Invalid HTTP Request , use POST.'})
    payload = request.json
    # Idiom fix: `is None` instead of `!= None`; also fixed the
    # "Recieved" typo in the client-facing message.
    if payload is None:
        return jsonify({'message': 'Empty Data Received'})
    date_parts = payload['date'].split('-')
    month_start = datetime(int(date_parts[0]), int(date_parts[1]), int(1))
    company = payload['company']
    records = Attendence.query.filter(
        Attendence.company.any(Company.id == int(company)),
        Attendence.date == month_start).all()
    data_schema = AttendenceSchema(many=True)
    return jsonify(data_schema.dumps(records))
@bp.route('/attendence/employee/<emp_id>', methods=['GET'])
def emp_attendence(emp_id):
    """Per-month attendance/early/late counters for one employee,
    covering the current calendar year."""
    if request.method != "GET":
        return jsonify({'message': 'Invalid HTTP request method.'})
    year_start = datetime(datetime.now().year, 1, 1)
    records = Attendence.query.filter(
        Attendence.employee.any(Employee.id == int(emp_id)),
        Attendence.date >= year_start).all()
    day_att = [0] * 12
    early_att = [0] * 12
    late_att = [0] * 12
    for record in records:
        # Record dates fall on month starts; bucket by month (0-based).
        month = int(datetime.strptime(
            str(record.date).split(" ")[0], "%Y-%m-%d").month)
        day_att[month - 1] = record.daysatt
        early_att[month - 1] = record.earlygoing
        late_att[month - 1] = record.latecomin
    json_data = json.dumps(
        {'day_att': day_att, 'early_att': early_att, 'late_att': late_att})
    return jsonify(json_data)
# @bp.route('/attendence/employee/data/<emp_id>', methods=['POST'])
# def emp_attendence_data(emp_id):
# if request.method == "POST":
# data = Attendence.query.filter(
# Attendence.employee.any(Employee.id == int(emp_id))).all()
# # data_schema = AttendenceSchema(many=True)
# today = datetime.now()
# today.year()
# return jsonify(json_data)
# else:
# return jsonify({'message': 'Invalid HTTP request method.'})
@bp.route('/attendence/summary/latecomin', methods=['POST'])
def summary_late_attendence():
    """Top-5 employees by late arrivals and by early departures this year.

    Aggregates each active (flag == 0) employee's attendance counters over
    the current calendar year and returns both rankings.
    """
    if request.method != "POST":
        return jsonify({'message': 'Invalid HTTP request method.'})
    # Setting fiscal Year
    today = datetime.now()
    payload_date = datetime(int(today.year), int(1), int(1))
    payload_date_end = datetime(int(today.year + 1), int(1), int(1))
    all_emps = Employee.query.filter(Employee.flag == 0).all()
    payload = {}
    totals = {}
    company_schema = CompanySchema(many=True)
    for emp in all_emps:
        data = Attendence.query.filter(
            Attendence.employee.any(Employee.id == int(emp.id)),
            Attendence.date >= payload_date,
            Attendence.date <= payload_date_end).all()
        day_att = 0
        early_att = 0
        late_att = 0
        for item in data:
            day_att += item.daysatt
            early_att += item.earlygoing
            late_att += item.latecomin
        totals[emp.id] = {
            'name': emp.name,
            'company': company_schema.dumps(emp.company),
            'day_att': day_att,
            'early_att': early_att,
            'late_att': late_att,
        }
    # BUG FIX: the "early" ranking was previously sorted from a dict that
    # was never populated, so it was always empty. Both rankings now draw
    # from the same per-employee totals.
    payload_late = sorted(totals.items(), key=lambda x: x[1]['late_att'])[::-1][:5]
    payload_early = sorted(totals.items(), key=lambda x: x[1]['early_att'])[::-1][:5]
    payload['early'] = payload_early
    payload['late'] = payload_late
    return jsonify(payload)
@bp.route('/attendence/save', methods = ['POST'])
def save_attendence():
    # Create one Attendence row per employee entry in the posted payload,
    # linking each row to the employee and the company for the given month.
    if request.method == 'POST':
        payload = request.json
        if payload != None:
            payload_data = payload['data']
            # Records are stored against the first day of the month.
            payload_date = payload['date'].split('-')
            payload_date = datetime(
                int(payload_date[0]), int(payload_date[1]), int(1))
            # Date checks to be done
            table_columns = (
                'daysatt',
                'latecomin',
                'earlygoing'
            )
            try:
                # Need update check inside
                for item in payload_data:
                    new_data = Attendence()
                    emp = Employee.query.filter_by(
                        id = int(item['id'])).first()
                    company = Company.query.filter_by(
                        id = int(payload['company'])).first()
                    new_data.company.append(company)
                    new_data.employee.append(emp)
                    # Copy only non-empty counter values onto the new row.
                    for field in table_columns:
                        val = item[field]
                        if val == '' or val is None:
                            continue
                        setattr(new_data, field, val)
                    setattr(new_data, 'date', payload_date)
                    if 'tdsval' in item.keys():
                        if item['tdsval'] != "":
                            setattr(new_data, 'tds', item['tdsval'])
                    if 'other_deduction' in item.keys():
                        val = item['other_deduction']
                        # NOTE(review): this `continue` skips the esi/pf
                        # handling AND db.session.add for the item when
                        # other_deduction is present but empty — confirm
                        # that is intended.
                        if val == '' or val is None:
                            continue
                        setattr(new_data, 'other_deduction',
                                item['other_deduction'])
                    if 'esival' in item.keys():
                        if item['esival'] != "":
                            setattr(new_data, 'esi', item['esival'])
                    if 'pfval' in item.keys():
                        if item['pfval'] != "":
                            setattr(new_data, 'pf', item['pfval'])
                    db.session.add(new_data)
                db.session.commit()
                return jsonify({'success': 'Data Added'})
            except Exception as e:
                db.session.rollback()
                return jsonify({'message': 'Something went wrong'})
            return jsonify({'message': 'Something went wrong'})
        else:
            return jsonify({'message': 'Empty data.'})
@bp.route('/attendence/update', methods = ['POST'])
def update_attendence():
    # Update existing Attendence rows in place from the posted list; each
    # item is matched to a row by its 'id'. Empty values leave the stored
    # value untouched.
    if request.method == 'POST':
        payload = request.json
        if payload != None:
            table_columns = (
                'daysatt',
                'latecomin',
                'earlygoing'
            )
            try:
                # Need Update check inside
                for item in payload:
                    saved_att = db.session.query(Attendence).filter_by(
                        id=int(item['id'])).first()
                    for field in table_columns:
                        val = item[field]
                        if val == '' or val is None:
                            continue
                        setattr(saved_att, field, val)
                    # NOTE(review): each `continue` below skips ALL of the
                    # remaining optional fields for this item when one of
                    # them is empty (e.g. empty tdsval prevents the esi/pf
                    # updates) — confirm that is intended.
                    if 'tdsval' in item.keys():
                        val = item['tdsval']
                        if val == '' or val is None:
                            continue
                        setattr(saved_att, 'tds', item['tdsval'])
                    if 'other_deduction' in item.keys():
                        val = item['other_deduction']
                        if val == '' or val is None:
                            continue
                        setattr(saved_att, 'other_deduction',
                                item['other_deduction'])
                    if 'esival' in item.keys():
                        val = item['esival']
                        if val == '' or val is None:
                            continue
                        setattr(saved_att, 'esi', item['esival'])
                    if 'pfval' in item.keys():
                        val = item['pfval']
                        if val == '' or val is None:
                            continue
                        setattr(saved_att, 'pf', item['pfval'])
                db.session.commit()
                return jsonify({'success': 'Data Updated'})
            except Exception as e:
                print(str(e))
                db.session.rollback()
                return jsonify({'message': 'Something went wrong'})
            return jsonify({'message': 'Something went wrong'})
        else:
            return jsonify({'message': 'Empty data.'})
| 9,185 | 2,567 |
#!/usr/bin/env python3
"""
Usage:
python monitor.py randopt_results/simple_example/
"""
import sys
import os
import time
import curses
import randopt as ro
# Feature flags for the two optional front-ends.
USE_MPL = True
USE_CURSES = True
try:
    from terminaltables import AsciiTable, SingleTable
except ImportError:
    # BUG FIX: the original did `raise('run pip install terminaltables')`,
    # which raises "TypeError: exceptions must derive from BaseException"
    # and hides the actual instruction. Raise a proper ImportError, and
    # catch ImportError instead of a bare except.
    raise ImportError('run pip install terminaltables')
try:
    import matplotlib.pyplot as plt
except ImportError:
    print('matplotlib not found, live plotting disable.')
    USE_MPL = False
def table_statistics(counts, timings, minimums, maximums, name='Experiment'):
    """Format the latest experiment statistics as a terminaltables table.

    Uses AsciiTable under curses (its glyphs are substituted later) and
    SingleTable for plain terminal output.
    """
    header = ['Results Count', 'Minimum Result', 'Maximum Result', 'Time Elapsed']
    latest = [
        counts[-1],
        '{:.3f}'.format(minimums[-1]),
        '{:.3f}'.format(maximums[-1]),
        '{:.2f}'.format(timings[-1]),
    ]
    table_cls = AsciiTable if USE_CURSES else SingleTable
    table = table_cls([header, latest], name)
    table.inner_heading_row_border = True
    table.inner_row_border = True
    table.inner_column_border = True
    table.outer_border = False
    table.justify_columns = {0: 'center', 1: 'center', 2: 'center', 3: 'center'}
    return table.table
def plot_statistics(counts, timings, minimums, maximums, name='Experiment'):
    """Live-plot minimum and maximum results against experiment count."""
    plt.ion()  # interactive mode so the window refreshes without blocking
    plt.clf()
    # Min subplot
    plt.subplot(211)
    plt.title('Experiment ' + name + ' Statistics')
    plt.plot(counts, minimums, label='Minimum')
    plt.legend()
    plt.ylabel('Result')
    # Max subplot
    plt.subplot(212)
    plt.plot(counts, maximums, label='Maximum')
    plt.legend()
    plt.xlabel('Number of experiments')
    plt.ylabel('Result')
    # This renders the figure
    plt.pause(0.05)
if __name__ == '__main__':
    # Path of the randopt experiment directory, e.g. randopt_results/simple_example/
    exp_path = sys.argv[1]
    if exp_path[-1] == '/':
        exp_path = exp_path[:-1]
    exp_dir, exp_name = os.path.split(exp_path)
    exp = ro.Experiment(exp_name, directory=exp_dir)
    # init interactive display
    if USE_CURSES:
        screen = curses.initscr()
        curses.noecho()
        curses.cbreak()
        curses.curs_set(False)
        screen.keypad(True)
    start_time = time.time()
    timings = []
    minimums = []
    maximums = []
    counts = []
    try:
        # Poll the experiment every 5 seconds, appending a new sample to
        # each series and refreshing whichever displays are enabled.
        while True:
            minimums.append(exp.minimum().result)
            maximums.append(exp.maximum().result)
            counts.append(exp.count())
            timings.append(time.time() - start_time)
            if USE_MPL:
                plot_statistics(counts, timings, minimums, maximums, exp_name)
            table = table_statistics(
                counts, timings, minimums, maximums, exp_name)
            if USE_CURSES:
                screen.addstr(0, 0, 'Experiment ' + exp_name + ' Statistics')
                # Swap the ASCII border characters for Unicode box-drawing.
                for i, line in enumerate(table.split('\n')):
                    line = line.replace('-', u'\u2500')
                    line = line.replace('|', u'\u2502')
                    line = line.replace('+', u'\u253c')
                    screen.addstr(2 + i, 0, line)
                screen.refresh()
            else:
                print(table)
            # plt.pause doubles as the sleep when plotting is enabled.
            if USE_MPL:
                plt.pause(5)
            else:
                time.sleep(5)
    finally:
        # Restore the terminal state on exit (including Ctrl-C).
        if USE_CURSES:
            curses.echo()
            curses.nocbreak()
            # NOTE(review): restoring a terminal usually uses keypad(False);
            # confirm keypad(True) here is intended.
            screen.keypad(True)
            curses.endwin()
| 3,385 | 1,120 |
from django.contrib import admin
from .models import Post, Comment
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    """Admin list view for blog posts."""
    list_display = ('author', 'title', 'created_date', 'published_date', 'image')


@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
    """Admin list view for post comments."""
    list_display = ('post', 'author', 'created_date')
| 355 | 104 |
import os
import ray
from ray import tune
@ray.remote(num_gpus=1)
def use_gpu():
    """Remote task requesting one GPU; logs the ids Ray assigned and the
    CUDA_VISIBLE_DEVICES mask set for this worker."""
    gpu_ids = ray.get_gpu_ids()
    print("ray.get_gpu_ids(): {}".format(gpu_ids))
    print("CUDA_VISIBLE_DEVICES: {}".format(os.environ["CUDA_VISIBLE_DEVICES"]))
if __name__ == "__main__":
    ray.init()
    # Report the GPUs visible to the driver process itself.
    print("ray.get_gpu_ids(): {}".format(ray.get_gpu_ids()))
    #print("CUDA_VISIBLE_DEVICES: {}".format(os.environ["CUDA_VISIBLE_DEVICES"]))
| 403 | 185 |
#!/usr/bin/env python
#
# Copyright (C) 2006 Huub van Dam, Science and Technology Facilities Council,
# Daresbury Laboratory.
# All rights reserved.
#
# Developed by: Huub van Dam
# Science and Technology Facilities Council
# Daresbury Laboratory
# Computational Science and Engineering Department
# Computational Chemistry Group
# http://www.cse.clrc.ac.uk/ccg
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal with the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimers.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimers in the documentation
# and/or other materials provided with the distribution.
# Neither the names of the Science and Technology Facilities Council,
# Daresbury Laboratory, the Computational Science and Engineering Department,
# the Computational Chemistry Group, nor the names of its contributors may be
# used to endorse or promote products derived from this Software without
# specific prior written permission.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS WITH THE SOFTWARE.
import os
import sys
import string
import toldiff_files
import toldiff_lcs
import toldiff_diff
import toldiff_update
import toldiff_transfer
import toldiff_show
import toldiff_tokens
def max(a,b):
    """Return the larger of the two arguments.

    Note: this deliberately shadows the builtin max() within this module;
    on a tie the first argument is returned, matching the builtin.
    """
    if a < b:
        return b
    return a
def license_toldiff(fp,errfp):
    """Print the license information to the file object fp.

    Always terminates the program: exit code 1 after printing the
    license, or exit code 5 if writing fails (the failure is reported
    on errfp on a best-effort basis).
    """
    try:
        fp.write("""
Copyright (C) 2006 Huub van Dam, Science and Technology Facilities Council,
Daresbury Laboratory.
All rights reserved.
Developed by: Huub van Dam
Science and Technology Facilities Council
Daresbury Laboratory
Computational Science and Engineering Department
Computational Chemistry Group
http://www.cse.clrc.ac.uk/ccg
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the "Software"),
to deal with the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimers.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimers in the documentation
and/or other materials provided with the distribution.
Neither the names of the Science and Technology Facilities Council,
Daresbury Laboratory, the Computational Science and Engineering Department,
the Computational Chemistry Group, nor the names of its contributors may be
used to endorse or promote products derived from this Software without
specific prior written permission.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS WITH THE SOFTWARE.
\n""")
        # Printing the license is a terminal action, like --help.
        sys.exit(1)
    except IOError, e:
        (errno,errmsg) = e
        # Best-effort error report; a failure writing to errfp is ignored.
        try:
            errfp.write("toldiff: error writing license information\n")
            errfp.write("toldiff: error message: ")
            errfp.write(errmsg)
            errfp.write("\n")
        except IOError, e:
            pass
        sys.exit(5)
def usage_toldiff(fp,errfp):
    """Print the usage information to the file object fp.

    Always terminates the program: exit code 1 after printing the usage
    text, or exit code 5 if writing fails (the failure is reported on
    errfp on a best-effort basis).
    """
    try:
        fp.write("""
Usage:
toldiff [[--diff] <reference file> <data file>]
[--update <reference file> <data file>]
[--transfer <reference file> <new reference file>]
[--show <reference file>]
[--tolerance <tolerance file name>]
[--new-tolerance <new tolerance file name>]
[--diff-exe <diff executable>]
[--output full|summary|none]
[--summary <identical>:<equivalent>:<different>]
[--exit <identical>:<equivalent>:<different>]
[--[no]exact] [--[no]tolerant] [--[no]best]
[--itol-scale <integer tolerance scale factor>]
[--ftol-scale <floating point tolerance scale factor>]
[--ctol-scale <complex tolerance scale factor>]
[--separators <separator character list>]
[--guides <number of guides>]
[--[no]backtrack]
[--help] [--license] [--version]
Toldiff is a script that compares two files allowing for tolerable
differences. Tolerable differences often arise in meta data like who ran
the test and on which date, timing data, and which machines and how many
processors were used. In scientific/technical codes additional variations
may result from numerical accuracy limitations.
Toldiff is designed to assist in software testing by suppressing tolerable
or trivial differences and highlighting only the significant ones. This
facilitates checking whether an output a program has just produced matches
the reference result obtained in the past.
The toldiff script knows of the following files:
A. The reference file:
- THE correct file
B. The data file:
- A file the correctness of which is to be tested against the reference
file. Once its correctness has been established it may be used to update
the tolerances.
C. The tolerance file:
- This file records where all allowed differences can occur, if any.
D. The new reference file:
- The file that is to replace the reference file after a change has
taken place that outdates the reference file
E. The new tolerance file:
- This file records where allowed differences can occur relative to the
new reference file instead of the current reference file.
The script offers three processes:
1. The diff process:
- This process reports all differences between the reference file and
the data file that are not explicitly tolerated.
2. The update process:
- This process updates the tolerances file adding all differences between
the reference file and the data file that were not tolerated before.
3. The transfer process:
- If the current reference file needs to be replaced by a new one this
process will carry as many as possible known tolerances relative to the
current reference file over to the new reference file.
There are various command line options to control toldiff. In cases where
environment variables can be used as an alternative to command line options
the precedence is handled as:
- environment variables take precedence over default settings
- command line options take precedence over environment variables.
There are three categories of options this script will recognise:
1. Process options:
1.1 --diff <reference file name> <data file name>
This triggers the script to perform the default diff process of
comparing the data file against the reference file.
1.2 --update <reference file name> <data file name>
This requests the update process to be performed updating the
tolerance file to allow for any differences between the reference and
data files.
During this process the new tolerances computed can be scaled by a
factor that is equal to or larger than one. This may be useful when
the expected fluctuations are larger than the current differences.
Separate scale factors may be set for each of the three different
numerical data types supported, i.e. integer, floating point, and
complex. The scale factors are always floating point numbers but
after scaling the tolerance the result is rounded where appropriate.
1.2.1 --itol-scale <integer tolerance scale factor>
Sets the scale factor for integer tolerances.
1.2.2 --ftol-scale <floating point tolerance scale factor>
Sets the scale factor for floating point tolerances.
1.2.3 --ctol-scale <complex tolerance scale factor>
Sets the scale factor for complex tolerances.
1.3 --tolerance <tolerance file name>
This option allows explicit specification of the tolerance file name.
If omitted the script will construct a name for the tolerance file
from the name of the reference file.
1.4 --transfer <reference file name> <new reference file name>
This option invokes the transfer process to migrate as many tolerances
as possible from the current reference file over to the new one.
1.5 --new-tolerance <new tolerance file name>
This option allows for the explicit specification of the name of the
new tolerance file. If this is omitted the script will construct a name
for the new tolerance file from the new reference file name.
1.6 --diff-exe <diff executable>
This option enbles replacing some of the Python diff implementation
by invoking a binary diff program. This greatly improves the
performance without changing the functionality. As an alternative
mechanism the environment variable TOLDIFF_EXE may be set to specify
the diff program. In case both the command line option and the
environment variable are provided the command line option has
precedence.
1.7 --output full|summary|none
This option controls the amount of output toldiff produces. The default
setting "full" results in printing a full diff output. The setting
"summary" suppresses the diff output and replaces it with a short
string for files being identical, equivalent or different. The values
of these strings can be specified with the --summary option. Finally,
setting "none" suppresses all output. Other than the --output option
setting the TOLDIFF_OUTPUT environment variable does the same.
1.8 --summary <identical>:<equivalent>:<different>
This option allows the specification of short results for toldiff. The
first string is reported if the reference file and data file are
identical. The second string is reported if the reference and data files
are not identical but all differences are tolerated. The last string
is reported if there are differences that are not tolerated. The
default strings are "identical", "equivalent", and "different". Finally,
these settings can be specified by setting the TOLDIFF_SUMMARY
environment variable. In both ways the values are colomn separated.
1.9 --exit <identical>:<equivalent>:<different>
This option specifies the exit codes for toldiff. The first value is
reported if the reference file and data file are identical. The second
value is reported if the reference and data files are not identical but
all differences are tolerated. The last value is reported if there are
differences that are not tolerated. The default values are 0, 0, and 1.
Finally, these settings can be specified by setting the TOLDIFF_EXIT
environment variable. In both ways the values are colomn separated.
1.10 --separators <separator character list>
Toldiff splits the data in the reference file and the data file into
tokens. It always uses white space to separate tokens. However it may
be necessary to break the tokens up further. It uses any characters
in the separator character list for that purpose. As the tolerances
depend on the separator character list this list can only be specified
when the tolerance file is created. In all other instances specifying
this list will be ignored.
Of course there is the potential to discover that the current set of
separator characters stored in the tolerance file is not optimal.
In that case the transfer process can be used to create a new tolerance
file based on a new set of separators. The specified separator list
will be used to create the new tolerance file.
The separator character list is specified as a white space separated
list of characters, e.g.
--separators "% = ,"
Alternatively the separator character list may be specified using
the environment variable TOLDIFF_SEPARATORS.
1.11 --guides <number of guides>
Tokens are typically short character sequences. As a result if a token
has changed there is a significant chance it will accidentally match
another token. This results in rather unexpected tolerances. Guides
are dummy tokens that direct the diff process to match tokens correctly
even if the tokens do not match exactly. The number of guides used
determines strict this enforcement is, 0 means no enforcement, 2 means
maximum enforcement. Alternatively the environment variable
TOLDIFF_GUIDES may be used.
1.12 --[no]backtrack
Another way to deal with the issue discussed under --guides is to let
the tolerant diff procedure re-analyse some of the differences found
initially. Initially a traditional diff procedure is used that finds
exact matches. As this cannot take tolerances into account suboptimal
matches may result. Rather than rigidly adhering to the matches the
initial diff has found the --backtrack option extends the differences
to the nearest number of whole lines. These whole line sections are
then re-analysed using the tolerant diff procedure, thus allowing
matches to be found that the initial diff by design cannot find.
The environment variable TOLDIFF_BACKTRACK may be used instead of the
command line flag.
Both the --guides and --backtrack options are designed to deal with the
situation where adjacent tokens have overlapping ranges of valid values.
However, even in these situations unintended matches are unlikely unless
the values have very few relevant digits. I.e. is the tolerance is such
that only 1 digit may change then the chance of accidently matching a
neighbouring number is 1 in 10, if 3 digits may change then the chance is
1 in 1000. As a result one may want to check whether the extra expense of
using the --guides and --backtrack options is justified given the
associated risk.
2. Information options:
2.1 --help
Print this information on how to use this scripts.
2.2 --show <reference file name>
Prints the reference file marking all the known tolerances on it.
This allows checking how the program has resolved differences through
the tolerances chosen.
The tolerances are marked on each line in the following order:
1. The number of lines that may be inserted after this line.
2. Whether this line may be deleted in which case it will be marked by
a 'X', otherwise white space indicates that the line has to be
present.
3. The contents of the line are shown with those characters that may
change replaced by '#'.
2.3 --version
Print the version number of the toldiff script you are using.
2.4 --license
Print the license conditions under which this script is distributed.
3. Debug options:
These options are normally set automatically based on the requirements of
the selected process. The default settings aim to complete the selected
process with the highest efficiency. However, for debugging purposes it
is possible to override these settings. You are free to try them to your
own peril.
3.1 --[no]exact
Enable or disable the file differencing procedure that is based on
exact line matches.
3.2 --[no]tolerant
Enable or disable the file differencing procedure that uses a line
comparison which allows for tolerable differences between lines.
3.3 --[no]best
Enable or disable the file differencing procedure that matches lines
based on maximum similarity.
Copyright 2006, Huub van Dam, Science and Technology Facilities Council,
Daresbury Laboratory\n""")
        # Printing the usage text is a terminal action.
        sys.exit(1)
    except IOError, e:
        (errno,errmsg) = e
        # Best-effort error report; a failure writing to errfp is ignored.
        try:
            errfp.write("toldiff: error writing usage information\n")
            errfp.write("toldiff: error message: ")
            errfp.write(errmsg)
            errfp.write("\n")
        except IOError, e:
            pass
        sys.exit(5)
def load_file(filename,err_fp,separators,nguides):
"""Open and load a file. Returns the file text and the number of lines.
The routine also handles I/O errors. I.e. it reports the error to the
user and terminates the program.
When the file is read the appropriate number of guides are inserted
as specified by nguides.
"""
text = toldiff_tokens.tokenized_file()
lines = 0
tokens = 0
try:
file_fp = open(filename,"r")
(text,lines,tokens) = toldiff_files.load_plain_text(file_fp,text,lines,tokens,separators,nguides)
file_fp.close()
except IOError, e:
(errno,errmsg) = e
try:
err_fp.write("toldiff: I/O error on file: ")
err_fp.write(filename)
err_fp.write("\n")
err_fp.write("toldiff: I/O error message: ")
err_fp.write(errmsg)
err_fp.write("\n")
except IOError, e:
pass
sys.exit(10)
return (text,lines,tokens)
def store_tolerance(tol_fnm,chg_txt,add_txt,del_txt,err_fp,separators,nguides):
"""Open and write the tolerance file. The routine handles any I/O errors.
I.e. it reports the error to the user and terminates the program."""
try:
tol_fp = open(tol_fnm,"w")
toldiff_files.save_tolerances(tol_fp,chg_txt,add_txt,del_txt,err_fp,separators,nguides)
tol_fp.close()
except IOError, e:
(errno,errmsg) = e
try:
err_fp.write("toldiff: I/O error encountered attempting to write: ")
err_fp.write(tol_fnm)
err_fp.write("\n")
err_fp.write("toldiff: I/O error message: ")
err_fp.write(errmsg)
err_fp.write("\n")
except IOError, e:
pass
sys.exit(30)
def run_diff(diff_exe,ref_fnm,dat_fnm,ref,dat,fp):
"""This routine starts off an external diff program.
As the tokenized versions of the reference and data files do not exist
these have to be written first. Next the diff program is started.
Both the stdout and stderr file descriptors are returned as due file
buffer space the diff program cannot complete if stdout is not read.
So only after reading stdout to drive diff to completion can stderr be
checked to see if diff ran successfully.
If an error is reported on stderr this should be passed on to the user
and the program should terminate.
After diff has run the tokenized files should be deleted.
- diff_exe - the path of the diff executable
- ref_fnm - the filename for the temporary tokenized reference file
- dat_fnm - the filename for the temporary tokenized data file
- ref - the tokenized reference
- dat - the tokenized data
- fp - a file descriptor for error reporting
"""
cmd = diff_exe+" "+ref_fnm+" "+dat_fnm
try:
ref_fp = open(ref_fnm,"w")
toldiff_files.save_tokenized(ref_fp,ref,fp)
ref_fp.close()
except IOError, e:
(errno,errmsg) = e
try:
fp.write("toldiff: I/O error on tokenized reference file\n")
fp.write("toldiff: I/O error message: ")
fp.write(errmsg)
fp.write("\n")
except IOError, e:
pass
sys.exit(25)
try:
dat_fp = open(dat_fnm,"w")
toldiff_files.save_tokenized(dat_fp,dat,fp)
dat_fp.close()
except IOError, e:
(errno,errmsg) = e
try:
fp.write("toldiff: I/O error on tokenized data file\n")
fp.write("toldiff: I/O error message: ")
fp.write(errmsg)
fp.write("\n")
except IOError, e:
pass
sys.exit(25)
try:
(in_fp,out_fp,err_fp) = os.popen3(cmd)
except IOError, e:
(errno,errmsg) = e
try:
fp.write("toldiff: I/O error on external diff standard error file\n")
fp.write("toldiff: I/O error message: ")
fp.write(errmsg)
fp.write("\n")
except IOError, e:
pass
sys.exit(25)
in_fp.close()
return (out_fp,err_fp)
def find_overall_lcs(lexact,ltol,lbest,tol,ref_fnm,dat_fnm,diff_exe,feps,ieps,err_fp,separators,nguides,snake_trim,update):
    """Find the overall LCS including the tolerances. The general procedure is
    simply to establish the exact LCS, then try to resolve as much of the
    mismatches by considering the tolerances, then try to match the remaining
    differences to minimize the mismatches.
    This routine will read in the reference file and the data file as well.
    The reason for this is that this is more efficient in case an external
    diff program is used for the first phase.
    The routine returns the overall LCS, the reference file text, the data
    file text and beginning and ending token numbers of both files.
    This routine allows each phase to be disabled explicitly through a
    flag passed in as an argument:
    - lexact: if false skip the exact matching
    - ltol  : if false skip the tolerant matching
    - lbest : if false skip the minimal difference matching.
    The number of guides is specified in nguides. This is used in reading
    in the reference and data files.
    Exit codes: 20 if the external diff reported errors, 25 on an I/O
    error while reading its standard error stream.
    """
    lcs = [ ]
    #
    # Phase 1: exact matching, either in pure Python or via an external
    # diff executable (the latter requires writing temporary files).
    #
    if lexact:
        if diff_exe == "":
            # Pure Python exact diff.
            Nb = 1
            Ntb = 1
            Mb = 1
            Mtb = 1
            (ref,Ne,Nte) = load_file(ref_fnm,err_fp,separators,nguides)
            (dat,Me,Mte) = load_file(dat_fnm,err_fp,separators,nguides)
            lcs = toldiff_lcs.find_lcs1(ref,Ntb,Nte,dat,Mtb,Mte)
        else:
            # NOTE: 'false'/'true' are the module-level booleans defined
            # further down in this file.
            error = false
            Nb = 1
            Ntb = 1
            Mb = 1
            Mtb = 1
            (ref,Ne,Nte) = load_file(ref_fnm,err_fp,separators,nguides)
            (dat,Me,Mte) = load_file(dat_fnm,err_fp,separators,nguides)
            #
            # Construct temporary file names
            #
            pid = os.getpid()
            # The extra "a" and "b" ensure unique file names even if the reference
            # and data file names are the same.
            tmp_ref_fnm = ref_fnm+"a"+str(pid)
            tmp_dat_fnm = dat_fnm+"b"+str(pid)
            #
            # Construct temporary files, invoke diff and parse diff output
            #
            (diff_out_fp,diff_err_fp) = run_diff(diff_exe,tmp_ref_fnm,tmp_dat_fnm,ref,dat,err_fp)
            lcs = toldiff_diff.diff_to_lcs(Ntb,Nte,Mtb,Mte,diff_out_fp,err_fp)
            diff_out_fp.close()
            #
            # Delete temporary files
            #
            os.remove(tmp_ref_fnm)
            os.remove(tmp_dat_fnm)
            #
            # Check whether the diff program detected any errors
            # (stderr can only be read after stdout has been drained).
            #
            try:
                line = diff_err_fp.readline()
                while line:
                    error = true
                    err_fp.write("toldiff:"+line)
                    line = diff_err_fp.readline()
                diff_err_fp.close()
            except IOError, e:
                (errno,errmsg) = e
                try:
                    err_fp.write("toldiff: I/O error on external diff standard error file\n")
                    err_fp.write("toldiff: I/O error message: ")
                    err_fp.write(errmsg)
                    err_fp.write("\n")
                except IOError, e:
                    pass
                sys.exit(25)
            if error:
                sys.exit(20)
    else:
        # Exact phase skipped: still read both files so that later
        # phases have the token data.
        Nb = 1
        Ntb = 1
        Mb = 1
        Mtb = 1
        (ref,Ne,Nte) = load_file(ref_fnm,err_fp,separators,nguides)
        (dat,Me,Mte) = load_file(dat_fnm,err_fp,separators,nguides)
    #Snake trimming may only be used here!
    if (snake_trim and ((ltol and len(tol) > 0 ) or (update and lbest))):
        lcs = toldiff_lcs.trim_snakes(lcs,ref,Ntb,Nte,dat,Mtb,Mte)
    #
    # Phase 2: tolerant matching of the gaps the exact phase left.
    #
    if (len(tol) <= 0) or (not ltol):
        #
        # No tolerances were specified or this phase is explicitly suppressed
        #
        pass
    #
    else:
        #
        # Consider all the differences and try to resolve as many as possible.
        #
        if (len(lcs) <= 0):
            #
            # Then the new LCS is simply the result of the tolerant diff
            #
            lcs = toldiff_lcs.find_lcs2(tol,ref,Ntb,Nte,dat,Mtb,Mte,feps,ieps)
        #
        else:
            #
            # First consider whether there is anything to compare before the first
            # snake
            #
            # NOTE: lcs1 aliases lcs (same list object); the pops below
            # consume it while a fresh result list is built up in lcs.
            lcs1 = lcs
            (xbot1,ybot1,xtop1,ytop1,type1) = lcs1.pop(0)
            if (xbot1 > Mtb) and (ybot1 > Ntb):
                lcs = toldiff_lcs.find_lcs2(tol,ref,Ntb,ybot1-1,dat,Mtb,xbot1-1,feps,ieps)
            else:
                lcs = [ ]
            xtop0 = xtop1
            ytop0 = ytop1
            lcs.append((xbot1,ybot1,xtop1,ytop1,type1))
            while (len(lcs1) > 0 ):
                (xbot1,ybot1,xtop1,ytop1,type1) = lcs1.pop(0)
                if (xbot1 > xtop0+1) and (ybot1 > ytop0+1):
                    # A gap between two consecutive snakes: diff it tolerantly.
                    lcs2 = toldiff_lcs.find_lcs2(tol,ref,ytop0+1,ybot1-1,dat,xtop0+1,xbot1-1,feps,ieps)
                    lcs = lcs + lcs2
                xtop0 = xtop1
                ytop0 = ytop1
                lcs.append((xbot1,ybot1,xtop1,ytop1,type1))
            if (Nte >= ytop0+1) and (Mte >= xtop0+1):
                #
                # There is some more stuff at the end left to do
                #
                lcs2 = toldiff_lcs.find_lcs2(tol,ref,ytop0+1,Nte,dat,xtop0+1,Mte,feps,ieps)
                lcs = lcs + lcs2
    #
    # Phase 3: best-match diffing of whatever is still unmatched.
    #
    if (not lbest):
        #
        # This phase is explicitly suppressed
        #
        pass
    #
    else:
        #
        # Consider all the differences and try to match different lines as best as
        # possible minimizing the number of differences.
        #
        #Snake trimming does not work here as the lcs3 may pair tokens up in a way
        #that is different from what lcs2 would do. The result of this inconsistency
        #is that some differences will never be tolerated! Clearly this breaks
        #toldiff.
        #lcs = toldiff_lcs.trim_snakes(lcs,ref,Ntb,Nte,dat,Mtb,Mte)
        if (len(lcs) <= 0):
            #
            # Then the new LCS is simply the result of the best match diff,
            # which will probably hurt as this will get very expensive.
            #
            lcs = toldiff_lcs.find_lcs3(tol,ref,Ntb,Nte,dat,Mtb,Mte,feps,ieps)
        #
        else:
            #
            # First consider whether there is anything to compare before the first
            # snake
            #
            lcs1 = lcs
            (xbot1,ybot1,xtop1,ytop1,type1) = lcs1.pop(0)
            if (xbot1 > Mtb) and (ybot1 > Ntb):
                lcs = toldiff_lcs.find_lcs3(tol,ref,Ntb,ybot1-1,dat,Mtb,xbot1-1,feps,ieps)
            else:
                lcs = [ ]
            xtop0 = xtop1
            ytop0 = ytop1
            lcs.append((xbot1,ybot1,xtop1,ytop1,type1))
            while (len(lcs1) > 0 ):
                (xbot1,ybot1,xtop1,ytop1,type1) = lcs1.pop(0)
                if (xbot1 > xtop0+1) and (ybot1 > ytop0+1):
                    lcs2 = toldiff_lcs.find_lcs3(tol,ref,ytop0+1,ybot1-1,dat,xtop0+1,xbot1-1,feps,ieps)
                    lcs = lcs + lcs2
                xtop0 = xtop1
                ytop0 = ytop1
                lcs.append((xbot1,ybot1,xtop1,ytop1,type1))
            if (Nte >= ytop0+1) and (Mte >= xtop0+1):
                #
                # There is some more stuff at the end left to do
                #
                lcs2 = toldiff_lcs.find_lcs3(tol,ref,ytop0+1,Nte,dat,xtop0+1,Mte,feps,ieps)
                lcs = lcs + lcs2
    return (lcs,ref,Ntb,Nte,dat,Mtb,Mte)
def construct_tolerance_filename(ref_fnm,dat_fnm,tol_fnm):
    """Return the tolerance file name to use.

    If tol_fnm is non-empty it is returned unchanged. Otherwise a name
    is derived from the reference file name by replacing its extension
    with ".tol"; if that would collide with the extension of the
    reference or data file, ".tlr" and then ".tlc" are tried instead.
    A reference name without an extension simply gets the extension
    appended.
    """
    if tol_fnm != "":
        # An explicit tolerance file name takes precedence.
        return tol_fnm
    # FIX: use the str.rfind method instead of string.rfind; the string
    # module's function API is deprecated (and removed in Python 3),
    # while the method form behaves identically.
    i = ref_fnm.rfind(".")
    if i == -1:
        # The reference file name has no extension; slicing with
        # i > len(ref_fnm) below keeps the whole name.
        ref_ext = ""
        i = len(ref_fnm)+1
    else:
        # Extract the reference file's extension (including the dot).
        ref_ext = ref_fnm[i:]
    j = dat_fnm.rfind(".")
    if j == -1:
        dat_ext = ""
    else:
        dat_ext = dat_fnm[j:]
    # Pick the first candidate extension that clashes with neither file.
    tol_ext = ".tol"
    if (tol_ext == ref_ext) or (tol_ext == dat_ext):
        tol_ext = ".tlr"
    if (tol_ext == ref_ext) or (tol_ext == dat_ext):
        tol_ext = ".tlc"
    return ref_fnm[:i]+tol_ext
# Home-grown booleans; the script apparently targets Python versions
# that predate the True/False builtins — TODO confirm before removing.
true = (0 == 0)
false = not true
#
# Set up the default comparison options
# (process selector constants and which diff phases are enabled)
#
diff = 1
update = 2
transfer = 3
show = 4
process = diff
lexact = true
ltol = true
lbest = false
#
# Set up default comparison results
#
identical = 1
equivalent = 2
different = 3
#
# Set up default comparison exit codes
#
exit_identical = 0
exit_equivalent = 0
exit_different = 1
#
# Set up default comparison summary texts
#
text_identical = "identical"
text_equivalent = "equivalent"
text_different = "different"
#
# Set up output options and default output option
#
output_full = 3
output_summary = 2
output_none = 1
output = output_full
#
# Set up the default list of separator characters for the reference and
# data file tokenisation. In addition to these characters whitespace will
# be used as token separator as well. Note that a long list of separators
# deteriorates the performance significantly.
#
# Separators is the list of additional separator characters used for the
# reference file and the data file.
# Separators_new is the list of additional separator characters used for the
# new reference file in case of a transfer operation.
#
separators = []
separators_new = []
#
# Set the default snake trimming behaviour
#
snake_trim = false
#
# Set the default number of guides
#
nguides = 0
#
# Set up default precisions for floating point and integer numbers
#
feps = 1.0e-12
ieps = 0.1
#
# Set up default scale factors for new tolerances
# (1.0 means no scaling; scale factors below are clamped to >= tol_scale)
#
tol_scale = 1.0
itol_scale = tol_scale
ftol_scale = tol_scale
ctol_scale = tol_scale
lcs = [ ]
# Command line / derived state, filled in by the option parsing below.
diff_exe = ""
tol_fnm = ""
tol_new_fnm = ""
ref_fnm = ""
dat_fnm = ""
# Process environment variables first: they override the defaults, and
# are in turn overridden by command line options parsed later.
narg = len(sys.argv)
iarg = 1
if os.environ.has_key("TOLDIFF_EXE"):
    diff_exe = os.environ["TOLDIFF_EXE"]
if os.environ.has_key("TOLDIFF_OUTPUT"):
    output = os.environ["TOLDIFF_OUTPUT"]
    if output == "FULL" or output == "full":
        output = output_full
    elif output == "SUMMARY" or output == "summary":
        output = output_summary
    elif output == "NONE" or output == "none":
        output = output_none
if os.environ.has_key("TOLDIFF_EXIT"):
    # Three colon-separated exit codes: identical:equivalent:different.
    # Any other count is silently ignored.
    exit_codes = os.environ["TOLDIFF_EXIT"]
    exit_codes = string.split(exit_codes,":")
    if len(exit_codes) == 3:
        exit_identical = int(exit_codes[0])
        exit_equivalent = int(exit_codes[1])
        exit_different = int(exit_codes[2])
if os.environ.has_key("TOLDIFF_SUMMARY"):
    # Three colon-separated summary strings, same layout as TOLDIFF_EXIT.
    text_summaries = os.environ["TOLDIFF_SUMMARY"]
    text_summaries = string.split(text_summaries,":")
    if len(text_summaries) == 3:
        text_identical = text_summaries[0]
        text_equivalent = text_summaries[1]
        text_different = text_summaries[2]
# Tolerance scale factors may never shrink tolerances: clamp to >= 1.0.
if os.environ.has_key("TOLDIFF_ITOLSCALE"):
    itol_scale = max(tol_scale,float(os.environ["TOLDIFF_ITOLSCALE"]))
if os.environ.has_key("TOLDIFF_FTOLSCALE"):
    ftol_scale = max(tol_scale,float(os.environ["TOLDIFF_FTOLSCALE"]))
if os.environ.has_key("TOLDIFF_CTOLSCALE"):
    ctol_scale = max(tol_scale,float(os.environ["TOLDIFF_CTOLSCALE"]))
if os.environ.has_key("TOLDIFF_SEPARATORS"):
    # Whitespace-separated list of extra single-character separators.
    separators = string.split(os.environ["TOLDIFF_SEPARATORS"])
    separators_new = string.split(os.environ["TOLDIFF_SEPARATORS"])
if os.environ.has_key("TOLDIFF_GUIDES"):
    nguides = max(0,int(os.environ["TOLDIFF_GUIDES"]))
if os.environ.has_key("TOLDIFF_BACKTRACK"):
    tmptxt = os.environ["TOLDIFF_BACKTRACK"]
    tmptxt = tmptxt.lower()
    if tmptxt == "yes" or tmptxt == "y":
        snake_trim = true
    elif tmptxt == "no" or tmptxt == "n":
        snake_trim = false
    else:
        try:
            sys.stderr.write("toldiff: invalid value for TOLDIFF_BACKTRACK should be \"yes\" or \"no\"\n")
        except IOError, e:
            pass
        sys.exit(5)
if narg == 1:
usage_toldiff(sys.stdout,sys.stderr)
while iarg < narg:
if sys.argv[iarg] == "--exact":
lexact = true
elif sys.argv[iarg] == "--noexact":
lexact = false
elif sys.argv[iarg] == "--tolerant":
ltol = true
elif sys.argv[iarg] == "--notolerant":
ltol = false
elif sys.argv[iarg] == "--best":
lbest = true
elif sys.argv[iarg] == "--nobest":
lbest = false
elif sys.argv[iarg] == "--tolerance":
iarg = iarg + 1
if iarg < narg:
tol_fnm = sys.argv[iarg]
else:
try:
sys.stderr.write("toldiff: missing tolerance file name\n")
except IOError, e:
pass
sys.exit(5)
elif sys.argv[iarg] == "--new-tolerance":
iarg = iarg + 1
if iarg < narg:
tol_new_fnm = sys.argv[iarg]
else:
try:
sys.stderr.write("toldiff: missing new tolerance file name\n")
except IOError, e:
pass
sys.exit(5)
elif sys.argv[iarg] == "--diff-exe":
iarg = iarg + 1
if iarg < narg:
diff_exe = sys.argv[iarg]
else:
try:
sys.stderr.write("toldiff: missing diff executable specification\n")
except IOError, e:
pass
sys.exit(5)
elif sys.argv[iarg] == "--diff":
process = diff
elif sys.argv[iarg] == "--update":
process = update
elif sys.argv[iarg] == "--transfer":
process = transfer
elif sys.argv[iarg] == "--show":
process = show
elif sys.argv[iarg] == "--version":
toldiff_files.version_toldiff(sys.stdout,sys.stderr)
sys.exit(0)
elif sys.argv[iarg] == "--help":
usage_toldiff(sys.stdout,sys.stderr)
elif sys.argv[iarg] == "--license":
license_toldiff(sys.stdout,sys.stderr)
elif sys.argv[iarg] == "--exit":
iarg = iarg + 1
if iarg < narg:
exit_codes = sys.argv[iarg]
exit_codes = string.split(exit_codes,":")
if len(exit_codes) == 3:
exit_identical = int(exit_codes[0])
exit_equivalent = int(exit_codes[1])
exit_different = int(exit_codes[2])
else:
try:
sys.stderr.write("toldiff: missing exit codes specification\n")
except IOError, e:
pass
sys.exit(5)
elif sys.argv[iarg] == "--summary":
iarg = iarg + 1
if iarg < narg:
text_summaries = sys.argv[iarg]
text_summaries = string.split(text_summaries,":")
if len(text_summaries) == 3:
text_identical = text_summaries[0]
text_equivalent = text_summaries[1]
text_different = text_summaries[2]
else:
try:
sys.stderr.write("toldiff: missing summaries specification\n")
except IOError, e:
pass
sys.exit(5)
elif sys.argv[iarg] == "--output":
iarg = iarg + 1
if iarg < narg:
output = sys.argv[iarg]
if output == "FULL" or output == "full":
output = output_full
elif output == "SUMMARY" or output == "summary":
output = output_summary
elif output == "NONE" or output == "none":
output = output_none
else:
sys.stderr.write("toldiff: unknown output specification: %s\n" % output)
sys.exit(5)
else:
try:
sys.stderr.write("toldiff: missing output specification\n")
except IOError, e:
pass
sys.exit(5)
elif sys.argv[iarg] == "--itol-scale":
iarg = iarg + 1
if iarg < narg:
itol_scale = max(tol_scale,float(sys.argv[iarg]))
else:
try:
sys.stderr.write("toldiff: missing integer tolerance scale factor\n")
except IOError, e:
pass
sys.exit(5)
elif sys.argv[iarg] == "--ftol-scale":
iarg = iarg + 1
if iarg < narg:
ftol_scale = max(tol_scale,float(sys.argv[iarg]))
else:
try:
sys.stderr.write("toldiff: missing floating point tolerance scale factor\n")
except IOError, e:
pass
sys.exit(5)
elif sys.argv[iarg] == "--ctol-scale":
iarg = iarg + 1
if iarg < narg:
ctol_scale = max(tol_scale,float(sys.argv[iarg]))
else:
try:
sys.stderr.write("toldiff: missing complex tolerance scale factor\n")
except IOError, e:
pass
sys.exit(5)
elif sys.argv[iarg] == "--separators":
iarg = iarg + 1
if iarg < narg:
separators = string.split(sys.argv[iarg])
separators_new = string.split(sys.argv[iarg])
i = 0
n = len(separators)
while (i < n):
if len(separators[i]) != 1:
sys.stderr.write("toldiff: separator character list is not a list of single characters\n")
sys.stderr.write("toldiff: --separators \""+sys.argv[iarg]+"\"\n")
sys.exit(5)
i = i + 1
elif sys.argv[iarg] == "--guides":
iarg = iarg + 1
if iarg < narg:
nguides = max(0,int(sys.argv[iarg]))
else:
try:
sys.stderr.write("toldiff: missing number of guides\n")
except IOError, e:
pass
sys.exit(5)
elif sys.argv[iarg] == "--backtrack":
snake_trim = true
elif sys.argv[iarg] == "--nobacktrack":
snake_trim = false
else:
argstr = sys.argv[iarg]
if (process < show) and (iarg == narg-2):
ref_fnm = sys.argv[iarg]
iarg = iarg + 1
dat_fnm = sys.argv[iarg]
elif (process == show) and (iarg == narg-1):
ref_fnm = sys.argv[iarg]
elif argstr[0:1] == "-":
try:
sys.stderr.write("toldiff: unknow option encountered: ")
sys.stderr.write(argstr)
sys.stderr.write("\n")
except IOError, e:
pass
sys.exit(8)
else:
sys.stderr.write("toldiff: missing reference or data files?\n")
sys.exit(9)
iarg = iarg + 1
# Validate the positional arguments: every process needs a reference
# file, and all processes except --show need a data file too.
if ref_fnm == "":
    sys.stderr.write("toldiff: error: no reference filename given\n")
    sys.exit(5)
if (process < show) and (dat_fnm == ""):
    sys.stderr.write("toldiff: error: no data filename given\n")
    sys.exit(6)
# Derive tolerance file names where they were not given explicitly.
tol_fnm = construct_tolerance_filename(ref_fnm,dat_fnm,tol_fnm)
if process == transfer:
    # For a transfer the new tolerance name derives from the new
    # reference file (passed in dat_fnm).
    tol_new_fnm = construct_tolerance_filename(dat_fnm,ref_fnm,tol_new_fnm)
ref_txt = { }
dat_txt = { }
chg_txt = { }
add_txt = { }
del_txt = { }
ref_lines = 0
dat_lines = 0
try:
    tol_fp = open(tol_fnm,"r")
    (chg_txt,add_txt,del_txt,separators) = toldiff_files.load_tolerances(tol_fp,separators,nguides)
    tol_fp.close()
except IOError, e:
    #
    # If an exception was thrown it is assumed that there is no valid
    # tolerance file present. Hence proceed as if there is no tolerance
    # information.
    #
    pass
# Dispatch on the selected process: diff, update, transfer or show.
if process == diff:
    # Compare the data file against the reference file, filtering out
    # every tolerated difference.
    (lcs,ref_txt,Ntb,Nte,dat_txt,Mtb,Mte) = find_overall_lcs(lexact,ltol,lbest,chg_txt,ref_fnm,dat_fnm,diff_exe,feps,ieps,sys.stderr,separators,nguides,snake_trim,false)
    lcs = toldiff_lcs.filter_lcs(lcs,Ntb,Nte,Mtb,Mte,add_txt,del_txt)
    analysis = toldiff_diff.lcs_analysis(Ntb,Nte,Mtb,Mte,lcs,identical,equivalent,different)
    if output == output_full:
        (line_lcs,Nlb,Nle,Mlb,Mle) = toldiff_lcs.lcs_tokens2lines(lcs,ref_txt,Ntb,Nte,dat_txt,Mtb,Mte,nguides)
        toldiff_diff.lcs_to_diff(ref_txt,Nlb,Nle,dat_txt,Mlb,Mle,line_lcs,sys.stdout,sys.stderr,nguides)
    elif output == output_summary:
        # Print only the configured one-word summary.
        if analysis == identical:
            sys.stdout.write("%s" % text_identical)
        elif analysis == equivalent:
            sys.stdout.write("%s" % text_equivalent)
        elif analysis == different:
            sys.stdout.write("%s" % text_different)
        else:
            sys.stderr.write("illegal value of analysis")
    elif output == output_none:
        pass
    else:
        sys.stderr.write("illegal value of output")
    # Exit with the configured code for the comparison outcome.
    if analysis == identical:
        sys.exit(exit_identical)
    elif analysis == equivalent:
        sys.exit(exit_equivalent)
    elif analysis == different:
        sys.exit(exit_different)
    else:
        sys.stderr.write("illegal value of analysis")
elif process == update:
    # Fold every remaining difference into the tolerance file; all three
    # diff phases are forced on for maximum matching.
    (lcs,ref_txt,Nb,ref_lines,dat_txt,Mb,dat_lines) = find_overall_lcs(true,true,true,chg_txt,ref_fnm,dat_fnm,diff_exe,feps,ieps,sys.stderr,separators,nguides,snake_trim,true)
    chg_txt = toldiff_update.lcs_to_change(lcs,ref_txt,Nb,ref_lines,dat_txt,Mb,dat_lines,chg_txt,feps,ieps,itol_scale,ftol_scale,ctol_scale)
    add_txt = toldiff_update.lcs_to_add(lcs,ref_txt,Nb,ref_lines,dat_txt,Mb,dat_lines,add_txt)
    del_txt = toldiff_update.lcs_to_delete(lcs,ref_txt,Nb,ref_lines,dat_txt,Mb,dat_lines,del_txt)
    store_tolerance(tol_fnm,chg_txt,add_txt,del_txt,sys.stderr,separators,nguides)
elif process == transfer:
    # Carry tolerances over from the current reference file to a new one.
    (lcs,ref_txt,Nb,ref_lines,dat_txt,Mb,dat_lines) = find_overall_lcs(true,true,false,chg_txt,ref_fnm,dat_fnm,diff_exe,feps,ieps,sys.stderr,separators,nguides,snake_trim,false)
    (chg_new,add_new,del_new) = toldiff_transfer.transfer_tol(lcs,Nb,ref_lines,Mb,dat_lines,chg_txt,add_txt,del_txt)
    store_tolerance(tol_new_fnm,chg_new,add_new,del_new,sys.stderr,separators_new,nguides)
elif process == show:
    # Display the reference file with its tolerances marked on it.
    Nb = 1
    Ntb = 1
    (ref_txt,Ne,Nte) = load_file(ref_fnm,sys.stderr,separators,nguides)
    toldiff_show.show_tolerance(sys.stdout,ref_txt,Nb,Ne,chg_txt,add_txt,del_txt,sys.stderr,nguides)
else:
    # Unreachable unless the process constants above are edited.
    try:
        sys.stderr.write("toldiff: internal error: invalid process")
    except IOError, e:
        pass
    sys.exit(999)
| 42,230 | 13,744 |
from Methods.utils import rmsd, mde
from datetime import datetime
import logging
import json
import sys
def os_display_call(test_path, main, data, multistart=False):
    """Log SPG solver statistics and dump them as a JSON file under *test_path*.

    Args:
        test_path: directory where the JSON statistics file is written.
        main: 9-tuple of run metadata (filename, initial atom count, atom count
            after re-ordination, assessed distances, known distances, convex
            flag, unscaled/scaled initial objective values, and the ops tuple).
        data: dict of per-start results when multistart is True, otherwise a
            single (out, elapsed_time, fo) tuple.
        multistart: selects between the multi-start and standard report paths.
    """
    # Unpack run metadata; `filename` is unpacked but not used below.
    (
        filename,
        num_atom_init,
        total_atoms_ord,
        m,
        prop_dist,
        convex,
        fo_non_scaled,
        fo_scaled,
        ops,
    ) = main
    xi, solution, u, v, lb, ub = ops
    # Get logger
    logger = logging.getLogger('root.spgLOG')
    logger.info(
        "########################################## INFO ##########################################"
    )
    logger.info(
        f":: Protein: (unknown), Initial atoms number: {num_atom_init}, after re-ordination {total_atoms_ord}."
    )
    logger.info(f":: Assessed distances: {m} and known distances: {prop_dist}.")
    if convex:
        logger.info(
            f":: Initial objective value for the relaxed problem: {fo_non_scaled:.4e}"
        )
        logger.info(
            f":: Initial objective value for the relaxed problem --scaled {fo_scaled:.4e}"
        )
    # Initial solution quality: RMSD against the reference, MDE against bounds.
    rmsd_i, mde_i = rmsd(xi, solution), mde(xi, u, v, lb, ub)
    logger.info(f":: RMSDi = {rmsd_i:<24.2e} MDEi = {mde_i:.2e}")
    # -----------------------------------------------------------------------------------
    # Multi-start option --Enabled
    # -----------------------------------------------------------------------------------
    if multistart:
        # NOTE(review): isinstance(data, dict) would be the idiomatic check,
        # and `return exit()` relies on the site-provided exit() builtin.
        if type(data) != dict:
            logger.warning(":: data type object not match with dict structure!")
            logger.warning(":: The process was interrupted")
            return exit()
        logger.info(":: spg results --multi start: True")
        logger.info(
            ":: Iter - bck -- RMSDf ----- MDEf"
            " ----- i_val ----- f_val ----- gtd ----- |d| ----- time(s)"
        )
        # One log line and one sub_log entry per start.
        sub_log = {}
        k = 0
        for key in data:
            out, elapsed_time, fo = data[key]
            x_spg, backtracking, iterations, fun_o, gtd, norm_d = out
            # Statistics:
            rmsd_f = rmsd(x_spg, solution)
            mde_f = mde(x_spg, u, v, lb, ub)
            prompt_string = (
                f" {iterations:<5}: {backtracking:<6} {rmsd_f:<11.2e} {mde_f:<10.2e} {fo / 2:<11.2e} "
                f"{fun_o / 2:<10.2e} {gtd:<10.2e} {norm_d:<10.2e} {elapsed_time:.3f}"
            )
            sub_log[k] = {"iter": f'{iterations:<7}', "back": f'{backtracking:<6}', "RMDSf": f'{rmsd_f:<11.2e}',
                          "MDEf": f'{mde_f:<10.2e}', "fun_i": f'{fo / 2:<11.2e}', "fun_f": f'{fun_o / 2:<10.2e}',
                          "gtd": f'{gtd:<10.2e}', "norm_d": f'{norm_d:<10.2e}', "time": f'{elapsed_time:.3f}'}
            logger.info(prompt_string)
            k += 1
        logger.info(
            "############################################################################################"
        )
        # -----------------------------------------------------------------------------
        # Generating output file with statistics:
        # -----------------------------------------------------------------------------
        static_dict = {"node": f'(unknown)', "init_atom_#": f"{num_atom_init}",
                       "atom_#_re-ordination": f'{total_atoms_ord}',
                       "assessed_dist": f'{m}', "Know_dist": f'{prop_dist}'}
        if convex:
            static_dict["convex"] = True
            static_dict["init_fun_val_relax"] = f'{fo_non_scaled:.4e}'
            static_dict["init_fun_val_relax_k"] = f'{fo_scaled:.4e}'
        else:
            static_dict["convex"] = False
            static_dict["init_fun_val_relax"] = 'N/A'
            static_dict["init_fun_val_relax_k"] = 'N/A'
        static_dict["RMSDi"] = f'{rmsd_i:<24.2e}'
        static_dict["MDEi"] = f'{mde_i:.2e}'
        # NOTE(review): this re-check can never trigger -- the non-dict case
        # already returned above; kept for byte-identical behaviour.
        if type(data) != dict:
            logger.warning(":: data type object not match with dict structure!\n")
            logger.warning(":: The process was interrupted\n")
        multistart_list = []
        n = len(sub_log.keys())
        for i in range(n):
            multistart_list.append(sub_log[i])
        static_dict["multi-start"] = multistart_list
        static_dict["standard"] = False
        # NOTE(review): Windows-style path separator; the f-string has no
        # placeholders -- presumably intentional for the target environment.
        static_log = test_path + f"\\spg_static_multistart_LOG.txt"
        with open(static_log, "w") as f:
            json.dump(static_dict, f)
    # -----------------------------------------------------------------------------------
    # Multi-start --Disable Standard
    # -----------------------------------------------------------------------------------
    else:
        out, elapsed_time, fo = data
        x_spg, backtracking, iterations, fun_o, gtd, norm_d = out
        # Statistics:
        rmsd_f = rmsd(x_spg, solution)
        mde_f = mde(x_spg, u, v, lb, ub)
        logger.info(":: spg results --multi start: False")
        logger.info(
            ":: Iter - bck -- RMSDf ----- MDEf"
            " ----- i_val ----- f_val ----- gtd ----- |d| ----- time(s)"
        )
        prompt_string = (
            f" {iterations:<5}: {backtracking:<6} {rmsd_f:<11.2e} {mde_f:<10.2e} {fo / 2:<11.2e} "
            f"{fun_o / 2:<10.2e} {gtd:<10.2e} {norm_d:<10.2e} {elapsed_time:.3f}"
        )
        logger.info(prompt_string)
        logger.info(
            "############################################################################################"
        )
        # -----------------------------------------------------------------------------
        # Generating output file with statistics:
        # -----------------------------------------------------------------------------
        static_log = test_path + f"\\spg_static_standard_LOG.txt"
        static_dict = {"node": f'(unknown)', "init_atom_#": f"{num_atom_init}",
                       "atom_#_re-ordination": f'{total_atoms_ord}',
                       "assessed_dist": f'{m}', "Know_dist": f'{prop_dist}'}
        if convex:
            static_dict["convex"] = True
            static_dict["init_fun_val_relax"] = f'{fo_non_scaled:.4e}'
            static_dict["init_fun_val_relax_k"] = f'{fo_scaled:.4e}'
        else:
            static_dict["convex"] = False
            static_dict["init_fun_val_relax"] = 'N/A'
            static_dict["init_fun_val_relax_k"] = 'N/A'
        static_dict["RMSDi"] = f'{rmsd_i:<24.2e}'
        static_dict["MDEi"] = f'{mde_i:.2e}'
        static_dict["multi-start"] = False
        static_dict["standard"] = {"iter": f'{iterations:<7}', "back": f'{backtracking:<6}',
                                   "RMDSf": f'{rmsd_f:<11.2e}',
                                   "MDEf": f'{mde_f:<10.2e}', "fun_i": f'{fo / 2:<11.2e}',
                                   "fun_f": f'{fun_o / 2:<10.2e}',
                                   "gtd": f'{gtd:<10.2e}', "norm_d": f'{norm_d:<10.2e}',
                                   "time": f'{elapsed_time:.3f}'}
        with open(static_log, "w") as file:
            json.dump(static_dict, file)
| 6,973 | 2,373 |
import unittest
import sys
import helpers
sys.path.append('../LODStats')
sys.path.append('../src/restriction-types-stats')
from A69DisjointProperties import A69DisjointProperties
import lodstats
from lodstats import RDFStats
testfile_path = helpers.resources_path
class TestA69DisjointProperties(unittest.TestCase):
    """Checks the A69 disjoint-properties statistic against a fixed fixture.

    Each test previously repeated the same three-line fixture setup; that
    duplication is factored into the private ``_disjoint_results`` helper so
    the expected values are the only thing each test states.
    """

    def setUp(self):
        # Reset lodstats' module-level state so tests cannot leak into each other.
        lodstats.stats.stats_to_do = []
        lodstats.stats.results = {}

    def _disjoint_results(self):
        """Run the statistic over the fixture file and return its result dict."""
        uri = 'file://' + testfile_path + 'disjointProperties.nt'
        rdfstats = RDFStats(uri, format="nt", stats=[A69DisjointProperties])
        rdfstats.start_statistics()
        return rdfstats.get_stats_results()['a69disjointproperties']

    def test_amount(self):
        self.assertEqual(self._disjoint_results()['amount_disjoint_properties'], 11)

    def test_avg(self):
        self.assertEqual(self._disjoint_results()['avg_disjoint'], 2.75)

    def test_median(self):
        self.assertEqual(self._disjoint_results()['median_disjoint'], 2.5)

    def test_min(self):
        self.assertEqual(self._disjoint_results()['min_disjoint'], 2.0)

    def test_max(self):
        self.assertEqual(self._disjoint_results()['max_disjoint'], 4.0)
from kavenegar import KavenegarAPI, APIException, HTTPException
from src.core.settings import OTP_API_KEY
def send_sms(phone, message):
    """Send *message* to *phone* through the Kavenegar SMS gateway.

    Failures are printed rather than raised, matching the best-effort
    behaviour expected by callers.
    """
    payload = {
        'sender': '10008663',
        'receptor': phone,
        'message': message,
    }
    try:
        # Client construction stays inside the try: a bad API key raises too.
        gateway = KavenegarAPI(OTP_API_KEY)
        response = gateway.sms_send(payload)
        print(response)
    except APIException as e:
        print(e)
    except HTTPException as e:
        print(e)
| 449 | 146 |
# Smoke-test script: pull in the pytaraxa test helpers (which expose
# blockNumber) and query the current block number once.
from pytaraxa.test import *
blockNumber()
| 43 | 17 |
import random
import logging
import time
from datetime import timedelta
from pymavlink import mavutil
_log = logging.getLogger(__name__)
def now():
    """Return the current wall-clock time as whole milliseconds."""
    millis = time.time() * 1000
    return int(round(millis))
def random_scaled_imu_test(url: str, pause: timedelta) -> None:
    """Continuously send randomly generated SCALED_MPU6000 IMU messages.

    Args:
        url: MAVLink connection string understood by mavutil.
        pause: delay between consecutive messages.

    Note: this loops forever; the caller is expected to interrupt it.
    """
    connection = mavutil.mavlink_connection(url)
    mav = connection.mav
    _log.info("Запускаю генерацию случайных данных БИНС")
    _log.info("параметры: %s, %s" % (url, pause))
    # time_boot_ms fields are milliseconds elapsed since this point.
    boot_time = now()
    # Uniform value in [0, 29.4] rounded to int -- roughly 0..3 g in m/s^2.
    datagen = lambda: int(round(random.uniform(0, 1)*9.8*3))
    while True:
        # All fields (including temperature) reuse the same random generator.
        msg = mav.scaled_mpu6000_encode(
            time_boot_ms=now() - boot_time,
            xacc=datagen(),
            yacc=datagen(),
            zacc=datagen(),
            xgyro=datagen(),
            ygyro=datagen(),
            zgyro=datagen(),
            temperature=datagen(),
        )
        _log.debug(msg)
        mav.send(msg)
        time.sleep(pause.total_seconds())
| 923 | 328 |
'''
Created on 2020-09-19
@author: wf
'''
import os.path
import tempfile
import unittest
from pathlib import Path
from lodstorage.storageconfig import StorageConfig
import geograpy
import getpass
from geograpy.locator import Locator, City,CountryManager, Location, LocationContext
from collections import Counter
from lodstorage.uml import UML
import re
from tests.basetest import Geograpy3Test
class TestLocator(Geograpy3Test):
    '''
    test the Locator class from the location module
    '''

    def lookupQuery(self,viewName,whereClause):
        '''
        Query the given lookup view, restricted to rows with a population,
        ordered by population descending.
        '''
        loc=Locator.getInstance()
        queryString=f"SELECT * FROM {viewName} where {whereClause} AND pop is not NULL ORDER by pop desc"
        lookupRecords=loc.sqlDB.query(queryString)
        return lookupRecords

    def checkExpected(self,lod,expected):
        '''
        Assert that every (name, minimum population) pair in expected is
        matched by at least one record in the list-of-dicts lod.
        '''
        emap={}
        found={}
        for key,value in expected:
            emap[key]=value
        for record in lod:
            name=record["name"]
            pop=record["pop"]
            # only count a record when its population exceeds the expected minimum
            if name in emap and pop> emap[name]:
                found[name]=record
                if self.debug:
                    print(f"{name}:{pop:.0f}")
        self.assertEqual(len(found),len(emap))

    def testHasViews(self):
        '''
        test that the views are available
        '''
        loc=Locator.getInstance()
        viewsMap=loc.sqlDB.getTableDict(tableType="view")
        for view in ["CityLookup","RegionLookup","CountryLookup"]:
            self.assertTrue(view in viewsMap)

    def testCityLookup(self):
        '''
        test the cityLookup to city/region/country object cluster
        '''
        cityLookupRecords=self.lookupQuery("CityLookup", "label in ('Berlin','Paris','Athens','Singapore')")
        expected=[("Berlin",3644000),("Paris",2175000),("Athens",600000),("Singapore",5800000)]
        self.checkExpected(cityLookupRecords,expected)

    def testRegionLookup(self):
        '''
        test region Lookup
        '''
        regionLookupRecords=self.lookupQuery("RegionLookup", "label in ('CA')")
        expected=[("California",39000000)]
        self.checkExpected(regionLookupRecords,expected)

    def testCountryLookup(self):
        '''
        test country Lookup
        '''
        #self.debug=True
        countryLookupRecords=self.lookupQuery("CountryLookup", "label in ('CA')")
        expected=[("Canada",37000000)]
        self.checkExpected(countryLookupRecords,expected)

    def testIsoRegexp(self):
        '''
        test regular expression for iso codes
        '''
        loc=Locator.getInstance()
        self.assertFalse(loc.isISO('Singapore'))
        # every ISO code stored in the DB must be accepted by isISO
        query="""
select distinct iso from countries
union
select distinct iso from regions
"""
        loc.populate_db()
        isocodeRecords=loc.sqlDB.query(query)
        for isocodeRecord in isocodeRecords:
            isocode=isocodeRecord['iso']
            if isocode:
                isIso=loc.isISO(isocode)
                if not isIso and self.debug:
                    print(isocode)
                self.assertTrue(isIso)

    def testWordCount(self):
        '''
        test the word count
        '''
        loc=Locator.getInstance()
        query="SELECT name from CITIES"
        nameRecords=loc.sqlDB.query(query)
        if self.debug:
            print ("testWordCount: found %d names" % len(nameRecords))
        # histogram of how many words a city name consists of
        wc=Counter()
        for nameRecord in nameRecords:
            name=nameRecord['name']
            words=re.split(r"\W+",name)
            wc[len(words)]+=1
        if self.debug:
            print ("most common 20: %s" % wc.most_common(20))

    def testUML(self):
        '''
        test adding population data from wikidata to GeoLite2 information
        '''
        Locator.resetInstance()
        loc=Locator.getInstance()
        loc.populate_db()
        user=getpass.getuser()
        if self.debug:
            print ("current user is %s" % user)
        tableList=loc.sqlDB.getTableList()
        uml=UML()
        title="""geograpy Tables
2021-08-13
[[https://github.com/somnathrakshit/geograpy3 © 2020-2021 geograpy3 project]]"""
        plantUml=uml.tableListToPlantUml(tableList,title=title, packageName="geograpy3")
        showUml=True
        if showUml or self.debug:
            print (plantUml)

    def checkExamples(self,examples,countries,debug=False,check=True):
        '''
        check that the given example give results in the given countries

        Args:
            examples(list): a list of example location strings
            countries(list): a list of expected country iso codes
        '''
        for index,example in enumerate(examples):
            city=geograpy.locateCity(example,debug=debug)
            if self.debug:
                print("%3d: %22s->%s" % (index,example,city))
            if check:
                self.assertEqual(countries[index],city.country.iso)

    def testGetCountry(self):
        '''
        test getting a country by name or ISO
        '''
        locator=Locator()
        debug=True
        # (lookup key, expected country name or None when not resolvable)
        examples=[("DE","Germany"),("US","United States of America"),("USA",None)]
        for name,expectedName in examples:
            country=locator.getCountry(name)
            if debug:
                print(country)
            if expectedName is None:
                self.assertIsNone(country)
            else:
                self.assertIsNotNone(country)
                self.assertEqual(expectedName,country.name)

    def testIssue15(self):
        '''
        https://github.com/somnathrakshit/geograpy3/issues/15

        test Issue 15 Disambiguate via population, gdp data
        '''
        examples=['Paris','Vienna', 'Berlin']
        countries=['FR','AT', 'DE']
        self.checkExamples(examples, countries)
        pass

    def testIssue17(self):
        '''
        test issue 17:
        https://github.com/somnathrakshit/geograpy3/issues/17

        [BUG] San Francisco, USA and Auckland, New Zealand should be locatable #17
        '''
        examples=['San Francisco, USA','Auckland, New Zealand']
        countries=['US','NZ']
        self.checkExamples(examples, countries)

    def testIssue19(self):
        '''
        test issue 19
        '''
        examples=['Puebla City, Mexico','Newcastle, UK','San Juan, Puerto Rico']
        countries=['MX','GB','US']
        # For Puerto Rico exist two iso codes one as country and one as US region see https://en.wikipedia.org/wiki/Puerto_Rico in the dataset it is recognized as US region
        self.checkExamples(examples, countries)

    def testStackOverflow64379688(self):
        '''
        compare old and new geograpy interface
        '''
        examples=['John Doe 160 Huntington Terrace Newark, New York 07112 United States of America',
                  'John Doe 30 Huntington Terrace Newark, New York 07112 USA',
                  'John Doe 22 Huntington Terrace Newark, New York 07112 US',
                  'Mario Bianchi, Via Nazionale 256, 00148 Roma (RM) Italia',
                  'Mario Bianchi, Via Nazionale 256, 00148 Roma (RM) Italy',
                  'Newark','Rome']
        for example in examples:
            city=geograpy.locateCity(example,debug=False)
            if self.debug:
                print(city)

    def testStackOverflow64418919(self):
        '''
        https://stackoverflow.com/questions/64418919/problem-retrieving-region-in-us-with-geograpy3
        '''
        examples=['Seattle']
        for example in examples:
            city=geograpy.locateCity(example,debug=False)
            print(city)

    def testProceedingsExample(self):
        '''
        test a proceedings title Example
        '''
        examples=['''Proceedings of the
IEEE 14th International Conference on
Semantic Computing, ICSC 2020,
San Diego, CA, USA,
February 3-5, 2020''']
        for example in examples:
            places = geograpy.get_place_context(text=example)
            if self.debug:
                print(places)
            city=geograpy.locateCity(example,debug=False)
            if self.debug:
                print(city)

    def testDelimiters(self):
        '''
        test the delimiter statistics for names
        '''
        loc=Locator.getInstance()
        loc.populate_db()
        # combine all country/region/city names into one helper view
        ddls=["DROP VIEW IF EXISTS allNames","""CREATE VIEW allNames as select name from countries
        union select name from regions
        union select name from cities"""]
        for ddl in ddls:
            loc.sqlDB.execute(ddl)
        query="SELECT name from allNames"
        nameRecords=loc.sqlDB.query(query)
        show=self.debug
        show=True
        if show:
            print("found %d name records" % len(nameRecords))
        # count characters whose code point is below 'A' (punctuation, digits, space)
        ordC=Counter()
        for nameRecord in nameRecords:
            name=nameRecord["name"]
            for char in name:
                code=ord(char)
                if code<ord("A"):
                    ordC[code]+=1
        for index,countT in enumerate(ordC.most_common(10)):
            code,count=countT
            if show:
                print ("%d: %d %s -> %d" % (index,code,chr(code),count))

    def testIssue22(self):
        '''
        https://github.com/somnathrakshit/geograpy3/issues/22
        '''
        url='https://en.wikipedia.org/wiki/2012_Summer_Olympics_torch_relay'
        places = geograpy.get_geoPlace_context(url = url)
        if self.debug:
            print(places)
        self.assertTrue(len(places.countries)>5)
        self.assertTrue(len(places.regions)>5)
        self.assertTrue(len(places.cities)>20)

    def testExamples(self):
        '''
        test examples
        '''
        examples=['Paris, US-TX','Amsterdam, Netherlands', 'Vienna, Austria','Vienna, Illinois, US','Paris, Texas',
                  'Austin, TX','Austin, Texas',
        ]
        countries=['US','NL','AT','US','US','US','US']
        self.checkExamples(examples, countries,debug=False)

    def testIssue41_CountriesFromErdem(self):
        '''
        test getting Country list from Erdem
        '''
        countryList=CountryManager.fromErdem()
        self.assertEqual(247,len(countryList.countries))
        if self.debug:
            for country in countryList.countries:
                print(country)

    def testIssue_42_distance(self):
        '''
        test haversine and location
        '''
        # pole-to-equator distance is a quarter of Earth's circumference
        loc1=Location()
        loc1.lat=0
        loc1.lon=0
        loc2=Location()
        loc2.lat=90
        loc2.lon=0
        d=loc1.distance(loc2)
        #self.debug=True
        if self.debug:
            print(d)
        self.assertAlmostEqual(10007.54,d,delta=0.1)

    def testIssue_59_db_download(self):
        '''
        tests the correct downloading of the backup database in different configurations
        '''
        def getConfig(tmpdir:str):
            # storage config rooted in the given temporary directory
            config=StorageConfig(cacheFile="locations.db", cacheDirName="geograpyTest", cacheRootDir=tmpdir)
            config.cacheFile=f"{config.getCachePath()}/{config.cacheFile}"
            return config

        def downloadAndTestDB(config:StorageConfig, loc:Locator=None, forceUpdate:bool=False):
            '''downloads and tests the downloaded db'''
            if loc is None:
                loc = Locator(storageConfig=config)
            loc.downloadDB(forceUpdate=forceUpdate)
            self.assertTrue(os.path.exists(config.cacheFile))
            self.assertTrue(loc.db_has_data())
            return loc

        # test downloading with no file in dir
        with tempfile.TemporaryDirectory() as tmpdir:
            config=getConfig(tmpdir)
            downloadAndTestDB(config)

        # test downloading with empty file in dir
        with tempfile.TemporaryDirectory() as tmpdir:
            config=getConfig(tmpdir)
            Path(config.cacheFile).touch() # create empty file
            loc=downloadAndTestDB(config)

            # test downloading with forceUpdate
            # drop a important table to check if it is restored
            loc.sqlDB.execute("DROP TABLE countries")
            self.assertFalse(loc.db_has_data())
            downloadAndTestDB(config,loc=loc, forceUpdate=True)
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    # run the whole suite when executed directly
    unittest.main()
| 12,548 | 3,732 |
# example of automatically starting a thread
from time import sleep
from threading import Thread
# custom thread class that automatically starts threads when they are constructed
# Thread subclass that begins executing as soon as it is constructed.
class AutoStartThread(Thread):
    """A Thread that calls start() automatically in its constructor."""

    def __init__(self, *args, **kwargs):
        # Let the Thread base class set itself up, then kick it off at once.
        super().__init__(*args, **kwargs)
        self.start()
# task function
def task():
print('Task starting')
# block for a moment
sleep(1)
# report
print('Task all done')
# create and start the new thread (AutoStartThread starts itself; no
# explicit start() call is needed)
thread = AutoStartThread(target=task)
# wait for the new thread to finish
thread.join()
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Marcel Bollmann <bollmann@linguistics.rub.de>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import json
import argparse
def splitAt(token, symbol):
    """Split *token* on *symbol*, keeping the symbol attached to each
    left-hand part; the final part carries no trailing symbol."""
    pieces = token.split(symbol)
    if len(pieces) >= 2:
        head = [piece + symbol for piece in pieces[:-1]]
        return head + pieces[-1:]
    return pieces
class MainApplication(object):
    """Reads a single (possibly multi-line) token and emits CorA-style JSON
    with its modern (mod_*) and diplomatic (dipl_*) tokenization layers."""

    def __init__(self, args):
        # --split is a legacy shorthand for --split-mod="|" --split-dipl="#"
        if args.split:
            self.split_mod = "|"
            self.split_dipl = "#"
        else:
            self.split_mod = args.split_mod
            self.split_dipl = args.split_dipl
        self.lines = [x.strip() for x in args.infile.readlines()]
        # A token may be spread over several input lines; rejoin with spaces.
        self.token = ' '.join(self.lines)
        args.infile.close()

    def throw_error(self, error):
        # FIX: report errors on stderr and terminate via sys.exit instead of
        # printing to stdout and calling the site-provided exit() builtin
        # (which is absent when Python runs with -S).
        sys.stderr.write("%s\n" % error)
        sys.exit(1)

    def performConversions(self):
        """Build the dict of tokenization layers for self.token.

        Returns:
            dict with mod_trans/mod_ascii/mod_utf, dipl_trans/dipl_utf and
            dipl_breaks (one 0 per dipl token).
        """
        result = {}
        if self.split_mod:
            modern = self.token.split(self.split_mod)
            # ascii/utf layers drop the dipl separator; the trans layer keeps
            # the mod separator attached to every token but the last.
            result['mod_ascii'] = result['mod_utf'] = \
                [m.replace(self.split_dipl, '') for m in modern]
            result['mod_trans'] = [m+self.split_mod for m in modern[:-1]] + [modern[-1]]
        else:
            result['mod_trans'] = result['mod_ascii'] = \
                result['mod_utf'] = [self.token]
        if self.split_dipl:
            dipl = self.token.split(self.split_dipl)
            result['dipl_utf'] = [d.replace(self.split_mod, '') for d in dipl]
            result['dipl_trans'] = [d+self.split_dipl for d in dipl[:-1]] + [dipl[-1]]
            # no line-break information is available for raw tokens
            result['dipl_breaks'] = [0] * len(dipl)
        else:
            result['dipl_trans'] = result['dipl_utf'] = [self.token]
            result['dipl_breaks'] = [0]
        return result

    def run(self):
        result = self.performConversions()
        print(json.dumps(result))
if __name__ == '__main__':
    description = "Reads a file containing a single token and returns it unchanged in JSON format. Intended to be called from within CorA."
    epilog = ""
    parser = argparse.ArgumentParser(description=description, epilog=epilog)
    # positional input defaults to stdin so the script can be used in a pipe
    parser.add_argument('infile',
                        metavar='INPUT',
                        nargs='?',
                        default=sys.stdin,
                        type=argparse.FileType('r'),
                        help='Input file')
    # exists for legacy reasons:
    parser.add_argument('-s', '--split',
                        action='store_true',
                        default=False,
                        help=('Parse pipe (|) and hash (#) as tokenization symbols; '
                              'equivalent to --split-mod="|" --split-dipl="#"'))
    parser.add_argument('--split-mod',
                        default='',
                        type=str,
                        help='Symbol to split into two moderns (default: None)')
    parser.add_argument('--split-dipl',
                        default='',
                        type=str,
                        help='Symbol to split into two dipls (default: None)')
    # parser.add_argument('-e', '--encoding',
    #                     default='utf-8',
    #                     help='Encoding of the input file (default: utf-8)')

    arguments = parser.parse_args()

    # launching application ...
    MainApplication(arguments).run()
| 4,332 | 1,317 |
from typing import Union
import matplotlib.pyplot as plt
import torch
import torchvision
def decode_segmap_to_color_image(masks: torch.Tensor,
                                 colormap: Union[list, tuple],
                                 num_classes: int,
                                 ignore_index: int = None,
                                 ignore_color: Union[list, tuple] = None):
    """Convert an integer segmentation map into a float RGB tensor in [0, 1].

    Args:
        masks: integer class-index tensor; an RGB channel axis is inserted at dim 1.
        colormap: per-class (R, G, B) values in 0-255, indexed by class id.
        num_classes: number of class ids to decode.
        ignore_index: optional class id to paint with ignore_color.
        ignore_color: (R, G, B) used for ignore_index pixels.

    Returns:
        torch.float32 tensor with shape masks.shape expanded by a size-3
        channel axis at dim 1, values scaled to [0, 1].
    """
    # Copy the map once per output channel so recoloring can happen in place.
    r = masks.clone()
    g = masks.clone()
    b = masks.clone()
    # Assign each class its colormap entry, channel by channel.
    for i in range(num_classes):
        r[masks == i] = colormap[i][0]
        g[masks == i] = colormap[i][1]
        b[masks == i] = colormap[i][2]
    # BUG FIX: the original condition `if ignore_index and ...` silently
    # skipped a legitimate ignore_index of 0; compare against None instead.
    if ignore_index is not None and ignore_color is not None:
        r[masks == ignore_index] = ignore_color[0]
        g[masks == ignore_index] = ignore_color[1]
        b[masks == ignore_index] = ignore_color[2]
    decoded_masks = (r.unsqueeze(dim=1), g.unsqueeze(dim=1), b.unsqueeze(dim=1))
    decoded_masks = torch.cat(decoded_masks, dim=1).to(torch.float32)
    decoded_masks /= 255
    return decoded_masks
# Validate dataset loading code
def show_dataset(images: torch.Tensor, targets: torch.Tensor):
to_pil_image = torchvision.transforms.ToPILImage()
plt.rcParams['figure.figsize'] = (17, 6)
plt.rcParams['figure.autolayout'] = True
plt.rcParams['xtick.bottom'] = False
plt.rcParams['xtick.labelbottom'] = False
plt.rcParams['ytick.left'] = False
plt.rcParams['ytick.labelleft'] = False
assert images.shape[0] == targets.shape[0]
for i in range(images.shape[0]):
fig, axs = plt.subplots(1, 2)
axs[0].set_title('Input image')
axs[0].imshow(to_pil_image(images[i].cpu()))
axs[1].set_title('Groundtruth')
axs[1].imshow(targets[i].cpu())
plt.show()
| 1,851 | 708 |
import os
import json
import logging
import argparse
from src.common.translate import translate_time_expression_templates, get_client
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
    """Translate the English start/end time-expression templates into each
    target language and write one JSON file per language."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--template_dir", default="data/templates/start_end", help="Templates directory")
    parser.add_argument("--lang", default=None, type=str, required=False,
                        help="Language code. If not specified, computes for all")
    args = parser.parse_args()

    translate_client = get_client()

    # Iterate over languages
    if args.lang is not None:
        target_langs = [args.lang]
    else:
        # BUG FIX: honour --template_dir (the directory was hard-coded), and
        # skip only the English source file itself instead of every filename
        # that merely contains the substring "en".
        target_langs = [f.replace(".json", "") for f in os.listdir(args.template_dir) if f != "en.json"]

    # BUG FIX: close the source file deterministically via a context manager.
    with open(f"{args.template_dir}/en.json") as f_in:
        en_templates = json.load(f_in)

    for target in target_langs:
        logger.info(target)
        target_templates = {}
        # translate the "start" and "end" template sets independently
        for edge in ["start", "end"]:
            target_templates[edge] = translate_time_expression_templates(translate_client, en_templates[edge], target)

        with open(f"{args.template_dir}/{target}.json", "w") as f_out:
            json.dump(target_templates, f_out, ensure_ascii=False)
| 1,373 | 426 |
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from django.contrib.auth import get_user_model
from .forms import DriverSignupForm, RiderSignupForm
from driver.models import Driver
User = get_user_model()
# Create your views here.
def index(request):
    """Render the landing page."""
    return render(request, 'index.html')
def _signup(request, form_class, template_name, is_driver):
    """Shared signup flow: validate the form, set the mutually exclusive role
    flags, activate and save the user, then redirect home. Invalid POSTs
    re-render the bound form so validation errors are shown."""
    if request.method == 'POST':
        form = form_class(request.POST)
        if form.is_valid():
            user = form.save(commit=False)
            user.is_active = True
            user.is_driver = is_driver
            user.is_passenger = not is_driver
            user.save()
            return redirect('/')
    else:
        form = form_class()
    return render(request, template_name, {'form': form})


def driver_signup(request):
    """Register a new driver account."""
    return _signup(request, DriverSignupForm, 'registration/driver_signup.html', is_driver=True)


def rider_signup(request):
    """Register a new rider (passenger) account."""
    return _signup(request, RiderSignupForm, 'registration/rider_signup.html', is_driver=False)
@login_required
def driver_index(request):
    """Show all drivers to the logged-in user."""
    drivers = Driver.objects.all()
    user = request.user
    # NOTE(review): template name 'driver_inex.html' looks like a typo for
    # 'driver_index.html' -- confirm against the actual template file name
    # before changing it.
    return render(request,'registration/driver_inex.html', {"drivers":drivers, "user":user})
from manimlib.imports import *
from manim_projects.tony_useful.imports import *
class Test2DLissajousFromLinesIntersection(Scene):
    """Trace a 2D Lissajous figure (frequency ratio 1:3) as the intersection
    of a vertical line driven by a point on the top circle and a horizontal
    line driven by a point on the left circle."""
    def construct(self):
        circle_x = Circle(color=RED).shift(UP * 2.5)
        circle_y = Circle(color=RED).shift(LEFT * 2.5)
        # single animation parameter driving both circle points
        theta = ValueTracker(0)
        point_x = Dot().add_updater(lambda m: m.move_to(circle_x.point_at_angle(1 * theta.get_value())))
        point_y = Dot().add_updater(lambda m: m.move_to(circle_y.point_at_angle(3 * theta.get_value())))
        # the lines track only one coordinate of each moving point
        line_x = Line(UP * 6 , DOWN * 6).add_updater(lambda m: m.move_to(point_x.get_center()[0] * RIGHT)).set_color(GRAY)
        line_y = Line(LEFT * 8, RIGHT * 8).add_updater(lambda m: m.move_to(point_y.get_center()[1] * UP)).set_color(GRAY)
        # P sits at the line intersection; its traced path is the Lissajous curve
        P = Dot(color=BLUE).add_updater(lambda m: m.move_to(line_intersection(line_x.get_start_and_end(), line_y.get_start_and_end())))
        path = TracedPath(P.get_center, stroke_width=6, stroke_color=BLUE, min_distance_to_new_point=0.01)
        self.add(circle_x, circle_y, point_x, point_y, line_x, line_y, P, path)
        self.wait()
        self.play(theta.increment_value, PI * 4, run_time=10, rate_func=linear)
        self.wait()
class Test2DLissajousFromParametricFunction(Scene):
    """Animate a 2D Lissajous curve directly as a parametric function while
    sweeping the y-frequency from 3 up to 9."""
    def construct(self):
        line = ParametricFunction(
            lambda t: [np.sin(1 * t + PI / 2), np.sin(3 * t), 0],
            t_min=0, t_max=4 * TAU, color=BLUE
        )
        # tracker for the y-frequency; the curve is rebuilt every frame
        w = ValueTracker(1)
        line.add_updater(
            lambda m: m.become(
                ParametricFunction(
                    lambda t: [np.sin(1 * t + PI / 2), np.sin(w.get_value() * t), 0],
                    t_min=0, t_max=4 * TAU, color=BLUE
                )
            )
        )
        self.add(line)
        self.wait()
        self.play(w.increment_value, 8, run_time=10, rate_func=linear)
        self.wait()
# Small sphere used as a 3D stand-in for Dot (flat white, no outline).
class Dot3D(Sphere):
    CONFIG = {
        "radius": 0.08,
        "checkerboard_colors": [WHITE, WHITE],
        "stroke_width": 0,
    }
class Line_(VGroup):
    """A straight line built from `nums` consecutive short Line segments
    (each segment ends where the next one starts)."""
    CONFIG = {
        "nums": 100,
    }
    def __init__(self, start, end, **kwargs):
        VGroup.__init__(self)
        total = end - start
        unit = total / self.nums
        # first segment, then chain the remaining nums-1 segments end-to-start
        self.add(Line(start, start + unit, **kwargs))
        for i in range(self.nums - 1):
            now_start = self[-1].get_end()
            now_end = now_start + unit
            self.add(Line(now_start, now_end, **kwargs))
class Line__(VGroup):
    """A 'thick' line rendered as four parallel copies of the base line,
    offset by `buff_` in the two axes perpendicular to its direction.
    The branch is chosen from the first nonzero start coordinate."""
    CONFIG = {
        "buff_": 0.02
    }
    def __init__(self, start, end, **kwargs):
        VGroup.__init__(self)
        base = Line(start, end, **kwargs)
        if start[0] != 0:
            # line along x: offset in y/z
            self.add(base.copy().shift([0, -self.buff_,  self.buff_]))
            self.add(base.copy().shift([0,  self.buff_, -self.buff_]))
            self.add(base.copy().shift([0,  self.buff_,  self.buff_]))
            self.add(base.copy().shift([0, -self.buff_, -self.buff_]))
        elif start[1] != 0:
            # line along y: offset in x/z
            self.add(base.copy().shift([-self.buff_, 0,  self.buff_]))
            self.add(base.copy().shift([ self.buff_, 0, -self.buff_]))
            self.add(base.copy().shift([ self.buff_, 0,  self.buff_]))
            self.add(base.copy().shift([-self.buff_, 0, -self.buff_]))
        else:
            # line along z: offset in x/y
            self.add(base.copy().shift([-self.buff_,  self.buff_, 0]))
            self.add(base.copy().shift([ self.buff_, -self.buff_, 0]))
            self.add(base.copy().shift([ self.buff_,  self.buff_, 0]))
            self.add(base.copy().shift([-self.buff_, -self.buff_, 0]))
class Test3DLissajousFromPlaneIntersection(ThreeDScene):
    """Trace a 3D Lissajous curve (1:2:3) as the common point of three
    axis-aligned planes, each driven by a point circling one of three
    coordinate circles."""
    CONFIG = {
        "camera_config": {
            "background_color": WHITE
        },
        "dot_class": Dot3D,
        # planes can be rendered as Polygons or ParametricSurfaces
        "plane_use_ploygon": False,
        "line_class": Line,
    }
    def construct(self):
        axes = ThreeDAxes()
        # self.add(axes)
        self.set_camera_orientation(phi=70*DEGREES, theta=45*DEGREES)
        # self.set_camera_orientation(distance=1000000)
        self.begin_ambient_camera_rotation(rate=0.5)
        # one driving circle per coordinate axis
        circle_x = Circle(color=RED).rotate(PI / 2, RIGHT).shift(DOWN * 2.5).set_shade_in_3d()
        circle_y = Circle(color=RED).rotate(PI / 2, DOWN).shift(LEFT * 2.5).set_shade_in_3d()
        circle_z = Circle(color=RED).shift(IN * 2.5).set_shade_in_3d()
        theta = ValueTracker(0)
        # frequencies 1, 2 and 3 give the 3D Lissajous ratio
        point_x = self.dot_class().add_updater(lambda m: m.move_to(circle_x.point_at_angle(1 * theta.get_value()))).set_shade_in_3d().set_color(GREEN)
        point_y = self.dot_class().add_updater(lambda m: m.move_to(circle_y.point_at_angle(2 * theta.get_value()))).set_shade_in_3d().set_color(ORANGE)
        point_z = self.dot_class().add_updater(lambda m: m.move_to(circle_z.point_at_angle(3 * theta.get_value()))).set_shade_in_3d().set_color(PURPLE)
        if self.plane_use_ploygon:
            plane_x = Polygon(
                np.array([ 2.5,  2.5, 0]),
                np.array([-2.5,  2.5, 0]),
                np.array([-2.5, -2.5, 0]),
                np.array([ 2.5, -2.5, 0]),
                fill_color=GREEN,
                fill_opacity=0.3,
                stroke_width=0
            ).set_shade_in_3d().add_updater(lambda m: m.move_to(point_x.get_center()[2] * OUT))
            plane_y = Polygon(
                np.array([ 2.5, 0,  2.5]),
                np.array([-2.5, 0,  2.5]),
                np.array([-2.5, 0, -2.5]),
                np.array([ 2.5, 0, -2.5]),
                fill_color=ORANGE,
                fill_opacity=0.3,
                stroke_width=0
            ).set_shade_in_3d().add_updater(lambda m: m.move_to(point_y.get_center()[1] * UP))
            plane_z = Polygon(
                np.array([0,  2.5,  2.5]),
                np.array([0, -2.5,  2.5]),
                np.array([0, -2.5, -2.5]),
                np.array([0,  2.5, -2.5]),
                fill_color=PURPLE,
                fill_opacity=0.3,
                stroke_width=0
            ).set_shade_in_3d().add_updater(lambda m: m.move_to(point_z.get_center()[0] * RIGHT))
        else:
            plane_x = ParametricSurface(
                lambda u, v: np.array([u, v, 0]),
                u_min=-2.5, u_max=2.5, v_min=-2.5, v_max=2.5,
                checkerboard_colors=None,
                fill_color=GREEN, fill_opacity=0.3,
                stroke_width=0
            ).add_updater(lambda m: m.move_to(point_x.get_center()[2] * OUT))
            plane_y = ParametricSurface(
                lambda u, v: np.array([u, 0, v]),
                u_min=-2.5, u_max=2.5, v_min=-2.5, v_max=2.5,
                checkerboard_colors=None,
                fill_color=ORANGE, fill_opacity=0.3,
                stroke_width=0
            ).add_updater(lambda m: m.move_to(point_y.get_center()[1] * UP))
            plane_z = ParametricSurface(
                lambda u, v: np.array([0, u, v]),
                u_min=-2.5, u_max=2.5, v_min=-2.5, v_max=2.5,
                checkerboard_colors=None,
                fill_color=PURPLE, fill_opacity=0.3,
                stroke_width=0
            ).add_updater(lambda m: m.move_to(point_z.get_center()[0] * RIGHT))
        # pairwise plane intersection lines, each tracking two coordinates
        line_x = self.line_class(
            np.array([-2.5, 0, 0]),
            np.array([ 2.5, 0, 0]),
            fill_color=GOLD_E,
            stroke_width=2
        ).add_updater(lambda m: m.move_to(point_y.get_center()[1] * UP + point_x.get_center()[2] * OUT))
        line_y = self.line_class(
            np.array([0, -2.5, 0]),
            np.array([0,  2.5, 0]),
            fill_color=GOLD_E,
            stroke_width=2
        ).add_updater(lambda m: m.move_to(point_z.get_center()[0] * RIGHT + point_x.get_center()[2] * OUT))
        line_z = self.line_class(
            np.array([0, 0, -2.5]),
            np.array([0, 0,  2.5]),
            fill_color=GOLD_E,
            stroke_width=2
        ).add_updater(lambda m: m.move_to(point_z.get_center()[0] * RIGHT + point_y.get_center()[1] * UP))
        # P combines one coordinate from each driving point; its trace is the curve
        P = self.dot_class().set_shade_in_3d(False)
        P.add_updater(lambda m: m.move_to(np.array([point_z.get_center()[0], point_y.get_center()[1], point_x.get_center()[2]])))
        path = TracedPath(P.get_center, stroke_width=6, stroke_color=BLUE, min_distance_to_new_point=0.01)
        self.add(circle_x, circle_y, circle_z, point_x, point_y, point_z, P, path, plane_x, plane_y, plane_z, line_x, line_y, line_z)
        self.wait()
        self.play(theta.increment_value, PI * 4, run_time=10, rate_func=linear)
        self.wait(3)
        self.stop_ambient_camera_rotation()
        self.wait()
        # show the three axis-aligned projections, then return to perspective
        self.move_camera(phi=0, theta=TAU, distance=10000)
        self.wait(2)
        self.move_camera(phi=PI/2, theta=TAU, distance=10000)
        self.wait(2)
        self.move_camera(phi=PI/2, theta=TAU+PI/2, distance=10000)
        self.wait(2)
        self.move_camera(phi=70*DEGREES, theta=TAU+45*DEGREES)
        self.wait()
class Test3DLissajousFromParametricFunction(ThreeDScene):
    """Quick test scene: draw a 3-D Lissajous curve directly from its
    parametric form and let the camera orbit around it."""

    def construct(self):
        self.set_camera_orientation(phi=90*DEGREES, theta=45*DEGREES, distance=10000)
        # x, y share frequency 2 (phase-shifted), z runs at frequency 3.
        curve = ParametricFunction(
            lambda t: np.array([np.sin(2 * t + PI / 2), np.sin(2 * t), np.sin(3 * t)]),
            t_min=0, t_max=4 * TAU, color=BLUE
        )
        curve.scale(2.5)
        self.add(curve)
        self.wait()
        self.begin_ambient_camera_rotation(rate=1)
        self.wait(10)
class Intro2DLissajous(Scene):
    """Introduce the 2-D Lissajous figure (cos t, sin 3t): two driving
    circles generate the x and y coordinates separately, then the traced
    point P draws the curve."""
    CONFIG = {
        "camera_config": {
            "background_color": WHITE
        }
    }

    def construct(self):
        # Parametric label "(cos θ, sin 3θ)"; parts are revealed one at a time.
        para = TexMobject("(", "\\cos \\theta", ",", "\\sin 3\\theta", ")").set_color(BLACK)
        para.scale(1.7).to_corner(DR)
        circle_x = Circle(color=RED)#.shift(UP * 2.5)
        circle_y = Circle(color=RED)#.shift(LEFT * 2.5)
        self.wait()
        self.play(ShowCreation(VGroup(circle_x, circle_y)))
        self.wait()
        # Move the driving circles out of the way of the curve area.
        self.play(
            circle_x.shift, UP * 2.5,
            circle_y.shift, LEFT * 2.5
        )
        self.wait()
        theta = ValueTracker(0)
        theta_label = TexMobject("\\theta = ").scale(1.7).to_corner(DL).set_color(BLACK)
        theta_value = DecimalNumber(0, num_decimal_places=2).set_color(BLACK).scale(1.7).next_to(theta_label, RIGHT)
        # Named (not lambda) so it can be removed and re-added below.
        def updater_of_point_x(obj):
            obj.move_to(circle_x.point_at_angle(1 * theta.get_value()))
        point_x = Dot(color=GOLD).add_updater(updater_of_point_x)
        # y-point runs at 3x the angular frequency of the x-point.
        point_y = Dot(color=GOLD).add_updater(lambda m: m.move_to(circle_y.point_at_angle(3 * theta.get_value())))
        # Guide lines tracking each point's projected coordinate.
        line_x = Line(UP * 6 , DOWN * 6, stroke_width=2).add_updater(lambda m: m.move_to(point_x.get_center()[0] * RIGHT)).set_color(GRAY).set_opacity(0.6)
        line_y = Line(LEFT * 8, RIGHT * 8, stroke_width=2).add_updater(lambda m: m.move_to(point_y.get_center()[1] * UP)).set_color(GRAY).set_opacity(0.6)
        self.play(ShowCreation(point_x))
        self.play(ShowCreation(line_x))
        self.wait()
        # Demonstrate the x-motion alone over one full turn.
        self.play(theta.increment_value, PI * 2, run_time=5, rate_func=linear)
        self.play(Write(para[1]))
        theta.set_value(0)
        # Freeze the x-point while the y-motion is demonstrated.
        point_x.remove_updater(updater_of_point_x)
        self.wait()
        self.play(ShowCreation(point_y))
        self.play(ShowCreation(line_y))
        self.wait()
        self.play(theta.increment_value, PI * 2, run_time=5, rate_func=linear)
        self.play(Write(para[3]))
        self.wait()
        theta.set_value(0)
        # Re-enable the x updater so both coordinates drive P together.
        point_x.add_updater(updater_of_point_x)
        # P combines the x of point_x with the y of point_y.
        P = Dot(color=BLUE).add_updater(lambda m: m.move_to([point_x.get_center()[0], point_y.get_center()[1], 0]))
        path = TracedPath(P.get_center, stroke_width=6, stroke_color=BLUE, min_distance_to_new_point=0.01)
        self.wait()
        self.play(
            ShowCreation(P),
            FadeIn(VGroup(para[0], para[2], para[4])),
            FadeIn(VGroup(theta_label, theta_value))
        )
        theta_value.add_updater(lambda m: m.set_value(theta.get_value()))
        self.add(path)
        self.wait()
        # Two full periods: the traced path closes into the Lissajous figure.
        self.play(theta.increment_value, PI * 4, run_time=10, rate_func=linear)
        self.wait(3)
class ALotOf2DLissajous(Scene):
    """Morph the 2-D Lissajous curve x=cos(w t), y=sin(3 t) while the
    frequency ratio w:3 is stepped from 1 up to 7, then reset to 1."""
    CONFIG = {
        "camera_config": {
            "background_color": WHITE
        }
    }

    def construct(self):
        # w is the x-frequency; the y-frequency stays fixed at 3.
        w = ValueTracker(1)
        # On-screen "w : 3" ratio readout.
        label = VGroup(
            DecimalNumber(0).set_color(BLACK).scale(2),
            TexMobject(":").set_color(BLACK).scale(2),
            TexMobject("3").set_color(BLACK).scale(2)
        ).arrange(RIGHT)
        label.to_corner(UL, buff=1)
        label[0].add_updater(lambda m: m.set_value(w.get_value()))
        line = ParametricFunction(
            lambda t: [np.cos(1 * t), np.sin(3 * t), 0],
            t_min=0, t_max=6*TAU, color=BLUE
        )
        def updater_of_line(obj):
            # Rebuild the whole curve each frame at the current w and
            # morph the mobject into it.
            new = ParametricFunction(
                lambda t: [np.cos(w.get_value() * t), np.sin(3 * t), 0],
                t_min=0, t_max=6*TAU, color=BLUE
            )
            obj.become(new)
        self.play(FadeIn(line), FadeIn(label))
        self.wait()
        for i in range(6):
            # Attach the updater only while w animates, so the curve is not
            # needlessly rebuilt during the static wait() that follows.
            line.add_updater(updater_of_line)
            self.play(w.increment_value, 1, run_time=1, rate_func=linear)
            line.remove_updater(updater_of_line)
            self.wait()
        self.wait()
        # Animate back to the 1:3 starting ratio.
        line.add_updater(updater_of_line)
        self.play(w.set_value, 1)
        line.remove_updater(updater_of_line)
        self.wait(2)
def smooth2(t, inflection=6):
    """Sigmoid-shaped easing function mapping t in [0, 1] to [0, 1].

    A logistic curve centered at t=0.5 is rescaled so that smooth2(0) == 0
    and smooth2(1) == 1 exactly, then clipped to [0, 1].  Larger
    ``inflection`` gives a sharper S-curve (slower start/end).  Unlike
    manim's built-in ``smooth`` this uses a gentler default steepness,
    which is why it is passed as ``rate_func`` below.

    The logistic is computed locally instead of relying on manimlib's
    global ``sigmoid`` so the helper is self-contained.
    """
    import math

    def _logistic(x):
        # Standard logistic function 1 / (1 + e^-x).
        return 1.0 / (1.0 + math.exp(-x))

    # Value of the raw logistic at t=0; subtracted and rescaled so the
    # endpoints land exactly on 0 and 1.
    error = _logistic(-inflection / 2)
    return np.clip(
        (_logistic(inflection * (t - 0.5)) - error) / (1 - 2 * error),
        0, 1,
    )
class From2DTo3DLissajous(ThreeDScene):
    """Reveal that the flat Lissajous figure is the projection of a 3-D
    curve: start looking straight down the z-axis, then tilt the camera."""
    CONFIG = {
        "camera_config": {
            "background_color": WHITE
        }
    }

    def construct(self):
        self.set_camera_orientation(phi=0*DEGREES, theta=90*DEGREES, distance=10000)
        frame_axes = ThreeDAxes(number_line_config={"include_tip": False})
        # Frequencies 1 : 3 : 2 on x, y, z.
        space_curve = ParametricFunction(
            lambda t: np.array([np.cos(1 * t), np.sin(3 * t), np.sin(2 * t)]),
            t_min=0, t_max=4 * TAU, color=BLUE
        )
        self.add(space_curve)
        self.wait()
        self.play(FadeIn(frame_axes))
        self.wait()
        # Tilt away from the top-down view using the custom gentle easing.
        self.move_camera(phi=70*DEGREES, theta=135*DEGREES, rate_func=smooth2, run_time=5)
        self.begin_ambient_camera_rotation(rate=0.8)
        self.wait(10)
        self.play(FadeOut(frame_axes), FadeOut(space_curve))
        self.wait()
class ReadyTo3DLissajous(ThreeDScene):
    """Build the 3-D Lissajous construction one axis at a time: three
    driving circles (frequencies 1:2:3), a translucent sliding plane per
    coordinate, the planes' intersection lines, and finally the combined
    point P."""
    CONFIG = {
        "camera_config": {
            "background_color": WHITE
        },
        # Swappable mobject classes so subclasses can use other dot/line types.
        "dot_class": Dot3D,
        "line_class": Line,
    }

    def construct(self):
        axes = ThreeDAxes()
        # self.add(axes)
        self.set_camera_orientation(phi=70*DEGREES, theta=45*DEGREES)
        # self.set_camera_orientation(distance=1000000)
        self.begin_ambient_camera_rotation(rate=0.25)
        # One driving circle per coordinate, pushed away from the origin
        # along the axis it does NOT drive.
        circle_x = Circle(color=RED).rotate(PI / 2, RIGHT).shift(DOWN * 2.5).set_shade_in_3d()
        circle_y = Circle(color=RED).rotate(PI / 2, DOWN).shift(LEFT * 2.5).set_shade_in_3d()
        circle_z = Circle(color=RED).shift(IN * 2.5).set_shade_in_3d()
        self.wait()
        self.play(
            ShowCreation(VGroup(circle_x, circle_y, circle_z))
        )
        theta = ValueTracker(0)
        # Frequencies 1, 2, 3 on the three circles.  Named functions so the
        # updaters can be removed between demonstration phases.
        def updater_of_point_x(m):
            m.move_to(circle_x.point_at_angle(1 * theta.get_value()))
        def updater_of_point_y(m):
            m.move_to(circle_y.point_at_angle(2 * theta.get_value()))
        def updater_of_point_z(m):
            m.move_to(circle_z.point_at_angle(3 * theta.get_value()))
        point_x = self.dot_class().add_updater(updater_of_point_x).set_color(GREEN)
        # Horizontal plane tracking point_x's z-coordinate.
        def updater_of_plane_x(m):
            m.move_to(point_x.get_center()[2] * OUT)
        plane_x = ParametricSurface(
            lambda u, v: np.array([u, v, 0]),
            u_min=-2.5, u_max=2.5, v_min=-2.5, v_max=2.5,
            checkerboard_colors=None,
            fill_color=GREEN, fill_opacity=0.3,
            stroke_width=0
        ).add_updater(updater_of_plane_x)
        self.wait()
        self.play(ShowCreation(point_x))
        self.play(ShowCreation(plane_x))
        self.wait()
        # Phase 1: demonstrate the x-circle's plane alone for one turn.
        self.play(theta.increment_value, PI*2, run_time=5, rate_func=linear)
        self.wait()
        # Freeze phase 1 objects before demonstrating the next axis.
        point_x.remove_updater(updater_of_point_x)
        plane_x.remove_updater(updater_of_plane_x)
        theta.set_value(0)
        point_y = self.dot_class().add_updater(updater_of_point_y).set_color(ORANGE)
        # Vertical plane tracking point_y's y-coordinate.
        def updater_of_plane_y(m):
            m.move_to(point_y.get_center()[1] * UP)
        plane_y = ParametricSurface(
            lambda u, v: np.array([u, 0, v]),
            u_min=-2.5, u_max=2.5, v_min=-2.5, v_max=2.5,
            checkerboard_colors=None,
            fill_color=ORANGE, fill_opacity=0.3,
            stroke_width=0
        ).add_updater(updater_of_plane_y)
        # Intersection line of plane_x and plane_y (parallel to the x-axis).
        line_x = self.line_class(
            np.array([-2.5, 0, 0]),
            np.array([ 2.5, 0, 0]),
            fill_color=GOLD_E,
            stroke_width=2
        ).add_updater(lambda m: m.move_to(point_y.get_center()[1] * UP + point_x.get_center()[2] * OUT))
        self.wait()
        self.play(ShowCreation(point_y))
        self.play(ShowCreation(plane_y))
        self.play(ShowCreation(line_x))
        self.wait()
        # Phase 2: y-axis demonstration.
        self.play(theta.increment_value, PI*2, run_time=5, rate_func=linear)
        self.wait()
        point_y.remove_updater(updater_of_point_y)
        plane_y.remove_updater(updater_of_plane_y)
        theta.set_value(0)
        point_z = self.dot_class().add_updater(updater_of_point_z).set_color(PURPLE)
        # Plane tracking point_z's x-coordinate.
        def updater_of_plane_z(m):
            m.move_to(point_z.get_center()[0] * RIGHT)
        plane_z = ParametricSurface(
            lambda u, v: np.array([0, u, v]),
            u_min=-2.5, u_max=2.5, v_min=-2.5, v_max=2.5,
            checkerboard_colors=None,
            fill_color=PURPLE, fill_opacity=0.3,
            stroke_width=0
        ).add_updater(updater_of_plane_z)
        # Remaining two intersection lines.
        line_y = self.line_class(
            np.array([0, -2.5, 0]),
            np.array([0, 2.5, 0]),
            fill_color=GOLD_E,
            stroke_width=2
        ).add_updater(lambda m: m.move_to(point_z.get_center()[0] * RIGHT + point_x.get_center()[2] * OUT))
        line_z = self.line_class(
            np.array([0, 0, -2.5]),
            np.array([0, 0, 2.5]),
            fill_color=GOLD_E,
            stroke_width=2
        ).add_updater(lambda m: m.move_to(point_z.get_center()[0] * RIGHT + point_y.get_center()[1] * UP))
        self.wait()
        self.play(ShowCreation(point_z))
        self.play(ShowCreation(plane_z))
        self.play(ShowCreation(line_y), ShowCreation(line_z))
        self.wait()
        # Phase 3: z-axis demonstration.
        self.play(theta.increment_value, PI*2, run_time=5, rate_func=linear)
        self.wait()
        theta.set_value(0)
        # P sits at the three planes' common intersection; drawn unshaded so
        # it stays visible through the translucent planes.
        P = self.dot_class().set_shade_in_3d(False)
        P.add_updater(lambda m: m.move_to(np.array([point_z.get_center()[0], point_y.get_center()[1], point_x.get_center()[2]])))
        self.wait()
        self.play(ShowCreation(P))
        self.stop_ambient_camera_rotation()
        self.wait()
        # theta=405° == 45° plus a full turn, so the camera returns to its
        # starting azimuth via a long sweep.
        self.move_camera(phi=70*DEGREES, theta=405*DEGREES)
class DualAxisIllusion(ThreeDScene):
    """Rotate a thick 3-D Lissajous curve whose silhouette appears to spin
    about two different axes at once (the 'dual axis illusion')."""
    CONFIG = {
        "camera_config": {
            "background_color": WHITE
        }
    }

    def construct(self):
        self.set_camera_orientation(phi=90*DEGREES, theta=45*DEGREES, distance=10000, gamma=90*DEGREES)
        # A circle in x/y (frequency 2) with a z-wobble at frequency 3.
        illusion_curve = ParametricFunction(
            lambda t: np.array([np.cos(2 * t), np.sin(2 * t), 0.6 * np.sin(3 * t)]),
            t_min=0, t_max=4 * TAU, color=BLUE, stroke_width=20
        )
        illusion_curve.scale(2)
        self.add(illusion_curve)
        self.wait()
        self.begin_ambient_camera_rotation(rate=0.8)
        self.wait(10)
class EndScene(ThreeDScene):
    """Closing scene: the rotating illusion curve, then a dimming overlay
    with credits ("special thanks") and reference links pinned to the
    camera frame."""
    CONFIG = {
        "camera_config": {
            "background_color": WHITE
        }
    }

    def construct(self):
        self.set_camera_orientation(phi=90*DEGREES, theta=45*DEGREES, distance=10000, gamma=90*DEGREES)
        line = ParametricFunction(
            lambda t: np.array([np.cos(2 * t), np.sin(2 * t), 0.6 * np.sin(3 * t)]),
            t_min=0, t_max=4 * TAU, color=BLUE, stroke_width=20
        ).scale(2)
        self.play(FadeIn(line))
        self.wait()
        self.begin_ambient_camera_rotation(rate=0.5)
        self.wait(6)
        # Semi-transparent black rectangle dimming the 3-D scene behind the
        # credits; fixed in frame so it ignores the rotating camera.
        bg = Rectangle(width=16, height=10).set_fill(color=BLACK, opacity=0.8)
        self.camera.add_fixed_in_frame_mobjects(bg)
        self.wait()
        self.play(FadeIn(bg))
        # Credits: heading, then two avatar images each with a handle label.
        thanks = Group(
            Text("特别鸣谢", font="Source Han Sans CN").scale(0.55).set_color(RED),
            ImageMobject("GZTime.png").scale(0.3),
            Text("@GZTime", font="Source Han Serif CN").scale(0.35).set_color(BLUE),
            ImageMobject("cigar.png").scale(0.3),
            Text("@cigar666", font="Source Han Serif CN").scale(0.35).set_color(BLUE)
        )
        self.camera.add_fixed_in_frame_mobjects(thanks)
        thanks[0].to_corner(UR)
        thanks[2].next_to(thanks[0], DOWN, aligned_edge=RIGHT)
        thanks[1].next_to(thanks[2], LEFT)
        thanks[3].next_to(thanks[1], DOWN)
        thanks[4].next_to(thanks[3], RIGHT)
        # NOTE(review): this call re-positions items 1-4 as one group under
        # the heading, overriding parts of the per-item layout just above.
        thanks[1:].next_to(thanks[0], DOWN, aligned_edge=RIGHT)
        thanks[1].scale(1.5, about_point=thanks[1].get_center())
        # References list, bottom-left corner.
        refer = VGroup(
            Text("参考", font="Source Han Sans CN").scale(0.55).set_color(RED),
            Text("[1] Wikipedia利萨茹曲线 https://en.wikipedia.org/wiki/Lissajous_curve", font="Source Han Serif CN").scale(0.3),
            Text("[2] processing利萨如图形 https://www.bilibili.com/video/av33110155", font="Source Han Serif CN").scale(0.3),
            Text("[3] 双轴错觉 https://killedbyapixel.github.io/Dual-Axis-Illusion/", font="Source Han Serif CN").scale(0.3),
            Text("[4] 双周错觉代码仓库 https://github.com/KilledByAPixel/Dual-Axis-Illusion", font="Source Han Serif CN").scale(0.3),
        )
        self.camera.add_fixed_in_frame_mobjects(refer)
        refer.arrange(DOWN, aligned_edge=LEFT)
        refer.to_corner(DL)
        self.wait()
        self.play(FadeInFromDown(thanks))
        self.play(FadeIn(refer))
        self.wait(10)
class VideoCover(Scene):
    """Static cover image: a dark banner, a two-line title with a soft
    glow duplicate behind it, and the two author handles on the left."""

    def construct(self):
        # Dark translucent banner across the middle of the frame.
        banner = Rectangle(width=18, height=3.5, fill_opacity=0.7, fill_color=BLACK, stroke_width=0).shift(DOWN*0.5)
        # Title glow layer: same text, wide translucent stroke, drawn behind.
        title_glow = VGroup(
            Text("可视/三维", font="Source Han Serif CN", color=BLUE_B).scale(1).set_stroke(width=12, opacity=0.4),
            Text("Lissajous图形", font="Source Han Serif CN", color=RED_B).scale(1.2).set_stroke(width=12, opacity=0.4)
        ).arrange(DOWN, aligned_edge=RIGHT, buff=0.4)
        # Crisp title layer on top.
        title_text = VGroup(
            Text("可视/三维", font="Source Han Serif CN", color=BLUE).scale(1),
            Text("Lissajous图形", font="Source Han Serif CN", color=RED).scale(1.2)
        ).arrange(DOWN, aligned_edge=RIGHT, buff=0.4)
        for layer in (title_glow, title_text):
            layer.to_edge(RIGHT, buff=1.3).shift(DOWN*0.5)
        # Author handles with gradient colors, left side.
        credits = VGroup(
            TextMobject("@鹤翔万里", background_stroke_width=0).scale(1.2).set_color([YELLOW, RED]),
            TextMobject("@\ GZTime", background_stroke_width=0).scale(1.2).set_color([WHITE, BLUE])
        ).arrange(DOWN, aligned_edge=LEFT)
        credits.shift(LEFT*4 + DOWN*0.5)
        self.add(banner, title_glow, title_text, credits)
| 23,485 | 9,099 |
import os
import subprocess
import sys
import time
import pkg_resources
from satella.coding import silence_excs
from satella.coding.sequences import smart_enumerate
from satella.files import write_to_file, read_in_file
from socatlord.parse_config import parse_etc_socatlord
def install_socatlord(verbose: bool = False) -> None:
    """Install socatlord as a systemd service.

    Copies the bundled unit file into /lib/systemd/system, reloads the
    systemd daemon and enables the service.

    :param verbose: if True, print each step as it is performed

    Uses ``subprocess.run`` with argument lists instead of ``os.system``
    so no shell is involved; like the original ``os.system`` calls, the
    systemctl exit codes are deliberately not checked (best-effort).
    """
    # Locate the unit file shipped inside the package.
    filename = pkg_resources.resource_filename(__name__, 'systemd/socatlord.service')
    contents = read_in_file(filename, 'utf-8')
    if verbose:
        print('Writing /lib/systemd/system/socatlord.service')
    write_to_file('/lib/systemd/system/socatlord.service', contents, 'utf-8')
    if verbose:
        print('Calling systemctl daemon-reload')
    subprocess.run(['systemctl', 'daemon-reload'])
    if verbose:
        print('Calling systemctl enable socatlord.service')
    subprocess.run(['systemctl', 'enable', 'socatlord.service'])
def start_all_socats(config_file: str, verbose: bool = False) -> None:
    """Launch one socat forwarder per redirection in the config file.

    For every parsed rule, spawns a ``socat`` child, records its PID in
    /var/run/socatlord/<index>, waits one second and verifies every child
    is still alive.  If any child has already exited, reports it, removes
    its PID file and aborts the whole program with exit code 1.

    :param config_file: path of the socatlord config file to parse
    :param verbose: if True, echo each command and inherit stdio so socat
        output is visible
    """
    processes_and_args = []
    # smart_enumerate unpacks each parsed tuple inline alongside its index.
    for i, proto, host1, port1, host2, port2 in smart_enumerate(parse_etc_socatlord(config_file)):
        command = ['socat', '%s-listen:%s,bind=%s,reuseaddr,fork' % (proto, port1, host1),
                   '%s:%s:%s' % (proto, host2, port2)]
        kwargs = {'stdin': subprocess.DEVNULL, 'stdout': subprocess.DEVNULL,
                  'stderr': subprocess.DEVNULL}
        if verbose:
            print('Calling %s' % (command,))
            # In verbose mode, let socat inherit our stdio instead of DEVNULL.
            kwargs = {}
        proc = subprocess.Popen(command, **kwargs)
        processes_and_args.append((proc, command))
        # PID file name is the rule's 0-based index.
        write_to_file(os.path.join('/var/run/socatlord', str(i)), str(proc.pid), 'utf-8')
    if verbose:
        print('All socats launched, checking for liveness...')
    time.sleep(1)
    for i, proc, cmd in smart_enumerate(processes_and_args):
        # wait(timeout=0.0) raises TimeoutExpired when the child is still
        # running; silence_excs swallows it and skips the failure branch.
        with silence_excs(subprocess.TimeoutExpired):
            proc.wait(timeout=0.0)
            # Only reached if the child already exited: report and abort.
            rc = proc.returncode
            print('socat no %s (PID %s) died (RC=%s), command was "%s", aborting' % (i+1, proc.pid,
                                                                                    rc, cmd))
            os.unlink(os.path.join('/var/run/socatlord', str(i)))
            sys.exit(1)
    if verbose:
        print('All socats alive, finishing successfully')
def do_precheck(config_file: str, verbose: bool = False):
    """Verify preconditions before any socatlord operation.

    Aborts (exit code 1) unless running as root.  Creates a commented
    template config file if none exists, and creates the PID-file
    directory /var/run/socatlord with owner-only permissions.

    :param config_file: path of the config file to check/create
    :param verbose: if True, report created files/directories
    """
    if os.geteuid():
        print('Must run as root. Aborting.')
        sys.exit(1)
    if not os.path.exists(config_file):
        write_to_file(config_file, b'''# Put your redirections here
# eg.
# 443 -> 192.168.1.1:443
# will redirect all TCP traffic that comes to this host (0.0.0.0) to specified host and port
# to redirect UDP traffic just prefix your config with udp, eg.
# udp 443 -> 192.168.1.1:443
# You can additionally specify explicit interfaces to listen on eg.
# 192.168.1.2:443 -> 192.168.1.1:443
''')
        if verbose:
            print('%s created' % (config_file,))
    if not os.path.exists('/var/run/socatlord'):
        if verbose:
            print('Making directory /var/run/socatlord')
        os.mkdir('/var/run/socatlord')
        # 0o700, not 0o600: a directory needs the execute (search) bit to be
        # traversable at all; access remains restricted to the owner (root).
        os.chmod('/var/run/socatlord', 0o700)
def kill_all_socats(verbose: bool = False):
    """SIGKILL every socat whose PID file lives in /var/run/socatlord.

    Each PID file is removed afterwards regardless of whether the kill
    succeeded; failures are reported but never raised.

    :param verbose: if True, announce each PID before killing it
    """
    run_dir = '/var/run/socatlord'
    for entry in os.listdir(run_dir):
        pid_path = os.path.join(run_dir, entry)
        target_pid = int(read_in_file(pid_path, 'utf-8'))
        try:
            if verbose:
                print('Killing %s' % (target_pid, ))
            os.kill(target_pid, 9)
        except PermissionError:
            print('Failed to kill %s with EPERM' % (target_pid, ))
        except OSError:
            # Covers ProcessLookupError etc. - the process is already gone.
            print('Failed to kill %s' % (target_pid, ))
        os.unlink(pid_path)
| 3,612 | 1,251 |
"""
Provide a basic set of endpoints for an application to implement OAuth client
functionality.
These endpoints assume that the ``current_app`` has already been configured
with an OAuth client instance from the ``authlib`` package as follows:
.. code-block:: python
from authutils.oauth2.client import OAuthClient
from service.api import app
app.oauth_client = OAuthClient(
'client-id',
client_secret='...',
api_base_url='https://api.auth.net/',
access_token_url='https://auth.net/oauth/token',
authorize_url='https://auth.net/oauth/authorize',
client_kwargs={
'scope': 'openid data user',
'redirect_uri': 'https://service.net/authorize',
},
)
(NOTE the scopes are space-separated.)
"""
from urllib.parse import urljoin
from cdiserrors import APIError
import flask
from flask import current_app
import authutils.oauth2.client.authorize
blueprint = flask.Blueprint("oauth", __name__)
@blueprint.route("/authorization_url", methods=["GET"])
def get_authorization_url():
    """
    Provide a redirect to the authorization endpoint from the OP.

    Returns the OP's authorization URL as the response body and stashes
    the random OAuth ``state`` in the Flask session so the /authorize
    callback can validate it (CSRF protection).
    """
    # This will be the value that was put in the ``client_kwargs`` in config.
    redirect_uri = current_app.oauth_client.session.redirect_uri
    # Get the authorization URL and the random state; save the state to check
    # later, and return the URL.
    authorization_url, state = current_app.oauth_client.generate_authorize_redirect(
        redirect_uri
    )
    flask.session["state"] = state
    return authorization_url
@blueprint.route("/authorize", methods=["GET"])
def do_authorize():
    """
    Send a token request to the OP.

    Delegates the whole code-for-token exchange (including state
    validation) to ``authutils``; responds 204 No Content on success.
    """
    authutils.oauth2.client.authorize.client_do_authorize()
    return "", 204
@blueprint.route("/logout", methods=["GET"])
def logout_oauth():
    """
    Log out the user.

    To accomplish this, just revoke the refresh token if provided.

    Responds 204 on success, 400 with an error message if revocation
    fails.
    """
    url = urljoin(current_app.config.get("USER_API"), "/oauth2/revoke")
    # NOTE(review): this reads ``request.form`` on a GET route; a GET
    # request normally carries no form body, so ``token`` would be None
    # here - confirm whether callers send POST-style bodies or whether
    # ``request.args`` was intended.
    token = flask.request.form.get("token")
    try:
        current_app.oauth_client.session.revoke_token(url, token)
    except APIError as e:
        msg = "could not log out, failed to revoke token: {}".format(e.message)
        return msg, 400
    return "", 204
| 2,303 | 685 |
import unittest
from flower.utils import bugreport
from celery import Celery
class BugreportTests(unittest.TestCase):
    """Sanity checks that flower's bugreport() output names the key
    dependency versions (celery, tornado, humanize)."""

    def test_default(self):
        """bugreport() with no app must still detect the Celery version."""
        report = bugreport()
        # assertIn/assertNotIn print the haystack on failure, unlike
        # assertTrue('x' in y) which only reports "False is not true".
        self.assertNotIn('Unknown Celery version', report)
        self.assertIn('tornado', report)
        self.assertIn('humanize', report)
        self.assertIn('celery', report)

    def test_with_app(self):
        """An explicitly supplied Celery app is reported the same way."""
        app = Celery()
        report = bugreport(app)
        self.assertNotIn('Unknown Celery version', report)
        self.assertIn('tornado', report)
        self.assertIn('humanize', report)
        self.assertIn('celery', report)
| 655 | 189 |
"""Methods to generate a SINGLE image to represent any ABF.
There are several categories which are grossly analyzed.
gain function:
* current clamp recording where command traces differ by sweep.
* must also have something that looks like an action potential
* will be analyzed with AP detection information
voltage clamp I/V:
* voltage clamp recording where command traces differ by sweep.
* image will simply be an overlay
drug experiment:
* voltage clamp or current clamp where every command is the same
* tags will be reported over a chronological graph
"""
import sys
import os
import glob
import matplotlib.pyplot as plt
sys.path.insert(0,'../../')
import swhlab
def processFolder(abfFolder):
    """call processAbf() for every ABF in a folder.

    Silently returns (doing nothing) when abfFolder is not a plausible
    path string.  Each ABF is rendered with show=False; a single
    plt.show() at the end displays everything at once.
    """
    # isinstance instead of `type(...) is str`, and an explicit guard order
    # so non-strings never reach len().
    if not isinstance(abfFolder, str) or not len(abfFolder) > 3:
        return
    files = sorted(glob.glob(abfFolder + "/*.abf"))
    for i, fname in enumerate(files):
        # i+1 makes the progress counter 1-based ("1 of N", not "0 of N").
        print("\n\n\n### PROCESSING {} of {}:".format(i + 1, len(files)), os.path.basename(fname))
        processAbf(fname, show=False)
    plt.show()
    return
def processAbf(abfFname,saveAs=False,dpi=100,show=True):
    """
    automatically generate a single representative image for an ABF.
    If saveAs is given (full path of a jpg of png file), the image will be saved.
    Otherwise, the image will pop up in a matplotlib window.
    """
    # Guard: silently ignore anything that is not a plausible path string.
    if not type(abfFname) is str or not len(abfFname)>3:
        return
    abf=swhlab.ABF(abfFname)
    plot=swhlab.plotting.ABFplot(abf)
    plot.figure_height=6
    plot.figure_width=10
    plot.subplot=False
    plot.figure(True)
    # Branch 1: every sweep ran the same protocol (or there is only one sweep).
    if abf.get_protocol_sequence(0)==abf.get_protocol_sequence(1) or abf.sweeps<2:
        # same protocol every time
        if abf.lengthMinutes<2:
            # short (probably a memtest or tau)
            # Stacked layout: sweeps on top, command protocol below, shared x.
            ax1=plt.subplot(211)
            plot.figure_sweeps()
            plt.title("{} ({} sweeps)".format(abf.ID,abf.sweeps))
            plt.gca().get_xaxis().set_visible(False)
            plt.subplot(212,sharex=ax1)
            plot.figure_protocol()
            plt.title("")
        else:
            # long (probably a drug experiment)
            plot.figure_chronological()
    else:
        # protocol changes every sweep
        plots=[211,212] # assume we want 2 images
        if abf.units=='mV': # maybe it's something with APs?
            ap=swhlab.AP(abf) # go ahead and do AP detection
            ap.detect() # try to detect APs
            if len(ap.APs): # if we found some
                plots=[221,223,222,224] # get ready for 4 images
        # Left column (or top half): sweeps over the protocol, shared x.
        ax1=plt.subplot(plots[0])
        plot.figure_sweeps()
        plt.title("{} ({} sweeps)".format(abf.ID,abf.sweeps))
        plt.gca().get_xaxis().set_visible(False)
        plt.subplot(plots[1],sharex=ax1)
        plot.figure_protocols()
        plt.title("protocol")
        # Right column only exists when APs were detected (4-panel layout).
        if len(plots)>2:
            # assume we want to look at the first AP
            ax2=plt.subplot(plots[2])
            plot.rainbow=False
            plot.kwargs["color"]='b'
            plot.figure_chronological()
            plt.gca().get_xaxis().set_visible(False)
            plt.title("first AP magnitude")
            # velocity plot
            # dV/dt of the same window, zoomed +/-50 ms around the first AP.
            plt.subplot(plots[3],sharex=ax2)
            plot.abf.derivative=True
            plot.rainbow=False
            plot.traceColor='r'
            plot.figure_chronological()
            plt.axis([ap.APs[0]["T"]-.05,ap.APs[0]["T"]+.05,None,None])
            plt.title("first AP velocity")
    # Saving takes precedence over interactive display and returns early.
    if saveAs:
        print("saving",os.path.abspath(saveAs))
        plt.savefig(os.path.abspath(saveAs),dpi=dpi)
        return
    if show:
        plot.show()
def selectFile():
    """Pop up a file-selection dialog and glance at the chosen ABF."""
    # Discard any figures left over from a previous run.
    plt.close("all")
    print("GLANCING AT A FILE:")
    chosenFile = swhlab.common.gui_getFile()
    processAbf(chosenFile)
def selectFolder():
    """Pop up a folder-selection dialog and glance at every ABF inside it."""
    # Discard any figures left over from a previous run.
    plt.close("all")
    chosenFolder = swhlab.common.gui_getFolder()
    processFolder(chosenFolder)
# This module is meant to be imported; running it directly only confirms
# that the imports above succeed.
if __name__=="__main__":
    print("DONE")
from dataclasses import dataclass, field
from typing import List, Optional
from bindings.csw.abstract_general_operation_parameter_ref_type import (
OperationParameterGroup,
)
from bindings.csw.actuate_type import ActuateType
from bindings.csw.base_unit import BaseUnit
from bindings.csw.cartesian_cs import CartesianCs
from bindings.csw.concatenated_operation import ConcatenatedOperation
from bindings.csw.conventional_unit import ConventionalUnit
from bindings.csw.coordinate_operation import CoordinateOperation
from bindings.csw.coordinate_reference_system import CoordinateReferenceSystem
from bindings.csw.coordinate_system import CoordinateSystem
from bindings.csw.coordinate_system_axis import CoordinateSystemAxis
from bindings.csw.crs import Crs
from bindings.csw.cylindrical_cs import CylindricalCs
from bindings.csw.datum import Datum
from bindings.csw.definition import Definition
from bindings.csw.definition_proxy import DefinitionProxy
from bindings.csw.definition_type import DefinitionType
from bindings.csw.derived_unit import DerivedUnit
from bindings.csw.ellipsoid import Ellipsoid
from bindings.csw.ellipsoidal_cs import EllipsoidalCs
from bindings.csw.engineering_crs import EngineeringCrs
from bindings.csw.engineering_datum import EngineeringDatum
from bindings.csw.general_conversion_ref_type import (
CompoundCrs,
Conversion,
DerivedCrs,
ProjectedCrs,
GeneralConversion,
GeneralDerivedCrs,
)
from bindings.csw.general_operation_parameter import GeneralOperationParameter
from bindings.csw.general_transformation import GeneralTransformation
from bindings.csw.geocentric_crs import GeocentricCrs
from bindings.csw.geodetic_datum import GeodeticDatum
from bindings.csw.geographic_crs import GeographicCrs
from bindings.csw.image_crs import ImageCrs
from bindings.csw.image_datum import ImageDatum
from bindings.csw.indirect_entry import IndirectEntry
from bindings.csw.linear_cs import LinearCs
from bindings.csw.oblique_cartesian_cs import ObliqueCartesianCs
from bindings.csw.operation_2 import Operation2
from bindings.csw.operation_method import OperationMethod
from bindings.csw.operation_parameter import OperationParameter
from bindings.csw.pass_through_operation import PassThroughOperation
from bindings.csw.polar_cs import PolarCs
from bindings.csw.prime_meridian import PrimeMeridian
from bindings.csw.reference_system import ReferenceSystem
from bindings.csw.show_type import ShowType
from bindings.csw.single_operation import SingleOperation
from bindings.csw.spherical_cs import SphericalCs
from bindings.csw.temporal_crs import TemporalCrs
from bindings.csw.temporal_cs import TemporalCs
from bindings.csw.temporal_datum import TemporalDatum
from bindings.csw.time_calendar import TimeCalendar
from bindings.csw.time_calendar_era import TimeCalendarEra
from bindings.csw.time_clock import TimeClock
from bindings.csw.time_coordinate_system import TimeCoordinateSystem
from bindings.csw.time_ordinal_reference_system import TimeOrdinalReferenceSystem
from bindings.csw.time_reference_system import TimeReferenceSystem
from bindings.csw.transformation import Transformation
from bindings.csw.type_type import TypeType
from bindings.csw.unit_definition import UnitDefinition
from bindings.csw.user_defined_cs import UserDefinedCs
from bindings.csw.vertical_crs import VerticalCrs
from bindings.csw.vertical_cs import VerticalCs
from bindings.csw.vertical_datum import VerticalDatum
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class DictionaryEntryType:
"""An entry in a dictionary of definitions.
An instance of this type contains or refers to a definition object.
The number of definitions contained in this dictionaryEntry is
restricted to one, but a DefinitionCollection or Dictionary that
contains multiple definitions can be substituted if needed.
Specialized descendents of this dictionaryEntry might be restricted
in an application schema to allow only including specified types of
definitions as valid entries in a dictionary.
:ivar time_calendar_era:
:ivar time_clock:
:ivar time_calendar:
:ivar time_ordinal_reference_system:
:ivar time_coordinate_system:
:ivar time_reference_system:
:ivar operation_parameter_group:
:ivar operation_parameter:
:ivar general_operation_parameter:
:ivar operation_method:
:ivar transformation:
:ivar general_transformation:
:ivar conversion:
:ivar general_conversion:
:ivar operation:
:ivar pass_through_operation:
:ivar single_operation:
:ivar concatenated_operation:
:ivar coordinate_operation:
:ivar ellipsoid:
:ivar prime_meridian:
:ivar geodetic_datum:
:ivar temporal_datum:
:ivar vertical_datum:
:ivar image_datum:
:ivar engineering_datum:
:ivar datum:
:ivar oblique_cartesian_cs:
:ivar cylindrical_cs:
:ivar polar_cs:
:ivar spherical_cs:
:ivar user_defined_cs:
:ivar linear_cs:
:ivar temporal_cs:
:ivar vertical_cs:
:ivar cartesian_cs:
:ivar ellipsoidal_cs:
:ivar coordinate_system:
:ivar coordinate_system_axis:
:ivar compound_crs:
:ivar temporal_crs:
:ivar image_crs:
:ivar engineering_crs:
:ivar derived_crs:
:ivar projected_crs:
:ivar general_derived_crs:
:ivar geocentric_crs:
:ivar vertical_crs:
:ivar geographic_crs:
:ivar coordinate_reference_system:
:ivar crs:
:ivar reference_system:
:ivar conventional_unit:
:ivar derived_unit:
:ivar base_unit:
:ivar unit_definition:
:ivar definition_proxy:
:ivar definition_collection:
:ivar dictionary:
:ivar definition: This element in a dictionary entry contains the
actual definition.
:ivar type:
:ivar href:
:ivar role:
:ivar arcrole:
:ivar title:
:ivar show:
:ivar actuate:
:ivar remote_schema:
"""
time_calendar_era: Optional[TimeCalendarEra] = field(
default=None,
metadata={
"name": "TimeCalendarEra",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
time_clock: Optional[TimeClock] = field(
default=None,
metadata={
"name": "TimeClock",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
time_calendar: Optional[TimeCalendar] = field(
default=None,
metadata={
"name": "TimeCalendar",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
time_ordinal_reference_system: Optional[TimeOrdinalReferenceSystem] = field(
default=None,
metadata={
"name": "TimeOrdinalReferenceSystem",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
time_coordinate_system: Optional[TimeCoordinateSystem] = field(
default=None,
metadata={
"name": "TimeCoordinateSystem",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
time_reference_system: Optional[TimeReferenceSystem] = field(
default=None,
metadata={
"name": "_TimeReferenceSystem",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
operation_parameter_group: Optional[OperationParameterGroup] = field(
default=None,
metadata={
"name": "OperationParameterGroup",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
operation_parameter: Optional[OperationParameter] = field(
default=None,
metadata={
"name": "OperationParameter",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
general_operation_parameter: Optional[GeneralOperationParameter] = field(
default=None,
metadata={
"name": "_GeneralOperationParameter",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
operation_method: Optional[OperationMethod] = field(
default=None,
metadata={
"name": "OperationMethod",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
transformation: Optional[Transformation] = field(
default=None,
metadata={
"name": "Transformation",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
general_transformation: Optional[GeneralTransformation] = field(
default=None,
metadata={
"name": "_GeneralTransformation",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
conversion: Optional[Conversion] = field(
default=None,
metadata={
"name": "Conversion",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
general_conversion: Optional[GeneralConversion] = field(
default=None,
metadata={
"name": "_GeneralConversion",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
operation: Optional[Operation2] = field(
default=None,
metadata={
"name": "_Operation",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
pass_through_operation: Optional[PassThroughOperation] = field(
default=None,
metadata={
"name": "PassThroughOperation",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
single_operation: Optional[SingleOperation] = field(
default=None,
metadata={
"name": "_SingleOperation",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
concatenated_operation: Optional[ConcatenatedOperation] = field(
default=None,
metadata={
"name": "ConcatenatedOperation",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
coordinate_operation: Optional[CoordinateOperation] = field(
default=None,
metadata={
"name": "_CoordinateOperation",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
ellipsoid: Optional[Ellipsoid] = field(
default=None,
metadata={
"name": "Ellipsoid",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
prime_meridian: Optional[PrimeMeridian] = field(
default=None,
metadata={
"name": "PrimeMeridian",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
geodetic_datum: Optional[GeodeticDatum] = field(
default=None,
metadata={
"name": "GeodeticDatum",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
temporal_datum: Optional[TemporalDatum] = field(
default=None,
metadata={
"name": "TemporalDatum",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
vertical_datum: Optional[VerticalDatum] = field(
default=None,
metadata={
"name": "VerticalDatum",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
image_datum: Optional[ImageDatum] = field(
default=None,
metadata={
"name": "ImageDatum",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
engineering_datum: Optional[EngineeringDatum] = field(
default=None,
metadata={
"name": "EngineeringDatum",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
datum: Optional[Datum] = field(
default=None,
metadata={
"name": "_Datum",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
oblique_cartesian_cs: Optional[ObliqueCartesianCs] = field(
default=None,
metadata={
"name": "ObliqueCartesianCS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
cylindrical_cs: Optional[CylindricalCs] = field(
default=None,
metadata={
"name": "CylindricalCS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
polar_cs: Optional[PolarCs] = field(
default=None,
metadata={
"name": "PolarCS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
spherical_cs: Optional[SphericalCs] = field(
default=None,
metadata={
"name": "SphericalCS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
user_defined_cs: Optional[UserDefinedCs] = field(
default=None,
metadata={
"name": "UserDefinedCS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
linear_cs: Optional[LinearCs] = field(
default=None,
metadata={
"name": "LinearCS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
temporal_cs: Optional[TemporalCs] = field(
default=None,
metadata={
"name": "TemporalCS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
vertical_cs: Optional[VerticalCs] = field(
default=None,
metadata={
"name": "VerticalCS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
cartesian_cs: Optional[CartesianCs] = field(
default=None,
metadata={
"name": "CartesianCS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
ellipsoidal_cs: Optional[EllipsoidalCs] = field(
default=None,
metadata={
"name": "EllipsoidalCS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
coordinate_system: Optional[CoordinateSystem] = field(
default=None,
metadata={
"name": "_CoordinateSystem",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
coordinate_system_axis: Optional[CoordinateSystemAxis] = field(
default=None,
metadata={
"name": "CoordinateSystemAxis",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
compound_crs: Optional[CompoundCrs] = field(
default=None,
metadata={
"name": "CompoundCRS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
temporal_crs: Optional[TemporalCrs] = field(
default=None,
metadata={
"name": "TemporalCRS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
image_crs: Optional[ImageCrs] = field(
default=None,
metadata={
"name": "ImageCRS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
engineering_crs: Optional[EngineeringCrs] = field(
default=None,
metadata={
"name": "EngineeringCRS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
derived_crs: Optional[DerivedCrs] = field(
default=None,
metadata={
"name": "DerivedCRS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
projected_crs: Optional[ProjectedCrs] = field(
default=None,
metadata={
"name": "ProjectedCRS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
general_derived_crs: Optional[GeneralDerivedCrs] = field(
default=None,
metadata={
"name": "_GeneralDerivedCRS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
geocentric_crs: Optional[GeocentricCrs] = field(
default=None,
metadata={
"name": "GeocentricCRS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
vertical_crs: Optional[VerticalCrs] = field(
default=None,
metadata={
"name": "VerticalCRS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
geographic_crs: Optional[GeographicCrs] = field(
default=None,
metadata={
"name": "GeographicCRS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
coordinate_reference_system: Optional[CoordinateReferenceSystem] = field(
default=None,
metadata={
"name": "_CoordinateReferenceSystem",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
crs: Optional[Crs] = field(
default=None,
metadata={
"name": "_CRS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
reference_system: Optional[ReferenceSystem] = field(
default=None,
metadata={
"name": "_ReferenceSystem",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
conventional_unit: Optional[ConventionalUnit] = field(
default=None,
metadata={
"name": "ConventionalUnit",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
derived_unit: Optional[DerivedUnit] = field(
default=None,
metadata={
"name": "DerivedUnit",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
base_unit: Optional[BaseUnit] = field(
default=None,
metadata={
"name": "BaseUnit",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
unit_definition: Optional[UnitDefinition] = field(
default=None,
metadata={
"name": "UnitDefinition",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
definition_proxy: Optional[DefinitionProxy] = field(
default=None,
metadata={
"name": "DefinitionProxy",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
definition_collection: Optional["DefinitionCollection"] = field(
default=None,
metadata={
"name": "DefinitionCollection",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
dictionary: Optional["Dictionary"] = field(
default=None,
metadata={
"name": "Dictionary",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
definition: Optional[Definition] = field(
default=None,
metadata={
"name": "Definition",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
type: TypeType = field(
init=False,
default=TypeType.SIMPLE,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
href: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
role: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
"min_length": 1,
},
)
arcrole: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
"min_length": 1,
},
)
title: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
show: Optional[ShowType] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
actuate: Optional[ActuateType] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
remote_schema: Optional[str] = field(
default=None,
metadata={
"name": "remoteSchema",
"type": "Attribute",
"namespace": "http://www.opengis.net/gml",
},
)
@dataclass
class DefinitionMember(DictionaryEntryType):
    """GML ``definitionMember`` element: a definition contained inline in a
    dictionary (xsdata-generated binding)."""

    class Meta:
        name = "definitionMember"
        namespace = "http://www.opengis.net/gml"
@dataclass
class DictionaryEntry(DictionaryEntryType):
    """GML ``dictionaryEntry`` element: one entry of a dictionary, provided
    inline or by reference (xsdata-generated binding)."""

    class Meta:
        name = "dictionaryEntry"
        namespace = "http://www.opengis.net/gml"
@dataclass
class DictionaryType(DefinitionType):
    """A non-abstract bag that is specialized for use as a dictionary which
    contains a set of definitions.

    These definitions are referenced from other places, in the same and
    different XML documents. In this restricted type, the inherited
    optional "description" element can be used for a description of this
    dictionary. The inherited optional "name" element can be used for
    the name(s) of this dictionary. The inherited "metaDataProperty"
    elements can be used to reference or contain more information about
    this dictionary. The inherited required gml:id attribute allows the
    dictionary to be referenced using this handle.

    :ivar definition_member:
    :ivar dictionary_entry: An entry in this dictionary. The content of
        an entry can itself be a lower level dictionary or definition
        collection. This element follows the standard GML property
        model, so the value may be provided directly or by reference.
        Note that if the value is provided by reference, this definition
        does not carry a handle (gml:id) in this context, so does not
        allow external references to this specific entry in this
        context. When used in this way the referenced definition will
        usually be in a dictionary in the same XML document.
    :ivar indirect_entry: An identified reference to a remote entry in
        this dictionary, to be used when this entry should be identified
        to allow external references to this specific entry.
    """

    # Inline member definitions (repeatable, order-significant).
    definition_member: List[DefinitionMember] = field(
        default_factory=list,
        metadata={
            "name": "definitionMember",
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "sequential": True,
        },
    )
    # Entries provided inline or by xlink reference.
    dictionary_entry: List[DictionaryEntry] = field(
        default_factory=list,
        metadata={
            "name": "dictionaryEntry",
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "sequential": True,
        },
    )
    # Identified references to remote entries.
    indirect_entry: List[IndirectEntry] = field(
        default_factory=list,
        metadata={
            "name": "indirectEntry",
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "sequential": True,
        },
    )
@dataclass
class DefinitionCollection(DictionaryType):
    """GML ``DefinitionCollection`` root element (xsdata-generated binding)."""

    class Meta:
        namespace = "http://www.opengis.net/gml"
@dataclass
class Dictionary(DictionaryType):
    """GML ``Dictionary`` root element (xsdata-generated binding)."""

    class Meta:
        namespace = "http://www.opengis.net/gml"
| 24,671 | 7,264 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .package import txt2epub as txt2epub
from .package import txt2pdf as txt2pdf
import argparse
__version__ = "0.1.0"
def epub():
    """Console entry point: convert a folder of text files into an EPUB.

    Builds the shared CLI, parses argv into a metadata dict, and prints
    the result of the conversion.
    """
    parser = argparse.ArgumentParser(
        prog='txt2epub.exe',
        description='テキストを電子書籍(epub)化する'
    )
    metadata = parser2metadata(parser)
    epub_init = txt2epub(metadata)
    print(epub_init.make())
def pdf():
    """Console entry point: convert a folder of text files into a PDF.

    Builds the shared CLI, parses argv into a metadata dict, and prints
    the result of the conversion.
    """
    parser = argparse.ArgumentParser(
        prog='txt2pdf.exe',
        description='テキストをPDF化する'
    )
    metadata = parser2metadata(parser)
    pdf_init = txt2pdf(metadata)
    print(pdf_init.make())
def parser2metadata(parser):
    """Attach the shared CLI options to *parser*, parse argv, and return a
    metadata dict consumed by txt2epub / txt2pdf.

    Args:
        parser (argparse.ArgumentParser): parser pre-configured with
            prog/description by the caller.

    Returns:
        dict: book metadata keyed by field name.
    """
    parser._actions[0].help = 'ヘルプの表示'
    parser.add_argument('-v', '--version', action='version', version=('%(prog)s Ver.' + __version__), help='バージョン情報の表示')
    parser.add_argument('PATH', help='フォルダのパス', metavar="PATH")
    parser.add_argument('-t', '--title', help='タイトル', type=str, metavar='(STRINGS)')
    parser.add_argument('-a', '--author', help='著者名', type=str, metavar='(STRINGS)')
    parser.add_argument('-p', '--publisher', help='出版社名', type=str, metavar='(STRINGS)')
    parser.add_argument('-tr', '--title_ruby', help='タイトルのルビ', type=str, metavar='(STRINGS)')
    parser.add_argument('-s', '--sub_title', help='サブタイトル', type=str, metavar='(STRINGS)')
    parser.add_argument('-ar', '--author_ruby', help='著者名のルビ', type=str, metavar='(STRINGS)')
    parser.add_argument('-pr', '--publisher_ruby', help='出版社名のルビ', type=str, metavar='(STRINGS)')
    parser.add_argument('-e', '--epub_version', help='電子書籍のバージョン', type=int, metavar='(INTEGER)', default=1)
    parser.add_argument('-o', '--original_first_day', help='初版出版日', metavar='(YYYY-MM-DD)')
    parser.add_argument('-u', '--original_url', help='著作物のURL', metavar='(URL)')
    # BUG FIX: the help text was a copy/paste of --publisher_ruby's
    # ('出版社名のルビ'); it now describes the illustrator option.
    parser.add_argument('-i', '--illustrator', help='イラストレーター名', type=str, metavar='(STRINGS)')
    parser.add_argument('-f', '--fiction', help='フィクション表示', action='store_true')
    args = parser.parse_args()
    metadata = dict(
        path=args.PATH,
        title=args.title,
        # BUG FIX: --title_ruby was parsed but silently dropped from the
        # returned metadata.
        title_ruby=args.title_ruby,
        author=args.author,
        publisher=args.publisher,
        fiction=args.fiction,
        sub_title=args.sub_title,
        author_ruby=args.author_ruby,
        publisher_ruby=args.publisher_ruby,
        illustrator=args.illustrator,
        version=args.epub_version,
        original_first_day=args.original_first_day,
        original_url=args.original_url
    )
    return metadata
| 2,468 | 959 |
from .Scripts import * | 22 | 7 |
import os
import shutil
import unittest
import bpy
from mmd_tools.core import pmx
from mmd_tools.core.model import Model
# Directory containing this test module, and the sibling "samples" directory
# that holds the .blend fixtures.
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))
SAMPLES_DIR = os.path.join(os.path.dirname(TESTS_DIR), 'samples')
class TestFileIoOperators(unittest.TestCase):
    """Integration tests for the mmd_tools PMX import/export operators.

    Requires Blender's ``bpy`` runtime and the sample .blend files under
    SAMPLES_DIR; intended to be run from inside Blender.
    """

    @classmethod
    def setUpClass(cls):
        """
        Clean up output from previous tests
        """
        output_dir = os.path.join(TESTS_DIR, 'output')
        for item in os.listdir(output_dir):
            if item.endswith('.OUTPUT'):
                continue  # Skip the placeholder
            item_fp = os.path.join(output_dir, item)
            if os.path.isfile(item_fp):
                os.remove(item_fp)
            elif os.path.isdir(item_fp):
                shutil.rmtree(item_fp)

    def setUp(self):
        """
        We should start each test with a clean state
        """
        # Wipe the whole Blender scene so tests cannot leak objects into
        # each other; deletion requires OBJECT mode.
        bpy.ops.object.mode_set(mode='OBJECT')
        bpy.ops.object.select_all(action='SELECT')
        bpy.ops.object.delete(use_global=True)
        # Add some useful shortcuts
        self.context = bpy.context
        self.scene = bpy.context.scene

    def test_export_shy_cube(self):
        """
        This test will load the shy_cube.blend sample and check if it exports correctly.
        The following checks will be made:
        - The texture is properly copied to the target directory
        - The material order is kept
        """
        input_blend = os.path.join(SAMPLES_DIR, 'blends', 'shy_cube', 'shy_cube.blend')
        if not os.path.isfile(input_blend):
            self.fail('required sample file %s not found. Please download it' % input_blend)
        output_pmx = os.path.join(TESTS_DIR, 'output', 'shy_cube.pmx')
        bpy.ops.wm.open_mainfile(filepath=input_blend)
        root = Model.findRoot(self.context.active_object)
        rig = Model(root)
        # Capture the source material names (Japanese MMD name when set,
        # otherwise the Blender material name) before exporting.
        orig_material_names = [mat.mmd_material.name_j or mat.name for mat in rig.materials()]
        try:
            bpy.ops.mmd_tools.export_pmx(filepath=output_pmx)
        except Exception:
            self.fail("Exception happened during export")
        else:
            self.assertTrue(os.path.isfile(output_pmx), "File was not created")  # Is this a race condition?
            # Check if the texture was properly copied
            tex_path = os.path.join(os.path.dirname(output_pmx), 'textures', 'blush.png')
            self.assertTrue(os.path.isfile(tex_path), "Texture not copied properly")
            # Load the resultant pmx file and check the material order is the expected
            result_model = pmx.load(output_pmx)
            result_material_names = [mat.name for mat in result_model.materials]
            # NOTE(review): zip() stops at the shorter list, so a truncated
            # export would still pass this check — confirm if intentional.
            same_order = True
            for orig, result in zip(orig_material_names, result_material_names):
                if orig != result:
                    same_order = False
                    break
            self.assertTrue(same_order, "Material order was lost")
if __name__ == '__main__':
    import sys
    # Blender passes its own CLI arguments first; only the arguments after
    # the "--" separator (if any) are forwarded to unittest.
    sys.argv = [__file__] + (sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else [])
    unittest.main()
| 3,150 | 965 |
"""
Module for gocdapi Stage class
"""
from gocdapi.gobase import GoBase
class Stage(GoBase):
    """
    Class to hold Go Server Stage information
    """

    def __init__(self, go_server, pipeline, data):
        """Inits Stage objects.

        Args:
            go_server (Go): A Go object which this Stage belongs to.
            pipeline (Pipeline): A Go pipeline which this Stage belongs to.
            data (str): A json string representing the Stage configuration
        """
        self.pipeline = pipeline
        # BUG FIX: super(self.__class__, self) recurses infinitely as soon
        # as Stage is subclassed; the class must be named explicitly.
        super(Stage, self).__init__(go_server, data=data)

    def __str__(self):
        """Returns a pretty representation of the object

        Returns:
            str: representation of the object
        """
        return 'Stage @ %s' % self.go_server.baseurl

    def cancel(self):
        """Cancels the stage

        Will do a POST request to go/api/stages/PIPELINE_NAME/STAGE_NAME/cancel
        """
        url = self.build_url('cancel')
        self.do_post(url)

    def history(self, offset=0):
        """Gets the history of the Stage

        Go server returns 10 instances at a time, sorted in reverse order.
        You can use offset argument which tells the API how many instances to skip.
        Will do a GET request to go/api/stages/PIPELINE_NAME/STAGE_NAME/history/OFFSET

        Args:
            offset (int): how many instances to skip

        Returns:
            str: JSON representing job history
        """
        url = self.build_url('history/%s' % offset)
        return self.get_json_data(url)

    def _poll(self):
        """Will create and define the attributes of the stage.

        Uses _data attribute populated by inherited methods, updating object
        attributes using the bunch pattern. Also sets the stage url.
        """
        self.__dict__.update(self._data)
        self.set_self_url('go/api/stages/%s/%s/' % (self.pipeline.name, self.name))
| 1,939 | 540 |
import pyaf.tests.exog.test_random_exogenous as testrandexog

# Exercise the random-exogenous test scenario with parameters (32, 20).
testrandexog.test_random_exogenous(32, 20)
# Read three segment lengths and report whether they can form a triangle.
# Triangle inequality: each side must be strictly shorter than the sum of
# the other two.
r1 = float(input('Informe o comprimento da primeira reta: '))
r2 = float(input('Informe o comprimento da segunda reta: '))
r3 = float(input('Informe o comprimento da terceira reta: '))

forms_triangle = r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2
if forms_triangle:
    print(f'As medidas {r1}, {r2} e {r3} são capazes de formar um triângulo!')
else:
    print(f'As medidas {r1}, {r2} e {r3} não são capazes de formar um triângulo!')
| 602 | 236 |
from functools import partial
from typing import Dict
import copy
from torch.nn import Module
from torchvision.models.resnet import *
from .ssd_resnet import SSD300_ResNet
from .ssd_vgg import SSD300_VGG16
from .backbones import *
# Registry mapping the config "arch" value to the detector class.
__REGISTERED_MODELS__ = {
    "SSD300_ResNet": SSD300_ResNet,
    "SSD300_VGG16": SSD300_VGG16
}
# Registry mapping the config "type" value to the backbone class.
__REGISTERED_BACKBONES__ = {
    "ResNetBackbone": ResNetBackbone,
    "VGGBackbone": VGGBackbone
}
def _get_model_instance(name):
    """Look up a registered model class by architecture name.

    Args:
        name: key into __REGISTERED_MODELS__ (e.g. "SSD300_VGG16").

    Raises:
        Exception: if *name* is not registered.
    """
    try:
        return __REGISTERED_MODELS__[name]
    # BUG FIX: the bare `except:` swallowed every error (including
    # KeyboardInterrupt); only a missing key should trigger the fallback,
    # and the original lookup error is chained for debuggability.
    except KeyError as err:
        raise Exception("Model {} not available".format(name)) from err
def get_model_partial(model_cfg, n_classes: int) -> partial:
    """Build a partially-applied model constructor from a config mapping.

    The config's "arch" key selects the model class; every remaining key
    is forwarded as a constructor keyword argument.
    """
    cfg: Dict = copy.deepcopy(model_cfg)
    arch_name = cfg.pop("arch")
    model_cls = _get_model_instance(arch_name)
    return partial(model_cls, n_classes=n_classes, **cfg)
def get_backbone(backbone_config) -> Backbone:
    """Instantiate a backbone from a config mapping.

    The config's "type" key selects the backbone class; every remaining
    key is forwarded as a constructor keyword argument.
    """
    cfg = copy.deepcopy(backbone_config)
    backbone_cls = __REGISTERED_BACKBONES__[cfg.pop("type")]
    return backbone_cls(**cfg)
| 1,070 | 427 |
inp = input("Enter string: ")
# BUG FIX: ord() accepts exactly one character, so the original crashed with
# TypeError for any other input length even though the prompt asks for a
# string. Map ord() over every character so arbitrary strings work.
code_points = [ord(ch) for ch in inp]
print(code_points)
| 75 | 29 |
def no_bool_bubble_sort(unsorted_list):
    """Bubble-sort *unsorted_list* of integers in place (and return it)
    without any boolean comparison: the swap decision is computed
    arithmetically.

    For each adjacent pair with difference d, ``(d // |d| + 1) // 2`` is 1
    when the pair is out of order and 0 otherwise; equal neighbours raise
    ZeroDivisionError and are skipped.

    BUG FIX: the outer loop previously ran ``range(1, len(...) - 1)``,
    one pass too few — a 2-element list was never touched and e.g.
    [3, 2, 1] came back as [2, 1, 3]. Bubble sort needs n - 1 passes.
    """
    for y in range(1, len(unsorted_list)):
        for x in range(len(unsorted_list) - y):
            try:
                # 1 -> swap needed, 0 -> already in order.
                value = \
                    (((unsorted_list[x] - unsorted_list[x + 1]) // abs(
                        unsorted_list[x + 1] - unsorted_list[x])) + 1) // 2
                unsorted_list[x], unsorted_list[x + 1] = unsorted_list[x + value], unsorted_list[x + 1 - value]
            except ZeroDivisionError:
                # Equal neighbours: nothing to swap.
                pass
    return unsorted_list
| 533 | 181 |
# https://www.codewars.com/kata/5536a85b6ed4ee5a78000035
import math
def tour(friends, friend_towns, home_to_town_distances):
    """Compute the truncated total length of the tour visiting each friend.

    For every friend (in order) that appears in a friend_towns entry, the
    straight-line distance home->town is collected; the tour goes out to
    the first town, hops between consecutive towns (right-triangle legs),
    and returns home from the last one.
    """
    legs = [home_to_town_distances[town[-1]]
            for friend in friends
            for town in friend_towns
            if friend in town]
    total = legs[0] + legs[-1]
    for prev_leg, cur_leg in zip(legs, legs[1:]):
        total += math.sqrt(cur_leg ** 2 - prev_leg ** 2)
    return int(total)
| 430 | 180 |
import numpy as np
from numba import njit
from ._api import * # noqa: F403
from .handle import mkl_h
# Public API of this module.
# NOTE(review): mult_vec is defined below but not exported here — confirm
# whether that omission is intentional.
__all__ = [
    'mult_ab',
    'mult_abt'
]
@njit(nogil=True)
def mult_ab(a_h, b_h):
    """Sparse-sparse product via MKL: presumably C = A @ B (result sized
    a_h.nrows x b_h.ncols).

    When either input handle is null, a null (0) handle is wrapped instead
    of calling MKL.
    """
    if a_h.H and b_h.H:
        h = lk_mkl_spmab(a_h.H, b_h.H)
    else:
        h = 0
    return mkl_h(h, a_h.nrows, b_h.ncols, None)
@njit(nogil=True)
def mult_abt(a_h, b_h):
    """Sparse-sparse product via MKL with B transposed: presumably
    C = A @ B.T (result sized a_h.nrows x b_h.nrows).

    When either input handle is null, a null (0) handle is wrapped instead
    of calling MKL.
    """
    if a_h.H and b_h.H:
        h = lk_mkl_spmabt(a_h.H, b_h.H)
    else:
        h = 0
    return mkl_h(h, a_h.nrows, b_h.nrows, None)
@njit(nogil=True)
def mult_vec(a_h, x):
    """Sparse matrix-vector product via MKL: y = A @ x.

    Returns an all-zero float64 vector of length a_h.nrows when the handle
    is null (the MKL call is skipped entirely).
    """
    y = np.zeros(a_h.nrows, dtype=np.float64)
    if a_h.H:
        _x = ffi.from_buffer(x)
        _y = ffi.from_buffer(y)
        # y = 1.0 * (A @ x) + 0.0 * y
        lk_mkl_spmv(1.0, a_h.H, _x, 0.0, _y)
    return y
| 733 | 398 |
# Copyright (C) 2021, A10 Networks Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from acos_client import errors as acos_errors
from acos_client.v30 import base
class LicenseRequest(base.BaseV30):
    """CRUD wrapper for the ACOS /glm/create-license-request endpoint."""

    url_prefix = '/glm/create-license-request'

    def _set(self, create_license_request):
        """Wrap the payload in the envelope the AXAPI expects."""
        params = {
            'create-license-request': create_license_request
        }
        return params

    def create(self, create_license_request=None, **kwargs):
        # BUG FIX: the caller's payload was previously discarded by passing
        # the literal None (self._set(create_license_request=None)).
        params = self._set(create_license_request)
        return self._post(self.url_prefix, params, axapi_args=kwargs)

    def update(self, create_license_request=None, **kwargs):
        # BUG FIX: same dropped-payload bug as create().
        params = self._set(create_license_request)
        return self._post(self.url_prefix, params, axapi_args=kwargs)

    def put(self, create_license_request=None, **kwargs):
        # BUG FIX: same dropped-payload bug as create().
        params = self._set(create_license_request)
        return self._put(self.url_prefix, params, axapi_args=kwargs)

    def delete(self):
        """Delete the license request resource."""
        return self._delete(self.url_prefix)
class MultiLicenseException(Exception):
    """Raised when more than one of the mutually exclusive license
    attributes (existing_org, existing_user, new_user, name) is supplied."""

    def __init__(self):
        # BUG FIX: "conjuction" typo corrected to "conjunction".
        self.message = ("Only one of the following attributes can be "
                        "used to define a new license: existing_org, "
                        "existing_user, new_user, or name. These cannot "
                        "be used in conjunction.")
        # Pass the message to Exception so str(exc) is informative
        # (it was previously empty).
        super(MultiLicenseException, self).__init__(self.message)
class NewLicense(base.BaseV30):
    """Wrapper for the ACOS /glm/new-license endpoint.

    Exactly one of existing_org, existing_user, new_user, or name may be
    used to select how the license is created; each mode has its own set
    of required companion arguments.
    """

    url_prefix = '/glm/new-license'

    def create(self, account_name=None, country=None, existing_org=None,
               glm_password=None, last_name=None, name=None, new_email=None,
               new_password=None, new_user=None, org_id=None, phone=None,
               license_type=None, existing_user=None, first_name=None,
               glm_email=None):
        """Create a new license via one of the four exclusive modes.

        Raises:
            MultiLicenseException: if more than one mode selector is given.
            acos_errors.RequiredAttributeNotSpecified: if a mode's required
                companion argument is missing.
        """
        params = {
            "new-license": {}
        }
        # Count how many of the mutually exclusive selectors were supplied.
        xor = bool(existing_org) + bool(existing_user) + bool(new_user) + bool(name)
        if xor > 1:
            raise MultiLicenseException()
        if existing_org:
            params['new-license'] = self.minimal_dict({
                'existing-org': existing_org,
                'org-id': org_id
            })
        elif existing_user:
            if not glm_email:
                raise acos_errors.RequiredAttributeNotSpecified(
                    self.url_prefix, "existing_user", ["glm_email"])
            params['new-license'] = self.minimal_dict({
                'existing-user': existing_user,
                'glm-email': glm_email,
                'glm-password': glm_password
            })
        elif new_user:
            if not new_email:
                raise acos_errors.RequiredAttributeNotSpecified(
                    self.url_prefix, "new_user", ["new_email"])
            params['new-license'] = self.minimal_dict({
                'new-user': new_user,
                'new-email': new_email,
                'new-password': new_password,
                'account-name': account_name,
                'first-name': first_name,
                'last-name': last_name,
                'country': country,
                'phone': phone
            })
        elif name:
            if not license_type:
                raise acos_errors.RequiredAttributeNotSpecified(
                    self.url_prefix, "name", ["license_type"])
            params['new-license'] = self.minimal_dict({
                'name': name,
                'type': license_type
            })
        # BUG FIX: the POST response was previously discarded (no return),
        # unlike the sibling LicenseRequest methods.
        return self._post(self.url_prefix, params)
| 4,088 | 1,191 |
from engine.gameobject import Gameobject
# Global registry of live game objects, in creation order.
objects = []
def create(obj: Gameobject):
    """Register *obj* in the global registry, then run its start() hook."""
    objects.append(obj)
    obj.start()
def delete(obj: Gameobject):
    """Recursively delete *obj*'s children, run its end() hook, then
    unregister it.

    NOTE(review): assumes every child was registered via create();
    objects.remove() raises ValueError otherwise — confirm.
    """
    for child in obj.children:
        delete(child)
    obj.end()
    objects.remove(obj)
def find(typ):
    """Return the first registered object that is an instance of *typ*,
    or None when no such object exists."""
    return next((candidate for candidate in objects if isinstance(candidate, typ)), None)
input1str = 'R998,U367,R735,U926,R23,U457,R262,D473,L353,U242,L930,U895,R321,U683,L333,U623,R105,D527,R437,D473,L100,D251,L958,U384,R655,U543,L704,D759,R529,D176,R835,U797,R453,D650,L801,U437,L468,D841,R928,D747,L803,U677,R942,D851,R265,D684,L206,U763,L566,U774,L517,U337,L86,D585,R212,U656,L799,D953,L24,U388,L465,U656,L467,U649,R658,U519,L966,D290,L979,D819,R208,D907,R941,D458,L882,U408,R539,D939,R557,D771,L448,U460,L586,U148,R678,U360,R715,U312,L12,D746,L958,U216,R275,D278,L368,U663,L60,D543,L605,D991,L369,D599,R464,D387,L835,D876,L810,U377,L521,U113,L803,U680,L732,D449,R891,D558,L25,U249,L264,U643,L544,U504,R876,U403,R950,U19,L224,D287,R28,U914,R906,U970,R335,U295,R841,D810,R891,D596,R451,D79,R924,U823,L724,U968,R342,D349,R656,U373,R864,U374,L401,D102,L730,D886,R268,D188,R621,U258,L788,U408,L199,D422,R101,U368,L636,U543,R7,U722,L533,U242,L340,D195,R158,D291,L84,U936,L570,D937,L321,U947,L707,U32,L56,U650,L427,U490,L472,U258,R694,U87,L887,U575,R826,D398,R602,U794,R855,U225,R435,U591,L58,U281,L834,D400,R89,D201,L328,U278,L494,D70,L770,D182,L251,D44,R753,U431,R573,D71,R809,U983,L159,U26,R540,U516,R5,D23,L603,U65,L260,D187,R973,U877,R110,U49,L502,D68,R32,U153,R495,D315,R720,D439,R264,D603,R717,U586,R732,D111,R997,U578,L243,U256,R147,D425,L141,U758,R451,U779,R964,D219,L151,D789,L496,D484,R627,D431,R433,D761,R355,U975,L983,U364,L200,U578,L488,U668,L48,D774,R438,D456,L819,D927,R831,D598,L437,U979,R686,U930,L454,D553,L77,D955,L98,U201,L724,U211,R501,U492,L495,U732,L511'
input2str = 'L998,U949,R912,D186,R359,D694,L878,U542,L446,D118,L927,U175,R434,U473,R147,D54,R896,U890,R300,D537,R254,D322,R758,D690,R231,U269,R288,U968,R638,U192,L732,D355,R879,U451,R336,D872,L141,D842,L126,U584,L973,D940,R890,D75,L104,U340,L821,D590,R577,U859,L948,D199,L872,D751,L368,U506,L308,U827,R181,U94,R670,U901,R739,D48,L985,D801,R722,D597,R654,D606,R183,U646,R939,U677,R32,U936,L541,D934,R316,U354,L415,D930,R572,U571,R147,D609,L534,D406,R872,D527,L816,D960,R652,D429,L402,D858,R374,D930,L81,U106,R977,U251,R917,U966,R353,U732,L613,U280,L713,D937,R481,U52,R746,U203,L500,D557,L209,U249,R89,D58,L149,U872,R331,D460,R343,D423,R392,D160,L876,U981,L399,D642,R525,U515,L537,U113,R886,D516,L301,D680,L236,U399,R460,D869,L942,D280,R669,U476,R683,D97,R199,D444,R137,D489,L704,D120,R753,D100,L737,U375,L495,D325,R48,D269,R575,U895,L184,D10,L502,D610,R618,D744,R585,U861,R695,D775,L942,U64,L819,U161,L332,U513,L461,D366,R273,D493,L197,D97,L6,U63,L564,U59,L699,U30,L68,U861,R35,U564,R540,U371,L115,D595,L412,D781,L185,D41,R207,D264,R999,D799,R421,D117,R377,D571,R268,D947,R77,D2,R712,D600,L516,U389,L868,D762,L996,U205,L178,D339,L844,D629,R67,D732,R109,D858,R630,U470,L121,D542,L751,U353,L61,U770,R952,U703,R264,D537,L569,U55,L795,U389,R836,U166,R585,U275,L734,U966,L130,D357,L260,U719,L647,D606,R547,U575,R791,U686,L597,D486,L774,U386,L163,U912,L234,D238,L948,U279,R789,U300,R117,D28,L833,U835,L340,U693,R343,D573,R882,D241,L731,U812,R600,D663,R902,U402,R831,D802,L577,U920,L947,D538,L192' #221
test0input1str = 'R8,U5,L5,D3' #6 #30
test0input2str = 'U7,R6,D4,L4'
test1input1str = 'R75,D30,R83,U83,L12,D49,R71,U7,L72' #159 #610
test1input2str = 'U62,R66,U55,R34,D71,R55,D58,R83'
test2input1str = 'R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51' #135 #410
test2input2str = 'U98,R91,D20,R16,D67,R40,U7,R15,U6,R7'
# step 0 convert string to list
# Split the comma-separated instruction strings into lists like
# ['R998', 'U367', ...].
input1 = input1str.split(',')
input2 = input2str.split(',')
# Uncomment to run against the smaller worked-example inputs instead.
#input1 = test2input1str.split(',')
#input2 = test2input2str.split(',')
# step 1 make a function to generate a list of coordinates of all points a set of instructions passes through
def wire_locs(incodes):
    """Trace a wire described by instructions like 'R8', 'U5', 'L3', 'D2'.

    Args:
        incodes: sequence of direction+length strings.

    Returns:
        list[tuple[int, int]]: every (x, y) grid point the wire visits, in
        order, starting from (but not including) the origin (0, 0).

    Raises:
        KeyError: on an unknown direction letter (the original code
        silently skipped such instructions).
    """
    # Replaces four copy-pasted branches with a single delta lookup table;
    # the length conversion is hoisted out of the step loop.
    deltas = {'R': (1, 0), 'L': (-1, 0), 'U': (0, 1), 'D': (0, -1)}
    x, y = 0, 0
    path = []
    for inst in incodes:
        dx, dy = deltas[inst[0]]
        for _ in range(int(inst[1:])):
            x += dx
            y += dy
            path.append((x, y))
    return path
# step2 find the intersection between the two paths and calculate the manhatten distance
path1 = wire_locs(input1)
path2 = wire_locs(input2)
# Grid points visited by both wires (crossings).
intersects = set(path1) & set(path2)
distances = [ abs(i[0])+abs(i[1]) for i in intersects]
distances.sort()
min_manhatten = distances[0]
print(min_manhatten)
# End Part 1
# Part 2: we have a new distance metric, the total path length
# list.index() gives the first (i.e. shortest) 0-based position of each
# crossing along a wire; +1 step per wire converts to step counts.
distances2 = [path2.index(i)+path1.index(i)+2 for i in intersects] #+2 because of the index 0
distances2.sort()
min_parttwo = distances2[0]
print(min_parttwo)
| 4,912 | 3,744 |
from django.db import models
from django.utils import timezone
from django.core.validators import MaxValueValidator, MinValueValidator
from django.core.urlresolvers import reverse
from django.conf import settings
# # # # # # # # # # # # # # # # # # # # # # # # # # #
# Level 0: base abstract and infrastructure classes #
# # # # # # # # # # # # # # # # # # # # # # # # # # #
class Base(models.Model):
    '''
    Abstract base class for elements, for bootstrapping.
    Basically the most generic metadata, used throughout.
    '''
    class Meta:
        abstract = True

    # Set once on insert; never editable afterwards.
    created_at = models.DateTimeField(
        editable=False, blank=False, auto_now_add=True)
    # Refreshed automatically on every save().
    updated_at = models.DateTimeField(
        blank=False, null=True, auto_now=True)

    def get_previous(self):
        '''
        Overloadable relative navigation.
        Object previous/next is managed here, at the model level.
        Inter-model previous/next, like between model lists, is handled in views.
        '''
        try:
            return self.get_previous_by_created_at()
        except self.DoesNotExist:
            # First object in created_at order has no predecessor.
            return None

    def get_next(self):
        '''Overloadable relative navigation. See get_previous, above.'''
        try:
            return self.get_next_by_created_at()
        except self.DoesNotExist:
            return None

    def get_class_name(self):
        '''Class name for template display.'''
        return self.__class__.__name__

    def get_absolute_url(self):
        '''Canonical instance url.'''
        # Assumes a URL pattern named after the lowercased class name that
        # takes the slug as its single argument.
        return reverse(str(self.__class__.__name__).lower(), args=(self.slug, ))

    def get_list_url(self):
        '''Canonical instance list url.'''
        return reverse('{}_list'.format(self.get_class_name().lower()))
class Common(Base):
    '''
    Slightly richer version of Base class, with descriptive elements.
    '''
    class Meta:
        abstract = True

    name = models.CharField(max_length=255, blank=True)
    # Slug is the canonical URL handle (see Base.get_absolute_url).
    slug = models.SlugField(unique=True, blank=False)

    def __str__(self):
        return '{}'.format(self.name)
class LifeMixin(models.Model):
    '''
    Breathes life, as a mixin.
    '''
    class Meta:
        abstract = True

    # (stored value, human-readable label)
    GENDERS = (
        ('f', 'Female'),
        ('m', 'Male'),
        ('none', 'None'),
        ('other', 'Other'),
    )
    gender = models.CharField(max_length=10, choices=GENDERS, blank=True)
    # species = models.CharField(max_length=255, blank=True,
    #     help_text='TODO: Use controlled vocabulary.')
    # "Life-force" score in [0.0, 1.0]; validators enforce the bounds.
    ki = models.DecimalField(
        validators=[MinValueValidator(0), MaxValueValidator(1)],
        blank=False, null=True,
        default=0.5, max_digits=4, decimal_places=3, help_text="Choose a number between 0.0 and 1.0. The default is 0.5, which represents the life-force of Joe the Plumber. 0.0 is empty space, somewhere past Pluto. 1.0 is God himself. See wiki/ki for more information.")  # TODO: wiki/ki
# class CompoundMixin(models.Model):
# '''
# Abstract base class for groups of elements.
# '''
# class Meta:
# abstract = True
# members = []
# # I'm not entirely sure what I want to do with this yet, since fields need
# # to be defined in each subclass instead of overridden. This makes things
# # more complex than I like, but probably OK. In the meantime, I'll
# # leave this here and give it methods soon, hopefully generic to work
# # for all subclasses.
class AspectMixin(models.Model):
    '''
    A type of representation, like an angle or perspective, usually for Picture or Plan.
    '''
    class Meta:
        abstract = True

    CHOICES = (
        ('primary', 'Primary'),
        ('secondary', 'Secondary'),
        ('front', 'Front'),
        ('back', 'Back'),
        ('left', 'Left Side'),
        ('right', 'Right Side'),
        ('top', 'Top'),
        ('bottom', 'Bottom'),
        ('internal', 'Internal'),
        ('external', 'External'),
    )
    # NOTE(review): unique=True means each concrete model can hold at most
    # one row per aspect value table-wide — confirm that is intended.
    aspect = models.CharField(
        blank=False,
        max_length=10, unique=True, choices=CHOICES, default='primary')
    caption = models.TextField(blank=True)

    # def alt_text(self):  # TODO
    #     return ??
# # # # # # # # # #
# Utility tables #
# # # # # # # # # #
class Relation(Base):
    '''
    Relationships between elements.
    '''
    # when ambiguity exists, relations should be of the form:
    # 1. source (subject, inferior, child, or branch)
    # 2. predicate
    # 3. target (direct object, superior, parent, or trunk)
    PREDICATES = (
        ('related', 'is related to'),
        ('attract', 'is attracted to'),
        ('cause', 'is caused by'),
        ('child', 'is child of'),
        ('heir', 'is heir of'),
        ('control', 'is controlled by'),
        ('friend', 'is friend of'),
        ('inside', 'is inside of'),
        ('mate', 'is mate of'),
        ('own', 'is owned by'),
        ('part', 'is part of'),
        ('result', 'is result of'),
        ('subject', 'is subject of'),
        ('type', 'is type of'),
    )
    source = models.ForeignKey("self", related_name="sources",
                               blank=False, null=True)
    predicate = models.CharField(
        blank=False, max_length=10, choices=PREDICATES, default='related')
    target = models.ForeignKey("self", related_name="targets",
                               blank=False, null=True)

    def __str__(self):
        # BUG FIX: previously referenced the bare names source/predicate/
        # target, which raised NameError whenever __str__ was called.
        return '{} {} {}'.format(self.source, self.predicate, self.target)

    def get_absolute_url(self):
        return reverse('relation',
                       args=(self.source, self.predicate, self.target))
class Location(Base):
    '''
    A set of geographic and temporal coordinates for an item.
    '''
    # TODO: set unique for geographic location, and/or time
    POSITIONS = (
        ('absolute', 'Absolute'),
        ('relative', 'Relative')
    )
    position = models.CharField(max_length=10, choices=POSITIONS, blank=False, default='relative', help_text='When in doubt, leaves as "Relative". "Absolute" positions establish a new reference point for sublocations: they are always relative to the ABSOLUTE_LOCATION in settings. "Relative" positions are relative to their nearest "Absolute" parent, otherwise they are also relative to ABSOLUTE_LOCATION. See: wiki/position')  # TODO: set REFERENCE_LOCATION
    # NOTE(review): longitude default=90 / latitude default=0 look swapped
    # for typical ranges (lon ±180, lat ±90) — confirm intent.
    longitude = models.DecimalField(max_digits=9, decimal_places=6,
                                    blank=False, null=False, default=90,
                                    help_text="In decimal.")
    latitude = models.DecimalField(max_digits=9, decimal_places=6,
                                   blank=False, null=False, default=0,
                                   help_text="In decimal.")
    altitude = models.DecimalField(max_digits=9, decimal_places=3,
                                   blank=False, null=False, default=0,
                                   help_text="In meters above sea level.")
    time = models.DateTimeField(blank=False, null=False, default=timezone.now,
                                help_text="Time begins anew in the year 7000.")
    # TODO: set default to D.time
    # approximate = TODO
    sublocations = models.ManyToManyField("self", blank=True, help_text="The main location indicates the reference point (e.g. the center); if sublocations are relative, they are to this point.")

    def __str__(self):
        if self.position == 'absolute':
            pos = 'a'
        else:
            pos = 'r'
        # BUG FIX: the format arguments were out of order (time was printed
        # under the "long:" label, shifting every other value by one).
        return '{} long:{} lat:{} alt:{} @ time:{} '.format(pos, self.longitude, self.latitude, self.altitude, self.time)

    def get_absolute_url(self):
        return reverse('location',
                       args=(self.longitude, self.latitude, self.altitude, self.time ))
class Keyword(Common):
    '''
    A grass-roots means of classifying something.
    '''
    # Self-referential M2M: a keyword can nest other keywords, forming a
    # browsable category hierarchy.
    subkeywords = models.ManyToManyField("self", blank=True, help_text="Allows a structured category hierarchy for classification browsing.")
class Property(Common):
    '''
    A characteristic or attribute of something.
    '''
    # No fields of its own; everything is inherited from Common.
    class Meta:
        # Default pluralization would be "propertys".
        verbose_name_plural = 'properties'
class Picture(AspectMixin, Common):
    '''
    An "a posteriori" representation of something, usually raster, usually graphical. Contrast with 'Plan'.
    '''
    # Pixel dimensions; populated automatically by the ImageField below each
    # time an image is assigned/saved.
    width = models.PositiveIntegerField(help_text="In pixels.",
                                        blank=False, null=True)
    height = models.PositiveIntegerField(help_text="In pixels.",
                                         blank=False, null=True)
    # Bug fix: width_field/height_field take the *names* of model fields
    # (strings), not the field objects themselves; passing the objects breaks
    # Django's dimension caching when the image is saved.
    image = models.ImageField(width_field='width', height_field='height',
                              blank=False, null=True)
    # def image_tag(self): # TODO
    #     return '<img src="{}" width="{}" height="{}" />'.format(self.url, self.width, self.height)
class Plan(AspectMixin, Common):
    '''
    An "a priori" representation of something, usually vector, usually graphical. Contrast with 'Picture'.
    '''
    # The uploaded plan document; required on forms but nullable in the DB.
    file = models.FileField(blank=False, null=True)
    def get_absolute_url(self):
        # URL is keyed on the slug field (inherited -- presumably from Common;
        # confirm, the field is not visible in this class).
        return reverse('plan', args=(self.slug, ))
# # # # # # # # # # # # #
# Level 1: Basic Items #
# # # # # # # # # # # # #
class Item(Common):
    '''
    The abstract attributes in "Common", but with access to subsequent models like picture and plan.
    '''
    class Meta:
        abstract = True
    # NOTE(review): through='Relation' on a self-referential M2M of an
    # abstract model -- Relation's own FKs point at Relation ("self"), not at
    # the concrete Item subclasses; confirm this resolves as intended.
    relations = models.ManyToManyField("self", through='Relation', symmetrical=False)
    locations = models.ManyToManyField(Location, blank=True)
    keywords = models.ManyToManyField(Keyword, blank=True)
    properties = models.ManyToManyField(Property, blank=True)
    pictures = models.ManyToManyField(Picture, blank=True)
    plans = models.ManyToManyField(Plan, blank=True)
    # Bug fix: this was a PositiveIntegerField, but the help text documents
    # negative scales (-2=XS, -1=S); IntegerField accepts the full documented
    # range and remains backward-compatible with all existing values.
    scale = models.IntegerField(default=0, blank=True, null=True,
        help_text='The magnitude of a thing, in whole numbers. 0 is average/medium/normal/default/human-sized. e.g.: -2=XS, -1=S, 0=M, 1=L, 2=XL, 3=2XL and so on.')
class Card(Item):
    '''
    A sequential storytelling and organizing device, inspired by the index cards of writers.
    '''
    # Supported markup formats for 'text'.
    TEXT_FORMATS = (
        ('md', 'Markdown (Git)'),
        ('html', 'HTML'),
    )
    text = models.TextField(blank=True, null=True, help_text="This is a container for long-form prose, or whatever other type of content this card should have.")
    text_format = models.CharField(max_length=10, choices=TEXT_FORMATS, blank=False, default='md')
    # NOTE(review): no blank/null/default, so sort_order is required on every
    # card -- confirm that is intended rather than defaulting to 0.
    sort_order = models.DecimalField(max_digits=12, decimal_places=6, help_text="Order in which this card appears (at its scale). Lower numbers come first; negative numbers OK. To slip a card between two other cards, use a decimal.")
class Event(Item):
    '''
    Basic Event.
    '''
    # No fields of its own yet; inherits everything from Item.
    # duration = TODO
class Thing(Item):
    '''
    Basic Thing.
    '''
    # Physical measurements; all optional (blank=True, null=True).
    mass = models.DecimalField(max_digits=12, decimal_places=3,
        help_text="In kilograms.",
        blank=True, null=True)
    height = models.DecimalField(max_digits=12, decimal_places=3,
        help_text="In meters.",
        blank=True, null=True)
    width = models.DecimalField(max_digits=12, decimal_places=3,
        help_text="In meters.",
        blank=True, null=True)
    length = models.DecimalField(max_digits=12, decimal_places=3,
        help_text="In meters.",
        blank=True, null=True)
    # heading = models.DecimalField(max_digits=4, decimal_places=3,
    #     blank=True, null=True,
    #     help_text="In radians. The angle between the direction the item is pointing and true North.")
    # approximation TODO here?
class Place(Item):
    '''
    Basic Place.
    '''
    # Primary location, distinct from the inherited many-to-many 'locations'.
    # NOTE(review): no on_delete argument -- implicit CASCADE on Django < 2.0,
    # a TypeError on Django >= 2.0; confirm target Django version.
    location = models.ForeignKey(Location, related_name="places_primary",
        blank=True, null=True)
    # TODO make self.location one of self.locations,
    # and self.locations a superset of self.location
# # # # # # # # # # # # # #
# Level 2: Complex Items #
# # # # # # # # # # # # # #
class Person(LifeMixin, Thing):
    '''
    A human being.
    '''
    class Meta:
        verbose_name_plural = 'people'
    # Given name; the inherited 'name' field is repurposed as the family name
    # (see __init__ below).
    name_secondary = models.CharField(verbose_name='Given Name', max_length=255,
        blank=True)
    def __init__(self, *args, **kwargs):
        # Relabel the inherited 'name' field as a required family name.
        # NOTE(review): mutating self._meta field objects alters *class-level*
        # state shared by all instances (and siblings inheriting the field) --
        # confirm this is intentional vs. a field override on the subclass.
        self._meta.get_field('name').blank = False
        self._meta.get_field('name').verbose_name = 'Family Name'
        # self._meta.get_field('species').default = 'homo sapiens'
        # self._meta.get_field('mass').default = 75
        # self._meta.get_field('height').default = 1.75
        # self._meta.get_field('gender').default = 'female'
        super(Person, self).__init__(*args, **kwargs)
    def name_full(self):
        # "Family, Given" when both parts exist; family name alone otherwise;
        # a placeholder when neither is set.
        if self.name_secondary and self.name:
            return '{}, {}'.format(self.name, self.name_secondary)
        elif self.name:
            return '{}'.format(self.name)
        else:
            return 'No Name'
    def age(self): # TODO: derive from birth data (LifeMixin?) -- not implemented
        pass
    def __str__(self):
        return self.name_full()
# class Collection(CompoundMixin, Thing):
# '''
# A group of things.
# '''
# class Corpus(CompoundMixin, Person):
# '''
# A group of people. Used to be called "Group", but it turns out that's a built-in Django class.
# '''
# class Memory(Thing):
# '''
# Something a living thing takes with them.
# '''
# life = models.ForeignKey(Life, related_name="memories")
#
# def get_absolute_url(self):
# return reverse('memory', args=(self.slug, ))
# class Plant(Life):
# '''
# A plant (flora).
# '''
# pass
#
# def get_absolute_url(self):
# return reverse('memory', args=(self.slug, ))
# class Animal(Life):
# '''
# An animal (fauna).
# '''
# pass
#
# def get_absolute_url(self):
# return reverse('animal', args=(self.slug, ))
# class Group(Collectable, Person):
# '''
# An organization, class, tribe or family of human beings.
# '''
# # cls = Person
# members = models.ManyToManyField(Person, related_name="groups")
#
# def get_absolute_url(self):
# return reverse('group', args=(self.slug, ))
| 13,757 | 4,252 |
"""Interactive CLI: authenticate against AWS Cognito (USER_PASSWORD_AUTH)
and update a single user attribute with the issued access token.

Expects REGION_NAME and CLIENT_ID in a .env file next to this script.
"""
import os
import sys

import boto3
from getpass import getpass
from dotenv import load_dotenv

# Load REGION_NAME / CLIENT_ID from the adjacent .env file.
dotenv_path = os.path.join(os.path.dirname(__file__), ".env")
load_dotenv(dotenv_path)
client = boto3.client("cognito-idp", region_name=os.getenv("REGION_NAME"))
username = input("[*] Enter Your Email Address: ")
password = getpass("[*] Enter Your Password: ")
response = client.initiate_auth(
    ClientId=os.getenv("CLIENT_ID"),
    AuthFlow="USER_PASSWORD_AUTH",
    AuthParameters={"USERNAME": username, "PASSWORD": password},
)
# Robustness fix: initiate_auth may return a challenge (e.g. SMS_MFA,
# NEW_PASSWORD_REQUIRED) instead of tokens; the original code raised a bare
# KeyError here. Fail with an explicit message instead.
auth_result = response.get("AuthenticationResult")
if auth_result is None:
    challenge = response.get("ChallengeName", "unknown")
    sys.exit("[!] Authentication returned a challenge ({}); cannot continue.".format(challenge))
access_token = auth_result["AccessToken"]
print("[*] Successful issuance of Access Token")
attribute_name = input("[*] Enter Attribute Name: ")
attribute_value = input("[*] Enter Attribute Value: ")
response = client.update_user_attributes(
    UserAttributes=[
        {
            'Name': attribute_name,
            'Value': attribute_value
        },
    ],
    AccessToken=access_token,
)
print(response)
| 953 | 297 |
from rest_framework import serializers
from .models import VirtualNetwork
class NetworkSerializer(serializers.ModelSerializer):
    # Serializes VirtualNetwork instances for the REST API.
    class Meta:
        model = VirtualNetwork
        # Expose every model field except the primary key and the
        # internal/ownership columns.
        exclude = ['id', 'deactivated', 'account']