text string | size int64 | token_count int64 |
|---|---|---|
"""Linguistic data for statistical pre-processing.
Frequency statistics, as seen in data/, are provided by:
a) Mark Mayzner, 1965:
------------------------------------------------------------------------------
METHODOLOGY: Starting at a random place in a given newspaper/magazine/book,
record three- to seven-letter words until 200 words are selected. Repeat 100x.
SAMPLE SIZE: 20,000
https://archive.is/wip/u9vOA (as seen in tables: https://archive.is/BJEQt)
------------------------------------------------------------------------------
b) Peter Norvig, 2012:
------------------------------------------------------------------------------
METHODOLOGY: Using the Google Books Ngrams dataset (English 20120701), perform
the analysis on the entire corpus, with no sample size or length restrictions.
Discard any word with fewer than 100,000 mentions.
Mayzner had actually reached out to Norvig requesting this update to his work!
SAMPLE SIZE: 743,842,922,321 (unique: 97,565)
https://archive.is/wip/SHwcy
Data available by substituting {a-z} for the final character in the URL:
https://storage.googleapis.com/books/ngrams/books/googlebooks-eng-all-1gram-20120701-a.gz
------------------------------------------------------------------------------
"""
import string
# Immutable tuple of the 26 lowercase ASCII letters ('a' .. 'z'), used as the
# key sequence for the letter-frequency tables described in the module docstring.
LETTERS = tuple(string.ascii_lowercase)
| 1,316 | 402 |
from keras import backend as K
import numpy as np
def Active_Contour_Loss(y_true, y_pred):
    """Active-contour segmentation loss (length term + region term).

    Implements the loss of Chen et al., "Learning Active Contour Models for
    Medical Image Segmentation" (CVPR 2019): a contour-length penalty,
    equ. (11), plus inside/outside region energies, equ. (12).

    Parameters
    ----------
    y_true, y_pred : Keras tensors
        Assumed layout is (batch, channel, H, W) with the mask in channel 0
        -- TODO confirm against the model's data format.

    Returns
    -------
    Scalar Keras tensor with the combined loss.
    """
    # --- length term, equ. (11) ---
    # Finite differences along the two spatial axes of the prediction.
    x = y_pred[:, :, 1:, :] - y_pred[:, :, :-1, :]
    y = y_pred[:, :, :, 1:] - y_pred[:, :, :, :-1]
    delta_x = x[:, :, 1:, :-2] ** 2
    delta_y = y[:, :, :-2, 1:] ** 2
    delta_u = K.abs(delta_x + delta_y)
    # Epsilon keeps sqrt differentiable at zero gradient magnitude.
    length = K.mean(K.sqrt(delta_u + 1e-8))
    # --- region term, equ. (12) ---
    # GENERALIZATION: the original compared against hard-coded
    # np.ones((256, 256)) / np.zeros((256, 256)) masks, which only worked for
    # 256x256 images.  Subtracting the scalar constants c1=1 and c2=0 is
    # numerically identical and works for any spatial size.
    region_in = K.abs(K.mean(y_pred[:, 0, :, :] * ((y_true[:, 0, :, :] - 1) ** 2)))
    region_out = K.abs(K.mean((1 - y_pred[:, 0, :, :]) * (y_true[:, 0, :, :] ** 2)))
    lambdaP = 1  # weight of the whole region term (tunable)
    mu = 1       # weight of the inside-region energy (tunable)
    return length + lambdaP * (mu * region_in + region_out)
| 846 | 428 |
# -*- coding: utf-8 -*-
# Imports
import pandas as pd
from .pd_utils import load_csv_or_excel
from .pd_utils import load_experiment_results
from .pd_utils import to_torch
from .math_utils import standard
# Objective function class
class objective:
    """Objective function data container and operations.

    Note
    ----
    Objective internally standardizes response values to zero mean and unit
    variance.
    """
    def __init__(self,
                 results_path=None, results=None,
                 domain_path=None, domain=None,
                 exindex_path=None, exindex=None,
                 target=-1, gpu=False, computational_objective=None):
        """
        Parameters
        ----------
        results_path : str, optional
            Path to experimental results.
        results : pandas.DataFrame, optional
            Experimental results with X values matching the domain.
            Defaults to an empty DataFrame.
        domain_path : str, optional
            Path to experimental domain.
            Note
            ----
            A domain_path or domain are required.
        domain : pandas.DataFrame, optional
            Experimental domain specified as a matrix of possible
            configurations. Defaults to an empty DataFrame.
        exindex_path : str, optional
            Path to experiment results index if available.
        exindex : pandas.DataFrame, optional
            Experiment results index matching domain format. Used as lookup
            table for simulations. Defaults to an empty DataFrame.
        target : str
            Column label of optimization objective. If set to -1, the last
            column of the DataFrame will be set as the target.
        gpu : bool
            Carry out GPyTorch computations on a GPU if available.
        computational_objective : function, optional
            Function to be optimized for computational objectives.
        """
        # Replace the original mutable pd.DataFrame() default arguments with
        # None sentinels so a single shared frame is never mutated across
        # instances; behavior for callers is unchanged.
        if results is None:
            results = pd.DataFrame()
        if domain is None:
            domain = pd.DataFrame()
        if exindex is None:
            exindex = pd.DataFrame()
        # Initialize
        self.results_path = results_path
        self.results = results
        self.domain_path = domain_path
        self.domain = domain
        self.exindex_path = exindex_path
        self.exindex = exindex
        self.target = target
        self.gpu = gpu
        self.computational_objective = computational_objective
        # Load domain
        if domain_path is not None:
            self.domain = load_csv_or_excel(self.domain_path)
            # BUG FIX: reset_index returns a new frame; the original call
            # discarded the result, leaving the index untouched.
            self.domain = self.domain.reset_index(drop=True)
        # Load results: an in-memory frame wins over a results_path.
        if isinstance(self.results, pd.DataFrame) and len(self.results) > 0:
            if target == -1:
                self.target = self.results.columns.values[-1]
        elif results_path is not None:
            self.results = load_experiment_results(self.results_path)
            if target == -1:
                self.target = self.results.columns.values[-1]
        # Load experiment index
        if exindex_path is not None:
            self.exindex = load_csv_or_excel(exindex_path)
            if target == -1:
                self.target = self.exindex.columns.values[-1]
        if isinstance(exindex, pd.DataFrame) and len(exindex) > 0:
            if target == -1:
                self.target = exindex.columns.values[-1]
        # Standardize targets (0 mean and unit variance)
        self.scaler = standard()
        self.results = self.scaler.standardize_target(self.results, self.target)
        # Torch tensors and labeled external data
        if len(self.results) > 0:
            self.X = to_torch(self.results.drop(self.target, axis=1), gpu=gpu)
            self.y = to_torch(self.results[self.target], gpu=gpu).view(-1)
            # Tag pre-loaded rows so they can be told apart from points
            # proposed later during optimization.
            index = ['external' + str(i) for i in range(len(self.results))]
            self.results = pd.DataFrame(self.results.values,
                                        columns=self.results.columns,
                                        index=index)
        else:
            self.X = to_torch([], gpu=gpu)
            self.y = to_torch([], gpu=gpu)

    def _append_batch(self, batch):
        """Append *batch* to the known results and refresh X / y tensors.

        Results are unstandardized, concatenated with the new batch, then
        re-standardized so the scaler always reflects the full data set.
        """
        unscaled = self.scaler.unstandardize_target(self.results, self.target)
        data = pd.concat([unscaled, batch])
        self.results = self.scaler.standardize_target(data, self.target)
        self.X = to_torch(self.results.drop(self.target, axis=1), gpu=self.gpu)
        self.y = to_torch(self.results[self.target], gpu=self.gpu).view(-1)

    def get_results(self, domain_points, append=False):
        """Returns target values corresponding to domain_points.

        Parameters
        ----------
        domain_points : pandas.DataFrame
            Points from experiment index to retrieve responses for. If the
            objective is a computational function, run function and return
            responses.
        append : bool
            If true append points to results and update X and y.

        Returns
        ----------
        pandas.DataFrame
            Proposed experiments with their responses.
        """
        # Computational objective: evaluate the function on each point.
        if self.computational_objective is not None:
            new_results = [self.computational_objective(point)
                           for point in domain_points.values]
            batch = domain_points.copy()
            batch[self.target] = new_results
            if append:
                self._append_batch(batch)
            return batch
        # Human-in-the-loop objective: look responses up in the index.
        # Also guard against an empty index, which previously crashed below.
        if self.exindex is None or len(self.exindex) == 0:
            print("edbo bot: Error no experiment index")
            return None
        # Retrieve domain points from index via an inner join on X columns.
        index = self.exindex.drop(self.target, axis=1)
        union_index = pd.merge(
            index.reset_index(),
            domain_points,
            how='inner'
        )['index']
        batch = self.exindex.iloc[list(union_index)]
        if append:
            self._append_batch(batch)
        return batch

    def clear_results(self):
        """Clear results and reset X and y.

        Returns
        ----------
        None
        """
        self.results = pd.DataFrame()
        self.X = to_torch([], gpu=self.gpu)
        self.y = to_torch([], gpu=self.gpu)

    def results_input(self):
        """Return unstandardized results.

        Returns
        ----------
        pandas.DataFrame
            Unstandardized results.
        """
        if len(self.results) == 0:
            return self.results
        return self.scaler.unstandardize_target(self.results, self.target)
| 7,584 | 1,960 |
import osmnx as ox
import networkx as nx
def gdf_to_nx(gdf_network):
    """Build an undirected NetworkX graph from a GeoDataFrame of LineStrings.

    Each row becomes one edge whose endpoints are the first and last
    coordinates of its geometry; every column of the row is copied onto the
    edge as an attribute.  The GeoDataFrame's CRS is stored on the graph.
    """
    graph = nx.Graph()
    graph.graph['crs'] = gdf_network.crs
    columns = list(gdf_network.columns)
    for _, edge_row in gdf_network.iterrows():
        coords = edge_row.geometry.coords
        edge_attributes = {column: edge_row[column] for column in columns}
        graph.add_edge(coords[0], coords[-1], **edge_attributes)
    return graph
def add_traveltime_colors(G, center_node, mode):
    """Annotate G's edges with travel times and isochrone colors around center_node.

    Edges get a 'time' attribute (minutes) and a 'color' attribute taken from
    a plasma colormap, one color per 5-minute isochrone band (5..25 min).
    Assumes G is an osmnx MultiDiGraph (edges are keyed) -- TODO confirm.

    :param G: street network graph
    :param center_node: node the isochrones are centered on
    :param mode: one of "drive" (use per-road speed limits), "bike" (15 km/h)
                 or "walk" (5 km/h)
    :return: (annotated graph, '') on success, or (None, error message) for
             an unknown mode
    """
    travel_speed = 0
    useRoadSpeed = False
    if mode == 'drive':
        useRoadSpeed = True
    elif mode == 'bike':
        travel_speed = 15
    elif mode == 'walk':
        travel_speed = 5
    else:
        return (None, 'Invalid mode. Try one of "bike", "drive", or "walk".')
    trip_times = [5, 10, 15, 20, 25]  # minutes
    # add an edge attribute for travel time in minutes required to traverse each edge
    if useRoadSpeed:
        G = ox.add_edge_speeds(G)
    else:
        # set constant travel speed for all edges
        nx.set_edge_attributes(G, travel_speed, 'speed_kph')
    G = ox.add_edge_travel_times(G)  # computes travel time in seconds
    travel_times = nx.get_edge_attributes(G, "travel_time")
    for u, v, k, data in G.edges(data=True, keys=True):
        data['time'] = travel_times[(u, v, k)] / 60  # convert to min
    # get one color for each isochrone
    iso_colors = ox.plot.get_colors(n=len(trip_times), cmap='plasma', start=0, return_hex=True)
    # color the edges based on subgraph
    # Iterate largest trip time first so smaller (closer) isochrones
    # overwrite the color of edges they share with larger ones.
    edge_colors = {}
    for trip_time, color in zip(sorted(trip_times, reverse=True), iso_colors):
        subgraph = nx.ego_graph(G, center_node, radius=trip_time, distance='time')
        for edge in subgraph.edges:
            edge_colors[edge] = color
    nx.set_edge_attributes(G, edge_colors, 'color')
    # project graph back to the standard crs
    # NOTE(review): ox.project_graph expects a CRS identifier; confirm that
    # the string 'WGS84' resolves via pyproj ('epsg:4326' is the usual form).
    G = ox.project_graph(G, 'WGS84')
    return (G, '')
#!/usr/bin/env python3
from time import time
def fib_sum(limit):
    """Sum the even-valued Fibonacci numbers that do not exceed *limit*.

    Uses the 1, 2, 3, 5, 8, ... convention (Project Euler #2), so the first
    even candidate is 2.

    BUG FIX: the original loop tested ``prev2 < limit`` while summing
    ``prev1``, so a term larger than the limit could be included (e.g. 8 was
    counted for limit=7).  The bound is now checked on the value being added.

    :param limit: inclusive upper bound for terms included in the sum
    :return: sum of even Fibonacci numbers <= limit
    """
    prev2 = 1  # F(n-1)
    prev1 = 2  # F(n), the candidate term
    total = 0
    while prev1 <= limit:
        # Every third Fibonacci number is even; a simple parity test keeps
        # the code clear and the loop is only O(log limit) iterations anyway.
        if prev1 % 2 == 0:
            total += prev1
        prev1, prev2 = prev1 + prev2, prev1
    return total
if __name__ == '__main__':
    # Time the computation and report both the answer and the elapsed time.
    started = time()
    answer = fib_sum(4e6)
    duration = time() - started
    print('Solution: {}'.format(answer))
    print('Solution found in {:.8f}s'.format(duration))
| 664 | 231 |
from tkinter import filedialog, ttk, messagebox
from tkinter import *
import traceback
import requests
import zipfile
import json
import os
from source.Game import Game, RomAlreadyPatched, InvalidGamePath, InvalidFormat, in_thread, VERSION_FILE_URL
from source.Option import Option
from source.definition import get_version_from_string
with open("./translation.json", encoding="utf-8") as f:
translation_dict = json.load(f)
class Gui:
    """Tkinter front-end of the MKW mod installer.

    Builds the main window, the menu bar and the action widgets, and wires
    them to the Game / Option objects loaded from the JSON configuration
    files sitting next to the executable.  User-facing strings double as
    lookup keys into translation.json, so they must not be altered.
    """
    def __init__(self):
        """
        Initialize program Gui
        """
        self.root = Tk()
        # Persistent user preferences and the game/mod state.
        self.option = Option()
        self.option.load_from_file("./option.json")
        self.game = Game(gui=self)
        self.game.ctconfig.load_ctconfig_file("./ct_config.json")
        # Sort known mod versions so the newest one ends up last.
        self.game.ctconfig.all_version.sort(key=get_version_from_string)
        latest_version: str = self.game.ctconfig.all_version[-1]
        self.is_dev_version = False  # Is this installer version a dev ?
        # Tk variables mirroring the saved options; the menu widgets below bind to them.
        self.stringvar_language = StringVar(value=self.option.language)
        self.stringvar_game_format = StringVar(value=self.option.format)
        self.boolvar_disable_download = BooleanVar(value=self.option.disable_download)
        self.boolvar_del_track_after_conv = BooleanVar(value=self.option.del_track_after_conv)
        self.boolvar_dont_check_for_update = BooleanVar(value=self.option.dont_check_for_update)
        self.intvar_process_track = IntVar(value=self.option.process_track)
        self.boolvar_use_1star_track = BooleanVar(value=True)
        self.boolvar_use_2star_track = BooleanVar(value=True)
        self.boolvar_use_3star_track = BooleanVar(value=True)
        self.stringvar_mark_track_from_version = StringVar(value=latest_version)
        # Main window setup.
        self.root.title(self.translate("MKWFaraphel Installer"))
        self.root.resizable(False, False)
        self.root.iconbitmap(bitmap="./icon.ico")
        if not(self.boolvar_dont_check_for_update.get()): self.check_update()
        # --- menu bar ---
        self.menu_bar = Menu(self.root)
        self.root.config(menu=self.menu_bar)
        # Language menu (changing language needs a restart, hence need_restart=True).
        self.menu_language = Menu(self.menu_bar, tearoff=0)
        self.menu_bar.add_cascade(label=self.translate("Language"), menu=self.menu_language)
        self.menu_language.add_radiobutton(label="Français", variable=self.stringvar_language, value="fr", command=lambda: self.option.edit("language", "fr", need_restart=True))
        self.menu_language.add_radiobutton(label="English", variable=self.stringvar_language, value="en", command=lambda: self.option.edit("language", "en", need_restart=True))
        # Output game format menu (FST directory or a disc image format).
        self.menu_format = Menu(self.menu_bar, tearoff=0)
        self.menu_bar.add_cascade(label=self.translate("Format"), menu=self.menu_format)
        self.menu_format.add_radiobutton(label=self.translate("FST (Directory)"), variable=self.stringvar_game_format, value="FST", command=lambda: self.option.edit("format", "FST"))
        self.menu_format.add_radiobutton(label="ISO", variable=self.stringvar_game_format, value="ISO", command=lambda: self.option.edit("format", "ISO"))
        self.menu_format.add_radiobutton(label="CISO", variable=self.stringvar_game_format, value="CISO", command=lambda: self.option.edit("format", "CISO"))
        self.menu_format.add_radiobutton(label="WBFS", variable=self.stringvar_game_format, value="WBFS", command=lambda: self.option.edit("format", "WBFS"))
        # Track selection menu: filter tracks by star rating and mark by version.
        self.menu_trackselection = Menu(self.menu_bar, tearoff=0)
        self.menu_bar.add_cascade(label=self.translate("Track selection"), menu=self.menu_trackselection)
        self.menu_trackselection.add_checkbutton(label=self.translate("Select"," 1 ","star"), variable=self.boolvar_use_1star_track)
        self.menu_trackselection.add_checkbutton(label=self.translate("Select"," 2 ","stars"), variable=self.boolvar_use_2star_track)
        self.menu_trackselection.add_checkbutton(label=self.translate("Select"," 3 ","stars"), variable=self.boolvar_use_3star_track)
        self.menu_trackselection.add_separator()
        self.menu_marktrackversion = Menu(self.menu_trackselection, tearoff=0)
        self.menu_trackselection.add_cascade(label=self.translate("Mark all tracks from version"), menu=self.menu_marktrackversion)
        self.menu_marktrackversion.add_radiobutton(label=self.translate("None"), variable=self.stringvar_mark_track_from_version, value="None")
        # One radio button per known mod version.
        for version in self.game.ctconfig.all_version:
            self.menu_marktrackversion.add_radiobutton(label=f"v{version}", variable=self.stringvar_mark_track_from_version, value=version)
        # Advanced menu: download/conversion/update toggles and process count.
        self.menu_advanced = Menu(self.menu_bar, tearoff=0)
        self.menu_bar.add_cascade(label=self.translate("Advanced"), menu=self.menu_advanced)
        self.menu_advanced.add_checkbutton(label=self.translate("Disable downloads"), variable=self.boolvar_disable_download, command=lambda: self.option.edit("disable_download", self.boolvar_disable_download))
        self.menu_advanced.add_checkbutton(label=self.translate("Delete track after wu8 to szs conversion"), variable=self.boolvar_del_track_after_conv, command=lambda: self.option.edit("del_track_after_conv", self.boolvar_del_track_after_conv))
        self.menu_advanced.add_checkbutton(label=self.translate("Don't check for update"), variable=self.boolvar_dont_check_for_update, command=lambda: self.option.edit("dont_check_for_update", self.boolvar_dont_check_for_update))
        self.menu_advanced.add_separator()
        self.menu_trackconvprocess = Menu(self.menu_advanced, tearoff=0)
        self.menu_advanced.add_cascade(label=self.translate("Number of track conversion process"), menu=self.menu_trackconvprocess)
        # Allow 1..8 parallel track-conversion processes.
        for cpu in range(1, 9):
            self.menu_trackconvprocess.add_radiobutton(label=self.translate(str(cpu), " ", "process"), variable=self.intvar_process_track, value=cpu, command=lambda: self.option.edit("process_track", self.intvar_process_track))
        # --- main window frames ---
        self.frame_language = Frame(self.root)
        self.frame_language.grid(row=1, column=1, sticky="E")
        # Original game selection: path entry + file browser button.
        self.frame_game_path = LabelFrame(self.root, text=self.translate("Original game"))
        self.frame_game_path.grid(row=2, column=1)
        entry_game_path = Entry(self.frame_game_path, width=50)
        entry_game_path.grid(row=1, column=1, sticky="NEWS")
        def select_path():
            # Open a file dialog and copy the chosen path into the entry.
            path = filedialog.askopenfilename(filetypes=((self.translate("Wii game"),
                                                          r"*.iso *.wbfs main.dol *.wia *.ciso"),))
            if os.path.exists(path):
                entry_game_path.delete(0, END)
                entry_game_path.insert(0, path)
        Button(self.frame_game_path, text="...", relief=RIDGE, command=select_path).grid(row=1, column=2, sticky="NEWS")
        self.frame_game_path_action = Frame(self.frame_game_path)  # Extract and do everything button
        self.frame_game_path_action.grid(row=2, column=1, columnspan=2, sticky="NEWS")
        self.frame_game_path_action.columnconfigure(1, weight=1)
        # Threaded wrapper so extraction does not freeze the Tk event loop;
        # the nothread_* variant is reused synchronously by do_everything().
        @in_thread
        def use_path(): nothread_use_path()
        def nothread_use_path():
            # Extract the selected game; show the action frame only on success.
            self.frame_action.grid_forget()
            try:
                self.game.set_path(entry_game_path.get())
                self.progress(show=True, indeter=True, statut=self.translate("Extracting the game..."))
                self.game.extract()
                self.frame_action.grid(row=3, column=1, sticky="NEWS")
            except RomAlreadyPatched:
                messagebox.showerror(self.translate("Error"), self.translate("This game is already modded"))
                raise RomAlreadyPatched
            except InvalidGamePath:
                # NOTE(review): "The file path in invalid" looks like a typo,
                # but the string is also a translation.json key -- fix both together.
                messagebox.showerror(self.translate("Error"), self.translate("The file path in invalid"))
                raise InvalidGamePath
            except InvalidFormat:
                messagebox.showerror(self.translate("Error"), self.translate("This game's format is invalid"))
                raise InvalidFormat
            except:
                # Unknown failure: log it, then re-raise to abort the worker thread.
                self.log_error()
                raise Exception
            finally:
                self.progress(show=False)
        self.button_game_extract = Button(self.frame_game_path_action, text=self.translate("Extract file"),
                                          relief=RIDGE, command=use_path)
        self.button_game_extract.grid(row=1, column=1, sticky="NEWS")
        # "Do everything": extract, patch and install in one threaded sequence.
        @in_thread
        def do_everything():
            nothread_use_path()
            self.game.nothread_patch_file()
            self.game.nothread_install_mod()
        self.button_do_everything = Button(self.frame_game_path_action, text=self.translate("Do everything"), relief=RIDGE, command=do_everything)
        self.button_do_everything.grid(row=1, column=2, sticky="NEWS")
        # Action frame (hidden until a game has been extracted).
        self.frame_action = LabelFrame(self.root, text=self.translate("Action"))
        self.button_prepare_file = Button(self.frame_action, text=self.translate("Prepare files"), relief=RIDGE, command=lambda: self.game.patch_file(), width=45)
        self.button_prepare_file.grid(row=1, column=1, columnspan=2, sticky="NEWS")
        self.button_install_mod = Button(self.frame_action, text=self.translate("Install mod"), relief=RIDGE, command=lambda: self.game.install_mod(), width=45)
        # Install mod button will only appear after prepare file step
        self.progressbar = ttk.Progressbar(self.root)
        self.progresslabel = Label(self.root)

    def check_update(self) -> None:
        """
        Check if an update is available
        """
        try:
            # Compare the local ./version file against the published one.
            github_version_data = requests.get(VERSION_FILE_URL, allow_redirects=True).json()
            # NOTE(review): the file is opened in binary mode; json.load
            # accepts bytes (it sniffs the encoding), so this works as-is.
            with open("./version", "rb") as f: local_version_data = json.load(f)
            local_version = get_version_from_string(f"{local_version_data['version']}.{local_version_data['subversion']}")
            github_version = get_version_from_string(f"{github_version_data['version']}.{github_version_data['subversion']}")
            if github_version > local_version:  # if github version is newer than local version
                if messagebox.askyesno(
                        self.translate("Update available !"),
                        self.translate("An update is available, do you want to install it ?",
                                       f"\n\nVersion : {local_version} -> {github_version}\n"
                                       f"Changelog :\n{github_version_data['changelog']}")):
                    # Download and unpack the updater binary if it is missing,
                    # then launch it; the updater replaces this program.
                    if not (os.path.exists("./Updater/Updater.exe")):
                        dl = requests.get(github_version_data["updater_bin"], allow_redirects=True)
                        with open("./download.zip", "wb") as file:
                            print(self.translate("Downloading the Updater..."))
                            file.write(dl.content)
                        print(self.translate("end of the download, extracting..."))
                        with zipfile.ZipFile("./download.zip") as file:
                            file.extractall("./Updater/")
                        print(self.translate("finished extracting"))
                        os.remove("./download.zip")
                    print(self.translate("starting application..."))
                    os.startfile(os.path.realpath("./Updater/Updater.exe"))
            elif local_version > github_version:
                # Running a version newer than the published one: dev build.
                self.is_dev_version = True
        except requests.ConnectionError:
            messagebox.showwarning(self.translate("Warning"),
                                   self.translate("Can't connect to internet. Download will be disabled."))
            self.option.disable_download = True
        except:
            self.log_error()

    def log_error(self) -> None:
        """
        When an error occur, will show it in a messagebox and write it in error.log
        """
        error = traceback.format_exc()
        with open("./error.log", "a") as f:
            # NOTE(review): the directory and GAME/files lines below lack a
            # trailing "\n", so consecutive fields run together in error.log;
            # confirm and add separators.
            f.write(f"---\n"
                    f"For game version : {self.game.ctconfig.version}\n"
                    f"./file/ directory : {os.listdir('./file/')}"
                    f"GAME/files/ information : {self.game.path, self.game.region}"
                    f"{error}\n")
        messagebox.showerror(self.translate("Error"), self.translate("An error occured", " :", "\n", error, "\n\n"))

    def progress(self, show: bool = None, indeter: bool = None, step: int = None,
                 statut: str = None, max: int = None, add: int = None) -> None:
        """
        configure the progress bar shown when doing a task

        All parameters default to None, meaning "leave that aspect unchanged".
        :param show: show or hide the progress bar
        :param indeter: if indeter, the progress bar will do a infinite loop animation
        :param step: set the progress of the bar
        :param statut: text shown under the progress bar
        :param max: set the maximum step (also resets the bar to 0)
        :param add: add to step of the progress bar
        """
        if indeter is True:
            self.progressbar.config(mode="indeterminate")
            self.progressbar.start(50)
        elif indeter is False:
            self.progressbar.config(mode="determinate")
            self.progressbar.stop()
        if show is True:
            # Buttons are disabled while a task runs to prevent re-entry.
            self.state_button(enable=False)
            self.progressbar.grid(row=100, column=1, sticky="NEWS")
            self.progresslabel.grid(row=101, column=1, sticky="NEWS")
        elif show is False:
            self.state_button(enable=True)
            self.progressbar.grid_forget()
            self.progresslabel.grid_forget()
        if statut: self.progresslabel.config(text=statut)
        if step: self.progressbar["value"] = step
        if max:
            self.progressbar["maximum"] = max
            self.progressbar["value"] = 0
        if add: self.progressbar.step(add)

    def state_button(self, enable: bool = True) -> None:
        """
        used to enable or disable button when doing task
        :param enable: are the button enabled ?
        """
        button = [
            self.button_game_extract,
            self.button_install_mod,
            self.button_prepare_file,
            self.button_do_everything
        ]
        for widget in button:
            if enable:
                widget.config(state=NORMAL)
            else:
                widget.config(state=DISABLED)

    def translate(self, *texts, lang: str = None) -> str:
        """
        translate text into an another language in translation.json file

        Untranslatable fragments are passed through unchanged, so dynamic
        pieces (numbers, paths, error text) can be mixed with known keys.
        :param texts: all text to convert
        :param lang: force a destination language to convert track
        :return: translated text
        """
        # Single-letter codes map game-region letters to translation keys.
        if lang is None: lang = self.stringvar_language.get()
        elif lang == "F": lang = "fr"
        elif lang == "G": lang = "ge"
        elif lang == "I": lang = "it"
        elif lang == "S": lang = "sp"
        if lang in translation_dict:
            _lang_trad = translation_dict[lang]
            translated_text = ""
            for text in texts:
                if text in _lang_trad:
                    translated_text += _lang_trad[text]
                else:
                    translated_text += text
            return translated_text
        return "".join(texts)  # if no translation language is found
| 15,221 | 4,430 |
import FWCore.ParameterSet.Config as cms
def modify_hltL3TrajSeedOIHit(_hltL3TrajSeedOIHit):
    """Adapt the outside-in hit-based L3 muon seed generator for FastSim.

    Rewrites the existing iterativeTSG PSet in place to use the FastSim
    propagation-based seed generator and FastSim input collections, and
    disables seed cleaning.
    """
    tsg = _hltL3TrajSeedOIHit.TkSeedGenerator.iterativeTSG
    tsg.ComponentName = cms.string('FastTSGFromPropagation')
    tsg.beamSpot = cms.InputTag("offlineBeamSpot")
    tsg.HitProducer = cms.InputTag("fastMatchedTrackerRecHitCombinations")
    tsg.MeasurementTrackerEvent = cms.InputTag("MeasurementTrackerEvent")
    tsg.SimTrackCollectionLabel = cms.InputTag("fastSimProducer")
    # No tracker seed cleaning in FastSim.
    _hltL3TrajSeedOIHit.TrackerSeedCleaner = cms.PSet()
def modify_hltL3TrajSeedIOHit(_hltL3TrajSeedIOHit):
    """Adapt the inside-out hit-based L3 muon seed generator for FastSim.

    Replaces the iterativeTSG PSet with a fresh one configured for the
    FastSim inside-out seed generator, feeding it the iterative-tracking
    seed collections, and disables seed cleaning.
    """
    tsg = cms.PSet()
    tsg.ComponentName = cms.string('FastTSGFromIOHit')
    tsg.SimTrackCollectionLabel = cms.InputTag("fastSimProducer")
    tsg.PtCut = cms.double(1.0)
    # Seeds from the iterative tracking steps used as input candidates.
    tsg.SeedCollectionLabels = cms.VInputTag(
        cms.InputTag("initialStepSeeds"),
        cms.InputTag("detachedTripletStepSeeds"),
        cms.InputTag("lowPtTripletStepSeeds"),
        cms.InputTag("pixelPairStepSeeds"))
    _hltL3TrajSeedIOHit.TkSeedGenerator.iterativeTSG = tsg
    # No tracker seed cleaning in FastSim.
    _hltL3TrajSeedIOHit.TrackerSeedCleaner = cms.PSet()
| 1,236 | 505 |
import os, sys, re, transaction, base64, zlib
from sqlalchemy import engine_from_config
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from .models import (
DBSession,
CIA,
Entry,
User,
Group,
Base,
)
from .security import hash_password
def usage(argv):
    """Print command-line usage derived from argv[0] and exit with status 1."""
    cmd = os.path.basename(argv[0])
    message = ('usage: %s <config_uri>\n'
               '(example: "%s development.ini")' % (cmd, cmd))
    print(message)
    sys.exit(1)
def main(argv=sys.argv):
    """One-off data migration for CIA rows.

    Pass 1 normalizes icon storage: each icon is base64 text that may wrap
    zlib-compressed bytes; decompress when possible and re-store the base64
    of the raw bytes.  Pass 2 splits URLs of the form "<url>#<path>" into
    separate url / path fields.

    Expects exactly one argument: the Pyramid config file (.ini) providing
    the sqlalchemy.* connection settings.
    """
    if len(argv) != 2:
        usage(argv)
    config_uri = argv[1]
    setup_logging(config_uri)
    settings = get_appsettings(config_uri)
    engine = engine_from_config(settings, 'sqlalchemy.')
    DBSession.configure(bind=engine)
    # Pass 1: re-encode icons inside a single transaction.
    with transaction.manager:
        for cia in DBSession.query(CIA).all():
            print(cia.icon_s)
            icons1 = base64.b64decode(cia.icon_s)
            try:
                # Legacy rows store zlib-compressed icon data; newer rows
                # store it raw, in which case decompress fails harmlessly.
                icons2 = zlib.decompress(icons1)
            except zlib.error:
                icons2 = icons1
            iconl1 = base64.b64decode(cia.icon_l)
            try:
                iconl2 = zlib.decompress(iconl1)
            except zlib.error:
                iconl2 = iconl1
            cia.icon_s = base64.b64encode(icons2)
            cia.icon_l = base64.b64encode(iconl2)
            # NOTE(review): this explicit update() duplicates the ORM
            # attribute writes above -- presumably kept to force the UPDATE
            # statement; confirm whether both are needed.
            DBSession.query(CIA).filter_by(id=cia.id).update(dict(icon_s=cia.icon_s,icon_l=cia.icon_l))
    # Pass 2: split "url#path" values into url and path columns.
    with transaction.manager:
        for cia in DBSession.query(CIA).all():
            # assumes cia.url is an object exposing a .url string -- TODO
            # confirm against the CIA model definition.
            m = re.search('(.*)#(.*)', cia.url.url)
            if m:
                cia.url = m.group(1)
                cia.path = m.group(2)
| 1,612 | 570 |
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
import numpy as np
# =============================================================
# Modeling tools for cross validation
# Reference: https://github.com/fmfn/BayesianOptimization/blob/master/examples/sklearn_example.py
# =============================================================
# ===================
# Random Forest
# ===================
def rfc_cv(n_estimators,
           max_depth,
           min_samples_split,
           min_samples_leaf,
           max_features,
           metric,
           X,
           y,
           preparessor):
    """Run 5-fold cross validation for a random forest classifier.

    A RandomForestClassifier is built from the given hyper-parameters,
    chained behind the supplied preprocessing step, and scored on (X, y)
    with the requested metric.  The mean CV score is returned; this is the
    quantity a Bayesian optimizer maximizes over n_estimators,
    min_samples_split, max_depth, min_samples_leaf and max_features.
    """
    forest = RandomForestClassifier(
        n_estimators=n_estimators,
        max_depth=max_depth,
        min_samples_split=min_samples_split,
        min_samples_leaf=min_samples_leaf,
        max_features=max_features,
        random_state=42,
    )
    # Full prediction pipeline: preprocessing followed by the classifier.
    pipeline = Pipeline(steps=[('preprocessor', preparessor),
                               ('classifier', forest)])
    scores = cross_val_score(pipeline, X, y, scoring=metric, cv=5)
    return scores.mean()
# ===================
# XGBoost
# ===================
def xgb_cv(n_estimators,
           max_depth,
           colsample_bytree,
           learning_rate,
           metric,
           X,
           y,
           preparessor):
    """Run 5-fold cross validation for an XGBoost binary classifier.

    An XGBClassifier is built from the given hyper-parameters, chained
    behind the supplied preprocessing step, and scored on (X, y) with the
    requested metric.  The mean CV score is returned; this is the quantity
    a Bayesian optimizer maximizes.
    """
    # Weight positives by the class imbalance ratio (#negatives / #positives).
    pos_weight = np.ceil( len(y[y == 0]) / len(y[y == 1]) )
    booster = xgb.XGBClassifier(
        n_estimators=n_estimators,
        max_depth=max_depth,
        colsample_bytree=colsample_bytree,
        learning_rate=learning_rate,
        objective='binary:logistic',
        scale_pos_weight=pos_weight,
        random_state=42,
        verbosity=0,
    )
    # Full prediction pipeline: preprocessing followed by the classifier.
    pipeline = Pipeline(steps=[('preprocessor', preparessor),
                               ('classifier', booster)])
    scores = cross_val_score(pipeline, X, y, scoring=metric, cv=5)
    return scores.mean()
| 3,297 | 898 |
# MIT License
#
# Copyright (c) 2020 Oli Wright <oli.wright.github@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# flask_makespc.py
#
# Flask container for makespc.py
# Simple script to convert images to the Stop Press Canvas .SPC format which
# is used on Amstrad PCW8256 and friends.
import os
from flask import Flask, flash, request, redirect, send_from_directory
from werkzeug.utils import secure_filename
from makespc import convert_to_spc
APP_ROOT = os.path.dirname(os.path.abspath(__file__)) # refers to application_top
# Folder names relative to the application root; the APP_* variants below are
# the corresponding absolute paths.
UPLOAD_FOLDER = 'uploads'     # raw uploaded images
OUTPUT_FOLDER = 'output'      # converted .spc files
PREVIEW_FOLDER = 'previews'   # PNG previews of the conversion result
APP_UPLOAD_FOLDER = os.path.join(APP_ROOT, UPLOAD_FOLDER)
APP_OUTPUT_FOLDER = os.path.join(APP_ROOT, OUTPUT_FOLDER)
APP_PREVIEW_FOLDER = os.path.join(APP_ROOT, PREVIEW_FOLDER)
# Image types accepted for upload (checked by allowed_file()).
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'bmp'}
app = Flask(__name__)
def allowed_file(filename):
    """Return True when *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route('/preview/<path:filename>', methods=['GET', 'POST'])
def preview(filename):
    """Serve a generated PNG preview image from the previews directory.

    The relative PREVIEW_FOLDER resolves against the app's root path, i.e.
    the same directory as APP_PREVIEW_FOLDER.
    """
    # FIX: pass the path positionally -- the `filename=` keyword argument was
    # removed in Flask 2.1 (renamed to `path`); the positional form works on
    # both old and new Flask versions.
    return send_from_directory(PREVIEW_FOLDER, filename)
@app.route('/output/<path:filename>', methods=['GET', 'POST'])
def output(filename):
    """Serve a converted .spc file from the output directory.

    The relative OUTPUT_FOLDER resolves against the app's root path, i.e.
    the same directory as APP_OUTPUT_FOLDER.
    """
    # FIX: pass the path positionally -- the `filename=` keyword argument was
    # removed in Flask 2.1 (renamed to `path`); the positional form works on
    # both old and new Flask versions.
    return send_from_directory(OUTPUT_FOLDER, filename)
@app.route('/', methods=['GET', 'POST'])
def upload_file():
    """Landing page: show the upload form and handle conversions.

    GET renders the form.  POST validates the uploaded image, saves it,
    converts it to .SPC via convert_to_spc(), and appends a preview image
    linking to the converted file.
    """
    html = '''
    <!doctype html>
    <title>Convert an image to .SPC</title>
    <h1>Make SPC Online</h1>
    <p>This tool converts images to Stop Press Canvas .SPC format, popular on Amstrad PCW8256 computers.</p>
    <form method=post enctype=multipart/form-data>
      <input type=file name=file>
      <input type=submit value=Convert to SPC>
    </form>
    '''
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            # NOTE(review): flash() requires app.secret_key, which this
            # module never sets -- confirm a secret key is configured,
            # otherwise these calls raise RuntimeError.
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # if user does not select file, browser also
        # submit an empty part without filename
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            input_filename = secure_filename(file.filename)
            # FIX: make sure the working directories exist so save() and
            # convert_to_spc() do not fail on a fresh deployment.
            os.makedirs(APP_UPLOAD_FOLDER, exist_ok=True)
            os.makedirs(APP_PREVIEW_FOLDER, exist_ok=True)
            os.makedirs(APP_OUTPUT_FOLDER, exist_ok=True)
            full_input_filename = os.path.join(APP_UPLOAD_FOLDER, input_filename)
            file.save(full_input_filename)
            basename, extension = os.path.splitext(input_filename)
            preview_filename = basename + ".png"
            full_preview_filename = os.path.join(APP_PREVIEW_FOLDER, preview_filename)
            output_filename = basename + ".spc"
            full_output_filename = os.path.join(APP_OUTPUT_FOLDER, output_filename)
            convert_to_spc(full_input_filename, full_preview_filename, full_output_filename)
            html += '''
            <p>Click the image to download your SPC file.</p>
            <a href="/output/%s"><img src="/preview/%s"/></a>
            ''' % (output_filename, preview_filename)
    return html
| 4,089 | 1,318 |
#reads in the protocol requirements and stores the information in a class
import yaml
import logging
logger = logging.getLogger(__name__)
def loadYamlFile(filename):
    """Load *filename* as YAML and return the parsed data.

    Returns an empty list when the file does not exist so callers can
    iterate over the result without special-casing the failure.
    """
    logger.debug("Opening file {}".format(filename))
    try:
        # 'with' guarantees the handle is closed even if parsing raises.
        with open(filename, 'r') as fObject:
            # SECURITY FIX: yaml.load without an explicit Loader can
            # construct arbitrary Python objects from untrusted input (and is
            # deprecated); safe_load only builds plain dict/list/str/... data.
            data = yaml.safe_load(fObject.read())
    except FileNotFoundError:
        logger.error("Config File {} not Found!".format(filename))
        return []
    return data
def parseYamlConfig(data):
    """Collect per-direction variable lists from parsed protocol config.

    The YAML data already holds everything we need; this builds flat lists
    of incoming and outgoing variables (useful for duplicate checking).

    Returns a tuple (incomingVariables, outgoingVariables), each a list of
    single-entry {name: definition} dicts.
    """
    incomingVariables = []
    outgoingVariables = []
    # go through each message
    for msg, metadata in data.items():
        direction = metadata['type']
        for k, v in metadata['variables'].items():
            if direction == 'incoming':
                incomingVariables.append({k: v})
            elif direction == 'outgoing':
                # BUG FIX: list.append() takes exactly one argument; the
                # original append(k, v) raised TypeError. Mirror the
                # incoming branch's {name: definition} shape.
                outgoingVariables.append({k: v})
    logger.debug(incomingVariables)
    return incomingVariables, outgoingVariables
| 1,201 | 346 |
#
# PySNMP MIB module CADANT-CMTS-NOTIFICATION-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CADANT-CMTS-NOTIFICATION-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:45:14 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE: machine-generated module — regenerate with pysmi from the ASN.1
# source rather than editing by hand. `mibBuilder` is injected by the
# pysmi/pysnmp loading context that exec's this file.
# Resolve symbol factories from already-loaded MIB modules.
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint")
trapCounter, trapSeverity = mibBuilder.importSymbols("CADANT-CMTS-EQUIPMENT-MIB", "trapCounter", "trapSeverity")
cadNotification, = mibBuilder.importSymbols("CADANT-PRODUCTS-MIB", "cadNotification")
ifOperStatus, ifAdminStatus, ifDescr, ifIndex = mibBuilder.importSymbols("IF-MIB", "ifOperStatus", "ifAdminStatus", "ifDescr", "ifIndex")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ModuleIdentity, iso, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, NotificationType, Gauge32, MibIdentifier, Integer32, Unsigned32, IpAddress, Counter64, Counter32, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "iso", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "NotificationType", "Gauge32", "MibIdentifier", "Integer32", "Unsigned32", "IpAddress", "Counter64", "Counter32", "TimeTicks")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Module identity, revision history and descriptive metadata.
cadNotificationMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1))
cadNotificationMib.setRevisions(('2015-09-14 00:00', '2006-05-03 00:00', '2005-09-28 00:00', '2003-03-26 00:00', '2002-07-24 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: cadNotificationMib.setRevisionsDescriptions(('Add cadLinkUp.', 'Add cadIpdrNoPrimaryCollector, cadIpdrStreamingDisabled and cadIpdrReportCycleMissed.', 'Add RIP2 authentication failure.', 'Renamed RADIUS traps so that they can also be used by TACACS+.', 'Inital version. Add AAA/Security related traps.',))
if mibBuilder.loadTexts: cadNotificationMib.setLastUpdated('201509140000Z')
if mibBuilder.loadTexts: cadNotificationMib.setOrganization('Cadant Inc')
if mibBuilder.loadTexts: cadNotificationMib.setContactInfo('Cadant Technical Support ')
if mibBuilder.loadTexts: cadNotificationMib.setDescription('This MIB defines object which are NOTIFICATION-TYPE and used to define the SNMP Traps generated from C4 CMTS.')
# OID anchors for the trap subtree and its informational scalars.
cadTrapMibObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1))
cadTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 0))
cadTrapsInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 1))
securityInfo = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 1, 1), DisplayString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: securityInfo.setStatus('current')
if mibBuilder.loadTexts: securityInfo.setDescription('The detail security failure information')
ipdrInfo = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 1, 2), DisplayString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: ipdrInfo.setStatus('current')
if mibBuilder.loadTexts: ipdrInfo.setDescription('The detail IPDR failure information')
# Trap (NOTIFICATION-TYPE) definitions under cadTraps.
aaaServerUnreachableTrap = NotificationType((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 0, 1)).setObjects(("CADANT-CMTS-EQUIPMENT-MIB", "trapCounter"), ("CADANT-CMTS-EQUIPMENT-MIB", "trapSeverity"), ("CADANT-CMTS-NOTIFICATION-MIB", "securityInfo"))
if mibBuilder.loadTexts: aaaServerUnreachableTrap.setStatus('current')
if mibBuilder.loadTexts: aaaServerUnreachableTrap.setDescription('An event to report that the AAA (RADIUS or TACACS+) server is not responding')
aaaServerGroupUnreachableTrap = NotificationType((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 0, 2)).setObjects(("CADANT-CMTS-EQUIPMENT-MIB", "trapCounter"), ("CADANT-CMTS-EQUIPMENT-MIB", "trapSeverity"), ("CADANT-CMTS-NOTIFICATION-MIB", "securityInfo"))
if mibBuilder.loadTexts: aaaServerGroupUnreachableTrap.setStatus('current')
if mibBuilder.loadTexts: aaaServerGroupUnreachableTrap.setDescription('An event to report that all the hosts in the AAA (RADIUS or TACACS+) server group are not responding')
aaaServerAuthFailTrap = NotificationType((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 0, 3)).setObjects(("CADANT-CMTS-EQUIPMENT-MIB", "trapCounter"), ("CADANT-CMTS-EQUIPMENT-MIB", "trapSeverity"), ("CADANT-CMTS-NOTIFICATION-MIB", "securityInfo"))
if mibBuilder.loadTexts: aaaServerAuthFailTrap.setStatus('current')
if mibBuilder.loadTexts: aaaServerAuthFailTrap.setDescription('An event to report that AAA (RADIUS or TACACS+) server rejected the authentication request')
secuLocalAuthFailTrap = NotificationType((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 0, 4)).setObjects(("CADANT-CMTS-EQUIPMENT-MIB", "trapCounter"), ("CADANT-CMTS-EQUIPMENT-MIB", "trapSeverity"), ("CADANT-CMTS-NOTIFICATION-MIB", "securityInfo"))
if mibBuilder.loadTexts: secuLocalAuthFailTrap.setStatus('current')
if mibBuilder.loadTexts: secuLocalAuthFailTrap.setDescription('An event to report that local password authentication failed')
secuLineAuthFailTrap = NotificationType((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 0, 5)).setObjects(("CADANT-CMTS-EQUIPMENT-MIB", "trapCounter"), ("CADANT-CMTS-EQUIPMENT-MIB", "trapSeverity"), ("CADANT-CMTS-NOTIFICATION-MIB", "securityInfo"))
if mibBuilder.loadTexts: secuLineAuthFailTrap.setStatus('current')
if mibBuilder.loadTexts: secuLineAuthFailTrap.setDescription('An event to report that line password authentication failed')
rip2AuthFailTrap = NotificationType((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 0, 6)).setObjects(("CADANT-CMTS-EQUIPMENT-MIB", "trapCounter"), ("CADANT-CMTS-EQUIPMENT-MIB", "trapSeverity"), ("CADANT-CMTS-NOTIFICATION-MIB", "securityInfo"))
if mibBuilder.loadTexts: rip2AuthFailTrap.setStatus('current')
if mibBuilder.loadTexts: rip2AuthFailTrap.setDescription('An event to report rip2 authentication failed.')
cadIpdrNoPrimaryCollector = NotificationType((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 0, 7)).setObjects(("CADANT-CMTS-EQUIPMENT-MIB", "trapCounter"), ("CADANT-CMTS-EQUIPMENT-MIB", "trapSeverity"), ("CADANT-CMTS-NOTIFICATION-MIB", "ipdrInfo"))
if mibBuilder.loadTexts: cadIpdrNoPrimaryCollector.setStatus('current')
if mibBuilder.loadTexts: cadIpdrNoPrimaryCollector.setDescription('An event to report IPDR Streaming is enabled but there is no primary collector connected.')
cadIpdrStreamingDisabled = NotificationType((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 0, 8)).setObjects(("CADANT-CMTS-EQUIPMENT-MIB", "trapCounter"), ("CADANT-CMTS-EQUIPMENT-MIB", "trapSeverity"), ("CADANT-CMTS-NOTIFICATION-MIB", "ipdrInfo"))
if mibBuilder.loadTexts: cadIpdrStreamingDisabled.setStatus('current')
if mibBuilder.loadTexts: cadIpdrStreamingDisabled.setDescription('An event to report IPDR Streaming function has been turned off.')
cadIpdrReportCycleMissed = NotificationType((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 0, 9)).setObjects(("CADANT-CMTS-EQUIPMENT-MIB", "trapCounter"), ("CADANT-CMTS-EQUIPMENT-MIB", "trapSeverity"), ("CADANT-CMTS-NOTIFICATION-MIB", "ipdrInfo"))
if mibBuilder.loadTexts: cadIpdrReportCycleMissed.setStatus('current')
if mibBuilder.loadTexts: cadIpdrReportCycleMissed.setDescription('An event to report CMTS was not able to start the scheduled IPDR report cycle session on time. This may be due to system or network load or the lack of primary collector connection.')
cadLinkUp = NotificationType((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 0, 10)).setObjects(("IF-MIB", "ifIndex"), ("IF-MIB", "ifAdminStatus"), ("IF-MIB", "ifOperStatus"), ("IF-MIB", "ifDescr"))
if mibBuilder.loadTexts: cadLinkUp.setStatus('current')
if mibBuilder.loadTexts: cadLinkUp.setDescription('This is to expand standard linkUp trap to include ifDescr. A linkUp trap signifies that the SNMP entity, acting in an agent role, has detected that the ifOperStatus object for one of its communication links left the down state and transitioned into some other state (but not into the notPresent state). This other state is indicated by the included value of ifOperStatus.')
# Make all defined symbols importable by other MIB modules.
mibBuilder.exportSymbols("CADANT-CMTS-NOTIFICATION-MIB", cadTrapsInfo=cadTrapsInfo, aaaServerGroupUnreachableTrap=aaaServerGroupUnreachableTrap, aaaServerAuthFailTrap=aaaServerAuthFailTrap, cadIpdrNoPrimaryCollector=cadIpdrNoPrimaryCollector, secuLineAuthFailTrap=secuLineAuthFailTrap, aaaServerUnreachableTrap=aaaServerUnreachableTrap, PYSNMP_MODULE_ID=cadNotificationMib, cadLinkUp=cadLinkUp, securityInfo=securityInfo, cadTrapMibObjects=cadTrapMibObjects, cadNotificationMib=cadNotificationMib, cadTraps=cadTraps, cadIpdrStreamingDisabled=cadIpdrStreamingDisabled, secuLocalAuthFailTrap=secuLocalAuthFailTrap, rip2AuthFailTrap=rip2AuthFailTrap, ipdrInfo=ipdrInfo, cadIpdrReportCycleMissed=cadIpdrReportCycleMissed)
| 9,225 | 3,665 |
# Print the first ten terms of an arithmetic progression (PA).
primeiro = int(input('Digite o primeiro termo da PA: '))
razao = int(input('Digite a razão da PA: '))
for posicao in range(1, 11):
    termo = primeiro + (posicao - 1) * razao
    print(termo, end=' - ')
print('ACABOU')
import requests
import sys
import os
import urllib3
# Suppress TLS certificate warnings for the requests made below.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Retry transient connection failures up to 10 times.
requests.adapters.DEFAULT_RETRIES = 10
def exploit_post_code(data, token):
    """POST one check-in attempt; on success print and return False."""
    main_url = 'https://openapiv51.ketangpai.com/AttenceApi/checkin'
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/65.0.3325.181 Safari/537.36",
        "Token": "%s" % token}
    response = requests.post(url=main_url, data=data, headers=headers)
    response.keep_alive = False
    # The API answers with \u-escaped JSON, so match the escaped form of
    # the "check-in succeeded" message directly.
    if r"\u7b7e\u5230\u6210\u529f" in response.text:
        msg = "签到成功,正确代码为%s" % str(data["code"])
        print(msg)
        return False
def exploit_code(data, payload, token):
    """Try one candidate check-in code, retrying on network errors.

    `data` is the string repr of the request dict; `payload` is the code
    to try; `token` is the session token forwarded to the API.
    """
    import ast
    # SECURITY FIX: ast.literal_eval only parses Python literals, unlike
    # eval() which would execute arbitrary expressions embedded in `data`.
    dict_data = ast.literal_eval(data)
    dict_data["code"] = payload
    msg = '正在尝试%s' % str(payload)
    sys.stdout.write(str(msg) + '\r')
    # FIX: retry in a loop instead of recursing — the original recursive
    # retry could exhaust the interpreter stack on a flaky connection.
    while True:
        try:
            exploit_post_code(dict_data, token)
            return
        except Exception:
            continue
def geo_exploit(data, token):
    """Submit a location-based check-in request and report success."""
    endpoint = 'https://openapiv51.ketangpai.com/AttenceApi/checkin'
    request_headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/65.0.3325.181 Safari/537.36",
        "Token": "%s" % token}
    response = requests.post(url=endpoint, data=data, headers=request_headers)
    response.keep_alive = False
    # Match the \u-escaped "check-in succeeded" message in the raw body.
    if r"\u7b7e\u5230\u6210\u529f" in response.text:
        print("签到成功")
| 1,729 | 711 |
import logging
import io
from asyncio import Queue
from sse_starlette.sse import (
    EventSourceResponse as _EventSourceResponse,
    AppStatus,
    ServerSentEvent,
)
from .endec import Encode
# Module-wide logger shared by the SSE helpers below.
logger = logging.getLogger("app_server")
class EventSourceResponse(_EventSourceResponse):
    """Variant of `EventSourceResponse` that keeps idle connections alive.

    When the body iterator yields `None`, an SSE comment line is sent
    instead of an event so intermediaries do not drop the connection.
    """

    @staticmethod
    def comment_encode(content: str = "", sep: str = None) -> bytes:
        # An SSE comment is a line starting with ':'; clients ignore it,
        # but it keeps the TCP connection warm.
        terminator = "\r\n" if sep is None else sep
        return (f": {content}" + terminator).encode("utf-8")

    async def stream_response(self, send) -> None:
        await send(
            {
                "type": "http.response.start",
                "status": self.status_code,
                "headers": self.raw_headers,
            }
        )
        self._ping_task = self._loop.create_task(self._ping(send))  # type: ignore
        async for payload in self.body_iterator:
            if AppStatus.should_exit:
                logger.debug(f"Caught signal. Stopping stream_response loop.")
                break
            if payload is None:
                # Keep-alive: emit a comment rather than an empty event.
                chunk = self.comment_encode("NONE", sep=self.sep)
            elif isinstance(payload, dict):
                chunk = ServerSentEvent(**payload).encode()
            else:
                chunk = ServerSentEvent(str(payload), sep=self.sep).encode()
            logger.debug(f"[EventSourceResponse] chunk: {chunk.decode()}")
            await send({"type": "http.response.body", "body": chunk, "more_body": True})
        await send({"type": "http.response.body", "body": b"", "more_body": False})
class SSEManager:
    """Process-wide FIFO of pending server-sent events."""

    __queue = Queue()

    @classmethod
    def push_event(cls, event: str, data: dict):
        """Enqueue an event (JSON-encoded payload) without blocking."""
        payload = dict(event=event, data=Encode.json(data))
        cls.__queue.put_nowait(payload)

    @classmethod
    async def next_event(cls):
        """Pop the oldest pending event, or None when the queue is empty."""
        pending = cls.__queue
        if pending.empty():
            return None
        event = await pending.get()
        pending.task_done()
        return event
| 2,087 | 602 |
#! /usr/bin/env python3
"""
Packet Commands
"""
# Opcodes placed in the control-packet header.
COMMAND_CPU_WRITE = 1
COMMAND_CPU_READ = 2
COMMAND_DAQ_WRITE = 3
COMMAND_DAQ_READ = 4
"""
RAM ADDRESSES
"""
# Four RAM banks, spaced 0x2000 apart.
RAM0_BASE_ADDRESS = 0x90000000
RAM1_BASE_ADDRESS = 0x90002000
RAM2_BASE_ADDRESS = 0x90004000
RAM3_BASE_ADDRESS = 0x90006000
"""
GPIO
"""
# GPIO register block: word-addressed registers at 4-byte offsets.
GPIO_BASE_ADDRESS = 0x40000000
GPIO_R_IN = GPIO_BASE_ADDRESS + 0
GPIO_R_OUT = GPIO_BASE_ADDRESS + 4
GPIO_R_OE = GPIO_BASE_ADDRESS + 8
GPIO_R_INTE = GPIO_BASE_ADDRESS + 12
GPIO_R_PTRIG = GPIO_BASE_ADDRESS + 16
GPIO_R_AUX = GPIO_BASE_ADDRESS + 20
GPIO_R_CTRL = GPIO_BASE_ADDRESS + 24
# Control register bit fields: F_* = bit position, B_* = bit mask.
GPIO_F_CTRL_INTE = 0
GPIO_B_CTRL_INTE = (1 << GPIO_F_CTRL_INTE)
GPIO_F_CTRL_INTS = 1
GPIO_B_CTRL_INTS = (1 << GPIO_F_CTRL_INTS)
GPIO_R_INTS = GPIO_BASE_ADDRESS + 28
GPIO_R_ECLK = GPIO_BASE_ADDRESS + 32
# NOTE(review): GPIO_R_NEC shares offset 32 with GPIO_R_ECLK — either a
# deliberate alias or a typo (36?); confirm against the GPIO core's
# register map before relying on it.
GPIO_R_NEC = GPIO_BASE_ADDRESS + 32
"""
SYSCON
"""
# System controller: identification, lock status and control registers.
SYSCON_BASE_ADDRESS = 0x40001000
SYSCON_R_IDENTIFICATION = SYSCON_BASE_ADDRESS + 0
SYSCON_R_STATUS = SYSCON_BASE_ADDRESS + 4
F_SYSCON_STATUS_LOCKED = 0
B_SYSCON_STATUS_LOCKED = (1 << F_SYSCON_STATUS_LOCKED)
SYSCON_R_CONTROL = SYSCON_BASE_ADDRESS + 8
"""
DSP SLAVE
"""
# Wishbone DSP slave: five inputs, five outputs, equation select bits.
WB_DSP_SLAVE_BASE_ADDRESS = (0x70000000)
WB_DSP_SLAVE_INPUT0 = WB_DSP_SLAVE_BASE_ADDRESS + 0
B_DSP_EQUATION_NONE = 0x00
B_DSP_EQUATION_SUM = 0x01
B_DSP_EQUATION_MULTIPLY = 0x02
B_DSP_EQUATION_DTREE = 0x03
B_DSP_SLAVE_DATA_SIGNED = (1 << 10)
B_DSP_SLAVE_ENABLE_MAC = (1 << 11)
B_DSP_SLAVE_SCALAR_MULTIPLY = (1 << 12)
B_DSP_SLAVE_EQUATION_START = (1 << 31)
WB_DSP_SLAVE_INPUT1 = WB_DSP_SLAVE_BASE_ADDRESS + 4
WB_DSP_SLAVE_INPUT2 = WB_DSP_SLAVE_BASE_ADDRESS + 8
WB_DSP_SLAVE_INPUT3 = WB_DSP_SLAVE_BASE_ADDRESS + 0x0C
WB_DSP_SLAVE_INPUT4 = WB_DSP_SLAVE_BASE_ADDRESS + 0x10
WB_DSP_SLAVE_OUTPUT0 = WB_DSP_SLAVE_BASE_ADDRESS + 0x14
WB_DSP_SLAVE_OUTPUT1 = WB_DSP_SLAVE_BASE_ADDRESS + 0x18
WB_DSP_SLAVE_OUTPUT2 = WB_DSP_SLAVE_BASE_ADDRESS + 0x1C
WB_DSP_SLAVE_OUTPUT3 = WB_DSP_SLAVE_BASE_ADDRESS + 0x20
WB_DSP_SLAVE_OUTPUT4 = WB_DSP_SLAVE_BASE_ADDRESS + 0x24
# Transfer size encodings for the control word.
B_CONTROL_DATA_SIZE_WORD = 0x00
B_CONTROL_DATA_SIZE_HWORD = 0x01
B_CONTROL_DATA_SIZE_BYTE = 0x02
B_CONTROL_DATA_SIZE_UNDEFINED = 0x03
| 2,088 | 1,267 |
from .figure import Figure
from .subplot import Subplot
from .subplot_time import SubplotTime
from .csv_reader import CsvReader, matchCsv
from .excel_reader import ExcelReader
from .get_path import getFileList, PathList
from .save_plot import actionSavePNG
| 257 | 73 |
import os
def get_file_list(root, key=None):
    """List directory entries under `root`, sorted with `key` when given."""
    entries = os.listdir(root)
    if key is None:
        return entries
    entries.sort(key=key)
    return entries
def get_config_file(name):
    """Build the path of a bundled resource file relative to this package."""
    package_dir = os.path.dirname(__file__)
    return os.path.join(package_dir, "..", "resource", name)
# Generated by Django 2.2.6 on 2019-10-22 08:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the `motifs` table.

    The table stores a name, free-text description and an image uploaded
    under MEDIA_ROOT/images/.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='motifs',
            fields=[
                ('id', models.BigAutoField(primary_key=True, serialize=False)),
                ('nom', models.CharField(max_length=40)),
                ('description', models.TextField()),
                ('motif_Img', models.ImageField(upload_to='images/')),
            ],
            options={
                'db_table': 'motifs',
            },
        ),
    ]
| 648 | 189 |
def conv(user):
    """Fetch a GitHub user's profile and return it as pretty-printed XML.

    Parameters
    ----------
    user : str
        GitHub login name appended to the users API URL.

    Returns
    -------
    str
        XML document with the JSON profile nested under a <wg> root.
    """
    import requests
    import json
    import xmltodict
    url = 'https://api.github.com/users/' + user
    # FIX: without a timeout a stalled connection would hang forever.
    response = requests.get(url, timeout=10)
    # Wrap the decoded JSON so xmltodict has a single root element.
    payload = {'wg': json.loads(response.text)}
    return xmltodict.unparse(payload, pretty=True)
| 280 | 100 |
from .app import create_app
# APP = create_app()
# python commands:
# in app dir
# FLASK_APP=twitoff flask run
# in root dir
# FLASK_APP=twitoff flask shell
'''
Notes for setup:
in root, FLASK_APP=twitoff flask shell
import create_app
init create_app()
import DB
DB.create_all()
creates tables
'''
'''
Other commands
user1 = User.query.filter(User.name == 'nasa')
user1 = user1.one()
user1.tweets
''' | 404 | 156 |
# Copyright 2015 TellApart, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ProxyRoute(object):
  """Describes one proxied route: where it is mounted and where traffic goes.

  Wraps a source-group manager (which supplies endpoints/blueprints) with
  the mount locations, protocol and template context for the route.
  """
  def __init__(self,
               locations,
               empty_endpoint_status_code,
               source_group_manager,
               use_https=False,
               route_path='',
               context=None):
    """
    locations - iterable of URL locations the route is served from.
    empty_endpoint_status_code - status returned when no endpoints exist.
    source_group_manager - supplies endpoints, blueprints and the slug.
    use_https - serve upstream traffic over https:// instead of http://.
    route_path - optional path prefix for the route.
    context - optional dict of extra context; a fresh dict is created per
      instance when omitted.
    """
    self._locations = locations
    self._empty_endpoint_status_code = empty_endpoint_status_code
    self._source_group_manager = source_group_manager
    # BUG FIX: the original `context={}` default was a single shared dict,
    # so mutating one route's context leaked into every other route.
    # (The original also assigned the first three attributes twice.)
    self._context = {} if context is None else context
    self._protocol = 'https://' if use_https else 'http://'
    self._route_path = route_path

  @property
  def blueprints(self):
    """Blueprints provided by the source group manager."""
    return self._source_group_manager.blueprints

  @property
  def locations(self):
    """URL locations this route is mounted at."""
    return self._locations

  @property
  def endpoints(self):
    """Current upstream endpoints from the source group manager."""
    return self._source_group_manager.endpoints

  @property
  def empty_endpoint_status_code(self):
    """Status code returned when the endpoint set is empty."""
    return self._empty_endpoint_status_code

  @property
  def slug(self):
    """Short identifier of the underlying source group."""
    return self._source_group_manager.slug

  @property
  def context(self):
    """Extra context dict associated with this route."""
    return self._context

  @property
  def protocol(self):
    """'https://' or 'http://' depending on construction."""
    return self._protocol

  @property
  def route_path(self):
    """Optional path prefix for the route."""
    return self._route_path

  def start(self, weight_adjustment_start):
    """Start the underlying source group manager."""
    self._source_group_manager.start(weight_adjustment_start)
| 1,945 | 574 |
import numpy as np
import pandas as pd
from rail.creation.engines import Engine
from typing import Callable
class Creator:
    """Object that supplies mock data for redshift estimation experiments.

    The mock data is drawn from a probability distribution defined by the
    engine, with an optional degrader applied.
    """

    def __init__(self, engine: "Engine", degrader: Callable = None, info: dict = None):
        """
        Parameters
        ----------
        engine : rail.Engine object
            Object defining a redshift probability distribution.
            Must have sample, log_prob and get_posterior methods (see engine.py)
        degrader : callable, optional
            A Degrader, function, or other callable that degrades the generated
            sample. Must take a pandas DataFrame and a seed int, and return a
            pandas DataFrame representing the degraded sample.
        info : dict, optional
            Additional information desired to be stored with the instance.
        """
        self.engine = engine
        self.degrader = degrader
        self.info = info

    def get_posterior(self, data: pd.DataFrame, column: str, grid: np.ndarray):
        """Calculate the posterior of the given column over the values in grid.

        Parameters
        ----------
        data : pd.DataFrame
            Pandas dataframe of the data on which the posteriors are conditioned.
        column : str
            Name of the column for which the posterior is calculated.
        grid : np.ndarray
            Grid over which the posterior is calculated.

        Returns
        -------
        np.ndarray
            Array of posteriors, of shape (data.shape[0], grid.size).
        """
        return self.engine.get_posterior(data, column, grid)

    def sample(
        self,
        n_samples: int,
        seed: int = None,
        include_pdf: bool = False,
        pz_grid: np.ndarray = None,
    ):
        """Draws n_samples from the engine.

        Parameters
        ----------
        n_samples : int
            Number of samples to draw.
        seed : int, optional
            Sets the random seed for drawing samples.
        include_pdf : bool, optional
            If True, redshift posteriors are returned for each galaxy in the
            column pz_pdf, with the grid saved as df.attrs['pz_grid'].
        pz_grid : np.ndarray, default=np.arange(0, 2.02, 0.02)
            The grid over which to calculate the redshift posteriors.

        Returns
        -------
        pd.DataFrame
            Samples from the model: photometry, true redshift, and redshift
            posterior PDFs if requested.

        Raises
        ------
        ValueError
            If the degrader removes every sample from the initial draw, so
            no survival fraction can be estimated.
        """
        # FIX: `include_pdf is True` missed truthy non-bool values.
        if include_pdf and pz_grid is None:
            pz_grid = np.arange(0, 2.02, 0.02)

        rng = np.random.default_rng(seed)
        # get samples
        outputs = self.engine.sample(n_samples, seed=seed)

        if self.degrader is not None:
            # degrade sample
            outputs = self.degrader(outputs, seed=seed)
            # fraction that survives the cut, used to size supplements
            selected_frac = len(outputs) / n_samples
            if selected_frac == 0:
                # FIX: previously this fell through to a ZeroDivisionError.
                raise ValueError(
                    "Degrader removed every sample from the initial draw; "
                    "cannot estimate how many supplemental samples to generate."
                )
            # draw more samples and degrade until we have enough
            while len(outputs) < n_samples:
                # estimate how many extras to draw (10% safety margin)
                n_supplement = int(1.1 / selected_frac * (n_samples - len(outputs)))
                # FIX: rng.integers() is given an int bound, not the float 1e18.
                new_sample = self.engine.sample(
                    n_supplement, seed=int(rng.integers(int(1e18)))
                )
                new_sample = self.degrader(new_sample, seed=int(rng.integers(int(1e18))))
                # add these to the larger set
                outputs = pd.concat((outputs, new_sample), ignore_index=True)
            # cut out the extras
            outputs = outputs[:n_samples]

        # calculate posteriors (grid evaluations; may move to qp later)
        if include_pdf:
            posteriors = self.get_posterior(outputs, column="redshift", grid=pz_grid)
            outputs.attrs["pz_grid"] = pz_grid
            outputs["pz_pdf"] = list(posteriors)

        return outputs
| 4,394 | 1,150 |
#!/usr/bin/python3
# Perform the unit tests on SV
import subprocess
import os
import pathlib
import traceback
import pipetestutils
def main():
    """Run the SV unit tests and exit with the test binary's status code."""
    exit_code = -1
    try:
        # The reports directory must exist before the suite writes its XML.
        pathlib.Path("build/reports").mkdir(parents=True, exist_ok=True)
        os.chdir("src/test")
    except Exception as e:
        print("Problem changing directory")
        print("type error: " + str(e))
        print(traceback.format_exc())
        # FIX: raise SystemExit instead of exit() — the latter is a
        # site-module convenience that is not guaranteed to exist
        # (e.g. under `python -S` or in frozen builds).
        raise SystemExit(-1)
    try:
        args = ["./test_bitcoin", "--log_format=JUNIT",
                "--log_sink=../../build/reports/unittests.xml"]
        exit_code = subprocess.call(args)
    except Exception as e:
        print("Problem running tests")
        print("type error: " + str(e))
        print(traceback.format_exc())
        raise SystemExit(-2)
    raise SystemExit(abs(exit_code))
if __name__ == '__main__':
    main()
| 834 | 275 |
from . import base
from . import mixins
from datetime import date
class TransformedRecord(
    mixins.GenericCompensationMixin,
    mixins.GenericDepartmentMixin, mixins.GenericIdentifierMixin,
    mixins.GenericJobTitleMixin, mixins.GenericPersonMixin,
    mixins.MembershipMixin, mixins.OrganizationMixin, mixins.PostMixin,
    mixins.RaceMixin, mixins.LinkMixin, base.BaseTransformedRecord):
    """Maps UTMB salary spreadsheet columns onto the standard record schema."""

    MAP = {
        'last_name': 'FAMILY_NAME',
        'first_name': 'GIVEN_NAME',
        'department': 'DEPTID_DESCR',
        'job_title': 'JOBTITLE',
        'gender': 'GENDER',
        'race': 'ETHNIC_GROUP_DESCR',
        'hire_date': 'LAST_HIRE_DT',
        'compensation': 'ANNUAL_PAY',
        'longevity': 'ANNUALIZED_LONGEVITY',
        'employee_type': 'FULL_PART_TIME',
    }

    NAME_FIELDS = ('first_name', 'last_name', )

    gender_map = {'Female': 'F', 'Male': 'M'}

    ORGANIZATION_NAME = 'The University of Texas Medical Branch at Galveston'

    ORGANIZATION_CLASSIFICATION = 'University Hospital'

    DATE_PROVIDED = date(2019, 7, 30)

    URL = ('https://s3.amazonaws.com/raw.texastribune.org/ut_medical_branch/'
           'salaries/2019/Response.xlsx')

    @property
    def compensation_type(self):
        """'PT' for part-time employees, 'FT' otherwise."""
        return 'PT' if self.employee_type == 'Part-time' else 'FT'

    @property
    def description(self):
        """Human-readable label matching the compensation type."""
        part_time = self.employee_type == 'Part-time'
        return "Part-time annual compensation" if part_time else "Annual compensation"

    @property
    def compensation(self):
        """Base annual pay, plus annualized longevity pay when non-zero."""
        longevity = self.get_mapped_value('longevity')
        salary = self.get_mapped_value('compensation')
        if longevity == '0':
            return salary
        return float(salary) + float(longevity)

    @property
    def is_valid(self):
        """A record without a last name is considered invalid."""
        return self.last_name.strip() != ''

    @property
    def person(self):
        """Dict of person fields in the schema the loader expects."""
        return {
            'family_name': self.last_name,
            'given_name': self.first_name,
            'name': self.get_raw_name(),
            'gender': self.gender_map[self.gender.strip()],
        }


transform = base.transform_factory(TransformedRecord)
| 2,439 | 813 |
from django.db.models import Count, Sum
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
# Create your views here.
from django.views import generic
from accountingsubject.forms import AccountingSubjectForm
from accountingsubject.models import AccountingSubject
from cash.models import CashOnHand
class IndexView(generic.ListView):
    """Accounting subject list, annotated with cash-entry counts and sums."""
    template_name = 'accountingsubject/index.html'
    context_object_name = 'accounting_subject_list'

    def get_queryset(self):
        queryset = AccountingSubject.objects.annotate(
            Count('cashonhand'), Sum('cashonhand__lucre'))
        return queryset
class DetailView(generic.DetailView):
    """Accounting subject detail plus summary stats of its cash entries."""
    model = AccountingSubject
    context_object_name = 'accounting_subject'
    template_name = 'accountingsubject/detail.html'

    def get_context_data(self, **kwargs):
        """Add the count and total of related cash entries to the context."""
        context = super().get_context_data(**kwargs)
        entries = CashOnHand.objects.filter(opposite_account_id=self.object.id)
        context['list_count'] = entries.count()
        # BUG FIX: 'list_sum' previously duplicated the .count() call
        # (copy-paste); it now totals the `lucre` amounts, defaulting to 0
        # when there are no entries (aggregate returns None then).
        context['list_sum'] = entries.aggregate(total=Sum('lucre'))['total'] or 0
        return context
# @login_required()  # TODO: re-enable authentication when ready
def accounting_subject_edit(request, accounting_subject_id):
    """Display and process the edit form for a single accounting subject."""
    subject = get_object_or_404(AccountingSubject, pk=accounting_subject_id)
    if request.method == 'POST':
        form = AccountingSubjectForm(request.POST)
        if form.is_valid():
            # Copy the validated fields onto the model and persist it.
            subject.accounting_subject = form.cleaned_data['accounting_subject']
            subject.debit_balance = form.cleaned_data['debit_balance']
            subject.remark = form.cleaned_data['remark']
            subject.save()
            return HttpResponseRedirect('/cash')
    else:
        # Pre-populate the form with the subject's current values.
        initial = {
            "accounting_subject": subject.accounting_subject,
            "debit_balance": subject.debit_balance,
            "remark": subject.remark,
        }
        form = AccountingSubjectForm(initial)
    return render(request, 'cash/accountingsubjectedit.html',
                  {'form': form, 'accounting_subject': subject})
class AttributedDict(dict):
    """Dict whose keys are also reachable as attributes.

    Spaces in keys map to underscores: d["a b"] is readable and writable
    as d.a_b.  Attribute writes only update existing keys; assigning an
    unknown name raises AttributeError.
    """

    def _alias_map(self):
        # Maps attribute-safe aliases ("a_b") back to the real keys ("a b").
        # FIX: the original built this with a list comprehension executed
        # purely for its side effects, duplicated in two methods.
        return {key.replace(" ", "_"): key for key in self}

    def __dir__(self):
        # NOTE(review): dir(super()) lists the super-proxy's attributes;
        # super().__dir__() may have been intended — confirm before changing.
        directory = dir(super())
        directory.extend([str(key.replace(" ", "_")) for key in self])
        return directory

    def __getattr__(self, attr):
        # Only called when normal attribute lookup fails, so real methods
        # and class attributes are unaffected.
        aliases = self._alias_map()
        if attr in aliases:
            return self[aliases[attr]]
        raise AttributeError(attr)

    def __setattr__(self, attr, value):
        aliases = self._alias_map()
        if attr in aliases:
            self[aliases[attr]] = value
        elif attr in self:
            self[attr] = value
        else:
            # Refuse to create plain instance attributes; keys are the
            # only writable names.
            raise AttributeError(attr)
def duree(debut, fin):
    """Elapsed (hours, minutes) from debut to fin, wrapping past midnight."""
    total_debut = debut[0] * 60 + debut[1]
    total_fin = fin[0] * 60 + fin[1]
    # Reduce modulo one day so an end time "before" the start wraps around.
    ecart = (total_fin - total_debut) % (24 * 60)
    return divmod(ecart, 60)

print(duree((14, 39), (18, 45)))
print(duree((6, 0), (5, 15)))
""" General Utilities file. """
import sys
import os
############################ NON-TF UTILS ##########################
from skimage.util import img_as_float
import numpy as np
import cv2
import pickle
from PIL import Image
from io import BytesIO
import math
import tqdm
import scipy
import json
import matplotlib
# Pick the first matplotlib backend that imports cleanly on this machine.
gui_env = ['Agg','TKAgg','GTKAgg','Qt4Agg','WXAgg']
for gui in gui_env:
    try:
        print("testing", gui)
        # FIX: the `warn=` kwarg was removed in matplotlib 3.x; passing it
        # made every use() call raise, so no backend was ever selected.
        matplotlib.use(gui, force=True)
        from matplotlib import pyplot as plt
        break
    except Exception:
        # Backend unavailable here; try the next candidate.
        continue
print("utils.py Using:", matplotlib.get_backend())
from matplotlib.backends.backend_agg import FigureCanvasAgg as Canvas
from mpl_toolkits.mplot3d import Axes3D
import config as cfg
######### Basic Utils #########
def adjust_gamma(image, gamma=1.0):
    """Apply gamma correction to an 8-bit image via a lookup table."""
    exponent = 1.0 / gamma
    # Precompute the corrected value for every possible 8-bit level.
    lut = np.array(
        [((level / 255.0) ** exponent) * 255 for level in np.arange(0, 256)]
    ).astype("uint8")
    return cv2.LUT(image, lut)
def scipy_sharpen(img_flt, alpha=30):
    """Unsharp-mask style sharpening.

    Boosts a coarse Gaussian blur of the image by `alpha` times the
    difference between the coarse blur and a finer re-blur of it.
    """
    from scipy import ndimage
    coarse = ndimage.gaussian_filter(img_flt, 3)
    fine = ndimage.gaussian_filter(coarse, 1)
    return coarse + alpha * (coarse - fine)
def read_pickle(path):
    """Deserialize and return the object stored in a pickle file."""
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def save_pickle(data, path):
    """Serialize `data` to `path` using the highest pickle protocol."""
    with open(path, 'wb') as handle:
        pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
######### Pose quality and Metrics #########
def compute_similarity_transform(S1, S2):
    """Align S1 to S2 with the best similarity transform (sR, t).

    Solves the orthogonal Procrustes problem for 3 x N point sets, where
    R is a 3x3 rotation, t a 3x1 translation and s a scale.  N x 3 inputs
    are transposed on the way in and back out.  Returns the transformed S1.
    """
    was_transposed = False
    if S1.shape[0] != 3 and S2.shape[0] != 3:
        S1, S2 = S1.T, S2.T
        was_transposed = True
    assert (S2.shape[1] == S1.shape[1])

    # Center both point clouds on their centroids.
    mu1 = S1.mean(axis=1, keepdims=True)
    mu2 = S2.mean(axis=1, keepdims=True)
    X1 = S1 - mu1
    X2 = S2 - mu2

    # Total variance of the source cloud, used to recover the scale.
    var1 = np.sum(X1 ** 2)

    # Cross-covariance; its SVD yields the optimal rotation.
    K = X1 @ X2.T
    U, _, Vh = np.linalg.svd(K)
    V = Vh.T
    # Reflection guard: force det(R) = +1.
    Z = np.eye(U.shape[0])
    Z[-1, -1] *= np.sign(np.linalg.det(U @ V.T))
    R = V @ (Z @ U.T)

    scale = np.trace(R @ K) / var1
    t = mu2 - scale * (R @ mu1)

    aligned = scale * (R @ S1) + t
    if was_transposed:
        aligned = aligned.T
    return aligned
def compute_error(pred_3d_all, gt_3d_all, full_out=True):
    """MPJPE and PA-MPJPE between predicted and ground-truth 3D poses.

    Both inputs are (num_samples, num_joints, 3) and are root-centred on
    joint 0 before comparison.  With full_out=True, returns the two mean
    errors in millimetres; otherwise the per-sample error lists.
    """
    preds = pred_3d_all.copy()
    preds = preds - preds[:, 0:1, :]
    gts = gt_3d_all.copy()
    gts = gts - gts[:, 0:1, :]

    joint_wise_error = []
    error = []
    pa_joint_wise_error = []
    pa_error = []
    for idx in range(len(preds)):
        pred = preds[idx]
        gt = gts[idx]
        per_joint = np.linalg.norm(pred - gt, axis=1)
        joint_wise_error.append(per_joint)
        error.append(np.mean(per_joint))
        # Procrustes-align the prediction before measuring again.
        aligned = compute_similarity_transform(pred.copy(), gt.copy())
        pa_per_joint = np.linalg.norm(aligned - gt, axis=1)
        pa_joint_wise_error.append(pa_per_joint)
        pa_error.append(np.mean(pa_per_joint))
    joint_wise_error = np.array(joint_wise_error)
    if full_out:
        # Convert metres to millimetres for the summary numbers.
        return np.mean(error) * 1000, np.mean(pa_error) * 1000
    return error, pa_error
###### Alternative manual regressors ######
def smplx45_to_17j(pose_smpl):
""" SMPLX 45 joint J3D to 17 joint J3D. """
## Remove fingers
pose_smpl = pose_smpl[:-10]
## Remove extra def feet
pose_smpl = pose_smpl[:-6]
## Remove face
pose_smpl = pose_smpl[:-5]
## Remove wrist
pose_smpl = pose_smpl[:-2]
## Remove extra def spine
pose_smpl = np.delete(pose_smpl, 3, 0) ## 3
pose_smpl = np.delete(pose_smpl, 5, 0) ## 6
pose_smpl = np.delete(pose_smpl, 7, 0) ## 9
## Remove torso
pose_smpl = np.delete(pose_smpl, 10, 0) ## 10
pose_smpl = np.delete(pose_smpl, 10, 0) ## 11
## Hip altitude increase and widen
alt_f = 0.8
wide_f = 8.0
pelvis = pose_smpl[0].copy()
r_hip = pose_smpl[2].copy()
l_hip = pose_smpl[1].copy()
## Alt inc
r_p_dir = pelvis - r_hip
l_p_dir = pelvis - l_hip
mag_rp = np.linalg.norm(r_p_dir)
r_p_dir /= mag_rp
mag_lp = np.linalg.norm(l_p_dir)
l_p_dir /= mag_lp
r_hip = r_hip + (r_p_dir*mag_rp*alt_f)
l_hip = l_hip + (l_p_dir*mag_lp*alt_f)
## H-Widen
hip_ctr = (r_hip + l_hip) / 2.0
r_dir = r_hip - hip_ctr
l_dir = l_hip - hip_ctr
## Unit vec
mag = np.linalg.norm(r_dir)
r_dir /= mag
l_dir /= np.linalg.norm(l_dir)
r_hip = r_hip + (r_dir*mag*wide_f)
l_hip = l_hip + (l_dir*mag*wide_f)
## place back
pose_smpl[2] = r_hip
pose_smpl[1] = l_hip
return pose_smpl
def smpl23_to_17j_3d(pose_smpl):
    """ Simple SMPL 23 joint J3D to 17 joint J3D.

    Each target joint is either a single source joint or the midpoint of
    two source joints.
    """
    mapping = [[0, 1], [8, 11],
               [12], [17], [19],   ### or 15 , 17
               [13], [18], [20],   ### or 16 , 18
               [14], [0], [3],
               [9, 6], [9], [1],
               [4], [10, 7], [10]]
    out = np.zeros((len(mapping), 3))
    for row, src in enumerate(mapping):
        if len(src) == 2:
            out[row] = (pose_smpl[src[0]] + pose_smpl[src[1]]) / 2.0
        else:
            out[row] = pose_smpl[src[0]]
    return out
""" SMPL J17 reordering vec. """
smpl_reorder_vec = [0, 9,
12, 14, 16,
11, 13, 15,
10,
2, 4, 6, 8,
1, 3, 5, 7 ]
def reorder_smpl17_to_j17(pose_3d):
""" SMPL reorder SMPL J17 to standard J17. """
pose_3d = pose_3d[smpl_reorder_vec]
return pose_3d
def smpl24_to_17j_adv(pose_smpl):
""" Improved SMPL 23 joint J3D to 17 joint J3D. """
## Hip altitude increase and widen
alt_f = 0.8
wide_f = 8.0
pelvis = pose_smpl[0].copy()
r_hip = pose_smpl[2].copy()
l_hip = pose_smpl[1].copy()
## Alt inc
r_p_dir = pelvis - r_hip
l_p_dir = pelvis - l_hip
mag_rp = np.linalg.norm(r_p_dir)
r_p_dir /= mag_rp
mag_lp = np.linalg.norm(l_p_dir)
l_p_dir /= mag_lp
r_hip = r_hip + (r_p_dir*mag_rp*alt_f)
l_hip = l_hip + (l_p_dir*mag_lp*alt_f)
## H-Widen
hip_ctr = (r_hip + l_hip) / 2.0
r_dir = r_hip - hip_ctr
l_dir = l_hip - hip_ctr
## Unit vec
mag = np.linalg.norm(r_dir)
r_dir /= mag
l_dir /= np.linalg.norm(l_dir)
r_hip = r_hip + (r_dir*mag*wide_f)
l_hip = l_hip + (l_dir*mag*wide_f)
## Place back
pose_smpl[2] = r_hip
pose_smpl[1] = l_hip
## Neck to head raise with tilt towards nose
alt_f = 0.7
head = pose_smpl[15].copy()
neck = pose_smpl[12].copy()
## Alt inc
n_h_dir = head - neck
mag_nh = np.linalg.norm(n_h_dir)
n_h_dir /= mag_nh
head = head + (n_h_dir*mag_nh*alt_f)
## Place back
pose_smpl[15] = head
## Remove wrist
pose_smpl = pose_smpl[:-2]
## Remove extra def spine
pose_smpl = np.delete(pose_smpl, 3, 0) ## 3
pose_smpl = np.delete(pose_smpl, 5, 0) ## 6
pose_smpl = np.delete(pose_smpl, 7, 0) ## 9
## Remove torso
pose_smpl = np.delete(pose_smpl, 10, 0) ## 10
pose_smpl = np.delete(pose_smpl, 10, 0) ## 11
return pose_smpl
def hip_straighten(pose_smpl):
    """ Straighten Hip in J17.

    Replaces the pelvis (joint 0) with the midpoint of the two hip
    joints. Mutates and returns the caller's array.
    """
    pose_smpl[0] = (pose_smpl[1].copy() + pose_smpl[2].copy()) / 2
    return pose_smpl
""" Limb parents for SMPL joints. """
limb_parents = [ 0,
0, 0, 0,
1, 2, 3, 4,
5, 6, 7, 8,
9, 9, 9,
12,12,12,
16,17,18,19,20,21
]
""" 3D skeleton plot colours for SMPL joints. """
colors = np.array([[0,0,255], [0,255,0], [255,0,0], [255,0,255], [0,255,255], [255,255,0], [127,127,0], [0,127,0], [100,0,100],
[255,0,255], [0,255,0], [0,0,255], [255,255,0], [127,127,0], [100,0,100], [175,100,195],
[0,0,255], [0,255,0], [255,0,0], [255,0,255], [0,255,255], [255,255,0], [127,127,0], [0,127,0], [100,0,100],
[255,0,255], [0,255,0], [0,0,255], [255,255,0], [127,127,0], [100,0,100], [175,100,195]])
def fig2data(fig):
    """ Convert a Matplotlib figure to a 4D numpy array with RGBA channels.

    :param fig: a matplotlib figure (will be drawn)
    :returns: (height, width, 4) uint8 array in RGBA channel order
    """
    ## Force a render so the canvas pixel buffer is valid.
    fig.canvas.draw()
    ## Get the ARGB buffer from the figure.
    w, h = fig.canvas.get_width_height()
    ## np.fromstring is deprecated/removed for binary input; frombuffer is
    ## the supported replacement (copy() makes the array writeable).
    buf = np.frombuffer(fig.canvas.tostring_argb(), dtype=np.uint8).copy()
    ## The buffer is row-major: rows == height. The original used
    ## (w, h, 4), which only worked for square canvases.
    buf.shape = (h, w, 4)
    ## Roll the ALPHA channel to the end: ARGB -> RGBA.
    buf = np.roll(buf, 3, axis=2)
    return buf
def draw_limbs_3d_plt(joints_3d, ax, limb_parents=limb_parents):
    """ Draw one 3D line per joint, connecting it to its parent. """
    for j in range(joints_3d.shape[0]):
        parent = limb_parents[j]
        seg = [[joints_3d[j, axis], joints_3d[parent, axis]] for axis in range(3)]
        #ax.text(joints_3d[j, 0], joints_3d[j, 1], joints_3d[j, 2], s=str(j))
        ax.plot(seg[0], seg[1], seg[2], color=colors[j]/255.0, linewidth=3, antialiased=True)
def plot_skeleton_3d(joints_3d, flag=-1, limb_parents=limb_parents, title=""):
    """ Render a 3D skeleton plot and return it as an image array.

    :param flag: optional fixed viewpoint (0 -> azim 0, 1 -> azim 90)
    """
    fig = plt.figure(frameon=False, figsize=(7, 7))
    ax = fig.add_subplot(1, 1, 1, projection='3d')
    ax.clear()
    ## Optional fixed viewpoints.
    if flag == 0:
        ax.view_init(azim=0, elev=0)
    elif flag == 1:
        ax.view_init(azim=90, elev=0)
    ## Fixed cube of +/-200 units around the origin.
    for lim_setter in (ax.set_xlim, ax.set_ylim, ax.set_zlim):
        lim_setter(-200, 200)
    scale = 1
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    draw_limbs_3d_plt(joints_3d * scale, ax, limb_parents)
    ax.set_title(title)
    ## Rasterize and release the figure.
    plt_img = fig2data(fig)
    plt.close(fig)
    return plt_img
def skeleton_image(joints_2d, img):
    """ 2D Joint skeleton Overlay.

    :param joints_2d: (J, 2) pixel-space joint coordinates
    :param img: image to draw over (left untouched; a copy is returned)
    """
    img_copy = img.copy()
    for i in range(joints_2d.shape[0]):
        pt = (int(joints_2d[i, 0]), int(joints_2d[i, 1]))
        parent_pt = (int(joints_2d[limb_parents[i], 0]), int(joints_2d[limb_parents[i], 1]))
        ## cv2 expects the colour as a tuple of Python scalars; passing a
        ## numpy row (colors[i]) is rejected by newer OpenCV builds.
        clr = tuple(int(c) for c in colors[i])
        img_copy = cv2.line(img_copy, pt, parent_pt, clr, 4)
    return img_copy
def create_collage(img_list, axis=1):
    """ Collage a set of images to form a panel. (numpy)

    :param img_list: sequence of arrays with matching non-concat dims
    :param axis: concatenation axis (1 == side by side)
    """
    ## np.concatenate takes the sequence directly; the original wrapped it
    ## in a no-op list comprehension.
    return np.concatenate(list(img_list), axis=axis)
def align_by_pelvis(joints):
    """ Center by pelvis joint (joint 0).

    Mutates and returns the caller's array.
    """
    pelvis_idx = 0
    joints -= joints[pelvis_idx, :]
    return joints
def mesh2d_center_by_nose(mesh2d, w=224, h=224):
    """ Simple mesh centering by nose/pelvis vtx. (numpy)

    Shifts the mesh so the anchor vertex (index 0) lands at (w/2, h/5).
    Returns a new array; the input is not mutated.
    """
    nose_idx = 0
    anchor = mesh2d[nose_idx, :]
    return mesh2d - anchor + np.array([w / 2, h / 5])
def align_with_image_j2d(points2d, img_width, img_height):
    """ Perform center alignment to image coordinate system. (numpy)

    Mutates points2d in place (x += w/2, y += h/2) and returns it.
    """
    for axis, extent in ((0, img_width), (1, img_height)):
        points2d[:, axis] += extent / 2
    return points2d
""" Input preprocess """
def get_transform(center, scale, res, rot=0):
    """ Generate transformation matrix.

    Maps source-image pixels into a `res`-sized crop defined by `center`
    and `scale` (224px reference box), optionally rotating about the
    output centre.
    """
    box = 224 * scale
    t = np.zeros((3, 3))
    t[0, 0] = float(res[1]) / box
    t[1, 1] = float(res[0]) / box
    t[0, 2] = res[1] * (-float(center[0]) / box + .5)
    t[1, 2] = res[0] * (-float(center[1]) / box + .5)
    t[2, 2] = 1
    if rot != 0:
        rot = -rot  ## To match direction of rotation from cropping
        rad = rot * np.pi / 180
        sn, cs = np.sin(rad), np.cos(rad)
        rot_mat = np.zeros((3, 3))
        rot_mat[0, :2] = [cs, -sn]
        rot_mat[1, :2] = [sn, cs]
        rot_mat[2, 2] = 1
        ## Rotate about the output centre: shift, rotate, shift back.
        t_mat = np.eye(3)
        t_mat[0, 2] = -res[1] / 2
        t_mat[1, 2] = -res[0] / 2
        t_inv = t_mat.copy()
        t_inv[:2, 2] *= -1
        t = t_inv.dot(rot_mat).dot(t_mat).dot(t)
    return t
def transform(pt, center, scale, res, invert=0, rot=0):
    """ Transform pixel location to different reference.

    :param invert: when truthy, map from crop space back to image space
    """
    mat = get_transform(center, scale, res, rot=rot)
    if invert:
        mat = np.linalg.inv(mat)
    ## 1-based point -> homogeneous 0-based coordinates, and back.
    homog = np.array([pt[0] - 1, pt[1] - 1, 1.]).T
    mapped = np.dot(mat, homog)
    return mapped[:2].astype(int) + 1
def crop(img, center, scale, res, rot=0):
    """ Crop image according to the supplied bounding box.

    :param img: source image (H x W [x C]) numpy array
    :param center: bbox centre in source-image pixels
    :param scale: bbox scale relative to the 224px reference box used by
        get_transform (callers derive it from a 200px detector box)
    :param res: (rows, cols) of the output crop
    :param rot: optional rotation in degrees
    """
    ## Upper left point, mapped back into source-image coordinates
    ul = np.array(transform([1, 1], center, scale, res, invert=1)) - 1
    ## Bottom right point
    br = np.array(transform([res[0]+1, res[1]+1], center, scale, res, invert=1)) - 1
    ## Padding so that when rotated proper amount of context is included
    pad = int(np.linalg.norm(br - ul) / 2 - float(br[1] - ul[1]) / 2)
    if not rot == 0:
        ul -= pad
        br += pad
    new_shape = [br[1] - ul[1], br[0] - ul[0]]
    if len(img.shape) > 2:
        new_shape += [img.shape[2]]
    new_img = np.zeros(new_shape)
    ## Range to fill new array (clipped against the source image bounds)
    new_x = max(0, -ul[0]), min(br[0], len(img[0])) - ul[0]
    new_y = max(0, -ul[1]), min(br[1], len(img)) - ul[1]
    ## Range to sample from original image
    old_x = max(0, ul[0]), min(len(img[0]), br[0])
    old_y = max(0, ul[1]), min(len(img), br[1])
    new_img[new_y[0]:new_y[1], new_x[0]:new_x[1]] = img[old_y[0]:old_y[1], old_x[0]:old_x[1]]
    if not rot == 0:
        ## Remove padding
        ## NOTE(review): scipy.misc.imrotate/imresize were removed in
        ## SciPy >= 1.3; this branch requires an old pinned SciPy (or a
        ## port to PIL/cv2) -- confirm the environment.
        new_img = scipy.misc.imrotate(new_img, rot)
        new_img = new_img[pad:-pad, pad:-pad]
        new_img = scipy.misc.imresize(new_img, res)
    return new_img
def j2d_crop(img, j2d_file, rescale=1.2, detection_thresh=0.2):
    """ Get center and scale for Bbox from OpenPose/Centertrack detections."""
    with open(j2d_file, 'r') as f:
        keypoints = json.load(f)['people'][0]['pose_keypoints_2d']
    kps = np.reshape(np.array(keypoints), (-1, 3))
    ## Keep only confidently-detected keypoints (last column is confidence)
    conf_mask = kps[:, -1] > detection_thresh
    valid_kps = kps[conf_mask][:, :-1]
    center = valid_kps.mean(axis=0)
    bbox_size = (valid_kps.max(axis=0) - valid_kps.min(axis=0)).max()
    ## Adjust bounding box tightness (200px reference box)
    scale = (bbox_size / 200.0) * rescale
    return crop(img, center, scale, (cfg.IMG_W, cfg.IMG_H))
def bbox_crop(img, bbox):
    """ Crop, center and scale image based on BBox

    :param bbox: path to a JSON file with a 'bbox' entry [x, y, w, h]
    """
    with open(bbox, 'r') as f:
        box = np.array(json.load(f)['bbox']).astype(np.float32)
    ## Centre is upper-left corner plus half the extent.
    center = box[:2] + 0.5 * box[2:]
    scale = max(box[2], box[3]) / 200.0
    return crop(img, center, scale, (cfg.IMG_W, cfg.IMG_H))
########################### TF UTILS #############################
import pickle as pkl
import tensorflow as tf
import tensorflow_graphics as tfg
from render.render_layer_ortho import RenderLayer
import render.vertex_normal_expose as dirt_expose
PI = np.pi  ## shorthand alias for numpy's pi constant
def tfread_image(image, fmt='png', channels=3):
    """ Simple read and decode image.

    :param image: encoded image bytes/tensor
    :param fmt: 'png' or 'jpg'
    :param channels: number of colour channels to decode
    :raises ValueError: on an unsupported format (the original printed an
        error and silently returned None, crashing later downstream)
    """
    if fmt == 'png':
        return tf.image.decode_png(image, channels=channels)
    elif fmt == 'jpg':
        return tf.image.decode_jpeg(image, channels=channels)
    raise ValueError("unsupported image format: %r" % (fmt,))
def tf_norm(tensor, axis=1):
    """ Min-Max normalize image: (x - min) / (max - min) along `axis`. """
    lo = tf.reduce_min(tensor, axis=axis, keepdims=True)
    hi = tf.reduce_max(tensor, axis=axis, keepdims=True)
    return tf.div(tf.subtract(tensor, lo), tf.subtract(hi, lo))
def tfresize_image(image, size=(cfg.IMG_W, cfg.IMG_H)):
    """ Resize image to `size` (defaults to the configured network input). """
    resized = tf.image.resize(image, size)
    return resized
def denormalize_image(image):
    """ Undo normalization of image: map [-1, 1] back to [0, 1]. """
    return (image / 2) + 0.5
def unprocess_image(image):
    """ Undo preprocess image: map [-1, 1] back to the [0, 255] range. """
    ## Inverse z-norm to [0, 1], then back to pixel values.
    return ((image / 2) + 0.5) * 255.0
def preprocess_image(image, do_znorm=True):
    """ Decode, resize and normalize an encoded JPEG image.

    :param do_znorm: when True the result is in [-1, 1], else [0, 1]
    """
    decoded = tf.image.decode_jpeg(image, channels=3)
    resized = tf.image.resize(decoded, (cfg.IMG_W, cfg.IMG_H))
    scaled = resized / 255.0          # [0, 255] -> [0, 1]
    if do_znorm:
        scaled = 2 * (scaled - 0.5)   # [0, 1] -> [-1, 1]
    return scaled
def load_and_preprocess_image(path):
    """ Simple read and preprocess for just image. """
    raw = tf.io.read_file(path)
    return preprocess_image(raw)
def load_and_preprocess_image_and_mask(path, j2d, j3d, beta, mask_path, pose, camera, data_id):
    """ Simple read and preprocess for image and mask.

    All label arguments (j2d/j3d/beta/pose/camera/data_id) pass through
    untouched.
    """
    proc_image = preprocess_image(tf.io.read_file(path))
    ## Mask stays in [0, 1]: no z-normalisation.
    proc_mask = preprocess_image(tf.io.read_file(mask_path), do_znorm=False)
    return proc_image, j2d, j3d, beta, proc_mask, pose, camera, data_id
def tf_create_collage(img_list, axis=2):
    """ Collage a set of images to form a panel.

    :param img_list: sequence of equally-shaped image tensors
    :param axis: concat axis (2 == width for NHWC batches)
    """
    ## tf.concat accepts the sequence directly; the original wrapped it in
    ## a no-op list comprehension.
    return tf.concat(list(img_list), axis=axis)
def log_images(tag, image, step, writer):
    """ Logs a list of images to tensorboard.

    :param image: (H, W, C) uint8 array to log
    :param writer: a TF1 summary FileWriter
    """
    height, width, channel = image.shape
    ## PNG-encode via PIL so the raw bytes can be embedded in the summary.
    buffer = BytesIO()
    Image.fromarray(image).save(buffer, format='PNG')
    encoded = buffer.getvalue()
    buffer.close()
    ## Assemble the TF1 image summary protobufs and write them out.
    img_sum = tf.Summary.Image(height=height,
                               width=width,
                               colorspace=channel,
                               encoded_image_string=encoded)
    im_summary = tf.Summary.Value(tag='%s' % (tag), image=img_sum)
    writer.add_summary(tf.Summary(value=[im_summary]), step)
def get_network_params(scope):
    """ Get all accessable variables. """
    # All GLOBAL_VARIABLES under `scope` (TF1 collections API).
    return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)
def get_net_train_params(scope):
    """ Get Trainable params. """
    # Only TRAINABLE_VARIABLES under `scope` (TF1 collections API).
    return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
def copy_weights(iter_no, wt_dir, label='best'):
    """ Backup the Weights to pretrained_weights/ given iteration number and label i.e 'iter' or 'best' """
    # NOTE(review): files are listed from `wt_dir+label+"wt_"` but copied
    # from `wt_dir` below -- confirm the intended directory layout.
    files = os.listdir(wt_dir+label+"wt_")
    # Checkpoint files are named like '<label>-<iter_no>.<ext>'.
    match_substr = '%s-%d' % (label, iter_no)
    files = [f for f in files if match_substr in f]
    for f in files:
        # Shell out to cp for each matching checkpoint file.
        cmd = 'cp %s%s pretrained_weights/' % (wt_dir, f)
        print (cmd)
        os.system(cmd)
def get_most_recent_iteration(wt_dir, label='iter'):
    """ Gets the most recent iteration number from weights/ dir of given label: ('best' or 'iter')

    :param wt_dir: directory holding checkpoint files named '<label>-<N>.<ext>'
    :param label: checkpoint family to scan
    :returns: the largest iteration number found
    :raises ValueError: if no matching files exist (max of empty set)
    """
    files = [f for f in os.listdir(wt_dir) if label in f]
    ## BUG FIX: `long()` is Python 2 only and raises NameError on Python 3;
    ## int() handles arbitrary precision there.
    numbers = {int(f[f.index('-') + 1:f.index('.')]) for f in files}
    return max(numbers)
def copy_latest(wt_dir, wt_type='best'):
    """ Backup latest weights. """
    # Find the newest checkpoint of this type, then back it up.
    latest_iter = get_most_recent_iteration(label=wt_type, wt_dir=wt_dir)
    copy_weights(latest_iter, label=wt_type, wt_dir=wt_dir)
    return latest_iter
def get_latest_iter(wt_dir, wt_type='best'):
    """ Get latest weights.

    Thin alias over get_most_recent_iteration.
    """
    return get_most_recent_iteration(label=wt_type, wt_dir=wt_dir)
def tf_align_by_pelvis(joints):
    """ Simple centering by pelvis location (joint 0 of each batch item). """
    root = joints[:, 0:1, :]
    return tf.subtract(joints, root)
def tf_mesh2d_center_by_nose(mesh2d, w=224, h=224):
    """ Simple mesh centering by nose/pelvis vtx.

    Shifts the mesh so the anchor vertex (index 0) lands at (w/2, h/5).
    """
    anchor = mesh2d[0:1, :]
    return tf.add(tf.subtract(mesh2d, anchor), [[w / 2, h / 5]])
def tf_perspective_project(points3d, focal, prin_pt, name="perspective_project"):
    """ Simple Perspective Projection.

    :param points3d: (batch, N, 3) camera-space points
    :param focal: (fx, fy) focal lengths in pixels
    :param prin_pt: (tx, ty) principal point in pixels
    :returns: (batch, N, 2) projected pixel coordinates
    """
    ## Perspective divide. The original divided by points3d[:,:,2] (rank 2),
    ## which cannot broadcast against the (batch, N, 2) numerator; slicing
    ## 2:3 keeps a singleton depth axis so broadcasting is well-defined.
    normed = points3d[:, :, 0:2] / points3d[:, :, 2:3]
    ## Apply intrinsics explicitly. The original assembled a 3x3 intrinsic
    ## matrix but tiled it with a rank-mismatched multiples vector and then
    ## discarded the matmul result entirely, so it could never run.
    points2d = tf.add(normed * [focal[0], focal[1]],
                      [prin_pt[0], prin_pt[1]], name=name)
    return points2d
def tf_orthographic_project(points3d, name="orthographic_project"):
    """ Simple Orthographic Projection: keep X,Y and drop the depth axis.

    :param points3d: (batch, N, 3) points
    :returns: (batch, N, 2) -- the X,Y components only
    """
    return points3d[:,:,0:2] ## keep X,Y; Z is discarded
def tf_dyn_scale_and_align(vertices, joints_3d, scale, add_trans):
    """ Dynamic scale and trans adjust.

    Normalizes mesh/joints by the mesh's largest XY extent, rescales to
    `scale`, then re-centres the bounding box at `add_trans`.
    :returns: (vertices, joints_3d, applied scale, applied 2D translation)
    """
    xy_max = tf.expand_dims(tf.reduce_max(vertices, axis=1), axis=1)
    xy_min = tf.expand_dims(tf.reduce_min(vertices, axis=1), axis=1)
    #person_ctr = (xy_max + xy_min)/2.0
    person_range = tf.abs(xy_max-xy_min)
    ## Largest XY extent (z ignored) defines the person's size.
    person_sc = tf.expand_dims(tf.reduce_max(person_range[:,:,0:2], axis=2), axis=2)
    ### Scale person to detector scale
    vertices = tf.div(vertices, person_sc)
    vertices = vertices * scale
    joints_3d = tf.div(joints_3d, person_sc)
    joints_3d = joints_3d * scale
    ### Bbox center (recomputed after rescaling)
    xy_max = tf.expand_dims(tf.reduce_max(vertices, axis=1), axis=1)
    xy_min = tf.expand_dims(tf.reduce_min(vertices, axis=1), axis=1)
    person_ctr = (xy_max + xy_min)/2.0
    ## Pad the 2D translation with a zero z component.
    add_trans = tf.concat([add_trans, tf.zeros_like(add_trans[:,:,0:1])], axis=2)
    vertices = vertices - person_ctr + add_trans
    joints_3d = joints_3d - person_ctr + add_trans
    return vertices, joints_3d, scale[:,0], ((add_trans-person_ctr)[:,0,:2])
def tf_do_scale_and_align(vertices, joints_3d, scale, trans):
    """ Perform Scale and trans. (in world space)

    Translate by `trans` (z shift is zero) first, then scale.
    """
    scale_b = tf.reshape(scale, [-1, 1, 1])
    trans_b = tf.reshape(trans, [-1, 1, 2])
    ## Pad the 2D translation with a zero z component.
    shift = tf.concat([trans_b, tf.zeros_like(trans_b[:, :, 0:1])], axis=2)
    vertices = (vertices + shift) * scale_b
    joints_3d = (joints_3d + shift) * scale_b
    return vertices, joints_3d
def for_tpix_tf_do_scale_and_align(vertices, joints_3d, scale, trans):
    """ Perform Scale and trans. (in Pixel space)

    Normalizes by the mesh's largest XY extent first, then scales and
    finally translates (scale-then-shift, unlike the world-space variant).
    """
    xy_max = tf.expand_dims(tf.reduce_max(vertices, axis=1), axis=1)
    xy_min = tf.expand_dims(tf.reduce_min(vertices, axis=1), axis=1)
    #person_ctr = (xy_max + xy_min)/2.0
    person_range = tf.abs(xy_max-xy_min)
    person_sc = tf.expand_dims(tf.reduce_max(person_range[:,:,0:2], axis=2), axis=2) ##ignore z
    ### Unit scale
    vertices = tf.div(vertices, person_sc)
    joints_3d = tf.div(joints_3d, person_sc)
    ### Broadcastable scale / translation (z shift is zero)
    scale = tf.reshape(scale, [-1, 1, 1])
    trans = tf.reshape(trans, [-1, 1, 2])
    z = tf.zeros_like(trans[:,:,0:1])
    shift = tf.concat([trans, z], axis=2)
    ### Scale person
    vertices = vertices * scale
    joints_3d = joints_3d * scale
    ### Trans in cam space
    vertices = vertices + shift
    joints_3d = joints_3d + shift
    return vertices, joints_3d
def tf_align_with_image_j2d(points2d, img_width, img_height):
    """ Perform center alignment to image coordinate system. (in Pixel space) """
    if(img_width == img_height):
        ## Square image: one scalar offset covers both axes.
        points2d = points2d + (img_width/2)
    else:
        ## Per-axis offsets built as constant tensors.
        ## NOTE(review): int32 zeros plus a Python `/` result -- for odd
        ## sizes img_width/2 is a float and TF rejects mixed dtypes;
        ## confirm callers only pass even sizes (or integer tensors).
        width_tf = tf.zeros((points2d.shape[0], points2d.shape[1], 1),dtype=tf.int32) + (img_width/2)
        height_tf = tf.zeros((points2d.shape[0], points2d.shape[1], 1),dtype=tf.int32) + (img_height/2)
        concatd = tf.concat([width_tf, height_tf], axis=2)
        points2d = points2d + concatd
    return points2d
############ Render pipeline utils ############
MESH_PROP_FACES_FL = './assets/smpl_sampling.pkl'
""" Read face definition. Fixed for a SMPL model. """
# Loaded once at import time; `faces` is the triangle index list shared by
# every renderer below.
with open(os.path.join(os.path.dirname(__file__), MESH_PROP_FACES_FL), 'rb') as f:
    sampling = pkl.load(f)
M = sampling['meshes']
faces = M[0]['f'].astype(np.int32)
faces = tf.convert_to_tensor(faces,dtype=tf.int32)
def_bgcolor = tf.zeros(3) + [0, 0.5, 0] ## Green BG
def colour_pick_img(img_batch, vertices, batch_size):
    """ Pick clr based on mesh registration. [Vtx, Img] -> [Vtx_clr]

    Projects each vertex orthographically into the image and samples the
    pixel under it.
    :param img_batch: (batch, H, W, C) image tensor to sample from
    :param vertices: (batch, 6890, 3) mesh vertices
    :param batch_size: static batch size (used by the TF<=1.13 fallback)
    """
    proj_verts = tf_orthographic_project(vertices)
    verts_pix_space = tf_align_with_image_j2d(proj_verts, cfg.IMG_W, cfg.IMG_H)
    #### Pick colours; gather indices must be (row, col) = (y, x)
    verts_pix_space = tf.cast(verts_pix_space, dtype=tf.int32)
    verts_pix_space = tf.concat([verts_pix_space[:,:,1:], verts_pix_space[:,:,0:1]], axis=2)
    if(cfg.TF_version >= 1.14):
        #### Batched gather for TF 1.14 & above, faster inference.
        ## BUG FIX: the original gathered from `occ_aware_mask`, a name not
        ## defined in this scope; both the TF<=1.13 path and the call sites
        ## sample from the `img_batch` argument.
        clr_picked = tf.gather_nd(params=img_batch, indices=verts_pix_space, batch_dims=1) ### NOTE: only for tf 1.14 and above
    else:
        ### For TF 1.13 and older: gather per batch element and stack.
        for b in range(batch_size):
            if b == 0:
                clr_picked = [tf.gather_nd(params=img_batch[b], indices=verts_pix_space[b])]
            else:
                curr_clr_pick = [tf.gather_nd(params=img_batch[b], indices=verts_pix_space[b])]
                clr_picked = tf.concat([clr_picked, curr_clr_pick], axis=0)
    img_clr_picked = tf.cast(clr_picked, dtype=tf.float32)
    return img_clr_picked
def get_occ_aware_cam_facing_mask(vertices, batch_size, part_based_occlusion_resolve=False, bgcolor=def_bgcolor):
    """ Occlusion-aware vtx weighting, depth based or part-based. [Vtx] -> [Vtx_occ_wtmap]

    :param vertices: (batch, 6890, 3) SMPL vertices
    :param batch_size: static batch size used to broadcast part labels
    :param part_based_occlusion_resolve: resolve occlusion via part labels
        instead of normalized depth
    :param bgcolor: background colour for the occlusion mask render
    """
    if (part_based_occlusion_resolve):
        vertex_colors = np.zeros((batch_size, 6890, 3))
        ### Part segmentation_generation
        vtx_prts = np.load("vtx_clr_smpl_proj_final_part_segmentations.npy")
        ### Vertex parts modify for maximal seperation
        # Relabel part ids so adjacent parts get well-separated values.
        vtx_prts = vtx_prts + 1
        vtx_prts[vtx_prts == 2] = 5
        vtx_prts[vtx_prts == 22] = 7
        vtx_prts[vtx_prts == 8] = 22
        vtx_prts[vtx_prts == 12] = 2
        vtx_prts[vtx_prts == 23] = 13
        vtx_prts[vtx_prts == 19] = 4
        vtx_prts[vtx_prts == 21] = 18
        #### part labelled: normalized label replicated into 3 channels
        vtx_part_labels = np.zeros(vertices.shape)
        vtx_prts = np.expand_dims(vtx_prts, axis=1)
        vtx_prts = vtx_prts / 24.0
        part_label = np.concatenate([vtx_prts, vtx_prts, vtx_prts], axis=1)
        vtx_part_labels[:] = part_label ##broadcast to form batch
    #### Render cam setup (fixed axis-angle rotation of pi about x)
    fixed_rt = np.array([1.0, 0.0, 0.0]) ### tilt,pan,roll
    angle = np.linalg.norm(fixed_rt)
    axis = fixed_rt / angle
    ang = np.pi
    new_an_ax = axis * (ang)
    fixed_rt = new_an_ax
    fixed_t = [0., 0., 0.]
    ##
    fixed_renderer = RenderLayer(cfg.IMG_W, cfg.IMG_H, 3, bgcolor=bgcolor, f=faces, camera_f=[cfg.IMG_W, cfg.IMG_H], camera_c=[cfg.IMG_W/2.0, cfg.IMG_H/2.0], camera_rt=fixed_rt, camera_t=fixed_t)
    vert_norms = dirt_expose.get_vertex_normals(vertices, faces)
    #### Verts selection based on norm
    vert_norms_flat = tf.reshape(vert_norms, [-1, 3])
    fake_angle = tf.ones_like(vert_norms_flat[:,0:1], dtype=tf.float32) ## unit mag
    euler_angles = tfg.geometry.transformation.euler.from_axis_angle(axis=vert_norms_flat, angle=fake_angle)
    vert_norms_euler = tf.reshape(euler_angles, [-1, 6890, 3])
    ### Diff. margin formulation: sharpened sigmoid approximates a step fn
    quant_sharpness_factor = 50
    verts_ndiff = vert_norms_euler[:,:,2:] * -1 ## invert as cam faces
    verts_ndiff = verts_ndiff * quant_sharpness_factor ## centrifugal from 0.0 to get quantization effect
    #verts_ndiff = tf.math.sign(verts_ndiff)
    #verts_ndiff = tf.nn.relu(verts_ndiff)
    verts_ndiff = tf.nn.sigmoid(verts_ndiff)
    if(part_based_occlusion_resolve):
        vtx_part_labels= tf.convert_to_tensor(vtx_part_labels, dtype=tf.float32)
        ## Normal part based resolving occlusion based render
        cam_facing_vtx_clrs = tf.multiply(vtx_part_labels, verts_ndiff)
    else:
        ## Depth based occlusion aware picking to be debugged
        depth_vertices = vertices[:,:,2:]
        ## Normalize the depth between 0 and 1
        min_val = tf.reduce_min(depth_vertices, axis=1, keepdims=True)
        normalized_depth_vertices = tf.div( tf.subtract(depth_vertices, min_val), tf.subtract(tf.reduce_max(depth_vertices, axis=1, keepdims=True), min_val))
        cam_facing_vtx_clrs = tf.tile(normalized_depth_vertices, [1,1,3])
        cam_facing_vtx_clrs = tf.multiply(cam_facing_vtx_clrs, verts_ndiff)
    ## Mask render for occlusion resolution
    occ_aware_mask = fixed_renderer.call(vertices, vc=cam_facing_vtx_clrs) ## occulsion aware z-buffered parts masks
    clr_picked = colour_pick_img(occ_aware_mask, vertices, batch_size)
    ## Occlusion resolution based on z-buffered parts: compare each
    ## vertex's own label/depth against what the render shows at its pixel.
    if(part_based_occlusion_resolve):
        occ_sel_diff = (vtx_part_labels[:,:,0:1] - clr_picked[:,:,0:1] ) * 10.0
    else:
        ### Depth based colour pick
        occ_sel_diff = (normalized_depth_vertices[:,:,0:1] - clr_picked[:,:,0:1] ) * 10.0
    ### Diff. margin soft selection (peaks at zero difference)
    occ_sel = tf.nn.sigmoid(occ_sel_diff) * tf.nn.sigmoid(-1 * occ_sel_diff) * 4.0
    #### Select front facing
    final_front_facing_occ_resolved = tf.multiply(occ_sel, verts_ndiff)
    return final_front_facing_occ_resolved
def apply_ref_symmetry(vclr_picked_resolved, front_facing_occ_resolved_mask, batch_size):
    """ Reflectional symmetry module. [Vtx_clr, Vtx_wtmap] -> [Vtx_clr_symm] """
    ## Symmetry-group membership matrix (group x vertex) and its transpose.
    symm_arr = np.load("./assets/basic_vtx_clr_symm_map.npy")
    symm_arr_transpose = np.transpose(symm_arr)
    sym_map = tf.tile(tf.expand_dims(symm_arr, axis=0), [batch_size, 1, 1])
    sym_map_transpose = tf.tile(tf.expand_dims(symm_arr_transpose, axis=0), [batch_size, 1, 1])
    ## Weighted mean colour per symmetry group (eps guards divide-by-zero).
    group_clr_sum = tf.matmul(sym_map, vclr_picked_resolved)
    group_weight = tf.matmul(sym_map, front_facing_occ_resolved_mask) + 0.00001
    group_clr = tf.truediv(group_clr_sum, group_weight)
    ### Scatter group colours back onto both symmetric halves.
    vclr_symm = tf.matmul(sym_map_transpose, group_clr)
    return vclr_symm
| 31,392 | 12,965 |
import queue
import multiprocessing
import itertools
import sys
__all__ = ["QConnect", "CConnect"]
pulse_length_default = 10 * 10 ** -12  # 10 ps photon pulse length
signal_speed = 2.998 * 10 ** 5  # speed of light in km/s
fiber_length_default = 0.0  # km of fiber assumed when no transit devices exist
class QConnect:
    def __init__(self, *args, transit_devices=None):
        '''
        This is the base class for a quantum connection between multiple agents.

        :param agents \*args: list of agents to connect
        :param List<Devices> transit_devices: list of devices qubits travel through
        '''
        ## Avoid a shared mutable default argument.
        if transit_devices is None:
            transit_devices = []
        agents = list(args)
        self.agents = {}
        self.source_devices = {}
        self.target_devices = {}
        self.transit_devices = {}
        ## One queue per agent, keyed on the (target) agent's name, so
        ## multiple in-flight requests are tracked independently.
        self.queues = {}
        for agent in agents:
            self.agents.update({agent.name: agent})
            self.source_devices.update({agent.name: agent.source_devices})
            self.target_devices.update({agent.name: agent.target_devices})
            self.transit_devices.update({agent.name: transit_devices})
            self.queues.update({agent.name: queue.Queue()})
            ## Register this connection on every *other* agent.
            for agentConnect in agents:
                if agentConnect != agent:
                    agent.qconnections[agentConnect.name] = self

    def put(self, source, target, qubits, source_time):
        '''
        Constructs full list of devices that each qubit must travel through. Sends the qubits
        through source devices. Places qubits and a list of transit and target
        devices on the queue. Queue is keyed on the target agent's name.

        :param String source: name of agent where the qubits being sent originated
        :param String target: name of agent receiving qubits
        :param Array qubits: array of numbers corresponding to qubits the source is sending
        :param Float source_time: time of source agent before sending qubits
        :returns: time qubits took to pass through source devices
        '''
        source_devices = self.source_devices[source]
        non_source_devices = {
            "transit": self.transit_devices[source],
            "target": self.target_devices[target],
        }
        program = self.agents[source].program
        source_delay = 0
        ## Keep track of qubits remaining
        traveling_qubits = qubits
        if not source_devices:
            source_delay += pulse_length_default
        else:
            ## Keep track of qubits lost by each device
            total_lost_qubits = []
            for device in source_devices:
                ## Only apply devices while qubits remain.
                if traveling_qubits:
                    res = device.apply(program, traveling_qubits)
                    if 'lost_qubits' in res.keys():
                        lost_qubits = res['lost_qubits']
                        ## Remove lost qubits from traveling qubits
                        traveling_qubits = list(set(traveling_qubits) - set(lost_qubits))
                        total_lost_qubits += lost_qubits
                    if 'delay' in res.keys(): source_delay += res['delay']
                else: break
            ## Mark lost qubits by negating them (0 becomes -inf).
            ## BUG FIX: the original appended to total_lost_qubits while
            ## iterating it -- an infinite loop whenever any qubit was
            ## lost. Build the flipped list separately (mirrors get()).
            lost_qubits_flipped = []
            for q in total_lost_qubits:
                if q == 0: lost_qubits_flipped.append(float("-inf"))
                else: lost_qubits_flipped.append(-q)
            traveling_qubits += lost_qubits_flipped
        ## Scale source delay time according to number of qubits sent
        scaled_source_delay = source_delay*len(qubits)
        self.queues[target].put((traveling_qubits, non_source_devices, scaled_source_delay, source_time))
        return scaled_source_delay

    def get(self, agent):
        '''
        Pops qubits off of the agent's queue. Sends qubit through transit and target devices,
        simulating a quantum network. Return an array of the qubits that have been altered, as well as
        the time it took the qubit to travel through the network. Some qubits may be lost during transmission. If lost,
        their value will switch to negative, or, in the case of 0, be set to -inf

        :param Agent agent: agent receiving the qubits
        :returns: list of qubits, time to pass through transit and target devices, and the source agent's time
        '''
        traveling_qubits, devices, source_delay, source_time = self.queues[agent.name].get()
        agent.qubits = list(set(traveling_qubits + agent.qubits))
        program = self.agents[agent.name].program
        transit_devices = devices["transit"]
        target_devices = devices["target"]
        ## Number of qubits before any are lost
        num_travel_qubits = len(traveling_qubits)
        travel_delay = 0
        ## Default delays when no devices are configured.
        if not transit_devices:
            travel_delay += fiber_length_default/signal_speed
        if not target_devices:
            travel_delay += 0
        ## Qubits flagged lost upstream carry a negative marker already.
        total_lost_qubits = [q for q in traveling_qubits if q < 0 or q == float("-inf")]
        remaining_qubits = [q for q in traveling_qubits if q >= 0]
        for device in itertools.chain(transit_devices, target_devices):
            if remaining_qubits:
                ## BUG FIX: apply devices to the still-live qubits; the
                ## original passed the stale full traveling_qubits list.
                res = device.apply(program, remaining_qubits)
                if 'lost_qubits' in res.keys():
                    lost_qubits = res['lost_qubits']
                    remaining_qubits = list(set(remaining_qubits) - set(lost_qubits))
                    total_lost_qubits += lost_qubits
                if 'delay' in res.keys(): travel_delay += res['delay']
            else: break
        ## Remove the in-flight qubits before re-adding the final set.
        agent.qubits = list(set(agent.qubits) - set(traveling_qubits))
        lost_qubits_flipped = []
        for q in total_lost_qubits:
            if q == 0: lost_qubits_flipped.append(float("-inf"))
            ## BUG FIX: keep upstream-lost markers negative; the original
            ## negated them again, turning them back into live-looking
            ## positive values (contradicting the documented contract).
            elif q > 0: lost_qubits_flipped.append(-q)
            else: lost_qubits_flipped.append(q)
        traveling_qubits = remaining_qubits + lost_qubits_flipped
        agent.qubits += traveling_qubits
        scaled_delay = travel_delay*num_travel_qubits + source_delay
        return traveling_qubits, scaled_delay, source_time
class CConnect:
    def __init__(self, *args, length=0.0):
        '''
        This is the base class for a classical connection between multiple agents.

        :param agents \*args: list of agents to connect
        :param Float length: distance between first and second agent
        '''
        agents = list(args)
        self.agents = {}
        ## One queue per agent, keyed on the (target) agent's name, so
        ## multiple in-flight requests are tracked independently.
        self.queues = {}
        for agent in agents:
            self.agents[agent.name] = agent
            self.queues[agent.name] = queue.Queue()
            ## Register this connection on every *other* agent.
            for other in agents:
                if other != agent:
                    agent.cconnections[other.name] = self
        self.length = length

    def put(self, target, cbits):
        '''
        Places cbits on queue keyed on the target Agent's name

        :param String target: name of recipient of program
        :param Array cbits: array of numbers corresponding to cbits agent is sending
        :returns: time for cbits to travel
        '''
        ## Serialisation time: one pulse per bit of the payload's size.
        csource_delay = pulse_length_default * 8 * sys.getsizeof(cbits)
        self.queues[target].put((cbits, csource_delay))
        return csource_delay

    def get(self, agent):
        '''
        Pops cbits off of the agent's queue and adds travel delay

        :param String agent: name of the agent receiving the cbits
        :returns: cbits from source and time they took to travel
        '''
        cbits, source_delay = self.queues[agent].get()
        ## Propagation delay over the connection length, scaled per cbit.
        travel_delay = self.length / signal_speed
        return cbits, travel_delay * len(cbits) + source_delay
import typing as T
from pqcli.mechanic import Player, StatType
from pqcli.ui.curses.widgets import Focusable
from .progress_bar_window import DataTableProgressBarWindow
class CharacterSheetWindow(Focusable, DataTableProgressBarWindow):
    # Curses panel showing the player's identity, stats and XP progress.
    def __init__(
        self, player: Player, parent: T.Any, h: int, w: int, y: int, x: int
    ) -> None:
        super().__init__(
            parent,
            h,
            w,
            y,
            x,
            " Character Sheet ",
            align_right=False,
            show_time=True,
        )
        # Re-render on focus changes; this window starts focused.
        self._on_focus_change += self._render
        self._focused = True
        self._player = player
        # Keep the table and XP bar in sync with the player model.
        self._player.connect("level_up", self._sync_traits)
        self._player.stats.connect("change", self._sync_traits)
        self._player.exp_bar.connect("change", self._sync_exp)
        self.sync()
    def stop(self) -> None:
        # Detach the signal handlers attached in __init__.
        super().stop()
        self._player.disconnect("level_up", self._sync_traits)
        self._player.stats.disconnect("change", self._sync_traits)
        self._player.exp_bar.disconnect("change", self._sync_exp)
    def sync(self) -> None:
        # Full refresh: traits table and experience bar.
        self._sync_traits()
        self._sync_exp()
    def _sync_traits(self) -> None:
        # Rebuild the data table from the player model; no-op before the
        # underlying curses window exists.
        if not self._win:
            return
        self._data_table.clear()
        self._data_table.add("Name", self._player.name)
        self._data_table.add("Race", self._player.race.name)
        self._data_table.add("Class", self._player.class_.name)
        self._data_table.add("Level", str(self._player.level))
        # Spacer row between identity rows and the stat rows.
        self._data_table.add(" " * 15, "")
        for stat in StatType:
            self._data_table.add(stat.value, str(self._player.stats[stat]))
        self._render_data_table()
    def _sync_exp(self) -> None:
        # Mirror the player's XP bar into the progress-bar widget.
        self._cur_pos = self._player.exp_bar.position
        self._max_pos = self._player.exp_bar.max_
        self._progress_title = (
            f"Experience ({self._max_pos-self._cur_pos:.0f} XP to go)"
        )
        self._render_progress_bar()
| 2,035 | 643 |
import yaml
import pathlib
import json
import math
class Planet(object):
    """ Orbital record for one planet; raw fields are typically set via
    load() and derived quantities via fixup(). """
    def __init__(self, config):
        # Derived gravitational parameter of the central body (see fixup)
        self.GMS = 0
        # mass (rescaled by 2e30 in fixup)
        self.M = 0.
        self.name = "unknown"
        # period
        self.T = 1.
        # eccentricity
        self.e = 0.
        # semi major axis
        self.a = 1.
        # configuration
        self.config = config

    def fixup(self):
        """ Derive orbital quantities from the raw loaded fields. """
        self.M = self.M / 2.e+30
        # Minimum / maximum orbital radius from the eccentricity
        self.RMin = self.a * (1 - self.e)
        self.RMax = self.a * (1 + self.e)
        self.R = self.a
        # Mean speed: circular-orbit circumference over the period
        self.V = (2 * math.pi * self.R) / self.T
        self.GMS = self.R * self.V**2
        # Speed at the minimum radius (presumably perihelion speed)
        self.vMax = math.sqrt(
            (((1 + self.e) * (1 + self.M)) / self.RMin) * self.GMS)
        # Angular-momentum-like product RMin * vMax
        self.L = self.a * (1 - self.e) * self.vMax
        self.GM = self.GMS * self.M

    @staticmethod
    def load(config, data):
        """ Build a Planet from a mapping, a YAML file path string, or a
        pathlib path.

        :raises TypeError: if `data` does not resolve to a dict
        """
        ## BUG FIX: accept any pathlib path; the original checked only
        ## PosixPath, silently skipping conversion on Windows.
        if isinstance(data, pathlib.PurePath):
            data = str(data)
        if isinstance(data, str):
            with open(data, "r") as data_file:
                data = yaml.safe_load(data_file)
        if not isinstance(data, dict):
            raise TypeError(f"data type {type(data)} cannot be loaded")
        planet = Planet(config)
        for k in data:
            setattr(planet, k, data[k])
        planet.fixup()
        return planet

    def get_dict(self):
        """ Serializable attribute dict (config excluded). """
        data = self.__dict__.copy()
        data.pop("config")
        return data

    def save(self, filename):
        """ Write the planet's fields as YAML to `filename`. """
        with open(filename, 'w') as file:
            yaml.dump(self.get_dict(), file)

    def __str__(self) -> str:
        return f"planet {self.name} => {', '.join(yaml.safe_dump(self.get_dict()).splitlines())}"
| 1,668 | 568 |
import unittest
from tests.unit.test_account import TestAccount
from tests.unit.test_application import TestApplication
from tests.unit.test_usages import TestUsages
from tests.unit.test_conferences import TestConferences
from tests.unit.test_mms_messages import TestMmsMessages
from tests.unit.test_sms_messages import TestSmsMessages
from tests.unit.test_calls import TestCalls
from tests.unit.test_transcriptions import TestTranscriptions
from tests.unit.test_sip_domain import TestSipDomain
from tests.unit.test_sip_credentials import TestSipCredentials
from tests.unit.test_recordings import TestRecordings
from tests.unit.test_notifications import TestNotifications
from tests.unit.test_application_clients import TestApplicationClients
from tests.unit.test_available_phone_number import TestAvailablePhoneNumber
from tests.unit.test_carrier_services import TestCarrierServices
from tests.unit.test_incoming_phone_numbers import TestIncomingPhoneNumbers
from tests.unit.test_ip_access_control_lists import TestIpAccessControlLists
from tests.unit.fraud_control_test import FraudControlTest
def suite():
    """
    Gather all the tests from this module in a test suite.

    Uses TestLoader.loadTestsFromTestCase instead of unittest.makeSuite,
    which was deprecated since Python 3.2 and removed in Python 3.13.
    """
    loader = unittest.TestLoader()
    test_suite = unittest.TestSuite()
    # Order preserved from the original makeSuite-based implementation.
    test_cases = [
        FraudControlTest,
        TestIpAccessControlLists,
        TestIncomingPhoneNumbers,
        TestAvailablePhoneNumber,
        TestApplicationClients,
        TestCarrierServices,
        TestNotifications,
        TestRecordings,
        TestSipCredentials,
        TestSipDomain,
        TestTranscriptions,
        TestCalls,
        TestMmsMessages,
        TestSmsMessages,
        TestConferences,
        TestUsages,
        TestAccount,
        TestApplication,
    ]
    for case in test_cases:
        test_suite.addTest(loader.loadTestsFromTestCase(case))
    return test_suite
if __name__ == '__main__':
    # Run the aggregated suite with the default text runner.
    unittest.TextTestRunner().run(suite())
| 2,479 | 760 |
def _get_square(list):
l1 = [x*x for x in range(0, len(list), 2) if x % 3 != 0]
return l1
print(_get_square([1, 2, 3, 4, 5]))
def get_square(items):
    """Return squares of the elements of *items* that are even and not multiples of 3.

    Parameter renamed from ``list`` to avoid shadowing the builtin.
    """
    return [x * x for x in items if x % 3 != 0 and x % 2 == 0]


print(get_square([1, 2, 3, 4, 5]))
import json
def main(file_path, out_len):
    """Truncate the 'data' list of a JSON file to *out_len* entries, in place.

    Prints the list length before and after truncation (the original
    behavior), and rewrites *file_path* with the shortened payload.
    """
    with open(file_path, 'r') as f:
        # json.load reads straight from the file object; no intermediate string.
        data = json.load(f)
    print(len(data['data']))
    data['data'] = data['data'][:out_len]
    with open(file_path, 'w') as f:
        print(len(data['data']))
        json.dump(data, f)


if __name__ == "__main__":
    main('data/train-v1.1.json', 10)
    main('data/dev-v1.1.json', 2)
| 391 | 156 |
"""Provides ability to run jobs locally or on HPC."""
from collections import OrderedDict
import datetime
import fileinput
import importlib
import logging
import os
import shutil
import jade
from jade.common import (
CONFIG_FILE,
JOBS_OUTPUT_DIR,
OUTPUT_DIR,
RESULTS_FILE,
HPC_CONFIG_FILE,
)
from jade.enums import JobCompletionStatus, Status, ResourceMonitorType
from jade.events import (
EVENTS_FILENAME,
EVENT_NAME_ERROR_LOG,
StructuredLogEvent,
EVENT_CATEGORY_ERROR,
EVENT_CATEGORY_RESOURCE_UTIL,
EVENT_NAME_BYTES_CONSUMED,
EVENT_NAME_SUBMIT_STARTED,
EVENT_NAME_SUBMIT_COMPLETED,
)
from jade.exceptions import InvalidParameter
from jade.extensions.registry import Registry, ExtensionClassType
from jade.hpc.common import HpcType
from jade.hpc.hpc_manager import HpcManager
from jade.hpc.hpc_submitter import HpcSubmitter
from jade.jobs.cluster import Cluster
from jade.jobs.job_configuration_factory import create_config_from_previous_run
from jade.jobs.job_manager_base import JobManagerBase
from jade.jobs.job_runner import JobRunner
from jade.jobs.results_aggregator import ResultsAggregator
from jade.models import SubmitterParams
from jade.models.submission_group import make_submission_group_lookup
from jade.loggers import log_event
from jade.result import serialize_results, ResultsSummary
from jade.utils.repository_info import RepositoryInfo
from jade.utils.subprocess_manager import run_command
from jade.utils.utils import dump_data, get_directory_size_bytes
import jade.version
logger = logging.getLogger(__name__)
class JobSubmitter(JobManagerBase):
    """Submits jobs for execution locally or on an HPC."""

    def __init__(self, config_file, output, is_new):
        """Internal constructor. Callers should use create() or load()."""
        super().__init__(config_file, output)
        self._hpc = None
        self._config_file = config_file
        self._is_new = is_new

    @classmethod
    def create(cls, config_file, params: SubmitterParams, output=OUTPUT_DIR):
        """Creates a new instance.

        Parameters
        ----------
        config_file : JobConfiguration
            configuration for simulation
        params: SubmitterParams
        output : str
            Output directory
        """
        main_file = os.path.join(output, CONFIG_FILE)
        shutil.copyfile(config_file, main_file)
        mgr = cls(main_file, output, True)
        mgr.run_checks(params)
        return mgr

    @classmethod
    def load(cls, output):
        """Loads an instance from an existing directory."""
        return cls(os.path.join(output, CONFIG_FILE), output, False)

    def __repr__(self):
        return f"""num_jobs={self.get_num_jobs()}
results_summary={self.get_results_summmary_report()}"""

    def cancel_jobs(self, cluster):
        """Cancel running and pending jobs."""
        groups = make_submission_group_lookup(cluster.config.submission_groups)
        hpc = HpcManager(groups, self._output)
        for job_id in cluster.job_status.hpc_job_ids:
            hpc.cancel_job(job_id)
        cluster.mark_complete(canceled=True)

    def submit_jobs(self, cluster, force_local=False):
        """Submit simulations. Auto-detect whether the current system is an HPC
        and submit to its queue. Otherwise, run locally.

        Parameters
        ----------
        cluster : Cluster
        force_local : bool
            If on HPC, run jobs through subprocess as if local.

        Returns
        -------
        Status
        """
        if self._is_new:
            logger.info("Submit %s jobs for execution.", self._config.get_num_jobs())
            logger.info("JADE version %s", jade.version.__version__)
            registry = Registry()
            loggers = registry.list_loggers()
            logger.info("Registered modules for logging: %s", ", ".join(loggers))
            self._save_repository_info(registry)
            ResultsAggregator.create(self._output)
            # If an events summary file exists, it is invalid.
            events_file = os.path.join(self._output, EVENTS_FILENAME)
            if os.path.exists(events_file):
                os.remove(events_file)
            event = StructuredLogEvent(
                source="submitter",
                category=EVENT_CATEGORY_RESOURCE_UTIL,
                # Bug fix: this is the submission-*start* event; it was
                # previously logged under EVENT_NAME_SUBMIT_COMPLETED even
                # though EVENT_NAME_SUBMIT_STARTED was imported for it.
                name=EVENT_NAME_SUBMIT_STARTED,
                message="job submission started",
                num_jobs=self.get_num_jobs(),
            )
            log_event(event)
        else:
            self._handle_submission_groups()
        result = Status.IN_PROGRESS
        group = self._config.get_default_submission_group()
        groups = make_submission_group_lookup(cluster.config.submission_groups)
        self._hpc = HpcManager(groups, self._output)
        if self._hpc.hpc_type == HpcType.LOCAL or force_local:
            runner = JobRunner(self._config_file, output=self._output)
            num_processes = group.submitter_params.num_processes
            verbose = group.submitter_params.verbose
            result = runner.run_jobs(verbose=verbose, num_processes=num_processes)
            agg = ResultsAggregator.load(self._output)
            agg.process_results()
            is_complete = True
        else:
            is_complete = self._submit_to_hpc(cluster)
        if is_complete:
            result = self._handle_completion(cluster)
        return result

    def _handle_completion(self, cluster):
        """Aggregate results, log summary events and kick off the next
        pipeline stage if one is configured. Returns a Status."""
        result = Status.GOOD
        self._results = ResultsAggregator.list_results(self._output)
        if len(self._results) != self._config.get_num_jobs():
            finished_jobs = {x.name for x in self._results}
            all_jobs = {x.name for x in self._config.iter_jobs()}
            missing_jobs = sorted(all_jobs.difference(finished_jobs))
            logger.error(
                "Error in result totals. num_results=%s total_num_jobs=%s",
                len(self._results),
                self._config.get_num_jobs(),
            )
            logger.error(
                "These jobs did not finish: %s. Check for process crashes or HPC timeouts.",
                missing_jobs,
            )
            result = Status.ERROR
        else:
            missing_jobs = []
        self.write_results_summary(RESULTS_FILE, missing_jobs)
        self._log_error_log_messages(self._output)
        bytes_consumed = get_directory_size_bytes(self._output, recursive=False)
        event = StructuredLogEvent(
            source="submitter",
            category=EVENT_CATEGORY_RESOURCE_UTIL,
            name=EVENT_NAME_BYTES_CONSUMED,
            message="main output directory size",
            bytes_consumed=bytes_consumed,
        )
        log_event(event)
        event = StructuredLogEvent(
            source="submitter",
            category=EVENT_CATEGORY_RESOURCE_UTIL,
            name=EVENT_NAME_SUBMIT_COMPLETED,
            message="job submission completed",
            num_jobs=self.get_num_jobs(),
        )
        log_event(event)
        group = self._config.get_default_submission_group()
        if group.submitter_params.generate_reports:
            self.generate_reports(self._output, group.submitter_params.resource_monitor_type)
        cluster.mark_complete()
        if cluster.config.pipeline_stage_num is not None:
            # The pipeline directory must be the one above this one.
            pipeline_dir = os.path.dirname(self._output)
            next_stage = cluster.config.pipeline_stage_num + 1
            cmd = (
                f"jade pipeline submit-next-stage {pipeline_dir} "
                f"--stage-num={next_stage} "
                f"--return-code={result.value}"
            )
            run_command(cmd)
        return result

    def write_results_summary(self, filename, missing_jobs):
        """Write the results to filename in the output directory."""
        data = OrderedDict()
        data["jade_version"] = jade.version.__version__
        now = datetime.datetime.now()
        data["timestamp"] = now.strftime("%m/%d/%Y %H:%M:%S")
        data["base_directory"] = os.getcwd()
        results = self._build_results(missing_jobs)
        data["results_summary"] = results["summary"]
        data["missing_jobs"] = missing_jobs
        data["results"] = results["results"]
        output_file = os.path.join(self._output, filename)
        dump_data(data, output_file)
        logger.info("Wrote results to %s.", output_file)
        num_successful = results["summary"]["num_successful"]
        num_canceled = results["summary"]["num_canceled"]
        num_failed = results["summary"]["num_failed"]
        num_missing = len(missing_jobs)
        total = num_successful + num_failed + num_missing
        log_func = logger.info if num_successful == total else logger.warning
        log_func(
            "Successful=%s Failed=%s Canceled=%s Missing=%s Total=%s",
            num_successful,
            num_failed,
            num_canceled,
            num_missing,
            total,
        )
        return output_file

    def _build_results(self, missing_jobs):
        """Tally results by outcome and return the serialized results plus
        a summary dict of per-outcome counts."""
        num_successful = 0
        num_failed = 0
        num_canceled = 0
        for result in self._results:
            if result.is_successful():
                num_successful += 1
            elif result.is_failed():
                num_failed += 1
            else:
                assert result.is_canceled(), str(result)
                num_canceled += 1
        return {
            "results": serialize_results(self._results),
            "summary": {
                "num_successful": num_successful,
                "num_failed": num_failed,
                "num_canceled": num_canceled,
                "num_missing": len(missing_jobs),
            },
        }

    def _save_repository_info(self, registry):
        """Write a git diff patch for each registered extension package
        (and jade itself); packages without repo info are skipped."""
        extensions = registry.list_extensions()
        extension_packages = set(["jade"])
        for ext in extensions:
            exec_module = ext[ExtensionClassType.EXECUTION].__module__
            name = exec_module.split(".")[0]
            extension_packages.add(name)
        for name in extension_packages:
            try:
                package = importlib.import_module(name)
                repo_info = RepositoryInfo(package)
                patch = os.path.join(self._output, f"{name}-diff.patch")
                repo_info.write_diff_patch(patch)
                logger.info("%s repository information: %s", name, repo_info.summary())
            except InvalidParameter:
                # Package is not a git repository; nothing to record.
                pass

    @staticmethod
    def _log_error_log_messages(directory):
        """Log a structured event for each error found in the .e log files."""
        for event in JobSubmitter.find_error_log_messages(directory):
            log_event(event)

    @staticmethod
    def find_error_log_messages(directory):
        """Parse output log files for error messages

        Parameters
        ----------
        directory : str
            output directory

        Yields
        ------
        StructuredLogEvent
            One event per matching line (first matching substring only).
        """
        substrings = (
            "DUE TO TIME LIMIT",  # includes slurmstepd, but check this first
            "srun",
            "slurmstepd",
            "Traceback",
        )
        filenames = [os.path.join(directory, x) for x in os.listdir(directory) if x.endswith(".e")]
        if not filenames:
            return
        for line in fileinput.input(filenames):
            for substring in substrings:
                if substring in line:
                    event = StructuredLogEvent(
                        source="submitter",
                        category=EVENT_CATEGORY_ERROR,
                        name=EVENT_NAME_ERROR_LOG,
                        message="Detected error message in log.",
                        error=substring,
                        filename=fileinput.filename(),
                        line_number=fileinput.lineno(),
                        text=line.strip(),
                    )
                    yield event
                    # Only find one match in a single line.
                    break

    @staticmethod
    def generate_reports(directory, resource_monitor_type):
        """Create reports summarizing the output results of a set of jobs.

        Parameters
        ----------
        directory : str
            output directory
        resource_monitor_type : ResourceMonitorType
        """
        commands = [
            (f"jade show-results -o {directory}", "results.txt"),
            (f"jade show-events -o {directory} --categories Error", "errors.txt"),
        ]
        if resource_monitor_type != ResourceMonitorType.NONE:
            commands.append((f"jade stats show -o {directory}", "stats.txt"))
            commands.append((f"jade stats show -o {directory} -j", "stats_summary.json"))
        if resource_monitor_type == ResourceMonitorType.PERIODIC:
            commands.append((f"jade stats plot -o {directory}", None))
        reports = []
        for cmd in commands:
            output = {}
            ret = run_command(cmd[0], output=output)
            if ret != 0:
                logger.error("Failed to run [%s]: %s: %s", cmd, ret, output["stderr"])
            if cmd[1] is not None:
                filename = os.path.join(directory, cmd[1])
                with open(filename, "w") as f_out:
                    # JSON outputs must stay machine-readable; only prepend
                    # the command line for plain-text reports.
                    if "json" not in cmd[1]:
                        f_out.write(cmd[0] + "\n\n")
                    f_out.write(output["stdout"])
                    reports.append(filename)
        logger.info("Generated reports %s.", " ".join(reports))
        return 0

    def _submit_to_hpc(self, cluster):
        """Run the HPC submitter; return True when all submitters finished."""
        hpc_submitter = HpcSubmitter(
            self._config,
            self._config_file,
            cluster,
            self._output,
        )
        if hpc_submitter.run():
            logger.info("All submitters have completed.")
            return True
        logger.debug("jobs are still pending")
        return False

    def run_checks(self, params: SubmitterParams):
        """Checks the configuration for errors. May mutate the config."""
        self._config.check_job_dependencies(params)
        self._config.check_submission_groups(params)
        self._config.check_spark_config()

    @staticmethod
    def run_submit_jobs(config_file, output, params, pipeline_stage_num=None):
        """Allows submission from an existing Python process."""
        os.makedirs(output, exist_ok=True)
        mgr = JobSubmitter.create(config_file, params, output=output)
        cluster = Cluster.create(
            output,
            mgr.config,
            pipeline_stage_num=pipeline_stage_num,
        )
        local = params.hpc_config.hpc_type == HpcType.LOCAL
        ret = 1
        try:
            status = mgr.submit_jobs(cluster, force_local=local)
            if status == Status.IN_PROGRESS:
                check_cmd = f"jade show-status -o {output}"
                if not params.dry_run:
                    print(f"Jobs are in progress. Run '{check_cmd}' for updates.")
                ret = 0
            else:
                ret = status.value
        finally:
            cluster.demote_from_submitter()
            if local:
                # These files were not used in this case.
                cluster.delete_files_internal()
        return ret
| 15,289 | 4,332 |
import os
import pytest
from starkware.starknet.compiler.compile import (
compile_starknet_files)
from starkware.starknet.testing.starknet import Starknet
from starkware.starknet.testing.contract import StarknetContract
# The path to the contract source code.
CONTRACT_FILE = os.path.join(
os.path.dirname(__file__), "../contracts/MarketMaker.cairo")
# The testing library uses python's asyncio. So the following
# decorator and the ``async`` keyword are needed.
@pytest.mark.asyncio
async def test_record_items():
    """Deploy MarketMaker on a simulated StarkNet and check trade accounting."""
    # Compile the contract source.
    definition = compile_starknet_files([CONTRACT_FILE], debug_info=True)
    # Create a new Starknet class that simulates the StarkNet system.
    starknet = await Starknet.empty()
    # Deploy the contract and wrap it for typed access.
    address = await starknet.deploy(contract_definition=definition)
    contract = StarknetContract(
        starknet=starknet,
        abi=definition.abi,
        contract_address=address,
    )
    initial_market_a = 300
    initial_market_b = 500
    user_gives_a = 40  # User gives 40.
    res = await contract.trade(initial_market_a, initial_market_b, user_gives_a).invoke()
    market_a_post, market_b_post, user_b_post = res
    # The market absorbs the user's token A and pays out token B.
    assert market_a_post == initial_market_a + user_gives_a
    assert market_b_post == initial_market_b - user_b_post
| 1,364 | 458 |
__version__ = '0.1.12'
from ._useragent import UserAgentMiddleware
from ._markdown import MarkdownPipeline
from ._cookies import FirefoxCookiesMiddleware
from ._mongodb import MongoDBPipeline
from ._redis import RedisDupeFilter
| 230 | 72 |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import shutil
import subprocess
import traceback
from urllib.parse import urlparse
import boto3
import tempfile
def download_s3_file(s3_uri: str, local_path: str) -> str:
    """
    Downloads a file to a local path.
    Args:
        s3_uri (str): the S3 URI to get the file from.
        local_path (str) : the local path to download to
    Returns:
        str: the path to the file containing the downloaded path.
    """
    parsed = urlparse(s3_uri, allow_fragments=False)
    bucket_name = parsed.netloc
    object_key = parsed.path.lstrip("/")
    # Keep the original file name inside the target directory.
    destination = os.path.join(local_path, os.path.basename(object_key))
    boto3.client("s3").download_file(bucket_name, object_key, destination)
    return destination
def perform_additional_setup() -> None:
    """Run an optional setup script referenced by AMZN_BRAKET_IMAGE_SETUP_SCRIPT.

    When the environment variable is set, it holds an S3 URI pointing to a
    script; the script is downloaded to a temporary directory, made
    executable and run. Failures are reported but never raised, keeping the
    setup best-effort (the original behavior).
    """
    lib_s3_uri = os.getenv('AMZN_BRAKET_IMAGE_SETUP_SCRIPT')
    if not lib_s3_uri:
        return
    try:
        print("Getting setup script from ", lib_s3_uri)
        with tempfile.TemporaryDirectory() as temp_dir:
            script_to_run = download_s3_file(lib_s3_uri, temp_dir)
            # check=True so a failing chmod or script run surfaces through
            # the except below instead of being silently ignored.
            subprocess.run(["chmod", "+x", script_to_run], check=True)
            subprocess.run([script_to_run], check=True)
    except Exception as e:
        print(f"Unable to install additional libraries.\nException: {e}")


if __name__ == "__main__":
    perform_additional_setup()
| 1,930 | 640 |
import torch.optim
from numpy import ndarray
def get_optim(optim, params, init_lr, steps=1, wd=0, gamma=1,
              momentum=0.9, max_epochs=120):
    """Build an optimizer and a learning-rate scheduler.

    Args:
        optim: one of 'sgd', 'sgd_nomem', 'adam'.
        params: parameters to optimize.
        init_lr: initial learning rate.
        steps: int -> StepLR with period max_epochs/steps; tuple/list/ndarray
            -> MultiStepLR at those epochs (a one-element sequence is
            unwrapped to a scalar first).
        wd: weight decay.
        gamma: multiplicative LR decay factor.
        momentum: SGD momentum (ignored by 'sgd_nomem' and 'adam').
        max_epochs: total epochs, used to size StepLR periods.

    Returns:
        (optimizer, scheduler) tuple.

    Raises:
        ValueError: on an unknown optimizer name or an unsupported
            `steps` type.
    """
    builders = {
        'sgd': lambda: torch.optim.SGD(
            params, lr=init_lr, momentum=momentum, weight_decay=wd),
        'sgd_nomem': lambda: torch.optim.SGD(
            params, lr=init_lr, momentum=0, weight_decay=wd),
        'adam': lambda: torch.optim.Adam(
            params, lr=init_lr, weight_decay=wd,  # amsgrad=True,
            betas=(0.9, .999)),
    }
    if optim not in builders:
        raise ValueError('Unknown optimizer')
    optimizer = builders[optim]()

    # Set the learning rate decay; a one-element sequence becomes a scalar.
    if isinstance(steps, (tuple, list, ndarray)) and len(steps) == 1:
        steps = steps[0]
    if isinstance(steps, int):
        scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer, int(max_epochs / steps), gamma=gamma)
    elif isinstance(steps, (tuple, list, ndarray)):
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, steps, gamma=gamma)
    else:
        raise ValueError('Unknown lr schedule')
    return optimizer, scheduler
| 1,146 | 395 |
from celery import Celery, Task
from microengine_utils import errors
from microengine_utils.datadog import configure_metrics
from microengine_utils.constants import SCAN_FAIL, SCAN_SUCCESS, SCAN_TIME, SCAN_VERDICT
from microengineclamav.models import Bounty, ScanResult, Verdict, Assertion, Phase
from microengineclamav import settings
from microengineclamav.scan import scan, compute_bid
celery_app = Celery('tasks', broker=settings.BROKER)
class MetricsTask(Task):
    """Celery task base that lazily builds and caches a metrics client."""

    _metrics = None

    @property
    def metrics(self):
        # Build the client on first access; reuse it on every later call.
        if self._metrics is not None:
            return self._metrics
        self._metrics = configure_metrics(
            settings.DATADOG_API_KEY,
            settings.DATADOG_APP_KEY,
            settings.ENGINE_NAME,
            poly_work=settings.POLY_WORK
        )
        return self._metrics
@celery_app.task(base=MetricsTask)
def handle_bounty(bounty):
    """Scan a bounty artifact and post back either a vote or an assertion."""
    bounty = Bounty(**bounty)
    result = ScanResult()
    metrics = handle_bounty.metrics
    artifact_tag = f'type:{bounty.artifact_type}'
    with metrics.timer(SCAN_TIME):
        try:
            result = scan(bounty)
            metrics.increment(SCAN_SUCCESS, tags=[artifact_tag])
            metrics.increment(SCAN_VERDICT, tags=[artifact_tag,
                                                  f'verdict:{result.verdict.value}'])
        except errors.CalledProcessScanError:
            metrics.increment(
                SCAN_FAIL, tags=[artifact_tag, 'scan_error:calledprocess']
            )
    if bounty.phase == Phase.ARBITRATION:
        scan_response = result.to_vote()
    else:
        if result.verdict in [Verdict.UNKNOWN, Verdict.SUSPICIOUS]:
            # These results don't bid any NCT.
            bid = 0
        else:
            bid = compute_bid(bounty, result)
        scan_response = result.to_assertion(bid)
    bounty.post_response(scan_response)
| 1,941 | 620 |
import datetime
import os, sys, six, base64, copy
from jinja2 import Environment, FileSystemLoader, Template
from google.auth.transport import requests
from google.cloud import datastore
from google.cloud import storage
from google.cloud import bigquery
import google.oauth2.id_token
from flask import Flask, render_template, request
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField, IntegerField
from wtforms.fields.html5 import DateField
from wtforms.validators import DataRequired, Email
import github3
# Repository/GCS layout for generated DAG and SQL files.
DAG_FOLDER = 'dags/'
SQL_FOLDER = 'dags/sql/'
DAG_REPO_ORG = 'rendybjunior'
DAG_REPO_NAME = 'freddie-dags'
MASTER_BRANCH_NAME = 'master'
# GCP project/bucket placeholders -- presumably substituted per deployment; TODO confirm.
PROJECT = 'xxx'
BUCKET = 'xxx'
# NOTE(review): hard-coded GitHub token placeholder; should come from a secret store.
g = github3.login(token='xxx')
# Rough conversion constants used for BigQuery cost estimates.
DOLLAR_TO_IDR = 14000
BQ_DOLLAR_PER_TB = 5
datastore_client = datastore.Client()
app = Flask(__name__)
# NOTE(review): per-process random secret key -- CSRF tokens will not survive
# restarts or multiple instances; consider a configured key.
SECRET_KEY = os.urandom(32)
app.config['SECRET_KEY'] = SECRET_KEY
class DagForm(FlaskForm):
    """Form for creating/updating a DAG: scheduling metadata plus its tasks
    and their dependencies. Field declaration order determines render order."""
    dag_name = StringField('DAG Name', validators=[DataRequired()], render_kw={"placeholder": "lower_case_underscored"})
    owner = StringField('Owner', validators=[DataRequired()], render_kw={"placeholder": "lower_case_underscored"})
    start_date = DateField('Start Date', validators=[DataRequired()], format='%Y-%m-%d')
    email = StringField('Email', validators=[DataRequired(), Email()], render_kw={"placeholder": "separate@bycomma.com,separate@bycomma2.com,"})
    retries = IntegerField('Num of Retries', validators=[DataRequired()], default=1)
    retry_delay_minutes = IntegerField('Retry Delay (in minutes)', validators=[DataRequired()], default=15)
    schedule_interval = StringField('Schedule (in cron) UTC', validators=[DataRequired()], render_kw={"placeholder": "0 17 * * *"})
    # Comma-separated task ids; dependencies are semicolon-separated pairs.
    tasks = StringField('Tasks', validators=[DataRequired()], render_kw={"placeholder": "separated_by_comma, lower_case_underscored"})
    dependencies = StringField('Dependencies', validators=[DataRequired()], render_kw={"placeholder": "eg. prev_task_id1,task_id1;prev_task_id1,task_id2)"})
    submit = SubmitField('Save')
class TaskForm(FlaskForm):
    """Form for a single SQL task. Three submit buttons distinguish the
    requested action (save / dry-run check / limited run)."""
    task_id = StringField('Task ID', validators=[DataRequired()], render_kw={"placeholder": "lower_case_underscored"})
    destination_table = StringField('Destination table', validators=[DataRequired()], render_kw={"placeholder": "my-project.test.freddie_mercury"})
    sql = TextAreaField('SQL', validators=[DataRequired()])
    sql_params = StringField('SQL Param to test SQL. THIS VALUE FOR TESTING ONLY', render_kw={"placeholder": "example: ds=2019-01-01,dsnodash=20190101"})
    save = SubmitField('Save')
    check_query = SubmitField('Check Query')
    run_query = SubmitField('Run Query')
def store_task(task_id, destination_table, sql, sql_params, updated_by, type_):
    """Persist a Task entity in Datastore, base64-encoding the SQL body."""
    key = datastore_client.key('Task', task_id)
    entity = datastore.Entity(key=key, exclude_from_indexes=['sql_base64'])
    payload = {
        'type': type_,
        'destination_table': destination_table,
        'sql_base64': base64.b64encode(sql.encode()),
        'sql_params': sql_params,
        'updated_at': datetime.datetime.now(),
        'updated_by': updated_by,
    }
    entity.update(payload)
    datastore_client.put(entity)
    return True, "{} saved".format(task_id)  # todo check put return value
def fetch_task(task_id):
    """Fetch a single Task entity as a dict; return "" when it does not exist.

    The empty-string sentinel matches existing callers, which guard with
    ``if task != "":`` before using the result.
    """
    key = datastore_client.key('Task', task_id)
    task = datastore_client.get(key=key)
    if task is None:
        # Bug fix: a missing entity previously crashed with AttributeError.
        return ""
    return {
        'type': task.get('type'),
        'task_id': task.key.name,
        'sql': base64.b64decode(task.get('sql_base64')).decode(),
        'sql_params': task.get('sql_params'),
        'destination_table': task.get('destination_table')
    }
def fetch_tasks(limit=10):
    """Return up to *limit* most recently updated Task entities as dicts."""
    query = datastore_client.query(kind='Task')
    query.order = ['-updated_at']
    return [
        {
            'type': task.get('type'),
            'task_id': task.key.name,
            'sql': base64.b64decode(task.get('sql_base64')).decode(),
            'sql_params': task.get('sql_params'),
            'destination_table': task.get('destination_table')
        }
        for task in query.fetch(limit=limit)
    ]
def store_dag(dag_name, owner, start_date, retries, retry_delay_minutes, email, schedule_interval, tasks, dependencies, updated_by):
    """Persist a Dag entity in Datastore, stamped with the update time/user."""
    entity = datastore.Entity(key=datastore_client.key('Dag', dag_name))
    payload = {
        'dag_name': dag_name,
        'owner': owner,
        'start_date': start_date,
        'retries': retries,
        'retry_delay_minutes': retry_delay_minutes,
        'email': email,
        'schedule_interval': schedule_interval,
        'tasks': tasks,
        'dependencies': dependencies,
        'updated_at': datetime.datetime.now(),
        'updated_by': updated_by,
    }
    entity.update(payload)
    datastore_client.put(entity)
    return True, "{} saved".format(dag_name)  # todo check put return value
def fetch_dags(limit=10):
    """Return up to *limit* most recently updated Dag entities as dicts."""
    query = datastore_client.query(kind='Dag')
    query.order = ['-updated_at']
    return [
        {
            'dag_name': dag.key.name,
            'owner': dag.get('owner'),
            'start_date': dag.get('start_date'),
            'retries': dag.get('retries'),
            'retry_delay_minutes': dag.get('retry_delay_minutes'),
            'email': dag.get('email'),
            'schedule_interval': dag.get('schedule_interval'),
            'tasks': dag.get('tasks'),
            'dependencies': dag.get('dependencies'),
            'updated_by': dag.get('updated_by')
        }
        for dag in query.fetch(limit=limit)
    ]
def fetch_dag(dag_name):
    """Fetch a single Dag entity as a dict; return None when it does not exist.

    Callers already guard with ``if dag is not None:``.
    """
    key = datastore_client.key('Dag', dag_name)
    dag = datastore_client.get(key=key)
    if dag is None:
        # Bug fix: a missing entity previously crashed with AttributeError.
        return None
    return {
        'dag_name': dag.key.name,
        'owner': dag.get('owner'),
        'start_date': dag.get('start_date'),
        'retries': dag.get('retries'),
        'retry_delay_minutes': dag.get('retry_delay_minutes'),
        'email': dag.get('email'),
        'schedule_interval': dag.get('schedule_interval'),
        'tasks': dag.get('tasks'),
        'dependencies': dag.get('dependencies'),
        'updated_by': dag.get('updated_by')
    }
def upload_sql(task_id, sql):
    """Upload the SQL body for *task_id* to the GCS dags/sql folder."""
    blob_path = os.path.join(SQL_FOLDER, task_id + ".sql")
    storage_client = storage.Client(project=PROJECT)
    blob = storage_client.get_bucket(BUCKET).blob(blob_path)
    blob.upload_from_string(sql)
    url = blob.public_url
    if isinstance(url, six.binary_type):
        url = url.decode('utf-8')
    print(url)
    # todo return meaningful status & message
def upload_dag(dag_name, dag_text):
    """Upload the rendered DAG file for *dag_name* to the GCS dags folder."""
    blob_path = os.path.join(DAG_FOLDER, dag_name + ".py")
    storage_client = storage.Client(project=PROJECT)
    blob = storage_client.get_bucket(BUCKET).blob(blob_path)
    blob.upload_from_string(dag_text)
    url = blob.public_url
    if isinstance(url, six.binary_type):
        url = url.decode('utf-8')
    print(url)
    # todo return meaningful status & message
def check_query(sql):
    """Dry-run *sql* on BigQuery and return (ok, message) with a cost estimate.

    Returns (False, exception) when the query is invalid.
    """
    job_config = bigquery.QueryJobConfig()
    job_config.dry_run = True
    job_config.use_query_cache = False
    job_config.use_legacy_sql = False
    client = bigquery.Client(project=PROJECT)
    try:
        query_job = client.query(sql, job_config)
        query_size_megabyte = query_job.total_bytes_processed / 1024 / 1024
        query_size_terabyte = query_size_megabyte / 1024 / 1024
        dollar_est = BQ_DOLLAR_PER_TB * query_size_terabyte
        rp_est = dollar_est * DOLLAR_TO_IDR
        message = "Total MB that will be processed: {0:.2f}".format(query_size_megabyte)
        message += ". Cost estimate: ${0:.2f}".format(dollar_est)
        # Bug fix: removed the stray closing parenthesis from the message.
        message += " or Rp{0:.2f}".format(rp_est)
        return True, message
    except Exception as exc:
        # Bug fix: narrowed from a bare except, which also swallowed
        # SystemExit/KeyboardInterrupt. Return value is unchanged
        # (sys.exc_info()[1] is this same exception instance).
        return False, exc
def run_query(sql, limit=25):
    """Run *sql* on BigQuery with a LIMIT appended; return (rows, message).

    Returns ([], exception) on failure.
    """
    sql_with_limit = sql + "\n LIMIT {}".format(limit)
    job_config = bigquery.QueryJobConfig()
    job_config.flatten_results = True
    job_config.use_query_cache = False
    job_config.use_legacy_sql = False
    client = bigquery.Client(project=PROJECT)
    try:
        query_job = client.query(sql_with_limit, job_config=job_config)  # API request
        rows = query_job.result()
        return rows, "OK"
    except Exception as exc:
        # Bug fix: narrowed from a bare except, which also swallowed
        # SystemExit/KeyboardInterrupt.
        return [], exc
def create_branch(repository, dag_name):
    """Create a timestamped branch off master and return its name."""
    timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
    branch_name = '-'.join([dag_name, timestamp])
    head_sha = repository.branch(MASTER_BRANCH_NAME).commit.sha
    repository.create_branch_ref(branch_name, head_sha)
    return branch_name
def _upsert_repo_file(repository, branch_name, path, content, create_message,
                      update_message, committer):
    """Create *path* on *branch_name*, or update it when it already exists."""
    existing = None
    try:
        existing = repository.file_contents(path=path, ref=branch_name)
    except Exception:
        # file_contents raises when the path does not exist on the branch;
        # treat that as "create".
        pass
    if existing is None:
        repository.create_file(path=path,
                               message=create_message,
                               content=content,
                               branch=branch_name,
                               committer=committer)
    else:
        existing.update(message=update_message,
                        content=content,
                        branch=branch_name,
                        committer=committer)


def create_github_pr(dag_name, dag_file_content, sql_file_contents, committer_name, committer_email):
    """Push the DAG file and its per-task SQL files to a fresh branch and
    open a pull request against master.

    The previously duplicated create-or-update logic for the DAG file and
    each SQL file is factored into _upsert_repo_file.
    """
    repository = g.repository(DAG_REPO_ORG, DAG_REPO_NAME)
    branch_name = create_branch(repository, dag_name)
    committer = {
        "name": committer_name,
        "email": committer_email
    }
    _upsert_repo_file(repository, branch_name,
                      DAG_FOLDER + dag_name + '.py',
                      dag_file_content,
                      "Create DAG File {}".format(dag_name),
                      "Update DAG File {}".format(dag_name),
                      committer)
    for task_id, sql in sql_file_contents:
        _upsert_repo_file(repository, branch_name,
                          SQL_FOLDER + task_id + '.sql',
                          sql,
                          "Create SQL for task {}".format(task_id),
                          "Update SQL File for task {}".format(task_id),
                          committer)
    pull_body = "*test* _123_"  # TODO
    repository.create_pull(title=branch_name, base=MASTER_BRANCH_NAME, head=branch_name, body=pull_body)
# Shared transport used for verifying Firebase ID tokens in the routes below.
firebase_request_adapter = requests.Request()
@app.route('/')
def root():
    """Landing page: verify the Firebase token cookie, then list recent
    DAGs and tasks. Renders with None placeholders when unauthenticated."""
    claims = None
    error_message = None
    dags = None
    tasks = None
    id_token = request.cookies.get("token")
    if id_token:
        try:
            # Verify the token against the Firebase Auth API on each page
            # load; caching the result (e.g. in an encrypted session) would
            # improve performance.
            claims = google.oauth2.id_token.verify_firebase_token(
                id_token, firebase_request_adapter)
            tasks = fetch_tasks()
            dags = fetch_dags()
        except ValueError as exc:
            # Raised when the token is expired or fails verification.
            error_message = str(exc)
    return render_template(
        'index.html',
        user_data=claims, error_message=error_message, dags=dags, tasks=tasks)
@app.route('/dag_form', methods=["GET", "POST"])
def dag_form():
    """Render and handle the DAG form.

    POST (valid form): store the DAG definition, render the DAG file from
    the ``dag_template.py`` Jinja template, and open a GitHub PR containing
    the rendered DAG plus one SQL file per referenced task.
    GET with a ``dag_name`` query arg: pre-populate the form from the
    stored DAG of that name.
    """
    # Verify Firebase auth.
    # NOTE(review): unlike root(), a ValueError from an expired/invalid
    # token is not caught here and would surface as a 500 -- confirm that
    # is intended.
    id_token = request.cookies.get("token")
    claims = None
    if id_token:
        claims = google.oauth2.id_token.verify_firebase_token(
            id_token, firebase_request_adapter)
    form = DagForm()
    dag_text = ""
    if form.validate_on_submit():
        # Load the DAG Jinja template from this app's own templates dir.
        root = os.path.dirname(os.path.abspath(__file__))
        templates_dir = os.path.join(root, 'templates')
        env = Environment(loader=FileSystemLoader(templates_dir))
        template = env.get_template('dag_template.py')
        # Persist the raw form data first.
        # NOTE(review): claims['email'] raises TypeError when the user is
        # not authenticated (claims is None) -- confirm the form cannot be
        # submitted without a valid token.
        store_dag(dag_name=form.dag_name.data,
                  owner=form.owner.data,
                  start_date=form.start_date.data.strftime("%Y-%m-%d"),
                  email=form.email.data,
                  retries=form.retries.data,
                  retry_delay_minutes=form.retry_delay_minutes.data,
                  schedule_interval=form.schedule_interval.data,
                  tasks=form.tasks.data,
                  dependencies=form.dependencies.data,
                  updated_by=claims['email'])
        tasks = []
        sql_file_contents = []
        # form.tasks.data is a comma-separated list of task ids.
        for task_id in form.tasks.data.replace(' ', '').split(','):
            if not task_id:
                # Skip empty segments (leading/trailing/double commas),
                # which would otherwise be looked up as task id ''.
                continue
            task = fetch_task(task_id)
            if task != "":
                sql_file_contents.append((task_id, task.get('sql').encode()))
                # The generated DAG references the SQL by file path rather
                # than carrying it inline.
                task_for_dag = copy.deepcopy(task)
                task_for_dag['sql'] = 'sql/' + task_id + ".sql"
                tasks.append(task_for_dag)
        dependencies = []
        # form.dependencies.data is ';'-separated "preceding,task" pairs.
        for dependency in form.dependencies.data.replace(' ', '').split(';'):
            if not dependency:
                # Skip empty segments (an empty field or a trailing ';');
                # previously these raised IndexError on temp[1] below.
                continue
            temp = dependency.split(',')
            dependencies.append({
                'preceding_task_id': temp[0],
                'task_id': temp[1]
            })
        dag_text = template.render(
            dag_name=form.dag_name.data,
            owner=form.owner.data,
            start_date=form.start_date.data.strftime('%Y-%m-%d'),
            email=form.email.data,
            retries=form.retries.data,
            retry_delay_minutes=form.retry_delay_minutes.data,
            schedule_interval=form.schedule_interval.data,
            tasks=tasks,
            dependencies=dependencies,
        )
        create_github_pr(dag_name=form.dag_name.data,
                         dag_file_content=dag_text.encode(),
                         sql_file_contents=sql_file_contents,
                         committer_name=claims['name'],
                         committer_email=claims['email'])
    else:
        if request.args.get('dag_name') is not None:
            dag = fetch_dag(dag_name=request.args.get('dag_name'))
            if dag is not None:
                form.dag_name.data = dag.get('dag_name')
                form.owner.data = dag.get('owner')
                form.start_date.data = datetime.datetime.strptime(
                    dag.get('start_date'), "%Y-%m-%d")
                form.retries.data = dag.get('retries')
                form.retry_delay_minutes.data = dag.get('retry_delay_minutes')
                form.email.data = dag.get('email')
                form.schedule_interval.data = dag.get('schedule_interval')
                form.tasks.data = dag.get('tasks')
                form.dependencies.data = dag.get('dependencies')
    return render_template('dag_form.html', user_data=claims, title='DAG Form',
                           form=form, dag_text=dag_text)
@app.route('/task_form', methods=["GET", "POST"])
def task_form():
    """Render and handle the task form.

    POST (valid form): render the SQL with any supplied Jinja parameters,
    validate it via check_query(), then -- depending on which submit button
    was pressed -- save the task or run the query.
    GET with a ``task_id`` query arg: pre-populate the form from the
    stored task of that id.
    """
    # Verify Firebase auth.
    # NOTE(review): a ValueError from an expired/invalid token is not
    # caught here -- confirm that is intended.
    id_token = request.cookies.get("token")
    claims = None
    if id_token:
        claims = google.oauth2.id_token.verify_firebase_token(
            id_token, firebase_request_adapter)
    form = TaskForm()
    is_save_ok, save_msg = None, None
    is_query_ok, check_query_result = None, None
    run_query_result, run_query_result_headers, run_query_result_msg = [], [], None
    if form.validate_on_submit():
        sql = form.sql.data
        if form.sql_params.data:
            # sql_params is a comma-separated "key=value" list; render the
            # SQL as a Jinja template with those values substituted.
            params = form.sql_params.data.replace(' ', '').split(',')
            param_dict = {}
            for param in params:
                # Split on the first '=' only so values may themselves
                # contain '=' (the previous split('=')[1] truncated them).
                key, value = param.split('=', 1)
                param_dict[key] = value
            sql = Template(sql).render(param_dict)
        is_query_ok, check_query_result = check_query(sql)
        if form.save.data:
            # "Save" button pressed: persist only if the query checked out.
            if is_query_ok:
                is_save_ok, save_msg = store_task(task_id=form.task_id.data,
                                                  destination_table=form.destination_table.data,
                                                  sql=sql,
                                                  sql_params=form.sql_params.data,
                                                  type_='BQ_ETL',
                                                  updated_by=claims['email'])
            else:
                save_msg = "Can not save, something happened, see error msg"
        # The "check query" button needs no extra branch: check_query()
        # above already ran and its result is rendered below.
        elif form.run_query.data:
            # "Run query" button pressed.
            if is_query_ok:
                run_query_result, run_query_result_msg = run_query(sql)
                # NOTE(review): assumes run_query() returns an object with
                # a .schema on success -- confirm its failure-path return.
                run_query_result_headers = [field.name for field in run_query_result.schema]
            else:
                run_query_result_msg = "Can not run, something happened, see error msg"
    else:
        if request.args.get('task_id') is not None:
            task = fetch_task(task_id=request.args.get('task_id'))
            if task is not None:
                form.task_id.data = task.get('task_id')
                form.destination_table.data = task.get('destination_table')
                form.sql.data = task.get('sql')
                form.sql_params.data = task.get('sql_params')
    return render_template('task_form.html', user_data=claims, title='Task Form', form=form,
                           is_save_ok=is_save_ok, save_msg=save_msg,
                           is_query_ok=is_query_ok, check_query_result=check_query_result,
                           run_query_result_headers=run_query_result_headers,
                           run_query_result=run_query_result, run_query_result_msg=run_query_result_msg)
if __name__ == '__main__':
    # Local development entry point only. When deploying to Google App
    # Engine, a webserver process such as Gunicorn will serve the app. This
    # can be configured by adding an `entrypoint` to app.yaml.
    # Flask's development server will automatically serve static files in
    # the "static" directory. See:
    # http://flask.pocoo.org/docs/1.0/quickstart/#static-files. Once deployed,
    # App Engine itself will serve those files as configured in app.yaml.
    app.run(host='127.0.0.1', port=8080, debug=True)
NotaParcial1 = int(input("Nota primer Parcial: "))
NotaParcial2 = int(input("Nota segundo Parcial: "))
NotaTaller = int(input("Nota del Taller: "))
NotaProyecto = int(input("Nota del Proyecto: "))
Parcial1 = NotaParcial1*(25/100)
Parcial2 = NotaParcial2*(25/100)
Taller = NotaTaller*(20/100)
Proyecto = NotaProyecto*(30/100)
nota_final = Parcial1 + Parcial2 + Taller + Proyecto
print (nota_final)
"""
Entrada
Ingresar los valores de las notas:
Nota Primer Parcial
Nota Segundo Parcial
Nota Taller
Nota Proyecto
Proceso
Calcular el valor del porcentaje de cada nota:
Porcentaje Parcial 1=Nota Pirmer Parcial * 25%
Porcentaje Parcial 2=Nota Segundo Parcial * 25%
Porcentaje Taller=Nota Taller * 20%
Porcentaje Proyecto=Nota Proyecto * 30%
Calcular la nota final sumando la suma de los porcentajes:
Nota Final = Porcentaje Parcial 1 + Porcentaje Parcial 2 + Porcentaje Taller + Porcentaje Proyecto
Salida
Devolver la Nota Final
"""
| 982 | 426 |
#!/usr/bin/env python3
"""
This script checksums, signs, and compresses malvarma-<version>.img, and
creates malvarma-<version>.tar.bz2.
The author's GPG signature is hardcoded below.
"""
import os
import shutil
import sys
import subprocess
if __name__ == "__main__":
if len(sys.argv) == 1:
print("Usage: python3 package.py malvarma-<version>.img")
sys.exit(1)
imgfile = sys.argv[1]
folder_name = imgfile.split(".img")[0]
if not os.path.exists(imgfile):
print("Error: {imgfile} does not exist.".format(imgfile=imgfile))
sys.exit(1)
print("Checksumming...")
subprocess.check_call("sha256sum {imgfile} > {imgfile}.sha256".format(imgfile=imgfile),
shell=True, stderr=subprocess.STDOUT)
print("Signing...")
subprocess.check_call("gpg --detach-sign --default-key 0x90DB43617CCC1632 --sign {imgfile}".format(imgfile=imgfile),
shell=True, stderr=subprocess.STDOUT)
print("Compressing")
shutil.rmtree(folder_name, ignore_errors=True)
os.makedirs(folder_name)
shutil.move(imgfile, folder_name)
shutil.move(imgfile + ".sig", folder_name)
shutil.move(imgfile + ".sha256", folder_name)
subprocess.check_call("tar -cvjSf {folder_name}.tar.bz2 {folder_name}".format(folder_name=folder_name),
shell=True, stderr=subprocess.STDOUT)
| 1,395 | 500 |
import os
import time
from signal import signal, SIGINT
from TauLidarCommon.frame import FrameType
from TauLidarCamera.camera import Camera
outputDir = './samples'
runLoop = True
def setup():
camera = None
ports = Camera.scan() ## Scan for available Tau Camera devices
if len(ports) > 0:
camera = Camera.open(ports[0]) ## Open the first available Tau Camera
camera.setModulationChannel(0) ## autoChannelEnabled: 0, channel: 0
camera.setIntegrationTime3d(0, 1000) ## set integration time 0: 1000
camera.setMinimalAmplitude(0, 10) ## set minimal amplitude 0: 80
cameraInfo = camera.info()
print("\nToF camera opened successfully:")
print(" model: %s" % cameraInfo.model)
print(" firmware: %s" % cameraInfo.firmware)
print(" uid: %s" % cameraInfo.uid)
print(" resolution: %s" % cameraInfo.resolution)
print(" port: %s" % cameraInfo.port)
print("\nPress Ctrl-c in terminal to shutdown ...")
return camera
def run(camera):
global runLoop
count = 0
if not os.path.exists(outputDir):
os.makedirs(outputDir)
print('Recording...')
while runLoop:
frame = camera.readFrameRawData(FrameType.DISTANCE_AMPLITUDE)
if frame:
fName = '%s/%s.frame'%(outputDir, time.time())
with open(fName, "wb") as binary_file:
binary_file.write(frame)
print('\rFrame: %d'%count, end='')
count += 1
def cleanup(camera):
print('\nShutting down ...')
camera.close()
def handler(signal_received, frame):
global runLoop
runLoop = False
if __name__ == "__main__":
camera = setup()
signal(SIGINT, handler)
if camera:
try:
run(camera)
except Exception as e:
print(e)
cleanup(camera)
| 1,955 | 611 |
import sys
from collections import deque
def bfs(x):
q = deque([x])
dist = [0] * (N + 1)
check = [False] * (N + 1)
cnt = -1
check[x] = True
while q:
size = len(q)
cnt += 1
for _ in range(size):
x = q.popleft()
for y in a[x]:
if dist[y] == 0 and not check[y]:
dist[y] = dist[x] + 1
q.append(y)
check[y] = True
return cnt
if __name__ == '__main__':
N = int(input())
a = [[] for _ in range(N + 1)]
result = 1000000
res = []
while True:
u, v = map(int, sys.stdin.readline().split())
if u == -1 and v == -1:
break
a[u].append(v)
a[v].append(u)
for i in range(1, N + 1):
score = bfs(i)
if score < result:
res = []
result = score
res.append(i)
elif score == result:
res.append(i)
print(result, len(res))
for s in res:
print(s, end=' ') | 1,038 | 373 |
import json
from twisted.logger import Logger
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.wamp import ApplicationRunner
from bokeh.client import push_session
from bokeh.plotting import figure, curdoc
from bokeh.models.widgets import Panel, Tabs
from bokeh.models import Range1d
import numpy as np
class test_bokeh_wamp(ApplicationSession):
def __init__(self, config=None):
ApplicationSession.__init__(self, config)
@inlineCallbacks
def onJoin(self, details):
"""
Initialize the WAMP settings. This is called before everything is setup to ensure
the WAMP settings are initialized.
:return:
"""
self.log.info("WAMP connected")
yield self.subscribe(self.on_ens_json_data, u"com.rti.data.ens")
self.log.info("test Bokehs WAMP init")
def on_ens_json_data(self, data):
"""
Called when JSON Ensemble data is received from WAMP.
:param data: JSON object containing serial data.
:return:
"""
json_data = json.loads(data) # convert to JSON
bins = []
ampB0 = []
ampB1 = []
ampB2 = []
ampB3 = []
corrB0 = []
corrB1 = []
corrB2 = []
corrB3 = []
for bin in range(json_data['EnsembleData']["NumBins"]):
bins.append(bin)
ampB0.append(json_data['Amplitude']["Amplitude"][bin][0])
ampB1.append(json_data['Amplitude']["Amplitude"][bin][1])
ampB2.append(json_data['Amplitude']["Amplitude"][bin][2])
ampB3.append(json_data['Amplitude']["Amplitude"][bin][3])
corrB0.append(json_data['Correlation']["Correlation"][bin][0])
corrB1.append(json_data['Correlation']["Correlation"][bin][1])
corrB2.append(json_data['Correlation']["Correlation"][bin][2])
corrB3.append(json_data['Correlation']["Correlation"][bin][3])
self.config.extra['ampB0'].data_source.data["y"] = bins
self.config.extra['ampB0'].data_source.data["x"] = ampB0
self.config.extra['ampB1'].data_source.data["y"] = bins
self.config.extra['ampB1'].data_source.data["x"] = ampB1
self.config.extra['ampB2'].data_source.data["y"] = bins
self.config.extra['ampB2'].data_source.data["x"] = ampB2
self.config.extra['ampB3'].data_source.data["y"] = bins
self.config.extra['ampB3'].data_source.data["x"] = ampB3
self.config.extra['corrB0'].data_source.data["y"] = bins
self.config.extra['corrB0'].data_source.data["x"] = corrB0
self.config.extra['corrB1'].data_source.data["y"] = bins
self.config.extra['corrB1'].data_source.data["x"] = corrB1
self.config.extra['corrB2'].data_source.data["y"] = bins
self.config.extra['corrB2'].data_source.data["x"] = corrB2
self.config.extra['corrB3'].data_source.data["y"] = bins
self.config.extra['corrB3'].data_source.data["x"] = corrB3
if __name__ == '__main__':
x = np.array([1])
y = np.array([1])
TOOLS = 'pan,box_zoom,wheel_zoom,box_select,crosshair,resize,reset,save,hover'
ampPlot = figure(plot_width=600, plot_height=800, tools=TOOLS, x_range=Range1d(0, 140))
ampPlot.legend.location = "top_left"
ampPlot.legend.click_policy = "hide"
ampPlot.xaxis[0].axis_label="dB"
ampPlot.yaxis[0].axis_label = "Bin"
ampB0 = ampPlot.line(x=x, y=y, line_width=2, alpha=.85, color='red', legend="B0")
ampB1 = ampPlot.line(x=x, y=y, line_width=2, alpha=.85, color='green', legend="B1")
ampB2 = ampPlot.line(x=x, y=y, line_width=2, alpha=.85, color='blue', legend="B2")
ampB3 = ampPlot.line(x=x, y=y, line_width=2, alpha=.85, color='orange', legend="B3")
tabAmp = Panel(child=ampPlot, title="Amplitude")
corrPlot = figure(plot_width=600, plot_height=800, tools=TOOLS, x_range=Range1d(0, 1))
corrPlot.legend.location = "top_left"
corrPlot.legend.click_policy = "hide"
corrPlot.xaxis[0].axis_label = "% (percent)"
corrPlot.yaxis[0].axis_label = "Bin"
corrB0 = corrPlot.line(x=x, y=y, line_width=2, alpha=.85, color='red', legend="B0")
corrB1 = corrPlot.line(x=x, y=y, line_width=2, alpha=.85, color='green', legend="B1")
corrB2 = corrPlot.line(x=x, y=y, line_width=2, alpha=.85, color='blue', legend="B2")
corrB3 = corrPlot.line(x=x, y=y, line_width=2, alpha=.85, color='orange', legend="B3")
tabCorr = Panel(child=corrPlot, title="Correlation")
tabs = Tabs(tabs=[tabAmp, tabCorr])
# open a session to keep our local document in sync with server
session = push_session(curdoc())
session.show(tabs) # open the document in a browser
# Start the WAMP connection
# Connect the main window to the WAMP connection
runner = ApplicationRunner(url=u"ws://localhost:55058/ws", realm=u"realm1",
extra={'ampB0': ampB0, 'ampB1': ampB1, 'ampB2': ampB2, 'ampB3': ampB3,
'corrB0': corrB0, 'corrB1': corrB1, 'corrB2': corrB2, 'corrB3': corrB3})
runner.run(test_bokeh_wamp)
session.loop_until_closed() # run forever | 5,235 | 1,877 |
from pathlib import Path
import sys
from unittest.mock import patch
from nornir import InitNornir
import nornsible
from nornsible import InitNornsible, nornsible_delegate, nornsible_task
NORNSIBLE_DIR = nornsible.__file__
TEST_DIR = f"{Path(NORNSIBLE_DIR).parents[1]}/tests/"
@nornsible_task
def custom_task_example(task):
return "Hello, world!"
@nornsible_task
def custom_task_example_2(task):
return "Hello, world!"
@nornsible_delegate
def custom_task_example_3(task):
return "Hello, world!"
def test_nornsible_task_skip_task():
testargs = ["somescript", "-l", "localhost", "-s", "custom_task_example"]
with patch.object(sys, "argv", testargs):
nr = InitNornir(
inventory={
"plugin": "nornir.plugins.inventory.simple.SimpleInventory",
"options": {
"host_file": f"{TEST_DIR}_test_nornir_inventory/hosts.yaml",
"group_file": f"{TEST_DIR}_test_nornir_inventory/groups.yaml",
},
},
logging={"enabled": False},
)
nr = InitNornsible(nr)
task_result = nr.run(task=custom_task_example)
assert set(task_result.keys()) == {"delegate", "localhost"}
assert task_result["localhost"].result == "Task skipped!"
assert task_result["delegate"].result == "Task skipped, delegate host!"
def test_nornsible_task_skip_task_disable_delegate():
testargs = ["somescript", "-l", "localhost", "-s", "custom_task_example", "-d"]
with patch.object(sys, "argv", testargs):
nr = InitNornir(
inventory={
"plugin": "nornir.plugins.inventory.simple.SimpleInventory",
"options": {
"host_file": f"{TEST_DIR}_test_nornir_inventory/hosts.yaml",
"group_file": f"{TEST_DIR}_test_nornir_inventory/groups.yaml",
},
},
logging={"enabled": False},
)
nr = InitNornsible(nr)
task_result = nr.run(task=custom_task_example)
assert set(task_result.keys()) == {"localhost"}
assert task_result["localhost"].result == "Task skipped!"
def test_nornsible_task_explicit_task():
testargs = ["somescript", "-l", "localhost", "-t", "custom_task_example_2"]
with patch.object(sys, "argv", testargs):
nr = InitNornir(
inventory={
"plugin": "nornir.plugins.inventory.simple.SimpleInventory",
"options": {
"host_file": f"{TEST_DIR}_test_nornir_inventory/hosts.yaml",
"group_file": f"{TEST_DIR}_test_nornir_inventory/groups.yaml",
},
},
logging={"enabled": False},
)
nr = InitNornsible(nr)
print(nr.inventory.hosts)
tasks = [custom_task_example, custom_task_example_2]
task_results = []
for task in tasks:
task_results.append(nr.run(task=task))
assert task_results[0]["localhost"].result == "Task skipped!"
assert task_results[1]["localhost"].result == "Hello, world!"
assert task_results[0]["delegate"].result == "Task skipped, delegate host!"
assert task_results[1]["delegate"].result == "Task skipped, delegate host!"
def test_nornsible_task_no_tags():
testargs = ["somescript", "-l", "localhost"]
with patch.object(sys, "argv", testargs):
nr = InitNornir(
inventory={
"plugin": "nornir.plugins.inventory.simple.SimpleInventory",
"options": {
"host_file": f"{TEST_DIR}_test_nornir_inventory/hosts.yaml",
"group_file": f"{TEST_DIR}_test_nornir_inventory/groups.yaml",
},
},
logging={"enabled": False},
)
nr = InitNornsible(nr)
print(nr.inventory.hosts)
tasks = [custom_task_example, custom_task_example_2]
task_results = []
for task in tasks:
task_results.append(nr.run(task=task))
assert task_results[0]["localhost"].result == "Hello, world!"
assert task_results[1]["localhost"].result == "Hello, world!"
def test_nornsible_delegate():
testargs = ["somescript", "-l", "localhost"]
with patch.object(sys, "argv", testargs):
nr = InitNornir(
inventory={
"plugin": "nornir.plugins.inventory.simple.SimpleInventory",
"options": {
"host_file": f"{TEST_DIR}_test_nornir_inventory/hosts.yaml",
"group_file": f"{TEST_DIR}_test_nornir_inventory/groups.yaml",
},
},
logging={"enabled": False},
)
nr = InitNornsible(nr)
print(nr.inventory.hosts)
tasks = [custom_task_example_3]
task_results = []
for task in tasks:
task_results.append(nr.run(task=task))
assert task_results[0]["localhost"].result == "Task skipped, non-delegate host!"
def test_nornsible_delegate_disable_delegate():
testargs = ["somescript", "-l", "localhost", "-d"]
with patch.object(sys, "argv", testargs):
nr = InitNornir(
inventory={
"plugin": "nornir.plugins.inventory.simple.SimpleInventory",
"options": {
"host_file": f"{TEST_DIR}_test_nornir_inventory/hosts.yaml",
"group_file": f"{TEST_DIR}_test_nornir_inventory/groups.yaml",
},
},
logging={"enabled": False},
)
nr = InitNornsible(nr)
print(nr.inventory.hosts)
tasks = [custom_task_example_3]
task_results = []
for task in tasks:
task_results.append(nr.run(task=task))
assert task_results[0]["localhost"].result == "Task skipped, delegate host!"
| 5,875 | 1,780 |
## Web File
def insertWeb(filetype, json, cursor, conn, uid):
if (filetype == 'web'):
web_page_node(json,uid,cursor,conn) # [pages] / [pageNode]
web_entry_node(json, uid, cursor, conn) # [pages] / [entriesNode]
def web_entry_response(json_entries_node, uid, cursor, conn, parentid):
tblName = 'lab_web_entries_response'
featureAttrs = {'status', 'statusText', 'httpVersion', 'cookieNumber', 'redirectURL', 'headersSize', 'bodySize'}
featureAttrs2 = {'Date', 'Server', 'X-Powered-By', 'Content-Encoding', 'Content-Length', 'Keep-Alive', 'Connection', 'Content-Type'}
featureAttrs3 = {'size', 'compression', 'mimeType', 'encoding'}
vals = {}
values = []
cntattr = 0
for tis in featureAttrs:
vals[cntattr] = tis
values.append(json_entries_node['response'][tis])
cntattr = cntattr + 1
vals[cntattr] = 'web_entries_id'
values.append(parentid)
cntattr = cntattr + 1
attrsInJson,typesInJson = toCommaStringDict(vals)
#print type(attrsInJson)
#print attrsInJson
vals2 = {}
values2 = []
cntattr2 = 0
for tis2 in featureAttrs2:
vals2,values2 = appendJsonKey(json_entries_node['response']['headers'], tis2, vals2, values2, cntattr2)
cntattr2 = cntattr2 + 1
renameArrayItem(vals2, 'Date', 'header_Date')
renameArrayItem(vals2, 'Server', 'header_Server')
renameArrayItem(vals2, 'X-Powered-By', 'header_XPoweredBy')
renameArrayItem(vals2, 'Content-Encoding', 'header_ContentEncoding')
renameArrayItem(vals2, 'Content-Length', 'header_ContentLength')
renameArrayItem(vals2, 'Keep-Alive', 'header_KeepAlive')
renameArrayItem(vals2, 'Connection', 'header_Connection')
renameArrayItem(vals2, 'Content-Type', 'header_ContentType')
attrsInJson2,typesInJson2 = toCommaStringDict(vals2)
#print type(attrsInJson2)
#print attrsInJson2
vals3 = {}
values3 = []
cntattr3 = 0
for tis3 in featureAttrs3:
vals3,values3 = appendJsonKey(json_entries_node['response']['content'], tis3, vals3, values3, cntattr3)
cntattr3 = cntattr3 + 1
renameArrayItem(vals3, 'size', 'content_size')
renameArrayItem(vals3, 'compression', 'content_compression')
renameArrayItem(vals3, 'mimeType', 'content_mimeType')
renameArrayItem(vals3, 'encoding', 'content_encoding')
attrsInJson3,typesInJson3 = toCommaStringDict(vals3)
#print type(attrsInJson3)
#print attrsInJson3
attrsInJsonCombined = attrsInJson
typesInJsonCombined = typesInJson
if ( attrsInJson2 != ''):
attrsInJsonCombined = attrsInJsonCombined + ',' + attrsInJson2
typesInJsonCombined = typesInJsonCombined + ',' + typesInJson2
values.extend(values2)
if ( attrsInJson3 != ''):
attrsInJsonCombined = attrsInJsonCombined + ',' + attrsInJson3
typesInJsonCombined = typesInJsonCombined + ',' + typesInJson3
values.extend(values3)
dbinsert(tblName,attrsInJsonCombined,typesInJsonCombined,cursor,values,conn)
def web_entry_request(json_entries_node, uid, cursor, conn, parentid):
tblName = 'lab_web_entries_request'
featureAttrs = {'method', 'url', 'httpVersion', 'cookieNumber', 'headerSize', 'bodySize'}
featureAttrs2 = {'Host', 'User-Agent', 'Accept', 'Accept-Encoding', 'Connection', 'Content-Length', 'Keep-Alive'}
vals = {}
values = []
cntattr = 0
for tis in featureAttrs:
vals[cntattr] = tis
values.append(json_entries_node['request'][tis])
cntattr = cntattr + 1
vals[cntattr] = 'web_entries_id'
values.append(parentid)
cntattr = cntattr + 1
attrsInJson,typesInJson = toCommaStringDict(vals)
#print type(attrsInJson)
#print attrsInJson
vals2 = {}
values2 = []
cntattr2 = 0
for tis2 in featureAttrs2:
vals2,values2 = appendJsonKey(json_entries_node['request']['headers'], tis2, vals2, values2, cntattr2)
cntattr2 = cntattr2 + 1
renameArrayItem(vals2, 'Host', 'header_Host')
renameArrayItem(vals2, 'User-Agent', 'header_UserAgent')
renameArrayItem(vals2, 'Accept', 'header_Accept')
renameArrayItem(vals2, 'Accept-Encoding', 'header_AcceptEncoding')
renameArrayItem(vals2, 'Connection', 'header_Connection')
renameArrayItem(vals2, 'Content-Length', 'header_ContentLength')
renameArrayItem(vals2, 'Keep-Alive', 'header_KeepAlive')
attrsInJson2,typesInJson2 = toCommaStringDict(vals2)
#print type(attrsInJson2)
#print attrsInJson2
attrsInJsonCombined = attrsInJson
typesInJsonCombined = typesInJson
if ( attrsInJson2 != ''):
attrsInJsonCombined = attrsInJson + ',' + attrsInJson2
typesInJsonCombined = typesInJson + ',' + typesInJson2
values.extend(values2)
dbinsert(tblName,attrsInJsonCombined,typesInJsonCombined,cursor,values,conn)
def web_entry_node(json, uid, cursor, conn):
tblName = 'lab_web_entries'
featureAttrs = {'pageid', 'entryStartTime', 'time', 'serverIPAddress', 'connection'}
featureAttrs2 = {'blocked', 'dns', 'connect', 'send', 'wait', 'receive', 'ssl'}
featureAttrs3 = {'beforeRequestCacheEntries', 'afterRequestCacheEntries', 'hitCount'}
for jiv in json['pages']:
for innerjiv in jiv['entriesNode']:
cntattr = 0
attrsInJson = ''
typesInJson = ''
keytypevals = {}
values = []
for tis in featureAttrs:
keytypevals,values = appendJsonKey(innerjiv, tis, keytypevals, values, cntattr)
cntattr = cntattr + 1
attrsInJson,typesInJson = toCommaStringDict(keytypevals)
cntattr2 = 0
attrsInJson2 = ''
typesInJson2 = ''
keytypevals2 = {}
values2 = []
for tis2 in featureAttrs2:
keytypevals2,values2 = appendJsonKey(innerjiv['timings'], tis2, keytypevals2, values2, cntattr2)
cntattr2 = cntattr2 + 1
attrsInJson2,typesInJson2 = toCommaStringDict(keytypevals2)
cntattr3 = 0
attrsInJson3 = ''
typesInJson3 = ''
keytypevals3 = {}
values3 = []
for tis3 in featureAttrs3:
keytypevals3,values3 = appendJsonKey(innerjiv['cache'], tis3, keytypevals3, values3, cntattr3)
cntattr3 = cntattr3 + 1
attrsInJson3,typesInJson3 = toCommaStringDict(keytypevals3)
##combine
attrsInJsonCombined = attrsInJson + ',' + attrsInJson2 + ',' + attrsInJson3
typesInJsonCombined = typesInJson + ',' + typesInJson2 + ',' + typesInJson3
values.extend(values2)
values.extend(values3)
#insert
dbinsert(tblName,attrsInJsonCombined,typesInJsonCombined,cursor,values,conn)
##entry request
web_entry_id = getMaxId(tblName,cursor,conn)
web_entry_request(innerjiv, uid, cursor, conn, web_entry_id)
web_entry_response(innerjiv, uid, cursor, conn, web_entry_id)
def web_page_node(json, uid, cursor, conn):
tblName = 'lab_web_pages'
featureAttrs = {'tabid', 'pageStartTime', 'pageid', 'pagetitle', 'pageOnContentLoad', 'pageOnLoad', 'origin'}
cntattr = 0
for jiv in json['pages']:
attrsInJson = ''
typesInJson = ''
keytypevals = {}
values = []
for tis in featureAttrs:
keytypevals,values = appendJsonKey(jiv['pageNode'], tis, keytypevals, values, cntattr)
cntattr = cntattr + 1
keytypevals[cntattr] = 'uid'
cntattr = cntattr + 1
values.append(uid)
renameArrayItem(keytypevals, 'pageid', 'id')
attrsInJson,typesInJson = toCommaStringDict(keytypevals)
dbinsert(tblName,attrsInJson,typesInJson,cursor,values,conn)
## Helper Functions
def dbinsert(tblName,fields,fieldTypes,cursor,values,conn):
sql_command = "insert into " + tblName + " (" + fields + ") values (" + fieldTypes + ")"
#print sql_command
#print values
cursor.execute(sql_command, values)
conn.commit()
def getMaxId(tblName,cursor, conn):
sql = "select max(id) from " + tblName
cursor.execute(sql)
results = cursor.fetchall()
return str(results[0][0])
def isJsonKey(json, tisKey):
for key,val in json.items():
if (key == tisKey):
return True
break
return False
def appendJsonKey(json, key, vals, values, cntattr):
if (isJsonKey(json,key)):
vals[cntattr] = str(key)
values.append(json[key])
return vals,values
def toCommaStringDict(keytypevals):
ret = ''
ret2 = ''
for key in keytypevals:
ret = ret + '`' + keytypevals[key] + '`' + ','
ret2 = ret2 + '%s' + ','
if (len(ret) > 0):
ret = ret[:-1]
ret2 = ret2[:-1]
return ret,ret2
def renameArrayItem(arr, frm, to):
for key in arr:
try:
if( arr[key] == frm):
arr[key] = to
except:
dummy = 0
return arr
def appendJsonKeyConcat(json, key, vals, values, cntattr):
ret = ''
if (isJsonKey(json,key)):
for i in json[key]:
ret = (ret + ' ' + i).strip()
vals[cntattr] = str(key)
values.append(ret)
return vals,values | 8,338 | 3,430 |
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for beam_search_helper."""
from absl.testing import parameterized
import lingvo.compat as tf
from lingvo.core import beam_search_helper
from lingvo.core import py_utils
from lingvo.core import test_utils
import numpy as np
def GetBeamSearchHelperResults(sess,
num_hyps_per_beam,
pass_seq_lengths=False,
force_eos_in_top_k=False):
np.random.seed(9384758)
tf.random.set_seed(8274758)
vocab_size = 12
src_len = 5
tgt_len = 7
src_batch_size = 2
tgt_batch_size = src_batch_size * num_hyps_per_beam
p = beam_search_helper.BeamSearchHelper.Params().Set(
name='bsh', target_seq_len=tgt_len, force_eos_in_top_k=force_eos_in_top_k)
bs_helper = p.Instantiate()
def InitBeamSearchState(unused_theta, unused_encoder_outputs,
unused_num_hyps_per_beam):
atten_probs = tf.constant(
np.random.normal(size=(tgt_batch_size, src_len)), dtype=tf.float32)
return (py_utils.NestedMap({
'log_probs': tf.zeros([tgt_batch_size, vocab_size]),
'atten_probs': atten_probs,
}), py_utils.NestedMap({'atten_probs': atten_probs}))
def PreBeamSearchStepCallback(unused_theta, unused_encoder_outputs,
unused_step_ids, states,
unused_num_hyps_per_beam):
atten_probs = tf.identity(states.atten_probs)
logits = tf.random.normal([tgt_batch_size, vocab_size], seed=8273747)
return (py_utils.NestedMap({
'atten_probs': atten_probs,
'log_probs': logits
}), states)
def PostBeamSearchStepCallback(unused_theta, unused_encoder_outputs,
unused_new_step_ids, states):
return states
src_enc = tf.random.normal([src_len, src_batch_size, 8], seed=982774838)
src_enc_padding = tf.constant(
[[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 1.0], [1.0, 1.0]],
dtype=tf.float32)
encoder_outputs = py_utils.NestedMap(encoded=src_enc, padding=src_enc_padding)
if pass_seq_lengths:
encoder_outputs['seq_lengths'] = tf.constant([4, 3], dtype=tf.int32)
theta = py_utils.NestedMap()
decoder_output = bs_helper.BeamSearchDecode(theta, encoder_outputs,
num_hyps_per_beam,
InitBeamSearchState,
PreBeamSearchStepCallback,
PostBeamSearchStepCallback)
topk_ids, topk_lens, topk_scores = sess.run([
decoder_output.topk_ids, decoder_output.topk_lens,
decoder_output.topk_scores
])
return topk_ids, topk_lens, topk_scores
class BeamSearchHelperTest(test_utils.TestCase, parameterized.TestCase):
# TODO(yonghui): Add more thorough tests.
def testBeamSearchHelper(self):
with self.session(use_gpu=False) as sess:
topk_ids, topk_lens, topk_scores = GetBeamSearchHelperResults(
sess, num_hyps_per_beam=3)
print(np.array_repr(topk_ids))
print(np.array_repr(topk_lens))
print(np.array_repr(topk_scores))
expected_topk_ids = [[4, 3, 4, 3, 2, 0, 0], [4, 3, 11, 2, 0, 0, 0],
[4, 3, 6, 2, 0, 0, 0], [6, 0, 4, 6, 6, 11, 2],
[6, 0, 4, 6, 1, 2, 0], [6, 0, 4, 6, 6, 2, 0]]
expected_topk_lens = [5, 4, 4, 7, 6, 6]
expected_topk_scores = [[8.27340603, 6.26949024, 5.59490776],
[9.74691486, 8.46679497, 7.14809656]]
self.assertAllEqual(expected_topk_ids, topk_ids.tolist())
self.assertAllEqual(expected_topk_lens, topk_lens.tolist())
self.assertAllClose(expected_topk_scores, topk_scores)
def testBeamSearchHelperHypsOne(self):
with self.session(use_gpu=False) as sess:
topk_ids, topk_lens, topk_scores = GetBeamSearchHelperResults(
sess, num_hyps_per_beam=1)
print(np.array_repr(topk_ids))
print(np.array_repr(topk_lens))
print(np.array_repr(topk_scores))
expected_topk_ids = [[9, 2, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]]
expected_topk_lens = [2, 0]
expected_topk_scores = [[3.778749], [0.0]]
self.assertAllEqual(expected_topk_ids, topk_ids.tolist())
self.assertAllEqual(expected_topk_lens, topk_lens.tolist())
self.assertAllClose(expected_topk_scores, topk_scores)
def testBeamSearchHelperWithSeqLengths(self):
with self.session(use_gpu=False) as sess:
topk_ids, topk_lens, topk_scores = GetBeamSearchHelperResults(
sess, num_hyps_per_beam=3, pass_seq_lengths=True)
print(np.array_repr(topk_ids))
print(np.array_repr(topk_lens))
print(np.array_repr(topk_scores))
expected_topk_ids = [[4, 3, 4, 3, 2, 0, 0], [4, 3, 11, 2, 0, 0, 0],
[4, 3, 6, 2, 0, 0, 0], [6, 0, 4, 6, 6, 11, 2],
[6, 0, 4, 6, 1, 2, 0], [6, 0, 4, 6, 6, 2, 0]]
expected_topk_lens = [5, 4, 4, 7, 6, 6]
expected_topk_scores = [[8.27340603, 6.26949024, 5.59490776],
[9.74691486, 8.46679497, 7.14809656]]
self.assertAllEqual(expected_topk_ids, topk_ids.tolist())
self.assertAllEqual(expected_topk_lens, topk_lens.tolist())
self.assertAllClose(expected_topk_scores, topk_scores)
def testBeamSearchHelperForceEos(self):
with self.session(use_gpu=False) as sess:
topk_ids, topk_lens, topk_scores = GetBeamSearchHelperResults(
sess, num_hyps_per_beam=3, force_eos_in_top_k=True)
print(np.array_repr(topk_ids))
print(np.array_repr(topk_lens))
print(np.array_repr(topk_scores))
expected_topk_ids = [
[4, 3, 11, 6, 9, 3, 2],
[4, 3, 11, 6, 9, 7, 2],
[4, 3, 4, 1, 4, 1, 2],
[6, 0, 4, 6, 6, 11, 2],
[6, 0, 4, 6, 3, 3, 2],
[6, 0, 4, 6, 1, 2, 0],
]
expected_topk_lens = [7, 7, 7, 7, 7, 6]
expected_topk_scores = [[10.576365, 9.345996, 9.125197],
[9.746915, 8.905771, 8.466795]]
self.assertAllEqual(expected_topk_ids, topk_ids.tolist())
self.assertAllEqual(expected_topk_lens, topk_lens.tolist())
self.assertAllClose(expected_topk_scores, topk_scores)
@parameterized.named_parameters(
    ('eos_valid_in_topk', 100.0, True),
    ('eos_valid_not_in_topk', 100.0, False),
    ('eos_not_valid_in_topk', 0.5, True),
    ('eos_not_valid_not_in_topk', 0.5, False),
)
def testBeamSearchForceEosInTopK(self, valid_eos_max_logit_delta,
                                 force_eos_in_top_k):
  """Hyps terminate only when EOS is both forced into the top-k and valid.

  EOS is given a logit 1.0 below every other token, so EOS is only
  'valid' when valid_eos_max_logit_delta >= 1.0, and it only reaches the
  top-k when force_eos_in_top_k is set; otherwise no hyp ever ends.
  """
  with self.session() as sess:
    vocab_size = 300
    tgt_len = 100
    num_hyps_per_beam = 3
    src_batch_size = 2
    tgt_batch_size = src_batch_size * num_hyps_per_beam
    p = beam_search_helper.BeamSearchHelper.Params().Set(
        name='bsh',
        target_seq_len=tgt_len,
        num_hyps_per_beam=num_hyps_per_beam,
        beam_size=100000.0,  # Beam search until the end.
        valid_eos_max_logit_delta=valid_eos_max_logit_delta,
        force_eos_in_top_k=force_eos_in_top_k,
    )
    bs_helper = p.Instantiate()

    def InitBeamSearchCallBack(unused_theta, unused_encoder_outputs,
                               unused_num_hyps_per_beam):
      # Uniform initial log-probs; no attention sources (last dim 0).
      return py_utils.NestedMap(
          log_probs=tf.zeros([tgt_batch_size, vocab_size]),
          atten_probs=tf.zeros([tgt_batch_size, 0])), py_utils.NestedMap()

    def PreBeamSearchStepCallback(unused_theta, unused_encoder_outputs,
                                  unused_step_ids, states,
                                  unused_num_hyps_per_beam):
      # Same probs for each id.
      logits = tf.zeros([tgt_batch_size, vocab_size])
      # Except eos is slightly lower prob (1.0 below everything else).
      logits = logits - 1.0 * tf.expand_dims(
          tf.one_hot(p.target_eos_id, vocab_size), 0)
      return py_utils.NestedMap(
          atten_probs=tf.zeros([tgt_batch_size, 0]), log_probs=logits), states

    def PostBeamSearchStepCallback(unused_theta, unused_encoder_outputs,
                                   unused_new_step_ids, states):
      # No recurrent state to update.
      return states

    encoder_outputs = py_utils.NestedMap(
        seq_lengths=tf.zeros([src_batch_size], dtype=tf.int32))
    theta = py_utils.NestedMap()
    beam_search_output = bs_helper.BeamSearchDecode(
        theta,
        encoder_outputs,
        init_beam_search_state=InitBeamSearchCallBack,
        pre_beam_search_step_callback=PreBeamSearchStepCallback,
        post_beam_search_step_callback=PostBeamSearchStepCallback)
    topk_lens = sess.run(beam_search_output.topk_lens)
    if not force_eos_in_top_k or valid_eos_max_logit_delta < 1.0:
      # EOS never fires, so no hypothesis terminates.
      self.assertAllEqual(topk_lens, np.zeros_like(topk_lens))
    else:
      self.assertAllGreater(topk_lens, 0)
@parameterized.named_parameters(
    # eos score is too low to terminate
    # 1 hyp terminated at first frame by eoc, and then two other
    # terminated at second frame by eoc
    ('last_chunk_eoc_in_topk', True, True, -10., [1, 2, 2, 1, 2, 2],
     [[-1., -1., -1.], [-1., -1., -1.]]),
    # Not last chunk or not forcing in topk, eoc can not terminate.
    # eos score is low, can not terminate either
    ('last_chunk_eoc_not_in_topk1', True, False, -10., [0, 0, 0, 0, 0, 0],
     [[-0., -0., -0.], [-0., -0., -0.]]),
    ('last_chunk_eoc_not_in_topk2', False, True, -10., [0, 0, 0, 0, 0, 0],
     [[-0., -0., -0.], [-0., -0., -0.]]),
    ('last_chunk_eoc_not_in_topk3', False, False, -10., [0, 0, 0, 0, 0, 0],
     [[-0., -0., -0.], [-0., -0., -0.]]),
    # eos score is high and can terminate
    # 1 hyp terminated at first frame by eos, and then two other
    # terminated at second frame by eos
    ('last_chunk_eoc_not_in_topk_eos_in_top_k', False, False, 1.,
     [1, 2, 2, 1, 2, 2], [[1., 1., 1.], [1., 1., 1.]]),
    # both can terminate at each step, use the lower score.
    ('last_chunk_eoc_in_topk_eos_in_top_k', True, True, 1.,
     [1, 2, 2, 1, 2, 2], [[-1., -1., -1.], [-1., -1., -1.]]),
)
def testBeamSearchForceLastChunkEocInTopK(self, is_last_chunk,
                                          force_last_chunk_eoc_in_top_k,
                                          eos_score, expected_topk_lens,
                                          expected_topk_scores):
  """EOC can terminate a hyp only on the last chunk and only when forced
  into the top-k; otherwise termination depends on the EOS score."""
  with self.session() as sess:
    vocab_size = 30
    tgt_len = 10
    num_hyps_per_beam = 3
    src_batch_size = 2
    tgt_batch_size = src_batch_size * num_hyps_per_beam
    p = beam_search_helper.BeamSearchHelper.Params().Set(
        name='bsh',
        target_eoc_id=0,
        target_seq_len=tgt_len,
        num_hyps_per_beam=num_hyps_per_beam,
        beam_size=100000.0,  # Beam search until the end.
        force_last_chunk_eoc_in_top_k=force_last_chunk_eoc_in_top_k,
    )
    bs_helper = p.Instantiate()

    def InitBeamSearchCallBack(unused_theta, unused_encoder_outputs,
                               unused_num_hyps_per_beam):
      # Uniform initial log-probs; the initial state is not yet flagged
      # as the last chunk.
      return py_utils.NestedMap(
          log_probs=tf.zeros([tgt_batch_size, vocab_size]),
          atten_probs=tf.zeros([tgt_batch_size, 0]),
          is_last_chunk=tf.zeros([tgt_batch_size],
                                 tf.bool)), py_utils.NestedMap()

    def PreBeamSearchStepCallback(unused_theta, unused_encoder_outputs,
                                  unused_step_ids, states,
                                  unused_num_hyps_per_beam):
      # Same probs for each id.
      logits = tf.zeros([tgt_batch_size, vocab_size])
      # Except eoc has slightly lower score.
      logits = logits - 1.0 * tf.expand_dims(
          tf.one_hot(p.target_eoc_id, vocab_size), 0)
      # eos gets the parameterized score: -10. means eos can never
      # terminate; 1. means it can.
      logits = logits + eos_score * tf.expand_dims(
          tf.one_hot(p.target_eos_id, vocab_size), 0)
      return py_utils.NestedMap(
          atten_probs=tf.zeros([tgt_batch_size, 0]),
          log_probs=logits,
          is_last_chunk=tf.fill([tgt_batch_size],
                                value=is_last_chunk)), states

    def PostBeamSearchStepCallback(unused_theta, unused_encoder_outputs,
                                   unused_new_step_ids, states):
      # No recurrent state to update.
      return states

    encoder_outputs = py_utils.NestedMap(
        seq_lengths=tf.zeros([src_batch_size], dtype=tf.int32))
    theta = py_utils.NestedMap()
    beam_search_output = bs_helper.BeamSearchDecode(
        theta,
        encoder_outputs,
        init_beam_search_state=InitBeamSearchCallBack,
        pre_beam_search_step_callback=PreBeamSearchStepCallback,
        post_beam_search_step_callback=PostBeamSearchStepCallback)
    topk_lens, topk_scores = sess.run(
        [beam_search_output.topk_lens, beam_search_output.topk_scores])
    self.assertAllEqual(topk_lens, expected_topk_lens)
    self.assertAllClose(topk_scores, expected_topk_scores, atol=1e-6)
def testCustomStepIds(self):
  """Beam search whose initial state supplies custom 'step_ids'."""
  with self.session(use_gpu=False):
    np.random.seed(9384758)
    tf.random.set_seed(8274758)
    vocab_size = 12
    src_len = 5
    tgt_len = 7
    num_hyps_per_beam = 3
    src_batch_size = 2
    tgt_batch_size = src_batch_size * num_hyps_per_beam
    p = beam_search_helper.BeamSearchHelper.Params().Set(
        name='bsh', target_seq_len=tgt_len)
    bs_helper = p.Instantiate()

    def InitBeamSearchState(unused_theta, unused_encoder_outputs,
                            unused_num_hyps_per_beam):
      atten_probs = tf.constant(
          np.random.normal(size=(tgt_batch_size, src_len)), dtype=tf.float32)
      # In addition to the standard log_probs / atten_probs, the initial
      # state carries explicit all-zero step_ids.
      return (py_utils.NestedMap({
          'log_probs': tf.zeros([tgt_batch_size, vocab_size]),
          'atten_probs': atten_probs,
          'step_ids': tf.zeros([tgt_batch_size, 1], dtype=tf.int32)
      }), py_utils.NestedMap({'atten_probs': atten_probs}))

    def PreBeamSearchStepCallback(unused_theta, unused_encoder_outputs,
                                  unused_step_ids, states,
                                  unused_num_hyps_per_beam):
      atten_probs = tf.identity(states.atten_probs)
      # Seeded random logits keep the decode deterministic.
      logits = tf.random.normal([tgt_batch_size, vocab_size], seed=8273747)
      return (py_utils.NestedMap({
          'atten_probs': atten_probs,
          'log_probs': logits
      }), states)

    def PostBeamSearchStepCallback(unused_theta, unused_encoder_outputs,
                                   unused_new_step_ids, states):
      # No recurrent state to update.
      return states

    src_enc = tf.random.normal([src_len, src_batch_size, 8], seed=982774838)
    src_enc_padding = tf.constant(
        [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 1.0], [1.0, 1.0]],
        dtype=tf.float32)
    encoder_outputs = py_utils.NestedMap(
        encoded=src_enc, padding=src_enc_padding)
    theta = py_utils.NestedMap()
    # Positional-argument form of BeamSearchDecode.
    decoder_output = bs_helper.BeamSearchDecode(theta, encoder_outputs,
                                                num_hyps_per_beam,
                                                InitBeamSearchState,
                                                PreBeamSearchStepCallback,
                                                PostBeamSearchStepCallback)
    topk_ids, topk_lens, topk_scores = self.evaluate([
        decoder_output.topk_ids, decoder_output.topk_lens,
        decoder_output.topk_scores
    ])
    print(np.array_repr(topk_ids))
    print(np.array_repr(topk_lens))
    print(np.array_repr(topk_scores))
    # Golden values for the fixed seeds above.
    expected_topk_ids = [[4, 3, 4, 3, 2, 0, 0], [4, 3, 11, 2, 0, 0, 0],
                         [4, 3, 6, 2, 0, 0, 0], [6, 0, 4, 6, 6, 11, 2],
                         [6, 0, 4, 6, 1, 2, 0], [6, 0, 4, 6, 6, 2, 0]]
    expected_topk_lens = [5, 4, 4, 7, 6, 6]
    expected_topk_scores = [[8.27340603, 6.26949024, 5.59490776],
                            [9.74691486, 8.46679497, 7.14809656]]
    self.assertAllEqual(expected_topk_ids, topk_ids.tolist())
    self.assertAllEqual(expected_topk_lens, topk_lens.tolist())
    self.assertAllClose(expected_topk_scores, topk_scores)
class MergeBeamSearchOutputsTest(test_utils.TestCase):
  """Tests for beam_search_helper.MergeBeamSearchOutputs."""

  def testMergeBeamSearchOutputs(self):
    """Merging two partial top-k outputs keeps the best 3 hyps per beam."""
    with self.session():
      # First partial result: 3 hyps for each of 2 beams.
      topk_scores_1 = [[1., 3., 5.], [-2., -1., 0.]]
      topk_ids_1 = [[[10, 11, 12], [30, 31, 32], [50, 51, 52]],
                    [[20, 21, 22], [10, 11, 12], [0, 0, 0]]]
      topk_lens_1 = [[3, 3, 2], [3, 3, 0]]
      topk_hyps_1 = [['one', 'three', 'five'], ['minus two', 'minus one', '']]
      topk_1 = beam_search_helper.BeamSearchDecodeOutput(
          tf.constant(topk_hyps_1),
          tf.reshape(tf.constant(topk_ids_1), [6, -1]),
          tf.reshape(tf.constant(topk_lens_1), [-1]),
          tf.reshape(tf.constant(topk_scores_1), [-1]), None, None)
      # Second partial result: 2 hyps for each of the same 2 beams.
      topk_scores_2 = [[2., 4.], [-3., 0.]]
      topk_ids_2 = [[[20, 21, 22], [40, 41, 42]], [[30, 31, 33], [0, 0, 0]]]
      topk_lens_2 = [[3, 2], [3, 0]]
      topk_hyps_2 = [['two', 'four'], ['minus three', '']]
      topk_2 = beam_search_helper.BeamSearchDecodeOutput(
          tf.constant(topk_hyps_2),
          tf.reshape(tf.constant(topk_ids_2), [4, -1]),
          tf.reshape(tf.constant(topk_lens_2), [-1]),
          tf.reshape(tf.constant(topk_scores_2), [-1]), None, None)
      # Merge down to the 3 best-scoring hyps per beam across both sources.
      topk = beam_search_helper.MergeBeamSearchOutputs(3, [topk_1, topk_2])
      self.assertIsNone(topk.topk_decoded)
      self.assertAllEqual([5., 4., 3., -1., -2., -3.], topk.topk_scores.eval())
      self.assertAllEqual([2, 2, 3, 3, 3, 3], topk.topk_lens.eval())
      self.assertAllEqual([[50, 51, 52], [40, 41, 42], [30, 31, 32],
                           [10, 11, 12], [20, 21, 22], [30, 31, 33]],
                          topk.topk_ids.eval())
      self.assertAllEqual([[b'five', b'four', b'three'],
                           [b'minus one', b'minus two', b'minus three']],
                          topk.topk_hyps.eval())
class GreedySearchHelperTest(test_utils.TestCase):
  """Tests for GreedySearchHelper.GreedySearchDecode."""

  def testGreedySearchHelper(self):
    """Greedy decode with seeded random logits matches recorded goldens."""
    with self.session(use_gpu=False):
      np.random.seed(9384758)
      tf.random.set_seed(8274758)
      vocab_size = 12
      src_len = 5
      tgt_len = 7
      src_batch_size = 2
      tgt_batch_size = src_batch_size
      p = beam_search_helper.GreedySearchHelper.Params().Set(
          name='gsh', target_seq_len=tgt_len)
      gs_helper = p.Instantiate()

      def InitGreedySearchState(unused_theta, unused_encoder_outputs,
                                unused_num_hyps_per_beam):
        # Random attention probs threaded through the recurrent state.
        atten_probs = tf.constant(
            np.random.normal(size=(tgt_batch_size, src_len)), dtype=tf.float32)
        return (py_utils.NestedMap({
            'log_probs': tf.zeros([tgt_batch_size, vocab_size]),
            'atten_probs': atten_probs,
        }), py_utils.NestedMap({'atten_probs': atten_probs}))

      def PreGreedySearchStepCallback(unused_theta, unused_encoder_outputs,
                                      unused_step_ids, states,
                                      unused_num_hyps_per_beam):
        atten_probs = tf.identity(states.atten_probs)
        # Seeded random logits keep the decode deterministic.
        logits = tf.random.normal([tgt_batch_size, vocab_size], seed=8273747)
        return (py_utils.NestedMap({
            'atten_probs': atten_probs,
            'log_probs': logits
        }), states)

      def PostGreedySearchStepCallback(unused_theta, unused_encoder_outputs,
                                       unused_new_step_ids, states):
        # No recurrent state to update.
        return states

      src_enc = tf.random.normal([src_len, src_batch_size, 8], seed=982774838)
      src_enc_padding = tf.constant(
          [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 1.0], [1.0, 1.0]],
          dtype=tf.float32)
      encoder_outputs = py_utils.NestedMap(
          encoded=src_enc, padding=src_enc_padding)
      theta = py_utils.NestedMap()
      (final_hyp_ids, final_hyp_lens,
       final_done_hyps) = gs_helper.GreedySearchDecode(
           theta, encoder_outputs, InitGreedySearchState,
           PreGreedySearchStepCallback, PostGreedySearchStepCallback)
      (final_hyp_ids, final_hyp_lens, final_done_hyps) = self.evaluate(
          [final_hyp_ids, final_hyp_lens, final_done_hyps])
      print(np.array_repr(final_hyp_ids))
      print(np.array_repr(final_hyp_lens))
      print(np.array_repr(final_done_hyps))
      # Golden values for the fixed seeds above.
      expected_hyp_ids = [[2, 2, 6, 7, 1, 9, 4], [3, 9, 3, 9, 6, 5, 10]]
      expected_hyp_lens = [1, 7]
      expected_done_hyps = [True, False]
      self.assertAllEqual(expected_hyp_ids, final_hyp_ids.tolist())
      self.assertAllEqual(expected_hyp_lens, final_hyp_lens.tolist())
      self.assertAllEqual(expected_done_hyps, final_done_hyps.tolist())
if __name__ == '__main__':
  # Discover and run all test cases in this file via the TF test runner.
  tf.test.main()
| 21,541 | 8,511 |
def ddm(T, x0, xinfty, lam, sig):
  """Simulate a noisy first-order (drift-diffusion-like) process.

  Each step applies x_{k+1} = xinfty + lam * (x_k - xinfty) + sig * eta_k
  with eta_k drawn from a standard normal.

  Args:
    T: duration; number of unit-spaced time steps to simulate.
    x0: initial value x[0].
    xinfty: fixed point the process relaxes toward.
    lam: per-step decay factor.
    sig: noise standard deviation.

  Returns:
    (t, x): the time axis np.arange(0, T) and the simulated trajectory.
  """
  t = np.arange(0, T, 1.)
  x = np.zeros_like(t)
  x[0] = x0
  for step in range(len(t) - 1):
    kick = sig * np.random.standard_normal(size=1)
    x[step + 1] = xinfty + lam * (x[step] - xinfty) + kick
  return t, x
def ddm_eq_var(T, x0, xinfty, lam, sig):
  """Empirical equilibrium variance of a single ddm simulation.

  Crude burn-in: assumes the process has settled by the halfway point
  and measures the variance of the second half of the trajectory only.
  """
  _, x = ddm(T, x0, xinfty, lam, sig)
  settled = x[-round(T / 2):]
  return settled.var()
np.random.seed(2020)  # set random seed for reproducibility

# Sweep the decay factor lambda and compare the measured equilibrium
# variance of the simulated process with the analytical prediction
# sigma^2 / (1 - lambda^2).
lambdas = np.arange(0.05, 0.95, 0.01)
empirical_variances = np.zeros_like(lambdas)
sig = 0.87

# Compute the empirical equilibrium variance for each lambda.
# NOTE(review): x0 and xinfty are assumed to be defined earlier in the
# script/notebook -- confirm before running this section standalone.
for i, lam in enumerate(lambdas):
  # Use the enumerate value directly (the original indexed lambdas[i]
  # and left `lam` unused).
  empirical_variances[i] = ddm_eq_var(5000, x0, xinfty, lam, sig)

# Analytical equilibrium variance, vectorized over all lambdas at once
# (the original pre-allocated this array only to overwrite it here).
analytical_variances = sig**2 / (1 - lambdas**2)

# Plot the empirical variance vs analytical variance
with plt.xkcd():
  var_comparison_plot(empirical_variances, analytical_variances)
# Package version string.
__version__ = '0.0.11b'

# Re-export the submodules' public names at the package top level so
# callers can import them directly from the package root.
from .base import *
from .context import *
from .task import *
from .env import *
from .parse import *
from tkinter import *
from tkinter.font import BOLD
from algoritmos.vrc import *
from algoritmos.noise_1_bits import *
######################################################################################################
# Pagina 1
def tab1(root, common_img, bits_normales, bits_desglosados):
    """Open the VRC demo window.

    Shows the original bit groups and their VRC encoding, and provides a
    'Transmitir' button that simulates a noisy transmission followed by
    a VRC check.
    """
    pagina1 = Toplevel(root)
    pagina1.geometry("1200x800")
    pagina1.title('VRC')

    # Window title banner.
    label1 = Label(pagina1, text='VRC', font=('Times_New_Roman', 20),
                   width=1000, height=50, image=common_img, compound='c')
    label1.place(x=100, y=10)

    lbl_bits_normales = Label(pagina1, text='Bits: ',
                              font=('Times_New_Roman', 20, BOLD),
                              image=common_img, compound='c', height=50)
    lbl_bits_normales.place(x=100, y=100)

    lbl_bits_codificados = Label(pagina1, text='VRC: ',
                                 font=('Times_New_Roman', 20, BOLD),
                                 image=common_img, compound='c', height=50)
    lbl_bits_codificados.place(x=100, y=175)

    # VRC-encode the unpacked bit groups.
    vrc_1 = vrc(list(bits_desglosados))
    print('vrc')
    print(vrc_1)
    vrc_codificado = ["".join(grupo) for grupo in vrc_1]

    # Show the original bit groups and their encoded counterparts
    # (each entry followed by a ' : ' separator, as in the original).
    lbl_bits_normales['text'] = 'Bits: ' + ''.join(
        b + ' : ' for b in bits_normales)
    lbl_bits_codificados['text'] = 'VRC: ' + ''.join(
        c + ' : ' for c in vrc_codificado)

    def transmitir():
        """Simulate a transmission: inject noise, show the received
        frames and the VRC check verdict."""
        trama_errada = noise(vrc_1, 'vrc')
        print('Grupo Transmitido')
        print(trama_errada)
        recibidos = ["".join(grupo) for grupo in trama_errada]
        lbl_errados['text'] = 'Transmitidos: ' + ''.join(
            r + ' : ' for r in recibidos)
        comprobado = comprobacion_vrc(trama_errada)
        print(comprobado)
        lbl_comprobado['text'] = 'Comprobacion: ' + comprobado

    # Transmit button (lbl_errados / lbl_comprobado below are resolved
    # when the callback runs, not when it is defined).
    btn_transmitir = Button(pagina1, text='Transmitir',
                            font=('Times_New_Roman', 20, BOLD),
                            image=common_img, compound='c', height=50,
                            width=200, command=transmitir)
    btn_transmitir.place(x=500, y=250)

    # Placeholder labels for the received frames and the check verdict.
    lbl_errados = Label(pagina1, text='Posible trama errada',
                        font=('Times_New_Roman', 20, BOLD),
                        image=common_img, compound='c', height=50)
    lbl_errados.place(x=100, y=325)

    lbl_comprobado = Label(pagina1, text='Comprobacion',
                           font=('Times_New_Roman', 20, BOLD),
                           image=common_img, compound='c', height=50)
    lbl_comprobado.place(x=100, y=400)
| 2,829 | 1,120 |
from unittest.mock import patch
import numpy as np
from auxein.population.dna_builders import UniformRandomDnaBuilder, NormalRandomDnaBuilder
def test_uniform_random_dna_builder_instantiation():
    """UniformRandomDnaBuilder reports its distribution and honours length."""
    uniform_builder = UniformRandomDnaBuilder(interval=(-5, 0))
    assert uniform_builder.get_distribution() == 'uniform'
    assert len(uniform_builder.get(10)) == 10
def test_uniform_random_dna_builder_values():
    """With the default interval every gene lies strictly inside (-1, 1)."""
    default_builder = UniformRandomDnaBuilder()
    for _ in range(100):
        genes: np.ndarray = default_builder.get(2)
        assert -1 < genes[0] < 1
        assert -1 < genes[1] < 1
@patch('numpy.random.normal')
def test_normal_random_dna_builder_instantiation(mock_np_normal):
    """NormalRandomDnaBuilder delegates to numpy.random.normal(0.0, 1.0, n)."""
    mock_np_normal.return_value = [0.5, -1.3]
    normal_builder = NormalRandomDnaBuilder()
    assert normal_builder.get_distribution() == 'normal'
    assert len(normal_builder.get(2)) == 2
    mock_np_normal.assert_called_once_with(0.0, 1.0, 2)
| 889 | 318 |
# Written by Yunfei LIU
# Sep 23, 2020
# Please obey the license GPLv3

# Read four space-separated integers from stdin.
number1, number2, number3, number4 = map(int, input().split())

# Add them up. Named `total` rather than `sum` -- the original shadowed
# the builtin sum().
total = number1 + number2 + number3 + number4

# The unit (ones) digit is the remainder modulo 10.
result = total % 10

# Output the result
print(result)
| 322 | 111 |
#!/usr/bin/env python
# https://en.wikipedia.org/wiki/Champernowne_constant
# sequences below are related to these constants
# counts how many numbers between two 10**n and 10**(n-1)
def A0(n):
    """Total digit count contributed by the n-digit integers.

    There are 10**n - 10**(n-1) integers with exactly n digits, and each
    contributes n digits.
    """
    n_digit_count = 10 ** n - 10 ** (n - 1)
    return n_digit_count * n
# http://oeis.org/A033714
# This sequence also gives the total count of digits of n below 10^n.
def A033714(n):
    """Return 1 plus the digit counts of all k-digit blocks for k < n."""
    total = 1
    # Inlined A0(k) = (10**k - 10**(k-1)) * k, the digit count of the
    # k-digit integers.
    for k in range(1, n):
        total += (10 ** k - 10 ** (k - 1)) * k
    return total
# Tabulate the first terms of both sequences.
for n in range(32):
    print(n, A0(n), A033714(n))
| 445 | 203 |
from model import *
from config import *
from utils import *
if __name__ == "__main__":
    ''' GPU(s) '''
    # Hide the first GPU_N physical GPUs: only gpus[GPU_N:] remain
    # visible to this process.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    GPU_N = 3
    if gpus:
        try:
            tf.config.experimental.set_visible_devices(gpus[GPU_N:], 'GPU')
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
        except RuntimeError as e:
            # Visible devices must be set before the GPUs are initialized.
            print(e)
    # NOTE(review): interactive debugger breakpoint left in -- remove
    # before unattended runs.
    import ipdb; ipdb.set_trace()
    np.random.seed(420)
    tf.random.set_seed(420)
    '''
    loss and gradient function.
    '''
    # loss_object = tf.losses.SparseCategoricalCrossentropy()
    @tf.function
    def loss(model, x, y):
        # NOTE: loss_object is assigned further below (before training
        # starts); the global reference is resolved at call time.
        y_ = model(x)
        return loss_object(y_true=y, y_pred=y_)
    @tf.function
    def smooth_l1_loss(y_true, y_pred):
        """Implements Smooth-L1 loss.
        y_true and y_pred are typically: [N, 4], but could be any shape.
        """
        diff = tf.abs(y_true - y_pred)
        less_than_one = K.cast(tf.less(diff, 1.0), "float32")
        # 0.5*d^2 where |d| < 1, |d| - 0.5 elsewhere.
        loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
        return loss
    @tf.function
    def grad(model, inputs, targets):
        # Returns (loss value, gradients w.r.t. the trainable variables).
        with tf.GradientTape() as tape:
            loss_value = loss(model, inputs, targets)
        return loss_value, tape.gradient(loss_value, model.trainable_variables)
    ''' dataset and dataset iterator'''
    ## cifar100 is likey too small. Switching to imagenet2012
    # cifar100 = tf.keras.datasets.cifar100
    # (x_train, y_train), (x_test, y_test) = cifar100.load_data(label_mode='fine')
    import tensorflow_datasets as tfds
    import ipdb
    tfds.list_builders()
    imagenet2012_builder = tfds.builder("imagenet2012")
    train_set, test_set = imagenet2012_builder.as_dataset(split=["train", "validation"])
    def onetwentyseven(x):
        # normalizing between 1 and -1 (after resizing to 256x256).
        x['image'] = tf.image.resize(x['image'], size=(256, 256))
        x['image'] = tf.cast(x['image'], tf.float32) / 127.5 - 1
        return x
    train_set = train_set.shuffle(1024).map(onetwentyseven)
    train_set = train_set.batch(32)
    test_set = test_set.shuffle(1024).map(onetwentyseven)
    test_set = test_set.batch(32)
    # NOTE(review): debugger breakpoint left in.
    import ipdb; ipdb.set_trace()
    # preprocess (dead code kept as an inert string from the cifar100 era)
    '''
    x_train = (x_train.reshape(-1, 32, 32, 3) / 255).astype(np.float32)
    x_test = (x_test.reshape(-1, 32, 32, 3) / 255).astype(np.float32)
    # create tf.data.Dataset
    train_set = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    test_set = tf.data.Dataset.from_tensor_slices((x_test, y_test))
    # now train_set and test_set are Dataset objects.
    # we return the dataset iterator by calling the
    # __iter__() method
    #
    # Alternatively, we can just iterate over the Datasets
    # iff eager mode is on (i.e. by default).
    train_set = train_set.shuffle(10000)
    test_set.shuffle(10000)
    b_train_set = train_set.batch(256)
    b_test_set = test_set.batch(256)
    '''
    ''' model '''
    # from config import Config
    from viz import *
    from utils import test_model
    class Config(object):
        # Minimal local config; note it shadows any Config from `config`.
        def __init__(self):
            self.BATCH_SIZE=256
            self.BACKBONE = 'resnet51'
    mycon = Config()
    model = ResNet((None, None, 3), 1000, mycon)
    model.build(input_shape=(256, None, None, 3)) # place correct shape from imagenet
    ''' initialize '''
    # Reduce LR with *0.1 when plateau is detected
    adapt_lr = LearningRateReducer(init_lr=0.1, factor=0.1,
                                   patience=10, refractory_interval=20) # wait 20 epochs from last update
    loss_object = tf.losses.SparseCategoricalCrossentropy()
    optimizer = tf.keras.optimizers.SGD(adapt_lr.monitor(), momentum = 0.9)
    train_loss_results = []
    train_accuracy_results = []
    test_loss_results, test_acc_results = [], []
    num_epochs = 300
    ''' train '''
    for epoch in range(num_epochs):
        epoch_loss_avg = tf.keras.metrics.Mean()
        epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
        k = 0
        # NOTE(review): a fresh SGD optimizer is built every epoch, which
        # likely discards accumulated momentum state -- confirm intended.
        optimizer = tf.keras.optimizers.SGD(adapt_lr.monitor(train_loss_results), momentum = 0.9)
        for batch in train_set:
            # img_btch, lab_btch, fn_btch = batch
            img_btch = batch['image']
            lab_btch = batch['label']
            loss_value, grads = grad(model, img_btch, lab_btch)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))
            # Accumulate running loss / accuracy across the epoch.
            epoch_loss_avg(loss_value)
            epoch_accuracy(lab_btch, model(img_btch))
            # Per-batch logging only during the very first epoch.
            if epoch < 1:
                print("Epoch {:03d}: Batch: {:03d} Loss: {:.3%}, Accuracy: {:.3%}".format(epoch, k, epoch_loss_avg.result(), epoch_accuracy.result()))
            k+=1
        print("Trainset >> Epoch {:03d}: Loss: {:.3%}, Accuracy: {:.3%}".format(epoch, epoch_loss_avg.result(), epoch_accuracy.result()))
        # end epoch
        #if int(epoch_accuracy.result() > 70):
        test_loss, test_accuracy = test_model(model, test_set)
        test_loss_results.append(test_loss)
        test_acc_results.append(test_accuracy)
        train_loss_results.append(epoch_loss_avg.result())
        train_accuracy_results.append(epoch_accuracy.result())
        # import ipdb; ipdb.set_trace()
        # Snapshot a metrics plot every 100 epochs.
        if epoch % 100 == 0:
            fname = 'imgs/Test_Acc_Loss_IN2012_' + str(epoch) + '.png'
            # here we should plot metrics and loss for test too.
            # hence TODO: update save_plot
            loss_l = [train_loss_results, test_loss_results]
            acc_l = [train_accuracy_results, test_acc_results]
            save_plot(loss_l, acc_l, fname)
        #if train_loss_results[-1] > train_loss_results[-2]: # was if epoch == 10:
        #    learning_rate /= 10
        #    optimizer = tf.keras.optimizers.SGD(lr=learning_rate, momentum=0.9)
        #    print("Sir, we just updated the learning rate Sir.")
    # NOTE(review): debugger breakpoint left in.
    import ipdb; ipdb.set_trace()
| 5,393 | 2,382 |
# https://leetcode.com/problems/reverse-integer/
# Runtime: 32 ms, faster than 96.20% of Python3 online submissions for Reverse Integer.
# Runtime: 36 ms, faster than 84.28% of Python3 online submissions for Reverse Integer.
# Runtime: 44 ms, faster than 22.92% of Python3 online submissions for Reverse Integer.
class Solution:
    """LeetCode 7 'Reverse Integer': three drafts of the same routine.

    Each variant reverses the decimal digits of ``x`` and returns 0 when
    the reversed value falls outside the signed 32-bit range
    [-2**31, 2**31 - 1].
    """

    def reverse1(self, x: int) -> int:
        """Reverse via string slicing; overflow check via bit_length()."""
        if x >= 0:
            strX = str(x)[::-1]
        else:
            # '-123' -> '321-': drop the trailing '-' and re-prefix it.
            strX = "-" + str(x)[::-1][:-1]
        result = int(strX)
        # A magnitude needing more than 31 bits is outside 32-bit range.
        if result.bit_length() > 31:
            result = 0
        return result

    def reverse2(self, x: int) -> int:
        """Like reverse1 but reverses abs(x) for negative inputs.

        Bug fixed: the original computed ``result`` but never returned
        it, so every call yielded None.
        """
        if x >= 0:
            strX = str(x)[::-1]
        else:
            # This modification increase the performance from 44ms to 36 ms
            strX = "-" + str(- x)[::-1]
        result = int(strX)
        if result.bit_length() > 31:
            result = 0
        return result  # was missing in the original

    def reverse3(self, x: int) -> int:
        """Like reverse2 but with an explicit 32-bit interval check.

        Bug fixed: ``result in range(-2**31, 2**31 - 1)`` silently
        excluded the valid maximum 2**31 - 1 (range upper bounds are
        exclusive); use a closed-interval comparison instead.
        """
        if x >= 0:
            strX = str(x)[::-1]
        else:
            strX = "-" + str(- x)[::-1]
        result = int(strX)
        if -2**31 <= result <= 2**31 - 1:
            return result
        return 0
class Solution:
    """Final version of LeetCode 7 'Reverse Integer'."""

    def reverse(self, x: int) -> int:
        """Return x with its decimal digits reversed, or 0 on overflow.

        The sign is preserved; results outside the signed 32-bit range
        [-2**31, 2**31 - 1] map to 0.
        """
        if x >= 0:
            strX = str(x)[::-1]
        else:
            # Reversing abs(x) avoids slicing the '-' back out.
            # (This modification increased performance from 44ms to 36ms.)
            strX = "-" + str(- x)[::-1]
        result = int(strX)
        # Bug fixed: the original used
        # `result in range(-2**31, 2**31 - 1)`, whose exclusive upper
        # bound wrongly rejected the valid maximum 2**31 - 1.
        if -2**31 <= result <= 2**31 - 1:
            return result
        return 0
# Ad-hoc manual check of the final Solution.
x = -1
# x = 1563847412
# print("s" , x.bit_length())
print ( Solution().reverse(x))
| 1,862 | 635 |
"""
A hodge-podge of convenience functions for luci
"""
| 57 | 20 |
# For each test case: read a header line whose last integer is the
# modulus, then a line of coin values; print the coin sum modulo that
# value. Reducing each coin first leaves the total's residue unchanged.
testcases = int(input())
for _ in range(testcases):
    header = [int(tok) for tok in input().split()]
    modulus = header[-1]
    residues = [int(tok) % modulus for tok in input().split()]
    print(sum(residues) % modulus)
| 235 | 85 |
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2021 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>2</version>
<name>TS_WANMANAGER_DSL_CheckInternetConnectivity_PrimaryWanForDSLAndWANOE</name>
<primitive_test_id/>
<primitive_test_name>wanmanager_DoNothing</primitive_test_name>
<primitive_test_version>1</primitive_test_version>
<status>FREE</status>
<synopsis>With active DSL connection ,primary WAN Type and priority as 1 only DSL is expected to have active connection</synopsis>
<groups_id/>
<execution_time>30</execution_time>
<long_duration>false</long_duration>
<advanced_script>false</advanced_script>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_WANMANAGER_48</test_case_id>
<test_objective>This test case is to check with active DSL connection ,primary WAN Type and priority as 1 only DSL is expected to have active connection</test_objective>
<test_type>Positive</test_type>
<test_setup>Broadband</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp components and TDK Component
2.TDK Agent should be in running state or invoke it through StartTdk.sh script
3.WAN Manager should be enabled
4.DSL Line Should be Enabled. </pre_requisite>
<api_or_interface_used>none</api_or_interface_used>
<input_parameters>Device.X_RDK_WanManager.CPEInterface.1.Wan.Type
Device.X_RDK_WanManager.CPEInterface.2.Wan.Type
Device.X_RDK_WanManager.CPEInterface.1.Wan.Priority
Device.X_RDK_WanManager.CPEInterface.2.Wan.Priority</input_parameters>
<automation_approch>1.Load the module
2.Check for active DSL line connection and disable WANOE if enabled
3.Get the default WANOE,DSL WAN type and priority
4.Set WANOE,DSL WAN type and priority as primary,secondary,0,1 respectively
5.reboot the device
6.Check if DSL Line is active and WANOE is inactive
7.Revert the set values
8.Unload the module</automation_approch>
<expected_output>with WANOE,DSL WAN type and priority as primary,secondary,0,1 respectively ,DSL Line is expected to be active and WANOE is inactive</expected_output>
<priority>High</priority>
<test_stub_interface>WAN_MANAGER</test_stub_interface>
<test_script>TS_WANMANAGER_DSL_CheckInternetConnectivity_PrimaryWanForDSLAndWANOE</test_script>
<skipped>No</skipped>
<release_version>M89</release_version>
<remarks>None</remarks>
</test_cases>
<script_tags/>
</xml>
'''
# tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
from tdkbVariables import *;
from time import sleep;
from WanManager_Utility import *;
obj = tdklib.TDKScriptingLibrary("tdkbtr181","RDKB");
obj1 = tdklib.TDKScriptingLibrary("sysutil","1");
tadobj = tdklib.TDKScriptingLibrary("tad","1");
#IP and Port of box. No need to change:
#these will be replaced with the corresponding box IP and port while executing the script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_WANMANAGER_DSL_CheckInternetConnectivity_PrimaryWanForDSLAndWANOE');
obj1.configureTestCase(ip,port,'TS_WANMANAGER_DSL_CheckInternetConnectivity_PrimaryWanForDSLAndWANOE');
tadobj.configureTestCase(ip,port,'TS_WANMANAGER_DSL_CheckInternetConnectivity_PrimaryWanForDSLAndWANOE');
#Get the result of connection with test component and DUT
loadmodulestatus =obj.getLoadModuleResult();
loadmodulestatus1 =obj1.getLoadModuleResult();
loadmodulestatus2 = tadobj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus;
print "[LIB LOAD STATUS] : %s" %loadmodulestatus1;
print "[LIB LOAD STATUS] : %s" %loadmodulestatus2;
if "SUCCESS" in (loadmodulestatus.upper() and loadmodulestatus1.upper() and loadmodulestatus2.upper()):
#Set the result status of execution
obj.setLoadModuleStatus("SUCCESS");
obj1.setLoadModuleStatus("SUCCESS");
tadobj.setLoadModuleStatus("SUCCESS");
revertWANOE =0;
revertpriority =0;
objReturned,dsl_wan,active = getDSLWANStatus(tadobj,1);
if active == 0:
expectedresult="SUCCESS";
actualresult = "SUCCESS";
print "******performing a pre-requisite where in WANOE inteface is expected to be disabled ***";
tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Get');
tdkTestObj.addParameter("ParamName","Device.X_RDK_WanManager.CPEInterface.2.Wan.Enable");
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails().strip().replace("\\n", "");
if expectedresult in actualresult and details == "true":
print "WANOE is enabled and disabling it ";
tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Set');
result,tdkTestObj = EnableDisableInterafce(2,"false",tdkTestObj);
revertWANOE = 1;
if expectedresult in actualresult:
paramList = ["Device.X_RDK_WanManager.CPEInterface.1.Wan.Type","Device.X_RDK_WanManager.CPEInterface.2.Wan.Type"];
defaults = [];
flag =0;
print "The Default WAN Values are being fetched";
for item in paramList:
tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Get');
tdkTestObj.addParameter("ParamName",item);
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails().strip().replace("\\n", "");
if expectedresult in actualresult:
defaults.append(details);
else:
flag = 1;
break;
if flag == 0:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 2: Get the Default WAN and priority values for DSL and WANOE";
print "EXPECTED RESULT 2: Should get the default WAN and priority values for DSL and WANOE"
print "ACTUAL RESULT 2 :The defaults for %s are %s:"%(paramList,defaults);
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj_Get = obj.createTestStep('TDKB_TR181Stub_Get');
tdkTestObj_Set = obj.createTestStep('TDKB_TR181Stub_Set');
revertpriority,defPriority,actualresult = MakePriorityUnEqual(tdkTestObj_Get,tdkTestObj_Set);
if expectedresult in actualresult:
for items in paramList:
tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Set');
tdkTestObj.addParameter("ParamName",items)
tdkTestObj.addParameter("ParamValue","Primary");
tdkTestObj.addParameter("Type","string");
expectedresult= "SUCCESS";
#Execute testcase on DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
Setresult = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
print "Set operation successful for :" ,items;
else:
flag =1;
break;
if flag == 1:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 3: Setting WAN type as Primary for DSL and WANOE";
print "EXPECTED RESULT 3: Should set WAN type as Primary for DSL and WANOE";
print "ACTUAL RESULT 3: set operation failed for %s"%item;
print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 3: Setting WAN type as Primary for DSL and WANOE";
print "EXPECTED RESULT 3: Should set WAN type as Primary for DSL and WANOE";
print "ACTUAL RESULT 3: Set operation is successful";
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
print "Rebooting the device to verify the set operations done are working as expected";
obj1.initiateReboot();();
sleep(300);
tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Get');
tdkTestObj.addParameter("ParamName","Device.X_RDK_WanManager.CPEInterface.1.Wan.ActiveLink");
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult1 = tdkTestObj.getResult();
activeDSL = tdkTestObj.getResultDetails().strip().replace("\\n", "");
tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Get');
tdkTestObj.addParameter("ParamName","Device.X_RDK_WanManager.CPEInterface.2.Wan.ActiveLink");
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult2 = tdkTestObj.getResult();
activeWANOE = tdkTestObj.getResultDetails().strip().replace("\\n", "");
if expectedresult in (actualresult1 and actualresult2):
if activeDSL == "true" and activeWANOE == "false":
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 4: Get the Active link status of DSL and WANOE";
print "EXPECTED RESULT 4: Active link status of DSL is expected to be true and WANOE as false";
print "ACTUAL RESULT 4: DSL status :%s, WANOE status : %s" %(activeDSL,activeWANOE);
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 4: Get the Active link status of DSL and WANOE";
print "EXPECTED RESULT 4: Active link status of DSL is expected to be true and WANOE as false";
print "ACTUAL RESULT 4: DSL status :%s, WANOE status : %s" %(activeDSL,activeWANOE);
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 4: Get the Active link status of DSL and WANOE";
print "EXPECTED RESULT 4: Active link status of DSL is expected to be true and WANOE as false";
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
revflg =0;
index =0;
for item in paramList:
tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Set');
tdkTestObj.addParameter("ParamName",item)
tdkTestObj.addParameter("ParamValue",defaults[index]);
tdkTestObj.addParameter("Type","string");
expectedresult= "SUCCESS";
#Execute testcase on DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
Setresult = tdkTestObj.getResultDetails();
index = index +1;
if expectedresult in actualresult:
print "Reverting %s"%item;
else:
revflg =1;
if revflg == 0:
tdkTestObj.setResultStatus("SUCCESS");
print "revert operation sucessful";
print "rebooting the device to apply the set operations done as apart of revert";
obj1.initiateReboot();();
sleep(300);
else:
tdkTestObj.setResultStatus("FAILURE");
print "revert operation failed";
else:
print "[TEST EXECUTION RESULT] : FAILURE";
print "Failed to make the priorities unequal";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 2: Get the Default WAN and priority values for DSL and WANOE";
print "EXPECTED RESULT 2: Should get the default WAN and priority values for DSL and WANOE"
print "ACTUAL RESULT 2 :The defaults for %s are %s:"%(paramList,defaults);
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
print "#####Performing revert operation for interafce disabling and priorities if set######";
#Revert operations
if revertWANOE == 1:
tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Set');
result,tdkTestObj = EnableDisableInterafce(2,"true",tdkTestObj);
if expectedresult in result:
tdkTestObj.setResultStatus("SUCCESS");
else:
tdkTestObj.setResultStatus("FAILURE");
print "Enabling the WNOE interafce failed";
if revertpriority ==1:
tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Set');
tdkTestObj.addParameter("ParamName","Device.X_RDK_WanManager.CPEInterface.2.Wan.Priority");
tdkTestObj.addParameter("ParamValue",defPriority[1]);
tdkTestObj.addParameter("Type","int");
expectedresult= "SUCCESS";
#Execute testcase on DUT
tdkTestObj.executeTestCase(expectedresult);
result = tdkTestObj.getResult();
Setresult = tdkTestObj.getResultDetails();
index =index +1;
if expectedresult in result:
print "Reverted the unequal priority";
tdkTestObj.setResultStatus("SUCCESS");
else:
tdkTestObj.setResultStatus("FAILURE");
print "failed to revert the changed priority";
else:
objReturned.setResultStatus("FAILURE");
print "*********DSL is not active please have a active connection********";
obj.unloadModule("tdkbtr181");
obj1.unloadModule("sysutil");
tadobj.unloadModule("tad");
else:
print "Failed to load module";
obj.setLoadModuleStatus("FAILURE");
obj1.setLoadModuleStatus("FAILURE");
tadobj.setLoadModuleStatus("FAILURE");
| 16,347 | 4,675 |
import geopandas as gp
import pytest
from shapely.geometry import Polygon
import maup
CRS = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"


@pytest.fixture
def crs():
    """Expose the module-level WGS84 lon/lat CRS string as a fixture."""
    return CRS


@pytest.fixture
def four_square_grid():
    """Four unit squares in a 2x2 layout:

    b d
    a c
    """
    lower_left = {"a": (0, 0), "b": (0, 1), "c": (1, 0), "d": (1, 1)}
    ids = ["a", "b", "c", "d"]
    geometries = [
        Polygon([(x, y), (x, y + 1), (x + 1, y + 1), (x + 1, y)])
        for x, y in (lower_left[i] for i in ids)
    ]
    return gp.GeoDataFrame({"ID": ids, "geometry": geometries}, crs=CRS)
@pytest.fixture
def square():
    """Unit square spanning (0.5, 0.5)..(1.5, 1.5), overlapping all four cells."""
    corners = [(0.5, 0.5), (0.5, 1.5), (1.5, 1.5), (1.5, 0.5)]
    return Polygon(corners)


@pytest.fixture
def distant_polygon():
    """A small triangle far away from the grid."""
    corners = [(100, 101), (101, 101), (101, 100)]
    return Polygon(corners)


@pytest.fixture
def diamond():
    """A large diamond (|x| + |y| <= 100) centered at the origin."""
    corners = [(100, 0), (0, 100), (-100, 0), (0, -100)]
    return Polygon(corners)


@pytest.fixture
def polygon_inside_diamond_bounds():
    """A unit square inside the diamond's bounding box but outside the diamond."""
    corners = [(90, 90), (91, 90), (91, 91), (90, 91)]
    return Polygon(corners)
@pytest.fixture
def squares_within_four_square_grid():
    """Small squares that each sit neatly inside one cell of the 2x2 grid."""
    both_inside_a = [
        Polygon([(0, 0), (0, 0.5), (0.5, 0.5), (0.5, 0)]),
        Polygon([(0.5, 0.5), (1, 0.5), (1, 1), (0.5, 1)]),
    ]
    exactly_b = Polygon([(0, 1), (0, 2), (1, 2), (1, 1)])
    neatly_inside_d = Polygon([(1.25, 1.25), (1.25, 1.75), (1.75, 1.75), (1.75, 1.25)])
    return gp.GeoSeries(both_inside_a + [exactly_b, neatly_inside_d], crs=CRS)
@pytest.fixture
def left_half_of_square_grid(four_square_grid):
    """The 'a' and 'b' column of the 2x2 grid."""
    in_left_column = four_square_grid["ID"].isin(["a", "b"])
    return four_square_grid[in_left_column]


@pytest.fixture
def squares_df(squares_within_four_square_grid):
    """GeoDataFrame wrapping the neat squares with unit data and string IDs."""
    count = len(squares_within_four_square_grid)
    return gp.GeoDataFrame(
        {
            "geometry": squares_within_four_square_grid,
            "data": [1] * count,
            "ID": ["01", "02", "03", "04"],
        },
        crs=CRS,
    )


@pytest.fixture
def square_mostly_in_top_left():
    """A rectangle lying mostly in the grid's top-left cell ('b')."""
    corners = [(1.5, 0.5), (1.5, 2), (0, 2), (0, 0.5)]
    return gp.GeoSeries([Polygon(corners)], crs=CRS)
@pytest.fixture
def squares_some_neat_some_overlapping(
    square_mostly_in_top_left, squares_within_four_square_grid
):
    """Concatenation of the neat squares and the overlapping rectangle.

    Uses ``pandas.concat`` because ``Series.append`` was deprecated in
    pandas 1.4 and removed in pandas 2.0.
    """
    import pandas as pd  # local import: only this fixture needs pandas directly

    result = gp.GeoSeries(
        pd.concat(
            [squares_within_four_square_grid, square_mostly_in_top_left],
            ignore_index=True,
        )
    )
    result.crs = CRS
    return result
@pytest.fixture
def big_square():
    """A 2x2 square covering the entire four-square grid."""
    corners = [(0, 0), (2, 0), (2, 2), (0, 2)]
    return gp.GeoSeries([Polygon(corners)], crs=CRS)
| 2,409 | 1,147 |
"""
Plot results of benchmark. Need to run benchmark.py first to generate
the file benchmark_results.json.
"""
import json
from collections import defaultdict
import matplotlib.pyplot as plt
def groupby(values, keyfunc):
    """Return a defaultdict mapping each key to the list of values producing it.

    Keys come from ``keyfunc(value)``; first-appearance order is preserved.
    """
    grouped = defaultdict(list)
    for item in values:
        grouped[keyfunc(item)].append(item)
    return grouped
def main() -> None:
    """Plot benchmark results from benchmark_results.json as log-log curves."""
    with open("benchmark_results.json", encoding="utf-8") as f:
        all_results = json.load(f)

    def keyfunc(result):
        return (result["zipfile"], result["threadcount"])

    # One curve per (zipfile, threadcount) combination.
    for (zipfile, threadcount), group in groupby(all_results, keyfunc).items():
        group.sort(key=lambda result: result["filesize"])
        filesizes = [r["filesize"] for r in group]
        # Average timing per result, converted from seconds to milliseconds.
        timings = [1000 * sum(r["timings"]) / len(r["timings"]) for r in group]
        suffix = "s" if threadcount > 1 else ""
        plt.loglog(
            filesizes,
            timings,
            label=f"{zipfile} - {threadcount} Thread{suffix}",
        )
    plt.legend()
    plt.xlabel("File size [bytes]")
    plt.ylabel("Milliseconds to process a 10 MB zip file (lower is better)")
    plt.tight_layout()
    plt.savefig("benchmark.png")
    plt.show()
# Script entry point.
if __name__ == "__main__":
    main()
| 1,389 | 418 |
from edgesets import UEdge, DEdge
def test_repr():
    """repr() of a DEdge must round-trip through eval() to an equal edge."""
    e1 = DEdge(7, 8)
    text = repr(e1)
    assert text == "DEdge(7, 8, weight=1)"
    e2 = eval(text)
    # Bug fix: previously compared type(e1) with itself, which is always true;
    # the round-tripped edge's type is what must match.
    assert type(e1) == type(e2)
    assert e1 == e2
def test_if_directions_are_differents_with_same_nodes():
    """Opposite-direction edges over the same node pair are distinct."""
    forward = DEdge(10, 15)
    backward = DEdge(15, 10)
    assert forward != backward
    assert hash(forward) != hash(backward)


def test_if_DEdge_is_differente_from_UEdge():
    """A directed edge never equals an undirected edge on the same nodes."""
    directed = DEdge(10, 15)
    undirected = UEdge(15, 10)
    assert directed != undirected
    assert hash(directed) != hash(undirected)


def test_DEdge_is_different_from_tuple():
    """Edges are not interchangeable with plain tuples."""
    nodes = (25, 42)
    edge = DEdge(*nodes)
    assert edge != nodes
    assert hash(edge) != hash(nodes)


def test_DEdge_is_different_from_list():
    """Edges are not interchangeable with plain lists."""
    nodes = [24, 25]
    edge = DEdge(*nodes)
    assert edge != nodes
    # assert hash(edge) != hash(nodes)  # list is not hashable
import argparse
from pathlib import Path
import numpy as np
import scipy
import keras
from keras.models import load_model
from moviepy.editor import VideoFileClip, concatenate_videoclips
from tqdm import tqdm
def main():
    """Classify video frames with a Keras model and concatenate the highlight
    segments into a single output clip.
    """
    # yapf: disable
    parser = argparse.ArgumentParser(description='Video Highlight')
    parser.add_argument('model', type=str, help='Path to model')
    parser.add_argument('video', type=str, help='Path to video to highlight')
    parser.add_argument('--out', '-o', type=str, default='./hl.mp4', help='output name')
    parser.add_argument('--fps', type=int, default=2, help='fps')
    parser.add_argument('--itv', type=int, default=6, help='interval of adjusting')
    parser.add_argument('--bs', type=int, default=80, help='batch size')
    args = parser.parse_args()
    # yapf: enable

    print('Loading model & video', end='...')
    model = load_model(args.model)
    video = VideoFileClip(args.video)
    print('ok')

    n_frames = int(video.duration) * args.fps
    # Bug fix: scipy.misc.imresize was deprecated in SciPy 1.0 and removed in
    # 1.3; resize the clip once with moviepy instead, which yields the same
    # (224, 224, 3) uint8 frames.
    resized = video.resize(newsize=(224, 224))
    xs = np.zeros((n_frames, 224, 224, 3), dtype=np.float32)
    for f in tqdm(range(n_frames), desc='Loading Video Frames', ascii=True):
        xs[f] = resized.get_frame(f / args.fps)

    # Per-frame highlight probability, rounded to a 0/1 mask.
    pred = model.predict(xs, args.bs, verbose=1)
    pred = pred.round().astype(np.uint8).flatten()
    print(pred[:500])

    # Close gaps: any window of length itv whose endpoints are both 1
    # becomes all 1, merging nearby highlight frames.
    for i in range(n_frames - args.itv):
        s, t = i, i + args.itv
        if pred[s] == 1 and pred[t - 1] == 1:
            pred[s:t] = 1

    # Segment boundaries from the padded mask: +1 edges start a highlight,
    # -1 edges end one; convert frame indices to seconds.
    diff = np.diff(np.concatenate([[0], pred, [1]]))
    starts = (diff == +1).nonzero()[0] / args.fps
    ends = (diff == -1).nonzero()[0] / args.fps
    segs = [video.subclip(s, e) for s, e in zip(starts, ends)]
    out = concatenate_videoclips(segs)
    out.write_videofile(args.out, fps=video.fps, threads=4, audio=True)
# Script entry point.
if __name__ == '__main__':
    main()
| 1,889 | 709 |
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.utils import timezone
from django.db.models import Count, Q
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.contrib.auth import login, authenticate, logout
from django.contrib.contenttypes.models import ContentType
from taggit.models import Tag
from accounts.models import Account
from blog.models import BlogPost, Category, BlogPicture
from comments.forms import CommentForm
from comments.models import Comments
from category.models import Category
BLOG_POST_PER_PAGE = 3
RESULT_POST_PER_PAGE = 17
# Category count helper
def get_category_count():
    """Annotate each (category title, id) pair with its number of blog posts."""
    counts = (
        BlogPost.objects
        .values('categories__title', 'categories__id')
        .annotate(Count('categories__title'))
    )
    return counts
# Blog Page.
def blog_screen_view(request):
    """Render the paginated blog index with featured and recent posts."""
    category_count = get_category_count()
    super_featured = BlogPost.objects.filter(super_featured=True).order_by('-date_published')[:3]
    blogPosts = BlogPost.objects.order_by('-date_published')
    recentPosts = BlogPost.objects.order_by('-date_published')[:4]

    # Pagination
    page = request.GET.get('page', 1)
    blog_posts_paginator = Paginator(blogPosts, BLOG_POST_PER_PAGE)
    try:
        blogPosts = blog_posts_paginator.page(page)
    except PageNotAnInteger:
        # Bug fix: fall back to the first page. Previously this requested
        # page(BLOG_POST_PER_PAGE), i.e. page 3, on a non-integer ?page=
        # value — inconsistent with search_screen_view, which uses page(1).
        blogPosts = blog_posts_paginator.page(1)
    except EmptyPage:
        blogPosts = blog_posts_paginator.page(blog_posts_paginator.num_pages)

    context = {
        'super_featured_posts': super_featured,
        'posts': blogPosts,
        'recent_posts': recentPosts,
        'categories': category_count,
    }
    return render(request, 'blog/blog.html', context)
# Single Post
def post_screen_view(request, slug):
    """Render a single blog post and handle comment submissions.

    A POST creates a Comments row (optionally threaded via parent_id) and
    redirects back to the post's absolute URL.
    """
    post = get_object_or_404(BlogPost, slug=slug)
    post_related = post.tags.similar_objects()[:3]
    app_url = request.get_full_path
    category_count = get_category_count()
    recentPosts = BlogPost.objects.order_by('-date_published')[:4]
    comments = post.comments
    # (Removed an unused `initial_data` dict that was never passed to the form.)
    if request.method == 'POST':
        form = CommentForm(request.POST or None)
        if form.is_valid():
            com = form.save(commit=False)
            com.user = request.user
            com.content_type = post.get_content_type
            com.object_id = post.id
            parent_obj = None
            # Bug fix: the bare `except:` also hid programming errors; only
            # int() on a missing or non-numeric parent_id can fail here.
            try:
                parent_id = int(request.POST.get("parent_id"))
            except (TypeError, ValueError):
                parent_id = None
            if parent_id:
                parent_qs = Comments.objects.filter(id=parent_id)
                if parent_qs.exists() and parent_qs.count() == 1:
                    parent_obj = parent_qs.first()
            com.parent = parent_obj
            com.save()
            return HttpResponseRedirect(com.content_object.get_absolute_url())
        else:
            print('error')
    else:
        form = CommentForm()
    context = {
        'post': post,
        'recent_posts': recentPosts,
        'categories': category_count,
        'post_url': app_url,
        'comments': comments,
        'comment_form': form,
        'related_posts': post_related,
    }
    return render(request, 'blog/post.html', context)
# Search Page
def search_screen_view(request):
    """Filter posts by the ?q= query across title and both description fields."""
    query_set = BlogPost.objects.all()
    category_count = get_category_count()
    query = request.GET.get('q')
    if query:
        query_set = query_set.filter(
            Q(title__icontains=query)
            | Q(description_one__icontains=query)
            | Q(description_two__icontains=query)
        ).distinct()

    # RESULT_POST_PER_PAGE results per page.
    paginator = Paginator(query_set, RESULT_POST_PER_PAGE)
    page = request.GET.get('page', 1)
    try:
        posts = paginator.page(page)
    except PageNotAnInteger:
        posts = paginator.page(1)
    except EmptyPage:
        posts = paginator.page(paginator.num_pages)

    context = {
        'query_sets': posts,
        'categories': category_count,
    }
    return render(request, 'blog/result_search.html', context)
# coding=utf-8
import tensorflow as tf
class DataSetLoader(object):
    """Build one tf.data pipeline per named generator and share a single iterator.

    The iterator is created from the default set; one initializer op per data
    set allows re-pointing the shared iterator at run time.
    NOTE(review): uses the TF1 graph-mode API (variable_scope,
    make_one_shot_iterator, make_initializer) — confirm make_initializer is
    supported on a one-shot iterator in the TF version in use.
    """

    def __init__(self, config, generators, default_set_name='train'):
        # config must provide .epochs and .batch_size; generators maps
        # set name -> object exposing a .next generator function.
        self.config = config
        self.generators = generators
        self.data_sets = dict()
        self.data_set_init_ops = dict()
        with tf.variable_scope("data"):
            for k in self.generators.keys():
                self.data_sets[k] = self.get_data_set_from_generator(self.generators[k].next, epochs=self.config.epochs,
                                                                     batch_size=self.config.batch_size)
            # Single iterator shared by all sets, starting on the default set.
            self.iterator = self.data_sets[default_set_name].make_one_shot_iterator()
            features, labels = self.iterator.get_next()
            self.next_data = {'features': features, 'labels': labels}
            for k in self.data_sets.keys():
                # Op that re-points the shared iterator at data set k.
                self.data_set_init_ops[k] = self.iterator.make_initializer(self.data_sets[k])

    @staticmethod
    def get_data_set_from_generator(generator_func, epochs=1, batch_size=16):
        """Wrap a generator of (int32[64] features, int32[1] label) pairs as a
        repeated, batched tf.data.Dataset."""
        data_set = tf.data.Dataset.from_generator(generator_func,
                                                  output_types=(tf.int32, tf.int32),
                                                  output_shapes=(tf.TensorShape([64]), tf.TensorShape([1])))
        data_set = data_set.repeat(epochs)
        data_set = data_set.batch(batch_size)
        return data_set
| 1,408 | 415 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# imports.
from ssht00ls.classes.config import *
from ssht00ls.classes import utils
# the ssh connections object class.
class Connections(Traceback):
def __init__(self):
# docs.
DOCS = {
"module":"ssht00ls.connections",
"initialized":True,
"description":[],
"chapter": "Connections", }
# defaults.
Traceback.__init__(self, traceback="ssht00ls.connections", raw_traceback="ssht00ls.classes.connections.Connections")
#
def list(self, filter="ssh"):
if dev0s.defaults.vars.os not in ["linux"]:
return dev0s.response.error(f"Unsupported operating system [{dev0s.defauls.vars.os}].")
output = dev0s.utils.__execute_script__("""ss | grep ssh | awk '{print $1","$2","$3","$4","$5","$6}' """)
connections = {}
for line in output.split("\n"):
if line not in [""]:
net_id,state,recvq, sendq,local_address,remote_address = line.split(",")
if state == "ESTAB":
connections[remote_address] = {
"remote_address":remote_address,
"local_address":local_address,
"recvq":recvq,
"sendq":sendq,
"net_id":net_id,
}
return dev0s.response.success(f"Successfully listed {len(connections)} ssh connection(s).", {
"connections":connections,
})
# Initialized objects: module-level singleton used by importers.
connections = Connections()
| 1,323 | 549 |
# Even integers 0, 2, ..., 18 as a pandas Series.
data = np.array(range(0, 20, 2))
result = pd.Series(data)

# Alternative Solution
# s = pd.Series(range(0, 20, 2))

# Alternative Solution
# s = pd.Series([x for x in range(0, 20) if x % 2 == 0])
#!/usr/bin/env python
# encoding: utf-8
from contextlib import contextmanager
import mock
__author__ = 'Liu Yicong'
__email__ = 'imyikong@gmail.com'
@contextmanager
def mock_patches(*patches, **named_patches):
    """
    A context manager to help create mock patches.

    >>> with mock_patches("package.module.cls", cls2="package.cls") as mocks:
    ...     mocks.cls() #=> package.module.cls
    ...     mocks.cls2() #=> package.cls
    """
    # Attribute names: last dotted component for positional targets,
    # keyword names for named targets (dicts preserve insertion order).
    attrs = [target.split(".")[-1] for target in patches]
    attrs.extend(named_patches.keys())
    targets = list(patches)
    targets.extend(named_patches.values())

    started = []  # patches successfully started, for guaranteed cleanup
    mocks = mock.Mock()
    # Robustness fix: wrap starting in try/finally so that patches already
    # started are stopped even when a later patch.start() raises (previously
    # they leaked, because cleanup only covered the yield).
    try:
        for attr, target in zip(attrs, targets):
            patch = mock.patch(target)
            setattr(mocks, attr, patch.start())
            started.append(patch)
        yield mocks
    finally:
        for p in started:
            p.stop()
| 907 | 315 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-11 12:37
from __future__ import absolute_import
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an optional specific_start_date column to
    # CreateScheduleInstanceActionDefinition.

    dependencies = [
        ('data_interfaces', '0015_automaticupdaterule_locked_for_editing'),
    ]

    operations = [
        migrations.AddField(
            model_name='createscheduleinstanceactiondefinition',
            name='specific_start_date',
            # Nullable so existing rows need no default value.
            field=models.DateField(null=True),
        ),
    ]
| 558 | 185 |
# -*- coding:utf-8 -*-
"""
console.py
~~~~~~~~
数据发布插件 - 输出到控制台
:author: Fufu, 2021/6/7
"""
from . import OutputPlugin
from ..libs.metric import Metric
class Console(OutputPlugin):
    """Data publisher plugin that writes each metric to the console."""

    # Plugin registry name.
    name = 'console'

    async def write(self, metric: Metric) -> None:
        """Print the metric's text representation, prefixed with '>>>'."""
        print('>>>', metric.as_text)
import json
import unittest
from slack_sdk.web import WebClient
from tests.slack_sdk.web.mock_web_api_server import (
setup_mock_web_api_server,
cleanup_mock_web_api_server,
)
class TestWebClient_Issue_1049(unittest.TestCase):
    """Regression test for issue #1049: iterating paginated admin search results."""

    def setUp(self):
        setup_mock_web_api_server(self)

    def tearDown(self):
        cleanup_mock_web_api_server(self)

    def test_the_pattern(self):
        client = WebClient(
            base_url="http://localhost:8888",
            token="xoxb-admin_convo_pagination",
        )
        # The API call is iterable; the mock server serves exactly two pages.
        collected = list(client.admin_conversations_search(query="announcement"))
        self.assertEqual(len(collected), 2)
| 699 | 239 |
from netforce.model import Model, fields, get_model, clear_cache
from netforce.database import get_connection
from datetime import *
import time
from netforce import access
class TaskList(Model):
    """A named list of tasks, optionally attached to a project and milestone."""
    _name = "task.list"
    _string = "Task List"
    _fields = {
        "name": fields.Char("Name",required=True),
        "date_created": fields.Date("Date Created",required=True),
        "project_id": fields.Many2One("project","Project"),
        "milestone_id": fields.Many2One("project.milestone","Milestone"),
        "tasks": fields.One2Many("task","task_list_id","Tasks"),
    }
    # Newest lists first.
    _order = "date_created desc,id desc"
    _defaults = {
        # Default creation date: today (local time).
        "date_created": lambda *a: time.strftime("%Y-%m-%d"),
    }

# Register the model with the netforce ORM.
TaskList.register()
| 734 | 228 |
#!/usr/bin/python3
# Open an interactive SSH shell to a Cisco IOS router and print `show ip int br`.
import paramiko,time

# Use paramiko as an SSH client.
client=paramiko.SSHClient()
# Automatically accept unknown host keys instead of prompting
# (convenient for labs, insecure for production).
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Connection parameters for the remote Cisco IOS device.
addr=input("Enter your Router IP :")
# NOTE(review): credentials are hardcoded — consider getpass/prompting instead.
u='root'
p='cisco'
# Open the SSH session (agent and key lookup disabled: password auth only).
client.connect(addr,username=u,password=p,allow_agent=False, look_for_keys=False)
# Request an interactive shell channel.
device_access=client.invoke_shell()
# Send the command to the shell.
device_access.send("show ip int br \n")
time.sleep(1)
# Assuming the command has executed after the 1s pause, receive the output.
# NOTE(review): a fixed sleep may truncate output on slow devices.
output=device_access.recv(65000)
# Decode the byte string into text.
print(output.decode('ascii'))
| 696 | 232 |
#!/usr/bin/env python3
import sys, os
import arsdkparser
#===============================================================================
class Writer(object):
    """Minimal printf-style writer over a file object."""

    def __init__(self, fileobj):
        self.fileobj = fileobj

    def write(self, fmt, *args):
        # fmt % () still processes '%%' escapes, matching the args-present path.
        rendered = fmt % args if args else fmt % ()
        self.fileobj.write(rendered)
#===============================================================================
def class_name(name):
    """ObjC class name for a feature: 'ArsdkFeature' + CamelCase(name)."""
    camel = "".join(part.capitalize() for part in name.split('_'))
    return "ArsdkFeature" + camel


def enum_class_name(feature_strict_name, enum_name):
    """ObjC class name for an enum belonging to a feature."""
    suffix = "".join(part.capitalize() for part in enum_name.split('_'))
    return class_name(feature_strict_name) + suffix


def multiset_class_name(feature_strict_name, multiset_name):
    """ObjC class name for a multisetting belonging to a feature."""
    suffix = "".join(part.capitalize() for part in multiset_name.split('_'))
    return class_name(feature_strict_name) + suffix


def param_name(name):
    """lowerCamelCase parameter name from a snake_case name."""
    first, *rest = name.split('_')
    return first.lower() + "".join(word[0].upper() + word[1:] for word in rest)
def arg_type(feature_strict_name, arg, is_fun_arg=False):
    """ObjC type string for a command argument."""
    primitive = {
        arsdkparser.ArArgType.I8: "NSInteger",
        arsdkparser.ArArgType.U8: "NSUInteger",
        arsdkparser.ArArgType.I16: "NSInteger",
        arsdkparser.ArArgType.U16: "NSUInteger",
        arsdkparser.ArArgType.I32: "NSInteger",
        arsdkparser.ArArgType.U32: "NSUInteger",
        arsdkparser.ArArgType.I64: "int64_t",
        arsdkparser.ArArgType.U64: "uint64_t",
        arsdkparser.ArArgType.FLOAT: "float",
        arsdkparser.ArArgType.DOUBLE: "double",
        arsdkparser.ArArgType.STRING: "NSString*",
    }
    kind = arg.argType
    if isinstance(kind, arsdkparser.ArEnum):
        return enum_class_name(feature_strict_name, kind.name)
    if isinstance(kind, arsdkparser.ArBitfield):
        # 64-bit bitfields need the 64-bit type; everything else fits in U32.
        wide = kind.btfType in (arsdkparser.ArArgType.I64, arsdkparser.ArArgType.U64)
        return primitive[arsdkparser.ArArgType.U64 if wide else arsdkparser.ArArgType.U32]
    if isinstance(kind, arsdkparser.ArMultiSetting):
        name = multiset_class_name(feature_strict_name, kind.name)
        return name + ' *' if is_fun_arg else name
    return primitive[kind]


def multiset_c_name(ftr, multiset):
    """C struct name for a multisetting."""
    return "struct arsdk_{}_{}".format(ftr, multiset)


def arg_c_type(arg, is_fun_arg=False):
    """C type string for a command argument."""
    primitive = {
        arsdkparser.ArArgType.I8: "int8_t",
        arsdkparser.ArArgType.U8: "uint8_t",
        arsdkparser.ArArgType.I16: "int16_t",
        arsdkparser.ArArgType.U16: "uint16_t",
        arsdkparser.ArArgType.I32: "int32_t",
        arsdkparser.ArArgType.U32: "uint32_t",
        arsdkparser.ArArgType.I64: "int64_t",
        arsdkparser.ArArgType.U64: "uint64_t",
        arsdkparser.ArArgType.FLOAT: "float",
        arsdkparser.ArArgType.DOUBLE: "double",
        arsdkparser.ArArgType.STRING: "const char*",
    }
    kind = arg.argType
    if isinstance(kind, arsdkparser.ArEnum):
        # Enums travel as 32-bit ints at the C level.
        return primitive[arsdkparser.ArArgType.I32]
    if isinstance(kind, arsdkparser.ArBitfield):
        return primitive[kind.btfType]
    if isinstance(kind, arsdkparser.ArMultiSetting):
        name = multiset_c_name("generic", kind.name.lower())
        return name + ' *' if is_fun_arg else name
    return primitive[kind]


def arg_name(arg):
    """ObjC parameter name for an argument; bitfields get a 'BitField' suffix."""
    base = param_name(arg.name)
    if isinstance(arg.argType, arsdkparser.ArBitfield):
        return base + "BitField"
    return base


def arg_value_from_obj_c_to_c(feature_strict_name, arg):
    """Expression converting an ObjC argument value to its C representation."""
    name = arg_name(arg)
    if arg.argType == arsdkparser.ArArgType.STRING:
        return "[" + name + " UTF8String]"
    if isinstance(arg.argType, arsdkparser.ArMultiSetting):
        return "[%s getNativeSettings]" % name
    if arg_c_type(arg) != arg_type(feature_strict_name, arg):
        # Explicit C cast where ObjC and C type names differ.
        return "(" + arg_c_type(arg) + ")" + name
    return name


def c_name(val):
    """Capitalize only the first character (unlike str.capitalize)."""
    return val[0].upper() + val[1:]
#===============================================================================
def expected_cmd_class():
    """Name of the generated ObjC base class for expected commands."""
    return "ExpectedCmd"


def command_name(feature_name, cmd):
    """lowerCamelCase name combining feature name and command name."""
    camel = "".join(
        part.capitalize() for part in (feature_name + "_" + cmd.name).split('_')
    )
    # Lower the first letter.
    return camel[0].lower() + camel[1:]


def static_initializer_method_name(feature_obj, feature_name, cmd, with_swift_name=False):
    """ObjC '+' initializer signature for a command, optionally with NS_SWIFT_NAME."""
    root = command_name(feature_name, cmd)
    signature = "+ (" + expected_cmd_class() + "*)" + root
    if cmd.args:
        # The first argument is special: its name is folded into the selector.
        first = cmd.args[0]
        signature += ":(" + arg_type(feature_obj.name, first, True) + ")" + arg_name(first)
        for arg in cmd.args[1:]:
            signature += " " + arg_name(arg) + ":(" + arg_type(feature_obj.name, arg, True) + ")" + arg_name(arg)
    if with_swift_name:
        swift_args = "".join(arg_name(arg) + ":" for arg in cmd.args)
        signature += "\nNS_SWIFT_NAME(" + root + "(" + swift_args + "))"
    return signature


def command_class_name(feature_name, cmd):
    """ObjC subclass name for a specific expected command."""
    root = command_name(feature_name, cmd)
    return expected_cmd_class() + root[0].upper() + root[1:]


def match_command_name():
    """Signature of the generated 'match' method."""
    return "- (BOOL)match:(struct arsdk_cmd*)cmd checkParams:(BOOL)checkParams"
def gen_expected_header_file(ctx, out):
    """Emit the ObjC header declaring ExpectedCmd and one subclass per command."""
    out.write("/** Generated, do not edit ! */\n")
    out.write("\n")
    out.write("#import <Foundation/Foundation.h>\n")
    out.write("#import <SdkCore/Arsdk.h>\n")
    out.write("\n")
    out.write("struct arsdk_cmd;\n")
    out.write("\n")
    out.write("@interface %s : NSObject\n", expected_cmd_class())
    out.write("\n")
    out.write("%s;\n", match_command_name())
    out.write("- (NSString*)describe;\n");
    out.write("\n")
    # One static initializer declaration per command of every feature.
    for feature_id in sorted(ctx.featuresById.keys()):
        feature_obj = ctx.featuresById[feature_id]
        for cmd in feature_obj.cmds:
            feature_name = feature_obj.name + ("_" + cmd.cls.name if cmd.cls else "")
            out.write("%s;\n", static_initializer_method_name(feature_obj, feature_name, cmd, True))
    out.write("@end\n")
    out.write("\n")
    # One ExpectedCmd subclass declaration per command.
    for feature_id in sorted(ctx.featuresById.keys()):
        feature_obj = ctx.featuresById[feature_id]
        for cmd in feature_obj.cmds:
            feature_name = feature_obj.name + ("_" + cmd.cls.name if cmd.cls else "")
            out.write("@interface %s : %s\n", command_class_name(feature_name, cmd), expected_cmd_class())
            out.write("@end\n")
    out.write("\n")
def gen_expected_source_file(ctx, out):
    """Emit the ObjC implementation of ExpectedCmd and all its command subclasses."""
    out.write("/** Generated, do not edit ! */\n")
    out.write("\n")
    out.write("#import \"" + expected_cmd_class() + ".h\"\n")
    out.write("#import <arsdk/arsdk.h>\n")
    out.write("\n")
    # Private class extension holding the native command struct.
    out.write("@interface %s ()\n", expected_cmd_class())
    out.write("\n")
    out.write("@property (nonatomic, assign) struct arsdk_cmd* cmd;\n")
    out.write("@end\n")
    out.write("\n")
    out.write("@implementation %s\n", expected_cmd_class())
    out.write("\n")
    # The base class never matches; subclasses override match.
    out.write("%s {return false;}\n", match_command_name())
    out.write("\n")
    out.write("- (NSString*)describe {\n");
    out.write(" return [ArsdkCommand describe:self.cmd];\n");
    out.write("}\n");
    out.write("\n")
    # Static initializers: allocate and encode the expected native command.
    for feature_id in sorted(ctx.featuresById.keys()):
        feature_obj = ctx.featuresById[feature_id]
        for cmd in feature_obj.cmds:
            feature_name = feature_obj.name + ("_" + cmd.cls.name if cmd.cls else "")
            out.write("%s {\n", static_initializer_method_name(feature_obj, feature_name, cmd))
            out.write(" %s *expectedCmd = [[%s alloc] init];\n",
                      command_class_name(feature_name, cmd),
                      command_class_name(feature_name, cmd))
            out.write(" expectedCmd.cmd = calloc(1, sizeof(*expectedCmd.cmd));\n")
            out.write(" arsdk_cmd_init(expectedCmd.cmd);\n")
            out.write("\n")
            if cmd.args:
                out.write(" int res = arsdk_cmd_enc_%s_%s(expectedCmd.cmd, %s);\n",
                          c_name(feature_name), c_name(cmd.name),
                          ", ".join(arg_value_from_obj_c_to_c(feature_obj.name, arg) for arg in cmd.args))
            else:
                out.write(" int res = arsdk_cmd_enc_%s_%s(expectedCmd.cmd);\n",
                          c_name(feature_name), c_name(cmd.name))
            out.write(" if (res < 0) {\n")
            out.write(" return nil;\n")
            out.write(" }\n")
            out.write(" return expectedCmd;\n")
            out.write("}\n")
            out.write("\n")
    out.write("@end\n")
    out.write("\n")
    # Subclass implementations: match by command id, then (optionally) by
    # decoding both commands and comparing every parameter.
    for feature_id in sorted(ctx.featuresById.keys()):
        feature_obj = ctx.featuresById[feature_id]
        for cmd in feature_obj.cmds:
            feature_name = feature_obj.name + ("_" + cmd.cls.name if cmd.cls else "")
            out.write("@implementation %s\n", command_class_name(feature_name, cmd))
            out.write("\n")
            out.write("%s {\n", match_command_name())
            out.write(" if (self.cmd->id != cmd->id) return false;\n")
            out.write("\n")
            if cmd.args:
                out.write(" if (checkParams) {\n")
                # Decode the candidate command's parameters (prefixed '_').
                for arg in cmd.args:
                    out.write(" %s _%s;\n", arg_c_type(arg), arg_name(arg))
                out.write(" int res = arsdk_cmd_dec_%s_%s(cmd, %s);\n",
                          c_name(feature_name), c_name(cmd.name),
                          ", ".join("&_" + arg_name(arg) for arg in cmd.args))
                out.write(" if (res < 0) {\n")
                out.write(" return false;\n")
                out.write(" }\n")
                out.write("\n")
                # Decode the expected command's parameters (prefixed 'my').
                for arg in cmd.args:
                    out.write(" %s my%s;\n", arg_c_type(arg), arg_name(arg).title())
                out.write(" res = arsdk_cmd_dec_%s_%s(self.cmd, %s);\n",
                          c_name(feature_name), c_name(cmd.name),
                          ", ".join("&my" + arg_name(arg).title() for arg in cmd.args))
                out.write(" if (res < 0) {\n")
                out.write(" return false;\n")
                out.write(" }\n")
                out.write("\n")
                # Compare per argument: strings by value, multisettings by
                # memcmp, everything else with plain !=.
                for arg in cmd.args:
                    if arg.argType == arsdkparser.ArArgType.STRING:
                        out.write(" NSString* %sObj = [NSString stringWithUTF8String:_%s];\n",
                                  arg_name(arg), arg_name(arg))
                        out.write(" NSString* my%sObj = [NSString stringWithUTF8String:my%s];\n",
                                  arg_name(arg).title(), arg_name(arg).title())
                        out.write(" if (![%sObj isEqual:my%sObj]) return false;\n", arg_name(arg), arg_name(arg).title())
                    elif isinstance(arg.argType, arsdkparser.ArMultiSetting):
                        out.write(" res = memcmp(&_%s, &my%s, sizeof(my%s));\n", arg.name, arg_name(arg).title(),
                                  arg_name(arg).title())
                        out.write(" if (res != 0) {\n")
                        out.write(" return false;\n")
                        out.write(" }\n")
                    else:
                        out.write(" if (_%s != my%s) return false;\n", arg_name(arg), arg_name(arg).title())
                out.write("\n")
                out.write(" }\n")
            out.write(" return true;\n")
            out.write("}\n")
            out.write("@end\n")
            out.write("\n")
#===============================================================================
def cmd_encoder_class():
    """Name of the generated ObjC encoder class."""
    return "CmdEncoder"
def encoder_function_signature(feature_obj, msg, with_swift_name=False):
    """ObjC '+' signature returning an encoder block for an event message."""
    feature_name = feature_obj.name + ("_" + msg.cls.name if msg.cls else "")
    underscored = command_name(feature_name, msg) + "_encoder"
    parts = underscored.split('_')
    func_name = parts[0][0].lower() + parts[0][1:] + "".join(
        p[0].upper() + p[1:] for p in parts[1:]
    )
    signature = "+ (int (^)(struct arsdk_cmd *))" + func_name
    if msg.args:
        # The first argument's name is folded into the selector.
        first = msg.args[0]
        signature += ":(" + arg_type(feature_obj.name, first, True) + ")" + arg_name(first)
        for arg in msg.args[1:]:
            signature += " " + arg_name(arg) + ":(" + arg_type(feature_obj.name, arg, True) + ")" + arg_name(arg)
    if with_swift_name:
        signature += "\nNS_SWIFT_NAME(" + func_name + "("
        signature += "".join(arg_name(arg) + ":" for arg in msg.args)
        signature += "))"
    return signature
def gen_encoder_header_file(ctx, out):
    """Emit the ObjC header declaring one encoder factory per event."""
    out.write("/** Generated, do not edit ! */\n")
    out.write("\n")
    out.write("#import <Foundation/Foundation.h>\n")
    out.write("#import <SdkCore/Arsdk.h>\n")
    out.write("\n")
    out.write("struct arsdk_cmd;\n")
    out.write("\n")
    out.write("@interface %s : NSObject\n", cmd_encoder_class())
    out.write("\n")
    # One factory declaration per event of every feature.
    for feature_id in sorted(ctx.featuresById.keys()):
        feature_obj = ctx.featuresById[feature_id]
        for evt in feature_obj.evts:
            out.write("%s;\n", encoder_function_signature(feature_obj, evt, True))
    out.write("@end\n")
    out.write("\n")
def gen_encoder_source_file(ctx, out):
    """Emit the ObjC implementation returning an encoder block per event."""
    out.write("/** Generated, do not edit ! */\n")
    out.write("\n")
    out.write("#import \"%s.h\"\n", cmd_encoder_class())
    out.write("#import <arsdk/arsdk.h>\n")
    out.write("\n")
    out.write("@implementation %s\n", cmd_encoder_class())
    out.write("\n")
    for feature_id in sorted(ctx.featuresById.keys()):
        feature_obj = ctx.featuresById[feature_id]
        for evt in feature_obj.evts:
            feature_name = feature_obj.name + ("_" + evt.cls.name if evt.cls else "")
            out.write("%s {\n", encoder_function_signature(feature_obj, evt))
            # The returned block encodes this event into the given command.
            out.write(" return ^(struct arsdk_cmd* cmd) {\n")
            if evt.args:
                out.write(" return arsdk_cmd_enc_%s_%s(cmd, %s);\n",
                          c_name(feature_name), c_name(evt.name),
                          ", ".join(arg_value_from_obj_c_to_c(feature_obj.name, arg) for arg in evt.args))
            else:
                out.write(" return arsdk_cmd_enc_%s_%s(cmd);\n",
                          c_name(feature_name), c_name(evt.name))
            out.write(" };\n")
            out.write("}\n")
            out.write("\n")
    out.write("@end\n")
    out.write("\n")
#===============================================================================
def list_files(ctx, outdir, extra):
    """Generator-interface hook: this generator has nothing extra to list."""
    pass
#===============================================================================
#===============================================================================
def generate_files(ctx, outdir, extra):
    """Generate the Objective-C test-feature files into `outdir`.

    Creates `outdir` if needed, otherwise empties it first, then writes the
    expected-command and command-encoder header/source pairs.
    """
    if not os.path.exists(outdir):
        # BUG FIX: os.mkdirs does not exist (AttributeError at runtime);
        # the correct API is os.makedirs.
        os.makedirs(outdir)
    else:
        # Start from a clean directory so stale generated files never linger.
        for entry in os.listdir(outdir):
            os.remove(os.path.join(outdir, entry))
    # (file name, generator) pairs, written in the original order.
    outputs = (
        (expected_cmd_class() + ".h", gen_expected_header_file),
        (expected_cmd_class() + ".m", gen_expected_source_file),
        (cmd_encoder_class() + ".h", gen_encoder_header_file),
        (cmd_encoder_class() + ".m", gen_encoder_source_file),
    )
    for name, generator in outputs:
        filepath = os.path.join(outdir, name)
        with open(filepath, "w") as file_obj:
            generator(ctx, Writer(file_obj))
    print("Done generating test features files.")
| 16,830 | 5,598 |
import numpy
import pygame
import random
from pygame import gfxdraw
pygame.init()  # initialise all pygame modules before any display use
config_instance = open('settings.txt', 'r', encoding = 'utf-8')  # NOTE(review): handle is never closed — consider a context manager
class Settings:
    """Game settings parsed from the key/value pairs read out of settings.txt."""

    def __init__(self, settings: dict):
        def str_to_rgb(sequence):
            """Parse an 'R G B' string into an (r, g, b) tuple, validating each channel."""
            r, g, b = sequence.split(' ')
            r, g, b = int(r), int(g), int(b)
            # BUG FIX: `r not in range(0, 255)` rejected the legal value 255
            # (range end is exclusive), so pure white/red/etc. raised.
            # Channels are valid over the inclusive range 0..255.
            if not all(0 <= channel <= 255 for channel in (r, g, b)):
                raise ValueError(f'You set wrong colour values, check your settings! ({r, g, b})') # wrong rgb color values
            return (r, g, b)
        # Maps the human-readable key in settings.txt to an attribute name,
        # optionally paired with a converter applied to the raw string value.
        setting_names = {
            'size of cell': ('cellsize', int),
            'size of grid': ('gridsize', int),
            'snake colour': 'snake_color',
            'apple colour': 'apple_color',
            'default length': ('snake_len', int)
        }
        for key, value in settings.items():
            if (setting_names.get(key)):
                if (isinstance(setting_names[key], tuple)):
                    setattr(self, setting_names[key][0], setting_names[key][1](value))
                else:
                    setattr(self, setting_names[key], value)
        # Colours arrive as raw strings; convert them, falling back to defaults.
        if (getattr(self, 'snake_color', None)):
            self.snake_color = str_to_rgb(self.snake_color)
        else:
            self.snake_color = (10, 240, 100) # default color
        if (getattr(self, 'apple_color', None)):
            self.apple_color = str_to_rgb(self.apple_color)
        else:
            self.apple_color = (240, 10, 10) # default color
def file_handler(instance):
    """Parse 'key - value' lines from an open file object and build a Settings."""
    parsed = {}
    for raw_line in instance.read().split('\n'):
        pieces = raw_line.split(' - ')
        parsed[pieces[0].strip()] = pieces[1].strip()
    return Settings(parsed)
settings = file_handler(config_instance)  # parse settings.txt into a Settings object
class Game:
    """Snake game: owns the pygame window, the snake/apple state and the main loop.

    Coordinates are grid cells in 0..gridsize-1; the snake is a list of (x, y)
    tuples whose LAST element is the head.
    """

    def __init__(self, settings):
        self.settings = settings
        self.clock = pygame.time.Clock()
        self.loop = False
        self.display = pygame.display.set_mode((self.settings.gridsize * self.settings.cellsize, self.settings.gridsize * self.settings.cellsize))
        self.snake: list = []       # (x, y) cells, head last
        self.apple: list = []       # [x, y] cell of the current apple
        self.direction: str = 'right'
        middle = self.settings.gridsize // 2
        # Lay the snake out horizontally in the middle row, head to the right.
        xcoords = [middle + i for i in range(self.settings.snake_len)]
        ycoords = [middle for _ in range(self.settings.snake_len)] # default snake position
        for x, y in zip(xcoords, ycoords):
            self.snake.append((x, y))
        pygame.display.set_caption('Snake Game')

    def start(self):
        """Run the event/update/draw loop until the player quits or loses."""
        self.loop = True
        self.spawn_apple()
        while (self.loop):
            for e in pygame.event.get():
                if (e.type == pygame.QUIT):
                    self.loop = False
                if (e.type == pygame.KEYDOWN):
                    # Ignore direct reversals (e.g. 'up' while moving 'down').
                    if (e.key in [pygame.K_w, pygame.K_UP] and self.direction != 'down'):
                        self.direction = 'up'
                    elif (e.key in [pygame.K_s, pygame.K_DOWN] and self.direction != 'up'):
                        self.direction = 'down'
                    elif (e.key in [pygame.K_d, pygame.K_RIGHT] and self.direction != 'left'):
                        self.direction = 'right'
                    elif (e.key in [pygame.K_a, pygame.K_LEFT] and self.direction != 'right'):
                        self.direction = 'left'
            self.clock.tick(15)
            self.display.fill((0, 0, 0))
            self.move_snake()
            self.draw()
            pygame.display.update()

    def move_snake(self):
        """Advance one cell; handle apple eating, self-collision and the walls."""
        self.snake.pop(0)
        if (self.direction == 'left'):
            self.snake.append((self.snake[-1][0] - 1, self.snake[-1][1]))
        elif (self.direction == 'right'):
            self.snake.append((self.snake[-1][0] + 1, self.snake[-1][1]))
        elif (self.direction == 'up'):
            self.snake.append((self.snake[-1][0], self.snake[-1][1] - 1))
        elif (self.direction == 'down'):
            self.snake.append((self.snake[-1][0], self.snake[-1][1] + 1))
        if (self.snake[-1] == tuple(self.apple)):
            self.add_snakes_length(self.direction)
            self.spawn_apple()
        if (self.snake[-1] in self.snake[:-1]):
            self.loop = False
            print(f'You lose. Score: {len(self.snake) - self.settings.snake_len}')
        # BUG FIX: the wall check compared against cellsize (pixels per cell)
        # instead of the grid bounds; valid cells are 0..gridsize-1.
        if (self.snake[-1][0] < 0 or self.snake[-1][1] < 0
                or self.snake[-1][0] >= self.settings.gridsize
                or self.snake[-1][1] >= self.settings.gridsize):
            self.loop = False
            print(f'You lose. Score: {len(self.snake) - self.settings.snake_len}')

    def spawn_apple(self):
        """Place the apple on a random cell not occupied by the snake."""
        in_snake = True
        while (in_snake):
            apple_x = random.randint(0, self.settings.gridsize - 1)
            apple_y = random.randint(0, self.settings.gridsize - 1)
            if ((apple_x, apple_y) not in self.snake and (apple_x, apple_y) != self.apple):
                in_snake = False
        self.apple = [apple_x, apple_y]

    def add_snakes_length(self, direction):
        """Grow by one cell at the tail (index 0), opposite the travel direction."""
        # BUG FIX: the 'left'/'right' branches grew the snake vertically
        # (y axis); horizontal travel must extend the tail along the x axis.
        if (direction == 'up'):
            self.snake.insert(0, (self.snake[0][0], self.snake[0][1] + 1))
        elif (direction == 'down'):
            self.snake.insert(0, (self.snake[0][0], self.snake[0][1] - 1))
        elif (direction == 'left'):
            self.snake.insert(0, (self.snake[0][0] + 1, self.snake[0][1]))
        elif (direction == 'right'):
            self.snake.insert(0, (self.snake[0][0] - 1, self.snake[0][1]))

    def draw(self):
        """Render the apple and every snake segment as filled cells."""
        cellsize = self.settings.cellsize
        gfxdraw.box(self.display, (self.apple[0] * cellsize, self.apple[1] * cellsize, cellsize, cellsize), self.settings.apple_color)
        for x, y in self.snake:
            gfxdraw.box(self.display, (x * cellsize, y * cellsize, cellsize, cellsize), self.settings.snake_color)
game = Game(settings)  # NOTE(review): runs at import time; consider a __main__ guard
game.start()
import time
import random
def parse(input):
    """Tokenise *input* on spaces and return one stub analysis dict per token.

    The 5-second sleep and the 200000-iteration random loop below produce
    nothing that is used afterwards; presumably they simulate an expensive
    NLP backend so callers can exercise latency handling — TODO confirm
    before removing them.
    """
    tokens = input.split(" ")
    parsedTokens = []
    time.sleep(5)  # artificial delay (see docstring)
    for i in range(200000):
        test = random.randint(1, 8) + random.randint(-4, 90)  # result unused; busy-work only
    for token in tokens:
        if token == "":
            continue  # collapse runs of spaces: skip empty tokens
        parsedTokens.append({
            "text": token,
            "lemma": token,
            "pos": "verb",
            "decl": "3rd person singular future tense",
            "gloss": ["a test definition", "a second test definition"]
        })
    return parsedTokens
| 484 | 168 |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 6 10:07:13 2020
@author: sjliu.me@gmail.com
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class wcrn(nn.Module):
    """Wide contextual residual network over 103-band pixel patches.

    The two pooling windows (3 and 5) collapse the 3x3 and 5x5 stem outputs
    to 1x1, so the flatten before the classifier assumes 5x5 input patches.
    """

    def __init__(self, num_classes=9):
        super(wcrn, self).__init__()
        # Two parallel stems: a 3x3 context branch and a 1x1 spectral branch.
        self.conv1a = nn.Conv2d(103, 64, kernel_size=3, stride=1, padding=0, groups=1)
        self.conv1b = nn.Conv2d(103, 64, kernel_size=1, stride=1, padding=0, groups=1)
        self.maxp1 = nn.MaxPool2d(kernel_size=3)
        self.maxp2 = nn.MaxPool2d(kernel_size=5)
        self.bn1 = nn.BatchNorm2d(128)
        # Residual refinement: two 1x1 convolutions on the fused 128 channels.
        self.conv2a = nn.Conv2d(128, 128, kernel_size=1, stride=1, padding=0, groups=1)
        self.conv2b = nn.Conv2d(128, 128, kernel_size=1, stride=1, padding=0, groups=1)
        self.fc = nn.Linear(128, num_classes)

    def forward(self, x):
        # Fuse both pooled stems into a single 128-channel map.
        wide = self.maxp1(self.conv1a(x))
        narrow = self.maxp2(self.conv1b(x))
        fused = torch.cat((wide, narrow), 1)
        # Pre-activation residual branch added back onto the fused features.
        branch = self.conv2a(F.relu(self.bn1(fused)))
        branch = self.conv2b(F.relu(branch))
        merged = torch.add(fused, branch)
        flat = merged.reshape(merged.size(0), -1)
        return self.fc(flat)
class resnet99_avg(nn.Module):
    """Small two-block residual CNN (103 bands in) with global average pooling."""

    def __init__(self, num_classes=9):
        super(resnet99_avg, self).__init__()
        # Parallel 3x3 stems, concatenated to 64 channels.
        self.conv1a = nn.Conv2d(103, 32, kernel_size=3, stride=1, padding=0, groups=1)
        self.conv1b = nn.Conv2d(103, 32, kernel_size=3, stride=1, padding=0, groups=1)
        self.bn1 = nn.BatchNorm2d(64, eps=0.001, momentum=0.9)
        self.conv2a = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, groups=1)
        self.conv2b = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, groups=1)
        self.bn2 = nn.BatchNorm2d(64, eps=0.001, momentum=0.9)
        self.conv3a = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, groups=1)
        self.conv3b = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, groups=1)
        self.fc = nn.Linear(64, num_classes)

    def forward(self, x):
        trunk = torch.cat((self.conv1a(x), self.conv1b(x)), 1)
        # First pre-activation residual block.
        branch = self.conv2a(F.relu(self.bn1(trunk)))
        branch = self.conv2b(F.relu(branch))
        trunk = torch.add(trunk, branch)
        # Second pre-activation residual block.
        branch = self.conv3a(F.relu(self.bn2(trunk)))
        branch = self.conv3b(F.relu(branch))
        trunk = torch.add(trunk, branch)
        # Global average pooling, then the linear classifier head.
        pooled = nn.AdaptiveAvgPool2d((1, 1))(trunk)
        flat = pooled.reshape(pooled.size(0), -1)
        return self.fc(flat)
| 2,961 | 1,380 |
__all__ = ['templatetags']  # restrict wildcard imports of this package to 'templatetags'
| 27 | 13 |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for parametric_object.py."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_robotics.manipulation.props.parametric_object import parametric_object
class PropertyTest(parameterized.TestCase):
  """Exercises ParametricMinMaxBounds, ParametricProperties, ParametricObject
  and the ParametersDict algebra from parametric_object."""

  def test_size_mismatch_in_init(self):
    """Bounds whose inner lists are not [min, max] pairs must be rejected."""
    with self.assertRaises(ValueError):
      _ = parametric_object.ParametricMinMaxBounds({
          'p': [[0, 255]], 'q': [[0, 255]], 'r': [[1, 2, 3]]})
    with self.assertRaises(ValueError):
      _ = parametric_object.ParametricMinMaxBounds({
          'p': [[0, 255]], 'q': [[0, 255]], 'r': [[1]]})
    with self.assertRaises(ValueError):
      _ = parametric_object.ParametricMinMaxBounds({
          'p': [[0, 255]], 'q': [[0, 255]], 'r': [[]]})
    with self.assertRaises(ValueError):
      _ = parametric_object.ParametricMinMaxBounds({
          'p': [[0, 255]], 'q': [[0, 255]], 'r': []})

  def test_check_instance_assertions(self):
    """check_instance with numeric and name-relative bounds, and bad inputs."""
    param_names = ('p', 'q', 'r')
    param_check = parametric_object.ParametricMinMaxBounds({
        'p': [[0, 255]], 'q': [[0, 255]], 'r': [[0, 255]]})
    prop = parametric_object.ParametricProperties(param_names, param_check)
    prop.check_instance({'p': 122, 'q': 122, 'r': 122})
    self.assertEqual(prop._param_names, ('p', 'q', 'r'))
    # Out-of-range values are reported as False, not raised.
    reply = prop.check_instance({'p': 500, 'q': 0, 'r': 0})
    self.assertEqual(False, reply)
    reply = prop.check_instance({'p': 0, 'q': -500, 'r': 0})
    self.assertEqual(False, reply)
    # Bounds may reference other parameter names ('q' must lie in [p, r]).
    param_check = parametric_object.ParametricMinMaxBounds({
        'p': [[0, 255]], 'q': [['p', 'r']], 'r': [[0, 255]]})
    prop = parametric_object.ParametricProperties(param_names, param_check)
    prop.check_instance({'p': 0, 'q': 122, 'r': 255})
    reply = prop.check_instance({'p': 0, 'q': 255, 'r': 122})
    self.assertEqual(False, reply)
    reply = prop.check_instance({'p': 122, 'q': 0, 'r': 255})
    self.assertEqual(False, reply)
    # A missing parameter key raises rather than returning False.
    with self.assertRaises(ValueError):
      prop.check_instance({'p': 0, 'q': 255})
    # Names like 'p0' raise — presumably digits in parameter names are
    # reserved for encoding values; confirm against parametric_object.
    param_names = ('p0', 'p1', 'p2')
    with self.assertRaises(ValueError):
      param_check = parametric_object.ParametricMinMaxBounds({
          'p0': [[0, 255]], 'p1': [[0, 255]], 'p2': [[0, 255]]}).check_instance

  def test_get_dict(self):
    """get_dict parses '<name><value>_' strings and rejects malformed ones."""
    names = ('first', 'second', 'third')
    checks = parametric_object.ParametricMinMaxBounds({
        'first': [[0, 255]],
        'second': [[0, 255]],
        'third': [[0, 255]]})
    prop = parametric_object.ParametricProperties(names, checks)
    _ = prop.get_dict('first0_second0_third0')
    with self.assertRaises(ValueError):
      _ = prop.get_dict('first0_second0')
    with self.assertRaises(ValueError):
      _ = prop.get_dict('first0_second0_fourth0')
    with self.assertRaises(ValueError):
      _ = prop.get_dict('first0_second0_')

  def test_set_types(self):
    """Declared INTEGER/FLOAT parameter types are enforced by check_instance."""
    names = ('first', 'second', 'third')
    types = {'first': parametric_object.ParametersTypes.INTEGER,
             'second': parametric_object.ParametersTypes.INTEGER,
             'third': parametric_object.ParametersTypes.INTEGER}
    checks = parametric_object.ParametricMinMaxBounds({
        'first': [[0, 255]],
        'second': [[0, 255]],
        'third': [[0, 255]]}, types)
    prop = parametric_object.ParametricProperties(names, checks)
    reply = prop.check_instance({'first': 0, 'second': 255, 'third': 122})
    self.assertEqual(True, reply)
    reply = prop.check_instance({'first': 0.0, 'second': 0.0, 'third': 0.0})
    self.assertEqual(False, reply)
    prop_shape = parametric_object.ParametricProperties(names, checks)
    prop_texture = parametric_object.ParametricProperties(names, checks)
    prop = parametric_object.ParametricObject(prop_shape, prop_texture)
    reply = prop.check_instance({'first': 0, 'second': 255, 'third': 122},
                                {'first': 0, 'second': 255, 'third': 122})
    self.assertEqual(True, reply)
    names = ('first', 'second', 'third')
    types = {'first': parametric_object.ParametersTypes.FLOAT,
             'second': parametric_object.ParametersTypes.FLOAT,
             'third': parametric_object.ParametersTypes.FLOAT}
    checks = parametric_object.ParametricMinMaxBounds({
        'first': [[0, 255]],
        'second': [[0, 255]],
        'third': [[0, 255]]}, types)
    prop = parametric_object.ParametricProperties(names, checks)
    _ = prop.check_instance({'first': 0.0, 'second': 0.0, 'third': 0.0})
    reply = prop.check_instance({'first': 0, 'second': 255, 'third': 122})
    self.assertEqual(False, reply)
    # Mixed per-parameter types.
    types = {'first': parametric_object.ParametersTypes.FLOAT,
             'second': parametric_object.ParametersTypes.INTEGER,
             'third': parametric_object.ParametersTypes.FLOAT}
    checks = parametric_object.ParametricMinMaxBounds({
        'first': [[0, 255]],
        'second': [[0, 255]],
        'third': [[0, 255]]}, types)
    prop = parametric_object.ParametricProperties(names, checks)
    _ = prop.check_instance({'first': 0.0, 'second': 0, 'third': 0.0})
    reply = prop.check_instance({'first': 0, 'second': 255, 'third': 122})
    self.assertEqual(False, reply)
    # Without declared types, int and float values are both accepted.
    names = ('p', 'q', 'r')
    checks = parametric_object.ParametricMinMaxBounds({
        'p': [[0, 255]], 'q': [[0, 255]], 'r': [[0, 255]]})
    prop = parametric_object.ParametricProperties(names, checks)
    _ = prop.check_instance({'p': 0.0, 'q': 0, 'r': 0.0})
    _ = prop.check_instance({'p': 0, 'q': 255.0, 'r': 122})

  def test_parameters_min_max_tuples(self):
    """Multiple [min, max] intervals per parameter (disjunctive bounds)."""
    # 0 <= p0, p1, p2 <=1
    # 3 <= p0, p1, p2 <=4
    names = ('first', 'second', 'third')
    checks = parametric_object.ParametricMinMaxBounds({
        'first': [[0, 1], [3, 4]],
        'second': [[0, 1], [3, 4]],
        'third': [[0, 1], [3, 4]]})
    prop = parametric_object.ParametricProperties(names, checks)
    _ = prop.check_instance({'first': 0, 'second': 0, 'third': 0})
    _ = prop.check_instance({'first': 1, 'second': 1, 'third': 1})
    _ = prop.check_instance({'first': 3, 'second': 3, 'third': 3})
    _ = prop.check_instance({'first': 4, 'second': 4, 'third': 4})
    reply = prop.check_instance({'first': 2, 'second': 2, 'third': 2})
    self.assertEqual(False, reply)
    reply = prop.check_instance({'first': 2, 'second': 3, 'third': 3})
    self.assertEqual(False, reply)
    reply = prop.check_instance({'first': 2, 'second': 3, 'third': 2})
    self.assertEqual(False, reply)
    reply = prop.check_instance({'first': 3, 'second': 3, 'third': 2})
    self.assertEqual(False, reply)
    reply = prop.check_instance({'first': 5, 'second': 3, 'third': 3})
    self.assertEqual(False, reply)
    reply = prop.check_instance({'first': 1, 'second': 3, 'third': 3})
    self.assertEqual(False, reply)
    # if a == 2, 0 <= b <= c, 0 <= c <=10
    # if 3 <= a <= 10, 0 <= b <= 10, 0 <= c <=10
    names = ('a', 'b', 'c')
    checks = parametric_object.ParametricMinMaxBounds({
        'a': [[2, 2], [3, 10]],
        'b': [[0, 'c'], [0, 10]],
        'c': [[0, 10], [0, 10]]})
    prop = parametric_object.ParametricProperties(names, checks)
    # if a == 2, 0 <= b <= c, 0 <= c <=10
    # if 3 <= a <= 10, 0 <= b <= 10, 0 <= c <=10
    with self.assertRaises(ValueError):
      checks = parametric_object.ParametricMinMaxBounds({
          'a': [[2, 2], [3, 10]],
          'b': [[0, 'c'], [0, 10]],
          'c': [[0, 10]]})
    _ = prop.check_instance({'a': 2, 'b': 2, 'c': 10})
    _ = prop.check_instance({'a': 3, 'b': 5, 'c': 2})
    reply = prop.check_instance({'a': 2, 'b': 5, 'c': 2})
    self.assertEqual(False, reply)

  def test_add_parametric_dict(self):
    """Elementwise +; mismatched key sets or non-dict operands are errors."""
    a = parametric_object.ParametersDict({'k1': 1, 'k2': 2})
    b = parametric_object.ParametersDict({'k1': 3, 'k2': 4})
    c = parametric_object.ParametersDict({'k3': 5, 'k4': 6})
    d = parametric_object.ParametersDict({'k1': 7, 'k4': 8})
    r = a + b
    self.assertEqual(r['k1'], 4)
    self.assertEqual(r['k2'], 6)
    with self.assertRaises(TypeError):
      r = a + 1
    with self.assertRaises(ValueError):
      r = a + c
    with self.assertRaises(ValueError):
      r = a + d

  def test_sub_parametric_dict(self):
    """Elementwise -; mismatched key sets or non-dict operands are errors."""
    a = parametric_object.ParametersDict({'k1': 1, 'k2': 2})
    b = parametric_object.ParametersDict({'k1': 3, 'k2': 4})
    c = parametric_object.ParametersDict({'k3': 5, 'k4': 6})
    d = parametric_object.ParametersDict({'k1': 7, 'k4': 8})
    r = a - b
    self.assertEqual(r['k1'], -2)
    self.assertEqual(r['k2'], -2)
    with self.assertRaises(TypeError):
      r = a - 1
    with self.assertRaises(ValueError):
      r = a - c
    with self.assertRaises(ValueError):
      r = a - d

  def test_mult_parametric_dict(self):
    """Scalar * only (right operand); dict * dict is a TypeError."""
    a = parametric_object.ParametersDict({'k1': 1, 'k2': 2})
    b = parametric_object.ParametersDict({'k1': 3, 'k2': 4})
    r = a * 0.5
    self.assertEqual(r['k1'], int(a['k1']*1/2))
    self.assertEqual(r['k2'], int(a['k2']*1/2))
    with self.assertRaises(TypeError):
      r = a * b
    with self.assertRaises(TypeError):
      r = 0.5 * b

  def test_truediv_parametric_dict(self):
    """Scalar // only (right operand); dict // dict is a TypeError."""
    a = parametric_object.ParametersDict({'k1': 1, 'k2': 2})
    b = parametric_object.ParametersDict({'k1': 3, 'k2': 4})
    r = a // 2
    self.assertEqual(r['k1'], int(a['k1'] // 2))
    self.assertEqual(r['k2'], int(a['k2'] // 2))
    with self.assertRaises(TypeError):
      r = a // b
    with self.assertRaises(TypeError):
      r = 0.5 // b

  def test_types_conversion(self):
    """get_name/get_dict round-trip preserves int vs. float value rendering."""
    names = ('first', 'second', 'third')
    checks = parametric_object.ParametricMinMaxBounds({
        'first': [[0, 255]],
        'second': [[0, 255]],
        'third': [[0, 255]]})
    prop = parametric_object.ParametricProperties(names, checks)
    dictionary = {'first': 0, 'second': 3, 'third': 2}
    param_dict = parametric_object.ParametersDict(dictionary)
    name = prop.get_name(param_dict)
    self.assertEqual(name, 'first0_second3_third2')
    dictionary = {'first': 0.0, 'second': 0.1, 'third': 2.0}
    param_dict = parametric_object.ParametersDict(dictionary)
    name = prop.get_name(param_dict)
    self.assertEqual(name, 'first0.0_second0.1_third2.0')
    dictionary = {'first': 1.0, 'second': 3.0, 'third': 4}
    param_dict = parametric_object.ParametersDict(dictionary)
    name = prop.get_name(param_dict)
    reconstruction = prop.get_dict(name)
    self.assertEqual(dictionary, reconstruction)

  def test_types_algebra(self):
    """Arithmetic results are coerced back to the declared parameter types."""
    dictionary = {'first': 0, 'second': 3, 'third': 2}
    types = (parametric_object.ParametersTypes.INTEGER,)*3
    param_dict = parametric_object.ParametersDict(dictionary, param_types=types)
    param_half = param_dict * 1.1
    self.assertAlmostEqual(param_half['first'], 0)
    self.assertAlmostEqual(param_half['second'], 3)
    self.assertAlmostEqual(param_half['third'], 2)
    types = (parametric_object.ParametersTypes.FLOAT,)*3
    param_dict = parametric_object.ParametersDict(dictionary, param_types=types)
    param_half = param_dict * 1.1
    self.assertAlmostEqual(param_half['first'], 0)
    self.assertAlmostEqual(param_half['second'], 3.3)
    self.assertAlmostEqual(param_half['third'], 2.2)
    dictionary = {'first': 0, 'second': 3, 'third': 2}
    types = (parametric_object.ParametersTypes.INTEGER,)*3
    param_dict = parametric_object.ParametersDict(dictionary, param_types=types)
    param_half = param_dict / 3
    self.assertAlmostEqual(param_half['first'], 0)
    self.assertAlmostEqual(param_half['second'], int(3/3))
    self.assertAlmostEqual(param_half['third'], int(2/3))
    types = (parametric_object.ParametersTypes.FLOAT,)*3
    param_dict = parametric_object.ParametersDict(dictionary, param_types=types)
    param_half = param_dict / 3
    self.assertAlmostEqual(param_half['first'], 0)
    self.assertAlmostEqual(param_half['second'], float(3/3))
    self.assertAlmostEqual(param_half['third'], float(2/3))
if __name__ == '__main__':
  absltest.main()  # standard absl test entry point
| 12,491 | 4,755 |
import hawkey
import logging
from rpmreq import graph
from rpmreq import query
log = logging.getLogger(__name__)
def build_requires(specs, repos, base_repos=None,
                   out_data=None, out_image=None,
                   cache_ttl=3600):
    """
    Build, de-cycle and parse the build-requires graph for given specs.

    :param specs: specs to resolve build requirements for
    :param repos: repos to query
    :param base_repos: optional base repos
    :param out_data: optional destination for a graph data dump
    :param out_image: optional destination for a graph image dump
    :param cache_ttl: cache TTL passed through to the graph builder
    :return: result of rpmreq.graph.parse_dep_graph
    """
    g = graph.build_requires_graph(specs=specs,
                                   repos=repos,
                                   base_repos=base_repos,
                                   cache_ttl=cache_ttl)
    # Cycles must be broken before the graph is dumped or parsed.
    graph.break_dep_graph_cycles(g)
    if out_data or out_image:
        graph.dump_dep_graph(g, out_data=out_data, out_image=out_image)
    return graph.parse_dep_graph(g)
def last_version(dep, repos):
    """
    Return latest package meeting dep
    or latest version of dep regardless of version range.
    :param dep: dependency to meet
    :param repos: repos to query
    :return: DepQueryResult, see rpmreq.query.query_dep
    """
    hq = hawkey.Query(query.fetch_repos_sack(repos))
    return query.query_dep(hq, dep)
| 1,012 | 336 |
from mlflow.tracking import MlflowClient
from urllib.parse import urlparse
def get_prod_path_mlflow_model_mlflow_query(model_name, version, new_bucket, new_path):
    """Look up a model version's artifact source in MLflow and derive its relocated S3 path."""
    client = MlflowClient()
    original_source = None
    # Scan registered versions; the last one matching `version` wins.
    for model_version in client.search_model_versions(f"name='{model_name}'"):
        if model_version.version == str(version):
            original_source = model_version.source
    relocated = None
    if original_source and new_bucket and new_path:
        parsed = urlparse(original_source, allow_fragments=False)
        relocated = f"s3://{new_bucket.strip('/')}/{new_path.strip('/')}/{parsed.path.strip('/')}"
    return {"old_mlflow_path": original_source,
            "new_mflow_path": relocated}
def get_prod_path_mlflow_model_explicit(model_name, version, new_bucket, new_path):
    """Build the relocated S3 path for a model directly from its name and version."""
    destination = "s3://{}/{}/{}/{}".format(
        new_bucket.strip('/'), new_path.strip('/'), model_name, version)
    return {"old_mlflow_path": None,
            "new_mflow_path": destination}
| 1,033 | 355 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from iso8601 import parse_date
from pytz import timezone
import urllib
import json
import os
def convert_time(date):
    """Convert 'DD/MM/YYYY HH:MM:SS' to an ISO-8601 string localised to Europe/Kiev."""
    naive = datetime.strptime(date, "%d/%m/%Y %H:%M:%S")
    localized = timezone('Europe/Kiev').localize(naive)
    return localized.strftime('%Y-%m-%dT%H:%M:%S.%f%z')
def subtract_min_from_date(date, minutes):
    """Shift an ISO timestamp with a '+HH:MM' offset back by `minutes`, keeping the offset text."""
    stamp = date.split("+")[0]
    offset = date.split("+")[1]
    shifted = datetime.strptime(stamp, '%Y-%m-%dT%H:%M:%S.%f') - timedelta(minutes=minutes)
    return "{}+{}".format(shifted, offset)
def convert_datetime_to_25h8_format(isodate):
    """Render an ISO-8601 date string in the 25h8 UI format 'DD/MM/YYYY HH:MM'."""
    return parse_date(isodate).strftime("%d/%m/%Y %H:%M")
def convert_string_from_dict_25h8(string):
    """Translate a 25h8 UI label into its API value; unmapped inputs pass through unchanged."""
    translations = {
        u"грн.": u"UAH",
        u"True": u"1",
        u"False": u"0",
        u"Відкриті торги": u"aboveThresholdUA",
        u"Відкриті торги з публікацією англ. мовою": u"aboveThresholdEU",
        u'Код ДК 021-2015 (CPV)': u'CPV',
        u'Код ДК (ДК003)': u'ДК003',
        u'Код ДК (ДК018)': u'ДК018',
        u'з урахуванням ПДВ': True,
        u'з ПДВ': True,
        u'без урахуванням ПДВ': False,
        u'ОЧIКУВАННЯ ПРОПОЗИЦIЙ': u'active.tendering',
        u'ПЕРIОД УТОЧНЕНЬ': u'active.enquiries',
        u'АУКЦIОН': u'active.auction',
        u'ПРЕКВАЛІФІКАЦІЯ': u'active.pre-qualification',
        u'ОСКАРЖЕННЯ ПРЕКВАЛІФІКАЦІЇ': u'active.pre-qualification.stand-still',
        u'вимога': u'claim',
        u'дано відповідь': u'answered',
        u'вирішено': u'resolved',
        u'Так': True,
        u'Ні': False,
        u'на розглядi': u'pending',
        u'На розгляді': u'pending',
        u'не вирішено(обробляється)': u'pending',
        u'відмінено': u'cancelled',
        u'відмінена': u'cancelled',
        u'Переможець': u'active',
    }
    return translations.get(string, string)
def adapt_procuringEntity(role_name, tender_data):
    """Rewrite procuringEntity details for the tender owner and, for
    above-threshold procedures, pull tenderPeriod.startDate back one minute.

    :param role_name: broker role; only 'tender_owner' triggers the rewrite
    :param tender_data: tender dict of shape {'data': {...}}, mutated in place
    :return: the (possibly mutated) tender_data
    """
    if role_name == 'tender_owner':
        tender_data['data']['procuringEntity']['name'] = u"Ольмек"
        tender_data['data']['procuringEntity']['address']['postalCode'] = u"01100"
        tender_data['data']['procuringEntity']['address']['region'] = u"місто Київ"
        tender_data['data']['procuringEntity']['address']['locality'] = u"Київ"
        tender_data['data']['procuringEntity']['address']['streetAddress'] = u"вул. Фрунзе 77"
        tender_data['data']['procuringEntity']['identifier']['legalName'] = u"Ольмек"
        tender_data['data']['procuringEntity']['identifier']['id'] = u"01234567"
    # COMPAT FIX: dict.has_key() exists only on Python 2; the `in` operator
    # is equivalent and works on both Python 2 and 3.
    if 'procurementMethodType' in tender_data['data']:
        if "above" in tender_data['data']['procurementMethodType']:
            tender_data['data']['tenderPeriod']['startDate'] = subtract_min_from_date(
                tender_data['data']['tenderPeriod']['startDate'], 1)
    return tender_data
def adapt_delivery_data(tender_data):
    """Normalise item delivery regions in place: 'місто Київ' becomes 'Київ'."""
    for item in tender_data['data']['items']:
        if item['deliveryAddress']['region'] == u"місто Київ":
            item['deliveryAddress']['region'] = u"Київ"
    return tender_data
def adapt_view_data(value, field_name):
    """Convert a 25h8 UI field value into its API form, dispatching on the field name.

    Branch order matters: e.g. questions' dates must be matched before the
    generic 'Date' branch.
    """
    converted = value
    if 'value.amount' in field_name:
        converted = float(converted.split(' ')[0])
    elif 'currency' in field_name:
        converted = converted.split(' ')[1]
    elif 'valueAddedTaxIncluded' in field_name:
        converted = ' '.join(converted.split(' ')[2:])
    elif 'minimalStep.amount' in field_name:
        converted = float(converted.split(' ')[0])
    elif 'unit.name' in field_name:
        converted = converted.split(' ')[1]
    elif 'quantity' in field_name:
        converted = float(converted.split(' ')[0])
    elif 'questions' in field_name and '.date' in field_name:
        converted = convert_time(converted.split(' - ')[0])
    elif 'Date' in field_name:
        converted = convert_time(converted)
    return convert_string_from_dict_25h8(converted)
def adapt_view_item_data(value, field_name):
    """Convert a 25h8 UI item field value into its API form, dispatching on the field name."""
    converted = value
    if 'unit.name' in field_name:
        converted = ' '.join(converted.split(' ')[1:])
    elif 'quantity' in field_name:
        converted = float(converted.split(' ')[0])
    elif 'Date' in field_name:
        converted = convert_time(converted)
    return convert_string_from_dict_25h8(converted)
def get_related_elem_description(tender_data, feature, item_id):
    """Return item_id when given; otherwise look up the description of the
    element the feature relates to (None when no element matches).
    """
    if item_id != "":
        return item_id
    related_id = feature['relatedItem']
    collection = tender_data['data']['{}s'.format(feature['featureOf'])]
    for elem in collection:
        if elem['id'] == related_id:
            return elem['description']
def custom_download_file(url, file_name, output_dir):
    """Download `url` into output_dir/file_name (Python 2 urllib API)."""
    urllib.urlretrieve(url, ('{}/{}'.format(output_dir, file_name)))
def add_second_sign_after_point(amount):
    """Render a number, padding a lone tenths digit to two decimals ('1.5' -> '1.50')."""
    text = repr(amount)  # str(repr(x)) is just repr(x): repr already yields a str
    _, point, decimals = text.partition('.')
    if point and len(decimals) == 1:
        text += '0'
    return text
def get_bid_phone(internal_id, bid_index):
    """Fetch a tender from the sandbox API and return the telephone of the first
    tenderer on the bid referenced by qualification number `bid_index`.

    Uses the Python 2 urllib API. Returns None when no bid matches the bidID.
    """
    r = urllib.urlopen('https://lb.api-sandbox.openprocurement.org/api/2.3/tenders/{}'.format(internal_id)).read()
    tender = json.loads(r)
    # Qualifications reference bids by id; resolve the index to that id first.
    bid_id = tender['data']['qualifications'][int(bid_index)]["bidID"]
    for bid in tender['data']['bids']:
        if bid['id'] == bid_id:
            return bid['tenderers'][0]['contactPoint']['telephone']
def get_upload_file_path():
    """Absolute path of the upload fixture, resolved against the current working directory."""
    relative = 'src/robot_tests.broker.25h8/testFileForUpload.txt'
    return os.path.join(os.getcwd(), relative)
# NOTE(review): real-looking credentials committed in plaintext — these should
# be rotated and loaded from environment variables or a secrets manager.
EMAIL_ADDRESS = 'domfigarobarbearia@gmail.com'
EMAIL_PASSWORD = 'barbeariadomfigaro'
HEROKU_PASSWORD = "Barbeariadomfigaro!"
'''
Created on 15/03/2018
@author: pelejaf
''' | 47 | 27 |
# coding: utf-8
from config.base import *
DEBUG = False  # override of the value star-imported from config.base
SERVER_PORT = 8899  # presumably the listening port for this deployment — confirm against server setup
| 76 | 30 |
from __future__ import print_function
# Board dimensions; coordinates elsewhere in this module are 1-based.
NUM_ROWS = 7
NUM_COLS = 7
# Compass directions a piece may move in.
DIRECTIONS = ('E', 'W', 'N', 'S')
# (dx, dy) per direction; y grows downward, so 'N' decreases y.
MOVEMENT_DIFFS = {
    'N': (0, -1),
    'S': (0, 1),
    'E': (1, 0),
    'W': (-1, 0)
}
# Per-axis variants of MOVEMENT_DIFFS for call sites that need one axis only.
X_MOVEMENT_DIFFS = {
    'N': 0,
    'S': 0,
    'E': 1,
    'W': -1
}
Y_MOVEMENT_DIFFS = {
    'N': -1,
    'S': 1,
    'E': 0,
    'W': 0
}
def actions_and_successors(state, white_player=True):
    """
    Returns (action, successor) tuples for every legal action in the given state.
    :param state: the state to expand
    :param white_player: True if the current player is white, False otherwise
    :return: a list of (action, successor state) tuples
    """
    return [(action, result(state, action, white_player))
            for action in actions(state, white_player)]
def print_state(state):
    """
    Prints the given state.
    :param state: the state to print
    """
    # Header row: 1-based column numbers.
    print(' ', end=' ')
    for col in range(NUM_COLS):
        print(col + 1, end=' ')
    print()
    for row in range(NUM_ROWS):
        # Row label, then one character per square: 'O' white, 'X' black.
        print(row + 1, end=' ')
        for col in range(NUM_COLS):
            if (col + 1, row + 1) in state[0]:
                print('O', end='')
            elif (col + 1, row + 1) in state[1]:
                print('X', end='')
            else:
                print(' ', end='')
            if col < NUM_COLS - 1:
                print(',', end='')
        print()
def str_to_state(str_state):
    """
    Returns a state corresponding to the provided string representation. Here is an example of a valid state:
     , , , , , ,X
     , , , , ,X,
     , , , , ,O,X
     ,X,O, , , ,X
     , , , , ,O,
     ,O,X, , , ,
    O, , , ,O, ,
    :param str_state: a string representation of the board
    :return: the corresponding state
    """
    white_squares = []
    black_squares = []
    # Note: row.split(',') can never yield ',' itself, so the original
    # `if square == ',': continue` guard was dead code and has been removed.
    for y, row in enumerate(str_state.splitlines(), start=1):
        for x, square in enumerate(row.split(','), start=1):
            if square == 'O':
                white_squares.append((x, y))
            elif square == 'X':
                black_squares.append((x, y))
    return tuple(white_squares), tuple(black_squares)
def is_within_bounds(x, y):
    """
    :return: True if the given x, y coordinates are within the bounds of the board
    """
    x_in_range = 0 < x <= NUM_COLS
    y_in_range = 0 < y <= NUM_ROWS
    return x_in_range and y_in_range
def is_free_square(state, x, y):
    """
    :return: True if the square at x, y is occupied by neither player in the given state
    """
    square = (x, y)
    return square not in state[0] and square not in state[1]
def is_valid_action(state, x, y, direction):
    """
    Checks whether moving the piece at x, y one square in `direction` is legal.
    :param state: the current state
    :param x: the x coordinate of the piece
    :param y: the y coordinate of the piece
    :param direction: one of DIRECTIONS
    :return: True if the destination is on the board and unoccupied
    """
    dx, dy = MOVEMENT_DIFFS[direction]
    destination = (x + dx, y + dy)
    return is_within_bounds(*destination) and is_free_square(state, *destination)
def occupied_squares_by_player(state, white_player):
    """
    Returns the x, y coordinates of the squares occupied by the given player.
    :param state: the given state
    :param white_player: True if the current player is white, False otherwise
    :return: the player's occupied squares
    """
    index = 0 if white_player else 1
    return state[index]
def actions(state, white_player=True):
    """
    Returns the actions available to the given player in the given state.
    :param state: the current state
    :param white_player: True if the current player is white, False otherwise
    :return: list of (x, y, direction) tuples, one per legal move
    """
    available = []
    for (x, y) in occupied_squares_by_player(state, white_player):
        for direction in DIRECTIONS:
            if is_valid_action(state, x, y, direction):
                available.append((x, y, direction))
    return available
def action_str_to_tuple(a):
    """
    Converts an action string such as '13E' to a tuple, or None when malformed.
    :param a: the action in string form
    :return: the action in tuple form, or None
    """
    if a is None:
        return None
    if '1' <= a[0] <= '7' and '1' <= a[1] <= '7' and a[2] in DIRECTIONS:
        return int(a[0]), int(a[1]), a[2]
    return None
def action_tuple_to_str(action):
    """
    Converts an action tuple to its string form, or None for None input.
    :param action: the action tuple (x, y, direction)
    :return: the string form, e.g. '13E'
    """
    if action is None:
        return None
    column, row, direction = action
    return str(column) + str(row) + direction
def result(state, action, white_player=True):
    """
    Returns the state produced by applying the given action for the given player.
    :param state: the current state
    :param action: the action to apply
    :param white_player: True if the current player is white, False otherwise
    :return: the resulting state
    """
    moved = result_tuple(state, action, white_player)
    if white_player:
        return moved, state[1]
    return state[0], moved
def result_tuple(s, a, white_player):
    """
    Returns the given player's piece coordinates after applying action `a` to state `s`.
    :param s: the current state
    :param a: the action (x, y, direction)
    :param white_player: True if the current player is white, False otherwise
    :return: tuple of the player's squares with the moved piece relocated
    """
    old_x, old_y, direction = a[0], a[1], a[2]
    new_x = old_x + X_MOVEMENT_DIFFS[direction]
    new_y = old_y + Y_MOVEMENT_DIFFS[direction]
    moved = []
    for (x, y) in occupied_squares_by_player(s, white_player):
        if x == old_x and y == old_y:
            moved.append((new_x, new_y))
        else:
            moved.append((x, y))
    return tuple(moved)
def file_to_state(file_name):
    """
    Converts the board given by the provided file to a state. Here is an example of a valid state:
     , , , , , ,X
     , , , , ,X,
     , , , , ,O,X
     ,X,O, , , ,X
     , , , , ,O,
     ,O,X, , , ,
    O, , , ,O, ,
    :param file_name: the name of the file containing the state
    :return: a state corresponding to the board
    """
    # Read the whole board file, then delegate parsing to str_to_state.
    with open(file_name, 'r') as state_file:
        string_state = state_file.read()
    state = str_to_state(string_state)
    return state
| 6,574 | 2,134 |
from dataclasses import dataclass
from typing import Dict
from dataclasses_jsonschema import JsonSchemaMixin
@dataclass
class DatabaseStatsBreakdown(JsonSchemaMixin):
    """Database stats broken down by tier"""
    # Per-tier counts, keyed by tier name.
    breakdown: Dict[str, int]
    # Overall count; presumably the sum of `breakdown` values -- verify against producer.
    total: int
@dataclass
class DatabaseStatsSchema(JsonSchemaMixin):
    """All database stats"""
    # Match counts, broken down by tier.
    matches: DatabaseStatsBreakdown
    # Fighter counts, broken down by tier.
    fighters: DatabaseStatsBreakdown
| 419 | 124 |
import numpy as np
import pandas as pd
import unittest
from pyampute.ampute import MultivariateAmputation
from pyampute.exploration.md_patterns import mdPatterns
class TestMapping(unittest.TestCase):
    '''
    This class tests the example code in the blogpost "A mapping from R-function ampute to pyampute"
    '''

    def setUp(self) -> None:
        super().setUp()
        self.n = 10000
        # Simulated stand-in for the nhanes2 dataset: n rows x 4 columns.
        self.nhanes2_sim = np.random.randn(self.n, 4)
        try:
            self.nhanes2_orig = pd.read_csv("data/nhanes2.csv")
        except Exception:
            # BUG FIX: the original bare `except:` swallowed every exception
            # and left self.nhanes2_orig undefined, so test_patterns later
            # died with AttributeError instead of a clear skip.
            print("CSV file failed to load.")
            self.nhanes2_orig = None

    def test_patterns(self):
        """Missing-data patterns of the original CSV and of a simulated amputation."""
        if self.nhanes2_orig is None:
            self.skipTest("data/nhanes2.csv could not be loaded")
        mdp = mdPatterns()
        mypatterns = mdp.get_patterns(self.nhanes2_orig, show_plot=False)
        self.assertEqual(mypatterns.shape, (6, 6))
        self.assertListEqual(
            mypatterns.iloc[1:-1, 1:-1].values.tolist(),
            [[1, 1, 1, 0], [1, 1, 0, 1], [1, 0, 0, 1], [1, 0, 0, 0]])
        ma = MultivariateAmputation(
            patterns=[
                {'incomplete_vars': [3]},
                {'incomplete_vars': [2]},
                {'incomplete_vars': [1, 2]},
                {'incomplete_vars': [1, 2, 3]}
            ]
        )
        nhanes2_incomplete = ma.fit_transform(self.nhanes2_sim)
        mdp = mdPatterns()
        mypatterns = mdp.get_patterns(nhanes2_incomplete, show_plot=False)
        self.assertEqual(mypatterns.shape, (6, 6))
        self.assertListEqual(
            mypatterns["n_missing_values"].values[:-1].astype(int).tolist(),
            [0, 1, 1, 2, 3])

    def test_proportions(self):
        """Per-pattern frequencies combined with an overall missingness proportion."""
        ma = MultivariateAmputation(
            patterns=[
                {'incomplete_vars': [3], 'freq': 0.1},
                {'incomplete_vars': [2], 'freq': 0.6},
                {'incomplete_vars': [1, 2], 'freq': 0.2},
                {'incomplete_vars': [1, 2, 3], 'freq': 0.1}
            ],
            prop=0.3)
        nhanes2_incomplete = ma.fit_transform(self.nhanes2_sim)
        mdp = mdPatterns()
        mypatterns = mdp.get_patterns(nhanes2_incomplete, show_plot=False)
        self.assertListEqual(
            mypatterns.columns.values.tolist(),
            ["row_count", 0, 3, 1, 2, "n_missing_values"]
        )
        # Expected count is prop * freq * n; allow 5% of n slack for sampling noise.
        self.assertAlmostEqual(
            mypatterns.loc[1, "row_count"],
            0.3 * 0.6 * self.n,
            delta=0.05 * self.n,
        )

    def test_mechanisms(self):
        """Per-pattern mechanisms default to MAR when left unspecified."""
        ma = MultivariateAmputation(
            patterns=[
                {'incomplete_vars': [3], 'mechanism': "MCAR"},
                {'incomplete_vars': [2]},
                {'incomplete_vars': [1, 2], 'mechanism': "MNAR"},
                {'incomplete_vars': [1, 2, 3]}
            ]
        )
        nhanes2_incomplete = ma.fit_transform(self.nhanes2_sim)
        self.assertEqual(ma.patterns[0]['mechanism'], "MCAR")
        self.assertEqual(ma.patterns[2]['mechanism'], "MNAR")
        self.assertListEqual(ma.mechanisms.tolist(), ["MCAR", "MAR", "MNAR", "MAR"])

    def test_weights(self):
        """Explicit weights (list or dict form) are normalised into one weight matrix."""
        ma = MultivariateAmputation(
            patterns=[
                {'incomplete_vars': [3], 'weights': [0, 4, 1, 0]},
                {'incomplete_vars': [2]},
                {'incomplete_vars': [1, 2], 'mechanism': "MNAR"},
                {'incomplete_vars': [1, 2, 3], 'weights': {0: -2, 3: 1}, 'mechanism': "MAR+MNAR"}
            ]
        )
        nhanes2_incomplete = ma.fit_transform(self.nhanes2_sim)
        mdp = mdPatterns()
        mypatterns = mdp.get_patterns(nhanes2_incomplete, show_plot=False)
        self.assertListEqual(
            ma.weights.tolist(),
            [[0, 4, 1, 0], [1, 1, 0, 1], [0, 1, 1, 0], [-2, 0, 0, 1]]
        )
        # BUG FIX: assertTrue(a, b) treats b as the failure *message*, making
        # the original check vacuous; assertEqual actually compares the values.
        self.assertEqual(len(ma.wss_per_pattern), 4)
# Allow the test module to be executed directly (python <module>.py).
if __name__ == "__main__":
    unittest.main()
| 3,781 | 1,389 |
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import random
import cv2
from keras.utils import to_categorical
from keras.models import Model
from keras.layers import Dropout, Lambda, Dense, Conv2D, Flatten, Input, MaxPooling2D
from keras.optimizers import RMSprop
from keras import backend as K
import os
from numpy.random import permutation
class Classification:
    """Dense-network image classifier.

    Images are flattened and pushed through three 128-unit ReLU layers with
    dropout, ending in a softmax over ``num_classes``.
    """

    def __init__(self, x_train, y_train, x_test, y_test, input_shape, num_classes):
        # Add a trailing channel axis; assumes single-channel input images
        # (the reshape appends a literal 1) -- confirm with callers.
        self.x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], 1)
        self.y_train = to_categorical(y_train)
        self.x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], 1)
        self.y_test = to_categorical(y_test)
        self.input_shape = input_shape
        self.num_classes = num_classes

    def build_model(self):
        """Build and return the (uncompiled) Keras model."""
        # BUG FIX: renamed `input` -> `inputs` to stop shadowing the builtin.
        inputs = Input(shape=(self.input_shape[0], self.input_shape[1], 1))
        x = Flatten()(inputs)
        x = Dense(128, activation='relu')(x)
        x = Dropout(0.1)(x)
        x = Dense(128, activation='relu')(x)
        x = Dropout(0.1)(x)
        x = Dense(128, activation='relu')(x)
        outputs = Dense(self.num_classes, activation='softmax')(x)
        return Model(inputs, outputs)

    def train(self, epochs):
        """Compile and fit the model for `epochs` epochs.

        :param epochs: number of training epochs
        :return: the fitted Keras model (the original discarded it)
        """
        model = self.build_model()
        model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
        model.fit(self.x_train, self.y_train, validation_data=(self.x_test, self.y_test), epochs=epochs)
        # Returning the model is backward compatible (callers ignoring the
        # previous None return are unaffected) and lets them evaluate/save it.
        return model
import sys

# Refuse to install on interpreters older than 3.5.
if sys.version_info < (3, 5):
    print("Python 3.5 is required for this package")
    raise SystemExit(1)

from setuptools import setup

setup(
    name="simplesvnbrowser",
    version="0.0.1",
    description="A simple subversion repository browser application",
    url="https://github.com/holtrop/simple-svn-browser",
    author="Josh Holtrop",
    author_email="jholtrop@gmail.com",
    license="zlib",
    packages=["simplesvnbrowser"],
    zip_safe=False,
    scripts=["bin/simple-svn-browser"],
    install_requires=["pygobject", "pyxdg"],
)
| 596 | 195 |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 30 10:02:36 2019
@author: abibeka
Purpose: Batch update synchro volumes
"""
# 0.0 Housekeeping. Clear variable space
#******************************************************************************************
from IPython import get_ipython #run magic commands
ipython = get_ipython()
# Clear the interactive namespace (IPython-only; fails outside IPython).
ipython.magic("reset -f")
ipython = get_ipython()
import os
import pandas as pd
import numpy as np
import csv
os.chdir(r'C:\Users\abibeka\OneDrive - Kittelson & Associates, Inc\Documents\RampMetering\operations\Synchro')
# Read the volume data
dat = pd.read_csv('VOLUME.CSV',skiprows=2)
dat.fillna('',inplace=True)
# NOTE(review): dat2 starts as an alias of dat; drop() returns a new frame,
# but the subsequent inplace rename/assignment then mutate only dat2.
dat2 = dat
dat2 = dat2.drop(columns = 'DATE')
dat2.rename(columns = {'TIME': 'RECORDNAME'},inplace=True)
dat2.RECORDNAME = 'Volume'
# Scale the volume data
#Number of Years = 2040 - 2016
NumYears = 2040 - 2016
GrowthRates = [0,1,2] # percent per year
# Compound growth factor over the horizon: (1 + rate/100) ** years.
NetGrowthCalc = lambda x: (1+x/100)**NumYears
NetGrowthRate = list(map(NetGrowthCalc,GrowthRates))
# Bare expression: a REPL echo of the computed factors; has no effect in a script.
NetGrowthRate
def Output2040Vols(datCp = dat2, NetGrowthRt = 1):
    """
    Scale the volume columns of `datCp` by `NetGrowthRt` and write them as a
    Synchro-style CSV named Volume2040_NetGrwRt_<rate>.csv.

    Parameters
    ----------
    datCp : pd.DataFrame
        Volume data; the first two columns are record metadata, the rest are
        counts. The caller's frame is NOT modified.
    NetGrowthRt : float
        Net growth multiplier applied to every non-empty volume cell.
    """
    # BUG FIX: the original mutated datCp (a mutable default bound to the
    # module-level dat2) in place, so each successive call compounded the
    # previous call's scaling. Work on a copy instead.
    datCp = datCp.copy()
    # Empty-string cells (from fillna('')) are left untouched.
    datCp.iloc[:,2:] = datCp.iloc[:,2:].applymap(lambda x: x if not x else round(x*NetGrowthRt))
    #Change volume data and columns to list --- so it can be written
    dat2Write = datCp.values.tolist()
    #Read the top 3 lines of the csv file separately
    with open('VOLUME.csv', 'r') as readFile:
        reader = csv.reader(readFile)
        lines = list(reader)
    # Rewrite the header so it matches the lane-group layout Synchro expects.
    Header = lines[0:3]
    Header[0] = ['[Lanes]']
    Header[1] =['Lane Group Data']
    Header[2][0] = 'RECORDNAME'
    Header[2].remove('TIME')
    #Write the header rows, then the scaled volume rows.
    with open('Volume2040_NetGrwRt_{}.csv'.format(round(NetGrowthRt,2)), 'w', newline = '') as writeFile:
        writer = csv.writer(writeFile)
        writer.writerows(Header)
        writer.writerows(dat2Write)
        # Redundant explicit close() removed: the with-block closes the file.
# Emit one scaled volume file per growth-rate scenario (0%, 1%, 2% per year).
# NOTE(review): if Output2040Vols scales datCp in place, these consecutive
# calls compound the factors on top of each other -- verify.
Output2040Vols(datCp = dat2, NetGrowthRt = NetGrowthRate[0])
Output2040Vols(datCp = dat2, NetGrowthRt = NetGrowthRate[1])
Output2040Vols(datCp = dat2, NetGrowthRt = NetGrowthRate[2])
| 2,092 | 822 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Creates the multiline text charts
Given the unique nature of multiline text charts, we use a separate method
to construct them.
-----
"""
# Built-in Modules
import pickle
import sys
import textwrap
import traceback
# Third-party Modules
# Note the order and structure of matplotlib imports is intentional.
import matplotlib
matplotlib.use('AGG') # Note: this statement must be run before any other matplotlib imports are done.
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# My modules
import chart_tools

# Unpack the plotting payload handed over by chart_tools.
log = chart_tools.log
payload = chart_tools.payload
p_dict = payload['p_dict']        # per-chart plot settings
k_dict = payload['k_dict']        # kwarg dicts for matplotlib calls
props = payload['props']          # device/chart properties
chart_name = props['name']
plug_dict = payload['prefs']      # plugin-level preferences
text_to_plot = payload['data']    # the raw text to render
log['Threaddebug'].append(u"chart_multiline.py called.")
if plug_dict['verboseLogging']:
    chart_tools.log['Threaddebug'].append(u"{0}".format(payload))
try:
    # NOTE(review): this module-level __init__ is vestigial -- it is never
    # called anywhere in this script; consider removing it.
    def __init__():
        pass
def clean_string(val):
"""
Cleans long strings of whitespace and formats certain characters
The clean_string(self, val) method is used to scrub multiline text elements in
order to try to make them more presentable. The need is easily seen by looking
at the rough text that is provided by the U.S. National Weather Service, for
example.
-----
:param unicode val:
:return val:
"""
# List of (elements, replacements)
clean_list = ((' am ', ' AM '),
(' pm ', ' PM '),
('*', ' '),
('\u000A', ' '),
('...', ' '),
('/ ', '/'),
(' /', '/'),
('/', ' / ')
)
# Take the old, and replace it with the new.
for (old, new) in clean_list:
val = val.replace(old, new)
val = ' '.join(val.split()) # Eliminate spans of whitespace.
return val
    # Normalise figure dimensions to floats for downstream consumers.
    p_dict['figureWidth'] = float(props['figureWidth'])
    p_dict['figureHeight'] = float(props['figureHeight'])
    # Convert requested pixel height to inches (pixels / DPI); coerce to a
    # minimum of one inch, and fall back to 3 inches on unparseable input.
    try:
        height = int(props.get('figureHeight', 300)) / int(plt.rcParams['savefig.dpi'])
        if height < 1:
            height = 1
            chart_tools.log['Warning'].append(u"[{n}] Height: Pixels / DPI can not be less than one. Coercing to "
                                              u"one.".format(n=chart_name)
                                              )
    except ValueError:
        height = 3
    # Same conversion for width; fallback is 5 inches.
    try:
        width = int(props.get('figureWidth', 500)) / int(plt.rcParams['savefig.dpi'])
        if width < 1:
            width = 1
            chart_tools.log['Warning'].append(u"[{n}] Width: Pixels / DPI can not be less than one. Coercing to "
                                              u"one.".format(n=chart_name)
                                              )
    except ValueError:
        width = 5
    fig = plt.figure(figsize=(width, height))
    ax = fig.add_subplot(111)
    ax.axis('off')
    # If the value to be plotted is empty, use the default text from the device
    # configuration.
    if len(text_to_plot) <= 1:
        # NOTE(review): `unicode` exists only on Python 2; this script appears
        # to target Python 2 (u'' literals throughout).
        text_to_plot = unicode(p_dict['defaultText'])
    else:
        # The clean_string method tries to remove some potential ugliness from the text
        # to be plotted. It's optional--defaulted to on. No need to call this if the
        # default text is used.
        if p_dict['cleanTheText']:
            text_to_plot = clean_string(val=text_to_plot)
    if plug_dict['verboseLogging']:
        chart_tools.log['Threaddebug'].append(u"[{n}] Data: {t}".format(n=chart_name, t=text_to_plot))
    # Wrap the text and prepare it for plotting.
    text_to_plot = textwrap.fill(text=text_to_plot,
                                 width=int(p_dict['numberOfCharacters']),
                                 replace_whitespace=p_dict['cleanTheText']
                                 )
    # Draw the wrapped text anchored near the top-left corner of the axes.
    ax.text(0.01, 0.95,
            text_to_plot,
            transform=ax.transAxes,
            color=p_dict['textColor'],
            fontname=p_dict['fontMain'],
            fontsize=p_dict['multilineFontSize'],
            verticalalignment='top'
            )
    ax.axes.get_xaxis().set_visible(False)
    ax.axes.get_yaxis().set_visible(False)
    # Hide the axes frame entirely unless a border was requested.
    if not p_dict['textAreaBorder']:
        [s.set_visible(False) for s in ax.spines.values()]
    # Transparent Charts Fill
    if p_dict['transparent_charts'] and p_dict['transparent_filled']:
        ax.add_patch(patches.Rectangle((0, 0), 1, 1,
                                       transform=ax.transAxes,
                                       facecolor=p_dict['faceColor'],
                                       zorder=1
                                       )
                     )
    # =============================== Format Title ================================
    chart_tools.format_title(p_dict=p_dict, k_dict=k_dict, loc=(0.5, 0.98), align='center')
    # Note that subplots_adjust affects the space surrounding the subplots and not
    # the fig.
    plt.subplots_adjust(top=0.98,
                        bottom=0.05,
                        left=0.02,
                        right=0.98,
                        hspace=None,
                        wspace=None
                        )
    chart_tools.save(logger=log)
except (KeyError, IndexError, ValueError, UnicodeEncodeError) as sub_error:
    # Log the full traceback as Critical. NOTE(review): `sub_error` itself is
    # unused; the traceback is captured via traceback.format_exc().
    tb = traceback.format_exc()
    chart_tools.log['Critical'].append(u"[{n}] {s}".format(n=chart_name, s=tb))
# Hand the accumulated log back to the parent process over stdout.
pickle.dump(chart_tools.log, sys.stdout)
| 5,695 | 1,708 |
# -*- coding: utf-8 -*-
"""
* TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-蓝鲸 PaaS 平台(BlueKing-PaaS) available.
* Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
"""
import pytest
from apigw_manager.apigw import command
class TestApiCommand:
    """Tests for command.ApiCommand configuration resolution."""

    @pytest.fixture(autouse=True)
    def setup_command(self):
        # Fresh command instance for every test.
        self.command = command.ApiCommand()

    def test_get_configuration(self, configuration):
        """Defaults come from the shared `configuration` fixture."""
        result = self.command.get_configuration()
        assert configuration.api_name == result.api_name
        assert configuration.host == result.host

    def test_get_configuration_with_args(self, faker):
        """Explicit keyword arguments override the fixture defaults."""
        # BUG FIX: faker.color is a method; without the call parentheses the
        # bound method object was passed through, making the assertion vacuous
        # (the sibling line already calls faker.url()).
        api_name = faker.color()
        host = faker.url()
        result = self.command.get_configuration(api_name=api_name, host=host)
        assert api_name == result.api_name
        assert host.startswith(result.host)
class TestDefinitionCommand:
    """Tests for command.DefinitionCommand template-context assembly."""

    @pytest.fixture(autouse=True)
    def setup_command(self):
        # Fresh command instance for every test.
        self.command = command.DefinitionCommand()

    def test_get_context(self):
        # "key:value" pairs are parsed into the context's "data" mapping;
        # "settings" and "environ" are always present.
        context = self.command.get_context(["a:1", "b:2"])
        assert "settings" in context
        assert "environ" in context
        assert context["data"]["a"] == 1
        assert context["data"]["b"] == 2
| 1,814 | 548 |
from pylie.common import *
from pylie import SO3
def test_to_rotation_matrix_results_in_valid_rotation():
    """Fitted matrices must be orthogonal with determinant +1, in 2D and 3D."""
    for dim in (2, 3):
        A = np.random.rand(dim, dim)
        R = to_rotation_matrix(A)
        # R is a rotation iff R @ R.T == I and det(R) == +1.
        np.testing.assert_almost_equal(R @ R.T, np.identity(dim), 14)
        np.testing.assert_almost_equal(np.linalg.det(R), 1, 14)
def test_to_rotation_matrix_results_in_close_rotation():
    """Fitting a corrupted rotation must recover (approximately) the original."""
    # A 90-degree rotation about the x axis.
    angle = 0.5 * np.pi
    axis = np.array([[1, 0, 0]]).T
    R = SO3.Exp(angle * axis).matrix
    # Invalidate a valid rotation matrix by scaling it.
    R_scaled = 3 * R
    # Fit to SO(3).
    R_closest = to_rotation_matrix(R_scaled)
    # Result should be the same rotation matrix (scale does not change the fit).
    np.testing.assert_almost_equal(R_closest, R, 14)
    # Perturb the rotation matrix with random noise.
    R_noisy = R + 0.01 * np.random.rand(3, 3)
    # Fit to SO(3)
    so3_closest = SO3(R_noisy)
    # Extract angle-axis representation.
    angle_closest, axis_closest = so3_closest.Log(True)
    # Result should be close to the same rotation (loose 2-decimal tolerance).
    np.testing.assert_almost_equal(angle_closest, angle, 2)
    np.testing.assert_almost_equal(axis_closest, axis, 2)
| 1,342 | 552 |
import pytz
from datetime import timedelta
from dateutil import parser
from django.utils.text import Truncator
from django.db import IntegrityError
from core.models import Data
class HoursDataSource(object):
    """
    Base class for sources that feed dated entries into core.models.Data.

    Holds a [start_date, end_date] window plus helpers for bounds checks,
    weekly grouping, and persisting entries.
    """

    def __init__(self, start_date, end_date):
        self.entries = []
        self.start_date = start_date
        self.end_date = end_date

    def truncate(self, text, length):
        """Return `text` truncated to at most `length` characters."""
        return Truncator(text).chars(length)

    def date_within_bounds(self, date, give_or_take=None):
        """
        Return True if `date` falls inside the source's window, optionally
        widened by `give_or_take` (a timedelta) on both ends.
        """
        # BUG FIX: this method was defined twice in the class with an
        # identical body; the redundant duplicate has been removed.
        start_date = self.start_date
        end_date = self.end_date
        if give_or_take is not None:
            start_date -= give_or_take
            end_date += give_or_take
        return start_date <= date <= end_date

    def get_group_date(self, date):
        """Return the Monday of the week containing `date` (used as a group key)."""
        return date + timedelta(days=-date.weekday())

    def add_entry(self, date, title, mouseover, url, css_class):
        """Persist one entry, silently skipping duplicates (IntegrityError)."""
        try:
            Data.objects.create(date=date, title=title, mouseover=mouseover,
                                url=url, css_class=css_class)
        except IntegrityError:
            pass
| 1,409 | 427 |
from django.db.models.signals import post_save
from django.dispatch import receiver
from roadmaps.models import RoadmapNode
from roadmaps.services.progress import ProgressPropagator
@receiver(post_save, sender=RoadmapNode)
def propagate_completion_to_descendant_nodes(sender, **kwargs):
    """Post-save hook: push a saved node's completion state to its descendants."""
    roadmap: RoadmapNode = kwargs.get('instance')
    ProgressPropagator.propagate_completion_desc(roadmap)
| 398 | 119 |
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from ReconstructOrder.datastructures.intensity_data import IntensityData
# ==== test basic construction =====
def test_basic_constructor_nparray():
    """
    test assignment using numpy arrays
    """
    images = [(k + 1) * np.ones((512, 512)) for k in range(5)]
    int_data = IntensityData()
    for img in images:
        int_data.append_image(img)
    # Each channel must come back unchanged, in append order.
    for channel, img in enumerate(images):
        assert_array_equal(int_data.get_image(channel), img)
    assert_array_equal(int_data.data, np.array(images))
def test_basic_constructor_memap(setup_temp_data):
    """
    test assignment using memory mapped files
    """
    mm = setup_temp_data
    int_data = IntensityData()
    int_data.append_image(mm)
    for factor in (2, 3, 4, 5):
        int_data.append_image(factor * mm)
    assert_array_equal(int_data.get_image(0), mm)
    for channel, factor in enumerate((2, 3, 4, 5), start=1):
        assert_array_equal(int_data.get_image(channel), factor * mm)
    assert_array_equal(int_data.data, np.array([mm, 2 * mm, 3 * mm, 4 * mm, 5 * mm]))
def test_basic_constructor_with_names():
    """
    test construction with channel names
    """
    channels = ['IExt', 'I0', 'I45', 'I90', 'I135']
    int_data = IntensityData()
    int_data.channel_names = channels
    # Fill channel k with the constant (k + 1).
    for scale, name in enumerate(channels, start=1):
        int_data.replace_image(scale * np.ones((512, 512)), name)
    assert_array_equal(int_data.get_image("IExt"), np.ones((512, 512)))
def test_basic_constructor_without_names():
    """
    test construction WITHOUT channel names (original docstring said "with";
    access falls back to integer indexing)
    """
    int_data = IntensityData()
    first = np.ones((512, 512))
    int_data.append_image(first)
    for scale in (2, 3, 4, 5):
        int_data.append_image(scale * np.ones((512, 512)))
    assert_array_equal(int_data.get_image(0), first)
# ==== test instances and private/public access =====
def test_instances():
    """
    test instance attributes
    """
    I1 = IntensityData()
    I2 = IntensityData()
    # Two empty instances are expected NOT to compare equal.
    with pytest.raises(AssertionError):
        assert(I1 == I2)
    # Appending differently shaped images to the two instances: the final
    # assert_array_equal inside the with-block is expected to raise, since a
    # (32, 32) image cannot equal a (64, 64) image.
    with pytest.raises(AssertionError):
        I1.append_image(np.ones((32, 32)))
        I2.append_image(np.ones((64, 64)))
        assert_array_equal(I1.get_image(0),I2.get_image(0))
def test_private_access(setup_intensity_data):
    """
    should not have access to private variables;
    access is restricted to setters/getters
    """
    int_data, *_ = setup_intensity_data
    # Outside a class body there is no name mangling, so these attribute
    # names simply must not exist on the instance.
    for attr in ('__IExt', '__I0'):
        with pytest.raises(AttributeError):
            print(getattr(int_data, attr))
# ==== test methods =====
# replace_image method
def test_replace_image_shape(setup_intensity_data):
    """Replacing a channel with a wrong-shaped image must raise ValueError."""
    int_data, *_ = setup_intensity_data
    with pytest.raises(ValueError):
        int_data.replace_image(np.ones((5, 5)), 0)
def test_replace_image_dtype(setup_intensity_data):
    """Replacing a channel with a non-array value must raise TypeError."""
    int_data, *_ = setup_intensity_data
    with pytest.raises(TypeError):
        int_data.replace_image(0, 0)
def test_replace_image_by_index(setup_intensity_data):
    """replace_image accepts an integer channel index."""
    int_data, *_ = setup_intensity_data
    replacement = np.ones((512, 512))
    int_data.replace_image(replacement, 0)
    assert_array_equal(int_data.data[0], replacement)
def test_replace_image_by_string(setup_intensity_data):
    """replace_image accepts a string channel name."""
    int_data, *_ = setup_intensity_data
    int_data.channel_names = ['IExt', 'I0', 'I45', 'I90', 'I135']
    replacement = np.ones((512, 512))
    int_data.replace_image(replacement, 'I90')
    assert_array_equal(int_data.get_image('I90'), replacement)
# channel_names property
def test_channel_names(setup_intensity_data):
    """Assigning one name per channel must be accepted without error."""
    int_data, *_ = setup_intensity_data
    int_data.channel_names = list('abcde')
# get_image method
def test_get_image_str(setup_intensity_data):
    """
    test query by string channel name
    """
    int_data, a, b, c, d, e = setup_intensity_data
    names = ['a','b','c','d','e']
    int_data.channel_names = names
    dat = int_data.get_image('e')
    # BUG FIX: the original `assert(x, y)` asserted a two-element tuple,
    # which is always truthy; compare explicitly instead.
    assert dat.shape == (512, 512)
    # Fixture channel 'e' holds the constant 5 (see the constructor tests).
    assert dat[0][0] == 5
def test_get_img_str_undef(setup_intensity_data):
    """
    test exception handling of query by string channel name
    """
    int_data, *_ = setup_intensity_data
    int_data.channel_names = ['a','b','c','d','e','f','g','h']
    # A name that was never registered must raise.
    with pytest.raises(ValueError):
        dat = int_data.get_image('q')
def test_get_image_int(setup_intensity_data):
    """
    test query by int channel index
    """
    int_data, a, b, c, d, e = setup_intensity_data
    names = ['a','b','c','d','e']
    int_data.channel_names = names
    dat = int_data.get_image(4)
    # BUG FIX: the original `assert(x, y)` asserted a two-element tuple,
    # which is always truthy; compare explicitly instead.
    assert dat.shape == (512, 512)
    # Fixture channel 4 holds the constant 5 (see the constructor tests).
    assert dat[0][0] == 5
# axis_names property
def test_axis_names(setup_intensity_data):
    """axis_names must round-trip through the property."""
    int_data, *_ = setup_intensity_data
    names = ['c', 'x', 'y', 'z', 't']
    int_data.axis_names = names
    # BUG FIX: the original `assert(x, y)` asserted a two-element tuple,
    # which is always truthy; compare the stored names explicitly.
    assert int_data.axis_names == names
# ==== test data dimensions =====
def test_ndims_1(setup_ndarrays):
    """
    test that shape is preserved
    """
    p, q, r = setup_ndarrays
    stack = IntensityData()
    for _ in range(3):
        stack.append_image(p)
    assert stack.data[0].shape == p.shape
    # Channel axis is prepended: three channels of p's shape.
    assert stack.data.shape == (3,) + p.shape
def test_ndims_2(setup_ndarrays):
    """
    test exception handling for image data that is not \
    numpy array or numpy memmap
    """
    stack = IntensityData()
    for bad_value in (1, [1, 2, 3], {1, 2, 3}, (1, 2, 3)):
        with pytest.raises(TypeError):
            stack.append_image(bad_value)
def test_ndims_3(setup_ndarrays):
    """
    test exception handling upon assignment of dim mismatch image
    """
    p, q, _ = setup_ndarrays
    stack = IntensityData()
    stack.append_image(p)
    # q has a different dimensionality than p, so the append must fail.
    with pytest.raises(ValueError):
        stack.append_image(q)
# ==== Attribute assignment ==========
def test_assignment(setup_intensity_data):
    """
    test exception handling of improper assignment
    """
    int_data, a, *_ = setup_intensity_data
    # Direct attribute writes must be rejected (no mangling outside a class
    # body, so setattr with the literal name is equivalent).
    for attr in ('Iext', '__IExt'):
        with pytest.raises(TypeError):
            setattr(int_data, attr, a)
def test_set_data(setup_intensity_data):
    """
    test that neither data nor num_channels is a settable attribute
    """
    stack, *_ = setup_intensity_data
    for attr in ('data', 'num_channels'):
        with pytest.raises(AttributeError):
            setattr(stack, attr, 0)
| 7,725 | 3,072 |
import torch
class MarioConfig:
    """Hyper-parameter and path configuration for the Mario training run."""

    def __init__(self):
        # Imports hoisted to the top of the method (they were previously
        # buried between attribute assignments).
        import datetime
        import os

        # hyper config
        self.max_num_gpus = 1
        self.num_workers = 32
        self.discount = 0.999
        self.observation_space = (84, 84, 3)
        self.action_space = 256 + 20 + 8

        # Path to store the model weights and TensorBoard logs.
        self.results_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../results",
                                         os.path.basename(__file__)[:-3], datetime.datetime.now().strftime(
                                             "%Y-%m-%d--%H-%M-%S"))
        self.save_log = True  # Save the checkpoint in results_path as model.checkpoint
        self.training_steps = int(100 * 1e6)  # Total number of training steps (ie weights update according to a batch)

        # Alg config
        self.lambda_ = 0.95  # presumably a GAE/TD(lambda) factor -- confirm

        # Actor config
        # Learner config
        self.train_on_gpu = torch.cuda.is_available()  # Train on GPU if available
        self.batch_size = 32  # Number of parts of games to train on at each training step
        self.checkpoint_interval = 8  # Number of training steps before using the model for self-playing (int(8) was redundant)
        self.optimizer = "Adam"  # "Adam" or "SGD". Paper uses SGD
        self.weight_decay = 1e-4  # L2 weights regularization
        self.momentum = 0.9  # Used only if optimizer is SGD
        self.cofentropy = 1e-3  # entropy bonus coefficient -- confirm
        self.v_scaling = 0.5
        self.clip_param = 0.15
        self.lr_init = 5e-4  # Initial learning rate
        self.replay_buffer_size = int(1e3)  # Number of self-play games to keep in the replay buffer
        self.num_unroll_steps = 16  # Number of game moves to keep for every batch element
| 1,722 | 564 |