content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import unittest
from typing import List
| [
11748,
555,
715,
395,
198,
6738,
19720,
1330,
7343,
628,
198
] | 3.818182 | 11 |
from flask import render_template, g
from flask_babel import _, get_locale
from . import main
@main.before_app_request
@main.route("/")
@main.route("/index")
| [
6738,
42903,
1330,
8543,
62,
28243,
11,
308,
198,
6738,
42903,
62,
65,
9608,
1330,
4808,
11,
651,
62,
17946,
1000,
198,
198,
6738,
764,
1330,
1388,
628,
198,
31,
12417,
13,
19052,
62,
1324,
62,
25927,
628,
198,
31,
12417,
13,
38629,... | 2.910714 | 56 |
import pickle
import copy
import numpy as np
import environment.collisionmodel as cm
import utiltools.thirdparty.o3dhelper as o3dh
import utiltools.robotmath as rm
import utiltools.thirdparty.p3dhelper as p3dh
if __name__ == '__main__':
import robothelper
import numpy as np
import environment.collisionmodel as cm
yhx = robothelper.RobotHelperX(usereal=False, startworld=True, autorotate = True)
loc = LocatorFixed(homomatfilename="rightfixture_homomat1")
# bgdepth = pickle.load(open("./databackground/bgdepth.pkl", "rb"))
# bgpcd = pickle.load(open("./databackground/bgpcd.pkl", "rb"))
# objpcdmerged = None
# for i in range(1):
# yhx.pxc.triggerframe()
# fgdepth = yhx.pxc.getdepthimg()
# fgpcd = yhx.pxc.getpcd()
#
# substracteddepth = bgdepth - fgdepth
# substracteddepth = substracteddepth.clip(40, 300)
# substracteddepth[substracteddepth == 40] = 0
# substracteddepth[substracteddepth == 300] = 0
#
# # cv2.imshow("", yhx.pxc.cvtdepth(substracteddepth))
# # cv2.waitKey(0)
#
# tempdepth = substracteddepth.flatten()
# objpcd = fgpcd[np.nonzero(tempdepth)]
# objpcd = loc.getcorrectedpcd(objpcd)
# if objpcdmerged is None:
# objpcdmerged = objpcd
# else:
# objpcdmerged = np.vstack((objpcdmerged, objpcd))
objpcd = loc.capturecorrectedpcd(yhx.pxc, ncapturetimes=1)
pcdnp = p3dh.genpointcloudnodepath(objpcd, pntsize=2)
pcdnp.reparentTo(yhx.base.render)
# import utiltools.thirdparty.o3dhelper as o3dh
# pcdlist, _ = o3dh.clusterpcd(objpcd, pcd_nparray_nrmls = None)
# for i, objpcd in enumerate(pcdlist):
# pcdnp = p3dh.genpointcloudnodepath(objpcd, pntsize=2)
# pcdnp.reparentTo(yhx.base.render)
# if i == 0:
# pcdnp.setColor(.7,0,0,1)
# elif i == 1:
# pcdnp.setColor(0,0,.7,1)
# elif i == 2:
# pcdnp.setColor(0,.7,0,1)
# else:
# pcdnp.setColor(1,1,1,1)
tbscm = loc.gentubestand(homomat=loc.tubestandhomomat)
tbscm.reparentTo(yhx.base.render)
# base.run()
# homomat = loc.findtubestand_matchonobb(objpcd, toggle_debug=False)
# import registration.pattern as ptn
# pto = ptn.Pattern(root=".")
# pto.setpattern(np.array([[0,0,0,0,0,0,0,0,0,0],
# [0,0,2,3,3,0,0,0,0,0],
# [0,0,0,0,3,0,1,0,0,0],
# [0,0,0,2,0,1,0,0,1,0],
# [0,0,3,2,0,0,0,0,0,0]]))
# pto.setpattern(np.array([[0,0,3,2,0,0,0,0,0,0],
# [0,0,0,2,0,1,0,0,1,0],
# [0,0,0,0,3,0,1,0,0,0],
# [0,0,2,3,3,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0,0]]))
# pto.setpattern(np.array([[0,0,0,0,0,0,2,3,0,0],
# [0,1,0,0,1,0,2,0,0,0],
# [0,0,0,1,0,3,0,0,0,0],
# [0,0,0,0,0,3,3,2,0,0],
# [0,0,0,0,0,0,0,0,0,0]]))
# pto.setpattern(np.array([[3,3,3,0,0,0,0,1,1,1],
# [3,3,3,0,0,0,0,1,1,1],
# [3,3,3,0,0,0,0,1,1,1],
# [3,3,3,0,0,0,0,1,1,1],
# [3,3,3,0,0,0,0,1,1,1]]))
# pto.setpattern(np.array([[0,0,1,1,0,0,3,0,0,0],
# [0,1,0,0,1,1,0,0,0,0],
# [0,0,0,1,0,3,0,0,0,0],
# [0,0,0,0,0,3,3,0,1,0],
# [0,0,0,0,0,0,0,0,1,0]]))
# pto.gencad(homomat=loc.tubestandhomomat).reparentTo(base.render)
# pto.gendumbell(homomat=loc.tubestandhomomat).reparentTo(base.render)
# base.run()
# homomat = loc.findtubestand_match(objpcdmerged, toggle_debug=True)
elearray, eleconfidencearray = loc.findtubes(loc.tubestandhomomat, objpcd, toggledebug=False)
# yhx.base.run()
# yhx.p3dh.genframe(pos=loc.tubestandhomomat[:3,3], rotmat=loc.tubestandhomomat[:3,:3]).reparentTo(yhx.base.render)
rbtnp = yhx.rbtmesh.genmnp(yhx.rbt)
rbtnp.reparentTo(yhx.base.render)
# pcdnp = p3dh.genpointcloudnodepath(objpcd, pntsize=5)
# pcdnp.reparentTo(yhx.base.render)
# cornerhole_pcdnp = p3dh.genpointcloudnodepath(loc.calibrate_holes(objpcd), colors=np.array([1, 0, 0, 1]),
# pntsize=10)
# cornerhole_pcdnp.reparentTo(yhx.base.render)
# positions, rotmats = loc.findtubestands_calibratewoodstickholes(objpcd)
# for posrot in zip(positions, rotmats):
# loc.gentubestand(rm.homobuild(posrot[0], posrot[1])).reparentTo(yhx.base.render)
# tbscm = loc.gentubestand(homomat=homomat)
# tbscm.reparentTo(yhx.base.render)
# tbscm.showcn()
tubecms = loc.gentubes(elearray, loc.tubestandhomomat, eleconfidencearray=eleconfidencearray)
for tbcm in tubecms:
tbcm.reparentTo(yhx.base.render)
# tbcm.setColor(1,0,0,.2)
tbcm.showcn()
yhx.base.run()
| [
11748,
2298,
293,
198,
11748,
4866,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2858,
13,
26000,
1166,
19849,
355,
12067,
198,
11748,
3384,
2326,
10141,
13,
17089,
10608,
13,
78,
18,
67,
2978,
525,
355,
267,
18,
34985,
198,
11748,
... | 1.664622 | 3,098 |
#!/usr/bin/env python
import unittest
import pyprimesieve
if __name__ == "__main__":
unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
555,
715,
395,
198,
11748,
12972,
1050,
999,
12311,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
... | 2.477273 | 44 |
"""Balance-related functionality of `Wealth`."""
import datetime as dt
import functools
from typing import Generator, List
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats
from IPython.core.display import display
from IPython.display import Markdown
from ipywidgets.widgets import (
BoundedIntText,
Box,
Checkbox,
Dropdown,
HBox,
Label,
Output,
VBox,
)
import wealth
from wealth.plot import create_account_checkboxes, display_df, style_red_green_fg
from wealth.util.util import money_fmt
def _daterange(start: dt.date, end: dt.date) -> Generator[dt.date, None, None]:
"""Yield dates between, including given start and end dates."""
for offset in range((end - start).days + 1):
yield start + dt.timedelta(offset)
def _display_balance(
_,
checkboxes: List[Checkbox],
drp_date: Dropdown,
out: Output,
df: pd.DataFrame,
):
"""Plot the balance, i.e. the cumulative sum, of the given dataframe's
column `amount` at the date of the given dropdown's value."""
accounts = [c.description for c in checkboxes if c.value and c.description != "All"]
series = df[df["account"].isin(accounts)]["amount"].resample("D").sum().cumsum()
date = dt.datetime(drp_date.value.year, drp_date.value.month, drp_date.value.day)
value = series.iloc[series.index.get_loc(date, method="pad")]
out.clear_output()
with out:
display(Markdown(f'<br><font size="6">{wealth.Money(value)}</font>'))
def balance(df: pd.DataFrame):
"""Show account-related cumulative sum of the dataframe's column `amount` at
a specified date."""
df["date"] = pd.to_datetime(df["date"])
df = df.reset_index(drop=True).set_index("date")
out = Output()
checkboxes = []
dates = list(_daterange(df.index.date.min(), df.index.date.max()))
drp_date = Dropdown(
description="Date: ",
options=dates,
value=df.index.date.max(),
layout=wealth.plot.dropdown_layout,
)
update_balance = functools.partial(
_display_balance,
checkboxes=checkboxes,
out=out,
drp_date=drp_date,
df=df,
)
create_account_checkboxes(checkboxes, df, True, update_balance)
drp_date.observe(update_balance, "value")
display(Markdown("# Balance"))
display(wealth.plot.account_checkboxes(checkboxes))
display(drp_date)
display(out)
update_balance(None)
def _create_local_minimum_maximum_df(df: pd.DataFrame) -> pd.DataFrame:
"""Create a dataframe consisting of the local minima & maxima of the given
dataframe as well as its first and last entrees."""
return pd.concat(
[
df.head(1),
df[
(df.shift(1) > df) & (df.shift(-1) > df)
| (df.shift(1) < df) & (df.shift(-1) < df)
],
df.tail(1),
]
)
def _plot_df(df: pd.DataFrame, freq: str, label: str):
"""Plot given dataframe with the given frequency and label."""
if freq == "<atomic>":
plt.step(df.index, df, label=label, where="post")
elif freq == "<minmax>":
plt.plot(_create_local_minimum_maximum_df(df), label=label)
else:
df = df.rolling(freq).mean()
plt.plot(df, label=label)
def _plot_cumsum(
_,
sum_accs_checkboxes: List[Checkbox],
single_accs_checkboxes: List[Checkbox],
out: Output,
fig: mpl.figure.Figure,
df: pd.DataFrame,
drp_freq: Dropdown,
):
"""Plot cumsum graphs with the given params."""
sum_accs = [chk.description for chk in sum_accs_checkboxes if chk.value]
single_accounts = [
chk.description
for chk in single_accs_checkboxes
if chk.value and chk.description != "All"
]
show_legend = False
sum_series = df[df["account"].isin(sum_accs)]["amount"].cumsum()
with out:
fig.clear()
wealth.plot.setup_plot_and_axes(fig, "Cumulative Sum of All transactions")
if not sum_series.empty:
_plot_df(sum_series, drp_freq.value, "Combined")
show_legend = True
for account in single_accounts:
single_series = df[df["account"] == account]["amount"].cumsum()
_plot_df(single_series, drp_freq.value, account)
show_legend = True
if show_legend:
plt.legend(loc="best", borderaxespad=0.1)
def graph(df: pd.DataFrame):
"""Show an account-related cumulative sum graph of the dataframe's column
`amount`."""
drp_freq = Dropdown(
description="Frequency:",
options=[
("Atomic", "<atomic>"),
("Minima/Maxima", "<minmax>"),
("Day", "D"),
("Week", "7D"),
("2 Weeks", "14D"),
("Month", "30D"),
("Quarter", "90D"),
("Semester", "180D"),
("Year", "365D"),
],
value="<atomic>",
layout=wealth.plot.dropdown_layout,
)
sum_accs_checkboxes, single_accs_checkboxes = [], []
out = Output()
with out:
fig = plt.figure(figsize=(12, 10), num="Cumulative Sum of All Transaction")
plot = functools.partial(
_plot_cumsum,
sum_accs_checkboxes=sum_accs_checkboxes,
single_accs_checkboxes=single_accs_checkboxes,
out=out,
fig=fig,
df=df,
drp_freq=drp_freq,
)
drp_freq.observe(plot, "value")
create_account_checkboxes(sum_accs_checkboxes, df, True, plot)
create_account_checkboxes(single_accs_checkboxes, df, True, plot)
display(Markdown("# Plot"))
display(
VBox(
[
drp_freq,
HBox(
[
VBox(
[
Label("Accounts for combined plot: "),
Label("Accounts for individual plots: "),
]
),
VBox(
[
Box(sum_accs_checkboxes),
Box(single_accs_checkboxes),
]
),
],
layout=wealth.plot.box_layout,
),
]
)
)
plot(None)
display(out)
def _display_mean_and_median(df: pd.DataFrame, caption: str):
"""Display mean, median and display mean and median without outliers."""
filtered = df.dropna()[np.abs(scipy.stats.zscore(df.dropna())) < 2]
df_out = pd.DataFrame(
index=["mean", "median", "filtered mean", "filtered median"],
data={"values": [df.mean(), df.median(), filtered.mean(), filtered.median()]},
)
style = df_out.style.format(formatter=money_fmt(), na_rep="").applymap(
style_red_green_fg
)
out = Output()
with out:
display(Markdown(f"### {caption}"))
display(style)
return out
def _display_summary(_, txt_n_periods: BoundedIntText, out: Output, df: pd.DataFrame):
"""Display a summary for the given series."""
n_periods = txt_n_periods.value
out.clear_output()
with out:
display(
HBox(
[
_display_mean_and_median(df["diff"].tail(n_periods), "Differences"),
_display_mean_and_median(
df["min diff"].tail(n_periods), "Differences of Minima"
),
_display_mean_and_median(
df["max diff"].tail(n_periods), "Differences of Maxima"
),
]
)
)
def _display_mean_balance_dataframes(
_,
drp_freq: Dropdown,
checkboxes: List[Checkbox],
out: Output,
df: pd.DataFrame,
):
"""List the balances per timeframes with the given frequency."""
out.clear_output()
if df.empty:
return
df_out = pd.DataFrame()
accounts = [c.description for c in checkboxes if c.value and c.description != "All"]
mask = df["account"].isin(accounts)
daily_cumsum_df = df[mask]["amount"].resample("D").sum().cumsum()
resampler = daily_cumsum_df.resample(drp_freq.value)
df_out["mean"] = resampler.mean()
df_out["diff"] = df_out["mean"].diff()
df_out["min"] = resampler.min()
df_out["min diff"] = df_out["min"].diff()
df_out["max"] = resampler.max()
df_out["max diff"] = df_out["max"].diff()
df_out.index = df_out.index.strftime("%Y-%m-%d")
style = (
df_out.iloc[::-1]
.style.format(formatter=money_fmt(), na_rep="")
.applymap(style_red_green_fg)
)
with out:
display_df(style)
if len(df_out) <= 1:
return
inner_out = Output()
lbl_n_periods = Label("Consider recent Periods:")
txt_n_periods = BoundedIntText(
12,
min=1,
max=10000,
layout=wealth.plot.slim_text_layout,
)
update_out = functools.partial(
_display_summary,
txt_n_periods=txt_n_periods,
out=inner_out,
df=df_out,
)
txt_n_periods.observe(update_out, "value")
display(Markdown("## Summary"))
display(Box([lbl_n_periods, txt_n_periods], layout=wealth.plot.box_layout))
display(inner_out)
update_out(None)
def means(df: pd.DataFrame):
"""Display dataframes containing balances for a given frequency."""
df["date"] = pd.to_datetime(df["date"])
df = df.reset_index(drop=True).set_index("date")
out = Output()
drp_freq = Dropdown(
description="Frequency:",
options=wealth.plot.frequency_options,
value="MS",
layout=wealth.plot.dropdown_layout,
)
checkboxes = []
update_out = functools.partial(
_display_mean_balance_dataframes,
drp_freq=drp_freq,
checkboxes=checkboxes,
out=out,
df=df,
)
drp_freq.observe(update_out, "value")
create_account_checkboxes(checkboxes, df, True, update_out)
display(Markdown("# Mean Balances"))
display(
VBox(
[
HBox([drp_freq], layout=wealth.plot.box_layout),
wealth.plot.account_checkboxes(checkboxes),
]
)
)
display(out)
update_out(None)
| [
37811,
45866,
12,
5363,
11244,
286,
4600,
1135,
1094,
63,
526,
15931,
198,
11748,
4818,
8079,
355,
288,
83,
198,
11748,
1257,
310,
10141,
198,
6738,
19720,
1330,
35986,
11,
7343,
198,
198,
11748,
2603,
29487,
8019,
355,
285,
489,
198,
... | 2.063627 | 5,045 |
'''Move the mouse based on input from webcam. If motion is detected the mouse is moved.
This is to switch scenes in OBS (screen recording and streaming software) based on camera activity.
OBS supports automatic switching based on mouse activity, but not camera activity. This hacks around that.
Requires opencv-python, imutils, pyautogui, and numpy, and "Advanced Scene Switcher" for OBS.
Script should be started then Advanced Scene Switcher configured to switch scenes based on idle detection.
I apologize in advance for how hacky this is.
'''
import cv2
import imutils
import time
import pyautogui
#set up video capture
cv2.namedWindow("preview")
vc = cv2.VideoCapture(2) #change this number for your video capture device number.
if vc.isOpened(): # try to get the first frame
rval, frame = vc.read()
else:
rval = False
firstFrame = None
alternate = False
#Begin motion detection.
#This basic motion detection requires only modest input, so the image is read infrequently,
#at a low resolution, and blurred and thresholded. The goal is to reduce CPU demands.
while rval:
time.sleep(0.1) #delay between frames.
rval, frame = vc.read()
frame = imutils.resize(frame,width=128) #Resize image very small
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #convert to greyscale
gray = cv2.GaussianBlur(gray, (9,9),0) #blur image
if firstFrame is None:
firstFrame = gray
continue
#threshold and find contours. If contours have significant area then motion is detected.
frameDelta = cv2.absdiff(firstFrame,gray)
thresh = cv2.threshold(frameDelta, 25,255,cv2.THRESH_BINARY)[1]
thresh = cv2.dilate(thresh, None, iterations=2)
#this function may return (cnts, _) on older versions of cv2. If you get an exception try that.
(_, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
# loop over the contours
for c in cnts:
# if the contour is too small, ignore it
if cv2.contourArea(c) < 500:
continue
else:
#motion detected, jiggle mouse one pixel
moveAmt = 1
if not alternate:
moveAmt *= -1
alternate = not alternate
pyautogui.moveRel(None,moveAmt)
#uncomment to show window. Not required to move mouses
#cv2.imshow("preview", thresh)
#exit on ESC
key = cv2.waitKey(20)
if key == 27:
break
cv2.destroyWindow("preview")
| [
7061,
6,
21774,
262,
10211,
1912,
319,
5128,
422,
49823,
13,
1002,
6268,
318,
12326,
262,
10211,
318,
3888,
13,
198,
1212,
318,
284,
5078,
8188,
287,
440,
4462,
357,
9612,
8296,
290,
11305,
3788,
8,
1912,
319,
4676,
3842,
13,
198,
4... | 2.706783 | 914 |
import random
import time
import sys
def retrieving_word(topic):
"""
This function is used to randomly return a word from a list based on what topic the player wants.
Paramaters
----------
topic: str
This is to store the topic the user wants to obtain the word from
Ruturns
-------
Array
randomly selected word based on the topic
"""
#LIST OF WORDS TO BE RETRIEVED BASED ON PLAYER'S CHOICE
list_of_words_birds = ['albatross','aviary','bald eagle','pelican','canary','chicken','goose','duck','hen','crow','dove','emu','falcon','flamingo','kingfisher','lark','owl','penguin','peacock']
list_of_words_animals = ['alligator','anaconda','ant','ape','bear','baboon','beaver','bat','boar','blue whale','butterfly','cat','cheetah','camel','cobra','cow','crocodile','elephant','zebra']
list_of_words_gemstones = ['aquamarine','amethyst','agate','alexandrite','amazonite','amber','beryl','bloodstone','calcite','citrine','diamond','fluorite','emrald','garnet','jasper','jade','lapiz lazuli','malachite','opal','onyx','pearl','peridot','pyrite','quartz','ruby','sapphire','topaz','tourmaline','turquoise','zircon']
list_of_words_sports = ['archery','badminton','cricket','bowling','tennis','skateboarding','surfing','hockey','karate','yoga','volleyball','baseball','rugby','soccer','cycling','golf','football']
lst_of_words_countries = ['afghanistan','albania','algeria','andaman and nicobar islands','bahamas','bhutan','beligum','bangladesh','brazil','canada','costa rica','cuba','denmark','equador','germany','greece','hungary','india','italy','kazakhstan','luxembourg','madascar','maldives','netherland','new zealand','north korea','parguay','portugal','romania','qatar','sweden','spain','united states of america','yemen','zimbabwe']
if 'bird' in topic:
count = random.randint(0,(len(list_of_words_birds)-1))
full_word = list_of_words_birds[count]
topic = 'BIRDS'
elif 'animal' in topic:
count = random.randint(0,(len(list_of_words_animals)-1))
full_word = list_of_words_animals[count]
topic = 'ANIMALS'
elif 'gemstone' in topic:
count = random.randint(0,(len(list_of_words_gemstones)-1))
full_word = list_of_words_gemstones[count]
topic = 'GEMSTONES'
elif 'sport' in topic:
count = random.randint(0,(len(list_of_words_sports)-1))
full_word = list_of_words_sports[count]
topic = 'SPORTS'
elif 'countr' in topic:
count = random.randint(0,(len(lst_of_words_countries)-1))
full_word = lst_of_words_countries[count]
topic = 'COUNTRIES'
return full_word
def generating_partially_blanked_word (full_word_list):
"""
This function will generate the partially blanked word, with randomised alphabet(s) required for the user to enter the input
input -> 'CROCODILE'
output -> 'C _ _ _ _ _ _ L _', 4
Parameters
----------
full_word: str
it contains the word that is to be blanked in this function
Return
-------
lst_with_alph: list
list of partially blanked alphabet(s)
maximum_hint_count: int
maximum hint count based on the length of the word
"""
index=0
lst_with_alph=[]
#initialize the list with '_' and blank spaces ' '
while index < len(full_word_list):
if ' ' in full_word_list[index]:
lst_with_alph.append(' ') #replace sapce between 2 words with a ' ' (double spacebar)
else:
lst_with_alph.append('_') #replace alphabets with underscore ('_')
index += 1
#set the randomize alphbets and max number of hints
if len(full_word_list) <=4:
count = random.sample(range(0,(len(full_word_list)-1)),1)[0] #randomly generate a number (index of alphabet in full_word) to be printed in the blank word i.e. _ _ c _ _ like this
lst_with_alph[count] = full_word_list[count] #randomly generate ONE alphabet in the blanked word
maximum_hint_count = 1 #maximum number of times the player can ask for hint
elif 4 < len(full_word_list) <= 6:
count = random.sample(range(0,(len(full_word_list)-1)),1)[0] #randomly generate a number (index of alphabet in full_word) to be printed in the blank word i.e. _ _ c _ _ like this
lst_with_alph[count] = full_word_list[count] #randomly generate ONE alphabet in the blanked word
maximum_hint_count = 2 #maximum number of times the player can ask for hint
elif 10 > len(full_word_list) > 6:
count = random.sample(range(0,(len(full_word_list)-1)),2) #randomly generate a number (index of alphabet in full_word) to be printed in the blank word i.e. _ _ c _ _ like this
for i in range(2):
lst_with_alph[count[i]] = full_word_list[count[i]]
maximum_hint_count = 3 #maximum number of times the player can ask for hint
else:
count = random.sample(range(0,(len(full_word_list)-1)),3) #randomly generate a number (index of alphabet in full_word) to be printed in the blank word i.e. _ _ c _ _ like this
for i in range(3):
lst_with_alph[count[i]] = full_word_list[count[i]]
maximum_hint_count = 4 #maximum number of times the player can ask for hint
#return the list of partially blanked alphabet(s) and maximum hint count as array
return lst_with_alph, maximum_hint_count
"""
this is driver function of this game
1. Get the randomized blanked word based on the selected topic
2. Prompt player to guess the alphabets or ask for hint until either the player gets the word correct or has exhuasted all the available tries/chance
"""
if __name__ == '__main__':
choice = (input('WHAT DO YOU WANT TO PLAY: ')).lower() #choice is users input of game he wants to play
play_hangman(choice)
| [
11748,
4738,
220,
201,
198,
11748,
640,
201,
198,
11748,
25064,
201,
198,
4299,
50122,
62,
4775,
7,
26652,
2599,
201,
198,
220,
220,
220,
37227,
220,
220,
201,
198,
220,
220,
220,
770,
2163,
318,
973,
284,
15456,
1441,
257,
1573,
42... | 2.524832 | 2,376 |
from datetime import datetime
from django.db import models
from django.contrib.auth import get_user_model
from django.urls import reverse
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198
] | 3.538462 | 39 |
""" Implementation for the dataset and GenesisTextDataset class, which handles dataloading from ipfs
"""
# The MIT License (MIT)
# Copyright © 2021 Yuma Rao
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import random
from re import I
from torch.utils.data.dataloader import DataLoader
from torch.utils.data import Subset
import torch
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import requests
from loguru import logger
import bittensor
logger = logger.opt(colors=True)
class Dataset():
""" Implementation for the dataset class, which handles dataloading from ipfs
"""
@staticmethod
def requests_retry_session(
retries=10,
backoff_factor=0.5,
status_forcelist=(104, 500, 502, 504),
session=None,
):
""" Creates a retriable session for request calls. This enables
automatic retries and back-off retries should any request calls fail.
Args:
retries (int, optional): Maximum number of retries. Defaults to 3.
backoff_factor (float, optional): Factor by which to back off if a retry fails. Defaults to 0.3.
status_forcelist (tuple, optional): A set of integer HTTP status codes that we should force a retry on. Defaults to (500, 502, 504).
session ([type], optional): Session for which to set up the retries. Defaults to None.
Returns:
requests.Session(): A Requests Session object set up for retries and backoff.
"""
session = session or requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
def retrieve_directory(self, address: str, params = None, action: str = 'post'):
r"""Connects to Pinata IPFS gateway and retrieves directory.
Returns:
dict: A dictionary of the files inside of the genesis_datasets and their hashes.
"""
session = requests.Session()
session.params.update(params)
if action == 'get':
response = Dataset.requests_retry_session(session=session).get(address)
elif action == 'post':
response = Dataset.requests_retry_session(session=session).post(address)
return response
def __len__(self):
""" Returns length of the dataset that the dataset is processing
"""
def __getitem__(self, idx):
""" Returns the next batch from the dataset.
"""
class GenesisTextDataset( Dataset ):
""" One kind of dataset that caters for the data from ipfs
"""
def get_random_directories(self):
r""" Getting directories from a random dataset_hash
Where a directory could be leading to a data file or a directory file
"""
# --- Getting dataset hashes from pin/ls.
dataset_hashes = []
response = self.retrieve_directory(self.pin_get, (('type', 'recursive'),), action = 'post')
if response.status_code != 200:
dataset_hashes= [
'QmPbAqDsMpufa2eNsE8X9TRh43JsAPxbj7tz3PmprouH7U',
'QmRJKZq6q64H1iwokVJTbi4tWjewvodAaZ6Kn1SpgP33EG',
'QmSJJtZa37kX7ABBJyani9i3cFTq86zebTLQqioRCvgDei',
'QmSQ6AnnWQUy4bETQSAgkgCkJ1AQePSeKvbaFejizj5HP3',
'QmSTudwkfLWkwFSC7LnUVZyroBgV3A6atbFeKUZ63DnTeW',
'QmTtuWZmTZf5JcBmXpbDDM5Hkq4AoFJk9NGoDsR4zUhbJx',
'QmVbNzncoJK8WwyAoWxLndk4999iyyYZbCKpEvUxrFXp1N',
'QmWiHsJ6z2LbZnEcidgz2vPq9ZsgrKUQ4QdB83pFcFvug3',
'QmXa1SDyVK6f876JYHwoQZcpXGMi8aPYKWvHzKTDXuqU5z',
'QmYg67pZwPsX3qH31tEc1qexrPc88zUkZG4AqsNDZo5FEX',
'QmZawcgwiT9S5Vk5WX41RRaBPb73KByQej9JmRCNgNVxjz',
'QmeSNvZVtHeMmJSuJQAUyTTW9LZbQkAqLDgVVXhzqJHrvY',
'Qmefa9xMdu7HZyr3U1zH8MaCayPngPJ9iZnnddXfXMrA2N',
'Qmf3BjH7SzK8WHGWBngt4WK6jGCpUtgPEBCw2pFZvYimto'
]
else:
for hash, v in response.json()['Keys'].items():
dataset_hashes.append(hash)
# --- Getting directories from a random dataset hash.
# --- directories: List[ Map{Name: str, Hash: str, Size: int} ]
i = 0
directories = []
dataset_hashes_order = list(range(len(dataset_hashes)))
random.shuffle(dataset_hashes_order)
while len(directories) == 0 and i < len(dataset_hashes):
dataset_hash = dataset_hashes[dataset_hashes_order[i]]
i += 1
response = self.retrieve_directory(self.file_get, (('arg', dataset_hash),))
if response.status_code != 200:
logger.warning("Failed to retrieve directory, ignoring directory:".ljust(20) + "<blue>{}</blue>".format(dataset_hash))
else:
# --- Get the directory links if there is valid response, else check on another dataset_hash
directory = response.json()
if directory and 'Links' in directory.keys():
directories += directory['Links']
logger.success("Loaded dataset hash:".ljust(20) + "<blue>{}</blue>".format(dataset_hash))
if len(directories) == 0:
directories = None
return directories
def extract_datafile_dir(self, directory):
r"""
With recursion, from the given directory, get a directory that leads to a datafile.
Args:
directory: Map{ Name: str, Hash: str, Size: int }:
The original directory to look up a datafile for.
Returns:
directory: Map{ Name: str, Hash: str, Size: int }:
A random directory that lead to a datafile.
"""
# --- If the size of directory is small, it is leads to data file, return the data file.
if directory['Size'] <= self.datafile_size_bound:
return directory
# --- Else, the directory leads to more directories, return a random data file within the directories.
else:
response = self.retrieve_directory(self.file_get, (('arg', directory['Hash']),))
# --- Return none if the request failed.
if response.status_code != 200:
logger.warning("Failed to retrieve directory, ignoring directory:".ljust(20) + "<blue>{}</blue>".format(directory))
return None
# --- Pick a random sub_directory, run recursion until we have found a data file
else:
sub_directories = response.json()
if sub_directories and 'Links' in sub_directories.keys() and len(sub_directories['Links']) >= 1:
random_sub_directory = random.choice(sub_directories['Links'])
# --- Fill the name of the random_sub_directory if it is empty.
if random_sub_directory['Name'] == '':
random_sub_directory['Name'] = directory['Name']
return self.extract_datafile_dir(random_sub_directory)
else:
logger.warning("Directory seems empty, ignoring directory:".ljust(20) + "<blue>{}</blue>". format(dir_hash))
return None
def get_text(self, file):
r"""
Load the text data from disk if it is already in the the data_dir,
else download it from IPFS and save it
Args:
file: Map{ Name: str, Hash: str, Size: int }
The directory to get text file from.
Returns:
text: str:
The text data.
"""
text = None
file_name = file['Name']
file_hash = file['Hash']
full_path = os.path.expanduser(os.path.join(self.data_dir, file_name))
# --- Load text from path
if os.path.exists(full_path):
try:
with open(full_path, mode='r') as f:
text = f.read()
logger.success("Loaded:".ljust(20) + "<blue>{}</blue>".format(file_name))
except Exception:
logger.warning("Load failed:".ljust(20) + "<blue>{}</blue>".format(file_name))
# --- If couldnt load from path, download text.
if text == None:
response = self.retrieve_directory(self.file_get, (('arg', file_hash),))
if response.status_code != 200:
logger.warning("Failed to retrieve file, ignoring file:".ljust(20) + "<blue>{}</blue>".format(file_name))
else:
text = response.text
logger.success("Downloaded:".ljust(20) + "<blue>{}</blue>".format(file_name))
# --- Save text if the save_dataset flag is on.
if self.save_dataset:
try:
with open(full_path, mode = 'w+') as f:
f.write(text)
logger.success("Saved:".ljust(20) + "<blue>{}</blue>".format(file_name))
except Exception:
logger.warning("Save failed:".ljust(20) + "<blue>{}</blue>".format(file_name))
return text
def construct_text_corpus(self, min_data_len = 0):
""" Main function for generating the text data.
1. Get directories from a random dataset_hash (dataset_hash is the result from calling pin/ls).
2. Pick a random directory and get the directory that would lead to a datafile.
3. Get text from the directory.
4. Repeat 2,3 until we have reached the max_corpus_size
Returns:
text: str:
Contents of the text data.
"""
try:
logger.success("Retrieving a dataset files from the IPFS gateway...")
# --- Get directories from a random dataset_hash
directories = self.get_random_directories()
data_corpus = []
# --- Generate a random order of the directories
directory_order = list(range(len(directories)))
random.shuffle(directory_order)
# --- Pick random directories and get their text contents.
if directories:
total_dataset_size = 0
total_dataset_len = 0
i = 0
# --- Dont stop until the corpus size and the minimum data_length was reached.
while (total_dataset_size <= self.max_corpus_size) or (total_dataset_len < min_data_len):
# --- Get a directory that leads to a datafile.
random_datafile_dir = self.extract_datafile_dir(directories[directory_order[i]])
if random_datafile_dir == None:
pass
# --- Get text from the datafile directory
try:
text = self.get_text(random_datafile_dir)
except:
text = None
if text != None:
text_list = text.split()
data_corpus.extend(text_list)
total_dataset_size += int(random_datafile_dir['Size'])
total_dataset_len += len(text_list)
i += 1
return data_corpus
logger.error("It appears the directory is empty... Restart your miner to try again.")
return None
except Exception as e:
logger.error("Ran into exception when trying to retrieve dataset from IPFS: {}".format(e))
return None
def dataloader(self, epoch_length = 100):
""" Creates a torch dataloader out of a subclass of this class.
Args:
epoch_length (int, optional): The epoch length of the miner. If this length is not set or if it is larger than the dataset,
then a dataloader for the entire dataset is returned. Otherwise, a dataloader for a subset of the dataset of epoch_length
is returned. Defaults to None.
Returns:
torch.utils.data.dataloader.DataLoader: Pytorch dataloader.
"""
data_size = epoch_length * self.batch_size * self.block_size
# Make sure the data remained is at least as big as data_size
if len(self.data_remained) < (data_size) :
self.data_remained += self.construct_text_corpus(min_data_len = data_size)
self.data = self.data_remained[:data_size]
del self.data_remained[:data_size]
# Datalaoder calls self._getitem_ functions until the self.data uses up, and group the result by batch size
return DataLoader(self,
shuffle=True,
batch_size=self.batch_size,
num_workers=self.num_workers,
drop_last=True)
def __next__(self):
"""Returns the next element from the dataset.
"""
if self.__infinite_dataset_iterator == None:
self.__infinite_dataset_iterator = iter([input for input in self.dataloader(1000)]) # should set it to 1000
try:
return next(self.__infinite_dataset_iterator)
except StopIteration:
self.__infinite_dataset_iterator = iter([input for input in self.dataloader(1000)])
return next(self.__infinite_dataset_iterator)
def __len__(self):
"""Returns number of samples (blocks) of dataset
Returns:
length: int
"""
if (self.data == None) or (self.block_size == None) or (self.block_size == 0):
return 0
return round( len(self.data) / self.block_size )
def __getitem__(self, idx):
""" Returns a block of sentences from text dataset.
Args:
idx: index of data input
Returns:
torch.tensor(dix)
"""
start_idx = (idx * self.block_size) % len(self.data)
end_idx = start_idx + self.block_size
tokenized_text = torch.tensor(self.tokenizer(" ".join(self.data[start_idx:end_idx]), padding=True, truncation=True)['input_ids'], dtype=torch.long)
return tokenized_text[:self.block_size]
| [
37811,
46333,
329,
262,
27039,
290,
18993,
8206,
27354,
292,
316,
1398,
11,
543,
17105,
4818,
282,
1170,
278,
422,
20966,
9501,
198,
37811,
198,
2,
383,
17168,
13789,
357,
36393,
8,
198,
2,
15069,
10673,
33448,
575,
7487,
48395,
198,
... | 2.170257 | 7,195 |
zoo = Zoo()
reset_zoo()
| [
628,
198,
89,
2238,
796,
21980,
3419,
628,
198,
198,
42503,
62,
89,
2238,
3419,
198
] | 1.875 | 16 |
from bson.objectid import ObjectId
import math
from Gifts.getRecommendations.Requests import findingApi
from Gifts.getRecommendations.TextClasterisation import nlp
from Gifts.getRecommendations.DB import DB
from random import shuffle
page_size = 100 # todo in constant file
def remove_similar(items, n):
"""
left only n items with same colour
:param items: sorted by prediction
:param n: number of similar to left
:return: new list of items
"""
cur_color = items[0]['prediction']
count = 0
new_items = []
for item in items:
if item['prediction'] != cur_color:
cur_color = item['prediction']
count = 0
if count < n:
count += 1
new_items.append(item)
return new_items
def convert_list_to_dict(items, key):
"""
convert list of dict to dict
:param items: list of dict
:param key: which field of dict make key
:return: dict, where keys are items[:][key]
"""
new_dict = {}
for item in items:
cur_key = item[key]
item.__delitem__(key)
new_dict.update({cur_key: item})
return new_dict
def get_list_from_category(category_id, item_filter):
"""
find items from specified category in ebay by applying filter
:param category_id:
:param item_filter:
:return: up to 30 items // should be clarify
"""
items = []
# print type(category_id)
# print item_filter
response = findingApi.get_list_of_items("findItemsAdvanced",
{'categoryId': category_id,
'paginationInput': {'entriesPerPage': 100},
'itemFilter': item_filter})
#todo empty response
if 'item' not in response['searchResult']: # if empty response
return []
for item in response['searchResult']['item']:
try:
items.append({'title': item['title'],
'galleryURL': item['galleryURL'], 'itemURL': item['viewItemURL'],
'price': item['sellingStatus']['convertedCurrentPrice'],
'categoryID': category_id, 'itemID': item['itemId']})
# print item
except Exception as e:
print e.message
shuffle(items)
if len(items) > 10: # left only by 3 items for each of ten category
predictions = nlp.get_prediction([item['title'] for item in items])
for i in range(len(items)):
items[i]['prediction'] = int(predictions[i])
items.sort(key=lambda x: x['prediction'])
items = remove_similar(items, 3)
return convert_list_to_dict(items, 'itemID')
def choose_categories(user, max_categories=10):
"""
choose max_categories from user.categories
60% top, 40% unseen
:param user:
:param max_categories:
:return:
"""
assert max_categories > 5
items = []
count = 0
keys = user['categories'].keys()
shuffle(keys)
# add top categories
for category_id in keys:
if user['categories'][category_id]['votes'] > 3 and user['categories'][category_id]['rating'] >= 3:
user['categories'][category_id]['used'] = True
count += 1
items.append(category_id)
if count >= max_categories * 0.6:
break
# add unseen categories
for category_id in keys:
if not 'used' in user['categories'][category_id].keys() or user['categories'][category_id]['used'] is False:
if user['categories'][category_id]['votes'] > 3 and user['categories'][category_id]['rating'] <= 0:
continue
user['categories'][category_id]['used'] = True
count += 1
items.append(category_id)
if count >= max_categories:
break
return items
def generate_list_for_user(user, item_filter):
"""
generate user.items - suggested items
:param user:
:param item_filter:
:return:
"""
item_filter.append({'name': 'Condition', 'value': 'New'})
item_filter.append({'name': 'ListingType', 'value': 'FixedPrice'})
categories_id = choose_categories(user)
items = {}
for category_id in categories_id:
items.update(get_list_from_category(category_id, item_filter))
return items
def generate_list(user_id, min_price=None, max_price=None):
"""
:use generate_list_for_user
generate list of suggested items for user
:param user_id:
:param min_price:
:param max_price:
:return:
"""
user_id = ObjectId(user_id)
item_filter = []
if min_price is not None:
item_filter.append({'name': 'MinPrice', 'value': min_price})
if max_price is not None:
item_filter.append({'name': 'MaxPrice', 'value': max_price})
client = DB.get_client()
try:
user = client.GRS.users.find_one({"_id": user_id})
assert user is not None
items = generate_list_for_user(user, item_filter)
client.GRS.users.find_one_and_update({"_id": user_id}, {'$set': {'categories': user['categories'],
'cur_page': 0,
'items': items}})
finally:
client.close()
def rate(user_id, item_id, rating):
"""
change rating of category of item
:param user_id:
:param item_id:
:param rating:
:return:
"""
client = DB.get_client()
try:
user = client.GRS.users.find_one({"_id": user_id})
assert user is not None
category_id = user['items'][str(item_id)]['categoryID']
category = user['categories'][category_id]
category['rating'] = float(category['rating'] * category['votes'] + rating) / (category['votes'] + 1)
category['votes'] += 1
client.GRS.users.find_one_and_update({"_id": user_id},
{'$set': {'categories': user['categories']}}) # todo optimize
finally:
client.close()
# print category
return category_id, category
def rate_and_remove(user_id, item_id, rating):
"""
change rating of category of item and remove items, where category rating <=0 and votes >=3
:param user_id:
:param item_id:
:param rating:
:return:
"""
user_id = ObjectId(user_id)
category_id, category = rate(user_id, item_id, rating)
if category['votes'] >= 3 and category['rating'] <= 0:
remove_all_items_from_category(user_id, category_id)
def get_page(user_id, page_number):
"""
return items from page
:param user_id:
:param page_number:
:return: [] if wrong page, None if error, list of items if ok
"""
# todo add items from RS
assert type(page_number) == int
if page_number <= 0:
return []
user_id = ObjectId(user_id)
client = None
try:
client = DB.get_client()
user = client.GRS.users.find_one({"_id": user_id})
assert user is not None
if (page_number - 1) * page_size >= len(user['items'].keys()):
return []
items = []
# dict to list
for key in sorted(user['items'].keys()): # todo optimize
user['items'][key].update({'itemId': key})
items.append(user['items'][key])
return items[page_size * (page_number - 1): page_size * page_number]
finally:
client.close()
| [
6738,
275,
1559,
13,
15252,
312,
1330,
9515,
7390,
198,
11748,
10688,
198,
6738,
48462,
13,
1136,
41248,
602,
13,
16844,
3558,
1330,
4917,
32,
14415,
198,
6738,
48462,
13,
1136,
41248,
602,
13,
8206,
2601,
1603,
5612,
1330,
299,
34431,
... | 2.276364 | 3,300 |
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
pygogo.handlers
~~~~~~~~~~~~~~~
Log handlers
Examples:
Add a stdout handler::
>>> logger = logging.getLogger()
>>> logger.addHandler(stdout_hdlr())
>>> logger.info('hello world')
hello world
Attributes:
ENCODING (str): The module encoding
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import sys
import logging
import socket
from os import environ
from logging import handlers as hdlrs
from builtins import *
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
ENCODING = 'utf-8'
module_hdlr = logging.StreamHandler(sys.stdout)
module_logger = logging.getLogger(__name__)
module_logger.addHandler(module_hdlr)
def stdout_hdlr(**kwargs):
"""A standard output log handler
Returns:
New instance of :class:`logging.StreamHandler`
Examples:
>>> stdout_hdlr() # doctest: +ELLIPSIS
<logging.StreamHandler object at 0x...>
"""
return logging.StreamHandler(sys.stdout)
def stderr_hdlr(**kwargs):
"""A standard error log handler
Returns:
New instance of :class:`logging.StreamHandler`
Examples:
>>> stderr_hdlr() # doctest: +ELLIPSIS
<logging.StreamHandler object at 0x...>
"""
return logging.StreamHandler(sys.stderr)
def fileobj_hdlr(f, **kwargs):
"""A file object log handler
Args:
f (obj): A file like object.
Returns:
New instance of :class:`logging.StreamHandler`
Examples:
>>> from io import StringIO
>>> fileobj_hdlr(StringIO()) # doctest: +ELLIPSIS
<logging.StreamHandler object at 0x...>
"""
return logging.StreamHandler(f)
def file_hdlr(filename, mode='a', encoding=ENCODING, delay=False, **kwargs):
"""A file log handler
Args:
filename (string): The logfile name.
mode (string): The file open mode (default: a, i.e., append).
encoding (string): The file encoding (default: the module ENCODING).
delay (bool): Defer file opening until the first call to emit
(default: False).
Returns:
New instance of :class:`logging.FileHandler`
Examples:
>>> from tempfile import NamedTemporaryFile
>>> f = NamedTemporaryFile()
>>> file_hdlr(f.name) # doctest: +ELLIPSIS
<logging.FileHandler object at 0x...>
"""
fkwargs = {'mode': mode, 'encoding': encoding, 'delay': delay}
return logging.FileHandler(filename, **fkwargs)
def socket_hdlr(host='localhost', port=None, tcp=False, **kwargs):
"""A socket log handler
Args:
host (string): The host name (default: localhost).
port (int): The port (default: `logging.handlers` default).
tcp (bool): Create a TCP connection instead of UDP (default: False).
Returns:
New instance of either :class:`logging.handlers.DatagramHandler` or
:class:`logging.handlers.SocketHandler`
Examples:
>>> socket_hdlr() # doctest: +ELLIPSIS
<logging.handlers.DatagramHandler object at 0x...>
>>> socket_hdlr(tcp=True) # doctest: +ELLIPSIS
<logging.handlers.SocketHandler object at 0x...>
"""
if tcp:
def_port = hdlrs.DEFAULT_TCP_LOGGING_PORT
handler = hdlrs.SocketHandler
else:
def_port = hdlrs.DEFAULT_UDP_LOGGING_PORT
handler = hdlrs.DatagramHandler
address = (host, port or def_port)
return handler(*address)
def syslog_hdlr(host='localhost', port=None, tcp=False, **kwargs):
"""A syslog log handler
Args:
host (string): The host name (default: localhost). Set to None to use
the platform dependent domain socket.
port (int): The port (default: `logging.handlers` default).
tcp (bool): Create a TCP connection instead of UDP (default: False).
Returns:
New instance of :class:`logging.handlers.SysLogHandler`
Examples:
>>> syslog_hdlr() # doctest: +ELLIPSIS
<logging.handlers.SysLogHandler object at 0x...>
"""
# http://stackoverflow.com/a/13874620/408556
DEF_SOCKETS = {'linux2': '/dev/log', 'darwin': '/var/run/syslog'}
if tcp:
def_port = hdlrs.SYSLOG_TCP_PORT
socktype = socket.SOCK_STREAM
else:
def_port = hdlrs.SYSLOG_UDP_PORT
socktype = socket.SOCK_DGRAM
if kwargs.get('address'):
address = kwargs['address']
elif host:
address = (host, port or def_port)
elif sys.platform in DEF_SOCKETS:
address = DEF_SOCKETS[sys.platform]
else:
msg = 'Domain socket location for {} is not supported.'
raise ValueError(msg.format(sys.platform))
if kwargs.get('facility'):
facility = kwargs['facility']
elif kwargs.get('local_num') and 8 > kwargs['local_num'] >= 0:
# http://unix.stackexchange.com/a/146993
value = 'LOG_LOCAL{}'.format(kwargs['facility'])
facility = getattr(hdlrs.SysLogHandler, value)
else:
facility = hdlrs.SysLogHandler.LOG_USER
return hdlrs.SysLogHandler(address, facility=facility, socktype=socktype)
def buffered_hdlr(target=None, capacity=4096, level='error', **kwargs):
"""A memory buffered log handler
Args:
target (obj): The target logger handler (default stdout).
capacity (int): The buffer size (default 4096).
level (string): The min event level required to flush buffer
(default: error).
Returns:
New instance of :class:`logging.handlers.MemoryHandler`
Examples:
>>> buffered_hdlr() # doctest: +ELLIPSIS
<logging.handlers.MemoryHandler object at 0x...>
"""
target = target or logging.StreamHandler(sys.stdout)
return hdlrs.MemoryHandler(capacity, level.upper(), target)
def webhook_hdlr(url, **kwargs):
"""A web log handler
Args:
url (string): The logging endpoint.
Kwargs:
get (bool): Use a GET request instead of POST (default: False).
Returns:
New instance of :class:`logging.handlers.HTTPHandler`
Examples:
>>> webhook_hdlr('http://api.mysite.com/log') # doctest: +ELLIPSIS
<logging.handlers.HTTPHandler object at 0x...>
"""
parsed = urlparse(url)
secure = parsed.scheme == 'https'
method = 'GET' if kwargs.get('get') else 'POST'
args = (parsed.netloc, parsed.path)
try:
hdlr = hdlrs.HTTPHandler(*args, method=method, secure=secure)
except TypeError:
hdlr = hdlrs.HTTPHandler(*args, method=method)
return hdlr
def email_hdlr(subject=None, **kwargs):
"""An email log handler
Args:
subject (str): The email subject (default: You've got mail.).
kwargs(dict): Keyword arguments.
Kwargs:
host (str): The email server host (default: localhost).
port (str): The email sever port (default: None).
sender (str): The email sender (default: the system username at gmail).
recipients (List[str]): The email recipients (default: the system
username at gmail).
username (str): The email sever username (default: None).
password (str): The email sever password (default: None).
Returns:
New instance of :class:`logging.handlers.SMTPHandler`
Examples:
>>> email_hdlr('hello world') # doctest: +ELLIPSIS
<logging.handlers.SMTPHandler object at 0x...>
"""
host = kwargs.get('host', 'localhost')
port = kwargs.get('port')
address = (host, port) if port else host
sender = kwargs.get('sender', '%s@gmail.com' % environ.get('USER'))
def_recipient = '%s@gmail.com' % environ.get('USER')
recipients = kwargs.get('recipients', [def_recipient])
subject = kwargs.get('subject', "You've got mail")
username = kwargs.get('username')
password = kwargs.get('password')
args = (address, sender, recipients, subject)
credentials = (username, password) if username or password else None
return hdlrs.SMTPHandler(*args, credentials=credentials)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
43907,
25,
1509,
28,
19,
25,
912,
28,
19,
25,
11201,
392,
8658,
198,
198,
37811,
198,
9078,
70,
24076,
13,
4993,
8116,
198,
15116,
8728,
4907,
93,
198,
198,
11187... | 2.517156 | 3,235 |
import json
from PySimpleGUI.PySimpleGUI import Titlebar
from cryptography.fernet import Fernet
from time import sleep
import PySimpleGUI as sg
import os
| [
11748,
33918,
201,
198,
6738,
9485,
26437,
40156,
13,
20519,
26437,
40156,
1330,
11851,
5657,
201,
198,
6738,
45898,
13,
69,
1142,
316,
1330,
38982,
316,
201,
198,
6738,
640,
1330,
3993,
201,
198,
11748,
9485,
26437,
40156,
355,
264,
70... | 3.354167 | 48 |
"""
This script is a basic look at the lifecycle of a Python class. A python class
has 4 main stages:
1. Definition
2. Initialization
3. Access and Manipulation
4. Destruction
"""
print('script start')
print('class access and manipulation start')
print(basic.prop1)
print(basic.cls_method())
print('class access and manipulation end')
print('Stage 2: object creation start')
b = basic()
c = basic()
print('Stage 2: object creatin end')
print('Stage 3: object access and manipulation start')
print(b.my_method())
print(b.prop2)
print(b.my_method())
print('Stage 3: object access and manipulation end')
print('Stage 4: object destruction start')
del(b)
c = 12
print('Stage 4: object destruction end')
print('script end')
| [
37811,
198,
1212,
4226,
318,
257,
4096,
804,
379,
262,
3868,
47510,
286,
257,
11361,
1398,
13,
220,
317,
21015,
1398,
198,
10134,
604,
1388,
9539,
25,
198,
198,
16,
13,
30396,
198,
17,
13,
20768,
1634,
198,
18,
13,
8798,
290,
35045,... | 3.392523 | 214 |
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# ----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# Headlines Timing
#
# Author:
# ----------------------------------------------------------------------------
scripts = [
{
"commentary": "Clear out all Hazards Table and Grids.",
"name": "Hazard_CrossingYear_0",
"productType": None,
"clearHazardsTable": 1,
"checkStrings": [],
},
{
"commentary": """<*---1---> <---2---> |
Two events in December. Current time at start of first hazard.
ETNs for December events 0001 and 0002.""",
"name": "Hazard_CrossingYear_1a",
"drtTime": "20091230_1200",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -36, -24, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", -18, -6, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 301200",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"700 AM EST Wed Dec 30 2009",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-302000-",
"/O.NEW.KTBW.DU.Y.0001.091230T1200Z-091231T0000Z/",
"/O.NEW.KTBW.DU.Y.0002.091231T0600Z-091231T1800Z/",
"Coastal Pasco-",
"700 AM EST Wed Dec 30 2009",
"...BLOWING DUST ADVISORY IN EFFECT UNTIL 7 PM EST THIS EVENING...",
"...BLOWING DUST ADVISORY IN EFFECT FROM 1 AM TO 1 PM EST THURSDAY...",
# "The National Weather Service in Tampa Bay Ruskin has issued a Blowing Dust Advisory, which is in effect until 7 PM EST this evening. A Blowing Dust Advisory has also been issued. This Blowing Dust Advisory is in effect from 1 AM to 1 PM EST Thursday.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<---1---> <---2---> |* <---1--->
Time has marched forward into the new year and forecaster issues
another DU.Y to start in the future. Events from last year
are 'old' and ignored. Event from new year given ETN of 0001.""",
"name": "Hazard_CrossingYear_1b",
"drtTime": "20100101_0200",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -36, -24, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", -18, -6, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 6, 12, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 010200",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"900 PM EST Thu Dec 31 2009",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-011000-",
"/O.NEW.KTBW.DU.Y.0001.100101T0600Z-100101T1200Z/",
"Coastal Pasco-",
"900 PM EST Thu Dec 31 2009",
"...BLOWING DUST ADVISORY IN EFFECT FROM 1 AM TO 7 AM EST FRIDAY...",
"The National Weather Service in Tampa Bay Ruskin has issued a Blowing Dust Advisory, which is in effect from 1 AM to 7 AM EST Friday.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<---1---> <---2---> | <----1--->*
Forecaster runs formatter within 30 minutes of event ending, thus
generating an EXP event.""",
"name": "Hazard_CrossingYear_1c",
"drtTime": "20100101_1200",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
],
"checkStrings": [
"WWUS72 KTBW 011200",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"700 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-011300-",
"/O.EXP.KTBW.DU.Y.0001.000000T0000Z-100101T1200Z/",
"Coastal Pasco-",
"700 AM EST Fri Jan 1 2010",
"...BLOWING DUST ADVISORY HAS EXPIRED...",
# "The Blowing Dust Advisory is no longer in effect.",
"$$",
],
},
{
"commentary": "Deleting hazard grids.",
"name": "Hazard_CrossingYear_Cleanup1",
"productType": None,
"checkStrings": [],
"clearHazardsTable": 1,
},
{
"commentary": """<*----1----> <---2--|--->
Forecaster issues two events in December. The second event crosses
into the new year. ETNs are 0001 and 0002 representing the year
they were issued.""",
"name": "Hazard_CrossingYear_2a",
"drtTime": "20091230_1200",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -36, -24, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", -18, 6, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 301200",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"700 AM EST Wed Dec 30 2009",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-302000-",
"/O.NEW.KTBW.DU.Y.0001.091230T1200Z-091231T0000Z/",
"/O.NEW.KTBW.DU.Y.0002.091231T0600Z-100101T0600Z/",
"Coastal Pasco-",
"700 AM EST Wed Dec 30 2009",
"...BLOWING DUST ADVISORY IN EFFECT UNTIL 7 PM EST THIS EVENING...",
"...BLOWING DUST ADVISORY IN EFFECT FROM 1 AM THURSDAY TO 1 AM EST FRIDAY...",
# "The National Weather Service in Tampa Bay Ruskin has issued a Blowing Dust Advisory, which is in effect until 7 PM EST this evening. A Blowing Dust Advisory has also been issued. This Blowing Dust Advisory is in effect from 1 AM Thursday to 1 AM EST Friday.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<----1----> <---2--|-*-->
Time marches forward into the new year, into the middle of the 2nd
event. The ETN remains at 0002 for this event.""",
"name": "Hazard_CrossingYear_2b",
"drtTime": "20100101_0200",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -36, -24, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", -18, 6, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 010200",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"900 PM EST Thu Dec 31 2009",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-010600-",
"/O.CON.KTBW.DU.Y.0002.000000T0000Z-100101T0600Z/",
"Coastal Pasco-",
"900 PM EST Thu Dec 31 2009",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT UNTIL 1 AM EST FRIDAY...",
# "A Blowing Dust Advisory remains in effect until 1 AM EST Friday.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$ ",
],
},
{
"commentary": """<----1----> <---2--|--*-> <----1---->
In the middle of the 2nd event ETN0002 in the new year, the forecaster
issues another event, which will get ETN 0001 since it is the first
event issued in that year.""",
"name": "Hazard_CrossingYear_2c",
"drtTime": "20100101_0400",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -36, -24, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", -18, 6, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 7, 12, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 010400",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"1100 PM EST Thu Dec 31 2009",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-011200-",
"/O.NEW.KTBW.DU.Y.0001.100101T0700Z-100101T1200Z/",
"/O.CON.KTBW.DU.Y.0002.000000T0000Z-100101T0600Z/",
"Coastal Pasco-",
"1100 PM EST Thu Dec 31 2009",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT UNTIL 1 AM EST FRIDAY...",
"...BLOWING DUST ADVISORY IN EFFECT FROM 2 AM TO 7 AM EST FRIDAY...",
# "The National Weather Service in Tampa Bay Ruskin has issued a Blowing Dust Advisory, which is in effect from 2 AM to 7 AM EST Friday. A Blowing Dust Advisory remains in effect until 1 AM EST Friday.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<----1----> <---2--|---> * <----1---->
Forecaster runs the formatter within 30 minutes of the ending time
of the ETN 0002 event, which generates an EXP. The ETN 0001 event
has not yet started.""",
"name": "Hazard_CrossingYear_2d",
"drtTime": "20100101_0629", #RWA changed to 0629 since logic does not generate EXP for >= 30 minutes past
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -36, -24, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", -18, 6, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 7, 12, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 010629",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"129 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-011200-",
"/O.EXP.KTBW.DU.Y.0002.000000T0000Z-100101T0600Z/",
"/O.CON.KTBW.DU.Y.0001.100101T0700Z-100101T1200Z/",
"Coastal Pasco-",
"129 AM EST Fri Jan 1 2010",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT UNTIL 7 AM EST THIS MORNING...",
"...BLOWING DUST ADVISORY HAS EXPIRED...",
# "The Blowing Dust Advisory is no longer in effect. A Blowing Dust Advisory remains in effect until 7 AM EST this morning.",
"$$ ",
],
},
{
"commentary": """<----1----> <---2--|---> <----1----*>
We are now in the middle of the ETN 0001 event in the new year,
and within 30 minutes of its ending time, thus an EXP is generated.""",
"name": "Hazard_CrossingYear_2e",
"drtTime": "20100101_1145",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -36, -24, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", -18, 6, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 7, 12, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 011145",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"645 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-011245-",
"/O.EXP.KTBW.DU.Y.0001.000000T0000Z-100101T1200Z/",
"Coastal Pasco-",
"645 AM EST Fri Jan 1 2010",
"...BLOWING DUST ADVISORY WILL EXPIRE AT 7 AM EST THIS MORNING...",
# "The Blowing Dust Advisory will expire at 7 AM EST this morning.",
"$$",
],
},
{
"commentary": "Deleting hazard grids.",
"name": "Hazard_CrossingYear_Cleanup2",
"productType": None,
"checkStrings": [],
"clearHazardsTable": 1,
},
{
"commentary": """* <--1---> <-----2---|----->
This test scenario checks extending an event from a prior year. The
initial setup is two events in the previous year with the 2nd event
ending in the new year. """,
"name": "Hazard_CrossingYear_3a",
"drtTime": "20091230_0011",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -36, -28, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", -24, 24, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 300011",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"711 PM EST Tue Dec 29 2009",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-300815-",
"/O.NEW.KTBW.DU.Y.0001.091230T1200Z-091230T2000Z/",
"/O.NEW.KTBW.DU.Y.0002.091231T0000Z-100102T0000Z/",
"Coastal Pasco-",
"711 PM EST Tue Dec 29 2009",
"...BLOWING DUST ADVISORY IN EFFECT FROM 7 AM TO 3 PM EST WEDNESDAY...",
"...BLOWING DUST ADVISORY IN EFFECT FROM 7 PM WEDNESDAY TO 7 PM EST FRIDAY...",
# "The National Weather Service in Tampa Bay Ruskin has issued a Blowing Dust Advisory, which is in effect from 7 AM to 3 PM EST Wednesday. A Blowing Dust Advisory has also been issued. This Blowing Dust Advisory is in effect from 7 PM Wednesday to 7 PM EST Friday.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution. ",
"$$",
],
},
{
"commentary": """<--1---> <-----2---|---*--><-----2------>
The current time is in the new year, in the middle of the 2nd event.
Forecaster extends the ending time. ETN remains at 0002. EXT code
generated.""",
"name": "Hazard_CrossingYear_3b",
"drtTime": "20100101_1823",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -24, 24, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 24, 29, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 011823",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"123 PM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-020230-",
"/O.EXT.KTBW.DU.Y.0002.000000T0000Z-100102T0500Z/",
"Coastal Pasco-",
"123 PM EST Fri Jan 1 2010",
"...BLOWING DUST ADVISORY NOW IN EFFECT UNTIL MIDNIGHT EST TONIGHT...",
# "The Blowing Dust Advisory is now in effect until midnight EST tonight.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<--1---> <-----2---|-----><*-----2------> <-1-> <-2->
We are now in the extended section of the second event. Forecaster
adds two new events further into the future. Since this are the first
event of this type issued in this new year, the ETNs are 0001 and 0002.""",
"name": "Hazard_CrossingYear_3c",
"drtTime": "20100102_0000",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -24, 24, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 24, 29, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 36, 55, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 77, 89, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 020000",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"700 PM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-020800-",
"/O.NEW.KTBW.DU.Y.0001.100102T1200Z-100103T0700Z/",
"/O.NEW.KTBW.DU.Y.0002.100104T0500Z-100104T1700Z/",
"/O.CON.KTBW.DU.Y.0002.000000T0000Z-100102T0500Z/",
"Coastal Pasco-",
"700 PM EST Fri Jan 1 2010",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT UNTIL MIDNIGHT EST TONIGHT...",
"...BLOWING DUST ADVISORY IN EFFECT FROM 7 AM SATURDAY TO 2 AM EST SUNDAY...",
"...BLOWING DUST ADVISORY IN EFFECT FROM MIDNIGHT SUNDAY NIGHT TO NOON EST MONDAY...",
# "The National Weather Service in Tampa Bay Ruskin has issued a Blowing Dust Advisory, which is in effect from 7 AM Saturday to 2 AM EST Sunday. A Blowing Dust Advisory has also been issued. This Blowing Dust Advisory is in effect from midnight Sunday night to noon EST Monday. A Blowing Dust Advisory remains in effect until midnight EST tonight.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<--1---> <-----2---|-----><*-----2------> <-1-> <-2->
No time changes or grid changes from previous step, thus we get CONs
in the product. Note the confusing VTEC.""",
"name": "Hazard_CrossingYear_3d",
"drtTime": "20100102_0000",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -24, 24, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 24, 29, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 36, 55, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 77, 89, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 020000",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"700 PM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-020800-",
"/O.CON.KTBW.DU.Y.0002.000000T0000Z-100102T0500Z/",
"/O.CON.KTBW.DU.Y.0001.100102T1200Z-100103T0700Z/",
"/O.CON.KTBW.DU.Y.0002.100104T0500Z-100104T1700Z/",
"Coastal Pasco-",
"700 PM EST Fri Jan 1 2010",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT UNTIL MIDNIGHT EST TONIGHT...",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT FROM 7 AM SATURDAY TO 2 AM EST SUNDAY...",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT FROM MIDNIGHT SUNDAY NIGHT TO NOON EST MONDAY...",
# "A Blowing Dust Advisory remains in effect until midnight EST tonight. A Blowing Dust Advisory remains in effect from 7 AM Saturday to 2 AM EST Sunday. A Blowing Dust Advisory remains in effect from midnight Sunday night to noon EST Monday.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<--1---> <-----2---|-----><-----2------*> <-1-> <-2->
Formatter run within 30 minutes of the event ending time. EXP is
generated. The two new events for this year remain as CON with ETNs
of 0001 and 0002.""",
"name": "Hazard_CrossingYear_3e",
"drtTime": "20100102_0500",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -24, 24, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 24, 29, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 36, 55, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 77, 89, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 020500",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"1200 AM EST Sat Jan 2 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-021300-",
"/O.EXP.KTBW.DU.Y.0002.000000T0000Z-100102T0500Z/",
"/O.CON.KTBW.DU.Y.0001.100102T1200Z-100103T0700Z/",
"/O.CON.KTBW.DU.Y.0002.100104T0500Z-100104T1700Z/",
"Coastal Pasco-",
"1200 AM EST Sat Jan 2 2010",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT FROM 7 AM THIS MORNING TO 2 AM EST SUNDAY...",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT FROM MIDNIGHT SUNDAY NIGHT TO NOON EST MONDAY...",
"...BLOWING DUST ADVISORY HAS EXPIRED...",
# "The Blowing Dust Advisory is no longer in effect. A Blowing Dust Advisory remains in effect from 7 AM this morning to 2 AM EST Sunday. A Blowing Dust Advisory remains in effect from midnight Sunday night to noon EST Monday.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": "Deleting hazard grids.",
"name": "Hazard_CrossingYear_Cleanup3",
"productType": None,
"checkStrings": [],
"clearHazardsTable": 1,
},
{
"commentary": """<-*-------1------|---->
Forecaster creates event that spans the year. ETN assigned 0001
based on 2009's pool of ETNs.""",
"name": "Hazard_CrossingYear_4a",
"drtTime": "20091231_0211",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -24, 24, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 310211",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"911 PM EST Wed Dec 30 2009",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-311015-",
"/O.NEW.KTBW.DU.Y.0001.091231T0211Z-100102T0000Z/",
"Coastal Pasco-",
"911 PM EST Wed Dec 30 2009",
"...BLOWING DUST ADVISORY IN EFFECT UNTIL 7 PM EST FRIDAY...",
# "The National Weather Service in Tampa Bay Ruskin has issued a Blowing Dust Advisory, which is in effect until 7 PM EST Friday.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<--------1------|*----> <-1->
Current time is Jan 1 and we are in the middle of the event issued
last year. Forecaster issues a new event, it gets assigned ETN 0001
since it is the first event for the new year.""",
"name": "Hazard_CrossingYear_4b",
"drtTime": "20100101_0000",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -24, 24, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 30, 38, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 010000",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"700 PM EST Thu Dec 31 2009",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-010800-",
"/O.NEW.KTBW.DU.Y.0001.100102T0600Z-100102T1400Z/",
"/O.CON.KTBW.DU.Y.0001.000000T0000Z-100102T0000Z/",
"Coastal Pasco-",
"700 PM EST Thu Dec 31 2009",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT UNTIL 7 PM EST FRIDAY...",
"...BLOWING DUST ADVISORY IN EFFECT FROM 1 AM TO 9 AM EST SATURDAY...",
# "The National Weather Service in Tampa Bay Ruskin has issued a Blowing Dust Advisory, which is in effect from 1 AM to 9 AM EST Saturday. A Blowing Dust Advisory remains in effect until 7 PM EST Friday.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<--------1------|--*--> <-1-> <------2------->
Still in the middle of the event issued the previous year. Forecaster
issues another event, which gets assigned an ETN of 0002.""",
"name": "Hazard_CrossingYear_4c",
"drtTime": "20100101_2200",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -24, 24, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 26, 28, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 30, 38, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 012200",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"500 PM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-020600-",
"/O.NEW.KTBW.DU.Y.0002.100102T0200Z-100102T0400Z/",
"/O.CON.KTBW.DU.Y.0001.000000T0000Z-100102T0000Z/",
"/O.CON.KTBW.DU.Y.0001.100102T0600Z-100102T1400Z/",
"Coastal Pasco-",
"500 PM EST Fri Jan 1 2010",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT UNTIL 7 PM EST THIS EVENING...",
"...BLOWING DUST ADVISORY IN EFFECT FROM 9 PM TO 11 PM EST THIS EVENING...",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT FROM 1 AM TO 9 AM EST SATURDAY...",
# "The National Weather Service in Tampa Bay Ruskin has issued a Blowing Dust Advisory, which is in effect from 9 PM to 11 PM EST this evening. A Blowing Dust Advisory remains in effect until 7 PM EST this evening. A Blowing Dust Advisory remains in effect from 1 AM to 9 AM EST Saturday.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<--------1------|----> <*-1-> <------2------->
We are in the 1st event issued for the current year. We get two CONs
for the events since no changes were made to the grids.""",
"name": "Hazard_CrossingYear_4d",
"drtTime": "20100102_0300",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -24, 24, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 26, 28, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 30, 38, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 020300",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"1000 PM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-021100-",
"/O.CON.KTBW.DU.Y.0002.000000T0000Z-100102T0400Z/",
"/O.CON.KTBW.DU.Y.0001.100102T0600Z-100102T1400Z/",
"Coastal Pasco-",
"1000 PM EST Fri Jan 1 2010",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT UNTIL 11 PM EST THIS EVENING...",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT FROM 1 AM TO 9 AM EST SATURDAY...",
# "A Blowing Dust Advisory remains in effect until 11 PM EST this evening. A Blowing Dust Advisory remains in effect from 1 AM to 9 AM EST Saturday.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<--------1------|----> <-1-> * <------2------->
The first event for the new year is over. The second event for the
new year has not yet started. Result is a CON code for the 2nd event
and no mention of the first event.""",
"name": "Hazard_CrossingYear_4e",
"drtTime": "20100102_0500",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -24, 24, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 26, 28, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 30, 38, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 020500",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"1200 AM EST Sat Jan 2 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-021300-",
"/O.CON.KTBW.DU.Y.0001.100102T0600Z-100102T1400Z/",
"Coastal Pasco-",
"1200 AM EST Sat Jan 2 2010",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT UNTIL 9 AM EST THIS MORNING...",
# "A Blowing Dust Advisory remains in effect until 9 AM EST this morning.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<--------1------|----> <-1-> <*------2------->
Time has progressed to be start of the 2nd event for the year. Start
time for VTEC is all zeros to indicate event in progress. CON code
since no other changes to time.""",
"name": "Hazard_CrossingYear_4f",
"drtTime": "20100102_0600",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -24, 24, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 26, 28, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 30, 38, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 020600",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"100 AM EST Sat Jan 2 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-021400-",
"/O.CON.KTBW.DU.Y.0001.000000T0000Z-100102T1400Z/",
"Coastal Pasco-",
"100 AM EST Sat Jan 2 2010",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT UNTIL 9 AM EST THIS MORNING...",
# "A Blowing Dust Advisory remains in effect until 9 AM EST this morning.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<--------1------|----> <-1-> <------2-----*-->
Farther into the 2nd event for the year. CON code generated.""",
"name": "Hazard_CrossingYear_4g",
"drtTime": "20100102_1300",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -24, 24, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 26, 28, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 30, 38, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 021300",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"800 AM EST Sat Jan 2 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-021400-",
"/O.CON.KTBW.DU.Y.0001.000000T0000Z-100102T1400Z/",
"Coastal Pasco-",
"800 AM EST Sat Jan 2 2010",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT UNTIL 9 AM EST THIS MORNING...",
# "A Blowing Dust Advisory remains in effect until 9 AM EST this morning.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": "Deleting hazard grids.",
"name": "Hazard_CrossingYear_Cleanup4",
"productType": None,
"checkStrings": [],
"clearHazardsTable": 1,
},
{
"commentary": """<*---1--->|
This scenario involves multiple zones and crossing years. This first
step simply sets up an event starting in December and ending at 0z
Jan 1.""",
"name": "Hazard_CrossingYear_5a",
"drtTime": "20091231_0211",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -24, 0, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 310211",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"911 PM EST Wed Dec 30 2009",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-311015-",
"/O.NEW.KTBW.DU.Y.0001.091231T0211Z-100101T0000Z/",
"Coastal Pasco-",
"911 PM EST Wed Dec 30 2009",
"...BLOWING DUST ADVISORY IN EFFECT UNTIL 7 PM EST THURSDAY...",
# "The National Weather Service in Tampa Bay Ruskin has issued a Blowing Dust Advisory, which is in effect until 7 PM EST Thursday.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<---1--->*|<---1-----> and |<---1--->
This step isn't decoded by the VTEC decoder allowing us to explore
different scenarios. This scenario is the current time is right before
the start of the new year and the forecaster extends the current
event until 7z for one zone plus a new zone (EXB coding), and until
9z for the original zone (EXT coding).""",
"name": "Hazard_CrossingYear_5b",
"drtTime": "20091231_2359",
"decodeVTEC": 0, #don't decode the VTEC
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -24, 0, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 0, 7, "DU.Y", ["FLZ149","FLZ050"]),
("Fcst", "Hazards", "DISCRETE", 7, 9, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 312359",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"659 PM EST Thu Dec 31 2009",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-010700-",
"/O.EXB.KTBW.DU.Y.0001.100101T0000Z-100101T0700Z/",
"Pinellas-",
"659 PM EST Thu Dec 31 2009",
"...BLOWING DUST ADVISORY IN EFFECT UNTIL 2 AM EST FRIDAY...",
"The National Weather Service in Tampa Bay Ruskin has issued a Blowing Dust Advisory, which is in effect until 2 AM EST Friday.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
"FLZ149-010800-",
"/O.EXT.KTBW.DU.Y.0001.000000T0000Z-100101T0900Z/",
"Coastal Pasco-",
"659 PM EST Thu Dec 31 2009",
"...BLOWING DUST ADVISORY NOW IN EFFECT UNTIL 4 AM EST FRIDAY...",
# "The Blowing Dust Advisory is now in effect until 4 AM EST Friday.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<---1--->|*<---1-----> and |<---1--->
We continue this scenario and run the decoder this time.
This scenario is the current time is at the beginning of the year.
The forecaster extends the current event that just ended at 0000z
until 9z for the original zone, and adds a zone for the event until
7z. Result is two NEW events with ETN of 0001 for the new year.""",
"name": "Hazard_CrossingYear_5c",
"drtTime": "20100101_0000",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -24, 0, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 0, 7, "DU.Y", ["FLZ149","FLZ050"]),
("Fcst", "Hazards", "DISCRETE", 7, 9, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 010000",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"700 PM EST Thu Dec 31 2009",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-010800-",
"/O.EXP.KTBW.DU.Y.0001.000000T0000Z-100101T0000Z/",
"/O.NEW.KTBW.DU.Y.0001.100101T0000Z-100101T0900Z/",
"Coastal Pasco-",
"700 PM EST Thu Dec 31 2009",
"...BLOWING DUST ADVISORY IN EFFECT UNTIL 4 AM EST FRIDAY...",
"...BLOWING DUST ADVISORY HAS EXPIRED...",
# "The National Weather Service in Tampa Bay Ruskin has issued a Blowing Dust Advisory, which is in effect until 4 AM EST Friday. The Blowing Dust Advisory is no longer in effect.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
"FLZ050-010700-",
"/O.NEW.KTBW.DU.Y.0001.100101T0000Z-100101T0700Z/",
"Pinellas-",
"700 PM EST Thu Dec 31 2009",
"...BLOWING DUST ADVISORY IN EFFECT UNTIL 2 AM EST FRIDAY...",
"The National Weather Service in Tampa Bay Ruskin has issued a Blowing Dust Advisory, which is in effect until 2 AM EST Friday.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<---1--->|<---1---*--> and |<---1--->*
Time continues until the event expires in the one zone, but continues
another two hours for the second zone. EXP generated in one zone and
CON in the other zone.""",
"name": "Hazard_CrossingYear_5d",
"drtTime": "20100101_0715",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -24, 0, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 0, 7, "DU.Y", ["FLZ149","FLZ050"]),
("Fcst", "Hazards", "DISCRETE", 7, 9, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 010715",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"215 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-010815-",
"/O.EXP.KTBW.DU.Y.0001.000000T0000Z-100101T0700Z/",
"Pinellas-",
"215 AM EST Fri Jan 1 2010",
"...BLOWING DUST ADVISORY HAS EXPIRED...",
# "The Blowing Dust Advisory is no longer in effect.",
"$$",
"FLZ149-010900-",
"/O.CON.KTBW.DU.Y.0001.000000T0000Z-100101T0900Z/",
"Coastal Pasco-",
"215 AM EST Fri Jan 1 2010",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT UNTIL 4 AM EST EARLY THIS MORNING...",
# "A Blowing Dust Advisory remains in effect until 4 AM EST early this morning.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<---1--->|, <---1----*-> and |<---1--->*
Time is after the first event, but still in the second event. CON
is generated.""",
"name": "Hazard_CrossingYear_5e",
"drtTime": "20100101_0829",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -24, 0, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 0, 7, "DU.Y", ["FLZ149","FLZ050"]),
("Fcst", "Hazards", "DISCRETE", 7, 9, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 010829",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"329 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-010930-",
"/O.CON.KTBW.DU.Y.0001.000000T0000Z-100101T0900Z/",
"Coastal Pasco-",
"329 AM EST Fri Jan 1 2010",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT UNTIL 4 AM EST EARLY THIS MORNING...",
# "A Blowing Dust Advisory remains in effect until 4 AM EST early this morning.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<---1--->|<---1-----*> and |<---1---> *
Forecaster removes the grid prior to the ending time of the remaining
event, which generates a CAN.""",
"name": "Hazard_CrossingYear_5f",
"drtTime": "20100101_0835",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
],
"checkStrings": [
"WWUS72 KTBW 010835",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"335 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-010945-",
"/O.CAN.KTBW.DU.Y.0001.000000T0000Z-100101T0900Z/",
"Coastal Pasco-",
"335 AM EST Fri Jan 1 2010",
"...BLOWING DUST ADVISORY IS CANCELLED...",
"The National Weather Service in Tampa Bay Ruskin has cancelled the Blowing Dust Advisory.",
"$$",
],
},
{
"commentary": "Deleting hazard grids.",
"name": "Hazard_CrossingYear_Cleanup5",
"productType": None,
"checkStrings": [],
"clearHazardsTable": 1,
},
{
"commentary": """<*---1--->|
This scenario tests another two zone event. The first event is issued
last year (for one zone) and extends until 0000z on Jan 1st.""",
"name": "Hazard_CrossingYear_6a",
"drtTime": "20091231_0211",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -24, 0, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 310211",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"911 PM EST Wed Dec 30 2009",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-311015-",
"/O.NEW.KTBW.DU.Y.0001.091231T0211Z-100101T0000Z/",
"Coastal Pasco-",
"911 PM EST Wed Dec 30 2009",
"...BLOWING DUST ADVISORY IN EFFECT UNTIL 7 PM EST THURSDAY...",
"The National Weather Service in Tampa Bay Ruskin has issued a Blowing Dust Advisory, which is in effect until 7 PM EST Thursday.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<---1--->*|<---1-----> and |<---1--->
Time is just before 0000z Jan 1st. Forecaster extends the event until
9z for the same zone, and adds a second zone starting at 0z until 7z.
EXB and EXT are generated for the zones.""",
"name": "Hazard_CrossingYear_6b",
"drtTime": "20091231_2359",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -24, 0, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 0, 7, "DU.Y", ["FLZ149","FLZ050"]),
("Fcst", "Hazards", "DISCRETE", 7, 9, "DU.Y", ["FLZ149"]),
],
"checkStrings": [
"WWUS72 KTBW 312359",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"659 PM EST Thu Dec 31 2009",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-010700-",
"/O.EXB.KTBW.DU.Y.0001.100101T0000Z-100101T0700Z/",
"Pinellas-",
"659 PM EST Thu Dec 31 2009",
"...BLOWING DUST ADVISORY IN EFFECT UNTIL 2 AM EST FRIDAY...",
"The National Weather Service in Tampa Bay Ruskin has issued a Blowing Dust Advisory, which is in effect until 2 AM EST Friday.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
"FLZ149-010800-",
"/O.EXT.KTBW.DU.Y.0001.000000T0000Z-100101T0900Z/",
"Coastal Pasco-",
"659 PM EST Thu Dec 31 2009",
"...BLOWING DUST ADVISORY NOW IN EFFECT UNTIL 4 AM EST FRIDAY...",
# "The Blowing Dust Advisory is now in effect until 4 AM EST Friday.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<---1--->|<---1---*--> <--1--> and |<---1--->* <---1--->
We are at the end of the first event in one zone, but in the middle of
the event in the second zone. Forecaster adds a new event with starting
times of 14z for both zones and continuing until 19z in one zone and 0z
for the second zone. ETNs of 0001 are generated for the new event since
it is the first event issued for the new year.""",
"name": "Hazard_CrossingYear_6c",
"drtTime": "20100101_0715",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -24, 0, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 0, 7, "DU.Y", ["FLZ149","FLZ050"]),
("Fcst", "Hazards", "DISCRETE", 7, 9, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 14, 19, "DU.Y", ["FLZ149","FLZ050"]),
("Fcst", "Hazards", "DISCRETE", 19, 24, "DU.Y", ["FLZ050"]),
],
"checkStrings": [
"WWUS72 KTBW 010715",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"215 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-011515-",
"/O.EXP.KTBW.DU.Y.0001.000000T0000Z-100101T0700Z/",
"/O.NEW.KTBW.DU.Y.0001.100101T1400Z-100102T0000Z/",
"Pinellas-",
"215 AM EST Fri Jan 1 2010",
"...BLOWING DUST ADVISORY IN EFFECT FROM 9 AM THIS MORNING TO 7 PM EST THIS EVENING...",
"...BLOWING DUST ADVISORY HAS EXPIRED...",
# "The National Weather Service in Tampa Bay Ruskin has issued a Blowing Dust Advisory, which is in effect from 9 AM this morning to 7 PM EST this evening. The Blowing Dust Advisory is no longer in effect.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
"FLZ149-011515-",
"/O.NEW.KTBW.DU.Y.0001.100101T1400Z-100101T1900Z/",
"/O.CON.KTBW.DU.Y.0001.000000T0000Z-100101T0900Z/",
"Coastal Pasco-",
"215 AM EST Fri Jan 1 2010",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT UNTIL 4 AM EST EARLY THIS MORNING...",
"...BLOWING DUST ADVISORY IN EFFECT FROM 9 AM THIS MORNING TO 2 PM EST THIS AFTERNOON...",
# "The National Weather Service in Tampa Bay Ruskin has issued a Blowing Dust Advisory, which is in effect from 9 AM this morning to 2 PM EST this afternoon. A Blowing Dust Advisory remains in effect until 4 AM EST early this morning.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<---1--->| <---1----*-> <--1--> and |<---1--->* <--1-->
Event within 30 minutes of ending time in one zone, still ongoing in
other zone. No changes for the new event.""",
"name": "Hazard_CrossingYear_6d",
"drtTime": "20100101_0845",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", -24, 0, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 0, 7, "DU.Y", ["FLZ149","FLZ050"]),
("Fcst", "Hazards", "DISCRETE", 7, 9, "DU.Y", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 14, 19, "DU.Y", ["FLZ149","FLZ050"]),
("Fcst", "Hazards", "DISCRETE", 19, 24, "DU.Y", ["FLZ050"]),
],
"checkStrings": [
"WWUS72 KTBW 010845",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"345 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-011645-",
"/O.EXP.KTBW.DU.Y.0001.000000T0000Z-100101T0900Z/",
"/O.CON.KTBW.DU.Y.0001.100101T1400Z-100101T1900Z/",
"Coastal Pasco-",
"345 AM EST Fri Jan 1 2010",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT FROM 9 AM THIS MORNING TO 2 PM EST THIS AFTERNOON...",
"...BLOWING DUST ADVISORY WILL EXPIRE AT 4 AM EST EARLY THIS MORNING...",
# "The Blowing Dust Advisory will expire at 4 AM EST early this morning. A Blowing Dust Advisory remains in effect from 9 AM this morning to 2 PM EST this afternoon.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
"FLZ050-011645-",
"/O.CON.KTBW.DU.Y.0001.100101T1400Z-100102T0000Z/",
"Pinellas-",
"345 AM EST Fri Jan 1 2010",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT FROM 9 AM THIS MORNING TO 7 PM EST THIS EVENING...",
# "A Blowing Dust Advisory remains in effect from 9 AM this morning to 7 PM EST this evening.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<---1--->|<---1-----> * <--1--> and |<---1---> * <---1--->
Last event has not started yet. CONs generated.""",
"name": "Hazard_CrossingYear_6e",
"drtTime": "20100101_0914",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 14, 19, "DU.Y", ["FLZ149","FLZ050"]),
("Fcst", "Hazards", "DISCRETE", 19, 24, "DU.Y", ["FLZ050"]),
],
"checkStrings": [
"WWUS72 KTBW 010914",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"414 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-011715-",
"/O.CON.KTBW.DU.Y.0001.100101T1400Z-100102T0000Z/",
"Pinellas-",
"414 AM EST Fri Jan 1 2010",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT FROM 9 AM THIS MORNING TO 7 PM EST THIS EVENING...",
# "A Blowing Dust Advisory remains in effect from 9 AM this morning to 7 PM EST this evening.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
"FLZ149-011715-",
"/O.CON.KTBW.DU.Y.0001.100101T1400Z-100101T1900Z/",
"Coastal Pasco-",
"414 AM EST Fri Jan 1 2010",
"...BLOWING DUST ADVISORY REMAINS IN EFFECT FROM 9 AM THIS MORNING TO 2 PM EST THIS AFTERNOON...",
# "A Blowing Dust Advisory remains in effect from 9 AM this morning to 2 PM EST this afternoon.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": """<---1--->|<---1-----> * and |<---1---> * <---1--->
Forecaster removes hazard in one zone. Extends time of the event for
second zone.""",
"name": "Hazard_CrossingYear_6f",
"drtTime": "20100101_1350",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 17, 19, "DU.Y", ["FLZ050"]),
("Fcst", "Hazards", "DISCRETE", 19, 24, "DU.Y", ["FLZ050"]),
],
"checkStrings": [
"WWUS72 KTBW 011350",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"850 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ149-011500-",
"/O.CAN.KTBW.DU.Y.0001.100101T1400Z-100101T1900Z/",
"Coastal Pasco-",
"850 AM EST Fri Jan 1 2010",
"...BLOWING DUST ADVISORY IS CANCELLED...",
"The National Weather Service in Tampa Bay Ruskin has cancelled the Blowing Dust Advisory.",
"$$",
"FLZ050-012200-",
"/O.EXT.KTBW.DU.Y.0001.100101T1700Z-100102T0000Z/",
"Pinellas-",
"850 AM EST Fri Jan 1 2010",
"...BLOWING DUST ADVISORY NOW IN EFFECT FROM NOON TODAY TO 7 PM EST THIS EVENING...",
# "The Blowing Dust Advisory is now in effect from noon today to 7 PM EST this evening.",
"A Blowing Dust Advisory means that blowing dust will restrict visibilities. Travelers are urged to use caution.",
"$$",
],
},
{
"commentary": "Deleting hazard grids.",
"name": "Hazard_CrossingYear_Cleanup5",
"productType": None,
"checkStrings": [],
"clearHazardsTable": 1,
},
]
import TestScript
| [
2235,
198,
2,
770,
3788,
373,
4166,
290,
1220,
393,
9518,
416,
7760,
1169,
261,
5834,
11,
198,
2,
12997,
284,
17453,
46133,
16945,
54,
12,
2713,
12,
34,
48,
12,
940,
3134,
351,
262,
1294,
5070,
13,
198,
2,
220,
198,
2,
471,
13,
... | 2.299073 | 24,061 |
from pandac.PandaModules import *
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import PLocalizer
from pirates.piratesgui.DialMeter import DialMeter
from pirates.piratesgui import PiratesGuiGlobals | [
6738,
19798,
330,
13,
47,
5282,
5841,
5028,
1330,
1635,
198,
6738,
27516,
13,
4063,
689,
8692,
1330,
20169,
9861,
672,
874,
198,
6738,
27516,
13,
4063,
689,
8692,
1330,
9297,
4374,
7509,
198,
6738,
27516,
13,
4063,
689,
48317,
13,
244... | 3.596774 | 62 |
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix | [
11748,
299,
32152,
355,
45941,
198,
11748,
384,
397,
1211,
355,
3013,
82,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
10802,
62,
6759,
8609
] | 3.25 | 36 |
"""BIG-IQ management client
"""
from datetime import datetime, timedelta
from retry import retry
from f5sdk.logger import Logger
from f5sdk import constants
from f5sdk.utils import http_utils
from f5sdk.decorators import check_auth, add_auth_header
class ManagementClient(object):
"""A class used as a management client for BIG-IQ
Attributes
----------
host : str
the hostname of the device
port : str
the port of the device
Methods
-------
get_info()
Refer to method documentation
make_request()
Refer to method documentation
"""
def __init__(self, host, **kwargs):
"""Class initialization
Parameters
----------
host : str
the hostname of the device
**kwargs :
optional keyword arguments
Keyword Arguments
-----------------
port : int
the port to assign to the port attribute
user : str
the username for device authentication
password : str
the password for device authentication
Returns
-------
None
"""
self.logger = Logger(__name__).get_logger()
self.host = host.split(':')[0]
self.port = kwargs.pop('port', 443)
self._user = kwargs.pop('user', None)
self._password = kwargs.pop('password', None)
# account for multiple authentication schemes
if self._user and self._password:
self._login_using_credentials()
else:
raise Exception('user|password required')
@retry(tries=constants.RETRIES['DEFAULT'], delay=constants.RETRIES['DELAY_IN_SECS'])
def _get_token(self):
"""Gets authentication token
Retries if unsuccessful, up to maximum allotment
Parameters
----------
None
Returns
-------
dict
a dictionary containing authentication token, expiration date and expiration in seconds:
{
'token': 'token',
'expirationDate': '2019-01-01T01:01:01.00'
}
"""
self.logger.debug('Getting authentication token')
response = http_utils.make_request(
self.host,
'/mgmt/shared/authn/login',
port=self.port,
method='POST',
body={
'username': self._user,
'password': self._password
},
basic_auth={
'user': self._user,
'password': self._password
}
)
token_details = response['token']
return {
'token': token_details['token'],
'expirationDate': (
datetime.now() + timedelta(seconds=token_details['timeout'])
).isoformat()
}
def _login_using_credentials(self):
"""Login to device using user + password
Parameters
----------
None
Returns
-------
None
"""
self.logger.info('Logging in using user + password')
self.token = self._get_token()['token']
@check_auth
@add_auth_header
def make_request(self, uri, **kwargs):
"""Makes request to device (HTTP/S)
Parameters
----------
uri : str
the URI where the request should be made
**kwargs :
optional keyword arguments
Keyword Arguments
-----------------
method : str
the HTTP method to use
headers : str
the HTTP headers to use
body : str
the HTTP body to use
body_content_type : str
the HTTP body content type to use
bool_response : bool
return boolean based on HTTP success/failure
advanced_return : bool
return additional information, like HTTP status code to caller
Returns
-------
dict
a dictionary containing the JSON response
"""
return http_utils.make_request(self.host, uri, port=self.port, **kwargs)
def get_info(self):
"""Gets device info
Parameters
----------
None
Returns
-------
dict
the device information
::
{
'version': 'x.x.x.x'
}
"""
response = self.make_request('/mgmt/tm/sys/version')
version_info = response['entries'][
'https://localhost/mgmt/tm/sys/version/0'
]['nestedStats']['entries']
return {
'version': version_info['Version']['description']
}
| [
37811,
3483,
38,
12,
33866,
4542,
5456,
198,
37811,
198,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
198,
6738,
1005,
563,
1330,
1005,
563,
198,
198,
6738,
277,
20,
21282,
74,
13,
6404,
1362,
1330,
5972,
1362,
198,... | 2.16347 | 2,190 |
from flask import Flask, request, jsonify
from flask_basicauth import BasicAuth
from textblob import TextBlob
#import pandas as pd
#from sklearn.model_selection import train_test_split
import pickle
from sklearn.linear_model import LinearRegression
import os
#esta comentado pois o modelo já foi testado e salvo em modelo.sav
#não é preciso deixar pois o custo do treinamento é alto
'''df = pd.read_csv("notebook\Data\casas.csv")
colunas = ['tamanho','ano','garagem']
#Variavel explicativa
X = df.drop('preco', axis=1)
#Variavel resposta
y = df['preco']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
modelo = LinearRegression()
modelo.fit(X_train, y_train)
'''
#configura a ordem das colunas, caso receba o json
colunas = ['tamanho','ano','garagem']
#lê o arquivo de modelo salvo
modelo = pickle.load(open('..\..\models\modelo.sav','rb'))
app = Flask(__name__)
#configura o usuário
app.config['BASIC_AUTH_USERNAME'] = os.environ.get('BASIC_AUTH_USERNAME')
app.config['BASIC_AUTH_PASSWORD'] = os.environ.get('BASIC_AUTH_PASSWORD')
#objeto de autenticação básica
basic_auth = BasicAuth(app)
#metodo get que retorna uma string
@app.route('/')
#metodo get -> retorna se a frase é boa ou ruim
@app.route('/sentimento/<frase>')
@basic_auth.required
#metodo post -> recebe um json
'''@app.route('/cotacao/', methods=['POST'])
@basic_auth.required
def cotacao():
dados = request.get_json()
dados_input = [dados[col] for col in colunas]
preco = modelo.predict([dados_input])
return jsonify(preco=preco[0])'''
#metodo get -> retorna o preço estimado
@app.route('/cotacao/<tamanho>/<ano>/<garagem>')
@basic_auth.required
#reinicia a API quando o código for alterado
app.run(debug=True, host='0.0.0.0')
#oi | [
6738,
42903,
1330,
46947,
11,
2581,
11,
33918,
1958,
198,
6738,
42903,
62,
12093,
3970,
1071,
1330,
14392,
30515,
198,
6738,
2420,
2436,
672,
1330,
8255,
3629,
672,
198,
2,
11748,
19798,
292,
355,
279,
67,
198,
2,
6738,
1341,
35720,
1... | 2.450139 | 722 |
from exasol_integration_test_docker_environment.lib.config.log_config import log_config
| [
6738,
409,
292,
349,
62,
18908,
1358,
62,
9288,
62,
45986,
62,
38986,
13,
8019,
13,
11250,
13,
6404,
62,
11250,
1330,
2604,
62,
11250,
628
] | 3.423077 | 26 |
#!/usr/bin/env python
import os
import logging as _logging
_logger = _logging.getLogger(__name__)
master_host = "***REMOVED***"
master_user = "***REMOVED***"
master_password = "***REMOVED***"
master_db = "***REMOVED***"
slave_host = "127.0.0.1"
slave_user = "reader"
slave_password = "falcon"
slave_db = "falcon"
mysql_master_server = "***REMOVED***"
mysql_slave_server = "***REMOVED***"
mysql_user = "***REMOVED***"
mysql_password = "***REMOVED***"
mysql_db = "***REMOVED***"
order_book_db = "***REMOVED***"
sparrow_db = "***REMOVED***"
order_books_db_2 = {
"host": "***REMOVED***",
"user": "***REMOVED***",
"password": "***REMOVED***",
"db": "**REMOVED***",
}
kafka_bootstrap_server = "***REMOVED***"
# whether to enable api mocking in unit test cases
mock_api_enabled = os.getenv("MOCK_API_ENABLED")
# Binance Tests
binance_api_key = os.getenv("BINANCE_API_KEY")
binance_api_secret = os.getenv("BINANCE_API_SECRET")
# Coinbase Pro Tests
coinbase_pro_api_key = os.getenv("COINBASE_PRO_API_KEY")
coinbase_pro_secret_key = os.getenv("COINBASE_PRO_SECRET_KEY")
coinbase_pro_passphrase = os.getenv("COINBASE_PRO_PASSPHRASE")
# Huobi Tests
huobi_api_key = os.getenv("HUOBI_API_KEY")
huobi_secret_key = os.getenv("HUOBI_SECRET_KEY")
# Dolomite Tests
dolomite_test_web3_private_key = os.getenv("DOLOMITE_TEST_PK")
dolomite_test_web3_address = os.getenv("DOLOMITE_TEST_ADDR")
# Bittrex Tests
bittrex_api_key = os.getenv("BITTREX_API_KEY")
bittrex_secret_key = os.getenv("BITTREX_SECRET_KEY")
# Bitfinex
bitfinex_api_key = os.getenv("BITFINEX_API_KEY")
bitfinex_secret_key = os.getenv("BITFINEX_SECRET_KEY")
# KuCoin Tests
kucoin_api_key = os.getenv("KUCOIN_API_KEY")
kucoin_secret_key = os.getenv("KUCOIN_SECRET_KEY")
kucoin_passphrase = os.getenv("KUCOIN_PASSPHRASE")
# Bitcoin_com Tests
bitcoin_com_api_key = os.getenv("BITCOIN_COM_API_KEY")
bitcoin_com_secret_key = os.getenv("BITCOIN_COM_SECRET_KEY")
test_web3_provider_list = [os.getenv("WEB3_PROVIDER")]
# Liquid Tests
liquid_api_key = os.getenv("LIQUID_API_KEY")
liquid_secret_key = os.getenv("LIQUID_SECRET_KEY")
# Wallet Tests
test_erc20_token_address = os.getenv("TEST_ERC20_TOKEN_ADDRESS")
web3_test_private_key_a = os.getenv("TEST_WALLET_PRIVATE_KEY_A")
web3_test_private_key_b = os.getenv("TEST_WALLET_PRIVATE_KEY_B")
web3_test_private_key_c = os.getenv("TEST_WALLET_PRIVATE_KEY_C")
coinalpha_order_book_api_username = "***REMOVED***"
coinalpha_order_book_api_password = "***REMOVED***"
kafka_2 = {
"bootstrap_servers": "***REMOVED***",
"zookeeper_servers": "***REMOVED***"
}
try:
from .config_local import * # noqa: F401, F403
except ModuleNotFoundError:
pass
try:
from .web3_wallet_secret import * # noqa: F401, F403
except ModuleNotFoundError:
pass
try:
from .binance_secret import * # noqa: F401, F403
except ModuleNotFoundError:
pass
try:
from .coinbase_pro_secrets import * # noqa: F401, F403
except ModuleNotFoundError:
pass
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
28686,
198,
198,
11748,
18931,
355,
4808,
6404,
2667,
198,
62,
6404,
1362,
796,
4808,
6404,
2667,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
198,
9866,
62,
4774,
7... | 2.294656 | 1,310 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO Initialization
"""ResNet Model"""
# Python 2.X and 3.X compatibility
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Import libraries
import collections
import functools
import sonnet as snt
import tensorflow as tf
from six.moves import range
# Tensorflow objects/functions
nest = tf.contrib.framework.nest
# Structure to be sent from actors to learner.
AgentOutput = collections.namedtuple('AgentOutput', 'action policy_logits baseline')
class Agent(snt.RNNCore):
"""Agent with ResNet."""
@snt.reuse_variables | [
2,
15069,
2864,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330... | 3.640625 | 320 |
"""
TO-DO:
add probability map layer for T-bar or cleft detection and other semantic prediction
"""
import neuroglancer as ng
import numpy as np
from chunkflow.chunk import Chunk
from chunkflow.lib.synapses import Synapses
from .base import OperatorBase
| [
37811,
198,
10468,
12,
18227,
25,
198,
2860,
12867,
3975,
7679,
329,
309,
12,
5657,
393,
1190,
701,
13326,
290,
584,
37865,
17724,
198,
37811,
198,
198,
11748,
7669,
4743,
8250,
355,
23370,
198,
11748,
299,
32152,
355,
45941,
198,
198,
... | 3.633803 | 71 |
#createdbyme
| [
2,
25598,
1525,
1326,
201
] | 2.6 | 5 |
import pandas as pd
import argparse
import pickle
import numpy as np
from bert_score import BERTScorer
parser = argparse.ArgumentParser(description='Unlabeled')
parser.add_argument('--data_path', type=str, default='./tmp/tst-summarization-baseline-predict/test_generations.txt',
help='path to data files')
parser.add_argument('--output_data_path', type=str, default='./data/ulbl_predict.csv',
help='path to data files')
parser.add_argument('--thres', type=float, default=0.0,
help='path to data files')
args = parser.parse_args()
if __name__ == "__main__":
main() | [
11748,
19798,
292,
355,
279,
67,
198,
11748,
1822,
29572,
198,
11748,
2298,
293,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
275,
861,
62,
26675,
1330,
347,
17395,
3351,
11934,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,... | 2.478599 | 257 |
from django.conf.urls import url, patterns
from django.contrib import admin
from .api import *
admin.autodiscover()
urlpatterns = patterns(
(r'^api/announcement/(?P<pk>\d+)/dismiss$', AnnouncementDismiss.as_view()),
(r'^api/announcements$', AnnouncementList.as_view()),
)
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
11,
7572,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
15042,
1330,
1635,
628,
198,
28482,
13,
2306,
375,
29392,
3419,
198,
198,
6371,
33279,
82,
796,
7... | 2.572727 | 110 |
# Implementation of the TensforFlow Tutorial for Basic Classification: https://www.tensorflow.org/tutorials/keras/basic_classification
# author: tfreundo
import tensorflow as tf
# High Level TF-API
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plot
# Parameters for Debugging and tweaking the Learning
debug_plot_exampleImage = False # Plots an example image
debug_plot_trainingdata = False # Plots the first x images from the training data
debug_print_exampleNormalization = False # Prints an example before and after normalization to console
debug_plot_prediction = False # Plots the image and the predictions per class/category
debug_plot_prediction_imgIndex = 1 # The index of the image which should be used for prediction visualization
# MNIST Fashion Dataset (70k grayscale images 28x28pixels with values from 0 to 255 consisting of 10 categories)
datasource = keras.datasets.fashion_mnist
(train_imgs, train_lbls), (test_imgs, test_lbls) = datasource.load_data()
print('Loaded %d samples as Training Data and %d samples as Test Data' %(len(train_imgs), len(test_imgs)))
# The class names corresponding to the labels (0-9)
classnames = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
###################################
# Data Overview and Preprocessing #
###################################
if(debug_plot_exampleImage):
plotSingleImageColorbar(imgData=train_imgs[0])
if(debug_plot_trainingdata):
plotImagesWithLabels()
if(debug_print_exampleNormalization):
print('BEFORE:\n', train_imgs[0])
train_imgs = normalizeData(train_imgs)
test_imgs = normalizeData(test_imgs)
if(debug_print_exampleNormalization):
print('AFTER NORMALIZATION:\n', train_imgs[0])
######################
# Building the Model #
######################
# Form a sequiental list of layers to execute one after the other (output fed into the next layer)
# Each layer extracts representations from the data fed into them
model = keras.Sequential([
# Flatten/reformat the input images 2d-array (28x28 pixels) to a 1d array (28*28=784 pixels)
keras.layers.Flatten(input_shape=(28,28)),
# Densely resp. fully-connected neural layers
keras.layers.Dense(128, activation=tf.nn.relu),
# Returns an array of 10 probabilities (for the 10 categories to predict)
keras.layers.Dense(10, activation=tf.nn.softmax)
])
# Configure the model
model.compile(optimizer=tf.train.AdamOptimizer(), # Optimizer to update the model based on data and the according loss
loss='sparse_categorical_crossentropy', # Loss function measures how accurate the model is during training --> we want to minimize the loss
metrics=['accuracy']) # Metric that measures/monitors the training/testing steps
######################
# Training the Model #
######################
model.fit(train_imgs, train_lbls, epochs=1)
################################
# Evaluate the Models Accuracy #
################################
test_loss, test_acc = model.evaluate(test_imgs, test_lbls)
print('Test Accuracy = ', test_acc) # It is a hint for overfitting if the accuracy on the test data is less than the accuracy on the training data (possible if e.g. performing to many epochs or reducing the training data)
############################
# Make a single Prediction #
############################
if(debug_plot_prediction):
predictions = model.predict(test_imgs) # Make predictions for the whole test data
prediction = predictions[debug_plot_prediction_imgIndex]
print('Prediction for Test Image %d:\n' %debug_plot_prediction_imgIndex, prediction)
prediction_class = np.argmax(prediction)
print('Predicted class: %s | Actual class: %s' %(classnames[prediction_class], classnames[test_lbls[debug_plot_prediction_imgIndex]]))
plotSingleImagePredictionOverview(prediction=prediction, predictedLabel=classnames[prediction_class], trueLabel=classnames[test_lbls[debug_plot_prediction_imgIndex]], img=test_imgs[debug_plot_prediction_imgIndex]) | [
2,
46333,
286,
262,
40280,
1640,
37535,
36361,
329,
14392,
40984,
25,
3740,
1378,
2503,
13,
83,
22854,
11125,
13,
2398,
14,
83,
44917,
82,
14,
6122,
292,
14,
35487,
62,
4871,
2649,
220,
198,
2,
1772,
25,
256,
19503,
41204,
198,
1174... | 3.317033 | 1,227 |
# coding=utf-8
import csv
from flask import render_template, request, redirect, abort, jsonify, url_for, session, flash, send_from_directory
from CTFd.utils import authed, judge_result, allowed_file, get_file_suffix
from CTFd.models import db, GoodBaseInfo, GoodSkuInfo, SkuProxyInfo, get_id, DisplayGoodInfo, getPlatform, PddOrderInfo
from flask import current_app as app
from werkzeug.utils import secure_filename
from CTFd.pddCrawing import PinDuoDuo
from CTFd.orderModel import Order, DetailInfo, OrderInfo, MallInfo, convert_code_to_express, convert_status_code_status, get_good_count, get_good_info, get_good_price
import time
import hashlib
import re
import os
import sys
authority = app.config['MYSQL_USER']
password = app.config['MYSQL_PASSWORD']
name = app.config['DATEBASE_NAME']
reload(sys)
sys.setdefaultencoding('utf-8')
PER_PAGE_COUNT = 20
PDD_COOKIES = ""
| [
2,
19617,
28,
40477,
12,
23,
198,
11748,
269,
21370,
198,
198,
6738,
42903,
1330,
8543,
62,
28243,
11,
2581,
11,
18941,
11,
15614,
11,
33918,
1958,
11,
19016,
62,
1640,
11,
6246,
11,
7644,
11,
3758,
62,
6738,
62,
34945,
198,
6738,
... | 2.956376 | 298 |
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
| [
2,
19617,
28,
40477,
23,
198,
198,
2,
15069,
2864,
28591,
5097,
2606,
35,
13,
9858,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
... | 3.73743 | 179 |
from visdom import Visdom
def create_session(**kwargs):
"""
Creates a visdom session
Parameters
----------
kwargs : ...
Returns
-------
object
a visdom session
"""
session = Visdom(**kwargs)
session.close(None)
return session
| [
6738,
1490,
3438,
1330,
6911,
3438,
628,
198,
4299,
2251,
62,
29891,
7,
1174,
46265,
22046,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
7921,
274,
257,
1490,
3438,
6246,
628,
220,
220,
220,
40117,
198,
220,
220,
220,
24200,
... | 2.517544 | 114 |
"""
2) Kernel truncation, log-linear runtimes
=====================================================
In the previous notebook, we've seen that **simulated annealing**
could be used to define efficient coarse-to-fine solvers
of the entropic :math:`\\text{OT}_\\varepsilon` problem.
Adapting ideas from `(Schmitzer, 2016) <https://arxiv.org/abs/1610.06519>`_,
we now explain how the :mod:`SamplesLoss("sinkhorn", backend="multiscale") <geomloss.SamplesLoss>`
layer combines this strategy with a **multiscale encoding of the input measures** to
compute Sinkhorn divergences in :math:`O(n \log(n))` times, on the GPU.
"""
##################################################
#
# .. warning::
# The recent line of Stats-ML papers on entropic OT started by `(Cuturi, 2013) <https://arxiv.org/abs/1306.0895>`_
# has prioritized the theoretical study of **statistical properties**
# over computational efficiency.
# Consequently, in spite of their impact on
# `fluid mechanics <https://arxiv.org/abs/1505.03306>`_,
# `computer graphics <https://arxiv.org/abs/1409.1279>`_ and all fields
# where a `manifold assumption <https://arxiv.org/abs/1708.02469>`_
# may be done on the input measures,
# **multiscale methods have been mostly ignored by authors in the Machine Learning community**.
#
# By providing a fast discrete OT solver that relies on key ideas from both worlds,
# GeomLoss aims at **bridging the gap** between these two bodies of work.
# As researchers become aware of both **geometric** and **statistical**
# points of view on discrete OT, we will hopefully converge towards
# robust, efficient and well-understood generalizations of the Wasserstein distance.
#
# Multiscale Optimal Transport
# -----------------------------
#
# **In the general case,** Optimal Transport problems are linear programs that
# cannot be solved with less than :math:`O(n^2)` operations:
# at the very least, the cost function :math:`\text{C}` should be evaluated on all pairs of points!
# But fortunately, when the data is **intrinsically low-dimensional**, efficient algorithms
# allow us to leverage the structure of the cost matrix :math:`(\text{C}(x_i,y_j))_{i,j}`
# to **prune out** useless computations and reach the optimal :math:`O(n \log(n))`
# complexity that is commonly found in
# `physics <https://en.wikipedia.org/wiki/Fast_multipole_method>`_
# and `computer graphics <https://en.wikipedia.org/wiki/Octree>`_.
#
#
# As far as I can tell, the first multiscale
# OT solver was presented in a seminal paper of `Quentin Mérigot <http://quentin.mrgt.fr/>`_,
# `(Mérigot, 2011) <https://hal.archives-ouvertes.fr/hal-00604684>`_.
# In the simple case of entropic OT, which was best studied in `(Schmitzer, 2016) <https://arxiv.org/abs/1610.06519>`_,
# multiscale schemes rely on **two key observations** made on the :math:`\varepsilon`-scaling descent:
#
# 1. When the blurring radius :math:`\sigma = \varepsilon^{1/p}` is large,
# the dual potentials :math:`f` and :math:`g` define **smooth** functions
# on the ambient space, that can be described accurately with **coarse samples**
# at scale :math:`\sigma`.
# The first few iterations of the Sinkhorn loop could thus be performed quickly,
# on **sub-sampled point clouds** :math:`\tilde{x}_i` and :math:`\tilde{y}_j`
# computed with an appropriate clustering method.
#
# 2. The fuzzy transport plans :math:`\pi_\varepsilon`, solutions of the primal problem :math:`\text{OT}_\varepsilon(\alpha,\beta)`
# for decreasing values of :math:`\varepsilon` typically define a **nested sequence** of
# measures on the product space :math:`\alpha\otimes \beta`.
# Informally, **we may assume that**
#
# .. math::
# \varepsilon ~<~\varepsilon' ~\Longrightarrow~
# \text{Supp}(\pi_\varepsilon) ~\subset~ \text{Supp}(\pi_{\varepsilon'}).
#
# If :math:`(f_\varepsilon,g_\varepsilon)` denotes an optimal dual pair
# for the *coarse* problem :math:`\text{OT}_\varepsilon(\tilde{\alpha},\tilde{\beta})`
# at temperature :math:`\varepsilon`, we know that the **effective support** of
#
# .. math::
# \pi_\varepsilon
# ~=~ \exp \tfrac{1}{\varepsilon}[ f_\varepsilon \oplus g_\varepsilon - \text{C}]
# \,\cdot\, \tilde{\alpha}\otimes\tilde{\beta}
#
# is typically restricted to pairs of *coarse points* :math:`(\tilde{x}_i,\tilde{y}_j)`,
# i.e. pairs of clusters, such that
#
# .. math::
# f_\varepsilon(\tilde{x}_i) + g_\varepsilon(\tilde{y}_j) ~\geqslant~
# \text{C}(\tilde{x}_i, \tilde{y}_j) \,-\,5\varepsilon.
#
# By leveraging this coarse-level information to **prune out computations** at
# a finer level (*kernel truncation*), we may perform a full Sinkhorn loop **without ever computing**
# **point-to-point interactions** that would have a **negligible impact**
# on the updates of the dual potentials.
#
# The GeomLoss implementation
# ------------------------------
#
# In practice, the :mod:`SamplesLoss("sinkhorn", backend="multiscale") <geomloss.SamplesLoss>`
# layer relies on a **single loop**
# that differs significantly from `Bernhard Schmitzer <https://www-m15.ma.tum.de/Allgemeines/BernhardSchmitzer>`_'s
# reference `CPU implementation <https://github.com/bernhard-schmitzer/optimal-transport/tree/master/v0.2.0>`_.
# Some modifications were motivated by **mathematical insights**, and may be relevant
# for all entropic OT solvers:
#
# - As discussed in the previous notebook, if the optional argument **debias** is set to **True**
# (the default behavior), we compute the **unbiased** dual potentials :math:`F` and :math:`G`
# which correspond to the positive and definite Sinkhorn divergence :math:`\text{S}_\varepsilon`.
# - For the sake of **numerical stability**, all computations are performed *in the log-domain*.
# We rely on efficient, **online** Log-Sum-Exp
# routines provided by the `KeOps library <https://www.kernel-operations.io>`_.
# - For the sake of **symmetry**, we use *averaged* updates on the dual potentials :math:`f` and :math:`g`
# instead of the standard *alternate* iterations of the Sinkhorn algorithm.
# This allows us to converge (much) faster when the two input measures
# are **close to each other**, and we also make sure that:
#
# .. math::
# \text{S}_\varepsilon(\alpha,\beta)=\text{S}_\varepsilon(\beta,\alpha),
# ~~\text{S}_\varepsilon(\alpha,\alpha) = 0
# ~~\text{and}~~ \partial_{\alpha} \text{S}_\varepsilon(\alpha,\beta=\alpha) = 0,
#
# even after a *finite* number of iterations.
# - When jumping from coarse to fine scales, we use the "true", **closed-form** expression
# of our dual potentials instead of Bernhard's (simplistic) piecewise-constant **extrapolation** rule.
# In practice, this simple trick allows us to be much more aggressive during the descent
# and only spend **one iteration per value of the temperature** :math:`\varepsilon`.
# - Our gradients are computed using an **explicit formula**, at convergence,
# thus **bypassing a naive backpropagation** through the whole Sinkhorn loop.
#
# Other tricks are more **hardware-dependent**, and result from trade-offs
# between computation times and memory accesses on the GPU:
#
# - CPU implementations typically rely on *lists* and *sparse matrices*;
# but for the sake of **performances on GPUs**, we combine a sorting pass with
# a *block-sparse truncation scheme* that enforces **contiguity in memory**.
# Once again, we rely on CUDA codes that are abstracted and
# `documented <http://www.kernel-operations.io/keops/python/sparsity.html>`_
# in the KeOps library.
# - For the sake of **simplicity**, I only implemented a **two-scale** algorithm
# which performs well when working with 50,000-500,000 samples per measure.
# On the GPU, (semi) brute-force methods tend to have less overhead than finely crafted
# tree-like methods, and I found that using **a single coarse scale** is a good compromise
# for this range of problems.
# In the future, I may try to extend this code
# to let it scale on clouds with *more than a million* of points...
# but I don't know if this would be of use to anybody!
# - As discussed in the next notebook, **our implementation is not limited to dimensions 2 and 3**.
# Feel free to use this layer in conjunction with your **favorite clustering scheme**, e.g. a straightforward K-means
# in dimension 100, and expect decent speed-ups if your data is **intrinsically low-dimensional**.
#
# Crucially, GeomLoss **does not perform any of the sanity checks described in Bernhard's paper**
# (e.g. on updates of the kernel truncation mask),
# which allow him to **guarantee** the correctness of his solution
# to the :math:`\text{OT}_\varepsilon` problem.
# Running these tests during the descent would induce a significant
# overhead, for little practical impact.
#
# .. note::
# As of today, the **"multiscale"** backend of the
# :mod:`SamplesLoss <geomloss.SamplesLoss>` layer
# should thus be understood as a **pragmatic**, GPU-friendly algorithm
# that provides quick estimates of the Wasserstein distance and gradient on large-scale problems,
# without guarantees. I find it *good enough* for most measure-fitting applications...
# But my personal experience is far from covering all use-cases.
# If you observe weird behaviors on your own range of transportation problems, **please let me know!**
#
#
# Setup
# ---------------------
#
# Standard imports:
import numpy as np
import matplotlib.pyplot as plt
import time
import torch
import os
from torch.autograd import grad
use_cuda = torch.cuda.is_available()
dtype = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
###############################################
# Display routines:
from imageio import imread
###############################################
# Dataset
# --------------
#
# Our source and target samples are drawn from measures whose densities
# are stored in simple PNG files. They allow us to define a pair of discrete
# probability measures:
#
# .. math::
# \alpha ~=~ \sum_{i=1}^N \alpha_i\,\delta_{x_i}, ~~~
# \beta ~=~ \sum_{j=1}^M \beta_j\,\delta_{y_j}.
sampling = 10 if not use_cuda else 2
A_i, X_i = draw_samples("data/ell_a.png", sampling)
B_j, Y_j = draw_samples("data/ell_b.png", sampling)
###############################################
# Scaling strategy
# -------------------
#
# We now display the behavior of the Sinkhorn loss across
# our iterations.
from pykeops.torch.cluster import grid_cluster, cluster_ranges_centroids
from geomloss import SamplesLoss
scaling, Nits = 0.5, 9
cluster_scale = 0.1 if not use_cuda else 0.05
plt.figure(figsize=((12, ((Nits - 1) // 3 + 1) * 4)))
for i in range(Nits):
blur = scaling ** i
Loss = SamplesLoss(
"sinkhorn",
p=2,
blur=blur,
diameter=1.0,
cluster_scale=cluster_scale,
scaling=scaling,
backend="multiscale",
)
# Create a copy of the data...
a_i, x_i = A_i.clone(), X_i.clone()
b_j, y_j = B_j.clone(), Y_j.clone()
# And require grad:
a_i.requires_grad = True
x_i.requires_grad = True
b_j.requires_grad = True
# Compute the loss + gradients:
Loss_xy = Loss(a_i, x_i, b_j, y_j)
[F_i, G_j, dx_i] = grad(Loss_xy, [a_i, b_j, x_i])
# The generalized "Brenier map" is (minus) the gradient of the Sinkhorn loss
# with respect to the Wasserstein metric:
BrenierMap = -dx_i / (a_i.view(-1, 1) + 1e-7)
# Compute the coarse measures for display ----------------------------------
x_lab = grid_cluster(x_i, cluster_scale)
_, x_c, a_c = cluster_ranges_centroids(x_i, x_lab, weights=a_i)
y_lab = grid_cluster(y_j, cluster_scale)
_, y_c, b_c = cluster_ranges_centroids(y_j, y_lab, weights=b_j)
# Fancy display: -----------------------------------------------------------
ax = plt.subplot(((Nits - 1) // 3 + 1), 3, i + 1)
ax.scatter([10], [10]) # shameless hack to prevent a slight change of axis...
display_potential(ax, G_j, "#E2C5C5")
display_potential(ax, F_i, "#C8DFF9")
if blur > cluster_scale:
display_samples(ax, y_j, b_j, [(0.55, 0.55, 0.95, 0.2)])
display_samples(ax, x_i, a_i, [(0.95, 0.55, 0.55, 0.2)], v=BrenierMap)
display_samples(ax, y_c, b_c, [(0.55, 0.55, 0.95)])
display_samples(ax, x_c, a_c, [(0.95, 0.55, 0.55)])
else:
display_samples(ax, y_j, b_j, [(0.55, 0.55, 0.95)])
display_samples(ax, x_i, a_i, [(0.95, 0.55, 0.55)], v=BrenierMap)
ax.set_title("iteration {}, blur = {:.3f}".format(i + 1, blur))
ax.set_xticks([0, 1])
ax.set_yticks([0, 1])
ax.axis([0, 1, 0, 1])
ax.set_aspect("equal", adjustable="box")
plt.tight_layout()
plt.show()
##################################################
# Analogy with a Quicksort algorithm
# ---------------------------------------
#
#
# In some sense, Optimal Transport can be understood as a **generalization of sorting problems**
# as we "index" a weighted point cloud with another one. But **how far can we go**
# with this analogy?
#
# **In dimension 1**, when :math:`p \geqslant 1`,
# the optimal Monge map can be computed through a simple **sorting pass**
# on the data with :math:`O(n \log(n))` complexity.
# At the other end of the spectrum, generic OT problems on **high-dimensional**,
# scattered point clouds have little to **no structure** and cannot be solved
# with less than :math:`O(n^2)` or :math:`O(n^3)` operations.
#
# From this perspective, multiscale OT solvers should thus be understood
# as **multi-dimensional Quicksort algorithms**, with coarse **cluster centroids**
# and their targets playing the part of **median pivots**. With its pragmatic GPU implementation,
# GeomLoss has simply delivered on the promise
# made by a long line of research papers:
# **when your data is intrinsically low-dimensional**,
# the runtime needed to compute a Wasserstein distance should be closer
# to a :math:`O(n \log(n))` than to a :math:`O(n^2)`.
#
#
#
| [
37811,
198,
17,
8,
32169,
40122,
341,
11,
2604,
12,
29127,
1057,
22355,
198,
10052,
4770,
1421,
28,
198,
198,
818,
262,
2180,
20922,
11,
356,
1053,
1775,
326,
12429,
14323,
4817,
281,
710,
4272,
1174,
198,
24089,
307,
973,
284,
8160,
... | 2.886331 | 4,865 |
#!/usr/bin/python
from __future__ import absolute_import, division, print_function
import importlib,os,re,sys
from robot.libraries.BuiltIn import BuiltIn,RobotNotRunningError
from robot.api import logger
from ansible import modules
from .picker2 import picker2
from ansible.cli.adhoc import AdHocCLI as mycli
from ansible.plugins.callback import CallbackBase
from ansible import context
ansible_password=False
ansible_become_password=False
ansible_user=False
amodules_map={}
from .docs import amodules_docs
r=os.path.dirname(modules.__file__)
rr=len(r)-15
del modules
results={}
resultser={}
for i in os.walk(r):
mo=[m[:-3] for m in i[2] if m[-2:]=='py' and m!='__init__.py']
for m in mo: amodules_map[m]=i[0][rr:].replace('/','.')
class Impansible3(object,metaclass=genImpansible3):
""" Robotframework library to access all ansible internal modules.
All Ansible modules are available as Robotframework's keywords.
The Impansible library can be used without Robotframework.
= Table of contents =
- `Examples`
- `Shortcuts`
- `Keywords`
= Examples =
| ${x}= | `Setup` | localhost |
| ${x}= | `apt` | localhost | package=${PAC} | state=present |
| ${c}= | `get certificate` | localhost | host=www.onet.pl | port=443 | proxy_host=1.1.1.1 |
| ***** Variables *****
| ${PAC} mtr
| ${ansible_password} secret
|
| ***** Settings *****
| library Impansible
| library Collections
| library OperatingSystem
|
| ***** Test Cases *****
| test 1
| ${x}= Setup localhost
| ${y}= get from dictionary ${x} ansible_facts
| ${h}= get from dictionary ${y} ansible_hostname
| ${z}= get from dictionary ${y} ansible_distribution
| Should be Equal ${z} Ubuntu
| Should Contain ${h} tester
| test 2
| [Timeout] 600
| ${x}= apt localhost package=${PAC} state=present
| ${x}= get from dictionary ${x} invocation
| ${y}= get from dictionary ${x} module_args
| ${s}= get from dictionary ${y} state
| Should be Equal ${s} present
| ${w}= Run which ${PAC}
| Should Contain ${w} ${PAC}
|
| test 3
| [Timeout] 600
| ${x}= apt localhost package=${PAC} state=absent
| ${x}= get from dictionary ${x} invocation
| ${y}= get from dictionary ${x} module_args
| ${s}= get from dictionary ${y} state
| Should be Equal ${s} absent
| ${w}= Run which ${PAC}
| Should not Contain ${w} ${PAC}
|
| test 4
| ${x}= apt localhost package=python-openssl state=present
| ${c}= get certificate localhost host=www.onet.pl port=443 proxy_host=1.1.1.1
| ${e}= get from dictionary ${c} expired
| Should not be True ${e}
|
| test 5
| ${x}= nitz2
| log ${x}
|
| test 6
| ${w}= command localhost uname -a
| ${w}= get from dictionary ${w} stdout
| Should Contain ${w} GNU/Linux
| # sudo access example:
|
|
| ***** settings *****
| Library Impansible
| Library Collections
|
| ***** variables *****
| ${hostname} localhost
| # without sudo
| #${ansible_password} root_password
| # with sudo
| ${ansible_user} user_name
| ${ansible_become_password} user_password
|
| ***** test cases *****
| test 1
| ${x}= Setup ${hostname}
| ${y}= get from dictionary ${x} ansible_facts
| ${z}= get from dictionary ${y} ansible_distribution
| Should be Equal ${z} Ubuntu
| test 2
| ${x}= command ${hostname} id
| ${x}= get from dictionary ${x} stdout
| Should Contain ${x} root
|
| ```
|
| # Requirements for selenium tests
| ```
| ***** Variables *****
| ${BROWSER} firefox
| ${ansible_password} XXXXXXX
| ${DBHost} localhost
| ${DBName} w3schools
| ${DBUser} XXXXXX
| ${DBPass} XXXXXX
| ${DBPort} 3306
| ${DBFile} w3schools.sql
| ${Furl} https://raw.githubusercontent.com/AndrejPHP/w3schools-database/master/w3schools.sql
| ${gr} /etc/apt/sources.list.d/google-chrome.list
| ${grep} http://mirror.cs.uchicago.edu/google-chrome/pool/main/g/google-chrome-stable/
| #${chrome_version} False
| ${chrome_version} google-chrome-stable_81.0.4044.138-1_amd64.deb
|
| ***** Settings *****
| Library Impansible
| library Collections
| library OperatingSystem
| library String
| #Library DatabaseLibrary
| Libarary SeleniumLibrary
|
| ***** Test Cases *****
| do wp.pl tests
| [Setup] Requirements
| Open Browser http://wp.pl ${BROWSER}
| ${t}= Get Title
| Should contain ${t} Wirtualna
|
|
| ***** Keywords *****
| Requirements
| The Operating System should be Ubuntu
| The Firefox browser should be installed if needed
| The Geckodriver should be installed if needed
| The google repo should be available
| The Chrome should be installed if needed
| The Chromedriver should be installed if needed
| #The MySQL server should be installed
| #Python should have MySQL support
| #The MySQL user have all privileges
| #Mysql should have no database imported
| #Mysql should have database imported
|
| The Operating System should be Ubuntu
| ${x}= Setup localhost
| ${y}= get from dictionary ${x} ansible_facts
| ${z}= get from dictionary ${y} ansible_distribution
| Should be Equal ${z} Ubuntu
|
| The Firefox browser should be installed if needed
| [Timeout] 600
| ${x}= Convert To Lower Case ${BROWSER}
| ${x}= Run Keyword and return status Should Contain ${x} firefox
| Return from keyword if not ${x}
| ${x}= Apt localhost package=firefox state=present
| ${x}= Get from dictionary ${x} invocation
| ${y}= Get from dictionary ${x} module_args
| ${s}= Get from dictionary ${y} state
| Should be Equal ${s} present
| ${w}= Run which firefox
| Should Contain ${w} firefox
|
| The Geckodriver should be installed if needed
| [Timeout] 600
| ${x}= Convert To Lower Case ${BROWSER}
| ${x}= Run Keyword and return status Should Contain ${x} firefox
| Return from keyword if not ${x}
| ${x}= Apt localhost package=firefox-geckodriver state=present
| ${x}= Get from dictionary ${x} invocation
| ${y}= Get from dictionary ${x} module_args
| ${s}= Get from dictionary ${y} state
| Should be Equal ${s} present
| ${w}= Run which geckodriver
| Should Contain ${w} geckodriver
|
| The Chrome should be installed if needed
| [Timeout] 600
| ${x}= Convert To Lower Case ${BROWSER}
| ${x}= Run Keyword and return status Should Contain ${x} chrome
| Return from keyword if not ${x}
| ${w}= Run which google-chrome-stable
| ${x}= run keyword and return status Should Contain ${w} google-chrome-stable
| Return from keyword if ${x}
| run keyword if "${chrome_version}"!="False" apt localhost deb="${grep}${chrome_version}"
| ${x}= apt localhost package=google-chrome-stable state=present
| ${x}= Get from dictionary ${x} invocation
| ${y}= Get from dictionary ${x} module_args
| ${s}= Get from dictionary ${y} state
| Should be Equal ${s} present
| ${w}= Run which google-chrome-stable
| Should Contain ${w} google-chrome-stable
|
| The Chromedriver should be installed if needed
| [Timeout] 600
| ${x}= Convert To Lower Case ${BROWSER}
| ${x}= Run Keyword and return status Should Contain ${x} chrome
| Return from keyword if not ${x}
| ${x}= apt localhost package=chromium-chromedriver state=present
| ${x}= get from dictionary ${x} invocation
| ${y}= get from dictionary ${x} module_args
| ${s}= get from dictionary ${y} state
| Should be Equal ${s} present
| ${w}= Run which chromedriver
| Should Contain ${w} chromedriver
|
| The MySQL server should be installed
| [Timeout] 600
| ${x}= apt localhost package=mysql-server state=present
| ${x}= get from dictionary ${x} invocation
| ${y}= get from dictionary ${x} module_args
| ${s}= get from dictionary ${y} state
| Should be Equal ${s} present
| ${w}= Run which mysqld
| Should Contain ${w} mysqld
|
| Python should have MySQL support
| [Timeout] 600
| ${x}= apt localhost package=python-mysqldb state=present
| ${x}= get from dictionary ${x} invocation
| ${y}= get from dictionary ${x} module_args
| ${s}= get from dictionary ${y} state
| Should be Equal ${s} present
|
| The MySQL user have all privileges
| [Timeout] 600
| ${x}= apt localhost package=python-mysqldb state=present
| ${x}= get from dictionary ${x} invocation
| ${y}= get from dictionary ${x} module_args
| ${s}= get from dictionary ${y} state
| Should be Equal ${s} present
| mysql_user localhost name=${DBUser} password=${DBPass} priv=*.*:ALL
|
| Mysql should have no database imported
| [Timeout] 600
| mysql db localhost name=${DBName} state=absent
|
| Mysql should have database imported
| [Timeout] 600
| mysql db localhost name=${DBName} state=present
| Get url localhost url=${Furl} dest=/tmp/${DBFile}
| mysql db localhost name=${DBName} state=import target=/tmp/${DBFile}
|
| Mysql requirements
| The MySQL server should be installed
| Python should have MySQL support
| Mysql should have no database imported
| Mysql should have database imported
| The MySQL user have all privileges
|
| The google repo should be available
| [Timeout] 600
| ${x}= Stat localhost path="${gr}"
| ${x}= get from dictionary ${x} stat
| ${x}= get from dictionary ${x} exists
| run keyword if not ${x} Copy localhost content='deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main' dest="${gr}"
| run keyword if not ${x} shell localhost wget -q -O - https://dl.google.com/linux/linux_signing_key.pub | apt-key add -
| run keyword if not ${x} apt localhost update_cache=yes
| ${x}= Stat localhost path="${gr}"
| ${x}= get from dictionary ${x} stat
| ${x}= get from dictionary ${x} exists
| Should be true ${x} "The google repo is not available"
```
"""
ROBOT_LIBRARY_VERSION = '0.11'
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
11748,
1330,
8019,
11,
418,
11,
260,
11,
17597,
198,
6738,
9379,
13,
75,
11127,
13,
39582,
818,
1330,
2847... | 2.327281 | 4,571 |
import json
| [
11748,
33918,
628,
628,
628,
628
] | 3.166667 | 6 |
"""Tense analysis and detection related utilities."""
# pylint: disable=E0611
PRESENT = 'PRESENT'
PAST = 'PAST'
FUTURE = 'FUTURE'
MODAL = 'MODAL'
NORMAL = 'NORMAL'
| [
37811,
51,
1072,
3781,
290,
13326,
3519,
20081,
526,
15931,
198,
2,
279,
2645,
600,
25,
15560,
28,
36,
3312,
1157,
198,
198,
48296,
3525,
796,
705,
48296,
3525,
6,
198,
47,
11262,
796,
705,
47,
11262,
6,
198,
37,
3843,
11335,
796,
... | 2.538462 | 65 |
from thiamsu.settings.base import *
| [
6738,
294,
1789,
2385,
13,
33692,
13,
8692,
1330,
1635,
198
] | 3.272727 | 11 |
# flake8: noqa: F401
# isort: off
from finrl.exchange.common import MAP_EXCHANGE_CHILDCLASS
from finrl.exchange.exchange import Exchange
# isort: on
from finrl.exchange.bibox import Bibox
from finrl.exchange.binance import Binance
from finrl.exchange.bittrex import Bittrex
from finrl.exchange.exchange import (available_exchanges, ccxt_exchanges,
get_exchange_bad_reason, is_exchange_bad,
is_exchange_known_ccxt, is_exchange_officially_supported,
market_is_active, timeframe_to_minutes, timeframe_to_msecs,
timeframe_to_next_date, timeframe_to_prev_date,
timeframe_to_seconds)
from finrl.exchange.ftx import Ftx
from finrl.exchange.kraken import Kraken
| [
2,
781,
539,
23,
25,
645,
20402,
25,
376,
21844,
198,
2,
318,
419,
25,
572,
198,
6738,
957,
45895,
13,
1069,
3803,
13,
11321,
1330,
34645,
62,
6369,
3398,
27746,
62,
3398,
26761,
31631,
198,
6738,
957,
45895,
13,
1069,
3803,
13,
1... | 2.051345 | 409 |
"""Contains the impulse response augmentation model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from data_utils.augmentor.base import AugmentorBase
from data_utils.utility import read_manifest
from data_utils.audio import AudioSegment
class ImpulseResponseAugmentor(AugmentorBase):
"""Augmentation model for adding impulse response effect.
:param rng: Random generator object.
:type rng: random.Random
:param impulse_manifest_path: Manifest path for impulse audio data.
:type impulse_manifest_path: basestring
"""
def transform_audio(self, audio_segment):
"""Add impulse response effect.
Note that this is an in-place transformation.
:param audio_segment: Audio segment to add effects to.
:type audio_segment: AudioSegmenet|SpeechSegment
"""
impulse_json = self._rng.sample(self._impulse_manifest, 1)[0]
impulse_segment = AudioSegment.from_file(impulse_json['audio_filepath'])
audio_segment.convolve(impulse_segment, allow_resample=True)
| [
37811,
4264,
1299,
262,
25278,
2882,
16339,
14374,
2746,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
6738,
1366,
... | 3.005435 | 368 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, division
import time
from six.moves import range
import numpy as np
from .normalized_distance import all_pairs_normalized_distances
def knn_initialize(X, missing_mask, verbose=False):
"""
Fill X with NaN values if necessary, construct the n_samples x n_samples
distance matrix and set the self-distance of each row to infinity.
"""
X_row_major = X.copy("C")
if missing_mask.sum() != np.isnan(X_row_major).sum():
# if the missing values have already been zero-filled need
# to put NaN's back in the data matrix for the distances function
X_row_major[missing_mask] = np.nan
D = all_pairs_normalized_distances(X_row_major, verbose=verbose)
# set diagonal of distance matrix to infinity since we don't want
# points considering themselves as neighbors
np.fill_diagonal(D, np.inf)
return X_row_major, D
| [
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330,
257,
4866,
286,
262,
13789,
379,
198,
2,... | 3.225877 | 456 |
import torch
import os.path
from datasets.dataset_factory import get_dataset
from torch.utils.data import DataLoader
import copy
from tqdm import tqdm
| [
11748,
28034,
198,
11748,
28686,
13,
6978,
198,
6738,
40522,
13,
19608,
292,
316,
62,
69,
9548,
1330,
651,
62,
19608,
292,
316,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
198,
11748,
4866,
198,
6738,
256,
80,
36020,
133... | 3.304348 | 46 |
# Copyright 2017-2020 object_database Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from object_database.web import cells as cells
from object_database.web.CellsTestPage import CellsTestPage
| [
2,
220,
220,
15069,
2177,
12,
42334,
2134,
62,
48806,
46665,
198,
2,
198,
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
220,
345,
743,
407,
779,
428,
2393,
2845,
287... | 3.589109 | 202 |
#!/usr/bin/python
# coding=utf-8
from urllib import request
import json
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
19617,
28,
40477,
12,
23,
198,
198,
6738,
2956,
297,
571,
1330,
2581,
198,
11748,
33918,
628
] | 2.846154 | 26 |
from django.forms.utils import flatatt
from django.utils import six
from django.utils.encoding import force_text, force_str
from django.utils.html import format_html
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
try:
from urlparse import urlparse, parse_qs, urlunparse
except ImportError:
from urllib.parse import urlparse, parse_qs, urlunparse
from .text import text_value
from .exceptions import SpectreError
def render_link_tag(url, rel="stylesheet", media=None):
"""
Build a link tag
"""
url_dict = url_to_attrs_dict(url, url_attr="href")
url_dict.setdefault("href", url_dict.pop("url", None))
url_dict["rel"] = rel
if media:
url_dict["media"] = media
return render_tag("link", attrs=url_dict, close=False)
def render_tag(tag, attrs=None, content=None, close=True):
"""
Render a HTML tag
"""
builder = "<{tag}{attrs}>{content}"
if content or close:
builder += "</{tag}>"
return format_html(
builder,
tag=tag,
attrs=mark_safe(flatatt(attrs)) if attrs else "",
content=text_value(content),
)
def url_to_attrs_dict(url, url_attr):
"""
Sanitize url dict as used in django-spectre settings.
"""
result = dict()
# If url is not a string, it should be a dict
if isinstance(url, six.string_types):
url_value = url
else:
try:
url_value = url["url"]
except TypeError:
raise SpectreError(
'Function "url_to_attrs_dict" expects a string or a dict with key "url".'
)
crossorigin = url.get("crossorigin", None)
integrity = url.get("integrity", None)
if crossorigin:
result["crossorigin"] = crossorigin
if integrity:
result["integrity"] = integrity
result[url_attr] = url_value
return result
def url_replace_param(url, name, value):
"""
Replace a GET parameter in an URL
"""
url_components = urlparse(force_str(url))
query_params = parse_qs(url_components.query)
query_params[name] = value
query = urlencode(query_params, doseq=True)
return force_text(urlunparse([
url_components.scheme,
url_components.netloc,
url_components.path,
url_components.params,
query,
url_components.fragment,
]))
| [
6738,
42625,
14208,
13,
23914,
13,
26791,
1330,
6228,
1078,
198,
6738,
42625,
14208,
13,
26791,
1330,
2237,
198,
6738,
42625,
14208,
13,
26791,
13,
12685,
7656,
1330,
2700,
62,
5239,
11,
2700,
62,
2536,
198,
6738,
42625,
14208,
13,
2679... | 2.412475 | 994 |
import javalang
import json
from tqdm import tqdm
import re
# def concatParentheses(code_list):
# temp_code = ''
# new_code_list = []
# for code in code_list:
# code = code.strip()
# if temp_code == '' and code != '{' and code != '}':
with open(f'./../../../../Downloads/code2comment_dataset/dataset/TLcodesum_dataset/test/test.json', 'r') as f:
json_lines = f.readlines()
split_code = {}
for line in json_lines:
json_data = json.loads(line.strip())
ids = json_data['id']
code = json_data['code'].strip()
code_list = code.split('\n')
splited = []
for c in code_list[:40]:
c = c.strip()
tokens = list(javalang.tokenizer.tokenize(c))
tks = []
for tk in tokens:
if tk.__class__.__name__ == 'String' or tk.__class__.__name__ == 'Character':
tks.append('STR_')
elif 'Integer' in tk.__class__.__name__ or 'FloatingPoint' in tk.__class__.__name__:
tks.append('NUM_')
else:
tks.append(tk.value)
splited.append(" ".join(tks))
split_code[ids] = splited
print(len(split_code)) | [
11748,
474,
9226,
648,
198,
11748,
33918,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
11748,
302,
628,
198,
2,
825,
1673,
265,
24546,
39815,
7,
8189,
62,
4868,
2599,
198,
2,
220,
220,
220,
220,
20218,
62,
8189,
796,
10148,... | 2.099819 | 551 |
import os.path
# Compatibility import
from Bcfg2.Bcfg2Py3k import ConfigParser
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
c = ConfigParser.ConfigParser()
#This needs to be configurable one day somehow
c.read(['./bcfg2.conf'])
defaults = {'database_engine':'sqlite3',
'database_name':'./dev.db',
'database_user':'',
'database_password':'',
'database_host':'',
'database_port':3306,
'default_mx':'localhost',
'priority':10,
'authorized_group':'admins',
}
if c.has_section('hostbase'):
options = dict(c.items('hostbase'))
else:
options = defaults
# Django settings for Hostbase project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Root', 'root'),
)
MANAGERS = ADMINS
# 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
DATABASE_ENGINE = options['database_engine']
# Or path to database file if using sqlite3.
DATABASE_NAME = options['database_name']
# Not used with sqlite3.
DATABASE_USER = options['database_user']
# Not used with sqlite3.
DATABASE_PASSWORD = options['database_password']
# Set to empty string for localhost. Not used with sqlite3.
DATABASE_HOST = options['database_host']
# Set to empty string for default. Not used with sqlite3.
DATABASE_PORT = int(options['database_port'])
# Local time zone for this installation. All choices can be found here:
# http://docs.djangoproject.com/en/dev/ref/settings/#time-zone
try:
TIME_ZONE = c.get('statistics', 'time_zone')
except:
TIME_ZONE = None
# enter the defauly MX record machines will get in Hostbase
# this setting may move elsewhere eventually
DEFAULT_MX = options['default_mx']
PRIORITY = int(options['priority'])
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# Uncomment a backend below if you would like to use it for authentication
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',
'Bcfg2.Server.Hostbase.backends.NISBackend',
#'Bcfg2.Server.Hostbase.backends.LDAPBacken',
)
# enter an NIS group name you'd like to give access to edit hostbase records
AUTHORIZED_GROUP = options['authorized_group']
#create login url area:
import django.contrib.auth
django.contrib.auth.LOGIN_URL = '/login'
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
# Just for development
SERVE_MEDIA = DEBUG
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = '/site_media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '*%=fv=yh9zur&gvt4&*d#84o(cy^-*$ox-v1e9%32pzf2*qu#s'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.core.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.request",
"django.core.context_processors.media",
# Django development version.
# "django.core.context_processors.csrf",
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.doc.XViewMiddleware',
)
ROOT_URLCONF = 'Bcfg2.Server.Hostbase.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates".
# Always use forward slashes, even on Windows.
'/usr/lib/python2.3/site-packages/Bcfg2/Server/Hostbase/hostbase/webtemplates',
'/usr/lib/python2.4/site-packages/Bcfg2/Server/Hostbase/hostbase/webtemplates',
'/usr/lib/python2.3/site-packages/Bcfg2/Server/Hostbase/templates',
'/usr/lib/python2.4/site-packages/Bcfg2/Server/Hostbase/templates',
'/usr/share/bcfg2/Hostbase/templates',
os.path.join(PROJECT_ROOT, 'templates'),
os.path.join(PROJECT_ROOT, 'hostbase/webtemplates'),
)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.humanize',
'Bcfg2.Server.Hostbase.hostbase',
)
LOGIN_URL = '/login/'
| [
11748,
28686,
13,
6978,
198,
2,
46021,
1330,
198,
6738,
347,
37581,
17,
13,
33,
37581,
17,
20519,
18,
74,
1330,
17056,
46677,
198,
198,
31190,
23680,
62,
13252,
2394,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
... | 2.535097 | 1,966 |
"""This script handles test cases for the login module"""
import unittest
from flask import request, session
from app.views import create_app
class LoginTestCase(unittest.TestCase):
"""Login Feature specific Test Cases will go here"""
def setUp(self):
"""Setup test app"""
self.app = create_app('tests.config')
def tearDown(self):
"""Destroy test app"""
def login(self, c, username, password):
"""Login helper function"""
return c.post('/login', data=dict(
username=username,
password=password,
), follow_redirects=True)
def logout(self, c):
"""Logout helper function"""
return c.get('/logout', follow_redirects=True)
def test_login_logout_good(self):
"""Test Login and Logout using helper functions"""
with self.app.test_client() as c:
rv = self.login(c,
self.app.config['USERNAME'],
self.app.config['PASSWORD'])
# Check log in successful
self.assertEqual(request.path, '/query')
# Check logged in session variable is set to True
self.assertTrue(session.get('logged_in'))
rv = self.logout(c)
# Check log out successful
self.assertEqual(request.path, '/query')
# Check logged in session variable is set to None
self.assertEqual(session.get('logged_in'), None)
def test_login_invalid_username(self):
"""Test login with Invalid Username"""
with self.app.test_client() as c:
# Check for Invalid Username
rv = self.login(c,
self.app.config['USERNAME'] + 'x',
self.app.config['PASSWORD'])
# Check log in fails
self.assertEqual(request.path, '/login')
# Check logged in session variable is set to None
self.assertEqual(session.get('logged_in'), None)
# Check if error message is displayed for invalid username
self.assertIn(b"Invalid Username. Please try again.", rv.data)
def test_login_invalid_password(self):
"""Test login with Invalid Password"""
with self.app.test_client() as c:
# Check for Invalid Password
rv = self.login(c,
self.app.config['USERNAME'],
self.app.config['PASSWORD'] + 'x')
# Check log in fails
self.assertEqual(request.path, '/login')
# Check logged in session variable is set to None
self.assertEqual(session.get('logged_in'), None)
# Check if error message is displayed for invalid password
self.assertIn(b"Invalid Password. Please try again.", rv.data)
if __name__ == '__main__':
unittest.main() | [
37811,
1212,
4226,
17105,
1332,
2663,
329,
262,
17594,
8265,
37811,
198,
198,
11748,
555,
715,
395,
198,
6738,
42903,
1330,
2581,
11,
6246,
198,
6738,
598,
13,
33571,
1330,
2251,
62,
1324,
628,
198,
4871,
23093,
14402,
20448,
7,
403,
... | 2.229584 | 1,298 |
from django.urls import path
from .views import (testPageView, HomePageView, AboutPageView)
app_name = "pages"
urlpatterns = [
path('', HomePageView.as_view(), name="home"),
path('about/', AboutPageView.as_view(), name="about"),
path('test/', testPageView, name="test")
] | [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
33571,
1330,
357,
9288,
9876,
7680,
11,
5995,
9876,
7680,
11,
7994,
9876,
7680,
8,
198,
198,
1324,
62,
3672,
796,
366,
31126,
1,
198,
198,
6371,
33279,
82,
796,
685,... | 2.803922 | 102 |
from operators.sourcetoredshift import SourceToRedshiftOperator
from operators.createtables import CreateTablesOperator
from operators.loadfact import FactOperator
from operators.loaddimension import DimensionOperator
__all__ = [
'SourceToRedshiftOperator',
'CreateTablesOperator'
'LoadFactOperator',
'LoadDimensionOperator',
] | [
6738,
12879,
13,
82,
454,
66,
316,
1850,
30846,
1330,
8090,
2514,
7738,
30846,
18843,
1352,
198,
6738,
12879,
13,
20123,
316,
2977,
1330,
13610,
51,
2977,
18843,
1352,
198,
6738,
12879,
13,
2220,
22584,
1330,
19020,
18843,
1352,
198,
67... | 3.372549 | 102 |
# https://adventofcode.com/2019/day/6
import os
orbit_data = {}
with open(os.path.join(os.path.dirname(__file__), "../data.txt"), "r") as f:
for line in f:
# child > parent
relation = line.strip().split(")")
orbit_data[relation[1]] = relation[0]
print(process(orbit_data))
| [
2,
3740,
1378,
324,
1151,
1659,
8189,
13,
785,
14,
23344,
14,
820,
14,
21,
198,
198,
11748,
28686,
628,
198,
198,
42594,
62,
7890,
796,
23884,
198,
4480,
1280,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
8... | 2.343511 | 131 |
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .. import helpers
from intel import config
import os
import pytest
import tempfile
import time
| [
2,
15069,
357,
66,
8,
2177,
8180,
10501,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
... | 3.819672 | 183 |
import cv2
import numpy as np
from pypylon import pylon
from threading import Lock
# Image event handler.
if __name__ == '__main__':
CAMERA_SERIAL = "23517286"
cam = PylonCapture(CAMERA_SERIAL)
cam.connect()
while True:
res, image = cam.grab()
if res:
image_resized = cv2.resize(image, (640, 480))
cv2.imshow('Image', image_resized)
k = cv2.waitKey(1)
if k == ord('q'):
break
cam.close() | [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
279,
4464,
15158,
1330,
279,
15158,
198,
6738,
4704,
278,
1330,
13656,
198,
198,
2,
7412,
1785,
21360,
13,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,... | 2.024793 | 242 |
# @Description: gaussianBlur.py
# @Author: 孤烟逐云zjy
# @Date: 2020/4/23 9:21
# @SoftWare: PyCharm
# @CSDN: https://blog.csdn.net/zjy123078_zjy
# @博客园: https://www.cnblogs.com/guyan-2020/
import cv2 as cv
import numpy as np
src = cv.imread("./images/raindropGirl.jpg")
# 统计时间
# getTickCount()函数返回操作系统启动到当前所经过的计时周期数
t1 = cv.getTickCount()
Gaussian_noise(src)
t2 = cv.getTickCount()
# getTickFrequency()函数,返回CPU的频率
time = (t2 - t1)/cv.getTickFrequency()
print("耗费时间:%s"%time) # 耗费时间:10.2369628
# 高斯模糊对高斯噪声有一定的抑制作用
# GaussianBlur(src, ksize, sigmaX, dst=None, sigmaY=None, borderType=None)
# dst = cv.GaussianBlur(src, (0, 0), 15)
# cv.imshow("Gaussian image 2", dst)
Gaussian_demo(src)
cv.waitKey(0)
cv.destroyAllWindows() | [
2,
2488,
11828,
25,
31986,
31562,
3629,
333,
13,
9078,
198,
2,
2488,
13838,
25,
10263,
255,
97,
163,
225,
253,
34460,
238,
12859,
239,
89,
73,
88,
198,
2,
2488,
10430,
25,
12131,
14,
19,
14,
1954,
860,
25,
2481,
198,
2,
2488,
18... | 1.612832 | 452 |
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from io import BytesIO
from ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_plugin import JenkinsPlugin
from ansible.module_utils.common._collections_compat import Mapping
GITHUB_DATA = {"url": u'https://api.github.com/repos/ansible/ansible',
"response": b"""
{
"id": 3638964,
"name": "ansible",
"full_name": "ansible/ansible",
"owner": {
"login": "ansible",
"id": 1507452,
"avatar_url": "https://avatars2.githubusercontent.com/u/1507452?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/ansible",
"html_url": "https://github.com/ansible",
"followers_url": "https://api.github.com/users/ansible/followers",
"following_url": "https://api.github.com/users/ansible/following{/other_user}",
"gists_url": "https://api.github.com/users/ansible/gists{/gist_id}",
"starred_url": "https://api.github.com/users/ansible/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/ansible/subscriptions",
"organizations_url": "https://api.github.com/users/ansible/orgs",
"repos_url": "https://api.github.com/users/ansible/repos",
"events_url": "https://api.github.com/users/ansible/events{/privacy}",
"received_events_url": "https://api.github.com/users/ansible/received_events",
"type": "Organization",
"site_admin": false
},
"private": false,
"html_url": "https://github.com/ansible/ansible",
"description": "Ansible is a radically simple IT automation platform that makes your applications and systems easier to deploy.",
"fork": false,
"url": "https://api.github.com/repos/ansible/ansible",
"forks_url": "https://api.github.com/repos/ansible/ansible/forks",
"keys_url": "https://api.github.com/repos/ansible/ansible/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/ansible/ansible/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/ansible/ansible/teams",
"hooks_url": "https://api.github.com/repos/ansible/ansible/hooks",
"issue_events_url": "https://api.github.com/repos/ansible/ansible/issues/events{/number}",
"events_url": "https://api.github.com/repos/ansible/ansible/events",
"assignees_url": "https://api.github.com/repos/ansible/ansible/assignees{/user}",
"branches_url": "https://api.github.com/repos/ansible/ansible/branches{/branch}",
"tags_url": "https://api.github.com/repos/ansible/ansible/tags",
"blobs_url": "https://api.github.com/repos/ansible/ansible/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/ansible/ansible/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/ansible/ansible/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/ansible/ansible/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/ansible/ansible/statuses/{sha}",
"languages_url": "https://api.github.com/repos/ansible/ansible/languages",
"stargazers_url": "https://api.github.com/repos/ansible/ansible/stargazers",
"contributors_url": "https://api.github.com/repos/ansible/ansible/contributors",
"subscribers_url": "https://api.github.com/repos/ansible/ansible/subscribers",
"subscription_url": "https://api.github.com/repos/ansible/ansible/subscription",
"commits_url": "https://api.github.com/repos/ansible/ansible/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/ansible/ansible/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/ansible/ansible/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/ansible/ansible/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/ansible/ansible/contents/{+path}",
"compare_url": "https://api.github.com/repos/ansible/ansible/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/ansible/ansible/merges",
"archive_url": "https://api.github.com/repos/ansible/ansible/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/ansible/ansible/downloads",
"issues_url": "https://api.github.com/repos/ansible/ansible/issues{/number}",
"pulls_url": "https://api.github.com/repos/ansible/ansible/pulls{/number}",
"milestones_url": "https://api.github.com/repos/ansible/ansible/milestones{/number}",
"notifications_url": "https://api.github.com/repos/ansible/ansible/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/ansible/ansible/labels{/name}",
"releases_url": "https://api.github.com/repos/ansible/ansible/releases{/id}",
"deployments_url": "https://api.github.com/repos/ansible/ansible/deployments",
"created_at": "2012-03-06T14:58:02Z",
"updated_at": "2017-09-19T18:10:54Z",
"pushed_at": "2017-09-19T18:04:51Z",
"git_url": "git://github.com/ansible/ansible.git",
"ssh_url": "git@github.com:ansible/ansible.git",
"clone_url": "https://github.com/ansible/ansible.git",
"svn_url": "https://github.com/ansible/ansible",
"homepage": "https://www.ansible.com/",
"size": 91174,
"stargazers_count": 25552,
"watchers_count": 25552,
"language": "Python",
"has_issues": true,
"has_projects": true,
"has_downloads": true,
"has_wiki": false,
"has_pages": false,
"forks_count": 8893,
"mirror_url": null,
"open_issues_count": 4283,
"forks": 8893,
"open_issues": 4283,
"watchers": 25552,
"default_branch": "devel",
"organization": {
"login": "ansible",
"id": 1507452,
"avatar_url": "https://avatars2.githubusercontent.com/u/1507452?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/ansible",
"html_url": "https://github.com/ansible",
"followers_url": "https://api.github.com/users/ansible/followers",
"following_url": "https://api.github.com/users/ansible/following{/other_user}",
"gists_url": "https://api.github.com/users/ansible/gists{/gist_id}",
"starred_url": "https://api.github.com/users/ansible/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/ansible/subscriptions",
"organizations_url": "https://api.github.com/users/ansible/orgs",
"repos_url": "https://api.github.com/users/ansible/repos",
"events_url": "https://api.github.com/users/ansible/events{/privacy}",
"received_events_url": "https://api.github.com/users/ansible/received_events",
"type": "Organization",
"site_admin": false
},
"network_count": 8893,
"subscribers_count": 1733
}
"""
}
def test__get_json_data(mocker):
"test the json conversion of _get_url_data"
timeout = 30
params = {
'url': GITHUB_DATA['url'],
'timeout': timeout
}
module = mocker.Mock()
module.params = params
JenkinsPlugin._csrf_enabled = pass_function
JenkinsPlugin._get_installed_plugins = pass_function
JenkinsPlugin._get_url_data = mocker.Mock()
JenkinsPlugin._get_url_data.return_value = BytesIO(GITHUB_DATA['response'])
jenkins_plugin = JenkinsPlugin(module)
json_data = jenkins_plugin._get_json_data(
"{url}".format(url=GITHUB_DATA['url']),
'CSRF')
assert isinstance(json_data, Mapping)
def test__new_fallback_urls(mocker):
"test generation of new fallback URLs"
params = {
"url": "http://fake.jenkins.server",
"timeout": 30,
"name": "test-plugin",
"version": "1.2.3",
"updates_url": ["https://some.base.url"],
"latest_plugins_url_segments": ["test_latest"],
"versioned_plugins_url_segments": ["ansible", "versioned_plugins"],
"update_json_url_segment": ["unreachable", "updates/update-center.json"],
}
module = mocker.Mock()
module.params = params
JenkinsPlugin._csrf_enabled = pass_function
JenkinsPlugin._get_installed_plugins = pass_function
jenkins_plugin = JenkinsPlugin(module)
latest_urls = jenkins_plugin._get_latest_plugin_urls()
assert isInList(latest_urls, "https://some.base.url/test_latest/test-plugin.hpi")
versioned_urls = jenkins_plugin._get_versioned_plugin_urls()
assert isInList(versioned_urls, "https://some.base.url/versioned_plugins/test-plugin/1.2.3/test-plugin.hpi")
json_urls = jenkins_plugin._get_update_center_urls()
assert isInList(json_urls, "https://some.base.url/updates/update-center.json")
| [
2,
22961,
3611,
5094,
13789,
410,
18,
13,
15,
10,
357,
3826,
27975,
45761,
393,
3740,
1378,
2503,
13,
41791,
13,
2398,
14,
677,
4541,
14,
70,
489,
12,
18,
13,
15,
13,
14116,
8,
198,
198,
6738,
11593,
37443,
834,
1330,
357,
48546,
... | 2.488035 | 3,385 |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy import orm
from keystone.common import driver_hints
from keystone.common import sql
from keystone.common import utils
from keystone import exception
from keystone.i18n import _
from keystone import identity
class UserGroupMembership(sql.ModelBase, sql.DictBase):
"""Group membership join table."""
__tablename__ = 'user_group_membership'
user_id = sql.Column(sql.String(64),
sql.ForeignKey('user.id'),
primary_key=True)
group_id = sql.Column(sql.String(64),
sql.ForeignKey('group.id'),
primary_key=True)
| [
2,
15069,
2321,
4946,
25896,
5693,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
... | 2.90625 | 448 |
""" The default strategy that iterates through the whole parameter space """
from __future__ import print_function
from kernel_tuner.searchspace import Searchspace
def tune(runner, kernel_options, device_options, tuning_options):
""" Tune all instances in the parameter space
:params runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times. And a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
# create the searchspace
searchspace = Searchspace(tuning_options, runner.dev.max_threads, sort=True)
# call the runner
results, env = runner.run(searchspace.list, kernel_options, tuning_options)
return results, env
| [
37811,
383,
4277,
4811,
326,
11629,
689,
832,
262,
2187,
11507,
2272,
37227,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
6738,
9720,
62,
28286,
263,
13,
12947,
13200,
1330,
11140,
13200,
628,
198,
4299,
14009,
7,
16737... | 3.474227 | 388 |
#include <bits/stdc++.h>
using namespace std;
};
class SinglyLinkedList {
public:
SinglyLinkedListNode *head;
SinglyLinkedListNode *tail;
SinglyLinkedList() {
this->head = nullptr;
this->tail = nullptr;
}
};
void print_singly_linked_list(SinglyLinkedListNode* node, string sep, ofstream& fout) {
while (node) {
fout << node->data;
node = node->next;
if (node) {
fout << sep;
}
}
}
void free_singly_linked_list(SinglyLinkedListNode* node) {
while (node) {
SinglyLinkedListNode* temp = node;
node = node->next;
free(temp);
}
}
// Complete the insertNodeAtHead function below.
/*
* For your reference:
*
* SinglyLinkedListNode {
* int data;
* SinglyLinkedListNode* next;
* };
*
*/
SinglyLinkedListNode* insertNodeAtHead(SinglyLinkedListNode* llist, int data) {
SinglyLinkedListNode* temp = (SinglyLinkedListNode*)(malloc(sizeof(SinglyLinkedListNode)));
temp->data = data;
if(llist == NULL){
llist = temp;
}
else{
SinglyLinkedListNode* t;
t = llist;
llist = temp;
temp->next = t;
}
return llist;
}
int main()
{
ofstream fout(getenv("OUTPUT_PATH"));
SinglyLinkedList* llist = new SinglyLinkedList();
int llist_count;
cin >> llist_count;
cin.ignore(numeric_limits<streamsize>::max(), '\n');
for (int i = 0; i < llist_count; i++) {
int llist_item;
cin >> llist_item;
cin.ignore(numeric_limits<streamsize>::max(), '\n');
SinglyLinkedListNode* llist_head = insertNodeAtHead(llist->head, llist_item);
llist->head = llist_head;
}
print_singly_linked_list(llist->head, "\n", fout);
fout << "\n";
free_singly_linked_list(llist->head);
fout.close();
return 0;
}
| [
2,
17256,
1279,
9895,
14,
301,
17896,
4880,
13,
71,
29,
198,
198,
3500,
25745,
14367,
26,
198,
19629,
198,
198,
4871,
311,
4420,
11280,
276,
8053,
1391,
198,
220,
220,
220,
1171,
25,
198,
220,
220,
220,
220,
220,
220,
220,
311,
44... | 2.192857 | 840 |
#!/usr/bin/python
#
# Copyright 2002-2021 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
PyCOMPSs Util - logs
=====================
This file contains all logging methods.
"""
import os
import logging
import json
from contextlib import contextmanager
from logging import config
from pycompss.util.exceptions import PyCOMPSsException
CONFIG_FUNC = config.dictConfig
# Keep configs to avoid read the cfg many times
CONFIGS = dict()
def get_logging_cfg_file(log_level):
# type: (str) -> str
""" Retrieves the logging configuration file.
:param log_level: Log level [ 'trace'|'debug'|'info'|'api'|'off' ].
:return: Logging configuration file.
:raise PyCOMPSsException: Unsupported log level.
"""
cfg_files = {
'trace': 'logging_debug.json', # trace level == debug level
'debug': 'logging_debug.json',
'info': 'logging_info.json',
'api': 'logging_off.json', # api level == off level
'off': 'logging_off.json'
}
if log_level in cfg_files:
logging_cfg_file = cfg_files[log_level]
return logging_cfg_file
else:
raise PyCOMPSsException("Unsupported logging level.")
def clean_log_configs():
# type: () -> None
""" Removes all stored log configurations.
:return: None
"""
CONFIGS.clear()
def __read_log_config_file__(log_config_file):
# type: (str) -> dict
""" Reads the given config file.
If already read, retrieves from global dictionary.
:param log_config_file: Configuration file to read.
:return: Configuration file content.
"""
if log_config_file in CONFIGS:
conf = CONFIGS[log_config_file]
else:
with open(log_config_file, 'rt') as lcf_fd:
conf = json.loads(lcf_fd.read())
CONFIGS[log_config_file] = conf
return conf
def init_logging(log_config_file, log_path):
# type: (str, str) -> None
""" Master logging initialization.
:param log_config_file: Log file name.
:param log_path: Json log files path.
:return: None
"""
if os.path.exists(log_config_file):
conf = __read_log_config_file__(log_config_file)
handler = "error_file_handler"
if handler in conf["handlers"]:
errors_file = conf["handlers"][handler].get("filename")
conf["handlers"][handler]["filename"] = log_path + errors_file
handler = "info_file_handler"
if handler in conf["handlers"]:
info_file = conf["handlers"][handler].get("filename")
conf["handlers"][handler]["filename"] = log_path + info_file
handler = "debug_file_handler"
if handler in conf["handlers"]:
debug_file = conf["handlers"][handler].get("filename")
conf["handlers"][handler]["filename"] = log_path + debug_file
CONFIG_FUNC(conf)
else:
logging.basicConfig(level=logging.INFO) # NOSONAR
def init_logging_worker(log_config_file, tracing):
# type: (str, bool) -> None
""" Worker logging initialization.
:param log_config_file: Log file name.
:param tracing: If tracing is enabled (the log dir changes).
:return: None
"""
if os.path.exists(log_config_file):
conf = __read_log_config_file__(log_config_file)
if tracing:
# The workspace is within the folder 'workspace/python'
# Remove the last folder
handler = "error_worker_file_handler"
if handler in conf["handlers"]:
errors_file = conf["handlers"][handler].get("filename")
conf["handlers"][handler]["filename"] = '../' + errors_file
handler = "info_worker_file_handler"
if handler in conf["handlers"]:
info_file = conf["handlers"][handler].get("filename")
conf["handlers"][handler]["filename"] = '../' + info_file
handler = "debug_worker_file_handler"
if handler in conf["handlers"]:
debug_file = conf["handlers"][handler].get("filename")
conf["handlers"][handler]["filename"] = '../' + debug_file
CONFIG_FUNC(conf)
else:
logging.basicConfig(level=logging.INFO) # NOSONAR
def init_logging_worker_piper(log_config_file, log_dir):
# type: (str, str) -> None
""" Worker logging initialization.
:param log_config_file: Log file name.
:param log_dir: Log directory.
:return: None
"""
if os.path.exists(log_config_file):
conf = __read_log_config_file__(log_config_file)
handler = "error_worker_file_handler"
if handler in conf["handlers"]:
errors_file = conf["handlers"][handler].get("filename")
conf["handlers"][handler]["filename"] = os.path.join(log_dir, errors_file)
handler = "info_worker_file_handler"
if handler in conf["handlers"]:
info_file = conf["handlers"][handler].get("filename")
conf["handlers"][handler]["filename"] = os.path.join(log_dir, info_file)
handler = "debug_worker_file_handler"
if handler in conf["handlers"]:
debug_file = conf["handlers"][handler].get("filename")
conf["handlers"][handler]["filename"] = os.path.join(log_dir, debug_file)
CONFIG_FUNC(conf)
else:
logging.basicConfig(level=logging.INFO) # NOSONAR
def update_logger_handlers(log_config_file, job_out=None, job_err=None):
# type: (str, str, str) -> None
""" Worker logging update.
:param log_config_file: Log file name.
:param job_out: out file path.
:param job_err: err file path.
:return: None
"""
if os.path.exists(log_config_file):
conf = __read_log_config_file__(log_config_file)
if job_err:
handler = "error_worker_file_handler"
if handler in conf["handlers"]:
conf["handlers"][handler]["filename"] = job_err
if job_out:
handler = "info_worker_file_handler"
if handler in conf["handlers"]:
conf["handlers"][handler]["filename"] = job_out
handler = "debug_worker_file_handler"
if handler in conf["handlers"]:
conf["handlers"][handler]["filename"] = job_out
CONFIG_FUNC(conf)
else:
logging.basicConfig(level=logging.INFO) # NOSONAR
@contextmanager
def swap_logger_name(logger, new_name):
# type: (typing.Any, str) -> None
""" Swaps the current logger with the new one
:param logger: Logger facility.
:param new_name: Logger name.
:return: None
"""
previous_name = logger.name
logger.name = new_name
yield # here the code runs
logger.name = previous_name
@contextmanager
def keep_logger():
# type: () -> None
""" Do nothing with the logger.
It is used when the swap_logger_name does not need to be applied.
:return: None
"""
yield # here the code runs
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
198,
2,
220,
15069,
6244,
12,
1238,
2481,
15142,
3115,
785,
48074,
3337,
357,
2503,
13,
65,
1416,
13,
274,
8,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
... | 2.468945 | 3,043 |
"""
double each entry in a given array recursively
E.g [1, 2, 3, 4, 5]
"""
if __name__ == '__main__':
a = [1,2,3,4,5]
print(double_entries(a))
| [
37811,
198,
220,
220,
220,
4274,
1123,
5726,
287,
257,
1813,
7177,
664,
1834,
2280,
198,
220,
220,
220,
412,
13,
70,
685,
16,
11,
362,
11,
513,
11,
604,
11,
642,
60,
198,
37811,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
8... | 2 | 83 |
""" Record class that stores {Key: List[Value]} dict
"""
import atexit
import typing as t
from collections import defaultdict
from pathlib import Path
import click
import pandas as pd
_SAVE_FN = t.Callable[[pd.DataFrame, Path], None]
class Record:
"""Available colors: black, red, green, yellow, blue, magenta, cyan, white"""
_SAVE_FUNCTIONS = {
"csv": _save_csv,
"jsonl": _save_jsonl,
"parquet": _save_parquet,
}
| [
37811,
13266,
1398,
326,
7000,
1391,
9218,
25,
7343,
58,
11395,
48999,
8633,
198,
37811,
198,
11748,
379,
37023,
198,
11748,
19720,
355,
256,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
3904,
... | 2.668605 | 172 |
import dg
from .xmlio import XMLProtocol, Node
from .xmpp import client, Client, JabberID, XMPPError, XMPPMessage, XMPPPresence
| [
11748,
288,
70,
198,
6738,
764,
19875,
952,
1330,
23735,
19703,
4668,
11,
19081,
198,
6738,
764,
87,
76,
381,
220,
1330,
5456,
11,
20985,
11,
24404,
527,
2389,
11,
1395,
7378,
47,
12331,
11,
1395,
7378,
5868,
7589,
11,
1395,
7378,
1... | 2.804348 | 46 |
#!/usr/bin/env python
"""
Module for coordinating mpi
Author: {0} ({1})
This program is part of CADEE, the framework for
Computer-Aided Directed Evolution of Enzymes.
"""
__author__ = "Beat Amrein"
__email__ = "beat.amrein@gmail.com"
try:
from mpi4py import MPI
except ImportError:
print('mpi4py not found')
try:
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
root = 0
size = comm.Get_size()
mpi = True
except NameError:
comm = 0
rank = 0
root = 0
size = 0
mpi = False
print('MPI disabled')
class Tags(object):
""" MPI tags """
DONE = 1
INPUTS = 2
LOG = 3
IO_TICKET = 4
IO_REQUEST = 5
IO_FINISHED = 6
RESULTS = 7
SHUTDOWN = 8
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
26796,
329,
35449,
285,
14415,
198,
198,
13838,
25,
1391,
15,
92,
37913,
16,
30072,
198,
198,
1212,
1430,
318,
636,
286,
37292,
6500,
11,
262,
9355,
329,
198,
34556,
... | 2.292063 | 315 |
from email.policy import default
from email.utils import parsedate
import os
import mlflow
import mlflow.sklearn
import argparse
import time
import numpy as np
import pandas as pd
from sklearn.metrics import mean_absolute_percentage_error, mean_squared_error, mean_absolute_error,r2_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import ElasticNet
if __name__ == "__main__":
args = argparse.ArgumentParser()
args.add_argument("--alpha","-a", type=float, default=0.5)
args.add_argument("--l1_ratio","-l1", type=float, default=0.5)
parsed_args = args.parse_args()
main(alpha=parsed_args.alpha , l1_ratio=parsed_args.l1_ratio)
| [
6738,
3053,
13,
30586,
1330,
4277,
198,
6738,
3053,
13,
26791,
1330,
44267,
378,
198,
11748,
28686,
198,
11748,
285,
1652,
9319,
198,
11748,
285,
1652,
9319,
13,
8135,
35720,
198,
11748,
1822,
29572,
198,
11748,
640,
220,
198,
11748,
29... | 2.708812 | 261 |
import unittest
from datetime import datetime
from unittest.mock import MagicMock
from app.main import calendar
from app.main.holiday import Holiday
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
6139,
44,
735,
198,
198,
6738,
598,
13,
12417,
1330,
11845,
198,
6738,
598,
13,
12417,
13,
37689,
1330,
22770,
628,
198,
198... | 3.125 | 64 |
#!/usr/bin/env python
#########################################
# Zeitcoin AMP Class
#########################################
import sys, os, time, threading, hashlib, random
from zeitcoindb import hashtable
from zeitcoinutility import utility,encyption
from zeitcointrans import transactions
from twisted.protocols import amp
from twisted.protocols.amp import AMP
from twisted.web import server
from twisted.application import service, internet
from twisted.internet import reactor, defer, endpoints, task, threads
from twisted.internet.defer import inlineCallbacks, Deferred
from twisted.internet.endpoints import TCP4ClientEndpoint, connectProtocol
from twisted.internet.protocol import Factory
from twisted.internet.threads import deferToThread
ALIVE=1
FNAME='zeitcoin'
ADDRESS='127.0.0.1'
PORT=1234
GUID='1'
TXCOINHASH=''
TXADDRESS=''
TXMESSAGE=''
##########################################################################
# move to another file protocol which holds classes Ziet, clientcommands #
##########################################################################
@defer.deferredGenerator
@defer.deferredGenerator
cc=clientcommands()
ht=hashtable()
dbpool=ht.tconnectdb(self.filename)
#conn,c=ht.connectdb(FNAME)
wfd = defer.waitForDeferred(ht.tgetallguidht(dbpool))
yield wfd
guidlist = wfd.getResult()
#guidlist = ht.getallguidht(c)
for guid in guidlist:
address,port=ht.getaddress(c,guid)
wfd = defer.waitForDeferred(cc.doping(address,port,guid))
yield wfd
result = cc.doping(str(address),int(port),guid)
ht.tclosedb(dbpool)
return
@defer.deferredGenerator
cc=clientcommands()
ht=hashtable()
dbpool=ht.tconnectdb(self.filename)
#conn,c=ht.connectdb(FNAME)
wfd = defer.waitForDeferred(ht.tgetallguidht(dbpool))
yield wfd
guidlist = wfd.getResult()
#guidlist = ht.getallguidht(c)
for guid in guidlist:
address,port=ht.getaddress(c,guid)
wfd = defer.waitForDeferred(cc.doping(address,port,guid))
yield wfd
result = cc.doping(str(address),int(port),guid)
ht.closedb(conn)
return
@defer.deferredGenerator
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
29113,
7804,
2,
198,
2,
47447,
3630,
3001,
47,
5016,
198,
29113,
7804,
2,
198,
198,
11748,
25064,
11,
28686,
11,
640,
11,
4704,
278,
11,
12234,
8019,
11,
4738,
198,
6738,
41271,
270,... | 2.833554 | 757 |
# -*- coding: utf-8 -*-
import os
import unittest
from context import icsuit
from icsuit.cli.loader import findmod, importmod, listmod
class TestLoader(unittest.TestCase):
"""Unit test cases for cli.loader."""
if __name__ == '__main__':
unittest.main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
11748,
555,
715,
395,
198,
198,
6738,
4732,
1330,
220,
873,
5013,
198,
198,
6738,
220,
873,
5013,
13,
44506,
13,
29356,
1330,
1064,
4666,
11,
1... | 2.66 | 100 |
from day_03.solution import parse, part_1, part_2
| [
6738,
1110,
62,
3070,
13,
82,
2122,
1330,
21136,
11,
636,
62,
16,
11,
636,
62,
17,
628,
198
] | 2.736842 | 19 |
from flask import Blueprint
from flask import current_app
from flask import jsonify
from flask import g
from flask import request
from eth_app.db import get_connection, get_data, add_data
from eth_app.API import Api
import os
import pandas as pd
import numpy as np
API_KEY = os.getenv("API_KEY")
bp = Blueprint('api', __name__)
@bp.route("/test")
@bp.route("/ethelement", methods=["POST", "GET"])
@bp.route("/ethelementfiltered", methods=["GET"])
| [
6738,
42903,
1330,
39932,
198,
6738,
42903,
1330,
1459,
62,
1324,
198,
6738,
42903,
1330,
33918,
1958,
198,
6738,
42903,
1330,
308,
198,
6738,
42903,
1330,
2581,
198,
198,
6738,
4555,
62,
1324,
13,
9945,
1330,
651,
62,
38659,
11,
651,
... | 3.006579 | 152 |
"""Helper module"""
| [
37811,
47429,
8265,
37811,
628
] | 4.2 | 5 |
import typer
from mltk import cli
@cli.root_cli.command("commander", cls=cli.VariableArgumentParsingCommand)
def silabs_commander_command(ctx: typer.Context):
"""Silab's Commander Utility
This utility allows for accessing a Silab's embedded device via JLink.
For more details issue command: mltk commander --help
"""
# Import all required packages here instead of at top
# to help improve the CLI's responsiveness
from mltk.utils.commander import issue_command
logger = cli.get_logger()
try:
issue_command(*ctx.meta['vargs'], outfile=logger)
except Exception as e:
cli.handle_exception('Commander failed', e)
| [
198,
11748,
1259,
525,
198,
6738,
285,
2528,
74,
1330,
537,
72,
220,
628,
198,
198,
31,
44506,
13,
15763,
62,
44506,
13,
21812,
7203,
9503,
4066,
1600,
537,
82,
28,
44506,
13,
43015,
28100,
1713,
47,
945,
278,
21575,
8,
198,
4299,
... | 2.838174 | 241 |
import tornado.template
from pypugjs import Compiler as _Compiler
from pypugjs.exceptions import CurrentlyNotSupported
from pypugjs.runtime import attrs, escape, iteration
from pypugjs.utils import process
ATTRS_FUNC = '__pypugjs_attrs'
ESCAPE_FUNC = '__pypugjs_escape'
ITER_FUNC = '__pypugjs_iter'
# Patch tornado template engine for preprocess PugJS templates
| [
11748,
33718,
13,
28243,
198,
198,
6738,
279,
4464,
1018,
8457,
1330,
3082,
5329,
355,
4808,
7293,
5329,
198,
6738,
279,
4464,
1018,
8457,
13,
1069,
11755,
1330,
16888,
3673,
48181,
198,
6738,
279,
4464,
1018,
8457,
13,
43282,
1330,
708... | 2.99187 | 123 |
# -*- coding: utf-8 -*-
# 签约服务
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
2,
13328,
255,
122,
163,
118,
99,
17312,
235,
27950,
94,
628
] | 1.259259 | 27 |
import argparse
import time
import parsl
from parsl.app.app import App
from parsl.tests.configs.local_threads import config
parsl.clear()
parsl.load(config)
@App('python')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--count", default="10",
help="Count of apps to launch")
parser.add_argument("-d", "--debug", action='store_true',
help="Count of apps to launch")
args = parser.parse_args()
if args.debug:
parsl.set_stream_logger()
x = test_parallel_for()
# x = test_parallel_for(int(args.count))
# x = test_stdout()
# raise_error(0)
| [
11748,
1822,
29572,
198,
11748,
640,
198,
198,
11748,
13544,
75,
198,
6738,
13544,
75,
13,
1324,
13,
1324,
1330,
2034,
198,
6738,
13544,
75,
13,
41989,
13,
11250,
82,
13,
12001,
62,
16663,
82,
1330,
4566,
198,
198,
79,
945,
75,
13,
... | 2.33677 | 291 |
#attendance_file is the source file and list_file is the destination file
__author__ = "Vallisha M"
__version__ = '0.0.0'
from os import listdir
from os.path import isfile, join
import tkinter as tk
import pandas as pd
import re
from tkinter import *
from tkinter import filedialog
from tkinter.filedialog import askopenfile
threshold = 20 #Minimum time of attendance to be marked present
root=Tk()
# creating a label for
# name using widget Label
root.wm_title("Attendance Marker")
canvas=Canvas(root,width=600,height=400)
canvas.create_image(0, 0,anchor=NW)
canvas.create_text(580, 390, font = ("Purisa", 9), text = 'v'+__version__, fill = 'blue')
canvas.create_text(300, 100, font = ("Purisa", 20), text = "Attendance Marker", fill = 'black')
canvas.create_text(310, 200, font = ("Purisa", 12), text = "Click on one of the buttons to continue", fill = 'black')
canvas.create_text(300, 390, font = ("Purisa", 10), text = "Make sure you have attendance and class list files closed while using Attendance Marker.\n", fill = 'black')
button1 = Button(text = "File", command = file_submit)
button1.configure(width = 10,background = 'gray', activebackground = "#33B5E5", relief = RAISED)
button1_window = canvas.create_window(200, 220, anchor=NW, window=button1)
button2 = Button(text = "Folder", command = folder_submit)
button2.configure(width = 10,background = 'gray', activebackground = "#33B5E5", relief = RAISED)
button2_window = canvas.create_window(340, 220, anchor=NW, window=button2)
canvas.pack()
root.mainloop()
| [
2,
1078,
437,
590,
62,
7753,
318,
262,
2723,
2393,
290,
1351,
62,
7753,
318,
262,
10965,
2393,
201,
198,
834,
9800,
834,
796,
366,
53,
439,
19388,
337,
1,
201,
198,
834,
9641,
834,
796,
705,
15,
13,
15,
13,
15,
6,
201,
198,
20... | 2.834829 | 557 |
from mab import algs
import numpy as np
| [
6738,
285,
397,
1330,
435,
14542,
198,
11748,
299,
32152,
355,
45941,
628,
628,
628,
628,
198
] | 2.823529 | 17 |
# antioch
# Copyright (c) 1999-2019 Phil Christensen
#
# See LICENSE for details
"""
Provide testing for the codebase
"""
import pkg_resources as pkg
from antioch.core import bootstrap
from django.conf import settings
from django.db import connection | [
2,
1885,
41097,
198,
2,
15069,
357,
66,
8,
7358,
12,
23344,
4543,
49195,
198,
2,
198,
2,
4091,
38559,
24290,
329,
3307,
198,
198,
37811,
198,
15946,
485,
4856,
329,
262,
2438,
8692,
198,
37811,
198,
198,
11748,
279,
10025,
62,
37540... | 3.513889 | 72 |
# -*- encoding: utf-8 -*-
#
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp. #
#
# Authors: Svetlana Shturm <sshturm@mirantis.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Remove extra indexes
Revision ID: b6ae66d05e3
Revises: 17738166b91
Create Date: 2013-08-19 15:54:43.529222
"""
# revision identifiers, used by Alembic.
revision = 'b6ae66d05e3'
down_revision = '17738166b91'
from alembic import op
import sqlalchemy as sa
INDEXES = (
# ([dialects], table_name, index_name, create/delete, uniq/not_uniq,
# length_limited)
(['mysql', 'sqlite', 'postgresql'],
'resource',
'resource_user_id_project_id_key',
('user_id', 'project_id'), True, False, True),
(['mysql'], 'source', 'id', ('id',), False, True, False))
| [
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
3050,
12,
9804,
4946,
25896,
5693,
198,
2,
15069,
2321,
12,
6390,
19764,
11421,
13,
1303,
198,
2,
198,
2,
46665,
25,
311,
16809,
75,
2271,
911,
365... | 2.907449 | 443 |
from django import template
from django.http import Http404
from django.conf import settings
from copy import copy
register = template.Library()
REMOVE_ORDER = '<a href="{}"><i class="icon-remove"></i></a>'
sort_directions = {
'asc': {'icon':'icon-arrow-up', 'inverse': 'desc'},
'desc': {'icon':'icon-arrow-down', 'inverse': 'asc'},
'': {'icon':'icon-arrow-down', 'inverse': 'asc'},
}
def sortable_header(parser, token):
"""
Parses a tag that's supposed to be in this format: {% sortable_header field title %}
"""
bits = [b.strip('"\'') for b in token.split_contents()]
if len(bits) < 2:
raise TemplateSyntaxError, "anchor tag takes at least 1 argument"
try:
title = bits[2]
except IndexError:
title = bits[1].capitalize()
return SortableHeaderNode(bits[1].strip(), title.strip())
class SortableHeaderNode(template.Node):
"""
Renders an <a> HTML tag with a link which href attribute
includes the field on which we sort and the direction.
and adds an up or down arrow if the field is the one
currently being sorted on.
Eg.
{% anchor name Name %} generates
<a href="/the/current/path/?sort=name" title="Name">Name</a>
"""
sortable_header = register.tag(sortable_header)
| [
6738,
42625,
14208,
1330,
11055,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
26429,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
4866,
1330,
4866,
198,
30238,
796,
11055,
13,
23377,
3419,
198,
198,
2200,
11770,
6089... | 2.648374 | 492 |
import re
import os
from tkinter import *
from tkinter import Tk
from tkinter.filedialog import askdirectory
import time
# todo:
# make renaming temporary, file names go back to their original after the parsing computation is done
# or find a way to parse without needing to rename (probably using regex).
rootWindow = Tk().withdraw()
folder = askdirectory(title="Choose the folder in which the .eml files are located") + "/"
mail_data = {}
# rename
print("Renaming folders...")
file_count = 0
folders = os.listdir(folder)
for file in folders:
if str(file_count) + ".eml" in folders:
file_count += 1
continue
os.rename(folder + file, str(file_count) + ".eml")
file_count += 1
# parsing
for dirpath, dirname, filename in os.walk(folder):
for file in filename:
file_data = open(folder + file, "r")
try:
for line in file_data:
line = line.rstrip()
email_parsed = re.findall("^From: .* <(\S+)>", line)
if len(email_parsed) > 0:
email = email_parsed[0]
print("Email found:", email)
mail_data[email] = mail_data.get(email, 0) + 1
except UnicodeDecodeError:
continue
# show results sorted by number of ocurrences
# transforms dict into a list of tuples, inverts (key, val) for (val, key) and sorts it reversely
print("\nTake a look at the results:\n")
inversed_dict = [(count, email) for email, count in mail_data.items()]
for count, email in sorted(inversed_dict, reverse=True):
print("Email:", email)
print("Count:", count)
print("\n")
# user interaction for filtering by keyword
while True:
user_filter = input("Type a keyword and I will see what I can find. Type quit to, well... quit:\n")
if user_filter == 'quit':
print("Bye!")
break
print("The chosen keyword was:", user_filter, "\n")
found_keyword_results = []
for email, count in mail_data.items():
if user_filter in email:
found_keyword_results.append((count, email))
if len(found_keyword_results) == 0:
print("Nothing was found!\n")
continue
else:
print("Found something!\n")
for count, email in sorted(found_keyword_results, reverse=True):
print("Email", email)
print("Count", count)
print("\n")
| [
11748,
302,
198,
11748,
28686,
198,
6738,
256,
74,
3849,
1330,
1635,
198,
6738,
256,
74,
3849,
1330,
309,
74,
198,
6738,
256,
74,
3849,
13,
69,
3902,
498,
519,
1330,
1265,
34945,
198,
11748,
640,
198,
198,
2,
284,
4598,
25,
198,
2... | 2.85486 | 751 |
from django.conf.urls import url, include
from .program.views import (EpisodeView, EpisodeProgramView, EpisodePlaylistView,
ProgramListView, IndexView, EpisodesByTagView,
SearchEpisodeByTagsView, EpisodesBySearchedTagsView,
ProgramDetailView)
from .blog.views import (EntryView, ArchiveView, EntriesByTagView, SearchEntriesByTagsView,
EntriesBySearchedTagsView)
from .tag_search.views import SearchByTagsView, SearchResultsView
from .partners.views import MediaPatronageRequestConfirmView, MediaPatronageRequestView
urlpatterns = [
url(r'^$', IndexView.as_view(), name='index'),
url(r'^serie/$', ProgramListView.as_view(), name='program_list'),
url(r'^series/(?P<slug>[\w_-]+)/?$', ProgramDetailView.as_view(), name='program_detail'),
url(r'^watch/series/(?P<program_slug>[\w_-]+)/(?P<episode_slug>[\w_-]+)/?$',
EpisodeProgramView.as_view(), name='program_episode_detail'),
url(r'^watch/playlist/(?P<playlist_slug>[\w_-]+)/(?P<episode_slug>[\w_-]+)/?$',
EpisodePlaylistView.as_view(), name='playlist_episode_detail'),
url(r'^watch/(?P<episode_slug>[\w_-]+)/?$',
EpisodeView.as_view(), name='episode_detail'),
url(r'^odcinki/tag/(?P<tags>[\w\%\&\+\._-]+)/?$', EpisodesByTagView.as_view(), name='program_episode_tag'),
url(r'^odcinki/szukaj/?$', SearchEpisodeByTagsView.as_view(), name='program_episode_search'),
url(r'^odcinki/szukaj/(?P<tags>[\w\%\&\+\._-]+)/?$', EpisodesBySearchedTagsView.as_view(),
name='program_episode_search_by_tags'),
url(r'^events/partners/add/confirm/?$', MediaPatronageRequestConfirmView.as_view(), name='event_request_confirm'),
url(r'^events/partners/add/?$', MediaPatronageRequestView.as_view(), name='event_request'),
url(r'^blog/tag/(?P<tags>[\w\%\&\+\._-]+)/?$', EntriesByTagView.as_view(), name='blog_tag'),
url(r'^blog/szukaj/?$', SearchEntriesByTagsView.as_view(), name='blog_search'),
url(r'^blog/szukaj/(?P<tags>[\w\%\&\+\._-]+)/?$', EntriesBySearchedTagsView.as_view(), name='blog_search_by_tags'),
url(r'^blog/(?P<year>\d+)/(?P<month>\d+)/(?P<slug>[\w_-]+)/?$', EntryView.as_view(), name='blog_entry'),
url(r'^blog/?$', ArchiveView.as_view(), name='blog_archive'),
url(r'^szukaj/?$', SearchByTagsView.as_view(), name='tag_search_search'),
url(r'^szukaj/(?P<tags>[\w\%\&\+\._-]+)/?$', SearchResultsView.as_view(), name='tag_search_results'),
url(r'^tinymce/', include('tinymce.urls')),
url(r'^', include('rotv_apps.shortener.urls')),
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
11,
2291,
198,
198,
6738,
764,
23065,
13,
33571,
1330,
357,
23758,
7680,
11,
7922,
15167,
7680,
11,
7922,
11002,
4868,
7680,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.283333 | 1,140 |
from .processors import InputExample, InputFeatures, DataProcessor, SquadFeatures
from .processors import glue_output_modes, glue_processors, glue_tasks_num_labels, glue_convert_examples_to_features
from .processors import squad_convert_examples_to_features, SquadExample, SquadV1Processor, SquadV2Processor
from .processors import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
from .metrics import is_sklearn_available
if is_sklearn_available():
from .metrics import glue_compute_metrics, xnli_compute_metrics
| [
6738,
764,
14681,
669,
1330,
23412,
16281,
11,
23412,
23595,
11,
6060,
18709,
273,
11,
11630,
23595,
201,
198,
6738,
764,
14681,
669,
1330,
22749,
62,
22915,
62,
76,
4147,
11,
22749,
62,
14681,
669,
11,
22749,
62,
83,
6791,
62,
22510,... | 3.011236 | 178 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-20 22:06
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
24,
13,
16,
319,
1584,
12,
2999,
12,
1238,
2534,
25,
3312,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
1... | 2.719298 | 57 |
from __future__ import absolute_import
from .scanner import *
from .django import *
from .celery import *
| [
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
764,
35836,
1008,
1330,
1635,
198,
6738,
764,
28241,
14208,
1330,
1635,
198,
6738,
764,
7015,
88,
1330,
1635,
628
] | 3.40625 | 32 |
import requests
from flask import request, url_for
from ..core import server
URL_JOONYOUNG = 'https://hooks.slack.com/services/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'
# https://api.slack.com/docs/messages/builder
def _send_slack_message(url, text, attachments=[]):
"""
성공여부를 리턴합니다
"""
if not url:
return False
if not text:
return False
if server.app.config.get('DEBUG'):
url = URL_JOONYOUNG
r = requests.post(
url,
json={
"text": text,
"attachments": [a.__json__() for a in attachments]
})
return (r.text == 'ok')
| [
11748,
7007,
198,
198,
6738,
42903,
1330,
2581,
11,
19016,
62,
1640,
198,
198,
6738,
11485,
7295,
1330,
4382,
198,
198,
21886,
62,
45006,
1340,
36981,
10503,
796,
705,
5450,
1378,
25480,
82,
13,
6649,
441,
13,
785,
14,
30416,
14,
51,
... | 1.975309 | 324 |
import os
import time
import numpy as np
import tensorflow as tf
from multiprocessing import Process, Manager,freeze_support
import sys
import matplotlib.pyplot as plt
import math
sys.path.append('..')
import torch
from core.config import MachineConfig
from playground.Non_DAG_with_Energy.algorithm.random_algorithm import RandomTaskalgorithm
from playground.Non_DAG_with_Energy.algorithm.tetris import Tetris
from playground.Non_DAG_with_Energy.algorithm.first_fit import FirstFitTaskalgorithm
# from playground.Non_DAG_with_Energy.algorithm.DeepJS.DRL import RLAlgorithm
# from playground.Non_DAG_with_Energy.algorithm.DeepJS.agent import Agent
# from playground.Non_DAG_with_Energy.algorithm.DeepJS.brain import Brain
#
# from playground.Non_DAG_with_Energy.algorithm.DeepJS.reward_giver import AverageCompletionRewardGiver
from playground.Non_DAG_with_Energy.utils.csv_reader import CSVReader
from playground.Non_DAG_with_Energy.utils.feature_functions import features_extract_func_ac, features_normalize_func_ac
from playground.Non_DAG_with_Energy.utils.tools import multiprocessing_run, average_completion, average_slowdown, \
average_waiting_time
from playground.Non_DAG_with_Energy.utils.episode import Episode
from playground.Non_DAG_with_Energy.algorithm.dueling_DDQN.DQN_algorithm import DQNAlgorithm
# os.environ['CUDA_VISIBLE_DEVICES'] = '0'
tf.compat.v1.disable_eager_execution()
np.random.seed(41)
tf.random.set_seed(1)
torch.manual_seed(1)
# ************************ Parameters Setting Start ************************
machines_number = 5
jobs_len = 2
n_iter = 2000
jobs_csv = './jobs.csv'
# brain = Brain(9)
# reward_giver = AverageCompletionRewardGiver()
# features_extract_func = features_extract_func_ac
# features_normalize_func = features_normalize_func_ac
#
# model_dir = './agents/%s' % name
# # ************************ Parameters Setting End ************************
#
# if not os.path.isdir(model_dir):
# os.makedirs(model_dir)
#
# agent = Agent(name, brain, 1, reward_to_go=True, nn_baseline=True, normalize_advantages=True,
# model_save_path='%s/model.ckpt' % model_dir)
machine_configs = [MachineConfig(64, 1, 1) for i in range(machines_number)]
csv_reader = CSVReader(jobs_csv)
jobs_configs = csv_reader.generate(10, jobs_len)
print("-----------------------------------------first_fit------------------------------------------")
tic = time.time()
algorithm = FirstFitTaskalgorithm()
episode = Episode(machine_configs, jobs_configs, algorithm, "./tetris.json")
episode.run()
print("total energy consume", episode.simulation.monitor[0].total_energy_consume)
print(episode.env.now, time.time() - tic, average_completion(episode),average_waiting_time(episode), average_slowdown(episode))
print("-----------------------------------------drl------------------------------------------")
algorithm = DQNAlgorithm(machine_configs)
total_energy_consume_list = []
total_called_num_list = []
total_reward_of_ep_list = []
average_completion_list = []
average_waitting_time_list = []
for i in range(n_iter):
# try:
print("iter:", i)
tic = time.time()
episode = Episode(machine_configs, jobs_configs, algorithm, "./event_file.json")
if __name__=="__main__":
freeze_support()
episode.run()
total_called_num_list.append(algorithm.total_called_num)
print("called nums", algorithm.total_called_num)
total_energy_consume_list.append(episode.simulation.cluster.monitor.total_energy_consume)
print("total energy consume", episode.simulation.cluster.monitor.total_energy_consume)
total_reward_of_ep_list.append(algorithm.total_reward_of_ep)
print("total_reward_of_ep", algorithm.total_reward_of_ep)
average_completion_res = average_completion(episode)
average_completion_list.append(average_completion_res)
average_waitting_time_res = average_waiting_time(episode)
average_waitting_time_list.append(average_waitting_time_res)
print(episode.env.now, time.time() - tic, average_completion_res, average_waitting_time_res,
average_slowdown(episode))
algorithm.reset()
if(i%100==0 and i!=0):
# plot reward of ep per 100 cycles
fig, ax = plt.subplots(1, 1)
ax.plot(np.arange(len(total_reward_of_ep_list)),
total_reward_of_ep_list,'g-'
)
# plt.xlabel('l1:total_reward_of_ep_'+str(i))
# plt.savefig("./total_reward_of_ep.png")
# plt.show()
ax1=ax.twinx()
ax1.plot(np.arange(len(total_energy_consume_list)),
total_energy_consume_list,'b--'
)
ax2=ax.twinx()
ax2.plot(np.arange(len(total_called_num_list)),
total_called_num_list,'r--'
)
ax.set_ylabel('reward', color='g')
ax1.set_ylabel('energy', color='b') # 设置Y1轴标题
ax2.set_ylabel('call_nums', color='r')
ax1.set_xlabel('ep_'+str(i))
plt.savefig("./energyconsume.png")
plt.show()
# algorithm.dqn.plot_cost()
# except BaseException as e:
# print(e)
print(total_energy_consume_list)
print(total_called_num_list)
print(average_completion_list)
plt.plot(np.arange(len(total_called_num_list)), total_called_num_list)
plt.xlabel('l1:called num')
plt.savefig("./callednum.png")
plt.show()
plt.plot(np.arange(len(total_energy_consume_list)), total_energy_consume_list)
plt.xlabel('l1:energy_consume')
plt.savefig("./energyconsume.png")
plt.show()
plt.plot(np.arange(len(total_reward_of_ep_list)), total_reward_of_ep_list)
plt.xlabel('l1:total_reward_of_ep')
plt.savefig("./total_reward_of_ep.png")
plt.show()
plt.plot(np.arange(len(average_completion_list)), average_completion_list)
plt.xlabel('l1:average_completion')
plt.savefig("./average_completion_list.png")
plt.show()
plt.plot(np.arange(len(average_waitting_time_list)), average_waitting_time_list)
plt.xlabel('l1:average_waitting_time_list')
plt.savefig("./average_waitting_time_list.png")
plt.show()
energy_consume_divided_by_reward = []
for i in range(len(total_energy_consume_list)):
energy_consume_divided_by_reward.append(total_energy_consume_list[i] / total_reward_of_ep_list[i])
plt.plot(np.arange(len(energy_consume_divided_by_reward)), energy_consume_divided_by_reward)
plt.xlabel('l1:energy_consume/total_reward_of_ep')
plt.show()
algorithm.dqn.plot_cost()
print("-----------------------------------------test-phase------------------------------------------")
tic = time.time()
jobs_configs = csv_reader.generate(50, 50)
episode = Episode(machine_configs, jobs_configs, algorithm, "./event_file.json")
episode.run()
print("called nums", algorithm.total_called_num)
print("total energy consume", algorithm.total_energy_consume)
print("total_reward_of_ep", algorithm.total_reward_of_ep)
print(episode.env.now, time.time() - tic, average_completion(episode),average_waiting_time(episode), average_slowdown(episode))
algorithm.reset()
print("-----------------------------------------tetris------------------------------------------")
tic = time.time()
algorithm = Tetris()
episode = Episode(machine_configs, jobs_configs, algorithm, "./tetris.json")
episode.run()
print("total energy consume", episode.simulation.monitor[0].total_energy_consume)
print(episode.env.now, time.time() - tic, average_completion(episode), average_waiting_time(episode),average_slowdown(episode))
print("-----------------------------------------first_fit------------------------------------------")
tic = time.time()
algorithm = FirstFitTaskalgorithm()
episode = Episode(machine_configs, jobs_configs, algorithm, "./tetris.json")
episode.run()
print("total energy consume", episode.simulation.monitor[0].total_energy_consume)
print(episode.env.now, time.time() - tic, average_completion(episode),average_waiting_time(episode), average_slowdown(episode))
print("-----------------------------------------random------------------------------------------")
random_ec = []
random_ac = []
random_wt=[]
for i in range(100):
tic = time.time()
algorithm = RandomTaskalgorithm()
episode = Episode(machine_configs, jobs_configs, algorithm, "./tetris.json")
episode.run()
# print("total energy consume", episode.simulation.monitor[0].total_energy_consume)
random_ec.append(episode.simulation.monitor[0].total_energy_consume)
random_ac.append(average_completion(episode))
random_wt.append(average_waiting_time(episode))
# print(episode.env.now, time.time() - tic, average_completion(episode), average_waiting_time(episode),average_slowdown(episode))
print(random_ec)
print(random_ac)
print("min_random_ec", random_ec[np.argmin(np.array(random_ec))])
print("min_random_ac", random_ac[np.argmin(np.array(random_ac))])
print("min_random_wt", random_ac[np.argmin(np.array(random_wt))])
print("mean_random_ec", np.average(np.array(random_ec)))
print("mean_random_ac", np.average(np.array(random_ac)))
print("mean_random_wt", np.average(np.array(random_wt)))
#
# for itr in range(n_iter):
# tic = time.time()
# print("********** Iteration %i ************" % itr)
# processes = []
#
# manager = Manager()
# trajectories = manager.list([])
# makespans = manager.list([])
# average_completions = manager.list([])
# average_slowdowns = manager.list([])
# for i in range(n_episode):
# algorithm = RLAlgorithm(agent, reward_giver, features_extract_func=features_extract_func,
# features_normalize_func=features_normalize_func)
# episode = Episode(machine_configs, jobs_configs, algorithm, None)
# algorithm.reward_giver.attach(episode.simulation)
# p = Process(target=multiprocessing_run,
# args=(episode, trajectories, makespans, average_completions, average_slowdowns))
#
# processes.append(p)
#
# for p in processes:
# p.start()
#
# for p in processes:
# p.join()
#
# agent.log('makespan', np.mean(makespans), agent.global_step)
# agent.log('average_completions', np.mean(average_completions), agent.global_step)
# agent.log('average_slowdowns', np.mean(average_slowdowns), agent.global_step)
#
# toc = time.time()
#
# print(np.mean(makespans), toc - tic, np.mean(average_completions), np.mean(average_slowdowns))
#
# all_observations = []
# all_actions = []
# all_rewards = []
# for trajectory in trajectories:
# observations = []
# actions = []
# rewards = []
# for node in trajectory:
# observations.append(node.observation)
# actions.append(node.action)
# rewards.append(node.reward)
#
# all_observations.append(observations)
# all_actions.append(actions)
# all_rewards.append(rewards)
#
# all_q_s, all_advantages = agent.estimate_return(all_rewards)
#
# agent.update_parameters(all_observations, all_actions, all_advantages)
#
# agent.save()
| [
11748,
28686,
198,
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
18540,
305,
919,
278,
1330,
10854,
11,
9142,
11,
5787,
2736,
62,
11284,
198,
11748,
25064,
198,
11748,
2603,
29487,
... | 2.620888 | 4,165 |
"""
Sample script to demonstrate inheritance
"""
from Net2Scripting import init_logging
from Net2Scripting.net2xs import Net2XS
# Operator id 0 is System Engineer
OPERATOR_ID = 0
# Default Net2 password
OPERATOR_PWD = "net2"
# When running on the machine where Net2 is installed
NET2_SERVER = "localhost"
class MyNet2XS(Net2XS):
"""Inherited class for additional functionality
"""
global Net2XS
def get_current_user_id(self):
"""Return logged on user id
"""
# Place a lock, for thread safety
# (if you don't have threads you kan skip this)
with Net2XS._lock:
# Basic check if client connection is valid
self._check_client()
return self._client.CurrentUserID
def get_client_members(self):
"""Return all Net2 client object members using the introspective
Python dir function
"""
with Net2XS._lock:
self._check_client()
return dir(self._client)
if __name__ == "__main__":
# Init log4net
init_logging()
with MyNet2XS(NET2_SERVER) as net2:
# Authenticate
net2.authenticate(OPERATOR_ID, OPERATOR_PWD)
# Show current user id
print("Current used id:", net2.get_current_user_id())
# Show all members
print("Net2 client members:", net2.get_client_members())
| [
37811,
198,
36674,
4226,
284,
10176,
24155,
198,
37811,
198,
198,
6738,
3433,
17,
7391,
278,
1330,
2315,
62,
6404,
2667,
198,
6738,
3433,
17,
7391,
278,
13,
3262,
17,
34223,
1330,
3433,
17,
55,
50,
198,
198,
2,
35946,
4686,
657,
318... | 2.490909 | 550 |
from django.db import migrations, models
import django.db.models.deletion
| [
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
628
] | 3.26087 | 23 |
# -*- coding: utf-8 -*-
import json
import os
import re
import logging
from logging.handlers import RotatingFileHandler
from logging import getLogger
from time import sleep
from flask import Flask, url_for, render_template, request, redirect, \
jsonify, send_from_directory, send_file, Response
import gevent
from gevent.wsgi import WSGIServer
from gevent.queue import Queue
from werkzeug.contrib.cache import SimpleCache
from sqlalchemy import desc
from sqlalchemy.orm import eagerload
from db_models.shared_models import db
from db_models.datasets import Dataset
from db_models.models import Model
import deeplearning.runner as runner
from deeplearning.log_subscriber import train_logger
import common.utils as ds_util
from common import strings
from gevent.wsgi import WSGIServer
__version__ = '0.7.0'
app = Flask(__name__)
app.config.from_envvar('CSLAIER_CONFIG')
cslaier_config_params = ('DATABASE_PATH', 'UPLOADED_RAW_FILE',
'UPLOADED_FILE', 'PREPARED_DATA', 'TRAINED_DATA',
'INSPECTION_TEMP', 'LOG_DIR')
# WebApp settings
app.config['CSLAIER_ROOT'] = os.getcwd()
normalize_config_path()
# Logging settings
formatter = logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'
)
debug_log = os.path.join(app.config['LOG_DIR'], 'debug.log')
error_log = os.path.join(app.config['LOG_DIR'], 'error.log')
debug_file_handler = RotatingFileHandler(
debug_log, maxBytes=100000000, backupCount=10
)
debug_file_handler.setLevel(logging.INFO)
debug_file_handler.setFormatter(formatter)
error_file_handler = RotatingFileHandler(
error_log, maxBytes=100000000, backupCount=10
)
error_file_handler.setLevel(logging.ERROR)
error_file_handler.setFormatter(formatter)
loggers = (app.logger, getLogger('db_models.datasets'),
getLogger('db_models.models'),
getLogger('deeplearning.runner'),
getLogger('deeplearning.prepare.prepare_for_imagenet'),
getLogger('deeplearning.prepare.prepare_for_lstm'),
getLogger('deeplearning.train.train_lstm'),
getLogger('deeplearning.train.train_imagenet'))
for logger in loggers:
logger.setLevel(logging.INFO)
logger.addHandler(debug_file_handler)
logger.addHandler(error_file_handler)
# Database settings
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = app.config['DEBUG'] # DEBUG用設定
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + app.config['DATABASE_PATH']
db.init_app(app)
cache = SimpleCache()
@app.route('/')
@app.route('/files/<int:dataset_id>/<path:image_path>')
@app.route('/layers/<int:id>/<int:epoch>/<string:filename>')
@app.route('/inspection/<string:filename>')
@app.route('/dataset/show/<int:id>/')
@app.route('/dataset/show/<int:id>/<path:category>')
@app.route('/dataset/remove/<int:id>')
@app.route('/dataset/remove/<int:id>/category/', methods=['POST'])
@app.route('/dataset/<int:id>/create/category/', methods=['POST'])
@app.route('/dataset/<int:id>/upload/<path:category_path>', methods=['POST'])
@app.route('/dataset/<int:id>/remove/file/<path:category_path>', methods=['POST'])
@app.route('/models/new', methods=['GET', 'POST'])
@app.route('/models/show/<int:id>')
@app.route('/models/inspect/', methods=['POST'])
@app.route('/admin/')
@app.route('/admin/models/')
@app.route('/admin/models/remove/', methods=['POST'])
@app.route('/admin/datasets/')
@app.route('/admin/datasets/remove/<int:id>')
@app.route('/admin/datasets/update/', methods=['POST'])
# =====================================================================
# API
# =====================================================================
@app.route('/api/dataset/get/<int:offset>/')
@app.route('/api/dataset/upload', methods=['POST'])
@app.route('/api/dataset/set_path', methods=['POST'])
@app.route('/api/dataset/<int:id>/get/text/full/<path:filepath>')
@app.route('/api/models/get/model_template/<string:model_name>')
@app.route('/api/models/remove', methods=['POST'])
@app.route('/api/models/check_train_progress')
@app.route('/api/models/start/train', methods=['POST'])
@app.route('/api/models/resume/train', methods=['POST'])
@app.route('/api/models/<int:id>/get/train_data/log/')
# SSE "protocol" is described here: http://mzl.la/UPFyxY
@app.route('/api/models/<int:model_id>/get/train_data/log/subscribe')
@app.route('/api/models/<int:id>/get/train_data/graph/')
@app.route('/api/models/<int:id>/get/layer_names/<int:epoch>')
@app.route('/api/models/<int:id>/get/layer_viz/<int:epoch>/<string:layer_name>')
@app.route('/api/models/lstm/generate_text/', methods=['POST'])
@app.route('/api/models/download/files/', methods=['POST'])
@app.route('/api/models/terminate/train/', methods=['POST'])
# =====================================================================
# misc.
# =====================================================================
if __name__ == '__main__':
app.debug = app.config['DEBUG']
server = WSGIServer((app.config['HOST'], app.config['PORT']), app)
print '[INFO] Starting CSLAIER server on {}:{}'.format(app.config['HOST'], app.config['PORT'])
server.serve_forever()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
18931,
198,
6738,
18931,
13,
4993,
8116,
1330,
18481,
803,
8979,
25060,
198,
6738,
18931,
1330,
651,
11187,
1... | 2.592629 | 2,008 |
try: import cPickle as pickle
except: import pickle
from os import environ
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import itertools
from matplotlib import rc
import numpy as np
import pandas as pd
import seaborn as sns
import pickle as pkl
import functools
font = {'family': 'serif', 'serif': ['computer modern roman']}
rc('text', usetex=False)
rc('font', weight='bold')
rc('font', size=2)
rc('lines', markersize=2.5)
rc('lines', linewidth=0.5)
rc('xtick', labelsize=1)
rc('ytick', labelsize=1)
rc('axes', labelsize='small')
rc('axes', labelweight='bold')
rc('axes', titlesize='small')
rc('axes', linewidth=1)
rc("axes", labelsize = 15)
rc("axes", labelpad=4)
plt.rc('font', **font)
sns.set_style("darkgrid", {"xtick.bottom":True, "ytick.left":True})
sns.set_context("paper", font_scale=0.55 , rc={"lines.linewidth": 0.1, "xtick.major.size":0.1, "ytick.major.size":0.1, "legend.fontsize":0.001})
print(sns.plotting_context())
import pdb
import networkx as nx
| [
28311,
25,
1330,
269,
31686,
293,
355,
2298,
293,
198,
16341,
25,
1330,
2298,
293,
198,
6738,
28686,
1330,
551,
2268,
198,
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
10786,
46384,
11537,
198,
11748,
2603,
29487,
8019,
1... | 2.582902 | 386 |
from django.db import models
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
628,
198
] | 3.444444 | 9 |
#! /usr/bin/env python -*- coding: utf-8 -*-
"""
Name:
test_colours.py
Desscription:
Unit tests for mac_colours.
Version:
1 - Inital release
Author:
J.MacGrillen <macgrillen@gmail.com>
Copyright:
Copyright (c) John MacGrillen. All rights reserved.
"""
import maclib.mac_colours as colours
def test_limit_01_inrange():
"""
Feed the function a number in the range 0-255 to
make sure it works when fed with a number we expect.
"""
test_number: int = 128
assert colours.limit_number(test_number) == test_number
def test_limit_02_over_limit():
"""
Test that if the number is over 255 it returns 255.
"""
assert colours.limit_number(400) == 255
def test_limit_03_minus_number():
"""
What happens if we use a minus number?
"""
assert colours.limit_number(-10) == 0
def test_colour_01_inrange():
"""
Test the return values align with what we think
they should be.
"""
expected_value = '#3ca184'
assert colours.rgb2hex(red=60, green=161, blue=132) == expected_value
def test_colour_02_too_high():
"""
Test what happens when one of the values is too high
"""
expected_value = '#6effbd'
assert colours.rgb2hex(red=110, green=500, blue=189) == expected_value
def test_colour_03_minus_number():
"""
Test minus numbers return as a zero
"""
expected_value = '#6e00bd'
assert colours.rgb2hex(red=110, green=-50, blue=189) == expected_value
if __name__ == "__main__":
pass
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
6530,
25,
198,
220,
220,
220,
220,
220,
220,
220,
1332,
62,
4033,
4662,
13,
9078,
198,
220,
22... | 2.561779 | 607 |
from django.test import TestCase
from pandora import Box
class MultiThreadedTests(TestCase):
"""
Please contribute a patch if you know how to easily test the multithreaded
behaviour of pandora's box.
"""
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
19798,
5799,
1330,
8315,
628,
198,
198,
4871,
15237,
16818,
276,
51,
3558,
7,
14402,
20448,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
4222,
8676,
257,
8529,
611,
3... | 3.231884 | 69 |
from nornir import InitNornir
from nornir.plugins.tasks.networking import netmiko_send_command
from nornir.plugins.functions.text import print_result
from nornir_test.nornir_utilities import nornir_set_creds
if __name__ == "__main__":
main()
| [
6738,
299,
1211,
343,
1330,
44707,
45,
1211,
343,
198,
6738,
299,
1211,
343,
13,
37390,
13,
83,
6791,
13,
3262,
16090,
1330,
2010,
76,
12125,
62,
21280,
62,
21812,
198,
6738,
299,
1211,
343,
13,
37390,
13,
12543,
2733,
13,
5239,
133... | 2.747253 | 91 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import csv
import os
import logging
import gzip
__license__ = "X11"
def read_csv_as_object(path):
"""
Read CSV lines as objects.
"""
results = []
with open(path) as stream:
reader = csv.reader(stream, delimiter=",", quotechar='"')
header = next(reader)
for row in reader:
new_object = {}
for index in range(0, len(row)):
new_object[header[index]] = row[index]
results.append(new_object)
return results
if __name__ == "__main__":
raise Exception("This module can be used only as a library!")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
11748,
33918,
198,
11748,
269,
21370,
198,
11748,
28686,
198,
11748,
18931,
198,
11748,
308,
13344,
198,
198,
834,
... | 2.325175 | 286 |
# -*- coding: utf-8 -*-
"""Optimus.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1E2Wmkz43H4FgDefXwK8kyLjgGGC7y_4w
"""
import random
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.model_selection import train_test_split
import itertools
# import random, string
# password = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(20))
# #Download ngrok
# ! wget -q -c -nc https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip
# ! unzip -qq -n ngrok-stable-linux-amd64.zip
# #Setup sshd
# ! apt-get install -qq -o=Dpkg::Use-Pty=0 openssh-server pwgen > /dev/null
# #Set root password
# ! echo root:$password | chpasswd
# ! mkdir -p /var/run/sshd
# ! echo "PermitRootLogin yes" >> /etc/ssh/sshd_config
# ! echo "PasswordAuthentication yes" >> /etc/ssh/sshd_config
# ! echo "LD_LIBRARY_PATH=/usr/lib64-nvidia" >> /root/.bashrc
# ! echo "export LD_LIBRARY_PATH" >> /root/.bashrc
# #Run sshd
# get_ipython().system_raw('/usr/sbin/sshd -D &')
# #Ask token
# print("Copy authtoken from https://dashboard.ngrok.com/auth")
# import getpass
# authtoken = getpass.getpass()
# #Create tunnel
# get_ipython().system_raw('./ngrok authtoken $authtoken && ./ngrok tcp 22 &')
# #Print root password
# print("Root password: {}".format(password))
# #Get public address
# ! curl -s http://localhost:4040/api/tunnels | python3 -c \
# "import sys, json; print(json.load(sys.stdin)['tunnels'][0]['public_url'])"
!nvidia-smi
# ! curl -s http://localhost:4040/api/tunnels | python3 -c \
# "import sys, json; print(json.load(sys.stdin)['tunnels'][0]['public_url'])"
#!wget https://www.kaggle.com/c/sentiment-analysis-on-movie-reviews/download/P5ocxmvCguW59E8YFZnh%2Fversions%2FUNHbff4Y6pynSWzk4vQQ%2Ffiles%2Ftest.tsv.zip
!unzip train.tsv.zip
!unzip test.tsv.zip
train_df = pd.read_csv('train.tsv', delimiter='\t')
train_df.head()
params = {'C': [1.0, 0.1], 'fit_intercept':[False], 'ngram_range':[(1,3)], 'max_features':[10000, 5000], 'max_iter':[1000, 500], 'use_tf_idf':[True, False], 'analyzer':['word']}
optimus = Optimus(params)
final_result = optimus.perform_optimus_search(train_df)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
27871,
20704,
13,
541,
2047,
65,
198,
198,
38062,
4142,
7560,
416,
1623,
4820,
2870,
13,
198,
198,
20556,
2393,
318,
5140,
379,
198,
220,
220,
220,
3740,
1378,
... | 2.500527 | 949 |
from avanzapy.constants import InstrumentType
from avanzapy.stock import Stock
from avanzapy.fund import Fund
from avanzapy.bond import Bond
from avanzapy.option import Option
from avanzapy.futureforward import FutureForward
from avanzapy.certificate import Certificate
from avanzapy.warrant import Warrant
from avanzapy.etf import Etf
from avanzapy.index import Index
from avanzapy.premiumbond import PremiumBond
from avanzapy.subscriptionoption import SubscriptionOption
from avanzapy.equitylinkedbond import EquityLinkedBond
from avanzapy.convertible import Convertible
"""
Instrument factory, creates a particular instrument
with some given data.
TODO: Ugly
""" | [
6738,
1196,
35410,
12826,
13,
9979,
1187,
1330,
42410,
6030,
198,
198,
6738,
1196,
35410,
12826,
13,
13578,
1330,
10500,
198,
6738,
1196,
35410,
12826,
13,
10990,
1330,
7557,
198,
6738,
1196,
35410,
12826,
13,
65,
623,
1330,
12812,
198,
... | 3.650273 | 183 |
bl_info = {
"name": "Qor JSON Scene Export",
"author": "Grady O'Connell",
"blender": (2,7,6),
"version": (0,0,1),
"location": "File > Import-Export",
"description": "Import-Export Qor JSON data format (export only)",
"category": "Import-Export",
"wiki_url": "https://github.com/flipcoder/qor",
"tracker_url": "https://github.com/flipcoder/qor",
}
import bpy
from bpy.props import *
from bpy_extras.io_utils import ExportHelper, ImportHelper
#def menu_func_import(self, context):
#self.layout.operator(ImportQor.bl_idname, text="Qor JSON (.json)")
if __name__ == "__main__":
register()
| [
2436,
62,
10951,
796,
1391,
198,
220,
220,
220,
366,
3672,
1298,
220,
220,
220,
220,
220,
220,
220,
220,
366,
48,
273,
19449,
28315,
36472,
1600,
198,
220,
220,
220,
366,
9800,
1298,
220,
220,
220,
220,
220,
220,
366,
8642,
4597,
... | 2.313793 | 290 |
__author__ = 'nb254'
import sqlite3
import numpy
import csv
import sys
sys.path.append("/mnt/nb254_data/src/utils/")
#import pandas as pd
from once import once, oncecleardb, onceprintdb, onceinit
DIR = '/mnt/nb254_data/db/'
| [
834,
9800,
834,
796,
705,
46803,
24970,
6,
198,
11748,
44161,
578,
18,
198,
11748,
299,
32152,
198,
11748,
269,
21370,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7203,
14,
76,
429,
14,
46803,
24970,
62,
7890,
14,
10677,
14,
... | 2.586207 | 87 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Script to convert id3 v1 tags in a mp3 file to id3 v2."""
import sys
import os
import subprocess # noqa: S404, B404 nosec
# requires tool "id3v2"
# id3v2 -l Prince/Unknown\ Album/When\ Doves\ Cry.mp3
# id3v1 tag info for Prince/Unknown Album/When Doves Cry.mp3:
# Title : When Doves Cry Artist: Prince
# Album : The Rolling Stone Magazines 50 Year: 1984, Genre: Other (12)
# Comment: Track: 52
# Prince/Unknown Album/When Doves Cry.mp3: No ID3v2 tag
# After conversion:
# id3v1 tag info for Prince/Unknown Album/When Doves Cry.mp3:
# Title : When Doves Cry Artist: Prince
# Album : The Rolling Stone Magazines 50 Year: 1984, Genre: Other (12)
# Comment: Track: 52
# id3v2 tag info for Prince/Unknown Album/When Doves Cry.mp3:
# TIT2 (Title/songname/content description): When Doves Cry
# TPE1 (Lead performer(s)/Soloist(s)): Prince
# TALB (Album/Movie/Show title): The Rolling Stone Magazines 50
# TYER (Year): 1984
# TRCK (Track number/Position in set): 52
# TCON (Content type): Other (12)
def convert_id3v1_to_id3v2(path):
"""Convert idv3 tags from v1 to v2 using cli tool id3v2."""
if not os.path.isfile(path):
raise ValueError('Not a path: {0}'.format(path))
cmd = ['id3v2', '-C', path]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # noqa: S603
stdout, stderr = proc.communicate()
if proc.returncode != 0:
print('Error: non-zero exit code: %i' % proc.returncode) # noqa: T001
if len(stdout):
print(stdout) # noqa: T001
if len(stderr):
print(stderr) # noqa: T001
if stderr.find('Tags could not be converted') > -1:
print('Error: %s' % (stdout + stderr)) # noqa: T001
return proc.returncode
def get_id3_versions(path):
"""
Return an array containing 1,2 or nothing.
:param path: path to mp3
"""
if not os.path.isfile(path):
raise ValueError('Not a path: {0}'.format(path))
cmd = ['id3v2', '-l', path]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # noqa: S603
buf, _err = proc.communicate()
versions = []
if buf.find('id3v1 tag info for') > -1:
versions.append(1)
if buf.find('id3v2 tag info for') > -1:
versions.append(2)
return versions
def main():
"""Run main program."""
import argparse
parser = argparse.ArgumentParser(description='Convert id3v1 tags to id3v2 tags.')
parser.add_argument('paths', metavar='PATH', type=str, nargs='+',
help='path to music files')
parser.add_argument('--dry-run', dest='dryrun', action='store_true',
default=False, help='only show affected files')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
default=False, help='list files being inspected')
args = parser.parse_args()
for start_path in args.paths:
for root, _dirs, files in os.walk(start_path):
for relpath in files:
abspath = os.path.join(root, relpath)
if args.verbose:
print(abspath) # noqa: T001
id3version = get_id3_versions(abspath)
if id3version:
if 1 in id3version and 2 not in id3version:
if not args.verbose:
print(abspath) # noqa: T001
if not args.dryrun:
convert_id3v1_to_id3v2(abspath)
print('%r -> %r' % (id3version, get_id3_versions(abspath))) # noqa: T001
if __name__ == '__main__':
sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
7391,
284,
10385,
4686,
18,
410,
16,
15940,
287,
257,
29034,
18,
2393,
284,
4686,
18,
410,
17,
526,
... | 2.157329 | 1,767 |
import usaddress
| [
11748,
514,
21975,
628,
198
] | 3.8 | 5 |
"""Classes for federated experiments.
Experiment classes are supposed to be, as far as possible, agnostic to models,
loss functions and optimizers. They take care of training, testing and logging.
Note on parameter/`state_dict` distinction: Currently, this implementation
treats all members of the `state_dict` as parameters that need to be
communicated over the network, whether or not they are model parameters (or e.g.
buffers). It seems sensible to me to sync all of the `state_dict`, but I'm not
100% sure if this is actually what needs to happen, so if I later discover that
this is mistaken, this implementation may change to send only elements in
`model.parameters()`.
"""
# Chuan-Zheng Lee <czlee@stanford.edu>
# July 2021
import argparse
import logging
import pathlib
from typing import Callable, Dict, Optional, Sequence, Tuple
import torch
import utils
from . import optimizers
from .experiment import BaseExperiment
logger = logging.getLogger(__name__)
ClientLRSchedulersType = Optional[Sequence[Optional[torch.optim.lr_scheduler._LRScheduler]]]
class BaseFederatedExperiment(BaseExperiment):
"""Base class for federated experiments.
This takes care of splitting the datasets among clients and training
individual clients, which should be common functionality to all federated
experiments.
"""
default_params = BaseExperiment.default_params.copy()
default_params.update({
'epochs': 1,
'rounds': 20,
'clients': 10,
'send': 'deltas',
'client_sync': True,
})
def __init__(
self,
client_datasets: Sequence[torch.utils.data.Dataset],
test_dataset: torch.utils.data.Dataset,
client_models: Sequence[torch.nn.Module],
global_model: torch.nn.Module,
loss_fn: Callable,
metric_fns: Dict[str, Callable],
client_optimizers: Sequence[torch.optim.Optimizer],
results_dir: pathlib.Path,
device='cpu',
client_lr_schedulers: ClientLRSchedulersType = None,
sqerror_reference: Optional[Tuple[torch.nn.Module, torch.optim.Optimizer]] = None,
**params):
"""This constructor requires all client datasets, models and optimizers
to be pre-constructed ready to be passed into this constructor. The
constructor will sync the client models with the global model before
starting training.
"""
super().__init__(loss_fn, metric_fns, results_dir, device, **params)
if not len(client_datasets) == len(client_models) == len(client_optimizers):
raise ValueError(f"There are {len(client_datasets)} client datasets, "
f"{len(client_models)} client models and "
f"{len(client_optimizers)} client optimizers.")
self.nclients = len(client_datasets)
self.client_datasets = client_datasets
self.test_dataset = test_dataset
self.client_models = [model.to(device) for model in client_models]
self.global_model = global_model.to(device)
self.client_optimizers = client_optimizers
self.client_lr_schedulers = client_lr_schedulers
self.client_dataloaders = [
torch.utils.data.DataLoader(dataset, batch_size=self.params['batch_size'])
for dataset in self.client_datasets
]
self.test_dataloader = torch.utils.data.DataLoader(
self.test_dataset,
batch_size=self.params['batch_size'],
)
# sync client models before starting
for model in self.client_models:
model.load_state_dict(global_model.state_dict())
if sqerror_reference:
self._saving_squared_error = True
self._setup_sqerror_reference(*sqerror_reference, device)
else:
self._saving_squared_error = False
@classmethod
@classmethod
def from_arguments(cls,
train_dataset: Sequence[torch.utils.data.Dataset],
test_dataset: torch.utils.data.Dataset,
model_fn: Callable[[], torch.nn.Module],
loss_fn: Callable,
metric_fns: Dict[str, Callable],
results_dir: pathlib.Path,
args: argparse.Namespace):
"""Instantiates a FederatedAveragingExperiment object from arguments
provided by an `ArgumentParser.parse_args()` call.
"""
device = cls._interpret_cpu_arg(args.cpu)
nclients = args.clients
data_per_client = args.data_per_client
if data_per_client is None:
client_lengths = utils.divide_integer_evenly(len(train_dataset), nclients)
else:
if data_per_client * nclients > len(train_dataset):
message = (f"There isn't enough data ({len(train_dataset)}) to get "
f"{data_per_client} examples for each of {nclients} clients.")
logger.error(message)
raise ValueError(message)
client_lengths = [data_per_client] * nclients
train_dataset = torch.utils.data.Subset(train_dataset, range(data_per_client * nclients))
client_datasets = torch.utils.data.random_split(train_dataset, client_lengths)
global_model = model_fn()
client_models = [model_fn() for i in range(nclients)]
client_optimizers = [
optimizers.make_optimizer(
model.parameters(),
algorithm=args.optimizer_client,
lr=args.lr_client,
momentum=args.momentum_client,
weight_decay=args.weight_decay_client,
) for model in client_models
]
client_lr_schedulers = [
optimizers.make_scheduler(args.lr_scheduler_client, optimizer)
for optimizer in client_optimizers
]
if args.save_squared_error:
if args.momentum_client > 0:
message = "Saving squared error doesn't make any sense with SGD momentum"
logger.error(message)
raise ValueError(message)
sqerror_ref_model = model_fn()
sqerror_ref_optimizer = torch.optim.SGD(sqerror_ref_model.parameters(), lr=args.lr_client)
sqerror_reference = (sqerror_ref_model, sqerror_ref_optimizer)
else:
sqerror_reference = None
params = cls.extract_params_from_args(args)
return cls(
client_datasets, test_dataset, client_models, global_model, loss_fn,
metric_fns, client_optimizers, results_dir,
device=device, client_lr_schedulers=client_lr_schedulers,
sqerror_reference=sqerror_reference,
**params,
)
def train_clients(self):
"""Trains all clients through one round of the number of epochs specified
in `self.params['epochs']`.
"""
records = {}
clients = zip(self.client_dataloaders, self.client_models, self.client_optimizers)
nepochs = self.params['epochs']
for i, (dataloader, model, optimizer) in enumerate(clients):
for j in range(nepochs):
train_loss = self._train(dataloader, model, optimizer)
logger.info(f"Client {i}/{self.nclients}, epoch {j}/{nepochs}: loss {train_loss}")
records[f"train_loss_client{i}"] = train_loss
return records
@staticmethod
def flatten_state_dict(state_dict: dict) -> torch.Tensor:
"""Flattens a given model's state dict into a single tensor. Normally,
the state dict passed to this method will be that of a client model.
Subclasses normally shouldn't use this method. To retrieve the values
the client should send, use `get_values_to_send()`.
"""
states = [state.flatten() for state in state_dict.values()]
flattened = torch.hstack(states).reshape(1, -1)
return flattened
def unflatten_state_dict(self, tensor: torch.Tensor) -> dict:
"""Unflattens a (presumably 1-D) tensor into a state dict compatible
with the global model.
Subclasses normally shouldn't use this method. To update the model with
received values, use `update_global_model()`.
"""
flattened = tensor.flatten()
new_state_dict = {}
cursor = 0
for key, value in self.global_model.state_dict().items():
numel = value.numel()
part = flattened[cursor:cursor + numel]
new_state_dict[key] = part.reshape(value.size())
cursor += numel
assert cursor == flattened.numel()
return new_state_dict
def get_values_to_send(self, model) -> torch.Tensor:
"""Returns the values that should be sent from the client.
Subclasses can use this method to retrieve the vector that needs to be
communicated from clients to the server."""
local_flattened = self.flatten_state_dict(model.state_dict())
if self.params['send'] == 'deltas':
global_flattened = self.flatten_state_dict(self.global_model.state_dict())
return local_flattened - global_flattened
elif self.params['send'] == 'params':
return local_flattened
else:
raise ValueError("Unknown 'send' spec: " + str(self.params['send']))
def update_global_model(self, values):
"""Update the global model with the values provided, which should be the
values inferred by the server from the received signals. For example, in
federated averaging, `values` would be the mean of what each client
returns from `get_values_to_send()`.
This also synchronizes all of the client models with this global model,
since we don't model the downlink in these simulations.
Subclasses can use this method to handle received values.
"""
if self._saving_squared_error:
reference_values = self._get_reference_values()
self.records['estimation_sqerror'] = (reference_values - values).square().sum().item()
self.log_model_json(self.current_round, self.reference_model, prefix="reference_")
if self.params['send'] == 'deltas':
global_flattened = self.flatten_state_dict(self.global_model.state_dict())
updated_values = global_flattened + values
new_state_dict = self.unflatten_state_dict(updated_values)
elif self.params['send'] == 'params':
new_state_dict = self.unflatten_state_dict(values)
self.global_model.load_state_dict(new_state_dict)
if self.params['client_sync']:
for model in self.client_models: # sync client models
model.load_state_dict(new_state_dict)
else:
logger.warning("Skipping client synchronization")
def _setup_sqerror_reference(self, reference_model, reference_optimizer, device):
"""Sets up reference model, dataset and optimizer for squared error
saving. These objects are used to check what the "true" gradient is,
using full gradient descent. We maintain separate objects for them, to
avoid interfering with the main learning process.
Used in combination with `_get_reference_value()`."""
# This is a little hacky, because it tries to do everything without
# direct access to the arguments passed to `BaseFederatedExperiment.from_arguments()`.
# Could be worth refactoring how `BaseFederatedExperiment.from_arguments()` works,
# but this would lose flexibility in the constructor.
logger.info("Saving squared error data")
self.reference_model = reference_model.to(device)
self.reference_optimizer = reference_optimizer
# merge all the client datasets
self.reference_dataset = torch.utils.data.ConcatDataset(self.client_datasets)
self.reference_dataloader = torch.utils.data.DataLoader(self.reference_dataset,
batch_size=len(self.reference_dataset))
logger.debug(f"Reference dataset has {len(self.reference_dataset)} examples")
def _get_reference_values(self):
"""Trains on the global model and logs the "true" value that would be
returned by `get_values_to_send()` if it had knowledge of all the data.
This is called from `update_global_model()`; subclasses shouldn't need
to call it.
"""
self.reference_model.load_state_dict(self.global_model.state_dict())
nepochs = self.params['epochs']
for j in range(nepochs):
train_loss = self._train(self.reference_dataloader, self.reference_model,
self.reference_optimizer)
logger.info(f"Reference model, epoch {j}/{nepochs}: loss {train_loss}")
return self.get_values_to_send(self.reference_model)
def transmit_and_aggregate(self):
"""Transmits the client models from `self.client_models` and aggregates
them at the server. At the time this is called, the clients are assumed
to have been trained for this round (by `self.train_clients()`).
When this is called, `self.records` is a dict that will be logged to a
CSV file (with keys as column headers). Subclasses may optionally add
entries to this dict. If they do so, they should modify the dict
in-place. Also, `self.current_round` will be the current round number.
Subclasses must implement this method."""
raise NotImplementedError
def step_lr_schedulers(self):
"""Steps all client learning rate schedulers."""
if self.client_lr_schedulers is None:
return
for i, scheduler in enumerate(self.client_lr_schedulers):
if scheduler:
self.records[f"lr_client{i}"] = scheduler.get_last_lr()[0] # log before step
scheduler.step()
def run(self):
"""Runs the experiment once."""
nrounds = self.params['rounds']
csv_logger = self.get_csv_logger('training.csv', index_field='round')
for r in range(nrounds):
self.current_round = r
self.records = self.train_clients() # this overwrites self.records
self.transmit_and_aggregate()
test_results = self.test()
self.records.update(test_results)
self.step_lr_schedulers()
logger.info(f"Round {r}: " + ", ".join(f"{k} {v:.7f}" for k, v in test_results.items()))
csv_logger.log(r, self.records)
self.log_model_json(r, self.global_model)
self.current_round = None
csv_logger.close()
test_results = self.test()
self.log_evaluation(test_results)
class OldFederatedAveragingExperiment(BaseFederatedExperiment):
"""Old version of a class for a simple federated averaging experiment.
This is a simpler version of `FederatedAveragingExperiment`. Rather than
flatten and unflatten the state dict, it goes through the state dict and
averages each tensor in the state dict separately. It's deprecated in favor
of `FederatedAveragingExperiment`."""
def transmit_and_aggregate(self):
"""Aggregates client models by taking the mean."""
global_dict = self.global_model.state_dict()
for k in global_dict.keys():
client_states = [model.state_dict()[k].float() for model in self.client_models]
global_dict[k] = torch.stack(client_states, 0).mean(0)
self.global_model.load_state_dict(global_dict)
for model in self.client_models:
model.load_state_dict(self.global_model.state_dict())
class FederatedAveragingExperiment(BaseFederatedExperiment):
"""Class for a simple federated averaging experiment.
This class doesn't attempt to model the channel at all. It just trains
clients individually, and assumes the clients can send whatever they want
to the server errorlessly.
This class should do the same thing as `OldFederatedAveragingExperiment`,
just in a slightly more roundabout way. It uses the `get_values_to_send()`
and `update_global_model()` methods of BaseFederatedExperiment to simplify
its own implementation. The advantage of doing this is that the class can
take advantage of options specifying what values clients should send (e.g.,
whether to send the model parameters themselves, or updates as deltas). The
disadvantage is that flattening and unflattening the state dict is, overall,
a little bit more complicated.
"""
description = """\
Federated averaging with unconstrained communication.
"""
def transmit_and_aggregate(self):
"""Aggregates client models."""
client_values = [self.get_values_to_send(model) for model in self.client_models]
client_average = torch.stack(client_values, 0).mean(0)
self.update_global_model(client_average)
| [
37811,
9487,
274,
329,
28062,
515,
10256,
13,
198,
198,
20468,
3681,
6097,
389,
4385,
284,
307,
11,
355,
1290,
355,
1744,
11,
556,
43758,
284,
4981,
11,
198,
22462,
5499,
290,
6436,
11341,
13,
1119,
1011,
1337,
286,
3047,
11,
4856,
... | 2.478375 | 6,867 |
from pygorithms.algorithms.sorting.selection_sort import selection_sort
| [
6738,
12972,
7727,
907,
13,
282,
7727,
907,
13,
82,
24707,
13,
49283,
62,
30619,
1330,
6356,
62,
30619,
628
] | 3.65 | 20 |