code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities and types for defining networks, these depend on PyTorch.
"""
import warnings
import torch
import torch.nn.functional as f
def one_hot(labels, num_classes):
    """
    Convert a label tensor of shape `B1[spatial_dims]` into one-hot form
    `BN[spatial_dims]` with N = `num_classes` channels.

    Example:
        For every value v = labels[b,1,h,w], the result is 1 at [b,v,h,w]
        and 0 everywhere else. The background label is included, so a
        binary mask should be treated as having 2 classes.
    """
    ndim = labels.dim()
    if ndim > 1:
        # A singleton channel axis is required so it can be squeezed away.
        assert labels.shape[1] == 1, 'labels should have a channel with length equals to one.'
        labels = torch.squeeze(labels, 1)
    encoded = f.one_hot(labels.long(), num_classes)
    # one_hot places the class axis last; move it to position 1 (channel-first).
    permutation = [0, -1] + list(range(1, ndim - 1))
    encoded = encoded.permute(*permutation)
    # permute produces a non-contiguous view; hand back contiguous memory.
    return encoded if encoded.is_contiguous() else encoded.contiguous()
def slice_channels(tensor, *slicevals):
    """Slice `tensor` along its channel dimension (dim 1).

    Args:
        tensor: input tensor of shape ``(B, C, ...)``.
        *slicevals: arguments forwarded to ``slice()`` (start, stop[, step]).

    Returns:
        A view of `tensor` restricted to the requested channel range.
    """
    slices = [slice(None)] * len(tensor.shape)
    slices[1] = slice(*slicevals)
    # BUG FIX: index with a tuple — indexing with a *list* of slice objects
    # is deprecated/removed in modern PyTorch (and NumPy).
    return tensor[tuple(slices)]
def predict_segmentation(logits, mutually_exclusive=False, threshold=0):
    """
    Compute a segmentation from network `logits` of shape `BCHW[D]`:
    threshold every value for a multi-label task, or take the channel-wise
    `argmax` for a multi-class task.
    Args:
        logits (Tensor): raw data of model output.
        mutually_exclusive (bool): if True, `logits` is converted into a binary
            matrix via argmax over the channel axis (multi-class). Defaults to False.
        threshold (float): threshold applied to the prediction values in the
            multi-label case.
    """
    if mutually_exclusive:
        if logits.shape[1] > 1:
            return logits.argmax(1, keepdim=True)
        # Argmax over a single channel is meaningless; fall back to thresholding.
        warnings.warn('single channel prediction, `mutually_exclusive=True` ignored, use threshold instead.')
    return (logits >= threshold).int()
| [
"warnings.warn",
"torch.squeeze"
] | [((1316, 1340), 'torch.squeeze', 'torch.squeeze', (['labels', '(1)'], {}), '(labels, 1)\n', (1329, 1340), False, 'import torch\n'), ((2511, 2622), 'warnings.warn', 'warnings.warn', (['"""single channel prediction, `mutually_exclusive=True` ignored, use threshold instead."""'], {}), "(\n 'single channel prediction, `mutually_exclusive=True` ignored, use threshold instead.'\n )\n", (2524, 2622), False, 'import warnings\n')] |
# This file is also a independently runnable file in addition to being a module.
# You can run this file to test NewRaphAlgorithm function.
'''
This program demonstrates Newton Raphson Algorithm(NPA).
It is advised to follow all rules of the algorithm while entering the input.
(Either read rules provided in README.md or search online)
Program will cause an error in case any of the algorithm rules are not obeyed.
Simply import NewRaphAlgorithm function in any program to use this algorithm to find roots of a math function.
This program has a dependency on SymPy library. We used Sympy library here to convert math function in string form
into a solvable function and to differentiate that function.
NewRaphAlgorithm function accepts only a string as the math function, a float as root nearpoint
and an integer as no. of decimal places of approximation (Any float provided will be converted to a whole number).
Some non-algorithmic rules particular to ths module are:
1- There should always be a '*' sign between any number and variable or brackets.
For Example: 'x^2-4x-7' is not allowed. Write it as 'x^2-4*x-7'.
Also, '4(x+2)' is not allowed. Write it as '4*(x+2)'
2- Multiple alphabets together are considered as one variable.
For Example: '2*kite' is considered same as '2*x'.
NOTE- ITS NOT THAT I AM 100% SURE THAT THIS PROGRAM IS COMPLETELY BUG FREE, BUT I HAVE FIXED A PRETTY GOOD CHUNK OF THEM.
BUT IF STILL A BUG IS FOUND THAT MEANS I DIDN'T ENCOUNTER THAT BUG IN MY TESTING. I TESTED THIS PROGRAM WITH OVER A 100 POLYNOMIALS.'''
# Importing SymPy library functions
from sympy import sympify, diff
# Creating a custom error for this program. This helps the user to pinpoint the exact problem
class New_Raph_Error(Exception):
    """Raised for any invalid input or convergence failure in NewRaphAlgorithm."""
    pass
# Defining a function for Newton Raphson Algorithm (NRA).
# This accepts a string as the math function, a float as root nearpoint, an integer as no. of decimal places of approximation.
def NewRaphAlgorithm(equation, nearpoint, decimal=3):
    """Find a root of `equation` near `nearpoint` with the Newton-Raphson method.

    Args:
        equation: one-variable polynomial, as a string (sympify-able).
        nearpoint: initial guess close to the desired root.
        decimal: number of decimal places for the approximation (default 3);
            any float is truncated to a whole number.

    Returns:
        The approximated root, rounded to `decimal` places.

    Raises:
        New_Raph_Error: on invalid input, a stationary-point guess, or when
            the iteration cannot converge.
    """
    import math  # needed for a correct NaN test (NaN never compares equal to itself)

    try:  # Checking invalid input
        decimal = abs(int(decimal))
    except (TypeError, ValueError):
        raise New_Raph_Error('Only whole numbers are accepted as decimal places')
    try:  # Checking invalid input
        nearpoint = float(nearpoint)
    except (TypeError, ValueError):
        raise New_Raph_Error('Only rational numbers are accepted as root nearpoint')
    try:  # Parse the string into a sympy expression.
        equation = str(equation)
        equation = sympify(equation)
    except Exception:
        raise New_Raph_Error("Please use '*' between any number and variable or brackets.")
    try:  # Differentiation of given equation; fails for multi-variable input.
        diff_equation = diff(equation)
    except ValueError:
        raise New_Raph_Error('Newton Raphson Method can solve only one variable polynomials')
    try:  # Grab the single free variable of the expression.
        var = list(equation.free_symbols)[0]
    except IndexError:
        raise New_Raph_Error('A number has been entered instead of a equation')
    # A zero derivative at the guess means we sit on a stationary point.
    if float(diff_equation.subs(var, nearpoint)) == 0:
        raise New_Raph_Error('Root assumption provided is either on maxima or minima of the function')
    prev_np = None  # previous iterate, compared against nearpoint to detect convergence
    # Looping through actual Algorithm (capped at 1000 iterations).
    # BUG FIX: the original condition `nearpoint != float('nan')` was always
    # True because NaN never compares equal to anything; use math.isnan.
    x = 0
    while x < 1000 and prev_np != nearpoint and not math.isnan(nearpoint):
        eq = float(equation.subs(var, nearpoint))        # f(x_k)
        diff_eq = float(diff_equation.subs(var, nearpoint))  # f'(x_k)
        prev_np = nearpoint
        try:
            nearpoint = nearpoint - (eq / diff_eq)  # Newton-Raphson update
        except ZeroDivisionError:
            return prev_np
        nearpoint = round(nearpoint, decimal)  # round to the requested precision
        x += 1
    # Post Algorithm result validity checking.
    # BUG FIX: `nearpoint == float('nan')` was always False; use math.isnan.
    if math.isnan(nearpoint):
        raise New_Raph_Error('''There is a local minima or maxima or a point of inflection around
    root assumption provided and nearest root value''')
    elif x == 1000:
        raise New_Raph_Error('''Entered polynomial doesn't have any real root''')
    else:
        return nearpoint
#-------------------------------------------------NRA Module Ends Here--------------------------------------------------------
# The code following is a sample execution program for NRA.
# Anything folllowing will only execute if this file is run directly without importing.
if __name__ == '__main__':
    # Interactive demo: prompt for a polynomial and report one of its roots.
    print('''This program demonstrates Newton Raphson Algorithm.
    It is advised to follow all rules of the algorithm while entering the input.
    Program will cause an error in case any of the algorithm rules are not obeyed.
    There should always be a '*' sign between any number and variable.
    E.g.- 'x^2-4x-7' is not allowed. Write it as 'x^2-4*x-7'.
    Multiple alphabets together are considered as one variable.
    E.g.- '2*kite' is considered same as '2*x'.\n''')
    poly_str = input('Enter a one variable polynomial: ')
    guess = input('Enter value of a number close to a root: ')
    places = input('Enter the no. of decimal places for the appoximation of the root: ')
    root = NewRaphAlgorithm(poly_str, guess, places)
    print(f'\nOne of the roots of given function is: {root}')
# Created by <NAME> (aka ZenithFlux on github- https://github.com/ZenithFlux/) | [
"sympy.sympify",
"sympy.diff"
] | [((2606, 2623), 'sympy.sympify', 'sympify', (['equation'], {}), '(equation)\n', (2613, 2623), False, 'from sympy import sympify, diff\n'), ((2793, 2807), 'sympy.diff', 'diff', (['equation'], {}), '(equation)\n', (2797, 2807), False, 'from sympy import sympify, diff\n')] |
from itertools import groupby
from taxonomic import lca
from taxonomic import ncbi
# Shared NCBI taxonomy client, used below for path and rank lookups.
n = ncbi.NCBI()
def read_relationships():
    """Yield (member_gene, group_gene) pairs from the GMGC relationships file.

    The input is whitespace-separated with three columns; the middle column
    (the relationship type) is ignored.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original left it to garbage collection).
    with open('cold/GMGC.relationships.txt') as fh:
        for line in fh:
            a, _, b = line.rstrip().split()
            yield a, b
# Map freeze12 internal ids to gene names.
# FIX: both lookup tables are now read inside `with` blocks so the file
# handles are closed deterministically (the originals were leaked).
fr12name = {}
with open('cold/freeze12.rename.table') as fh:
    for line in fh:
        i, g, _ = line.strip().split('\t', 2)
        fr12name[i] = g
# Map old GMGC gene names to their renamed GMGC10 names.
rename = {}
with open('cold/GMGC10.rename.table.txt') as fh:
    for line in fh:
        new, old = line.rstrip().split()
        rename[old] = new
with open('GMGC10.inner.taxonomic.map', 'wt') as output:
    # Relationships are grouped by their second field (the group gene).
    for g, origs in groupby(read_relationships(), lambda a_b: a_b[1]):
        # Only freeze12-derived members ('Fr12_' prefix) can be mapped to a taxon.
        fr_genes = [o for o, _ in origs if o.startswith('Fr12_')]
        if fr_genes:
            # Lowest common ancestor over the taxonomic paths of all members.
            classif = lca.lca([n.path(fr12name[f].split('.')[0]) for f in fr_genes])
            classif = classif[-1]
            sp = n.at_rank(classif, 'species')
            if sp:
                output.write('\t'.join([g, rename[g], classif, sp]))
                output.write('\n')
| [
"taxonomic.ncbi.NCBI"
] | [((87, 98), 'taxonomic.ncbi.NCBI', 'ncbi.NCBI', ([], {}), '()\n', (96, 98), False, 'from taxonomic import ncbi\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import openpyxl
import csv
# Convert every .xlsx workbook in `path` (default: cwd) into one CSV per sheet.
path = os.curdir
if len(sys.argv) > 1:
    path = sys.argv[1]
for excel_file in os.listdir(path):
    # BUG FIX: os.listdir returns bare file names, so isfile/load_workbook
    # were resolved against the cwd and broke whenever a different
    # directory was passed as argv[1]; join with `path` instead.
    excel_path = os.path.join(path, excel_file)
    if not os.path.isfile(excel_path) or not excel_file.endswith('.xlsx'):
        continue
    print('{0}: Writing to csv...'.format(excel_file))
    wb = openpyxl.load_workbook(excel_path)
    excel_filename = excel_file[:-5]
    # wb.sheetnames / wb[name] replace the deprecated get_sheet_names /
    # get_sheet_by_name accessors (removed in openpyxl 3.x).
    for sheet_name in wb.sheetnames:
        sheet = wb[sheet_name]
        csv_filename = '{0}_{1}.csv'.format(excel_filename, sheet_name)
        with open(csv_filename, 'w', newline='') as f:
            writer = csv.writer(f)
            for row_num in range(1, sheet.max_row + 1):
                row_data = [
                    sheet.cell(row=row_num, column=col_num).value
                    for col_num in range(1, sheet.max_column + 1)
                ]
                writer.writerow(row_data)
| [
"openpyxl.load_workbook",
"csv.writer",
"os.listdir",
"os.path.isfile"
] | [((178, 194), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (188, 194), False, 'import os\n'), ((354, 388), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (['excel_file'], {}), '(excel_file)\n', (376, 388), False, 'import openpyxl\n'), ((208, 234), 'os.path.isfile', 'os.path.isfile', (['excel_file'], {}), '(excel_file)\n', (222, 234), False, 'import os\n'), ((667, 680), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (677, 680), False, 'import csv\n')] |
from gpiozero import Button
from time import sleep, time
import blinkt
import random
import requests
import sys
from constants import *
FNT_URL = "https://api.fortnitetracker.com/v1/profile/{}/{}"
FNT_REFRESH_TIME_SECS = 30
# debug shorter refresh
# FNT_REFRESH_TIME_SECS = 10
class FortniteAPIError(Exception):
    """Raised when the tracker API returns an error payload or unparseable JSON."""
    pass
class FortniteResponseError(Exception):
    """Raised when the tracker API responds with a non-200 HTTP status."""
    pass
def get_lifetime_wins():
    """Fetch the player's lifetime stats from the Fortnite tracker API.

    Returns:
        dict mapping stat key (e.g. "Wins", "Top 3s") to its value; values
        that look numeric are converted to int.

    Raises:
        FortniteResponseError: on a non-200 HTTP status.
        FortniteAPIError: on an invalid JSON body or an API-level error.
    """
    header = {"TRN-Api-Key": FNT_API_KEY}
    url = FNT_URL.format("all", FN_PLAYER)
    response = requests.get(url, headers=header)
    if response.status_code != 200:
        raise FortniteResponseError("HTTP Status {}".format(response.status_code))
    # FIX: parse the body once — the original called response.json() up to
    # four times, re-parsing the payload on every access.
    try:
        payload = response.json()
    except ValueError:
        raise FortniteAPIError("Invalid JSON response")
    # Check for API-level errors reported inside the payload.
    if "error" in payload:
        raise FortniteAPIError(payload["error"])
    life_time_stats = payload["lifeTimeStats"]
    response.close()
    # Flatten the list of {"key": ..., "value": ...} records into a dict.
    data = {}
    for stat in life_time_stats:
        if stat["value"].isnumeric():
            stat["value"] = int(stat["value"])
        data[stat["key"]] = stat["value"]
    return data
def on(r=255, g=255, b=255):
    """Set every Blinkt! pixel to the given RGB colour (defaults to white)."""
    blinkt.set_all(r, g, b)
    blinkt.show()
def off():
    """Turn all Blinkt! pixels off."""
    blinkt.clear()
    blinkt.show()
def flash(r, g, b, times, delay):
    """Blink every pixel in colour (r, g, b) `times` times.

    Each blink holds the colour for `delay` seconds, then stays dark for
    another `delay` seconds.
    """
    for _ in range(times):
        on(r, g, b)
        sleep(delay)
        off()
        sleep(delay)
def crazy_lights(min_leds, max_leds, r1, r2, g1, g2, b1, b2, length, delay):
    """Random light show for `length` seconds.

    Every `delay` seconds, between `min_leds` and `max_leds` randomly chosen
    pixels light up with random colours drawn from the given component
    ranges; all other pixels stay dark.
    """
    deadline = time() + length
    while time() < deadline:
        # Choose which pixels take part in this frame.
        lit = random.sample(range(blinkt.NUM_PIXELS), random.randint(min_leds, max_leds))
        for pixel in range(blinkt.NUM_PIXELS):
            # Colours are drawn for every pixel to keep the RNG stream uniform.
            r = random.randint(r1, r2)
            g = random.randint(g1, g2)
            b = random.randint(b1, b2)
            if pixel in lit:
                blinkt.set_pixel(pixel, r, g, b)
            else:
                blinkt.set_pixel(pixel, 0, 0, 0)
        blinkt.show()
        sleep(delay)
def run_cube():
    """Main monitoring loop: keep the cube lit while the switch is held.

    Polls the tracker every FNT_REFRESH_TIME_SECS seconds, compares the
    lifetime stats to the previous snapshot, and runs a light show when a
    new win or top placement appears.
    """
    # Green while the first API call verifies connectivity.
    on(0, 255, 0)
    prev_life_time_wins = get_lifetime_wins()
    flash(0, 255, 0, 3, 0.25)
    # Steady white = monitoring.
    on()
    next_check = time() + FNT_REFRESH_TIME_SECS
    while switch.is_pressed:
        # debug with button
        # while not switch.is_pressed:
        sleep(0.1)
        if time() > next_check:
            life_time_wins = get_lifetime_wins()
            # debug
            # print(life_time_wins)
            # A new outright win gets the biggest, longest show.
            if life_time_wins["Wins"] > prev_life_time_wins["Wins"]:
                crazy_lights(5, 8, 0, 255, 0, 255, 0, 255, 60, 0.1)
                print("Wins")
                on()
            # New top placements get a smaller show.
            elif life_time_wins["Top 3s"] > prev_life_time_wins["Top 3s"]:
                crazy_lights(1, 5, 0, 255, 0, 255, 0, 255, 45, 0.4)
                print("Top 3s")
                on()
            elif life_time_wins["Top 10"] > prev_life_time_wins["Top 10"]:
                crazy_lights(1, 5, 0, 255, 0, 255, 0, 255, 45, 0.4)
                print("Top 10")
                on()
            elif life_time_wins["Top 5s"] > prev_life_time_wins["Top 5s"]:
                crazy_lights(1, 5, 0, 255, 0, 255, 0, 255, 45, 0.4)
                print("Top 5s")
                on()
            prev_life_time_wins = life_time_wins
            next_check = time() + FNT_REFRESH_TIME_SECS
    off()
    print("Fortnite stopped")
    # debug with button
    # switch.wait_for_release()
# debug with button
# switch.wait_for_release()
# Hardware setup and supervisor loop: wait for the switch, run the cube,
# and map each failure class to a distinct light pattern.
switch = Button(17)
blinkt.set_clear_on_exit()
running = True
restart_after_error = False
print("Service running")
while running:
    try:
        # After a transient response error, retry immediately instead of
        # waiting for another switch press.
        if restart_after_error:
            restart_after_error = False
        else:
            switch.wait_for_press()
        # debug with button
        # switch.wait_for_release()
        print("Fortnite started")
        run_cube()
    except FortniteResponseError as err:
        # Transient HTTP failure: flash blue and retry.
        print("Fortnite Response Error: {}".format(err))
        flash(0, 0, 255, 3, 0.25)
        restart_after_error = True
    except FortniteAPIError as err:
        # API-level failure: hold magenta until the switch is released.
        print("Stopping - Fortnite API Error: {}".format(err))
        flash(255, 0, 255, 3, 0.25)
        on(255, 0, 255)
        switch.wait_for_release()
        off()
    except KeyboardInterrupt:
        print("Service cancelled")
        off()
        running = False
    except:
        # NOTE(review): a bare except also swallows SystemExit — consider
        # narrowing to Exception. Shows red, waits for release, resumes.
        print("Stopping - Unexpected error:", sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
        flash(255, 0, 0, 3, 0.25)
        on(255,0,0)
        switch.wait_for_release()
        off()
| [
"blinkt.show",
"blinkt.set_clear_on_exit",
"blinkt.set_pixel",
"requests.get",
"gpiozero.Button",
"blinkt.set_all",
"time.sleep",
"sys.exc_info",
"blinkt.clear",
"time.time",
"random.randint"
] | [((4045, 4055), 'gpiozero.Button', 'Button', (['(17)'], {}), '(17)\n', (4051, 4055), False, 'from gpiozero import Button\n'), ((4056, 4082), 'blinkt.set_clear_on_exit', 'blinkt.set_clear_on_exit', ([], {}), '()\n', (4080, 4082), False, 'import blinkt\n'), ((502, 535), 'requests.get', 'requests.get', (['url'], {'headers': 'header'}), '(url, headers=header)\n', (514, 535), False, 'import requests\n'), ((1487, 1510), 'blinkt.set_all', 'blinkt.set_all', (['r', 'g', 'b'], {}), '(r, g, b)\n', (1501, 1510), False, 'import blinkt\n'), ((1515, 1528), 'blinkt.show', 'blinkt.show', ([], {}), '()\n', (1526, 1528), False, 'import blinkt\n'), ((1545, 1559), 'blinkt.clear', 'blinkt.clear', ([], {}), '()\n', (1557, 1559), False, 'import blinkt\n'), ((1564, 1577), 'blinkt.show', 'blinkt.show', ([], {}), '()\n', (1575, 1577), False, 'import blinkt\n'), ((1846, 1852), 'time.time', 'time', ([], {}), '()\n', (1850, 1852), False, 'from time import sleep, time\n'), ((1648, 1671), 'blinkt.set_all', 'blinkt.set_all', (['r', 'g', 'b'], {}), '(r, g, b)\n', (1662, 1671), False, 'import blinkt\n'), ((1680, 1693), 'blinkt.show', 'blinkt.show', ([], {}), '()\n', (1691, 1693), False, 'import blinkt\n'), ((1702, 1714), 'time.sleep', 'sleep', (['delay'], {}), '(delay)\n', (1707, 1714), False, 'from time import sleep, time\n'), ((1737, 1749), 'time.sleep', 'sleep', (['delay'], {}), '(delay)\n', (1742, 1749), False, 'from time import sleep, time\n'), ((2420, 2433), 'blinkt.show', 'blinkt.show', ([], {}), '()\n', (2431, 2433), False, 'import blinkt\n'), ((2442, 2454), 'time.sleep', 'sleep', (['delay'], {}), '(delay)\n', (2447, 2454), False, 'from time import sleep, time\n'), ((2637, 2643), 'time.time', 'time', ([], {}), '()\n', (2641, 2643), False, 'from time import sleep, time\n'), ((2766, 2776), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (2771, 2776), False, 'from time import sleep, time\n'), ((1864, 1870), 'time.time', 'time', ([], {}), '()\n', (1868, 1870), False, 'from time import 
sleep, time\n'), ((1977, 2011), 'random.randint', 'random.randint', (['min_leds', 'max_leds'], {}), '(min_leds, max_leds)\n', (1991, 2011), False, 'import random\n'), ((2788, 2794), 'time.time', 'time', ([], {}), '()\n', (2792, 2794), False, 'from time import sleep, time\n'), ((2083, 2105), 'random.randint', 'random.randint', (['r1', 'r2'], {}), '(r1, r2)\n', (2097, 2105), False, 'import random\n'), ((2106, 2128), 'random.randint', 'random.randint', (['g1', 'g2'], {}), '(g1, g2)\n', (2120, 2128), False, 'import random\n'), ((2129, 2151), 'random.randint', 'random.randint', (['b1', 'b2'], {}), '(b1, b2)\n', (2143, 2151), False, 'import random\n'), ((2311, 2343), 'blinkt.set_pixel', 'blinkt.set_pixel', (['pixel', 'r', 'g', 'b'], {}), '(pixel, r, g, b)\n', (2327, 2343), False, 'import blinkt\n'), ((2378, 2410), 'blinkt.set_pixel', 'blinkt.set_pixel', (['pixel', '(0)', '(0)', '(0)'], {}), '(pixel, 0, 0, 0)\n', (2394, 2410), False, 'import blinkt\n'), ((3894, 3900), 'time.time', 'time', ([], {}), '()\n', (3898, 3900), False, 'from time import sleep, time\n'), ((4973, 4987), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (4985, 4987), False, 'import sys\n'), ((4992, 5006), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (5004, 5006), False, 'import sys\n'), ((5011, 5025), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (5023, 5025), False, 'import sys\n')] |
#!/usr/bin/env python
import os.path
import sys
from pprint import pprint
import nist_database
def sort_dict(d, key=None, reverse=False):
    """
    Returns an OrderedDict object whose keys are ordered according to their
    value.
    Args:
        d:
            Input dictionary
        key:
            function which takes an tuple (key, object) and returns a value to
            compare and sort by. By default, the function compares the values
            of the dict i.e. key = lambda t : t[1]
        reverse:
            allows to reverse sort order.
    """
    import collections
    # Default comparison: sort the (key, value) items by their value.
    sort_key = (lambda item: item[1]) if key is None else key
    ordered_items = sorted(d.items(), key=sort_key, reverse=reverse)
    return collections.OrderedDict(ordered_items)
#class AtomicData(dict):
#
# _mandatory_keys = [
# "Etot",
# "Ekin",
# "Ecoul",
# "Eenuc",
# "Exc",
# ]
#
# def __init__(self, *args, **kwargs):
# super(AtomicData, self).__init__(*args, **kwargs)
#
# for k in self._mandatory_keys:
# if k not in self:
# raise ValueError("mandatory key %s is missing" % k)
# #self.symbol
# #self.Z = Z
# #self.configurations = configurations
def make_nist_configurations(path):
    """Print Python-literal dumps of the parsed NIST configuration tables."""
    neutral, cations, symb2Z = parse_nist_configurations(path)
    # Emit each table with the same generated-code banner.
    for label, table in (("neutral", neutral), ("cations", cations), ("symb2Z", symb2Z)):
        print("# Computer generated code")
        print("%s = " % label)
        pprint(table)
        print("")
def parse_nist_configurations(path):
    """Read and parse the NIST `configurations` file in `path`.

    The file looks like::

        Z  Symbol  Neutral            Positive ion
        1  H       1s^1               -
        2  He      1s^2               1s^1

    Returns three dicts keyed by element symbol: neutral configuration
    string (with '^' stripped), cation configuration (None when absent),
    and atomic number.
    """
    neutral = {}
    cations = {}
    symb2Z = {}
    expected_z = 1
    with open(os.path.join(path, "configurations"), "r") as fh:
        for line in fh:
            # Data rows have a digit in the second column; skip headers.
            if len(line) <= 1 or not line[1].isdigit():
                continue
            head = line[:44]
            catio = line[44:].strip()
            fields = head.split()
            Z = int(fields[0])
            symbol = fields[1]
            neutr = " ".join(fields[2:])
            assert Z == expected_z
            expected_z += 1
            neutral[symbol] = neutr.replace("^", "")
            cations[symbol] = catio.replace("^", "") if catio != "-" else None
            symb2Z[symbol] = Z
    return neutral, cations, symb2Z
def occupations_from_symbol(symbol):
    """Return [(state, occupancy), ...] for an element's electron configuration.

    NOTE(review): this module-level copy depends on globals `symb2Z` and
    `neutral`, which are never defined at module scope in this file (the
    similarly named dicts are locals of `parse_nist_configurations`), so
    calling it as-is raises NameError. Also `neutral` is indexed by Z here,
    while `parse_nist_configurations` keys it by symbol — verify which table
    was intended. A working nested variant lives inside `extract_nistdata`.
    """
    Z = symb2Z[symbol]
    configuration = neutral[Z]
    # A '[Xe]'-style first token: recursively expand the noble-gas core.
    if configuration[0][0] == '[':
        occupations = occupations_from_symbol(configuration[0][1:-1])
        configuration = configuration[1:]
    else:
        occupations = []
    # Tokens are presumably 'nl^occ' shells, e.g. '2p^6': s[:2] is the
    # state label and s[3:] the occupancy — TODO confirm token format.
    for s in configuration:
        occupations.append((s[:2], int(s[3:])))
    return occupations
def extract_nistdata(path, nist_xctype, iontype):
    """Parse NIST DFT reference data from `path`.

    Data layout documented at http://www.physics.nist.gov/PhysRefData/DFTdata/

    Args:
        path: directory containing the `configurations` file and the
            per-functional result subdirectories.
        nist_xctype: functional subdirectory name (e.g. 'LDA').
        iontype: ion-type subdirectory name (e.g. 'neutrals').

    Returns:
        dict mapping element symbol to (Z, [(n, l, occ, eigenvalue), ...],
        energies-dict).
    """
    # Read and parse the configurations file:
    # Z Symbol Neutral Positive ion
    # 1 H 1s^1 -
    # 2 He 1s^2 1s^1
    Ztable = {}
    configurations = [['X', '']]
    Z = 1
    with open(os.path.join(path, "configurations"), "r") as fh:
        for line in fh:
            # Data rows have a digit in the second column; skip headers.
            if len(line) > 1 and line[1].isdigit():
                line = line[:44].split()
                symbol = line[1]
                Ztable[symbol] = Z
                assert int(line[0]) == Z
                configurations.append(line[2:])
                Z += 1
    def occupations_from_symbol(symbol):
        # Expand an element's configuration into [(state, occupancy), ...],
        # recursing through '[Xe]'-style noble-gas cores.
        Z = Ztable[symbol]
        configuration = configurations[Z]
        if configuration[0][0] == '[':
            occupations = occupations_from_symbol(configuration[0][1:-1])
            configuration = configuration[1:]
        else:
            occupations = []
        for s in configuration:
            occupations.append((s[:2], int(s[3:])))
        return occupations
    # Example of a per-element file with AE results:
    # Etot = -675.742283
    # Ekin = 674.657334
    # Ecoul = 285.206130
    # Eenuc = -1601.463209
    # Exc = -34.142538
    # 1s -143.935181
    # 2s -15.046905
    # 2p -12.285376
    # 3s -1.706331
    # 3p -1.030572
    # 4s -0.141411
    nistdata = {}
    spdf = {'s': 0, 'p': 1, 'd': 2, 'f': 3}
    for (symbol, Z) in Ztable.items():
        #print("in symbol %s" % symbol)
        occupations = occupations_from_symbol(symbol)
        fname = os.path.join(path, nist_xctype, iontype, '%02d%s' % (Z, symbol))
        energies, eigens = {}, {}
        with open(fname, 'r') as fh:
            # First five lines are 'name = value' total-energy terms.
            for n in range(5):
                ename, evalue = fh.readline().split("=")
                energies[ename.strip()] = float(evalue)
            # Remaining lines are 'state eigenvalue' pairs.
            for line in fh:
                state, eig = line.split()
                eigens[state] = float(eig)
        # Combine occupations and eigenvalues into (n, l, occ, eig) tuples.
        nloe = []
        for (state, occ) in occupations:
            n = int(state[0])
            l = spdf[state[1]]
            eig = eigens[state]
            nloe.append((n, l, occ, eig))
        nistdata[symbol] = (Z, nloe, energies)
    return nistdata
def make_nistmodule(path, nist_xctype):
    """Print a Python-literal dump of the NIST data for `nist_xctype`."""
    for iontype in ("neutrals",):
        banner = '# Computer generated code: nist_xctype = %s, iontype = %s' % (nist_xctype, iontype)
        print(banner)
        print("format:\n\t element: (atomic number, [(n, l, occ, energy), ...]")
        print('%s = ' % iontype)
        pprint(extract_nistdata(path, nist_xctype, iontype))
if __name__ == '__main__':
    import sys
    from qatom import states_from_string, AtomicConfiguration
    # Print the neutral configuration for every element known to nist_database.
    for symbol in nist_database.symbols:
        aconf = AtomicConfiguration.neutral_from_symbol(symbol)
        print(aconf)
    # NOTE(review): everything below this exit is unreachable dead code.
    sys.exit(1)
    path = sys.argv[1]
    #neutral = {}
    #for (symbol, confstr) in nist_database.neutral.items():
    #    states = states_from_string(confstr)
    #    print(confstr)
    #    neutral[symbol] = (nist_database.symb2Z[symbol], states)
    #for s in states:
    #    pprint(tuple(s))
    #aconf = AtomicConfiguration.from_string(confstr, symb2Z[symbol])
    #make_nist_configurations(path)
    #xctypes = ["LDA", "ScRLDA"] #, "LSD", "RLDA",]
    nist_xctypes = ["LDA",]
    for xctype in nist_xctypes:
        print("xctype: %s" % xctype)
        make_nistmodule(path, xctype)
    #iontype = "neutrals"
    #dft_data = extract_nistdata(sys.argv[1], nist_xctype, iontype)
    #make_nistmodule(sys.argv[1], "LDA")
| [
"collections.OrderedDict",
"qatom.AtomicConfiguration.neutral_from_symbol",
"pprint.pprint",
"sys.exit"
] | [((854, 887), 'collections.OrderedDict', 'collections.OrderedDict', (['kv_items'], {}), '(kv_items)\n', (877, 887), False, 'import collections\n'), ((1547, 1562), 'pprint.pprint', 'pprint', (['neutral'], {}), '(neutral)\n', (1553, 1562), False, 'from pprint import pprint\n'), ((1645, 1660), 'pprint.pprint', 'pprint', (['cations'], {}), '(cations)\n', (1651, 1660), False, 'from pprint import pprint\n'), ((1742, 1756), 'pprint.pprint', 'pprint', (['symb2Z'], {}), '(symb2Z)\n', (1748, 1756), False, 'from pprint import pprint\n'), ((6209, 6220), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6217, 6220), False, 'import sys\n'), ((5916, 5928), 'pprint.pprint', 'pprint', (['data'], {}), '(data)\n', (5922, 5928), False, 'from pprint import pprint\n'), ((6136, 6183), 'qatom.AtomicConfiguration.neutral_from_symbol', 'AtomicConfiguration.neutral_from_symbol', (['symbol'], {}), '(symbol)\n', (6175, 6183), False, 'from qatom import states_from_string, AtomicConfiguration\n')] |
import gym
import time
import random
import custom_envs
import pprint
class MCOBJ(object):
    """Monte Carlo evaluation/control helper for a grid-world MDP.

    Wraps an environment exposing getStates()/getActions()/transform1()
    and provides random-policy sampling, MC policy evaluation, and
    epsilon-greedy MC control.
    """
    def __init__(self, grid_mdp):
        # grid_mdp: the environment object (e.g. the GridEnv gym wrapper).
        self.env = grid_mdp
        # Cache the environment's state and action spaces.
        self.states = self.env.getStates()
        self.actions = self.env.getActions()
    def gen_random_pi_sample(self, num):
        """Collect Monte Carlo episodes under a uniformly random policy.

        Arguments:
            num {number} -- number of episodes to sample
        Returns:
            array -- state sequences, action sequences, reward sequences
        """
        state_sample = []
        action_sample = []
        reward_sample = []
        for _ in range(num):
            s_tmp = []
            a_tmp = []
            r_tmp = []
            # Random start state; take random actions until terminal.
            s = self.states[int(random.random() * len(self.states))]
            t = False
            while t == False:
                a = self.actions[int(random.random() * len(self.actions))]
                t, s1, r = self.env.transform1(s, a)
                s_tmp.append(s)
                r_tmp.append(r)
                a_tmp.append(a)
                s = s1
            # Each sample holds one complete episode.
            state_sample.append(s_tmp)
            action_sample.append(a_tmp)
            reward_sample.append(r_tmp)
        return state_sample, action_sample, reward_sample
    def mc_evaluation(self, state_sample, action_sample, reward_sample, gamma=0.8):
        """Estimate the state-value function from sampled episodes.

        Returns a dict mapping each state to its empirical mean
        discounted return.
        """
        vfunc, nfunc = dict(), dict()
        for s in self.states:
            vfunc[s] = 0.0
            nfunc[s] = 0.0
        for iter1 in range(len(state_sample)):
            G = 0.0
            # Backward pass: discounted return of the episode's initial
            # state, e.g. s1 -> s2 -> s3 -> s7.
            for step in range(len(state_sample[iter1])-1, -1, -1):
                G *= gamma
                G += reward_sample[iter1][step]
            # Forward pass: credit the running return at every visited state.
            for step in range(len(state_sample[iter1])):
                s = state_sample[iter1][step]
                vfunc[s] += G
                nfunc[s] += 1.0
                G -= reward_sample[iter1][step]
                G /= gamma
        # Empirical average at each state with at least one visit.
        for s in self.states:
            if nfunc[s] > 0.000001:
                vfunc[s] /= nfunc[s]
        return vfunc
    def mc(self, num_iter1, epsilon, gamma):
        """Epsilon-greedy Monte Carlo control.

        Runs `num_iter1` episodes and returns the action-value table as a
        dict keyed by "<state>_<action>" strings.
        """
        # x, y = [], []
        qfunc, n = dict(), dict()
        for s in self.states:
            for a in self.actions:
                qfunc["%d_%s"%(s, a)] = 0.0
                n["%d_%s"%(s, a)] = 0.001
        for _ in range(num_iter1):
            # x.append(iter1)
            # y.append(compute_error(qfunc))
            s_sample, a_sample, r_sample = [], [], []
            s = self.states[int(random.random() * len(self.states))]
            t = False
            count = 0
            # Roll out one episode (capped at 100 steps).
            while False == t and count < 100:
                a = self.epsilon_greedy(qfunc, s, epsilon)
                t, s1, r = self.env.transform1(s, a)
                s_sample.append(s)
                a_sample.append(a)
                r_sample.append(r)
                s = s1
                count += 1
            g = 0.0
            # Discounted return from the start of the episode.
            for i in range(len(s_sample)-1, -1, -1):
                g *= gamma
                g += r_sample[i]
            # Incremental mean update of Q for every visited (s, a) pair.
            for i in range(len(s_sample)):
                key = "%d_%s"%(s_sample[i], a_sample[i])
                n[key] += 1.0
                qfunc[key] = (qfunc[key] * (n[key] - 1) + g) / n[key]
                g -= r_sample[i]
                g /= gamma
        return qfunc
    def epsilon_greedy(self, qfunc, state, epsilon):
        """Pick the greedy action with probability 1-epsilon, else random."""
        if random.random() > epsilon:
            return self.__max_action(qfunc, state)
        else:
            return self.actions[int(random.random() * len(self.actions))]
    def __max_action(self, qfunc, state):
        """Return the action with the highest Q-value at `state`."""
        state_rewards = { key: value for key, value in qfunc.items() if int(key.split("_")[0]) == state }
        max_reward = max(zip(state_rewards.values(), state_rewards.keys()))
        return max_reward[1].split("_")[1]
def main():
    """Train an epsilon-greedy MC policy on GridEnv and replay it visually."""
    env = gym.make('GridEnv-v0')
    gamma = env.gamma
    mc_obj = MCOBJ(env)
    ### Exploration strategies:
    ### method1: random-policy sampling + MC evaluation
    # state_sample, action_sample, reward_sample = mc_obj.gen_random_pi_sample(10)
    # print(state_sample)
    # print(action_sample)
    # print(reward_sample)
    # vfunc = mc_obj.mc_evaluation(state_sample, action_sample, reward_sample, gamma)
    # print('mc evaluation: ')
    # print(vfunc)
    ### method2: epsilon-greedy MC control
    qfunc = mc_obj.mc(10, 0.5, gamma)
    # Print the final action-value table.
    pprint.pprint(qfunc)
    ### Use the learned action values to let the robot search for the gold.
    # Try to find the gold 10 times.
    for _ in range(10):
        env.reset()
        env.render()
        time.sleep(0.3)
        state = env.getState()
        # Skip episodes that start in a terminal state.
        if state in env.getTerminateStates():
            time.sleep(1)
            continue
        # Follow the learned values until a terminal state is reached.
        # NOTE(review): MCOBJ.mc keys qfunc by "<state>_<action>" strings,
        # so indexing with a bare state here looks like it would raise
        # KeyError — verify the intended greedy-action lookup.
        is_not_terminal = True
        while is_not_terminal:
            action = qfunc[state]
            next_state, _, is_terminal, _ = env.step(action)
            state = next_state
            env.render()
            is_not_terminal = not is_terminal
            if is_not_terminal:
                time.sleep(0.3)
            else:
                time.sleep(1)
if __name__ == "__main__":
main() | [
"random.random",
"time.sleep",
"gym.make",
"pprint.pprint"
] | [((3883, 3905), 'gym.make', 'gym.make', (['"""GridEnv-v0"""'], {}), "('GridEnv-v0')\n", (3891, 3905), False, 'import gym\n'), ((4357, 4377), 'pprint.pprint', 'pprint.pprint', (['qfunc'], {}), '(qfunc)\n', (4370, 4377), False, 'import pprint\n'), ((4493, 4508), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (4503, 4508), False, 'import time\n'), ((3425, 3440), 'random.random', 'random.random', ([], {}), '()\n', (3438, 3440), False, 'import random\n'), ((4618, 4631), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4628, 4631), False, 'import time\n'), ((4984, 4999), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (4994, 4999), False, 'import time\n'), ((5034, 5047), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5044, 5047), False, 'import time\n'), ((680, 695), 'random.random', 'random.random', ([], {}), '()\n', (693, 695), False, 'import random\n'), ((2552, 2567), 'random.random', 'random.random', ([], {}), '()\n', (2565, 2567), False, 'import random\n'), ((3553, 3568), 'random.random', 'random.random', ([], {}), '()\n', (3566, 3568), False, 'import random\n'), ((806, 821), 'random.random', 'random.random', ([], {}), '()\n', (819, 821), False, 'import random\n')] |
#coding=utf-8
#-*- coding: utf-8 -*-
import os
import sys
sys.path.append("../frame/")
from loggingex import LOG_INFO
from loggingex import LOG_ERROR
from loggingex import LOG_WARNING
from mysql_manager import mysql_manager
class prepare_table():
    """Ensures a MySQL table exists, creating it from a .ttpl template file.

    The template under ./conf/table_template/<name>.ttpl is expected to be a
    CREATE TABLE statement with a '%s' placeholder for the table name.
    """
    def __init__(self, conn_name, table_template_name):
        self._conn_name = conn_name
        self._table_template_name = table_template_name
        self._create_table_format = ""  # lazily loaded CREATE TABLE template
    def prepare(self, table_name):
        """Create `table_name` from the template if it does not exist yet."""
        self._create_table_if_not_exist(table_name)
    def _get_table_template(self):
        """Load the CREATE TABLE template into self._create_table_format."""
        file_path = "./conf/table_template/" + self._table_template_name + ".ttpl"
        if not os.path.isfile(file_path):
            LOG_WARNING("can't read %s" %(file_path))
            return
        try:
            # FIX: use a context manager instead of a manual open/close pair,
            # and narrow the bare `except` to Exception.
            with open(file_path) as fobj:
                self._create_table_format = fobj.read()
        except Exception:
            self._create_table_format = ""
            LOG_WARNING("get table_template file error.path: %s" % (file_path))
    def _create_table_if_not_exist(self, table_name):
        """Create `table_name` from the cached template when it is missing."""
        db_manager = mysql_manager()
        conn = db_manager.get_mysql_conn(self._conn_name)
        if conn.has_table(table_name):
            return
        if len(self._create_table_format) == 0:
            self._get_table_template()
        if len(self._create_table_format) == 0:
            # Template unavailable (already logged above); nothing to do.
            return
        sql = self._create_table_format % (table_name)
        conn.execute(sql)
        conn.refresh_tables_info()
if __name__ == "__main__":
import os
os.chdir("../../")
sys.path.append("./src/frame/")
import sys
reload(sys)
sys.setdefaultencoding("utf8")
from j_load_mysql_conf import j_load_mysql_conf
from scheduler_frame_conf_inst import scheduler_frame_conf_inst
frame_conf_inst = scheduler_frame_conf_inst()
frame_conf_inst.load("./conf/frame.conf")
j_load_mysql_conf_obj = j_load_mysql_conf()
j_load_mysql_conf_obj.run()
a = prepare_table("daily_temp", "today_market_maker")
a.prepare("test")
| [
"sys.setdefaultencoding",
"mysql_manager.mysql_manager",
"os.chdir",
"os.path.isfile",
"j_load_mysql_conf.j_load_mysql_conf",
"scheduler_frame_conf_inst.scheduler_frame_conf_inst",
"loggingex.LOG_WARNING",
"sys.path.append"
] | [((59, 87), 'sys.path.append', 'sys.path.append', (['"""../frame/"""'], {}), "('../frame/')\n", (74, 87), False, 'import sys\n'), ((1597, 1615), 'os.chdir', 'os.chdir', (['"""../../"""'], {}), "('../../')\n", (1605, 1615), False, 'import os\n'), ((1620, 1651), 'sys.path.append', 'sys.path.append', (['"""./src/frame/"""'], {}), "('./src/frame/')\n", (1635, 1651), False, 'import sys\n'), ((1692, 1722), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf8"""'], {}), "('utf8')\n", (1714, 1722), False, 'import sys\n'), ((1871, 1898), 'scheduler_frame_conf_inst.scheduler_frame_conf_inst', 'scheduler_frame_conf_inst', ([], {}), '()\n', (1896, 1898), False, 'from scheduler_frame_conf_inst import scheduler_frame_conf_inst\n'), ((1974, 1993), 'j_load_mysql_conf.j_load_mysql_conf', 'j_load_mysql_conf', ([], {}), '()\n', (1991, 1993), False, 'from j_load_mysql_conf import j_load_mysql_conf\n'), ((1124, 1139), 'mysql_manager.mysql_manager', 'mysql_manager', ([], {}), '()\n', (1137, 1139), False, 'from mysql_manager import mysql_manager\n'), ((669, 694), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (683, 694), False, 'import os\n'), ((708, 748), 'loggingex.LOG_WARNING', 'LOG_WARNING', (['("can\'t read %s" % file_path)'], {}), '("can\'t read %s" % file_path)\n', (719, 748), False, 'from loggingex import LOG_WARNING\n'), ((938, 1003), 'loggingex.LOG_WARNING', 'LOG_WARNING', (["('get table_template file error.path: %s' % file_path)"], {}), "('get table_template file error.path: %s' % file_path)\n", (949, 1003), False, 'from loggingex import LOG_WARNING\n')] |
#!/usr/bin/env python3
import sys
def set_path(path: str):
try:
sys.path.index(path)
except ValueError:
sys.path.insert(0, path)
# set programatically the path to 'sim-environment' directory (alternately can also set PYTHONPATH)
set_path('/media/suresh/research/awesome-robotics/active-slam/catkin_ws/src/sim-environment/src')
import measurement as m
import utils.constants as constants
import numpy as np
import torch
import random
from pathlib import Path
np.random.seed(constants.RANDOM_SEED)
random.seed(constants.RANDOM_SEED)
torch.cuda.manual_seed(constants.RANDOM_SEED)
torch.manual_seed(constants.RANDOM_SEED)
np.set_printoptions(precision=3)
Path("saved_models").mkdir(parents=True, exist_ok=True)
Path("best_models").mkdir(parents=True, exist_ok=True)
if __name__ == '__main__':
print('running measurement model training')
measurement = m.Measurement(render=False, pretrained=False)
train_epochs = 500
eval_epochs = 5
measurement.train(train_epochs, eval_epochs)
# file_name = '../bckp/dec_13/best_models/likelihood_mse_best.pth'
# measurement.test(file_name)
del measurement
| [
"torch.manual_seed",
"sys.path.insert",
"pathlib.Path",
"random.seed",
"measurement.Measurement",
"sys.path.index",
"numpy.random.seed",
"torch.cuda.manual_seed",
"numpy.set_printoptions"
] | [((486, 523), 'numpy.random.seed', 'np.random.seed', (['constants.RANDOM_SEED'], {}), '(constants.RANDOM_SEED)\n', (500, 523), True, 'import numpy as np\n'), ((524, 558), 'random.seed', 'random.seed', (['constants.RANDOM_SEED'], {}), '(constants.RANDOM_SEED)\n', (535, 558), False, 'import random\n'), ((559, 604), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['constants.RANDOM_SEED'], {}), '(constants.RANDOM_SEED)\n', (581, 604), False, 'import torch\n'), ((605, 645), 'torch.manual_seed', 'torch.manual_seed', (['constants.RANDOM_SEED'], {}), '(constants.RANDOM_SEED)\n', (622, 645), False, 'import torch\n'), ((646, 678), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)'}), '(precision=3)\n', (665, 678), True, 'import numpy as np\n'), ((885, 930), 'measurement.Measurement', 'm.Measurement', ([], {'render': '(False)', 'pretrained': '(False)'}), '(render=False, pretrained=False)\n', (898, 930), True, 'import measurement as m\n'), ((77, 97), 'sys.path.index', 'sys.path.index', (['path'], {}), '(path)\n', (91, 97), False, 'import sys\n'), ((680, 700), 'pathlib.Path', 'Path', (['"""saved_models"""'], {}), "('saved_models')\n", (684, 700), False, 'from pathlib import Path\n'), ((736, 755), 'pathlib.Path', 'Path', (['"""best_models"""'], {}), "('best_models')\n", (740, 755), False, 'from pathlib import Path\n'), ((129, 153), 'sys.path.insert', 'sys.path.insert', (['(0)', 'path'], {}), '(0, path)\n', (144, 153), False, 'import sys\n')] |
from django.contrib.auth import authenticate
from django.shortcuts import render
from rest_framework import serializers
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from .models import Editor
from .models import User
class EditorAuthTokenSerializer(serializers.Serializer):
email = serializers.EmailField()
password = serializers.CharField(
trim_whitespace = False
)
class EditorLogin(APIView):
def post(self, request, *args, **kwargs):
serializer = EditorAuthTokenSerializer(
data=request.data
)
if not serializer.is_valid():
return Response(
data = serializer.errors,
status = status.HTTP_400_BAD_REQUEST
)
email = serializer.validated_data['email']
password = serializer.validated_data['password']
try:
user = User.objects.get(email = email)
except User.DoesNotExist:
return Response(
'User does not exist',
status = status.HTTP_401_UNAUTHORIZED
)
if not user.is_staff:
try:
editor = Editor.objects.get(user__email=email)
except Editor.DoesNotExist:
return Response(
'Editor does not exist',
status = status.HTTP_401_UNAUTHORIZED
)
user = authenticate(
request=request,
username=user.username,
password=password
)
if not user:
return Response(
'Incorrect password',
status = status.HTTP_401_UNAUTHORIZED
)
token, created = Token.objects.get_or_create(user=user)
return Response(
data = {
'email': user.email,
'token': token.key
}
)
| [
"django.contrib.auth.authenticate",
"rest_framework.serializers.EmailField",
"rest_framework.response.Response",
"rest_framework.serializers.CharField",
"rest_framework.authtoken.models.Token.objects.get_or_create"
] | [((413, 437), 'rest_framework.serializers.EmailField', 'serializers.EmailField', ([], {}), '()\n', (435, 437), False, 'from rest_framework import serializers\n'), ((453, 497), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'trim_whitespace': '(False)'}), '(trim_whitespace=False)\n', (474, 497), False, 'from rest_framework import serializers\n'), ((1516, 1588), 'django.contrib.auth.authenticate', 'authenticate', ([], {'request': 'request', 'username': 'user.username', 'password': 'password'}), '(request=request, username=user.username, password=password)\n', (1528, 1588), False, 'from django.contrib.auth import authenticate\n'), ((1816, 1854), 'rest_framework.authtoken.models.Token.objects.get_or_create', 'Token.objects.get_or_create', ([], {'user': 'user'}), '(user=user)\n', (1843, 1854), False, 'from rest_framework.authtoken.models import Token\n'), ((1870, 1926), 'rest_framework.response.Response', 'Response', ([], {'data': "{'email': user.email, 'token': token.key}"}), "(data={'email': user.email, 'token': token.key})\n", (1878, 1926), False, 'from rest_framework.response import Response\n'), ((736, 804), 'rest_framework.response.Response', 'Response', ([], {'data': 'serializer.errors', 'status': 'status.HTTP_400_BAD_REQUEST'}), '(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (744, 804), False, 'from rest_framework.response import Response\n'), ((1675, 1742), 'rest_framework.response.Response', 'Response', (['"""Incorrect password"""'], {'status': 'status.HTTP_401_UNAUTHORIZED'}), "('Incorrect password', status=status.HTTP_401_UNAUTHORIZED)\n", (1683, 1742), False, 'from rest_framework.response import Response\n'), ((1080, 1148), 'rest_framework.response.Response', 'Response', (['"""User does not exist"""'], {'status': 'status.HTTP_401_UNAUTHORIZED'}), "('User does not exist', status=status.HTTP_401_UNAUTHORIZED)\n", (1088, 1148), False, 'from rest_framework.response import Response\n'), ((1370, 1440), 
'rest_framework.response.Response', 'Response', (['"""Editor does not exist"""'], {'status': 'status.HTTP_401_UNAUTHORIZED'}), "('Editor does not exist', status=status.HTTP_401_UNAUTHORIZED)\n", (1378, 1440), False, 'from rest_framework.response import Response\n')] |
import argparse
import os
import json
from torch.utils.tensorboard import SummaryWriter
import random
import numpy as np
import zipfile
import torch
from transformers import AdamW, get_linear_schedule_with_warmup
from LAUG.nlu.jointBERT_new.dataloader import Dataloader
from LAUG.nlu.jointBERT_new.jointBERT import JointBERT
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
parser = argparse.ArgumentParser(description="Train a model.")
parser.add_argument('--config_path',
help='path to config file')
if __name__ == '__main__':
args = parser.parse_args()
config = json.load(open(args.config_path))
data_dir = config['data_dir']
output_dir = config['output_dir']
log_dir = config['log_dir']
DEVICE = config['DEVICE']
set_seed(config['seed'])
if 'multiwoz' in data_dir:
print('-'*20 + 'dataset:multiwoz' + '-'*20)
from LAUG.nlu.jointBERT_new.multiwoz.postprocess import is_slot_da, calculateF1, recover_intent
elif 'camrest' in data_dir:
print('-' * 20 + 'dataset:camrest' + '-' * 20)
from LAUG.nlu.jointBERT_new.camrest.postprocess import is_slot_da, calculateF1, recover_intent
elif 'crosswoz' in data_dir:
print('-' * 20 + 'dataset:crosswoz' + '-' * 20)
from LAUG.nlu.jointBERT_new.crosswoz.postprocess import is_slot_da, calculateF1, recover_intent
elif 'frames' in data_dir:
print('-' * 20 + 'dataset:frames' + '-' * 20)
from LAUG.nlu.jointBERT_new.frames.postprocess import is_slot_da, calculateF1, recover_intent
intent_vocab = json.load(open(os.path.join(data_dir, 'intent_vocab.json')))
tag_vocab = json.load(open(os.path.join(data_dir, 'tag_vocab.json')))
req_vocab = json.load(open(os.path.join(data_dir, 'req_vocab.json')))
req_slot_vocab = json.load(open(os.path.join(data_dir, 'req_slot_vocab.json')))
slot_intent_vocab = json.load(open(os.path.join(data_dir,'slot_intent_vocab.json')))
print('intent_vocab = ',intent_vocab)
print('tag_vocab = ', tag_vocab)
print('req_vocab = ', req_vocab)
print('req_slot_vocab = ', req_slot_vocab)
print('='*100)
dataloader = Dataloader(intent_vocab=intent_vocab, tag_vocab=tag_vocab, req_vocab=req_vocab, req_slot_vocab=req_slot_vocab, slot_intent_vocab=slot_intent_vocab,
pretrained_weights=config['model']['pretrained_weights'])
print('intent num:', len(intent_vocab))
print('tag num:', len(tag_vocab))
print('req num:', len(req_vocab))
for data_key in ['train', 'val', 'test']:
dataloader.load_data(json.load(open(os.path.join(data_dir, '{}_data.json'.format(data_key)))), data_key,
cut_sen_len=config['cut_sen_len'], use_bert_tokenizer=config['use_bert_tokenizer'])
print('{} set size: {}'.format(data_key, len(dataloader.data[data_key])))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
writer = SummaryWriter(log_dir)
model = JointBERT(config['model'], DEVICE, dataloader.tag_dim, dataloader.intent_dim, dataloader.req_dim, dataloader, dataloader.intent_weight, dataloader.req_weight)
model.to(DEVICE)
if config['model']['finetune']:
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if
not any(nd in n for nd in no_decay) and p.requires_grad],
'weight_decay': config['model']['weight_decay']},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad],
'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=config['model']['learning_rate'],
eps=config['model']['adam_epsilon'])
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=config['model']['warmup_steps'],
num_training_steps=config['model']['max_step'])
else:
for n, p in model.named_parameters():
if 'bert' in n:
p.requires_grad = False
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
lr=config['model']['learning_rate'])
for name, param in model.named_parameters():
print(name, param.shape, param.device, param.requires_grad)
max_step = config['model']['max_step']
check_step = config['model']['check_step']
batch_size = config['model']['batch_size']
print('check_step = {}, batch_size = {}'.format(check_step, batch_size))
model.zero_grad()
train_slot_loss, train_intent_loss, train_req_loss = 0, 0, 0
best_val_f1 = 0.
writer.add_text('config', json.dumps(config))
for step in range(1, max_step + 1):
model.train()
batched_data = dataloader.get_train_batch(batch_size)
batched_data = tuple(t.to(DEVICE) for t in batched_data)
word_seq_tensor, tag_seq_tensor, intent_tensor, req_tensor, req_mask_tensor, word_mask_tensor, tag_mask_tensor,base_tag_mask_tensor, context_seq_tensor, context_mask_tensor = batched_data
if not config['model']['context']:
context_seq_tensor, context_mask_tensor = None, None
_, _, _, slot_loss, intent_loss, req_loss = model.forward(word_seq_tensor, word_mask_tensor, tag_seq_tensor, tag_mask_tensor,
intent_tensor, req_tensor, req_mask_tensor, context_seq_tensor, context_mask_tensor)
train_slot_loss += slot_loss.item()
train_intent_loss += intent_loss.item()
train_req_loss += req_loss.item()
loss = slot_loss + intent_loss + req_loss
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
if config['model']['finetune']:
scheduler.step() # Update learning rate schedule
model.zero_grad()
if step % check_step == 0:
train_slot_loss = train_slot_loss / check_step
train_intent_loss = train_intent_loss / check_step
train_req_loss = train_req_loss / check_step
print('[%d|%d] step' % (step, max_step))
print('\t slot loss:', train_slot_loss)
print('\t intent loss:', train_intent_loss)
print('\t request loss:', train_req_loss)
predict_golden = {'intent': [], 'slot': [], 'req':[],'overall': []}
val_slot_loss, val_intent_loss,val_req_loss = 0, 0,0
model.eval()
for pad_batch, ori_batch, real_batch_size in dataloader.yield_batches(batch_size, data_key='val'):
pad_batch = tuple(t.to(DEVICE) for t in pad_batch)
word_seq_tensor, tag_seq_tensor, intent_tensor, req_tensor, req_mask_tensor, word_mask_tensor, tag_mask_tensor, base_tag_mask_tensor, context_seq_tensor, context_mask_tensor = pad_batch
if not config['model']['context']:
context_seq_tensor, context_mask_tensor = None, None
with torch.no_grad():
slot_logits, intent_logits, req_logits,slot_loss, intent_loss,req_loss = model.forward(word_seq_tensor,
word_mask_tensor,
tag_seq_tensor,
tag_mask_tensor,
intent_tensor,
req_tensor,
req_mask_tensor,
context_seq_tensor,
context_mask_tensor)
val_slot_loss += slot_loss.item() * real_batch_size
val_intent_loss += intent_loss.item() * real_batch_size
val_req_loss += req_loss.item() * real_batch_size
for j in range(real_batch_size):
predict_intent, predict_req, predict_slot, predict_overall = recover_intent(dataloader, intent_logits[j], req_logits[j*dataloader.req_dim: (j+1)*dataloader.req_dim], slot_logits[j*dataloader.slot_intent_dim:(j+1)*dataloader.slot_intent_dim], base_tag_mask_tensor[j*dataloader.slot_intent_dim:(j+1)*dataloader.slot_intent_dim],
ori_batch[j][0], ori_batch[j][-4])
#assert(ori_batch[j][3] != [])
predict_golden['overall'].append({
'predict': predict_overall,
'golden': ori_batch[j][3]
})
predict_golden['req'].append({
'predict':predict_req,
'golden':ori_batch[j][5] #req
})
'''
predict_golden['slot'].append({
'predict': predict_slot,#[x for x in predicts if is_slot_da(x)],
'golden': ori_batch[j][1]#tag
})
'''
predict_golden['intent'].append({
'predict': predict_intent,
'golden': ori_batch[j][2]#intent
})
for j in range(10):
writer.add_text('val_sample_{}'.format(j),
json.dumps(predict_golden['overall'][j], indent=2, ensure_ascii=False),
global_step=step)
total = len(dataloader.data['val'])
val_slot_loss /= total
val_intent_loss /= total
val_req_loss /= total
print('%d samples val' % total)
print('\t slot loss:', val_slot_loss)
print('\t intent loss:', val_intent_loss)
print('\t req loss:', val_req_loss)
writer.add_scalar('intent_loss/train', train_intent_loss, global_step=step)
writer.add_scalar('intent_loss/val', val_intent_loss, global_step=step)
writer.add_scalar('req_loss/train', train_req_loss, global_step=step)
writer.add_scalar('req_loss/val', val_req_loss, global_step=step)
writer.add_scalar('slot_loss/train', train_slot_loss, global_step=step)
writer.add_scalar('slot_loss/val', val_slot_loss, global_step=step)
for x in ['intent','req','overall']:
#for x in ['intent', 'slot', 'req','overall']:# pass slot
precision, recall, F1 = calculateF1(predict_golden[x], x=='overall')
print('-' * 20 + x + '-' * 20)
print('\t Precision: %.2f' % (100 * precision))
print('\t Recall: %.2f' % (100 * recall))
print('\t F1: %.2f' % (100 * F1))
writer.add_scalar('val_{}/precision'.format(x), precision, global_step=step)
writer.add_scalar('val_{}/recall'.format(x), recall, global_step=step)
writer.add_scalar('val_{}/F1'.format(x), F1, global_step=step)
if F1 > best_val_f1:
best_val_f1 = F1
torch.save(model.state_dict(), os.path.join(output_dir, 'pytorch_model.bin'))
print('best val F1 %.4f' % best_val_f1)
print('save on', output_dir)
train_slot_loss, train_intent_loss = 0, 0
writer.add_text('val overall F1', '%.2f' % (100 * best_val_f1))
writer.close()
model_path = os.path.join(output_dir, 'pytorch_model.bin')
zip_path = config['zipped_model_path']
print('zip model to', zip_path)
with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zf:
zf.write(model_path)
| [
"torch.manual_seed",
"torch.utils.tensorboard.SummaryWriter",
"os.path.exists",
"LAUG.nlu.jointBERT_new.frames.postprocess.calculateF1",
"argparse.ArgumentParser",
"os.makedirs",
"zipfile.ZipFile",
"transformers.AdamW",
"transformers.get_linear_schedule_with_warmup",
"json.dumps",
"os.path.join"... | [((452, 505), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train a model."""'}), "(description='Train a model.')\n", (475, 505), False, 'import argparse\n'), ((365, 382), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (376, 382), False, 'import random\n'), ((388, 408), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (402, 408), True, 'import numpy as np\n'), ((414, 437), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (431, 437), False, 'import torch\n'), ((2259, 2479), 'LAUG.nlu.jointBERT_new.dataloader.Dataloader', 'Dataloader', ([], {'intent_vocab': 'intent_vocab', 'tag_vocab': 'tag_vocab', 'req_vocab': 'req_vocab', 'req_slot_vocab': 'req_slot_vocab', 'slot_intent_vocab': 'slot_intent_vocab', 'pretrained_weights': "config['model']['pretrained_weights']"}), "(intent_vocab=intent_vocab, tag_vocab=tag_vocab, req_vocab=\n req_vocab, req_slot_vocab=req_slot_vocab, slot_intent_vocab=\n slot_intent_vocab, pretrained_weights=config['model']['pretrained_weights']\n )\n", (2269, 2479), False, 'from LAUG.nlu.jointBERT_new.dataloader import Dataloader\n'), ((3133, 3155), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['log_dir'], {}), '(log_dir)\n', (3146, 3155), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((3171, 3338), 'LAUG.nlu.jointBERT_new.jointBERT.JointBERT', 'JointBERT', (["config['model']", 'DEVICE', 'dataloader.tag_dim', 'dataloader.intent_dim', 'dataloader.req_dim', 'dataloader', 'dataloader.intent_weight', 'dataloader.req_weight'], {}), "(config['model'], DEVICE, dataloader.tag_dim, dataloader.\n intent_dim, dataloader.req_dim, dataloader, dataloader.intent_weight,\n dataloader.req_weight)\n", (3180, 3338), False, 'from LAUG.nlu.jointBERT_new.jointBERT import JointBERT\n'), ((12210, 12255), 'os.path.join', 'os.path.join', (['output_dir', '"""pytorch_model.bin"""'], {}), "(output_dir, 'pytorch_model.bin')\n", (12222, 12255), 
False, 'import os\n'), ((2989, 3015), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (3003, 3015), False, 'import os\n'), ((3026, 3049), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (3037, 3049), False, 'import os\n'), ((3062, 3085), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (3076, 3085), False, 'import os\n'), ((3096, 3116), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (3107, 3116), False, 'import os\n'), ((3884, 3997), 'transformers.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': "config['model']['learning_rate']", 'eps': "config['model']['adam_epsilon']"}), "(optimizer_grouped_parameters, lr=config['model']['learning_rate'],\n eps=config['model']['adam_epsilon'])\n", (3889, 3997), False, 'from transformers import AdamW, get_linear_schedule_with_warmup\n'), ((4042, 4187), 'transformers.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': "config['model']['warmup_steps']", 'num_training_steps': "config['model']['max_step']"}), "(optimizer, num_warmup_steps=config['model']\n ['warmup_steps'], num_training_steps=config['model']['max_step'])\n", (4073, 4187), False, 'from transformers import AdamW, get_linear_schedule_with_warmup\n'), ((5019, 5037), 'json.dumps', 'json.dumps', (['config'], {}), '(config)\n', (5029, 5037), False, 'import json\n'), ((12349, 12401), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_path', '"""w"""', 'zipfile.ZIP_DEFLATED'], {}), "(zip_path, 'w', zipfile.ZIP_DEFLATED)\n", (12364, 12401), False, 'import zipfile\n'), ((1683, 1726), 'os.path.join', 'os.path.join', (['data_dir', '"""intent_vocab.json"""'], {}), "(data_dir, 'intent_vocab.json')\n", (1695, 1726), False, 'import os\n'), ((1761, 1801), 'os.path.join', 'os.path.join', (['data_dir', '"""tag_vocab.json"""'], {}), "(data_dir, 'tag_vocab.json')\n", (1773, 1801), False, 'import os\n'), ((1836, 1876), 'os.path.join', 
'os.path.join', (['data_dir', '"""req_vocab.json"""'], {}), "(data_dir, 'req_vocab.json')\n", (1848, 1876), False, 'import os\n'), ((1916, 1961), 'os.path.join', 'os.path.join', (['data_dir', '"""req_slot_vocab.json"""'], {}), "(data_dir, 'req_slot_vocab.json')\n", (1928, 1961), False, 'import os\n'), ((2004, 2052), 'os.path.join', 'os.path.join', (['data_dir', '"""slot_intent_vocab.json"""'], {}), "(data_dir, 'slot_intent_vocab.json')\n", (2016, 2052), False, 'import os\n'), ((11242, 11288), 'LAUG.nlu.jointBERT_new.frames.postprocess.calculateF1', 'calculateF1', (['predict_golden[x]', "(x == 'overall')"], {}), "(predict_golden[x], x == 'overall')\n", (11253, 11288), False, 'from LAUG.nlu.jointBERT_new.frames.postprocess import is_slot_da, calculateF1, recover_intent\n'), ((7397, 7412), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7410, 7412), False, 'import torch\n'), ((8720, 9055), 'LAUG.nlu.jointBERT_new.frames.postprocess.recover_intent', 'recover_intent', (['dataloader', 'intent_logits[j]', 'req_logits[j * dataloader.req_dim:(j + 1) * dataloader.req_dim]', 'slot_logits[j * dataloader.slot_intent_dim:(j + 1) * dataloader.slot_intent_dim\n ]', 'base_tag_mask_tensor[j * dataloader.slot_intent_dim:(j + 1) * dataloader.\n slot_intent_dim]', 'ori_batch[j][0]', 'ori_batch[j][-4]'], {}), '(dataloader, intent_logits[j], req_logits[j * dataloader.\n req_dim:(j + 1) * dataloader.req_dim], slot_logits[j * dataloader.\n slot_intent_dim:(j + 1) * dataloader.slot_intent_dim],\n base_tag_mask_tensor[j * dataloader.slot_intent_dim:(j + 1) *\n dataloader.slot_intent_dim], ori_batch[j][0], ori_batch[j][-4])\n', (8734, 9055), False, 'from LAUG.nlu.jointBERT_new.frames.postprocess import is_slot_da, calculateF1, recover_intent\n'), ((10075, 10145), 'json.dumps', 'json.dumps', (["predict_golden['overall'][j]"], {'indent': '(2)', 'ensure_ascii': '(False)'}), "(predict_golden['overall'][j], indent=2, ensure_ascii=False)\n", (10085, 10145), False, 'import json\n'), ((11892, 
11937), 'os.path.join', 'os.path.join', (['output_dir', '"""pytorch_model.bin"""'], {}), "(output_dir, 'pytorch_model.bin')\n", (11904, 11937), False, 'import os\n')] |
import json
import time
import requests
import re
from flask import Flask, render_template, jsonify
from pyecharts.charts import Map, Timeline,Kline,Line,Bar,WordCloud
from pyecharts import options as opts
from pyecharts.globals import SymbolType
app = Flask(__name__)
#字典,受限于谷歌调用限制
cn_to_en = {'安哥拉': 'Angola', '阿富汗': 'Afghanistan', '阿尔巴尼亚': 'Albania', '阿尔及利亚': 'Algeria', '安道尔共和国': 'Andorra', '安圭拉岛': 'Anguilla', '安提瓜和巴布达': 'Antigua and Barbuda',
'阿根廷': 'Argentina', '亚美尼亚': 'Armenia', '阿森松': 'Ascension', '澳大利亚': 'Australia', '奥地利': 'Austria', '阿塞拜疆': 'Azerbaijan', '巴哈马': 'Bahamas', '巴林': 'Bahrain',
'孟加拉国': 'Bangladesh', '巴巴多斯': 'Barbados', '白俄罗斯': 'Belarus', '比利时': 'Belgium', '伯利兹': 'Belize', '贝宁': 'Benin', '百慕大群岛': 'Bermuda Is', '玻利维亚': 'Bolivia',
'博茨瓦纳': 'Botswana', '巴西': 'Brazil', '文莱': 'Brunei', '保加利亚': 'Bulgaria', '布基纳法索': 'Burkina Faso', '缅甸': 'Burma', '布隆迪': 'Burundi', '喀麦隆': 'Cameroon',
'加拿大': 'Canada', '开曼群岛': 'Cayman Is', '中非共和国': 'Central African Republic', '乍得': 'Chad', '智利': 'Chile', '中国': 'China', '哥伦比亚': 'Colombia', '刚果': 'Congo',
'库克群岛': 'Cook Is', '哥斯达黎加': 'Costa Rica', '古巴': 'Cuba', '塞浦路斯': 'Cyprus', '捷克': 'Czech Republic', '丹麦': 'Denmark', '吉布提': 'Djibouti', '多米尼加共和国': 'Dominica Rep',
'厄瓜多尔': 'Ecuador', '埃及': 'Egypt', '萨尔瓦多': 'EI Salvador', '爱沙尼亚': 'Estonia', '埃塞俄比亚': 'Ethiopia', '斐济': 'Fiji', '芬兰': 'Finland', '法国': 'France', '法属圭亚那': 'French Guiana',
'法属玻利尼西亚': 'French Polynesia', '加蓬': 'Gabon', '冈比亚': 'Gambia', '格鲁吉亚': 'Georgia', '德国': 'Germany', '加纳': 'Ghana', '直布罗陀': 'Gibraltar', '希腊': 'Greece', '格林纳达': 'Grenada',
'关岛': 'Guam', '危地马拉': 'Guatemala', '几内亚': 'Guinea', '圭亚那': 'Guyana', '海地': 'Haiti', '洪都拉斯': 'Honduras', '香港': 'Hongkong', '匈牙利': 'Hungary', '冰岛': 'Iceland', '印度': 'India',
'印度尼西亚': 'Indonesia', '伊朗': 'Iran', '伊拉克': 'Iraq', '爱尔兰':'Ireland', '以色列': 'Israel', '意大利': 'Italy', '科特迪瓦': 'Ivory Coast', '牙买加': 'Jamaica', '日本': 'Japan', '约旦': 'Jordan',
'柬埔寨': 'Kampuchea (Cambodia )', '哈萨克斯坦': 'Kazakstan', '肯尼亚': 'Kenya', '韩国': 'Korea', '科威特': 'Kuwait', '吉尔吉斯坦': 'Kyrgyzstan', '老挝': 'Laos', '拉脱维亚': 'Latvia', '黎巴嫩': 'Lebanon',
'莱索托': 'Lesotho', '利比里亚': 'Liberia', '利比亚': 'Libya', '列支敦士登': 'Liechtenstein', '立陶宛': 'Lithuania', '卢森堡': 'Luxembourg', '澳门': 'Macao', '马达加斯加': 'Madagascar',
'马拉维': 'Malawi', '马来西亚': 'Malaysia', '马尔代夫': 'Maldives', '马里': 'Mali', '马耳他': 'Malta', '马里亚那群岛': 'Mariana Is', '马提尼克': 'Martinique', '毛里求斯': 'Mauritius', '墨西哥': 'Mexico',
'摩尔多瓦': 'Moldova', '摩纳哥': 'Monaco', '蒙古': 'Mongolia', '蒙特塞拉特岛': 'Montserrat Is', '摩洛哥': 'Morocco', '莫桑比克': 'Mozambique', '纳米比亚': 'Namibia', '瑙鲁': 'Nauru', '尼泊尔': 'Nepal',
'荷属安的列斯': 'Netheriands Antilles', '荷兰': 'Netherlands', '新西兰': 'New Zealand', '尼加拉瓜': 'Nicaragua', '尼日尔': 'Niger', '尼日利亚': 'Nigeria', '朝鲜': 'North Korea', '挪威': 'Norway',
'阿曼': 'Oman', '巴基斯坦': 'Pakistan', '巴拿马':'Panama', '巴布亚新几内亚': 'Papua New Cuinea', '巴拉圭': 'Paraguay', '秘鲁': 'Peru', '菲律宾': 'Philippines', '波兰': 'Poland', '葡萄牙': 'Portugal',
'波多黎各': 'Puerto Rico', '卡塔尔': 'Qatar', '留尼旺': 'Reunion', '罗马尼亚': 'Romania', '俄罗斯': 'Russia', '圣卢西亚': 'St.Lucia', '圣文森特岛': 'Saint Vincent', '东萨摩亚(美)': 'Samoa Eastern',
'西萨摩亚': 'Samoa Western', '圣马力诺': 'San Marino', '圣多美和普林西比': 'Sao Tome and Principe', '沙特阿拉伯': 'Saudi Arabia', '塞内加尔': 'Senegal', '塞舌尔': 'Seychelles', '塞拉利昂': 'Sierra Leone',
'新加坡': 'Singapore', '斯洛伐克': 'Slovakia', '斯洛文尼亚': 'Slovenia', '所罗门群岛': 'Solomon Is', '索马里': 'Somali', '南非': 'South Africa', '西班牙': 'Spain', '斯里兰卡': 'SriLanka',
'圣文森特': 'St.Vincent', '苏丹': 'Sudan', '苏里南': 'Suriname', '斯威士兰': 'Swaziland', '瑞典': 'Sweden', '瑞士': 'Switzerland', '叙利亚': 'Syria', '台湾省': 'Taiwan', '塔吉克斯坦': 'Tajikstan',
'坦桑尼亚': 'Tanzania', '泰国': 'Thailand', '多哥': 'Togo', '汤加': 'Tonga', '特立尼达和多巴哥': 'Trinidad and Tobago', '突尼斯': 'Tunisia', '土耳其': 'Turkey', '土库曼斯坦': 'Turkmenistan',
'乌干达': 'Uganda', '乌克兰': 'Ukraine', '阿联酋': 'United Arab Emirates', '英国': 'United Kiongdom', '美国': 'United States', '乌拉圭': 'Uruguay', '乌兹别克斯坦': 'Uzbekistan',
'委内瑞拉': 'Venezuela', '越南': 'Vietnam', '也门': 'Yemen', '南斯拉夫': 'Yugoslavia', '津巴布韦': 'Zimbabwe', '扎伊尔': 'Zaire', '赞比亚': 'Zambia','克罗地亚':'Croatia','北马其顿':'North Macedonia'}
def update_news():
url = 'https://opendata.baidu.com/data/inner?tn=reserved_all_res_tn&dspName=iphone&from_sf=1&dsp=iphone&resource_id=28565&alr=1&query=%E8%82%BA%E7%82%8E'
r = json.loads(requests.get(url).text)
top10 = r['Result'][0]['items_v2'][0]['aladdin_res']['DisplayData']['result']['items'][:5] #list
news_data = []
for r in top10:
news_data.append({
'title': r['eventDescription'],
'sourceUrl': r['eventUrl'],
'infoSource': time.strftime('%m-%d %H:%M:%S', time.localtime(int(r['eventTime']))) + ' ' + r['siteName'] #时间属性 + 消息来源
}) #构建新的列表
return news_data
def update_overall():
url = 'http://lab.isaaclin.cn/nCoV/api/overall'
overall_data = json.loads(requests.get(url).text) #标准的json数据格式化
overall_data['time'] = time.strftime("%m-%d %H:%M", time.localtime(time.time())) #当前时间
# time.time() --> '1580232854.7124019'
## time.localtime(time.time()) --> 'time.struct_time(tm_year=2020, tm_mon=1, tm_mday=29, tm_hour=1, tm_min=34, tm_sec=36, tm_wday=2, tm_yday=29, tm_isdst=0)'
### time.strftime("%m-%d %H:%M", time.localtime(time.time())) ---> '01-29 01:37' 获得当前月、日、小时、分钟
return overall_data
#
def update_hotnews():
url = 'https://i-lq.snssdk.com/api/feed/hotboard_online/v1/?is_in_channel=1&count=5&fe_source=news_hot&tab_name=stream&is_web_refresh=1&client_extra_params={%22hot_board_source%22:%22news_hot%22,%22fe_version%22:%22v10%22}&extra={%22CardStyle%22:0,%22JumpToWebList%22:true}&category=hotboard_online&update_version_code=75717'
r = requests.get(url).text #标准的json数据格式化
data = re.findall(r'title\\":\\"(.*?)\\',r)[:-1]
# time.time() --> '1580232854.7124019'
## time.localtime(time.time()) --> 'time.struct_time(tm_year=2020, tm_mon=1, tm_mday=29, tm_hour=1, tm_min=34, tm_sec=36, tm_wday=2, tm_yday=29, tm_isdst=0)'
### time.strftime("%m-%d %H:%M", time.localtime(time.time())) ---> '01-29 01:37' 获得当前月、日、小时、分钟
return data #list
def word_cloud() -> WordCloud:
url = 'https://i-lq.snssdk.com/api/feed/hotboard_online/v1/?is_in_channel=1&count=10&fe_source=news_hot&tab_name=stream&is_web_refresh=1&client_extra_params={%22hot_board_source%22:%22news_hot%22,%22fe_version%22:%22v10%22}&extra={%22CardStyle%22:0,%22JumpToWebList%22:true}&category=hotboard_online&update_version_code=75717'
r = requests.get(url).text #标准的json数据格式化
data = re.findall(r'title\\":\\"(.*?)\\',r)[:-1]
datanum = [8,7,6,5,5,4,4,2,1,1]
words = [w for w in zip(data,datanum)]
c = (
WordCloud()
.add("", words, word_size_range=[20, 100], shape=SymbolType.DIAMOND)
.set_global_opts(title_opts=opts.TitleOpts(title="WordCloud-shape-diamond"))
)
return c
def update_china_data(unit=3600 * 2):
url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
r_data = json.loads(requests.get(url).text)
data = json.loads(r_data['data']) #初始化json数据,为dict ['chinaTotal']
p_data = {}
#print(data['areaTree'][0]['children'][0])
for i in data['areaTree'][0]['children']: #各个省份
p_data[i['name']] = i['total']['confirm']
# 先对字典进行排序,按照value从大到小
p_data= sorted(p_data.items(), key=lambda x: x[1], reverse=True)
#print(p_data)
return p_data
def update_china_heal_data(unit=3600 * 2):
url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
r_data = json.loads(requests.get(url).text)
data = json.loads(r_data['data']) #初始化json数据,为dict ['chinaTotal']
p_data = {}
#print(data['areaTree'][0]['children'][0])
for i in data['areaTree'][0]['children']: #各个省份
p_data[i['name']] = i['total']['confirm'] - i['total']['dead'] - i['total']['heal']
# 先对字典进行排序,按照value从大到小
p_data= sorted(p_data.items(), key=lambda x: x[1], reverse=True)
#print(p_data)
return p_data
def china_map(data)-> Map:
opt= [
{"min":1001,"color":'#731919'},
{"min":500,"max":1000,"color":'red'},
{"min":100,"max":499,"color":'#e26061'},
{"min":10,"max":99,"color":'#f08f7f'},
{"min":1,"max":9,"color":'#ffb86a'},
{"value":0,"color":'#ffffff'}
]
c = (
Map()
.add(
"确诊人数", data, "china", is_map_symbol_show=False,
)
.set_series_opts(label_opts=opts.LabelOpts(is_show=False,font_size=8))
.set_global_opts(
visualmap_opts=opts.VisualMapOpts(max_=1000,is_piecewise=True,pieces=opt),
legend_opts=opts.LegendOpts(is_show=False),
#title_opts=opts.TitleOpts(title="全国疫情(2019-nCov)")
)
)
return c
# 获取世界数据
def update_world_data(unit=3600 * 2):
url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
r_data = json.loads(requests.get(url).text)
data = json.loads(r_data['data']) #初始化json数据,为dict ['chinaTotal']
#print(data['areaTree'][0]['children'][0])
countryEN = []
total_confirm = []
for i in data['areaTree']:
if i['name'] != '钻石号邮轮':
if i['name'] == '日本本土':
countryEN.append('Japan')
total_confirm.append(i['total']['confirm'])
else:
countryEN.append(cn_to_en[i['name']])
total_confirm.append(i['total']['confirm'])
data = [list(z) for z in zip(countryEN, total_confirm)]
return data
def update_world_data1(unit=3600 * 2):
url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
r_data = json.loads(requests.get(url).text)
data = json.loads(r_data['data']) #初始化json数据,为dict ['chinaTotal']
#print(data['areaTree'][0]['children'][0])
translate = Translator()
country = [] #中文国家提取
total_confirm = []
for i in data['areaTree']:
country.append(i['name'])
total_confirm.append(i['total']['confirm'])
countryEN = [] #翻译
for i in country:
countryEN.append(translate.translate(i).text)
#今日数据
data = [list(z) for z in zip(countryEN, total_confirm)]
return data
def world_map(data) -> Map:
    """Build a world choropleth map of confirmed case counts.

    ``data`` is a list of ``[country_name_en, count]`` pairs.
    """
    # Piecewise colour legend: darker red means more cases.
    pieces = [
        {"min": 1001, "color": '#731919'},
        {"min": 51, "max": 1000, "color": 'red'},
        {"min": 11, "max": 50, "color": '#e26061'},
        {"min": 6, "max": 10, "color": '#f08f7f'},
        {"min": 1, "max": 5, "color": '#ffb86a'},
        {"value": 0, "color": '#ffffff'},
    ]
    chart = Map()
    chart.add("确诊人数", data, "world", is_map_symbol_show=False)
    chart.set_series_opts(label_opts=opts.LabelOpts(is_show=False, font_size=8))
    chart.set_global_opts(
        visualmap_opts=opts.VisualMapOpts(max_=1000, is_piecewise=True, pieces=pieces),
        legend_opts=opts.LegendOpts(is_show=False),
    )
    return chart
def kline() -> Kline:
    """Render daily new-confirmed counts as a K-line (candlestick) chart.

    Each candle ``i`` is the 4-wide window
    ``[inc[i-3], inc[i-2], inc[i-1], inc[i]]`` of daily increases,
    zero-padded at the start of the series.

    Returns a pyecharts ``Kline`` chart.
    """
    data = get_origin_data()  # day-by-day national totals
    confirmed = [day['confirm'] for day in data['chinaDayList']]
    # Daily increase: the first day has no predecessor so it contributes 0.
    if confirmed:
        increases = [0] + [
            int(curr) - int(prev)
            for prev, curr in zip(confirmed, confirmed[1:])
        ]
    else:
        increases = []
    # Zero-pad three leading values so every index has a full 4-wide window;
    # this replaces the previous if/elif special cases for i == 0, 1, 2.
    padded = [0, 0, 0] + increases
    candles = [padded[i:i + 4] for i in range(len(increases))]
    chart = (
        Kline()
        .add_xaxis([day['date'] for day in data['chinaDayList']])
        .add_yaxis("kline", candles)
        .set_global_opts(
            yaxis_opts=opts.AxisOpts(
                is_scale=True,
                splitarea_opts=opts.SplitAreaOpts(
                    is_show=True, areastyle_opts=opts.AreaStyleOpts(opacity=1)
                ),
            ),
            xaxis_opts=opts.AxisOpts(is_scale=True),
            datazoom_opts=[opts.DataZoomOpts(pos_bottom="-2%", range_end=100)],
        )
    )
    return chart
def get_origin_data():
    """Download and decode the day-by-day national statistics feed.

    Returns the parsed payload as a dict.
    """
    url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_other'
    response = requests.get(url)
    # the endpoint wraps a JSON document inside the string-valued 'data' field
    return json.loads(json.loads(response.text)['data'])
def line_connect_null() -> Line:
    """Line chart of cumulative confirmed/suspected/healed/dead counts,
    plus the daily number of new confirmations (drawn with a filled area).
    """
    data = get_origin_data()  # day-by-day national totals
    day_list = data['chinaDayList']
    confirmed = [day['confirm'] for day in day_list]
    # Daily increase: the first day has no predecessor so it contributes 0.
    if confirmed:
        daily_new = [0] + [
            int(curr) - int(prev)
            for prev, curr in zip(confirmed, confirmed[1:])
        ]
    else:
        daily_new = []
    chart = (
        Line()
        .add_xaxis([day['date'] for day in day_list])
        .add_yaxis('确诊', [day['confirm'] for day in day_list],
                   label_opts=opts.LabelOpts(is_show=False))
        .add_yaxis('疑似', [day['suspect'] for day in day_list],
                   label_opts=opts.LabelOpts(is_show=False))
        .add_yaxis('治愈', [day['heal'] for day in day_list],
                   label_opts=opts.LabelOpts(is_show=False))
        .add_yaxis('死亡', [day['dead'] for day in day_list],
                   label_opts=opts.LabelOpts(is_show=False))
        # AreaStyleOpts(opacity=0.5) shades the area under the series
        .add_yaxis('每日确诊增加数', daily_new,
                   areastyle_opts=opts.AreaStyleOpts(opacity=0.5),
                   label_opts=opts.LabelOpts(is_show=False))
        .set_global_opts(
            datazoom_opts=opts.DataZoomOpts(range_end=100),
        )
    )
    return chart
def line_heal() -> Line:
    """Line chart of cumulative healed vs. dead counts over time.

    Returns a pyecharts ``Line`` chart.
    """
    # NOTE: a previous version also computed daily confirmed/suspected
    # increases here, but those lists were never used -- removed as dead code.
    url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
    r_data = json.loads(requests.get(url).text)
    # the API wraps its payload in a string-valued 'data' field
    data = json.loads(r_data['data'])
    day_list = data['chinaDayList']
    chart = (
        Line()
        .add_xaxis([day['date'] for day in day_list])
        .add_yaxis('治愈', [day['heal'] for day in day_list])
        .add_yaxis('死亡', [day['dead'] for day in day_list])
        .set_global_opts(
            datazoom_opts=opts.DataZoomOpts(range_end=100),
        )
    )
    return chart
# per-country statistics outside mainland China
def world_bar() -> Bar:
    """Horizontal bar chart of confirmed counts per country.

    The list is reversed so the largest totals sit at the top of the
    horizontal chart, and the last entry after reversal (the feed's first
    areaTree item -- presumably China; verify against the feed) is dropped.
    """
    url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
    r_data = json.loads(requests.get(url).text)
    # the API wraps its payload in a string-valued 'data' field
    data = json.loads(r_data['data'])
    names = [area['name'] for area in data['areaTree']]
    counts = [area['total']['confirm'] for area in data['areaTree']]
    names.reverse()
    counts.reverse()
    chart = (
        Bar()
        .add_xaxis(names[:-1])
        .add_yaxis("确诊人数", counts[:-1])
        .reversal_axis()
        .set_series_opts(label_opts=opts.LabelOpts(position="right", color="black"))
        .set_global_opts(
            yaxis_opts=opts.AxisOpts(
                axislabel_opts=opts.LabelOpts(rotate=-45, font_size=11)
            ),
        )
    )
    return chart
# trend of cases outside China
def other_line() -> Line:
    """Line chart of the confirmed-case trend outside China (ArcGIS feed)."""
    url = 'https://services1.arcgis.com/0MSEUqKaxRlEPj5g/arcgis/rest/services/cases_time_v3/FeatureServer/0/query?f=json&where=1%3D1&returnGeometry=false&spatialRel=esriSpatialRelIntersects&outFields=*&orderByFields=Report_Date_String%20asc&resultOffset=0&resultRecordCount=2000&cacheHint=true'
    r_data = json.loads(requests.get(url).text)
    features = r_data['features']
    dates = []
    counts = []
    for feature in features:
        attrs = feature['attributes']
        # timestamps arrive in milliseconds since the epoch
        dates.append(time.strftime("%m.%d", time.localtime(attrs['Report_Date'] / 1000)))
        counts.append(attrs['Other_Locations'])
    chart = (
        Line()
        .add_xaxis(dates)
        .add_yaxis('确诊', counts)
        .set_global_opts()
    )
    return chart
def china_online():
    """Return the decoded national summary payload as a dict.

    The payload includes (among other keys) the national totals, the
    day-over-day additions, and the last update time -- consumers pick
    the fields they need.
    """
    url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
    response = requests.get(url)
    # the API wraps its payload in a string-valued 'data' field
    return json.loads(json.loads(response.text)['data'])
@app.route("/")
def index():
    """Serve the dashboard page; chart data is fetched via the JSON routes."""
    # NOTE: a previous version called get_origin_data() here and discarded
    # the result -- removed to avoid a wasted network round-trip per page load.
    return render_template("index.html")
# national map data
@app.route("/map")
def get_map():
    """Return the national active-cases map as pyecharts JSON options."""
    # dump_options_with_quotes() is required for every pyecharts chart
    # that is rendered client-side from its JSON options.
    return china_map(update_china_data()).dump_options_with_quotes()
# national map data, cases awaiting recovery
@app.route("/map2")
def get_map2():
    """Return the national to-be-healed map as pyecharts JSON options."""
    # dump_options_with_quotes() is required for every pyecharts chart
    # that is rendered client-side from its JSON options.
    return china_map(update_china_heal_data()).dump_options_with_quotes()
# world map data
@app.route("/maps")
def get_maps():
    """Return the world confirmed-cases map as pyecharts JSON options."""
    return world_map(update_world_data()).dump_options_with_quotes()
# outbreak news feed
@app.route("/news")
def get_news():
    """Return the latest outbreak news items as JSON."""
    return jsonify(update_news())
# national summary counters
@app.route("/online")
def get_online():
    """Return the national summary statistics payload as JSON."""
    return jsonify(china_online())
# real-time trending topics
@app.route("/hotnews")
def get_hotnews():
    """Return the current trending-topics list as JSON."""
    return jsonify(update_hotnews())
@app.route("/wordcloud")
def get_word_cloud():
    """Return the word-cloud chart as pyecharts JSON options."""
    return word_cloud().dump_options_with_quotes()
# K-line chart
@app.route("/kline")
def get_kline():
    """Return the daily-increase K-line chart as pyecharts JSON options."""
    return kline().dump_options_with_quotes()
@app.route("/line")
def get_line():
    """Return the national trend line chart as pyecharts JSON options."""
    return line_connect_null().dump_options_with_quotes()
@app.route("/worldbar")
def get_worldbar():
    """Return the per-country bar chart as pyecharts JSON options."""
    return world_bar().dump_options_with_quotes()
@app.route("/worldline")
def get_worldline():
    """Return the outside-China trend chart as pyecharts JSON options."""
    return other_line().dump_options_with_quotes()
@app.route("/heal")
def get_heal():
    """Return the healed-vs-dead trend chart as pyecharts JSON options."""
    return line_heal().dump_options_with_quotes()
# disabled route kept for reference:
# @app.route("/overall")
# def get_overall():
#     overall = update_overall()
#     return jsonify(overall)
if __name__ == "__main__":
    #app.run(debug=True)
    # NOTE(review): binding 0.0.0.0 with debug=True exposes the Werkzeug
    # debugger to the whole network -- disable debug before deploying.
    app.run(host="0.0.0.0",port=5000,debug=True)
| [
"flask.render_template",
"flask.Flask",
"pyecharts.options.VisualMapOpts",
"pyecharts.options.DataZoomOpts",
"pyecharts.charts.Line",
"pyecharts.charts.Bar",
"flask.jsonify",
"pyecharts.options.LabelOpts",
"pyecharts.options.TitleOpts",
"pyecharts.options.LegendOpts",
"pyecharts.charts.WordCloud... | [((255, 270), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (260, 270), False, 'from flask import Flask, render_template, jsonify\n'), ((7343, 7369), 'json.loads', 'json.loads', (["r_data['data']"], {}), "(r_data['data'])\n", (7353, 7369), False, 'import json\n'), ((7881, 7907), 'json.loads', 'json.loads', (["r_data['data']"], {}), "(r_data['data'])\n", (7891, 7907), False, 'import json\n'), ((9274, 9300), 'json.loads', 'json.loads', (["r_data['data']"], {}), "(r_data['data'])\n", (9284, 9300), False, 'import json\n'), ((10011, 10037), 'json.loads', 'json.loads', (["r_data['data']"], {}), "(r_data['data'])\n", (10021, 10037), False, 'import json\n'), ((12665, 12682), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (12677, 12682), False, 'import requests\n'), ((14172, 14198), 'json.loads', 'json.loads', (["r_data['data']"], {}), "(r_data['data'])\n", (14182, 14198), False, 'import json\n'), ((15296, 15322), 'json.loads', 'json.loads', (["r_data['data']"], {}), "(r_data['data'])\n", (15306, 15322), False, 'import json\n'), ((17033, 17059), 'json.loads', 'json.loads', (["r_data['data']"], {}), "(r_data['data'])\n", (17043, 17059), False, 'import json\n'), ((17333, 17362), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (17348, 17362), False, 'from flask import Flask, render_template, jsonify\n'), ((18027, 18040), 'flask.jsonify', 'jsonify', (['news'], {}), '(news)\n', (18034, 18040), False, 'from flask import Flask, render_template, jsonify\n'), ((18130, 18146), 'flask.jsonify', 'jsonify', (['onlines'], {}), '(onlines)\n', (18137, 18146), False, 'from flask import Flask, render_template, jsonify\n'), ((18241, 18257), 'flask.jsonify', 'jsonify', (['hotnews'], {}), '(hotnews)\n', (18248, 18257), False, 'from flask import Flask, render_template, jsonify\n'), ((5978, 5995), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (5990, 5995), False, 'import requests\n'), 
((6028, 6070), 're.findall', 're.findall', (['"""title\\\\\\\\":\\\\\\\\"(.*?)\\\\\\\\"""', 'r'], {}), '(\'title\\\\\\\\":\\\\\\\\"(.*?)\\\\\\\\\', r)\n', (6038, 6070), False, 'import re\n'), ((6779, 6796), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (6791, 6796), False, 'import requests\n'), ((6829, 6871), 're.findall', 're.findall', (['"""title\\\\\\\\":\\\\\\\\"(.*?)\\\\\\\\"""', 'r'], {}), '(\'title\\\\\\\\":\\\\\\\\"(.*?)\\\\\\\\\', r)\n', (6839, 6871), False, 'import re\n'), ((4585, 4602), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (4597, 4602), False, 'import requests\n'), ((5150, 5167), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (5162, 5167), False, 'import requests\n'), ((5261, 5272), 'time.time', 'time.time', ([], {}), '()\n', (5270, 5272), False, 'import time\n'), ((7103, 7150), 'pyecharts.options.TitleOpts', 'opts.TitleOpts', ([], {'title': '"""WordCloud-shape-diamond"""'}), "(title='WordCloud-shape-diamond')\n", (7117, 7150), True, 'from pyecharts import options as opts\n'), ((7308, 7325), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (7320, 7325), False, 'import requests\n'), ((7846, 7863), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (7858, 7863), False, 'import requests\n'), ((8871, 8931), 'pyecharts.options.VisualMapOpts', 'opts.VisualMapOpts', ([], {'max_': '(1000)', 'is_piecewise': '(True)', 'pieces': 'opt'}), '(max_=1000, is_piecewise=True, pieces=opt)\n', (8889, 8931), True, 'from pyecharts import options as opts\n'), ((8959, 8989), 'pyecharts.options.LegendOpts', 'opts.LegendOpts', ([], {'is_show': '(False)'}), '(is_show=False)\n', (8974, 8989), True, 'from pyecharts import options as opts\n'), ((9239, 9256), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (9251, 9256), False, 'import requests\n'), ((9976, 9993), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (9988, 9993), False, 'import requests\n'), ((11136, 11196), 'pyecharts.options.VisualMapOpts', 
'opts.VisualMapOpts', ([], {'max_': '(1000)', 'is_piecewise': '(True)', 'pieces': 'opt'}), '(max_=1000, is_piecewise=True, pieces=opt)\n', (11154, 11196), True, 'from pyecharts import options as opts\n'), ((11224, 11254), 'pyecharts.options.LegendOpts', 'opts.LegendOpts', ([], {'is_show': '(False)'}), '(is_show=False)\n', (11239, 11254), True, 'from pyecharts import options as opts\n'), ((12361, 12389), 'pyecharts.options.AxisOpts', 'opts.AxisOpts', ([], {'is_scale': '(True)'}), '(is_scale=True)\n', (12374, 12389), True, 'from pyecharts import options as opts\n'), ((12705, 12723), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (12715, 12723), False, 'import json\n'), ((13942, 13974), 'pyecharts.options.DataZoomOpts', 'opts.DataZoomOpts', ([], {'range_end': '(100)'}), '(range_end=100)\n', (13959, 13974), True, 'from pyecharts import options as opts\n'), ((14137, 14154), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (14149, 14154), False, 'import requests\n'), ((15073, 15105), 'pyecharts.options.DataZoomOpts', 'opts.DataZoomOpts', ([], {'range_end': '(100)'}), '(range_end=100)\n', (15090, 15105), True, 'from pyecharts import options as opts\n'), ((15261, 15278), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (15273, 15278), False, 'import requests\n'), ((16336, 16353), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (16348, 16353), False, 'import requests\n'), ((16518, 16571), 'time.localtime', 'time.localtime', (["(i['attributes']['Report_Date'] / 1000)"], {}), "(i['attributes']['Report_Date'] / 1000)\n", (16532, 16571), False, 'import time\n'), ((16998, 17015), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (17010, 17015), False, 'import requests\n'), ((12481, 12531), 'pyecharts.options.DataZoomOpts', 'opts.DataZoomOpts', ([], {'pos_bottom': '"""-2%"""', 'range_end': '(100)'}), "(pos_bottom='-2%', range_end=100)\n", (12498, 12531), True, 'from pyecharts import options as opts\n'), ((6978, 6989), 
'pyecharts.charts.WordCloud', 'WordCloud', ([], {}), '()\n', (6987, 6989), False, 'from pyecharts.charts import Map, Timeline, Kline, Line, Bar, WordCloud\n'), ((8767, 8809), 'pyecharts.options.LabelOpts', 'opts.LabelOpts', ([], {'is_show': '(False)', 'font_size': '(8)'}), '(is_show=False, font_size=8)\n', (8781, 8809), True, 'from pyecharts import options as opts\n'), ((11031, 11073), 'pyecharts.options.LabelOpts', 'opts.LabelOpts', ([], {'is_show': '(False)', 'font_size': '(8)'}), '(is_show=False, font_size=8)\n', (11045, 11073), True, 'from pyecharts import options as opts\n'), ((13716, 13747), 'pyecharts.options.AreaStyleOpts', 'opts.AreaStyleOpts', ([], {'opacity': '(0.5)'}), '(opacity=0.5)\n', (13734, 13747), True, 'from pyecharts import options as opts\n'), ((13759, 13788), 'pyecharts.options.LabelOpts', 'opts.LabelOpts', ([], {'is_show': '(False)'}), '(is_show=False)\n', (13773, 13788), True, 'from pyecharts import options as opts\n'), ((15710, 15757), 'pyecharts.options.LabelOpts', 'opts.LabelOpts', ([], {'position': '"""right"""', 'color': '"""black"""'}), "(position='right', color='black')\n", (15724, 15757), True, 'from pyecharts import options as opts\n'), ((15894, 15934), 'pyecharts.options.LabelOpts', 'opts.LabelOpts', ([], {'rotate': '(-45)', 'font_size': '(11)'}), '(rotate=-45, font_size=11)\n', (15908, 15934), True, 'from pyecharts import options as opts\n'), ((8624, 8629), 'pyecharts.charts.Map', 'Map', ([], {}), '()\n', (8627, 8629), False, 'from pyecharts.charts import Map, Timeline, Kline, Line, Bar, WordCloud\n'), ((10836, 10841), 'pyecharts.charts.Map', 'Map', ([], {}), '()\n', (10839, 10841), False, 'from pyecharts.charts import Map, Timeline, Kline, Line, Bar, WordCloud\n'), ((11978, 11985), 'pyecharts.charts.Kline', 'Kline', ([], {}), '()\n', (11983, 11985), False, 'from pyecharts.charts import Map, Timeline, Kline, Line, Bar, WordCloud\n'), ((12274, 12303), 'pyecharts.options.AreaStyleOpts', 'opts.AreaStyleOpts', ([], {'opacity': 
'(1)'}), '(opacity=1)\n', (12292, 12303), True, 'from pyecharts import options as opts\n'), ((13617, 13646), 'pyecharts.options.LabelOpts', 'opts.LabelOpts', ([], {'is_show': '(False)'}), '(is_show=False)\n', (13631, 13646), True, 'from pyecharts import options as opts\n'), ((16680, 16686), 'pyecharts.charts.Line', 'Line', ([], {}), '()\n', (16684, 16686), False, 'from pyecharts.charts import Map, Timeline, Kline, Line, Bar, WordCloud\n'), ((13509, 13538), 'pyecharts.options.LabelOpts', 'opts.LabelOpts', ([], {'is_show': '(False)'}), '(is_show=False)\n', (13523, 13538), True, 'from pyecharts import options as opts\n'), ((14751, 14757), 'pyecharts.charts.Line', 'Line', ([], {}), '()\n', (14755, 14757), False, 'from pyecharts.charts import Map, Timeline, Kline, Line, Bar, WordCloud\n'), ((13401, 13430), 'pyecharts.options.LabelOpts', 'opts.LabelOpts', ([], {'is_show': '(False)'}), '(is_show=False)\n', (13415, 13430), True, 'from pyecharts import options as opts\n'), ((15569, 15574), 'pyecharts.charts.Bar', 'Bar', ([], {}), '()\n', (15572, 15574), False, 'from pyecharts.charts import Map, Timeline, Kline, Line, Bar, WordCloud\n'), ((13277, 13306), 'pyecharts.options.LabelOpts', 'opts.LabelOpts', ([], {'is_show': '(False)'}), '(is_show=False)\n', (13291, 13306), True, 'from pyecharts import options as opts\n'), ((13116, 13122), 'pyecharts.charts.Line', 'Line', ([], {}), '()\n', (13120, 13122), False, 'from pyecharts.charts import Map, Timeline, Kline, Line, Bar, WordCloud\n')] |
# Generated by Django 2.2.4 on 2019-08-03 15:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``Table`` model for the ``game`` app.

    Auto-generated by ``manage.py makemigrations`` (Django 2.2.4); the
    migration graph references this file by name and by ``dependencies``,
    so avoid renaming or hand-editing beyond comments.
    """

    dependencies = [
        ('game', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Table',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): the CharFields below appear to hold serialized
                # player/card lists -- confirm the encoding against the model.
                ('players', models.CharField(max_length=500)),
                ('dealer', models.CharField(max_length=200)),
                ('pool', models.IntegerField(default=0)),
                ('deck', models.CharField(max_length=500)),
                ('cards_on_table', models.CharField(max_length=100)),
            ],
        ),
    ]
| [
"django.db.models.AutoField",
"django.db.models.CharField",
"django.db.models.IntegerField"
] | [((315, 408), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (331, 408), False, 'from django.db import migrations, models\n'), ((435, 467), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (451, 467), False, 'from django.db import migrations, models\n'), ((497, 529), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (513, 529), False, 'from django.db import migrations, models\n'), ((557, 587), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (576, 587), False, 'from django.db import migrations, models\n'), ((615, 647), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (631, 647), False, 'from django.db import migrations, models\n'), ((685, 717), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (701, 717), False, 'from django.db import migrations, models\n')] |
# coding=utf-8
import unittest
from mock import patch
from ddt import ddt, data
from digestparser.objects import Digest
import activity.activity_PostDigestJATS as activity_module
from activity.activity_PostDigestJATS import activity_PostDigestJATS as activity_object
import tests.activity.settings_mock as settings_mock
from tests.activity.classes_mock import FakeLogger, FakeResponse
import tests.test_data as test_case_data
from tests.activity.classes_mock import FakeStorageContext
from tests.classes_mock import FakeSMTPServer
import provider.digest_provider as digest_provider
def input_data(file_name_to_change=""):
    """Return a copy of the ingest digest fixture with ``file_name`` set.

    A shallow copy is returned rather than mutating the shared
    module-level ``test_case_data.ingest_digest_data`` dict in place, so
    one test's file name cannot leak into another test.
    """
    activity_data = dict(test_case_data.ingest_digest_data)
    activity_data["file_name"] = file_name_to_change
    return activity_data
@ddt
class TestPostDigestJats(unittest.TestCase):
    """Tests for activity_PostDigestJATS.do_activity with all I/O mocked."""
    def setUp(self):
        # build the activity under test with a call-recording logger
        fake_logger = FakeLogger()
        self.activity = activity_object(settings_mock, fake_logger, None, None, None)
    def tearDown(self):
        # clean the temporary directory
        self.activity.clean_tmp_dir()
    # mock.patch decorators apply bottom-up, so the mock arguments below
    # arrive in reverse order: digest storage, download storage,
    # requests.post, then smtp_connect.
    @patch.object(activity_module.email_provider, "smtp_connect")
    @patch("requests.post")
    @patch.object(activity_module.download_helper, "storage_context")
    @patch.object(activity_module.digest_provider, "storage_context")
    @data(
        {
            "comment": "digest docx file example",
            "filename": "DIGEST+99999.docx",
            "post_status_code": 200,
            "expected_result": activity_object.ACTIVITY_SUCCESS,
            "expected_activity_status": True,
            "expected_build_status": True,
            "expected_jats_status": True,
            "expected_post_status": True,
            "expected_email_status": True,
            "expected_digest_doi": u"https://doi.org/10.7554/eLife.99999",
        },
        {
            "comment": "digest zip file example",
            "filename": "DIGEST+99999.zip",
            "post_status_code": 200,
            "expected_result": activity_object.ACTIVITY_SUCCESS,
            "expected_activity_status": True,
            "expected_build_status": True,
            "expected_jats_status": True,
            "expected_post_status": True,
            "expected_email_status": True,
            "expected_digest_doi": u"https://doi.org/10.7554/eLife.99999",
        },
        {
            "comment": "digest file does not exist example",
            "filename": "",
            "post_status_code": 200,
            "expected_result": activity_object.ACTIVITY_SUCCESS,
            "expected_activity_status": None,
            "expected_build_status": False,
            "expected_jats_status": None,
            "expected_post_status": None,
        },
        {
            "comment": "bad digest docx file example",
            "filename": "DIGEST+99998.docx",
            "post_status_code": 200,
            "expected_result": activity_object.ACTIVITY_SUCCESS,
            "expected_activity_status": None,
            "expected_build_status": False,
            "expected_jats_status": None,
            "expected_post_status": None,
            "expected_email_status": None,
        },
        {
            "comment": "digest author name encoding file example",
            "filename": "DIGEST+99997.zip",
            "post_status_code": 200,
            "expected_result": activity_object.ACTIVITY_SUCCESS,
            "expected_activity_status": True,
            "expected_build_status": True,
            "expected_jats_status": True,
            "expected_post_status": True,
            "expected_email_status": True,
            "expected_digest_doi": u"https://doi.org/10.7554/eLife.99997",
        },
        {
            "comment": "digest bad post response",
            "filename": "DIGEST+99999.docx",
            "post_status_code": 500,
            "expected_result": activity_object.ACTIVITY_SUCCESS,
            "expected_activity_status": True,
            "expected_build_status": True,
            "expected_jats_status": True,
            "expected_post_status": False,
            "expected_email_status": None,
            "expected_digest_doi": u"https://doi.org/10.7554/eLife.99999",
        },
        {
            "comment": "digest silent deposit example",
            "filename": "DIGEST+99999+SILENT.zip",
            "expected_result": activity_object.ACTIVITY_SUCCESS,
            "expected_activity_status": None,
            "expected_build_status": None,
            "expected_jats_status": None,
            "expected_post_status": None,
            "expected_email_status": None,
        },
    )
    def test_do_activity(
        self,
        test_data,
        fake_storage_context,
        fake_download_storage_context,
        requests_method_mock,
        fake_email_smtp_connect,
    ):
        """Run do_activity for each ddt scenario and check every status flag."""
        # copy XML files into the input directory using the storage context
        fake_storage_context.return_value = FakeStorageContext()
        fake_download_storage_context.return_value = FakeStorageContext()
        fake_email_smtp_connect.return_value = FakeSMTPServer(
            self.activity.get_tmp_dir()
        )
        # POST response
        requests_method_mock.return_value = FakeResponse(
            test_data.get("post_status_code"), None
        )
        # do the activity
        result = self.activity.do_activity(input_data(test_data.get("filename")))
        filename_used = input_data(test_data.get("filename")).get("file_name")
        # check assertions
        # NOTE(review): the failure-message template below has no {filename}
        # placeholder, so the filename= keyword passed to format() is unused.
        self.assertEqual(
            result,
            test_data.get("expected_result"),
            (
                "failed in {comment}, got {result}, filename (unknown), "
                + "input_file {input_file}, digest {digest}"
            ).format(
                comment=test_data.get("comment"),
                result=result,
                input_file=self.activity.input_file,
                filename=filename_used,
                digest=self.activity.digest,
            ),
        )
        self.assertEqual(
            self.activity.statuses.get("build"),
            test_data.get("expected_build_status"),
            "failed in {comment}".format(comment=test_data.get("comment")),
        )
        self.assertEqual(
            self.activity.statuses.get("jats"),
            test_data.get("expected_jats_status"),
            "failed in {comment}".format(comment=test_data.get("comment")),
        )
        self.assertEqual(
            self.activity.statuses.get("post"),
            test_data.get("expected_post_status"),
            "failed in {comment}".format(comment=test_data.get("comment")),
        )
        self.assertEqual(
            self.activity.statuses.get("email"),
            test_data.get("expected_email_status"),
            "failed in {comment}".format(comment=test_data.get("comment")),
        )
        # check digest values
        if self.activity.digest and test_data.get("expected_digest_doi"):
            self.assertEqual(
                self.activity.digest.doi,
                test_data.get("expected_digest_doi"),
                "failed in {comment}".format(comment=test_data.get("comment")),
            )
    @patch.object(activity_module.email_provider, "smtp_connect")
    @patch.object(activity_module.download_helper, "storage_context")
    @patch.object(activity_module.digest_provider, "storage_context")
    @patch.object(digest_provider, "digest_jats")
    def test_do_activity_jats_failure(
        self,
        fake_digest_jats,
        fake_storage_context,
        fake_download_storage_context,
        fake_email_smtp_connect,
    ):
        """The activity still succeeds when digest_jats returns no content."""
        fake_storage_context.return_value = FakeStorageContext()
        fake_download_storage_context.return_value = FakeStorageContext()
        fake_email_smtp_connect.return_value = FakeSMTPServer(
            self.activity.get_tmp_dir()
        )
        activity_data = input_data("DIGEST+99999.zip")
        fake_digest_jats.return_value = None
        result = self.activity.do_activity(activity_data)
        self.assertEqual(result, activity_object.ACTIVITY_SUCCESS)
    @patch.object(activity_module.email_provider, "smtp_connect")
    @patch.object(activity_module.download_helper, "storage_context")
    @patch.object(activity_module.digest_provider, "storage_context")
    @patch.object(activity_module.requests_provider, "jats_post_payload")
    def test_do_activity_post_failure(
        self,
        fake_post_jats,
        fake_storage_context,
        fake_download_storage_context,
        fake_email_smtp_connect,
    ):
        """The activity still succeeds when building the POST payload raises."""
        fake_storage_context.return_value = FakeStorageContext()
        fake_download_storage_context.return_value = FakeStorageContext()
        fake_email_smtp_connect.return_value = FakeSMTPServer(
            self.activity.get_tmp_dir()
        )
        activity_data = input_data("DIGEST+99999.zip")
        fake_post_jats.side_effect = Exception("Something went wrong!")
        result = self.activity.do_activity(activity_data)
        self.assertEqual(result, activity_object.ACTIVITY_SUCCESS)
class TestPostDigestJatsNoEndpoint(unittest.TestCase):
    """Behaviour when settings lack a typesetter_digest_endpoint value.

    The settings object is the shared ``settings_mock`` module, so any
    deletion or blanking of the endpoint must be undone afterwards to
    avoid leaking state into other test classes.
    """
    def setUp(self):
        # remember the shared setting so tearDown can restore it
        self._saved_endpoint = getattr(
            settings_mock, "typesetter_digest_endpoint", None
        )
    def tearDown(self):
        # restore the shared settings module for subsequent tests
        settings_mock.typesetter_digest_endpoint = self._saved_endpoint
    def test_do_activity_no_endpoint(self):
        """test returning True if the endpoint is not specified in the settings"""
        activity = activity_object(settings_mock, FakeLogger(), None, None, None)
        # restored by tearDown, so this is now safe to delete
        del activity.settings.typesetter_digest_endpoint
        result = activity.do_activity()
        self.assertEqual(result, activity_object.ACTIVITY_SUCCESS)
    def test_do_activity_blank_endpoint(self):
        """test returning True if the endpoint is blank"""
        activity = activity_object(settings_mock, FakeLogger(), None, None, None)
        # restored by tearDown, so this is now safe to blank
        activity.settings.typesetter_digest_endpoint = ""
        result = activity.do_activity()
        self.assertEqual(result, activity_object.ACTIVITY_SUCCESS)
class TestEmailErrorReport(unittest.TestCase):
    """Tests for activity_PostDigestJATS.email_error_report."""
    def setUp(self):
        fake_logger = FakeLogger()
        self.activity = activity_object(settings_mock, fake_logger, None, None, None)
        # remember the shared endpoint setting so tearDown can restore it;
        # the test blanks it on the shared settings_mock module
        self._saved_endpoint = getattr(
            settings_mock, "typesetter_digest_endpoint", None
        )
    def tearDown(self):
        # clean the temporary directory
        self.activity.clean_tmp_dir()
        # undo the module-level settings change made by the test
        settings_mock.typesetter_digest_endpoint = self._saved_endpoint
    @patch.object(activity_module.email_provider, "smtp_connect")
    def test_email_error_report(self, fake_email_smtp_connect):
        """test sending an email error"""
        fake_email_smtp_connect.return_value = FakeSMTPServer(
            self.activity.get_tmp_dir()
        )
        digest_content = Digest()
        digest_content.doi = "10.7554/eLife.99999"
        jats_content = {}
        error_messages = ["An error"]
        settings_mock.typesetter_digest_endpoint = ""
        result = self.activity.email_error_report(
            digest_content, jats_content, error_messages
        )
        self.assertEqual(result, True)
# allow running this test module directly with the unittest runner
if __name__ == "__main__":
    unittest.main()
| [
"mock.patch",
"digestparser.objects.Digest",
"tests.activity.classes_mock.FakeStorageContext",
"mock.patch.object",
"ddt.data",
"unittest.main",
"activity.activity_PostDigestJATS.activity_PostDigestJATS",
"tests.activity.classes_mock.FakeLogger"
] | [((1060, 1120), 'mock.patch.object', 'patch.object', (['activity_module.email_provider', '"""smtp_connect"""'], {}), "(activity_module.email_provider, 'smtp_connect')\n", (1072, 1120), False, 'from mock import patch\n'), ((1126, 1148), 'mock.patch', 'patch', (['"""requests.post"""'], {}), "('requests.post')\n", (1131, 1148), False, 'from mock import patch\n'), ((1154, 1218), 'mock.patch.object', 'patch.object', (['activity_module.download_helper', '"""storage_context"""'], {}), "(activity_module.download_helper, 'storage_context')\n", (1166, 1218), False, 'from mock import patch\n'), ((1224, 1288), 'mock.patch.object', 'patch.object', (['activity_module.digest_provider', '"""storage_context"""'], {}), "(activity_module.digest_provider, 'storage_context')\n", (1236, 1288), False, 'from mock import patch\n'), ((1294, 3836), 'ddt.data', 'data', (["{'comment': 'digest docx file example', 'filename': 'DIGEST+99999.docx',\n 'post_status_code': 200, 'expected_result': activity_object.\n ACTIVITY_SUCCESS, 'expected_activity_status': True,\n 'expected_build_status': True, 'expected_jats_status': True,\n 'expected_post_status': True, 'expected_email_status': True,\n 'expected_digest_doi': u'https://doi.org/10.7554/eLife.99999'}", "{'comment': 'digest zip file example', 'filename': 'DIGEST+99999.zip',\n 'post_status_code': 200, 'expected_result': activity_object.\n ACTIVITY_SUCCESS, 'expected_activity_status': True,\n 'expected_build_status': True, 'expected_jats_status': True,\n 'expected_post_status': True, 'expected_email_status': True,\n 'expected_digest_doi': u'https://doi.org/10.7554/eLife.99999'}", "{'comment': 'digest file does not exist example', 'filename': '',\n 'post_status_code': 200, 'expected_result': activity_object.\n ACTIVITY_SUCCESS, 'expected_activity_status': None,\n 'expected_build_status': False, 'expected_jats_status': None,\n 'expected_post_status': None}", "{'comment': 'bad digest docx file example', 'filename': 'DIGEST+99998.docx',\n 
'post_status_code': 200, 'expected_result': activity_object.\n ACTIVITY_SUCCESS, 'expected_activity_status': None,\n 'expected_build_status': False, 'expected_jats_status': None,\n 'expected_post_status': None, 'expected_email_status': None}", "{'comment': 'digest author name encoding file example', 'filename':\n 'DIGEST+99997.zip', 'post_status_code': 200, 'expected_result':\n activity_object.ACTIVITY_SUCCESS, 'expected_activity_status': True,\n 'expected_build_status': True, 'expected_jats_status': True,\n 'expected_post_status': True, 'expected_email_status': True,\n 'expected_digest_doi': u'https://doi.org/10.7554/eLife.99997'}", "{'comment': 'digest bad post response', 'filename': 'DIGEST+99999.docx',\n 'post_status_code': 500, 'expected_result': activity_object.\n ACTIVITY_SUCCESS, 'expected_activity_status': True,\n 'expected_build_status': True, 'expected_jats_status': True,\n 'expected_post_status': False, 'expected_email_status': None,\n 'expected_digest_doi': u'https://doi.org/10.7554/eLife.99999'}", "{'comment': 'digest silent deposit example', 'filename':\n 'DIGEST+99999+SILENT.zip', 'expected_result': activity_object.\n ACTIVITY_SUCCESS, 'expected_activity_status': None,\n 'expected_build_status': None, 'expected_jats_status': None,\n 'expected_post_status': None, 'expected_email_status': None}"], {}), "({'comment': 'digest docx file example', 'filename':\n 'DIGEST+99999.docx', 'post_status_code': 200, 'expected_result':\n activity_object.ACTIVITY_SUCCESS, 'expected_activity_status': True,\n 'expected_build_status': True, 'expected_jats_status': True,\n 'expected_post_status': True, 'expected_email_status': True,\n 'expected_digest_doi': u'https://doi.org/10.7554/eLife.99999'}, {\n 'comment': 'digest zip file example', 'filename': 'DIGEST+99999.zip',\n 'post_status_code': 200, 'expected_result': activity_object.\n ACTIVITY_SUCCESS, 'expected_activity_status': True,\n 'expected_build_status': True, 'expected_jats_status': True,\n 
'expected_post_status': True, 'expected_email_status': True,\n 'expected_digest_doi': u'https://doi.org/10.7554/eLife.99999'}, {\n 'comment': 'digest file does not exist example', 'filename': '',\n 'post_status_code': 200, 'expected_result': activity_object.\n ACTIVITY_SUCCESS, 'expected_activity_status': None,\n 'expected_build_status': False, 'expected_jats_status': None,\n 'expected_post_status': None}, {'comment':\n 'bad digest docx file example', 'filename': 'DIGEST+99998.docx',\n 'post_status_code': 200, 'expected_result': activity_object.\n ACTIVITY_SUCCESS, 'expected_activity_status': None,\n 'expected_build_status': False, 'expected_jats_status': None,\n 'expected_post_status': None, 'expected_email_status': None}, {\n 'comment': 'digest author name encoding file example', 'filename':\n 'DIGEST+99997.zip', 'post_status_code': 200, 'expected_result':\n activity_object.ACTIVITY_SUCCESS, 'expected_activity_status': True,\n 'expected_build_status': True, 'expected_jats_status': True,\n 'expected_post_status': True, 'expected_email_status': True,\n 'expected_digest_doi': u'https://doi.org/10.7554/eLife.99997'}, {\n 'comment': 'digest bad post response', 'filename': 'DIGEST+99999.docx',\n 'post_status_code': 500, 'expected_result': activity_object.\n ACTIVITY_SUCCESS, 'expected_activity_status': True,\n 'expected_build_status': True, 'expected_jats_status': True,\n 'expected_post_status': False, 'expected_email_status': None,\n 'expected_digest_doi': u'https://doi.org/10.7554/eLife.99999'}, {\n 'comment': 'digest silent deposit example', 'filename':\n 'DIGEST+99999+SILENT.zip', 'expected_result': activity_object.\n ACTIVITY_SUCCESS, 'expected_activity_status': None,\n 'expected_build_status': None, 'expected_jats_status': None,\n 'expected_post_status': None, 'expected_email_status': None})\n", (1298, 3836), False, 'from ddt import ddt, data\n'), ((7165, 7225), 'mock.patch.object', 'patch.object', (['activity_module.email_provider', '"""smtp_connect"""'], {}), 
"(activity_module.email_provider, 'smtp_connect')\n", (7177, 7225), False, 'from mock import patch\n'), ((7231, 7295), 'mock.patch.object', 'patch.object', (['activity_module.download_helper', '"""storage_context"""'], {}), "(activity_module.download_helper, 'storage_context')\n", (7243, 7295), False, 'from mock import patch\n'), ((7301, 7365), 'mock.patch.object', 'patch.object', (['activity_module.digest_provider', '"""storage_context"""'], {}), "(activity_module.digest_provider, 'storage_context')\n", (7313, 7365), False, 'from mock import patch\n'), ((7371, 7415), 'mock.patch.object', 'patch.object', (['digest_provider', '"""digest_jats"""'], {}), "(digest_provider, 'digest_jats')\n", (7383, 7415), False, 'from mock import patch\n'), ((8087, 8147), 'mock.patch.object', 'patch.object', (['activity_module.email_provider', '"""smtp_connect"""'], {}), "(activity_module.email_provider, 'smtp_connect')\n", (8099, 8147), False, 'from mock import patch\n'), ((8153, 8217), 'mock.patch.object', 'patch.object', (['activity_module.download_helper', '"""storage_context"""'], {}), "(activity_module.download_helper, 'storage_context')\n", (8165, 8217), False, 'from mock import patch\n'), ((8223, 8287), 'mock.patch.object', 'patch.object', (['activity_module.digest_provider', '"""storage_context"""'], {}), "(activity_module.digest_provider, 'storage_context')\n", (8235, 8287), False, 'from mock import patch\n'), ((8293, 8361), 'mock.patch.object', 'patch.object', (['activity_module.requests_provider', '"""jats_post_payload"""'], {}), "(activity_module.requests_provider, 'jats_post_payload')\n", (8305, 8361), False, 'from mock import patch\n'), ((10224, 10284), 'mock.patch.object', 'patch.object', (['activity_module.email_provider', '"""smtp_connect"""'], {}), "(activity_module.email_provider, 'smtp_connect')\n", (10236, 10284), False, 'from mock import patch\n'), ((10897, 10912), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10910, 10912), False, 'import unittest\n'), 
((852, 864), 'tests.activity.classes_mock.FakeLogger', 'FakeLogger', ([], {}), '()\n', (862, 864), False, 'from tests.activity.classes_mock import FakeLogger, FakeResponse\n'), ((889, 950), 'activity.activity_PostDigestJATS.activity_PostDigestJATS', 'activity_object', (['settings_mock', 'fake_logger', 'None', 'None', 'None'], {}), '(settings_mock, fake_logger, None, None, None)\n', (904, 950), True, 'from activity.activity_PostDigestJATS import activity_PostDigestJATS as activity_object\n'), ((4913, 4933), 'tests.activity.classes_mock.FakeStorageContext', 'FakeStorageContext', ([], {}), '()\n', (4931, 4933), False, 'from tests.activity.classes_mock import FakeStorageContext\n'), ((4987, 5007), 'tests.activity.classes_mock.FakeStorageContext', 'FakeStorageContext', ([], {}), '()\n', (5005, 5007), False, 'from tests.activity.classes_mock import FakeStorageContext\n'), ((7648, 7668), 'tests.activity.classes_mock.FakeStorageContext', 'FakeStorageContext', ([], {}), '()\n', (7666, 7668), False, 'from tests.activity.classes_mock import FakeStorageContext\n'), ((7722, 7742), 'tests.activity.classes_mock.FakeStorageContext', 'FakeStorageContext', ([], {}), '()\n', (7740, 7742), False, 'from tests.activity.classes_mock import FakeStorageContext\n'), ((8592, 8612), 'tests.activity.classes_mock.FakeStorageContext', 'FakeStorageContext', ([], {}), '()\n', (8610, 8612), False, 'from tests.activity.classes_mock import FakeStorageContext\n'), ((8666, 8686), 'tests.activity.classes_mock.FakeStorageContext', 'FakeStorageContext', ([], {}), '()\n', (8684, 8686), False, 'from tests.activity.classes_mock import FakeStorageContext\n'), ((10016, 10028), 'tests.activity.classes_mock.FakeLogger', 'FakeLogger', ([], {}), '()\n', (10026, 10028), False, 'from tests.activity.classes_mock import FakeLogger, FakeResponse\n'), ((10053, 10114), 'activity.activity_PostDigestJATS.activity_PostDigestJATS', 'activity_object', (['settings_mock', 'fake_logger', 'None', 'None', 'None'], {}), 
'(settings_mock, fake_logger, None, None, None)\n', (10068, 10114), True, 'from activity.activity_PostDigestJATS import activity_PostDigestJATS as activity_object\n'), ((10529, 10537), 'digestparser.objects.Digest', 'Digest', ([], {}), '()\n', (10535, 10537), False, 'from digestparser.objects import Digest\n'), ((9286, 9298), 'tests.activity.classes_mock.FakeLogger', 'FakeLogger', ([], {}), '()\n', (9296, 9298), False, 'from tests.activity.classes_mock import FakeLogger, FakeResponse\n'), ((9683, 9695), 'tests.activity.classes_mock.FakeLogger', 'FakeLogger', ([], {}), '()\n', (9693, 9695), False, 'from tests.activity.classes_mock import FakeLogger, FakeResponse\n')] |
# -*- coding: utf-8 -*-
from odoo import models, fields, api
from odoo.exceptions import ValidationError
# Class for project management and their relationships
class Projects(models.Model):
    """Project management model: basic project data plus links to the
    responsible department and the employees assigned to the project."""
    # Name of table
    _name = "projects.project"
    # Simple fields of the object
    name = fields.Char(string="Project title", required=True)
    identifier = fields.Char(string="ID", required=True)
    locality = fields.Char(string="Locality")
    province = fields.Char(string="Province")
    start_date = fields.Date(string="Start date")
    # Relational fields with other classes
    department_ids = fields.Many2one('projects.department', string="Department")  # department_id
    employee_id = fields.Many2many('projects.employee', string="Employees")  # employee_ids
    # Constraint: every employee assigned to the project must belong to the
    # project's department.
    @api.constrains('employee_id')
    @api.multi
    def _check_department(self):
        for record in self:
            # Only check when a department has actually been set
            if record.department_ids:
                # Iterate over all employees selected for the project
                for employee_x in record.employee_id:
                    # Bug fix: the original used `is not`, which compares object
                    # identity, not string equality. Names loaded from the ORM are
                    # distinct objects even when textually equal, so valid
                    # employees could be rejected. `!=` compares by value.
                    if employee_x.department_id.name != record.department_ids.name:
                        raise ValidationError("Employee %s is not valid because he doesn't belong to the project's department." % employee_x.name)
# Extension class for the project model: adds deadline tracking.
class PriorityProjects(models.Model):
    """Inherited model that extends projects.project (same table) with a
    mandatory deadline date."""
    # We inherit from the project class and use the same table
    _inherit = 'projects.project'
    # Add a new field to save the deadline of a project
    limit_date = fields.Date(string="Limit date", required=True)
"odoo.api.constrains",
"odoo.fields.Date",
"odoo.fields.Many2one",
"odoo.exceptions.ValidationError",
"odoo.fields.Many2many",
"odoo.fields.Char"
] | [((288, 338), 'odoo.fields.Char', 'fields.Char', ([], {'string': '"""Project title"""', 'required': '(True)'}), "(string='Project title', required=True)\n", (299, 338), False, 'from odoo import models, fields, api\n'), ((354, 393), 'odoo.fields.Char', 'fields.Char', ([], {'string': '"""ID"""', 'required': '(True)'}), "(string='ID', required=True)\n", (365, 393), False, 'from odoo import models, fields, api\n'), ((407, 437), 'odoo.fields.Char', 'fields.Char', ([], {'string': '"""Locality"""'}), "(string='Locality')\n", (418, 437), False, 'from odoo import models, fields, api\n'), ((451, 481), 'odoo.fields.Char', 'fields.Char', ([], {'string': '"""Province"""'}), "(string='Province')\n", (462, 481), False, 'from odoo import models, fields, api\n'), ((497, 529), 'odoo.fields.Date', 'fields.Date', ([], {'string': '"""Start date"""'}), "(string='Start date')\n", (508, 529), False, 'from odoo import models, fields, api\n'), ((592, 651), 'odoo.fields.Many2one', 'fields.Many2one', (['"""projects.department"""'], {'string': '"""Department"""'}), "('projects.department', string='Department')\n", (607, 651), False, 'from odoo import models, fields, api\n'), ((684, 741), 'odoo.fields.Many2many', 'fields.Many2many', (['"""projects.employee"""'], {'string': '"""Employees"""'}), "('projects.employee', string='Employees')\n", (700, 741), False, 'from odoo import models, fields, api\n'), ((817, 846), 'odoo.api.constrains', 'api.constrains', (['"""employee_id"""'], {}), "('employee_id')\n", (831, 846), False, 'from odoo import models, fields, api\n'), ((1641, 1688), 'odoo.fields.Date', 'fields.Date', ([], {'string': '"""Limit date"""', 'required': '(True)'}), "(string='Limit date', required=True)\n", (1652, 1688), False, 'from odoo import models, fields, api\n'), ((1278, 1404), 'odoo.exceptions.ValidationError', 'ValidationError', (['("Employee %s is not valid because he doesn\'t belong to the project\'s department."\n % employee_x.name)'], {}), '(\n "Employee %s is not valid 
because he doesn\'t belong to the project\'s department."\n % employee_x.name)\n', (1293, 1404), False, 'from odoo.exceptions import ValidationError\n')] |
from app.shared.models import db
from app.models.mixins import ModelMixin
class Food(ModelMixin, db.Model):
    """Menu item offered by a restaurant, classified under a category."""
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Display name of the dish (max 255 chars, mandatory).
    name = db.Column(db.String(255), nullable=False)
    # Mandatory link to the owning category row.
    category_id = db.Column(db.Integer, db.ForeignKey("category.id"), nullable=False)
    # Mandatory link to the owning restaurant row.
    # NOTE(review): "restourant" is misspelled, but it matches the referenced
    # table name — renaming would require a schema migration.
    restourant_id = db.Column(
        db.Integer, db.ForeignKey("restourant.id"), nullable=False
    )
| [
"app.shared.models.db.Column",
"app.shared.models.db.String",
"app.shared.models.db.ForeignKey"
] | [((119, 158), 'app.shared.models.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (128, 158), False, 'from app.shared.models import db\n'), ((180, 194), 'app.shared.models.db.String', 'db.String', (['(255)'], {}), '(255)\n', (189, 194), False, 'from app.shared.models import db\n'), ((252, 280), 'app.shared.models.db.ForeignKey', 'db.ForeignKey', (['"""category.id"""'], {}), "('category.id')\n", (265, 280), False, 'from app.shared.models import db\n'), ((349, 379), 'app.shared.models.db.ForeignKey', 'db.ForeignKey', (['"""restourant.id"""'], {}), "('restourant.id')\n", (362, 379), False, 'from app.shared.models import db\n')] |
from setuptools import find_packages, setup
from ticketus import __version__ as version
# Package metadata and installation configuration for Ticketus.
setup(
    name='ticketus',
    version=version,  # sourced from ticketus.__version__
    license='BSD',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Ticketus is a simple, no-frills ticketing system for helpdesks.',
    url='https://github.com/sjkingo/ticketus',
    # Runtime dependencies installed alongside the package.
    install_requires=[
        'Django >= 1.6.10',
        'IMAPClient',
        'django-grappelli',
        'email-reply-parser',
        'mistune',
        'psycopg2',
        'python-dateutil',
    ],
    zip_safe=False,
    include_package_data=True,
    # Ship every package except the local settings module.
    packages=find_packages(exclude=['ticketus_settings']),
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python',
        'Topic :: Communications :: Email',
        'Topic :: Software Development :: Bug Tracking',
        'Topic :: System :: Systems Administration',
    ],
    # Importer and admin scripts installed onto the user's PATH.
    scripts=[
        'import_scripts/ticketus_import_freshdesk',
        'import_scripts/ticketus_import_github',
        'bin_scripts/ticketus_mailgw_imap4',
        'bin_scripts/ticketus-admin',
    ],
)
| [
"setuptools.find_packages"
] | [((605, 649), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['ticketus_settings']"}), "(exclude=['ticketus_settings'])\n", (618, 649), False, 'from setuptools import find_packages, setup\n')] |
from rasa_nlu.interpreters.simple_interpreter import HelloGoodbyeInterpreter
# Shared interpreter instance exercised by the test below.
interpreter = HelloGoodbyeInterpreter()


def test_samples():
    """The interpreter classifies a few canned utterances as greet/goodbye."""
    cases = [
        ("Hey there", {'text': "Hey there", 'intent': 'greet', 'entities': {}}),
        ("good bye for now", {'text': "good bye for now", 'intent': 'goodbye', 'entities': {}})
    ]
    for text, result in cases:
        parsed = interpreter.parse(text)
        assert parsed == result, "text : {0} \nresult : {1}, expected {2}".format(text, parsed, result)
| [
"rasa_nlu.interpreters.simple_interpreter.HelloGoodbyeInterpreter"
] | [((92, 117), 'rasa_nlu.interpreters.simple_interpreter.HelloGoodbyeInterpreter', 'HelloGoodbyeInterpreter', ([], {}), '()\n', (115, 117), False, 'from rasa_nlu.interpreters.simple_interpreter import HelloGoodbyeInterpreter\n')] |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CERN.
#
# Invenio-Circulation is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Tests for loan item resolver."""
from invenio_circulation.api import Loan
def test_loan_item_resolver(app, testdata):
    """The $ref-resolved loan exposes its item under the item_pid value."""
    pid = testdata["loans"][1]["pid"]
    record = Loan.get_record_by_pid(pid).replace_refs()
    assert record["item"]["pid"] == record["item_pid"]["value"]
def test_loan_item_resolver_for_empty_item_pid(app, testdata):
    """Resolving a loan with no item pid yields an empty item mapping."""
    pid = testdata["loans"][0]["pid"]
    record = Loan.get_record_by_pid(pid).replace_refs()
    assert record["item"] == {}
| [
"invenio_circulation.api.Loan.get_record_by_pid"
] | [((429, 461), 'invenio_circulation.api.Loan.get_record_by_pid', 'Loan.get_record_by_pid', (['loan_pid'], {}), '(loan_pid)\n', (451, 461), False, 'from invenio_circulation.api import Loan\n'), ((713, 745), 'invenio_circulation.api.Loan.get_record_by_pid', 'Loan.get_record_by_pid', (['loan_pid'], {}), '(loan_pid)\n', (735, 745), False, 'from invenio_circulation.api import Loan\n')] |
#!/usr/bin/python
# Copyright (c) 2011 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
#
# CoverageHelper
#
# The CoverageHelper is an object which defines lists used by the coverage
# scripts for grouping or filtering source by path, as well as filtering sources
# by name.
class CoverageHelper(object):
  """Holds the path groups, path filters and file-name ignore set that the
  coverage scripts use for grouping or filtering source results."""

  def __init__(self):
    self.ignore_set = self.Ignore()
    self.path_filter = self.Filter()
    self.groups = self.Groups()

  #
  # Return a list of fully qualified paths for the directories found at the
  # given start path.
  #
  def GetDirList(self, startpath):
    root = os.path.realpath(startpath)
    return [os.path.join(root, entry)
            for entry in os.listdir(root)
            if os.path.isdir(os.path.join(root, entry))]

  #
  # Set of fully qualified paths whose results should be skipped.
  #
  def Filter(self):
    skip_dirs = ['src/trusted/sel_universal', 'src/third_party/valgrind', 'src/tools']
    return set(os.path.realpath(p) for p in skip_dirs)

  #
  # File names ignored because they are in the TCB but used by the validator.
  #
  def Ignore(self):
    return set([
        'cpu_x86_test.c',
        'defsize64.c',
        'lock_insts.c',
        'lock_insts.h',
        'long_mode.c',
        'long_mode.h',
        'nacl_illegal.c',
        'nacl_illegal.h',
        'nc_read_segment.c',
        'nc_read_segment.h',
        'nc_rep_prefix.c',
        'nc_rep_prefix.h',
        'ncdecodeX87.c',
        'ncdecode_OF.c',
        'ncdecode_forms.c',
        'ncdecode_forms.h',
        'ncdecode_onebyte.c',
        'ncdecode_sse.c',
        'ncdecode_st.c',
        'ncdecode_st.h',
        'ncdecode_table.c',
        'ncdecode_tablegen.c',
        'ncdecode_tablegen.h',
        'ncdecode_tests.c',
        'ncdis.c',
        'ncdis_segments.c',
        'ncdis_segments.h',
        'ncdis_util.c',
        'ncdis_util.h',
        'ncenuminsts.c',
        'ncenuminsts.h',
        'ncval.c',
        'ncval_driver.c',
        'ncval_driver.h',
        'ncval_tests.c',
        'ze64.h',
        'zero_extends.c',
        'zero_extends.h',
        ])

  #
  # Paths by which results are grouped.
  #
  def Groups(self):
    paths = [os.path.realpath('scons-out'),
             os.path.realpath('src/include'),
             os.path.realpath('src/tools')]
    for subtree in ('src/trusted', 'src/shared', 'src/third_party', '..'):
      paths.extend(self.GetDirList(subtree))
    return paths
| [
"os.path.realpath",
"os.listdir",
"os.path.join",
"os.path.isdir"
] | [((726, 753), 'os.path.realpath', 'os.path.realpath', (['startpath'], {}), '(startpath)\n', (742, 753), False, 'import os\n'), ((770, 790), 'os.listdir', 'os.listdir', (['realpath'], {}), '(realpath)\n', (780, 790), False, 'import os\n'), ((805, 833), 'os.path.join', 'os.path.join', (['realpath', 'name'], {}), '(realpath, name)\n', (817, 833), False, 'import os\n'), ((843, 862), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (856, 862), False, 'import os\n'), ((2268, 2297), 'os.path.realpath', 'os.path.realpath', (['"""scons-out"""'], {}), "('scons-out')\n", (2284, 2297), False, 'import os\n'), ((2317, 2348), 'os.path.realpath', 'os.path.realpath', (['"""src/include"""'], {}), "('src/include')\n", (2333, 2348), False, 'import os\n'), ((2368, 2397), 'os.path.realpath', 'os.path.realpath', (['"""src/tools"""'], {}), "('src/tools')\n", (2384, 2397), False, 'import os\n'), ((1083, 1105), 'os.path.realpath', 'os.path.realpath', (['path'], {}), '(path)\n', (1099, 1105), False, 'import os\n')] |
import json
from requests import Request
from pydantic import BaseModel
from pydoc import locate
from typing import List, Optional
import dataclasses
from dataclasses import dataclass
from datetime import datetime
SCHEMAS = {}
class ResourceBaseSchema(BaseModel):
    """Common fields shared by API resources: ownership, read/write
    permissions, comment references and timestamps. All fields are optional
    so partial payloads still validate."""
    id: Optional[str]
    private: Optional[bool]
    canRead: Optional[List[str]]
    canWrite: Optional[List[str]]
    owner: Optional[str]
    anonymousComments: Optional[bool]
    comments: Optional[List[str]]
    createdAt: Optional[str]
    updatedAt: Optional[str]
    class Config:
        # The server's JSON uses '_id'; expose it on the model as 'id'.
        fields = {'id': '_id'}
class ResourceInfo(BaseModel):
    """Reference to another resource by type and id (both required)."""
    resourceType: str
    resourceId: str
class Comment(BaseModel):
    """Comment attached to a resource: text, threading, triage metadata
    (labels, priority, status, assignment) and an optional view/screenshot.
    All fields are optional so partial payloads validate."""
    id: Optional[str]
    owner: Optional[str]
    comments: Optional[List[str]]
    text: Optional[str]
    flagged: Optional[bool]
    resource: Optional[ResourceInfo]
    otherResources: Optional[List[ResourceInfo]]
    closed: Optional[bool]
    assignedTo: Optional[List[str]]
    labels: Optional[List[str]]
    priority: Optional[str]
    status: Optional[str]
    view: Optional[dict]
    screenshot: Optional[str]
    class Config:
        # The server's JSON uses '_id'; expose it on the model as 'id'.
        fields = {'id': '_id'}
def clean_empty(value):
    """Recursively drop entries whose cleaned value is None from nested
    dicts and lists; any other value is returned unchanged.

    Note that falsy-but-not-None values (0, '', [], {}) are kept.
    """
    if isinstance(value, dict):
        cleaned_map = {}
        for key, item in value.items():
            cleaned = clean_empty(item)
            if cleaned is not None:
                cleaned_map[key] = cleaned
        return cleaned_map
    if isinstance(value, list):
        cleaned_list = []
        for item in value:
            cleaned = clean_empty(item)
            if cleaned is not None:
                cleaned_list.append(cleaned)
        return cleaned_list
    return value
class ResourceBase(object):
    def __init__(self, session, basepath, me, name, methods):
        """Bind a REST resource endpoint.

        Args:
            session: requests.Session used to prepare and send calls.
            basepath: API root URL, without a trailing slash.
            me: current-user context (stored; not used in this class body).
            name: resource name, appended to the base path.
            methods: iterable of operation names this resource supports.
        """
        self.s = session
        self._path = basepath + '/' + name
        self.me = me
        # Comments for a resource live under <basepath>/comments/<name>.
        self._comment_path = basepath + '/comments/' + name
        self.name = name
        self.methods = methods
        # Maps logical operation names to HTTP verbs.
        self.method_dict = {
            'list': {
                'method': 'GET',
            },
            'create': {
                'method': 'POST',
            },
            'get': {
                'method': 'GET',
            },
            'update': {
                'method': 'PUT',
            },
            'delete': {
                'method': 'DELETE',
            },
            'comment_get': {
                'method': 'GET',
            },
            'comment_create': {
                'method': 'POST'}
        }
        for method in self.methods:
            if method == 'retrieve':
                # NOTE(review): sets the attribute to the literal string 'x' —
                # looks like an unfinished placeholder; confirm intended behavior.
                self.__setattr__('retrieve', 'x')
        # Per-resource schema; subclasses presumably assign one — TODO confirm.
        self.schema = None
        # Comments always parse with the Comment schema.
        self.comment_schema = Comment
    def _prep_request(self, method, path, comment, data, params):
        """Build a prepared request for this resource.

        Serializes `data` through the comment schema (when `comment` is true)
        or the resource schema, stripping None-valued entries via clean_empty,
        then prepares the request on the bound session.

        Raises:
            AssertionError: if `method` is not in this resource's methods.
        """
        assert method in self.methods, 'method {} not supported for {} calls'.format(method, self.name)
        if comment:
            url = self._comment_path + path
            if data:
                dataclass_instance = self.comment_schema.parse_obj(data)
                data = clean_empty(dataclass_instance.dict(by_alias=True))
        else:
            url = self._path + path
            if data:
                if isinstance(data, list):
                    data_list = []
                    if self.schema:
                        for d in data:
                            if isinstance(d, dict):
                                # Validate dict entries through the schema.
                                dataclass_instance = self.schema.parse_obj(d)
                                data_list.append(clean_empty(dataclass_instance.dict(by_alias=True)))
                            elif isinstance(d, str):
                                # Plain string entries pass through unvalidated.
                                data_list.append(d)
                    # NOTE(review): when data is a list but self.schema is not
                    # set, data is replaced with an empty list — confirm intended.
                    data = data_list
                elif self.schema:
                    if isinstance(data, dict):
                        dataclass_instance = self.schema.parse_obj(data)
                    else:
                        # Assume it is already a schema instance.
                        dataclass_instance = data
                    data = clean_empty(dataclass_instance.dict(by_alias=True))
        return self.s.prepare_request(Request(self.method_dict[method]['method'], url, json=data, params=params))
    def _parse_response(self, response, comment=False, schema=None):
        """Parse the request response.

        Resolution order: explicit `schema` argument, then the comment schema
        (when `comment` is true), then a schema registered in SCHEMAS matching
        the response's 'type', then this resource's own schema, and finally
        the raw dict.

        Arguments:
            response {dict} -- A decoded response payload from the server
            comment {bool} -- Whether or not the response is a comment
            schema {Schema} -- Optional schema to parse the response with

        Returns:
            Schema / dict -- An object derived from SpeckleObject if possible,
            otherwise a dict of the response resource
        """
        if schema:
            # If a schema is defined, then try to parse it with that
            return schema.parse_obj(response)
        elif comment:
            return self.comment_schema.parse_obj(response)
        elif 'type' in response:
            # Otherwise, check if the incoming type is within the dict of loaded
            # schemas; most-specific (last) type segment wins.
            types = response['type'].split('/')
            for t in reversed(types):
                if t in SCHEMAS:
                    return SCHEMAS[t].parse_obj(response)
        if self.schema:
            return self.schema.parse_obj(response)
        return response
    def make_request(self, method, path, data=None, comment=False, schema=None, params=None):
        """Prepare, send and parse a request against this resource.

        Returns a parsed object (or list of objects) when the payload carries
        'resource'/'resources', otherwise the raw payload dict.

        Raises:
            requests.HTTPError: on non-2xx responses.
            AssertionError: when the payload reports success == False.
        """
        r = self._prep_request(method, path, comment, data, params)
        resp = self.s.send(r)
        resp.raise_for_status()
        response_payload = resp.json()
        # NOTE(review): assert is stripped under `python -O`; consider raising
        # an explicit exception for unsuccessful payloads.
        assert response_payload['success'] == True, json.dumps(response_payload)
        if 'resources' in response_payload:
            return [self._parse_response(resource, comment, schema) for resource in response_payload['resources']]
        elif 'resource' in response_payload:
            return self._parse_response(response_payload['resource'], comment, schema)
        else:
            return response_payload # Payload with neither key; returned raw — unclear when this occurs
"json.dumps",
"requests.Request"
] | [((5357, 5385), 'json.dumps', 'json.dumps', (['response_payload'], {}), '(response_payload)\n', (5367, 5385), False, 'import json\n'), ((3852, 3926), 'requests.Request', 'Request', (["self.method_dict[method]['method']", 'url'], {'json': 'data', 'params': 'params'}), "(self.method_dict[method]['method'], url, json=data, params=params)\n", (3859, 3926), False, 'from requests import Request\n')] |
"""This module handles the creation of an Explosion instance."""
from os import pardir, path
import pygame as pg
from pygame.sprite import Sprite
import color
# Path template for the nine explosion images (explosion00.jpg .. explosion08.jpg),
# formatted with the frame index in Explosion._prep_images.
IMG = path.join(pardir, "resources/images/explosions/explosion0{}.jpg")
class Explosion(Sprite):
    """Sprite that plays a short explosion animation at a given position."""

    def __init__(self, center):
        """Create an explosion centred on the rect of the alien that was shot."""
        super().__init__()
        self.frame = 0  # index of the frame currently shown
        self.last_update = pg.time.get_ticks()
        self.frame_rate = 75  # milliseconds between frames
        self.explosion_sim = []  # loaded animation frames
        self._prep_images()
        self.image = self.explosion_sim[0]
        self.rect = self.image.get_rect()
        self.rect.center = center

    def _prep_images(self):
        """Load the nine frames, scale them and strip the black background."""
        for index in range(9):
            frame = pg.image.load(IMG.format(index)).convert()
            frame = pg.transform.scale(frame, (55, 55))
            frame.set_colorkey(color.BLACK)
            self.explosion_sim.append(frame)

    def update(self):
        """Advance the animation; remove the sprite once all frames played."""
        now = pg.time.get_ticks()
        if now - self.last_update <= self.frame_rate:
            return
        self.last_update = now
        self.frame += 1
        if self.frame == len(self.explosion_sim):
            self.kill()
            return
        # Show the next frame at the same mid-point.
        old_center = self.rect.center
        self.image = self.explosion_sim[self.frame]
        self.rect = self.image.get_rect()
        self.rect.center = old_center
| [
"os.path.join",
"pygame.time.get_ticks",
"pygame.transform.scale"
] | [((216, 281), 'os.path.join', 'path.join', (['pardir', '"""resources/images/explosions/explosion0{}.jpg"""'], {}), "(pardir, 'resources/images/explosions/explosion0{}.jpg')\n", (225, 281), False, 'from os import pardir, path\n'), ((627, 646), 'pygame.time.get_ticks', 'pg.time.get_ticks', ([], {}), '()\n', (644, 646), True, 'import pygame as pg\n'), ((1416, 1435), 'pygame.time.get_ticks', 'pg.time.get_ticks', ([], {}), '()\n', (1433, 1435), True, 'import pygame as pg\n'), ((1204, 1239), 'pygame.transform.scale', 'pg.transform.scale', (['image', '(55, 55)'], {}), '(image, (55, 55))\n', (1222, 1239), True, 'import pygame as pg\n')] |
import pandas as pd
import numpy as np
import yaml
import os
import argparse
from sklearn.impute import KNNImputer
from logger import App_Logger
# Shared log file handle and logger used by every function in this module.
# NOTE(review): the file handle is opened at import time and never closed.
file_object=open("application_logging/Loggings.txt", 'a+')
logger_object=App_Logger()
def read_params(config_path):
    """Load and return the YAML configuration found at *config_path*."""
    with open(config_path) as config_file:
        return yaml.safe_load(config_file)
def _clean_and_impute(data):
    """Clean one raw hypothyroid DataFrame and impute missing values.

    Steps (applied in order, matching the original pipeline):
      1. Replace the raw files' '?' missing-value marker with np.nan.
      2. Encode 'sex' (F->0, M->1), then binary 'f'/'t' columns (0/1) and
         single-valued 'f' columns (0).
      3. Map the multi-class 'Class' labels to integers and collapse them to
         a binary hypothyroid target (anything >= 1 becomes 1).
      4. KNN-impute remaining NaNs and round the result.

    Returns a new DataFrame of rounded imputed values, same columns.
    """
    for column in data.columns:
        if data[column][data[column] == '?'].count() != 0:
            data[column] = data[column].replace('?', np.nan)
    data['sex'] = data['sex'].replace({'F': 0, 'M': 1})
    for column in data.columns:
        if len(data[column].unique()) == 2:
            data[column] = data[column].replace({'f': 0, 't': 1})
        elif len(data[column].unique()) == 1:
            data[column] = data[column].replace({'f': 0})
    data['Class'] = data['Class'].replace({'negative': 0, 'compensated_hypothyroid': 1,
                                           'primary_hypothyroid': 2, 'secondary_hypothyroid': 3})
    data['Class'] = data['Class'].apply(lambda value: 1 if value >= 1 else 0)
    imputer = KNNImputer(n_neighbors=3, weights='uniform', missing_values=np.nan)
    imputed = imputer.fit_transform(data)
    return pd.DataFrame(data=np.round(imputed), columns=data.columns)


def preprocessing(config_path):
    """
    Method Name: preprocessing
    Description: Reads the train/test splits named in the config, replaces
                 missing-value markers with NaN, encodes categoricals and
                 KNN-imputes both sets (shared logic in _clean_and_impute,
                 which previously existed as two duplicated copies).
    Output: Processed train/test CSV files written to the configured paths.
    On Failure: Raise Exception
    """
    config = read_params(config_path)
    train_data_path = config["split_data"]["train_path"]
    test_data_path = config["split_data"]["test_path"]
    train_processed_path = config["processed"]["train_path"]
    test_processed_path = config["processed"]["test_path"]
    logger_object.log(file_object, 'Entered the preprocessing')
    try:
        # Read both inputs first so a missing test file fails before any output
        # is written (preserves the original failure behaviour).
        train_data = pd.read_csv(train_data_path)
        test_data = pd.read_csv(test_data_path)
        _clean_and_impute(train_data).to_csv(train_processed_path, index=False)
        _clean_and_impute(test_data).to_csv(test_processed_path, index=False)
        logger_object.log(file_object, 'preprocessing was done Successful and Exited')
    except Exception as e:
        logger_object.log(file_object, 'Exception occured in preprocessing . Exception message: ' + str(e))
        logger_object.log(file_object, 'preprocessing Unsuccessful')
        # Chain the original error so the traceback is not lost (the original
        # raised a bare Exception() with no context).
        raise Exception() from e
# Script entry point: run preprocessing with the config file given on the
# command line (defaults to params.yaml).
if __name__=="__main__":
    args = argparse.ArgumentParser()
    args.add_argument("--config", default="params.yaml")
    parsed_args = args.parse_args()
    data = preprocessing(config_path=parsed_args.config)
| [
"argparse.ArgumentParser",
"pandas.read_csv",
"sklearn.impute.KNNImputer",
"yaml.safe_load",
"logger.App_Logger",
"numpy.round"
] | [((228, 240), 'logger.App_Logger', 'App_Logger', ([], {}), '()\n', (238, 240), False, 'from logger import App_Logger\n'), ((4068, 4093), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4091, 4093), False, 'import argparse\n'), ((332, 357), 'yaml.safe_load', 'yaml.safe_load', (['yaml_file'], {}), '(yaml_file)\n', (346, 357), False, 'import yaml\n'), ((1029, 1057), 'pandas.read_csv', 'pd.read_csv', (['train_data_path'], {}), '(train_data_path)\n', (1040, 1057), True, 'import pandas as pd\n'), ((1077, 1104), 'pandas.read_csv', 'pd.read_csv', (['test_data_path'], {}), '(test_data_path)\n', (1088, 1104), True, 'import pandas as pd\n'), ((2064, 2131), 'sklearn.impute.KNNImputer', 'KNNImputer', ([], {'n_neighbors': '(3)', 'weights': '"""uniform"""', 'missing_values': 'np.nan'}), "(n_neighbors=3, weights='uniform', missing_values=np.nan)\n", (2074, 2131), False, 'from sklearn.impute import KNNImputer\n'), ((3425, 3492), 'sklearn.impute.KNNImputer', 'KNNImputer', ([], {'n_neighbors': '(3)', 'weights': '"""uniform"""', 'missing_values': 'np.nan'}), "(n_neighbors=3, weights='uniform', missing_values=np.nan)\n", (3435, 3492), False, 'from sklearn.impute import KNNImputer\n'), ((2227, 2246), 'numpy.round', 'np.round', (['new_array'], {}), '(new_array)\n', (2235, 2246), True, 'import numpy as np\n'), ((3586, 3605), 'numpy.round', 'np.round', (['new_array'], {}), '(new_array)\n', (3594, 3605), True, 'import numpy as np\n')] |
"""This module contains helper functions used in the API"""
import datetime
import json
import re
import string
import random
from functools import wraps
from flask import request
from api_v1.models import User
def name_validalidation(name, context):
    """Validate a name value for the given *context* (e.g. a resource type).

    Returns None when the name is acceptable, otherwise a tuple of an error
    payload and HTTP status 400.
    """
    is_blank = not name.strip()
    has_bad_chars = re.match(r"^[-a-zA-Z0-9_\s]*$", name) is None
    if is_blank or has_bad_chars:
        message = "Name shouldn't be empty. No special characters"
        return {
            "message": message + " for " + context + " names",
            context: "null"
        }, 400
def email_validation(email):
    """Validate an email address.

    Returns None for a well-formed address, otherwise a tuple of an error
    payload and HTTP status 400.
    """
    pattern = r"(^[a-zA-Z0-9_.]+@[a-zA-Z0-9-]+\.[a-z]+$)"
    if re.match(pattern, email) is None:
        return {
            'message': 'Incorrect email format.',
            'status': 'Registration failed'
        }, 400
def datetimeconverter(obj):
    """Return the string form of a datetime object; None for anything else.

    Intended as the `default` hook for json.dumps.
    """
    if not isinstance(obj, datetime.datetime):
        return None
    return str(obj)
def master_serializer(resource):
    """Serialize a resource object into a JSON string with sorted keys,
    converting any datetime values via datetimeconverter."""
    return json.dumps(
        resource.serialize(), default=datetimeconverter, sort_keys=True
    )
def token_required(funct):
    """Decorator that requires a valid JWT in the Authorization header.

    On success the decoded user id is passed to the wrapped view as an extra
    positional argument; on failure a (payload, 401) tuple is returned.
    """
    @wraps(funct)
    def wrapper(*args, **kwargs):
        """Decode the token and forward the user id to the wrapped view."""
        if 'Authorization' in request.headers:
            access_token = request.headers.get('Authorization')
            data = User.decode_token(access_token)
            # decode_token returns an error string on failure, otherwise the id.
            if not isinstance(data, str):
                user_id = data
            else:
                response = {
                    'message': data
                }
                return response, 401
            return funct(*args, user_id, **kwargs)
        else:
            message = "No token found! Ensure that the request header"
            response = {
                'message': message + ' has an authorization key value'
            }
            return response, 401
    # Redundant with @wraps (which already copies these), but harmless.
    wrapper.__doc__ = funct.__doc__
    wrapper.__name__ = funct.__name__
    return wrapper
def password_generator(size=8, chars=string.ascii_uppercase + string.digits):
    """Generate a random password of *size* characters drawn from *chars*.

    Security fix: uses random.SystemRandom (backed by os.urandom) instead of
    the module-level random functions, whose Mersenne-Twister output is
    predictable and unsuitable for passwords or tokens.
    """
    rng = random.SystemRandom()
    return ''.join(rng.choice(chars) for _ in range(size))
| [
"api_v1.models.User.decode_token",
"random.choice",
"json.dumps",
"re.match",
"functools.wraps",
"flask.request.headers.get"
] | [((1193, 1252), 'json.dumps', 'json.dumps', (['data'], {'default': 'datetimeconverter', 'sort_keys': '(True)'}), '(data, default=datetimeconverter, sort_keys=True)\n', (1203, 1252), False, 'import json\n'), ((1373, 1385), 'functools.wraps', 'wraps', (['funct'], {}), '(funct)\n', (1378, 1385), False, 'from functools import wraps\n'), ((690, 750), 're.match', 're.match', (['"""(^[a-zA-Z0-9_.]+@[a-zA-Z0-9-]+\\\\.[a-z]+$)"""', 'email'], {}), "('(^[a-zA-Z0-9_.]+@[a-zA-Z0-9-]+\\\\.[a-z]+$)', email)\n", (698, 750), False, 'import re\n'), ((342, 379), 're.match', 're.match', (['"""^[-a-zA-Z0-9_\\\\s]*$"""', 'name'], {}), "('^[-a-zA-Z0-9_\\\\s]*$', name)\n", (350, 379), False, 'import re\n'), ((1572, 1608), 'flask.request.headers.get', 'request.headers.get', (['"""Authorization"""'], {}), "('Authorization')\n", (1591, 1608), False, 'from flask import request\n'), ((1629, 1660), 'api_v1.models.User.decode_token', 'User.decode_token', (['access_token'], {}), '(access_token)\n', (1646, 1660), False, 'from api_v1.models import User\n'), ((2393, 2413), 'random.choice', 'random.choice', (['chars'], {}), '(chars)\n', (2406, 2413), False, 'import random\n')] |
# --- OpenMV color-tracking script: serial / LED / PID setup ---
# NOTE(review): this whole init section is repeated just below with
# slightly different imports (UART here vs Pin later); it looks like two
# versions of the script were concatenated -- confirm which init block is
# authoritative. The `uart` object created here is the one used by the
# main loop further down.
import sensor, image, time, pyb
from pid import PID
from pyb import Servo
from pyb import UART
uart = UART(3, 19200)
usb = pyb.USB_VCP()
led_red = pyb.LED(1) # Red LED = 1, Green LED = 2, Blue LED = 3, IR LEDs = 4.
led_green = pyb.LED(2)
pan_pid = PID(p=0.07, i=0, imax=90) # use this PID when running standalone / with image transfer disabled
tilt_pid = PID(p=0.05, i=0, imax=90) # use this PID when running standalone / with image transfer disabled
#pan_pid = PID(p=0.1, i=0, imax=90)  # use this PID when tuning online (IDE connected)
#tilt_pid = PID(p=0.1, i=0, imax=90)  # use this PID when tuning online (IDE connected)
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)
# --- OpenMV script: sweep the pan servo and look for red/green targets ---
# NOTE(review): `uart` used in the loop below is created by the first init
# section above (UART(3, 19200)); this section does not recreate it.
import sensor, image, time, pyb
from pid import PID
from pyb import Servo
from pyb import Pin
usb = pyb.USB_VCP()
pan_servo=Servo(1)
tilt_servo=Servo(2)
led_red = pyb.LED(1) # Red LED = 1, Green LED = 2, Blue LED = 3, IR LEDs = 4.
led_green = pyb.LED(2)
pan_pid = PID(p=0.07, i=0, imax=90) # use this PID when running standalone / with image transfer disabled
tilt_pid = PID(p=0.05, i=0, imax=90) # use this PID when running standalone / with image transfer disabled
#pan_pid = PID(p=0.1, i=0, imax=90)  # use this PID when tuning online (IDE connected)
#tilt_pid = PID(p=0.1, i=0, imax=90)  # use this PID when tuning online (IDE connected)
p_out = Pin('P7', Pin.OUT_PP)  # configure P7 as a push-pull output pin
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must be turned off for color tracking
sensor.set_auto_whitebal(False) # must be turned off for color tracking
clock = time.clock()
# detection flags: set to 1 once the corresponding colour has been found
dete_red = 0
dete_green = 0
# keep scanning until both a red and a green target have been detected
while(dete_green != 1 or dete_red != 1):
    clock.tick()
    img = sensor.snapshot().lens_corr(1.8)
    # sweep the pan servo by 2 degrees per frame to scan the scene
    pan_servo.angle(pan_servo.angle()+2)
    print(pan_servo.angle())
    #tilt_servo.angle(tilt_servo.angle()+2)
    #print(tilt_servo.angle())
    for r in img.find_rects(threshold = 10000):
    #    for p in r.corners(): img.draw_circle(p[0], p[1], 5, color = (0, 255, 0))
    #    print(r)
        area = (r.x(), r.y(), r.w(), r.h())
        statistics = img.get_statistics(roi=area)  # colour statistics of the pixels inside the rect
    #    print(statistics)
        # LAB-mode thresholds for "red" -- presumably tuned for a fixed
        # lighting setup; TODO confirm on the actual scene
        if 17<statistics.l_mode()<87 and 30<statistics.a_mode()<123 and -49<statistics.b_mode()<50 and dete_red == 0:#if the circle is red
            img.draw_rectangle(area, color = (255, 0, 0))
            dete_red = 1  # mark the detected red target with a red rectangle
            print("red")
            uart.write("red")
            # pulse the output pin three times to signal "red" to the host MCU
            j = 3
            while(j):
                p_out.high()  # drive p_out high
                j=j-1
            p_out.low()
            # blink the red LED five times
            i = 5
            while(i):
                led_red.on()
                time.sleep(1000)
                led_red.off()
                i=i-1
        # LAB-mode thresholds for "green"
        elif 24<statistics.l_mode()<48 and -48<statistics.a_mode()<-24 and -1<statistics.b_mode()<49 and dete_green == 0:
            img.draw_rectangle(area, color = (0, 255, 0))
            dete_green = 1
            print("green")
            uart.write("green")
            # pulse the output pin three times to signal "green" to the host MCU
            j = 3
            while(j):
                p_out.high()  # drive p_out high
                j=j-1
            p_out.low()
            # blink the green LED five times
            i = 5
            while(i):
                led_green.on()
                time.sleep(1000)
                led_green.off()
                i=i-1
# print("FPS %f" % clock.fps())
| [
"sensor.skip_frames",
"sensor.set_auto_gain",
"time.clock",
"sensor.set_pixformat",
"pyb.Pin",
"sensor.set_auto_whitebal",
"pid.PID",
"sensor.set_framesize",
"time.sleep",
"pyb.USB_VCP",
"sensor.reset",
"pyb.UART",
"pyb.Servo",
"sensor.snapshot",
"pyb.LED"
] | [((103, 117), 'pyb.UART', 'UART', (['(3)', '(19200)'], {}), '(3, 19200)\n', (107, 117), False, 'from pyb import UART\n'), ((124, 137), 'pyb.USB_VCP', 'pyb.USB_VCP', ([], {}), '()\n', (135, 137), False, 'import sensor, image, time, pyb\n'), ((151, 161), 'pyb.LED', 'pyb.LED', (['(1)'], {}), '(1)\n', (158, 161), False, 'import sensor, image, time, pyb\n'), ((232, 242), 'pyb.LED', 'pyb.LED', (['(2)'], {}), '(2)\n', (239, 242), False, 'import sensor, image, time, pyb\n'), ((254, 279), 'pid.PID', 'PID', ([], {'p': '(0.07)', 'i': '(0)', 'imax': '(90)'}), '(p=0.07, i=0, imax=90)\n', (257, 279), False, 'from pid import PID\n'), ((313, 338), 'pid.PID', 'PID', ([], {'p': '(0.05)', 'i': '(0)', 'imax': '(90)'}), '(p=0.05, i=0, imax=90)\n', (316, 338), False, 'from pid import PID\n'), ((459, 473), 'sensor.reset', 'sensor.reset', ([], {}), '()\n', (471, 473), False, 'import sensor, image, time, pyb\n'), ((474, 509), 'sensor.set_pixformat', 'sensor.set_pixformat', (['sensor.RGB565'], {}), '(sensor.RGB565)\n', (494, 509), False, 'import sensor, image, time, pyb\n'), ((510, 544), 'sensor.set_framesize', 'sensor.set_framesize', (['sensor.QQVGA'], {}), '(sensor.QQVGA)\n', (530, 544), False, 'import sensor, image, time, pyb\n'), ((645, 658), 'pyb.USB_VCP', 'pyb.USB_VCP', ([], {}), '()\n', (656, 658), False, 'import sensor, image, time, pyb\n'), ((670, 678), 'pyb.Servo', 'Servo', (['(1)'], {}), '(1)\n', (675, 678), False, 'from pyb import Servo\n'), ((690, 698), 'pyb.Servo', 'Servo', (['(2)'], {}), '(2)\n', (695, 698), False, 'from pyb import Servo\n'), ((710, 720), 'pyb.LED', 'pyb.LED', (['(1)'], {}), '(1)\n', (717, 720), False, 'import sensor, image, time, pyb\n'), ((791, 801), 'pyb.LED', 'pyb.LED', (['(2)'], {}), '(2)\n', (798, 801), False, 'import sensor, image, time, pyb\n'), ((813, 838), 'pid.PID', 'PID', ([], {'p': '(0.07)', 'i': '(0)', 'imax': '(90)'}), '(p=0.07, i=0, imax=90)\n', (816, 838), False, 'from pid import PID\n'), ((872, 897), 'pid.PID', 'PID', ([], {'p': 
'(0.05)', 'i': '(0)', 'imax': '(90)'}), '(p=0.05, i=0, imax=90)\n', (875, 897), False, 'from pid import PID\n'), ((1025, 1046), 'pyb.Pin', 'Pin', (['"""P7"""', 'Pin.OUT_PP'], {}), "('P7', Pin.OUT_PP)\n", (1028, 1046), False, 'from pyb import Pin\n'), ((1060, 1074), 'sensor.reset', 'sensor.reset', ([], {}), '()\n', (1072, 1074), False, 'import sensor, image, time, pyb\n'), ((1075, 1110), 'sensor.set_pixformat', 'sensor.set_pixformat', (['sensor.RGB565'], {}), '(sensor.RGB565)\n', (1095, 1110), False, 'import sensor, image, time, pyb\n'), ((1111, 1145), 'sensor.set_framesize', 'sensor.set_framesize', (['sensor.QQVGA'], {}), '(sensor.QQVGA)\n', (1131, 1145), False, 'import sensor, image, time, pyb\n'), ((1146, 1175), 'sensor.skip_frames', 'sensor.skip_frames', ([], {'time': '(2000)'}), '(time=2000)\n', (1164, 1175), False, 'import sensor, image, time, pyb\n'), ((1178, 1205), 'sensor.set_auto_gain', 'sensor.set_auto_gain', (['(False)'], {}), '(False)\n', (1198, 1205), False, 'import sensor, image, time, pyb\n'), ((1246, 1277), 'sensor.set_auto_whitebal', 'sensor.set_auto_whitebal', (['(False)'], {}), '(False)\n', (1270, 1277), False, 'import sensor, image, time, pyb\n'), ((1326, 1338), 'time.clock', 'time.clock', ([], {}), '()\n', (1336, 1338), False, 'import sensor, image, time, pyb\n'), ((1436, 1453), 'sensor.snapshot', 'sensor.snapshot', ([], {}), '()\n', (1451, 1453), False, 'import sensor, image, time, pyb\n'), ((2402, 2418), 'time.sleep', 'time.sleep', (['(1000)'], {}), '(1000)\n', (2412, 2418), False, 'import sensor, image, time, pyb\n'), ((2951, 2967), 'time.sleep', 'time.sleep', (['(1000)'], {}), '(1000)\n', (2961, 2967), False, 'import sensor, image, time, pyb\n')] |
import json
import datetime
def print_json_log(logger_, level_, message_):
    """Emit a JSON-formatted record through ``logger_`` at level ``level_``.

    The payload contains the level, the message, and the current local
    timestamp as a string.
    """
    payload = {
        "level": level_,
        "message": message_,
        "time": str(datetime.datetime.now()),
    }
    log_method = getattr(logger_, level_)
    log_method(json.dumps(payload))
| [
"datetime.datetime.now",
"json.dumps"
] | [((181, 198), 'json.dumps', 'json.dumps', (['dict_'], {}), '(dict_)\n', (191, 198), False, 'import json\n'), ((140, 163), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (161, 163), False, 'import datetime\n')] |
# -*- coding: utf-8 -*-
import pytest
from pyisic import NACE2_to_ISIC4
from pyisic.types import Standards
@pytest.mark.parametrize(
    "code,expected",
    [
        ("DOESNT EXIST", set()),
        ("A", {(Standards.ISIC4, "A")}),
        ("01", {(Standards.ISIC4, "01")}),
        ("01.1", {(Standards.ISIC4, "011")}),
        ("01.11", {(Standards.ISIC4, "0111")}),
    ],
)
def test_naics2017_to_isic4_concordance(code: str, expected: set):
    """Test NACE2 to ISIC4 sample concordances.

    NOTE(review): the function name says "naics2017" but the body (and the
    imported ``NACE2_to_ISIC4`` table) tests the NACE2 standard -- the name
    looks like a copy/paste slip; renaming it would be a follow-up change.
    """
    assert NACE2_to_ISIC4.concordant(code) == expected
| [
"pyisic.NACE2_to_ISIC4.concordant"
] | [((516, 547), 'pyisic.NACE2_to_ISIC4.concordant', 'NACE2_to_ISIC4.concordant', (['code'], {}), '(code)\n', (541, 547), False, 'from pyisic import NACE2_to_ISIC4\n')] |
from django.urls import path
from custom_user_app.views import (CustomUserLoginView,
CustomUserLogoutView,
CustomUserCreationView,
CustomUserUpdateView,
CustomUserPasswordChangeView,
CustomUserPasswordChangeDoneView)
# URL routes for the custom user app. Each route name is the handle used
# by templates and reverse() lookups elsewhere in the project.
urlpatterns = [
    path('login/', CustomUserLoginView.as_view(), name='user_login'),
    path('logout/', CustomUserLogoutView.as_view(), name='user_logout'),
    path('registration/', CustomUserCreationView.as_view(), name='user_registration'),
    # profile and password views are keyed by the user's numeric id
    path('profile/<int:profile_id>', CustomUserUpdateView.as_view(), name='user_profile'),
    path('password_change/<int:profile_id>', CustomUserPasswordChangeView.as_view(), name='user_password_change'),
    path('password_change_done/<int:profile_id>', CustomUserPasswordChangeDoneView.as_view(),
         name='password_change_done'),
]
| [
"custom_user_app.views.CustomUserPasswordChangeView.as_view",
"custom_user_app.views.CustomUserCreationView.as_view",
"custom_user_app.views.CustomUserUpdateView.as_view",
"custom_user_app.views.CustomUserPasswordChangeDoneView.as_view",
"custom_user_app.views.CustomUserLogoutView.as_view",
"custom_user_a... | [((429, 458), 'custom_user_app.views.CustomUserLoginView.as_view', 'CustomUserLoginView.as_view', ([], {}), '()\n', (456, 458), False, 'from custom_user_app.views import CustomUserLoginView, CustomUserLogoutView, CustomUserCreationView, CustomUserUpdateView, CustomUserPasswordChangeView, CustomUserPasswordChangeDoneView\n'), ((500, 530), 'custom_user_app.views.CustomUserLogoutView.as_view', 'CustomUserLogoutView.as_view', ([], {}), '()\n', (528, 530), False, 'from custom_user_app.views import CustomUserLoginView, CustomUserLogoutView, CustomUserCreationView, CustomUserUpdateView, CustomUserPasswordChangeView, CustomUserPasswordChangeDoneView\n'), ((579, 611), 'custom_user_app.views.CustomUserCreationView.as_view', 'CustomUserCreationView.as_view', ([], {}), '()\n', (609, 611), False, 'from custom_user_app.views import CustomUserLoginView, CustomUserLogoutView, CustomUserCreationView, CustomUserUpdateView, CustomUserPasswordChangeView, CustomUserPasswordChangeDoneView\n'), ((677, 707), 'custom_user_app.views.CustomUserUpdateView.as_view', 'CustomUserUpdateView.as_view', ([], {}), '()\n', (705, 707), False, 'from custom_user_app.views import CustomUserLoginView, CustomUserLogoutView, CustomUserCreationView, CustomUserUpdateView, CustomUserPasswordChangeView, CustomUserPasswordChangeDoneView\n'), ((776, 814), 'custom_user_app.views.CustomUserPasswordChangeView.as_view', 'CustomUserPasswordChangeView.as_view', ([], {}), '()\n', (812, 814), False, 'from custom_user_app.views import CustomUserLoginView, CustomUserLogoutView, CustomUserCreationView, CustomUserUpdateView, CustomUserPasswordChangeView, CustomUserPasswordChangeDoneView\n'), ((896, 938), 'custom_user_app.views.CustomUserPasswordChangeDoneView.as_view', 'CustomUserPasswordChangeDoneView.as_view', ([], {}), '()\n', (936, 938), False, 'from custom_user_app.views import CustomUserLoginView, CustomUserLogoutView, CustomUserCreationView, CustomUserUpdateView, CustomUserPasswordChangeView, 
CustomUserPasswordChangeDoneView\n')] |
import numpy as np
import csv as csv
from clean_data import clean_data
from join_columns import join_columns
from fix_decimals import add_int, cut_decimals
def preprocess_dataset():
    """Preprocess the train and test splits, plain first, then feature-encoded."""
    for encode in (False, True):
        for split in ('train', 'test'):
            preprocess_data(split, encode)
def preprocess_data(data_name, encode_features):
    """Clean one raw CSV split and write the result back to ./data/.

    Reads ``./data/raw_<data_name>.csv``, cleans it, optionally collapses
    groups of one-hot indicator columns into single categorical columns,
    normalizes decimal formatting, and writes ``./data/<name>.csv`` (with
    an ``_enc`` suffix when ``encode_features`` is True).

    Args:
        data_name: dataset split to process ('train' or 'test').
        encode_features: when True, merge one-hot column groups via
            join_columns and append '_enc' to the output file name.
    """
    name = data_name
    raw = list()
    # load the raw csv into a list of rows, then into a 2-D string array
    with open("./data/raw_" + data_name + ".csv") as f:
        raw_reader = csv.reader(f, delimiter=",")
        for row in raw_reader:
            raw.append(row)
    raw = np.array(raw)
    raw = clean_data(raw)
    if encode_features:
        # Each join_columns call collapses one group of one-hot columns
        # into a single column. The type list marks plain category members
        # ("c") and catch-all 'other' columns ("o1"); the integer list
        # gives the code assigned to each member column.
        raw = join_columns(raw, ["sanitario1", "sanitario2", "sanitario3", "sanitario5", "sanitario6"], ["c","c","c","c","o1"], "sanitario", [1,2,3,4], {"o1":"sanioth"})
        raw = join_columns(raw, ["energcocinar1", "energcocinar2", "energcocinar3", "energcocinar4"], ["c","c","c","c"], "energcocinar", [1,4,2,3])
        raw = join_columns(raw, ["elimbasu1", "elimbasu2", "elimbasu3", "elimbasu4", "elimbasu6"], ["c","c","c","c","o1"], "elimbasu", [4,3,2,1], {"o1":"elimoth"})
        #raw = np.delete(raw, np.where(raw[0,:] == "elimbasu5")[0][0], axis=1) #this column has been removed inside the clean_data function since it has 0 mean and 0 variance
        raw = join_columns(raw, ["epared1", "epared2", "epared3"], ["c","c","c"], "epared", [1,2,3])
        raw = join_columns(raw, ["etecho1", "etecho2", "etecho3"], ["c","c","c"], "etecho", [1,2,3])
        raw = join_columns(raw, ["eviv1", "eviv2", "eviv3"], ["c","c","c"], "eviv", [1,2,3])
        raw = join_columns(raw, ["female", "male"], ["c","c"], "gender", [0,1])
        raw = join_columns(raw, ["parentesco1", "parentesco2", "parentesco3", "parentesco4", "parentesco5", "parentesco6", "parentesco7", "parentesco8", "parentesco9", "parentesco10", "parentesco11", "parentesco12"], ["c","c","c","c","c","c","c","c","c","c","c","c"], "parentesco", [1,2,3,4,5,6,7,8,9,10,11,12])
        raw = join_columns(raw, ["instlevel1", "instlevel2", "instlevel3", "instlevel4", "instlevel5", "instlevel6", "instlevel7", "instlevel8", "instlevel9"], ["c","c","c","c","c","c","c","c","c"], "instlevel", [1,2,3,4,5,6,7,8,9])
        raw = join_columns(raw, ["tipovivi1", "tipovivi2", "tipovivi3", "tipovivi4", "tipovivi5"], ["c","c","c","c","o1"], "tipovivi", [1,2,3,4], {"o1":"tipooth"})
        raw = join_columns(raw, ["area2", "area1"], ["c","c"], "area", [0,1])
        name = name + '_enc'
    raw = add_int(raw, 0)
    raw = cut_decimals(raw, 2)
    #saving new dataset
    print('exporting ' + name + '.csv')
    np.savetxt('./data/' + name + '.csv', raw, delimiter=';', fmt='%s')
"fix_decimals.cut_decimals",
"fix_decimals.add_int",
"numpy.array",
"numpy.savetxt",
"clean_data.clean_data",
"csv.reader",
"join_columns.join_columns"
] | [((586, 599), 'numpy.array', 'np.array', (['raw'], {}), '(raw)\n', (594, 599), True, 'import numpy as np\n'), ((610, 625), 'clean_data.clean_data', 'clean_data', (['raw'], {}), '(raw)\n', (620, 625), False, 'from clean_data import clean_data\n'), ((2508, 2523), 'fix_decimals.add_int', 'add_int', (['raw', '(0)'], {}), '(raw, 0)\n', (2515, 2523), False, 'from fix_decimals import add_int, cut_decimals\n'), ((2534, 2554), 'fix_decimals.cut_decimals', 'cut_decimals', (['raw', '(2)'], {}), '(raw, 2)\n', (2546, 2554), False, 'from fix_decimals import add_int, cut_decimals\n'), ((2624, 2691), 'numpy.savetxt', 'np.savetxt', (["('./data/' + name + '.csv')", 'raw'], {'delimiter': '""";"""', 'fmt': '"""%s"""'}), "('./data/' + name + '.csv', raw, delimiter=';', fmt='%s')\n", (2634, 2691), True, 'import numpy as np\n'), ((488, 516), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (498, 516), True, 'import csv as csv\n'), ((664, 836), 'join_columns.join_columns', 'join_columns', (['raw', "['sanitario1', 'sanitario2', 'sanitario3', 'sanitario5', 'sanitario6']", "['c', 'c', 'c', 'c', 'o1']", '"""sanitario"""', '[1, 2, 3, 4]', "{'o1': 'sanioth'}"], {}), "(raw, ['sanitario1', 'sanitario2', 'sanitario3', 'sanitario5',\n 'sanitario6'], ['c', 'c', 'c', 'c', 'o1'], 'sanitario', [1, 2, 3, 4], {\n 'o1': 'sanioth'})\n", (676, 836), False, 'from join_columns import join_columns\n'), ((834, 977), 'join_columns.join_columns', 'join_columns', (['raw', "['energcocinar1', 'energcocinar2', 'energcocinar3', 'energcocinar4']", "['c', 'c', 'c', 'c']", '"""energcocinar"""', '[1, 4, 2, 3]'], {}), "(raw, ['energcocinar1', 'energcocinar2', 'energcocinar3',\n 'energcocinar4'], ['c', 'c', 'c', 'c'], 'energcocinar', [1, 4, 2, 3])\n", (846, 977), False, 'from join_columns import join_columns\n'), ((982, 1148), 'join_columns.join_columns', 'join_columns', (['raw', "['elimbasu1', 'elimbasu2', 'elimbasu3', 'elimbasu4', 'elimbasu6']", "['c', 'c', 'c', 'c', 'o1']", 
'"""elimbasu"""', '[4, 3, 2, 1]', "{'o1': 'elimoth'}"], {}), "(raw, ['elimbasu1', 'elimbasu2', 'elimbasu3', 'elimbasu4',\n 'elimbasu6'], ['c', 'c', 'c', 'c', 'o1'], 'elimbasu', [4, 3, 2, 1], {\n 'o1': 'elimoth'})\n", (994, 1148), False, 'from join_columns import join_columns\n'), ((1321, 1415), 'join_columns.join_columns', 'join_columns', (['raw', "['epared1', 'epared2', 'epared3']", "['c', 'c', 'c']", '"""epared"""', '[1, 2, 3]'], {}), "(raw, ['epared1', 'epared2', 'epared3'], ['c', 'c', 'c'],\n 'epared', [1, 2, 3])\n", (1333, 1415), False, 'from join_columns import join_columns\n'), ((1422, 1516), 'join_columns.join_columns', 'join_columns', (['raw', "['etecho1', 'etecho2', 'etecho3']", "['c', 'c', 'c']", '"""etecho"""', '[1, 2, 3]'], {}), "(raw, ['etecho1', 'etecho2', 'etecho3'], ['c', 'c', 'c'],\n 'etecho', [1, 2, 3])\n", (1434, 1516), False, 'from join_columns import join_columns\n'), ((1523, 1609), 'join_columns.join_columns', 'join_columns', (['raw', "['eviv1', 'eviv2', 'eviv3']", "['c', 'c', 'c']", '"""eviv"""', '[1, 2, 3]'], {}), "(raw, ['eviv1', 'eviv2', 'eviv3'], ['c', 'c', 'c'], 'eviv', [1,\n 2, 3])\n", (1535, 1609), False, 'from join_columns import join_columns\n'), ((1616, 1683), 'join_columns.join_columns', 'join_columns', (['raw', "['female', 'male']", "['c', 'c']", '"""gender"""', '[0, 1]'], {}), "(raw, ['female', 'male'], ['c', 'c'], 'gender', [0, 1])\n", (1628, 1683), False, 'from join_columns import join_columns\n'), ((1696, 2031), 'join_columns.join_columns', 'join_columns', (['raw', "['parentesco1', 'parentesco2', 'parentesco3', 'parentesco4', 'parentesco5',\n 'parentesco6', 'parentesco7', 'parentesco8', 'parentesco9',\n 'parentesco10', 'parentesco11', 'parentesco12']", "['c', 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c']", '"""parentesco"""', '[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]'], {}), "(raw, ['parentesco1', 'parentesco2', 'parentesco3',\n 'parentesco4', 'parentesco5', 'parentesco6', 'parentesco7',\n 'parentesco8', 
'parentesco9', 'parentesco10', 'parentesco11',\n 'parentesco12'], ['c', 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c',\n 'c'], 'parentesco', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])\n", (1708, 2031), False, 'from join_columns import join_columns\n'), ((2008, 2254), 'join_columns.join_columns', 'join_columns', (['raw', "['instlevel1', 'instlevel2', 'instlevel3', 'instlevel4', 'instlevel5',\n 'instlevel6', 'instlevel7', 'instlevel8', 'instlevel9']", "['c', 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c']", '"""instlevel"""', '[1, 2, 3, 4, 5, 6, 7, 8, 9]'], {}), "(raw, ['instlevel1', 'instlevel2', 'instlevel3', 'instlevel4',\n 'instlevel5', 'instlevel6', 'instlevel7', 'instlevel8', 'instlevel9'],\n ['c', 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c'], 'instlevel', [1, 2, 3, 4,\n 5, 6, 7, 8, 9])\n", (2020, 2254), False, 'from join_columns import join_columns\n'), ((2241, 2407), 'join_columns.join_columns', 'join_columns', (['raw', "['tipovivi1', 'tipovivi2', 'tipovivi3', 'tipovivi4', 'tipovivi5']", "['c', 'c', 'c', 'c', 'o1']", '"""tipovivi"""', '[1, 2, 3, 4]', "{'o1': 'tipooth'}"], {}), "(raw, ['tipovivi1', 'tipovivi2', 'tipovivi3', 'tipovivi4',\n 'tipovivi5'], ['c', 'c', 'c', 'c', 'o1'], 'tipovivi', [1, 2, 3, 4], {\n 'o1': 'tipooth'})\n", (2253, 2407), False, 'from join_columns import join_columns\n'), ((2405, 2470), 'join_columns.join_columns', 'join_columns', (['raw', "['area2', 'area1']", "['c', 'c']", '"""area"""', '[0, 1]'], {}), "(raw, ['area2', 'area1'], ['c', 'c'], 'area', [0, 1])\n", (2417, 2470), False, 'from join_columns import join_columns\n')] |
# Generated by Django 3.2.7 on 2021-09-07 02:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine Location.location as a
    CharField with max_length=150."""

    dependencies = [
        ('projects', '0004_location'),
    ]

    operations = [
        migrations.AlterField(
            model_name='location',
            name='location',
            field=models.CharField(max_length=150),
        ),
    ]
| [
"django.db.models.CharField"
] | [((331, 363), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (347, 363), False, 'from django.db import migrations, models\n')] |
import numpy as np
from napari_plugin_engine import napari_hook_implementation
from napari_tools_menu import register_function
from napari_time_slicer import time_slicer, slice_by_slice
import napari
from napari.types import ImageData, LabelsData
@napari_hook_implementation
def napari_experimental_provide_function():
    """napari hook point: list the processing functions this plugin
    contributes to the napari GUI (each is also registered in the tools
    menu via its own @register_function decorator)."""
    return [
        gaussian_blur,
        threshold_otsu,
        connected_component_labeling,
        sobel_edge_detector,
        binary_fill_holes,
        seeded_watershed,
        split_touching_objects,
        euclidean_distance_map
    ]
@register_function(menu="Filtering / noise removal > Gaussian (n-mahotas)")
@time_slicer
def gaussian_blur(image:ImageData, sigma: float = 1, viewer: napari.Viewer = None) -> ImageData:
    """Smooth ``image`` with a Gaussian kernel of standard deviation ``sigma``.

    See also
    --------
    ..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.gaussian_filter
    """
    import mahotas as mh
    smoothed = mh.gaussian_filter(image, sigma)
    return smoothed
def _8bit(image):
return (image / image.max() * 255).astype(np.uint8)
@register_function(menu="Segmentation / binarization > Threshold (Otsu et al 1979, n-mahotas)")
@time_slicer
def threshold_otsu(image:ImageData, viewer: napari.Viewer = None) -> LabelsData:
    """Binarize ``image`` with Otsu's automatically determined threshold.

    The image is rescaled to 8 bit before the threshold is computed.

    See also
    --------
    ..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.otsu
    """
    import mahotas as mh
    rescaled = _8bit(image)
    threshold = mh.otsu(rescaled)
    return rescaled > threshold
@register_function(menu="Segmentation / labeling > Connected component labeling (n-mahotas)")
@time_slicer
def connected_component_labeling(binary_image: LabelsData, viewer: napari.Viewer = None) -> LabelsData:
    """Label connected regions of a binary image with unique integer ids.

    See also
    --------
    ..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.label
    """
    # BUG FIX: `mh` was used here without being imported -- every sibling
    # function imports mahotas locally and there is no module-level import,
    # so this function raised NameError when called.
    import mahotas as mh
    labeled, nr_objects = mh.label(binary_image)
    return labeled
@register_function(menu="Filtering / edge enhancement > Sobel edge detection (slice-by-slice, n-mahotas)")
@time_slicer
def sobel_edge_detector(image:ImageData, viewer: napari.Viewer = None) -> ImageData:
    """Highlight edges in ``image`` using the Sobel operator (filter only,
    no thresholding).

    See also
    --------
    ..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.sobel
    """
    import mahotas as mh
    edge_image = mh.sobel(image, just_filter=True)
    return edge_image
@register_function(menu="Segmentation post-processing > Binary fill holes (slice_by_slice, n-mahotas)")
@slice_by_slice
@time_slicer
def binary_fill_holes(binary_image:LabelsData, viewer: napari.Viewer = None) -> LabelsData:
    """Close the holes enclosed inside objects of a binary image.

    See also
    --------
    ..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.close_holes
    """
    import mahotas as mh
    filled = mh.close_holes(binary_image)
    return filled
@register_function(menu="Segmentation / labeling > Seeded watershed (n-mahotas)")
@time_slicer
def seeded_watershed(image:ImageData, labeled_seeds:LabelsData, viewer: napari.Viewer = None) -> LabelsData:
    """Flood the intensity valleys of ``image`` starting from
    ``labeled_seeds`` until every pixel carries a label.

    See also
    --------
    ..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.cwatershed
    """
    import mahotas as mh
    return mh.cwatershed(image, labeled_seeds)
@register_function(menu="Measurement > Euclidean distance map (n-mahotas)")
@time_slicer
def euclidean_distance_map(binary_image:LabelsData, viewer: napari.Viewer = None) -> LabelsData:
    """Compute a Euclidean distance transform of a binary image: each
    non-zero pixel is replaced by its distance to the nearest zero pixel.

    See also
    --------
    ..[0] https://en.wikipedia.org/wiki/Distance_transform
    """
    import mahotas as mh
    distance_image = mh.distance(binary_image)
    return distance_image
def _sobel_3d(image):
from scipy import ndimage as ndi
kernel = np.asarray([
[
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
], [
[0, 1, 0],
[1, -6, 1],
[0, 1, 0]
], [
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
]
])
return ndi.convolve(image, kernel)
@register_function(menu="Segmentation post-processing > Split touching objects (n-mahotas)")
@time_slicer
def split_touching_objects(binary:LabelsData, sigma:float=3.5, viewer: napari.Viewer = None) -> LabelsData:
    """
    Takes a binary image and draws cuts in the objects similar to the ImageJ watershed algorithm.

    Parameters
    ----------
    binary : LabelsData
        Binary mask of (possibly touching) objects; 2D or 3D.
    sigma : float
        Smoothing applied to the distance map before seed detection;
        larger values merge nearby maxima and hence produce fewer cuts.

    See also
    --------
    .. [0] https://imagej.nih.gov/ij/docs/menus/process.html#watershed
    """
    import mahotas as mh
    binary = _8bit(np.asarray(binary))
    # typical way of using scikit-image watershed:
    # distance transform -> smooth -> regional maxima as seeds -> watershed
    distance = mh.distance(binary)
    blurred_distance = mh.gaussian_filter(distance, sigma=sigma)
    fp = np.ones((3,) * binary.ndim)
    markers, num_labels = mh.label(mh.regmax(blurred_distance, Bc=fp))
    labels = mh.cwatershed(-blurred_distance, markers)
    # identify label-cutting edges: boundaries present in the watershed
    # label image but absent in the original binary are the cut lines
    # between touching objects
    if len(binary.shape) == 2:
        edges = mh.sobel(labels, just_filter=True)
        edges2 = mh.sobel(binary, just_filter=True)
    else: # assuming 3D
        edges = _sobel_3d(labels)
        edges2 = _sobel_3d(binary)
    # erase the cut pixels from the mask, then a morphological opening
    # cleans up single-pixel debris left along the cuts
    almost = np.logical_not(np.logical_xor(edges != 0, edges2 != 0)) * binary
    return mh.open(almost) != 0
| [
"mahotas.label",
"mahotas.distance",
"numpy.ones",
"mahotas.close_holes",
"numpy.asarray",
"mahotas.cwatershed",
"scipy.ndimage.convolve",
"numpy.logical_xor",
"mahotas.gaussian_filter",
"mahotas.regmax",
"napari_tools_menu.register_function",
"mahotas.open",
"mahotas.sobel",
"mahotas.otsu... | [((574, 648), 'napari_tools_menu.register_function', 'register_function', ([], {'menu': '"""Filtering / noise removal > Gaussian (n-mahotas)"""'}), "(menu='Filtering / noise removal > Gaussian (n-mahotas)')\n", (591, 648), False, 'from napari_tools_menu import register_function\n'), ((1099, 1198), 'napari_tools_menu.register_function', 'register_function', ([], {'menu': '"""Segmentation / binarization > Threshold (Otsu et al 1979, n-mahotas)"""'}), "(menu=\n 'Segmentation / binarization > Threshold (Otsu et al 1979, n-mahotas)')\n", (1116, 1198), False, 'from napari_tools_menu import register_function\n'), ((1562, 1659), 'napari_tools_menu.register_function', 'register_function', ([], {'menu': '"""Segmentation / labeling > Connected component labeling (n-mahotas)"""'}), "(menu=\n 'Segmentation / labeling > Connected component labeling (n-mahotas)')\n", (1579, 1659), False, 'from napari_tools_menu import register_function\n'), ((2005, 2120), 'napari_tools_menu.register_function', 'register_function', ([], {'menu': '"""Filtering / edge enhancement > Sobel edge detection (slice-by-slice, n-mahotas)"""'}), "(menu=\n 'Filtering / edge enhancement > Sobel edge detection (slice-by-slice, n-mahotas)'\n )\n", (2022, 2120), False, 'from napari_tools_menu import register_function\n'), ((2440, 2552), 'napari_tools_menu.register_function', 'register_function', ([], {'menu': '"""Segmentation post-processing > Binary fill holes (slice_by_slice, n-mahotas)"""'}), "(menu=\n 'Segmentation post-processing > Binary fill holes (slice_by_slice, n-mahotas)'\n )\n", (2457, 2552), False, 'from napari_tools_menu import register_function\n'), ((2888, 2973), 'napari_tools_menu.register_function', 'register_function', ([], {'menu': '"""Segmentation / labeling > Seeded watershed (n-mahotas)"""'}), "(menu='Segmentation / labeling > Seeded watershed (n-mahotas)'\n )\n", (2905, 2973), False, 'from napari_tools_menu import register_function\n'), ((3424, 3498), 
'napari_tools_menu.register_function', 'register_function', ([], {'menu': '"""Measurement > Euclidean distance map (n-mahotas)"""'}), "(menu='Measurement > Euclidean distance map (n-mahotas)')\n", (3441, 3498), False, 'from napari_tools_menu import register_function\n'), ((4312, 4408), 'napari_tools_menu.register_function', 'register_function', ([], {'menu': '"""Segmentation post-processing > Split touching objects (n-mahotas)"""'}), "(menu=\n 'Segmentation post-processing > Split touching objects (n-mahotas)')\n", (4329, 4408), False, 'from napari_tools_menu import register_function\n'), ((987, 1019), 'mahotas.gaussian_filter', 'mh.gaussian_filter', (['image', 'sigma'], {}), '(image, sigma)\n', (1005, 1019), True, 'import mahotas as mh\n'), ((1514, 1533), 'mahotas.otsu', 'mh.otsu', (['image_8bit'], {}), '(image_8bit)\n', (1521, 1533), True, 'import mahotas as mh\n'), ((1961, 1983), 'mahotas.label', 'mh.label', (['binary_image'], {}), '(binary_image)\n', (1969, 1983), True, 'import mahotas as mh\n'), ((2404, 2437), 'mahotas.sobel', 'mh.sobel', (['image'], {'just_filter': '(True)'}), '(image, just_filter=True)\n', (2412, 2437), True, 'import mahotas as mh\n'), ((2856, 2884), 'mahotas.close_holes', 'mh.close_holes', (['binary_image'], {}), '(binary_image)\n', (2870, 2884), True, 'import mahotas as mh\n'), ((3368, 3403), 'mahotas.cwatershed', 'mh.cwatershed', (['image', 'labeled_seeds'], {}), '(image, labeled_seeds)\n', (3381, 3403), True, 'import mahotas as mh\n'), ((3899, 3924), 'mahotas.distance', 'mh.distance', (['binary_image'], {}), '(binary_image)\n', (3910, 3924), True, 'import mahotas as mh\n'), ((3999, 4122), 'numpy.asarray', 'np.asarray', (['[[[0, 0, 0], [0, 1, 0], [0, 0, 0]], [[0, 1, 0], [1, -6, 1], [0, 1, 0]], [[0,\n 0, 0], [0, 1, 0], [0, 0, 0]]]'], {}), '([[[0, 0, 0], [0, 1, 0], [0, 0, 0]], [[0, 1, 0], [1, -6, 1], [0, \n 1, 0]], [[0, 0, 0], [0, 1, 0], [0, 0, 0]]])\n', (4009, 4122), True, 'import numpy as np\n'), ((4281, 4308), 'scipy.ndimage.convolve', 
'ndi.convolve', (['image', 'kernel'], {}), '(image, kernel)\n', (4293, 4308), True, 'from scipy import ndimage as ndi\n'), ((4868, 4887), 'mahotas.distance', 'mh.distance', (['binary'], {}), '(binary)\n', (4879, 4887), True, 'import mahotas as mh\n'), ((4911, 4952), 'mahotas.gaussian_filter', 'mh.gaussian_filter', (['distance'], {'sigma': 'sigma'}), '(distance, sigma=sigma)\n', (4929, 4952), True, 'import mahotas as mh\n'), ((4962, 4989), 'numpy.ones', 'np.ones', (['((3,) * binary.ndim)'], {}), '((3,) * binary.ndim)\n', (4969, 4989), True, 'import numpy as np\n'), ((5074, 5115), 'mahotas.cwatershed', 'mh.cwatershed', (['(-blurred_distance)', 'markers'], {}), '(-blurred_distance, markers)\n', (5087, 5115), True, 'import mahotas as mh\n'), ((4782, 4800), 'numpy.asarray', 'np.asarray', (['binary'], {}), '(binary)\n', (4792, 4800), True, 'import numpy as np\n'), ((5025, 5059), 'mahotas.regmax', 'mh.regmax', (['blurred_distance'], {'Bc': 'fp'}), '(blurred_distance, Bc=fp)\n', (5034, 5059), True, 'import mahotas as mh\n'), ((5199, 5233), 'mahotas.sobel', 'mh.sobel', (['labels'], {'just_filter': '(True)'}), '(labels, just_filter=True)\n', (5207, 5233), True, 'import mahotas as mh\n'), ((5251, 5285), 'mahotas.sobel', 'mh.sobel', (['binary'], {'just_filter': '(True)'}), '(binary, just_filter=True)\n', (5259, 5285), True, 'import mahotas as mh\n'), ((5470, 5485), 'mahotas.open', 'mh.open', (['almost'], {}), '(almost)\n', (5477, 5485), True, 'import mahotas as mh\n'), ((5409, 5448), 'numpy.logical_xor', 'np.logical_xor', (['(edges != 0)', '(edges2 != 0)'], {}), '(edges != 0, edges2 != 0)\n', (5423, 5448), True, 'import numpy as np\n')] |
from order import Order
class OrderManager:
    """Keeps a single Order per chat id, creating orders lazily on first use."""

    def __init__(self):
        # chat id -> Order
        self.orders = {}

    def user_has_any_order(self, chat_id: int, user: str) -> bool:
        """Return True if ``user`` has any order in the chat's Order."""
        return self.get_order(chat_id).user_has_any_order(user)

    def get_order(self, id: int) -> Order:
        """Fetch the Order for ``id``, creating an empty one if missing."""
        existing = self.orders.get(id)
        if existing is None:
            existing = Order()
            self.orders[id] = existing
        return existing

    def reset_order(self, id: int) -> None:
        """Reset the Order associated with ``id`` (creating it if absent)."""
        self.get_order(id).reset()
"order.Order"
] | [((360, 367), 'order.Order', 'Order', ([], {}), '()\n', (365, 367), False, 'from order import Order\n')] |
from django.contrib import admin
# Register your models here.
from Box.models import Player, ObservationList, Comments, ObservationForm
class PlayerAdmin(admin.ModelAdmin):
    """Admin list view for Player: personal, club and contact columns."""
    list_display = ['first_name', 'last_name', 'year_of_birth', 'club', 'position', 'status', 'mail', 'phone', 'agent']
class ObservationListAdmin(admin.ModelAdmin):
    """Admin list view for ObservationList: when/where/who columns."""
    list_display = ['date', 'match', 'city', 'country', 'scout']
class CommentsAdmin(admin.ModelAdmin):
    """Admin list view for Comments: comment text, player and date."""
    list_display = ['comment', 'player', 'date']
class ObservationFormAdmin(admin.ModelAdmin):
    """Admin list view for ObservationForm: descriptions plus the eleven
    numbered rating columns."""
    list_display = ['scout', 'player', 'observation', 'first_desc',
     'second_desc', 'third_desc', 'fourth_desc', 'fifth_desc', 'sixth_desc',
     'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', 'eleven']
# Register each model with its admin configuration.
# FIX: the first three register() calls ended with a stray trailing comma,
# turning each statement into an accidental one-element tuple expression --
# harmless at runtime but misleading; removed.
admin.site.register(Player, PlayerAdmin)
admin.site.register(ObservationList, ObservationListAdmin)
admin.site.register(Comments, CommentsAdmin)
admin.site.register(ObservationForm, ObservationFormAdmin)
"django.contrib.admin.site.register"
] | [((966, 1024), 'django.contrib.admin.site.register', 'admin.site.register', (['ObservationForm', 'ObservationFormAdmin'], {}), '(ObservationForm, ObservationFormAdmin)\n', (985, 1024), False, 'from django.contrib import admin\n'), ((818, 858), 'django.contrib.admin.site.register', 'admin.site.register', (['Player', 'PlayerAdmin'], {}), '(Player, PlayerAdmin)\n', (837, 858), False, 'from django.contrib import admin\n'), ((860, 918), 'django.contrib.admin.site.register', 'admin.site.register', (['ObservationList', 'ObservationListAdmin'], {}), '(ObservationList, ObservationListAdmin)\n', (879, 918), False, 'from django.contrib import admin\n'), ((920, 964), 'django.contrib.admin.site.register', 'admin.site.register', (['Comments', 'CommentsAdmin'], {}), '(Comments, CommentsAdmin)\n', (939, 964), False, 'from django.contrib import admin\n')] |
from __future__ import print_function, division, absolute_import, unicode_literals
from numbers import Number
import numpy as np
from voluptuous import Schema, Required, Any, Range
from mitxgraders.comparers.baseclasses import CorrelatedComparer
from mitxgraders.helpers.calc.mathfuncs import is_nearly_zero
from mitxgraders.helpers.validatorfuncs import text_string
from mitxgraders.exceptions import ConfigError
def get_linear_fit_error(x, y):
    """
    Return the residual error of the least-squares line y = a*x + b.

    When x is constant, the design matrix is rank-deficient and the result of
    get_offset_fit_error(x, y) is returned instead.

    Arguments:
        x, y: flat numpy arrays of equal length

    Usage
    =====
    Zero error in a linear relationship:
    >>> x = np.array([2, 5, 8])
    >>> round(get_linear_fit_error(x, 2*x + 1), 6)
    0.0
    """
    # Design matrix with columns [x, 1] for the model y = a*x + b.
    design = np.column_stack([x, np.ones(len(x))])
    _, residuals, rank, _ = np.linalg.lstsq(design, y, rcond=-1)
    if rank == 1:
        # Constant x: fall back to fitting a pure offset.
        return get_offset_fit_error(x, y)
    return np.sqrt(residuals.item())
def get_proportional_fit_error(x, y):
    """
    Return the residual error of the least-squares proportional fit y = a*x
    (no constant term).

    Arguments:
        x, y: flat numpy arrays of equal length

    Usage
    =====
    Reveals error if relationship is not proportional:
    >>> x = np.array([2, 5, 8])
    >>> get_proportional_fit_error(x, 2*x + 1)  # doctest: +ELLIPSIS
    0.76200...

    Zero error in a proportional relationship:
    >>> round(get_proportional_fit_error(x, 2*x), 6)
    0.0
    """
    # Single-column design matrix for the model y = a*x.
    design = np.reshape(x, (-1, 1))
    _, residuals, _, _ = np.linalg.lstsq(design, y, rcond=-1)
    return np.sqrt(residuals.item())
def get_offset_fit_error(x, y):
    """
    Return the residual error of the best unit-slope fit y = x + b.

    The optimal offset b is the mean of (y - x); the error is the Euclidean
    norm of the remaining deviations.

    Arguments:
        x, y: flat numpy arrays of equal length

    Usage
    =====
    Reveals error if relationship is not constant-offset:
    >>> x = np.array([2, 5, 8])
    >>> get_offset_fit_error(x, 2*x + 1)  # doctest: +ELLIPSIS
    4.242640...

    Zero error in a constant-offset relationship:
    >>> round(get_offset_fit_error(x, x + 5), 6)
    0.0
    """
    best_offset = np.mean(y - x)
    deviations = x + best_offset - y
    return np.sqrt(np.sum(np.square(deviations)))
def get_equals_fit_error(x, y):
    """
    Return the root-sum-square difference between two samples.

    Arguments:
        x, y: compatible numpy arrays
    """
    diff = x - y
    return np.sqrt(np.sum(np.square(diff)))
class LinearComparer(CorrelatedComparer):
    """
    Used to check that there is a linear relationship between student's input
    and the expected answer.

    The general linear relationship is expected = a * student + b. The comparer
    can check for four subtypes:
        equals: (a, b) = (1, 0)
        proportional: b = 0
        offset: a = 1
        linear: neither a nor b fixed

    Configuration
    =============
    The first four configuration keys determine the amount of partial credit
    given for a specific type of linear relationship. If set to None, the
    relationship is not checked.
        equals (None | number): defaults to 1.0
        proportional (None | number): defaults to 0.5
        offset (None | number): defaults to None
        linear (None | number): defaults to None

    The remaining configuration keys specify a feedback message to be given
    in each case:
        equals_msg (str): defaults to ''
        proportional_msg (str): defaults to 'The submitted answer differs from
            an expected answer by a constant factor.'
        offset_msg (str): defaults to ''
        linear_msg (str): defaults to ''

    NOTE:
        LinearComparer can be used with MatrixGrader, but the linear
        relationship must be the same for all entries. Essentially, this means
        we test for
            expected_array = scalar_a * student_array + scalar_b * ONES
        where ONES is a matrix of all ones.
        The ONES offset works as expected for vectors, but is probably not what
        you want for matrices.
    """

    # Voluptuous schema: four credit values (None disables that check) plus a
    # matching feedback message for each mode.
    schema_config = Schema({
        Required('equals', default=1.0): Any(None, Range(0, 1)),
        Required('proportional', default=0.5): Any(None, Range(0, 1)),
        Required('offset', default=None): Any(None, Range(0, 1)),
        Required('linear', default=None): Any(None, Range(0, 1)),
        Required('equals_msg', default=''): text_string,
        Required('proportional_msg', default=(
            'The submitted answer differs from an expected answer by a '
            'constant factor.'
        )): text_string,
        Required('offset_msg', default=''): text_string,
        Required('linear_msg', default=''): text_string,
    })

    # All supported relationship modes; zero_compatible_modes are the only
    # ones checked when either side evaluates to (nearly) zero, since
    # 'proportional' and 'linear' fits are degenerate against zero.
    all_modes = ('equals', 'proportional', 'offset', 'linear')
    zero_compatible_modes = ('equals', 'offset')

    def __init__(self, config=None, **kwargs):
        # Record which modes are enabled, i.e. have a non-None credit value.
        super(LinearComparer, self).__init__(config, **kwargs)
        self.modes = tuple(mode for mode in self.all_modes if self.config[mode] is not None)

    # Maps each mode to the module-level residual-error function used to
    # measure how well that relationship fits the sampled evaluations.
    error_calculators = {
        'equals': get_equals_fit_error,
        'proportional': get_proportional_fit_error,
        'offset': get_offset_fit_error,
        'linear': get_linear_fit_error,
    }

    @staticmethod
    def check_comparing_zero(comparer_params_evals, student_evals, tolerance):
        """
        Check whether student input is nearly zero, or author input is exactly zero
        """
        student_zero = all([
            is_nearly_zero(x, tolerance, reference=y)
            for x, y in zip(student_evals, comparer_params_evals)
        ])
        # Each entry of comparer_params_evals is a 1-element list; unpack it.
        expected_zero = all(np.all(x == 0.0) for [x] in comparer_params_evals)
        return student_zero or expected_zero

    def get_valid_modes(self, is_comparing_zero):
        """
        Returns a copy of self.modes, first removing 'proportional' and 'linear'
        when is_comparing_zero is truthy.
        """
        if is_comparing_zero:
            return tuple(mode for mode in self.modes
                         if mode in self.zero_compatible_modes)
        return self.modes

    def __call__(self, comparer_params_evals, student_evals, utils):
        """
        Grade the sampled student evaluations against the expected ones,
        returning the highest-credit result among the enabled modes as a
        dict with 'grade_decimal' and 'msg' keys.

        Raises ConfigError when fewer than 3 samples are available.
        """
        student_evals_norm = np.linalg.norm(student_evals)
        # Validate student input shape...only needed for MatrixGrader
        if hasattr(utils, 'validate_shape'):
            # in numpy, scalars have empty tuples as their shapes
            expected_0 = comparer_params_evals[0][0]
            scalar_expected = isinstance(expected_0, Number)
            shape = tuple() if scalar_expected else expected_0.shape
            utils.validate_shape(student_evals[0], shape)
        # Raise an error if there is less than 3 samples
        if len(student_evals) < 3:
            msg = 'Cannot perform linear comparison with less than 3 samples'
            raise ConfigError(msg)
        is_comparing_zero = self.check_comparing_zero(comparer_params_evals,
                                                      student_evals, utils.tolerance)
        filtered_modes = self.get_valid_modes(is_comparing_zero)
        # Get the result for each mode
        # flatten in case individual evals are arrays (as in MatrixGrader)
        student = np.array(student_evals).flatten()
        expected = np.array(comparer_params_evals).flatten()
        errors = [self.error_calculators[mode](student, expected) for mode in filtered_modes]
        # A mode earns its configured credit when its fit error is negligible
        # relative to the norm of the student's evaluations.
        results = [
            {'grade_decimal': self.config[mode], 'msg': self.config[mode+'_msg']}
            if is_nearly_zero(error, utils.tolerance, reference=student_evals_norm)
            else
            {'grade_decimal': 0, 'msg': ''}
            for mode, error in zip(filtered_modes, errors)
        ]
        # Get the best result using max.
        # For a list of pairs, max compares by 1st index and uses 2nd to break ties
        key = lambda result: (result['grade_decimal'], result['msg'])
        return max(results, key=key)
| [
"numpy.mean",
"voluptuous.Required",
"mitxgraders.exceptions.ConfigError",
"numpy.all",
"numpy.square",
"numpy.array",
"numpy.vstack",
"numpy.linalg.lstsq",
"numpy.linalg.norm",
"mitxgraders.helpers.calc.mathfuncs.is_nearly_zero",
"voluptuous.Range"
] | [((1370, 1401), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'y'], {'rcond': '(-1)'}), '(A, y, rcond=-1)\n', (1385, 1401), True, 'import numpy as np\n'), ((2612, 2624), 'numpy.vstack', 'np.vstack', (['x'], {}), '(x)\n', (2621, 2624), True, 'import numpy as np\n'), ((2670, 2701), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'y'], {'rcond': '(-1)'}), '(A, y, rcond=-1)\n', (2685, 2701), True, 'import numpy as np\n'), ((3346, 3360), 'numpy.mean', 'np.mean', (['(y - x)'], {}), '(y - x)\n', (3353, 3360), True, 'import numpy as np\n'), ((7327, 7356), 'numpy.linalg.norm', 'np.linalg.norm', (['student_evals'], {}), '(student_evals)\n', (7341, 7356), True, 'import numpy as np\n'), ((3384, 3407), 'numpy.square', 'np.square', (['(x + mean - y)'], {}), '(x + mean - y)\n', (3393, 3407), True, 'import numpy as np\n'), ((3594, 3610), 'numpy.square', 'np.square', (['(x - y)'], {}), '(x - y)\n', (3603, 3610), True, 'import numpy as np\n'), ((5231, 5262), 'voluptuous.Required', 'Required', (['"""equals"""'], {'default': '(1.0)'}), "('equals', default=1.0)\n", (5239, 5262), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((5296, 5333), 'voluptuous.Required', 'Required', (['"""proportional"""'], {'default': '(0.5)'}), "('proportional', default=0.5)\n", (5304, 5333), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((5367, 5399), 'voluptuous.Required', 'Required', (['"""offset"""'], {'default': 'None'}), "('offset', default=None)\n", (5375, 5399), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((5433, 5465), 'voluptuous.Required', 'Required', (['"""linear"""'], {'default': 'None'}), "('linear', default=None)\n", (5441, 5465), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((5499, 5533), 'voluptuous.Required', 'Required', (['"""equals_msg"""'], {'default': '""""""'}), "('equals_msg', default='')\n", (5507, 5533), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((5556, 5680), 
'voluptuous.Required', 'Required', (['"""proportional_msg"""'], {'default': '"""The submitted answer differs from an expected answer by a constant factor."""'}), "('proportional_msg', default=\n 'The submitted answer differs from an expected answer by a constant factor.'\n )\n", (5564, 5680), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((5732, 5766), 'voluptuous.Required', 'Required', (['"""offset_msg"""'], {'default': '""""""'}), "('offset_msg', default='')\n", (5740, 5766), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((5789, 5823), 'voluptuous.Required', 'Required', (['"""linear_msg"""'], {'default': '""""""'}), "('linear_msg', default='')\n", (5797, 5823), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((7969, 7985), 'mitxgraders.exceptions.ConfigError', 'ConfigError', (['msg'], {}), '(msg)\n', (7980, 7985), False, 'from mitxgraders.exceptions import ConfigError\n'), ((5274, 5285), 'voluptuous.Range', 'Range', (['(0)', '(1)'], {}), '(0, 1)\n', (5279, 5285), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((5345, 5356), 'voluptuous.Range', 'Range', (['(0)', '(1)'], {}), '(0, 1)\n', (5350, 5356), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((5411, 5422), 'voluptuous.Range', 'Range', (['(0)', '(1)'], {}), '(0, 1)\n', (5416, 5422), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((5477, 5488), 'voluptuous.Range', 'Range', (['(0)', '(1)'], {}), '(0, 1)\n', (5482, 5488), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((6614, 6655), 'mitxgraders.helpers.calc.mathfuncs.is_nearly_zero', 'is_nearly_zero', (['x', 'tolerance'], {'reference': 'y'}), '(x, tolerance, reference=y)\n', (6628, 6655), False, 'from mitxgraders.helpers.calc.mathfuncs import is_nearly_zero\n'), ((6761, 6777), 'numpy.all', 'np.all', (['(x == 0.0)'], {}), '(x == 0.0)\n', (6767, 6777), True, 'import numpy as np\n'), ((8348, 8371), 'numpy.array', 'np.array', 
(['student_evals'], {}), '(student_evals)\n', (8356, 8371), True, 'import numpy as np\n'), ((8401, 8432), 'numpy.array', 'np.array', (['comparer_params_evals'], {}), '(comparer_params_evals)\n', (8409, 8432), True, 'import numpy as np\n'), ((8655, 8723), 'mitxgraders.helpers.calc.mathfuncs.is_nearly_zero', 'is_nearly_zero', (['error', 'utils.tolerance'], {'reference': 'student_evals_norm'}), '(error, utils.tolerance, reference=student_evals_norm)\n', (8669, 8723), False, 'from mitxgraders.helpers.calc.mathfuncs import is_nearly_zero\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 25 05:21:45 2021
@author: bw98j
"""
import prose as pgx
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import seaborn as sns
import numpy as np
import itertools
import glob
import os
from tqdm import tqdm
import scipy.stats
import gtfparse
import itertools
from pylab import *
import collections
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import quantile_transform
import pickle
import re
#plot parameters
plt.rcParams['mathtext.fontset'] = 'custom'
plt.rcParams['mathtext.it'] = 'Arial:italic'
plt.rcParams['mathtext.rm'] = 'Arial'
plt.rc('font',family='arial',size=40)
plt.rc('hatch',linewidth = 2.0)

#%% Read formatted matrices
# One row per cell line; duplicate cell-line rows are dropped.
score = pd.read_csv('ccle/ccle_prose_formatted.tsv.gz', sep='\t').drop_duplicates('cell_line')

#%%
# Build {evidence-level token (e.g. 'PE2'): set of protein identifiers} from
# the neXtProt export files.
# NOTE(review): `pe` is rebound inside the loop (list -> string); iteration
# still works because the iterator was created first, but the name reuse is
# confusing and worth cleaning up.
pe=glob.glob('databases/nextprot*')
pedict = {}
for i in pe:
    pe = i.split('_')[3]  # token at position 3 of the filename - presumably the PE level; confirm naming scheme
    df = pd.read_csv(i)
    pro = [i.split('_')[-1] for i in df.values.T[0]]  # last '_'-separated piece of each entry in the first column
    pedict[pe] = set(pro)

#%%
score = score.sort_values(by='tissue')
# Restrict the score matrix to PE2 proteins present among the CCLE columns.
df = score[list(score.columns.intersection(pedict['PE2']))]
data = df.mean().sort_values(ascending=True)

# Rank plot: mean PROSE score per PE2 protein against its rank.
fig, ax = plt.subplots(figsize=[10,9])
g = sns.scatterplot(y=df.mean().sort_values(ascending=True),
                    x=df.mean().sort_values(ascending=True).rank(),
                    )
sns.despine()
plt.ylabel('Mean PROSE score', labelpad=10)
plt.xlabel('rank')
# Two-column annotation header ('Protein' / 'Score') drawn outside the axes.
plt.text(s='Protein',x=1.4,y=.95,size=30,ha='right',transform = ax.transAxes, weight='bold')
plt.text(s='Score',x=1.45,y=.95,size=30,ha='left',transform = ax.transAxes,weight='bold')

# Annotate the ten highest-scoring PE2 proteins beside the plot.
highscore = df.mean().sort_values(ascending=False)[:10]
for p,m,i in zip(highscore.index, round(highscore,3),range(len(highscore))):
    print(p,m)
    plt.text(s=m,x=1.45,y=.85-i*0.08, ha='left',
             size=30,transform = ax.transAxes)
    plt.text(s=p,x=1.4,y=.85-i*0.08, ha='right',
             size=30,transform = ax.transAxes)

plt.savefig('plots/CCLE_PE2_rank.png',
            format='png', dpi=600, bbox_inches='tight')
data.to_csv('source_data/Fig S4a (PE2 rank plot).csv')

#%%
# Heatmap of the top-10 PE2 proteins across all cell lines (no clustering,
# clustermap is used only for its layout/colorbar handling).
cmap = sns.diverging_palette(9, 255, as_cmap=True)
g = sns.clustermap(data=df[highscore.index].T,
                   cmap=cmap,
                   vmin=0,vmax=2,center=0,
                   figsize=[12,10],
                   xticklabels=False,yticklabels=True,
                   dendrogram_ratio=0.1,
                   row_cluster=False,col_cluster=False,
                   cbar_kws={"orientation": "horizontal", 'aspect':50},
                   )
ax = g.ax_heatmap
ax.set_xlabel('cell lines',size=40,labelpad=10)
g.cax.set_position([.45, -0.08, .3, .02])
ax.text(x=0.3,y=-0.2,s='PROSE score',ha='center',size=40, transform = ax.transAxes)
g.savefig('plots/CCLE_PE2_heatmap.png',
          format='png', dpi=600, bbox_inches='tight')
df[highscore.index].T.to_csv('source_data/Fig S4b (PE2 matrix).csv')
| [
"matplotlib.pyplot.text",
"matplotlib.pyplot.savefig",
"seaborn.despine",
"matplotlib.pyplot.ylabel",
"pandas.read_csv",
"seaborn.clustermap",
"matplotlib.pyplot.xlabel",
"seaborn.diverging_palette",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.subplots",
"glob.glob"
] | [((689, 728), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""arial"""', 'size': '(40)'}), "('font', family='arial', size=40)\n", (695, 728), True, 'import matplotlib.pyplot as plt\n'), ((728, 758), 'matplotlib.pyplot.rc', 'plt.rc', (['"""hatch"""'], {'linewidth': '(2.0)'}), "('hatch', linewidth=2.0)\n", (734, 758), True, 'import matplotlib.pyplot as plt\n'), ((898, 930), 'glob.glob', 'glob.glob', (['"""databases/nextprot*"""'], {}), "('databases/nextprot*')\n", (907, 930), False, 'import glob\n'), ((1267, 1296), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '[10, 9]'}), '(figsize=[10, 9])\n', (1279, 1296), True, 'import matplotlib.pyplot as plt\n'), ((1447, 1460), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (1458, 1460), True, 'import seaborn as sns\n'), ((1464, 1507), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean PROSE score"""'], {'labelpad': '(10)'}), "('Mean PROSE score', labelpad=10)\n", (1474, 1507), True, 'import matplotlib.pyplot as plt\n'), ((1509, 1527), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""rank"""'], {}), "('rank')\n", (1519, 1527), True, 'import matplotlib.pyplot as plt\n'), ((1531, 1632), 'matplotlib.pyplot.text', 'plt.text', ([], {'s': '"""Protein"""', 'x': '(1.4)', 'y': '(0.95)', 'size': '(30)', 'ha': '"""right"""', 'transform': 'ax.transAxes', 'weight': '"""bold"""'}), "(s='Protein', x=1.4, y=0.95, size=30, ha='right', transform=ax.\n transAxes, weight='bold')\n", (1539, 1632), True, 'import matplotlib.pyplot as plt\n'), ((1625, 1724), 'matplotlib.pyplot.text', 'plt.text', ([], {'s': '"""Score"""', 'x': '(1.45)', 'y': '(0.95)', 'size': '(30)', 'ha': '"""left"""', 'transform': 'ax.transAxes', 'weight': '"""bold"""'}), "(s='Score', x=1.45, y=0.95, size=30, ha='left', transform=ax.\n transAxes, weight='bold')\n", (1633, 1724), True, 'import matplotlib.pyplot as plt\n'), ((2075, 2162), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots/CCLE_PE2_rank.png"""'], {'format': '"""png"""', 
'dpi': '(600)', 'bbox_inches': '"""tight"""'}), "('plots/CCLE_PE2_rank.png', format='png', dpi=600, bbox_inches=\n 'tight')\n", (2086, 2162), True, 'import matplotlib.pyplot as plt\n'), ((2255, 2298), 'seaborn.diverging_palette', 'sns.diverging_palette', (['(9)', '(255)'], {'as_cmap': '(True)'}), '(9, 255, as_cmap=True)\n', (2276, 2298), True, 'import seaborn as sns\n'), ((2306, 2567), 'seaborn.clustermap', 'sns.clustermap', ([], {'data': 'df[highscore.index].T', 'cmap': 'cmap', 'vmin': '(0)', 'vmax': '(2)', 'center': '(0)', 'figsize': '[12, 10]', 'xticklabels': '(False)', 'yticklabels': '(True)', 'dendrogram_ratio': '(0.1)', 'row_cluster': '(False)', 'col_cluster': '(False)', 'cbar_kws': "{'orientation': 'horizontal', 'aspect': 50}"}), "(data=df[highscore.index].T, cmap=cmap, vmin=0, vmax=2,\n center=0, figsize=[12, 10], xticklabels=False, yticklabels=True,\n dendrogram_ratio=0.1, row_cluster=False, col_cluster=False, cbar_kws={\n 'orientation': 'horizontal', 'aspect': 50})\n", (2320, 2567), True, 'import seaborn as sns\n'), ((996, 1010), 'pandas.read_csv', 'pd.read_csv', (['i'], {}), '(i)\n', (1007, 1010), True, 'import pandas as pd\n'), ((1881, 1970), 'matplotlib.pyplot.text', 'plt.text', ([], {'s': 'm', 'x': '(1.45)', 'y': '(0.85 - i * 0.08)', 'ha': '"""left"""', 'size': '(30)', 'transform': 'ax.transAxes'}), "(s=m, x=1.45, y=0.85 - i * 0.08, ha='left', size=30, transform=ax.\n transAxes)\n", (1889, 1970), True, 'import matplotlib.pyplot as plt\n'), ((1978, 2067), 'matplotlib.pyplot.text', 'plt.text', ([], {'s': 'p', 'x': '(1.4)', 'y': '(0.85 - i * 0.08)', 'ha': '"""right"""', 'size': '(30)', 'transform': 'ax.transAxes'}), "(s=p, x=1.4, y=0.85 - i * 0.08, ha='right', size=30, transform=ax.\n transAxes)\n", (1986, 2067), True, 'import matplotlib.pyplot as plt\n'), ((800, 857), 'pandas.read_csv', 'pd.read_csv', (['"""ccle/ccle_prose_formatted.tsv.gz"""'], {'sep': '"""\t"""'}), "('ccle/ccle_prose_formatted.tsv.gz', sep='\\t')\n", (811, 857), True, 'import pandas 
as pd\n')] |
import argparse
import os
import glob
import pandas as pd
from libraryTools import imageRegionOfInterest
#filename,width,height,class,xmin,ymin,xmax,ymax
#20170730_132530-(F00000).jpeg,576,1024,sinaleira,221,396,246,437
valid_images = [".jpg",".gif",".png",".tga",".jpeg"]
def run(image_path, classNameList = ["someclass"], searchSubdir = False):
    """Count labelled bounding boxes per class under *image_path* and print a summary.

    Resets the module-level accumulators, delegates the walk to
    searchFolder(), then reports totals.

    NOTE(review): `classNameList` uses a mutable default argument and is only
    passed through to searchFolder(), which does not appear to read it —
    confirm before removing.
    """
    # Reset the shared accumulators mutated by searchFolder().
    global classes_qtd
    global images_total_qtd
    global images_without_classes_qtd
    global xml_list
    classes_qtd = []
    images_total_qtd = 0
    images_without_classes_qtd = 0
    xml_list = []
    searchFolder(image_path, classNameList, searchSubdir)
    print()
    print('Total Images: ', images_total_qtd)
    print('Images without classes: ', images_without_classes_qtd)
    print('Classes: ')
    for q in classes_qtd:
        print( q)
def searchFolder(image_path, classNameList, searchSubdir):
    """Accumulate per-class box counts for every image directly in *image_path*.

    Recurses into sub-directories when searchSubdir is True. Updates the
    module-level counters initialised by run().
    """
    global valid_images
    global classes_qtd
    global images_total_qtd
    global images_without_classes_qtd
    global xml_list
    print("Folder", image_path)
    obj = imageRegionOfInterest(image_path)
    for filename in os.listdir(image_path):
        if searchSubdir and os.path.isdir(os.path.join(image_path, filename)):
            searchFolder(os.path.join(image_path, filename), classNameList, searchSubdir)
        # Skip anything that is not an image (extension check is case-insensitive).
        name, ext = os.path.splitext(filename)
        if ext.lower() not in valid_images:
            continue
        print(filename)
        images_total_qtd = images_total_qtd + 1
        obj.setFileImage(filename)
        points = obj.loadBoxFromTxt()
        if len(points)>0:
            for point in points:
                # point[4] is used as the class index of this box.
                iclass = int(point[4])
                # Grow the per-class counter list on demand.
                while len(classes_qtd) < iclass+1:
                    classes_qtd.append(0)
                classes_qtd[iclass] = classes_qtd[iclass] + 1
        else:
            images_without_classes_qtd = images_without_classes_qtd + 1
    return
#=============================================================================
# construct the argument parser and parse the arguments
# CLI: -p/--path is required; -className takes optional class names;
# -s/--subdir enables recursion into sub-folders.
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--path", required=True, help="images path")
ap.add_argument('-className', nargs='*', help='class name list (0..9 positions, max 10), e.g. -classes dog cat')
ap.add_argument('-s', '--subdir', action='store_true', help="Search sub folders")
args = vars(ap.parse_args())
run(args["path"], args["className"], args["subdir"]) | [
"os.listdir",
"argparse.ArgumentParser",
"libraryTools.imageRegionOfInterest",
"os.path.join",
"os.path.splitext"
] | [((2060, 2085), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2083, 2085), False, 'import argparse\n'), ((1051, 1084), 'libraryTools.imageRegionOfInterest', 'imageRegionOfInterest', (['image_path'], {}), '(image_path)\n', (1072, 1084), False, 'from libraryTools import imageRegionOfInterest\n'), ((1105, 1127), 'os.listdir', 'os.listdir', (['image_path'], {}), '(image_path)\n', (1115, 1127), False, 'import os\n'), ((1319, 1345), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1335, 1345), False, 'import os\n'), ((1171, 1205), 'os.path.join', 'os.path.join', (['image_path', 'filename'], {}), '(image_path, filename)\n', (1183, 1205), False, 'import os\n'), ((1233, 1267), 'os.path.join', 'os.path.join', (['image_path', 'filename'], {}), '(image_path, filename)\n', (1245, 1267), False, 'import os\n')] |
# coding=utf-8
import serial
import time
import os
KERNEL_PATH = './kernel9.img'
def serial_w(content):
    """Write *content* to the global serial port, then pause one second.

    Accepts str or bytes. Fix: pyserial's ``Serial.write`` requires ``bytes``
    on Python 3, so text commands (e.g. ``'loadimg\\r'``) are encoded before
    sending; the kernel image, already read as ``bytes``, passes through
    unchanged.
    """
    if isinstance(content, str):
        content = content.encode()
    ser.write(content)
    # Give the receiving end time to process before the next command.
    time.sleep(1)
# NOTE(review): only port1 (a pseudo-terminal) is opened below; port2 is
# defined but unused.
port1 = '/dev/pts/4'
port2 = '/dev/ttyUSB0'
if __name__ == "__main__":
    ser = serial.Serial(port=port1, baudrate=115200)
    # NOTE(review): kernel_size is computed but never used; k_size below is
    # the value actually transmitted.
    kernel_size = os.path.getsize(KERNEL_PATH)
    with open(KERNEL_PATH, 'rb') as kernel_f:
        # cmd: ask the receiver to accept an image upload
        serial_w('loadimg\r')
        # addr: load address sent as text - presumably hex 0x90000; confirm protocol
        serial_w('90000\r')
        # kernel size in bytes, sent as decimal text
        k_size = os.path.getsize(KERNEL_PATH)
        serial_w(str(k_size)+'\r')
        # while k_size > 0:
        #     words = kernel_f.read(0x400)
        #     k_size -= ser.write(words)
        #     # k_size -= words
        # Send the whole image in one write.
        words = kernel_f.read()
        serial_w(words)
        serial_w('F\r')
        print('hi')
        # Redundant: the with-statement already closes the file.
        kernel_f.close()
| [
"os.path.getsize",
"serial.Serial",
"time.sleep"
] | [((134, 147), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (144, 147), False, 'import time\n'), ((231, 273), 'serial.Serial', 'serial.Serial', ([], {'port': 'port1', 'baudrate': '(115200)'}), '(port=port1, baudrate=115200)\n', (244, 273), False, 'import serial\n'), ((292, 320), 'os.path.getsize', 'os.path.getsize', (['KERNEL_PATH'], {}), '(KERNEL_PATH)\n', (307, 320), False, 'import os\n'), ((512, 540), 'os.path.getsize', 'os.path.getsize', (['KERNEL_PATH'], {}), '(KERNEL_PATH)\n', (527, 540), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# -----------
# SPDX-License-Identifier: MIT
# Copyright (c) 2021 <NAME>
# uuid : 633f2088-bbe3-11eb-b9c2-33be0bb8451e
# author: <NAME>
# email : <EMAIL>
# date : 2021-05-23
# -----------
"""
The `repair` command has access to tools that can repair various
problems that could occur.
- bad-links
- relative links that don't point to the correct file
- section attributes
- ATX headers that are missing links
--dry-run
"""
# ------------
# System Modules - Included with Python
import hashlib
from pathlib import Path
from datetime import datetime
from difflib import get_close_matches
# ------------
# 3rd Party - From pip
import click
from rich.console import Console
console = Console()
# ------------
# Custom Modules
from ..documentos.common import (
relative_path,
search,
)
from ..documentos.document import (
MarkdownDocument,
search as md_search,
document_lookup,
)
from ..documentos.markdown_classifiers import MarkdownAttributeSyntax
# -------------
def find_broken_urls(
    parent=None,
    links=None,
):
    """
    Return the subset of relative links that do not resolve to an existing
    file. Works for images as well as relative links to markdown files.

    # Parameters

    parent:Path
        - The folder against which each relative URL is resolved.

    links:list(tuple)
        - Tuples of (line number, dict) where the dict carries at least a
          'url' key holding the relative URL.

    # Return

    The (line number, dict) tuples whose URL has no file on disk.
    """

    def _is_dead(link):
        # Strip any '#section' anchor before resolving against the parent.
        target, _, _ = link[1]["url"].partition("#")
        return not (parent / target).resolve().exists()

    return [link for link in links if _is_dead(link)]
def classify_broken_urls(
    lookup=None,
    broken_urls=None,
):
    """
    Sort broken relative URLs into repair categories based on file-name
    matches found elsewhere in the document tree.

    # Parameters

    lookup:dict
        - Keyed by file name, mapped to the list of MarkdownDocument objects
          sharing that name (differing only by path).

    broken_urls:list
        - Tuples of (line number, dict) where the dict carries at least a
          'url' key holding the broken relative URL.

    # Return

    A dict with keys:

    - 'exact_match'   - exactly one document has the referenced file name
    - 'exact_matches' - several documents share the referenced file name
    - 'suggestions'   - no exact name match, but close names exist
    - 'no_matches'    - nothing matches and nothing is close

    Each value is a list of (broken-url tuple, list of candidate documents);
    'no_matches' entries carry an empty candidate list.
    """
    classified = {
        "no_matches": [],
        "suggestions": [],
        "exact_match": [],
        "exact_matches": [],
    }

    for broken in broken_urls:
        _, url_info = broken
        # Only the path portion matters; drop any '#section' anchor.
        target, _, _ = url_info["url"].partition("#")
        name = Path(target).name

        if name in lookup:
            candidates = list(lookup[name])
            bucket = "exact_match" if len(candidates) == 1 else "exact_matches"
            classified[bucket].append((broken, candidates))
            continue

        # https://docs.python.org/3/library/difflib.html#difflib.get_close_matches
        close_names = get_close_matches(name, lookup.keys(), cutoff=0.8)
        if close_names:
            candidates = [doc for close in close_names for doc in lookup[close]]
            classified["suggestions"].append((broken, candidates))
        else:
            # No file match and no suggestions - a dead end :(
            classified["no_matches"].append((broken, []))

    return classified
def display_classified_url(results, root=None):
    """
    Print each problem link alongside its numbered candidate replacements.

    # Parameters

    results:list
        - Pairs of (MarkdownDocument, problems) where problems is a list of
          ((line number, url dict), candidate documents) tuples.

    root:Path
        - Document-folder root used to shorten the displayed paths.
    """
    for md, problems in results:
        md_relative = md.filename.relative_to(root)
        for (line, url), matches in problems:
            console.print(f"File: {md_relative}")
            console.print(f'Line: {line} -> `{url["full"]}`')
            for rank, candidate in enumerate(matches, start=1):
                console.print(f"{rank}. -> {candidate.filename.relative_to(root)}")
            console.print("")
def write_corrected_url(
    md=None,
    problems=None,
    root=None,
    dry_run=False,
):
    """
    Rewrite each broken URL in *md* with its matched target and save the
    document (unless dry_run).

    # Parameters

    md:MarkdownDocument
        - The document we need to correct the URLs

    problems:list(tuple, list)
        - tuple - (line number, url dict), same shape as in the broken_urls list
        - list - candidate MarkdownDocument (or Path) objects; only the
          first entry is used

    root:Path
        - The path to the root of the document folder

    dry_run:bool
        - If True, report the replacements without writing the file
    """

    console.print(f"File: {md.filename.relative_to(root)}")

    for defect, matches in problems:
        line, url = defect

        # The candidate list may hold MarkdownDocument objects or plain paths.
        match = (
            matches[0].filename
            if isinstance(matches[0], MarkdownDocument)
            else matches[0]
        )  # assume pathlib.Path

        # Rebuild the link relative to this document's own folder.
        new_url = relative_path(
            md.filename.parent,
            match.parent,
        ).joinpath(match.name)

        # Replace only the path portion, leaving any '#section' anchor intact.
        left, _, _ = url["url"].partition("#")

        new_line = md.contents[line].replace(left, str(new_url))

        console.print(f"Line: {line} - Replacing `{left}` -> `{new_url}`")

        md.contents[line] = new_line

    if dry_run:
        console.print("------DRY-RUN------")

    else:
        with md.filename.open("w", encoding="utf-8") as fo:
            for line in md.contents:
                fo.write(line)

        console.print("Changes written...")
def display_and_fix_issues(results, root=None, dry_run=False):
    """
    Report every category of broken link and auto-repair the
    single-exact-match ones.

    # Parameters

    results:dict
        - Keyed by 'no_matches', 'suggestions', 'exact_matches' and
          'exact_match'; each value is a list of (MarkdownDocument, problems)
          pairs as consumed by display_classified_url / write_corrected_url.
          Presumably the per-document grouping of classify_broken_urls
          output - confirm against the caller.

    root:Path
        - The path to the root of the document folder

    dry_run:bool
        - If True, report repairs without writing any files
    """

    # Banner text for each category of result.
    messages = {
        "no_matches": [
            "NO MATCHES",
            "The following files had no matches or any close matches within the system.",
        ],
        "suggestions": [
            "SUGGESTIONS",
            "The following files did not have any exact matches within the system but they had some close matches.",
        ],
        "exact_matches": [
            "EXACT MATCHES",
            "The following files have multiple exact matches within the system.",
        ],
        "exact_match": [
            "EXACT MATCHES",
            "The following files have a single, exact match within the system.",
        ],
    }

    # Display the files that had problems we can't repair automatically
    for key in (k for k in messages.keys() if k != "exact_match"):

        if results[key]:

            console.print("-" * 6)
            for msg in messages[key]:
                console.print(msg)
            console.print("")

            display_classified_url(results[key], root=root)

    # Display and repair the files we can fix
    key = "exact_match"
    if results[key]:

        console.print("-" * 6)
        for msg in messages[key]:
            console.print(msg)
        console.print("")

        for item in results[key]:
            md, problems = item

            write_corrected_url(
                md,
                problems,
                root=root,
                dry_run=dry_run,
            )

            console.print("")

        if dry_run:
            console.print(f"Exact Matches - {len(results[key])} files corrected!")

        console.print("-" * 6)
def find_missing_header_attributes(
    files=None,
    root=None,
    display_problems=False,
):
    """
    Locate ATX headers that lack a markdown attribute block.

    # Parameters

    files:list(MarkdownDocument)
        - The documents to scan for headers without attributes.

    root:Path
        - The path to the root of the document folder.

    display_problems:bool
        - If True, print each problem header as it is found.
        - Default - False

    # Return

    A dict keyed by MarkdownDocument, mapped to the list of offending
    headers, each a (line number, line text) tuple.
    """
    attribute_rule = MarkdownAttributeSyntax()

    problems = {}

    for md in files:
        # md.headers is keyed by header depth (1 to 6); each value is a list
        # of (line number, header text) tuples.
        missing = []
        for headers in md.headers.values():
            for header in headers:
                number, text = header
                if attribute_rule.match(text):
                    continue
                missing.append(header)
                if display_problems:
                    console.print(
                        f"MISSING ATTRIBUTE: `{md.filename.relative_to(root)}` - Line: {number} - `{text}`"
                    )

        if missing:
            problems[md] = missing

    return problems
def repair_header_issues(
    issues,
    root=None,
    dry_run=False,
):
    """
    Append a unique pandoc section attribute to every header that is
    missing one, optionally writing the change back to disk.

    # Parameters
    issues:dict
        - A dictionary keyed by the MarkdownDocument object with header
          issues. It is mapped to a list of tuples (line number, header
          text)
    root:Path
        - The path to the root of the document folder
    dry_run:bool
        - If true, it will not write changes
        - Default - False

    # Return
    None - the MarkdownDocument contents are modified in place and,
    unless dry_run is set, rewritten to their original files.
    """
    for md, problems in issues.items():
        console.print(f"File: {md.filename.relative_to(root)}")
        # we'll hash the file name and path using SHA256 and use the
        # first 10 hex characters. we just need something to make the
        # section header anchors unique if the document is merged into
        # a pdf - it honestly doesn't matter
        # - https://gnugat.github.io/2018/06/15/short-identifier.html
        # - https://preshing.com/20110504/hash-collision-probabilities/
        # - https://en.wikipedia.org/wiki/Birthday_attack#Mathematics
        # Using 10 characters, i.e. 10 hex numbers yields about 40 bits
        # of the 256 bits using the Birthday paradox approximation we
        # can determine how many hashes we can generate before there is
        # a 50% chance of a collision: 10 hex numbers is 10*4bits =
        # 40bits H = 2^40 p(n) = 50% = 0.5 = 1/2 n = sqrt(2 * 2^40 *
        # 1/2) = sqrt(2^40) = 1,048,576 Essentially we would need to
        # generate at least a million hashes before we expect a
        # collision with about a 50% probability.
        file_hash = (
            hashlib.sha256(str(md.filename).encode("utf-8")).hexdigest()[:10].lower()
        )
        # split the hash up into something easier to understand -
        # `xxx-xxx-xxxx`
        file_id = f"{file_hash[:3]}-{file_hash[3:6]}-{file_hash[6:]}"
        for i, item in enumerate(problems):
            line, _ = item
            # The attribute has the form `{#sec:<file id>_<index>}` so it
            # is unique across files (hash) and within a file (index).
            section_attribute = f"{{#sec:{file_id}_{i}}}"
            md.contents[line] = md.contents[line].rstrip() + " " + section_attribute
            console.print(f"Line: {line} - Added Section Attribute: `{md.contents[line]}`")
        console.print("")
        if dry_run:
            console.print("------DRY-RUN------")
        else:
            # Rewrite the whole document from the in-memory contents.
            with md.filename.open("w", encoding="utf-8") as fo:
                for line in md.contents:
                    fo.write(line)
            console.print("Changes written...")
@click.group("repair")
@click.option(
    "--dry-run",
    is_flag=True,
    help="List the changes that would be made without actually making any.",
)
@click.pass_context
def repair(*args, **kwargs):
    """
    \b
    Repair certain things within the Markdown documents. This will
    provide tools to deal with validation issues.
    # Usage
    $ docs --config=./en/config.common.yaml repair --dry-run links
    $ docs --config=./en/config.common.yaml repair links
    $ docs --config=./en/config.common.yaml repair --dry-run images
    $ docs --config=./en/config.common.yaml repair images
    $ docs --config=./en/config.common.yaml repair --dry-run headers --list
    $ docs --config=./en/config.common.yaml repair --dry-run headers
    $ docs --config=./en/config.common.yaml repair headers
    """
    # Extract the configuration file from the click context
    config = args[0].obj["cfg"]
    # Click passes the flag through kwargs; default to False when absent.
    config["dry_run"] = kwargs["dry_run"] if "dry_run" in kwargs else False
    # ----------------
    # Find all of the markdown files and lst files
    console.print("Searching for Markdown files...")
    config["md_files"] = md_search(root=config["documents.path"])
    console.print(f'{len(config["md_files"])} Markdown files were found...')
    console.print("")
    # Store the enriched config back on the context so the sub-commands
    # (links/images/headers) can read md_files and dry_run.
    args[0].obj["cfg"] = config
@repair.command("links")
@click.pass_context
def links(*args, **kwargs):
    """
    \b
    Examine all of the Markdown documents in the configuration folder.
    Determine if there are relative links that have a problem and
    attempt to fix them.
    - Only looks at Markdown Links of the form `[text](url)`
    - Only examines relative links
    - If it finds the correct file, and there is only one it can correct
      the link. If the link could be pointing to multiple files, it
      will not correct, but offer the suggestion of potential matches
    # Usage
    $ docs --config=./en/config.common.yaml repair --dry-run links
    """
    # Extract the configuration file from the click context
    config = args[0].obj["cfg"]
    build_start_time = datetime.now()
    # ------
    # Validate Markdown Files
    console.print("Processing Markdown File Links...")
    console.print("")
    # Reverse lookup from file name to candidate MarkdownDocument paths,
    # used to classify each broken URL below.
    lookup = document_lookup(config["md_files"])
    # Buckets match the keys produced by classify_broken_urls.
    results = {
        "no_matches": [],
        "suggestions": [],
        "exact_match": [],
        "exact_matches": [],
    }
    for md in config["md_files"]:
        sorted_broken_urls = classify_broken_urls(
            lookup=lookup,
            broken_urls=find_broken_urls(
                md.filename.parent,
                md.relative_links(),
            ),
        )
        # Keep only the documents that actually have entries in a bucket.
        for key in results:
            if sorted_broken_urls[key]:
                results[key].append((md, sorted_broken_urls[key]))
    display_and_fix_issues(
        results, root=config["documents.path"], dry_run=config["dry_run"]
    )
    console.print("")
    console.print("-" * 6)
    build_end_time = datetime.now()
    console.print(f"Started - {build_start_time}")
    console.print(f"Finished - {build_end_time}")
    console.print(f"Elapsed: {build_end_time - build_start_time}")
@repair.command("images")
@click.pass_context
def images(*args, **kwargs):
    """
    \b
    Examine the MarkdownDocument objects for broken relative image links
    and attempt to repair them.
    # Usage
    $ docs --config=./en/config.common.yaml repair --dry-run images
    $ docs --config=./en/config.common.yaml repair images
    """
    # Extract the configuration file from the click context
    config = args[0].obj["cfg"]
    build_start_time = datetime.now()
    # --------
    # Find the images
    images = list(
        search(
            root=config["documents.path"],
            extensions=(".png", ".gif", ".jpg", ".jpeg"),
        )
    )
    console.print(f"{len(images)} images were found...")
    console.print("")
    # 1. create a reverse look for the image names to their file paths
    #    (one name can map to several files in different folders)
    reverse_image_lookup = {}
    for img in images:
        reverse_image_lookup.setdefault(img.name, []).append(img)
    # Buckets match the keys produced by classify_broken_urls.
    results = {
        "no_matches": [],
        "suggestions": [],
        "exact_match": [],
        "exact_matches": [],
    }
    for md in config["md_files"]:
        sorted_broken_urls = classify_broken_urls(
            lookup=reverse_image_lookup,
            broken_urls=find_broken_urls(
                md.filename.parent,
                md.image_links(),
            ),
        )
        # Keep only the documents that actually have entries in a bucket.
        for key in results:
            if sorted_broken_urls[key]:
                results[key].append((md, sorted_broken_urls[key]))
    display_and_fix_issues(
        results, root=config["documents.path"], dry_run=config["dry_run"]
    )
    # ----------
    console.print("")
    console.print("-" * 6)
    build_end_time = datetime.now()
    console.print(f"Started - {build_start_time}")
    console.print(f"Finished - {build_end_time}")
    console.print(f"Elapsed: {build_end_time - build_start_time}")
@repair.command("headers")
@click.option(
    "--list",
    is_flag=True,
    help="List the problem files as they are encountered.",
)
@click.pass_context
def headers(*args, **kwargs):
    """
    \b
    Examine all the MarkdownDocument objects for ATX headers that do not
    have a proper section attribute set. It can automatically add a
    section attribute.
    # Usage
    $ docs --config=./en/config.common.yaml repair --dry-run headers --list
    $ docs --config=./en/config.common.yaml repair headers
    """
    # Extract the configuration file from the click context
    config = args[0].obj["cfg"]
    build_start_time = datetime.now()
    # ----------
    console.print("Searching for missing header attributes...")
    console.print("")
    # Maps MarkdownDocument -> list of (line number, header text) tuples.
    problems = find_missing_header_attributes(
        files=config["md_files"],
        root=config["documents.path"],
        display_problems=kwargs["list"],
    )
    if len(problems) > 0:
        console.print("-" * 6)
        console.print(
            f'{len(problems)}/{len(config["md_files"])} files have missing attributes.'
        )
        # -----------
        # Add missing header section attributes
        repair_header_issues(
            problems, root=config["documents.path"], dry_run=config["dry_run"]
        )
    # ----------
    console.print("")
    console.print("-" * 6)
    build_end_time = datetime.now()
    console.print(f"Started - {build_start_time}")
    console.print(f"Finished - {build_end_time}")
    console.print(f"Elapsed: {build_end_time - build_start_time}")
| [
"pathlib.Path",
"click.group",
"click.option",
"rich.console.Console",
"datetime.datetime.now"
] | [((750, 759), 'rich.console.Console', 'Console', ([], {}), '()\n', (757, 759), False, 'from rich.console import Console\n'), ((13372, 13393), 'click.group', 'click.group', (['"""repair"""'], {}), "('repair')\n", (13383, 13393), False, 'import click\n'), ((13395, 13512), 'click.option', 'click.option', (['"""--dry-run"""'], {'is_flag': '(True)', 'help': '"""List the changes that would be made without actually making any."""'}), "('--dry-run', is_flag=True, help=\n 'List the changes that would be made without actually making any.')\n", (13407, 13512), False, 'import click\n'), ((18412, 18509), 'click.option', 'click.option', (['"""--list"""'], {'is_flag': '(True)', 'help': '"""List the problem files as they are encountered."""'}), "('--list', is_flag=True, help=\n 'List the problem files as they are encountered.')\n", (18424, 18509), False, 'import click\n'), ((15448, 15462), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15460, 15462), False, 'from datetime import datetime\n'), ((16337, 16351), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16349, 16351), False, 'from datetime import datetime\n'), ((16986, 17000), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16998, 17000), False, 'from datetime import datetime\n'), ((18195, 18209), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (18207, 18209), False, 'from datetime import datetime\n'), ((19024, 19038), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (19036, 19038), False, 'from datetime import datetime\n'), ((19750, 19764), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (19762, 19764), False, 'from datetime import datetime\n'), ((4662, 4672), 'pathlib.Path', 'Path', (['left'], {}), '(left)\n', (4666, 4672), False, 'from pathlib import Path\n')] |
# -*- coding: utf-8 -*-
# This file is part of the Ingram Micro Cloud Blue Connect connect-cli.
# Copyright (c) 2019-2021 Ingram Micro. All Rights Reserved.
import os
import json
from datetime import datetime
from urllib import parse
import requests
from click import ClickException
from openpyxl import Workbook
from openpyxl.styles import Alignment, Font, PatternFill
from openpyxl.styles.colors import Color, WHITE
from openpyxl.utils import quote_sheetname
from openpyxl.worksheet.datavalidation import DataValidation
from tqdm import trange
from connect.cli.core.constants import DEFAULT_BAR_FORMAT
from connect.cli.core.http import (
format_http_status,
handle_http_error,
)
from connect.cli.plugins.product.constants import PARAM_TYPES
from connect.cli.plugins.product.utils import (
get_col_headers_by_ws_type,
get_col_limit_by_ws_type,
get_json_object_for_param,
)
from connect.client import ClientError, ConnectClient, R
def _setup_cover_sheet(ws, product, location, client, media_path):
    """
    Populate the 'General Information' cover sheet of the export workbook.

    :param ws: worksheet to populate (its title is set here).
    :param product: product payload as returned by the Connect API.
    :param location: base URL used to resolve the relative icon path.
    :param client: Connect API client, used to list available categories.
    :param media_path: local folder where the product icon is downloaded.
    """
    ws.title = 'General Information'
    ws.column_dimensions['A'].width = 50
    ws.column_dimensions['B'].width = 180
    # Banner row spanning both columns.
    ws.merge_cells('A1:B1')
    cell = ws['A1']
    cell.fill = PatternFill('solid', start_color=Color('1565C0'))
    cell.font = Font(sz=24, color=WHITE)
    cell.alignment = Alignment(horizontal='center', vertical='center')
    cell.value = 'Product information'
    # Uniform font for the label/value rows below.
    for i in range(3, 13):
        ws[f'A{i}'].font = Font(sz=12)
        ws[f'B{i}'].font = Font(sz=12)
    ws['A3'].value = 'Account ID'
    ws['B3'].value = product['owner']['id']
    ws['A4'].value = 'Account Name'
    ws['B4'].value = product['owner']['name']
    ws['A5'].value = 'Product ID'
    ws['B5'].value = product['id']
    ws['A6'].value = 'Product Name'
    ws['B6'].value = product['name']
    ws['A7'].value = 'Export datetime'
    ws['B7'].value = datetime.now().isoformat()
    ws['A8'].value = 'Product Category'
    ws['B8'].value = product['category']['name']
    ws['A9'].value = 'Product Icon file name'
    ws['A9'].font = Font(sz=14)
    # The icon is stored next to the workbook as <product id>.<extension>.
    ws['B9'].value = f'{product["id"]}.{product["icon"].split(".")[-1]}'
    _dump_image(
        f'{location}{product["icon"]}',
        f'{product["id"]}.{product["icon"].split(".")[-1]}',
        media_path,
    )
    ws['A10'].value = 'Product Short Description'
    ws['A10'].alignment = Alignment(
        horizontal='left',
        vertical='top',
    )
    ws['B10'].value = product['short_description']
    ws['B10'].alignment = Alignment(
        wrap_text=True,
    )
    ws['A11'].value = 'Product Detailed Description'
    ws['A11'].alignment = Alignment(
        horizontal='left',
        vertical='top',
    )
    ws['B11'].value = product['detailed_description']
    ws['B11'].alignment = Alignment(
        wrap_text=True,
    )
    ws['A12'].value = 'Embedding description'
    ws['B12'].value = product['customer_ui_settings']['description']
    ws['B12'].alignment = Alignment(
        wrap_text=True,
    )
    ws['A13'].value = 'Embedding getting started'
    ws['B13'].value = product['customer_ui_settings']['getting_started']
    ws['B13'].alignment = Alignment(
        wrap_text=True,
    )
    # Column AA (off-screen) holds the list of assignable categories and
    # feeds the dropdown validation attached to cell B8 below.
    categories = client.categories.all()
    unassignable_cat = ['Cloud Services', 'All Categories']
    categories_list = [
        cat['name'] for cat in categories if cat['name'] not in unassignable_cat
    ]
    ws['AA1'].value = 'Categories'
    cat_row_idx = 2
    for cat in categories_list:
        ws[f'AA{cat_row_idx}'].value = cat
        cat_row_idx += 1
    categories_validation = DataValidation(
        type='list',
        formula1=f'{quote_sheetname("General Information")}!$AA$2:$AA${len(categories_list)}',
        allow_blank=False,
    )
    ws.add_data_validation(categories_validation)
    categories_validation.add('B8')
def _dump_image(image_location, image_name, media_path, timeout=60):
    """
    Download an image and store it under ``media_path``.

    :param image_location: fully qualified URL of the image to download.
    :param image_name: file name the image is saved under.
    :param media_path: directory where the image file is written.
    :param timeout: seconds to wait for the HTTP response (new parameter,
        defaults to 60 so existing callers are unaffected).
    :raises ClickException: when the server does not answer with HTTP 200.
    """
    # requests has no default timeout: without one an unresponsive media
    # server would hang the whole export forever.
    image = requests.get(image_location, timeout=timeout)
    if image.status_code == 200:
        with open(os.path.join(media_path, image_name), 'wb') as f:
            f.write(image.content)
    else:
        raise ClickException(f"Error obtaining image from {image_location}")
def _setup_ws_header(ws, ws_type=None):  # noqa: CCR001
    """
    Paint and size the header row (row 1) of a worksheet.

    Every header cell gets a grey fill and the title dictated by the
    worksheet type; most columns are 25 units wide, with a few wider
    exceptions for long content (JSON blobs, template bodies, URLs...).
    """
    ws_type = ws_type or 'items'
    header_fill = PatternFill('solid', Color('d3d3d3'))
    header_titles = get_col_headers_by_ws_type(ws_type)
    last_column = get_col_limit_by_ws_type(ws_type)
    # Per-worksheet-type header titles that need more room than the
    # default column width of 25.
    wide_columns = {
        'params': {'JSON Properties': 100},
        'capabilities': {'Capability': 50},
        'static_links': {'Url': 100},
        'templates': {'Content': 100, 'Title': 50},
    }.get(ws_type, {})
    for header_cell in ws['A1': f'{last_column}1'][0]:
        dimensions = ws.column_dimensions[header_cell.column_letter]
        dimensions.auto_size = True
        header_cell.fill = header_fill
        header_cell.value = header_titles[header_cell.column_letter]
        dimensions.width = wide_columns.get(header_cell.value, 25)
def _calculate_commitment(item):
period = item.get('period')
if not period:
return '-'
commitment = item.get('commitment')
if not commitment:
return '-'
count = commitment['count']
if count == 1:
return '-'
multiplier = commitment['multiplier']
if multiplier == 'billing_period':
if period == 'monthly':
years = count // 12
return '{quantity} year{plural}'.format(
quantity=years,
plural='s' if years > 1 else '',
)
else:
return '{years} years'.format(
years=count,
)
# One-time
return '-'
def _fill_param_row(ws, row_idx, param):
    """
    Write one product parameter to row *row_idx* of a parameters sheet.

    Columns 1-11 hold plain values (column 3 is the user-editable action
    column, exported as '-'), column 12 the JSON representation of the
    parameter and columns 13-14 the audit timestamps.
    """
    # One shared style instance instead of one per cell: openpyxl style
    # objects are immutable, so reuse is safe and avoids 13 allocations.
    top_left = Alignment(horizontal='left', vertical='top')
    constraints = param['constraints']
    events = param['events']
    plain_values = (
        param['id'],
        param['name'],
        '-',  # action column, filled in by the user on import
        param['title'],
        param['description'],
        param['phase'],
        param['scope'],
        param['type'],
        constraints['required'] if constraints['required'] else '-',
        constraints['unique'] if constraints['unique'] else '-',
        constraints['hidden'] if constraints['hidden'] else '-',
    )
    for column, value in enumerate(plain_values, start=1):
        ws.cell(row_idx, column, value=value).alignment = top_left
    # The JSON blob is the only cell that wraps instead of top-left.
    ws.cell(
        row_idx, 12,
        value=get_json_object_for_param(param),
    ).alignment = Alignment(
        wrap_text=True,
    )
    ws.cell(row_idx, 13, value=events['created']['at']).alignment = top_left
    ws.cell(
        row_idx, 14, value=events.get('updated', {}).get('at'),
    ).alignment = top_left
def _fill_media_row(ws, row_idx, media, location, product, media_path):
    """
    Write one media item to row *row_idx* and download its thumbnail
    into ``media_path`` as ``<media id>.<extension>``.
    """
    thumbnail_name = f'{media["id"]}.{media["thumbnail"].split(".")[-1]}'
    for column, value in enumerate(
        (media['position'], media['id'], '-', media['type'], thumbnail_name),
        start=1,
    ):
        ws.cell(row_idx, column, value=value)
    _dump_image(
        f'{location}{media["thumbnail"]}',
        thumbnail_name,
        media_path,
    )
    # Only videos carry a meaningful URL; images are stored locally.
    ws.cell(row_idx, 6, value='-' if media['type'] == 'image' else media['url'])
def _fill_template_row(ws, row_idx, template):
    """
    Write one template to row *row_idx* of the templates sheet.

    All cells are top-left aligned except the template body (column 6),
    which wraps so multi-line content stays readable.
    """
    top_left = Alignment(horizontal='left', vertical='top')
    wrapped = Alignment(wrap_text=True)
    cells = (
        (1, template['id'], top_left),
        (2, template['title'], top_left),
        (3, '-', top_left),
        (4, template['scope'], top_left),
        # Templates without an explicit type default to 'fulfillment'.
        (5, template.get('type', 'fulfillment'), top_left),
        (6, template['body'], wrapped),
        (7, template['events']['created']['at'], top_left),
        (8, template['events'].get('updated', {}).get('at'), top_left),
    )
    for column, value, alignment in cells:
        ws.cell(row_idx, column, value=value).alignment = alignment
def _fill_action_row(ws, row_idx, action):
ws.cell(row_idx, 1, value=action['id'])
ws.cell(row_idx, 2, value=action['action'])
ws.cell(row_idx, 3, value='-')
ws.cell(row_idx, 4, value=action['name'])
ws.cell(row_idx, 5, value=action['title'])
ws.cell(row_idx, 6, value=action['description'])
ws.cell(row_idx, 7, value=action['scope'])
ws.cell(row_idx, 8, value=action['events']['created']['at'])
ws.cell(row_idx, 9, value=action['events'].get('updated', {}).get('at'))
def _fill_configuration_row(ws, row_idx, configuration, conf_id):
ws.cell(row_idx, 1, value=conf_id)
ws.cell(row_idx, 2, value=configuration['parameter']['id'])
ws.cell(row_idx, 3, value=configuration['parameter']['scope'])
ws.cell(row_idx, 4, value='-')
ws.cell(row_idx, 5, value=configuration['item']['id'] if 'item' in configuration else '-')
ws.cell(row_idx, 6, value=configuration['item']['name'] if 'item' in configuration else '-')
ws.cell(row_idx, 7, value=configuration['marketplace']['id'] if 'marketplace' in configuration else '-')
ws.cell(row_idx, 8,
value=configuration['marketplace']['name'] if 'marketplace' in configuration else '-')
if 'structured_value' in configuration:
value = configuration['structured_value']
value = json.dumps(value, indent=4, sort_keys=True)
ws.cell(row_idx, 9, value=value).alignment = Alignment(wrap_text=True)
elif 'value' in configuration:
ws.cell(row_idx, 9, value=configuration['value'])
else:
ws.cell(row_idx, 9, value='-')
def _fill_item_row(ws, row_idx, item):
    """
    Write one product item to row *row_idx* of the items sheet.
    """
    # Legacy period values such as 'years_3' are rendered as '3 years';
    # items without a period default to 'monthly'.
    period = item.get('period', 'monthly')
    if period.startswith('years_'):
        period = f'{period.rsplit("_")[-1]} years'
    values = (
        item['id'],
        item['mpn'],
        '-',  # action column, filled in by the user on import
        item['display_name'],
        item['description'],
        item['type'],
        item['precision'],
        item['unit']['unit'],
        period,
        _calculate_commitment(item),
        item['status'],
        item['events']['created']['at'],
        item['events'].get('updated', {}).get('at'),
    )
    for column, value in enumerate(values, start=1):
        ws.cell(row_idx, column, value=value)
def _calculate_configuration_id(configuration):
conf_id = configuration['parameter']['id']
if 'item' in configuration and 'id' in configuration['item']:
conf_id = f'{conf_id}#{configuration["item"]["id"]}'
else:
conf_id = f'{conf_id}#'
if 'marketplace' in configuration and 'id' in configuration['marketplace']:
conf_id = f'{conf_id}#{configuration["marketplace"]["id"]}'
else:
conf_id = f'{conf_id}#'
return conf_id
def _dump_actions(ws, client, product_id, silent):
    """
    Export every action of *product_id* into the actions worksheet,
    attaching dropdown validations for the action and scope columns.
    Does nothing beyond the header when the product has no actions.
    """
    _setup_ws_header(ws, 'actions')
    actions = client.products[product_id].actions.all()
    total = actions.count()
    action_validation = DataValidation(
        type='list',
        formula1='"-,create,update,delete"',
        allow_blank=False,
    )
    scope_validation = DataValidation(
        type='list',
        formula1='"asset,tier1,tier2"',
        allow_blank=False,
    )
    if total == 0:
        return
    ws.add_data_validation(action_validation)
    ws.add_data_validation(scope_validation)
    progress = trange(0, total, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)
    for row, action in enumerate(actions, start=2):
        progress.set_description(f'Processing action {action["id"]}')
        progress.update(1)
        _fill_action_row(ws, row, action)
        action_validation.add(f'C{row}')
        scope_validation.add(f'G{row}')
    progress.close()
    print()
def _dump_configuration(ws, client, product_id, silent):
    """
    Export every parameter configuration of *product_id* into the
    configurations worksheet, with a dropdown validation on the action
    column. Does nothing beyond the header when there are none.
    """
    _setup_ws_header(ws, 'configurations')
    configurations = client.products[product_id].configurations.all()
    total = configurations.count()
    action_validation = DataValidation(
        type='list',
        formula1='"-,update,delete"',
        allow_blank=False,
    )
    if not total:
        return
    ws.add_data_validation(action_validation)
    progress = trange(0, total, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)
    for row, configuration in enumerate(configurations, start=2):
        conf_id = _calculate_configuration_id(configuration)
        progress.set_description(f'Processing parameter configuration {conf_id}')
        progress.update(1)
        _fill_configuration_row(ws, row, configuration, conf_id)
        action_validation.add(f'D{row}')
    progress.close()
    print()
def _dump_parameters(ws, client, product_id, param_type, silent):
    """
    Export the parameters of *product_id* that belong to phase
    *param_type* ('ordering', 'fulfillment' or 'configuration') into the
    given worksheet, attaching dropdown validations per column.
    """
    _setup_ws_header(ws, 'params')
    # Server-side filter: only parameters of the requested phase.
    rql = R().phase.eq(param_type)
    row_idx = 2
    params = client.products[product_id].parameters.filter(rql)
    count = params.count()
    if count == 0:
        # Product without params is strange, but may exist
        return
    action_validation = DataValidation(
        type='list',
        formula1='"-,create,update,delete"',
        allow_blank=False,
    )
    type_validation = DataValidation(
        type='list',
        formula1='"{params}"'.format(
            params=','.join(PARAM_TYPES),
        ),
        allow_blank=False,
    )
    # The scope dropdown differs by phase: configuration parameters use
    # product/marketplace/item scopes, the others asset/tier scopes.
    ordering_fulfillment_scope_validation = DataValidation(
        type='list',
        formula1='"asset,tier1,tier2"',
        allow_blank=False,
    )
    configuration_scope_validation = DataValidation(
        type='list',
        formula1='"product,marketplace,item,item_marketplace"',
        allow_blank=False,
    )
    bool_validation = DataValidation(
        type='list',
        formula1='"True,-"',
        allow_blank=False,
    )
    ws.add_data_validation(action_validation)
    ws.add_data_validation(type_validation)
    ws.add_data_validation(ordering_fulfillment_scope_validation)
    ws.add_data_validation(configuration_scope_validation)
    ws.add_data_validation(bool_validation)
    progress = trange(0, count, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)
    for param in params:
        progress.set_description(f'Processing {param_type} parameter {param["id"]}')
        progress.update(1)
        _fill_param_row(ws, row_idx, param)
        action_validation.add(f'C{row_idx}')
        # Column G (scope) gets the phase-appropriate dropdown.
        if param['phase'] == 'configuration':
            configuration_scope_validation.add(f'G{row_idx}')
        else:
            ordering_fulfillment_scope_validation.add(f'G{row_idx}')
        type_validation.add(f'H{row_idx}')
        # Columns I/J/K: required/unique/hidden constraint flags.
        bool_validation.add(f'I{row_idx}')
        bool_validation.add(f'J{row_idx}')
        bool_validation.add(f'K{row_idx}')
        row_idx += 1
    progress.close()
    print()
def _dump_media(ws, client, product_id, silent, media_location, media_path):
    """
    Export the media gallery of *product_id* into the media worksheet,
    downloading each thumbnail into ``media_path``. Does nothing beyond
    the header when the product has no media.
    """
    _setup_ws_header(ws, 'media')
    medias = client.products[product_id].media.all()
    total = medias.count()
    action_validation = DataValidation(
        type='list',
        formula1='"-,create,update,delete"',
        allow_blank=False,
    )
    type_validation = DataValidation(
        type='list',
        formula1='"image,video"',
        allow_blank=False,
    )
    if total == 0:
        return
    ws.add_data_validation(action_validation)
    ws.add_data_validation(type_validation)
    progress = trange(0, total, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)
    for row, media in enumerate(medias, start=2):
        progress.set_description(f'Processing media {media["id"]}')
        progress.update(1)
        _fill_media_row(ws, row, media, media_location, product_id, media_path)
        action_validation.add(f'C{row}')
        type_validation.add(f'D{row}')
    progress.close()
    print()
def _dump_external_static_links(ws, product, silent):
    """
    Export the product's embedding static resources (download links and
    documentation links) into the given worksheet. Does nothing beyond
    the header when the product defines neither kind of link.
    """
    _setup_ws_header(ws, 'static_links')
    row_idx = 2
    # Both link collections share one worksheet and one progress bar.
    count = len(product['customer_ui_settings']['download_links'])
    count = count + len(product['customer_ui_settings']['documents'])
    action_validation = DataValidation(
        type='list',
        formula1='"-,create,delete"',
        allow_blank=False,
    )
    link_type = DataValidation(
        type='list',
        formula1='"Download,Documentation"',
        allow_blank=False,
    )
    if count > 0:
        ws.add_data_validation(action_validation)
        ws.add_data_validation(link_type)
        progress = trange(0, count, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)
        progress.set_description("Processing static links")
        # Download links first...
        for link in product['customer_ui_settings']['download_links']:
            progress.update(1)
            ws.cell(row_idx, 1, value='Download')
            ws.cell(row_idx, 2, value=link['title'])
            ws.cell(row_idx, 3, value='-')
            ws.cell(row_idx, 4, value=link['url'])
            action_validation.add(f'C{row_idx}')
            link_type.add(f'A{row_idx}')
            row_idx += 1
        # ...then documentation links, continuing on the next free row.
        for link in product['customer_ui_settings']['documents']:
            progress.update(1)
            ws.cell(row_idx, 1, value='Documentation')
            ws.cell(row_idx, 2, value=link['title'])
            ws.cell(row_idx, 3, value='-')
            ws.cell(row_idx, 4, value=link['url'])
            action_validation.add(f'C{row_idx}')
            link_type.add(f'A{row_idx}')
            row_idx += 1
        progress.close()
        print()
def _dump_capabilities(ws, product, silent):  # noqa: CCR001
    """
    Export the product capabilities matrix into the given worksheet.

    Rows 2-10 each hold one capability: column A the name, column B the
    user-editable action ('-'/'update'), column C the current value,
    constrained by a capability-specific dropdown validation.
    """
    _setup_ws_header(ws, 'capabilities')
    # A single-step bar: this sheet is built from the already-fetched
    # product payload, no extra API calls.
    progress = trange(0, 1, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)
    progress.set_description("Processing product capabilities")
    ppu = product['capabilities']['ppu']
    capabilities = product['capabilities']
    tiers = capabilities['tiers']
    action_validation = DataValidation(
        type='list',
        formula1='"-,update"',
        allow_blank=False,
    )
    ppu_validation = DataValidation(
        type='list',
        formula1='"Disabled,QT,TR,PR"',
        allow_blank=False,
    )
    disabled_enabled = DataValidation(
        type='list',
        formula1='"Disabled,Enabled"',
        allow_blank=False,
    )
    tier_validation = DataValidation(
        type='list',
        formula1='"Disabled,1,2"',
        allow_blank=False,
    )
    ws.add_data_validation(action_validation)
    ws.add_data_validation(ppu_validation)
    ws.add_data_validation(disabled_enabled)
    ws.add_data_validation(tier_validation)
    ws['A2'].value = 'Pay-as-you-go support and schema'
    ws['B2'].value = '-'
    ws['C2'].value = (ppu['schema'] if ppu else 'Disabled')
    ppu_validation.add(ws['C2'])
    ws['A3'].value = 'Pay-as-you-go dynamic items support'
    ws['B3'].value = '-'
    ws['C3'].value = (
        'Enabled' if ppu and 'dynamic' in ppu and ppu['dynamic'] else 'Disabled'
    )
    disabled_enabled.add(ws['C3'])
    ws['A4'].value = 'Pay-as-you-go future charges support'
    ws['B4'].value = '-'
    ws['C4'].value = (
        'Enabled' if ppu and 'future' in ppu and ppu['future'] else 'Disabled'
    )
    disabled_enabled.add(ws['C4'])
    ws['A5'].value = 'Consumption reporting for Reservation Items'
    ws['B5'].value = '-'
    # NOTE(review): the progress bar is updated and closed here, yet the
    # function keeps writing rows below and calls progress.update(1)
    # again on its last line — looks unintentional; confirm before
    # reordering.
    progress.update(1)
    progress.close()
    print()
    def _get_reporting_consumption(reservation_cap):
        # 'Enabled' only when the key exists AND is truthy.
        if 'consumption' in reservation_cap and reservation_cap['consumption']:
            return 'Enabled'
        return 'Disabled'
    ws['C5'].value = _get_reporting_consumption(capabilities['reservation'])
    disabled_enabled.add(ws['C5'])
    ws['A6'].value = 'Dynamic Validation of the Draft Requests'
    ws['B6'].value = '-'
    def _get_dynamic_validation_draft(capabilities_cart):
        # NOTE(review): reads capabilities['cart'] from the enclosing
        # scope instead of its own argument; equivalent today because the
        # only caller passes capabilities['cart'], but fragile.
        if 'validation' in capabilities_cart and capabilities['cart']['validation']:
            return 'Enabled'
        return 'Disabled'
    ws['C6'].value = _get_dynamic_validation_draft(capabilities['cart'])
    disabled_enabled.add(ws['C6'])
    ws['A7'].value = 'Dynamic Validation of the Inquiring Form'
    ws['B7'].value = '-'
    def _get_validation_inquiring(capabilities_inquiring):
        if 'validation' in capabilities_inquiring and capabilities_inquiring['validation']:
            return 'Enabled'
        return 'Disabled'
    ws['C7'].value = _get_validation_inquiring(capabilities['inquiring'])
    disabled_enabled.add(ws['C7'])
    ws['A8'].value = 'Reseller Authorization Level'
    ws['B8'].value = '-'
    def _get_reseller_authorization_level(tiers):
        if tiers and 'configs' in tiers and tiers['configs']:
            return tiers['configs']['level']
        return 'Disabled'
    ws['C8'].value = _get_reseller_authorization_level(tiers)
    tier_validation.add(ws['C8'])
    ws['A9'].value = 'Tier Accounts Sync'
    ws['B9'].value = '-'
    ws['C9'].value = (
        'Enabled' if tiers and 'updates' in tiers and tiers['updates'] else 'Disabled'
    )
    disabled_enabled.add(ws['C9'])
    ws['A10'].value = 'Administrative Hold'
    ws['B10'].value = '-'
    def _get_administrative_hold(capabilities):
        if 'hold' in capabilities['subscription'] and capabilities['subscription']['hold']:
            return 'Enabled'
        return 'Disabled'
    ws['C10'].value = _get_administrative_hold(capabilities)
    disabled_enabled.add(ws['C10'])
    # Attach the action dropdown to column B of every capability row.
    idx = 2
    while idx < 11:
        action_validation.add(f'B{idx}')
        idx = idx + 1
    progress.update(1)
def _dump_templates(ws, client, product_id, silent):
    """
    Export every template of *product_id* into the templates worksheet,
    attaching dropdown validations for the action, scope and type
    columns. Does nothing beyond the header when there are no templates.
    """
    _setup_ws_header(ws, 'templates')
    action_validation = DataValidation(
        type='list',
        formula1='"-,create,update,delete"',
        allow_blank=False,
    )
    scope_validation = DataValidation(
        type='list',
        formula1='"asset,tier1,tier2"',
        allow_blank=False,
    )
    type_validation = DataValidation(
        type='list',
        formula1='"fulfillment,inquire"',
        allow_blank=False,
    )
    templates = client.products[product_id].templates.all()
    total = templates.count()
    if total == 0:
        return
    for validation in (action_validation, scope_validation, type_validation):
        ws.add_data_validation(validation)
    progress = trange(0, total, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)
    for row, template in enumerate(templates, start=2):
        progress.set_description(f'Processing template {template["id"]}')
        progress.update(1)
        _fill_template_row(ws, row, template)
        action_validation.add(f'C{row}')
        scope_validation.add(f'D{row}')
        type_validation.add(f'E{row}')
    progress.close()
    print()
def _dump_items(ws, client, product_id, silent):
    """
    Export every item of *product_id* into the items worksheet,
    attaching dropdown validations for the editable columns.

    :raises ClickException: when the product has no items at all.
    """
    _setup_ws_header(ws, 'items')
    row_idx = 2
    items = client.products[product_id].items.all()
    count = items.count()
    if count == 0:
        # Unlike the other sheets, a product without items is an error.
        raise ClickException(f'The product {product_id} doesn\'t have items.')
    action_validation = DataValidation(
        type='list',
        formula1='"-,create,update,delete"',
        allow_blank=False,
    )
    type_validation = DataValidation(
        type='list',
        formula1='"reservation,ppu"',
        allow_blank=False,
    )
    period_validation = DataValidation(
        type='list',
        formula1='"onetime,monthly,yearly,2 years,3 years,4 years,5 years"',
        allow_blank=False,
    )
    precision_validation = DataValidation(
        type='list',
        formula1='"integer,decimal(1),decimal(2),decimal(4),decimal(8)"',
        allow_blank=False,
    )
    commitment_validation = DataValidation(
        type='list',
        formula1='"-,1 year,2 years,3 years,4 years,5 years"',
        allow_blank=False,
    )
    ws.add_data_validation(action_validation)
    ws.add_data_validation(type_validation)
    ws.add_data_validation(period_validation)
    ws.add_data_validation(precision_validation)
    ws.add_data_validation(commitment_validation)
    progress = trange(0, count, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)
    for item in items:
        progress.set_description(f'Processing item {item["id"]}')
        progress.update(1)
        _fill_item_row(ws, row_idx, item)
        # Columns: C action, F type, G precision, I period, J commitment.
        action_validation.add(f'C{row_idx}')
        type_validation.add(f'F{row_idx}')
        precision_validation.add(f'G{row_idx}')
        period_validation.add(f'I{row_idx}')
        commitment_validation.add(f'J{row_idx}')
        row_idx += 1
    progress.close()
    print()
def _prepare_output_paths(product_id, output_file, output_path):
    """Resolve the workbook path and create the output/media directories.

    Mirrors the historical path rules: a missing *output_path* defaults to
    ``<cwd>/<product_id>``; an explicit one must already exist and gets a
    ``<product_id>`` subdirectory. Returns ``(output_file, media_path)``.

    Raises:
        ClickException: when the given output path does not exist, or when
            a regular file shadows the product directory.
    """
    if not output_path:
        output_path = os.path.join(os.getcwd(), product_id)
    else:
        if not os.path.exists(output_path):
            raise ClickException(
                "Output Path does not exist",
            )
        output_path = os.path.join(output_path, product_id)
    media_path = os.path.join(output_path, 'media')
    if not output_file:
        output_file = os.path.join(output_path, f'{product_id}.xlsx')
    else:
        output_file = os.path.join(output_path, output_file)
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    elif not os.path.isdir(output_path):
        raise ClickException(
            "Exists a file with product name but a directory is expected, please rename it",
        )
    if not os.path.exists(media_path):
        os.mkdir(media_path)
    return output_file, media_path


def dump_product(api_url, api_key, product_id, output_file, silent, output_path=None):
    """Export a Connect product definition to an Excel workbook.

    Creates (if needed) the output directory and a ``media`` subdirectory,
    then fills one worksheet per product aspect (capabilities, media,
    templates, items, parameters, actions, configuration).

    Args:
        api_url: Connect public API endpoint.
        api_key: API key used to authenticate.
        product_id: identifier of the product to export.
        output_file: workbook file name; defaults to ``<product_id>.xlsx``.
        silent: disable the progress bars when True.
        output_path: existing directory to write into; defaults to the cwd.

    Returns:
        The path of the generated workbook file.

    Raises:
        ClickException: on path problems or when the product is not found.
    """
    output_file, media_path = _prepare_output_paths(product_id, output_file, output_path)
    try:
        client = ConnectClient(
            api_key=api_key,
            endpoint=api_url,
            use_specs=False,
            max_retries=3,
        )
        product = client.products[product_id].get()
        wb = Workbook()
        # Derive the bare scheme://host media base from the API endpoint.
        connect_api_location = parse.urlparse(api_url)
        media_location = f'{connect_api_location.scheme}://{connect_api_location.netloc}'
        _setup_cover_sheet(
            wb.active,
            product,
            media_location,
            client,
            media_path,
        )
        _dump_capabilities(wb.create_sheet('Capabilities'), product, silent)
        _dump_external_static_links(wb.create_sheet('Embedding Static Resources'), product, silent)
        _dump_media(
            wb.create_sheet('Media'),
            client,
            product_id,
            silent,
            media_location,
            media_path,
        )
        _dump_templates(wb.create_sheet('Templates'), client, product_id, silent)
        _dump_items(wb.create_sheet('Items'), client, product_id, silent)
        _dump_parameters(
            wb.create_sheet('Ordering Parameters'),
            client,
            product_id,
            'ordering',
            silent,
        )
        _dump_parameters(
            wb.create_sheet('Fulfillment Parameters'),
            client,
            product_id,
            'fulfillment',
            silent,
        )
        _dump_parameters(
            wb.create_sheet('Configuration Parameters'),
            client,
            product_id,
            'configuration',
            silent,
        )
        _dump_actions(wb.create_sheet('Actions'), client, product_id, silent)
        _dump_configuration(wb.create_sheet('Configuration'), client, product_id, silent)
        wb.save(output_file)
    except ClientError as error:
        status = format_http_status(error.status_code)
        if error.status_code == 404:
            raise ClickException(f'{status}: Product {product_id} not found.')
        # Any other HTTP failure is translated by the shared helper.
        handle_http_error(error)
    return output_file
| [
"connect.cli.core.http.handle_http_error",
"click.ClickException",
"connect.client.R",
"connect.cli.plugins.product.utils.get_col_limit_by_ws_type",
"os.path.exists",
"json.dumps",
"os.path.isdir",
"os.mkdir",
"openpyxl.utils.quote_sheetname",
"connect.cli.plugins.product.utils.get_json_object_for... | [((1274, 1298), 'openpyxl.styles.Font', 'Font', ([], {'sz': '(24)', 'color': 'WHITE'}), '(sz=24, color=WHITE)\n', (1278, 1298), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((1320, 1369), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""center"""', 'vertical': '"""center"""'}), "(horizontal='center', vertical='center')\n", (1329, 1369), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((2058, 2069), 'openpyxl.styles.Font', 'Font', ([], {'sz': '(14)'}), '(sz=14)\n', (2062, 2069), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((2363, 2407), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""left"""', 'vertical': '"""top"""'}), "(horizontal='left', vertical='top')\n", (2372, 2407), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((2508, 2533), 'openpyxl.styles.Alignment', 'Alignment', ([], {'wrap_text': '(True)'}), '(wrap_text=True)\n', (2517, 2533), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((2628, 2672), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""left"""', 'vertical': '"""top"""'}), "(horizontal='left', vertical='top')\n", (2637, 2672), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((2776, 2801), 'openpyxl.styles.Alignment', 'Alignment', ([], {'wrap_text': '(True)'}), '(wrap_text=True)\n', (2785, 2801), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((2958, 2983), 'openpyxl.styles.Alignment', 'Alignment', ([], {'wrap_text': '(True)'}), '(wrap_text=True)\n', (2967, 2983), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((3148, 3173), 'openpyxl.styles.Alignment', 'Alignment', ([], {'wrap_text': '(True)'}), '(wrap_text=True)\n', (3157, 3173), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((3907, 3935), 'requests.get', 'requests.get', 
(['image_location'], {}), '(image_location)\n', (3919, 3935), False, 'import requests\n'), ((4276, 4291), 'openpyxl.styles.colors.Color', 'Color', (['"""d3d3d3"""'], {}), "('d3d3d3')\n", (4281, 4291), False, 'from openpyxl.styles.colors import Color, WHITE\n'), ((4303, 4330), 'openpyxl.styles.PatternFill', 'PatternFill', (['"""solid"""', 'color'], {}), "('solid', color)\n", (4314, 4330), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((4442, 4477), 'connect.cli.plugins.product.utils.get_col_headers_by_ws_type', 'get_col_headers_by_ws_type', (['ws_type'], {}), '(ws_type)\n', (4468, 4477), False, 'from connect.cli.plugins.product.utils import get_col_headers_by_ws_type, get_col_limit_by_ws_type, get_json_object_for_param\n'), ((6121, 6165), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""left"""', 'vertical': '"""top"""'}), "(horizontal='left', vertical='top')\n", (6130, 6165), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((6246, 6290), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""left"""', 'vertical': '"""top"""'}), "(horizontal='left', vertical='top')\n", (6255, 6290), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((6361, 6405), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""left"""', 'vertical': '"""top"""'}), "(horizontal='left', vertical='top')\n", (6370, 6405), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((6487, 6531), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""left"""', 'vertical': '"""top"""'}), "(horizontal='left', vertical='top')\n", (6496, 6531), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((6619, 6663), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""left"""', 'vertical': '"""top"""'}), "(horizontal='left', vertical='top')\n", (6628, 6663), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((6745, 6789), 
'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""left"""', 'vertical': '"""top"""'}), "(horizontal='left', vertical='top')\n", (6754, 6789), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((6871, 6915), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""left"""', 'vertical': '"""top"""'}), "(horizontal='left', vertical='top')\n", (6880, 6915), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((6996, 7040), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""left"""', 'vertical': '"""top"""'}), "(horizontal='left', vertical='top')\n", (7005, 7040), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((7208, 7252), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""left"""', 'vertical': '"""top"""'}), "(horizontal='left', vertical='top')\n", (7217, 7252), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((7417, 7461), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""left"""', 'vertical': '"""top"""'}), "(horizontal='left', vertical='top')\n", (7426, 7461), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((7626, 7670), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""left"""', 'vertical': '"""top"""'}), "(horizontal='left', vertical='top')\n", (7635, 7670), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((7794, 7819), 'openpyxl.styles.Alignment', 'Alignment', ([], {'wrap_text': '(True)'}), '(wrap_text=True)\n', (7803, 7819), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((7927, 7971), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""left"""', 'vertical': '"""top"""'}), "(horizontal='left', vertical='top')\n", (7936, 7971), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((8099, 8143), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""left"""', 'vertical': '"""top"""'}), 
"(horizontal='left', vertical='top')\n", (8108, 8143), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((8833, 8877), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""left"""', 'vertical': '"""top"""'}), "(horizontal='left', vertical='top')\n", (8842, 8877), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((8962, 9006), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""left"""', 'vertical': '"""top"""'}), "(horizontal='left', vertical='top')\n", (8971, 9006), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((9077, 9121), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""left"""', 'vertical': '"""top"""'}), "(horizontal='left', vertical='top')\n", (9086, 9121), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((9206, 9250), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""left"""', 'vertical': '"""top"""'}), "(horizontal='left', vertical='top')\n", (9215, 9250), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((9390, 9434), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""left"""', 'vertical': '"""top"""'}), "(horizontal='left', vertical='top')\n", (9399, 9434), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((9518, 9543), 'openpyxl.styles.Alignment', 'Alignment', ([], {'wrap_text': '(True)'}), '(wrap_text=True)\n', (9527, 9543), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((9638, 9682), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""left"""', 'vertical': '"""top"""'}), "(horizontal='left', vertical='top')\n", (9647, 9682), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((9812, 9856), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""left"""', 'vertical': '"""top"""'}), "(horizontal='left', vertical='top')\n", (9821, 9856), False, 'from openpyxl.styles import Alignment, 
Font, PatternFill\n'), ((12975, 13062), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""list"""', 'formula1': '""""-,create,update,delete\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\'"-,create,update,delete"\',\n allow_blank=False)\n', (12989, 13062), False, 'from openpyxl.worksheet.datavalidation import DataValidation\n'), ((13114, 13192), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""list"""', 'formula1': '""""asset,tier1,tier2\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\'"asset,tier1,tier2"\', allow_blank=False)\n', (13128, 13192), False, 'from openpyxl.worksheet.datavalidation import DataValidation\n'), ((13358, 13433), 'tqdm.trange', 'trange', (['(0)', 'count'], {'disable': 'silent', 'leave': '(True)', 'bar_format': 'DEFAULT_BAR_FORMAT'}), '(0, count, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)\n', (13364, 13433), False, 'from tqdm import trange\n'), ((13999, 14075), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""list"""', 'formula1': '""""-,update,delete\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\'"-,update,delete"\', allow_blank=False)\n', (14013, 14075), False, 'from openpyxl.worksheet.datavalidation import DataValidation\n'), ((14205, 14280), 'tqdm.trange', 'trange', (['(0)', 'count'], {'disable': 'silent', 'leave': '(True)', 'bar_format': 'DEFAULT_BAR_FORMAT'}), '(0, count, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)\n', (14211, 14280), False, 'from tqdm import trange\n'), ((15028, 15115), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""list"""', 'formula1': '""""-,create,update,delete\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\'"-,create,update,delete"\',\n allow_blank=False)\n', (15042, 15115), False, 'from openpyxl.worksheet.datavalidation import DataValidation\n'), ((15370, 15448), 
'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""list"""', 'formula1': '""""asset,tier1,tier2\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\'"asset,tier1,tier2"\', allow_blank=False)\n', (15384, 15448), False, 'from openpyxl.worksheet.datavalidation import DataValidation\n'), ((15517, 15624), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""list"""', 'formula1': '""""product,marketplace,item,item_marketplace\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\n \'"product,marketplace,item,item_marketplace"\', allow_blank=False)\n', (15531, 15624), False, 'from openpyxl.worksheet.datavalidation import DataValidation\n'), ((15673, 15740), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""list"""', 'formula1': '""""True,-\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\'"True,-"\', allow_blank=False)\n', (15687, 15740), False, 'from openpyxl.worksheet.datavalidation import DataValidation\n'), ((16047, 16122), 'tqdm.trange', 'trange', (['(0)', 'count'], {'disable': 'silent', 'leave': '(True)', 'bar_format': 'DEFAULT_BAR_FORMAT'}), '(0, count, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)\n', (16053, 16122), False, 'from tqdm import trange\n'), ((17002, 17089), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""list"""', 'formula1': '""""-,create,update,delete\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\'"-,create,update,delete"\',\n allow_blank=False)\n', (17016, 17089), False, 'from openpyxl.worksheet.datavalidation import DataValidation\n'), ((17139, 17211), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""list"""', 'formula1': '""""image,video\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\'"image,video"\', allow_blank=False)\n', (17153, 17211), False, 'from 
openpyxl.worksheet.datavalidation import DataValidation\n'), ((17375, 17450), 'tqdm.trange', 'trange', (['(0)', 'count'], {'disable': 'silent', 'leave': '(True)', 'bar_format': 'DEFAULT_BAR_FORMAT'}), '(0, count, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)\n', (17381, 17450), False, 'from tqdm import trange\n'), ((18073, 18149), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""list"""', 'formula1': '""""-,create,delete\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\'"-,create,delete"\', allow_blank=False)\n', (18087, 18149), False, 'from openpyxl.worksheet.datavalidation import DataValidation\n'), ((18197, 18284), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""list"""', 'formula1': '""""Download,Documentation\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\'"Download,Documentation"\',\n allow_blank=False)\n', (18211, 18284), False, 'from openpyxl.worksheet.datavalidation import DataValidation\n'), ((18438, 18513), 'tqdm.trange', 'trange', (['(0)', 'count'], {'disable': 'silent', 'leave': '(True)', 'bar_format': 'DEFAULT_BAR_FORMAT'}), '(0, count, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)\n', (18444, 18513), False, 'from tqdm import trange\n'), ((19482, 19553), 'tqdm.trange', 'trange', (['(0)', '(1)'], {'disable': 'silent', 'leave': '(True)', 'bar_format': 'DEFAULT_BAR_FORMAT'}), '(0, 1, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)\n', (19488, 19553), False, 'from tqdm import trange\n'), ((19761, 19830), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""list"""', 'formula1': '""""-,update\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\'"-,update"\', allow_blank=False)\n', (19775, 19830), False, 'from openpyxl.worksheet.datavalidation import DataValidation\n'), ((19883, 19961), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': 
'"""list"""', 'formula1': '""""Disabled,QT,TR,PR\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\'"Disabled,QT,TR,PR"\', allow_blank=False)\n', (19897, 19961), False, 'from openpyxl.worksheet.datavalidation import DataValidation\n'), ((20016, 20093), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""list"""', 'formula1': '""""Disabled,Enabled\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\'"Disabled,Enabled"\', allow_blank=False)\n', (20030, 20093), False, 'from openpyxl.worksheet.datavalidation import DataValidation\n'), ((20147, 20220), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""list"""', 'formula1': '""""Disabled,1,2\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\'"Disabled,1,2"\', allow_blank=False)\n', (20161, 20220), False, 'from openpyxl.worksheet.datavalidation import DataValidation\n'), ((23508, 23595), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""list"""', 'formula1': '""""-,create,update,delete\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\'"-,create,update,delete"\',\n allow_blank=False)\n', (23522, 23595), False, 'from openpyxl.worksheet.datavalidation import DataValidation\n'), ((23646, 23724), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""list"""', 'formula1': '""""asset,tier1,tier2\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\'"asset,tier1,tier2"\', allow_blank=False)\n', (23660, 23724), False, 'from openpyxl.worksheet.datavalidation import DataValidation\n'), ((23778, 23863), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""list"""', 'formula1': '""""fulfillment,inquire\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\'"fulfillment,inquire"\', allow_blank=False\n )\n', (23792, 23863), False, 'from openpyxl.worksheet.datavalidation import 
DataValidation\n'), ((24163, 24238), 'tqdm.trange', 'trange', (['(0)', 'count'], {'disable': 'silent', 'leave': '(True)', 'bar_format': 'DEFAULT_BAR_FORMAT'}), '(0, count, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)\n', (24169, 24238), False, 'from tqdm import trange\n'), ((24915, 25002), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""list"""', 'formula1': '""""-,create,update,delete\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\'"-,create,update,delete"\',\n allow_blank=False)\n', (24929, 25002), False, 'from openpyxl.worksheet.datavalidation import DataValidation\n'), ((25052, 25128), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""list"""', 'formula1': '""""reservation,ppu\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\'"reservation,ppu"\', allow_blank=False)\n', (25066, 25128), False, 'from openpyxl.worksheet.datavalidation import DataValidation\n'), ((25184, 25309), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""list"""', 'formula1': '""""onetime,monthly,yearly,2 years,3 years,4 years,5 years\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\n \'"onetime,monthly,yearly,2 years,3 years,4 years,5 years"\', allow_blank\n =False)\n', (25198, 25309), False, 'from openpyxl.worksheet.datavalidation import DataValidation\n'), ((25359, 25476), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""list"""', 'formula1': '""""integer,decimal(1),decimal(2),decimal(4),decimal(8)\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\n \'"integer,decimal(1),decimal(2),decimal(4),decimal(8)"\', allow_blank=False)\n', (25373, 25476), False, 'from openpyxl.worksheet.datavalidation import DataValidation\n'), ((25532, 25638), 'openpyxl.worksheet.datavalidation.DataValidation', 'DataValidation', ([], {'type': '"""list"""', 'formula1': '""""-,1 year,2 years,3 
years,4 years,5 years\\""""', 'allow_blank': '(False)'}), '(type=\'list\', formula1=\n \'"-,1 year,2 years,3 years,4 years,5 years"\', allow_blank=False)\n', (25546, 25638), False, 'from openpyxl.worksheet.datavalidation import DataValidation\n'), ((25917, 25992), 'tqdm.trange', 'trange', (['(0)', 'count'], {'disable': 'silent', 'leave': '(True)', 'bar_format': 'DEFAULT_BAR_FORMAT'}), '(0, count, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)\n', (25923, 25992), False, 'from tqdm import trange\n'), ((26852, 26886), 'os.path.join', 'os.path.join', (['output_path', '"""media"""'], {}), "(output_path, 'media')\n", (26864, 26886), False, 'import os\n'), ((1463, 1474), 'openpyxl.styles.Font', 'Font', ([], {'sz': '(12)'}), '(sz=12)\n', (1467, 1474), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((1502, 1513), 'openpyxl.styles.Font', 'Font', ([], {'sz': '(12)'}), '(sz=12)\n', (1506, 1513), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((4096, 4158), 'click.ClickException', 'ClickException', (['f"""Error obtaining image from {image_location}"""'], {}), "(f'Error obtaining image from {image_location}')\n", (4110, 4158), False, 'from click import ClickException\n'), ((11194, 11237), 'json.dumps', 'json.dumps', (['value'], {'indent': '(4)', 'sort_keys': '(True)'}), '(value, indent=4, sort_keys=True)\n', (11204, 11237), False, 'import json\n'), ((11291, 11316), 'openpyxl.styles.Alignment', 'Alignment', ([], {'wrap_text': '(True)'}), '(wrap_text=True)\n', (11300, 11316), False, 'from openpyxl.styles import Alignment, Font, PatternFill\n'), ((24825, 24888), 'click.ClickException', 'ClickException', (['f"""The product {product_id} doesn\'t have items."""'], {}), '(f"The product {product_id} doesn\'t have items.")\n', (24839, 24888), False, 'from click import ClickException\n'), ((26796, 26833), 'os.path.join', 'os.path.join', (['output_path', 'product_id'], {}), '(output_path, product_id)\n', (26808, 26833), False, 
'import os\n'), ((26934, 26981), 'os.path.join', 'os.path.join', (['output_path', 'f"""{product_id}.xlsx"""'], {}), "(output_path, f'{product_id}.xlsx')\n", (26946, 26981), False, 'import os\n'), ((27014, 27052), 'os.path.join', 'os.path.join', (['output_path', 'output_file'], {}), '(output_path, output_file)\n', (27026, 27052), False, 'import os\n'), ((27065, 27092), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (27079, 27092), False, 'import os\n'), ((27102, 27123), 'os.mkdir', 'os.mkdir', (['output_path'], {}), '(output_path)\n', (27110, 27123), False, 'import os\n'), ((27310, 27336), 'os.path.exists', 'os.path.exists', (['media_path'], {}), '(media_path)\n', (27324, 27336), False, 'import os\n'), ((27346, 27366), 'os.mkdir', 'os.mkdir', (['media_path'], {}), '(media_path)\n', (27354, 27366), False, 'import os\n'), ((27393, 27478), 'connect.client.ConnectClient', 'ConnectClient', ([], {'api_key': 'api_key', 'endpoint': 'api_url', 'use_specs': '(False)', 'max_retries': '(3)'}), '(api_key=api_key, endpoint=api_url, use_specs=False, max_retries=3\n )\n', (27406, 27478), False, 'from connect.client import ClientError, ConnectClient, R\n'), ((27598, 27608), 'openpyxl.Workbook', 'Workbook', ([], {}), '()\n', (27606, 27608), False, 'from openpyxl import Workbook\n'), ((27640, 27663), 'urllib.parse.urlparse', 'parse.urlparse', (['api_url'], {}), '(api_url)\n', (27654, 27663), False, 'from urllib import parse\n'), ((1241, 1256), 'openpyxl.styles.colors.Color', 'Color', (['"""1565C0"""'], {}), "('1565C0')\n", (1246, 1256), False, 'from openpyxl.styles.colors import Color, WHITE\n'), ((1876, 1890), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1888, 1890), False, 'from datetime import datetime\n'), ((26601, 26612), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (26610, 26612), False, 'import os\n'), ((26651, 26678), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (26665, 26678), False, 'import os\n'), 
((26698, 26742), 'click.ClickException', 'ClickException', (['"""Output Path does not exist"""'], {}), "('Output Path does not exist')\n", (26712, 26742), False, 'from click import ClickException\n'), ((27137, 27163), 'os.path.isdir', 'os.path.isdir', (['output_path'], {}), '(output_path)\n', (27150, 27163), False, 'import os\n'), ((27179, 27284), 'click.ClickException', 'ClickException', (['"""Exists a file with product name but a directory is expected, please rename it"""'], {}), "(\n 'Exists a file with product name but a directory is expected, please rename it'\n )\n", (27193, 27284), False, 'from click import ClickException\n'), ((29220, 29257), 'connect.cli.core.http.format_http_status', 'format_http_status', (['error.status_code'], {}), '(error.status_code)\n', (29238, 29257), False, 'from connect.cli.core.http import format_http_status, handle_http_error\n'), ((29383, 29407), 'connect.cli.core.http.handle_http_error', 'handle_http_error', (['error'], {}), '(error)\n', (29400, 29407), False, 'from connect.cli.core.http import format_http_status, handle_http_error\n'), ((3987, 4023), 'os.path.join', 'os.path.join', (['media_path', 'image_name'], {}), '(media_path, image_name)\n', (3999, 4023), False, 'import os\n'), ((7742, 7774), 'connect.cli.plugins.product.utils.get_json_object_for_param', 'get_json_object_for_param', (['param'], {}), '(param)\n', (7767, 7774), False, 'from connect.cli.plugins.product.utils import get_col_headers_by_ws_type, get_col_limit_by_ws_type, get_json_object_for_param\n'), ((14776, 14779), 'connect.client.R', 'R', ([], {}), '()\n', (14777, 14779), False, 'from connect.client import ClientError, ConnectClient, R\n'), ((29313, 29373), 'click.ClickException', 'ClickException', (['f"""{status}: Product {product_id} not found."""'], {}), "(f'{status}: Product {product_id} not found.')\n", (29327, 29373), False, 'from click import ClickException\n'), ((3642, 3680), 'openpyxl.utils.quote_sheetname', 'quote_sheetname', (['"""General 
Information"""'], {}), "('General Information')\n", (3657, 3680), False, 'from openpyxl.utils import quote_sheetname\n'), ((4382, 4415), 'connect.cli.plugins.product.utils.get_col_limit_by_ws_type', 'get_col_limit_by_ws_type', (['ws_type'], {}), '(ws_type)\n', (4406, 4415), False, 'from connect.cli.plugins.product.utils import get_col_headers_by_ws_type, get_col_limit_by_ws_type, get_json_object_for_param\n')] |
import os, time, shutil
def get_used_dirs():
    """Return the set of /tmp/fileshare.* paths used as a CWD by any process.

    Scans every numeric entry under /proc (i.e. every live PID) and resolves
    its current-working-directory symlink. Processes that exit mid-scan or
    deny access are silently skipped.

    Fix: the original bare ``except:`` swallowed *all* exceptions, including
    KeyboardInterrupt/SystemExit; only OS-level failures are ignored now.
    """
    used = set()
    for pid in os.listdir("/proc"):
        if not pid.isnumeric():
            continue
        try:
            cwd = os.path.realpath("/proc/%s/cwd" % pid)
        except OSError:
            # Process vanished or /proc entry unreadable - ignore it.
            continue
        if cwd.startswith("/tmp/fileshare."):
            used.add(cwd)
    return used
# Daemon loop: every 5 seconds, unmount and remove any /tmp/fileshare.*
# directory that no running process is using as its working directory.
# Fix: the original bare ``except:`` clauses also caught KeyboardInterrupt
# and SystemExit, making the loop impossible to stop cleanly; exceptions
# are now narrowed while keeping the best-effort semantics.
while True:
    try:
        candidates = [
            "/tmp/" + name
            for name in os.listdir("/tmp")
            if name.startswith("fileshare.")
        ]
        used = get_used_dirs()
        for directory in candidates:
            if directory in used:
                continue
            try:
                # NOTE(review): the name is interpolated unquoted into a
                # shell command; fileshare.* dirs are created by the service
                # itself, so no untrusted names are expected - confirm.
                os.system("umount %s/proc" % directory)
                shutil.rmtree(directory)
            except OSError:
                # Directory disappeared or is busy - retry next cycle.
                pass
    except Exception:
        # Top-level guard: a transient failure must not kill the daemon.
        pass
    time.sleep(5)
| [
"os.listdir",
"time.sleep",
"os.path.realpath",
"shutil.rmtree",
"os.system"
] | [((715, 728), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (725, 728), False, 'import os, time, shutil\n'), ((69, 88), 'os.listdir', 'os.listdir', (['"""/proc"""'], {}), "('/proc')\n", (79, 88), False, 'import os, time, shutil\n'), ((174, 210), 'os.path.realpath', 'os.path.realpath', (["('/proc/%s/cwd' % p)"], {}), "('/proc/%s/cwd' % p)\n", (190, 210), False, 'import os, time, shutil\n'), ((395, 413), 'os.listdir', 'os.listdir', (['"""/tmp"""'], {}), "('/tmp')\n", (405, 413), False, 'import os, time, shutil\n'), ((570, 601), 'os.system', 'os.system', (["('umount %s/proc' % d)"], {}), "('umount %s/proc' % d)\n", (579, 601), False, 'import os, time, shutil\n'), ((620, 636), 'shutil.rmtree', 'shutil.rmtree', (['d'], {}), '(d)\n', (633, 636), False, 'import os, time, shutil\n')] |
import os
import requests
from nltk.corpus import wordnet as wn
# Directory that receives one URL-list file per selected synset.
urldir = "urls"
# ImageNet API endpoint returning the image URLs of a given synset id.
geturls = "http://www.image-net.org/api/text/imagenet.synset.geturls?wnid={wnid}"

os.makedirs(urldir, exist_ok=True)
# For every base concept, query each WordNet noun synset against the
# ImageNet URL API, keep candidates with at least 100 image URLs, and
# write the URL list of the best (most-URLs) synset to a file.
with open("base_concepts.txt") as fin:
    for line in fin:
        concept = line.strip().split("_")[0]
        print("===", concept)
        syns = wn.synsets(concept, pos=wn.NOUN)
        available = []
        for synset in syns:
            category = synset.lexname().split(".")[-1]
            name = synset.name().split(".")[0]
            offset = synset.offset()
            # ImageNet wnids are the letter 'n' + zero-padded 8-digit offset.
            wnid = f"n{offset:08d}"
            print(f"{wnid}.{category}.{name}")
            r = requests.get(geturls.format(wnid=wnid))
            # A response without newlines is an error page, not a URL list.
            if "\n" not in r.text:
                continue
            urls = r.text.split()
            # Skip synsets that are too small to be useful.
            if len(urls) < 100:
                continue
            filename = os.path.join(urldir, f"{wnid}.{category}.{name}.{len(urls)}.txt")
            available.append((filename, len(urls), urls))
        if not available:
            continue
        # Pick the synset with the most URLs.
        available.sort(key=lambda x: x[1], reverse=True)
        filename, _, urls = available[0]
        # Fix: the original f-string had no placeholder and printed a
        # literal instead of the chosen filename.
        print(f"BEST: {filename}")
        with open(filename, "w", encoding="utf-8") as fout:
            for url in urls:
                try:
                    print(url, file=fout)
                except Exception as e:
                    # Report unencodable/broken URLs but keep writing.
                    print(type(e), url)
| [
"nltk.corpus.wordnet.synsets",
"os.path.isdir",
"os.makedirs"
] | [((172, 193), 'os.path.isdir', 'os.path.isdir', (['urldir'], {}), '(urldir)\n', (185, 193), False, 'import os\n'), ((199, 218), 'os.makedirs', 'os.makedirs', (['urldir'], {}), '(urldir)\n', (210, 218), False, 'import os\n'), ((370, 402), 'nltk.corpus.wordnet.synsets', 'wn.synsets', (['concept'], {'pos': 'wn.NOUN'}), '(concept, pos=wn.NOUN)\n', (380, 402), True, 'from nltk.corpus import wordnet as wn\n')] |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.15.0)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x01\x64\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x18\x08\x04\x00\x00\x00\x4a\x7e\xf5\x73\
\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\x26\x00\x00\x80\x84\
\x00\x00\xfa\x00\x00\x00\x80\xe8\x00\x00\x75\x30\x00\x00\xea\x60\
\x00\x00\x3a\x98\x00\x00\x17\x70\x9c\xba\x51\x3c\x00\x00\x00\x02\
\x62\x4b\x47\x44\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09\x70\x48\
\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\
\x00\x00\xdc\x49\x44\x41\x54\x38\xcb\xbd\x92\x41\x0e\x82\x30\x10\
\x45\x5f\x5c\xe0\x21\x24\x1e\xca\x04\x23\x91\x70\x26\x20\x26\xde\
\x41\xa3\x47\xc2\x34\xe0\x46\xbc\x00\x6c\x70\xa5\x9d\x99\x56\x97\
\xce\xac\xfe\xf4\xbf\x64\xfa\x5b\xf8\x77\x2d\x29\xb9\xe2\x98\x98\
\x70\x5c\x28\x58\xfe\xb2\xef\xb8\x33\x9b\xee\xd9\xc6\xcd\x0b\x8e\
\x81\xf9\xdd\x07\x16\x21\x20\xed\x0f\x2a\x6a\x06\x85\x04\xcb\x48\
\xfb\x0a\x80\x54\x21\x99\xbe\xaa\xdc\xbd\xfa\xcc\x1b\x31\xed\x48\
\x3c\x50\xaa\x8d\xeb\x28\x30\xb3\xf7\xc0\x55\x1d\x0c\xa4\x00\xac\
\x79\xaa\xf9\xd9\x03\xce\xa4\x32\xd0\xd0\x18\xfb\xcc\xcd\x03\xd3\
\xd7\x40\x65\x8f\x21\x60\xe3\xd4\x7a\xb4\x2b\xd9\x38\xad\x6e\x3d\
\x70\x89\xc6\x69\xf5\xc9\x03\x45\x34\x4e\xab\x73\xf9\x70\x7d\x24\
\x4e\xad\x9d\x7c\x38\xd8\x46\xe3\x94\x7a\x63\x7f\xd3\xe1\x67\xa4\
\x75\xec\x7b\x7f\x47\xaa\xd8\xf7\x06\xc8\xe8\x02\xb3\x0b\x97\x91\
\x95\xb0\xe7\xcc\x8d\x91\x91\x96\x13\xb9\xbe\xea\x3f\xea\x05\xa7\
\xf0\xfd\xeb\x14\xb8\xd5\x70\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x00\xda\
\x3c\
\x73\x76\x67\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x20\x77\x69\x64\x74\x68\x3d\x22\x34\x38\
\x22\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x34\x38\x22\x20\x76\x69\
\x65\x77\x42\x6f\x78\x3d\x22\x30\x20\x30\x20\x34\x38\x20\x34\x38\
\x22\x3e\x3c\x70\x61\x74\x68\x20\x64\x3d\x22\x4d\x32\x33\x2e\x39\
\x39\x20\x33\x37\x2e\x30\x38\x4c\x39\x2e\x32\x34\x20\x32\x35\x2e\
\x36\x31\x20\x36\x20\x32\x38\x2e\x31\x33\x6c\x31\x38\x20\x31\x34\
\x20\x31\x38\x2d\x31\x34\x2d\x33\x2e\x32\x36\x2d\x32\x2e\x35\x33\
\x2d\x31\x34\x2e\x37\x35\x20\x31\x31\x2e\x34\x38\x7a\x4d\x32\x34\
\x20\x33\x32\x6c\x31\x34\x2e\x37\x33\x2d\x31\x31\x2e\x34\x35\x4c\
\x34\x32\x20\x31\x38\x20\x32\x34\x20\x34\x20\x36\x20\x31\x38\x6c\
\x33\x2e\x32\x36\x20\x32\x2e\x35\x33\x4c\x32\x34\x20\x33\x32\x7a\
\x22\x2f\x3e\x3c\x2f\x73\x76\x67\x3e\
\x00\x00\x01\x58\
\x3c\
\x73\x76\x67\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x20\x77\x69\x64\x74\x68\x3d\x22\x34\x38\
\x22\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x34\x38\x22\x20\x76\x69\
\x65\x77\x42\x6f\x78\x3d\x22\x30\x20\x30\x20\x34\x38\x20\x34\x38\
\x22\x3e\x3c\x70\x61\x74\x68\x20\x64\x3d\x22\x4d\x34\x31\x2e\x30\
\x39\x20\x31\x30\x2e\x34\x35\x6c\x2d\x32\x2e\x37\x37\x2d\x33\x2e\
\x33\x36\x43\x33\x37\x2e\x37\x36\x20\x36\x2e\x34\x33\x20\x33\x36\
\x2e\x39\x33\x20\x36\x20\x33\x36\x20\x36\x48\x31\x32\x63\x2d\x2e\
\x39\x33\x20\x30\x2d\x31\x2e\x37\x36\x2e\x34\x33\x2d\x32\x2e\x33\
\x31\x20\x31\x2e\x30\x39\x6c\x2d\x32\x2e\x37\x37\x20\x33\x2e\x33\
\x36\x43\x36\x2e\x33\x34\x20\x31\x31\x2e\x31\x35\x20\x36\x20\x31\
\x32\x2e\x30\x33\x20\x36\x20\x31\x33\x76\x32\x35\x63\x30\x20\x32\
\x2e\x32\x31\x20\x31\x2e\x37\x39\x20\x34\x20\x34\x20\x34\x68\x32\
\x38\x63\x32\x2e\x32\x31\x20\x30\x20\x34\x2d\x31\x2e\x37\x39\x20\
\x34\x2d\x34\x56\x31\x33\x63\x30\x2d\x2e\x39\x37\x2d\x2e\x33\x34\
\x2d\x31\x2e\x38\x35\x2d\x2e\x39\x31\x2d\x32\x2e\x35\x35\x7a\x4d\
\x32\x34\x20\x33\x35\x4c\x31\x33\x20\x32\x34\x68\x37\x76\x2d\x34\
\x68\x38\x76\x34\x68\x37\x4c\x32\x34\x20\x33\x35\x7a\x4d\x31\x30\
\x2e\x32\x35\x20\x31\x30\x6c\x31\x2e\x36\x33\x2d\x32\x68\x32\x34\
\x6c\x31\x2e\x38\x37\x20\x32\x68\x2d\x32\x37\x2e\x35\x7a\x22\x2f\
\x3e\x3c\x2f\x73\x76\x67\x3e\
\x00\x00\x01\x58\
\x3c\
\x73\x76\x67\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x20\x77\x69\x64\x74\x68\x3d\x22\x34\x38\
\x22\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x34\x38\x22\x20\x76\x69\
\x65\x77\x42\x6f\x78\x3d\x22\x30\x20\x30\x20\x34\x38\x20\x34\x38\
\x22\x3e\x3c\x70\x61\x74\x68\x20\x64\x3d\x22\x4d\x33\x39\x2e\x36\
\x32\x20\x32\x39\x2e\x39\x38\x4c\x34\x32\x20\x32\x38\x2e\x31\x33\
\x6c\x2d\x32\x2e\x38\x35\x2d\x32\x2e\x38\x35\x2d\x32\x2e\x33\x38\
\x20\x31\x2e\x38\x35\x20\x32\x2e\x38\x35\x20\x32\x2e\x38\x35\x7a\
\x6d\x2d\x2e\x38\x39\x2d\x39\x2e\x34\x33\x4c\x34\x32\x20\x31\x38\
\x20\x32\x34\x20\x34\x6c\x2d\x35\x2e\x38\x33\x20\x34\x2e\x35\x33\
\x20\x31\x35\x2e\x37\x35\x20\x31\x35\x2e\x37\x35\x20\x34\x2e\x38\
\x31\x2d\x33\x2e\x37\x33\x7a\x4d\x36\x2e\x35\x35\x20\x32\x4c\x34\
\x20\x34\x2e\x35\x35\x6c\x38\x2e\x34\x34\x20\x38\x2e\x34\x34\x4c\
\x36\x20\x31\x38\x6c\x33\x2e\x32\x36\x20\x32\x2e\x35\x33\x4c\x32\
\x34\x20\x33\x32\x6c\x34\x2e\x31\x39\x2d\x33\x2e\x32\x36\x20\x32\
\x2e\x38\x35\x20\x32\x2e\x38\x35\x2d\x37\x2e\x30\x36\x20\x35\x2e\
\x34\x39\x4c\x39\x2e\x32\x34\x20\x32\x35\x2e\x36\x31\x20\x36\x20\
\x32\x38\x2e\x31\x33\x6c\x31\x38\x20\x31\x34\x20\x39\x2e\x38\x39\
\x2d\x37\x2e\x37\x4c\x34\x31\x2e\x34\x36\x20\x34\x32\x20\x34\x34\
\x20\x33\x39\x2e\x34\x35\x20\x36\x2e\x35\x35\x20\x32\x7a\x22\x2f\
\x3e\x3c\x2f\x73\x76\x67\x3e\
\x00\x00\x01\x13\
\x3c\
\x73\x76\x67\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x20\x77\x69\x64\x74\x68\x3d\x22\x34\x38\
\x22\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x34\x38\x22\x20\x76\x69\
\x65\x77\x42\x6f\x78\x3d\x22\x30\x20\x30\x20\x34\x38\x20\x34\x38\
\x22\x3e\x3c\x70\x61\x74\x68\x20\x64\x3d\x22\x4d\x32\x36\x20\x31\
\x34\x68\x2d\x34\x76\x38\x68\x2d\x38\x76\x34\x68\x38\x76\x38\x68\
\x34\x76\x2d\x38\x68\x38\x76\x2d\x34\x68\x2d\x38\x76\x2d\x38\x7a\
\x4d\x32\x34\x20\x34\x43\x31\x32\x2e\x39\x35\x20\x34\x20\x34\x20\
\x31\x32\x2e\x39\x35\x20\x34\x20\x32\x34\x73\x38\x2e\x39\x35\x20\
\x32\x30\x20\x32\x30\x20\x32\x30\x20\x32\x30\x2d\x38\x2e\x39\x35\
\x20\x32\x30\x2d\x32\x30\x53\x33\x35\x2e\x30\x35\x20\x34\x20\x32\
\x34\x20\x34\x7a\x6d\x30\x20\x33\x36\x63\x2d\x38\x2e\x38\x32\x20\
\x30\x2d\x31\x36\x2d\x37\x2e\x31\x38\x2d\x31\x36\x2d\x31\x36\x53\
\x31\x35\x2e\x31\x38\x20\x38\x20\x32\x34\x20\x38\x73\x31\x36\x20\
\x37\x2e\x31\x38\x20\x31\x36\x20\x31\x36\x2d\x37\x2e\x31\x38\x20\
\x31\x36\x2d\x31\x36\x20\x31\x36\x7a\x22\x2f\x3e\x3c\x2f\x73\x76\
\x67\x3e\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x0d\
\x0e\x99\xe8\x93\
\x00\x43\
\x00\x69\x00\x72\x00\x63\x00\x6c\x00\x65\x00\x43\x00\x72\x00\x61\x00\x74\x00\x65\x00\x72\x00\x73\
\x00\x05\
\x00\x6f\xa6\x53\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x73\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x12\
\x0b\x0d\xf4\x27\
\x00\x69\
\x00\x63\x00\x5f\x00\x6c\x00\x61\x00\x79\x00\x65\x00\x72\x00\x73\x00\x5f\x00\x34\x00\x38\x00\x70\x00\x78\x00\x2e\x00\x73\x00\x76\
\x00\x67\
\x00\x13\
\x0f\x34\x1f\x27\
\x00\x69\
\x00\x63\x00\x5f\x00\x61\x00\x72\x00\x63\x00\x68\x00\x69\x00\x76\x00\x65\x00\x5f\x00\x34\x00\x38\x00\x70\x00\x78\x00\x2e\x00\x73\
\x00\x76\x00\x67\
\x00\x18\
\x02\xe6\x9c\xa7\
\x00\x69\
\x00\x63\x00\x5f\x00\x6c\x00\x61\x00\x79\x00\x65\x00\x72\x00\x73\x00\x5f\x00\x63\x00\x6c\x00\x65\x00\x61\x00\x72\x00\x5f\x00\x34\
\x00\x38\x00\x70\x00\x78\x00\x2e\x00\x73\x00\x76\x00\x67\
\x00\x1e\
\x0d\xce\xb3\x87\
\x00\x69\
\x00\x63\x00\x5f\x00\x61\x00\x64\x00\x64\x00\x5f\x00\x63\x00\x69\x00\x72\x00\x63\x00\x6c\x00\x65\x00\x5f\x00\x6f\x00\x75\x00\x74\
\x00\x6c\x00\x69\x00\x6e\x00\x65\x00\x5f\x00\x34\x00\x38\x00\x70\x00\x78\x00\x2e\x00\x73\x00\x76\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x02\x00\x00\x00\x03\
\x00\x00\x00\x34\x00\x02\x00\x00\x00\x04\x00\x00\x00\x05\
\x00\x00\x00\x44\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\xb0\x00\x00\x00\x00\x00\x01\x00\x00\x03\xa2\
\x00\x00\x00\x5a\x00\x00\x00\x00\x00\x01\x00\x00\x01\x68\
\x00\x00\x00\xe6\x00\x00\x00\x00\x00\x01\x00\x00\x04\xfe\
\x00\x00\x00\x84\x00\x00\x00\x00\x00\x01\x00\x00\x02\x46\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x02\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x34\x00\x02\x00\x00\x00\x04\x00\x00\x00\x05\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x44\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x75\x3f\xfb\xd6\x9f\
\x00\x00\x00\xb0\x00\x00\x00\x00\x00\x01\x00\x00\x03\xa2\
\x00\x00\x01\x75\x3f\xfb\xd6\x9f\
\x00\x00\x00\x5a\x00\x00\x00\x00\x00\x01\x00\x00\x01\x68\
\x00\x00\x01\x75\x3f\xfb\xd6\x9f\
\x00\x00\x00\xe6\x00\x00\x00\x00\x00\x01\x00\x00\x04\xfe\
\x00\x00\x01\x75\x3f\xfb\xd6\x9f\
\x00\x00\x00\x84\x00\x00\x00\x00\x00\x01\x00\x00\x02\x46\
\x00\x00\x01\x75\x3f\xfb\xd6\x9f\
"
qt_version = [int(part) for part in QtCore.qVersion().split('.')]
# Qt releases before 5.8 only understand version 1 of the resource-struct
# format; newer releases use version 2 (which carries timestamps).
_legacy_format = qt_version < [5, 8, 0]
rcc_version = 1 if _legacy_format else 2
qt_resource_struct = qt_resource_struct_v1 if _legacy_format else qt_resource_struct_v2
def qInitResources():
    """Register the embedded resource data with Qt's resource system."""
    QtCore.qRegisterResourceData(
        rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data
    )
def qCleanupResources():
    """Unregister the embedded resource data from Qt's resource system."""
    QtCore.qUnregisterResourceData(
        rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data
    )
# Register the resources as soon as this module is imported.
qInitResources()
| [
"PyQt5.QtCore.qVersion",
"PyQt5.QtCore.qUnregisterResourceData",
"PyQt5.QtCore.qRegisterResourceData"
] | [((9613, 9714), 'PyQt5.QtCore.qRegisterResourceData', 'QtCore.qRegisterResourceData', (['rcc_version', 'qt_resource_struct', 'qt_resource_name', 'qt_resource_data'], {}), '(rcc_version, qt_resource_struct,\n qt_resource_name, qt_resource_data)\n', (9641, 9714), False, 'from PyQt5 import QtCore\n'), ((9741, 9844), 'PyQt5.QtCore.qUnregisterResourceData', 'QtCore.qUnregisterResourceData', (['rcc_version', 'qt_resource_struct', 'qt_resource_name', 'qt_resource_data'], {}), '(rcc_version, qt_resource_struct,\n qt_resource_name, qt_resource_data)\n', (9771, 9844), False, 'from PyQt5 import QtCore\n'), ((9389, 9406), 'PyQt5.QtCore.qVersion', 'QtCore.qVersion', ([], {}), '()\n', (9404, 9406), False, 'from PyQt5 import QtCore\n')] |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import typing
import numpy as np
import jax.numpy as jnp
import xarray as xr
import seaborn as sns
from jax_cfd.data import xarray_utils as xru
import jax_cfd.base as cfd
from dynamical_system import Lorenz96, KolmogorovFlow
from util import jnp_to_aa_tuple, aa_tuple_to_jnp
# Shared plotting palette: muted blue / red / yellow hex codes.
plot_colors = {
    'b': '#5A7D9F',
    'r': '#c23b22',
    'y': '#ffdb58',
}
def load_da_results(
    filenames: list,
    retained_variables: list,
    retained_attrs: list,
) -> xr.Dataset:
  """
  Loads data assimilation results for analysis.

  Args:
    filenames: files containing the computed setups.
    retained_variables: variables to keep in the dataset for analysis.
    retained_attrs: attributes to keep in the dataset for analysis.

  Returns:
    Data assimilation data combined over 'init' and 'opt_space' dimensions.
  """
  datasets = []
  init_labels = set()
  space_labels = set()
  # First pass: open every file, collect its coordinate labels, and keep
  # only the requested variables.
  for path in filenames:
    raw = xr.open_dataset(path)
    init_labels.add(raw.attrs['da_init'])
    space_labels.add(raw.attrs['opt_space'])
    datasets.append(raw[retained_variables])
  init_labels = list(init_labels)
  space_labels = list(space_labels)
  # Second pass: place each dataset into its (init, opt_space) grid cell,
  # trimming the attributes down to the retained subset.
  grid = np.empty((len(init_labels), len(space_labels)), dtype=object)
  for entry in datasets:
    row = init_labels.index(entry.attrs['da_init'])
    col = space_labels.index(entry.attrs['opt_space'])
    entry.attrs = {key: entry.attrs[key] for key in retained_attrs}
    grid[row, col] = entry
  combined = xr.combine_nested(
      grid.tolist(),
      concat_dim=['init', 'opt_space'],
      combine_attrs='identical',
  )
  return combined.assign_coords(
      {'init': init_labels, 'opt_space': space_labels},
  )
def compute_vorticity(ds: xr.Dataset, grid: cfd.grids.Grid) -> xr.Dataset:
  """
  Computes vorticity of a dataset containing Kolmogorov flow trajectories.

  Args:
    ds: dataset containing Kolmogorov flow trajectories, with velocity
      components indexed by the 'v' coordinate (0 = u, 1 = v).
    grid: grid over which to compute vorticity.

  Returns:
    Vorticity of the Kolmogorov flow trajectories.
  """
  ds = ds.assign_coords(xru.construct_coords(grid))
  # Grid spacings taken from the first two coordinate values.
  dx = ds.x[1] - ds.x[0]
  dy = ds.y[1] - ds.y[0]
  u = ds.sel(v=0)
  v = ds.sel(v=1)
  # Forward differences; roll(...) wraps around for periodic boundaries.
  dv_dx = (v.roll(x=-1, roll_coords=False) - v) / dx
  du_dy = (u.roll(y=-1, roll_coords=False) - u) / dy
  return dv_dx - du_dy
def integrate_kolmogorov_xr(
    dyn_sys: KolmogorovFlow,
    X0_da: xr.DataArray,
    n_steps: int,
) -> xr.DataArray:
  """
  Integrates Kolmogorov flow from and to an `xarray.DataArray`.

  Args:
    dyn_sys: Kolmogorov flow dynamical system.
    X0_da: initial states; the trailing three axes hold one flow state.
    n_steps: number of integration steps.

  Returns:
    Integrated trajectories with a new 't' axis before the state axes.
  """
  states = jnp.asarray(X0_da.data)
  state_shape = states.shape[-3:]
  out_shape = states.shape[:-3] + (n_steps,) + state_shape
  # Collapse all batch axes into one so batch_integrate sees a flat batch.
  flat = states.reshape((-1,) + state_shape)
  trajectory = dyn_sys.batch_integrate(flat, n_steps, None, True)
  trajectory = trajectory.reshape(out_shape)
  dims = list(X0_da.dims)
  dims.insert(-3, 't')
  return xr.DataArray(trajectory, dims=dims, coords=X0_da.coords)
def compute_l1_error_kolmogorov(
    X: xr.Dataset,
    comparison_var: str,
    scale: float = 1,
) -> xr.Dataset:
  """
  Computes the scaled L1 error for Kolmogorov flow.

  Args:
    X: data to compute the L1 error of.
    comparison_var: data_type coordinate value used as the reference.
    scale: divisor applied to the summed absolute deviation.

  Returns:
    Scaled L1 error with the reference data_type dropped.
  """
  remaining = list(X.data_type.values)
  remaining.remove(comparison_var)
  reference = X.sel(data_type=comparison_var)
  # Sum |X - reference| over the spatial axes, then rescale.
  deviation = np.abs(X - reference).sum(dim=['x', 'y']) / scale
  return deviation.sel(data_type=remaining, drop=True)
def integrate_lorenz96_xr(
    dyn_sys: Lorenz96,
    X0_da: xr.DataArray,
    n_steps: int,
) -> xr.DataArray:
  """
  Integrates the Lorenz96 model from and to an `xarray.DataArray`.

  Args:
    dyn_sys: Lorenz96 dynamical system.
    X0_da: initial states; the last axis holds one model state.
    n_steps: number of integration steps.

  Returns:
    Integrated trajectories with a new 't' axis before the state axis.
  """
  states = X0_da.data
  grid_size = states.shape[-1]
  out_shape = list(states.shape[:-1]) + [n_steps, grid_size]
  # Flatten batch axes so batch_integrate sees a (batch, grid) array.
  flat = states.reshape(-1, grid_size)
  trajectory = dyn_sys.batch_integrate(flat, n_steps).reshape(out_shape)
  dims = list(X0_da.dims)
  dims.insert(-1, 't')
  return xr.DataArray(trajectory, dims=dims, coords=X0_da.coords)
def compute_l1_error_lorenz96(
    X: xr.Dataset,
    comparison_var: str,
    scale: float = 1,
) -> xr.Dataset:
  """
  Computes the scaled L1 error for the Lorenz96 model.

  Args:
    X: data to compute the L1 error of.
    comparison_var: data_type coordinate value used as the reference.
    scale: divisor applied to the summed absolute deviation.

  Returns:
    Scaled L1 error with the reference data_type dropped.
  """
  remaining = list(X.data_type.values)
  remaining.remove(comparison_var)
  reference = X.sel(data_type=comparison_var)
  # Sum |X - reference| over the spatial axis, then rescale.
  deviation = np.abs(X - reference).sum(dim=['x']) / scale
  return deviation.sel(data_type=remaining, drop=True)
def adjust_row_labels(g: sns.FacetGrid, labels: list):
  """
  Adjust row `labels` of a seaborn FacetGrid object `g`.

  Replaces each existing right-hand-side row annotation with the next entry
  of `labels`, keeping the original position and vertical rotation.

  NOTE(review): `labels` is consumed destructively via pop(0) — the caller's
  list is emptied; confirm callers do not reuse it afterwards.
  """
  for ax in g.axes.flat:
    if ax.texts:
      # ylabel text on the right side
      txt = ax.texts[0]
      # Re-create the annotation at the same position with the next label.
      ax.text(txt.get_unitless_position()[0], txt.get_unitless_position()[1],
              labels.pop(0),
              transform=ax.transAxes,
              va='center',
              rotation=-90)
      # remove original text
      ax.texts[0].remove()
"jax_cfd.data.xarray_utils.construct_coords",
"jax.numpy.asarray",
"numpy.empty",
"xarray.DataArray",
"xarray.open_dataset"
] | [((2013, 2061), 'numpy.empty', 'np.empty', (['(num_init, num_optspace)'], {'dtype': 'object'}), '((num_init, num_optspace), dtype=object)\n', (2021, 2061), True, 'import numpy as np\n'), ((2931, 2957), 'jax_cfd.data.xarray_utils.construct_coords', 'xru.construct_coords', (['grid'], {}), '(grid)\n', (2951, 2957), True, 'from jax_cfd.data import xarray_utils as xru\n'), ((3591, 3614), 'jax.numpy.asarray', 'jnp.asarray', (['X0_da.data'], {}), '(X0_da.data)\n', (3602, 3614), True, 'import jax.numpy as jnp\n'), ((3935, 3982), 'xarray.DataArray', 'xr.DataArray', (['X'], {'dims': 'dims', 'coords': 'X0_da.coords'}), '(X, dims=dims, coords=X0_da.coords)\n', (3947, 3982), True, 'import xarray as xr\n'), ((5302, 5353), 'xarray.DataArray', 'xr.DataArray', (['X_jnp'], {'dims': 'dims', 'coords': 'X0_da.coords'}), '(X_jnp, dims=dims, coords=X0_da.coords)\n', (5314, 5353), True, 'import xarray as xr\n'), ((1614, 1636), 'xarray.open_dataset', 'xr.open_dataset', (['fname'], {}), '(fname)\n', (1629, 1636), True, 'import xarray as xr\n')] |
import argparse
from train_images import run
# generalized ZSL
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 0 --generalized True > awa1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset SUN --few_train False --num_shots 0 --generalized True > sun.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset CUB --few_train False --num_shots 0 --generalized True > cub.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True > flo.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 0 --generalized True > awa2.log 2>&1 &
# naive feature
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 0 --generalized True --image_embedding res101_naive > awa2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset SUN --few_train False --num_shots 0 --generalized True --image_embedding res101_naive > sun.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset CUB --few_train False --num_shots 0 --generalized True --image_embedding res101_naive > cub.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True --image_embedding res101_naive > flo.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset aPY --few_train False --num_shots 0 --generalized True --image_embedding res101_naive > apy.log 2>&1 &
# finetue feature
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 0 --generalized True --image_embedding res101_finetune > awa2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset SUN --few_train False --num_shots 0 --generalized True --image_embedding res101_finetune > sun.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset CUB --few_train False --num_shots 0 --generalized True --image_embedding res101_finetune > cub.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True --image_embedding res101_finetune > flo.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset aPY --few_train False --num_shots 0 --generalized True --image_embedding res101_finetune > apy.log 2>&1 &
# reg feature
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True --image_embedding res101_reg > flo.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset CUB --few_train False --num_shots 0 --generalized True --image_embedding res101_reg > cub.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset SUN --few_train False --num_shots 0 --generalized True --image_embedding res101_reg > sun.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 0 --generalized True --image_embedding res101_reg > awa2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset aPY --few_train False --num_shots 0 --generalized True --image_embedding res101_reg > apy.log 2>&1 &
# few shot
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train False --num_shots 1 --generalized True --image_embedding res101_reg > flo0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train False --num_shots 5 --generalized True --image_embedding res101_reg > flo1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset FLO --few_train False --num_shots 10 --generalized True --image_embedding res101_reg > flo2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset FLO --few_train False --num_shots 20 --generalized True --image_embedding res101_reg > flo3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train True --num_shots 1 --generalized True --image_embedding res101_naive > flo0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train True --num_shots 5 --generalized True --image_embedding res101_naive > flo1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset FLO --few_train True --num_shots 10 --generalized True --image_embedding res101_naive > flo2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset FLO --few_train True --num_shots 20 --generalized True --image_embedding res101_naive > flo3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset CUB --few_train False --num_shots 1 --generalized True --image_embedding res101_reg > cub0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset CUB --few_train False --num_shots 5 --generalized True --image_embedding res101_reg > cub1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset CUB --few_train False --num_shots 10 --generalized True --image_embedding res101_reg > cub2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset CUB --few_train False --num_shots 20 --generalized True --image_embedding res101_reg > cub3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset CUB --few_train True --num_shots 1 --generalized True --image_embedding res101_naive > cub0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset CUB --few_train True --num_shots 5 --generalized True --image_embedding res101_naive > cub1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset CUB --few_train True --num_shots 10 --generalized True --image_embedding res101_naive > cub2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset CUB --few_train True --num_shots 20 --generalized True --image_embedding res101_naive > cub3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset SUN --few_train False --num_shots 1 --generalized True --image_embedding res101_reg > sun0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset SUN --few_train False --num_shots 5 --generalized True --image_embedding res101_reg > sun1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset SUN --few_train False --num_shots 10 --generalized True --image_embedding res101_reg > sun2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset SUN --few_train True --num_shots 1 --generalized True --image_embedding res101 > sun0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset SUN --few_train True --num_shots 5 --generalized True --image_embedding res101 > sun1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset SUN --few_train True --num_shots 10 --generalized True --image_embedding res101 > sun2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 1 --generalized True --image_embedding res101_naive > awa20.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 5 --generalized True --image_embedding res101_naive > awa21.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 10 --generalized True --image_embedding res101_naive > awa22.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 20 --generalized True --image_embedding res101_naive > awa23.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA2 --few_train True --num_shots 1 --generalized True --image_embedding res101_naive > awa20.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset AWA2 --few_train True --num_shots 5 --generalized True --image_embedding res101_naive > awa21.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset AWA2 --few_train True --num_shots 10 --generalized True --image_embedding res101_naive > awa22.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset AWA2 --few_train True --num_shots 20 --generalized True --image_embedding res101_naive > awa23.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 1 --generalized True --image_embedding res101 > awa10.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 5 --generalized True --image_embedding res101 > awa11.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 10 --generalized True --image_embedding res101 > awa12.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 20 --generalized True --image_embedding res101 > awa13.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA1 --few_train True --num_shots 1 --generalized True --image_embedding res101 > awa10.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset AWA1 --few_train True --num_shots 5 --generalized True --image_embedding res101 > awa11.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset AWA1 --few_train True --num_shots 10 --generalized True --image_embedding res101 > awa12.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset AWA1 --few_train True --num_shots 20 --generalized True --image_embedding res101 > awa13.log 2>&1 &
# few shot
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train False --num_shots 1 --generalized True --image_embedding res101_reg > flo0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train False --num_shots 5 --generalized True --image_embedding res101_reg > flo1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset FLO --few_train False --num_shots 10 --generalized True --image_embedding res101_reg > flo2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset FLO --few_train False --num_shots 20 --generalized True --image_embedding res101_reg > flo3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train True --num_shots 1 --generalized True --image_embedding res101_naive > flo0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train True --num_shots 5 --generalized True --image_embedding res101_naive > flo1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset FLO --few_train True --num_shots 10 --generalized True --image_embedding res101_naive > flo2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset FLO --few_train True --num_shots 20 --generalized True --image_embedding res101_naive > flo3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset CUB --few_train False --num_shots 1 --generalized True --image_embedding res101_reg > cub0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset CUB --few_train False --num_shots 5 --generalized True --image_embedding res101_reg > cub1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset CUB --few_train False --num_shots 10 --generalized True --image_embedding res101_reg > cub2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset CUB --few_train False --num_shots 20 --generalized True --image_embedding res101_reg > cub3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset CUB --few_train True --num_shots 1 --generalized True --image_embedding res101_naive > cub0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset CUB --few_train True --num_shots 5 --generalized True --image_embedding res101_naive > cub1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset CUB --few_train True --num_shots 10 --generalized True --image_embedding res101_naive > cub2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset CUB --few_train True --num_shots 20 --generalized True --image_embedding res101_naive > cub3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset SUN --few_train False --num_shots 1 --generalized True --image_embedding res101_reg > sun0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset SUN --few_train False --num_shots 5 --generalized True --image_embedding res101_reg > sun1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset SUN --few_train False --num_shots 10 --generalized True --image_embedding res101_reg > sun2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset SUN --few_train True --num_shots 1 --generalized True --image_embedding res101 > sun0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset SUN --few_train True --num_shots 5 --generalized True --image_embedding res101 > sun1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset SUN --few_train True --num_shots 10 --generalized True --image_embedding res101 > sun2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 1 --generalized True --image_embedding res101_naive > awa20.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 5 --generalized True --image_embedding res101_naive > awa21.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 10 --generalized True --image_embedding res101_naive > awa22.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 20 --generalized True --image_embedding res101_naive > awa23.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA2 --few_train True --num_shots 1 --generalized True --image_embedding res101_naive > awa20.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset AWA2 --few_train True --num_shots 5 --generalized True --image_embedding res101_naive > awa21.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset AWA2 --few_train True --num_shots 10 --generalized True --image_embedding res101_naive > awa22.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset AWA2 --few_train True --num_shots 20 --generalized True --image_embedding res101_naive > awa23.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 1 --generalized True --image_embedding res101 > awa10.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 5 --generalized True --image_embedding res101 > awa11.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 10 --generalized True --image_embedding res101 > awa12.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 20 --generalized True --image_embedding res101 > awa13.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA1 --few_train True --num_shots 1 --generalized True --image_embedding res101 > awa10.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset AWA1 --few_train True --num_shots 5 --generalized True --image_embedding res101 > awa11.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset AWA1 --few_train True --num_shots 10 --generalized True --image_embedding res101 > awa12.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset AWA1 --few_train True --num_shots 20 --generalized True --image_embedding res101 > awa13.log 2>&1 &
# reg feature + att
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True --image_embedding res101_reg --class_embedding att > flo0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True --image_embedding res101_reg --class_embedding att_naive > flo1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True --image_embedding res101_reg --class_embedding att_GRU > flo2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True --image_embedding res101_reg --class_embedding att_GRU_biased > flo3.log 2>&1 &
# few shot + class
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train False --num_shots 1 --generalized True --image_embedding res101_reg --class_embedding att_GRU_biased > flo0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train False --num_shots 5 --generalized True --image_embedding res101_reg --class_embedding att_GRU_biased > flo1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset FLO --few_train False --num_shots 10 --generalized True --image_embedding res101_reg --class_embedding att_GRU_biased > flo2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train False --num_shots 20 --generalized True --image_embedding res101_reg --class_embedding att_GRU_biased > flo3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train True --num_shots 1 --generalized True --image_embedding res101_naive --class_embedding att_GRU_biased > flo0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train True --num_shots 5 --generalized True --image_embedding res101_naive --class_embedding att_GRU_biased > flo1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train True --num_shots 10 --generalized True --image_embedding res101_naive --class_embedding att_GRU_biased > flo2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train True --num_shots 20 --generalized True --image_embedding res101_naive --class_embedding att_GRU_biased > flo3.log 2>&1 &
def str2bool(v):
    """Parse a boolean-like command-line string into a real bool.

    Accepts common yes/no spellings case-insensitively; anything else is an
    argparse type error so the CLI reports a clean message.
    """
    truthy = ('yes', 'true', 't', 'y', '1')
    falsy = ('no', 'false', 'f', 'n', '0')
    lowered = v.lower()
    if lowered in truthy:
        return True
    if lowered in falsy:
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
# Command-line interface for the experiment runner; every option has a default.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='FLO', help='FLO')
# str2bool lets the boolean flags be given as strings ('yes', 'no', 'true', ...).
parser.add_argument('--few_train', default = False, type = str2bool, help='use few train samples')
parser.add_argument('--num_shots', type=int, default=5, help='the number of shots, if few_train, then num_shots is for train classes, else for test classes')
parser.add_argument('--generalized', default=False, type = str2bool, help='enable generalized zero-shot learning')
parser.add_argument('--image_embedding', default='res101', help='res101')
parser.add_argument('--class_embedding', default='att', help='att')
args = parser.parse_args()
class myArgs():
    """Hyper-parameter container for the generative zero-shot experiments.

    Copies the parsed command-line ``args``, sets generic defaults, then
    applies per-dataset overrides (AWA1/AWA2, CUB, FLO, SUN, aPY) and a
    final tweak for the regularised image embedding on SUN.
    """
    def __init__(self, args):
        # Values taken straight from the command line.
        self.dataset = args.dataset
        self.few_train = args.few_train
        self.num_shots = args.num_shots
        self.generalized = args.generalized
        self.image_embedding = args.image_embedding
        self.class_embedding = args.class_embedding
        # Generic defaults shared by all datasets.
        self.dataroot = "../data"
        self.syn_num = 100; self.preprocessing = False; self.standardization = False; self.workers = 8
        self.batch_size = 64; self.resSize = 2048; self.attSize = 1024; self.nz = 312; self.ngh = 4096
        self.ndh = 1024; self.nepoch = 2000; self.critic_iter = 5; self.lambda1 = 10; self.lambda2 = 10
        self.lr = 0.001; self.feed_lr = 0.0001; self.dec_lr = 0.0001; self.classifier_lr = 0.001
        self.beta1 = 0.5; self.cuda = True; self.encoded_noise = False; self.manualSeed = 0
        self.nclass_all = 200; self.validation = False; self.encoder_layer_sizes = [8192, 4096]
        self.decoder_layer_sizes = [4096, 8192]; self.gammaD = 1000; self.gammaG = 1000
        self.gammaG_D2 = 1000; self.gammaD2 = 1000; self.latent_size = 312; self.conditional = True
        self.a1 = 1.0; self.a2 = 1.0; self.recons_weight = 1.0; self.feedback_loop = 2
        self.freeze_dec = False
        # Per-dataset overrides.
        if self.dataset in ["AWA1", "AWA2"]:
            self.gammaD = 10; self.gammaG = 10; self.encoded_noise = True
            self.manualSeed = 9182; self.preprocessing = True; self.cuda = True
            self.nepoch = 120; self.syn_num = 1800; self.ngh = 4096; self.ndh = 4096
            self.lambda1 = 10; self.critic_iter = 5; self.nclass_all = 50; self.batch_size = 64; self.nz = 85
            self.latent_size = 85; self.attSize = 85; self.resSize = 2048; self.lr = 0.00001; self.classifier_lr = 0.001
            self.recons_weight = 0.1; self.freeze_dec = True; self.feed_lr = 0.0001; self.dec_lr = 0.0001; self.feedback_loop = 2
            self.a1 = 0.01; self.a2 = 0.01
        elif self.dataset == "CUB":
            self.gammaD = 10; self.gammaG = 10; self.manualSeed = 3483; self.encoded_noise = True; self.preprocessing = True
            self.cuda = True; self.nepoch = 300; self.ngh = 4096
            self.ndh = 4096; self.lr = 0.0001; self.classifier_lr = 0.001; self.lambda1 = 10; self.critic_iter = 5
            self.nclass_all = 200; self.batch_size = 64; self.nz = 312; self.latent_size = 312; self.attSize = 312
            self.resSize = 2048; self.syn_num = 300; self.recons_weight = 0.01; self.a1 = 1; self.a2 = 1
            self.feed_lr = 0.00001; self.dec_lr = 0.0001; self.feedback_loop = 2
        elif self.dataset == "FLO":
            self.gammaD = 10; self.gammaG = 10; self.nclass_all = 102; self.latent_size = 1024; self.manualSeed = 806
            self.syn_num = 1200; self.preprocessing = True; self.nepoch = 500
            self.ngh = 4096; self.ndh = 4096; self.lambda1 = 10; self.critic_iter = 5; self.batch_size = 64
            self.nz = 1024; self.attSize = 1024; self.resSize = 2048; self.lr = 0.0001; self.classifier_lr = 0.001
            self.cuda = True; self.recons_weight = 0.01; self.feedback_loop = 2
            self.feed_lr = 0.00001; self.a1 = 0.5; self.a2 = 0.5; self.dec_lr = 0.0001
        elif self.dataset == "SUN":
            self.gammaD = 1; self.gammaG = 1; self.manualSeed = 4115; self.encoded_noise = True; self.preprocessing = True
            self.cuda = True; self.nepoch = 400
            self.ngh = 4096; self.ndh = 4096; self.lambda1 = 10; self.critic_iter = 5; self.batch_size = 64
            self.nz = 102; self.latent_size = 102; self.attSize = 102; self.lr = 0.001; self.classifier_lr = 0.0005
            self.syn_num = 400; self.nclass_all = 717; self.recons_weight = 0.01; self.a1 = 0.1; self.a2 = 0.01
            self.feedback_loop = 2; self.feed_lr = 0.0001
            if self.image_embedding == "res101_reg":
                # BUG FIX: was ``self.self.lr = 0.0001`` which raised
                # AttributeError whenever SUN + res101_reg was selected.
                self.lr = 0.0001; self.classifier_lr = 0.0001; self.recons_weight = 0.0001
        elif self.dataset == "aPY":
            self.gammaD = 10; self.gammaG = 10; self.nclass_all = 32; self.latent_size = 1024; self.manualSeed = 806
            self.syn_num = 1200; self.preprocessing = True; self.nepoch = 500
            self.ngh = 4096; self.ndh = 4096; self.lambda1 = 10; self.critic_iter = 5; self.batch_size = 64
            self.nz = 64; self.attSize = 64; self.resSize = 2048; self.lr = 0.0001; self.classifier_lr = 0.001
            self.cuda = True; self.recons_weight = 0.01; self.feedback_loop = 2
            self.feed_lr = 0.00001; self.a1 = 0.5; self.a2 = 0.5; self.dec_lr = 0.0001
# Build the effective option object and derive the size-dependent fields.
opt = myArgs(args)
opt.lambda2 = opt.lambda1
opt.encoder_layer_sizes[0] = opt.resSize
opt.decoder_layer_sizes[-1] = opt.resSize
opt.latent_size = opt.attSize
# NOTE(review): the "a2: " label is printed AFTER the a2 value, so the labels
# in this line are misaligned with the values.
print("lr: ", opt.lr, "classifier_lr: ", opt.classifier_lr, "recons_weight: ", opt.recons_weight, "a1: ", opt.a1, opt.a2, "a2: ", "feed_lr: ", opt.feed_lr)
run(opt)
| [
"argparse.ArgumentTypeError",
"train_images.run",
"argparse.ArgumentParser"
] | [((18138, 18163), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (18161, 18163), False, 'import argparse\n'), ((23652, 23660), 'train_images.run', 'run', (['opt'], {}), '(opt)\n', (23655, 23660), False, 'from train_images import run\n'), ((18074, 18127), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (18100, 18127), False, 'import argparse\n')] |
from kivy.app import App
from kivy.config import Config
from kivy.uix.listview import ListItemButton
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.label import Label
from kivy.clock import Clock
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from functools import partial
from bitcoin.core import CoreMainParams
import bitcoin
# Config.set('graphics', 'width', '1366')
# Config.set('graphics', 'height', '768')
Config.set('kivy', 'window_icon', 'favicon.ico')
import rpclib
import chatlib
import bitcoinrpc
import ast
from bitcoin.wallet import P2PKHBitcoinAddress
from bitcoin.core import x
from datetime import datetime
class CoinParams(CoreMainParams):
    """Chain parameters for the target network (presumably a Komodo-family
    asset chain — default P2P port 7770; verify against the daemon).

    Overrides python-bitcoinlib's Bitcoin mainnet defaults: network magic,
    port, and the Base58 version bytes for addresses and WIF keys.
    """
    MESSAGE_START = b'\x24\xe9\x27\x64'
    DEFAULT_PORT = 7770
    BASE58_PREFIXES = {'PUBKEY_ADDR': 60,
                       'SCRIPT_ADDR': 85,
                       'SECRET_KEY': 188}
# Make python-bitcoinlib use this chain instead of Bitcoin mainnet.
bitcoin.params = CoinParams
class LoginPage(Screen):
    """Login screen: collects RPC credentials and opens the daemon connection."""

    def verify_credentials(self):
        """Try to connect with the entered credentials.

        On success, store the connection on the running app and switch to the
        user screen; on any failure (including a non-numeric port), report the
        error and clear the input fields.
        """
        try:
            server_input = self.ids["rpcserver"].text
            user_input = self.ids["rpcuser"].text
            password_input = self.ids["rpcpassword"].text
            port_input = int(self.ids["port"].text)
            connection = rpclib.rpc_connect(user_input, password_input, server_input, port_input)
        except Exception as e:
            print(e)
            print("Not connected. Please check credentials")
            #TODO: have to throw popup and in this case not clean text fields
            for field_id in ("rpcserver", "rpcuser", "rpcpassword", "port"):
                self.ids[field_id].text = ''
        else:
            App.get_running_app().rpc_connection = connection
            App.get_running_app().is_connected = True
            self.manager.current = "user"
class UserPage(Screen):
    """Main chat screen; layout and behaviour live in the kv file."""
    pass
class ScreenManagement(ScreenManager):
    """Root screen manager that switches between the login and user screens."""
    pass
class MessagesBoxLabel(Label):
    """Label mirroring the id of the currently selected room."""
    def update(self):
        # NOTE(review): reads the class attribute directly rather than the
        # running app instance — confirm this stays in sync with
        # App.get_running_app().active_room_id.
        self.text = TrollboxCCApp.active_room_id
class RoomListItemButton(ListItemButton):
    """One entry in the room list; releasing it selects the room."""
    def on_release(self):
        # setting active room id after room button release
        # The last 64 characters of the button label are the room's oracle txid.
        TrollboxCCApp.active_room_id = str(self.text[-64:])
# Has to accept a time delta argument for compatibility with the kivy Clock.
class MessageUpdater(Widget):
    """Polls the active room's oracle for new messages and appends them to the view."""

    def messages_checker(self, dt):
        """Clock callback: fetch and render any messages newer than the stored batons.

        `dt` is the elapsed time supplied by kivy's Clock (unused).
        NOTE(review): every path through the outer ``while True`` ends in
        ``break``, so the loop body runs at most once per Clock tick.
        """
        while True:
            if App.get_running_app().is_connected == False:
                break
            else:
                # getting oraclesinfo for active room
                oracles_info = rpclib.oracles_info(App.get_running_app().rpc_connection, App.get_running_app().active_room_id)
                if App.get_running_app().active_room_id == '':
                    print("Seems messages grabbing works")
                    break
                else:
                    # flushing it to not print previous messages
                    baton_returned = {}
                    # getting batons to print on each iteration
                    data_to_print = {}
                    # getting dictionary with current batontxid for each publisher
                    for entry in oracles_info["registered"]:
                        baton_returned[entry["publisher"]] = entry["batontxid"]
                    # updating batons for all publishers in app array
                    for publisher in baton_returned:
                        if publisher in App.get_running_app().current_baton:
                            # if publisher already here updating baton and adding it to print queue
                            if baton_returned[publisher] != App.get_running_app().current_baton[publisher]:
                                App.get_running_app().current_baton[publisher] = baton_returned[publisher]
                                try:
                                    # Latest sample for this publisher's baton; [0][0] is the raw message payload.
                                    data_to_print[publisher] = rpclib.oracles_samples(App.get_running_app().rpc_connection, App.get_running_app().active_room_id, baton_returned[publisher], "1")['samples'][0][0]
                                except IndexError:
                                    break
                            # if baton is the same as before there is nothing to update
                            else:
                                break
                        # if publisher not here adding it with latest baton and adding baton to print queue
                        else:
                            App.get_running_app().current_baton[publisher] = baton_returned[publisher]
                            try:
                                data_to_print[publisher] = rpclib.oracles_samples(App.get_running_app().rpc_connection, App.get_running_app().active_room_id, baton_returned[publisher], "1")['samples'][0][0]
                            except IndexError:
                                break
                    # finally printing messages
                    try:
                        for publisher in data_to_print:
                            # Payload is a python-literal list [timestamp, text]; escape CR/LF so literal_eval accepts it.
                            message_list = ast.literal_eval(data_to_print[publisher].replace('\r','\\r').replace('\n','\\n'))
                            kvsearch_result = rpclib.kvsearch(App.get_running_app().rpc_connection, publisher)
                            if 'value' in kvsearch_result:
                                # KV value layout: first 88 chars = signature, remainder = nickname.
                                addr = str(P2PKHBitcoinAddress.from_pubkey(x(publisher)))
                                signature = kvsearch_result['value'][:88]
                                value = kvsearch_result['value'][88:]
                                verifymessage_result = rpclib.verifymessage(App.get_running_app().rpc_connection, addr, signature, value)
                                if verifymessage_result:
                                    message_to_print = datetime.utcfromtimestamp(message_list[0]).strftime('%D %H:%M') + '[' + kvsearch_result['value'][88:] + '-' + publisher[0:10] + ']:' + message_list[1]
                                else:
                                    message_to_print = 'IMPROPER SIGNATURE' + datetime.utcfromtimestamp(message_list[0]).strftime('%D %H:%M') + '[' + kvsearch_result['value'][88:] + '-' + publisher[0:10] + ']:' + message_list[1]
                            else:
                                # No registered nickname: show the truncated pubkey only.
                                message_to_print = datetime.utcfromtimestamp(message_list[0]).strftime('%D %H:%M') + '[' + publisher[0:10] + ']:' + message_list[1]
                            App.get_running_app().messages.append(message_to_print)
                            App.get_running_app().root.ids.messagesview.adapter.data = App.get_running_app().messages
                        break
                    except bitcoinrpc.authproxy.JSONRPCException as e:
                        print(App.get_running_app().active_room_id)
                        print(e)
                        break
class CreateRoomButton(Button):
    """Button that creates a new chat room as an on-chain oracle."""

    def create_room(self, room_name, room_description):
        """Create the room oracle and broadcast the funding transaction."""
        # Rooms carry the "DCHAT " prefix so chat clients can recognise them.
        secret_room_description = "DCHAT " + room_description
        try:
            creation_hex = rpclib.oracles_create(App.get_running_app().rpc_connection, room_name, secret_room_description, "S")
        except Exception as err:
            print(err)
            return
        print(creation_hex)
        try:
            broadcast_txid = rpclib.sendrawtransaction(App.get_running_app().rpc_connection, creation_hex["hex"])
        except KeyError as err:
            # The daemon returned an error object instead of raw hex.
            print(err)
            print(creation_hex)
        else:
            print(broadcast_txid)
class CreateNicknameButton(Button):
    """Button that registers a nickname via the on-chain KV store."""
    def create_nickname(self, nickname, password):
        new_nickname = chatlib.set_nickname(App.get_running_app().rpc_connection, nickname, password)
        print(new_nickname)
class SubscribeOnRoomButton(Button):
    """Button that funds the active room so the user can publish messages."""
    def subscribe_room(self, utxos_amount):
        chatlib.room_subscription(App.get_running_app().rpc_connection, str(App.get_running_app().active_room_id), utxos_amount)
class TrollboxCCApp(App):
    """Kivy application object; also holds the shared chat state."""
    title = "OraclesCC Trollbox"
    # Oracle txid of the currently selected room ('' = no room selected yet).
    active_room_id = ''
    # Rendered chat lines shown in the messages list view.
    messages = []
    #key: publisher, value: batontxid
    current_baton = {}
    # Set to True once LoginPage has established an RPC connection.
    is_connected = False
    #rpc_connection = None
    def get_rooms_list(self):
        # Room data for the list view; '' until the user has logged in.
        if App.get_running_app().is_connected == False:
            self.data = ''
        else:
            self.data = chatlib.get_chat_rooms(App.get_running_app().rpc_connection)
        return self.data
    # NOTE(review): the two callbacks below take the widget instance as their
    # first argument instead of `self` — presumably wired up as kv callbacks;
    # confirm before renaming.
    def on_text(instance, value):
        print('The widget', instance, 'have:', value)
    def send_message(instance, inputid):
        # Publish the input widget's text to the active room, then clear it.
        new_message = chatlib.message_sending(App.get_running_app().rpc_connection, App.get_running_app().active_room_id, str(inputid.text))
        print(new_message)
        inputid.text = ''
    def callback_refresh_rooms(self, roomslist):
        roomslist.adapter.data = self.get_rooms_list()
        print("Room list succesfully refreshed")
    # checking selected chat room for new messages every 0.5 seconds
    # NOTE(review): these statements execute at class-creation time, i.e. as a
    # side effect of importing this module.
    message_updater = MessageUpdater()
    check_messages = Clock.schedule_interval(partial(MessageUpdater.messages_checker, message_updater), 0.5)
    check_messages()
if __name__ == "__main__":
    # Start the Kivy app; blocks until the window is closed.
    TrollboxCCApp().run()
| [
"datetime.datetime.utcfromtimestamp",
"rpclib.rpc_connect",
"kivy.config.Config.set",
"functools.partial",
"bitcoin.core.x",
"kivy.app.App.get_running_app"
] | [((461, 509), 'kivy.config.Config.set', 'Config.set', (['"""kivy"""', '"""window_icon"""', '"""favicon.ico"""'], {}), "('kivy', 'window_icon', 'favicon.ico')\n", (471, 509), False, 'from kivy.config import Config\n'), ((8959, 9016), 'functools.partial', 'partial', (['MessageUpdater.messages_checker', 'message_updater'], {}), '(MessageUpdater.messages_checker, message_updater)\n', (8966, 9016), False, 'from functools import partial\n'), ((1261, 1333), 'rpclib.rpc_connect', 'rpclib.rpc_connect', (['user_input', 'password_input', 'server_input', 'port_input'], {}), '(user_input, password_input, server_input, port_input)\n', (1279, 1333), False, 'import rpclib\n'), ((7567, 7588), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (7586, 7588), False, 'from kivy.app import App\n'), ((7771, 7792), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (7790, 7792), False, 'from kivy.app import App\n'), ((8130, 8151), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (8149, 8151), False, 'from kivy.app import App\n'), ((8503, 8524), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (8522, 8524), False, 'from kivy.app import App\n'), ((8541, 8562), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (8560, 8562), False, 'from kivy.app import App\n'), ((1784, 1805), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (1803, 1805), False, 'from kivy.app import App\n'), ((1850, 1871), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (1869, 1871), False, 'from kivy.app import App\n'), ((2506, 2527), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (2525, 2527), False, 'from kivy.app import App\n'), ((6990, 7011), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (7009, 7011), False, 'from kivy.app import App\n'), ((7813, 7834), 'kivy.app.App.get_running_app', 
'App.get_running_app', ([], {}), '()\n', (7832, 7834), False, 'from kivy.app import App\n'), ((8263, 8284), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (8282, 8284), False, 'from kivy.app import App\n'), ((2696, 2717), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (2715, 2717), False, 'from kivy.app import App\n'), ((2734, 2755), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (2753, 2755), False, 'from kivy.app import App\n'), ((2791, 2812), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (2810, 2812), False, 'from kivy.app import App\n'), ((7242, 7263), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (7261, 7263), False, 'from kivy.app import App\n'), ((6532, 6553), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (6551, 6553), False, 'from kivy.app import App\n'), ((3521, 3542), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (3540, 3542), False, 'from kivy.app import App\n'), ((5205, 5226), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (5224, 5226), False, 'from kivy.app import App\n'), ((6682, 6703), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (6701, 6703), False, 'from kivy.app import App\n'), ((4508, 4529), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (4527, 4529), False, 'from kivy.app import App\n'), ((5380, 5392), 'bitcoin.core.x', 'x', (['publisher'], {}), '(publisher)\n', (5381, 5392), False, 'from bitcoin.core import x\n'), ((5603, 5624), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (5622, 5624), False, 'from kivy.app import App\n'), ((6393, 6414), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (6412, 6414), False, 'from kivy.app import App\n'), ((3706, 3727), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (3725, 
3727), False, 'from kivy.app import App\n'), ((3786, 3807), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (3805, 3807), False, 'from kivy.app import App\n'), ((6473, 6494), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (6492, 6494), False, 'from kivy.app import App\n'), ((4698, 4719), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (4717, 4719), False, 'from kivy.app import App\n'), ((4736, 4757), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (4755, 4757), False, 'from kivy.app import App\n'), ((6256, 6298), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['message_list[0]'], {}), '(message_list[0])\n', (6281, 6298), False, 'from datetime import datetime\n'), ((3984, 4005), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (4003, 4005), False, 'from kivy.app import App\n'), ((4022, 4043), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (4041, 4043), False, 'from kivy.app import App\n'), ((5769, 5811), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['message_list[0]'], {}), '(message_list[0])\n', (5794, 5811), False, 'from datetime import datetime\n'), ((6028, 6070), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['message_list[0]'], {}), '(message_list[0])\n', (6053, 6070), False, 'from datetime import datetime\n')] |
import os
import sys
import shutil
import csv
import subprocess
import xml.etree.ElementTree as ET
import random
import re
import time
sys.path.append('/home/kimsoohyun/00-Research/02-Graph/01-tapsterbot/dataSendTest')
import req
from change_axis_qhd import ChangeAxis as C1
FLAGS = None
def get_point(index, package_name):
    """Wait for the app's UI to settle, then return a point to click.

    Repeatedly dumps the UI with ``adb shell uiautomator dump``; once the last
    five recorded node counts are identical (rendering finished), returns the
    most recent point chosen by ``parse_xml_log``.
    """
    activity_list = list()
    # waiting for rendering end
    while True:
        # Keep only the five most recent node counts.
        if len(activity_list) > 5:
            activity_list.pop(0)
        if len(activity_list) ==5 and len(set(activity_list)) == 1:
            break
        #export XML log
        command = 'adb shell uiautomator dump /sdcard/{0}.xml'.format(index)
        dump_output = None
        try:
            dump_output = command_output(command)
        except subprocess.CalledProcessError:
            print("uiautomator dump error")
        # NOTE: 'hierchary' reportedly matches adb's actual (misspelled) output
        # on many Android versions — do not "fix" the spelling here.
        # NOTE(review): when the dump command itself failed, dump_output stays
        # None and we fall through to pulling a possibly stale XML file.
        if dump_output is not None and \
            not dump_output.startswith('UI hierchary dumped to:'):
            activity_list.append(0)
            point = (random.randrange(0, 1080),
                    random.randrange(0, 1920))
            continue
        #pull XML log
        command = 'adb pull /sdcard/{0}.xml ./dataset/00-xml/{1}/{0}.xml'.format(index, package_name)
        try:
            command_check(command)
        except subprocess.CalledProcessError:
            pass
        xml = './dataset/00-xml/{0}/{1}.xml'.format(package_name, index)
        size, point = parse_xml_log(xml)
        activity_list.append(size)
    return point
def check_binary(binaries):
    """Raise ``FileNotFoundError`` unless every named binary is on PATH."""
    missing = [name for name in binaries if shutil.which(name) is None]
    if missing:
        raise FileNotFoundError
def check_dirs(dirs):
    """Create every directory in *dirs* that does not exist yet (idempotent)."""
    for target in dirs:
        os.makedirs(target, exist_ok=True)
def terminate_env(pss):
    """Send SIGINT (``kill -2``) to every on-device process matching *pss*.

    Each name in *pss* is looked up with ``adb shell ps | grep``; names with
    no matching process are skipped silently. Only the first PID found per
    name is signalled.
    """
    for ps in pss:
        command = 'adb shell "ps | grep {0}"'.format(ps)
        try:
            output = command_output(command)
        except subprocess.CalledProcessError:
            # grep exits non-zero when nothing matched: no such process.
            continue
        # First number on the matched ps line is the PID.
        # BUG FIX: the pattern was the non-raw string '\d+' (invalid escape
        # sequence, a DeprecationWarning in modern Python).
        psnum = re.findall(r'\d+', output)[0]
        command = 'adb shell kill -2 {0}'.format(psnum)
        command_check(command)
def command_popen(command):
    """Launch *command* through the shell without waiting; return the Popen handle."""
    proc = subprocess.Popen(command, shell=True)
    return proc
def command_check(command):
    """Run *command* through the shell; return 0 or raise ``CalledProcessError``."""
    result = subprocess.check_call(command, shell=True)
    return result
def command_output(command):
    """Run *command* through the shell and return its stdout decoded as UTF-8."""
    raw = subprocess.check_output(command, shell=True)
    return raw.decode('utf-8')
def parse_xml_log(path):
    """Parse a uiautomator XML dump and pick a point to click.

    Returns ``(size, point)`` where *size* is the total number of nodes in
    the hierarchy and *point* is the centre of a randomly chosen clickable
    node's bounds. When no clickable node exists (or its bounds cannot be
    parsed), a random on-screen coordinate is returned instead.
    """
    tree = ET.parse(path)
    root = tree.getroot()
    it = root.iter()
    size = 0
    bounds = list()
    for item in it:
        size = size+1
        if item.get('clickable') == 'true':
            bounds.append(item.get('bounds'))
    try:
        choose = random.choice(bounds)
        # bounds look like "[x1,y1][x2,y2]".
        axes = re.findall(r'\d+', choose)
        # BUG FIX: the centre is ((x1+x2)/2, (y1+y2)/2); the old expression
        # ``int(axes[0])+int(axes[2])/2`` computed x1 + x2/2 instead.
        point = ((int(axes[0])+int(axes[2]))/2, (int(axes[1])+int(axes[3]))/2)
    except ValueError:
        point = (random.randrange(0, 1080),
                random.randrange(0, 1920))
    except IndexError:
        # random.choice on an empty list / too few numbers in the bounds.
        point = (random.randrange(0, 1080),
                random.randrange(0, 1920))
    return size, point
def main(args):
    """Drive random tap events on Android apps through the Tapster robot.

    For every package listed in the ``args.input`` CSV (column
    ``package_name``):

    1. launch the app with ``adb shell monkey``;
    2. for ``args.event`` iterations, pick the centre of a random clickable
       node from the current uiautomator XML dump, convert it to robot
       coordinates, and send it to the robot server at ``args.ip``;
    3. read the actually touched coordinates back from ``adb shell getevent``
       (ABS_MT_POSITION events; display coords are ``x * 1440 / 4096`` and
       ``y * 2960 / 4096``);
    4. stop the app with BACK key presses and ``am force-stop``.

    Intended output (per the original design notes): a CSV of
    (appname, sent axis, expected robot axis, clicked axis,
    clicked robot axis, success flag).
    """
    binaries = ['adb']
    check_binary(binaries)
    # Converts QHD display coordinates (1440x2960) into robot-arm coordinates.
    change_point = C1(1440, 2960, 40, 100, 695)
    dirs = ['./dataset/01-coordinate-csv',
            './dataset/00-xml']
    check_dirs(dirs)
    print('checked all binaries dirs')
    # Read the list of app packages to exercise.
    app_package_list = args.input
    event = args.event
    if not os.path.exists(app_package_list):
        raise Exception(' Need app_list.csv')
    app_list = list()
    with open(app_package_list, 'r') as f:
        reader = csv.DictReader(f)
        for row in reader:
            print(row['package_name'])
            app_list.append(row['package_name'])
    # Iterate over the apps.
    for package_name in app_list:
        dirs = ['./dataset/00-xml/'+package_name]
        check_dirs(dirs)
        command = 'adb shell rm /sdcard/*.xml'
        try:
            command_check(command)
        except subprocess.CalledProcessError:
            pass
        # Launch the app via adb.
        command = 'adb shell monkey -p {0} -c android.intent.category.LAUNCHER 1'.format(package_name)
        try:
            command_check(command)
        except subprocess.CalledProcessError:
            pass
        for index in range(0, event):
            # Pick a clickable point from the current UI dump.
            send_axis = get_point(index, package_name)
            send_bot_axis = (change_point.c_x(send_axis[0]), \
                    change_point.c_y(send_axis[1]))
            res = req.send_req(args.ip, \
                    send_bot_axis[0], \
                    send_bot_axis[1], \
                    package_name)
            # BUG FIX: the grep pattern was misspelled "ABS_MT_POSTION" (the
            # function's own notes spell ABS_MT_POSITION), so it never
            # matched any touch events.
            command = 'adb shell getevent -l /dev/input/event0 | grep "ABS_MT_POSITION"'
            try:
                result = command_output(command)
            except subprocess.CalledProcessError:
                result = None
            print(result)
        # Stop the app: several BACK presses, then a force-stop.
        for index in range(0, 5):
            command = 'adb shell input keyevent KEYCODE_BACK'
            try:
                command_check(command)
            except subprocess.CalledProcessError:
                pass
        command = 'adb shell am force-stop {0}'.format(package_name)
        try:
            command_check(command)
        except subprocess.CalledProcessError:
            pass
if __name__ == "__main__":
    # CLI entry point: -i app list CSV, -e events per app, -p robot server IP.
    import argparse
    parser = argparse.ArgumentParser(
            description='Mobile xml extractor')
    parser.add_argument('-i', '--input', type=str,
                        required=True,
                        help=('list of app package names to test'))
    parser.add_argument('-e', '--event', type=int,
                        default=10,
                        help=('the number of generated user event(default: 10)'))
    parser.add_argument('-p', '--ip', type=str,
                        required=True,
                        help=('input send ip address'))
    # parse_known_args so unrelated extra flags do not abort the run.
    FLAGS, _ = parser.parse_known_args()
    main(FLAGS)
| [
"subprocess.check_output",
"os.path.exists",
"random.choice",
"xml.etree.ElementTree.parse",
"csv.DictReader",
"argparse.ArgumentParser",
"subprocess.check_call",
"os.makedirs",
"subprocess.Popen",
"req.send_req",
"shutil.which",
"random.randrange",
"change_axis_qhd.ChangeAxis",
"re.findal... | [((590, 678), 'sys.path.append', 'sys.path.append', (['"""/home/kimsoohyun/00-Research/02-Graph/01-tapsterbot/dataSendTest"""'], {}), "(\n '/home/kimsoohyun/00-Research/02-Graph/01-tapsterbot/dataSendTest')\n", (605, 678), False, 'import sys\n'), ((2590, 2627), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (2606, 2627), False, 'import subprocess\n'), ((2669, 2711), 'subprocess.check_call', 'subprocess.check_call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (2690, 2711), False, 'import subprocess\n'), ((2853, 2867), 'xml.etree.ElementTree.parse', 'ET.parse', (['path'], {}), '(path)\n', (2861, 2867), True, 'import xml.etree.ElementTree as ET\n'), ((4335, 4363), 'change_axis_qhd.ChangeAxis', 'C1', (['(1440)', '(2960)', '(40)', '(100)', '(695)'], {}), '(1440, 2960, 40, 100, 695)\n', (4337, 4363), True, 'from change_axis_qhd import ChangeAxis as C1\n'), ((8388, 8447), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Mobile xml extractor"""'}), "(description='Mobile xml extractor')\n", (8411, 8447), False, 'import argparse\n'), ((2148, 2184), 'os.makedirs', 'os.makedirs', (['dir_path'], {'exist_ok': '(True)'}), '(dir_path, exist_ok=True)\n', (2159, 2184), False, 'import os\n'), ((3106, 3127), 'random.choice', 'random.choice', (['bounds'], {}), '(bounds)\n', (3119, 3127), False, 'import random\n'), ((3143, 3169), 're.findall', 're.findall', (['"""\\\\d+"""', 'choose'], {}), "('\\\\d+', choose)\n", (3153, 3169), False, 'import re\n'), ((4929, 4961), 'os.path.exists', 'os.path.exists', (['app_package_list'], {}), '(app_package_list)\n', (4943, 4961), False, 'import os\n'), ((5421, 5438), 'csv.DictReader', 'csv.DictReader', (['f'], {}), '(f)\n', (5435, 5438), False, 'import csv\n'), ((2024, 2044), 'shutil.which', 'shutil.which', (['binary'], {}), '(binary)\n', (2036, 2044), False, 'import shutil\n'), ((2433, 2459), 're.findall', 're.findall', 
(['"""\\\\d+"""', 'output'], {}), "('\\\\d+', output)\n", (2443, 2459), False, 'import re\n'), ((2754, 2798), 'subprocess.check_output', 'subprocess.check_output', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (2777, 2798), False, 'import subprocess\n'), ((7295, 7366), 'req.send_req', 'req.send_req', (['args.ip', 'send_bot_axis[0]', 'send_bot_axis[1]', 'package_name'], {}), '(args.ip, send_bot_axis[0], send_bot_axis[1], package_name)\n', (7307, 7366), False, 'import req\n'), ((1457, 1482), 'random.randrange', 'random.randrange', (['(0)', '(1080)'], {}), '(0, 1080)\n', (1473, 1482), False, 'import random\n'), ((1505, 1530), 'random.randrange', 'random.randrange', (['(0)', '(1920)'], {}), '(0, 1920)\n', (1521, 1530), False, 'import random\n'), ((3284, 3309), 'random.randrange', 'random.randrange', (['(0)', '(1080)'], {}), '(0, 1080)\n', (3300, 3309), False, 'import random\n'), ((3328, 3353), 'random.randrange', 'random.randrange', (['(0)', '(1920)'], {}), '(0, 1920)\n', (3344, 3353), False, 'import random\n'), ((3395, 3420), 'random.randrange', 'random.randrange', (['(0)', '(1080)'], {}), '(0, 1080)\n', (3411, 3420), False, 'import random\n'), ((3439, 3464), 'random.randrange', 'random.randrange', (['(0)', '(1920)'], {}), '(0, 1920)\n', (3455, 3464), False, 'import random\n')] |
# -*- coding: utf-8 -*-
'''
Special rule for processing Hangul
https://github.com/kyubyong/g2pK
'''
import re
from g2pk.utils import gloss, get_rule_id2text
rule_id2text = get_rule_id2text()
############################ vowels ############################
def jyeo(inp, descriptive=False, verbose=False):
    """Rule 5.1: the vowel ㅕ after ㅈ/ㅉ/ㅊ is pronounced [ㅓ].

    `inp` is a string of decomposed Hangul jamo; returns the rewritten string.
    """
    rule = rule_id2text["5.1"]
    # Treated as a general rule (applies regardless of `descriptive`). by kyubyong
    out = re.sub("([ᄌᄍᄎ])ᅧ", r"\1ᅥ", inp)
    gloss(verbose, out, inp, rule)  # presumably reports the change when verbose — see g2pk.utils
    return out
def ye(inp, descriptive=False, verbose=False):
    """Rule 5.2: after most onset consonants, ㅖ may be pronounced [ㅔ].

    Applied only in descriptive mode; prescriptively the input is unchanged.
    """
    rule = rule_id2text["5.2"]
    # In practice speakers pronounce 'ㅖ' as [ㅔ] everywhere except 예, 녜, 셰, 쎼. by kyubyong
    if descriptive:
        # NOTE(review): this character class appears to contain the compatibility
        # jamo 'ㄹ' rather than the choseong 'ᄅ' used elsewhere in this file —
        # confirm the intended codepoint.
        out = re.sub("([ᄀᄁᄃᄄㄹᄆᄇᄈᄌᄍᄎᄏᄐᄑᄒ])ᅨ", r"\1ᅦ", inp)
    else:
        out = inp
    gloss(verbose, out, inp, rule)
    return out
def consonant_ui(inp, descriptive=False, verbose=False):
    """Rule 5.3: ㅢ after any onset consonant is pronounced [ㅣ]."""
    rule = rule_id2text["5.3"]
    out = re.sub("([ᄀᄁᄂᄃᄄᄅᄆᄇᄈᄉᄊᄌᄍᄎᄏᄐᄑᄒ])ᅴ", r"\1ᅵ", inp)
    gloss(verbose, out, inp, rule)
    return out
def josa_ui(inp, descriptive=False, verbose=False):
    """Rule 5.4.2: the particle 의 (tagged "/J") may be pronounced [에].

    In descriptive mode the tagged particle is rewritten to 에; otherwise only
    the "/J" tag is stripped and the spelling kept.
    """
    rule = rule_id2text["5.4.2"]
    # In practice speakers very likely pronounce the particle '의' as [ㅔ].
    if descriptive:
        out = re.sub("의/J", "에", inp)
    else:
        out = inp.replace("/J", "")
    gloss(verbose, out, inp, rule)
    return out
def vowel_ui(inp, descriptive=False, verbose=False):
    """Rule 5.4.1: non-word-initial 의 may be pronounced [이] (descriptive only)."""
    rule = rule_id2text["5.4.1"]
    # In practice speakers very likely pronounce non-word-initial '의' as [ㅣ].
    if descriptive:
        out = re.sub("(\Sᄋ)ᅴ", r"\1ᅵ", inp)
    else:
        out = inp
    gloss(verbose, out, inp, rule)
    return out
def jamo(inp, descriptive=False, verbose=False):
    """Rule 16: liaison in consonant-letter names.

    Rewrites the coda + following ㅇ onset in letter-name contexts
    (e.g. the ㄷ coda of 귿 before a vowel surfaces as ㅅ).
    """
    rule = rule_id2text["16"]
    out = inp
    out = re.sub("([그])ᆮᄋ", r"\1ᄉ", out)
    out = re.sub("([으])[ᆽᆾᇀᇂ]ᄋ", r"\1ᄉ", out)
    out = re.sub("([으])[ᆿ]ᄋ", r"\1ᄀ", out)
    out = re.sub("([으])[ᇁ]ᄋ", r"\1ᄇ", out)
    gloss(verbose, out, inp, rule)
    return out
############################ 어간 받침 ############################
def rieulgiyeok(inp, descriptive=False, verbose=False):
    """Rule 11.1: verb-stem coda ㄺ (tagged "/P") before ㄱ/ㄲ → ㄹ + ㄲ."""
    rule = rule_id2text["11.1"]
    out = inp
    # The coda simplifies to ㄹ and the following onset is tensified to ㄲ.
    out = re.sub("ᆰ/P([ᄀᄁ])", r"ᆯᄁ", out)
    gloss(verbose, out, inp, rule)
    return out
def rieulbieub(inp, descriptive=False, verbose=False):
    """Rule 25: after verb-stem codas ㄼ/ㄾ (tagged "/P") the next onset is tensified."""
    rule = rule_id2text["25"]
    out = inp
    out = re.sub("([ᆲᆴ])/Pᄀ", r"\1ᄁ", out)
    out = re.sub("([ᆲᆴ])/Pᄃ", r"\1ᄄ", out)
    out = re.sub("([ᆲᆴ])/Pᄉ", r"\1ᄊ", out)
    out = re.sub("([ᆲᆴ])/Pᄌ", r"\1ᄍ", out)
    gloss(verbose, out, inp, rule)
    return out
def verb_nieun(inp, descriptive=False, verbose=False):
    """Rule 24: after verb-stem codas ㄴ/ㅁ (and ㄵ/ㄻ) the next onset is tensified."""
    rule = rule_id2text["24"]
    out = inp
    # (pattern, replacement) table; ㄵ/ㄻ additionally simplify to ㄴ/ㅁ.
    pairs = [ ("([ᆫᆷ])/Pᄀ", r"\1ᄁ"),
              ("([ᆫᆷ])/Pᄃ", r"\1ᄄ"),
              ("([ᆫᆷ])/Pᄉ", r"\1ᄊ"),
              ("([ᆫᆷ])/Pᄌ", r"\1ᄍ"),

              ("ᆬ/Pᄀ", "ᆫᄁ"),
              ("ᆬ/Pᄃ", "ᆫᄄ"),
              ("ᆬ/Pᄉ", "ᆫᄊ"),
              ("ᆬ/Pᄌ", "ᆫᄍ"),

              ("ᆱ/Pᄀ", "ᆷᄁ"),
              ("ᆱ/Pᄃ", "ᆷᄄ"),
              ("ᆱ/Pᄉ", "ᆷᄊ"),
              ("ᆱ/Pᄌ", "ᆷᄍ") ]
    for str1, str2 in pairs:
        out = re.sub(str1, str2, out)
    gloss(verbose, out, inp, rule)
    return out
def balb(inp, descriptive=False, verbose=False):
    """Rule 10.1: exceptional stems where coda ㄼ is pronounced [ㅂ] (밟-, 넓죽/넓둥)."""
    rule = rule_id2text["10.1"]
    out = inp
    # Coda position: end of string or a following consonant other than ㅇ/ㅎ.
    syllable_final_or_consonants = "($|[^ᄋᄒ])"
    # exceptions
    out = re.sub(f"(바)ᆲ({syllable_final_or_consonants})", r"\1ᆸ\2", out)
    out = re.sub(f"(너)ᆲ([ᄌᄍ]ᅮ|[ᄃᄄ]ᅮ)", r"\1ᆸ\2", out)
    gloss(verbose, out, inp, rule)
    return out
def palatalize(inp, descriptive=False, verbose=False):
    """Rule 17: palatalization — codas ㄷ/ㅌ (and ㄾ) before 이/여 become ㅈ/ㅊ onsets."""
    rule = rule_id2text["17"]
    out = inp
    out = re.sub("ᆮᄋ([ᅵᅧ])", r"ᄌ\1", out)
    out = re.sub("ᇀᄋ([ᅵᅧ])", r"ᄎ\1", out)
    out = re.sub("ᆴᄋ([ᅵᅧ])", r"ᆯᄎ\1", out)
    # ㄷ coda + 히 also palatalizes (combined with aspiration) to 치.
    out = re.sub("ᆮᄒ([ᅵ])", r"ᄎ\1", out)
    gloss(verbose, out, inp, rule)
    return out
def modifying_rieul(inp, descriptive=False, verbose=False):
    """Rule 27: after the adnominal ending -(으)ㄹ (tagged "/E") the next onset is tensified."""
    rule = rule_id2text["27"]
    out = inp
    # (pattern, replacement) table; the second half covers fixed -ㄹ expressions.
    pairs = [ ("ᆯ/E ᄀ", r"ᆯ ᄁ"),
              ("ᆯ/E ᄃ", r"ᆯ ᄄ"),
              ("ᆯ/E ᄇ", r"ᆯ ᄈ"),
              ("ᆯ/E ᄉ", r"ᆯ ᄊ"),
              ("ᆯ/E ᄌ", r"ᆯ ᄍ"),

              ("ᆯ걸", "ᆯ껄"),
              ("ᆯ밖에", "ᆯ빠께"),
              ("ᆯ세라", "ᆯ쎄라"),
              ("ᆯ수록", "ᆯ쑤록"),
              ("ᆯ지라도", "ᆯ찌라도"),
              ("ᆯ지언정", "ᆯ찌언정"),
              ("ᆯ진대", "ᆯ찐대") ]
    for str1, str2 in pairs:
        out = re.sub(str1, str2, out)
    gloss(verbose, out, inp, rule)
    return out
| [
"g2pk.utils.gloss",
"re.sub",
"g2pk.utils.get_rule_id2text"
] | [((175, 193), 'g2pk.utils.get_rule_id2text', 'get_rule_id2text', ([], {}), '()\n', (191, 193), False, 'from g2pk.utils import gloss, get_rule_id2text\n'), ((385, 416), 're.sub', 're.sub', (['"""([ᄌᄍᄎ])ᅧ"""', '"""\\\\1ᅥ"""', 'inp'], {}), "('([ᄌᄍᄎ])ᅧ', '\\\\1ᅥ', inp)\n", (391, 416), False, 'import re\n'), ((421, 451), 'g2pk.utils.gloss', 'gloss', (['verbose', 'out', 'inp', 'rule'], {}), '(verbose, out, inp, rule)\n', (426, 451), False, 'from g2pk.utils import gloss, get_rule_id2text\n'), ((715, 745), 'g2pk.utils.gloss', 'gloss', (['verbose', 'out', 'inp', 'rule'], {}), '(verbose, out, inp, rule)\n', (720, 745), False, 'from g2pk.utils import gloss, get_rule_id2text\n'), ((862, 908), 're.sub', 're.sub', (['"""([ᄀᄁᄂᄃᄄᄅᄆᄇᄈᄉᄊᄌᄍᄎᄏᄐᄑᄒ])ᅴ"""', '"""\\\\1ᅵ"""', 'inp'], {}), "('([ᄀᄁᄂᄃᄄᄅᄆᄇᄈᄉᄊᄌᄍᄎᄏᄐᄑᄒ])ᅴ', '\\\\1ᅵ', inp)\n", (868, 908), False, 'import re\n'), ((913, 943), 'g2pk.utils.gloss', 'gloss', (['verbose', 'out', 'inp', 'rule'], {}), '(verbose, out, inp, rule)\n', (918, 943), False, 'from g2pk.utils import gloss, get_rule_id2text\n'), ((1196, 1226), 'g2pk.utils.gloss', 'gloss', (['verbose', 'out', 'inp', 'rule'], {}), '(verbose, out, inp, rule)\n', (1201, 1226), False, 'from g2pk.utils import gloss, get_rule_id2text\n'), ((1478, 1508), 'g2pk.utils.gloss', 'gloss', (['verbose', 'out', 'inp', 'rule'], {}), '(verbose, out, inp, rule)\n', (1483, 1508), False, 'from g2pk.utils import gloss, get_rule_id2text\n'), ((1630, 1661), 're.sub', 're.sub', (['"""([그])ᆮᄋ"""', '"""\\\\1ᄉ"""', 'out'], {}), "('([그])ᆮᄋ', '\\\\1ᄉ', out)\n", (1636, 1661), False, 'import re\n'), ((1672, 1708), 're.sub', 're.sub', (['"""([으])[ᆽᆾᇀᇂ]ᄋ"""', '"""\\\\1ᄉ"""', 'out'], {}), "('([으])[ᆽᆾᇀᇂ]ᄋ', '\\\\1ᄉ', out)\n", (1678, 1708), False, 'import re\n'), ((1719, 1752), 're.sub', 're.sub', (['"""([으])[ᆿ]ᄋ"""', '"""\\\\1ᄀ"""', 'out'], {}), "('([으])[ᆿ]ᄋ', '\\\\1ᄀ', out)\n", (1725, 1752), False, 'import re\n'), ((1763, 1796), 're.sub', 're.sub', (['"""([으])[ᇁ]ᄋ"""', '"""\\\\1ᄇ"""', 'out'], {}), "('([으])[ᇁ]ᄋ', 
'\\\\1ᄇ', out)\n", (1769, 1796), False, 'import re\n'), ((1802, 1832), 'g2pk.utils.gloss', 'gloss', (['verbose', 'out', 'inp', 'rule'], {}), '(verbose, out, inp, rule)\n', (1807, 1832), False, 'from g2pk.utils import gloss, get_rule_id2text\n'), ((2031, 2061), 're.sub', 're.sub', (['"""ᆰ/P([ᄀᄁ])"""', '"""ᆯᄁ"""', 'out'], {}), "('ᆰ/P([ᄀᄁ])', 'ᆯᄁ', out)\n", (2037, 2061), False, 'import re\n'), ((2068, 2098), 'g2pk.utils.gloss', 'gloss', (['verbose', 'out', 'inp', 'rule'], {}), '(verbose, out, inp, rule)\n', (2073, 2098), False, 'from g2pk.utils import gloss, get_rule_id2text\n'), ((2226, 2258), 're.sub', 're.sub', (['"""([ᆲᆴ])/Pᄀ"""', '"""\\\\1ᄁ"""', 'out'], {}), "('([ᆲᆴ])/Pᄀ', '\\\\1ᄁ', out)\n", (2232, 2258), False, 'import re\n'), ((2269, 2301), 're.sub', 're.sub', (['"""([ᆲᆴ])/Pᄃ"""', '"""\\\\1ᄄ"""', 'out'], {}), "('([ᆲᆴ])/Pᄃ', '\\\\1ᄄ', out)\n", (2275, 2301), False, 'import re\n'), ((2312, 2344), 're.sub', 're.sub', (['"""([ᆲᆴ])/Pᄉ"""', '"""\\\\1ᄊ"""', 'out'], {}), "('([ᆲᆴ])/Pᄉ', '\\\\1ᄊ', out)\n", (2318, 2344), False, 'import re\n'), ((2355, 2387), 're.sub', 're.sub', (['"""([ᆲᆴ])/Pᄌ"""', '"""\\\\1ᄍ"""', 'out'], {}), "('([ᆲᆴ])/Pᄌ', '\\\\1ᄍ', out)\n", (2361, 2387), False, 'import re\n'), ((2393, 2423), 'g2pk.utils.gloss', 'gloss', (['verbose', 'out', 'inp', 'rule'], {}), '(verbose, out, inp, rule)\n', (2398, 2423), False, 'from g2pk.utils import gloss, get_rule_id2text\n'), ((3006, 3036), 'g2pk.utils.gloss', 'gloss', (['verbose', 'out', 'inp', 'rule'], {}), '(verbose, out, inp, rule)\n', (3011, 3036), False, 'from g2pk.utils import gloss, get_rule_id2text\n'), ((3224, 3288), 're.sub', 're.sub', (['f"""(바)ᆲ({syllable_final_or_consonants})"""', '"""\\\\1ᆸ\\\\2"""', 'out'], {}), "(f'(바)ᆲ({syllable_final_or_consonants})', '\\\\1ᆸ\\\\2', out)\n", (3230, 3288), False, 'import re\n'), ((3298, 3343), 're.sub', 're.sub', (['f"""(너)ᆲ([ᄌᄍ]ᅮ|[ᄃᄄ]ᅮ)"""', '"""\\\\1ᆸ\\\\2"""', 'out'], {}), "(f'(너)ᆲ([ᄌᄍ]ᅮ|[ᄃᄄ]ᅮ)', '\\\\1ᆸ\\\\2', out)\n", (3304, 3343), False, 'import re\n'), 
((3347, 3377), 'g2pk.utils.gloss', 'gloss', (['verbose', 'out', 'inp', 'rule'], {}), '(verbose, out, inp, rule)\n', (3352, 3377), False, 'from g2pk.utils import gloss, get_rule_id2text\n'), ((3505, 3536), 're.sub', 're.sub', (['"""ᆮᄋ([ᅵᅧ])"""', '"""ᄌ\\\\1"""', 'out'], {}), "('ᆮᄋ([ᅵᅧ])', 'ᄌ\\\\1', out)\n", (3511, 3536), False, 'import re\n'), ((3547, 3578), 're.sub', 're.sub', (['"""ᇀᄋ([ᅵᅧ])"""', '"""ᄎ\\\\1"""', 'out'], {}), "('ᇀᄋ([ᅵᅧ])', 'ᄎ\\\\1', out)\n", (3553, 3578), False, 'import re\n'), ((3589, 3621), 're.sub', 're.sub', (['"""ᆴᄋ([ᅵᅧ])"""', '"""ᆯᄎ\\\\1"""', 'out'], {}), "('ᆴᄋ([ᅵᅧ])', 'ᆯᄎ\\\\1', out)\n", (3595, 3621), False, 'import re\n'), ((3633, 3663), 're.sub', 're.sub', (['"""ᆮᄒ([ᅵ])"""', '"""ᄎ\\\\1"""', 'out'], {}), "('ᆮᄒ([ᅵ])', 'ᄎ\\\\1', out)\n", (3639, 3663), False, 'import re\n'), ((3669, 3699), 'g2pk.utils.gloss', 'gloss', (['verbose', 'out', 'inp', 'rule'], {}), '(verbose, out, inp, rule)\n', (3674, 3699), False, 'from g2pk.utils import gloss, get_rule_id2text\n'), ((4343, 4373), 'g2pk.utils.gloss', 'gloss', (['verbose', 'out', 'inp', 'rule'], {}), '(verbose, out, inp, rule)\n', (4348, 4373), False, 'from g2pk.utils import gloss, get_rule_id2text\n'), ((639, 682), 're.sub', 're.sub', (['"""([ᄀᄁᄃᄄㄹᄆᄇᄈᄌᄍᄎᄏᄐᄑᄒ])ᅨ"""', '"""\\\\1ᅦ"""', 'inp'], {}), "('([ᄀᄁᄃᄄㄹᄆᄇᄈᄌᄍᄎᄏᄐᄑᄒ])ᅨ', '\\\\1ᅦ', inp)\n", (645, 682), False, 'import re\n'), ((1120, 1145), 're.sub', 're.sub', (['"""의/J"""', '"""에"""', 'inp'], {}), "('의/J', '에', inp)\n", (1126, 1145), False, 'import re\n'), ((1416, 1446), 're.sub', 're.sub', (['"""(\\\\Sᄋ)ᅴ"""', '"""\\\\1ᅵ"""', 'inp'], {}), "('(\\\\Sᄋ)ᅴ', '\\\\1ᅵ', inp)\n", (1422, 1446), False, 'import re\n'), ((2977, 3000), 're.sub', 're.sub', (['str1', 'str2', 'out'], {}), '(str1, str2, out)\n', (2983, 3000), False, 'import re\n'), ((4314, 4337), 're.sub', 're.sub', (['str1', 'str2', 'out'], {}), '(str1, str2, out)\n', (4320, 4337), False, 'import re\n')] |
#!/usr/local/bin/python2.7
##
# OOIPLACEHOLDER
#
# Copyright 2014 <NAME>.
##
__author__ = '<NAME>'
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.dataset_driver import SimpleDatasetDriver
from mi.dataset.parser.flord_l_wfp_sio import FlordLWfpSioParser
from mi.core.versioning import version
@version("15.6.1")
def parse(unused, source_file_path, particle_data_handler):
with open(source_file_path, 'rb') as stream_handle:
driver = FlordLWfpSioTelemeteredDriver(unused, stream_handle, particle_data_handler)
driver.processFileStream()
return particle_data_handler
class FlordLWfpSioTelemeteredDriver(SimpleDatasetDriver):
def _build_parser(self, stream_handle):
parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.flord_l_wfp_sio',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'FlordLWfpSioDataParticle'
}
parser = FlordLWfpSioParser(parser_config,
stream_handle,
self._exception_callback)
return parser
| [
"mi.core.versioning.version",
"mi.dataset.parser.flord_l_wfp_sio.FlordLWfpSioParser"
] | [((328, 345), 'mi.core.versioning.version', 'version', (['"""15.6.1"""'], {}), "('15.6.1')\n", (335, 345), False, 'from mi.core.versioning import version\n'), ((956, 1030), 'mi.dataset.parser.flord_l_wfp_sio.FlordLWfpSioParser', 'FlordLWfpSioParser', (['parser_config', 'stream_handle', 'self._exception_callback'], {}), '(parser_config, stream_handle, self._exception_callback)\n', (974, 1030), False, 'from mi.dataset.parser.flord_l_wfp_sio import FlordLWfpSioParser\n')] |
import sys
import os
sys.path.insert(0, '../..')
from nltk.corpus import framenet as fn
import FrameNetNLTK
from FrameNetNLTK import load, convert_to_lemon
my_fn = load(folder='test_lexicon',
verbose=2)
output_path = os.path.join(os.getcwd(),
'stats',
'dfn_0.1.ttl')
convert_to_lemon(lemon=FrameNetNLTK.lemon,
premon_nt_path=FrameNetNLTK.premon_nt,
ontolex=FrameNetNLTK.ontolex,
fn_pos_to_lexinfo=FrameNetNLTK.fn_pos_to_lexinfo,
your_fn=my_fn,
namespace='http://rdf.cltl.nl/dfn/',
namespace_prefix='dfn',
language='nld',
major_version=0,
minor_version=1,
output_path=output_path,
verbose=2)
output_path = os.path.join(os.getcwd(),
'stats',
'efn_1.7.ttl')
convert_to_lemon(lemon=FrameNetNLTK.lemon,
premon_nt_path=FrameNetNLTK.premon_nt,
ontolex=FrameNetNLTK.ontolex,
fn_pos_to_lexinfo=FrameNetNLTK.fn_pos_to_lexinfo,
your_fn=fn,
namespace='http://rdf.cltl.nl/efn/',
namespace_prefix='efn',
language='eng',
major_version=1,
minor_version=7,
output_path=output_path,
verbose=5) | [
"FrameNetNLTK.convert_to_lemon",
"sys.path.insert",
"FrameNetNLTK.load",
"os.getcwd"
] | [((21, 48), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../.."""'], {}), "(0, '../..')\n", (36, 48), False, 'import sys\n'), ((166, 204), 'FrameNetNLTK.load', 'load', ([], {'folder': '"""test_lexicon"""', 'verbose': '(2)'}), "(folder='test_lexicon', verbose=2)\n", (170, 204), False, 'from FrameNetNLTK import load, convert_to_lemon\n'), ((339, 681), 'FrameNetNLTK.convert_to_lemon', 'convert_to_lemon', ([], {'lemon': 'FrameNetNLTK.lemon', 'premon_nt_path': 'FrameNetNLTK.premon_nt', 'ontolex': 'FrameNetNLTK.ontolex', 'fn_pos_to_lexinfo': 'FrameNetNLTK.fn_pos_to_lexinfo', 'your_fn': 'my_fn', 'namespace': '"""http://rdf.cltl.nl/dfn/"""', 'namespace_prefix': '"""dfn"""', 'language': '"""nld"""', 'major_version': '(0)', 'minor_version': '(1)', 'output_path': 'output_path', 'verbose': '(2)'}), "(lemon=FrameNetNLTK.lemon, premon_nt_path=FrameNetNLTK.\n premon_nt, ontolex=FrameNetNLTK.ontolex, fn_pos_to_lexinfo=FrameNetNLTK\n .fn_pos_to_lexinfo, your_fn=my_fn, namespace='http://rdf.cltl.nl/dfn/',\n namespace_prefix='dfn', language='nld', major_version=0, minor_version=\n 1, output_path=output_path, verbose=2)\n", (355, 681), False, 'from FrameNetNLTK import load, convert_to_lemon\n'), ((970, 1309), 'FrameNetNLTK.convert_to_lemon', 'convert_to_lemon', ([], {'lemon': 'FrameNetNLTK.lemon', 'premon_nt_path': 'FrameNetNLTK.premon_nt', 'ontolex': 'FrameNetNLTK.ontolex', 'fn_pos_to_lexinfo': 'FrameNetNLTK.fn_pos_to_lexinfo', 'your_fn': 'fn', 'namespace': '"""http://rdf.cltl.nl/efn/"""', 'namespace_prefix': '"""efn"""', 'language': '"""eng"""', 'major_version': '(1)', 'minor_version': '(7)', 'output_path': 'output_path', 'verbose': '(5)'}), "(lemon=FrameNetNLTK.lemon, premon_nt_path=FrameNetNLTK.\n premon_nt, ontolex=FrameNetNLTK.ontolex, fn_pos_to_lexinfo=FrameNetNLTK\n .fn_pos_to_lexinfo, your_fn=fn, namespace='http://rdf.cltl.nl/efn/',\n namespace_prefix='efn', language='eng', major_version=1, minor_version=\n 7, output_path=output_path, verbose=5)\n", (986, 1309), 
False, 'from FrameNetNLTK import load, convert_to_lemon\n'), ((247, 258), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (256, 258), False, 'import os\n'), ((878, 889), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (887, 889), False, 'import os\n')] |
from django.contrib import admin
from controle.models import Time, Music
# Register your models here.
admin.site.register(Time)
admin.site.register(Music) | [
"django.contrib.admin.site.register"
] | [((103, 128), 'django.contrib.admin.site.register', 'admin.site.register', (['Time'], {}), '(Time)\n', (122, 128), False, 'from django.contrib import admin\n'), ((129, 155), 'django.contrib.admin.site.register', 'admin.site.register', (['Music'], {}), '(Music)\n', (148, 155), False, 'from django.contrib import admin\n')] |
import os
import pdb
import fvm.models_atyped_double as models
import fvm.exporters_atyped_double as exporters
class Output():
def __init__(self, outputDir, probeIndex, sim):
if os.path.isdir(outputDir) == False:
os.mkdir(outputDir)
self.defFile = open(outputDir + 'deformation.dat', 'a')
self.forceFile = open(outputDir + 'force.dat', 'a')
self.voltageFile = open(outputDir + 'voltage.dat', 'a')
self.sim = sim
self.probeIndex = probeIndex
self.outputDir = outputDir
def finish(self):
self.defFile.close()
self.forceFile.close()
self.voltageFile.close()
def writeData(self):
globalTime = self.sim.globalTime
timeStep = self.sim.timeStep
deformation = self.sim.deformation
maxDef = deformation.min(axis = 0)
self.defFile.write('%e\t%e\t%e\t' % (globalTime, timeStep,maxDef[2]))
for i in range(0, len(self.probeIndex)):
self.defFile.write('%e\t' % deformation[self.probeIndex[i]][2])
self.defFile.write('\n')
self.defFile.flush()
vel = self.sim.velocity
acc = self.sim.acceleration
eForce = self.sim.elecForceSum
fForce = self.sim.flowForceSum
cForce = self.sim.contactForceSum
self.forceFile.write('%e\t' % globalTime)
for i in range(0, len(self.probeIndex)):
self.forceFile.write('%e\t' % vel[self.probeIndex[i]][2])
self.forceFile.write('%e\t%e\t%e\n' % (eForce, fForce, cForce))
self.forceFile.flush()
voltage = self.sim.voltage
self.voltageFile.write('%e\t%e\n' % (globalTime, voltage))
self.voltageFile.flush()
def saveFluidVTK(self, n):
geomFields = self.sim.geomFields
fluidMeshes = self.sim.fluidMeshes
elecFields = self.sim.elecFields
if self.sim.enableFlowModel:
flowFields = self.sim.flowFields
writer = exporters.VTKWriterA(geomFields,fluidMeshes,
self.outputDir + "fluid-" + str(n) + ".vtk",
"gen5_fluid",
False,0)
writer.init()
writer.writeScalarField(elecFields.potential,"potential")
writer.writeVectorField(elecFields.electric_field,"potentialgradient")
if self.sim.enableFlowModel:
writer.writeVectorField(flowFields.velocity,"velocity")
writer.writeScalarField(flowFields.pressure, "pressure")
writer.finish()
def saveBeamVTK(self, n):
geomFields = self.sim.geomFields
solidMeshes = self.sim.solidMeshes
plateFields = self.sim.plateFields
writer = exporters.VTKWriterA(geomFields,solidMeshes,
self.outputDir + "beam-" + str(n) + ".vtk",
"gen5_beam",
False,0)
writer.init()
writer.writeVectorField(plateFields.deformation,"deformation")
writer.writeScalarField(plateFields.force, "force")
writer.finish()
def saveBeamBoundaryVTK(self, n):
geomFields = self.sim.geomFields
solidBoundaryMeshes = self.sim.solidBoundaryMeshes
writer3 = exporters.VTKWriterA(geomFields,solidBoundaryMeshes,
self.outputDir + "beamBoundary-" + str(n) + ".vtk",
"beam Boundary",
False,0,True)
writer3.init()
#writer3.writeVectorField(flowFields.velocity,"velocity")
#writer3.writeVectorField(flowFields.force,"flow_force")
#writer3.writeVectorField(elecFields.force,"elec_force")
writer3.finish()
| [
"os.path.isdir",
"os.mkdir"
] | [((197, 221), 'os.path.isdir', 'os.path.isdir', (['outputDir'], {}), '(outputDir)\n', (210, 221), False, 'import os\n'), ((244, 263), 'os.mkdir', 'os.mkdir', (['outputDir'], {}), '(outputDir)\n', (252, 263), False, 'import os\n')] |
from __future__ import absolute_import
import errno
import socket
import sys
from future.utils import raise_
from boofuzz import exception
from boofuzz.connections import base_socket_connection
ETH_P_ALL = 0x0003 # Ethernet protocol: Every packet, see Linux if_ether.h docs for more details.
ETH_P_IP = 0x0800 # Ethernet protocol: Internet Protocol packet, see Linux <net/if_ether.h> docs for more details.
class RawL3SocketConnection(base_socket_connection.BaseSocketConnection):
"""BaseSocketConnection implementation for use with Raw Layer 2 Sockets.
.. versionadded:: 0.2.0
Args:
interface (str): Interface to send and receive on.
send_timeout (float): Seconds to wait for send before timing out. Default 5.0.
recv_timeout (float): Seconds to wait for recv before timing out. Default 5.0.
ethernet_proto (int): Ethernet protocol to bind to. Defaults to ETH_P_IP (0x0800).
l2_dst (bytes): Layer2 destination address (e.g. MAC address). Default b'\xFF\xFF\xFF\xFF\xFF\xFF' (broadcast)
packet_size (int): Maximum packet size (in bytes). Default 1500 if the underlying interface uses
standard ethernet for layer 2. Otherwise, a different packet size may apply (e.g. Jumboframes,
802.5 Token Ring, 802.11 wifi, ...) that must be specified.
"""
def __init__(
self,
interface,
send_timeout=5.0,
recv_timeout=5.0,
ethernet_proto=ETH_P_IP,
l2_dst=b"\xff" * 6,
packet_size=1500,
):
super(RawL3SocketConnection, self).__init__(send_timeout, recv_timeout)
self.interface = interface
self.ethernet_proto = ethernet_proto
self.l2_dst = l2_dst
self.packet_size = packet_size
def open(self):
self._sock = socket.socket(socket.AF_PACKET, socket.SOCK_DGRAM, socket.htons(self.ethernet_proto))
self._sock.bind((self.interface, self.ethernet_proto))
super(RawL3SocketConnection, self).open()
def recv(self, max_bytes):
"""
Receives a packet from the raw socket. If max_bytes < packet_size, only the first max_bytes are returned and
the rest of the packet is discarded. Otherwise, return the whole packet.
Args:
max_bytes (int): Maximum number of bytes to return. 0 to return the whole packet.
Returns:
Received data
"""
data = b""
try:
data = self._sock.recv(self.packet_size)
if 0 < max_bytes < self.packet_size:
data = data[: self._packet_size]
except socket.timeout:
data = b""
except socket.error as e:
if e.errno == errno.ECONNABORTED:
raise_(
exception.BoofuzzTargetConnectionAborted(socket_errno=e.errno, socket_errmsg=e.strerror),
None,
sys.exc_info()[2],
)
elif e.errno in [errno.ECONNRESET, errno.ENETRESET, errno.ETIMEDOUT]:
raise_(exception.BoofuzzTargetConnectionReset(), None, sys.exc_info()[2])
elif e.errno == errno.EWOULDBLOCK:
data = b""
else:
raise
return data
def send(self, data):
"""
Send data to the target. Only valid after calling open!
Data will be trunctated to self.packet_size (Default: 1500
bytes).
Args:
data: Data to send.
Returns:
int: Number of bytes actually sent.
"""
num_sent = 0
data = data[: self.packet_size]
try:
num_sent = self._sock.sendto(data, (self.interface, self.ethernet_proto, 0, 0, self.l2_dst))
except socket.error as e:
if e.errno == errno.ECONNABORTED:
raise_(
exception.BoofuzzTargetConnectionAborted(socket_errno=e.errno, socket_errmsg=e.strerror),
None,
sys.exc_info()[2],
)
elif e.errno in [errno.ECONNRESET, errno.ENETRESET, errno.ETIMEDOUT, errno.EPIPE]:
raise_(exception.BoofuzzTargetConnectionReset(), None, sys.exc_info()[2])
else:
raise
return num_sent
@property
def info(self):
return "{0}, type 0x{1:04x}".format(self.interface, self.ethernet_proto)
| [
"sys.exc_info",
"boofuzz.exception.BoofuzzTargetConnectionReset",
"socket.htons",
"boofuzz.exception.BoofuzzTargetConnectionAborted"
] | [((1861, 1894), 'socket.htons', 'socket.htons', (['self.ethernet_proto'], {}), '(self.ethernet_proto)\n', (1873, 1894), False, 'import socket\n'), ((2781, 2873), 'boofuzz.exception.BoofuzzTargetConnectionAborted', 'exception.BoofuzzTargetConnectionAborted', ([], {'socket_errno': 'e.errno', 'socket_errmsg': 'e.strerror'}), '(socket_errno=e.errno,\n socket_errmsg=e.strerror)\n', (2821, 2873), False, 'from boofuzz import exception\n'), ((3878, 3970), 'boofuzz.exception.BoofuzzTargetConnectionAborted', 'exception.BoofuzzTargetConnectionAborted', ([], {'socket_errno': 'e.errno', 'socket_errmsg': 'e.strerror'}), '(socket_errno=e.errno,\n socket_errmsg=e.strerror)\n', (3918, 3970), False, 'from boofuzz import exception\n'), ((2917, 2931), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2929, 2931), False, 'import sys\n'), ((3059, 3099), 'boofuzz.exception.BoofuzzTargetConnectionReset', 'exception.BoofuzzTargetConnectionReset', ([], {}), '()\n', (3097, 3099), False, 'from boofuzz import exception\n'), ((4014, 4028), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (4026, 4028), False, 'import sys\n'), ((4169, 4209), 'boofuzz.exception.BoofuzzTargetConnectionReset', 'exception.BoofuzzTargetConnectionReset', ([], {}), '()\n', (4207, 4209), False, 'from boofuzz import exception\n'), ((3107, 3121), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (3119, 3121), False, 'import sys\n'), ((4217, 4231), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (4229, 4231), False, 'import sys\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_google_dork.models
import model_utils.fields
import django.utils.timezone
class Migration(migrations.Migration):
replaces = [('django_google_dork', '0001_initial'), ('django_google_dork', '0002_auto_20141116_1551'), ('django_google_dork', '0003_run_engine')]
dependencies = [
]
operations = [
migrations.CreateModel(
name='Campaign',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('name', django_google_dork.models.CampaignNameField(unique=True, max_length=32)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Dork',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('query', django_google_dork.models.DorkQueryField(max_length=256)),
('campaign', models.ForeignKey(to='django_google_dork.Campaign')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Result',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=1024)),
('summary', models.TextField()),
('url', models.URLField(max_length=1024)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Run',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('dork', models.ForeignKey(to='django_google_dork.Dork')),
('result_set', models.ManyToManyField(to='django_google_dork.Result')),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='result',
unique_together=set([('title', 'summary', 'url')]),
),
migrations.AlterUniqueTogether(
name='dork',
unique_together=set([('campaign', 'query')]),
),
migrations.CreateModel(
name='SearchEngine',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hostname', models.CharField(unique=True, max_length=32)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='campaign',
name='enabled',
field=models.BooleanField(default=True),
preserve_default=True,
),
migrations.AddField(
model_name='dork',
name='enabled',
field=models.BooleanField(default=True),
preserve_default=True,
),
migrations.AddField(
model_name='run',
name='engine',
field=models.ForeignKey(default=None, to='django_google_dork.SearchEngine'),
preserve_default=False,
),
]
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField",
"django.db.models.DateTimeField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.URLField",
"django.db.models.CharField"
] | [((3647, 3680), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (3666, 3680), False, 'from django.db import models, migrations\n'), ((3834, 3867), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (3853, 3867), False, 'from django.db import models, migrations\n'), ((4019, 4088), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'default': 'None', 'to': '"""django_google_dork.SearchEngine"""'}), "(default=None, to='django_google_dork.SearchEngine')\n", (4036, 4088), False, 'from django.db import models, migrations\n'), ((539, 632), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (555, 632), False, 'from django.db import models, migrations\n'), ((1253, 1346), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1269, 1346), False, 'from django.db import models, migrations\n'), ((1748, 1799), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""django_google_dork.Campaign"""'}), "(to='django_google_dork.Campaign')\n", (1765, 1799), False, 'from django.db import models, migrations\n'), ((2003, 2096), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2019, 2096), False, 'from django.db import models, migrations\n'), ((2121, 2154), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1024)'}), '(max_length=1024)\n', (2137, 2154), False, 'from 
django.db import models, migrations\n'), ((2185, 2203), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (2201, 2203), False, 'from django.db import models, migrations\n'), ((2230, 2262), 'django.db.models.URLField', 'models.URLField', ([], {'max_length': '(1024)'}), '(max_length=1024)\n', (2245, 2262), False, 'from django.db import models, migrations\n'), ((2463, 2556), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2479, 2556), False, 'from django.db import models, migrations\n'), ((2583, 2622), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2603, 2622), False, 'from django.db import models, migrations\n'), ((2650, 2697), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""django_google_dork.Dork"""'}), "(to='django_google_dork.Dork')\n", (2667, 2697), False, 'from django.db import models, migrations\n'), ((2731, 2785), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""django_google_dork.Result"""'}), "(to='django_google_dork.Result')\n", (2753, 2785), False, 'from django.db import models, migrations\n'), ((3271, 3364), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (3287, 3364), False, 'from django.db import models, migrations\n'), ((3392, 3436), 'django.db.models.CharField', 'models.CharField', ([], {'unique': '(True)', 'max_length': '(32)'}), '(unique=True, max_length=32)\n', (3408, 3436), False, 'from django.db import models, migrations\n')] |
from flask import Blueprint, abort, jsonify, render_template
from src.fruit_castle.hadwin.utilities import get_json_data
from .v1.version_1 import v1
from .v2.version_2 import v2
from .v3.version_3 import v3
hadwin = Blueprint('hadwin', __name__, url_prefix='/hadwin',static_url_path='/dist',
static_folder='../client/dist', template_folder='client')
hadwin.register_blueprint(v1)
hadwin.register_blueprint(v2)
hadwin.register_blueprint(v3)
@hadwin.route("/")
def hadwin_home():
# return render_template("dashboard.html", py_sent_data="hadwin concept data")
abort(401)
@hadwin.route('/app')
def hadwin_about_app():
retrieved_file_data = get_json_data(
"src/data/hadwin/about_the_app.json")
return jsonify(retrieved_file_data)
| [
"flask.abort",
"flask.Blueprint",
"src.fruit_castle.hadwin.utilities.get_json_data",
"flask.jsonify"
] | [((219, 357), 'flask.Blueprint', 'Blueprint', (['"""hadwin"""', '__name__'], {'url_prefix': '"""/hadwin"""', 'static_url_path': '"""/dist"""', 'static_folder': '"""../client/dist"""', 'template_folder': '"""client"""'}), "('hadwin', __name__, url_prefix='/hadwin', static_url_path='/dist',\n static_folder='../client/dist', template_folder='client')\n", (228, 357), False, 'from flask import Blueprint, abort, jsonify, render_template\n'), ((584, 594), 'flask.abort', 'abort', (['(401)'], {}), '(401)\n', (589, 594), False, 'from flask import Blueprint, abort, jsonify, render_template\n'), ((669, 720), 'src.fruit_castle.hadwin.utilities.get_json_data', 'get_json_data', (['"""src/data/hadwin/about_the_app.json"""'], {}), "('src/data/hadwin/about_the_app.json')\n", (682, 720), False, 'from src.fruit_castle.hadwin.utilities import get_json_data\n'), ((741, 769), 'flask.jsonify', 'jsonify', (['retrieved_file_data'], {}), '(retrieved_file_data)\n', (748, 769), False, 'from flask import Blueprint, abort, jsonify, render_template\n')] |
import pytest
import friendly_traceback
from friendly_traceback.console_helpers import _get_info
from ..syntax_errors_formatting_cases import descriptions
friendly_traceback.set_lang("en")
where = "parsing_error_source"
cause = "cause"
@pytest.mark.parametrize("filename", descriptions.keys())
def test_syntax_errors(filename):
expected = descriptions[filename]
try:
exec("from . import %s" % filename)
except SyntaxError:
friendly_traceback.explain_traceback(redirect="capture")
info = _get_info()
assert expected[where] == info[where] # noqa
assert expected[cause] in info[cause] # noqa
| [
"friendly_traceback.explain_traceback",
"friendly_traceback.set_lang",
"friendly_traceback.console_helpers._get_info"
] | [((157, 190), 'friendly_traceback.set_lang', 'friendly_traceback.set_lang', (['"""en"""'], {}), "('en')\n", (184, 190), False, 'import friendly_traceback\n'), ((523, 534), 'friendly_traceback.console_helpers._get_info', '_get_info', ([], {}), '()\n', (532, 534), False, 'from friendly_traceback.console_helpers import _get_info\n'), ((455, 511), 'friendly_traceback.explain_traceback', 'friendly_traceback.explain_traceback', ([], {'redirect': '"""capture"""'}), "(redirect='capture')\n", (491, 511), False, 'import friendly_traceback\n')] |
from itertools import product
from tools.general import load_input_list
def get_new_active_range(current_active_set, dimensions):
lowest = [0] * dimensions
highest = [0] * dimensions
for point in current_active_set:
for i, coord in enumerate(point):
if coord < lowest[i]:
lowest[i] = coord
elif highest[i] < coord:
highest[i] = coord
return tuple(range(lowest[i] - 1, highest[i] + 2) for i in range(dimensions))
def count_active_neighbours(active_set, point):
active_count = 0
for nbr in product(*(range(coord - 1, coord + 2) for coord in point)):
if nbr in active_set and nbr != point:
active_count += 1
return active_count
def new_state_is_active(active_set, point):
active_nbr = count_active_neighbours(active_set, point)
if point in active_set:
if 2 <= active_nbr <= 3:
return True
elif active_nbr == 3:
return True
return False
def iterate_grid(initial_grid, dimensions, iterations):
active_points = set()
for y, row in enumerate(initial_grid):
for x, cube in enumerate(row):
if cube == '#':
active_points.add(tuple([x, y] + [0] * (dimensions - 2)))
for _ in range(iterations):
new_active_points = set()
for point in product(*get_new_active_range(active_points, dimensions)):
if new_state_is_active(active_points, point):
new_active_points.add(point)
active_points = new_active_points
return len(active_points)
starting_grid = [list(row) for row in load_input_list("day17.txt")]
print(f"Part 1 => {iterate_grid(starting_grid, 3, 6)}")
print(f"Part 1 => {iterate_grid(starting_grid, 4, 6)}")
| [
"tools.general.load_input_list"
] | [((1640, 1668), 'tools.general.load_input_list', 'load_input_list', (['"""day17.txt"""'], {}), "('day17.txt')\n", (1655, 1668), False, 'from tools.general import load_input_list\n')] |
# -*- coding: utf-8 -*-
from numpy import log2
from pickle import load
"""
* Clase que se encarga de ver la información mutua que hay entre dos tokens
* sirve para determinar si es colocación o no
"""
class MI:
def __init__(self):
self.words = load(open("./models/words.d",'r'))
self.ngrams = load(open("./models/ngrams.d","r"))
self.count = self.count()
def count(self):
cnt = 0
for i in self.words:
cnt += self.words[i]
return cnt
def eval(self,str1,str2):
try:
sup = float(self.ngrams[str1+"_"+str2])/float(self.count)
inf = float(self.words[str1]) * float(self.words[str2])
if inf <= 0 or sup <= 0:
return 0
else:
inf = inf/(float(self.count)*float(self.count))
return log2(sup/inf)
except:
return 0
| [
"numpy.log2"
] | [((722, 737), 'numpy.log2', 'log2', (['(sup / inf)'], {}), '(sup / inf)\n', (726, 737), False, 'from numpy import log2\n')] |
from functools import reduce
data = []
with open("aoc6.inp") as rf:
sets = []
for l in rf:
if l == "\n":
data.append(sets)
sets = []
else:
sets.append(set([c for c in l.strip()]))
a1 = a2 = 0
for sets in data:
a1 += len(reduce(lambda s1, s2: s1 | s2, sets))
a2 += len(reduce(lambda s1, s2: s1 & s2, sets))
print(a1, a2)
| [
"functools.reduce"
] | [((288, 324), 'functools.reduce', 'reduce', (['(lambda s1, s2: s1 | s2)', 'sets'], {}), '(lambda s1, s2: s1 | s2, sets)\n', (294, 324), False, 'from functools import reduce\n'), ((340, 376), 'functools.reduce', 'reduce', (['(lambda s1, s2: s1 & s2)', 'sets'], {}), '(lambda s1, s2: s1 & s2, sets)\n', (346, 376), False, 'from functools import reduce\n')] |
import zeep
import logging
logging.getLogger('zeep').setLevel(logging.ERROR)
publicServiceUrl = 'https://api.tradera.com/v3/PublicService.asmx'
appId = 'REPLACE ME WITH TRADERA ID'
appKey = 'REPLACE ME WITH TRADERA KEY'
wsdl = 'https://api.tradera.com/v3/PublicService.asmx?WSDL'
client = zeep.Client(wsdl=wsdl)
authHeader = {
'AuthenticationHeader' : {
'AppId' : appId,
'AppKey' : appKey
}
}
result = client.service.GetOfficalTime(_soapheaders = authHeader)
print(result)
| [
"logging.getLogger",
"zeep.Client"
] | [((294, 316), 'zeep.Client', 'zeep.Client', ([], {'wsdl': 'wsdl'}), '(wsdl=wsdl)\n', (305, 316), False, 'import zeep\n'), ((28, 53), 'logging.getLogger', 'logging.getLogger', (['"""zeep"""'], {}), "('zeep')\n", (45, 53), False, 'import logging\n')] |
#!/usr/bin/env python3
import webbrowser
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from ghnotifier.notifier import Notifier
from ghnotifier.settings import Settings
class Menu:
    """Tray menu for the GitHub notifier."""

    GITHUB_NOTIFICATIONS = 'https://github.com/notifications'

    def __init__(self):
        self.menu = Gtk.Menu()
        self.create_menu()
        self.menu.show_all()

    def create_menu(self):
        """Populate the menu: action items, a separator, then Quit."""
        for label, handler in (('Open Notifications', self.notifications),
                               ('Settings', self.settings)):
            self.append(label, handler)
        self.menu.append(Gtk.SeparatorMenuItem())
        self.append('Quit', self.quit)

    def append(self, name, callback):
        """Add one menu item whose activation triggers *callback*."""
        entry = Gtk.MenuItem(name)
        entry.connect('activate', callback)
        self.menu.append(entry)

    @staticmethod
    def notifications(source):
        webbrowser.open(Menu.GITHUB_NOTIFICATIONS)

    @staticmethod
    def settings(source):
        Settings().open()

    @staticmethod
    def quit(source):
        Notifier.stop()
        Gtk.main_quit()

    def get_inner(self):
        """Return the underlying Gtk.Menu widget."""
        return self.menu
"gi.repository.Gtk.SeparatorMenuItem",
"gi.repository.Gtk.main_quit",
"webbrowser.open",
"gi.require_version",
"ghnotifier.settings.Settings",
"gi.repository.Gtk.Menu",
"ghnotifier.notifier.Notifier.stop",
"gi.repository.Gtk.MenuItem"
] | [((53, 85), 'gi.require_version', 'gi.require_version', (['"""Gtk"""', '"""3.0"""'], {}), "('Gtk', '3.0')\n", (71, 85), False, 'import gi\n'), ((321, 331), 'gi.repository.Gtk.Menu', 'Gtk.Menu', ([], {}), '()\n', (329, 331), False, 'from gi.repository import Gtk\n'), ((670, 688), 'gi.repository.Gtk.MenuItem', 'Gtk.MenuItem', (['name'], {}), '(name)\n', (682, 688), False, 'from gi.repository import Gtk\n'), ((822, 864), 'webbrowser.open', 'webbrowser.open', (['Menu.GITHUB_NOTIFICATIONS'], {}), '(Menu.GITHUB_NOTIFICATIONS)\n', (837, 864), False, 'import webbrowser\n'), ((985, 1000), 'ghnotifier.notifier.Notifier.stop', 'Notifier.stop', ([], {}), '()\n', (998, 1000), False, 'from ghnotifier.notifier import Notifier\n'), ((1009, 1024), 'gi.repository.Gtk.main_quit', 'Gtk.main_quit', ([], {}), '()\n', (1022, 1024), False, 'from gi.repository import Gtk\n'), ((551, 574), 'gi.repository.Gtk.SeparatorMenuItem', 'Gtk.SeparatorMenuItem', ([], {}), '()\n', (572, 574), False, 'from gi.repository import Gtk\n'), ((918, 928), 'ghnotifier.settings.Settings', 'Settings', ([], {}), '()\n', (926, 928), False, 'from ghnotifier.settings import Settings\n')] |
import tensorflow as tf
########################################################################################################################
# Isometry Loss
########################################################################################################################
def getLoss(inputMeshTensor, restTensor, laplacian, numberOfEdges, rowWeight):
    """Isometry loss: squared Laplacian of the displacement between the input
    mesh and the rest mesh, weighted per vertex and normalised.

    The vertex coordinates are divided by 1000 first (presumably a
    millimetre-to-metre conversion -- confirm against the data pipeline).
    Per-vertex squared residuals over x/y/z are weighted by ``rowWeight``,
    summed, and divided by ``batchSize * numberOfEdges``.
    """
    batchSize = tf.shape(inputMeshTensor)[0]
    numberOfVertices = tf.shape(inputMeshTensor)[1]

    # Displacement from the rest pose, rescaled by 1/1000.
    v_r = (inputMeshTensor / 1000.0) - (restTensor / 1000.0)

    # Apply the Laplacian to each coordinate channel separately and square.
    squaredResiduals = []
    for axis in range(3):
        component = tf.reshape(v_r[:, :, axis], [batchSize, numberOfVertices, 1])
        squaredResiduals.append(tf.square(tf.matmul(laplacian, component)))

    perVertex = squaredResiduals[0] + squaredResiduals[1] + squaredResiduals[2]
    perVertex = tf.reshape(perVertex, [batchSize, numberOfVertices])

    loss = tf.reduce_sum(perVertex * rowWeight)
    return loss / tf.cast(batchSize * numberOfEdges, tf.float32)
########################################################################################################################
| [
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.shape",
"tensorflow.reshape"
] | [((995, 1046), 'tensorflow.reshape', 'tf.reshape', (['innerSum', '[batchSize, numberOfVertices]'], {}), '(innerSum, [batchSize, numberOfVertices])\n', (1005, 1046), True, 'import tensorflow as tf\n'), ((1057, 1092), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(innerSum * rowWeight)'], {}), '(innerSum * rowWeight)\n', (1070, 1092), True, 'import tensorflow as tf\n'), ((381, 406), 'tensorflow.shape', 'tf.shape', (['inputMeshTensor'], {}), '(inputMeshTensor)\n', (389, 406), True, 'import tensorflow as tf\n'), ((433, 458), 'tensorflow.shape', 'tf.shape', (['inputMeshTensor'], {}), '(inputMeshTensor)\n', (441, 458), True, 'import tensorflow as tf\n'), ((560, 618), 'tensorflow.reshape', 'tf.reshape', (['v_r[:, :, 0]', '[batchSize, numberOfVertices, 1]'], {}), '(v_r[:, :, 0], [batchSize, numberOfVertices, 1])\n', (570, 618), True, 'import tensorflow as tf\n'), ((696, 754), 'tensorflow.reshape', 'tf.reshape', (['v_r[:, :, 1]', '[batchSize, numberOfVertices, 1]'], {}), '(v_r[:, :, 1], [batchSize, numberOfVertices, 1])\n', (706, 754), True, 'import tensorflow as tf\n'), ((832, 890), 'tensorflow.reshape', 'tf.reshape', (['v_r[:, :, 2]', '[batchSize, numberOfVertices, 1]'], {}), '(v_r[:, :, 2], [batchSize, numberOfVertices, 1])\n', (842, 890), True, 'import tensorflow as tf\n'), ((1112, 1158), 'tensorflow.cast', 'tf.cast', (['(batchSize * numberOfEdges)', 'tf.float32'], {}), '(batchSize * numberOfEdges, tf.float32)\n', (1119, 1158), True, 'import tensorflow as tf\n')] |
import os
import re
import fnmatch
from logfetch_base import log, is_in_date_range
from termcolor import colored
def find_cached_logs(args):
    """Return the paths of already-downloaded logs under ``args.dest`` that
    match the requested task/request/deploy pattern and date range."""
    pattern = get_matcher(args)
    matches = []
    for name in os.listdir(args.dest):
        if fnmatch.fnmatch(name, pattern) and in_date_range(args, name):
            log(colored('Including log {0}\n'.format(name), 'blue'), args, True)
            matches.append('{0}/{1}'.format(args.dest, name))
        else:
            # NOTE(review): this message is also emitted for pattern
            # mismatches, not only date-range exclusions.
            log(colored('Excluding log {0}, not in date range\n'.format(name), 'magenta'), args, True)
    return matches
def in_date_range(args, filename):
    """Return True unless *filename* embeds a 13-digit (millisecond)
    timestamp that falls outside the requested range."""
    stamps = re.findall(r"-\d{13}-", filename)
    if not stamps:
        # No timestamp in the name: cannot filter, so keep it.
        return True
    # Use the last stamp; strip the dashes and drop the millisecond part.
    seconds = str(stamps[-1]).replace("-", "")[0:-3]
    return is_in_date_range(args, int(seconds))
def get_matcher(args):
    """Build the filename glob for the requested logs.

    The base is the task id, request-deploy pair, or request id (in that
    order of preference); a ``*logtype*`` suffix is appended when the file
    pattern is filename-based and a log type was requested.
    """
    wants_logtype = 'filename' in args.file_pattern and args.logtype
    if args.taskId:
        base = args.taskId
    elif args.deployId and args.requestId:
        base = '{0}-{1}'.format(args.requestId, args.deployId)
    else:
        base = args.requestId
    if wants_logtype:
        return '{0}*{1}*'.format(base, args.logtype)
    return '{0}*'.format(base)
"re.findall",
"os.listdir",
"fnmatch.fnmatch"
] | [((222, 243), 'os.listdir', 'os.listdir', (['args.dest'], {}), '(args.dest)\n', (232, 243), False, 'import os\n'), ((700, 733), 're.findall', 're.findall', (['"""-\\\\d{13}-"""', 'filename'], {}), "('-\\\\d{13}-', filename)\n", (710, 733), False, 'import re\n'), ((256, 295), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['filename', 'log_fn_match'], {}), '(filename, log_fn_match)\n', (271, 295), False, 'import fnmatch\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##
# @file predict_bw_lstm1.py
# @author <NAME> (<NAME> <<EMAIL>>
# @date 2019-04-22
# 2022-03-23 - updated for TensorFlow version 2.6
#
# @brief Predict channel bandwidth.
#
# @remarks This code is based on the nice sample code from:
# https://machinelearningmastery.com/how-to-develop-lstm-models-for-time-series-forecasting/
# import modules
import numpy as np
import tensorflow as tf
import tensorflow.keras # required for TF ver. 2.6
from skimage.util import view_as_windows
# define dataset
# Load the recorded bandwidth series.
bws = np.load('bandwidths.npy')

# Supervised pairs: every 3-sample sliding window predicts the next value.
# The final window is dropped ('[:-1]') because it has no following target.
X = view_as_windows(bws, 3, step=1)[:-1]
y = bws[3:]

# LSTM input is [samples, timesteps, features]; add a singleton feature axis.
X = X[:, :, np.newaxis]

# Build a small single-layer LSTM regressor.
model = tf.keras.Sequential()
# model.add(tf.keras.layers.LSTM(units=50, activation='relu', input_shape=(3, 1)))
model.add(tf.keras.layers.LSTM(units=50, activation='relu'))
model.add(tf.keras.layers.Dense(1))
model.compile(optimizer='adam', loss='mse')

# Train silently.
model.fit(X, y, epochs=1000, verbose=0)

# Show predictions for the first ten windows against their true targets.
for i in range(10):
    x_input = X[i].reshape((1, 3, 1))
    yhat = model.predict(x_input, verbose=0)
    print(f"{','.join([str(int(i)) for i in x_input.flatten()])} -> {yhat.flatten()[0]:.2e} (true value: {int(y[i]):d})")
| [
"tensorflow.keras.Sequential",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.Dense",
"numpy.load",
"skimage.util.view_as_windows"
] | [((594, 619), 'numpy.load', 'np.load', (['"""bandwidths.npy"""'], {}), "('bandwidths.npy')\n", (601, 619), True, 'import numpy as np\n'), ((885, 906), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (904, 906), True, 'import tensorflow as tf\n'), ((624, 655), 'skimage.util.view_as_windows', 'view_as_windows', (['bws', '(3)'], {'step': '(1)'}), '(bws, 3, step=1)\n', (639, 655), False, 'from skimage.util import view_as_windows\n'), ((1000, 1049), 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', ([], {'units': '(50)', 'activation': '"""relu"""'}), "(units=50, activation='relu')\n", (1020, 1049), True, 'import tensorflow as tf\n'), ((1061, 1085), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), '(1)\n', (1082, 1085), True, 'import tensorflow as tf\n')] |
from src.use_cases.todo import create_todo, update_todo
def test_create_todo(client, user):
    """A freshly created todo gets id 1 and keeps the submitted title."""
    payload = {
        'title': 'test todo',
        'description': 'very long and useful description',
    }
    created = create_todo(user.id, payload)
    assert created.id == 1
    assert created.title == payload['title']
def test_update_todo(client):
    """Updating only the title must leave the stored description untouched."""
    todo_id = 1
    unchanged_description = 'very long and useful description'
    payload = {'title': 'new title'}
    updated = update_todo(todo_id, payload)
    assert updated.title == payload['title']
    assert updated.description == unchanged_description
| [
"src.use_cases.todo.update_todo",
"src.use_cases.todo.create_todo"
] | [((216, 245), 'src.use_cases.todo.create_todo', 'create_todo', (['user.id', 'payload'], {}), '(user.id, payload)\n', (227, 245), False, 'from src.use_cases.todo import create_todo, update_todo\n'), ((466, 495), 'src.use_cases.todo.update_todo', 'update_todo', (['todo_id', 'payload'], {}), '(todo_id, payload)\n', (477, 495), False, 'from src.use_cases.todo import create_todo, update_todo\n')] |
#!/usr/bin/env python
"""Exposes functions to perform a source-to-source transformation
that detects and unrolls loops in the code being analyzed.
"""
"""See the LICENSE file, located in the root directory of
the source distribution and
at http://verifun.eecs.berkeley.edu/gametime/about/LICENSE,
for details on the GameTime license and authors.
"""
import os
import subprocess
from defaults import config, sourceDir
class HandlerMode(object):
    """Represents the mode that the loop handler works in."""
    # NOTE: these are plain int constants; compare them with `==` rather
    # than `is`, since identity of equal ints is a CPython detail.
    #: Detect loops.
    DETECTOR = 0
    #: Unroll loops.
    UNROLLER = 1
def _generateHandlerCommand(projectConfig, handlerMode):
    """Generates the system call that runs the loop handler
    with appropriate inputs.

    Arguments:
        projectConfig:
            :class:`~gametime.projectConfiguration.ProjectConfiguration`
            object that represents the configuration of a GameTime project.
        handlerMode:
            Mode that the loop handler should run in.

    Returns:
        Appropriate system call as a list that contains the program
        to be run and the proper arguments.
    """
    # Set the environment variable that allows the Cilly driver to find
    # the path to the configuration file for the Findlib OCaml module.
    os.environ["OCAMLFIND_CONF"] = os.path.join(sourceDir,
                                              "ocaml/conf/findlib.conf")
    # Set the environment variable that allows the Cilly driver to find
    # the path to the folder that contains the compiled OCaml files.
    os.environ["OCAMLPATH"] = os.path.join(sourceDir, "ocaml/lib")
    # Set the environment variable that configures the Cilly driver to load
    # the features that will be needed for the loop handler.
    os.environ["CIL_FEATURES"] = "cil.default-features,loopHandler.loopHandler"

    # BUG FIX: compare the mode with `==`, not `is`.  HandlerMode members
    # are plain ints, and identity of equal ints is an implementation
    # detail of CPython, not guaranteed language behavior.
    isDetector = handlerMode == HandlerMode.DETECTOR

    command = []
    command.append(os.path.join(config.TOOL_CIL, "bin/cilly.bat"))
    command.append("--doloopHandler")
    command.append("--loopHandler-detect"
                   if isDetector
                   else "--loopHandler-unroll")
    command.append("--loopHandler-analyze=%s" % projectConfig.func)

    loopConfigFile = os.path.join(projectConfig.locationTempDir,
                                  config.TEMP_LOOP_CONFIG)
    command.append("--loopHandler-config='%s'" % loopConfigFile)

    for inlineName in projectConfig.inlined:
        command.append("--inline='%s'" % inlineName)

    # The detector analyzes the line-number-annotated copy of the file; the
    # unroller works directly on the temporary file.
    analysisFile = ("%s%s.c" % (projectConfig.locationTempNoExtension,
                                config.TEMP_SUFFIX_LINE_NUMS)
                    if isDetector
                    else projectConfig.locationTempFile)
    command.append(analysisFile)

    command.append("-I'%s'" % projectConfig.locationOrigDir)
    command.append("--save-temps='%s'" % projectConfig.locationTempDir)
    command.append("-c")
    command.append("-o")
    command.append("'%s.out'" % projectConfig.locationTempNoExtension)
    return command
def runDetector(projectConfig):
    """Run the loop detector on the function currently being analyzed.

    The detector writes a loop configuration file that the user must edit:
    it lists the line number of each loop header paired with a bound that
    defaults to 1 and must be replaced with the real loop bound.

    Arguments:
        projectConfig:
            :class:`~gametime.projectConfiguration.ProjectConfiguration`
            object that represents the configuration of a GameTime project.

    Returns:
        Zero if the detection was successful; a non-zero value otherwise.
    """
    detectorCommand = _generateHandlerCommand(projectConfig, HandlerMode.DETECTOR)
    # NOTE(review): passing a list with shell=True is Windows-oriented
    # (cilly.bat); on POSIX only the first element would reach the shell.
    return subprocess.call(detectorCommand, shell=True)
def runUnroller(projectConfig):
    """Run the loop unroller on the function currently being analyzed.

    Produces a temporary file for GameTime analysis in which every loop has
    been unrolled according to the user-specified bounds.

    Precondition: the loop detector has already run and the user has filled
    in the bounds in the loop configuration file that it generated.

    Arguments:
        projectConfig:
            :class:`~gametime.projectConfiguration.ProjectConfiguration`
            object that represents the configuration of a GameTime project.

    Returns:
        Zero if the unrolling was successful; a non-zero value otherwise.
    """
    unrollerCommand = _generateHandlerCommand(projectConfig, HandlerMode.UNROLLER)
    return subprocess.call(unrollerCommand, shell=True)
| [
"os.path.join",
"subprocess.call"
] | [((1313, 1363), 'os.path.join', 'os.path.join', (['sourceDir', '"""ocaml/conf/findlib.conf"""'], {}), "(sourceDir, 'ocaml/conf/findlib.conf')\n", (1325, 1363), False, 'import os\n'), ((1584, 1620), 'os.path.join', 'os.path.join', (['sourceDir', '"""ocaml/lib"""'], {}), "(sourceDir, 'ocaml/lib')\n", (1596, 1620), False, 'import os\n'), ((2202, 2270), 'os.path.join', 'os.path.join', (['projectConfig.locationTempDir', 'config.TEMP_LOOP_CONFIG'], {}), '(projectConfig.locationTempDir, config.TEMP_LOOP_CONFIG)\n', (2214, 2270), False, 'import os\n'), ((3857, 3893), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (3872, 3893), False, 'import subprocess\n'), ((4765, 4801), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (4780, 4801), False, 'import subprocess\n'), ((1877, 1923), 'os.path.join', 'os.path.join', (['config.TOOL_CIL', '"""bin/cilly.bat"""'], {}), "(config.TOOL_CIL, 'bin/cilly.bat')\n", (1889, 1923), False, 'import os\n')] |
from Main import main, __version__ as ESVersion
from argparse import Namespace
import random
from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox
def guiMain(args=None):
mainWindow = Tk()
mainWindow.wm_title("Entrance Shuffle %s" % ESVersion)
topFrame = Frame(mainWindow)
rightHalfFrame = Frame(topFrame)
checkBoxFrame = Frame(rightHalfFrame)
createSpoilerVar = IntVar()
createSpoilerCheckbutton = Checkbutton(checkBoxFrame, text="Create Spoiler Log", variable=createSpoilerVar)
suppressRomVar = IntVar()
suppressRomCheckbutton = Checkbutton(checkBoxFrame, text="Do not create patched Rom", variable=suppressRomVar)
quickSwapVar = IntVar()
quickSwapCheckbutton = Checkbutton(checkBoxFrame, text="Enabled L/R Item quickswapping", variable=quickSwapVar)
dungeonItemsVar = IntVar()
dungeonItemsCheckbutton = Checkbutton(checkBoxFrame, text="Place Dungeon Items (Compasses/Maps)", onvalue=0, offvalue=1, variable=dungeonItemsVar)
beatableOnlyVar = IntVar()
beatableOnlyCheckbutton = Checkbutton(checkBoxFrame, text="Only ensure seed is beatable, not all items must be reachable", variable=beatableOnlyVar)
shuffleGanonVar = IntVar()
shuffleGanonCheckbutton = Checkbutton(checkBoxFrame, text="Include Ganon's Tower and Pyramid Hole in shuffle pool", variable=shuffleGanonVar)
createSpoilerCheckbutton.pack(expand=True, anchor=W)
suppressRomCheckbutton.pack(expand=True, anchor=W)
quickSwapCheckbutton.pack(expand=True, anchor=W)
dungeonItemsCheckbutton.pack(expand=True, anchor=W)
beatableOnlyCheckbutton.pack(expand=True, anchor=W)
shuffleGanonCheckbutton.pack(expand=True, anchor=W)
fileDialogFrame = Frame(rightHalfFrame)
romDialogFrame = Frame(fileDialogFrame)
baseRomLabel = Label(romDialogFrame, text='Base Rom')
romVar = StringVar()
romEntry = Entry(romDialogFrame, textvariable=romVar)
def RomSelect():
rom = filedialog.askopenfilename()
romVar.set(rom)
romSelectButton = Button(romDialogFrame, text='Select Rom', command=RomSelect)
baseRomLabel.pack(side=LEFT)
romEntry.pack(side=LEFT)
romSelectButton.pack(side=LEFT)
spriteDialogFrame = Frame(fileDialogFrame)
baseSpriteLabel = Label(spriteDialogFrame, text='Link Sprite')
spriteVar = StringVar()
spriteEntry = Entry(spriteDialogFrame, textvariable=spriteVar)
def SpriteSelect():
sprite = filedialog.askopenfilename()
spriteVar.set(sprite)
spriteSelectButton = Button(spriteDialogFrame, text='Select Sprite', command=SpriteSelect)
baseSpriteLabel.pack(side=LEFT)
spriteEntry.pack(side=LEFT)
spriteSelectButton.pack(side=LEFT)
romDialogFrame.pack()
spriteDialogFrame.pack()
checkBoxFrame.pack()
fileDialogFrame.pack()
drowDownFrame = Frame(topFrame)
modeFrame = Frame(drowDownFrame)
modeVar = StringVar()
modeVar.set('open')
modeOptionMenu = OptionMenu(modeFrame, modeVar, 'standard', 'open', 'swordless')
modeOptionMenu.pack(side=RIGHT)
modeLabel = Label(modeFrame, text='Game Mode')
modeLabel.pack(side=LEFT)
logicFrame = Frame(drowDownFrame)
logicVar = StringVar()
logicVar.set('noglitches')
logicOptionMenu = OptionMenu(logicFrame, logicVar, 'noglitches', 'minorglitches')
logicOptionMenu.pack(side=RIGHT)
logicLabel = Label(logicFrame, text='Game logic')
logicLabel.pack(side=LEFT)
goalFrame = Frame(drowDownFrame)
goalVar = StringVar()
goalVar.set('ganon')
goalOptionMenu = OptionMenu(goalFrame, goalVar, 'ganon', 'pedestal', 'dungeons', 'triforcehunt', 'crystals')
goalOptionMenu.pack(side=RIGHT)
goalLabel = Label(goalFrame, text='Game goal')
goalLabel.pack(side=LEFT)
difficultyFrame = Frame(drowDownFrame)
difficultyVar = StringVar()
difficultyVar.set('normal')
difficultyOptionMenu = OptionMenu(difficultyFrame, difficultyVar, 'normal', 'timed', 'timed-ohko', 'timed-countdown')
difficultyOptionMenu.pack(side=RIGHT)
difficultyLabel = Label(difficultyFrame, text='Game difficulty')
difficultyLabel.pack(side=LEFT)
algorithmFrame = Frame(drowDownFrame)
algorithmVar = StringVar()
algorithmVar.set('vt25')
algorithmOptionMenu = OptionMenu(algorithmFrame, algorithmVar, 'freshness', 'flood', 'vt21', 'vt22', 'vt25')
algorithmOptionMenu.pack(side=RIGHT)
algorithmLabel = Label(algorithmFrame, text='Item distribution algorithm')
algorithmLabel.pack(side=LEFT)
shuffleFrame = Frame(drowDownFrame)
shuffleVar = StringVar()
shuffleVar.set('full')
shuffleOptionMenu = OptionMenu(shuffleFrame, shuffleVar, 'vanilla', 'simple', 'restricted', 'full', 'madness', 'insanity', 'dungeonsfull', 'dungeonssimple')
shuffleOptionMenu.pack(side=RIGHT)
shuffleLabel = Label(shuffleFrame, text='Entrance shuffle algorithm')
shuffleLabel.pack(side=LEFT)
heartbeepFrame = Frame(drowDownFrame)
heartbeepVar = StringVar()
heartbeepVar.set('normal')
heartbeepOptionMenu = OptionMenu(heartbeepFrame, heartbeepVar, 'normal', 'half', 'quarter', 'off')
heartbeepOptionMenu.pack(side=RIGHT)
heartbeepLabel = Label(heartbeepFrame, text='Heartbeep sound rate')
heartbeepLabel.pack(side=LEFT)
modeFrame.pack(expand=True, anchor=E)
logicFrame.pack(expand=True, anchor=E)
goalFrame.pack(expand=True, anchor=E)
difficultyFrame.pack(expand=True, anchor=E)
algorithmFrame.pack(expand=True, anchor=E)
shuffleFrame.pack(expand=True, anchor=E)
heartbeepFrame.pack(expand=True, anchor=E)
bottomFrame = Frame(mainWindow)
seedLabel = Label(bottomFrame, text='Seed #')
seedVar = StringVar()
seedEntry = Entry(bottomFrame, textvariable=seedVar)
countLabel = Label(bottomFrame, text='Count')
countVar = StringVar()
countSpinbox = Spinbox(bottomFrame, from_=1, to=100, textvariable=countVar)
def generateRom():
guiargs = Namespace
guiargs.seed = int(seedVar.get()) if seedVar.get() else None
guiargs.count = int(countVar.get()) if countVar.get() != '1' else None
guiargs.mode = modeVar.get()
guiargs.logic = logicVar.get()
guiargs.goal = goalVar.get()
guiargs.difficulty = difficultyVar.get()
guiargs.algorithm = algorithmVar.get()
guiargs.shuffle = shuffleVar.get()
guiargs.heartbeep = heartbeepVar.get()
guiargs.create_spoiler = bool(createSpoilerVar.get())
guiargs.suppress_rom = bool(suppressRomVar.get())
guiargs.nodungeonitems = bool(dungeonItemsVar.get())
guiargs.beatableonly = bool(beatableOnlyVar.get())
guiargs.quickswap = bool(quickSwapVar.get())
guiargs.shuffleganon = bool(shuffleGanonVar.get())
guiargs.rom = romVar.get()
guiargs.jsonout = None
guiargs.sprite = spriteVar.get() if spriteVar.get() else None
try:
if guiargs.count is not None:
seed = guiargs.seed
for i in range(guiargs.count):
main(seed=seed, args=guiargs)
seed = random.randint(0, 999999999)
else:
main(seed=guiargs.seed, args=guiargs)
except Exception as e:
messagebox.showerror(title="Error while creating seed", message=str(e))
else:
messagebox.showinfo(title="Success", message="Rom patched successfully")
generateButton = Button(bottomFrame, text='Generate Patched Rom', command=generateRom)
seedLabel.pack(side=LEFT)
seedEntry.pack(side=LEFT)
countLabel.pack(side=LEFT)
countSpinbox.pack(side=LEFT)
generateButton.pack(side=LEFT)
drowDownFrame.pack(side=LEFT)
rightHalfFrame.pack(side=RIGHT)
topFrame.pack(side=TOP)
bottomFrame.pack(side=BOTTOM)
if args is not None:
# load values from commandline args
createSpoilerVar.set(int(args.create_spoiler))
suppressRomVar.set(int(args.suppress_rom))
if args.nodungeonitems:
dungeonItemsVar.set(int(not args.nodungeonitems))
beatableOnlyVar.set(int(args.beatableonly))
quickSwapVar.set(int(args.quickswap))
if args.count:
countVar.set(str(args.count))
if args.seed:
seedVar.set(str(args.seed))
modeVar.set(args.mode)
difficultyVar.set(args.difficulty)
goalVar.set(args.goal)
algorithmVar.set(args.algorithm)
shuffleVar.set(args.shuffle)
heartbeepVar.set(args.heartbeep)
logicVar.set(args.logic)
romVar.set(args.rom)
shuffleGanonVar.set(args.shuffleganon)
if args.sprite is not None:
spriteVar.set(args.sprite)
mainWindow.mainloop()
if __name__ == '__main__':
guiMain()
| [
"tkinter.IntVar",
"random.randint",
"tkinter.Checkbutton",
"tkinter.Entry",
"tkinter.Button",
"tkinter.StringVar",
"tkinter.Tk",
"tkinter.Spinbox",
"tkinter.Label",
"tkinter.OptionMenu",
"tkinter.messagebox.showinfo",
"tkinter.Frame",
"Main.main",
"tkinter.filedialog.askopenfilename"
] | [((298, 302), 'tkinter.Tk', 'Tk', ([], {}), '()\n', (300, 302), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((378, 395), 'tkinter.Frame', 'Frame', (['mainWindow'], {}), '(mainWindow)\n', (383, 395), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((417, 432), 'tkinter.Frame', 'Frame', (['topFrame'], {}), '(topFrame)\n', (422, 432), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((453, 474), 'tkinter.Frame', 'Frame', (['rightHalfFrame'], {}), '(rightHalfFrame)\n', (458, 474), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((499, 507), 'tkinter.IntVar', 'IntVar', ([], {}), '()\n', (505, 507), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((539, 624), 'tkinter.Checkbutton', 'Checkbutton', (['checkBoxFrame'], {'text': '"""Create Spoiler Log"""', 'variable': 'createSpoilerVar'}), "(checkBoxFrame, text='Create Spoiler Log', variable=createSpoilerVar\n )\n", (550, 624), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((641, 649), 'tkinter.IntVar', 'IntVar', ([], {}), '()\n', (647, 649), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((679, 769), 'tkinter.Checkbutton', 'Checkbutton', 
(['checkBoxFrame'], {'text': '"""Do not create patched Rom"""', 'variable': 'suppressRomVar'}), "(checkBoxFrame, text='Do not create patched Rom', variable=\n suppressRomVar)\n", (690, 769), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((784, 792), 'tkinter.IntVar', 'IntVar', ([], {}), '()\n', (790, 792), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((820, 913), 'tkinter.Checkbutton', 'Checkbutton', (['checkBoxFrame'], {'text': '"""Enabled L/R Item quickswapping"""', 'variable': 'quickSwapVar'}), "(checkBoxFrame, text='Enabled L/R Item quickswapping', variable=\n quickSwapVar)\n", (831, 913), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((931, 939), 'tkinter.IntVar', 'IntVar', ([], {}), '()\n', (937, 939), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((970, 1094), 'tkinter.Checkbutton', 'Checkbutton', (['checkBoxFrame'], {'text': '"""Place Dungeon Items (Compasses/Maps)"""', 'onvalue': '(0)', 'offvalue': '(1)', 'variable': 'dungeonItemsVar'}), "(checkBoxFrame, text='Place Dungeon Items (Compasses/Maps)',\n onvalue=0, offvalue=1, variable=dungeonItemsVar)\n", (981, 1094), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((1113, 1121), 'tkinter.IntVar', 'IntVar', ([], {}), '()\n', (1119, 1121), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, 
filedialog, messagebox\n'), ((1152, 1283), 'tkinter.Checkbutton', 'Checkbutton', (['checkBoxFrame'], {'text': '"""Only ensure seed is beatable, not all items must be reachable"""', 'variable': 'beatableOnlyVar'}), "(checkBoxFrame, text=\n 'Only ensure seed is beatable, not all items must be reachable',\n variable=beatableOnlyVar)\n", (1163, 1283), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((1297, 1305), 'tkinter.IntVar', 'IntVar', ([], {}), '()\n', (1303, 1305), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((1336, 1461), 'tkinter.Checkbutton', 'Checkbutton', (['checkBoxFrame'], {'text': '"""Include Ganon\'s Tower and Pyramid Hole in shuffle pool"""', 'variable': 'shuffleGanonVar'}), '(checkBoxFrame, text=\n "Include Ganon\'s Tower and Pyramid Hole in shuffle pool", variable=\n shuffleGanonVar)\n', (1347, 1461), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((1813, 1834), 'tkinter.Frame', 'Frame', (['rightHalfFrame'], {}), '(rightHalfFrame)\n', (1818, 1834), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((1857, 1879), 'tkinter.Frame', 'Frame', (['fileDialogFrame'], {}), '(fileDialogFrame)\n', (1862, 1879), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((1899, 1937), 'tkinter.Label', 'Label', (['romDialogFrame'], {'text': '"""Base Rom"""'}), "(romDialogFrame, text='Base Rom')\n", (1904, 1937), False, 'from tkinter import Checkbutton, 
OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((1951, 1962), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (1960, 1962), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((1978, 2020), 'tkinter.Entry', 'Entry', (['romDialogFrame'], {'textvariable': 'romVar'}), '(romDialogFrame, textvariable=romVar)\n', (1983, 2020), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((2131, 2191), 'tkinter.Button', 'Button', (['romDialogFrame'], {'text': '"""Select Rom"""', 'command': 'RomSelect'}), "(romDialogFrame, text='Select Rom', command=RomSelect)\n", (2137, 2191), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((2320, 2342), 'tkinter.Frame', 'Frame', (['fileDialogFrame'], {}), '(fileDialogFrame)\n', (2325, 2342), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((2365, 2409), 'tkinter.Label', 'Label', (['spriteDialogFrame'], {'text': '"""Link Sprite"""'}), "(spriteDialogFrame, text='Link Sprite')\n", (2370, 2409), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((2426, 2437), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (2435, 2437), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((2456, 2504), 'tkinter.Entry', 'Entry', 
(['spriteDialogFrame'], {'textvariable': 'spriteVar'}), '(spriteDialogFrame, textvariable=spriteVar)\n', (2461, 2504), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((2632, 2701), 'tkinter.Button', 'Button', (['spriteDialogFrame'], {'text': '"""Select Sprite"""', 'command': 'SpriteSelect'}), "(spriteDialogFrame, text='Select Sprite', command=SpriteSelect)\n", (2638, 2701), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((2944, 2959), 'tkinter.Frame', 'Frame', (['topFrame'], {}), '(topFrame)\n', (2949, 2959), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((2977, 2997), 'tkinter.Frame', 'Frame', (['drowDownFrame'], {}), '(drowDownFrame)\n', (2982, 2997), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((3012, 3023), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (3021, 3023), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((3069, 3132), 'tkinter.OptionMenu', 'OptionMenu', (['modeFrame', 'modeVar', '"""standard"""', '"""open"""', '"""swordless"""'], {}), "(modeFrame, modeVar, 'standard', 'open', 'swordless')\n", (3079, 3132), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((3185, 3219), 'tkinter.Label', 'Label', (['modeFrame'], {'text': '"""Game Mode"""'}), "(modeFrame, text='Game Mode')\n", (3190, 3219), False, 
'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((3272, 3292), 'tkinter.Frame', 'Frame', (['drowDownFrame'], {}), '(drowDownFrame)\n', (3277, 3292), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((3308, 3319), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (3317, 3319), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((3373, 3436), 'tkinter.OptionMenu', 'OptionMenu', (['logicFrame', 'logicVar', '"""noglitches"""', '"""minorglitches"""'], {}), "(logicFrame, logicVar, 'noglitches', 'minorglitches')\n", (3383, 3436), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((3491, 3527), 'tkinter.Label', 'Label', (['logicFrame'], {'text': '"""Game logic"""'}), "(logicFrame, text='Game logic')\n", (3496, 3527), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((3580, 3600), 'tkinter.Frame', 'Frame', (['drowDownFrame'], {}), '(drowDownFrame)\n', (3585, 3600), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((3615, 3626), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (3624, 3626), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((3673, 3768), 'tkinter.OptionMenu', 'OptionMenu', (['goalFrame', 'goalVar', 
'"""ganon"""', '"""pedestal"""', '"""dungeons"""', '"""triforcehunt"""', '"""crystals"""'], {}), "(goalFrame, goalVar, 'ganon', 'pedestal', 'dungeons',\n 'triforcehunt', 'crystals')\n", (3683, 3768), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((3817, 3851), 'tkinter.Label', 'Label', (['goalFrame'], {'text': '"""Game goal"""'}), "(goalFrame, text='Game goal')\n", (3822, 3851), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((3909, 3929), 'tkinter.Frame', 'Frame', (['drowDownFrame'], {}), '(drowDownFrame)\n', (3914, 3929), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((3950, 3961), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (3959, 3961), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((4021, 4119), 'tkinter.OptionMenu', 'OptionMenu', (['difficultyFrame', 'difficultyVar', '"""normal"""', '"""timed"""', '"""timed-ohko"""', '"""timed-countdown"""'], {}), "(difficultyFrame, difficultyVar, 'normal', 'timed', 'timed-ohko',\n 'timed-countdown')\n", (4031, 4119), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((4180, 4226), 'tkinter.Label', 'Label', (['difficultyFrame'], {'text': '"""Game difficulty"""'}), "(difficultyFrame, text='Game difficulty')\n", (4185, 4226), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), 
((4289, 4309), 'tkinter.Frame', 'Frame', (['drowDownFrame'], {}), '(drowDownFrame)\n', (4294, 4309), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((4329, 4340), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (4338, 4340), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((4396, 4486), 'tkinter.OptionMenu', 'OptionMenu', (['algorithmFrame', 'algorithmVar', '"""freshness"""', '"""flood"""', '"""vt21"""', '"""vt22"""', '"""vt25"""'], {}), "(algorithmFrame, algorithmVar, 'freshness', 'flood', 'vt21',\n 'vt22', 'vt25')\n", (4406, 4486), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((4545, 4602), 'tkinter.Label', 'Label', (['algorithmFrame'], {'text': '"""Item distribution algorithm"""'}), "(algorithmFrame, text='Item distribution algorithm')\n", (4550, 4602), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((4662, 4682), 'tkinter.Frame', 'Frame', (['drowDownFrame'], {}), '(drowDownFrame)\n', (4667, 4682), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((4700, 4711), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (4709, 4711), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((4763, 4903), 'tkinter.OptionMenu', 'OptionMenu', (['shuffleFrame', 'shuffleVar', '"""vanilla"""', '"""simple"""', '"""restricted"""', 
'"""full"""', '"""madness"""', '"""insanity"""', '"""dungeonsfull"""', '"""dungeonssimple"""'], {}), "(shuffleFrame, shuffleVar, 'vanilla', 'simple', 'restricted',\n 'full', 'madness', 'insanity', 'dungeonsfull', 'dungeonssimple')\n", (4773, 4903), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((4958, 5012), 'tkinter.Label', 'Label', (['shuffleFrame'], {'text': '"""Entrance shuffle algorithm"""'}), "(shuffleFrame, text='Entrance shuffle algorithm')\n", (4963, 5012), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((5072, 5092), 'tkinter.Frame', 'Frame', (['drowDownFrame'], {}), '(drowDownFrame)\n', (5077, 5092), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((5112, 5123), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (5121, 5123), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((5181, 5257), 'tkinter.OptionMenu', 'OptionMenu', (['heartbeepFrame', 'heartbeepVar', '"""normal"""', '"""half"""', '"""quarter"""', '"""off"""'], {}), "(heartbeepFrame, heartbeepVar, 'normal', 'half', 'quarter', 'off')\n", (5191, 5257), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((5320, 5370), 'tkinter.Label', 'Label', (['heartbeepFrame'], {'text': '"""Heartbeep sound rate"""'}), "(heartbeepFrame, text='Heartbeep sound rate')\n", (5325, 5370), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, 
W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((5744, 5761), 'tkinter.Frame', 'Frame', (['mainWindow'], {}), '(mainWindow)\n', (5749, 5761), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((5779, 5812), 'tkinter.Label', 'Label', (['bottomFrame'], {'text': '"""Seed #"""'}), "(bottomFrame, text='Seed #')\n", (5784, 5812), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((5827, 5838), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (5836, 5838), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((5855, 5895), 'tkinter.Entry', 'Entry', (['bottomFrame'], {'textvariable': 'seedVar'}), '(bottomFrame, textvariable=seedVar)\n', (5860, 5895), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((5913, 5945), 'tkinter.Label', 'Label', (['bottomFrame'], {'text': '"""Count"""'}), "(bottomFrame, text='Count')\n", (5918, 5945), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((5961, 5972), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (5970, 5972), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((5992, 6052), 'tkinter.Spinbox', 'Spinbox', (['bottomFrame'], {'from_': '(1)', 'to': '(100)', 'textvariable': 'countVar'}), '(bottomFrame, from_=1, to=100, textvariable=countVar)\n', (5999, 6052), False, 'from tkinter import 
Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((7592, 7661), 'tkinter.Button', 'Button', (['bottomFrame'], {'text': '"""Generate Patched Rom"""', 'command': 'generateRom'}), "(bottomFrame, text='Generate Patched Rom', command=generateRom)\n", (7598, 7661), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((2056, 2084), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {}), '()\n', (2082, 2084), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((2547, 2575), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {}), '()\n', (2573, 2575), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((7497, 7569), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', ([], {'title': '"""Success"""', 'message': '"""Rom patched successfully"""'}), "(title='Success', message='Rom patched successfully')\n", (7516, 7569), False, 'from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox\n'), ((7318, 7355), 'Main.main', 'main', ([], {'seed': 'guiargs.seed', 'args': 'guiargs'}), '(seed=guiargs.seed, args=guiargs)\n', (7322, 7355), False, 'from Main import main, __version__ as ESVersion\n'), ((7198, 7227), 'Main.main', 'main', ([], {'seed': 'seed', 'args': 'guiargs'}), '(seed=seed, args=guiargs)\n', (7202, 7227), False, 'from Main import main, __version__ as ESVersion\n'), ((7255, 7283), 'random.randint', 'random.randint', (['(0)', '(999999999)'], {}), '(0, 999999999)\n', (7269, 
7283), False, 'import random\n')] |
from datetime import datetime
from pprint import pprint
from cryptography.fernet import Fernet
from libsvc.utils import pack_data, unpack_data
def pack_data_test():
    """Round-trip test: pack a subset of fields with a Fernet key and unpack them."""
    key = Fernet.generate_key()
    payload = {"today": datetime.today(),
               "dog": "cat",
               "red": "blue"}
    packed = pack_data(payload, key, fields=["today", "dog"])
    print(packed.decode("utf8"))
    unpacked = unpack_data(packed, key)
    pprint(unpacked)
    # only the requested fields survive the round trip
    assert unpacked["dog"] == "cat"
    round_tripped_date = datetime.fromisoformat(unpacked["today"]).date()
    assert round_tripped_date == datetime.today().date()
    assert "red" not in unpacked
# allow the test to be run directly as a script (outside any test runner)
if __name__ == "__main__":
    pack_data_test()
| [
"cryptography.fernet.Fernet.generate_key",
"libsvc.utils.pack_data",
"datetime.datetime.fromisoformat",
"datetime.datetime.today",
"libsvc.utils.unpack_data",
"pprint.pprint"
] | [((179, 200), 'cryptography.fernet.Fernet.generate_key', 'Fernet.generate_key', ([], {}), '()\n', (198, 200), False, 'from cryptography.fernet import Fernet\n'), ((303, 349), 'libsvc.utils.pack_data', 'pack_data', (['data', 'fkey'], {'fields': "['today', 'dog']"}), "(data, fkey, fields=['today', 'dog'])\n", (312, 349), False, 'from libsvc.utils import pack_data, unpack_data\n'), ((387, 407), 'libsvc.utils.unpack_data', 'unpack_data', (['p', 'fkey'], {}), '(p, fkey)\n', (398, 407), False, 'from libsvc.utils import pack_data, unpack_data\n'), ((412, 421), 'pprint.pprint', 'pprint', (['u'], {}), '(u)\n', (418, 421), False, 'from pprint import pprint\n'), ((223, 239), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (237, 239), False, 'from datetime import datetime\n'), ((464, 498), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (["u['today']"], {}), "(u['today'])\n", (486, 498), False, 'from datetime import datetime\n'), ((526, 542), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (540, 542), False, 'from datetime import datetime\n')] |
#!/usr/bin/env python3
# encoding: utf-8
#
# This file is part of ckanext-doi
# Created by the Natural History Museum in London, UK
import logging
from ckan.lib.helpers import lang as ckan_lang
from ckan.model import Package
from ckan.plugins import PluginImplementations, toolkit
from ckanext.doi.interfaces import IDoi
from ckanext.doi.lib import xml_utils
from ckanext.doi.lib.errors import DOIMetadataException
from ckanext.doi.lib.helpers import date_or_none, get_site_url, package_get_year
# module-level logger named after this module
log = logging.getLogger(__name__)
def build_metadata_dict(pkg_dict):
    '''
    Build/extract a basic dict of metadata that can then be passed to build_xml_dict.

    Required DataCite fields (creators, titles, publisher, publicationYear and
    resourceType) are gathered first, then the optional fields. Extraction errors are
    collected rather than raised immediately so that plugins implementing IDoi get a
    chance to fill in or correct values; only required fields that are still missing
    after the plugins have run cause an exception. Optional field errors are logged at
    debug level.

    :param pkg_dict: dict of package details
    :return: dict of metadata
    :raises DOIMetadataException: if any required field could not be extracted
    '''
    metadata_dict = {}

    # collect errors instead of throwing them immediately; some data may not be correctly handled
    # by this base method but will be handled correctly by plugins that implement IDoi
    errors = {}

    # required fields first (identifier will be added later)
    required = {
        'creators': [],
        'titles': [],
        'publisher': None,
        'publicationYear': None,
        'resourceType': None
    }

    def _add_required(key, get_func):
        # store the extracted value, or record the error for later reporting
        try:
            required[key] = get_func()
        except Exception as e:
            errors[key] = e

    # CREATORS
    _add_required('creators', lambda: [{
        'full_name': pkg_dict.get('author')
    }])

    # TITLES
    _add_required('titles', lambda: [{
        'title': pkg_dict.get('title')
    }])

    # PUBLISHER
    _add_required('publisher', lambda: toolkit.config.get('ckanext.doi.publisher'))

    # PUBLICATION YEAR
    _add_required('publicationYear', lambda: package_get_year(pkg_dict))

    # TYPE
    _add_required('resourceType', lambda: pkg_dict.get('type'))

    # now the optional fields
    optional = {
        'subjects': [],
        'contributors': [],
        'dates': [],
        'language': '',
        'alternateIdentifiers': [],
        'relatedIdentifiers': [],
        'sizes': [],
        'formats': [],
        'version': '',
        'rightsList': [],
        'descriptions': [],
        'geolocations': [],
        'fundingReferences': []
    }

    # SUBJECTS
    # use the tag list (deduplicated, empty strings removed, sorted for determinism)
    try:
        tags = pkg_dict.get('tag_string', '').split(',')
        tags += [tag['name'] if isinstance(tag, dict) else tag for tag in pkg_dict.get('tags', [])]
        optional['subjects'] = [{'subject': tag} for tag in sorted({t for t in tags if t != ''})]
    except Exception as e:
        errors['subjects'] = e

    # CONTRIBUTORS
    # use the author and maintainer; no splitting or parsing for either
    # no try/except for this because it's just a simple .get() and if that doesn't work then we
    # want to know
    author = pkg_dict.get('author')
    maintainer = pkg_dict.get('maintainer')
    if author is not None:
        optional['contributors'].append(
            {
                'contributor_type': 'Researcher',
                'full_name': author
            })
    if maintainer is not None:
        optional['contributors'].append({
            'contributor_type': 'DataManager',
            'full_name': maintainer
        })

    # DATES
    # created, updated, and doi publish date
    date_errors = {}
    try:
        optional['dates'].append({
            'dateType': 'Created',
            'date': date_or_none(pkg_dict.get('metadata_created'))
        })
    except Exception as e:
        date_errors['created'] = e
    try:
        optional['dates'].append({
            'dateType': 'Updated',
            'date': date_or_none(pkg_dict.get('metadata_modified'))
        })
    except Exception as e:
        date_errors['updated'] = e
    if 'doi_date_published' in pkg_dict:
        try:
            optional['dates'].append({
                'dateType': 'Issued',
                'date': date_or_none(pkg_dict.get('doi_date_published'))
            })
        except Exception as e:
            date_errors['doi_date_published'] = e
    # BUG FIX: date_errors used to be collected but never reported anywhere; surface them
    # under the 'dates' key so they are logged with the other optional field errors
    if date_errors:
        errors['dates'] = DOIMetadataException(
            '; '.join(f'{k}: {e}' for k, e in date_errors.items()))

    # LANGUAGE
    # use language set in CKAN
    try:
        optional['language'] = ckan_lang()
    except Exception as e:
        errors['language'] = e

    # ALTERNATE IDENTIFIERS
    # add permalink back to this site
    try:
        permalink = f'{get_site_url()}/dataset/{pkg_dict["id"]}'
        optional['alternateIdentifiers'] = [{
            'alternateIdentifierType': 'URL',
            'alternateIdentifier': permalink
        }]
    except Exception as e:
        errors['alternateIdentifiers'] = e

    # RELATED IDENTIFIERS
    # nothing relevant in default schema

    # SIZES
    # sum up given sizes from resources in the package and convert from bytes to kilobytes
    try:
        resource_sizes = [r.get('size') or 0 for r in pkg_dict.get('resources', []) or []]
        total_size = [f'{int(sum(resource_sizes) / 1024)} kb']
        optional['sizes'] = total_size
    except Exception as e:
        errors['sizes'] = e

    # FORMATS
    # list unique formats from package resources (sorted for deterministic output)
    try:
        formats = sorted(
            set(filter(None, [r.get('format') for r in pkg_dict.get('resources', []) or []])))
        optional['formats'] = formats
    except Exception as e:
        errors['formats'] = e

    # VERSION
    # doesn't matter if there's no version, it'll get filtered out later
    optional['version'] = pkg_dict.get('version')

    # RIGHTS
    # use the package license and get details from CKAN's license register
    license_id = pkg_dict.get('license_id', 'notspecified')
    try:
        if license_id != 'notspecified' and license_id is not None:
            license_register = Package.get_license_register()
            # named pkg_license rather than license to avoid shadowing the builtin
            pkg_license = license_register.get(license_id)
            if pkg_license is not None:
                optional['rightsList'] = [
                    {
                        'url': pkg_license.url,
                        'identifier': pkg_license.id
                    }
                ]
    except Exception as e:
        errors['rightsList'] = e

    # DESCRIPTIONS
    # use package notes
    optional['descriptions'] = [
        {
            'descriptionType': 'Other',
            'description': pkg_dict.get('notes', '')
        }
    ]

    # GEOLOCATIONS
    # nothing relevant in default schema

    # FUNDING
    # nothing relevant in default schema

    metadata_dict.update(required)
    metadata_dict.update(optional)

    for plugin in PluginImplementations(IDoi):
        # implementations should remove relevant errors from the errors dict if they successfully
        # handle an item
        metadata_dict, errors = plugin.build_metadata_dict(pkg_dict, metadata_dict, errors)

    # a required field left as None with no recorded error is also a failure
    for k in required:
        if metadata_dict.get(k) is None and errors.get(k) is None:
            errors[k] = DOIMetadataException('Required field cannot be None')

    required_errors = {k: e for k, e in errors.items() if k in required}
    if len(required_errors) > 0:
        error_msg = f'Could not extract metadata for the following required keys: ' \
                    f'{", ".join(required_errors)}'
        log.exception(error_msg)
        for k, e in required_errors.items():
            log.exception(f'{k}: {e}')
        raise DOIMetadataException(error_msg)

    optional_errors = {k: e for k, e in errors.items() if k in optional}
    # BUG FIX: this previously tested required_errors (copy-paste), so optional errors
    # could only ever be "logged" on a path where the function had already raised above,
    # i.e. they were never logged at all
    if len(optional_errors) > 0:
        error_msg = f'Could not extract metadata for the following optional keys: ' \
                    f'{", ".join(optional_errors)}'
        log.debug(error_msg)
        for k, e in optional_errors.items():
            log.debug(f'{k}: {e}')

    return metadata_dict
def build_xml_dict(metadata_dict):
    '''
    Build a dictionary that can be passed directly to datacite.schema42.tostring() to
    generate xml.

    Note: despite its history (it was once called metadata_to_xml) this function does not
    produce any xml itself; it only reshapes the metadata so a separate function can
    serialise it.

    :param metadata_dict: a dict of metadata generated from build_metadata_dict
    :return: dict that can be passed directly to datacite.schema42.tostring()
    '''
    # the required fields go in first (the DOI itself is added later)
    xml_dict = {
        'creators': [xml_utils.create_contributor(**creator)
                     for creator in metadata_dict.get('creators', [])],
        'titles': metadata_dict.get('titles', []),
        'publisher': metadata_dict.get('publisher'),
        'publicationYear': str(metadata_dict.get('publicationYear')),
        'types': {
            'resourceType': metadata_dict.get('resourceType'),
            'resourceTypeGeneral': 'Dataset'
        },
        'schemaVersion': 'http://datacite.org/schema/kernel-4',
    }

    optional_keys = (
        'subjects',
        'contributors',
        'dates',
        'language',
        'alternateIdentifiers',
        'relatedIdentifiers',
        'sizes',
        'formats',
        'version',
        'rightsList',
        'descriptions',
        'geolocations',
        'fundingReferences',
    )
    for key in optional_keys:
        value = metadata_dict.get(key)
        # skip anything missing, empty, or without a usable length
        try:
            is_populated = value is not None and len(value) > 0
        except:
            is_populated = False
        if not is_populated:
            continue
        if key == 'contributors':
            xml_dict['contributors'] = [xml_utils.create_contributor(**contributor)
                                        for contributor in value]
        elif key == 'dates':
            # stringify each date without mutating the incoming entries
            xml_dict['dates'] = [{**entry, 'date': str(entry['date'])} for entry in value]
        else:
            xml_dict[key] = value

    # give plugins a final chance to amend the xml dict
    for plugin in PluginImplementations(IDoi):
        xml_dict = plugin.build_xml_dict(metadata_dict, xml_dict)
    return xml_dict
| [
"logging.getLogger",
"ckan.plugins.toolkit.config.get",
"ckanext.doi.lib.helpers.package_get_year",
"ckan.lib.helpers.lang",
"ckan.model.Package.get_license_register",
"ckanext.doi.lib.xml_utils.create_contributor",
"ckanext.doi.lib.errors.DOIMetadataException",
"ckan.plugins.PluginImplementations",
... | [((506, 533), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (523, 533), False, 'import logging\n'), ((6424, 6451), 'ckan.plugins.PluginImplementations', 'PluginImplementations', (['IDoi'], {}), '(IDoi)\n', (6445, 6451), False, 'from ckan.plugins import PluginImplementations, toolkit\n'), ((9823, 9850), 'ckan.plugins.PluginImplementations', 'PluginImplementations', (['IDoi'], {}), '(IDoi)\n', (9844, 9850), False, 'from ckan.plugins import PluginImplementations, toolkit\n'), ((4101, 4112), 'ckan.lib.helpers.lang', 'ckan_lang', ([], {}), '()\n', (4110, 4112), True, 'from ckan.lib.helpers import lang as ckan_lang\n'), ((7213, 7244), 'ckanext.doi.lib.errors.DOIMetadataException', 'DOIMetadataException', (['error_msg'], {}), '(error_msg)\n', (7233, 7244), False, 'from ckanext.doi.lib.errors import DOIMetadataException\n'), ((1578, 1621), 'ckan.plugins.toolkit.config.get', 'toolkit.config.get', (['"""ckanext.doi.publisher"""'], {}), "('ckanext.doi.publisher')\n", (1596, 1621), False, 'from ckan.plugins import PluginImplementations, toolkit\n'), ((1692, 1718), 'ckanext.doi.lib.helpers.package_get_year', 'package_get_year', (['pkg_dict'], {}), '(pkg_dict)\n', (1708, 1718), False, 'from ckanext.doi.lib.helpers import date_or_none, get_site_url, package_get_year\n'), ((5641, 5671), 'ckan.model.Package.get_license_register', 'Package.get_license_register', ([], {}), '()\n', (5669, 5671), False, 'from ckan.model import Package\n'), ((6783, 6836), 'ckanext.doi.lib.errors.DOIMetadataException', 'DOIMetadataException', (['"""Required field cannot be None"""'], {}), "('Required field cannot be None')\n", (6803, 6836), False, 'from ckanext.doi.lib.errors import DOIMetadataException\n'), ((8683, 8722), 'ckanext.doi.lib.xml_utils.create_contributor', 'xml_utils.create_contributor', ([], {}), '(**creator)\n', (8711, 8722), False, 'from ckanext.doi.lib import xml_utils\n'), ((4270, 4284), 'ckanext.doi.lib.helpers.get_site_url', 'get_site_url', ([], 
{}), '()\n', (4282, 4284), False, 'from ckanext.doi.lib.helpers import date_or_none, get_site_url, package_get_year\n'), ((9416, 9459), 'ckanext.doi.lib.xml_utils.create_contributor', 'xml_utils.create_contributor', ([], {}), '(**contributor)\n', (9444, 9459), False, 'from ckanext.doi.lib import xml_utils\n')] |
# -*- coding: utf-8 -*-
from model.movie import Movie
from model.user import User
from fixture.selenium_fixture import app
def test_add_movie(app):
    """Log in as admin, add a movie, and check the movie list is unchanged."""
    app.session.login(User.Admin())
    movies_before = app.movie.get_movie_list()
    app.movie.add_movie(Movie(film_name='name', film_year='2016'))
    movies_after = app.movie.get_movie_list()
    # NOTE(review): asserting the list is *unchanged* after an add looks suspicious -
    # confirm the add is genuinely expected to be rejected here
    assert movies_before == movies_after
    app.session.logout()
def test_add_movie_empty(app):
    """Check the required-field validation on the add-movie form as admin."""
    admin = User.Admin()
    app.session.login(admin)
    app.movie.check_field_in_add_form()
    app.session.logout()
| [
"model.user.User.Admin",
"fixture.selenium_fixture.app.movie.check_field_in_add_form",
"model.movie.Movie",
"fixture.selenium_fixture.app.movie.get_movie_list",
"fixture.selenium_fixture.app.session.logout"
] | [((201, 227), 'fixture.selenium_fixture.app.movie.get_movie_list', 'app.movie.get_movie_list', ([], {}), '()\n', (225, 227), False, 'from fixture.selenium_fixture import app\n'), ((310, 336), 'fixture.selenium_fixture.app.movie.get_movie_list', 'app.movie.get_movie_list', ([], {}), '()\n', (334, 336), False, 'from fixture.selenium_fixture import app\n'), ((373, 393), 'fixture.selenium_fixture.app.session.logout', 'app.session.logout', ([], {}), '()\n', (391, 393), False, 'from fixture.selenium_fixture import app\n'), ((467, 502), 'fixture.selenium_fixture.app.movie.check_field_in_add_form', 'app.movie.check_field_in_add_form', ([], {}), '()\n', (500, 502), False, 'from fixture.selenium_fixture import app\n'), ((507, 527), 'fixture.selenium_fixture.app.session.logout', 'app.session.logout', ([], {}), '()\n', (525, 527), False, 'from fixture.selenium_fixture import app\n'), ((172, 184), 'model.user.User.Admin', 'User.Admin', ([], {}), '()\n', (182, 184), False, 'from model.user import User\n'), ((252, 293), 'model.movie.Movie', 'Movie', ([], {'film_name': '"""name"""', 'film_year': '"""2016"""'}), "(film_name='name', film_year='2016')\n", (257, 293), False, 'from model.movie import Movie\n'), ((449, 461), 'model.user.User.Admin', 'User.Admin', ([], {}), '()\n', (459, 461), False, 'from model.user import User\n')] |
# ---- Command-line argument handling ----
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--run", action="store_true",
                    help="Start the learning process")
parser.add_argument("-m", "--memories", type=int, default=100,
                    help="Number of runs of demonstration data to initialize with")
parser.add_argument("-e", "--episodes", type=int, default=10000,
                    help="Number of episodes / runs to learn for")
parser.add_argument("-t", "--type", type=str, default="DQN",
                    choices=["DQN", "SARSA", "DDQN", "BOTH", "Baseline", "Human"],
                    help="The algorithm to use")
parser.add_argument("-n", "--name", type=str, default="no_name",
                    help="A custom name to give the saved log and model files")
args = parser.parse_args()
# a learning run needs a real name so the saved log/model files are identifiable
if args.run and args.name == "no_name":
    parser.error("You should provide a name when running a learning session")
# every algorithm (i.e. everything except "Human") needs a non-zero episode count
if args.type in ["Baseline", "DQN", "SARSA", "DDQN", "BOTH"] and args.episodes == 0:
    parser.error("You should specify the number of episodes for the algorithm")
# Suppress the many unnecessary TensorFlow warnings; the env var must be set before
# TensorFlow emits anything (tf.logging is the TF 1.x API)
import os, sys
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.logging.set_verbosity(tf.logging.ERROR)
# Create the simulation environment shared by every agent below
from Simulation.forest_fire import ForestFire
forestfire = ForestFire()
# Start learning straight away (-r / --run was given)
if args.run:
    if args.type == "Baseline":
        print(f"Running Baseline with {args.episodes} episodes")
    else:
        print(f"Running {args.type} with {args.memories} "
              f"memories and {args.episodes} episodes")
    # import the selected algorithm under the common name DQN so the code below
    # is identical for every variant
    if args.type in ["DQN", "Baseline"]:
        from DQN import DQN
    if args.type == "SARSA":
        from DQN_SARSA import DQN_SARSA as DQN
    if args.type == "DDQN":
        from DQN_DUEL import DQN_DUEL as DQN
    if args.type == "BOTH":
        from DQN_BOTH import DQN_BOTH as DQN
    Agent = DQN(forestfire, args.name)
    if args.type == "Baseline":
        # Baseline runs the episodes as a baseline policy instead of demonstrations
        Agent.collect_memories(args.episodes, perform_baseline=True)
    else:
        Agent.collect_memories(args.memories)
    # NOTE(review): learn() also runs on the Baseline path - confirm that is intended
    Agent.learn(args.episodes)
# Don't start learning
else:
    # Run the simulation in human (keyboard-controlled) mode
    if args.type == "Human":
        from misc import run_human
        run_human(forestfire)
    # Otherwise import everything for interactive (REPL) mode
    else:
        from misc import run_human, time_simulation_run
        from DQN import DQN
        from DQN_SARSA import DQN_SARSA
        from DQN_DUEL import DQN_DUEL
        from DQN_BOTH import DQN_BOTH
        # Create one quiet agent per algorithm, rebinding the class names to instances
        DQN = DQN(forestfire, verbose=False)
        DQN_SARSA = DQN_SARSA(forestfire, verbose=False)
        DQN_DUEL = DQN_DUEL(forestfire, verbose=False)
        DQN_BOTH = DQN_BOTH(forestfire, verbose=False)
        # Get a list of imported algorithms to play with (hiding dunders and internals)
        options = [o for o in dir() \
                   if not o.startswith("__") \
                   and not o in ["os", "code", "tf", "argparse",
                                 "args", "parser", "ForestFire"]]
        # Display those algorithms for ease of use
        msg = (
            f"\nImported the following functions and algorithms for interactive mode:"
            f"\n{[o for o in options]}\n"
            f"Load a model with .load_model, play optimally with .play_optimal.\n"
        )
        # Drop the user in the interpreter, if the script is not already called with -i
        if sys.flags.interactive:
            print(msg)
        else:
            import code
            code.interact(banner=msg, local=locals())
| [
"misc.run_human",
"argparse.ArgumentParser",
"DQN_DUEL.DQN_DUEL",
"tensorflow.logging.set_verbosity",
"DQN.DQN",
"Simulation.forest_fire.ForestFire",
"DQN_BOTH.DQN_BOTH",
"DQN_SARSA.DQN_SARSA"
] | [((47, 72), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (70, 72), False, 'import argparse\n'), ((1282, 1324), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.ERROR'], {}), '(tf.logging.ERROR)\n', (1306, 1324), True, 'import tensorflow as tf\n'), ((1415, 1427), 'Simulation.forest_fire.ForestFire', 'ForestFire', ([], {}), '()\n', (1425, 1427), False, 'from Simulation.forest_fire import ForestFire\n'), ((2025, 2051), 'DQN.DQN', 'DQN', (['forestfire', 'args.name'], {}), '(forestfire, args.name)\n', (2028, 2051), False, 'from DQN import DQN\n'), ((2399, 2420), 'misc.run_human', 'run_human', (['forestfire'], {}), '(forestfire)\n', (2408, 2420), False, 'from misc import run_human, time_simulation_run\n'), ((2734, 2764), 'DQN.DQN', 'DQN', (['forestfire'], {'verbose': '(False)'}), '(forestfire, verbose=False)\n', (2737, 2764), False, 'from DQN import DQN\n'), ((2786, 2822), 'DQN_SARSA.DQN_SARSA', 'DQN_SARSA', (['forestfire'], {'verbose': '(False)'}), '(forestfire, verbose=False)\n', (2795, 2822), False, 'from DQN_SARSA import DQN_SARSA\n'), ((2843, 2878), 'DQN_DUEL.DQN_DUEL', 'DQN_DUEL', (['forestfire'], {'verbose': '(False)'}), '(forestfire, verbose=False)\n', (2851, 2878), False, 'from DQN_DUEL import DQN_DUEL\n'), ((2899, 2934), 'DQN_BOTH.DQN_BOTH', 'DQN_BOTH', (['forestfire'], {'verbose': '(False)'}), '(forestfire, verbose=False)\n', (2907, 2934), False, 'from DQN_BOTH import DQN_BOTH\n')] |
# author: <NAME>, <NAME>
# title: occasionally trivial support functions for aggregating data for python 2/3 [only numpy as dependency]
# NOTE: these functions are generally tested meant for 1D although they may apply or be easily extended to nd
# license: 3-clause BSD
import numpy as np
# whole-array ("flat") aggregations that map directly onto numpy reductions
flat_max = np.max
flat_min = np.min
flat_percentile = np.percentile
flat_mean = np.average  # np.average (supports weights), not np.mean
def flat_abs_maximum(data, preserve_sign=True):
"""
Function to return the absolute maximum value in an array. By default,
this function will preserve the sign, meaning that if an array contains [-75, -25, 0, 25, 50]
then the function will return -75 because that value has the highest magnitude but it will return
the original value (preserving the sign).
Removing the sign preservation basically makes this function a composite of abs and max.
:param data: data array source
:param preserve_sign: whether or not to preserve the sign of the output, default is True
:return: largest absolute value in the data array
"""
data = np.asarray(data)
abs_data = np.abs(data)
subset = np.unravel_index(np.argmax(abs_data), data.shape)
return data[subset] if preserve_sign else abs_data[subset]
def flat_abs_minimum(data, preserve_sign=True):
"""
Function to return the absolute minimum value in an array. Note that, by default, this function will
reserve the sign.
For example, if an array contains [-100, -24, 1, 2] then the function will return 1 because that value
has the smallest magnitude. If an array contained [-100, -50, -2, -1] the the function would return -1
because that value has the smallest magnitude; however, the sign would preserved (by default).
Removing the sign preservation basically makes this function a composite of abs and min.
:param data: data array source
:param preserve_sign: whether or not to preserve the sign of the output, default is True
:return: smallest absolute value in the data array
"""
data = np.asarray(data)
abs_data = np.abs(data)
subset = np.unravel_index(np.argmin(abs_data), data.shape)
return data[subset] if preserve_sign else abs_data[subset]
def partition_top(data, n, return_indices=False):
    """
    Return the top n (largest) values in an array, in arbitrary order.
    BUGFIX: the previous revision called ``np.argpartition(data, n)[:n]`` with
    ``n = min(data.size, n) - 1``, which selects the *smallest* n-1 values
    instead of the largest n, contradicting both this docstring and
    ``flat_top_average``. Partitioning around ``data.size - n`` and taking the
    trailing slice yields the intended top-n selection.
    :param data: data array source
    :param n: the number of values of interest (n)
    :param return_indices: whether to also return the selected indices
    :return: top n values if 0 < n < data.size, otherwise all values
             (as-is, matching the previous pass-through behaviour);
             a ``(values, indices)`` pair if `return_indices` is True
    """
    data = np.asarray(data)
    if n is None or n <= 0 or n >= data.size:
        return data
    # Elements after position (size - n) in the partition are the n largest.
    idx = np.argpartition(data, data.size - n)[-n:]
    result = data[idx]
    if return_indices:
        return result, idx
    return result
def flat_top_average(data, n):
"""
Function to return the average of the top n values in an array
:param data: data array source
:param n: the number of values of interest (n)
:return: average of top n values if n < data.size or average of data if n > data.size
"""
return np.average(partition_top(data, n, return_indices=False))
| [
"numpy.abs",
"numpy.argpartition",
"numpy.asarray",
"numpy.argmax",
"numpy.argmin"
] | [((1057, 1073), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (1067, 1073), True, 'import numpy as np\n'), ((1089, 1101), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (1095, 1101), True, 'import numpy as np\n'), ((2024, 2040), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (2034, 2040), True, 'import numpy as np\n'), ((2056, 2068), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (2062, 2068), True, 'import numpy as np\n'), ((2621, 2637), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (2631, 2637), True, 'import numpy as np\n'), ((1132, 1151), 'numpy.argmax', 'np.argmax', (['abs_data'], {}), '(abs_data)\n', (1141, 1151), True, 'import numpy as np\n'), ((2099, 2118), 'numpy.argmin', 'np.argmin', (['abs_data'], {}), '(abs_data)\n', (2108, 2118), True, 'import numpy as np\n'), ((2744, 2768), 'numpy.argpartition', 'np.argpartition', (['data', 'n'], {}), '(data, n)\n', (2759, 2768), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from __future__ import print_function
import roslib
roslib.load_manifest('begineer_tutorial')
import sys
import rospy
import cv2
import numpy as np
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class image_converter:
    """Subscribes to a Kinect RGB topic and shows a colour-masked view of each frame."""

    def __init__(self):
        # self.image_pub = rospy.Publisher("image_topic_2",Image,queue_size=10)
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/camera/rgb/image_color", Image, self.callback)

    def callback(self, data):
        """Convert the incoming ROS image, mask it by an HSV range and display it."""
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
            # BUGFIX: previously execution fell through and used the undefined
            # cv_image, raising NameError on any conversion failure.
            return
        hsv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
        lower_range = np.array([30, 150, 50])
        upper_range = np.array([255, 255, 180])
        mask = cv2.inRange(hsv_image, lower_range, upper_range)
        res = cv2.bitwise_and(cv_image, cv_image, mask=mask)
        cv2.imshow("Image window", res)
        # Without a waitKey pump the HighGUI window never refreshes.
        cv2.waitKey(3)
def main(args):
    """Spin a ROS node hosting an image_converter until interrupted."""
    converter = image_converter()
    rospy.init_node('image_converter', anonymous=True)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
    cv2.destroyAllWindows()
if __name__ == '__main__':
main(sys.argv)
| [
"rospy.init_node",
"cv2.inRange",
"cv2.bitwise_and",
"roslib.load_manifest",
"cv_bridge.CvBridge",
"numpy.array",
"cv2.imshow",
"cv2.destroyAllWindows",
"rospy.spin",
"cv2.cvtColor",
"rospy.Subscriber"
] | [((75, 116), 'roslib.load_manifest', 'roslib.load_manifest', (['"""begineer_tutorial"""'], {}), "('begineer_tutorial')\n", (95, 116), False, 'import roslib\n'), ((1035, 1085), 'rospy.init_node', 'rospy.init_node', (['"""image_converter"""'], {'anonymous': '(True)'}), "('image_converter', anonymous=True)\n", (1050, 1085), False, 'import rospy\n'), ((1167, 1190), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1188, 1190), False, 'import cv2\n'), ((423, 433), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (431, 433), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((455, 520), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/camera/rgb/image_color"""', 'Image', 'self.callback'], {}), "('/camera/rgb/image_color', Image, self.callback)\n", (471, 520), False, 'import rospy\n'), ((715, 756), 'cv2.cvtColor', 'cv2.cvtColor', (['cv_image', 'cv2.COLOR_BGR2HSV'], {}), '(cv_image, cv2.COLOR_BGR2HSV)\n', (727, 756), False, 'import cv2\n'), ((776, 799), 'numpy.array', 'np.array', (['[30, 150, 50]'], {}), '([30, 150, 50])\n', (784, 799), True, 'import numpy as np\n'), ((816, 841), 'numpy.array', 'np.array', (['[255, 255, 180]'], {}), '([255, 255, 180])\n', (824, 841), True, 'import numpy as np\n'), ((851, 895), 'cv2.inRange', 'cv2.inRange', (['image', 'lower_range', 'upper_range'], {}), '(image, lower_range, upper_range)\n', (862, 895), False, 'import cv2\n'), ((907, 953), 'cv2.bitwise_and', 'cv2.bitwise_and', (['cv_image', 'cv_image'], {'mask': 'mask'}), '(cv_image, cv_image, mask=mask)\n', (922, 953), False, 'import cv2\n'), ((958, 989), 'cv2.imshow', 'cv2.imshow', (['"""Image window"""', 'res'], {}), "('Image window', res)\n", (968, 989), False, 'import cv2\n'), ((1097, 1109), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1107, 1109), False, 'import rospy\n')] |
import os
from pathlib import Path
# Absolute path (as a string) of the directory containing this file,
# presumably used as the project's top-level directory anchor.
ABS_PATH_OF_TOP_LEVEL_DIR = os.path.abspath(os.path.dirname(Path(__file__)))
| [
"pathlib.Path"
] | [((96, 110), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (100, 110), False, 'from pathlib import Path\n')] |
from django.db import models
# Create your models here.
class Case(models.Model):
    """A contact/support case submitted through the site, one row per submission."""
    # Submitter's display name.
    name = models.CharField(max_length =100)
    # Submitter's e-mail; unique, so each address can open at most one case.
    email = models.EmailField(max_length=100, unique=True)
    # Free-text message body; optional.
    message = models.CharField(max_length=500, blank=True)
    # Set automatically when the row is first created.
    created_at = models.DateTimeField(auto_now_add=True)
| [
"django.db.models.DateTimeField",
"django.db.models.EmailField",
"django.db.models.CharField"
] | [((94, 126), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (110, 126), False, 'from django.db import models\n'), ((137, 183), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(100)', 'unique': '(True)'}), '(max_length=100, unique=True)\n', (154, 183), False, 'from django.db import models\n'), ((195, 239), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)', 'blank': '(True)'}), '(max_length=500, blank=True)\n', (211, 239), False, 'from django.db import models\n'), ((254, 293), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (274, 293), False, 'from django.db import models\n')] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext as _
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib.auth.forms import PasswordResetForm,PasswordChangeForm,AdminPasswordChangeForm
#from django.contrib.auth.models import User
from dynamicresponse.response import *
from forms import *
from models import *
from django.views.decorators.csrf import csrf_exempt
import pdb
"""
def users(request):
users = Users.objects.all()
return render_to_response("users.html", {
'users': users },
RequestContext(request))
def test_js(request):
return render_to_response('test_js.html', {}, RequestContext(request))
"""
"""
@ csrf_exempt
def index_user(request):
"Lists all blog user."
if request.method == 'POST':
user = User.objects.create(title=request.POST.get("title"), reviewer=request.POST.get("reviewer"), email=request.POST.get("email"),content=request.POST.get("content") )
user.save()
form = RegisterForm(request.POST, instance=user)
#users = Users.objects.all()
else:
form = RegisterForm(instance=None)
users = User.objects.all()
#pdb.set_trace()
return SerializeOrRender('blog/index_user.html', { 'users': users }, extra={ 'form': form })
"""
def users_list(request):
    """Render (or serialize) the full list of blog users."""
    all_users = User.objects.all()
    return SerializeOrRender('blog/users_list.html', {'users': all_users})
"""
def delete_user(request, user_id):
"Deletes the blog user."
user = get_object_or_404(User.objects.all(), pk=user_id)
if request.method == 'POST':
user.delete()
return SerializeOrRedirect(reverse('list_users'), {}, status=CR_DELETED)
else:
return SerializeOrRender('blog/delete_user.html', { 'user': user }, status=CR_CONFIRM)
"""
def register(request, user_id=None):
    """Display, create or update a blog user via RegisterForm."""
    user = get_object_or_404(User.objects.all(), pk=user_id) if user_id else None
    if request.method != 'POST':
        form = RegisterForm(instance=user)
    else:
        form = RegisterForm(request.POST, instance=user)
        if form.is_valid():
            user = form.save()
            return SerializeOrRedirect(reverse('users_list'), {'user': user})
    # GET, or POST with an invalid form: re-render with the (bound) form.
    return SerializeOrRender('blog/user.html', {'user': user}, extra={'form': form})
def u_change(request, user_id=None):
    """Display, create or update a blog user via U_ChangeForm."""
    user = get_object_or_404(User.objects.all(), pk=user_id) if user_id else None
    if request.method != 'POST':
        form = U_ChangeForm(instance=user)
    else:
        form = U_ChangeForm(request.POST, instance=user)
        if form.is_valid():
            user = form.save()
            return SerializeOrRedirect(reverse('users_list'), {'user': user})
    # GET, or POST with an invalid form: re-render with the (bound) form.
    return SerializeOrRender('blog/user.html', {'user': user}, extra={'form': form})
def passwordchange(request, user_id=None):
    """Display and process Django's password-change form for a blog user.

    BUGFIXES vs. the previous revision:
    * ``get_object_or_404`` was handed a User *instance*
      (``User.objects.get(id=user_id)``) where it expects a model or queryset,
      which fails at runtime; it now uses ``User.objects.all()`` like the
      sibling views ``register`` and ``u_change``.
    * the success redirect reversed the URL name ``'list_users'`` while every
      other view in this module reverses ``'users_list'``.
    """
    user = None
    if user_id:
        user = get_object_or_404(User.objects.all(), pk=user_id)
    if request.method == 'POST':
        form = PasswordChangeForm(user, request.POST)
        if form.is_valid():
            user = form.save()
            return SerializeOrRedirect(reverse('users_list'), { 'user': user })
    else:
        form = PasswordChangeForm(user)
    return SerializeOrRender('blog/user.html', { 'user': user }, extra={ 'form': form })
"""
def passwordchange(request, user_id=None):
"Displays, creates or updates a blog users."
user = None
if user_id:
user = get_object_or_404(User.objects.all(), pk=user_id)
olduser=User.objects.get(id=user_id)
if request.method == 'POST':
form = U_PasswordChangeForm(request.POST, instance=user)
if form.is_valid():
user = form.save()
return SerializeOrRedirect(reverse('list_users'), { 'user': user })
# else:
# form = U_PasswordChangeForm(instance=user)
return SerializeOrRender('blog/user.html', { 'user': user }, extra={ 'form': form })
"""
"""
def passwordchange(request, is_admin_site=False, template_name='blog/user.html',
email_template_name='registration/password_reset_email.html',
password_reset_form=PasswordResetForm, token_generator=default_token_generator,
post_reset_redirect=None):
if post_reset_redirect is None:
post_reset_redirect = reverse('django.contrib.auth.views.password_reset_done')
if request.method == "POST":
form = password_reset_form(request.POST)
if form.is_valid():
opts = {}
opts['use_https'] = request.is_secure()
opts['token_generator'] = token_generator
if is_admin_site:
opts['domain_override'] = request.META['HTTP_HOST']
else:
opts['email_template_name'] = email_template_name
if not Site._meta.installed:
opts['domain_override'] = RequestSite(request).domain
form.save(**opts)
return HttpResponseRedirect(post_reset_redirect)
else:
form = password_reset_form()
return render_to_response(template_name, {
'form': form,
}, context_instance=RequestContext(request))
"""
| [
"django.contrib.auth.forms.PasswordChangeForm",
"django.core.urlresolvers.reverse"
] | [((3509, 3547), 'django.contrib.auth.forms.PasswordChangeForm', 'PasswordChangeForm', (['user', 'request.POST'], {}), '(user, request.POST)\n', (3527, 3547), False, 'from django.contrib.auth.forms import PasswordResetForm, PasswordChangeForm, AdminPasswordChangeForm\n'), ((2457, 2478), 'django.core.urlresolvers.reverse', 'reverse', (['"""users_list"""'], {}), "('users_list')\n", (2464, 2478), False, 'from django.core.urlresolvers import reverse\n'), ((3057, 3078), 'django.core.urlresolvers.reverse', 'reverse', (['"""users_list"""'], {}), "('users_list')\n", (3064, 3078), False, 'from django.core.urlresolvers import reverse\n'), ((3646, 3667), 'django.core.urlresolvers.reverse', 'reverse', (['"""list_users"""'], {}), "('list_users')\n", (3653, 3667), False, 'from django.core.urlresolvers import reverse\n')] |
# Import spaCy and create a blank Japanese nlp object
import spacy
nlp = spacy.blank("ja")
# Process the text ("I like tree kangaroos and narwhals.")
doc = nlp("私はツリーカンガルーとイッカクが好きです。")
# Select the token slice for "tree kangaroo" (tokens 2-3)
tree_kangaroos = doc[2:4]
print(tree_kangaroos.text)
# Select the token slice for "tree kangaroos and narwhals" (tokens 2-5)
tree_kangaroos_and_narwhals = doc[2:6]
print(tree_kangaroos_and_narwhals.text)
| [
"spacy.blank"
] | [((52, 69), 'spacy.blank', 'spacy.blank', (['"""ja"""'], {}), "('ja')\n", (63, 69), False, 'import spacy\n')] |
import numpy
import numbers
import math
import struct
from six.moves import zip
from .. import SetIntersectionIndexBase, SearchResults, EmptySearchResults
def _check_numpy ():
missing = []
for fn in ("zeros", "empty", "digitize", "resize", "concatenate", "unique", "bincount", "argsort"):
if not getattr (numpy, fn, False):
missing.append (fn)
if missing:
raise ImportError ("setix.backends.numpy: required functions not provided by installed numpy: " + ", ".join(missing))
_check_numpy ()
class SetIntersectionIndex (SetIntersectionIndexBase):
    """numpy-backed set-intersection index.

    Symbols are interned to integer ids. Each distinct set is stored once,
    keyed by the packed sorted tuple of its symbol ids (its "signature"),
    and a per-symbol bucket records which set ids contain that symbol, so
    intersection queries reduce to counting set-id occurrences across the
    buckets of the query's symbols.

    BUGFIXES vs. the previous revision:
    * the max_symbols range check raised ``ValueError("max_sets")``
      (copy/paste of the wrong argument name);
    * in ``add``, newly registered symbol slots of ``_symbol_counts`` were
      only zeroed in the resize branch, but ``numpy.resize`` fills the grown
      tail by *repeating* existing data, so symbols registered later without
      triggering another resize started from garbage counts.
    """

    def __init__ (self,
                  max_sets=2**32,
                  max_symbols=2**16,
                  init_bucket_size=16,
                  support_most_frequent=True,
                  support_find_similar=True):
        """Create an empty index.

        :param max_sets: upper bound on stored sets; selects the set-id dtype
        :param max_symbols: upper bound on distinct symbols; selects the symbol-id dtype
        :param init_bucket_size: initial per-symbol bucket capacity (>= 4)
        :param support_most_frequent: track per-symbol counts for most_frequent()
        :param support_find_similar: track set sizes for find_similar()
        """
        self._sets = numpy.empty (64, dtype="object")
        self._num_sets = 0
        self._symbols = []
        self._index = {}
        self._sets_by_sig = {}
        self._init_bs = init_bucket_size
        self._packers = {}
        self._support_most_frequent = bool (support_most_frequent)
        self._support_find_similar = bool (support_find_similar)

        if not isinstance (max_sets, numbers.Number):
            raise TypeError ("max_sets")
        if not isinstance (max_symbols, numbers.Number):
            raise TypeError ("max_symbols")
        if not isinstance (init_bucket_size, numbers.Number):
            raise TypeError ("init_bucket_size")
        if max_sets < 1 or max_sets >= 2**64:
            raise ValueError ("max_sets")
        if max_symbols < 1 or max_symbols >= 2**64:
            # BUGFIX: previously raised ValueError("max_sets") here.
            raise ValueError ("max_symbols")
        if init_bucket_size < 4:
            raise ValueError ("init_bucket_size")

        # Choose the narrowest unsigned dtype/struct code wide enough for each id range.
        set_bits = int (round (math.log (max_sets, 2)))
        symbol_bits = int (round (math.log (max_symbols, 2)))
        sz = (9, 17, 33, 65)
        dt = (numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64)
        sf = ("B", "H", "I", "L")
        x = numpy.digitize([set_bits], sz)[0]
        self._dtype_sets = dt[x]
        self._max_sets = 2 ** (sz[x]-1)
        x = numpy.digitize([symbol_bits], sz)[0]
        self._dtype_symbols = dt[x]
        self._max_symbols = 2 ** (sz[x]-1)
        self._struct_symbols = sf[x]

        if support_find_similar:
            # per-set cardinality, needed for Jaccard-style similarity
            self._set_sizes = numpy.zeros (8 * init_bucket_size, dtype=self._dtype_symbols)
        if support_most_frequent:
            # per-symbol occurrence count across all added sets
            self._symbol_counts = numpy.zeros (8 * init_bucket_size, dtype=self._dtype_sets)

    @property
    def symbol_count (self):
        """Number of distinct symbols registered."""
        return len (self._symbols)

    @property
    def set_count (self):
        """Number of distinct sets stored."""
        return self._num_sets

    @property
    def symbols (self):
        return tuple (self._symbols)

    @property
    def payloads (self):
        """Iterate over every stored payload (a set may carry several)."""
        for s in self._sets:
            for pl in s:
                yield pl

    @property
    def supports_most_frequent (self):
        return self._support_most_frequent

    @property
    def supports_find_similar (self):
        return self._support_find_similar

    @property
    def max_sets (self):
        return self._max_sets

    @property
    def max_symbols (self):
        return self._max_symbols

    def __getstate__ (self):
        # struct packers are not picklable; rebuilt lazily on demand
        state = dict (self.__dict__)
        del state["_packers"]
        return state

    def __setstate__ (self, state):
        self.__dict__ = state
        state["_packers"] = {}

    def add (self, iterable, payload=SetIntersectionIndexBase._SENTINEL):
        """Add the set of symbols in ``iterable`` with an associated payload
        (defaults to the iterable itself). Identical sets share one entry."""
        if payload is self._SENTINEL:
            payload = iterable

        max_sets = self._max_sets
        max_symbols = self._max_symbols
        init_bs = self._init_bs
        symbols = self._symbols
        index = self._index
        buckets = [] # list of per-symbol buckets this set belongs in
        sig = set() # set of symbol ids for identifying the set
        num_syms = len (symbols)

        for symbol in iterable:
            bucket = index.get (symbol)
            if bucket is None:
                # register new symbol
                id = len (symbols)
                if id >= max_symbols:
                    raise RuntimeError ("index full: maximum number of symbols reached")
                bucket = index[symbol] = [id, 0, numpy.zeros (init_bs, dtype=self._dtype_sets)]
                symbols.append (symbol)
            buckets.append (bucket)
            sig.add (bucket[0])

        sig = sorted (sig)
        # packed signature used as a key in self._sets
        # this saves memory compared to a tuple of ints
        lsig = len (sig)
        packer = self._packers[lsig] = self._packers.get(lsig) or struct.Struct(self._struct_symbols * lsig).pack
        ssig = packer (*sig)

        S = self._sets_by_sig.get (ssig)
        if S is None:
            # register new set
            sid = self._num_sets
            if sid >= max_sets:
                raise RuntimeError ("index full: maximum number of sets reached")
            self._num_sets += 1
            sets = self._sets
            if sid >= sets.size:
                sets = self._sets = numpy.resize (sets, int(sid * 1.25))
            S = self._sets_by_sig[ssig] = []
            sets[sid] = S
            if self._support_find_similar:
                if self._set_sizes.size <= sid:
                    self._set_sizes = numpy.resize (self._set_sizes, int(sid * 1.25))
                self._set_sizes[sid] = len (buckets)
            # add set to per-symbol buckets
            for bucket in buckets:
                arr = bucket[2]
                idx = bucket[1]
                if arr.size <= idx:
                    arr = bucket[2] = numpy.resize (arr, int(idx * 1.25))
                arr[idx] = sid
                bucket[1] += 1

        if self._support_most_frequent:
            # update counts of symbol occurrences
            symbol_counts = self._symbol_counts
            new_syms = len (symbols)
            if new_syms > num_syms:
                if new_syms >= symbol_counts.size:
                    self._symbol_counts = symbol_counts = numpy.resize (symbol_counts, int(new_syms * 1.25))
                # BUGFIX: numpy.resize repeats existing data into the grown tail,
                # so every newly registered symbol slot must be zeroed, whether
                # or not this particular call triggered a resize.
                symbol_counts[num_syms:new_syms] = 0
            if len (sig) == len (buckets): #no repetitions
                symbol_counts[ numpy.array (sig, dtype=self._dtype_symbols) ] += 1
            else:
                for bucket in buckets:
                    symbol_counts[bucket[0]] += 1

        S.append (payload)

    def _find (self, iterable):
        """Shared query core: return (query length, candidate set ids,
        inverse indices, per-set intersection counts)."""
        buckets = []
        sig = set()
        occurrences = []
        L = 0
        for symbol in iterable:
            L += 1
            bucket = self._index.get (symbol)
            if bucket is not None:
                buckets.append (bucket)
                sig.add (bucket[0])
                if bucket[1]:
                    occurrences.append (bucket[2][0:bucket[1]])

        if occurrences:
            # bincount of the concatenated buckets = per-set overlap with the query
            sids, indices = numpy.unique (numpy.concatenate (occurrences), return_inverse=True)
            counts = numpy.bincount (indices)
            return L, sids, indices, counts
        else:
            return L, [], [], []

    class SearchResults (SearchResults):
        """Lazy result set: scores are only sorted when get() is first called."""
        def __init__ (self, sids, scores, sets):
            self._sids = sids
            self._scores = scores
            self._sets = sets
            self._sort = None
            self._list = None
            self._list_for = None

        def get (self, max_results=None):
            """Yield (score, payload list) pairs in descending score order."""
            scores = self._scores
            sort = self._sort = self._sort or numpy.argsort (scores)
            if max_results is not None:
                sort = sort[-max_results:]
            sort = sort[::-1]
            r_sids = self._sids[sort]
            r_counts = scores[sort]
            return zip (r_counts, self._sets[r_sids])

        def __len__ (self):
            return self._scores.size

    def find (self, iterable, threshold=1, max_results=None):
        """Find sets sharing at least ``threshold`` symbols with the query.
        A negative threshold is interpreted relative to the query length.
        NOTE(review): ``max_results`` is accepted but unused here — limiting
        happens in SearchResults.get(); kept for interface compatibility."""
        if not isinstance (threshold, numbers.Number):
            raise TypeError ("threshold")
        if threshold < 1 and threshold >= 0:
            raise ValueError ("threshold")

        L, sids, indices, counts = self._find (iterable)
        if threshold < 0:
            threshold = L + threshold
            if threshold < 1:
                raise ValueError ("threshold")

        if len (counts) == 0:
            return EmptySearchResults ()

        mask = counts >= threshold
        counts = counts[mask]
        sids = sids[mask]

        return self.SearchResults (sids, counts, self._sets)

    def find_similar (self, iterable, threshold=0.3):
        """Find sets whose Jaccard similarity with the query is >= ``threshold``
        (0 < threshold <= 1). Requires support_find_similar."""
        if not isinstance (threshold, numbers.Number):
            raise TypeError ("threshold")
        if threshold > 1 or not (threshold > 0):
            raise ValueError ("threshold")
        if not self._support_find_similar:
            raise RuntimeError ("find_similar support disabled")

        L, sids, indices, counts = self._find (iterable)
        if len (counts) == 0:
            return EmptySearchResults ()

        # Jaccard: |A & B| / |A | B| with |A | B| = |A| + |B| - |A & B|
        smls = counts / (self._set_sizes[sids] + (L * 1.0) - counts)
        mask = smls >= threshold
        smls = smls[mask]
        sids = sids[mask]

        return self.SearchResults (sids, smls, self._sets)

    def most_frequent (self, threshold=2.0/3.0, max_results=None, with_counts=False):
        """Yield the most frequent symbols (optionally with their counts),
        most frequent first, stopping once a count falls below
        ``threshold`` * (highest count). Requires support_most_frequent."""
        if not self._support_most_frequent:
            raise RuntimeError ("most_frequent support disabled")
        counts = self._symbol_counts
        if self._num_sets == 0:
            return
        sort = numpy.argsort (counts[0:len(self._symbols)])
        limit = counts[sort[-1]] * 1.0 * threshold
        symbols = self._symbols
        if max_results:
            sort = sort[-max_results:]
        if with_counts:
            for x in sort[::-1]:
                count = counts[x]
                if count < limit:
                    break
                yield (symbols[x], count)
        else:
            for x in sort[::-1]:
                count = counts[x]
                if count < limit:
                    break
                yield symbols[x]
| [
"numpy.digitize",
"math.log",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"numpy.concatenate",
"struct.Struct",
"numpy.bincount",
"six.moves.zip"
] | [((844, 875), 'numpy.empty', 'numpy.empty', (['(64)'], {'dtype': '"""object"""'}), "(64, dtype='object')\n", (855, 875), False, 'import numpy\n'), ((2111, 2141), 'numpy.digitize', 'numpy.digitize', (['[set_bits]', 'sz'], {}), '([set_bits], sz)\n', (2125, 2141), False, 'import numpy\n'), ((2239, 2272), 'numpy.digitize', 'numpy.digitize', (['[symbol_bits]', 'sz'], {}), '([symbol_bits], sz)\n', (2253, 2272), False, 'import numpy\n'), ((2464, 2524), 'numpy.zeros', 'numpy.zeros', (['(8 * init_bucket_size)'], {'dtype': 'self._dtype_symbols'}), '(8 * init_bucket_size, dtype=self._dtype_symbols)\n', (2475, 2524), False, 'import numpy\n'), ((2603, 2660), 'numpy.zeros', 'numpy.zeros', (['(8 * init_bucket_size)'], {'dtype': 'self._dtype_sets'}), '(8 * init_bucket_size, dtype=self._dtype_sets)\n', (2614, 2660), False, 'import numpy\n'), ((7443, 7466), 'numpy.bincount', 'numpy.bincount', (['indices'], {}), '(indices)\n', (7457, 7466), False, 'import numpy\n'), ((8254, 8287), 'six.moves.zip', 'zip', (['r_counts', 'self._sets[r_sids]'], {}), '(r_counts, self._sets[r_sids])\n', (8257, 8287), False, 'from six.moves import zip\n'), ((1862, 1883), 'math.log', 'math.log', (['max_sets', '(2)'], {}), '(max_sets, 2)\n', (1870, 1883), False, 'import math\n'), ((1921, 1945), 'math.log', 'math.log', (['max_symbols', '(2)'], {}), '(max_symbols, 2)\n', (1929, 1945), False, 'import math\n'), ((4952, 4994), 'struct.Struct', 'struct.Struct', (['(self._struct_symbols * lsig)'], {}), '(self._struct_symbols * lsig)\n', (4965, 4994), False, 'import struct\n'), ((7368, 7398), 'numpy.concatenate', 'numpy.concatenate', (['occurrences'], {}), '(occurrences)\n', (7385, 7398), False, 'import numpy\n'), ((7986, 8007), 'numpy.argsort', 'numpy.argsort', (['scores'], {}), '(scores)\n', (7999, 8007), False, 'import numpy\n'), ((4537, 4581), 'numpy.zeros', 'numpy.zeros', (['init_bs'], {'dtype': 'self._dtype_sets'}), '(init_bs, dtype=self._dtype_sets)\n', (4548, 4581), False, 'import numpy\n'), ((6670, 
6713), 'numpy.array', 'numpy.array', (['sig'], {'dtype': 'self._dtype_symbols'}), '(sig, dtype=self._dtype_symbols)\n', (6681, 6713), False, 'import numpy\n')] |
"""
Programs scraper
"""
import re
import src.scrape.util.helpers as helpers
import src.settings as settings
from src.logger import get_logger
_LOG = get_logger("programs_scraper")
def programs():
    """
    Scrape the list of undergraduate programs.
    :return: List, of program codes
    """
    _LOG.debug("scraping list of programs")
    listing_url = f"{settings.UQ_FUTURE_BASE_URL}/study/find-a-program/listing/undergraduate"
    soup = helpers.get_soup(listing_url)
    program_links = soup.find_all("a", href=re.compile("/study/program"))
    # The 4-character program code is the tail of each program link's href.
    return [link["href"][-4:] for link in program_links]
| [
"src.scrape.util.helpers.get_soup",
"src.logger.get_logger",
"re.compile"
] | [((151, 181), 'src.logger.get_logger', 'get_logger', (['"""programs_scraper"""'], {}), "('programs_scraper')\n", (161, 181), False, 'from src.logger import get_logger\n'), ((442, 463), 'src.scrape.util.helpers.get_soup', 'helpers.get_soup', (['url'], {}), '(url)\n', (458, 463), True, 'import src.scrape.util.helpers as helpers\n'), ((508, 536), 're.compile', 're.compile', (['"""/study/program"""'], {}), "('/study/program')\n", (518, 536), False, 'import re\n')] |
import numpy as np
from myutils import *
from easydict import EasyDict as edict
def dcg_at_k(r, k, method=1):
    """
    Discounted cumulative gain of relevance scores ``r`` truncated at rank ``k``.

    :param r: relevance scores in rank order
    :param k: number of leading results to score
    :param method: 0 -> first item undiscounted, rest discounted by log2(rank);
                   1 -> standard log2(rank+1) discount for every item
    :return: DCG value (0. for an empty ranking)
    :raises ValueError: if method is neither 0 nor 1 (and r is non-empty)
    """
    # np.asfarray was removed in NumPy 2.0; asarray with float dtype is the
    # documented replacement and is equivalent here.
    r = np.asarray(r, dtype=float)[:k]
    if r.size:
        if method == 0:
            return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
        elif method == 1:
            return np.sum(r / np.log2(np.arange(2, r.size + 2)))
        else:
            raise ValueError('method must be 0 or 1.')
    return 0.
def ndcg_at_k(r, k, method=0):
    """Normalized DCG at rank k: dcg_at_k(r, k) divided by the DCG of an
    ideally ordered r; 0. when the ideal DCG is zero."""
    ideal = dcg_at_k(sorted(r, reverse=True), k, method)
    if not ideal:
        return 0.
    return dcg_at_k(r, k, method) / ideal
def measure_rec_quality(path_data):
    """
    Compute per-user NDCG, hit-rate, recall and precision over the test set,
    bucketed both overall and by each enabled sensitive-attribute group.

    :param path_data: object exposing sens_attribute_flags, dataset_name,
                      uid_topk (uid -> predicted pid list) and
                      test_labels (uid -> relevant pid collection)
    :return: edict metrics[metric_name][group_name] -> list of per-user scores,
             where group_name is "Overall" plus one entry per attribute value
    """
    # Evaluate only the attributes that have been chosen and are available in the chosen dataset
    flags = path_data.sens_attribute_flags
    attribute_list = get_attribute_list(path_data.dataset_name, flags)
    metrics_names = ["ndcg", "hr", "recall", "precision"]
    metrics = edict()
    for metric in metrics_names:
        metrics[metric] = {"Overall": []}
        for values in attribute_list.values():
            # With a single attribute only the "Overall" bucket is kept.
            if len(attribute_list) == 1: break
            attribute_to_name = values[1]
            for _, name in attribute_to_name.items():
                metrics[metric][name] = []

    topk_matches = path_data.uid_topk
    test_labels = path_data.test_labels

    test_user_idxs = list(test_labels.keys())
    invalid_users = []
    for uid in test_user_idxs:
        if uid not in topk_matches: continue
        # Users with fewer than 10 recommendations are excluded from scoring.
        if len(topk_matches[uid]) < 10:
            invalid_users.append(uid)
            continue
        pred_list, rel_set = topk_matches[uid], test_labels[uid]
        if len(pred_list) == 0:
            continue

        # Binary relevance vector over the ranked predictions.
        k = 0
        hit_num = 0.0
        hit_list = []
        for pid in pred_list:
            k += 1
            if pid in rel_set:
                hit_num += 1
                hit_list.append(1)
            else:
                hit_list.append(0)

        ndcg = ndcg_at_k(hit_list, k)
        recall = hit_num / len(rel_set)
        precision = hit_num / len(pred_list)
        hit = 1.0 if hit_num > 0.0 else 0.0

        # Based on attribute
        for attribute in attribute_list.keys():
            if uid not in attribute_list[attribute][0]: continue
            attr_value = attribute_list[attribute][0][uid]
            if attr_value not in attribute_list[attribute][1]: continue #Few users may have the attribute missing (LASTFM)
            attr_name = attribute_list[attribute][1][attr_value]
            metrics["ndcg"][attr_name].append(ndcg)
            metrics["recall"][attr_name].append(recall)
            metrics["precision"][attr_name].append(precision)
            metrics["hr"][attr_name].append(hit)
        metrics["ndcg"]["Overall"].append(ndcg)
        metrics["recall"]["Overall"].append(recall)
        metrics["precision"]["Overall"].append(precision)
        metrics["hr"]["Overall"].append(hit)

    return metrics
def print_rec_metrics(dataset_name, flags, metrics):
    """Pretty-print recommendation-quality metrics: the overall averages first,
    then one line per demographic group of each enabled attribute."""
    attribute_list = get_attribute_list(dataset_name, flags)

    print("\n---Recommandation Quality---")
    print("Average for the entire user base:", end=" ")
    for metric_name, metric_values in metrics.items():
        overall_mean = np.array(metric_values["Overall"]).mean()
        print("{}: {:.3f}".format(metric_name, overall_mean), end=" | ")
    print("")

    for attribute_category, values in attribute_list.items():
        print("\n-Statistic with user grouped by {} attribute".format(attribute_category))
        for group_name in values[1].values():
            print("{} group".format(group_name), end=" ")
            for metric_name, group_values in metrics.items():
                group_mean = np.array(group_values[group_name]).mean()
                print("{}: {:.3f}".format(metric_name, group_mean), end=" | ")
            print("")
    print("\n")
"""
Explanation metrics
"""
def topk_ETV(path_data):
    """
    Explanation Type Variety per user: 1 minus the Simpson index of the
    path-type distribution of the user's top-k explanation paths
    (1 = every path a different type, 0 = all the same type or <2 paths).

    :param path_data: object exposing dataset_name, uid_topk, test_labels
                      and uid_pid_explanation (uid -> pid -> path)
    :return: dict mapping uid -> ETV score
    """
    dataset_name = path_data.dataset_name
    def simpson_index(topk):
        # Count paths of each known pattern for this dataset.
        n_path_for_patterns = {k: 0 for k in set(PATH_TYPES[dataset_name])}
        N = 0
        for path in topk:
            path = path
            path_type = get_path_type(path)
            # self-loop paths are folded into the 'described_as' pattern
            if path_type == 'self_loop':
                path_type = 'described_as'
            n_path_for_patterns[path_type] += 1
            N += 1
        numerator = 0
        for path_type, n_path_type_ith in n_path_for_patterns.items():
            numerator += n_path_type_ith * (n_path_type_ith - 1)

        # Fewer than two paths: diversity is undefined, report 0.
        if N * (N - 1) == 0:
            return 0
        return 1 - (numerator / (N * (N - 1)))

    ETVs = {}
    for uid, topk in path_data.uid_topk.items():
        if uid not in path_data.test_labels: continue
        ETV = simpson_index([path_data.uid_pid_explanation[uid][pid] for pid in topk])
        ETVs[uid] = ETV
    return ETVs
def avg_ETV(path_data):
    """Average Explanation Type Variety per demographic group and overall.

    Returns an edict with avg_groups_ETV (group -> mean ETV) and
    groups_ETV_scores (group -> list of per-user ETV scores)."""
    uid_ETVs = topk_ETV(path_data)

    # Evaluate only the attributes that have been chosen and are available.
    attribute_list = get_attribute_list(path_data.dataset_name, path_data.sens_attribute_flags)

    groups_ETV_scores = {}
    for attribute in attribute_list:
        for attribute_label in attribute_list[attribute][1].values():
            groups_ETV_scores[attribute_label] = []
    groups_ETV_scores.setdefault("Overall", [])

    for uid, ETV in uid_ETVs.items():
        for attribute in attribute_list:
            uid_to_value = attribute_list[attribute][0]
            if uid not in uid_to_value: continue
            attr_value = uid_to_value[uid]
            value_to_name = attribute_list[attribute][1]
            # Few users may have the attribute missing (LASTFM)
            if attr_value not in value_to_name: continue
            groups_ETV_scores[value_to_name[attr_value]].append(ETV)
        groups_ETV_scores["Overall"].append(ETV)

    avg_groups_ETV = {label: np.array(scores).mean()
                      for label, scores in groups_ETV_scores.items()}

    return edict(
        avg_groups_ETV=avg_groups_ETV,
        groups_ETV_scores=groups_ETV_scores
    )
def avg_LID(path_data):
    """
    Average Linked Interaction Diversity per demographic group and overall.

    :param path_data: object exposing sens_attribute_flags, dataset_name and
                      the fields topk_LID reads
    :return: edict with avg_groups_LID (group -> mean LID) and
             groups_LID_scores (group -> list of per-user LID scores)
    """
    uid_LIDs = topk_LID(path_data)
    # Evaluate only the attributes that have been chosen and are avaiable in the chosen dataset
    flags = path_data.sens_attribute_flags
    attribute_list = get_attribute_list(path_data.dataset_name, flags)

    avg_groups_LID = {}
    groups_LID_scores = {}
    # Initialize group scores with empty list
    for attribute in attribute_list.keys():
        for _, attribute_label in attribute_list[attribute][1].items():
            groups_LID_scores[attribute_label] = []
    if "Overall" not in groups_LID_scores:
        groups_LID_scores["Overall"] = []

    # Bucket every user's LID into each group the user belongs to, plus Overall.
    for uid, LID in uid_LIDs.items():
        for attribute in attribute_list.keys():
            if uid not in attribute_list[attribute][0]: continue
            attr_value = attribute_list[attribute][0][uid]
            if attr_value not in attribute_list[attribute][1]: continue #Few users may have the attribute missing (LASTFM)
            attr_name = attribute_list[attribute][1][attr_value]
            groups_LID_scores[attr_name].append(LID)
        groups_LID_scores["Overall"].append(LID)

    for attribute_label, group_scores in groups_LID_scores.items():
        avg_groups_LID[attribute_label] = np.array(group_scores).mean()

    linked_interaction_diversity_results = edict(
        avg_groups_LID=avg_groups_LID,
        groups_LID_scores=groups_LID_scores
    )

    return linked_interaction_diversity_results
def topk_LID(path_data):
    """Compute Linked Interaction Diversity (LID) for every test user.

    LID is the fraction of distinct linked interactions among a user's
    top-k explanations (1 when every explanation is anchored to a
    different past interaction).

    Returns: dict uid -> LID score.
    """
    LIDs = {}
    for uid, topk in path_data.uid_topk.items():
        if uid not in path_data.test_labels:
            continue
        unique_linked_interaction = set()
        for pid in topk:
            if pid not in path_data.uid_pid_explanation[uid]:
                continue
            current_path = path_data.uid_pid_explanation[uid][pid]
            li = get_linked_interaction_id(current_path)
            if current_path[1][0] == "mention":
                # Offset word-based interaction ids so they never collide
                # with item-based interaction ids.
                li += 10000
            unique_linked_interaction.add(li)
        # BUG FIX: the original divided by len(topk) unconditionally, which
        # raised ZeroDivisionError for users with an empty top-k; mirror
        # topk_SED and report 1 in that degenerate case. The stray debug
        # counter/print was removed as well.
        LIDs[uid] = len(unique_linked_interaction) / len(topk) if topk else 1
    return LIDs
def avg_SED(path_data):
    """Aggregate per-user SED scores into per-demographic-group averages.

    Returns an edict with `avg_groups_SED` (group label -> mean SED) and
    `groups_SED_scores` (group label -> list of per-user SED scores).
    """
    per_user = topk_SED(path_data)
    # Evaluate only the attributes that were requested AND exist in the dataset.
    attribute_list = get_attribute_list(path_data.dataset_name,
                                        path_data.sens_attribute_flags)
    # One empty bucket per demographic group, plus a catch-all "Overall".
    groups_SED_scores = {
        label: []
        for attr in attribute_list
        for label in attribute_list[attr][1].values()
    }
    groups_SED_scores.setdefault("Overall", [])
    for uid, score in per_user.items():
        for uid2value, value2label in attribute_list.values():
            if uid not in uid2value:
                continue
            value = uid2value[uid]
            # Some users may lack the attribute entirely (e.g. LASTFM).
            if value not in value2label:
                continue
            groups_SED_scores[value2label[value]].append(score)
        groups_SED_scores["Overall"].append(score)
    avg_groups_SED = {label: np.array(scores).mean()
                      for label, scores in groups_SED_scores.items()}
    return edict(avg_groups_SED=avg_groups_SED,
                 groups_SED_scores=groups_SED_scores)
def topk_SED(path_data):
    """Per-user Shared Entity Diversity: fraction of distinct shared
    entities among the user's top-k explanations (1 for an empty top-k)."""
    SEDs = {}
    for uid, topk in path_data.uid_topk.items():
        if uid not in path_data.test_labels:
            continue
        explanations = path_data.uid_pid_explanation[uid]
        distinct = {get_shared_entity_id(explanations[pid])
                    for pid in topk if pid in explanations}
        SEDs[uid] = len(distinct) / len(topk) if topk else 1
    return SEDs
def topk_ETD(path_data):
    """Per-user Explanation Type Diversity: number of distinct path types
    in the user's top-k, normalised by the dataset's total path types."""
    ETDs = {}
    for uid, topk in path_data.uid_topk.items():
        if uid not in path_data.test_labels:
            continue
        explanations = path_data.uid_pid_explanation[uid]
        distinct_types = {get_path_type(explanations[pid])
                          for pid in topk if pid in explanations}
        ETDs[uid] = len(distinct_types) / TOTAL_PATH_TYPES[path_data.dataset_name]
    return ETDs
def get_attribute_list(dataset_name, flags):
    """Build {attribute: [uid->value map, value->label map]} for every
    sensitive attribute that is both requested in `flags` and available
    in `dataset_name`.

    Attributes without a loader (e.g. "Country", unknown names) are
    dropped instead of being populated with stale data.
    """
    attribute_list = {}
    for attribute, flag in flags.items():
        if flag and DATASET_SENSIBLE_ATTRIBUTE_MATRIX[dataset_name][attribute]:
            attribute_list[attribute] = []
    # Iterate over a snapshot of the keys since unsupported attributes are
    # removed from the dict while the others are being resolved.
    for attribute in list(attribute_list.keys()):
        if attribute == "Gender":
            user2attribute, attribute2name = get_kg_uid_to_gender_map(dataset_name)
        elif attribute == "Age":
            user2attribute, attribute2name = get_kg_uid_to_age_map(dataset_name)
        elif attribute == "Occupation":
            user2attribute, attribute2name = get_kg_uid_to_occupation_map(dataset_name)
        else:
            # BUG FIX: the original fell through here ("Country"/unknown)
            # and then stored whichever maps were left over from a previous
            # iteration (or raised NameError on the first one).
            if attribute != "Country":  # TODO: implement Country support
                print("Unknown attribute")
            del attribute_list[attribute]
            continue
        attribute_list[attribute] = [user2attribute, attribute2name]
    return attribute_list
def avg_ETD(path_data):
    """Aggregate per-user ETD scores into per-demographic-group averages.

    Returns an edict with `avg_groups_ETD` (group label -> mean ETD) and
    `groups_ETD_scores` (group label -> list of per-user ETD scores).
    """
    per_user = topk_ETD(path_data)
    # Evaluate only the attributes that were requested AND exist in the dataset.
    attribute_list = get_attribute_list(path_data.dataset_name,
                                        path_data.sens_attribute_flags)
    # One empty bucket per demographic group, plus a catch-all "Overall".
    groups_ETD_scores = {
        label: []
        for attr in attribute_list
        for label in attribute_list[attr][1].values()
    }
    groups_ETD_scores.setdefault("Overall", [])
    for uid, score in per_user.items():
        for uid2value, value2label in attribute_list.values():
            if uid not in uid2value:
                continue
            value = uid2value[uid]
            # Some users may lack the attribute entirely (e.g. LASTFM).
            if value not in value2label:
                continue
            groups_ETD_scores[value2label[value]].append(score)
        groups_ETD_scores["Overall"].append(score)
    avg_groups_ETD = {label: np.array(scores).mean()
                      for label, scores in groups_ETD_scores.items()}
    return edict(avg_groups_ETD=avg_groups_ETD,
                 groups_ETD_scores=groups_ETD_scores)
# Extract the value of LIR for the given user-item path from the LIR_matrix.
def LIR_single(path_data, path):
    """Look up the LIR score of the interaction that anchors `path`.

    Returns 0. when the user has too short a history or when the
    interaction type is not recognised.
    """
    uid = int(path[0][-1])
    no_history = (uid not in path_data.uid_timestamp
                  or uid not in path_data.LIR_matrix
                  or len(path_data.uid_timestamp[uid]) <= 1)
    if no_history:
        return 0.  # Should not enter here
    interaction = int(get_interaction_id(path))
    interaction_type = get_interaction_type(path)
    # Amazon paths can be anchored to a word mention instead of an item.
    if interaction_type == "mentions":
        return path_data.LIR_matrix_words[uid][interaction]
    if interaction_type in ("watched", "listened", "purchase"):
        return path_data.LIR_matrix[uid][interaction]
    return 0.
# Returns a dict mapping every uid to the LIR computed over his top-k.
def topk_LIR(path_data):
    """Average LIR of the explanations in each test user's top-k.

    Users without a LIR row or without a top-k are skipped; a user whose
    top-k produced no scores gets 0.
    """
    LIR_topk = {}
    # Precomputed user timestamp weights.
    LIR_matrix = path_data.LIR_matrix
    for uid in path_data.test_labels.keys():  # modified for pgpr labels
        if uid not in LIR_matrix or uid not in path_data.uid_topk:
            continue
        scores = []
        for pid in path_data.uid_topk[uid]:
            predicted_path = path_data.uid_pid_explanation[uid][pid]
            interaction = int(get_interaction_id(predicted_path))
            interaction_type = get_interaction_type(predicted_path)
            # Amazon paths may be anchored to a word mention instead of an item.
            if interaction_type == "mentions":
                scores.append(path_data.LIR_matrix_words[uid][interaction])
            elif interaction_type in ("purchase", "watched", "listened"):
                scores.append(LIR_matrix[uid][interaction])
            else:
                scores.append(0.)
        LIR_topk[uid] = np.array(scores).mean() if scores else 0
    return LIR_topk
# Returns an average LIR value per demographic group.
def avg_LIR(path_data):
    """Aggregate per-user LIR scores into per-demographic-group averages.

    Returns an edict with `avg_groups_LIR` (group label -> mean LIR) and
    `groups_LIR_scores` (group label -> list of per-user LIR scores).
    """
    per_user = topk_LIR(path_data)
    # Evaluate only the attributes that were requested AND exist in the dataset.
    attribute_list = get_attribute_list(path_data.dataset_name,
                                        path_data.sens_attribute_flags)
    # One empty bucket per demographic group, plus a catch-all "Overall".
    groups_LIR_scores = {
        label: []
        for attr in attribute_list
        for label in attribute_list[attr][1].values()
    }
    groups_LIR_scores.setdefault("Overall", [])
    for uid, score in per_user.items():
        for uid2value, value2label in attribute_list.values():
            if uid not in uid2value:
                continue
            value = uid2value[uid]
            # Some users may lack the attribute entirely (e.g. LASTFM).
            if value not in value2label:
                continue
            groups_LIR_scores[value2label[value]].append(score)
        groups_LIR_scores["Overall"].append(score)
    avg_groups_LIR = {label: np.array(scores).mean()
                      for label, scores in groups_LIR_scores.items()}
    return edict(avg_groups_LIR=avg_groups_LIR,
                 groups_LIR_scores=groups_LIR_scores)
# Extract the value of SEP for the given user-item path from the SEP_matrix.
def SEP_single(path_data, path):
    """Look up the SEP score of the entity shared along `path`."""
    entity_type, entity_id = get_shared_entity(path)
    return path_data.SEP_matrix[entity_type][entity_id]
def topks_SEP(path_data):
    """Average SEP over each test user's top-k explanations.

    Users with no top-k, or whose top-k yields no valid explanation,
    are omitted from the result.
    """
    SEP_topk = {}
    # Precomputed entity distribution.
    serendipity = path_data.SEP_matrix
    for uid in path_data.test_labels:
        if uid not in path_data.uid_topk:
            continue
        scores = []
        for pid in path_data.uid_topk[uid]:
            explanations = path_data.uid_pid_explanation[uid]
            if pid not in explanations:
                continue
            entity_type, entity_id = get_shared_entity(explanations[pid])
            scores.append(serendipity[entity_type][entity_id])
        if scores:
            SEP_topk[uid] = np.array(scores).mean()
    return SEP_topk
def avg_SEP(path_data):
    """Aggregate per-user SEP scores into per-demographic-group averages.

    Returns an edict with `avg_groups_SEP` (group label -> mean SEP) and
    `groups_SEP_scores` (group label -> list of per-user SEP scores).
    """
    per_user = topks_SEP(path_data)
    # Evaluate only the attributes that were requested AND exist in the dataset.
    attribute_list = get_attribute_list(path_data.dataset_name,
                                        path_data.sens_attribute_flags)
    # One empty bucket per demographic group, plus a catch-all "Overall".
    groups_SEP_scores = {
        label: []
        for attr in attribute_list
        for label in attribute_list[attr][1].values()
    }
    groups_SEP_scores.setdefault("Overall", [])
    for uid, score in per_user.items():
        for uid2value, value2label in attribute_list.values():
            if uid not in uid2value:
                continue
            value = uid2value[uid]
            # Some users may lack the attribute entirely (e.g. LASTFM).
            if value not in value2label:
                continue
            groups_SEP_scores[value2label[value]].append(score)
        groups_SEP_scores["Overall"].append(score)
    avg_groups_SEP = {label: np.array(scores).mean()
                      for label, scores in groups_SEP_scores.items()}
    return edict(avg_groups_SEP=avg_groups_SEP,
                 groups_SEP_scores=groups_SEP_scores)
def print_expquality_metrics(dataset_name, flags, metric_values):
    """Pretty-print explanation-quality metrics, overall and per group.

    `metric_values` maps a metric name to {group label -> score}.
    """
    attribute_list = get_attribute_list(dataset_name, flags)
    print("\n---Explanation Quality---")
    print("Average for the entire user base:", end=" ")
    for metric, scores in metric_values.items():
        print("{}: {:.3f}".format(metric, scores["Overall"]), end=" | ")
    print("")
    for attribute_category, maps in attribute_list.items():
        print("\n-Statistic with user grouped by {} attribute".format(attribute_category))
        # maps[1] is the value -> human-readable label dictionary.
        for attribute in maps[1].values():
            print("{} group".format(attribute), end=" ")
            for metric, scores in metric_values.items():
                print("{}: {:.3f}".format(metric, scores[attribute]), end=" | ")
            print("")
| [
"numpy.asfarray",
"easydict.EasyDict",
"numpy.array",
"numpy.arange"
] | [((920, 927), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (925, 927), True, 'from easydict import EasyDict as edict\n'), ((6127, 6200), 'easydict.EasyDict', 'edict', ([], {'avg_groups_ETV': 'avg_groups_ETV', 'groups_ETV_scores': 'groups_ETV_scores'}), '(avg_groups_ETV=avg_groups_ETV, groups_ETV_scores=groups_ETV_scores)\n', (6132, 6200), True, 'from easydict import EasyDict as edict\n'), ((7570, 7643), 'easydict.EasyDict', 'edict', ([], {'avg_groups_LID': 'avg_groups_LID', 'groups_LID_scores': 'groups_LID_scores'}), '(avg_groups_LID=avg_groups_LID, groups_LID_scores=groups_LID_scores)\n', (7575, 7643), True, 'from easydict import EasyDict as edict\n'), ((9860, 9933), 'easydict.EasyDict', 'edict', ([], {'avg_groups_SED': 'avg_groups_SED', 'groups_SED_scores': 'groups_SED_scores'}), '(avg_groups_SED=avg_groups_SED, groups_SED_scores=groups_SED_scores)\n', (9865, 9933), True, 'from easydict import EasyDict as edict\n'), ((13339, 13412), 'easydict.EasyDict', 'edict', ([], {'avg_groups_ETD': 'avg_groups_ETD', 'groups_ETD_scores': 'groups_ETD_scores'}), '(avg_groups_ETD=avg_groups_ETD, groups_ETD_scores=groups_ETD_scores)\n', (13344, 13412), True, 'from easydict import EasyDict as edict\n'), ((16994, 17067), 'easydict.EasyDict', 'edict', ([], {'avg_groups_LIR': 'avg_groups_LIR', 'groups_LIR_scores': 'groups_LIR_scores'}), '(avg_groups_LIR=avg_groups_LIR, groups_LIR_scores=groups_LIR_scores)\n', (16999, 17067), True, 'from easydict import EasyDict as edict\n'), ((19527, 19600), 'easydict.EasyDict', 'edict', ([], {'avg_groups_SEP': 'avg_groups_SEP', 'groups_SEP_scores': 'groups_SEP_scores'}), '(avg_groups_SEP=avg_groups_SEP, groups_SEP_scores=groups_SEP_scores)\n', (19532, 19600), True, 'from easydict import EasyDict as edict\n'), ((119, 133), 'numpy.asfarray', 'np.asfarray', (['r'], {}), '(r)\n', (130, 133), True, 'import numpy as np\n'), ((6065, 6087), 'numpy.array', 'np.array', (['group_scores'], {}), '(group_scores)\n', (6073, 6087), True, 'import numpy as 
np\n'), ((7495, 7517), 'numpy.array', 'np.array', (['group_scores'], {}), '(group_scores)\n', (7503, 7517), True, 'import numpy as np\n'), ((9790, 9812), 'numpy.array', 'np.array', (['group_scores'], {}), '(group_scores)\n', (9798, 9812), True, 'import numpy as np\n'), ((13283, 13305), 'numpy.array', 'np.array', (['group_scores'], {}), '(group_scores)\n', (13291, 13305), True, 'import numpy as np\n'), ((16953, 16975), 'numpy.array', 'np.array', (['group_scores'], {}), '(group_scores)\n', (16961, 16975), True, 'import numpy as np\n'), ((18164, 18189), 'numpy.array', 'np.array', (['SEP_single_topk'], {}), '(SEP_single_topk)\n', (18172, 18189), True, 'import numpy as np\n'), ((19470, 19492), 'numpy.array', 'np.array', (['group_scores'], {}), '(group_scores)\n', (19478, 19492), True, 'import numpy as np\n'), ((15547, 15572), 'numpy.array', 'np.array', (['LIR_single_topk'], {}), '(LIR_single_topk)\n', (15555, 15572), True, 'import numpy as np\n'), ((3234, 3261), 'numpy.array', 'np.array', (["values['Overall']"], {}), "(values['Overall'])\n", (3242, 3261), True, 'import numpy as np\n'), ((226, 250), 'numpy.arange', 'np.arange', (['(2)', '(r.size + 1)'], {}), '(2, r.size + 1)\n', (235, 250), True, 'import numpy as np\n'), ((317, 341), 'numpy.arange', 'np.arange', (['(2)', '(r.size + 2)'], {}), '(2, r.size + 2)\n', (326, 341), True, 'import numpy as np\n'), ((3670, 3704), 'numpy.array', 'np.array', (['groups_values[attribute]'], {}), '(groups_values[attribute])\n', (3678, 3704), True, 'import numpy as np\n')] |
import numpy as np
# Simulator settings
# ------------------------------
# One minute expressed in nanoseconds.
MINUTES=60000000000
# NOTE(review): np.timedelta64 is called here without an explicit unit, so the
# value is a 10-minute span in raw nanosecond ticks — confirm downstream
# arithmetic treats it accordingly.
TIMESTEP = np.timedelta64(10*MINUTES)
PICKUPTIMEWINDOW = np.timedelta64(10*MINUTES)
# Enables the neighbour-car search system, which determines the search range
# from the configured search distance and the grid size.
# It uses DFS to find the nearest idle vehicles in the area.
NeighborCanServer = False
# You can adjust the size of the experimental area by entering latitude and
# longitude; order, road network and grid division adapt to the selected area.
FocusOnLocalRegion = False
LocalRegionBound = (104.035,104.105,30.625,30.695)
# When not focusing on a local region, the bound above is overwritten with the
# wider city-level bounding box.
if FocusOnLocalRegion == False:
    LocalRegionBound = (104.011, 104.125, 30.618, 30.703)
# Input parameters
VehiclesNumber = 6000
SideLengthMeter = 800
VehiclesServiceMeter = 800
DispatchMode = "Simulation"
DemandPredictionMode = "None"
# Other cluster modes: ["TransportationClustering","KmeansClustering","SpectralClustering"]
ClusterMode = "Grid"
| [
"numpy.timedelta64"
] | [((102, 130), 'numpy.timedelta64', 'np.timedelta64', (['(10 * MINUTES)'], {}), '(10 * MINUTES)\n', (116, 130), True, 'import numpy as np\n'), ((148, 176), 'numpy.timedelta64', 'np.timedelta64', (['(10 * MINUTES)'], {}), '(10 * MINUTES)\n', (162, 176), True, 'import numpy as np\n')] |
from helpers import create_connection, execute_query
connection = create_connection(
"postgres", "postgres", "admin", "127.0.0.1", "5432"
)
create_database_query = "CREATE DATABASE ekatte"
execute_query(connection, create_database_query)
connection = create_connection(
"ekatte", "postgres", "admin", "127.0.0.1", "5432"
)
create_area_query = '''
CREATE TABLE IF NOT EXISTS "areas" (
"id" serial NOT NULL UNIQUE,
"name" TEXT NOT NULL UNIQUE,
"code" TEXT NOT NULL UNIQUE,
CONSTRAINT "area_pk" PRIMARY KEY ("code")
);
'''
create_municipality_query = '''
CREATE TABLE IF NOT EXISTS "municipalities" (
"id" serial NOT NULL UNIQUE,
"name" TEXT NOT NULL,
"code" TEXT NOT NULL UNIQUE,
"area_code" TEXT NOT NULL,
CONSTRAINT "municipalities_pk" PRIMARY KEY ("code"),
CONSTRAINT "municipalities_fk0" FOREIGN KEY ("area_code") REFERENCES "areas"("code")
);
'''
create_settlements_query = '''
CREATE TABLE IF NOT EXISTS "settlements" (
"id" serial NOT NULL,
"ekatte" TEXT NOT NULL UNIQUE,
"type" TEXT NOT NULL,
"name" TEXT NOT NULL,
"municipality_code" TEXT NOT NULL,
CONSTRAINT "settlements_pk" PRIMARY KEY ("id"),
CONSTRAINT "settlements_fk0" FOREIGN KEY ("municipality_code") REFERENCES "municipalities"("code")
);
'''
execute_query(connection, create_area_query)
execute_query(connection, create_municipality_query)
execute_query(connection, create_settlements_query)
| [
"helpers.create_connection",
"helpers.execute_query"
] | [((67, 138), 'helpers.create_connection', 'create_connection', (['"""postgres"""', '"""postgres"""', '"""admin"""', '"""127.0.0.1"""', '"""5432"""'], {}), "('postgres', 'postgres', 'admin', '127.0.0.1', '5432')\n", (84, 138), False, 'from helpers import create_connection, execute_query\n'), ((195, 243), 'helpers.execute_query', 'execute_query', (['connection', 'create_database_query'], {}), '(connection, create_database_query)\n', (208, 243), False, 'from helpers import create_connection, execute_query\n'), ((258, 327), 'helpers.create_connection', 'create_connection', (['"""ekatte"""', '"""postgres"""', '"""admin"""', '"""127.0.0.1"""', '"""5432"""'], {}), "('ekatte', 'postgres', 'admin', '127.0.0.1', '5432')\n", (275, 327), False, 'from helpers import create_connection, execute_query\n'), ((1385, 1429), 'helpers.execute_query', 'execute_query', (['connection', 'create_area_query'], {}), '(connection, create_area_query)\n', (1398, 1429), False, 'from helpers import create_connection, execute_query\n'), ((1430, 1482), 'helpers.execute_query', 'execute_query', (['connection', 'create_municipality_query'], {}), '(connection, create_municipality_query)\n', (1443, 1482), False, 'from helpers import create_connection, execute_query\n'), ((1483, 1534), 'helpers.execute_query', 'execute_query', (['connection', 'create_settlements_query'], {}), '(connection, create_settlements_query)\n', (1496, 1534), False, 'from helpers import create_connection, execute_query\n')] |
# -*- coding: utf-8 -*-
"""
This module contains the tool of Events
"""
import os
from setuptools import setup, find_packages
def read(*rnames):
    """Read and return the contents of a file located relative to this script.

    BUG FIX: the original left the file handle open; use a context manager
    so it is closed deterministically.
    """
    with open(os.path.join(os.path.dirname(__file__), *rnames)) as f:
        return f.read()
version = '0.2'
long_description = (
read('README.txt')
+ '\n' +
'Change history\n'
'**************\n'
+ '\n' +
read('CHANGES.txt')
+ '\n' +
'Detailed Documentation\n'
'**********************\n'
+ '\n' +
read('isaw', 'events', 'README.txt')
+ '\n' +
'Contributors\n'
'************\n'
+ '\n' +
read('CONTRIBUTORS.txt')
+ '\n' +
'Download\n'
'********\n'
)
tests_require=['zope.testing']
setup(name='isaw.events',
version=version,
description="",
long_description=long_description,
# Get more strings from http://www.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Framework :: Plone',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: GNU General Public License (GPL)',
],
keywords='events isaw schedule calendar',
author='<NAME>',
author_email='<EMAIL>',
url='http://github.com/christophwarner/isaw.events',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['isaw', ],
include_package_data=True,
zip_safe=False,
dependency_links=['http://code.google.com/p/python-twitter/'],
install_requires=[
'setuptools',
'tweepy >= 1.5',
'simplejson >= 2.0.9',
'tinyurl >= 0.1.0',
],
tests_require=tests_require,
extras_require=dict(tests=tests_require),
test_suite='isaw.events.tests.test_docs.test_suite',
entry_points="""
# -*- entry_points -*-
[z3c.autoinclude.plugin]
target = plone
""",
)
| [
"os.path.dirname",
"setuptools.find_packages"
] | [((1328, 1363), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['ez_setup']"}), "(exclude=['ez_setup'])\n", (1341, 1363), False, 'from setuptools import setup, find_packages\n'), ((175, 200), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (190, 200), False, 'import os\n')] |
from django.contrib import admin
# NOTE(review): the star import presumably provides User and ReadingTip —
# prefer an explicit import to make the dependency visible.
from app.models import *
from app.admin.tip import ReadingTipAdmin
from app.admin.user import UserAdmin
# Register the models with their custom ModelAdmin classes so they appear
# in the Django admin site.
admin.site.register(User, UserAdmin)
admin.site.register(ReadingTip, ReadingTipAdmin)
| [
"django.contrib.admin.site.register"
] | [((168, 204), 'django.contrib.admin.site.register', 'admin.site.register', (['User', 'UserAdmin'], {}), '(User, UserAdmin)\n', (187, 204), False, 'from django.contrib import admin\n'), ((205, 253), 'django.contrib.admin.site.register', 'admin.site.register', (['ReadingTip', 'ReadingTipAdmin'], {}), '(ReadingTip, ReadingTipAdmin)\n', (224, 253), False, 'from django.contrib import admin\n')] |
# %% [markdown]
# # How to define a scikit-learn pipeline and visualize it
# %% [markdown]
# The goal of keeping this notebook is to:
# - make it available for users that want to reproduce it locally
# - archive the script in the event we want to rerecord this video with an
# update in the UI of scikit-learn in a future release.
# %% [markdown]
# ### First we load the dataset
# %% [markdown]
# We need to define our data and target. In this case we will build a classification model
# %%
import pandas as pd
ames_housing = pd.read_csv("../datasets/house_prices.csv", na_values='?')
target_name = "SalePrice"
data, target = ames_housing.drop(columns=target_name), ames_housing[target_name]
# Binarise the regression target: 1 when the sale price exceeds 200k.
target = (target > 200_000).astype(int)
# %% [markdown]
# We inspect the first rows of the dataframe
# %%
data
# %% [markdown]
# We can cherry-pick some features and only retain this subset of data
# %%
numeric_features = ['LotArea', 'FullBath', 'HalfBath']
categorical_features = ['Neighborhood', 'HouseStyle']
data = data[numeric_features + categorical_features]
# %% [markdown]
# ### Then we create the pipeline
# %% [markdown]
# The first step is to define the preprocessing steps
# %%
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
# NOTE(review): the closing parentheses below are laid out oddly but parse
# correctly — the list holds two ('name', transformer) tuples.
numeric_transformer = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='median')),
    ('scaler', StandardScaler(),
    )])
categorical_transformer = OneHotEncoder(handle_unknown='ignore')
# %% [markdown]
# The next step is to apply the transformations using `ColumnTransformer`
# %%
from sklearn.compose import ColumnTransformer
preprocessor = ColumnTransformer(transformers=[
    ('num', numeric_transformer, numeric_features),
    ('cat', categorical_transformer, categorical_features),
])
# %% [markdown]
# Then we define the model and join the steps in order
# %%
from sklearn.linear_model import LogisticRegression
model = Pipeline(steps=[
    ('preprocessor', preprocessor),
    ('classifier', LogisticRegression()),
])
# %% [markdown]
# Let's visualize it!
# %%
from sklearn import set_config
set_config(display='diagram')
model
# %% [markdown]
# ### Finally we score the model
# %%
from sklearn.model_selection import cross_validate
cv_results = cross_validate(model, data, target, cv=5)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
      f"{scores.mean():.3f} +/- {scores.std():.3f}")
| [
"pandas.read_csv",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.model_selection.cross_validate",
"sklearn.set_config",
"sklearn.linear_model.LogisticRegression",
"sklearn.preprocessing.StandardScaler",
"sklearn.impute.SimpleImputer",
"sklearn.compose.ColumnTransformer"
] | [((534, 592), 'pandas.read_csv', 'pd.read_csv', (['"""../datasets/house_prices.csv"""'], {'na_values': '"""?"""'}), "('../datasets/house_prices.csv', na_values='?')\n", (545, 592), True, 'import pandas as pd\n'), ((1496, 1534), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (1509, 1534), False, 'from sklearn.preprocessing import StandardScaler, OneHotEncoder\n'), ((1694, 1834), 'sklearn.compose.ColumnTransformer', 'ColumnTransformer', ([], {'transformers': "[('num', numeric_transformer, numeric_features), ('cat',\n categorical_transformer, categorical_features)]"}), "(transformers=[('num', numeric_transformer,\n numeric_features), ('cat', categorical_transformer, categorical_features)])\n", (1711, 1834), False, 'from sklearn.compose import ColumnTransformer\n'), ((2156, 2185), 'sklearn.set_config', 'set_config', ([], {'display': '"""diagram"""'}), "(display='diagram')\n", (2166, 2185), False, 'from sklearn import set_config\n'), ((2313, 2354), 'sklearn.model_selection.cross_validate', 'cross_validate', (['model', 'data', 'target'], {'cv': '(5)'}), '(model, data, target, cv=5)\n', (2327, 2354), False, 'from sklearn.model_selection import cross_validate\n'), ((1397, 1429), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""median"""'}), "(strategy='median')\n", (1410, 1429), False, 'from sklearn.impute import SimpleImputer\n'), ((1447, 1463), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1461, 1463), False, 'from sklearn.preprocessing import StandardScaler, OneHotEncoder\n'), ((2053, 2073), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (2071, 2073), False, 'from sklearn.linear_model import LogisticRegression\n')] |
from nltk.corpus import wordnet as wn
import json
from pyinflect import getAllInflections, getInflection
import re
import inflect
import urllib.parse
from SPARQLWrapper import SPARQLWrapper, JSON
from urllib.parse import quote
from py_thesaurus import Thesaurus
import sys, getopt
# WordNet part-of-speech tags used by convert()/nounify().
WN_NOUN = 'n'
WN_VERB = 'v'
WN_ADJECTIVE = 'a'
WN_ADJECTIVE_SATELLITE = 's'
WN_ADVERB = 'r'
# Module-level state consumed and mutated by the clean_/augment_/remove_ helpers.
entities = {}
properties = {}
def nounify(verb_word):
    """Transform a verb to the closest noun: die -> death.

    Returns a list of (noun, probability) tuples sorted by decreasing
    probability; an empty list when the verb is unknown to WordNet.
    (Fix: removed a stray debug print of the intermediate lemma list.)
    """
    verb_synsets = wn.synsets(verb_word, pos="v")
    # Word not found
    if not verb_synsets:
        return []
    # All verb lemmas of the word.
    verb_lemmas = [l for s in verb_synsets if s.name().split('.')[1] == 'v'
                   for l in s.lemmas()]
    # Follow derivational links and keep only the noun lemmas.
    related_noun_lemmas = [
        l
        for verb_lemma in verb_lemmas
        for l in verb_lemma.derivationally_related_forms()
        if l.synset().name().split('.')[1] == 'n'
    ]
    words = [l.name() for l in related_noun_lemmas]
    len_words = len(words)
    # (word, relative frequency) pairs, most frequent first.
    result = [(w, float(words.count(w)) / len_words) for w in set(words)]
    result.sort(key=lambda w: -w[1])
    return result
def convert(word, from_pos, to_pos):
    """Transform `word` from one WordNet POS to another via derivational links.

    Returns (word, probability) tuples sorted by decreasing probability;
    empty list when the word is unknown.
    """
    synsets = wn.synsets(word, pos=from_pos)
    # Word not found
    if not synsets:
        return []
    adjective_tags = (WN_ADJECTIVE, WN_ADJECTIVE_SATELLITE)

    def _pos_matches(pos, wanted):
        # 'a' and 's' count as the same adjective POS.
        return pos == wanted or (wanted in adjective_tags and pos in adjective_tags)

    # All lemmas of the word in the requested source POS.
    lemmas = [l for s in synsets
              if _pos_matches(s.name().split('.')[1], from_pos)
              for l in s.lemmas()]
    # Derivationally related lemmas restricted to the target POS.
    related = [
        drf
        for l in lemmas
        for drf in l.derivationally_related_forms()
        if _pos_matches(drf.synset().name().split('.')[1], to_pos)
    ]
    words = [l.name() for l in related]
    total = len(words)
    # (word, relative frequency) pairs, most frequent first.
    result = [(w, float(words.count(w)) / total) for w in set(words)]
    result.sort(key=lambda pair: -pair[1])
    return result
def clean_value(value):
    """Normalise a raw KG label: lower-case, expand symbols and numbers,
    strip punctuation, and collapse whitespace."""
    value = value.lower().replace("$", "dollar ")
    #value = value.replace("€", "euro ")
    # URL-decode percent-encoded tokens word by word.
    decoded = ''
    for token in value.split():
        if token.startswith('%'):
            token = urllib.parse.unquote(token)
        decoded = decoded + " " + token
    # Drop any remaining percent escapes, then expand common symbols.
    value = re.sub(r"[%][a-zA-Z0-9]+", "", decoded)
    value = value.replace("&", "and").replace("/", " or ")
    # Remove parenthesised qualifiers, e.g. "rock (band)" -> "rock ".
    value = re.sub(r"([(].*[)])", "", value)
    # Strip stray punctuation; map '+' and '_' to spaces (one translate pass).
    value = value.translate(str.maketrans({
        "'": "", '"': "", ':': "", ',': "", '<': "", '>': "",
        '(': "", ')': "", '!': "", '\\': "", '+': " ", '_': " ",
    }))
    engine = inflect.engine()
    # Spell numbers out, e.g. "3" -> "three".
    spelled = ''
    for token in value.split():
        if token.isdigit() or bool(re.match('^([0-9]|[,]|[.])*$', token)):
            token = engine.number_to_words(token)
        spelled = spelled + " " + token
    value = spelled.strip()
    return re.sub(r"[ \s\n\t]+", " ", value)
def contains_special_characters(value):
    """Return True when `value` holds anything besides ASCII letters,
    spaces, and hyphens."""
    return re.match(r"^[a-zA-Z -]*$", value) is None
def remove_ambiguities_slot_properties():
    """Deduplicate synonyms across the global `properties` dict.

    A synonym is kept only the first time it is seen across all property
    keys — and never when it collides with an existing property name — so
    every surface form maps unambiguously to a single property.
    """
    global properties
    #with open('./augmented_slot_properties.json') as f:
    #    properties = json.load(f)
    # Seed with the canonical property names; each accepted synonym is
    # appended so that later duplicates get rejected.
    all_properties_value = list(properties.keys())
    for key in properties:
        if 'synonyms' in properties[key]:
            synonyms = properties[key]['synonyms']
            new_synoynms= []
            for synonym in synonyms:
                if not synonym in all_properties_value:
                    all_properties_value.append(synonym)
                    new_synoynms.append(synonym)
            properties[key]['synonyms'] = new_synoynms
    #with open("./augmented_slot_properties.json", "w") as write_file:
    #    json.dump(properties, write_file, indent=4)
def augment_slot_properties():
    """Extend each property in the global `properties` dict with synonyms
    derived via WordNet: verb forms (plus their inflections) and adjective
    forms of the property's noun name.
    """
    global properties
    #with open('./cleaned_slot_properties.json') as f:
    #    properties = json.load(f)
    for key in properties:
        # nouns to verbs
        verb_tuples = convert(key, 'n', 'v')
        verb_form = []
        for verb_tuple in verb_tuples:
            value = verb_tuple[0]
            value = clean_value(value)
            verb_form.append(value)
        verb_form = set(verb_form)
        # add verb inflections (e.g. run -> running, ran); note the loop
        # iterates the original set object while `verb_form` is rebound to
        # a new union each pass, so iteration stays safe.
        for verb in verb_form:
            temp = getAllInflections(verb)
            inflections = []
            for t in temp:
                value = temp[t][0]
                value = clean_value(value)
                inflections.append(value)
            inflections = set(inflections)
            verb_form = verb_form.union(inflections)
        verb_form = set(verb_form)
        if key in verb_form:
            verb_form.remove(key)
        verb_form = list(verb_form)
        # nouns to adjectives
        adjective_tuples = convert(key, 'n', 'a')
        adjective_form = []
        for adjective_tuple in adjective_tuples:
            value = adjective_tuple[0]
            value = clean_value(value)
            adjective_form.append(value)
        adjective_form = set(adjective_form)
        if key in adjective_form:
            adjective_form.remove(key)
        adjective_form = list(adjective_form)
        '''
        # noun synonyms
        synonyms = [clean_value(l.name()) for synset in wn.synsets(key) for l in synset.lemmas()]
        synonyms = set(synonyms)
        temp = set()
        for s in synonyms:
            if not s in all_augmented_value:
                temp.add(s)
        #if key in temp:
        #    temp.remove(key)
        synonyms = list(temp)
        # combine all
        extended_synonyms = list(set(verb_form + synonyms + adjective_form))
        '''
        extended_synonyms = list(set(verb_form + adjective_form))
        if extended_synonyms:
            properties[key]["synonyms"] = extended_synonyms
    #with open("./augmented_slot_properties.json", "w") as write_file:
    #    json.dump(properties, write_file, indent=4)
def clean_slot_properties():
    """Rebuild the global `properties` dict with normalised keys.

    Keys containing special characters are cleaned via `clean_value`;
    entries whose cleaned key is empty or still contains special
    characters are dropped, and entries that collapse onto the same
    cleaned key have their url/synonym lists merged (deduplicated).
    """
    global properties
    cleaned = {}
    for raw_key, entry in properties.items():
        key = clean_value(raw_key) if contains_special_characters(raw_key) else raw_key
        if not key or not key.strip() or contains_special_characters(key):
            continue
        target = cleaned.setdefault(key, {'urls': []})
        target['urls'] = list(set(target['urls'] + entry['urls']))
        if 'synonyms' in entry:
            merged = target.get('synonyms', []) + entry['synonyms']
            target['synonyms'] = list(set(merged))
    #with open("./cleaned_slot_properties.json", "w") as write_file:
    #    json.dump(cleaned, write_file, indent=4)
    properties = cleaned
def augment_slot_entities():
    """Load cleaned entity labels, attach WordNet lemma synonyms, and write
    the augmented mapping to ``./augmented_slot_entities.json``.

    For each label, every lemma of every WordNet synset is cleaned with
    ``clean_value``; the label itself is excluded from its own synonyms.
    """
    with open('./cleaned_slot_entities.json') as f:
        entities = json.load(f)
    for label in entities:
        lemma_names = {clean_value(lemma.name())
                       for synset in wn.synsets(label)
                       for lemma in synset.lemmas()}
        lemma_names.discard(label)
        if lemma_names:
            entities[label]["synonyms"] = list(lemma_names)
    with open("./augmented_slot_entities.json", "w") as write_file:
        json.dump(entities, write_file, indent=4)
def clean_slot_entities():
    """Normalise the keys of the global ``entities`` dict.

    Mirrors ``clean_slot_properties``: clean special-character keys, drop
    unusable ones, and merge/deduplicate the ``urls`` and ``synonyms`` of
    raw keys that map onto the same cleaned label. Rebinds the global.
    """
    global entities
    cleaned = {}
    for raw_label in entities:
        label = clean_value(raw_label) if contains_special_characters(raw_label) else raw_label
        # Guard clauses: skip labels that cannot serve as slot values.
        if not label or not label.strip() or contains_special_characters(label):
            continue
        record = cleaned.setdefault(label, {'urls': []})
        record['urls'] = list(set(record['urls'] + entities[raw_label]['urls']))
        if 'synonyms' in entities[raw_label]:
            merged = record.get('synonyms', []) + entities[raw_label]['synonyms']
            record['synonyms'] = list(set(merged))
    entities = cleaned
def generate_entity_label(slot):
    """Derive a human-readable label from a class URI binding.

    Takes the last path segment of ``slot["class"]["value"]`` (and the last
    fragment after ``#`` if present), splits CamelCase into words, and
    returns a lower-cased, space-separated label.
    """
    uri = slot["class"]["value"]
    label = uri.rsplit("/", 1)[-1]
    if "#" in label:
        label = label.rsplit("#", 1)[-1]
    # Two-pass CamelCase split: "SimpleHTTPServer" -> "Simple_HTTP_Server".
    spaced = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', label)
    spaced = re.sub('([a-z0-9])([A-Z])', r'\1_\2', spaced).lower()
    return spaced.replace("_", " ")
def store_entities(result):
    """Merge the class bindings of a SPARQL JSON ``result`` into the global
    ``entities`` dict, keyed by lower-cased label.

    When a binding carries no usable label, one is generated from the class
    URI via ``generate_entity_label``. Labels of 140+ characters are skipped.
    Each entry accumulates its class URIs in an ``"urls"`` set, wrapped in
    angle brackets for direct reuse inside SPARQL queries.
    """
    global entities
    for binding in result["results"]["bindings"]:
        has_label = "label" in binding and "value" in binding["label"]
        label = binding["label"]["value"] if has_label else generate_entity_label(binding)
        label = label.lower()
        if len(label) >= 140:
            continue  # skip absurdly long labels
        if label not in entities:
            entities[label] = {"urls": set()}
        entities[label]["urls"].add("<" + binding["class"]["value"] + ">")
def query_skosConcepts(sparql_endpoint, defaultGraph = "", lang="en"):
    """Query the endpoint for skos:Concept instances (with an optional
    skos:prefLabel in ``lang``) and merge them into the global ``entities``
    dict via ``store_entities``.

    Fix: the query used to be executed once *before* the try block as well,
    so endpoint failures raised instead of printing the failure message and
    a working endpoint was queried twice.
    """
    sparql = SPARQLWrapper(sparql_endpoint, defaultGraph=defaultGraph)
    query = ("PREFIX skos: <http://www.w3.org/2004/02/skos/core#> "
             "SELECT DISTINCT ?class ?label WHERE { "
             "?class a skos:Concept."
             "OPTIONAL{ "
             "?class skos:prefLabel ?label. "
             "FILTER(LANG(?label)='"+lang+"')"
             "}"
             "} ")
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    try:
        result = sparql.query().convert()
        store_entities(result)
        print("OK skos:concepts query")
    except Exception:
        # Report and continue; other discovery queries may still succeed.
        print("Failed skos:concepts query")
def query_rdfsClasses(sparql_endpoint, defaultGraph = "", lang="en"):
    """Query the endpoint for rdfs:Class instances (with an optional
    rdfs:label in ``lang``) and merge them into the global ``entities``
    dict via ``store_entities``.

    Fix: the query used to be executed once *before* the try block as well,
    so endpoint failures raised instead of printing the failure message and
    a working endpoint was queried twice.
    """
    sparql = SPARQLWrapper(sparql_endpoint, defaultGraph=defaultGraph)
    query = ("PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
             "SELECT DISTINCT ?class ?label WHERE { "
             "?class a rdfs:Class. "
             "OPTIONAL{ "
             "?class rdfs:label ?label. "
             "FILTER(LANG(?label)='"+lang+"')"
             "}"
             "} ")
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    try:
        result = sparql.query().convert()
        store_entities(result)
        print("OK rdfs:classes query")
    except Exception:
        # Report and continue; other discovery queries may still succeed.
        print("Failed rdfs:classes query")
def query_owlClasses(sparql_endpoint, defaultGraph = "", lang="en"):
    """Query the endpoint for owl:Class instances (with an optional
    rdfs:label in ``lang``) and merge them into the global ``entities``
    dict via ``store_entities``.

    Fix: the query used to be executed once *before* the try block as well,
    so endpoint failures raised instead of printing the failure message and
    a working endpoint was queried twice.
    """
    sparql = SPARQLWrapper(sparql_endpoint, defaultGraph=defaultGraph)
    query = ("PREFIX owl: <http://www.w3.org/2002/07/owl#> "
             "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
             "SELECT DISTINCT ?class ?label WHERE { "
             "?class a owl:Class. "
             "OPTIONAL{ "
             "?class rdfs:label ?label. "
             "FILTER(LANG(?label)='"+lang+"')"
             "}"
             "}")
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    try:
        result = sparql.query().convert()
        store_entities(result)
        print("OK owl classes query")
    except Exception:
        # Report and continue; other discovery queries may still succeed.
        print("Failed owl classes query")
def query_usedClasses(sparql_endpoint, defaultGraph = "", lang="en"):
    """Query the endpoint for every class that has at least one instance
    ("[] a ?class"), optionally with an rdfs:label in ``lang``, and merge
    the results into the global ``entities`` dict via ``store_entities``."""
    sparql = SPARQLWrapper(sparql_endpoint, defaultGraph=defaultGraph)
    query = ("PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
        "SELECT DISTINCT ?class ?label WHERE { "
        "[] a ?class. "
        "OPTIONAL{ "
        "?class rdfs:label ?label. "
        "FILTER(LANG(?label)='"+lang+"')"
        "}"
        "} ")
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    try:
        result = sparql.query().convert()
        store_entities(result)
        print("OK used classes query")
    except:
        # Endpoint/parse failures are reported but deliberately not fatal.
        print("Failed used classes query")
        pass
def query_entities(sparql_endpoint, defaultGraph = "", lang="en"):
    """Collect entity classes from the endpoint using all discovery
    strategies (used classes, skos:Concept, rdfs:Class, owl:Class), then
    convert each accumulated url-set to a list so it is JSON-serialisable.
    """
    global entities
    discovery_steps = (
        query_usedClasses,
        query_skosConcepts,
        query_rdfsClasses,
        query_owlClasses,
    )
    for step in discovery_steps:
        step(sparql_endpoint, defaultGraph=defaultGraph, lang=lang)
    for label in entities:
        entities[label]["urls"] = list(entities[label]["urls"])
def generate_property_label(slot):
    """Derive a human-readable label from a property URI binding.

    Takes the last path segment of ``slot["p"]["value"]`` (and the last
    fragment after ``#`` if present), splits CamelCase into words, and
    returns a lower-cased, space-separated label.
    """
    uri = slot["p"]["value"]
    label = uri.rsplit("/", 1)[-1]
    if "#" in label:
        label = label.rsplit("#", 1)[-1]
    # Two-pass CamelCase split: "givenName" -> "given_Name" -> "given name".
    spaced = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', label)
    spaced = re.sub('([a-z0-9])([A-Z])', r'\1_\2', spaced).lower()
    return spaced.replace("_", " ")
def store_properties(result):
    """Merge the predicate bindings of a SPARQL JSON ``result`` into the
    global ``properties`` dict, keyed by lower-cased label.

    When a binding carries no usable label (e.g. from the label-free query),
    one is generated from the predicate URI via ``generate_property_label``.
    Labels of 140+ characters are skipped. Each entry accumulates its
    predicate URIs in a ``"urls"`` set, wrapped in angle brackets for direct
    reuse inside SPARQL queries.
    """
    global properties
    for binding in result["results"]["bindings"]:
        has_label = "label" in binding and "value" in binding["label"]
        label = binding["label"]["value"] if has_label else generate_property_label(binding)
        label = label.lower()
        if len(label) >= 140:
            continue  # skip absurdly long labels
        if label not in properties:
            properties[label] = {"urls": set()}
        properties[label]["urls"].add("<" + binding["p"]["value"] + ">")
def query_rdfProperty(sparql_endpoint, defaultGraph = "", lang="en"):
    """Query the endpoint for rdf:Property instances (with an optional
    rdfs:label in ``lang``) and merge them into the global ``properties``
    dict via ``store_properties``.

    Fix: the rdf: prefix was declared with an ``https://`` scheme, but the
    RDF namespace is ``http://www.w3.org/1999/02/22-rdf-syntax-ns#`` — with
    the wrong scheme the query matched nothing. (The owl property queries
    in this file already use the correct ``http://`` form.)
    """
    sparql = SPARQLWrapper(sparql_endpoint, defaultGraph=defaultGraph)
    query = ("PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
             "PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>"
             "SELECT DISTINCT ?p ?label WHERE { "
             "?p rdf:type rdf:Property. "
             "OPTIONAL{ "
             "?p rdfs:label ?label. "
             "FILTER(LANG(?label)='"+lang+"')"
             "}"
             "}")
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    try:
        result = sparql.query().convert()
        store_properties(result)
        print("OK rdf:Property query")
    except Exception:
        # Report and continue; other discovery queries may still succeed.
        print("failed rdf:Property query")
def query_owlDatatypeProperties(sparql_endpoint, defaultGraph = "", lang="en"):
    """Query the endpoint for owl:DatatypeProperty instances (with an
    optional rdfs:label in ``lang``) and merge them into the global
    ``properties`` dict via ``store_properties``."""
    sparql = SPARQLWrapper(sparql_endpoint, defaultGraph=defaultGraph)
    query = ("PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>"
             "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>"
             "PREFIX owl: <http://www.w3.org/2002/07/owl#>"
             "SELECT DISTINCT ?p ?label WHERE { "
             "?p rdf:type owl:DatatypeProperty. "
             "OPTIONAL{ "
             "?p rdfs:label ?label. "
             "FILTER(LANG(?label)='"+lang+"')"
             "}"
             "}")
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    try:
        result = sparql.query().convert()
        store_properties(result)
        print("OK owl:DatatypeProperty query")
    except:
        # Endpoint/parse failures are reported but deliberately not fatal.
        print("failed owl:DatatypeProperty query")
        pass
def query_owlObjectProperties(sparql_endpoint, defaultGraph = "", lang="en"):
    """Query the endpoint for owl:ObjectProperty instances (with an
    optional rdfs:label in ``lang``) and merge them into the global
    ``properties`` dict via ``store_properties``."""
    sparql = SPARQLWrapper(sparql_endpoint, defaultGraph=defaultGraph)
    query = ("PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>"
             "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>"
             "PREFIX owl: <http://www.w3.org/2002/07/owl#>"
             "SELECT DISTINCT ?p ?label WHERE { "
             "?p rdf:type owl:ObjectProperty. "
             "OPTIONAL{ "
             "?p rdfs:label ?label. "
             "FILTER(LANG(?label)='"+lang+"')"
             "}"
             "}")
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    try:
        result = sparql.query().convert()
        store_properties(result)
        print("OK owl:ObjectProperty query")
    except:
        # Endpoint/parse failures are reported but deliberately not fatal.
        print("failed owl:ObjectProperty query")
        pass
def query_usedPropertiesWithoutLabels(sparql_endpoint, defaultGraph = "", lang="en"):
    """Query the endpoint for every predicate actually used in a triple,
    without asking for labels (cheapest/most robust discovery query), and
    merge the results into the global ``properties`` dict.

    ``store_properties`` generates labels from the URIs since the bindings
    carry none. ``lang`` is accepted for signature symmetry but unused here.
    """
    sparql = SPARQLWrapper(sparql_endpoint, defaultGraph=defaultGraph)
    query = ("SELECT DISTINCT ?p WHERE { ?s ?p ?o. }")
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    try:
        result = sparql.query().convert()
        store_properties(result)
        print("OK used property without labels query")
    except:
        # Endpoint/parse failures are reported but deliberately not fatal.
        print("failed used property without labels query")
        pass
def query_usedProperties(sparql_endpoint, defaultGraph = "", lang="en"):
    """Query the endpoint for used predicates together with optional
    rdfs:labels in ``lang`` (capped at 500 rows), merging the results into
    the global ``properties`` dict via ``store_properties``.

    On failure it falls back to the cheaper label-free variant,
    ``query_usedPropertiesWithoutLabels``.
    """
    sparql = SPARQLWrapper(sparql_endpoint, defaultGraph=defaultGraph)
    query = ("PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
             "SELECT DISTINCT ?p ?label WHERE { "
             "?s ?p ?o. "
             "OPTIONAL{ "
             "?p rdfs:label ?label. "
             "FILTER(LANG(?label)='"+lang+"')"
             "}"
             "} LIMIT 500")
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    try:
        result = sparql.query().convert()
        store_properties(result)
        print("OK used property with labels query")
    except:
        # Labeled query failed (e.g. timeout): retry without labels.
        print("failed used property with labels query")
        query_usedPropertiesWithoutLabels(sparql_endpoint, defaultGraph, lang)
def query_properties(sparql_endpoint, defaultGraph = "", lang="en"):
    """Collect property predicates from the endpoint using all discovery
    strategies, then convert each accumulated url-set to a list so it is
    JSON-serialisable.
    """
    global properties
    #query_usedProperties(sparql_endpoint, defaultGraph=defaultGraph, lang= lang)
    discovery_steps = (
        query_usedPropertiesWithoutLabels,
        query_owlObjectProperties,
        query_owlDatatypeProperties,
        query_rdfProperty,
    )
    for step in discovery_steps:
        step(sparql_endpoint, defaultGraph=defaultGraph, lang=lang)
    for label in properties:
        properties[label]["urls"] = list(properties[label]["urls"])
def main(argv):
    """Entry point: generate ./conf.json for the assistant generator from a
    SPARQL endpoint.

    Usage: generator_configuration.py -e <endpoint> -g <default graph>
           [-l <lang> -i <invocation name>]

    Pipeline: query + clean entity (class) labels, query + clean + augment
    property labels, disambiguate the "label" property when several
    predicates share it, then dump the final configuration.
    """
    endpoint = "" # e.g., "http://dbpedia.org/sparql"
    defaultGraph = "" # e.g., "http://dbpedia.org"
    lang = None #"en" default
    invocation_name = None #"my personal assistant" default
    # Intent names baked into the generated assistant configuration.
    intents = [
        "getAllResultsPreviousQuery",
        "getQueryExplanation",
        "getFurtherDetails",
        "getPropertyObject",
        "getDescription",
        "getNumericFilter",
        "getNumericFilterByClass",
        "getClassInstances",
        "getTripleVerification",
        "getLocation",
        "getSuperlative",
        "getPropertySubjectByClass",
        "getPropertySubject"
    ]
    result_limit = 5
    # No arguments at all: print usage and abort.
    if len(argv) == 0:
        print('generator_configuration.py -e SPARQ_endpoint -g default graph [-l lang -i invocation name]')
        sys.exit(2)
    try:
        opts, args = getopt.getopt(argv,"he:g:li:",["endpoint=","graph=","lang","invocation_name"])
    except getopt.GetoptError:
        print('generator_configuration.py -e SPARQ_endpoint -g default graph [-l lang -i invocation name]')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('generator_configuration.py -e SPARQ_endpoint -g default graph [-l lang -i invocation name]')
            sys.exit()
        elif opt in ("-e", "--endpoint"):
            endpoint = arg
        elif opt in ("-g", "--graph"):
            defaultGraph = arg
        # Only English and Italian are supported; anything else keeps the default.
        elif opt in ("-l", "--lang") and (arg == "en" or arg == "it"):
            lang = arg
        elif opt in ("-i", "--invocation_name"):
            invocation_name = arg
    if lang is None:
        lang="en"
    if invocation_name is None:
        invocation_name = "my personal assistant"
    print('SPARQL endpoint: ', endpoint)
    print('Graph: ', defaultGraph)
    print('Lang: ', lang)
    print('Invocation name: ', invocation_name)
    print("Querying entities...")
    query_entities(endpoint, defaultGraph=defaultGraph, lang=lang)
    print("Cleaning class labels...")
    clean_slot_entities()
    #print("Augmenting class labels...")
    #augment_slot_entities()
    print("Querying properties...")
    query_properties(endpoint, defaultGraph=defaultGraph, lang=lang)
    print("Cleaning property labels...")
    clean_slot_properties()
    print("Augmenting property labels...")
    augment_slot_properties()
    remove_ambiguities_slot_properties()
    '''
    with open('./augmented_slot_properties.json') as f:
        properties = json.load(f)
    '''
    # Several predicates may share the label "label"; keep only the one with
    # the highest usage count on this endpoint (e.g. rdfs:label vs skos ones).
    if "label" in properties and len(properties["label"]["urls"])>1:
        dict_label = {}
        for prop_label in properties["label"]["urls"]:
            sparql = SPARQLWrapper(endpoint, defaultGraph=defaultGraph)
            query = ("SELECT COUNT(*) as ?count WHERE { "
                     "?s " + prop_label + " ?o. "
                     "}")
            sparql.setQuery(query)
            sparql.setReturnFormat(JSON)
            try:
                result = sparql.query().convert()
                result = result['results']['bindings'][0]
                dict_label[prop_label] = result['count']['value']
            except:
                # Count query failed for this candidate: it simply gets no vote.
                pass
        key_max = max(dict_label, key= lambda x: dict_label[x])
        properties["label"]["urls"] = [key_max]
    '''
    with open('./cleaned_slot_entities.json') as f:
        entities = json.load(f)
    '''
    # Final configuration consumed by the assistant generator.
    conf = {
        "invocation_name" : invocation_name,
        "intents" : intents,
        "lang" : lang,
        "result_limit" : result_limit,
        "endpoint" : endpoint,
        "entity" : entities,
        "property" : properties
    }
    with open("./conf.json", "w") as write_file:
        json.dump(conf, write_file, indent=4)
# Script entry point: forward the CLI arguments (without the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
| [
"getopt.getopt",
"pyinflect.getAllInflections",
"SPARQLWrapper.SPARQLWrapper",
"re.match",
"inflect.engine",
"sys.exit",
"json.load",
"re.sub",
"nltk.corpus.wordnet.synsets",
"json.dump"
] | [((514, 544), 'nltk.corpus.wordnet.synsets', 'wn.synsets', (['verb_word'], {'pos': '"""v"""'}), "(verb_word, pos='v')\n", (524, 544), True, 'from nltk.corpus import wordnet as wn\n'), ((1681, 1711), 'nltk.corpus.wordnet.synsets', 'wn.synsets', (['word'], {'pos': 'from_pos'}), '(word, pos=from_pos)\n', (1691, 1711), True, 'from nltk.corpus import wordnet as wn\n'), ((3344, 3380), 're.sub', 're.sub', (['"""[%][a-zA-Z0-9]+"""', '""""""', 'value'], {}), "('[%][a-zA-Z0-9]+', '', value)\n", (3350, 3380), False, 'import re\n'), ((3472, 3503), 're.sub', 're.sub', (['"""([(].*[)])"""', '""""""', 'value'], {}), "('([(].*[)])', '', value)\n", (3478, 3503), False, 'import re\n'), ((3937, 3953), 'inflect.engine', 'inflect.engine', ([], {}), '()\n', (3951, 3953), False, 'import inflect\n'), ((4203, 4238), 're.sub', 're.sub', (['"""[ \\\\s\\\\n\\\\t]+"""', '""" """', 'value'], {}), "('[ \\\\s\\\\n\\\\t]+', ' ', value)\n", (4209, 4238), False, 'import re\n'), ((10266, 10310), 're.sub', 're.sub', (['"""(.)([A-Z][a-z]+)"""', '"""\\\\1_\\\\2"""', 'label'], {}), "('(.)([A-Z][a-z]+)', '\\\\1_\\\\2', label)\n", (10272, 10310), False, 'import re\n'), ((11001, 11058), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['sparql_endpoint'], {'defaultGraph': 'defaultGraph'}), '(sparql_endpoint, defaultGraph=defaultGraph)\n', (11014, 11058), False, 'from SPARQLWrapper import SPARQLWrapper, JSON\n'), ((11723, 11780), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['sparql_endpoint'], {'defaultGraph': 'defaultGraph'}), '(sparql_endpoint, defaultGraph=defaultGraph)\n', (11736, 11780), False, 'from SPARQLWrapper import SPARQLWrapper, JSON\n'), ((12438, 12495), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['sparql_endpoint'], {'defaultGraph': 'defaultGraph'}), '(sparql_endpoint, defaultGraph=defaultGraph)\n', (12451, 12495), False, 'from SPARQLWrapper import SPARQLWrapper, JSON\n'), ((13206, 13263), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['sparql_endpoint'], {'defaultGraph': 
'defaultGraph'}), '(sparql_endpoint, defaultGraph=defaultGraph)\n', (13219, 13263), False, 'from SPARQLWrapper import SPARQLWrapper, JSON\n'), ((14578, 14622), 're.sub', 're.sub', (['"""(.)([A-Z][a-z]+)"""', '"""\\\\1_\\\\2"""', 'label'], {}), "('(.)([A-Z][a-z]+)', '\\\\1_\\\\2', label)\n", (14584, 14622), False, 'import re\n'), ((15320, 15377), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['sparql_endpoint'], {'defaultGraph': 'defaultGraph'}), '(sparql_endpoint, defaultGraph=defaultGraph)\n', (15333, 15377), False, 'from SPARQLWrapper import SPARQLWrapper, JSON\n'), ((16076, 16133), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['sparql_endpoint'], {'defaultGraph': 'defaultGraph'}), '(sparql_endpoint, defaultGraph=defaultGraph)\n', (16089, 16133), False, 'from SPARQLWrapper import SPARQLWrapper, JSON\n'), ((16908, 16965), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['sparql_endpoint'], {'defaultGraph': 'defaultGraph'}), '(sparql_endpoint, defaultGraph=defaultGraph)\n', (16921, 16965), False, 'from SPARQLWrapper import SPARQLWrapper, JSON\n'), ((17742, 17799), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['sparql_endpoint'], {'defaultGraph': 'defaultGraph'}), '(sparql_endpoint, defaultGraph=defaultGraph)\n', (17755, 17799), False, 'from SPARQLWrapper import SPARQLWrapper, JSON\n'), ((18227, 18284), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['sparql_endpoint'], {'defaultGraph': 'defaultGraph'}), '(sparql_endpoint, defaultGraph=defaultGraph)\n', (18240, 18284), False, 'from SPARQLWrapper import SPARQLWrapper, JSON\n'), ((8528, 8540), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8537, 8540), False, 'import json\n'), ((8952, 8993), 'json.dump', 'json.dump', (['entities', 'write_file'], {'indent': '(4)'}), '(entities, write_file, indent=4)\n', (8961, 8993), False, 'import json\n'), ((20435, 20446), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (20443, 20446), False, 'import sys, getopt\n'), ((20477, 20564), 'getopt.getopt', 'getopt.getopt', 
(['argv', '"""he:g:li:"""', "['endpoint=', 'graph=', 'lang', 'invocation_name']"], {}), "(argv, 'he:g:li:', ['endpoint=', 'graph=', 'lang',\n 'invocation_name'])\n", (20490, 20564), False, 'import sys, getopt\n'), ((23328, 23365), 'json.dump', 'json.dump', (['conf', 'write_file'], {'indent': '(4)'}), '(conf, write_file, indent=4)\n', (23337, 23365), False, 'import json\n'), ((4316, 4356), 're.match', 're.match', (['"""^([a-zA-Z]|[ ]|[-])*$"""', 'value'], {}), "('^([a-zA-Z]|[ ]|[-])*$', value)\n", (4324, 4356), False, 'import re\n'), ((5628, 5651), 'pyinflect.getAllInflections', 'getAllInflections', (['verb'], {}), '(verb)\n', (5645, 5651), False, 'from pyinflect import getAllInflections, getInflection\n'), ((10322, 10364), 're.sub', 're.sub', (['"""([a-z0-9])([A-Z])"""', '"""\\\\1_\\\\2"""', 's1'], {}), "('([a-z0-9])([A-Z])', '\\\\1_\\\\2', s1)\n", (10328, 10364), False, 'import re\n'), ((14634, 14676), 're.sub', 're.sub', (['"""([a-z0-9])([A-Z])"""', '"""\\\\1_\\\\2"""', 's1'], {}), "('([a-z0-9])([A-Z])', '\\\\1_\\\\2', s1)\n", (14640, 14676), False, 'import re\n'), ((20703, 20714), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (20711, 20714), False, 'import sys, getopt\n'), ((20889, 20899), 'sys.exit', 'sys.exit', ([], {}), '()\n', (20897, 20899), False, 'import sys, getopt\n'), ((22313, 22363), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['endpoint'], {'defaultGraph': 'defaultGraph'}), '(endpoint, defaultGraph=defaultGraph)\n', (22326, 22363), False, 'from SPARQLWrapper import SPARQLWrapper, JSON\n'), ((4033, 4069), 're.match', 're.match', (['"""^([0-9]|[,]|[.])*$"""', 'word'], {}), "('^([0-9]|[,]|[.])*$', word)\n", (4041, 4069), False, 'import re\n'), ((8623, 8638), 'nltk.corpus.wordnet.synsets', 'wn.synsets', (['key'], {}), '(key)\n', (8633, 8638), True, 'from nltk.corpus import wordnet as wn\n')] |
# https://www.youtube.com/watch?v=bf_UOFFaHiY
# http://www.trex-game.skipser.com/
from PIL import ImageGrab, ImageOps
import pyautogui
import time
from numpy import *
class Cordinates():
replayBtn = (962, 530)
dinosaur = (664, 536) # dinaosaur standing
# dinosaur = (686, 548) # dinosaur down
#730= x cordinate to check for tree
#y cordinate = 565
def restartGame():
pyautogui.click(Cordinates.replayBtn)
# pyautogui.keyDown('down')
def pressSpace():
# pyautogui.keyUp('down')
pyautogui.keyDown('space')
print("Jump")
time.sleep(0.18)
pyautogui.keyUp('space')
# pyautogui.keyDown('down')
def imageGrab():
x1 = Cordinates.dinosaur[0] + 50 # 70 is the distance
y1 = Cordinates.dinosaur[1]
x2 = x1 + 80
y2 = Cordinates.dinosaur[1] + 35
box = (x1, y1, x2, y2)
image = ImageGrab.grab(box)
grayImage = ImageOps.grayscale(image)
a = array(grayImage.getcolors())
print(a.sum())
return a.sum()
def main():
restartGame()
try:
while True:
# imageGrab()
if(imageGrab() != 3047):
pressSpace()
time.sleep(0.1)
except KeyboardInterrupt:
print("Program stopped")
main() | [
"PIL.ImageGrab.grab",
"pyautogui.keyDown",
"time.sleep",
"pyautogui.click",
"PIL.ImageOps.grayscale",
"pyautogui.keyUp"
] | [((393, 430), 'pyautogui.click', 'pyautogui.click', (['Cordinates.replayBtn'], {}), '(Cordinates.replayBtn)\n', (408, 430), False, 'import pyautogui\n'), ((516, 542), 'pyautogui.keyDown', 'pyautogui.keyDown', (['"""space"""'], {}), "('space')\n", (533, 542), False, 'import pyautogui\n'), ((565, 581), 'time.sleep', 'time.sleep', (['(0.18)'], {}), '(0.18)\n', (575, 581), False, 'import time\n'), ((586, 610), 'pyautogui.keyUp', 'pyautogui.keyUp', (['"""space"""'], {}), "('space')\n", (601, 610), False, 'import pyautogui\n'), ((845, 864), 'PIL.ImageGrab.grab', 'ImageGrab.grab', (['box'], {}), '(box)\n', (859, 864), False, 'from PIL import ImageGrab, ImageOps\n'), ((881, 906), 'PIL.ImageOps.grayscale', 'ImageOps.grayscale', (['image'], {}), '(image)\n', (899, 906), False, 'from PIL import ImageGrab, ImageOps\n'), ((1150, 1165), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1160, 1165), False, 'import time\n')] |
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from mixbox import entities
from mixbox import fields
import cybox.bindings.win_event_object as win_event_binding
from cybox.objects.win_handle_object import WinHandle
from cybox.common import ObjectProperties, String
class WinEvent(ObjectProperties):
_binding = win_event_binding
_binding_class = win_event_binding.WindowsEventObjectType
_namespace = "http://cybox.mitre.org/objects#WinEventObject-2"
_XSI_NS = "WinEventObj"
_XSI_TYPE = "WindowsEventObjectType"
name = fields.TypedField("Name", String)
handle = fields.TypedField("Handle", WinHandle)
type_ = fields.TypedField("Type", String)
| [
"mixbox.fields.TypedField"
] | [((603, 636), 'mixbox.fields.TypedField', 'fields.TypedField', (['"""Name"""', 'String'], {}), "('Name', String)\n", (620, 636), False, 'from mixbox import fields\n'), ((650, 688), 'mixbox.fields.TypedField', 'fields.TypedField', (['"""Handle"""', 'WinHandle'], {}), "('Handle', WinHandle)\n", (667, 688), False, 'from mixbox import fields\n'), ((701, 734), 'mixbox.fields.TypedField', 'fields.TypedField', (['"""Type"""', 'String'], {}), "('Type', String)\n", (718, 734), False, 'from mixbox import fields\n')] |
# coding=utf-8
"""This module, nexus_db.py, defines a basic started database for the Nexus Server."""
import pika
import json
import time
from scripts.docker.wait_for_rabbit_host import WaitForRabbitMQHost
from libraries.database_abstraction.sql.sqlite import sqlite_db
from libraries.database_abstraction.sql.sqlite import table_abstraction
from libraries.database_abstraction.sql.query_abstraction import sql_query
######################################################################
db = sqlite_db.SQLiteDB('/v/db.sqlite', False, True)
todo_lists = table_abstraction.TableAbstraction('todo_lists')
todo_lists.add_column_string('table_id', nullable=False, unique=True)
todo_lists.add_column_string('px', nullable=False, unique=False)
todo_lists.add_column_string('py', nullable=False, unique=False)
todo_lists.add_column_string('pz', nullable=False, unique=False)
todo_lists.add_column_string('nx', nullable=False, unique=False)
todo_lists.add_column_string('ny', nullable=False, unique=False)
todo_lists.add_column_string('nz', nullable=False, unique=False)
todo_rows = table_abstraction.TableAbstraction('todo_rows')
todo_rows.add_column_string('table_id', nullable=False, unique=False)
todo_rows.add_column_string('row_id', nullable=False, unique=False)
todo_rows.add_column_string('description', nullable=False, unique=False)
todo_rows.add_column_string('time', nullable=False, unique=False)
todo_rows.add_column_string('difficulty', nullable=False, unique=False)
todo_rows.add_column_string('importance', nullable=False, unique=False)
todo_rows.add_column_string('completed', nullable=False, unique=False)
######################################################################
TEMP = '/Users/utarsuno/git_repos/quasar_source/generated_output/local/personal'
class NexusDatabase(object):
"""API for the Nexus Server's DB."""
def __init__(self, path: str):
self._db = sqlite_db.SQLiteDB(path, False, True)
self._schema_users = self._load_schema_users()
self._schema_worlds = self._load_schema_worlds()
self._schema_entities_root = self._load_schema_entities_root()
self._schema_entities_data = self._load_schema_entities_data()
def _load_schema_users(self):
"""Loads the users table schema."""
users = table_abstraction.TableAbstraction('user')
users.add_column_row_id_alias()
users.add_column_string('email', nullable=False, unique=True, indexed=True)
users.add_column_string('password', nullable=False, unique=False, indexed=False)
#users.add_column_string('meta_data', nullable=True, unique=False, indexed=False)
return users
def _load_schema_worlds(self):
"""Loads the worlds table schema."""
worlds = table_abstraction.TableAbstraction('world')
worlds.add_column_row_id_alias()
#worlds.add_column_string('meta_data', nullable=True, unique=False, indexed=False)
return worlds
def _load_schema_entities_root(self):
"""Loads the entities root table schema."""
entities_root = table_abstraction.TableAbstraction('entity_root')
entities_root.add_column_row_id_alias()
entities_root.add_column_foreign_key(self._schema_worlds.primary_key)
return entities_root
def _load_schema_entities_data(self):
"""Loads the entities root table schema."""
entities_data = table_abstraction.TableAbstraction('entity_data')
entities_data.add_column_row_id_alias()
return entities_data
| [
"libraries.database_abstraction.sql.sqlite.sqlite_db.SQLiteDB",
"libraries.database_abstraction.sql.sqlite.table_abstraction.TableAbstraction"
] | [((496, 543), 'libraries.database_abstraction.sql.sqlite.sqlite_db.SQLiteDB', 'sqlite_db.SQLiteDB', (['"""/v/db.sqlite"""', '(False)', '(True)'], {}), "('/v/db.sqlite', False, True)\n", (514, 543), False, 'from libraries.database_abstraction.sql.sqlite import sqlite_db\n'), ((557, 605), 'libraries.database_abstraction.sql.sqlite.table_abstraction.TableAbstraction', 'table_abstraction.TableAbstraction', (['"""todo_lists"""'], {}), "('todo_lists')\n", (591, 605), False, 'from libraries.database_abstraction.sql.sqlite import table_abstraction\n'), ((1078, 1125), 'libraries.database_abstraction.sql.sqlite.table_abstraction.TableAbstraction', 'table_abstraction.TableAbstraction', (['"""todo_rows"""'], {}), "('todo_rows')\n", (1112, 1125), False, 'from libraries.database_abstraction.sql.sqlite import table_abstraction\n'), ((1917, 1954), 'libraries.database_abstraction.sql.sqlite.sqlite_db.SQLiteDB', 'sqlite_db.SQLiteDB', (['path', '(False)', '(True)'], {}), '(path, False, True)\n', (1935, 1954), False, 'from libraries.database_abstraction.sql.sqlite import sqlite_db\n'), ((2319, 2361), 'libraries.database_abstraction.sql.sqlite.table_abstraction.TableAbstraction', 'table_abstraction.TableAbstraction', (['"""user"""'], {}), "('user')\n", (2353, 2361), False, 'from libraries.database_abstraction.sql.sqlite import table_abstraction\n'), ((2784, 2827), 'libraries.database_abstraction.sql.sqlite.table_abstraction.TableAbstraction', 'table_abstraction.TableAbstraction', (['"""world"""'], {}), "('world')\n", (2818, 2827), False, 'from libraries.database_abstraction.sql.sqlite import table_abstraction\n'), ((3101, 3150), 'libraries.database_abstraction.sql.sqlite.table_abstraction.TableAbstraction', 'table_abstraction.TableAbstraction', (['"""entity_root"""'], {}), "('entity_root')\n", (3135, 3150), False, 'from libraries.database_abstraction.sql.sqlite import table_abstraction\n'), ((3425, 3474), 
'libraries.database_abstraction.sql.sqlite.table_abstraction.TableAbstraction', 'table_abstraction.TableAbstraction', (['"""entity_data"""'], {}), "('entity_data')\n", (3459, 3474), False, 'from libraries.database_abstraction.sql.sqlite import table_abstraction\n')] |
import itertools
import asyncio
from async_timeout import timeout
from functools import partial
from youtube_dl import YoutubeDL
from discord.ext import commands
from discord import Embed, FFmpegPCMAudio, HTTPException, PCMVolumeTransformer, Color
ytdlopts = {
'format': 'bestaudio/best',
'extractaudio': True,
'audioformat': 'mp3',
'outtmpl': 'downloads/%(extractor)s-%(id)s-%(title)s.%(ext)s',
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0' # ipv6 addresses cause issues sometimes
}
ffmpegopts = {
'before_options': '-nostdin -reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 60',
'options': '-vn'
}
ytdl = YoutubeDL(ytdlopts)
class VoiceConnectionError(commands.CommandError):
"""Custom Exception class for connection errors."""
class InvalidVoiceChannel(VoiceConnectionError):
"""Exception for cases of invalid Voice Channels."""
class YTDLSource(PCMVolumeTransformer):
def __init__(self, source, *, data, requester):
super().__init__(source)
self.requester = requester
self.title = data.get('title')
self.web_url = data.get('webpage_url')
# https://github.com/rg3/youtube-dl/blob/master/README.md
def __getitem__(self, item: str):
return self.__getattribute__(item)
@classmethod
async def create_source(cls, ctx, search: str, *, loop, download=False, add_to_q=True):
loop = loop or asyncio.get_event_loop()
to_run = partial(ytdl.extract_info, url=search, download=download)
data = await loop.run_in_executor(None, to_run)
if 'entries' in data:
data = data['entries'][0]
if add_to_q is True:
embed = Embed(title="Aggiunto alla coda:", description=f'[{data["title"]}]({data["webpage_url"]}) [{ctx.author.mention}]', color=0xfefefe)
await ctx.send(embed=embed)
return {'webpage_url': data['webpage_url'], 'requester': ctx.author, 'title': data['title']}
@classmethod
async def regather_stream(cls, data, *, loop):
loop = loop or asyncio.get_event_loop()
requester = data['requester']
to_run = partial(ytdl.extract_info, url=data['webpage_url'], download=False)
data = await loop.run_in_executor(None, to_run)
return cls(FFmpegPCMAudio(data['url'], before_options=ffmpegopts['before_options'], options=ffmpegopts['options']), data=data, requester=requester)
class MusicPlayer(commands.Cog):
__slots__ = ('bot', '_guild', '_channel', '_cog', 'queue', 'next', 'current', 'np', 'volume')
def __init__(self, ctx):
self.bot = ctx.bot
self._guild = ctx.guild
self._channel = ctx.channel
self._cog = ctx.cog
self.queue = asyncio.Queue()
self.next = asyncio.Event()
self.np = None # Now playing message
self.volume = .3
self.current = None
ctx.bot.loop.create_task(self.player_loop())
async def player_loop(self):
await self.bot.wait_until_ready()
while not self.bot.is_closed():
self.next.clear()
try:
async with timeout(120):
source = await self.queue.get()
except asyncio.TimeoutError:
return self.destroy(self._guild)
if not isinstance(source, YTDLSource):
try:
source = await YTDLSource.regather_stream(source, loop=self.bot.loop)
except Exception as e:
await self._channel.send(f"C'è stato un errore nella richiesta della canzone.\n"f'```css\n[{e}]\n```')
continue
source.volume = self.volume
self.current = source
self._guild.voice_client.play(source, after=lambda _: self.bot.loop.call_soon_threadsafe(self.next.set))
embed = Embed(title="Ora in riproduzione:", description=f'[{source.title}]({source.web_url}) [{source.requester.mention}]', color=0xfefefe)
self.np = await self._channel.send(embed=embed)
await self.next.wait()
source.cleanup()
self.current = None
#try:
#await self.np.delete()
#except HTTPException:
#pass
def destroy(self, guild):
return self.bot.loop.create_task(self._cog.cleanup(guild))
class Musica(commands.Cog):
    """Command cog exposing the music commands (connect, play, pause, ...)."""

    __slots__ = ('bot', 'players')

    def __init__(self, bot):
        self.bot = bot
        # One MusicPlayer per guild, keyed by guild id.
        self.players = {}

    @commands.Cog.listener()
    async def on_ready(self):
        print("Musica caricata!")

    async def cleanup(self, guild):
        """Disconnect from voice and drop the guild's player, tolerating absence."""
        try:
            await guild.voice_client.disconnect()
        except AttributeError:
            # Not connected to voice in this guild.
            pass
        try:
            del self.players[guild.id]
        except KeyError:
            # No player was ever created for this guild.
            pass

    async def __local_check(self, ctx):
        # Music commands only make sense inside a guild.
        if not ctx.guild:
            raise commands.NoPrivateMessage
        return True

    async def __error(self, ctx, error):
        # Cog-local handler for the two expected failure modes.
        if isinstance(error, commands.NoPrivateMessage):
            try:
                return await ctx.send('Questo comando non può essere usato nei messaggi privati')
            except HTTPException:
                pass
        elif isinstance(error, InvalidVoiceChannel):
            await ctx.send('Devi essere in un canale per mettere la musica', reference=ctx.message, mention_author=False)

    def get_player(self, ctx):
        """Return the guild's MusicPlayer, creating it on first use."""
        try:
            player = self.players[ctx.guild.id]
        except KeyError:
            player = MusicPlayer(ctx)
            self.players[ctx.guild.id] = player
        return player

    @commands.command(name='connetti', aliases=['join','entra','connect','connettiti'], help="Fai connettere il bot al canale vocale")
    async def connect_(self, ctx):
        """Join (or move to) the invoking user's voice channel."""
        try:
            channel = ctx.author.voice.channel
        except AttributeError:
            if ctx.author.voice is None:
                await ctx.send(f"{ctx.author.mention} Devi essere in un canale vocale per mettere la musica", reference=ctx.message, mention_author=False)
            raise InvalidVoiceChannel(f'Nessun canale in cui entrare.')
        vc = ctx.voice_client
        if vc:
            if vc.channel.id == channel.id:
                return
            try:
                await vc.move_to(channel)
            except asyncio.TimeoutError:
                raise VoiceConnectionError(f'Spostamento canale: <{channel}> timed out.')
        else:
            try:
                await channel.connect()
            except asyncio.TimeoutError:
                raise VoiceConnectionError(f'Connessione al canale: <{channel}> timed out.')
        embed = Embed(title="Entrato in chiamata", color=0xfefefe)
        embed.add_field(name="Connesso a:", value=channel, inline=True)
        #await ctx.send(embed=embed)

    @commands.command(name='play', aliases=['riproduci', 'p'], help='Riproduci una canzone')
    async def play_(self, ctx, *, search: str):
        """Queue the track found by *search*, connecting to voice first if needed."""
        await ctx.trigger_typing()
        if ctx.author.voice is None:
            return await ctx.send(f"{ctx.author.mention} Devi essere in un canale vocale per mettere la musica", reference=ctx.message, mention_author=False)
        vc = ctx.voice_client
        if not vc:
            await ctx.invoke(self.connect_)
        player = self.get_player(ctx)
        source = await YTDLSource.create_source(ctx, search, loop=self.bot.loop, download=False, add_to_q=(player.current is not None))
        await player.queue.put(source)

    @commands.command(name='pausa', aliases=['pause'], help="Pausa la canzone in riproduzione")
    async def pause_(self, ctx):
        """Pause playback; if already paused, resume instead."""
        vc = ctx.voice_client
        if not vc or not vc.is_playing():
            return await ctx.send("Non c'è niente in riproduzione!", reference=ctx.message, mention_author=False)
        elif vc.is_paused():
            await ctx.invoke(self.resume_)
            return
        vc.pause()
        await ctx.message.add_reaction("🆗")

    @commands.command(name='riprendi', aliases=['unpause', 'resume'], help="Riprendi la riproduzione della canzone")
    async def resume_(self, ctx):
        """Resume a paused track."""
        vc = ctx.voice_client
        if not vc or not vc.is_connected():
            return await ctx.send("Non c'è niente in riproduzione!", reference=ctx.message, mention_author=False)
        elif not vc.is_paused():
            return
        vc.resume()
        await ctx.message.add_reaction("🆗")

    @commands.command(name='skip', aliases=['next','skippa'], help="Salta la canzone corrente")
    async def skip_(self, ctx):
        """Skip the current track by stopping the voice client."""
        vc = ctx.voice_client
        if not vc or not vc.is_connected():
            return await ctx.send("Non c'è niente in riproduzione!", reference=ctx.message, mention_author=False)
        if vc.is_paused():
            pass
        elif not vc.is_playing():
            return
        vc.stop()
        await ctx.message.add_reaction("🆗")

    @commands.command(name='coda', aliases=['q', 'playlist', 'queue'], help="Mostra la coda delle canzoni")
    async def queue_info(self, ctx):
        """Show the pending tracks as a numbered list."""
        vc = ctx.voice_client
        player = self.get_player(ctx)
        if player.queue.empty():
            return await ctx.send('Non ci sono canzoni nella coda.', reference=ctx.message, mention_author=False)
        description = f"```ml\nCoda Canzoni:\n\n\t⬐ prossima traccia\n"
        for position, song in enumerate(player.queue._queue, start=1):
            description += f'{position}) {song["title"]}\n'
        description += '\n\tFine della coda!```'
        await ctx.send(description, reference=ctx.message, mention_author=False)

    @commands.command(name='in_riproduzione', aliases=['np', 'current', 'currentsong', 'playing', 'ir'], help="Mostra la canzone in riproduzione")
    async def now_playing_(self, ctx):
        """Re-post the 'now playing' embed for the current track."""
        vc = ctx.voice_client
        if not vc or not vc.is_connected():
            return await ctx.send('Non sono connesso a un canale vocale!', reference=ctx.message, mention_author=False)
        player = self.get_player(ctx)
        if not player.current:
            return await ctx.send("Non c'è niente in riproduzione!", reference=ctx.message, mention_author=False)
        try:
            # Remove the previous now-playing message before posting a new one.
            await player.np.delete()
        except HTTPException:
            pass
        embed = Embed(title="Ora in riproduzione:", description=f'{vc.source.title} [{ctx.author.mention}]', color=0xfefefe)
        player.np = await ctx.send(embed=embed)

    @commands.command(name='volume', aliases=['vol','v'], help="Cambia il volume della musica")
    async def change_volume(self, ctx, *, vol: float=None):
        """Set the playback volume (1-100) or, with no argument, display it."""
        vc = ctx.voice_client
        if not vc or not vc.is_connected():
            return await ctx.send('Non sono connesso a un canale vocale!', reference=ctx.message, mention_author=False)
        player = self.get_player(ctx)
        if vol is None:
            # No argument given: just report the current volume.
            vol = player.volume*100
        elif not 0 < vol < 101:
            return await ctx.send('Inserisci un valora compreso tra 1 e 100', reference=ctx.message, mention_author=False)
        elif vc.source:
            vc.source.volume = vol / 100
            player.volume = vol / 100
        # Pick an emoji matching the loudness for the confirmation embed.
        if vol >= 80:
            emoji = ':loud_sound:'
        elif 30 < vol < 80:
            emoji = ':sound:'
        elif vol <=30:
            emoji = ':speaker:'
        embed = Embed(title=f'**Volume:** {int(vol)} {emoji}:', color=0xfefefe)
        await ctx.send(embed=embed)

    @commands.command(name='esci', aliases=['stop','leave','fuori'], help="Stoppa la musica (rimuove la coda)")
    async def stop_(self, ctx):
        """Stop playback, clear the queue and leave the voice channel."""
        vc = ctx.voice_client
        await self.cleanup(ctx.guild)
        await ctx.message.add_reaction("🆗")
def setup(bot):
bot.add_cog(Musica(bot)) | [
"discord.ext.commands.Cog.listener",
"discord.FFmpegPCMAudio",
"asyncio.Queue",
"async_timeout.timeout",
"asyncio.Event",
"youtube_dl.YoutubeDL",
"functools.partial",
"asyncio.get_event_loop",
"discord.Embed",
"discord.ext.commands.command"
] | [((842, 861), 'youtube_dl.YoutubeDL', 'YoutubeDL', (['ytdlopts'], {}), '(ytdlopts)\n', (851, 861), False, 'from youtube_dl import YoutubeDL\n'), ((4694, 4717), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ([], {}), '()\n', (4715, 4717), False, 'from discord.ext import commands\n'), ((5833, 5969), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""connetti"""', 'aliases': "['join', 'entra', 'connect', 'connettiti']", 'help': '"""Fai connettere il bot al canale vocale"""'}), "(name='connetti', aliases=['join', 'entra', 'connect',\n 'connettiti'], help='Fai connettere il bot al canale vocale')\n", (5849, 5969), False, 'from discord.ext import commands\n'), ((7049, 7141), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""play"""', 'aliases': "['riproduci', 'p']", 'help': '"""Riproduci una canzone"""'}), "(name='play', aliases=['riproduci', 'p'], help=\n 'Riproduci una canzone')\n", (7065, 7141), False, 'from discord.ext import commands\n'), ((7733, 7828), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""pausa"""', 'aliases': "['pause']", 'help': '"""Pausa la canzone in riproduzione"""'}), "(name='pausa', aliases=['pause'], help=\n 'Pausa la canzone in riproduzione')\n", (7749, 7828), False, 'from discord.ext import commands\n'), ((8205, 8321), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""riprendi"""', 'aliases': "['unpause', 'resume']", 'help': '"""Riprendi la riproduzione della canzone"""'}), "(name='riprendi', aliases=['unpause', 'resume'], help=\n 'Riprendi la riproduzione della canzone')\n", (8221, 8321), False, 'from discord.ext import commands\n'), ((8663, 8759), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""skip"""', 'aliases': "['next', 'skippa']", 'help': '"""Salta la canzone corrente"""'}), "(name='skip', aliases=['next', 'skippa'], help=\n 'Salta la canzone corrente')\n", (8679, 8759), False, 'from discord.ext import commands\n'), 
((9142, 9249), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""coda"""', 'aliases': "['q', 'playlist', 'queue']", 'help': '"""Mostra la coda delle canzoni"""'}), "(name='coda', aliases=['q', 'playlist', 'queue'], help=\n 'Mostra la coda delle canzoni')\n", (9158, 9249), False, 'from discord.ext import commands\n'), ((10104, 10249), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""in_riproduzione"""', 'aliases': "['np', 'current', 'currentsong', 'playing', 'ir']", 'help': '"""Mostra la canzone in riproduzione"""'}), "(name='in_riproduzione', aliases=['np', 'current',\n 'currentsong', 'playing', 'ir'], help='Mostra la canzone in riproduzione')\n", (10120, 10249), False, 'from discord.ext import commands\n'), ((10942, 11038), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""volume"""', 'aliases': "['vol', 'v']", 'help': '"""Cambia il volume della musica"""'}), "(name='volume', aliases=['vol', 'v'], help=\n 'Cambia il volume della musica')\n", (10958, 11038), False, 'from discord.ext import commands\n'), ((11950, 12063), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""esci"""', 'aliases': "['stop', 'leave', 'fuori']", 'help': '"""Stoppa la musica (rimuove la coda)"""'}), "(name='esci', aliases=['stop', 'leave', 'fuori'], help=\n 'Stoppa la musica (rimuove la coda)')\n", (11966, 12063), False, 'from discord.ext import commands\n'), ((1651, 1708), 'functools.partial', 'partial', (['ytdl.extract_info'], {'url': 'search', 'download': 'download'}), '(ytdl.extract_info, url=search, download=download)\n', (1658, 1708), False, 'from functools import partial\n'), ((2330, 2397), 'functools.partial', 'partial', (['ytdl.extract_info'], {'url': "data['webpage_url']", 'download': '(False)'}), "(ytdl.extract_info, url=data['webpage_url'], download=False)\n", (2337, 2397), False, 'from functools import partial\n'), ((2919, 2934), 'asyncio.Queue', 'asyncio.Queue', ([], {}), '()\n', (2932, 2934), 
False, 'import asyncio\n'), ((2955, 2970), 'asyncio.Event', 'asyncio.Event', ([], {}), '()\n', (2968, 2970), False, 'import asyncio\n'), ((6882, 6932), 'discord.Embed', 'Embed', ([], {'title': '"""Entrato in chiamata"""', 'color': '(16711422)'}), "(title='Entrato in chiamata', color=16711422)\n", (6887, 6932), False, 'from discord import Embed, FFmpegPCMAudio, HTTPException, PCMVolumeTransformer, Color\n'), ((10779, 10892), 'discord.Embed', 'Embed', ([], {'title': '"""Ora in riproduzione:"""', 'description': 'f"""{vc.source.title} [{ctx.author.mention}]"""', 'color': '(16711422)'}), "(title='Ora in riproduzione:', description=\n f'{vc.source.title} [{ctx.author.mention}]', color=16711422)\n", (10784, 10892), False, 'from discord import Embed, FFmpegPCMAudio, HTTPException, PCMVolumeTransformer, Color\n'), ((1608, 1632), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1630, 1632), False, 'import asyncio\n'), ((1884, 2023), 'discord.Embed', 'Embed', ([], {'title': '"""Aggiunto alla coda:"""', 'description': 'f"""[{data[\'title\']}]({data[\'webpage_url\']}) [{ctx.author.mention}]"""', 'color': '(16711422)'}), '(title=\'Aggiunto alla coda:\', description=\n f"[{data[\'title\']}]({data[\'webpage_url\']}) [{ctx.author.mention}]",\n color=16711422)\n', (1889, 2023), False, 'from discord import Embed, FFmpegPCMAudio, HTTPException, PCMVolumeTransformer, Color\n'), ((2249, 2273), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2271, 2273), False, 'import asyncio\n'), ((2474, 2581), 'discord.FFmpegPCMAudio', 'FFmpegPCMAudio', (["data['url']"], {'before_options': "ffmpegopts['before_options']", 'options': "ffmpegopts['options']"}), "(data['url'], before_options=ffmpegopts['before_options'],\n options=ffmpegopts['options'])\n", (2488, 2581), False, 'from discord import Embed, FFmpegPCMAudio, HTTPException, PCMVolumeTransformer, Color\n'), ((4041, 4181), 'discord.Embed', 'Embed', ([], {'title': '"""Ora in riproduzione:"""', 
'description': 'f"""[{source.title}]({source.web_url}) [{source.requester.mention}]"""', 'color': '(16711422)'}), "(title='Ora in riproduzione:', description=\n f'[{source.title}]({source.web_url}) [{source.requester.mention}]',\n color=16711422)\n", (4046, 4181), False, 'from discord import Embed, FFmpegPCMAudio, HTTPException, PCMVolumeTransformer, Color\n'), ((3317, 3329), 'async_timeout.timeout', 'timeout', (['(120)'], {}), '(120)\n', (3324, 3329), False, 'from async_timeout import timeout\n')] |
#!/usr/bin/env python
import os
import pickle
import json
import argparse
from collections import defaultdict
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument('path', help="Tf path to event files from which to extract variables")
parser.add_argument('-w', '--write', default=None, type=str, dest='write_dir')
parser.add_argument('-j', '--json', action='store_true')
args = parser.parse_args()


def extract_file(file, values):
    """Append every scalar summary found in *file* to *values* (tag -> list)."""
    for event in tf.compat.v1.train.summary_iterator(file):
        for value in event.summary.value:
            values[value.tag].append(value.simple_value)
    return values


# Collect the event files: a single file, or every
# 'events.out.tfevents.*' found under the directory tree.
if not os.path.exists(args.path):
    print("No such file or directory")
    exit()
if os.path.isfile(args.path):
    files = [args.path]
elif os.path.isdir(args.path):
    files = []
    for directory, _, file_list in os.walk(args.path):
        for file in file_list:
            if file.startswith('events.out.tfevents.'):
                files.append(os.path.join(directory, file))
    if not files:
        print("No event file found")
        exit()
else:
    print("Invalid file type")
    exit()

if args.write_dir and not os.path.exists(args.write_dir):
    os.makedirs(args.write_dir)

values = defaultdict(list)
for file in files:
    values = extract_file(file, values)
    if args.write_dir:
        # Event files are named events.out.tfevents.<timestamp>.<hostname>.
        save_file_nb, computer_name = file.split('.')[-2:]
        extension = '.json' if args.json else '.pkl'
        save_file_name = 'Events_' + str(save_file_nb) + '_' + computer_name + extension
        mode = 'w' if args.json else 'wb'
        with open(os.path.join(args.write_dir, save_file_name), mode) as save_file:
            if args.json:
                json.dump(values, save_file)
            else:
                pickle.dump(values, save_file)

# Plot the episode-reward curves, if any were recorded.
for k, v in values.items():
    if 'rollout/ep_rew_mean' in k:
        plt.plot(v, label=k)
plt.legend()
plt.show()
| [
"os.path.exists",
"pickle.dump",
"argparse.ArgumentParser",
"os.makedirs",
"json.dump",
"tensorflow.compat.v1.train.summary_iterator",
"os.walk",
"matplotlib.pyplot.plot",
"os.path.join",
"os.path.isfile",
"os.path.isdir",
"collections.defaultdict",
"matplotlib.pyplot.legend",
"matplotlib.... | [((198, 223), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (221, 223), False, 'import argparse\n'), ((780, 805), 'os.path.isfile', 'os.path.isfile', (['args.path'], {}), '(args.path)\n', (794, 805), False, 'import os\n'), ((1302, 1319), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1313, 1319), False, 'from collections import defaultdict\n'), ((1964, 1976), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1974, 1976), True, 'import matplotlib.pyplot as plt\n'), ((1977, 1987), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1985, 1987), True, 'import matplotlib.pyplot as plt\n'), ((529, 570), 'tensorflow.compat.v1.train.summary_iterator', 'tf.compat.v1.train.summary_iterator', (['file'], {}), '(file)\n', (564, 570), True, 'import tensorflow as tf\n'), ((699, 724), 'os.path.exists', 'os.path.exists', (['args.path'], {}), '(args.path)\n', (713, 724), False, 'import os\n'), ((837, 861), 'os.path.isdir', 'os.path.isdir', (['args.path'], {}), '(args.path)\n', (850, 861), False, 'import os\n'), ((1264, 1291), 'os.makedirs', 'os.makedirs', (['args.write_dir'], {}), '(args.write_dir)\n', (1275, 1291), False, 'import os\n'), ((913, 931), 'os.walk', 'os.walk', (['args.path'], {}), '(args.path)\n', (920, 931), False, 'import os\n'), ((1228, 1258), 'os.path.exists', 'os.path.exists', (['args.write_dir'], {}), '(args.write_dir)\n', (1242, 1258), False, 'import os\n'), ((1942, 1962), 'matplotlib.pyplot.plot', 'plt.plot', (['v'], {'label': 'k'}), '(v, label=k)\n', (1950, 1962), True, 'import matplotlib.pyplot as plt\n'), ((1668, 1712), 'os.path.join', 'os.path.join', (['args.write_dir', 'save_file_name'], {}), '(args.write_dir, save_file_name)\n', (1680, 1712), False, 'import os\n'), ((1776, 1804), 'json.dump', 'json.dump', (['values', 'save_file'], {}), '(values, save_file)\n', (1785, 1804), False, 'import json\n'), ((1839, 1869), 'pickle.dump', 'pickle.dump', (['values', 'save_file'], {}), 
'(values, save_file)\n', (1850, 1869), False, 'import pickle\n'), ((1049, 1078), 'os.path.join', 'os.path.join', (['directory', 'file'], {}), '(directory, file)\n', (1061, 1078), False, 'import os\n')] |
# TestSwiftPrivateDeclName.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Test that we correctly find private decls
"""
import lldb
from lldbsuite.test.lldbtest import *
import lldbsuite.test.decorators as decorators
import lldbsuite.test.lldbutil as lldbutil
import os
import unittest2
class TestSwiftPrivateDeclName(TestBase):
    """Check that lldb resolves Swift private decls to the right shadowed
    values in each source file."""

    mydir = TestBase.compute_mydir(__file__)

    def setUp(self):
        TestBase.setUp(self)
        # Both sources define a private nested type whose members shadow
        # each other; each gets its own breakpoint.
        self.a_source = "a.swift"
        self.a_source_spec = lldb.SBFileSpec(self.a_source)
        self.b_source = "b.swift"
        self.b_source_spec = lldb.SBFileSpec(self.b_source)

    @decorators.swiftTest
    @decorators.expectedFailureAll(bugnumber="rdar://23236790")
    def test_swift_private_decl_name(self):
        """Test that we correctly find private decls"""
        self.build()
        self.do_test()

    def _check_stop(self, process, breakpoint, a_value, b_summary):
        """Verify the stop at *breakpoint* and the members of variable 'a'.

        :param a_value: expected value of child 'a'.
        :param b_summary: expected summary of child 'b' (child 'c' is
            always 1.25 in both files).
        """
        threads = lldbutil.get_threads_stopped_at_breakpoint(
            process, breakpoint)
        self.assertTrue(len(threads) == 1)
        self.thread = threads[0]
        self.frame = self.thread.frames[0]
        self.assertTrue(self.frame, "Frame 0 is valid.")
        var = self.frame.FindVariable("a")
        child_a = var.GetChildMemberWithName("a")
        child_b = var.GetChildMemberWithName("b")
        child_c = var.GetChildMemberWithName("c")
        lldbutil.check_variable(self, var, False, typename="a.S.A")
        lldbutil.check_variable(self, child_a, False, value=a_value)
        lldbutil.check_variable(self, child_b, False, b_summary)
        lldbutil.check_variable(self, child_c, False, value='1.25')

    def do_test(self):
        # BUG FIX (review): do_test() was invoked by the test method but
        # never defined, so the test died with AttributeError before
        # exercising anything; the body below now lives here.
        exe_name = "a.out"
        exe = self.getBuildArtifact(exe_name)
        # Create the target.
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)
        # Set one breakpoint per source file.
        a_breakpoint = target.BreakpointCreateBySourceRegex(
            'break here', self.a_source_spec)
        self.assertTrue(a_breakpoint.GetNumLocations() > 0, VALID_BREAKPOINT)
        b_breakpoint = target.BreakpointCreateBySourceRegex(
            'break here', self.b_source_spec)
        self.assertTrue(b_breakpoint.GetNumLocations() > 0, VALID_BREAKPOINT)
        process = target.LaunchSimple(None, None, os.getcwd())
        self.assertTrue(process, PROCESS_IS_VALID)
        # a.swift's private S.A: a == 1, b == "hello".
        self._check_stop(process, a_breakpoint, "1", '"hello"')
        process.Continue()
        # b.swift's private S.A: a == 3, b == "goodbye".
        self._check_stop(process, b_breakpoint, "3", '"goodbye"')
if __name__ == '__main__':
    import atexit
    # Initialize the debugger and make sure it is torn down on exit.
    lldb.SBDebugger.Initialize()
    atexit.register(lldb.SBDebugger.Terminate)
    unittest2.main()
| [
"unittest2.main",
"atexit.register",
"os.getcwd",
"lldbsuite.test.lldbutil.check_variable",
"lldbsuite.test.lldbutil.get_threads_stopped_at_breakpoint",
"lldbsuite.test.decorators.expectedFailureAll",
"lldb.SBDebugger.Initialize",
"lldb.SBFileSpec"
] | [((1037, 1095), 'lldbsuite.test.decorators.expectedFailureAll', 'decorators.expectedFailureAll', ([], {'bugnumber': '"""rdar://23236790"""'}), "(bugnumber='rdar://23236790')\n", (1066, 1095), True, 'import lldbsuite.test.decorators as decorators\n'), ((3497, 3525), 'lldb.SBDebugger.Initialize', 'lldb.SBDebugger.Initialize', ([], {}), '()\n', (3523, 3525), False, 'import lldb\n'), ((3530, 3572), 'atexit.register', 'atexit.register', (['lldb.SBDebugger.Terminate'], {}), '(lldb.SBDebugger.Terminate)\n', (3545, 3572), False, 'import atexit\n'), ((3577, 3593), 'unittest2.main', 'unittest2.main', ([], {}), '()\n', (3591, 3593), False, 'import unittest2\n'), ((880, 910), 'lldb.SBFileSpec', 'lldb.SBFileSpec', (['self.a_source'], {}), '(self.a_source)\n', (895, 910), False, 'import lldb\n'), ((974, 1004), 'lldb.SBFileSpec', 'lldb.SBFileSpec', (['self.b_source'], {}), '(self.b_source)\n', (989, 1004), False, 'import lldb\n'), ((1967, 2032), 'lldbsuite.test.lldbutil.get_threads_stopped_at_breakpoint', 'lldbutil.get_threads_stopped_at_breakpoint', (['process', 'a_breakpoint'], {}), '(process, a_breakpoint)\n', (2009, 2032), True, 'import lldbsuite.test.lldbutil as lldbutil\n'), ((2425, 2484), 'lldbsuite.test.lldbutil.check_variable', 'lldbutil.check_variable', (['self', 'var', '(False)'], {'typename': '"""a.S.A"""'}), "(self, var, False, typename='a.S.A')\n", (2448, 2484), True, 'import lldbsuite.test.lldbutil as lldbutil\n'), ((2493, 2549), 'lldbsuite.test.lldbutil.check_variable', 'lldbutil.check_variable', (['self', 'child_a', '(False)'], {'value': '"""1"""'}), "(self, child_a, False, value='1')\n", (2516, 2549), True, 'import lldbsuite.test.lldbutil as lldbutil\n'), ((2558, 2614), 'lldbsuite.test.lldbutil.check_variable', 'lldbutil.check_variable', (['self', 'child_b', '(False)', '""""hello\\""""'], {}), '(self, child_b, False, \'"hello"\')\n', (2581, 2614), True, 'import lldbsuite.test.lldbutil as lldbutil\n'), ((2623, 2682), 'lldbsuite.test.lldbutil.check_variable', 
'lldbutil.check_variable', (['self', 'child_c', '(False)'], {'value': '"""1.25"""'}), "(self, child_c, False, value='1.25')\n", (2646, 2682), True, 'import lldbsuite.test.lldbutil as lldbutil\n'), ((2729, 2794), 'lldbsuite.test.lldbutil.get_threads_stopped_at_breakpoint', 'lldbutil.get_threads_stopped_at_breakpoint', (['process', 'b_breakpoint'], {}), '(process, b_breakpoint)\n', (2771, 2794), True, 'import lldbsuite.test.lldbutil as lldbutil\n'), ((3187, 3246), 'lldbsuite.test.lldbutil.check_variable', 'lldbutil.check_variable', (['self', 'var', '(False)'], {'typename': '"""a.S.A"""'}), "(self, var, False, typename='a.S.A')\n", (3210, 3246), True, 'import lldbsuite.test.lldbutil as lldbutil\n'), ((3255, 3311), 'lldbsuite.test.lldbutil.check_variable', 'lldbutil.check_variable', (['self', 'child_a', '(False)'], {'value': '"""3"""'}), "(self, child_a, False, value='3')\n", (3278, 3311), True, 'import lldbsuite.test.lldbutil as lldbutil\n'), ((3320, 3378), 'lldbsuite.test.lldbutil.check_variable', 'lldbutil.check_variable', (['self', 'child_b', '(False)', '""""goodbye\\""""'], {}), '(self, child_b, False, \'"goodbye"\')\n', (3343, 3378), True, 'import lldbsuite.test.lldbutil as lldbutil\n'), ((3387, 3446), 'lldbsuite.test.lldbutil.check_variable', 'lldbutil.check_variable', (['self', 'child_c', '(False)'], {'value': '"""1.25"""'}), "(self, child_c, False, value='1.25')\n", (3410, 3446), True, 'import lldbsuite.test.lldbutil as lldbutil\n'), ((1884, 1895), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1893, 1895), False, 'import os\n')] |
import subprocess
def Settings(**kwargs):
    """Return the compiler flags ycmd should use for this project.

    The static flag list is extended with whatever ncursesw5-config
    reports as its compile flags.
    """
    flags = [
        '-x',
        'c++',
        '-Wall',
        '-Wextra',
        '-Wno-unused-parameter',
        '-std=c++14',
        '-I',
        '.',
        '-I', 'third_party/googletest/googletest/include',
        '-I', 'third_party/abseil-cpp',
        '-I', 'third_party/libbcf',
        '-pthread',
    ]
    # BUG FIX: on Python 3 check_output() returns bytes, so the original
    # .split('\n') raised TypeError.  Decode first; split() without an
    # argument also tokenizes the flags on whitespace instead of yielding
    # one big string plus a trailing empty entry.
    ncurses_cflags = subprocess.check_output(['ncursesw5-config', '--cflags'])
    flags += ncurses_cflags.decode('utf-8').split()
    return {
        'flags': flags,
    }
| [
"subprocess.check_output"
] | [((329, 386), 'subprocess.check_output', 'subprocess.check_output', (["['ncursesw5-config', '--cflags']"], {}), "(['ncursesw5-config', '--cflags'])\n", (352, 386), False, 'import subprocess\n')] |
"""
Modeling Relational Data with Graph Convolutional Networks
Paper: https://arxiv.org/abs/1703.06103
Code: https://github.com/tkipf/relational-gcn
Difference compared to tkipf/relational-gcn
* l2norm applied to all weights
* remove nodes that won't be touched
"""
import argparse
import numpy as np
import time
import torch
import torch.nn.functional as F
from dgl import DGLGraph
from dgl.nn.pytorch import RelGraphConv
from dgl.contrib.data import load_data
from functools import partial
from model import BaseRGCN
class EntityClassify(BaseRGCN):
    """RGCN model for entity classification on featureless graphs."""

    def create_features(self):
        # Nodes carry no features, so node ids are fed as input; the
        # first RelGraphConv treats them as indices into an embedding.
        ids = torch.arange(self.num_nodes)
        if self.use_cuda:
            ids = ids.cuda()
        return ids

    def build_input_layer(self):
        # node-id -> hidden
        return RelGraphConv(self.num_nodes, self.h_dim, self.num_rels, "basis",
                            self.num_bases, activation=F.relu,
                            self_loop=self.use_self_loop, dropout=self.dropout)

    def build_hidden_layer(self, idx):
        # hidden -> hidden; identical shape for every intermediate layer.
        return RelGraphConv(self.h_dim, self.h_dim, self.num_rels, "basis",
                            self.num_bases, activation=F.relu,
                            self_loop=self.use_self_loop, dropout=self.dropout)

    def build_output_layer(self):
        # hidden -> per-class scores.
        # NOTE(review): the softmax activation here is later fed to
        # F.cross_entropy in main(), which applies log-softmax again;
        # kept as-is to match the upstream example, but worth confirming.
        return RelGraphConv(self.h_dim, self.out_dim, self.num_rels, "basis",
                            self.num_bases,
                            activation=partial(F.softmax, dim=1),
                            self_loop=self.use_self_loop)
def main(args):
    """Train and evaluate an RGCN entity classifier on the chosen dataset."""
    # Load graph data (bfs_level/relabel prune nodes propagation can't touch).
    data = load_data(args.dataset, bfs_level=args.bfs_level, relabel=args.relabel)
    num_nodes = data.num_nodes
    num_rels = data.num_rels
    num_classes = data.num_classes
    labels = data.labels
    train_idx = data.train_idx
    test_idx = data.test_idx

    # Carve a validation set out of the first fifth of the training indices,
    # unless --testing was requested.
    if args.validation:
        val_idx = train_idx[:len(train_idx) // 5]
        train_idx = train_idx[len(train_idx) // 5:]
    else:
        val_idx = train_idx

    # Featureless nodes: the node id itself is the input feature.
    feats = torch.arange(num_nodes)

    # Per-edge relation type and normalization factor.
    edge_type = torch.from_numpy(data.edge_type)
    edge_norm = torch.from_numpy(data.edge_norm).unsqueeze(1)
    labels = torch.from_numpy(labels).view(-1)

    use_cuda = args.gpu >= 0 and torch.cuda.is_available()
    if use_cuda:
        torch.cuda.set_device(args.gpu)
        feats = feats.cuda()
        edge_type = edge_type.cuda()
        edge_norm = edge_norm.cuda()
        labels = labels.cuda()

    # Build the graph.
    g = DGLGraph()
    g.add_nodes(num_nodes)
    g.add_edges(data.edge_src, data.edge_dst)

    model = EntityClassify(len(g),
                           args.n_hidden,
                           num_classes,
                           num_rels,
                           num_bases=args.n_bases,
                           num_hidden_layers=args.n_layers - 2,
                           dropout=args.dropout,
                           use_self_loop=args.use_self_loop,
                           use_cuda=use_cuda)
    if use_cuda:
        model.cuda()

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2norm)

    print("start training...")
    forward_time = []
    backward_time = []
    model.train()
    for epoch in range(args.n_epochs):
        optimizer.zero_grad()
        t0 = time.time()
        logits = model(g, feats, edge_type, edge_norm)
        # Synchronize so the forward timing is accurate on GPU.
        torch.cuda.synchronize()
        t1 = time.time()
        loss = F.cross_entropy(logits[train_idx], labels[train_idx])
        loss.backward()
        optimizer.step()
        t2 = time.time()
        forward_time.append(t1 - t0)
        backward_time.append(t2 - t1)
        print("Epoch {:05d} | Train Forward Time(s) {:.4f} | Backward Time(s) {:.4f}".
              format(epoch, forward_time[-1], backward_time[-1]))
        train_acc = torch.sum(logits[train_idx].argmax(dim=1) == labels[train_idx]).item() / len(train_idx)
        val_loss = F.cross_entropy(logits[val_idx], labels[val_idx])
        val_acc = torch.sum(logits[val_idx].argmax(dim=1) == labels[val_idx]).item() / len(val_idx)
        # Per-epoch accuracy report (kept quiet, as upstream):
        #print("Train Accuracy: {:.4f} | Train Loss: {:.4f} | Validation Accuracy: {:.4f} | Validation loss: {:.4f}".
        #      format(train_acc, loss.item(), val_acc, val_loss.item()))
        print()

    model.eval()
    logits = model.forward(g, feats, edge_type, edge_norm)
    test_loss = F.cross_entropy(logits[test_idx], labels[test_idx])
    test_acc = torch.sum(logits[test_idx].argmax(dim=1) == labels[test_idx]).item() / len(test_idx)
    print("Test Accuracy: {:.4f} | Test loss: {:.4f}".format(test_acc, test_loss.item()))
    print()
    # Mean timings over the last three quarters of the epochs (skip warm-up).
    print("Mean forward time: {:4f}".format(np.mean(forward_time[len(forward_time) // 4:])))
    print("Mean backward time: {:4f}".format(np.mean(backward_time[len(backward_time) // 4:])))
print("Mean backward time: {:4f}".format(np.mean(backward_time[len(backward_time) // 4:])))
if __name__ == '__main__':
    # Command-line interface for the RGCN entity-classification example.
    parser = argparse.ArgumentParser(description='RGCN')
    parser.add_argument("--dropout", type=float, default=0,
                        help="dropout probability")
    parser.add_argument("--n-hidden", type=int, default=16,
                        help="number of hidden units")
    parser.add_argument("--gpu", type=int, default=-1,
                        help="gpu")
    parser.add_argument("--lr", type=float, default=1e-2,
                        help="learning rate")
    parser.add_argument("--n-bases", type=int, default=-1,
                        help="number of filter weight matrices, default: -1 [use all]")
    parser.add_argument("--n-layers", type=int, default=2,
                        help="number of propagation rounds")
    parser.add_argument("-e", "--n-epochs", type=int, default=50,
                        help="number of training epochs")
    parser.add_argument("-d", "--dataset", type=str, required=True,
                        help="dataset to use")
    parser.add_argument("--l2norm", type=float, default=0,
                        help="l2 norm coef")
    parser.add_argument("--relabel", default=False, action='store_true',
                        help="remove untouched nodes and relabel")
    parser.add_argument("--use-self-loop", default=False, action='store_true',
                        help="include self feature as a special relation")
    fp = parser.add_mutually_exclusive_group(required=False)
    fp.add_argument('--validation', dest='validation', action='store_true')
    fp.add_argument('--testing', dest='validation', action='store_false')
    parser.set_defaults(validation=True)
    args = parser.parse_args()
    print(args)
    args.bfs_level = args.n_layers + 1  # pruning used nodes for memory
    main(args)
| [
"argparse.ArgumentParser",
"dgl.nn.pytorch.RelGraphConv",
"torch.from_numpy",
"dgl.contrib.data.load_data",
"torch.cuda.synchronize",
"torch.cuda.is_available",
"functools.partial",
"dgl.DGLGraph",
"torch.nn.functional.cross_entropy",
"time.time",
"torch.cuda.set_device",
"torch.arange"
] | [((1468, 1539), 'dgl.contrib.data.load_data', 'load_data', (['args.dataset'], {'bfs_level': 'args.bfs_level', 'relabel': 'args.relabel'}), '(args.dataset, bfs_level=args.bfs_level, relabel=args.relabel)\n', (1477, 1539), False, 'from dgl.contrib.data import load_data\n'), ((2023, 2046), 'torch.arange', 'torch.arange', (['num_nodes'], {}), '(num_nodes)\n', (2035, 2046), False, 'import torch\n'), ((2105, 2137), 'torch.from_numpy', 'torch.from_numpy', (['data.edge_type'], {}), '(data.edge_type)\n', (2121, 2137), False, 'import torch\n'), ((2543, 2553), 'dgl.DGLGraph', 'DGLGraph', ([], {}), '()\n', (2551, 2553), False, 'from dgl import DGLGraph\n'), ((4486, 4537), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits[test_idx]', 'labels[test_idx]'], {}), '(logits[test_idx], labels[test_idx])\n', (4501, 4537), True, 'import torch.nn.functional as F\n'), ((4972, 5015), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""RGCN"""'}), "(description='RGCN')\n", (4995, 5015), False, 'import argparse\n'), ((603, 631), 'torch.arange', 'torch.arange', (['self.num_nodes'], {}), '(self.num_nodes)\n', (615, 631), False, 'import torch\n'), ((770, 931), 'dgl.nn.pytorch.RelGraphConv', 'RelGraphConv', (['self.num_nodes', 'self.h_dim', 'self.num_rels', '"""basis"""', 'self.num_bases'], {'activation': 'F.relu', 'self_loop': 'self.use_self_loop', 'dropout': 'self.dropout'}), "(self.num_nodes, self.h_dim, self.num_rels, 'basis', self.\n num_bases, activation=F.relu, self_loop=self.use_self_loop, dropout=\n self.dropout)\n", (782, 931), False, 'from dgl.nn.pytorch import RelGraphConv\n'), ((1009, 1160), 'dgl.nn.pytorch.RelGraphConv', 'RelGraphConv', (['self.h_dim', 'self.h_dim', 'self.num_rels', '"""basis"""', 'self.num_bases'], {'activation': 'F.relu', 'self_loop': 'self.use_self_loop', 'dropout': 'self.dropout'}), "(self.h_dim, self.h_dim, self.num_rels, 'basis', self.num_bases,\n activation=F.relu, self_loop=self.use_self_loop, 
dropout=self.dropout)\n", (1021, 1160), False, 'from dgl.nn.pytorch import RelGraphConv\n'), ((2298, 2323), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2321, 2323), False, 'import torch\n'), ((2349, 2380), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.gpu'], {}), '(args.gpu)\n', (2370, 2380), False, 'import torch\n'), ((3416, 3427), 'time.time', 'time.time', ([], {}), '()\n', (3425, 3427), False, 'import time\n'), ((3491, 3515), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (3513, 3515), False, 'import torch\n'), ((3529, 3540), 'time.time', 'time.time', ([], {}), '()\n', (3538, 3540), False, 'import time\n'), ((3556, 3609), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits[train_idx]', 'labels[train_idx]'], {}), '(logits[train_idx], labels[train_idx])\n', (3571, 3609), True, 'import torch.nn.functional as F\n'), ((3672, 3683), 'time.time', 'time.time', ([], {}), '()\n', (3681, 3683), False, 'import time\n'), ((4040, 4089), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits[val_idx]', 'labels[val_idx]'], {}), '(logits[val_idx], labels[val_idx])\n', (4055, 4089), True, 'import torch.nn.functional as F\n'), ((2154, 2186), 'torch.from_numpy', 'torch.from_numpy', (['data.edge_norm'], {}), '(data.edge_norm)\n', (2170, 2186), False, 'import torch\n'), ((2213, 2237), 'torch.from_numpy', 'torch.from_numpy', (['labels'], {}), '(labels)\n', (2229, 2237), False, 'import torch\n'), ((1345, 1370), 'functools.partial', 'partial', (['F.softmax'], {'dim': '(1)'}), '(F.softmax, dim=1)\n', (1352, 1370), False, 'from functools import partial\n')] |
import subprocess
from hop import Stream
from hop.auth import Auth
from hop import auth
from hop.io import StartPosition
from hop.models import GCNCircular
import argparse
import random
import threading
import time
from functools import wraps
import datetime
import numpy
import uuid
from dotenv import load_dotenv
import os
from unittest.mock import Mock
import unittest
from mongoengine import connect, disconnect
# from hypothesis import given
# from hypothesis.strategies import lists, integers
# from hop.apps.SNalert import model as M
# from hop.apps.SNalert import decider
# from hop.apps.SNalert import db_storage
# from . import demo
# from .. import test_anything
# Candidate detector locations used when fabricating test messages.
test_locations = ["Houston", "New York", "Boston", "Not Texas"]
# load environment variables
load_dotenv(dotenv_path='./../.env')
# for measuring function execution time
# https://stackoverflow.com/questions/3620943/measuring-elapsed-time-with-the-time-module
# Per-function profiling statistics: name -> [call_count, [elapsed_times]].
PROF_DATA = {}


def profile(fn):
    """Decorator that records call counts and elapsed times in PROF_DATA.

    Each call of the wrapped function increments its call counter and
    appends the elapsed seconds to its timing list. The wrapped function's
    return value is passed through unchanged.
    """
    @wraps(fn)
    def with_profiling(*args, **kwargs):
        # perf_counter is monotonic and high resolution; time.time() is
        # wall-clock and can jump backwards on clock adjustments.
        start_time = time.perf_counter()
        ret = fn(*args, **kwargs)
        elapsed_time = time.perf_counter() - start_time

        entry = PROF_DATA.setdefault(fn.__name__, [0, []])
        entry[0] += 1
        entry[1].append(elapsed_time)

        return ret

    return with_profiling
def print_prof_data():
    """Print call counts and timing statistics for every profiled function."""
    for fname, (calls, timings) in PROF_DATA.items():
        print("Function %s called %d times. " % (fname, calls))
        print('Execution time max: %.3f, average: %.3f' % (
            max(timings), sum(timings) / len(timings)))
def clear_prof_data():
    """Discard every statistic gathered by the @profile decorator."""
    global PROF_DATA
    # Rebind (rather than mutate) so the old mapping is dropped wholesale.
    PROF_DATA = {}
def exponentialDistribution(mean):
    """
    Draw one sample from an exponential distribution.

    :param mean: Mean (scale parameter) of the exponential distribution.
    :return: A non-negative float sampled from the distribution.
    """
    sample = numpy.random.exponential(mean)
    return sample
class integrationTest(object):
    """Integration test harness: runs the SNalert model as a subprocess
    while publishing randomly timed test messages and counting what the
    model emits on the observation topic."""
    # @given(
    #     timeout=integers(min_value=1),
    #     mean=integers(min_value=1),
    #     totalTime=integers(min_value=1)
    # )
    def __init__(self, timeout, mean, totalTime):
        """
        The constructor.

        :param timeout: Time expiration parameter (stored as ``timeOut``;
            not referenced elsewhere in this class).
        :param mean: Mean of the exponential distribution that spaces
            successive published messages.
        :param totalTime: Total number of seconds to keep publishing.
        """
        self.count = 0
        # Topic that readNumMsg() listens on; configured via environment.
        self.topic = os.getenv("OBSERVATION_TOPIC")
        self.mean = mean
        self.totalTime = totalTime
        # self.minTime = min
        # self.maxTime = max
        self.timeOut = timeout
        # Credentials come from the environment loaded by load_dotenv above.
        self.auth = Auth(os.getenv("USERNAME"), os.getenv("PASSWORD"), method=auth.SASLMethod.PLAIN)
    def run(self):
        """
        Run the model for the integration test.

        Starts a reader thread, launches the SNalert model as a subprocess,
        then publishes messages at exponentially distributed intervals until
        ``totalTime`` elapses; finally kills the subprocess.
        :return: none
        """
        t1 = threading.Thread(target=self.readNumMsg, args=(self.topic,))
        t1.start()
        m = subprocess.Popen(['python3',
                              '../hop/apps/SNalert/model.py',
                              '--f',
                              './../config.env',
                              '--no-auth'
                              ])
        startTime = time.monotonic()
        # randomly publish messages
        while time.monotonic() - startTime < self.totalTime:
            # randomTime = random.randint(self.minTime, self.maxTime)
            randomTime = exponentialDistribution(self.mean)
            start2 = time.monotonic()
            # NOTE(review): this busy-wait spins a CPU core for the whole
            # interval; time.sleep(randomTime) would be gentler if precise
            # spin timing is not required.
            while True:
                if time.monotonic() - start2 > randomTime:
                    break
            # write message with current time
            now = datetime.datetime.utcnow().strftime(os.getenv("TIME_STRING_FORMAT"))
            # newFileName = self.writeMessage(now)
            stream = Stream(auth=self.auth)
            with stream.open(os.getenv("TESTING_TOPIC"), "w") as s:
                s.write(self.writeMessage(now))
        m.kill()
    def readNumMsg(self, topic):
        """
        Read the number of alert messages.

        Blocks on the stream and increments ``self.count`` once per message
        received on ``topic``.
        :param topic: Kafka topic to listen on.
        :return: none (runs until the stream closes)
        """
        # gcnFormat = "json"
        stream = Stream(persist=True, auth=self.auth)
        # print("===")
        # print(topic)
        with stream.open(topic, "r") as s:
            for msg in s:  # set timeout=0 so it doesn't stop listening to the topic
                print("====")
                # if gcn_dict['header']['subject'] == "TEST":
                #     self.count += 1
                self.count += 1
    def getCount(self):
        # Number of messages observed so far by readNumMsg().
        return self.count
    def writeMessage(self, time):
        """Build one observation message dict whose sent/neutrino times are
        the given ``time`` string.

        NOTE: the parameter name shadows the ``time`` module inside this
        method; the module is not needed here, so this is harmless.
        """
        msg = {}
        msg["header"] = {}
        msg["header"]["MESSAGE ID"] = str(uuid.uuid4())
        msg["header"]["DETECTOR"] = "Test Detector"
        msg["header"]["SUBJECT"] = "Test"
        msg["header"]["MESSAGE SENT TIME"] = time
        msg["header"]["NEUTRINO TIME"] = time
        msg["header"]["LOCATION"] = test_locations[random.randint(0, 3)]
        msg["header"]["P VALUE"] = "0.5"
        msg["header"]["STATUS"] = "On"
        msg["header"]["MESSAGE TYPE"] = "Observation"
        msg["header"]["FROM"] = "<NAME> <<EMAIL>>"
        msg["body"] = "This is an alert message generated at run time for testing purposes."
        return msg
# def functionalTest():
#
# pass
class latencyTest(object):
    """Latency test harness: many simulated detector threads publish
    messages to one topic while a logger thread reads them back and
    accumulates the send-to-receive latency."""
    def __init__(self, topic, numDetector=50, time=3000):
        """
        The constructor.

        :param topic: Kafka topic URL to publish to and read from.
        :param numDetector: Number of simulated detector threads.
        :param time: Seconds each detector keeps publishing. NOTE: the
            parameter name shadows the ``time`` module inside __init__ only.
        """
        self.numMsgPublished = 0
        self.numMsgReceived = 0
        self.totalLatency = 0
        self.numDetector = numDetector
        self.detectorThreads = {}
        self.countMsg = {}
        self.totalTime = time
        self.topic = topic
        self.auth = Auth(os.getenv("USERNAME"), os.getenv("PASSWORD"), method=auth.SASLMethod.PLAIN)
        self.idsWritten = set()
        self.idsReceived = set()
        # Protects the shared counters and ID sets across threads.
        self.lock = threading.Lock()
    def oneDetectorThread(self, uuid):
        """Publish messages for this detector until ``totalTime`` elapses.

        NOTE: the ``uuid`` parameter (a detector ID string) shadows the
        ``uuid`` module inside this method; the module is not used here.
        """
        # lock = threading.Lock()
        print(uuid)
        # print(timeout)
        startTime = time.monotonic()
        # randomly publish messages
        while time.monotonic() - startTime < self.totalTime:
            # print(time.monotonic() - startTime)
            # print(self.totalTime)
            # msg = self.writeMessage(uuid)
            stream = Stream(auth=self.auth)
            with stream.open(self.topic, "w") as s:
                msg = self.writeMessage(uuid)
                s.write(msg)
                with self.lock:
                    self.numMsgPublished += 1
                    self.idsWritten.add(msg["header"]["MESSAGE ID"])
    # def countWrittenMsgThread(self):
    def runTest(self):
        """
        Run the latency test: start the reader thread, wait ~10 seconds for
        it to be ready, then start one publisher thread per detector.
        :return: none
        """
        # create the topic if doesn't exist
        stream = Stream(auth=self.auth)
        # with stream.open(self.topic, "w") as s:
        #     s.write({"TEST": "TEST"})
        # first run the thread that logs every message received
        logThread = threading.Thread(target=self.logMsgs)
        logThread.start()
        # wait a few seconds
        startTime = time.monotonic()
        # randomly publish messages
        # NOTE(review): busy-wait; time.sleep(10) would avoid spinning a core.
        while time.monotonic() - startTime < 10:
            foo = 1
        for i in range(self.numDetector):
            # print(i)
            # NOTE: ``id`` shadows the builtin within this loop body.
            id = uuid.uuid4()
            # print(id)
            t = threading.Thread(target=self.oneDetectorThread, args=(str(id),))
            # self.oneDetectorThread(id)
            self.detectorThreads[id] = t
            t.start()
        # # first run the thread that logs every message received
        # logThread = threading.Thread(target=self.logMsgs)
        # logThread.start()
    def countMsgThread(self, msg_dict):
        """
        Process one received message: compute its send-to-receive latency
        and fold it into the shared totals under the lock.
        :param msg_dict: The message content dict (header/body).
        :return: none
        """
        # msg_dict = msg.asdict()['content']
        id = msg_dict['header']['DETECTOR']
        msg_id = msg_dict["header"]["MESSAGE ID"]
        receivedTime = datetime.datetime.utcnow().strftime(os.getenv("TIME_STRING_FORMAT"))
        sentTime = msg_dict['header']['MESSAGE SENT TIME']
        timeDiff = datetime.datetime.strptime(receivedTime, os.getenv("TIME_STRING_FORMAT")) - datetime.datetime.strptime(sentTime, os.getenv("TIME_STRING_FORMAT"))
        timeDiff_inSeconds = timeDiff.total_seconds()
        # print("HERE")
        with self.lock:
            # print("____")
            self.numMsgReceived += 1
            self.totalLatency += timeDiff_inSeconds
            self.idsReceived.add(msg_id)
    def logMsgs(self):
        """Read messages from the topic forever, handing each one to a
        countMsgThread worker."""
        # stream = Stream(persist=True, auth=self.auth, start_at=StartPosition.EARLIEST)
        stream = Stream(persist=True, auth=self.auth)
        with stream.open(self.topic, "r") as s:
            for msg in s:  # set timeout=0 so it doesn't stop listening to the topic
                t = threading.Thread(target=self.countMsgThread, args=(msg.asdict()['content'],))
                t.start()
    def calculateAvgLatency(self):
        """
        Calculate the mean latency in seconds over all received messages.
        Raises ZeroDivisionError if no message has been received yet.
        :return: average latency (float)
        """
        return self.totalLatency * 1.0 / self.numMsgReceived
    def writeMessage(self, detector_id):
        """
        Return a dictionary of the message in the required format.
        :param detector_id: Identifier string of the publishing detector.
        :return: message dict with header and body
        """
        now = datetime.datetime.utcnow().strftime(os.getenv("TIME_STRING_FORMAT"))
        msg = {}
        msg["header"] = {}
        msg["header"]["MESSAGE ID"] = str(uuid.uuid4())
        msg["header"]["DETECTOR"] = detector_id
        msg["header"]["SUBJECT"] = "Test"
        msg["header"]["MESSAGE SENT TIME"] = now
        msg["header"]["NEUTRINO TIME"] = now
        msg["header"]["LOCATION"] = test_locations[random.randint(0, 3)]
        msg["header"]["P VALUE"] = "0.5"
        msg["header"]["STATUS"] = "On"
        msg["header"]["MESSAGE TYPE"] = "Latency Testing"
        msg["header"]["FROM"] = "<NAME> <<EMAIL>>"
        msg["body"] = "This is an alert message generated at run time for testing message latency."
        return msg
    def check(self):
        # Sanity check: every published message must also have been received.
        assert self.numMsgReceived == self.numMsgPublished
if __name__ == '__main__':
    # Driver script: run the latency test with 5 detectors publishing for
    # 50 seconds, wait for in-flight messages, then report statistics.
    print("Latency Test")
    print("----------------------------------------")
    print("Integration Test #1")
    test = latencyTest("kafka://dev.hop.scimma.org:9092/snews-latencyTest", 5, 50)
    print(test.totalTime)
    test.runTest()
    print("------")
    # Wait for outstanding messages to arrive. The original busy-wait
    # (`while time.monotonic() - startTime < 100: foo = 1`) spun a CPU
    # core for the full 100 seconds; sleeping is equivalent here.
    time.sleep(100)
    print(test.calculateAvgLatency())
    print(" %d messages written." % test.numMsgPublished)
    print(" %d messages received and read." % test.numMsgReceived)
    assert test.numMsgPublished == test.numMsgReceived
| [
"os.getenv",
"datetime.datetime.utcnow",
"subprocess.Popen",
"time.monotonic",
"threading.Lock",
"numpy.random.exponential",
"functools.wraps",
"dotenv.load_dotenv",
"hop.Stream",
"uuid.uuid4",
"threading.Thread",
"time.time",
"random.randint"
] | [((774, 810), 'dotenv.load_dotenv', 'load_dotenv', ([], {'dotenv_path': '"""./../.env"""'}), "(dotenv_path='./../.env')\n", (785, 810), False, 'from dotenv import load_dotenv\n'), ((981, 990), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (986, 990), False, 'from functools import wraps\n'), ((1905, 1935), 'numpy.random.exponential', 'numpy.random.exponential', (['mean'], {}), '(mean)\n', (1929, 1935), False, 'import numpy\n'), ((10437, 10453), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (10451, 10453), False, 'import time\n'), ((1053, 1064), 'time.time', 'time.time', ([], {}), '()\n', (1062, 1064), False, 'import time\n'), ((2352, 2382), 'os.getenv', 'os.getenv', (['"""OBSERVATION_TOPIC"""'], {}), "('OBSERVATION_TOPIC')\n", (2361, 2382), False, 'import os\n'), ((2761, 2821), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.readNumMsg', 'args': '(self.topic,)'}), '(target=self.readNumMsg, args=(self.topic,))\n', (2777, 2821), False, 'import threading\n'), ((2854, 2958), 'subprocess.Popen', 'subprocess.Popen', (["['python3', '../hop/apps/SNalert/model.py', '--f', './../config.env',\n '--no-auth']"], {}), "(['python3', '../hop/apps/SNalert/model.py', '--f',\n './../config.env', '--no-auth'])\n", (2870, 2958), False, 'import subprocess\n'), ((3127, 3143), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (3141, 3143), False, 'import time\n'), ((4097, 4133), 'hop.Stream', 'Stream', ([], {'persist': '(True)', 'auth': 'self.auth'}), '(persist=True, auth=self.auth)\n', (4103, 4133), False, 'from hop import Stream\n'), ((5831, 5847), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (5845, 5847), False, 'import threading\n'), ((5987, 6003), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (6001, 6003), False, 'import time\n'), ((6745, 6767), 'hop.Stream', 'Stream', ([], {'auth': 'self.auth'}), '(auth=self.auth)\n', (6751, 6767), False, 'from hop import Stream\n'), ((6943, 6980), 'threading.Thread', 'threading.Thread', ([], 
{'target': 'self.logMsgs'}), '(target=self.logMsgs)\n', (6959, 6980), False, 'import threading\n'), ((7057, 7073), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (7071, 7073), False, 'import time\n'), ((8666, 8702), 'hop.Stream', 'Stream', ([], {'persist': '(True)', 'auth': 'self.auth'}), '(persist=True, auth=self.auth)\n', (8672, 8702), False, 'from hop import Stream\n'), ((1124, 1135), 'time.time', 'time.time', ([], {}), '()\n', (1133, 1135), False, 'import time\n'), ((2558, 2579), 'os.getenv', 'os.getenv', (['"""USERNAME"""'], {}), "('USERNAME')\n", (2567, 2579), False, 'import os\n'), ((2581, 2602), 'os.getenv', 'os.getenv', (['"""PASSWORD"""'], {}), "('PASSWORD')\n", (2590, 2602), False, 'import os\n'), ((3392, 3408), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (3406, 3408), False, 'import time\n'), ((3723, 3745), 'hop.Stream', 'Stream', ([], {'auth': 'self.auth'}), '(auth=self.auth)\n', (3729, 3745), False, 'from hop import Stream\n'), ((4642, 4654), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4652, 4654), False, 'import uuid\n'), ((4897, 4917), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (4911, 4917), False, 'import random\n'), ((5669, 5690), 'os.getenv', 'os.getenv', (['"""USERNAME"""'], {}), "('USERNAME')\n", (5678, 5690), False, 'import os\n'), ((5692, 5713), 'os.getenv', 'os.getenv', (['"""PASSWORD"""'], {}), "('PASSWORD')\n", (5701, 5713), False, 'import os\n'), ((6252, 6274), 'hop.Stream', 'Stream', ([], {'auth': 'self.auth'}), '(auth=self.auth)\n', (6258, 6274), False, 'from hop import Stream\n'), ((7262, 7274), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7272, 7274), False, 'import uuid\n'), ((8019, 8050), 'os.getenv', 'os.getenv', (['"""TIME_STRING_FORMAT"""'], {}), "('TIME_STRING_FORMAT')\n", (8028, 8050), False, 'import os\n'), ((9350, 9381), 'os.getenv', 'os.getenv', (['"""TIME_STRING_FORMAT"""'], {}), "('TIME_STRING_FORMAT')\n", (9359, 9381), False, 'import os\n'), ((9469, 9481), 'uuid.uuid4', 
'uuid.uuid4', ([], {}), '()\n', (9479, 9481), False, 'import uuid\n'), ((9718, 9738), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (9732, 9738), False, 'import random\n'), ((10496, 10512), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (10510, 10512), False, 'import time\n'), ((3194, 3210), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (3208, 3210), False, 'import time\n'), ((3618, 3649), 'os.getenv', 'os.getenv', (['"""TIME_STRING_FORMAT"""'], {}), "('TIME_STRING_FORMAT')\n", (3627, 3649), False, 'import os\n'), ((6054, 6070), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (6068, 6070), False, 'import time\n'), ((7124, 7140), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (7138, 7140), False, 'import time\n'), ((7983, 8009), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (8007, 8009), False, 'import datetime\n'), ((8171, 8202), 'os.getenv', 'os.getenv', (['"""TIME_STRING_FORMAT"""'], {}), "('TIME_STRING_FORMAT')\n", (8180, 8202), False, 'import os\n'), ((8243, 8274), 'os.getenv', 'os.getenv', (['"""TIME_STRING_FORMAT"""'], {}), "('TIME_STRING_FORMAT')\n", (8252, 8274), False, 'import os\n'), ((9314, 9340), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (9338, 9340), False, 'import datetime\n'), ((3582, 3608), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3606, 3608), False, 'import datetime\n'), ((3775, 3801), 'os.getenv', 'os.getenv', (['"""TESTING_TOPIC"""'], {}), "('TESTING_TOPIC')\n", (3784, 3801), False, 'import os\n'), ((3452, 3468), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (3466, 3468), False, 'import time\n')] |
from collections import deque
def list_manipulator(numbers, *args):
    """Add or remove elements at either end of a list of numbers.

    args[0] is the action ('add' or 'remove'), args[1] the direction
    ('beginning' or 'end'). For 'add', args[2:] are the values to insert
    (order preserved). For 'remove', an optional args[2] gives how many
    elements to drop (default 1); removing more than the list holds just
    empties it.

    :param numbers: Iterable of numbers; not mutated.
    :return: New list of ints after the manipulation.
    """
    queue = deque(numbers)
    action = args[0]
    direction = args[1]
    if action == 'add':
        values = [int(x) for x in args[2:]]
        if direction == 'beginning':
            # extendleft reverses its input, so reverse first to keep order.
            queue.extendleft(reversed(values))
        elif direction == 'end':
            queue.extend(values)
    elif action == 'remove':
        # BUG FIX: the original used `if parameter:` which treated an
        # explicit count of 0 as "no count given" and removed one element.
        count = int(args[2]) if len(args) == 3 else 1
        if count >= len(queue):
            # Covers the empty-list case too (no IndexError on pop).
            queue.clear()
        else:
            for _ in range(count):
                if direction == 'beginning':
                    queue.popleft()
                elif direction == 'end':
                    queue.pop()
    return [int(x) for x in queue]
# Demonstration calls covering both actions, both directions, and the
# single- vs multi-element variants.
print(list_manipulator([1, 2, 3], "remove", "end"))
print(list_manipulator([1, 2, 3], "remove", "beginning"))
print(list_manipulator([1, 2, 3], "add", "beginning", 20))
print(list_manipulator([1, 2, 3], "add", "end", 30))
print(list_manipulator([1, 2, 3], "remove", "end", 2))
print(list_manipulator([1, 2, 3], "remove", "beginning", 2))
print(list_manipulator([1, 2, 3], "add", "beginning", 20, 30, 40))
print(list_manipulator([1, 2, 3], "add", "end", 30, 40, 50))
| [
"collections.deque"
] | [((84, 98), 'collections.deque', 'deque', (['numbers'], {}), '(numbers)\n', (89, 98), False, 'from collections import deque\n')] |
from django.urls import reverse
from ..links.document_file_links import (
link_document_file_delete, link_document_file_download_quick
)
from ..links.favorite_links import (
link_document_favorites_add, link_document_favorites_remove
)
from ..links.trashed_document_links import link_document_restore
from ..models import TrashedDocument
from ..permissions import (
permission_document_file_delete, permission_document_file_download,
permission_document_view, permission_trashed_document_restore
)
from .base import GenericDocumentViewTestCase
from .mixins.favorite_document_mixins import FavoriteDocumentTestMixin
class FavoriteDocumentLinkTestCase(
    FavoriteDocumentTestMixin, GenericDocumentViewTestCase
):
    """Resolution tests for the favorite document add and remove links."""

    def _resolve(self, link):
        # Build a view context for the test document and resolve the link.
        self.add_test_view(test_object=self.test_document)
        return link.resolve(context=self.get_test_view())

    def test_favorite_document_add_link_no_permission(self):
        self._create_test_document_stub()

        self.assertEqual(self._resolve(link=link_document_favorites_add), None)

    def test_favorite_document_add_link_with_access(self):
        self._create_test_document_stub()

        self.grant_access(
            obj=self.test_document_stub, permission=permission_document_view
        )

        self.assertNotEqual(
            self._resolve(link=link_document_favorites_add), None
        )

    def test_favorite_document_add_link_external_user_with_access(self):
        self._create_test_user()
        self._create_test_document_stub()

        self.grant_access(
            obj=self.test_document_stub, permission=permission_document_view
        )

        # Favorited by a different user; the add link must still resolve
        # for the acting user.
        self._test_document_favorite_add(user=self.test_user)

        self.assertNotEqual(
            self._resolve(link=link_document_favorites_add), None
        )

    def test_favorite_document_remove_link_no_permission(self):
        self._create_test_document_stub()

        self._test_document_favorite_add()

        self.assertEqual(
            self._resolve(link=link_document_favorites_remove), None
        )

    def test_favorite_document_remove_link_with_access(self):
        self._create_test_document_stub()

        self.grant_access(
            obj=self.test_document_stub, permission=permission_document_view
        )

        self._test_document_favorite_add()

        self.assertNotEqual(
            self._resolve(link=link_document_favorites_remove), None
        )

    def test_favorite_document_remove_link_external_user_with_access(self):
        self._create_test_user()
        self._create_test_document_stub()

        self.grant_access(
            obj=self.test_document_stub, permission=permission_document_view
        )

        # The favorite belongs to another user, so the remove link does not
        # resolve for the acting user.
        self._test_document_favorite_add(user=self.test_user)

        self.assertEqual(
            self._resolve(link=link_document_favorites_remove), None
        )
class DocumentsLinksTestCase(GenericDocumentViewTestCase):
    """Resolution tests for the document file delete and download links."""

    def _resolve(self, link, test_object):
        # Build a view context for the object and resolve the link in it.
        self.add_test_view(test_object=test_object)
        return link.resolve(context=self.get_test_view())

    def test_document_file_delete_link_no_permission(self):
        self._upload_test_document_file()

        # NOTE(review): assertTrue(x, 2) passes 2 as the failure message;
        # assertEqual was probably intended. Preserved as-is.
        self.assertTrue(self.test_document.files.count(), 2)

        resolved_link = self._resolve(
            link=link_document_file_delete,
            test_object=self.test_document.files.first()
        )
        self.assertEqual(resolved_link, None)

    def test_document_file_delete_link_with_permission(self):
        self._upload_test_document_file()

        self.assertTrue(self.test_document.files.count(), 2)

        self.grant_access(
            obj=self.test_document,
            permission=permission_document_file_delete
        )

        test_file = self.test_document.files.first()
        resolved_link = self._resolve(
            link=link_document_file_delete, test_object=test_file
        )
        self.assertNotEqual(resolved_link, None)
        self.assertEqual(
            resolved_link.url, reverse(
                viewname=link_document_file_delete.view,
                args=(test_file.pk,)
            )
        )

    def test_document_file_download_link_no_permission(self):
        resolved_link = self._resolve(
            link=link_document_file_download_quick,
            test_object=self.test_document.file_latest
        )
        self.assertEqual(resolved_link, None)

    def test_document_file_download_link_with_permission(self):
        self.grant_access(
            obj=self.test_document,
            permission=permission_document_file_download
        )

        resolved_link = self._resolve(
            link=link_document_file_download_quick,
            test_object=self.test_document.file_latest
        )
        self.assertNotEqual(resolved_link, None)
        self.assertEqual(
            resolved_link.url, reverse(
                viewname=link_document_file_download_quick.view,
                args=(self.test_document.file_latest.pk,)
            )
        )
class TrashedDocumentsLinksTestCase(GenericDocumentViewTestCase):
    """Resolution tests for the trashed document restore link."""

    def setUp(self):
        super().setUp()
        # Trash the document and fetch its TrashedDocument proxy, then
        # prepare a view context against that proxy for the tests below.
        self.test_document.delete()
        self.test_trashed_document = TrashedDocument.objects.get(
            pk=self.test_document.pk
        )

        self.add_test_view(test_object=self.test_trashed_document)
        self.context = self.get_test_view()

    def test_trashed_document_restore_link_no_permission(self):
        self.assertEqual(
            link_document_restore.resolve(context=self.context), None
        )

    def test_trashed_document_restore_link_with_permission(self):
        self.grant_access(
            obj=self.test_document,
            permission=permission_trashed_document_restore
        )

        resolved_link = link_document_restore.resolve(context=self.context)

        self.assertNotEqual(resolved_link, None)
        self.assertEqual(
            resolved_link.url, reverse(
                viewname=link_document_restore.view,
                args=(self.test_trashed_document.pk,)
            )
        )
| [
"django.urls.reverse"
] | [((5501, 5605), 'django.urls.reverse', 'reverse', ([], {'viewname': 'link_document_file_download_quick.view', 'args': '(self.test_document.file_latest.pk,)'}), '(viewname=link_document_file_download_quick.view, args=(self.\n test_document.file_latest.pk,))\n', (5508, 5605), False, 'from django.urls import reverse\n'), ((6598, 6686), 'django.urls.reverse', 'reverse', ([], {'viewname': 'link_document_restore.view', 'args': '(self.test_trashed_document.pk,)'}), '(viewname=link_document_restore.view, args=(self.\n test_trashed_document.pk,))\n', (6605, 6686), False, 'from django.urls import reverse\n')] |
# Copyright The IETF Trust 2013-2020, All Rights Reserved
# -*- coding: utf-8 -*-
from email.message import EmailMessage
from textwrap import dedent
from traceback import format_exception, extract_tb
from django.conf import settings
from django.core.management.base import BaseCommand
from ietf.utils.mail import send_smtp
import debug # pyflakes:ignore
class EmailOnFailureCommand(BaseCommand):
    """Command that sends email when an exception occurs

    Subclasses can override failure_message, failure_subject, and failure_recipients
    to customize the behavior. Both failure_subject and failure_message are formatted
    with keywords for interpolation. By default, the following substitutions are
    available:
      {error} - the exception instance
      {error_summary} - multiline summary of error type and location where it occurred
    More interpolation values can be added through the **extra argument to
    make_failure_message().

    By default, the full traceback will be attached to the notification email.
    To disable this, set failure_email_includes_traceback to False.

    When a command is executed, its handle() method will be called as usual.
    If an exception occurs, instead of printing this to the terminal and
    exiting with an error, a message generated via the make_failure_message()
    method will be sent to failure_recipients. The command will exit successfully
    to the shell.

    This can be prevented for debugging by passing the --no-failure-email option.
    In this case, the usual error handling will be used. To make this available,
    the subclass must call super().add_arguments() in its own add_arguments() method.
    """
    failure_message = dedent("""\
        An exception occurred: 
        
        {error}
        """)
    failure_subject = 'Exception in management command'
    # When True, the full traceback is attached to the notification email.
    failure_email_includes_traceback = True

    @property
    def failure_recipients(self):
        """Default recipients: the email addresses of the site ADMINS."""
        return tuple(item[1] for item in settings.ADMINS)

    def execute(self, *args, **options):
        try:
            super().execute(*args, **options)
        except Exception as error:
            # BUG FIX: use .get() with a default of True so that a subclass
            # which did not call super().add_arguments() (and therefore has
            # no 'email_on_failure' option) still reports the original error
            # by email instead of masking it with a KeyError here.
            if options.get('email_on_failure', True):
                msg = self.make_failure_message(error)
                send_smtp(msg)
            else:
                raise

    def _summarize_error(self, error):
        """Return a short multiline summary of the error type and where it occurred."""
        # Last frame of the traceback is where the exception was raised.
        frame = extract_tb(error.__traceback__)[-1]
        return dedent(f"""\
            Error details:
              Exception type: {type(error).__module__}.{type(error).__name__}
              File: {frame.filename}
              Line: {frame.lineno}""")

    def make_failure_message(self, error, **extra):
        """Generate an EmailMessage to report an error

        :param error: the exception instance being reported
        :param extra: additional keyword substitutions for the subject/message templates
        """
        format_values = dict(
            error=error,
            error_summary=self._summarize_error(error),
        )
        format_values.update(**extra)
        msg = EmailMessage()
        msg['To'] = self.failure_recipients
        msg['From'] = settings.SERVER_EMAIL
        msg['Subject'] = self.failure_subject.format(**format_values)
        msg.set_content(
            self.failure_message.format(**format_values)
        )
        if self.failure_email_includes_traceback:
            msg.add_attachment(
                ''.join(format_exception(None, error, error.__traceback__)),
                filename='traceback.txt',
            )
        return msg

    def add_arguments(self, parser):
        """Register the --no-failure-email flag (subclasses must call super())."""
        parser.add_argument('--no-failure-email', dest='email_on_failure', action='store_false',
                            help='Disable sending email on failure')
"textwrap.dedent",
"traceback.format_exception",
"ietf.utils.mail.send_smtp",
"email.message.EmailMessage",
"traceback.extract_tb"
] | [((1716, 1779), 'textwrap.dedent', 'dedent', (['""" An exception occurred: \n \n {error}\n """'], {}), '(""" An exception occurred: \n \n {error}\n """)\n', (1722, 1779), False, 'from textwrap import dedent\n'), ((2880, 2894), 'email.message.EmailMessage', 'EmailMessage', ([], {}), '()\n', (2892, 2894), False, 'from email.message import EmailMessage\n'), ((2351, 2382), 'traceback.extract_tb', 'extract_tb', (['error.__traceback__'], {}), '(error.__traceback__)\n', (2361, 2382), False, 'from traceback import format_exception, extract_tb\n'), ((2240, 2254), 'ietf.utils.mail.send_smtp', 'send_smtp', (['msg'], {}), '(msg)\n', (2249, 2254), False, 'from ietf.utils.mail import send_smtp\n'), ((3251, 3301), 'traceback.format_exception', 'format_exception', (['None', 'error', 'error.__traceback__'], {}), '(None, error, error.__traceback__)\n', (3267, 3301), False, 'from traceback import format_exception, extract_tb\n')] |
from antlr4 import *
from antlr4.error.ErrorListener import ErrorListener
from antlr.SBHasmLexer import SBHasmLexer
from antlr.SBHasmListener import SBHasmListener
from antlr.SBHasmParser import SBHasmParser
class MyErrorListener(ErrorListener):
    """ANTLR error listener that raises an exception on any parse problem
    instead of merely reporting it, so errors cannot be silently ignored."""

    def __init__(self):
        # Modernized from the verbose super(MyErrorListener, self) form;
        # this file is Python 3 only.
        super().__init__()

    def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
        raise Exception("SyntaxError in {},{} msg={}".format(line, column, msg))

    def reportAmbiguity(self, recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs):
        raise Exception("reportAmbiguity")

    def reportAttemptingFullContext(self, recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs):
        raise Exception("reportAttemptingFullContext")

    def reportContextSensitivity(self, recognizer, dfa, startIndex, stopIndex, prediction, configs):
        raise Exception("reportContextSensitivity")
class SBHCodeSizeListener(SBHasmListener):
    """Parse-tree listener that measures program size in commands."""

    def __init__(self):
        # Running total of command nodes seen during the tree walk.
        self.cmd_cnt = 0

    def _tally(self):
        # Both Cmd and Sonst nodes contribute one unit of code size.
        self.cmd_cnt += 1

    def enterCmd(self, ctx):
        self._tally()

    def enterSonst(self, ctx):
        self._tally()
class Pickup:
    """Wraps an item involved in a pickup instruction."""

    def __init__(self, item):
        self.item = item

    def __str__(self):
        # Fixed display name; the wrapped item is intentionally not shown.
        return "Pickup"
class Mem:
    """Wraps a memory slot reference; prints as the slot itself."""

    def __init__(self, slot):
        self.slot = slot

    def __str__(self):
        return self.slot
class Dir:
    """Wraps a direction value."""

    def __init__(self, direction):
        self.dir = direction

    def __str__(self):
        # Added for consistency with the sibling Pickup and Mem classes,
        # which both define __str__.
        return self.dir
class SevenBillionHumansParser:
    """Parse a Seven Billion Humans assembly program and record its size.

    After construction, ``cmd_size`` holds the number of commands found.

    :param filepath: Path of an .asm file to parse.
    :param source: Assembly source passed directly as a string
        (takes precedence over ``filepath``).
    :raises ValueError: If neither ``source`` nor ``filepath`` is given.
    """

    def __init__(self, filepath=None, source=None):
        if source:
            self.parse(InputStream(source))
        elif filepath:
            self.parse(FileStream(filepath))
        else:
            # Previously this silently produced an object without
            # ``cmd_size``, causing a confusing AttributeError later on.
            # Fail fast with a clear message instead.
            raise ValueError('Either a filepath or a source string is required.')

    def parse(self, source_stream):
        """Run the ANTLR lexer/parser over ``source_stream`` and count commands."""
        lexer = SBHasmLexer(source_stream)
        stream = CommonTokenStream(lexer)
        parser = SBHasmParser(stream)
        # parser._listeners = [ MyErrorListener() ]
        tree = parser.asm()
        listener = SBHCodeSizeListener()
        walker = ParseTreeWalker()
        walker.walk(listener, tree)
        self.cmd_size = listener.cmd_cnt
if __name__ == '__main__':
    # Example run: parse one solution file and compute its command count.
    s = SevenBillionHumansParser("../solutions/55 - Data Flowers/size-10_speed-23.asm")
| [
"antlr.SBHasmLexer.SBHasmLexer",
"antlr.SBHasmParser.SBHasmParser"
] | [((1735, 1761), 'antlr.SBHasmLexer.SBHasmLexer', 'SBHasmLexer', (['source_stream'], {}), '(source_stream)\n', (1746, 1761), False, 'from antlr.SBHasmLexer import SBHasmLexer\n'), ((1821, 1841), 'antlr.SBHasmParser.SBHasmParser', 'SBHasmParser', (['stream'], {}), '(stream)\n', (1833, 1841), False, 'from antlr.SBHasmParser import SBHasmParser\n')] |
import logging
from tools.EventGeneration import convert_date, generate_random_time, generate_random_node_id
# Logger named after the last component of the module path.
logger = logging.getLogger(__name__.split('.')[-1])
from features.ResponseTypeFeature import ResponseTypeFeature
from features.ReplayTimeSeriesFeature import ReplayTimeSeriesFeature
import tools.Cache as Cache
import random
import pandas as pd
import warnings
from scipy.sparse import SparseEfficiencyWarning
# In-place assignment into sparse rows (see _generate_responses) triggers
# this warning; it is silenced deliberately.
warnings.simplefilter('ignore', SparseEfficiencyWarning)
# Seed the stdlib RNG for reproducible simulations. NOTE(review): the numpy
# RNG is not seeded here — confirm whether numpy-based randomness elsewhere
# should also be reproducible.
random.seed(1234)
class PoissonSimulation:
    '''
    Simple event simulation. Given a replay of base events
    and probabilities of responses, generate arbitrary single-layer
    event cascades.

    Parameters
    ----------
    cfg : configuration object exposing ``limits.*`` and
        ``poisson_simulation.*`` settings via ``cfg.get``.
    generate_replies : bool, optional
        Whether response events are generated for each base event. When
        None, falls back to the ``poisson_simulation.generate_replies``
        setting (default True).
    '''

    def __init__(self, cfg, generate_replies=None, **kwargs):
        self.start_date = cfg.get("limits.start_date", type=convert_date)
        self.end_date = cfg.get("limits.end_date", type=convert_date)
        self.time_delta = cfg.get("limits.time_delta", type=pd.Timedelta).total_seconds()
        if generate_replies is None:
            self.generate_replies = cfg.get("poisson_simulation.generate_replies", True)
        else:
            self.generate_replies = generate_replies
        self.cfg = cfg

    @Cache.amalia_cache
    def compute(self, dfs, train_dfs=None):
        """Generate simulated events for every platform in ``dfs``.

        Returns a DataFrame of generated events sorted by ``nodeTime``.
        Raises ValueError when no events were produced.
        """
        # Retrieve replay time-series feature and response type feature
        ts = ReplayTimeSeriesFeature(self.cfg).compute(dfs)
        responses = ResponseTypeFeature(self.cfg).compute(dfs)
        res = []
        platforms = dfs.get_platforms()
        logger.warning('Very slow for dense data generation. Use ParallelPoissonSimulation to reduce runtime.')
        for platform in platforms:
            # BUG FIX: the original rebound ``ts``/``responses`` to their
            # per-platform slices inside the loop, so every platform after
            # the first indexed into an already-sliced object.
            platform_ts = ts[platform]
            platform_responses = responses[platform]
            node_map = dfs.get_node_map(platform)
            # For all users that have a nonzero row in their ts, generate events
            logger.info('Generating new events.')
            nonzero_rows, __ = platform_ts.nonzero()
            res.extend(
                _generate_base_event(platform_ts, node_map, nonzero_rows, self.start_date,
                                     platform_responses, self.generate_replies, platform)
            )
        # Return a pandas DataFrame sorted by time
        # Feed into the output module for actual result generation
        res = pd.DataFrame(res)
        if len(res) == 0:
            logger.error('PoissonSimulation produced no events. Terminating.')
            raise ValueError('PoissonSimulation produced no events.')
        return res.sort_values(by=['nodeTime']).reset_index(drop=True)
def _generate_base_event(ts, node_map, nonzero_rows, start_date, responses, generate_replies, platform):
    """Generate root 'tweet' events (and optionally their responses) for
    every active user row in the sparse time series ``ts``.

    :param ts: Sparse user-by-day matrix of event counts.
    :param node_map: Maps matrix row index to user ID.
    :param nonzero_rows: Row indices of users with at least one event.
    :param start_date: Epoch seconds of day index 0.
    :param responses: Per-response-type probability matrices.
    :param generate_replies: Whether to generate responses per base event.
    :param platform: Platform name attached to every event.
    :return: List of event dicts.
    """
    res = []
    for root_user_id in nonzero_rows:
        ts_row = ts.getrow(root_user_id)
        __, events = ts_row.nonzero()
        # Event count for each day index on which this user is active.
        event_counts = [ts_row.getcol(event).toarray()[0][0] for event in events]
        for day_index, count in zip(events, event_counts):
            for _ in range(count):
                # Generate the base event at the start of the active day.
                current_day_time = int(start_date + day_index * 86400)
                root_event_id = generate_random_node_id()
                res.append({'nodeID': root_event_id, 'nodeUserID': node_map[root_user_id], 'parentID': root_event_id,
                            'rootID': root_event_id, 'actionType': 'tweet', 'nodeTime': current_day_time,
                            'platform': platform})
                # Generate responses to the base event
                if generate_replies:
                    generated_responses = _generate_responses(root_event_id, root_user_id, current_day_time,
                                                              responses, node_map, platform)
                    # extend() avoids the quadratic cost of the original
                    # repeated ``res = res + [...]`` list concatenation.
                    res.extend(generated_responses)
    return res
def _generate_responses(root_event_id, root_user_id, current_day_time, responses, node_map, platform):
    """Generate probabilistic response events to one root event.

    For every response type, the root user's probability row is thresholded
    against a single uniform draw; users whose probability survives each emit
    one event of that type at a random time after ``current_day_time``.

    ``responses`` is assumed to map response-type name -> sparse probability
    matrix (rows = root users, columns = responding users) — inferred from
    the getrow/nonzero usage; confirm against the caller.
    Returns a list of event dicts parented to ``root_event_id``.
    """
    generated = []
    for response_type in responses:
        prob_row = responses[response_type].getrow(root_user_id)
        # One random threshold per response type: probabilities below it are
        # zeroed, so only the surviving (nonzero) users respond.
        prob_row[prob_row < random.random()] = 0
        __, responder_indices = prob_row.nonzero()
        # Draw all timestamps first, then node ids per event, to keep the
        # seeded-RNG call order identical to the original implementation.
        event_times = [generate_random_time(current_day_time) for __unused in responder_indices]
        responder_ids = [node_map[idx] for idx in responder_indices]
        for responder_id, event_time in zip(responder_ids, event_times):
            generated.append({'nodeID': generate_random_node_id(),
                              'nodeUserID': responder_id,
                              'parentID': root_event_id,
                              'rootID': root_event_id,
                              'actionType': response_type,
                              'nodeTime': event_time,
                              'platform': platform})
    return generated
| [
"pandas.DataFrame",
"features.ReplayTimeSeriesFeature.ReplayTimeSeriesFeature",
"random.seed",
"features.ResponseTypeFeature.ResponseTypeFeature",
"tools.EventGeneration.generate_random_time",
"warnings.simplefilter",
"random.random",
"tools.EventGeneration.generate_random_node_id"
] | [((423, 479), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'SparseEfficiencyWarning'], {}), "('ignore', SparseEfficiencyWarning)\n", (444, 479), False, 'import warnings\n'), ((481, 498), 'random.seed', 'random.seed', (['(1234)'], {}), '(1234)\n', (492, 498), False, 'import random\n'), ((2301, 2318), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {}), '(res)\n', (2313, 2318), True, 'import pandas as pd\n'), ((4830, 4868), 'tools.EventGeneration.generate_random_time', 'generate_random_time', (['current_day_time'], {}), '(current_day_time)\n', (4850, 4868), False, 'from tools.EventGeneration import convert_date, generate_random_time, generate_random_node_id\n'), ((1412, 1445), 'features.ReplayTimeSeriesFeature.ReplayTimeSeriesFeature', 'ReplayTimeSeriesFeature', (['self.cfg'], {}), '(self.cfg)\n', (1435, 1445), False, 'from features.ReplayTimeSeriesFeature import ReplayTimeSeriesFeature\n'), ((1479, 1508), 'features.ResponseTypeFeature.ResponseTypeFeature', 'ResponseTypeFeature', (['self.cfg'], {}), '(self.cfg)\n', (1498, 1508), False, 'from features.ResponseTypeFeature import ResponseTypeFeature\n'), ((3211, 3236), 'tools.EventGeneration.generate_random_node_id', 'generate_random_node_id', ([], {}), '()\n', (3234, 3236), False, 'from tools.EventGeneration import convert_date, generate_random_time, generate_random_node_id\n'), ((4645, 4660), 'random.random', 'random.random', ([], {}), '()\n', (4658, 4660), False, 'import random\n'), ((4989, 5014), 'tools.EventGeneration.generate_random_node_id', 'generate_random_node_id', ([], {}), '()\n', (5012, 5014), False, 'from tools.EventGeneration import convert_date, generate_random_time, generate_random_node_id\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'xml/alert.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    # PyQt4 with QString API v1: delegate to Qt's own UTF-8 conversion.
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # QString API v2 (or Python 3 builds): strings are already unicode,
    # so the conversion is the identity function.
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
except AttributeError:
    # Newer PyQt4 builds removed UnicodeUTF8; translate() no longer takes
    # an explicit encoding argument.
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
else:
    # Legacy API: forward the UTF-8 encoding flag to translate().
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
class Ui_Dialog(object):
    """pyuic4-generated layout for a small modal alert dialog.

    Builds a message label (``self.label``) above a right-aligned Close
    button (``self.okButton``).  The button's ``clicked()`` signal is wired
    to ``Dialog.on_alert``, so the dialog passed to :meth:`setupUi` must
    provide that slot.  Generated code: do not hand-edit the .ui source.
    """
    def setupUi(self, Dialog):
        # Window shell: 287x171, slightly translucent, modal, no size grip.
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.resize(287, 171)
        Dialog.setWindowOpacity(0.8)
        Dialog.setAutoFillBackground(False)
        Dialog.setSizeGripEnabled(False)
        Dialog.setModal(True)
        # Outer layout hosts the message area on top of the button row.
        self.verticalLayout_2 = QtGui.QVBoxLayout(Dialog)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.verticalLayout = QtGui.QVBoxLayout()
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.label = QtGui.QLabel(Dialog)
        self.label.setObjectName(_fromUtf8("label"))
        self.verticalLayout.addWidget(self.label)
        # Expanding spacer below the label pushes the text to the top.
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout.addItem(spacerItem)
        self.verticalLayout_2.addLayout(self.verticalLayout)
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        # Expanding spacer left of the button keeps it right-aligned.
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.okButton = QtGui.QPushButton(Dialog)
        self.okButton.setObjectName(_fromUtf8("okButton"))
        self.horizontalLayout.addWidget(self.okButton)
        self.verticalLayout_2.addLayout(self.horizontalLayout)
        self.retranslateUi(Dialog)
        # Old-style signal/slot connection: Close button triggers on_alert.
        QtCore.QObject.connect(self.okButton, QtCore.SIGNAL(_fromUtf8("clicked()")), Dialog.on_alert)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        # Apply translatable UI strings (called once from setupUi).
        Dialog.setWindowTitle(_translate("Dialog", "Alert", None))
        self.label.setText(_translate("Dialog", "TextLabel", None))
        self.okButton.setText(_translate("Dialog", "Close", None))
| [
"PyQt4.QtCore.QMetaObject.connectSlotsByName",
"PyQt4.QtGui.QLabel",
"PyQt4.QtGui.QPushButton",
"PyQt4.QtGui.QVBoxLayout",
"PyQt4.QtGui.QApplication.translate",
"PyQt4.QtGui.QHBoxLayout",
"PyQt4.QtGui.QSpacerItem"
] | [((451, 515), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['context', 'text', 'disambig', '_encoding'], {}), '(context, text, disambig, _encoding)\n', (479, 515), False, 'from PyQt4 import QtCore, QtGui\n'), ((976, 1001), 'PyQt4.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', (['Dialog'], {}), '(Dialog)\n', (993, 1001), False, 'from PyQt4 import QtCore, QtGui\n'), ((1107, 1126), 'PyQt4.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', ([], {}), '()\n', (1124, 1126), False, 'from PyQt4 import QtCore, QtGui\n'), ((1219, 1239), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['Dialog'], {}), '(Dialog)\n', (1231, 1239), False, 'from PyQt4 import QtCore, QtGui\n'), ((1364, 1450), 'PyQt4.QtGui.QSpacerItem', 'QtGui.QSpacerItem', (['(20)', '(40)', 'QtGui.QSizePolicy.Minimum', 'QtGui.QSizePolicy.Expanding'], {}), '(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.\n Expanding)\n', (1381, 1450), False, 'from PyQt4 import QtCore, QtGui\n'), ((1587, 1606), 'PyQt4.QtGui.QHBoxLayout', 'QtGui.QHBoxLayout', ([], {}), '()\n', (1604, 1606), False, 'from PyQt4 import QtCore, QtGui\n'), ((1704, 1790), 'PyQt4.QtGui.QSpacerItem', 'QtGui.QSpacerItem', (['(40)', '(20)', 'QtGui.QSizePolicy.Expanding', 'QtGui.QSizePolicy.Minimum'], {}), '(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.\n Minimum)\n', (1721, 1790), False, 'from PyQt4 import QtCore, QtGui\n'), ((1861, 1886), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['Dialog'], {}), '(Dialog)\n', (1878, 1886), False, 'from PyQt4 import QtCore, QtGui\n'), ((2210, 2255), 'PyQt4.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['Dialog'], {}), '(Dialog)\n', (2247, 2255), False, 'from PyQt4 import QtCore, QtGui\n'), ((599, 652), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['context', 'text', 'disambig'], {}), '(context, text, disambig)\n', (627, 652), False, 'from PyQt4 import QtCore, QtGui\n')] |