content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!/usr/bin/env python3
# The function annotations in this module require Python 3.5 or higher.
import os
import textwrap
from itertools import chain
from typing import Dict, List, Union
# Type aliases
FilePath = str
NotePath = str
SQLSelectStatement = str
def sort_notepaths(notepaths: List[NotePath]) -> List[NotePath]:
    '''Sort notepaths in place so children always follow their parent.

    Notepath components are joined with '/' (chr(47)), so a plain sort can
    let a sibling such as 'a(b)' fall between 'a' and 'a/b' whenever a
    character sorts before the slash (space, comma, parentheses, ...).
    Example: plain sorting of ['a/b', 'a', 'a(b)'] yields
    ['a', 'a(b)', 'a/b'], but we want ['a', 'a/b', 'a(b)'].

    To keep every parent immediately followed by its children, the sort
    key replaces each '/' with the NUL character — the smallest possible
    codepoint, which (1) sorts before anything likely to occur in a text
    file and (2) is itself extremely unlikely to appear in a notepath.
    The key also lowercases the path so ordering ignores case.

    Returns the same (mutated) list for convenience.
    '''
    def _family_key(notepath: NotePath) -> str:
        # NUL ('\x00' == chr(0)) sorts before every printable character,
        # so 'a/...' lands directly after 'a' and before 'a(b)', 'a b', etc.
        return notepath.lower().replace('/', '\x00')

    notepaths.sort(key=_family_key)
    return notepaths
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
383,
2163,
37647,
287,
428,
8265,
2421,
11361,
513,
13,
20,
393,
2440,
13,
198,
198,
11748,
28686,
198,
11748,
2420,
37150,
198,
198,
6738,
340,
861,
10141,
1330,
6333,
198,
6... | 3.103314 | 513 |
from unittest import TestCase
from testfixtures import LogCapture
from scrapy.spidermiddlewares.urllength import UrlLengthMiddleware
from scrapy.http import Response, Request
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
from scrapy.settings import Settings
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
1332,
69,
25506,
1330,
5972,
49630,
198,
198,
6738,
15881,
88,
13,
2777,
1304,
27171,
86,
3565,
13,
333,
297,
3286,
1330,
8799,
75,
24539,
34621,
1574,
198,
6738,
15881,
88,
13,
... | 3.730769 | 78 |
from math import trunc
# Read a number from the user and report its integer part.
# trunc() truncates toward zero (unlike floor() for negative inputs).
num = float(input('Digite um número: '))
print('A porção inteira é {}.'.format(trunc(num)))
| [
6738,
10688,
1330,
40122,
198,
22510,
796,
12178,
7,
15414,
10786,
19511,
578,
23781,
299,
21356,
647,
78,
25,
705,
4008,
198,
4798,
10786,
32,
16964,
16175,
28749,
493,
68,
8704,
38251,
23884,
2637,
13,
18982,
7,
2213,
19524,
7,
22510,... | 2.674419 | 43 |
"""Graphic Elements for the Pins."""
from .pin import Pin
__all__ = ["Pin"]
| [
37811,
38,
22262,
26632,
329,
262,
350,
1040,
526,
15931,
198,
6738,
764,
11635,
1330,
13727,
198,
198,
834,
439,
834,
796,
14631,
28348,
8973,
198
] | 2.961538 | 26 |
# The guess API is already defined for you.
# @param num, your guess
# @return -1 if num is higher than the picked number
# 1 if num is lower than the picked number
# otherwise return 0
# def guess(num: int) -> int:
| [
2,
383,
4724,
7824,
318,
1541,
5447,
329,
345,
13,
198,
2,
2488,
17143,
997,
11,
534,
4724,
198,
2,
2488,
7783,
532,
16,
611,
997,
318,
2440,
621,
262,
6497,
1271,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
352,
611,
... | 2.962025 | 79 |
#!/usr/bin/env python3
import argparse
import numpy as np
import pandas as pd
from plotly import tools
import plotly.graph_objs as go
from plotly.offline import plot
from celseq2.helper import print_logger, base_name, is_nonempty_file
def plotly_qc(fpath, saveto, sep=',', name=''):
    '''
    Generate a plotly html plot for QC of scRNA-seq data.

    QC includes:
        - number of total UMIs
        - number of detected genes
        - percent of MT (mitochondrial) expression

    Input:
        fpath: file path (CSV/TSV) to the expression file with genes/features as rows
            and cells/samples on columns. First column saves gene names.
        saveto: a html file to save the plots using Plot.ly
        sep: file sep. Default: ","
        name: plot title; defaults to the base name of @fpath

    Returns:
        bool: True if the plot was written, False if @fpath was missing/empty.
    '''
    bool_success = False
    # Bail out early if the input file is absent or empty.
    if not is_nonempty_file(fpath):
        return bool_success
    if not name:
        name = base_name(fpath)
    expr = pd.read_csv(fpath, index_col=0, sep=sep)
    print_logger(('UMI count matrix: '
                  '{} genes x {} cells').format(expr.shape[0], expr.shape[1]))
    # Per-cell totals: UMIs per column and number of non-zero genes.
    total_num_UMIs = expr.sum(axis=0)
    num_detected_genes = (expr > 0).sum(axis=0)
    # Mitochondrial genes are identified purely by name prefix.
    mt_index = [x for x in expr.index if x.startswith(
        'mt-') or x.startswith('MT-')]
    if not mt_index:
        percent_mt = 0
    else:
        mt_umis = expr.loc[pd.Series(mt_index), :].sum(axis=0)
        percent_mt = mt_umis / total_num_UMIs
        # Cells with zero total UMIs divide to inf; report them as 0.
        percent_mt = percent_mt.replace(np.inf, 0)
    qc = pd.DataFrame(dict(total_num_UMIs=total_num_UMIs,
                           num_detected_genes=num_detected_genes,
                           percent_mt=percent_mt))
    # NOTE(review): plotly_scatter / plotly_hist are not defined in this
    # view — presumably module-level helpers; confirm they exist.
    # 1/5
    plotly_g_vs_umi = plotly_scatter(
        x=qc.total_num_UMIs,
        y=qc.num_detected_genes,
        xlab='#Total UMIs (median={})'.format(qc.total_num_UMIs.median()),
        ylab='#Detected Genes (median={})'.format(
            qc.num_detected_genes.median()),
        main=name,
        hover_text=qc.index.values)
    plotly_g_vs_umi.layout.yaxis.scaleanchor = None
    # 2/5
    plotly_mt_vs_umi = plotly_scatter(
        x=qc.total_num_UMIs,
        y=qc.percent_mt,
        xlab='#Total UMIs (median={})'.format(qc.total_num_UMIs.median()),
        ylab='MT Fraction (median={:6.4f})'.format(qc.percent_mt.median()),
        main=name,
        hover_text=qc.index.values)
    plotly_mt_vs_umi.layout.yaxis.scaleanchor = None
    # 3/5
    plotly_hist_umis = plotly_hist(
        vals=qc.total_num_UMIs,
        xlab='#Total UMIs (median={})'.format(qc.total_num_UMIs.median()))
    # 4/5
    plotly_hist_g = plotly_hist(
        vals=qc.num_detected_genes,
        xlab=('#Detected Genes '
              '(median={})').format(qc.num_detected_genes.median()))
    # 5/5
    plotly_hist_percent_mt = plotly_hist(
        vals=qc.percent_mt,
        xlab='MT Fraction (median={:6.4f})'.format(qc.percent_mt.median()))
    # Merge the 5 figures together
    qc_fig = tools.make_subplots(
        rows=2, cols=3,
        specs=[[{}, {}, None], [{}, {}, {}]])
    qc_fig.append_trace(plotly_g_vs_umi.data[0], 1, 1)
    qc_fig.append_trace(plotly_mt_vs_umi.data[0], 1, 2)
    qc_fig.append_trace(plotly_hist_umis.data[0], 2, 1)
    qc_fig.append_trace(plotly_hist_g.data[0], 2, 2)
    qc_fig.append_trace(plotly_hist_percent_mt.data[0], 2, 3)
    # Copy per-subplot axis settings (labels etc.) onto the merged figure;
    # make_subplots does not carry them over from the individual figures.
    qc_fig.layout.xaxis1 = {**qc_fig.layout.xaxis1,
                            **plotly_g_vs_umi.layout.xaxis}
    qc_fig.layout.yaxis1 = {**qc_fig.layout.yaxis1,
                            **plotly_g_vs_umi.layout.yaxis}
    qc_fig.layout.xaxis2 = {**qc_fig.layout.xaxis2,
                            **plotly_mt_vs_umi.layout.xaxis}
    qc_fig.layout.yaxis2 = {**qc_fig.layout.yaxis2,
                            **plotly_mt_vs_umi.layout.yaxis}
    qc_fig.layout.xaxis3 = {**qc_fig.layout.xaxis3,
                            **plotly_hist_umis.layout.xaxis}
    qc_fig.layout.yaxis3 = {**qc_fig.layout.yaxis3,
                            **plotly_hist_umis.layout.yaxis}
    qc_fig.layout.xaxis4 = {**qc_fig.layout.xaxis4,
                            **plotly_hist_g.layout.xaxis}
    qc_fig.layout.yaxis4 = {**qc_fig.layout.yaxis4,
                            **plotly_hist_g.layout.yaxis}
    qc_fig.layout.xaxis5 = {**qc_fig.layout.xaxis5,
                            **plotly_hist_percent_mt.layout.xaxis}
    qc_fig.layout.yaxis5 = {**qc_fig.layout.yaxis5,
                            **plotly_hist_percent_mt.layout.yaxis}
    qc_fig['layout'].update(height=800, width=1000, title=name,
                            showlegend=False)
    plot(qc_fig, filename=saveto, auto_open=False)
    bool_success = True
    return bool_success
# NOTE(review): `main` is not defined anywhere in this module as shown —
# running the file directly would raise NameError. Confirm whether a
# main() definition was lost or the guard should call plotly_qc directly.
if __name__ == "__main__":
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
1822,
29572,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
7110,
306,
1330,
4899,
198,
11748,
7110,
306,
13,
34960,
62,
672,
8457,
355... | 1.932421 | 2,412 |
#!/usr/bin/env python
from operator import itemgetter
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
10088,
1330,
2378,
1136,
353,
628,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
198
] | 2.114286 | 35 |
# -*- coding:utf8 -*-
# !/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import sys
try:
import apiai
except ImportError:
sys.path.append(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)
)
import apiai
import uuid
CLIENT_ACCESS_TOKEN = 'YOUR_ACCESS_TOKEN'
# NOTE(review): `main` is not defined in this module as shown — running
# this file directly would raise NameError. Confirm the missing main().
if __name__ == '__main__':
    main()
| [
2,
532,
9,
12,
19617,
25,
40477,
23,
532,
9,
12,
198,
2,
5145,
14,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
2177,
3012,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,... | 3.035948 | 306 |
from model.group_stations import Stations
# app.group.pay(email_pay="uz.all.test@gmail.com")
# app.group.pay(email_pay="uz.all.test@gmail.com")
| [
198,
198,
6738,
2746,
13,
8094,
62,
301,
602,
1330,
520,
602,
628,
628,
198,
2,
220,
220,
220,
598,
13,
8094,
13,
15577,
7,
12888,
62,
15577,
2625,
10277,
13,
439,
13,
9288,
31,
14816,
13,
785,
4943,
198,
198,
2,
220,
220,
220,
... | 2.367647 | 68 |
# utility functions for manipulating MJCF XML models
import xml.etree.ElementTree as ET
import os
import numpy as np
from collections.abc import Iterable
from PIL import Image
from pathlib import Path
from copy import deepcopy
import robosuite
RED = [1, 0, 0, 1]
GREEN = [0, 1, 0, 1]
BLUE = [0, 0, 1, 1]
CYAN = [0, 1, 1, 1]
ROBOT_COLLISION_COLOR = [0, 0.5, 0, 1]
MOUNT_COLLISION_COLOR = [0.5, 0.5, 0, 1]
GRIPPER_COLLISION_COLOR = [0, 0, 0.5, 1]
OBJECT_COLLISION_COLOR = [0.5, 0, 0, 1]
ENVIRONMENT_COLLISION_COLOR = [0.5, 0.5, 0, 1]
SENSOR_TYPES = {
"touch",
"accelerometer",
"velocimeter",
"gyro",
"force",
"torque",
"magnetometer",
"rangefinder",
"jointpos",
"jointvel",
"tendonpos",
"tendonvel",
"actuatorpos",
"actuatorvel",
"actuatorfrc",
"ballangvel",
"jointlimitpos",
"jointlimitvel",
"jointlimitfrc",
"tendonlimitpos",
"tendonlimitvel",
"tendonlimitfrc",
"framepos",
"framequat",
"framexaxis",
"frameyaxis",
"framezaxis",
"framelinvel",
"frameangvel",
"framelinacc",
"frameangacc",
"subtreecom",
"subtreelinvel",
"subtreeangmom",
"user",
}
MUJOCO_NAMED_ATTRIBUTES = {
"class", "childclass", "name", "objname", "material", "texture",
"joint", "joint1", "joint2", "jointinparent", "geom", "geom1", "geom2",
"mesh", "fixed", "actuator", "objname", "tendon", "tendon1", "tendon2",
"slidesite", "cranksite", "body", "body1", "body2", "hfield", "target",
"prefix", "site",
}
IMAGE_CONVENTION_MAPPING = {
"opengl": 1,
"opencv": -1,
}
TEXTURES = {
"WoodRed": "red-wood.png",
"WoodGreen": "green-wood.png",
"WoodBlue": "blue-wood.png",
"WoodLight": "light-wood.png",
"WoodDark": "dark-wood.png",
"WoodTiles": "wood-tiles.png",
"WoodPanels": "wood-varnished-panels.png",
"WoodgrainGray": "gray-woodgrain.png",
"PlasterCream": "cream-plaster.png",
"PlasterPink": "pink-plaster.png",
"PlasterYellow": "yellow-plaster.png",
"PlasterGray": "gray-plaster.png",
"PlasterWhite": "white-plaster.png",
"BricksWhite": "white-bricks.png",
"Metal": "metal.png",
"SteelBrushed": "steel-brushed.png",
"SteelScratched": "steel-scratched.png",
"Brass": "brass-ambra.png",
"Bread": "bread.png",
"Can": "can.png",
"Ceramic": "ceramic.png",
"Cereal": "cereal.png",
"Clay": "clay.png",
"Dirt": "dirt.png",
"Glass": "glass.png",
"FeltGray": "gray-felt.png",
"Lemon": "lemon.png",
}
ALL_TEXTURES = TEXTURES.keys()
class CustomMaterial(object):
"""
Simple class to instantiate the necessary parameters to define an appropriate texture / material combo
Instantiates a nested dict holding necessary components for procedurally generating a texture / material combo
Please see http://www.mujoco.org/book/XMLreference.html#asset for specific details on
attributes expected for Mujoco texture / material tags, respectively
Note that the values in @tex_attrib and @mat_attrib can be in string or array / numerical form.
Args:
texture (None or str or 4-array): Name of texture file to be imported. If a string, should be part of
ALL_TEXTURES. If texture is a 4-array, then this argument will be interpreted as an rgba tuple value and
a template png will be procedurally generated during object instantiation, with any additional
texture / material attributes specified. If None, no file will be linked and no rgba value will be set
Note, if specified, the RGBA values are expected to be floats between 0 and 1
tex_name (str): Name to reference the imported texture
mat_name (str): Name to reference the imported material
tex_attrib (dict): Any other optional mujoco texture specifications.
mat_attrib (dict): Any other optional mujoco material specifications.
shared (bool): If True, this material should not have any naming prefixes added to all names
Raises:
AssertionError: [Invalid texture]
"""
def xml_path_completion(xml_path):
    """
    Resolve a (possibly relative) xml path to an absolute one.

    An absolute input is returned untouched; a relative input is
    interpreted as living under the robosuite assets directory shipped
    with the package.

    Args:
        xml_path (str): local xml path

    Returns:
        str: Full (absolute) xml path
    """
    if not xml_path.startswith("/"):
        return os.path.join(robosuite.models.assets_root, xml_path)
    return xml_path
def array_to_string(array):
    """
    Render a numeric sequence as the space-separated string mujoco expects.

    Examples:
        [0, 1, 2] => "0 1 2"

    Args:
        array (n-array): Array to convert to a string

    Returns:
        str: String equivalent of @array
    """
    return " ".join("{}".format(value) for value in array)
def string_to_array(string):
    """
    Parse a space-separated mujoco attribute string into an np.array.

    Examples:
        "0 1 2" => [0, 1, 2]

    Args:
        string (str): String to convert to an array

    Returns:
        np.array: Numerical array equivalent of @string
    """
    # Split strictly on single spaces (mujoco's own separator), then
    # coerce each token to float.
    return np.array(list(map(float, string.split(" "))))
def convert_to_string(inp):
    """
    Convert @inp into a mujoco-xml compatible string.

    Supported inputs: bools (lowercased "true"/"false"), ints/floats
    (lowercased str()), lists / tuples / np.ndarrays (space-separated via
    array_to_string), and strings / np.str_ (returned unchanged).

    Args:
        inp: Input to convert to string

    Returns:
        str: String equivalent of @inp

    Raises:
        ValueError: [Unsupported input type]
    """
    # isinstance (rather than exact type equality) also accepts subclasses,
    # e.g. numpy integer/float scalars, which the old `type(inp) in {...}`
    # checks rejected. bool must be tested before int/float because bool
    # is a subclass of int.
    if isinstance(inp, (str, np.str_)):
        return inp
    if isinstance(inp, (bool, np.bool_)):
        return str(bool(inp)).lower()
    if isinstance(inp, (int, float, np.number)):
        return str(inp).lower()
    if isinstance(inp, (list, tuple, np.ndarray)):
        return array_to_string(inp)
    raise ValueError("Unsupported type received: got {}".format(type(inp)))
def set_alpha(node, alpha=0.1):
    """
    Sets all a(lpha) field of the rgba attribute to be @alpha
    for @node and all subnodes
    used for managing display

    Args:
        node (ET.Element): Specific node element within XML tree
        alpha (float): Value to set alpha value of rgba tuple
    """
    # XPath ".//*[@rgba]" matches every DESCENDANT carrying an rgba
    # attribute; note @node's own rgba (if any) is not modified.
    for child_node in node.findall(".//*[@rgba]"):
        rgba_orig = string_to_array(child_node.get("rgba"))
        # Keep the r,g,b components, replace only the alpha component.
        child_node.set("rgba", array_to_string(list(rgba_orig[0:3]) + [alpha]))
def new_element(tag, name, **kwargs):
    """
    Build an ET.Element of type @tag with the given attributes.

    Args:
        tag (str): Type of element to create
        name (None or str): Name for this element; pass None for elements
            that carry no explicit name attribute (e.g.: inertial elements)
        **kwargs: Attribute name / value pairs for the new element

    Returns:
        ET.Element: new specified xml element
    """
    if name is not None:
        kwargs["name"] = name
    # Drop attributes explicitly set to None; stringify everything else.
    attrib = {key: convert_to_string(val)
              for key, val in kwargs.items() if val is not None}
    return ET.Element(tag, attrib=attrib)
def new_joint(name, **kwargs):
    """
    Creates a joint tag with attributes specified by @**kwargs.

    Args:
        name (str): Name for this joint
        **kwargs: Specified attributes for the new joint

    Returns:
        ET.Element: new joint xml element
    """
    # Thin wrapper over new_element: values are stringified and
    # None-valued attributes dropped there.
    return new_element(tag="joint", name=name, **kwargs)
def new_actuator(name, joint, act_type="actuator", **kwargs):
    """
    Creates an actuator tag with attributes specified by @**kwargs.

    Args:
        name (str): Name for this actuator
        joint (str): type of actuator transmission.
            see all types here: http://mujoco.org/book/modeling.html#actuator
        act_type (str): actuator type. Defaults to "actuator"
    **kwargs: Any additional specified attributes for the new joint

    Returns:
        ET.Element: new actuator xml element
    """
    element = new_element(tag=act_type, name=name, **kwargs)
    # Set the transmission joint directly (bypassing new_element's
    # None-dropping / stringification), so @joint is attached verbatim.
    element.set("joint", joint)
    return element
def new_site(name, rgba=RED, pos=(0, 0, 0), size=(0.005,), **kwargs):
    """
    Build a site xml element.

    Any attribute whose value is None is dropped by new_element before
    the element is created.

    Args:
        name (str): Name for this site
        rgba (4-array): (r,g,b,a) color and transparency. Defaults to solid red.
        pos (3-array): (x,y,z) 3d position of the site.
        size (array of float): site size (sites are spherical by default).
        **kwargs: Any additional specified attributes for the new site

    Returns:
        ET.Element: new site xml element
    """
    # Merge explicit parameters after caller-supplied attributes.
    attrs = dict(kwargs, pos=pos, size=size, rgba=rgba)
    return new_element(tag="site", name=name, **attrs)
def new_geom(name, type, size, pos=(0, 0, 0), group=0, **kwargs):
    """
    Build a geom xml element.

    Any attribute whose value is None is dropped by new_element before
    the element is created.

    Args:
        name (str): Name for this geom
        type (str): type of the geom.
            see all types here: http://mujoco.org/book/modeling.html#geom
        size (n-array of float): geom size parameters.
        pos (3-array): (x,y,z) 3d position of the site.
        group (int): the integer group that the geom belongs to. useful for
            separating visual and physical elements.
        **kwargs: Any additional specified attributes for the new geom

    Returns:
        ET.Element: new geom xml element
    """
    # Merge explicit parameters after caller-supplied attributes.
    attrs = dict(kwargs, type=type, size=size, pos=pos, group=group)
    return new_element(tag="geom", name=name, **attrs)
def new_body(name, pos=(0, 0, 0), **kwargs):
    """
    Build a body xml element.

    Args:
        name (str): Name for this body
        pos (3-array): (x,y,z) 3d position of the body frame.
        **kwargs: Any additional specified attributes for the new body

    Returns:
        ET.Element: new body xml element
    """
    # Position is always emitted; remaining attributes are stringified /
    # None-dropped by new_element.
    attrs = dict(kwargs, pos=pos)
    return new_element(tag="body", name=name, **attrs)
def new_inertial(pos=(0, 0, 0), mass=None, **kwargs):
    """
    Build an inertial xml element (unnamed, per mujoco convention).

    Args:
        pos (3-array): (x,y,z) 3d position of the inertial frame.
        mass (None or float): The mass of inertial; omitted from the XML when None
        **kwargs: Any additional specified attributes for the new inertial element

    Returns:
        ET.Element: new inertial xml element
    """
    # A None mass is dropped by new_element, so it can be passed through
    # unconditionally.
    attrs = dict(kwargs, mass=mass, pos=pos)
    return new_element(tag="inertial", name=None, **attrs)
def get_size(size,
             size_max,
             size_min,
             default_max,
             default_min):
    """
    Resolve an explicit size, or sample one uniformly from a range.

    Args:
        size (n-array): Array of numbers that explicitly define the size
        size_max (n-array): Array of numbers that define the custom max size from which to randomly sample
        size_min (n-array): Array of numbers that define the custom min size from which to randomly sample
        default_max (n-array): Array of numbers that define the default max size from which to randomly sample
        default_min (n-array): Array of numbers that define the default min size from which to randomly sample

    Returns:
        np.array: size generated

    Raises:
        ValueError: [Inconsistent array sizes]
    """
    # The default bounds define the dimensionality; they must agree.
    if len(default_max) != len(default_min):
        raise ValueError('default_max = {} and default_min = {}'
                         .format(str(default_max), str(default_min)) +
                         ' have different lengths')
    if size is not None:
        # An explicit size and a sampling range are mutually exclusive.
        if (size_max is not None) or (size_min is not None):
            raise ValueError('size = {} overrides size_max = {}, size_min = {}'
                             .format(size, size_max, size_min))
        return np.array(size)
    lo = default_min if size_min is None else size_min
    hi = default_max if size_max is None else size_max
    # Sample each dimension independently from [lo[i], hi[i]).
    return np.array([np.random.uniform(lo[i], hi[i])
                     for i in range(len(default_max))])
def postprocess_model_xml(xml_str):
    """
    This function postprocesses the model.xml collected from a MuJoCo demonstration
    in order to make sure that the STL files can be found.

    Args:
        xml_str (str): Mujoco sim demonstration XML file as string

    Returns:
        str: Post-processed xml file as string
    """
    # Directory of the locally installed robosuite package; asset file
    # references in the demo XML are re-rooted here.
    path = os.path.split(robosuite.__file__)[0]
    # NOTE(review): splitting on "/" assumes POSIX-style paths — this
    # would misbehave with Windows back-slash paths; confirm if relevant.
    path_split = path.split("/")
    # replace mesh and texture file paths
    tree = ET.fromstring(xml_str)
    root = tree
    asset = root.find("asset")
    meshes = asset.findall("mesh")
    textures = asset.findall("texture")
    all_elements = meshes + textures
    for elem in all_elements:
        old_path = elem.get("file")
        if old_path is None:
            continue
        old_path_split = old_path.split("/")
        # Keep everything after the LAST "robosuite" path component and
        # graft it onto the local installation path.
        ind = max(
            loc for loc, val in enumerate(old_path_split) if val == "robosuite"
        )  # last occurrence index
        new_path_split = path_split + old_path_split[ind + 1 :]
        new_path = "/".join(new_path_split)
        elem.set("file", new_path)
    return ET.tostring(root, encoding="utf8").decode("utf8")
def add_to_dict(dic, fill_in_defaults=True, default_value=None, **kwargs):
    """
    Append key / value pairs to dictionary @dic, where every entry of @dic
    is its own list (one slot per call to this function).

    Args:
        dic (dict): Dictionary to which new key / value pairs will be added. If the key already exists,
            will append the value to that key entry
        fill_in_defaults (bool): If True, will automatically add @default_value to all dictionary entries that are
            not explicitly specified in @kwargs
        default_value (any): Default value to fill (None by default)

    Returns:
        dict: Modified dictionary (the same object as @dic)
    """
    keys = set(dic.keys())
    # Length of the existing value lists: new keys are left-padded to this
    # length so all lists stay aligned.
    # (Bug fix: this used to be `len(list(keys)[0])` — the length of the
    # first KEY STRING, not the length of its value list.)
    n = len(next(iter(dic.values()))) if dic else 0
    for k, v in kwargs.items():
        if k in dic:
            dic[k].append(v)
            keys.remove(k)
        else:
            # Brand-new key: pad history with defaults before adding @v.
            dic[k] = [default_value] * n + [v] if fill_in_defaults else [v]
    # If filling in defaults, fill in remaining default values for keys
    # that were not mentioned in @kwargs this call.
    if fill_in_defaults:
        for k in keys:
            dic[k].append(default_value)
    return dic
def add_prefix(
    root,
    prefix,
    tags="default",
    attribs="default",
    exclude=None,
):
    """
    Find all element(s) matching the requested @tag, and appends @prefix to all @attributes if they exist.

    Args:
        root (ET.Element): Root of the xml element tree to start recursively searching through.
        prefix (str): Prefix to add to all specified attributes
        tags (str or list of str or set): Tag(s) to search for in this ElementTree. "Default" corresponds to all tags
        attribs (str or list of str or set): Element attribute(s) to append prefix to. "Default" corresponds
            to all attributes that reference names
        exclude (None or function): Filtering function that should take in an ET.Element or a string (attribute) and
            return True if we should exclude the given element / attribute from having any prefixes added
    """
    # Standardize tags and attributes to be a set
    if tags != "default":
        tags = {tags} if type(tags) is str else set(tags)
    if attribs == "default":
        # Fall back to the module-wide list of name-referencing attributes.
        attribs = MUJOCO_NAMED_ATTRIBUTES
    attribs = {attribs} if type(attribs) is str else set(attribs)
    # Check the current element for matching conditions
    if (tags == "default" or root.tag in tags) and (exclude is None or not exclude(root)):
        for attrib in attribs:
            v = root.get(attrib, None)
            # Only add prefix if the attribute exists, the current value doesn't already begin with prefix
            # (which also makes repeated calls safe), and the @exclude filter is either None or returns False
            if v is not None and not v.startswith(prefix) and (exclude is None or not exclude(v)):
                root.set(attrib, prefix + v)
    # Continue recursively searching through the element tree
    for r in root:
        add_prefix(root=r, prefix=prefix, tags=tags, attribs=attribs, exclude=exclude)
def add_material(root, naming_prefix="", custom_material=None):
    """
    Iterates through all element(s) in @root recursively and adds a material / texture to all visual geoms that don't
    already have a material specified.

    Args:
        root (ET.Element): Root of the xml element tree to start recursively searching through.
        naming_prefix (str): Adds this prefix to all material and texture names
        custom_material (None or CustomMaterial): If specified, will add this material to all visual geoms.
            Else, will add a default "no-change" material.

    Returns:
        4-tuple: (ET.Element, ET.Element, CustomMaterial, bool) (tex_element, mat_element, material, used)
            corresponding to the added material and whether the material was actually used or not.
    """
    # Initialize used as False
    used = False
    # First, make sure material is specified; fall back to a flat
    # "no-change" cube texture so geoms still get a valid material.
    if custom_material is None:
        custom_material = CustomMaterial(
            texture=None,
            tex_name="default_tex",
            mat_name="default_mat",
            tex_attrib={
                "type": "cube",
                "builtin": "flat",
                "width": 100,
                "height": 100,
                "rgb1": np.ones(3),
                "rgb2": np.ones(3),
            },
        )
    # Else, check to make sure the custom material begins with the specified prefix and that it's unique
    if not custom_material.name.startswith(naming_prefix) and not custom_material.shared:
        custom_material.name = naming_prefix + custom_material.name
        custom_material.tex_attrib["name"] = naming_prefix + custom_material.tex_attrib["name"]
        custom_material.mat_attrib["name"] = naming_prefix + custom_material.mat_attrib["name"]
        custom_material.mat_attrib["texture"] = naming_prefix + custom_material.mat_attrib["texture"]
    # Check the current element for matching conditions: visual geoms are
    # group "1", and only geoms without an explicit material are touched.
    if root.tag == "geom" and root.get("group", None) == "1" and root.get("material", None) is None:
        # Add a new material attribute to this geom
        root.set("material", custom_material.name)
        # Set used to True
        used = True
    # Continue recursively searching through the element tree
    # NOTE(review): each recursion level also rebuilds tex/mat elements and
    # discards them — only the top-level call's pair is used; harmless but
    # wasteful. Confirm before simplifying.
    for r in root:
        _, _, _, _used = add_material(root=r, naming_prefix=naming_prefix, custom_material=custom_material)
        # Update used
        used = used or _used
    # Lastly, return the new texture and material elements
    tex_element = new_element(tag="texture", **custom_material.tex_attrib)
    mat_element = new_element(tag="material", **custom_material.mat_attrib)
    return tex_element, mat_element, custom_material, used
def recolor_collision_geoms(root, rgba, exclude=None):
    """
    Recursively set the rgba of every collision geom (group "0" or unset)
    under @root, removing any material reference so the color shows.

    Args:
        root (ET.Element): Root of the xml element tree to start recursively searching through
        rgba (4-array): (R, G, B, A) values to assign to all geoms with this group.
        exclude (None or function): Filtering function that should take in an ET.Element and
            return True if we should exclude the given element / attribute from having its collision geom impacted.
    """
    # Collision geoms are geoms in group 0 (the default when unset).
    is_collision_geom = root.tag == "geom" and root.get("group") in {None, "0"}
    if is_collision_geom and (exclude is None or not exclude(root)):
        root.set("rgba", array_to_string(rgba))
        # Drop any material reference so the rgba takes effect.
        root.attrib.pop("material", None)
    # Recurse into all child elements.
    for child in root:
        recolor_collision_geoms(root=child, rgba=rgba, exclude=exclude)
def _element_filter(element, parent):
    """
    Default element filter for sort_elements. Classifies elements into:

        :`'root_body'`: Top-level body element
        :`'bodies'`: Any body elements
        :`'joints'`: Any joint elements
        :`'actuators'`: Any actuator elements
        :`'sites'`: Any site elements
        :`'sensors'`: Any sensor elements
        :`'contact_geoms'`: Any geoms used for collision (as specified by group 0 (default group) geoms)
        :`'visual_geoms'`: Any geoms used for visual rendering (as specified by group 1 geoms)

    Args:
        element (ET.Element): Current XML element that we are filtering
        parent (ET.Element): Parent XML element for the current element

    Returns:
        str or None: Assigned filter key for this element. None if no matching filter is found.
    """
    tag = element.tag
    parent_tag = None if parent is None else parent.tag
    # Actuators are classified by their parent tag, not their own,
    # so this check must come first.
    if parent_tag == "actuator":
        return "actuators"
    if tag == "joint":
        # Skip tendons, which carry a "joint" / "joint1" attribute.
        if element.get("joint") is None and element.get("joint1") is None:
            return "joints"
        return None
    if tag == "body":
        # A body whose parent is not itself a body is the top-level body.
        return "root_body" if parent_tag != "body" else "bodies"
    if tag == "site":
        return "sites"
    if tag in SENSOR_TYPES:
        return "sensors"
    if tag == "geom":
        # Only classify collision (group 0 / unset) and visual (group 1) geoms.
        group = element.get("group")
        if group not in {None, "0", "1"}:
            return None
        return "visual_geoms" if group == "1" else "contact_geoms"
    # No condition met.
    return None
def sort_elements(root, parent=None, element_filter=None, _elements_dict=None):
    """
    Utility method to iteratively sort all elements based on @tags. This XML ElementTree will be parsed such that
    all elements with the same key as returned by @element_filter will be grouped as a list entry in the returned
    dictionary.

    Args:
        root (ET.Element): Root of the xml element tree to start recursively searching through
        parent (ET.Element): Parent of the root node. Default is None (no parent node initially)
        element_filter (None or function): Function used to filter the incoming elements. Should take in two
            ET.Elements (current_element, parent_element) and return a string filter_key if the element
            should be added to the list of values sorted by filter_key, and return None if no value should be added.
            If no element_filter is specified, defaults to the module-level _element_filter.
        _elements_dict (dict): Dictionary that gets passed to recursive calls. Should not be modified externally by
            top-level call.

    Returns:
        dict: Filtered key-specific lists of the corresponding elements
    """
    # Initialize dictionary and element filter if None is set
    # (a fresh dict per top-level call avoids the mutable-default pitfall)
    if _elements_dict is None:
        _elements_dict = {}
    if element_filter is None:
        element_filter = _element_filter
    # Parse this element
    key = element_filter(root, parent)
    if key is not None:
        # Initialize new entry in the dict if this is the first time encountering this value, otherwise append
        if key not in _elements_dict:
            _elements_dict[key] = [root]
        else:
            _elements_dict[key].append(root)
    # Loop through all possible subtrees for this XML recursively
    for r in root:
        _elements_dict = sort_elements(
            root=r,
            parent=root,
            element_filter=element_filter,
            _elements_dict=_elements_dict
        )
    return _elements_dict
def find_parent(root, child):
    """
    Return the direct parent of @child within the tree rooted at @root.

    Args:
        root (ET.Element): Root of the xml element tree to start recursively searching through.
        child (ET.Element): Child element whose parent is to be found

    Returns:
        None or ET.Element: Matching parent if found, else None
    """
    # Depth-first search: if @child is a direct subelement of the node we
    # are visiting, that node is the parent; otherwise recurse deeper.
    for candidate in root:
        if candidate == child:
            return root
        found = find_parent(root=candidate, child=child)
        if found is not None:
            return found
    # Exhausted the subtree without a match.
    return None
def find_elements(root, tags, attribs=None, return_first=True):
    """
    Find all element(s) matching the requested @tags and @attribs. If
    @return_first is True, then will return the first element found matching
    the criteria specified. Otherwise, will return a list of elements that
    match the criteria.

    Args:
        root (ET.Element): Root of the xml element tree to start recursively
            searching through.
        tags (str or list of str or set): Tag(s) to search for in this
            ElementTree.
        attribs (None or dict of str): Element attribute(s) to check against
            for a filtered element. A match is considered found only if all
            attributes match. Each attribute key should have a corresponding
            value with which to compare against.
        return_first (bool): Whether to immediately return once the first
            matching element is found.

    Returns:
        None or ET.Element or list of ET.Element: Matching element(s) found.
            Returns None if there was no match.
    """
    # Initialize return value
    elements = None if return_first else []
    # Normalize a lone tag string into a list
    tags = [tags] if isinstance(tags, str) else tags
    # Check the current element for matching conditions
    if root.tag in tags:
        matching = True
        if attribs is not None:
            # All requested attributes must agree, otherwise reject this node.
            for k, v in attribs.items():
                if root.get(k) != v:
                    matching = False
                    break
        # If all criteria were matched, add this to the solution (or return
        # immediately if specified)
        if matching:
            if return_first:
                return root
            else:
                elements.append(root)
    # Continue recursively searching through the element tree
    for r in root:
        if return_first:
            elements = find_elements(tags=tags, attribs=attribs, root=r, return_first=return_first)
            if elements is not None:
                return elements
        else:
            found_elements = find_elements(tags=tags, attribs=attribs, root=r, return_first=return_first)
            # FIX: removed the original's unused `pre_elements = deepcopy(elements)`
            # here — dead code that deep-copied the accumulator on every child.
            if found_elements:
                elements += found_elements if isinstance(found_elements, list) else [found_elements]
    # Collapse an empty accumulator to None so both modes report "no match"
    # the same way.
    return elements if elements else None
def save_sim_model(sim, fname):
    """
    Write the current model XML held by @sim to disk at @fname.

    Args:
        sim (MjSim): Simulation whose model XML should be saved.
        fname (str): Absolute filepath to the location to save the file.
    """
    # The context manager guarantees the handle is flushed and closed even if
    # sim.save raises.
    with open(fname, "w") as xml_file:
        sim.save(file=xml_file, format="xml")
def get_ids(sim, elements, element_type="geom", inplace=False):
    """
    Grabs the mujoco IDs for each element in @elements, corresponding to the
    specified @element_type.

    Args:
        sim (MjSim): Active mujoco simulation object.
        elements (str or list or dict): Element(s) to convert into IDs. Note
            that the return type corresponds to @elements type, where each
            element name is replaced with the ID.
        element_type (str): The type of element to grab ID for. Options are
            {geom, body, site}.
        inplace (bool): If False, will create a copy of @elements to prevent
            overwriting the original data structure.

    Returns:
        str or list or dict: IDs corresponding to @elements.
    """
    if not inplace:
        # Copy elements first so we don't write to the underlying object
        elements = deepcopy(elements)
    # Choose what to do based on elements type
    if isinstance(elements, str):
        # We simply return the value of this single element
        assert element_type in {"geom", "body", "site"},\
            f"element_type must be either geom, body, or site. Got: {element_type}"
        if element_type == "geom":
            elements = sim.model.geom_name2id(elements)
        elif element_type == "body":
            elements = sim.model.body_name2id(elements)
        else:       # site
            elements = sim.model.site_name2id(elements)
    elif isinstance(elements, dict):
        # BUG FIX: iterating the dict directly yields only keys (and raised on
        # unpacking) — we need key/value pairs, so iterate .items(). Replacing
        # values during items() iteration is safe since no keys are added.
        for name, ele in elements.items():
            elements[name] = get_ids(sim=sim, elements=ele, element_type=element_type, inplace=True)
    else:   # We assume this is an iterable array
        assert isinstance(elements, Iterable), "Elements must be iterable for get_id!"
        elements = [get_ids(sim=sim, elements=ele, element_type=element_type, inplace=True) for ele in elements]
    return elements
| [
2,
10361,
5499,
329,
29349,
33974,
22495,
23735,
4981,
198,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
17268,
13,
39305,
1330,
40806,
540,
198,
6738,
... | 2.593731 | 11,581 |
"""
Notes Web App
Copyright (C) 2019 DesmondTan
"""
###########
# Imports #
###########
from app.models import User, Followers, Notes, Folders, Notes_Permissions, Notes_tag, Tags
from datetime import datetime
from werkzeug.security import generate_password_hash
#############
# Functions #
#############
| [
37811,
198,
16130,
5313,
2034,
198,
15269,
357,
34,
8,
13130,
41045,
45557,
198,
37811,
628,
198,
7804,
21017,
198,
2,
1846,
3742,
1303,
198,
7804,
21017,
198,
198,
6738,
598,
13,
27530,
1330,
11787,
11,
7281,
364,
11,
11822,
11,
3995... | 3.428571 | 91 |
import logging
| [
11748,
18931,
198
] | 5 | 3 |
from django.urls import path
from . import views
""" IF YOU CHANGE URL MUST CHANGE IN LIST PAGES"""
# Route table for the team app. The URL fragments below are referenced from
# templates (see the warning string above) — renaming one requires updating
# the corresponding list pages.
urlpatterns = [
    # Team CRUD
    path('list', views.team_list, name="team-list"),
    path('add', views.team_add, name="team-add"),
    path('update/<str:team_name>', views.team_update, name="team-update"),
    # if you change url then must change in team_list.html.
    path('delete/<str:team_name>', views.team_delete, name="team-delete"),
    # Membership dashboards
    path('my-team-members/', views.team_member_list, name="my-team-members"),
    path('my-member-dashboard/', views.team_my_member_dashboard, name="my-member-dashboard"),
    # Module listings (all / by id / filtered by completion state)
    path('module/all/', views.team_all_module, name="team-all-module"),
    path('module/<int:module_id>/', views.team_module_details, name="team-module-details"),
    path('module/completed/', views.team_completed_modules, name="team-completed-module"),
    path('module/running/', views.team_running_modules, name="team-running-module"),
    # TASK URLS
    path('module/<int:module_id>/module/add/', views.task_create, name='task-create'),
    path('module/<int:module_id>/task/update/<int:task_id>/', views.task_update,
         name='task-update'),
    path('module/<int:module_id>/task/delete/<int:task_id>/', views.task_delete,
         name='task-delete'),
    path('module/<int:module_id>/task/assign/<int:task_id>', views.task_assign,
         name="task-assign"),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
1330,
5009,
628,
198,
37811,
16876,
7013,
5870,
27746,
10289,
17191,
5870,
27746,
3268,
39498,
350,
25552,
37811,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10... | 2.609434 | 530 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Constructs a generic image model based on the hparams the user passes in.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import flags
from common import cluster_utils
import horovod.tensorflow as hvd
import network_utils_slice as network_utils
# Short aliases for the tf.contrib "slim"-style APIs used throughout this file.
arg_scope = tf.contrib.framework.arg_scope
slim = tf.contrib.slim
# Command-line flag values (flag definitions live elsewhere in the project).
FLAGS = flags.FLAGS
def _build_loss(loss_fn, loss_name, logits, end_points, labels,
                add_summary=False):
  """Compute total loss based on the specified loss function.

  The total is the sum of an optional auxiliary-head loss (weight 0.4), the
  primary empirical loss on @logits (weight 1.0), and any losses collected
  in the REGULARIZATION_LOSSES graph collection.

  Args:
    loss_fn: Callable with a tf.losses-style signature
      (labels, logits/predictions, weights=..., scope=...).
    loss_name: Scope name for the primary loss (also used in its summary tag).
    logits: Output logits of the network.
    end_points: Dict of network endpoints; an aux head is looked up under
      'AuxLogits' (Inception/Genet) or 'aux_logits' (AmoebaNet).
    labels: Ground-truth labels passed to @loss_fn.
    add_summary: If True, emit scalar summaries for each loss component.

  Returns:
    The scalar total-loss tensor.
  """
  # Collect all losses explicitly to sum up the total_loss.
  losses = []
  # Whether to add an aux loss is controlled in network_fn. Once an aux head
  # is built, an aux loss would be added here automatically.
  aux_head_endpoint = None
  if 'AuxLogits' in end_points:
    # For Inception/Genet aux head.
    aux_head_endpoint = end_points['AuxLogits']
  elif 'aux_logits' in end_points:
    # For AmoebaNet aux head.
    # NOTE(review): the trailing comma wraps the endpoint in a 1-tuple; the
    # tf.squeeze(..., axis=[0]) below then strips the resulting leading dim.
    # Confirm this asymmetry vs. the 'AuxLogits' branch is intentional.
    aux_head_endpoint = end_points['aux_logits'],
  if aux_head_endpoint:
    aux_loss = loss_fn(
        labels,
        tf.squeeze(aux_head_endpoint, axis=[0]),
        weights=0.4,
        scope='aux_loss')
    tf.logging.info('Adding to aux loss.')
    if add_summary:
      tf.summary.scalar('losses/aux_loss', aux_loss)
    losses.append(aux_loss)
  # Add the empirical loss.
  primary_loss = loss_fn(labels, logits, weights=1.0, scope=loss_name)
  losses.append(primary_loss)
  # Add regularization losses.
  reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
  if reg_losses:
    fp32_reg_losses = []
    for reg_loss in reg_losses:
      # Cast to float32 so mixed-precision reg terms can be summed together.
      fp32_reg_losses.append(tf.cast(reg_loss, tf.float32))
    reg_loss = tf.add_n(fp32_reg_losses, name='regularization_loss')
    losses.append(reg_loss)
  total_loss = tf.add_n(losses, name='total_loss')
  if add_summary:
    tf.summary.scalar('losses/' + loss_name, primary_loss)
    # NOTE(review): `reg_loss` is only bound when the regularization
    # collection is non-empty; add_summary=True with no reg losses would
    # raise NameError here. Verify callers always register reg losses.
    tf.summary.scalar('losses/regularization_loss', reg_loss)
    tf.summary.scalar('losses/total_loss', total_loss)
  return total_loss
def build_learning_rate(initial_lr,
                        lr_decay_type,
                        global_step,
                        decay_factor=None,
                        decay_steps=None,
                        stepwise_epoch=None,
                        total_steps=None,
                        add_summary=True,
                        warmup_steps=0):
  """Construct the (possibly warmed-up) learning-rate tensor.

  Supported schedules: 'constant', 'exponential', 'cosine' and 'stepwise'
  (fixed 10x drops after epochs 10 and 20). For the first @warmup_steps
  steps the rate ramps linearly from 0 up to @initial_lr.
  """
  if lr_decay_type == 'constant':
    learning_rate = initial_lr
  elif lr_decay_type == 'exponential':
    assert decay_steps is not None
    assert decay_factor is not None
    learning_rate = tf.train.exponential_decay(
        initial_lr, global_step, decay_steps, decay_factor, staircase=True)
  elif lr_decay_type == 'cosine':
    assert total_steps is not None
    learning_rate = 0.5 * initial_lr * (
        1 + tf.cos(np.pi * tf.cast(global_step, tf.float32) / total_steps))
  elif lr_decay_type == 'stepwise':
    assert stepwise_epoch is not None
    boundaries = [10 * stepwise_epoch, 20 * stepwise_epoch]
    values = [initial_lr, initial_lr * 0.1, initial_lr * 0.01]
    learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)
  else:
    assert False, 'Unknown lr_decay_type : %s' % lr_decay_type
  # By default, warmup_steps_fraction = 0.0 which means no warmup steps.
  tf.logging.info('Learning rate warmup_steps: %d' % warmup_steps)
  ramp = initial_lr * tf.cast(global_step, tf.float32) / tf.cast(
      warmup_steps, tf.float32)
  # tf.cond evaluates both branch lambdas at graph-construction time, so the
  # closure still sees the schedule value computed above.
  learning_rate = tf.cond(
      global_step < warmup_steps, lambda: ramp, lambda: learning_rate)
  if add_summary:
    tf.summary.scalar('learning_rate', learning_rate)
  return learning_rate
def _build_aux_head(net, end_points, num_classes, hparams, scope):
  """Auxiliary head used for all models across all datasets.

  Builds avg-pool -> 1x1 conv -> full-extent conv -> FC on top of @net and
  stores the resulting logits (cast to float32) in @end_points under
  'aux_logits', or 'aux_logits_2' if an aux head was already registered.
  """
  aux_scaling = 1.0
  # TODO(huangyp): double check aux_scaling with vrv@.
  if hasattr(hparams, 'aux_scaling'):
    aux_scaling = hparams.aux_scaling
  tf.logging.info('aux scaling: {}'.format(aux_scaling))
  # bp16_getter controls variable creation precision; AUTO_REUSE lets the
  # scope be re-entered (e.g. multi-tower builds).
  with tf.variable_scope(scope, custom_getter=network_utils.bp16_getter, reuse=tf.AUTO_REUSE):
    aux_logits = tf.identity(net)
    with tf.variable_scope('aux_logits', reuse=tf.AUTO_REUSE):
      aux_logits = slim.avg_pool2d(
          aux_logits, [5, 5], stride=3, padding='VALID')
      aux_logits = slim.conv2d(aux_logits, int(128 * aux_scaling),
                               [1, 1], scope='proj')
      aux_logits = network_utils.batch_norm(aux_logits, scope='aux_bn0')
      aux_logits = tf.nn.relu(aux_logits)
      # Shape of feature map before the final layer.
      shape = aux_logits.shape
      if hparams.data_format == 'NHWC':
        shape = shape[1:3]
      else:
        shape = shape[2:4]
      # A conv whose kernel equals the remaining spatial extent collapses the
      # feature map to 1x1 before flattening.
      aux_logits = slim.conv2d(aux_logits, int(768 * aux_scaling),
                               shape, padding='VALID')
      aux_logits = network_utils.batch_norm(aux_logits, scope='aux_bn1')
      aux_logits = tf.nn.relu(aux_logits)
      aux_logits = tf.contrib.layers.flatten(aux_logits)
      aux_logits = slim.fully_connected(aux_logits, num_classes)
      end_point_name = (
          'aux_logits' if 'aux_logits' not in end_points else 'aux_logits_2')
      end_points[end_point_name] = tf.cast(aux_logits, tf.float32)
def _imagenet_stem(inputs, hparams, stem_cell, filter_scaling_rate):
  """Stem used for models trained on ImageNet.

  Applies a stride-2 3x3 conv plus batch-norm, then hparams.num_stem_cells
  stride-2 reduction cells whose filter scaling grows by
  @filter_scaling_rate per cell.

  Returns:
    (net, cell_outputs): the stem output and the list of intermediate
    outputs ([None, conv output, cell outputs...]) used as skip inputs.
  """
  # 149 x 149 x 32
  num_stem_filters = hparams.stem_reduction_size
  with tf.variable_scope('stem', custom_getter=network_utils.bp16_getter, reuse=tf.AUTO_REUSE):
    net = slim.conv2d(
        inputs, num_stem_filters, [3, 3], stride=2, scope='conv0',
        padding='VALID')
    net = network_utils.batch_norm(net, scope='conv0_bn')
    tf.logging.info('imagenet_stem shape after conv2d_bn: {}'.format(net.shape))
  # Run the reduction cells
  cell_outputs = [None, net]
  # Start below 1.0 so that after num_stem_cells multiplications the main
  # body begins at filter_scaling == 1.0.
  filter_scaling = 1.0 / (filter_scaling_rate**hparams.num_stem_cells)
  for cell_num in range(hparams.num_stem_cells):
    net = stem_cell(
        net,
        scope='cell_stem_{}'.format(cell_num),
        filter_scaling=filter_scaling,
        stride=2,
        prev_layer=cell_outputs[-2],
        cell_num=cell_num)
    cell_outputs.append(net)
    filter_scaling *= filter_scaling_rate
    tf.logging.info('imagenet_stem shape at reductionlayer{}: {}'.format(
        cell_num, net.shape))
  # Only include cells in the cell_outputs.
  return net, cell_outputs
def network_arg_scope(weight_decay=5e-5,
                      batch_norm_decay=0.9997,
                      batch_norm_epsilon=1e-3,
                      is_training=True,
                      data_format='NHWC',
                      num_shards=None,
                      distributed_group_size=1):
  """Defines the default arg scope for the AmoebaNet ImageNet model.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_decay: Decay for batch norm moving average.
    batch_norm_epsilon: Small float added to variance to avoid dividing by zero
      in batch norm.
    is_training: whether is training or not.
      Useful for fine-tuning a model with different num_classes.
    data_format: Input data format.
    num_shards: Number of shards in the job
    distributed_group_size: Size of the group to average for batch norm.

  Returns:
    An `arg_scope` to use for the AmoebaNet Model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': batch_norm_decay,
      # epsilon to prevent 0s in variance.
      'epsilon': batch_norm_epsilon,
      'scale': True,
      'moving_vars': 'moving_vars',
      'is_training': is_training,
      'data_format': data_format,
      'num_shards': num_shards,
      'distributed_group_size': distributed_group_size,
  }
  weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
  weights_initializer = tf.contrib.layers.variance_scaling_initializer(
      mode='FAN_OUT')
  # NOTE(review): weights_regularizer is computed but deliberately NOT passed
  # below (None disables regularization at these layers); confirm intent
  # before "fixing" — losses would otherwise land in REGULARIZATION_LOSSES.
  with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],
                 weights_regularizer=None,  # weights_regularizer,
                 weights_initializer=weights_initializer):
    with arg_scope([slim.fully_connected],
                   activation_fn=None, scope='FC'):
      with arg_scope([slim.conv2d, slim.separable_conv2d],
                     activation_fn=None, biases_initializer=None):
        with arg_scope([network_utils.batch_norm], **batch_norm_params):
          with arg_scope(
              [slim.dropout, network_utils.drop_path], is_training=is_training):
            with arg_scope([slim.avg_pool2d,
                            slim.max_pool2d,
                            slim.conv2d,
                            slim.separable_conv2d,
                            network_utils.factorized_reduction,
                            network_utils.global_avg_pool,
                            network_utils.get_channel_index,
                            network_utils.get_channel_dim],
                           data_format=data_format) as sc:
              # Returning from inside the nested scopes yields the combined
              # scope dict for reuse via `with arg_scope(sc):`.
              return sc
def build_network(inputs,
                  num_classes,
                  is_training=True,
                  hparams=None,
                  dep_outputs=None):
  """Builds an image model.

  Builds a model the takes inputs and return logits and endpoints.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of classes needed to be predicted by the model. If None,
      only returns the feature vector endpoints after global_pool.
    is_training: whether is training or not.
      Useful for fine-tuning a model with different num_classes.
    hparams: hparams used to construct the imagenet model.
    dep_outputs: optional dependency outputs forwarded unchanged to the
      BaseCell constructors (semantics defined in network_utils).

  Returns:
    a list containing 'logits', 'aux_logits' Tensors.

  Raises:
    ValueError: upon invalid hparams.
  """
  # Total cell count feeds the per-cell drop-path schedule.
  total_num_cells = (hparams.num_cells +
                     hparams.num_reduction_layers +
                     hparams.num_stem_cells)
  normal_cell = network_utils.BaseCell(
      hparams.reduction_size, hparams.normal_cell_operations,
      hparams.normal_cell_used_hiddenstates,
      hparams.normal_cell_hiddenstate_indices, hparams.drop_connect_keep_prob,
      total_num_cells, hparams.drop_path_burn_in_steps, dep_outputs)
  reduction_cell = network_utils.BaseCell(
      hparams.reduction_size, hparams.reduction_cell_operations,
      hparams.reduction_cell_used_hiddenstates,
      hparams.reduction_cell_hiddenstate_indices,
      hparams.drop_connect_keep_prob, total_num_cells,
      hparams.drop_path_burn_in_steps, dep_outputs)
  num_shards = hparams.num_shards
  distributed_group_size = hparams.distributed_group_size
  # Cross-replica (grouped) batch norm is only allowed on TPU.
  assert distributed_group_size == 1 or hparams.use_tpu
  sc = network_arg_scope(weight_decay=hparams.weight_decay,
                         batch_norm_decay=hparams.batch_norm_decay,
                         batch_norm_epsilon=hparams.batch_norm_epsilon,
                         is_training=is_training,
                         data_format=hparams.data_format,
                         num_shards=num_shards,
                         distributed_group_size=distributed_group_size)
  with arg_scope(sc):
    return _build_network_base(inputs,
                               normal_cell=normal_cell,
                               reduction_cell=reduction_cell,
                               num_classes=num_classes,
                               hparams=hparams,
                               is_training=is_training)
| [
2,
15069,
2864,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 2.294414 | 5,353 |
from .base import BaseModel
from .package import *
| [
198,
6738,
764,
8692,
1330,
7308,
17633,
198,
6738,
764,
26495,
1330,
1635,
198
] | 3.714286 | 14 |
from flask import render_template,redirect,url_for, flash,request, session
from .forms import RegistrationForm, LoginForm
from flask_login import login_user, logout_user, login_required
from ..models import User
from .. import db
from app import login_manager
from . import auth
# login route
# NOTE(review): the view functions for these three routes appear to be
# missing from this file — as written, all of the decorators below stack
# onto the next `def` (load_user), which cannot be intended. Restore the
# login/logout/register view functions.
@auth.route('/login',methods=['GET','POST'])
#logout user route
@auth.route('/logout')
@login_required
@auth.route('/register',methods = ["GET","POST"])
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login hook: resolve the session's stored id to a User row."""
    # No stored id means nobody is logged in; report that explicitly.
    if user_id is None:
        return None
    return User.query.get(user_id)
@login_manager.unauthorized_handler
def unauthorized():
    """Flash a notice and bounce anonymous visitors to the login page."""
    flash('You must be logged in to view that page.')
    login_page = url_for('auth.login')
    return redirect(login_page)
198,
6738,
42903,
1330,
8543,
62,
28243,
11,
445,
1060,
11,
6371,
62,
1640,
11,
7644,
11,
25927,
11,
6246,
198,
6738,
764,
23914,
1330,
24610,
8479,
11,
23093,
8479,
198,
6738,
42903,
62,
38235,
1330,
17594,
62,
7220,
11,
2604,
448,
... | 3.113139 | 274 |
import pandas as pd
import numpy as np
from rdkit import Chem
from scipy import stats
import pubchempy as pcp
# Load the raw logBB dataset and keep only rows with a numeric logBB value.
df = pd.read_excel("../2_bbb_all_complete_CID_out_smiles_fixed_updated.xlsx")
df = df[~df["logBB"].isna()]
df["logBB"] = df["logBB"].astype(float)
# remove molecules with logBB <= -9
df = df[df["logBB"] > -9]
# Annotate every row with its InChI (derived from the fixed SMILES) so that
# duplicate molecules can be detected independent of SMILES spelling.
for idx, row in df.iterrows():
    mol = Chem.MolFromSmiles(row["smiles_fixed_rdkit"])
    df.loc[idx, "Inchi"] = Chem.inchi.MolToInchi(mol)
df.to_excel("regression_inchi.xlsx", index=None, engine="openpyxl")
df = pd.read_excel("regression_inchi.xlsx")
# generate a dictionary to host all the inchi and isomeric smiles (or canonical smiles if isomeric smiles is not avaliable)
inchi_smi_dict = {inchi: [] for inchi in df["Inchi"].to_list()}
for idx, row in df.iterrows():
    # NOTE(review): append_value is not defined in this script — presumably a
    # helper that appends the SMILES to the list under the InChI key; confirm
    # it is imported/defined before running.
    inchi_smi_dict = append_value(inchi_smi_dict, row["Inchi"], row["smiles_fixed_rdkit"])
# count how many inchis have more than one isomeric smiles
counter = 0
for key, value in inchi_smi_dict.items():
    if len(value) >= 2:
        counter += 1
print(counter)
# use non-redundant isomeric smiles for inchi_smi_dict
# manually inspect inchies with more than one non-redundant smiles
inchi_smi_dict = {inchi: set(smi) for inchi, smi in inchi_smi_dict.items()}
counter = 0
for key, value in inchi_smi_dict.items():
    if len(value) >= 2:
        print(key, value)
# the same inchi may have more than one smiles value, 12 in total
# but they are just resonance structures, so use inchi as an identifier
###########################################################################
df = pd.read_excel("regression_inchi.xlsx")
# smiles fixing with 02_clean_smiles_chembl_way_20210214.py
#########################################################################
# Deduplicate by InChI (first occurrence survives); values from the duplicate
# rows are folded into "|"-separated strings in the loop below.
df_unique = df.drop_duplicates(subset="Inchi", keep="first").reset_index(drop=True)
# df_duplicated = df.drop_duplicates(subset="Inchi", keep=False).reset_index(drop=True)
# df_unique["logBB"] = [[] for _ in np.arange(df_unique.shape[0])]
df_unique["logBB"] = ""
df_unique["compound_name"] = ""
df_unique["CID"] = ""
df_unique["new_name"] = ""
df_unique["iupac_name"] = ""
df_unique["reference"] = ""
df_unique["NO."] = ""
df["logBB"] = df["logBB"].astype(float)
# append compound_name, CID, logBB, new_name, iupac_name to the df_unique
# for idx_unique, row_unique in df_unique.iterrows():
#     for idx, row in df.iterrows():
#         if row["Inchi"] == row_unique["Inchi"]:
#             # logBB
#             df_unique.loc[idx_unique, "logBB"] = df_unique.loc[idx_unique, "logBB"] + "|" + str(row["logBB"])
#             # compound_name
#             df_unique.loc[idx_unique, "compound_name"] = df_unique.loc[idx_unique, "compound_name"] + "|" + str(row["compound_name"])
#             # CID
#             df_unique.loc[idx_unique, "CID"] = df_unique.loc[idx_unique, "CID"] + "|" + str(row["CID"])
#             # new_name
#             df_unique.loc[idx_unique, "new_name"] = df_unique.loc[idx_unique, "new_name"] + "|" + str(row["new_name"])
#             # iupac_name
#             df_unique.loc[idx_unique, "iupac_name"] = df_unique.loc[idx_unique, "iupac_name"] + "|" + str(row["iupac_name"])
# df_unique.to_excel("tmp.xlsx", index=None, engine="openpyxl")
# a more efficient way: one filtered sub-frame per unique InChI instead of a
# full O(n^2) double loop
for idx_unique, row_unique in df_unique.iterrows():
    inchi_unique = row_unique["Inchi"]
    df_inchi_matching = df[df["Inchi"] == inchi_unique].reset_index(drop=True)
    for _, row_matching in df_inchi_matching.iterrows():
        # logBB (rounded to 2 decimals before joining)
        # df_unique.loc[idx_unique, "logBB"] = df_unique.loc[idx_unique, "logBB"] + str(row_matching["logBB"]) + "|"
        df_unique.loc[idx_unique, "logBB"] = df_unique.loc[idx_unique, "logBB"] + str(round(row_matching["logBB"], 2)) + "|"
        # compound_name
        df_unique.loc[idx_unique, "compound_name"] = df_unique.loc[idx_unique, "compound_name"] + str(row_matching["compound_name"]) + "|"
        # CID
        df_unique.loc[idx_unique, "CID"] = df_unique.loc[idx_unique, "CID"] + str(row_matching["CID"]) + "|"
        # new_name
        df_unique.loc[idx_unique, "new_name"] = df_unique.loc[idx_unique, "new_name"] + str(row_matching["new_name"]) + "|"
        # iupac_name
        df_unique.loc[idx_unique, "iupac_name"] = df_unique.loc[idx_unique, "iupac_name"] + str(row_matching["iupac_name"]) + "|"
        # reference
        df_unique.loc[idx_unique, "reference"] = df_unique.loc[idx_unique, "reference"] + str(row_matching["reference"]) + "|"
        # original NO.
        df_unique.loc[idx_unique, "NO."] = df_unique.loc[idx_unique, "NO."] + str(row_matching["NO."]) + "|"
df_unique.to_excel("regression_logBB_combined.xlsx", index=None, engine="openpyxl")
##################################################
# preprocess logBB data
from copy import deepcopy
df = pd.read_excel("regression_logBB_combined.xlsx")
# df_bak = deepcopy(df)
# filter molecules with max(logBB) - min(logBB) > 1 (irreconcilable sources)
counter = 0
for idx, row in df.iterrows():
    logBB_values = [float(logBB) for logBB in row["logBB"].strip("|").split("|")]
    if max(logBB_values) - min(logBB_values) > 1:
        counter += 1
        df.loc[idx, "logBB"] = np.nan
df = df.dropna(subset=["logBB"]).reset_index(drop=True)
df["std"] = np.nan
df["group"] = ""
# Derive one consensus logBB per molecule; "group" records which rule fired
# (A single value, B near-agreement, C mean of <3 values, D unique mode,
# dropped_* ambiguous cases kept for manual inspection).
for idx, row in df.iterrows():
    # round logBB values to two decimal points as this is the most data hold for
    logBB_values = [logBB for logBB in row["logBB"].strip("|").split("|")]
    # find the minimum decimal places (reversed find('.') = digits after dot)
    decimal_places = min([logBB[::-1].find('.') for logBB in logBB_values])
    logBB_values = [round(float(logBB), decimal_places) for logBB in logBB_values]
    # set logBB values if there is only one
    if len(logBB_values) == 1:
        df.loc[idx, "logBB"] = logBB_values[0]
        df.loc[idx, "group"] = "A"
        df.loc[idx, "std"] = 0
    else:
        mean_logBB = np.multiply(np.ones(len(logBB_values)),
                                 np.average(logBB_values))
        mean_logBB = np.around(mean_logBB, decimals=decimal_places)
        # set logBB values if all the values are the same or within 5% difference
        if np.allclose(np.array(logBB_values), mean_logBB, atol=0, rtol=0.05):
            df.loc[idx, "logBB"] = mean_logBB[0]
            df.loc[idx, "group"] = "B"
            df.loc[idx, "std"] = np.std(logBB_values)
        else:
            # if less than 3 values, use average value
            if len(logBB_values) < 3:
                df.loc[idx, "logBB"] = mean_logBB[0]
                df.loc[idx, "group"] = "C"
                df.loc[idx, "std"] = np.std(logBB_values)
            # if more than 3 values, use mode
            else:
                # not using stats.mode() because it can not handle the situation when two mode values are available
                # stats.mode(logBB_values)[0]
                values, counts = np.unique(logBB_values, return_counts=True)
                sorted_idx = np.argsort(counts)[::-1]
                values_sorted = values[sorted_idx]
                counts_sorted = counts[sorted_idx]
                # when there is only one number of maximum counts
                if counts_sorted[0] > counts_sorted[1]:
                    df.loc[idx, "logBB"] = values_sorted[0]
                    df.loc[idx, "group"] = "D"
                    df.loc[idx, "std"] = np.std(logBB_values)
                # when there are more than one maximum counts, they are equal
                else:
                    # more than 3 unique values
                    if len(values_sorted) >= 3:
                        # when there are two mode numbers
                        # counts_sorted[0] == counts_sorted[1] is a fact in such a condition as it
                        # is sorted
                        # the first 3 counts are the same
                        if counts_sorted[1] == counts_sorted[2]:
                            df.loc[idx, "logBB"] = sum(values_sorted[:3]) / 3
                            df.loc[idx, "group"] = "dropped_E"
                            df.loc[idx, "std"] = np.std(logBB_values)
                        # the first 2 counts are the same
                        else:
                            df.loc[idx, "logBB"] = sum(values_sorted[:2]) / 2
                            df.loc[idx, "group"] = "dropped_F"
                            df.loc[idx, "std"] = np.std(logBB_values)
                    # as counts_sorted is in descending order, counts_sorted[0] will not be less than counts_sorted[1]
                    # counts_sorted[0] == counts_sorted[1] and counts_sorted[0] == counts_sorted[2]
                    # when there are two unique count values
                    else:
                        # these two unique values are the same
                        if counts_sorted[0] == counts_sorted[1]:
                            df.loc[idx, "logBB"] = mean_logBB[0]
                            df.loc[idx, "group"] = "dropped_G"
                            df.loc[idx, "std"] = np.std(logBB_values)
                        # the first one is greater than the second one
                        else:
                            df.loc[idx, "logBB"] = values_sorted[0]
                            df.loc[idx, "group"] = "dropped_H"
                            df.loc[idx, "std"] = np.std(logBB_values)
# iupac name: keep the first plausible entry — drop "nan", bare digits and
# single characters — lower-cased and left-stripped.
for idx, row in df.iterrows():
    iupac_names = [name.lower() for name in row["iupac_name"].strip("|").split("|")
                   if name != "nan" if not name.isdigit() if len(name) != 1]
    if len(iupac_names) >= 1:
        df.loc[idx, "iupac_name"] = iupac_names[0].lstrip()
    else:
        df.loc[idx, "iupac_name"] = ""
# deal with compound_name, new_name
df["new_compound_name"] = ""
for idx, row in df.iterrows():
    # new_compound_name
    compound_names = [name.lower() for name in row["compound_name"].strip("|").split("|")
                      if name != "nan" if not name.isdigit() if len(name) != 1]
    new_names = [name.lower() for name in row["new_name"].strip("|").split("|")
                 if name != "nan" if not name.isdigit() if len(name) != 1]
    # these names found in pubchem come first
    # NOTE(review): list(set(...)) has arbitrary ordering, so "come first" is
    # not actually guaranteed — names[0] is a non-deterministic pick when
    # several candidates exist.
    names = list(set(new_names + compound_names))
    # when compound_names list is not empty
    if names != []:
        df.loc[idx, "new_compound_name"] = names[0].lstrip()
    else:
        df.loc[idx, "new_compound_name"] = row["iupac_name"]
# deal with CID
# for idx, row in df.iterrows():
# cids = list(set([int(float(cid)) for cid in row["CID"].strip("|").split("|") if cid != "nan"]))
# if len(cids) != 0:
# df.loc[idx, "CID"] = cids[0]
# else:
# df.loc[idx, "CID"] = ""
# deal with smiles and CID
# df["smiles_fixed_rdkit"] = df["smiles_fixed_rdkit"].astype(str)
# df["CID"] = df["CID"].astype(str)
# for idx, row in df.iterrows():
# # smiles_list = [smi.lower() for smi in row["smiles_fixed_rdkit"].strip("|").split("|")
# # if smi != "nan" if not smi.isdigit() if len(smi) != 1]
# smiles_list = [smi for smi in row["smiles_fixed_rdkit"].strip("|").split("|")
# if smi != "nan" if not smi.isdigit()]
# smiles_list = list(set(smiles_list))
# cids = list(set([int(float(cid)) for cid in row["CID"].strip("|").split("|") if cid != "nan"]))
# if len(smiles_list) >= 1:
# # df.loc[idx, "smiles_fixed_rdkit"] = smiles_list[0].lstrip()
# # get new CID from the smiles if CID is none
# # else: use old CID
# if len(cids) == 0:
# ## try to get CID until using up the smiles
# # flag to indicate if we found new CID and smiles
# flag = False
# for smi in smiles_list:
# try:
# # because can get an error with
# # O=[SH](O)(c1ccc2cc[nH]c2c1)N1CCCC1CCN1CCC(Oc2cccc(Cl)c2)CC1
# compound = pcp.get_compounds(identifier=smi, namespace="smiles")
# cid_new = compound[0].cid
# if cid_new is not None:
# flag = True
# break
# except:
# print("error found when searching pubchem")
# if flag is True:
# df.loc[idx, "smiles_fixed_rdkit"] = smi
# df.loc[idx, "CID"] = cid_new
# else:
# df.loc[idx, "smiles_fixed_rdkit"] = smiles_list[0]
# df.loc[idx, "CID"] = ""
# else:
# # use old CIDs
# df.loc[idx, "smiles_fixed_rdkit"] = smiles_list[0]
# if len(cids) >= 1:
# df.loc[idx, "CID"] = cids[0]
# else:
# df.loc[idx, "CID"] = ""
###########################################################
df["CID"] = df["CID"].fillna("")
df["CID"] = df["CID"].astype(str)
# deal with CID
# NOTE(review): this second astype(str) is redundant (same call two lines up).
df["CID"] = df["CID"].astype(str)
for idx, row in df.iterrows():
    # no need to deal with CID for regression data again
    # NOTE(review): rows with null logBB were dropped by dropna() earlier in
    # this script, so this branch looks unreachable here — presumably kept
    # for reuse with other datasets; confirm.
    if pd.isnull(row["logBB"]):
        cids = list(set([int(float(cid)) for cid in row["CID"].strip("|").split("|") if cid != "nan"]))
        if len(cids) != 0:
            df.loc[idx, "CID"] = cids[0]
        else:
            df.loc[idx, "CID"] = ""
# deal with SMILES: collapse the "|"-joined smiles strings to a single value
# per row (set() drops duplicates; pick is arbitrary when several remain).
df["smiles_fixed_rdkit"] = df["smiles_fixed_rdkit"].astype(str)
for idx, row in df.iterrows():
    smi_strings = list(set([smi for smi in row["smiles_fixed_rdkit"].strip("|").split("|") if smi != "nan"]))
    # BUG FIX: the original tested len(cids) — a stale variable left over from
    # the CID loop above — so the branch taken did not depend on the smiles
    # found for THIS row. The decision must use smi_strings.
    if len(smi_strings) != 0:
        df.loc[idx, "smiles_fixed_rdkit"] = smi_strings[0]
    else:
        df.loc[idx, "smiles_fixed_rdkit"] = ""
# Order rows by consensus group then logBB so manual review is easier.
df = df.sort_values(by=["group", "logBB"])
df.to_excel("regression_clean_done.xlsx", index=None, engine="openpyxl")
# clean the data manually
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
374,
67,
15813,
1330,
12870,
198,
6738,
629,
541,
88,
1330,
9756,
198,
11748,
2240,
2395,
3149,
88,
355,
279,
13155,
198,
198,
7568,
796,
279,
67,
13,
9... | 2.02887 | 6,789 |
from flask import Flask, redirect, render_template, request, session, flash, jsonify,url_for, Response
from flask_session import Session
from tempfile import mkdtemp
from db.postgresql import Db
from werkzeug.security import check_password_hash, generate_password_hash
from werkzeug.utils import secure_filename
from helpers import *
import re
from datetime import datetime, timedelta
import string
import random
import decimal
import requests
import uuid
import json
from email.message import EmailMessage
from email.headerregistry import Address
from email.utils import make_msgid
from openpyxl import load_workbook
from openpyxl.writer.excel import save_virtual_workbook
from openpyxl.styles import PatternFill, Font
from flask.json import JSONEncoder
app = Flask(__name__)

# Read the session signing key from disk. FIX: use a context manager so the
# file handle is closed — the original open()/read() pair leaked it for the
# lifetime of the process.
with open("secret_key.txt", "r") as key:
    secret_key = key.read()
app.secret_key = secret_key
# Ensure templates are auto-reloaded
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Configure session to use filesystem (instead of signed cookies)
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# make an instance of the database class
db = Db(app.logger)
# Static page copy loaded once at startup — presumably per-language text keyed
# by tag that the templates index into (TODO confirm against helpers.read_csv
# and the templates).
layout = read_csv("static/csv/layout.csv")
login_csv = read_csv("static/csv/login.csv")
end_csv = read_csv("static/csv/end_task.csv")
payment_csv = read_csv("static/csv/payment.csv")
tasks = read_csv("static/csv/index.csv")
register_csv = pd.read_excel('static/csv/register.xlsx', index_col="tag")
account_csv = read_csv("static/csv/account.csv")
consent_csv = pd.read_excel('static/csv/consent.xlsx', index_col="tag")
email_unsent = read_csv("static/csv/email_unsent.csv")
eeg_csv = pd.read_excel('static/csv/eeg.xlsx', index_col="tag")
sent_email_csv = read_csv("static/csv/sent_email.csv")
home_csv = read_csv("static/csv/home.csv")
collected_csv = read_csv("static/csv/collected.csv")
about_study_csv = read_csv("static/csv/about_study.csv")
for_participant_csv = pd.read_excel('static/csv/for_participant.xlsx', index_col="tag")
about_app_csv = pd.read_excel('static/csv/about_app.xlsx', index_col="tag")
contact_csv = read_csv("static/csv/contact.csv")
forgot_password_csv = read_csv("static/csv/forgot_password.csv")
verify_csv = read_csv("static/csv/verify.csv")
reset_password_email = read_csv("static/csv/reset_password_email.csv")
youtube_csv = read_csv("static/csv/youtube.csv")
flash_msg_csv = read_csv("static/csv/flash_msg.csv")
admin_csv = read_csv("static/csv/admin.csv")
rec_system_csv = read_csv("static/csv/rec_system.csv")
reminder_csv = read_csv("static/csv/reminder.csv")
transportation_csv = read_csv("static/csv/transportation.csv")
rec_system_success_csv = read_csv("static/csv/rec_system_success.csv")
# See which language was chosen and update the language
# Force user to a save location
# NOTE(review): the two before_request handler functions these decorators
# should wrap are missing here; as the file stands both decorators fall
# through onto the next `def` below. Restore the language / save-location
# hooks the comments describe.
@app.before_request
@app.before_request
################################################################################
########################### TASK MANAGEMENT ####################################
###############################################################################
def generate_id(id):
    """
    Turn a numeric user id into a fixed-width 8 character code.

    The id is encoded in base 62 by the project helper ``int2base`` and then
    left-padded with the first symbol of the base-62 alphabet until it is
    8 characters long.  Codes that are already 8 characters or longer are
    returned unchanged.
    """
    alphabet = string.ascii_letters + string.digits
    encoded = int2base(id, 62)
    # rjust pads on the left and is a no-op once the width is reached
    return encoded.rjust(8, alphabet[0])
def task_opening(task_id):
    """
    Record that the user is starting a task and return its PsyToolkit link.

    A row with status 0 (open) is inserted into TRACKED_TASK; a later call
    to task_completed() flips the status to 1 (completed).

    :param task_id: primary key of the task in the TASKS table
    :return: db result rows holding the task link (link[0][0] is the URL)
    """
    id = session["user_id"]
    # pick the link column matching the user's interface language
    if session["language"] == "english":
        select_link = f"SELECT TASK_LINK FROM TASKS WHERE TASK_ID=(%s)"
    else:
        select_link = f"SELECT dutch_link FROM TASKS WHERE TASK_ID=(%s)"
    link = db.execute(select_link, (task_id, ), 1)
    # a status of 0 means its open and a status of 1 means its completed
    insert = f"INSERT INTO TRACKED_TASK (user_id, task_id, status) VALUES (%s, %s, %s)"
    insert_values = (id, task_id, 0)
    db.execute(insert, insert_values, 0)
    return link
def task_completed(task_id):
    """
    Register the completion of the user's most recently opened task.

    Looks up the newest TRACKED_TASK row for this user.  Only when it refers
    to *task_id* is the row marked completed (status 1) and a row inserted
    into TASK_COMPLETED with collect=1 (payment not collected yet; see
    calculate_money).

    :param task_id: primary key of the task that was finished
    """
    id = session["user_id"]
    # most recently opened task for this user
    select = f"SELECT * FROM TRACKED_TASK WHERE time_exec= (SELECT MAX(time_exec) FROM TRACKED_TASK WHERE USER_ID = (%s));"
    newest_task = db.execute(select, (id,), 1)
    opened_task = newest_task[0]["task_id"]
    # guard: only credit the task the user actually opened last
    if opened_task == task_id:
        update = f"UPDATE TRACKED_TASK SET status = 1 WHERE time_exec= (SELECT MAX(time_exec) FROM TRACKED_TASK WHERE USER_ID = (%s));"
        insert = f"INSERT INTO TASK_COMPLETED (user_id, task_id, collect) VALUES (%s, %s, 1);"
        db.execute(update, (id,), 0)
        db.execute(insert, (id, task_id), 0)
def send_email(message, username, recipient_adress):
    """
    Send *message* from the project mailbox to *recipient_adress*.

    The sending address is read from ``email.txt`` and its password from
    ``pass.txt`` in the working directory.  *username* is only used for
    logging when sending fails.

    :param message: full RFC 822 payload, including the "Subject:" line
    :param username: name logged when the server rejects the message
    :param recipient_adress: destination e-mail address
    :return: True when the mail was accepted, False when the SMTP server
             rejected it
    """
    try:
        # BUG FIX: the credential files were opened without ever being
        # closed; context managers guarantee the handles are released.
        with open("email.txt", "r") as email_r:
            email = email_r.read().strip('\n')
        with open("pass.txt", "r") as pasw_r:
            pasw = pasw_r.read().strip('\n')
        context = ssl.create_default_context()
        with smtplib.SMTP_SSL("mail.privateemail.com", 465, context=context) as server:
            server.login(email, pasw)
            server.sendmail(email, recipient_adress, message)
            return True
    except SMTPResponseException as e:
        error_code = e.smtp_code
        error_message = e.smtp_error
        print(f'fail to send email {error_code}, {error_message}')
        app.logger.info('%s tried to send the following email: %s. There was an error', username, message)
        return False
@app.route('/nortonsw_df670de0-523c-0.html', methods=["GET"])
################################################################################
##################### LINK TO CORSI TASK_ID = 0 ################################
###############################################################################
@app.route('/rt', methods=["GET"])
@language_check
@login_required
def rt():
    """
    Manage the reaction-time (rt) task, task_id 8.

    If the rt task was already completed today -- or the chosen follow-up
    task is task_switching or n_back -- the user is sent straight to the
    instruction video of the selected task.  Otherwise the rt task is
    opened (task_opening) and the user is sent to its video page, from
    which the PsyToolkit task is launched.

    The query parameter ``task`` names the task chosen on the home page;
    ``language`` being present bounces the user back to /home.
    """
    language = request.args.get('language')
    if language:
        return redirect("/home")
    id = session["user_id"]
    task = request.args.get('task')
    # select tasks that were completed that day by the user
    select = "SELECT * FROM task_completed WHERE user_id = (%s) AND task_id = (%s) AND date_trunc('day', time_exec) = date_trunc('day', current_date);"
    # check if the rt task has been completed
    rt_done = db.execute(select, (id,8), 1)
    #select = "SELECT * FROM task_completed WHERE user_id = (%s) AND task_id = (%s) AND date_trunc('day', time_exec) = date_trunc('day', current_date);"
    # check if the rt_long task has been completed
    #rt_long_done = db.execute(select, (id,9), 1)
    # pick the video link column for the user's language
    if session["language"] == "english":
        youtube_link_select = 'youtube_link'
    else:
        youtube_link_select = 'youtube_link_nl'
    # Check if either the short or long rt task has been done today already
    # If one of them has been done then redirect the user to the video of the selected task directly
    # They should not do the rt or rt_long again
    #if rt_done or rt_long_done:
    if rt_done or task == "task_switching" or task == "n_back":
        # select the youtube link to the video
        select = f'SELECT {youtube_link_select} FROM TASKS WHERE task_name=(%s)'
        youtube_link = db.execute(select,(task,), 1)
        task_link = '/' + task
        return render_template('youtube.html', youtube_csv=youtube_csv[session["language"]], youtube_link=youtube_link[0][0], task_link=task_link, layout=layout[session["language"]])
    # if the task is task_switching then the shortened RT is done
    # if it is not task switching the long rt is done
    #if task == "task_switching":
    # task_id for short rt = 8
    task_id = 8
    link = task_opening(task_id)
    # the PsyToolkit URL carries the follow-up task and the encoded user id
    task_link = link[0][0] + '&task=' + task + '&user_id=' + generate_id(id)
    print(task_link)
    select = f'SELECT {youtube_link_select} FROM TASKS WHERE task_id=(%s)'
    youtube_link = db.execute(select,(task_id,), 1)
    """
    else:
        # task_id for long rt = 9
        task_id = 9
        link = task_opening(task_id)
        task_link = link[0][0] + '&task=' + task + '&user_id=' + generate_id(id)
        print(task_link)
        select = f'SELECT {youtube_link_select} FROM TASKS WHERE task_id=(%s)'
        youtube_link = db.execute(select,(task_id,), 1)
    """
    # check if there is a youtube_link
    # if there is then redirect user to see the youtube video
    # if not redirect user to the task directly
    return render_template('youtube.html', youtube_csv=youtube_csv[session["language"]], youtube_link=youtube_link[0][0], task_link=task_link, layout=layout[session["language"]])
@app.route('/rt_end', methods=["GET"])
@language_check
@login_required
@app.route('/corsi', methods=["GET"])
@login_required
def corsi():
    """
    Open the corsi task (task_id 1): record it as opened in TRACKED_TASK and
    redirect the user to its PsyToolkit link with their encoded user id
    appended.
    """
    task_id = 1
    link = task_opening(task_id)
    return redirect(link[0][0]+ "&user_id=" + generate_id(session["user_id"]))
@app.route('/corsi_end', methods=["GET"])
@login_required
@language_check
def corsi_end():
    """
    Landing page after the corsi task.

    Registers the completion (which updates the money the user earned) and
    renders a thank-you page.
    """
    task_id = 1
    task_completed(task_id)
    return render_template("end_task.html", end_csv=end_csv[session["language"]], layout=layout[session["language"]])
################################################################################
#################### LINK TO N_BACK TASK_ID = 1 ################################
###############################################################################
@app.route('/n_back', methods=["GET"])
@login_required
@app.route('/n_back_end', methods=["GET"])
@login_required
@language_check
################################################################################
############# LINK TO TASK_SWITCHING TASK_ID = 2 ##############################
###############################################################################
@app.route('/task_switching', methods=["GET"])
@login_required
@app.route('/task_switching_end', methods=["GET"])
@login_required
@language_check
################################################################################
##################### LINK TO SF-36 TASK_ID = 3 ################################
###############################################################################
@app.route('/sf_36', methods=["GET"])
@language_check
@login_required
@app.route('/sf_36_end', methods=["GET"])
@login_required
@language_check
################################################################################
################## LINK TO PHONE SURVEY TASK_ID = 4 ############################
###############################################################################
@app.route('/phone_survey', methods=["GET"])
@login_required
@app.route('/phone_survey_end', methods=["GET"])
@login_required
@language_check
def should_show_task(task_id):
    """
    Decide whether the button for *task_id* should be shown to the user.

    Finds the last completion of the task, adds the task's cooldown and
    compares the resulting date against now.

    :param task_id: primary key of the task in the TASKS table
    :return: True when the task is available (never done, or its cooldown
             has passed), False otherwise
    """
    id = session['user_id']
    select = f"SELECT MAX(time_exec) FROM TASK_COMPLETED WHERE USER_ID = (%s) AND TASK_ID = (%s)"
    values = (id, task_id)
    last_timestamp = db.execute(select, values, 1)
    # [0][0] the second 0 refers to the maximum value
    if last_timestamp[0][0]:
        task_last_timestamp = last_timestamp[0][0]
        task = db.execute(f"SELECT FREQUENCY FROM TASKS WHERE TASK_ID=(%s)",(task_id,), 1)
        #task_freq = round(float(task[0]["frequency"])*10*30,1)
        # cooldown in days; presumably FREQUENCY is a fraction scaled by
        # 10*30 days -- TODO confirm against the TASKS table contents
        task_freq = round(float(task[0]["frequency"])*10*30,1)
        new_date = task_last_timestamp + timedelta(days=task_freq)
        if datetime.now() > new_date:
            return True
        else:
            return False
    else:
        return True
def calculate_rec_system_payment(f_promo_code):
    """
    Calculate the reward for referring friends to the study.

    :param f_promo_code: promo code of the logged-in user (the referrer, on
        whose dashboard the payment will appear)
    :return: tuple (number of referred users, total payment in euro,
        number of referred users who successfully completed the study)
    """
    total_payment = 0
    successful_completion = 0
    # select the number of times the user's promocode has been used
    # in the table this is stored as f_promo_code because the user at this point is the friend
    # make sure payment has not been collected already for this
    select = "SELECT * FROM rec_system where f_promo_code=(%s) AND COLLECT NOT IN ( 0 )"
    recomended = db.execute(select, (f_promo_code,), 1)
    # go over each person the user recommended
    for i in recomended:
        # since you only know this user's promo code select their info from SESSION_INFO
        # this is to check if they have been signed up for 1 year and have completed 12 tasks
        select = "SELECT user_id,time_sign_up FROM SESSION_INFO WHERE promo_code=(%s)"
        user_info = db.execute(select, (i["promo_code"],), 1)
        # get the date 1 year after the user has signed up
        today = datetime.now()
        one_year_after_sign_up = user_info[0]["time_sign_up"] + timedelta(weeks = 52)
        # if the user has been signed up for more than a year (52 weeks) then check if they
        # have completed 12 tasks.. if they have not then the total_payment is 0
        if one_year_after_sign_up < today:
            # select the number of tasks they have completed
            select = "SELECT * FROM TASK_COMPLETED WHERE user_id=(%s)"
            executed_tasks = db.execute(select, (user_info[0]["user_id"],), 1)
            # check if the number of tasks is more than 12 if so the referred friend has
            # successfully completed the study and the user may get the reward
            # NOTE(review): the code checks >= 6, not 12 -- confirm which
            # threshold is intended
            if len(executed_tasks) >= 6:
                total_payment = total_payment + 5
                successful_completion = successful_completion + 1
    return (len(recomended),total_payment, successful_completion)
def calculate_money(id):
    """
    Calculate the total amount of money the user has earned since the last
    payment collection.

    Surveys (task ids 4 and 5) are always paid in full.  Ordinary tasks are
    capped at (80-8)/(12*3) = 2 euro per month and 72 euro in total, and
    their payment can only be collected once per calendar month.

    :param id: user_id of the participant
    :return: amount earned, rounded to 2 decimals
    """
    # completed tasks for which payment has not been collected yet
    select = f"SELECT * FROM TASK_COMPLETED WHERE USER_ID = (%s) AND COLLECT NOT IN ( 0 )"
    completed_tasks = db.execute(select, (id,), 1)
    # every completed task, even those already paid out
    select_all_tasks = f"SELECT * FROM TASK_COMPLETED WHERE USER_ID = (%s)"
    all_tasks = db.execute(select_all_tasks, (id,), 1)
    # Assume task payment can still be collected this month until a
    # collected non-survey task from the current month proves otherwise.
    can_collect_task_this_month = True
    for row in all_tasks:
        # row[-1] is the collection timestamp; it only holds a datetime for
        # rows whose payment was actually collected
        if type(row[-1]) is datetime:
            # BUG FIX: the original condition was `(i[2] != 4 or i[2] != 5)`,
            # which is always true (every id differs from 4 or from 5), so
            # surveys also blocked task collection.  Per the intent stated
            # above, only non-survey tasks (ids other than 4 and 5) count.
            # NOTE(review): `.month == datetime.now().month` also matches the
            # same month of a previous year -- confirm that is acceptable.
            if row[2] not in (4, 5) and row[-1].month == datetime.now().month:
                can_collect_task_this_month = False
    # group the uncollected tasks per month of completion
    # (row[0] is the completion timestamp)
    months_dict = {}
    for task in completed_tasks:
        if task[0].month in months_dict:
            months_dict[task[0].month].append(task)
        else:
            months_dict[task[0].month] = [task]
    # total amount earned
    money_earned = 0
    # amount earned for ordinary tasks only (reset every month)
    money_earned_tasks = 0
    # Go over every month in the dictionary
    for month in months_dict:
        # Go over every completed task in that month
        for task in months_dict[month]:
            task_id = task[2]
            # get the price for that task
            money = db.execute(f"SELECT PRICE FROM TASKS WHERE TASK_ID=(%s)", (task_id,), 1)
            if task[2] == 4 or task[2] == 5:
                # surveys are always paid in full
                money_earned = money_earned + float(money[0]["price"])
            else:
                # accumulate ordinary-task earnings for this month
                money_earned_tasks = money_earned_tasks + float(money[0]["price"])
        # apply the monthly cap of (80-8)/(12*3) = 2 euro for ordinary tasks
        if money_earned_tasks <= (80-8)/(12*3) and can_collect_task_this_month:
            money_earned = money_earned + money_earned_tasks
        # 72 is the total amount a user can ever make on the tasks;
        # above the monthly cap only the first 2 euro count for that month
        elif money_earned_tasks > (80-8)/(12*3) and money_earned_tasks < 72 and can_collect_task_this_month:
            money_earned = money_earned + 2
        # reset the per-month task total
        money_earned_tasks = 0
    # round to 2 decimals
    return round(money_earned, 2)
################################################################################
################################ HOME PAGE #####################################
###############################################################################
# Home page Login required
@app.route('/', methods=["GET"])
@login_required
@language_check
################################################################################
################################ REGISTER #####################################
###############################################################################
@app.route("/register", methods=["GET", "POST"])
def register():
    """
    Register a new participant.

    GET renders the registration form.  POST verifies the reCAPTCHA and the
    form fields, stores the new user in SESSION_INFO (handing out a
    participation id and a promo code), notifies the study team by e-mail,
    logs the user in and redirects to /home.
    """
    # remember the chosen language across the session.clear() below
    preferred_language = "dutch"
    if "language" in session:
        preferred_language = session["language"]
    session.clear()
    # User reached route via POST (as by submitting a form via POST)
    if request.method == "POST":
        session["language"] = preferred_language
        # api-endpoint for Google reCAPTCHA verification
        URL = "https://www.google.com/recaptcha/api/siteverify"
        # NOTE(review): `location` is never used below
        location = "delhi technological university"
        secret_recaptcha_r = open("secret_recaptcha.txt", "r")
        secret_recaptcha = secret_recaptcha_r.read()
        # defining a params dict for the parameters to be sent to the API
        PARAMS = {'secret':secret_recaptcha, 'response': request.form.get("g-recaptcha-response")}
        # sending get request and saving the response as response object
        r = requests.get(url = URL, params = PARAMS)
        # extracting data in json format
        data = r.json()
        # reCAPTCHA failed: treat the submitter as a robot
        if not data["success"]:
            flash(flash_msg_csv[session["language"]]["robot"])
            return render_template("register.html", consent_csv=consent_csv[session["language"]], register_csv=register_csv[session["language"]], layout=layout[session["language"]])
        # set to lowercase
        username_r = request.form.get("username").lower()
        # set to lowercase
        email_r = request.form.get("email").lower()
        for_money_r = request.form.get("money")
        collect_possible_r = request.form.get("collect_possible")
        year = request.form.get("year")
        month = request.form.get("month")
        # birth date is normalised to the first day of the month
        birthdate_r = str(year) + str(month) + "01"
        gender_r = request.form.get("gender")
        password = request.form.get("password")
        new_password = request.form.get("confirmation")
        f_promo_code_r = request.form.get("f_promo_code")
        # ensure that all fields that are required were filled in correctly
        if not username_r or not birthdate_r or not gender_r or not email_r:
            flash(flash_msg_csv[session["language"]]["missing_fieds"])
            return render_template("register.html", consent_csv=consent_csv[session["language"]], register_csv=register_csv[session["language"]], layout=layout[session["language"]])
        # preprocesses inputs to get in right format for database
        username = remove_whitespace(username_r)
        email = remove_whitespace(email_r)
        for_money = preprocess_checkbox(for_money_r)
        collect_possible = preprocess_checkbox(collect_possible_r)
        # user_type 1 = participates for money and can collect it; else 2
        user_type = 2
        if for_money and collect_possible:
            user_type = 1
        birthdate = preprocess_birthdate(birthdate_r)
        gender = preprocess_gender(gender_r)
        # normalise a friend's promo code: upper case, no spaces
        if f_promo_code_r:
            f_promo_code = f_promo_code_r.upper().replace(" ", "")
        # Check if password matches and has a number, capital letter, lower case letter and minimum 5 characters
        if check_password(password, new_password):
            # encrypt the users' password
            hash = generate_password_hash(password)
            # check if the username is already in use
            select = "SELECT user_name FROM SESSION_INFO WHERE user_name = (%s)"
            rows = db.execute(select, (username,), 1)
            # check true it means it is in use
            if rows:
                flash(flash_msg_csv[session["language"]]["used_email"])
                return render_template("register.html", consent_csv=consent_csv[session["language"]], register_csv=register_csv[session["language"]], layout=layout[session["language"]])
            # send_email
            # select and delete a participation_id from the table with id's
            delete = """DELETE FROM participation_id WHERE p_id = ANY(SELECT * FROM participation_id order by p_id desc limit 1) RETURNING *"""
            # commit the delete statement by calling execute with fetch argument as 2
            participation_id_r = db.execute(delete, ("",), 2)
            # select the id from the list of lists
            participation_id = participation_id_r[0][0]
            session["participation_id"] = participation_id
            session["show_p_id"] = True
            # check if we have enough ID's left
            select = "SELECT * FROM PARTICIPATION_ID"
            all_p_ids = db.execute(select, ("", ), 1)
            # if there are less than 200 ID's then send a reminder email
            if len(all_p_ids) < 200:
                message = "Subject: (IMPORTANT) New Participation ID's Needed \n\n There are less than 200 participation_ids left."
                send_email(message, username, "agestudy@fsw.leidenuniv.nl")
            promo_code = add_promo_code_2_db()
            # add user to the database
            param = (username, email, gender, collect_possible, for_money, user_type, birthdate, hash, participation_id, promo_code, session["language"])
            insert = "INSERT INTO session_info (user_name, email, gender, collect_possible, for_money, user_type, birthyear, pas_hash, participation_id, promo_code, language) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
            result = db.execute(insert, param, 0)
            select = "SELECT * FROM SESSION_INFO WHERE user_name = %s"
            rows = db.execute(select, (username,), 1)
            # Remember which user has logged in
            session['user_id'] = rows[0][0]
            if f_promo_code_r:
                # if user has been recommended by a friend add promo code to db
                insert_promo_code = "INSERT INTO rec_system (user_id, promo_code, f_promo_code, collect) VALUES (%s, %s, %s, %s)"
                params = (session["user_id"], promo_code, f_promo_code, 1)
                db.execute(insert_promo_code, params, 0)
            # send welcome email to paricipant and to agestudy with participants info
            message = 'Subject: New Participant \n\n username: ' + username + "\npsytoolkit_id: " + generate_id(session['user_id']) + "\n email: " + email + "\n user_id: " + str(rows[0][0]) + "\n user_type: " + str(user_type) + "\n gender: " + str(gender_r) + "\n language: " + session["language"] + "\n participation_id: " + participation_id + "\n Birthdate: " + birthdate
            send_email(message, username, "agestudy@fsw.leidenuniv.nl")
            subject_en='Welcome from Leiden University to Agestudy.nl (Important info, save the email)'
            subject_nl='Welkom van de Universiteit Leiden bij Agestudy.nl (Belangrijke info, bewaar deze e-mail)'
            send_direct_email(subject_en, subject_nl, email, subject_en, False, participation_id)
            # Redirect user to home page
            return redirect("/home")
        else:
            # the passwords did not match
            flash(flash_msg_csv[session["language"]]["unmatched_pas"])
            return render_template("register.html", consent_csv=consent_csv[session["language"]], register_csv=register_csv[session["language"]], layout=layout[session["language"]])
    # User reached route via GET (as by clicking a link or via redirect)
    else:
        session["language"] = preferred_language
        language_set()
        return render_template("register.html", consent_csv=consent_csv[session["language"]], register_csv=register_csv[session["language"]], layout=layout[session["language"]])
@app.route("/check_promo_code", methods=["GET"])
@app.route("/availability", methods=["GET"])
def availability():
    """
    Check whether the requested username is still free.

    Reads ``username`` from the query string (lower-cased, matching how
    registration stores names) and returns a JSON boolean: true when no
    SESSION_INFO row uses the name yet.
    """
    # set to lowercase
    username = request.args.get('username').lower()
    # get the user information
    select = "SELECT user_name FROM SESSION_INFO WHERE user_name = (%s)"
    rows = db.execute(select, (username,), 1)
    return jsonify(len(rows) == 0)
@app.route("/session_pc", methods=["GET"])
def session_pc():
    """
    Remember in the session whether the checkbox on index.html is checked.

    The client sends ``checked`` as the string "true"/"false"; the resulting
    boolean is stored in session["pc"] and echoed back as JSON.
    """
    session["pc"] = request.args.get('checked') == "true"
    return jsonify(session["pc"])
@app.route("/email", methods=["GET"])
def email():
    """
    Report, as a JSON boolean, whether the submitted username equals the
    submitted email address (both compared case-insensitively).
    """
    name_value = request.args.get('username').lower()
    mail_value = request.args.get('email').lower()
    return jsonify(name_value == mail_value)
@app.route("/automatic_unsubscribe_reminder", methods=["GET", "POST"])
@app.route("/unsubscribe_reminder", methods=["GET", "POST"])
@language_check
@app.route("/reminder", methods=["GET", "POST"])
@language_check
def reminder():
    """
    Add the logged-in user's email to the REMINDER table and notify the
    study team that they want to join the reminder system.
    """
    # select the user' email from the db
    select = "SELECT email,participation_id from session_info WHERE user_id = (%s)"
    select_users = db.execute(select, (session["user_id"],), 1)
    email = select_users[0][0]
    participation_id = select_users[0][1]
    # check if the email adress already exists
    select = "SELECT email_adress FROM REMINDER WHERE email_adress = (%s)"
    email_reminder_exists = db.execute(select, (email,), 1)
    if not email_reminder_exists:
        # if it doesnt, insert the new email adress in the db
        insert = "INSERT INTO reminder(email_adress, user_id) VALUES (%s,%s);"
        db.execute(insert, (email,session["user_id"]), 0)
    message = "Subject: Reminder participant \n\n Participant with email adress: " + email + " and participation_id: " + participation_id + " wants to join the reminder system."
    email_sent = send_email(message, email, "agestudy@fsw.leidenuniv.nl")
    return render_template("reminder.html", reminder_csv=reminder_csv[session["language"]], layout=layout[session["language"]])
################################################################################
################################ LOGIN #######################################
###############################################################################
@app.route("/login", methods=["GET", "POST"])
def login():
    """
    Log the user in.

    GET renders the login form.  POST validates the credentials against
    SESSION_INFO, populates the session (user_id, consent, admin,
    participation_id and the dashboard visibility flags) and redirects to
    /home.
    """
    # Forget any user_id, but keep the previously chosen language
    preferred_language = "dutch"
    if "language" in session:
        preferred_language = session["language"]
    session.clear()
    # User reached route via POST (as by submitting a form via POST)
    if request.method == "POST":
        session["language"] = preferred_language
        # set to lowercase
        username = request.form.get("username").lower()
        password = request.form.get("password")
        # Ensure username was submitted
        if not username:
            flash(flash_msg_csv[session["language"]]["email_needed"])
            return render_template("login.html", login_csv=login_csv[session["language"]], layout=layout[session["language"]])
        # Ensure password was submitted
        elif not password:
            flash(flash_msg_csv[session["language"]]["pas_needed"])
            return render_template("login.html", login_csv=login_csv[session["language"]], layout=layout[session["language"]])
        # get the user information
        select = "SELECT * FROM SESSION_INFO WHERE user_name = (%s)"
        rows = db.execute(select, (username,), 1)
        # Ensure username exists and password is correct
        if len(rows) != 1 or not check_password_hash(rows[0]['pas_hash'], request.form.get("password")):
            flash(flash_msg_csv[session["language"]]["invalid_username"])
            if len(rows) != 1:
                app.logger.info('%s invalid username', username)
            else:
                app.logger.info('%s invalid password with email combination', username)
            return render_template("login.html", login_csv=login_csv[session["language"]], layout=layout[session["language"]])
        # Remember which user has logged in
        session['user_id'] = rows[0]['user_id']
        session['consent'] = rows[0]['consent']
        session['admin'] = rows[0]['admin']
        session['participation_id'] = rows[0]['participation_id']
        session['show_p_id'] = True
        session['show_rec_system'] = True
        select = "SELECT time_sign_up FROM SESSION_INFO WHERE user_id = (%s)"
        time_sign_up = db.execute(select, (session["user_id"],), 1)
        month_after_sign_up = time_sign_up[0][0] + timedelta(weeks=4)
        # NOTE(review): despite the name, this adds timedelta(weeks=0),
        # i.e. the sign-up time itself -- confirm whether weeks=2 was meant
        two_weeks_after_sign_up = time_sign_up[0][0] + timedelta(weeks=0)
        # latest bulletin-board row decides whether the participation id
        # banner is still shown
        select_msg = f"SELECT * FROM BB_BOARD WHERE time_insert= (SELECT MAX(time_insert) FROM BB_BOARD WHERE USER_ID = (%s));"
        bb_board_selection = db.execute(select_msg, (rows[0]['user_id'],), 1)
        if month_after_sign_up < datetime.now() or bb_board_selection and bb_board_selection[0]["show_msg"]:
            session['show_p_id'] = False
        # user has been signed up for less than 2 weeks
        if two_weeks_after_sign_up > datetime.now():
            session['show_rec_system'] = False
        # Redirect user to home page
        return redirect("/home")
    # User reached route via GET (as by clicking a link or via redirect)
    else:
        session["language"] = preferred_language
        language_set()
        return render_template("login.html", login_csv=login_csv[session["language"]], layout=layout[session["language"]])
def check_password(password, new_password):
    """
    Validate a newly chosen password.

    Returns True only when both entries are identical, the password is at
    least five characters long, and it contains at least one digit.
    """
    # confirmation must match exactly
    if password != new_password:
        return False
    # at least one ASCII digit and a minimum length of five characters
    has_digit = re.search(r"[0-9]", password) is not None
    return len(password) >= 5 and has_digit
@app.route("/bb_board", methods=["GET", "POST"])
@app.route("/forgot_password", methods=["GET", "POST"])
@app.route("/verify", methods=["GET", "POST"])
@app.route("/reset", methods=["GET", "POST"])
@app.route("/reset_link", methods=["GET"])
################################################################################
################################ LOGOUT #####################################
###############################################################################
@app.route("/logout")
@login_required
def logout():
    """Log the user out by discarding all session state."""
    # Dropping the session removes user_id, so @login_required will bounce
    # the user to the login form on their next request.
    session.clear()
    return redirect("/")
@app.route("/pc", methods=["POST"])
@login_required
def pc():
    """
    Record in the session that the user is on a PC.

    The client posts a truthy ``pc`` form field; the flag is kept in
    session["pc"].  Responds with an empty 204 (the client ignores the
    body).
    """
    if request.form.get("pc"):
        session["pc"] = 1
    # BUG FIX: the original returned None, which makes Flask raise
    # "View function did not return a valid response" on every POST.
    return ("", 204)
################################################################################
################################ ACCOUNT #####################################
###############################################################################
@app.route("/account", methods=["GET", "POST"])
@login_required
@language_check
def account():
    """
    Account page.

    GET shows the user's account information (username and email).
    POST changes the password after verifying the old one and validating
    the new one with check_password().
    """
    # User reached route via POST (as by submitting a form via POST)
    if request.method == "POST":
        # change password, get old password and two new entries
        old_password = request.form.get("old_password")
        password = request.form.get("password")
        confirmation = request.form.get("confirmation")
        # select users information
        select = "SELECT * FROM SESSION_INFO WHERE USER_ID = (%s)"
        id=session["user_id"]
        rows = db.execute(select, (id,), 1)
        username = rows[0]["user_name"]
        email = rows[0]["email"]
        # Check if the old password matches
        if not check_password_hash(rows[0]['pas_hash'], old_password):
            flash(flash_msg_csv[session["language"]]["incorrect_pas"])
            app.logger.warning('%s Tried to change password in the /account but old password was incorrect', username)
            return render_template("account.html", account_csv=account_csv[session["language"]], username=username.capitalize(), email=email, layout=layout[session["language"]])
        # check if the new password is properly implemented
        if check_password(password, confirmation):
            # encrypt the users' password
            hash = generate_password_hash(password)
            update = "UPDATE SESSION_INFO SET pas_hash = (%s) WHERE USER_ID = (%s)"
            param = (hash,id)
            db.execute(update, param, 0)
            flash(flash_msg_csv[session["language"]]["change_pas"])
            # Redirect user to home page
            return redirect("/")
        else:
            app.logger.warning('%s In /account password did not match with confirmation password', username)
            flash(flash_msg_csv[session["language"]]["incorrect_fields"])
            return render_template("account.html", account_csv=account_csv[session["language"]], username=username.capitalize(), email=email, layout=layout[session["language"]])
    # User reached route via GET (as by clicking a link or via redirect)
    else:
        # Show them their account information
        id = session["user_id"]
        select = "SELECT * FROM SESSION_INFO WHERE USER_ID = (%s)"
        rows = db.execute(select, (id,), 1)
        username = rows[0]["user_name"]
        email = rows[0]["email"]
        return render_template("account.html", account_csv=account_csv[session["language"]], username=username.capitalize(), email=email, layout=layout[session["language"]])
################################################################################
################################ STATIC #####################################
###############################################################################
@app.route("/consent", methods=["GET", "POST"])
@language_check
def consent():
    """
    Consent form.

    GET renders the form; POST stores CONSENT=1 when the checkbox was
    ticked, refreshes session['consent'] from the database and redirects
    to /home.
    """
    language_set()
    if request.method == "POST":
        id = session["user_id"]
        # only persist consent when the checkbox was actually ticked
        if request.form.get("consent") == "on":
            update = f"UPDATE SESSION_INFO SET CONSENT=1 WHERE USER_ID = (%s)"
            db.execute(update, (id,), 0)
        select = "SELECT CONSENT FROM SESSION_INFO WHERE USER_ID = (%s)"
        rows = db.execute(select, (id,), 1)
        session['consent'] = rows[0]['consent']
        return redirect("/home")
    else:
        return render_template("consent.html", consent_csv=consent_csv[session["language"]], layout=layout[session["language"]])
@app.route("/transportation", methods=["POST"])
@language_check
def transportation():
    """
    Handle the transportation-cost form of the EEG sign-up.

    E-mails the participant's details and transportation costs to the study
    team, records the EEG participation request in SESSION_INFO and renders
    a confirmation (or failure) page.
    """
    transportation = request.form.get("transportation")
    transportation_cost = request.form.get("transportation_cost")
    id = session["user_id"]
    # select the user info
    select = "SELECT * FROM SESSION_INFO WHERE USER_ID = (%s)"
    rows = db.execute(select, (id,), 1)
    # send_email with the users info to our server to contact them about participating
    message = 'Subject: EEG \n\n The following participant wants to participate in the EEG study' + '\n username: ' + str(rows[0]['user_name']) + "\n email: " + str(rows[0]['email']) + "\n user_id: " + str(rows[0]['user_id']) + "\n user_type: " + str(str(rows[0]['user_type'])) + "\n language: " + session["language"] + "\n transportation cost required: " + transportation + "\n transportation cost: " + transportation_cost
    email_sent = send_email(message, rows[0]['user_name'], "agestudy@fsw.leidenuniv.nl")
    # record the request and its date on the user's row
    date_requested = datetime.now()
    update = "UPDATE SESSION_INFO SET eeg_participation_request = (%s), eeg_participation_request_date = (%s) WHERE user_id= (%s);"
    db.execute(update, (1,date_requested,id), 0)
    if email_sent:
        return render_template("sent_email.html", sent_email_csv=sent_email_csv[session["language"]], layout=layout[session["language"]])
    else:
        return render_template("email_unsent.html", email_unsent=email_unsent[session["language"]], layout=layout[session["language"]])
@app.route("/eeg", methods=["GET", "POST"])
@language_check
def eeg():
    """
    EEG information page.

    GET shows the EEG information; POST (the sign-up button) forwards the
    user to the transportation-cost form, which completes the sign-up.
    """
    # If they clicked on the submit button
    if request.method == "POST":
        return render_template("transportation.html", transportation_csv=transportation_csv[session["language"]], layout=layout[session["language"]])
    else:
        language_set()
        return render_template("eeg.html", eeg_csv=eeg_csv[session["language"]], layout=layout[session["language"]])
def check_can_collect_payment(id):
    """
    Decide whether a participant may collect payment.

    A participant can collect when:
      - at least 43 weeks (~10 months) have passed since sign-up, and
      - they have at least one TASK_COMPLETED row, and
      - they have never collected before, or their last collection was
        more than 22 weeks (~5 months) ago.

    Parameters:
        id: the participant's user_id (name kept for caller compatibility,
            although it shadows the builtin).

    Returns:
        (can_collect_payment, date_collected, time_sign_up) where
        can_collect_payment is a bool, date_collected is the datetime of
        the last collection (or None if there is none), and time_sign_up
        is the raw result rows of the sign-up-time query.
    """
    select = "SELECT time_sign_up FROM SESSION_INFO WHERE user_id = (%s)"
    time_sign_up = db.execute(select, (id,), 1)
    one_year_after_sign_up = time_sign_up[0][0] + timedelta(weeks=43)
    select = "SELECT date_collected,next_collection from TASK_COMPLETED WHERE user_id = (%s)"
    rows = db.execute(select, (id,), 1)
    can_collect_payment = False
    if (one_year_after_sign_up < datetime.now()
            and len(rows) >= 1
            and (rows[0][0] is None
                 or rows[0][0] < (datetime.now() - timedelta(weeks=22)))):
        can_collect_payment = True
    # Normalize the second return value to the last collection date (or
    # None). Previously a non-eligible participant with exactly one row
    # got the raw result rows back instead of a date.
    date_collected = rows[0][0] if len(rows) >= 1 else None
    return (can_collect_payment, date_collected, time_sign_up)
@app.route("/home", methods=["GET", "POST"])
@login_required
@language_check
def home():
"""
Home page, contains a money tab and a recommended task
"""
# make the recomendation system which will recommend one task, show only 1 div
id = session["user_id"]
select = "SELECT promo_code FROM SESSION_INFO WHERE USER_ID=(%s)"
f_promo_code = db.execute(select, (id,), 1)
# calculate the money earned to draw the barchart
task_payment = calculate_money(id)
rec_system_payment = calculate_rec_system_payment(f_promo_code[0][0])
#price = task_payment + rec_system_payment[1]
amount_to_earn = rec_system_payment[0]*5
# select all the completed tasks
select = f"SELECT * FROM TASK_COMPLETED WHERE USER_ID = (%s)"
rows = db.execute(select,(id,), 1)
# select the user type to see if money barchart should be shown
select = f"SELECT USER_TYPE FROM SESSION_INFO WHERE USER_ID = (%s)"
user_type_row = db.execute(select,(id,), 1)
# user type of 1 indicates they want and are able to participate for money
if user_type_row[0]["user_type"] == 1:
user_type = True
# user type of 2 indicates they are unable to participate for money
else:
user_type = False
recomendation = True
# put the completed tasks in a list
completed_tasks = []
for i in rows:
completed_tasks.append(i["task_id"])
can_collect_payment,date_collected,time_sign_up = check_can_collect_payment(id)
three_weeks_after_sign_up = time_sign_up[0][0] + timedelta(weeks=3)
phone_survey_available = three_weeks_after_sign_up < datetime.now()
select_msg = f"SELECT * FROM BB_BOARD WHERE time_insert= (SELECT MAX(time_insert) FROM BB_BOARD WHERE USER_ID = (%s));"
bb_board_selection = db.execute(select_msg, (id,), 1)
bb_msg=""
bb_msg_title=""
if bb_board_selection and bb_board_selection[0]["show_msg"]:
bb_msg = bb_board_selection[0]["msg"]
bb_msg_title = bb_board_selection[0]["msg_title"]
bb_no_msg = False
else:
bb_no_msg = True
# First recomendation is to do the phone survey
if (5 not in completed_tasks or should_show_task(5)) and phone_survey_available:
task = {"img":"/static/images/TaskIcons/phone_survey.png", "alt":"Phone survey", "btn_class": "survey", "title":tasks[session["language"]]['phone_survey_title'], "text" : tasks[session["language"]]['phone_survey_description'], "link" : "/phone_survey", "button_text": tasks[session["language"]]['phone_survey_button']}
# Second recomendation is to do the sf-36
elif 4 not in completed_tasks or should_show_task(4):
task = {"img":"/static/images/TaskIcons/SF-36.png", "alt":"Health survey", "btn_class": "survey", "title":tasks[session["language"]]['sf_36_title'], "text" : tasks[session["language"]]['sf_36_description'], "link" : "/sf_36", "button_text": tasks[session["language"]]['sf_36_button']}
# Third recomendation is to do corsi task
elif 1 not in completed_tasks or should_show_task(1):
task = {"img":"/static/images/TaskIcons/corsi.png", "alt":"Pattern Memory", "btn_class":"", "title" : tasks[session["language"]]['corsi_title'], "text" : tasks[session["language"]]['corsi_description'], "link" : "/rt?task=corsi", "button_text": tasks[session["language"]]['corsi_button']}
# Fourth recomendation is to do n_back task
elif 2 not in completed_tasks or should_show_task(2):
task = {"img":"/static/images/TaskIcons/N-back.png", "alt":"Letter Memory", "btn_class":"", "title" : tasks[session["language"]]['n_back_title'], "text" : tasks[session["language"]]['n_back_description'], "link" : "/rt?task=n_back", "button_text": tasks[session["language"]]['n_back_button']}
# Fifth recomendation is to do task switching task
elif 3 not in completed_tasks or should_show_task(3):
if session["language"] == "english":
task = {"img":"/static/images/TaskIcons/TSwitch_EN.png", "alt":"Task Switching", "btn_class":"", "title" : tasks[session["language"]]['task_switching_title'], "text" : tasks[session["language"]]['task_switching_description'], "link" : "/rt?task=task_switching", "button_text": tasks[session["language"]]['task_switching_button']}
else:
task = {"img":"/static/images/TaskIcons/TSwitch_NL.png", "alt":"Task Switching", "btn_class":"", "title" : tasks[session["language"]]['task_switching_title'], "text" : tasks[session["language"]]['task_switching_description'], "link" : "/rt?task=task_switching", "button_text": tasks[session["language"]]['task_switching_button']}
# If all tasks have been completed and are locked then give no recommendation dont show the div
else:
recomendation = False
task = {"img":"", "alt":"", "title":"", "btn_class":"", "text" : "", "link" : "", "button_text": ""}
return render_template("home.html", amount_to_earn=amount_to_earn, len_rec_system_payment=rec_system_payment[0], suc_rec_system=rec_system_payment[1], bb_no_msg=bb_no_msg, bb_msg_title=bb_msg_title, bb_msg=bb_msg, price=task_payment, can_collect_payment=can_collect_payment, user_type=user_type, recomendation=recomendation, layout=layout[session["language"]], home_csv=home_csv[session["language"]], btn_class=task["btn_class"], img=task["img"], alt=task["alt"], title=task["title"], text=task["text"], link=task["link"], button_text=task["button_text"])
@app.route("/rec_system", methods=["GET", "POST"])
@login_required
@language_check
@app.route("/payment", methods=["POST", "GET"])
@login_required
@language_check
def payment():
    """
    Request payment from agestudy. Ask for personal information.

    GET renders the payment form. POST marks the participant's task and
    recommender-system earnings as collected, emails the submitted
    personal details (name, IBAN, address, BSN, birthdate) plus the
    amounts owed to the lab inbox, and renders a confirmation page (or
    an email-failure page).
    """
    if request.method == "POST":
        # Default to "" so a missing form field cannot break the string
        # concatenation below with a TypeError.
        first_name = request.form.get("first_name", "")
        last_name = request.form.get("last_name", "")
        IBAN = request.form.get("IBAN", "")
        address = request.form.get("address", "")
        BSN = request.form.get("BSN", "")
        date = request.form.get("date_payment", "")
        month = request.form.get("month_payment", "")
        year = request.form.get("year_payment", "")
        birthdate = f'{date}/{month}/{year}'
        user_id = session["user_id"]
        task_payment = calculate_money(user_id)
        # Mark the task earnings as collected (COLLECT=0 means collected,
        # COLLECT=1 means still outstanding) and schedule the next
        # possible collection a year from now.
        date_collected = datetime.now()
        next_collection = datetime.now() + timedelta(weeks=52)
        update = "UPDATE TASK_COMPLETED SET COLLECT=0, DATE_COLLECTED = (%s), next_collection = (%s) WHERE USER_ID = (%s)"
        db.execute(update, (date_collected, next_collection, user_id), 0)
        # Fetch the participant's account record for the email below.
        select = "SELECT * FROM SESSION_INFO WHERE USER_ID = (%s)"
        rows = db.execute(select, (user_id,), 1)
        # Also mark the recommender-system earnings as collected so a
        # friend cannot see (and collect) the same payment again.
        f_promo_code = rows[0]["promo_code"]
        rec_system_payment = calculate_rec_system_payment(f_promo_code)
        update = "UPDATE rec_system SET COLLECT=0 WHERE f_promo_code = (%s)"
        db.execute(update, (f_promo_code,), 0)
        money_earned = task_payment + rec_system_payment[1]
        # Email the payout request to the lab inbox: account details,
        # the submitted personal info, and both amounts to be paid out.
        message = ('Subject: Payment collection \n\n The following participant wants to collect their payment for the study'
                   + '\n username: ' + str(rows[0]['user_name'])
                   + "\n First name: " + first_name
                   + "\n Last name: " + last_name
                   + "\n IBAN: " + IBAN
                   + "\n Address: " + address
                   + "\n BSN: " + BSN
                   + "\n birthdate: " + birthdate
                   + "\n email: " + str(rows[0]['email'])
                   + "\n user_id: " + str(rows[0]['user_id'])
                   + "\n user_type: " + str(rows[0]['user_type'])
                   + "\n ammount to collect from tasks: " + str(task_payment)
                   + "\n ammount to collect from recommender system: " + str(rec_system_payment[1])
                   + "\n language: " + session["language"])
        email_sent = send_email(message, rows[0]['user_name'], "agestudy@fsw.leidenuniv.nl")
        if email_sent:
            # render a thank you page
            return render_template("collected.html", money_earned=money_earned, collected_csv=collected_csv[session["language"]], layout=layout[session["language"]])
        else:
            return render_template("email_unsent.html", email_unsent=email_unsent[session["language"]], layout=layout[session["language"]])
    else:
        return render_template("payment.html", payment=payment_csv[session["language"]], register_csv=register_csv[session["language"]], layout=layout[session["language"]])
@app.route("/admin", methods=["POST", "GET"])
@login_required
@language_check
def admin():
"""
This function controls the admin page, a page which gives an overview of the database.
It also allows changes of the database
"""
if session["admin"] != 1:
return redirect("/home")
return render_template("admin.html", admin_csv=admin_csv[session["language"]], layout=layout[session["language"]])
@app.route("/change_user", methods=["GET"])
@language_check
@app.route("/duplicate_user_ajax", methods=["GET"])
@language_check
@app.route("/select_user", methods=["GET"])
@language_check
@app.route("/query_data", methods=["GET"])
@language_check
@app.route("/get_data", methods=["GET"])
@language_check
# File extensions accepted by the spreadsheet-upload endpoint.
# NOTE(review): 'xlx' looks like a typo — presumably 'xls' was intended;
# confirm before changing, since uploads of .xls would currently be rejected.
ALLOWED_EXTENSIONS = {'xlsx', 'xlx', 'csv'}
@app.route("/excel_upload", methods=["POST"])
@app.route("/download_data", methods=["GET"])
@language_check
def admin_view_user_payment(view_user_payment_bar, participant_info_str, participant_info, user_type):
    """
    Based on either a username (email), participation_id, or user_id
    calculate the payment a participant is entitled to.
    If this information was not requested then return None.

    Parameters:
        view_user_payment_bar: the literal string 'true' when the admin
            requested the payment bar; anything else skips the lookup.
        participant_info_str: the SESSION_INFO column name to match on
            (e.g. 'user_id').
        participant_info: the value to match in that column.
        user_type: only user_type == 1 (paid participants) is computed.

    Returns:
        (task_payment, rec_system_payment); both are None when the bar
        was not requested or the user is not a paid participant.
    """
    task_payment = None
    rec_system_payment = None
    if view_user_payment_bar == 'true' and user_type == 1:
        # NOTE(review): participant_info_str is interpolated into the SQL
        # as a column name — safe only if it is never user-controlled.
        # Confirm callers pass a fixed set of column names (otherwise this
        # is a SQL-injection risk; the value itself is parameterized).
        select = f"SELECT promo_code FROM SESSION_INFO WHERE {participant_info_str}=(%s)"
        f_promo_code = db.execute(select, (participant_info,), 1)
        # calculate the money earned to draw the barchart
        if participant_info_str != 'user_id':
            # Resolve to a user_id first, since calculate_money keys on it.
            select = f"SELECT user_id FROM SESSION_INFO WHERE {participant_info_str}=(%s)"
            user_id = db.execute(select, (participant_info,), 1)
            participant_info = user_id[0][0]
        task_payment = calculate_money(participant_info)
        rec_system_payment = calculate_rec_system_payment(f_promo_code[0][0])
    return (task_payment, rec_system_payment)
@app.route("/inactive_users", methods=["GET"])
@language_check
@app.route("/email_sent", methods=["POST"])
@language_check
@app.route("/about_study", methods=["GET"])
@language_check
@app.route("/for_participant", methods=["GET"])
@language_check
@app.route("/about_app", methods=["GET"])
@language_check
@app.route("/contact", methods=["GET"])
@language_check
#port = int(os.getenv("PORT"))
if __name__ == '__main__':
    # NOTE(review): `port` is not assigned here — the assignment above is
    # commented out. Unless `port` is defined earlier in the file, this
    # raises NameError at startup; confirm and restore the assignment
    # (e.g. with a default) if needed.
    app.run(host='0.0.0.0', port=port)
| [
6738,
42903,
1330,
46947,
11,
18941,
11,
8543,
62,
28243,
11,
2581,
11,
6246,
11,
7644,
11,
33918,
1958,
11,
6371,
62,
1640,
11,
18261,
201,
198,
6738,
42903,
62,
29891,
1330,
23575,
201,
198,
6738,
20218,
7753,
1330,
33480,
67,
29510... | 2.653296 | 19,870 |
first = int(input())
second = int(input())
mod = str(divmod(first,second))
print(str(first//second))
print(str(first%second))
print(mod) | [
11085,
796,
493,
7,
15414,
28955,
198,
12227,
796,
493,
7,
15414,
28955,
198,
198,
4666,
796,
965,
7,
7146,
4666,
7,
11085,
11,
12227,
4008,
198,
4798,
7,
2536,
7,
11085,
1003,
12227,
4008,
198,
4798,
7,
2536,
7,
11085,
4,
12227,
... | 2.795918 | 49 |
"""Mk one feature matrix for sample and chrom.
Use for all samples (not training).
"""
###Adjusting const import was import sfConst, but trying just include: const.py
include: "const.py"
import argparse, csv, sys, os, pandas
from functools import reduce
PWD = os.getcwd().split('code')[0]
sys.path.append(PWD + 'code/rules/')
# def loadData(dataFile):
# """Load {} of chrom:pos:ref:alt to fields."""
if __name__ == "__main__":
desc = 'Make on feature matrix for sample and chrom.'
parser = argparse.ArgumentParser(description=desc)
argLs = ['noCallCounts', 'cgiFeats',
'outFile']
for param in argLs:
parser.add_argument(param)
args = parser.parse_args()
main(args)
| [
37811,
44,
74,
530,
3895,
17593,
329,
6291,
290,
15358,
13,
198,
220,
220,
5765,
329,
477,
8405,
357,
1662,
3047,
737,
198,
37811,
198,
21017,
39668,
278,
1500,
1330,
373,
1330,
264,
69,
34184,
11,
475,
2111,
655,
2291,
25,
1500,
13... | 2.70412 | 267 |
from django.http import JsonResponse
class DTAException(Exception):
""" Base exception which could represent itself as a JSON response"""
| [
6738,
42625,
14208,
13,
4023,
1330,
449,
1559,
31077,
628,
198,
4871,
360,
5603,
16922,
7,
16922,
2599,
198,
220,
220,
220,
37227,
7308,
6631,
543,
714,
2380,
2346,
355,
257,
19449,
2882,
37811,
628,
198
] | 4.055556 | 36 |
from flask_babelex import lazy_gettext
from flask_wtf import FlaskForm
from wtforms import SelectField, SubmitField
from wtforms.validators import DataRequired
from project.forms.common import event_rating_choices
| [
6738,
42903,
62,
65,
11231,
2588,
1330,
16931,
62,
1136,
5239,
198,
6738,
42903,
62,
86,
27110,
1330,
46947,
8479,
198,
6738,
266,
83,
23914,
1330,
9683,
15878,
11,
39900,
15878,
198,
6738,
266,
83,
23914,
13,
12102,
2024,
1330,
6060,
... | 3.694915 | 59 |
# -*- coding: utf-8 -*-
"""
@author: alexyang
@contact: alex.yang0326@gmail.com
@file: inference.py
@time: 2018/4/22 14:32
@desc:
"""
import os
import random
from argparse import ArgumentParser
import numpy as np
import pickle
import pandas as pd
from models import EntDect, RelNet, SubTransE, SubTypeVec
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--data', type=str, default='../../data/test.csv', help='path to test data')
parser.add_argument('--word2idx', type=str, default='../../data/fb_word2idx.pkl', help='path to word2idx.pkl')
parser.add_argument('--rel2idx', type=str, default='../../data/FB5M_relation2idx.pkl',
help='path to relation2idx.pkl')
parser.add_argument('--sub2idx', type=str, default='../../data/FB5M_subject2idx.pkl',
help='path to subject2idx.pkl')
parser.add_argument('--idx2sub', type=str, default='../../data/FB5M_idx2subject.pkl',
help='path to idx2subject.pkl')
parser.add_argument('--sub2type', type=str, default='../../data/trim_subject2type.pkl',
help='path to subject2type')
parser.add_argument('--type2idx', type=str, default='../../data/FB5M_type2idx.pkl', help='path to type2idx.pkl')
parser.add_argument('--name2sub', type=str, default='../../data/name2subject.pkl',
help='path to subject2name.pkl')
parser.add_argument('--ngram2sub', type=str, default='../../data/ngram2subject.pkl',
help='path to subngram2entity.pkl')
parser.add_argument('--kb', type=str, default='../../data/FB5M_triple.pkl', help='path to knowledge graph')
parser.add_argument('--entdect_type', type=str, required=True,
help='model type of entity detection, options are [lstm | lstm_crf | bilstm | bilstm_crf]')
parser.add_argument('--subnet_type', type=str, required=True,
help='model type of subject network, options are [transe | typevec]')
parser.add_argument('--entdect', type=str, required=True, help='path to entity detection tensorflow model')
parser.add_argument('--relnet', type=str, required=True, help='path to relation network tensorflow model')
parser.add_argument('--subnet', type=str, required=True, help='path to subject network tensorflow model')
args = parser.parse_args()
# load needed data
print('loading word2idx...')
word2idx = pickle_load(args.word2idx)
print('loading relation2idx...')
relation2dix = pickle_load(args.rel2idx)
print('loading idx2subject...')
idx2subject = pickle_load(args.idx2sub)
print('loading subject2idx...')
subject2idx = pickle_load(args.sub2idx)
print('loading type2idx...')
type2idx = pickle_load(args.type2idx)
print('loading subject2type...')
subject2type = pickle_load(args.sub2type)
print('loading name2subject')
name2subject = pickle_load(args.name2sub)
print('loading ngram2subject...')
ngram2subject = pickle_load(args.ngram2sub)
print('loading knowledge graph...')
kb_triple = pickle_load(args.kb)
# load model
print('load entity detection model...')
entdect = EntDect(args.entdect_type, args.entdect)
print('load relation network model...')
relnet = RelNet(args.relnet)
if args.subnet_type == 'typevec':
subnet = SubTypeVec(args.subnet)
else:
subnet = SubTransE(args.subnet)
# load test data
print('loading test data...')
q_lineid, questions, q_word_ids, q_seq_len, gold_sub_ids, gold_rel_ids = read_data(args.data, word2idx,
relation2dix, subject2idx)
# '''step1: entity detection: find possible subject mention in question'''
mentions = entdect.infer((questions, q_word_ids, q_seq_len))
# '''step2: entity linking: find possible subjects responding to subject mention;
# search space reduction: generate candidate (subject, relation) pair according to possible subjects
# '''
cand_sub_ids, cand_rel_ids, cand_subrel_ids = link_entity(mentions, name2subject, ngram2subject,
kb_triple, subject2idx, relation2dix)
# '''step3: relation scoring: compute score for each candidate relations'''
rel_scores = relnet.infer((q_word_ids, q_seq_len, cand_rel_ids))
# '''step4: subject scoring: compute score for each candidate subjects'''
if args.subnet_type == 'typevec':
cand_sub_typevecs = []
for can_sub in cand_sub_ids:
type_vecs = []
for sub_id in can_sub:
types = subject2type.get(idx2subject[sub_id], [])
type_ids = [type2idx[type] for type in types]
type_vecs.append(type_ids)
cand_sub_typevecs.append(type_vecs)
sub_scores = subnet.infer((q_word_ids, q_seq_len, cand_sub_ids, cand_sub_typevecs))
else:
sub_scores = subnet.infer((q_word_ids, q_seq_len, cand_sub_ids))
# '''step5: inference'''
inference(gold_sub_ids, gold_rel_ids, cand_subrel_ids, rel_scores, sub_scores)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
198,
31,
9800,
25,
257,
2588,
17859,
198,
198,
31,
32057,
25,
257,
2588,
13,
17859,
3070,
2075,
31,
14816,
13,
785,
198,
198,
31,
7753,
25,
32278,
1... | 2.300175 | 2,292 |
import logging
from typing import List
from homeassistant.helpers.entity import Entity
from gehomesdk.erd import ErdCode, ErdApplianceType
from .base import ApplianceApi
from ..entities import GeSacClimate, GeSacTemperatureSensor, GeErdSensor, GeErdSwitch, ErdOnOffBoolConverter
_LOGGER = logging.getLogger(__name__)
class SacApi(ApplianceApi):
"""API class for Split AC objects"""
APPLIANCE_TYPE = ErdApplianceType.SPLIT_AIR_CONDITIONER
| [
11748,
18931,
198,
6738,
19720,
1330,
7343,
198,
198,
6738,
1363,
562,
10167,
13,
16794,
364,
13,
26858,
1330,
20885,
198,
6738,
4903,
71,
2586,
34388,
13,
45744,
1330,
16455,
10669,
11,
16455,
4677,
75,
3610,
6030,
198,
198,
6738,
764,... | 2.911392 | 158 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 3 12:07:10 2019
Generates plots.
DOES NOT WORK! review input files!
@author: modal
"""
import pandas as pd
import numpy as np
import pickle
# Disable warnings in Anaconda
import warnings
warnings.filterwarnings('ignore')
# Matplotlib forms basis for visualization in Python
import matplotlib.pyplot as plt
# We will use the Seaborn library
import seaborn as sns
sns.set()
#%% LOAD DEI DATASET PULITI
file_path = '../../DATASET/'
pickle_off = open(file_path+"df_cup.pickle","rb")
df_cup = pickle.load(pickle_off)
#%% CREAZIONE DEL DATAFRAME DI INTERESSE
df_cup_extraction = pd.DataFrame({
'anno_ins': df_cup.sa_data_ins.dt.year,
'assistito': df_cup.sa_ass_cf,
'operatore': df_cup.sa_ut_id,
'sesso_assistito': df_cup.sa_sesso_id,
'eta_assistito': df_cup.sa_eta_id,
'comune': df_cup.sa_comune_id,
'branca': df_cup.sa_branca_id,
'prestazione': df_cup.sa_pre_id,
'num_prestazioni': df_cup.sa_num_prestazioni,
'priorita': df_cup.sa_classe_priorita,
'data_app' : df_cup.sa_data_app,
'mese_app': df_cup.sa_data_app.dt.month,
'anno_app': df_cup.sa_data_app.dt.year,
'attesa_app': df_cup.sa_gg_attesa,
'attesa_disp': df_cup.sa_gg_attesa_pdisp,
'asl': df_cup.sa_asl
})
#%%GRAFICA GENERALE
file_path = '../../'
# NUMERO DI PRENOTAZIONI PER BRANCA E PER ASL
stats_df = (df_cup_extraction.groupby('asl')['branca'].value_counts().unstack().fillna(0).T)/(df_cup_extraction['branca'].value_counts().sum())
stats_df = stats_df.reset_index()
stats_df = stats_df.rename(columns={'index':'branca'})
stats_df['pdf'] = stats_df.sum(axis=1)
stats_df = stats_df.sort_values(by='pdf', ascending=False)
stats_df['cdf'] = stats_df['pdf'].cumsum()
asl_list = list(df_cup_extraction['asl'].value_counts().index)
fig, ax = plt.subplots()
fig = stats_df.plot.bar(x='branca', y=asl_list, stacked=True, rot=90, legend=True, figsize=(24, 6), width=0.7, ax = ax)
fig.set_xlabel("Branca")
fig.set_ylabel("Frequency on Dataset")
ax2 = ax.twinx()
fig = stats_df.plot(x='branca', y=['cdf'], color = 'r', linewidth = 3, legend=False, ax = ax2, alpha = 0.5, grid=False)
ax.legend(bbox_to_anchor=(1.08, 0.90))
ax.set_xlim(-0.5,len(stats_df)-1)
plt.legend(bbox_to_anchor=(1.08, 1))
plt.savefig(file_path+'IMG/PRENOTAZIONI(branca,asl).png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
fig, ax = plt.subplots()
fig = stats_df.plot.bar(x='branca', y='pdf', stacked=True, rot=90, legend=True, figsize=(24, 6), width=0.7, ax = ax)
fig.set_xlabel("Branca")
fig.set_ylabel("Frequency on Dataset")
ax2 = ax.twinx()
fig = stats_df.plot(x='branca', y=['cdf'], color = 'r', linewidth = 3, legend=False, ax = ax2, alpha = 0.5, grid=False)
ax.legend(bbox_to_anchor=(1.08, 0.90))
ax.set_xlim(-0.5,len(stats_df)-1)
plt.legend(bbox_to_anchor=(1.08, 1))
plt.savefig(file_path+'IMG/PRENOTAZIONI(branca).png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
# Salvo la lista dele branche ignobili
list_branche_sign = list(stats_df['branca'].loc[stats_df['cdf']<=0.9])
del stats_df
# NUMERO DI PRENOTAZIONI PER COMUNE
stats_df = (df_cup_extraction['comune'].value_counts()/len(df_cup_extraction)).to_frame()
stats_df = stats_df.reset_index()
stats_df = stats_df.rename(columns={'comune':'prenotazioni'})
stats_df = stats_df.rename(columns={'index':'comune'})
fig, ax = plt.subplots()
fig = stats_df.plot.bar(x='comune', y='prenotazioni', rot=90, legend=False, figsize=(24, 6), width=0.7, ax = ax)
fig.set_xlabel("Comune")
fig.set_ylabel("Frequency on Dataset")
plt.savefig(file_path+'IMG/PRENOTAZIONI(comune).png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
del stats_df
# NUMERO DI PRENOTAZIONI PER COMUNE (SOLO CAMPANIA ESPLICITI)
comuni_list = ['Napoli','Salerno','Avellino','Benevento','Caserta','Altro']
stats_df = (df_cup_extraction['comune'].value_counts()/len(df_cup_extraction)).to_frame()
stats_df = stats_df.reset_index()
stats_df = stats_df.rename(columns={'comune':'prenotazioni'})
stats_df = stats_df.rename(columns={'index':'comune'})
stats_df.loc[-1] = ['Altro', stats_df[~stats_df['comune'].isin(comuni_list)]['prenotazioni'].sum()]
stats_df = stats_df[stats_df['comune'].isin(comuni_list)]
fig, ax = plt.subplots()
fig = stats_df.plot.bar(x='comune', y='prenotazioni', rot=90, legend=False, figsize=(24, 6), width=0.7, ax = ax)
fig.set_xlabel("Comune")
fig.set_ylabel("Frequency on Dataset")
plt.savefig(file_path+'IMG/PRENOTAZIONI(comune_campania).png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
del stats_df
# ANDAMENTO BRANCHE NEGLI ANNI
import re
data = df_cup_extraction.groupby('anno_ins')['branca'].value_counts().unstack().fillna(0)
fig, ax = plt.subplots()
fig =data.plot(rot=90, legend=True, figsize=(12, 6), linewidth = 3, ax = ax)
# fig.set_xlabel("Comune")
fig.set_ylabel("#Prestazioni")
ax.legend(bbox_to_anchor=(1, 1.01),ncol=2)
#fig.set_title(branca)
branca_name = re.sub(r"[^A-Za-z]+", '', branca)
colormap = plt.cm.gist_ncar #nipy_spectral, Set1,Paired
colors = [colormap(i) for i in np.linspace(0, 1,len(ax.lines))]
for i,j in enumerate(ax.lines):
j.set_color(colors[i])
ax.legend(loc=2,bbox_to_anchor=(1, 1.01),ncol=2)
plt.savefig(file_path+'IMG/ANDAMENTO_BRANCHE/ANDAMENTOgenerale.png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
for branca in list(data.columns):
fig, ax = plt.subplots()
fig =data.plot(
y = branca,rot=90, legend=False, figsize=(12, 6), linewidth = 3, ax = ax)
# fig.set_xlabel("Comune")
fig.set_ylabel("#Prestazioni")
fig.set_title(branca)
branca_name = re.sub(r"[^A-Za-z]+", '', branca)
plt.savefig(file_path+'IMG/ANDAMENTO_BRANCHE/'+branca_name+'.png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
del data
# CODICE DI PRIORITA' E ATTESA
df_cup_extraction['priorita'].loc[np.isnan(df_cup_extraction['priorita'])]= 'NAN'
data = pd.DataFrame({
'attesa': df_cup_extraction.groupby('priorita')['attesa_app'].mean(),
'disponibilita': df_cup_extraction.groupby('priorita')['attesa_disp'].mean()})
fig, ax = plt.subplots()
fig =data.plot.bar(y = ['attesa','disponibilita'],rot=90, legend=True, figsize=(12, 6), ax = ax)
fig.set_xlabel("Codice Priorità")
fig.set_ylabel("Average Value (days)")
fig.legend(["Attesa", "Dsiponibilità"])
branca_name = re.sub(r"[^A-Za-z]+", '', branca)
plt.savefig(file_path+'IMG/ATTESA-DISPONIBILITA(priorita).png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
data = pd.DataFrame({
'attesa': df_cup_extraction.groupby('priorita')['attesa_app'].median(),
'disponibilita': df_cup_extraction.groupby('priorita')['attesa_disp'].median()})
fig, ax = plt.subplots()
fig =data.plot.bar(y = ['attesa','disponibilita'],rot=90, legend=True, figsize=(12, 6), ax = ax)
fig.set_xlabel("Codice Priorità")
fig.set_ylabel("Median Value (days)")
fig.legend(["Attesa", "Dsiponibilità"])
branca_name = re.sub(r"[^A-Za-z]+", '', branca)
plt.savefig(file_path+'IMG/ATTESA-DISPONIBILITA(priorita) - median.png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
for branca in list(df_cup_extraction.branca.unique()):
data = pd.DataFrame({
'attesa': df_cup_extraction.loc[df_cup_extraction.branca==branca].groupby('priorita')['attesa_app'].mean(),
'disponibilita': df_cup_extraction.loc[df_cup_extraction.branca==branca].groupby('priorita')['attesa_disp'].mean()})
fig, ax = plt.subplots()
fig =data.plot.bar(y = ['attesa','disponibilita'],rot=90, legend=True, figsize=(12, 6), ax = ax)
fig.set_xlabel("Codice Priorità")
fig.set_ylabel("Average Value (days)")
fig.legend(["Attesa", "Dsiponibilità"])
branca_name = re.sub(r"[^A-Za-z]+", '', branca)
plt.savefig(file_path+'IMG/PRIORITA/AD(priorita) - mean - '+branca_name+'.png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
for branca in list(df_cup_extraction.branca.unique()):
data = pd.DataFrame({
'attesa': df_cup_extraction.loc[df_cup_extraction.branca==branca].groupby('priorita')['attesa_app'].median(),
'disponibilita': df_cup_extraction.loc[df_cup_extraction.branca==branca].groupby('priorita')['attesa_disp'].median()})
fig, ax = plt.subplots()
fig =data.plot.bar(y = ['attesa','disponibilita'],rot=90, legend=True, figsize=(12, 6), ax = ax)
fig.set_xlabel("Codice Priorità")
fig.set_ylabel("Median Value (days)")
fig.legend(["Attesa", "Dsiponibilità"])
branca_name = re.sub(r"[^A-Za-z]+", '', branca)
plt.savefig(file_path+'IMG/PRIORITA/AD(priorita) - median - '+branca_name+'.png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
#%% SFELLO IL DATASET
# Sostituisco i comuni ignobili
df_cup_extraction.loc[~df_cup_extraction['comune'].isin(comuni_list),'comune'] = 'Altro'
# Sostituisco le branche ignobili
df_cup_extraction.loc[~df_cup_extraction['branca'].isin(list_branche_sign),'branca'] = 'ALTRO'
#%% GRAFICA PRENOTAZIONI
# NUMERO DI PRENOTAZIONI PER COMUNE E PER ASL (dopo il taglio)
stats_df = (df_cup_extraction.groupby('asl')['comune'].value_counts().unstack().fillna(0).T)/(df_cup_extraction['comune'].value_counts().sum())
stats_df = stats_df.reset_index()
stats_df = stats_df.rename(columns={'index':'comune'})
stats_df['pdf'] = stats_df.sum(axis=1)
stats_df = stats_df.sort_values(by='pdf', ascending=False)
stats_df['cdf'] = stats_df['pdf'].cumsum()
asl_list = list(df_cup_extraction['asl'].value_counts().index)
fig, ax = plt.subplots()
fig = stats_df.plot.bar(x='comune', y=asl_list, stacked=True, rot=90, legend=True, figsize=(24, 6), width=0.7, ax = ax)
fig.set_xlabel("Comune")
fig.set_ylabel("Frequency on Dataset")
plt.legend(bbox_to_anchor=(1.05, 1))
plt.savefig(file_path+'IMG/PRENOTAZIONI(comune,asl).png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
del stats_df
# NUMERO DI PRENOTAZIONI PER BRANCA E PER ASL (dopo il taglio)
stats_df = (df_cup_extraction.groupby('asl')['branca'].value_counts().unstack().fillna(0).T)/(df_cup_extraction['branca'].value_counts().sum())
stats_df = stats_df.reset_index()
stats_df = stats_df.rename(columns={'index':'branca'})
stats_df['pdf'] = stats_df.sum(axis=1)
stats_df = stats_df.sort_values(by='pdf', ascending=False)
stats_df['cdf'] = stats_df['pdf'].cumsum()
asl_list = list(df_cup_extraction['asl'].value_counts().index)
fig, ax = plt.subplots()
fig = stats_df.plot.bar(x='branca', y=asl_list, stacked=True, rot=90, legend=True, figsize=(24, 6), width=0.7, ax = ax)
fig.set_xlabel("Branca")
fig.set_ylabel("Frequency on Dataset")
plt.legend(bbox_to_anchor=(1.05, 1))
plt.savefig(file_path+'IMG/PRENOTAZIONI_2(branca,asl).png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
fig, ax = plt.subplots()
fig = stats_df.plot.bar(x='branca', y='pdf', stacked=True, rot=90, legend=True, figsize=(24, 6), width=0.7, ax = ax)
fig.set_xlabel("Branca")
fig.set_ylabel("Frequency on Dataset")
plt.legend(bbox_to_anchor=(1.05, 1))
plt.savefig(file_path+'IMG/PRENOTAZIONI_2(branca).png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
# Salvo la lista dele branche ignobili
list_branche_sign = list(stats_df['branca'].loc[stats_df['cdf']<=0.9])
del stats_df
# NUMERO DI PRENOTAZIONI PER BRANCA E PER COMUNE
stats_df = (df_cup_extraction.groupby('comune')['branca'].value_counts().unstack().fillna(0).T)/(df_cup_extraction['branca'].value_counts().sum())
stats_df = stats_df.reset_index()
stats_df = stats_df.rename(columns={'index':'branca'})
stats_df['pdf'] = stats_df.sum(axis=1)
stats_df = stats_df.sort_values(by='pdf', ascending=False)
stats_df['cdf'] = stats_df['pdf'].cumsum()
fig, ax = plt.subplots()
fig = stats_df.plot.bar(x='branca', y=comuni_list, stacked=True, rot=90, legend=True, figsize=(24, 8), width=0.8, ax = ax)
fig.set_xlabel("Branca")
fig.set_ylabel("Frequency on Dataset")
#ax2 = ax.twinx()
#fig = stats_dfF.plot.bar(x='branca', y=fascia_eta_list, stacked=True, rot=90, width=0.4, ax = ax2, position=0, grid=False)
##ax.legend(bbox_to_anchor=(0.99, 0.95))
#ax.set_xlim(-0.5,len(stats_df2)-1)
plt.legend(bbox_to_anchor=(1.09, 1))
plt.savefig(file_path+'IMG/PRENOTAZIONI(branca,comune).png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
del stats_df
# NUMERO DI PRENOTAZIONI PER BRANCA, PER COMUNE E PER SESSO
stats_df_M = (df_cup_extraction[df_cup_extraction['sesso_assistito']==1].groupby('comune')['branca'].value_counts().unstack().fillna(0).T)/(df_cup_extraction['branca'].value_counts().sum())
stats_df_M['pdf'] = stats_df_M.sum(axis=1)
stats_df_M['cdf'] = stats_df_M['pdf'].cumsum()
stats_df_M = stats_df_M.add_suffix('_M')
stats_df_F = (df_cup_extraction[df_cup_extraction['sesso_assistito']==2].groupby('comune')['branca'].value_counts().unstack().fillna(0).T)/(df_cup_extraction['branca'].value_counts().sum())
stats_df_F['pdf'] = stats_df_F.sum(axis=1)
stats_df_F['cdf'] = stats_df_F['pdf'].cumsum()
stats_df_F = stats_df_F.add_suffix('_F')
stats_df = pd.concat([stats_df_M, stats_df_F], axis=1, sort=False)
stats_df = stats_df.reset_index()
stats_df = stats_df.rename(columns={'index':'branca'})
stats_df = stats_df.sort_values(by='pdf_M', ascending=False)
comune_M = [item+'_M' for item in comuni_list]
comune_F = [item+'_F' for item in comuni_list]
fig, ax = plt.subplots()
fig = stats_df.plot.bar(x='branca', y=comune_M, stacked=True, rot=90, legend=True, figsize=(32, 8), width=0.4, ax = ax, position=1)
fig.set_xlabel("Branca")
fig.set_ylabel("Frequency on Dataset - M/F")
fig = stats_df.plot.bar(x='branca', y=comune_F, stacked=True, rot=90, width=0.4, ax = ax, position=0, grid=True, legend = False)
ax.set_xlim(-0.5,len(stats_df_M)+0.5)
plt.legend(comuni_list,bbox_to_anchor=(1.07, 1))
plt.savefig(file_path+'IMG/PRENOTAZIONI(branca,comune,sesso).png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
del stats_df_M, stats_df_F, stats_df
# NUMERO DI PRENOTAZIONI PER BRANCA, PER COMUNE E PER SESSO (ASLbyASL)
for idx, asl in enumerate(asl_list):
stats_df_M = (df_cup_extraction[(df_cup_extraction['sesso_assistito']==1) & (df_cup_extraction['asl']==asl)].groupby(
'comune')['branca'].value_counts().unstack().fillna(0).T)/(df_cup_extraction['branca'].value_counts().sum())
stats_df_M['pdf'] = stats_df_M.sum(axis=1)
stats_df_M['cdf'] = stats_df_M['pdf'].cumsum()
stats_df_M = stats_df_M.add_suffix('_M')
stats_df_F = (df_cup_extraction[(df_cup_extraction['sesso_assistito']==2) & (df_cup_extraction['asl']==asl)].groupby(
'comune')['branca'].value_counts().unstack().fillna(0).T)/(df_cup_extraction['branca'].value_counts().sum())
stats_df_F['pdf'] = stats_df_F.sum(axis=1)
stats_df_F['cdf'] = stats_df_F['pdf'].cumsum()
stats_df_F = stats_df_F.add_suffix('_F')
stats_df = pd.concat([stats_df_M, stats_df_F], axis=1, sort=False)
stats_df = stats_df.reset_index()
stats_df = stats_df.rename(columns={'index':'branca'})
stats_df = stats_df.sort_values(by='pdf_M', ascending=False)
comune_M = [item+'_M' for item in comuni_list]
comune_F = [item+'_F' for item in comuni_list]
fig, ax = plt.subplots()
fig = stats_df.plot.bar(x='branca', y=comune_M, stacked=True, rot=90, legend=True, figsize=(32, 8), width=0.4, ax = ax, position=1)
fig.set_xlabel("Branca")
fig.set_ylabel("Frequency on Dataset - M/F")
fig = stats_df.plot.bar(x='branca', y=comune_F, stacked=True, rot=90, width=0.4, ax = ax, position=0, grid=True, legend = False)
ax.set_xlim(-0.5,len(stats_df_M)+0.5)
ax.set_ylim(0,0.08)
plt.legend(comuni_list, bbox_to_anchor=(1.07, 1))
plt.savefig(file_path+'IMG/PRENOTAZIONI(branca,comune,sesso,asl_'+asl+').png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
del stats_df_M, stats_df_F, stats_df
# NUMERO DI PRENOTAZIONI PER BRANCA E PER FASCIA D'ETA
# Derive age bands ('fascia_eta') from the patient age column.
# Use df.loc[mask, col] = value instead of chained indexing
# (df[col].loc[mask] = value): chained assignment triggers pandas'
# SettingWithCopyWarning and may silently fail to write back to the
# original DataFrame.  Rows with NaN age keep fascia_eta = NaN, as before.
df_cup_extraction['fascia_eta'] = np.nan
eta = df_cup_extraction['eta_assistito']
df_cup_extraction.loc[eta <= 17, 'fascia_eta'] = '0-17'
df_cup_extraction.loc[(eta > 17) & (eta <= 35), 'fascia_eta'] = '18-35'
df_cup_extraction.loc[(eta > 35) & (eta <= 45), 'fascia_eta'] = '36-45'
df_cup_extraction.loc[(eta > 45) & (eta <= 65), 'fascia_eta'] = '46-65'
df_cup_extraction.loc[eta > 65, 'fascia_eta'] = 'over65'
# Age-band labels, sorted descending so the stacked plots keep a stable order.
fascia_eta_list = list(df_cup_extraction['fascia_eta'].value_counts().index)
fascia_eta_list.sort(reverse=True)
stats_df = (df_cup_extraction.groupby('fascia_eta')['branca'].value_counts().unstack().fillna(0).T)/(df_cup_extraction['branca'].value_counts().sum())
stats_df = stats_df.reset_index()
stats_df = stats_df.rename(columns={'index':'branca'})
stats_df['pdf'] = stats_df.sum(axis=1)
stats_df = stats_df.sort_values(by='pdf', ascending=False)
stats_df['cdf'] = stats_df['pdf'].cumsum()
fig, ax = plt.subplots()
fig = stats_df.plot.bar(x='branca', y=fascia_eta_list, stacked=True, rot=90, legend=True, figsize=(24, 8), width=0.8, ax = ax)
fig.set_xlabel("Branca")
fig.set_ylabel("Frequency on Dataset")
#ax2 = ax.twinx()
#fig = stats_dfF.plot.bar(x='branca', y=fascia_eta_list, stacked=True, rot=90, width=0.4, ax = ax2, position=0, grid=False)
##ax.legend(bbox_to_anchor=(0.99, 0.95))
#ax.set_xlim(-0.5,len(stats_df2)-1)
plt.legend(bbox_to_anchor=(1.07, 1))
plt.savefig(file_path+'IMG/PRENOTAZIONI(branca,fascia_eta).png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
del stats_df
# NUMERO DI PRENOTAZIONI PER BRANCA, PER FASCIA D'ETA E PER SESSO
stats_df_M = (df_cup_extraction[df_cup_extraction['sesso_assistito']==1].groupby('fascia_eta')['branca'].value_counts().unstack().fillna(0).T)/(df_cup_extraction['branca'].value_counts().sum())
#stats_df_M = stats_df_M.reset_index()
#stats_df_M = stats_df_M.rename(columns={'index':'branca'})
stats_df_M['pdf'] = stats_df_M.sum(axis=1)
#stats_df_M = stats_df_M.sort_values(by=index, ascending=False)
stats_df_M['cdf'] = stats_df_M['pdf'].cumsum()
stats_df_M = stats_df_M.add_suffix('_M')
stats_df_F = (df_cup_extraction[df_cup_extraction['sesso_assistito']==2].groupby('fascia_eta')['branca'].value_counts().unstack().fillna(0).T)/(df_cup_extraction['branca'].value_counts().sum())
#stats_df_F = stats_df_F.reset_index()
#stats_df_F = stats_df_F.rename(columns={'index':'branca'})
stats_df_F['pdf'] = stats_df_F.sum(axis=1)
#stats_df_F = stats_df_F.sort_values(by=index, ascending=False)
stats_df_F['cdf'] = stats_df_F['pdf'].cumsum()
stats_df_F = stats_df_F.add_suffix('_F')
stats_df = pd.concat([stats_df_M, stats_df_F], axis=1, sort=False)
stats_df = stats_df.reset_index()
stats_df = stats_df.rename(columns={'index':'branca'})
stats_df = stats_df.sort_values(by='pdf_M', ascending=False)
fascia_M = [item+'_M' for item in fascia_eta_list]
fascia_F = [item+'_F' for item in fascia_eta_list]
fig, ax = plt.subplots()
fig = stats_df.plot.bar(x='branca', y=fascia_M, stacked=True, rot=90, legend=True, figsize=(32, 8), width=0.4, ax = ax, position=1)
fig.set_xlabel("Branca")
fig.set_ylabel("Frequency on Dataset - M/F")
#ax2 = ax.twinx()
fig = stats_df.plot.bar(x='branca', y=fascia_F, stacked=True, rot=90, width=0.4, ax = ax, position=0, grid=True, legend = False)
#ax.legend(bbox_to_anchor=(0.99, 0.95))
ax.set_xlim(-0.5,len(stats_df_M)+0.5)
plt.legend(fascia_eta_list, bbox_to_anchor=(1.055, 1))
plt.savefig(file_path+'IMG/PRENOTAZIONI(branca,fascia_eta,sesso).png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
del stats_df_M, stats_df_F, stats_df
# NUMERO DI PRENOTAZIONI PER BRANCA, PER FASCIA D'ETA E PER SESSO (ASLbyASL)
for idx, asl in enumerate(asl_list):
stats_df_M = (df_cup_extraction[(df_cup_extraction['sesso_assistito']==1) & (df_cup_extraction['asl']==asl)].groupby(
'fascia_eta')['branca'].value_counts().unstack().fillna(0).T)/(df_cup_extraction['branca'].value_counts().sum())
#stats_df_M = stats_df_M.reset_index()
#stats_df_M = stats_df_M.rename(columns={'index':'branca'})
stats_df_M['pdf'] = stats_df_M.sum(axis=1)
#stats_df_M = stats_df_M.sort_values(by=index, ascending=False)
stats_df_M['cdf'] = stats_df_M['pdf'].cumsum()
stats_df_M = stats_df_M.add_suffix('_M')
stats_df_F = (df_cup_extraction[(df_cup_extraction['sesso_assistito']==2) & (df_cup_extraction['asl']==asl)].groupby(
'fascia_eta')['branca'].value_counts().unstack().fillna(0).T)/(df_cup_extraction['branca'].value_counts().sum())
#stats_df_F = stats_df_F.reset_index()
#stats_df_F = stats_df_F.rename(columns={'index':'branca'})
stats_df_F['pdf'] = stats_df_F.sum(axis=1)
#stats_df_F = stats_df_F.sort_values(by=index, ascending=False)
stats_df_F['cdf'] = stats_df_F['pdf'].cumsum()
stats_df_F = stats_df_F.add_suffix('_F')
stats_df = pd.concat([stats_df_M, stats_df_F], axis=1, sort=False)
stats_df = stats_df.reset_index()
stats_df = stats_df.rename(columns={'index':'branca'})
stats_df = stats_df.sort_values(by='pdf_M', ascending=False)
fascia_M = [item+'_M' for item in fascia_eta_list]
fascia_F = [item+'_F' for item in fascia_eta_list]
fig, ax = plt.subplots()
stats_df = stats_df.sort_values('branca')
fig = stats_df.plot.bar(x='branca', y=fascia_M, stacked=True, rot=90, legend=True, figsize=(32, 8), width=0.4, ax = ax, position=1)
fig.set_xlabel("Branca")
fig.set_ylabel("Frequency on Dataset - M/F")
#ax2 = ax.twinx()
fig = stats_df.plot.bar(x='branca', y=fascia_F, stacked=True, rot=90, width=0.4, ax = ax, position=0, grid=True, legend = False)
#ax.legend(bbox_to_anchor=(0.99, 0.95))
ax.set_xlim(-0.5,len(stats_df_M)-0.5)
ax.set_ylim(0,0.08)
plt.legend(fascia_eta_list,bbox_to_anchor=(1.055, 1))
plt.savefig(file_path+'IMG/PRENOTAZIONI(branca,fascia_eta,sesso,asl_'+asl+').png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
del stats_df_M, stats_df_F, stats_df
#%% DISPONIBILITA VS ATTESA
# DISPONIBILITÀ E ATTESA IN MEDIA
df_mean_waits = df_cup_extraction.groupby('branca')[['attesa_disp','attesa_app']].mean().sort_values(by='attesa_disp', ascending=False)
df_mean_waits['branca'] = df_mean_waits.index
df_mean_waits = df_mean_waits.reset_index(drop=True)
df_mean_waits = df_mean_waits.sort_values('branca')
fig = df_mean_waits.plot.bar(x='branca', y=['attesa_disp','attesa_app'], rot=90, legend=True, width=0.7, figsize=(32, 8))
fig.set_ylim(0,140)
fig.set_xlabel("Branca")
fig.set_ylabel("Mean waiting time")
fig.legend(["Disponibilità", "Appuntamento"],bbox_to_anchor=(1.08, 1))
plt.savefig(file_path+'IMG/DISPONIBILIAvsATTESAmean.png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
# DISPONIBILITÀ E ATTESA IN MEDIA (ASLbyASL)
for idx, asl in enumerate(asl_list):
df_mean_waits = df_cup_extraction[df_cup_extraction['asl']==asl].groupby('branca')[['attesa_disp','attesa_app']].mean().sort_values(by='attesa_disp', ascending=False)
df_mean_waits['branca'] = df_mean_waits.index
df_mean_waits = df_mean_waits.reset_index(drop=True)
df_mean_waits = df_mean_waits.sort_values('branca')
fig = df_mean_waits.plot.bar(x='branca', y=['attesa_disp','attesa_app'], rot=90, legend=True, width=0.7, figsize=(32, 8))
fig.set_ylim(0,140)
fig.set_xlabel("Branca")
fig.set_ylabel("Mean waiting time")
fig.legend(["Disponibilità", "Appuntamento"],bbox_to_anchor=(1.08, 1))
plt.savefig(file_path+'IMG/DISPONIBILIAvsATTESAmean_asl_'+asl+'.png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
# DISPONIBILITAvsATTESA (Scatter)
plt.figure(figsize=(8, 8))
#sns.lmplot('attesa_disp', 'attesa_app', data=df_cup_extraction, hue='asl', fit_reg=False)
sns.scatterplot(x = 'attesa_disp', y = 'attesa_app', hue='asl',
data=df_cup_extraction.sample(100000), alpha=.7, s=80, edgecolor="none")
plt.xlabel("Disponibilità")
plt.ylabel("Attesa")
plt.legend(bbox_to_anchor=(1.05, 1))
plt.savefig(file_path+'IMG/DISPONIBILITAvsATTESA(asl).png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
#%% DISPONIBILITÀ E ATTESA per ANNO
# DISPONIBILITA' IN GIORNI PER BRANCA E PER ANNO
plt.figure(figsize=(32, 8))
fig = sns.boxplot(x='branca', y='attesa_disp', hue='anno_ins', data=df_cup_extraction.sort_values(by='branca'))
fig.set_xticklabels(fig.get_xticklabels(),rotation=90)
fig.set(ylabel='Disponibilità', xlabel='Branca')
#handles, _ = fig.get_legend_handles_labels()
#fig.legend(handles, ["Male", "Female"])
fig.set_yscale("symlog")
plt.legend(bbox_to_anchor=(1.05, 1))
plt.savefig(file_path+'IMG/DISPONIBILITA(branca,anno).png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
# DISPONIBILITA' IN GIORNI PER BRANCA E PER ANNI - ASLbyASL
for idx, asl in enumerate(asl_list):
plt.figure(figsize=(32, 8))
fig = sns.boxplot(x='branca', y='attesa_disp', hue='anno_ins', data=df_cup_extraction[df_cup_extraction['asl']==asl].sort_values(by='branca'))
fig.set_xticklabels(fig.get_xticklabels(),rotation=90)
fig.set(ylabel='Disponibilità', xlabel='Branca')
#handles, _ = fig.get_legend_handles_labels()
#fig.legend(handles, ["Male", "Female"])
fig.set_yscale("symlog")
plt.legend(bbox_to_anchor=(1.05, 1))
plt.savefig(file_path+'IMG/DISPONIBILITA(branca,anno,asl_'+asl+').png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
# ATTESA IN GIORNI PER BRANCA E PER ANNO
plt.figure(figsize=(32, 8))
fig = sns.boxplot(x='branca', y='attesa_app', hue='anno_ins', data=df_cup_extraction.sort_values(by='branca'))
fig.set_xticklabels(fig.get_xticklabels(),rotation=90)
fig.set(ylabel='Attesa', xlabel='Branca')
#handles, _ = fig.get_legend_handles_labels()
#fig.legend(handles, ["Male", "Female"])
fig.set_yscale("symlog")
plt.legend(bbox_to_anchor=(1.05, 1))
plt.savefig(file_path+'IMG/ATTESA(branca,anno).png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
# ATTESA IN GIORNI PER BRANCA E PER ANNI - ASLbyASL
for idx, asl in enumerate(asl_list):
plt.figure(figsize=(32, 8))
fig = sns.boxplot(x='branca', y='attesa_app', hue='anno_ins', data=df_cup_extraction[df_cup_extraction['asl']==asl].sort_values(by='branca'))
fig.set_xticklabels(fig.get_xticklabels(),rotation=90)
fig.set(ylabel='Attesa', xlabel='Branca')
#handles, _ = fig.get_legend_handles_labels()
#fig.legend(handles, ["Male", "Female"])
fig.set_yscale("symlog")
plt.legend(bbox_to_anchor=(1.05, 1))
plt.savefig(file_path+'IMG/ATTESA(branca,anno,asl_'+asl+').png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
#%% DISPONIBILITÀ E ATTESA per ASL
# DISPONIBILITA' IN GIORNI PER BRANCA E PER ASL
plt.figure(figsize=(32, 8))
fig = sns.boxplot(x='branca', y='attesa_disp', hue='asl', data=df_cup_extraction.sort_values(by='branca'))
fig.set_xticklabels(fig.get_xticklabels(),rotation=90)
fig.set(ylabel='Disponibilità', xlabel='Branca')
#handles, _ = fig.get_legend_handles_labels()
#fig.legend(handles, ["Male", "Female"])
fig.set_yscale("symlog")
plt.legend(bbox_to_anchor=(1.05, 1))
plt.savefig(file_path+'IMG/DISPONIBILITA(branca,asl).png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
## DISPONIBILITA' IN GIORNI PER BRANCA E PER ASL (COMUNEbyCOMUNE)
#for idx, comune in enumerate(comuni_list):
# plt.figure(figsize=(32, 8))
# fig = sns.boxplot(x='branca', y='attesa_disp', hue='asl', data=df_cup_extraction[df_cup_extraction['comune']==comune].sort_values(by='branca'))
# fig.set_xticklabels(fig.get_xticklabels(),rotation=90)
# fig.set(ylabel='Disponibilità', xlabel='Branca')
# #handles, _ = fig.get_legend_handles_labels()
# #fig.legend(handles, ["Male", "Female"])
# fig.set_yscale("symlog")
# plt.legend(bbox_to_anchor=(1.05, 1))
# plt.savefig('IMG/DISPONIBILITA(branca,asl,comune_'+comune+').png', format='png', dpi=300, bbox_inches = "tight")
# plt.show()
stats_df = df_cup_extraction.groupby(['branca','anno_ins'])['attesa_disp'].mean().unstack().sort_values(by='branca')
fig = stats_df.plot.bar(rot=90, legend=True, width=0.7, figsize=(32, 8))
fig.set_ylim(0,80)
fig.set_xlabel("Branca")
fig.set_ylabel("Mean waiting time")
fig.legend(bbox_to_anchor=(1.05, 1))
plt.savefig(file_path+'IMG/DISPONIBILITAmean(anno).png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
# DISPONIBILITA' MEDIA PER BRANCA e ANNI (ASLbyASL)
for idx, asl in enumerate(asl_list):
stats_df = df_cup_extraction[df_cup_extraction['asl']==asl].groupby(['branca','anno_ins'])['attesa_disp'].mean().unstack().sort_values(by='branca')
fig = stats_df.plot.bar(rot=90, legend=True, width=0.7, figsize=(32, 8))
fig.set_ylim(0,80)
fig.set_xlabel("Branca")
fig.set_ylabel("Mean waiting time")
fig.legend(bbox_to_anchor=(1.05, 1))
plt.savefig(file_path+'IMG/DISPONIBILITAmean(anno,asl_'+asl+').png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
del stats_df
# ATTESA IN GIORNI PER BRANCA E PER ASL
plt.figure(figsize=(32, 8))
fig = sns.boxplot(x='branca', y='attesa_app', hue='asl', data=df_cup_extraction)
fig.set_xticklabels(fig.get_xticklabels(),rotation=90)
fig.set(ylabel='Attesa', xlabel='Branca')
#handles, _ = fig.get_legend_handles_labels()
#fig.legend(handles, ["Male", "Female"])
fig.set_yscale("symlog")
plt.legend(bbox_to_anchor=(1.05, 1))
plt.savefig(file_path+'IMG/ATTESA(branca,asl).png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
# ATTESA' IN GIORNI PER BRANCA E PER ASL (COMUNEbyCOMUNE)
for idx, comune in enumerate(comuni_list):
plt.figure(figsize=(32, 8))
fig = sns.boxplot(x='branca', y='attesa_app', hue='asl', data=df_cup_extraction[df_cup_extraction['comune']==comune].sort_values(by='branca'))
fig.set_xticklabels(fig.get_xticklabels(),rotation=90)
fig.set(ylabel='Attesa', xlabel='Branca')
#handles, _ = fig.get_legend_handles_labels()
#fig.legend(handles, ["Male", "Female"])
fig.set_yscale("symlog")
plt.legend(bbox_to_anchor=(1.05, 1))
plt.savefig(file_path+'IMG/ATTESA(branca,asl,comune_'+comune+').png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
# ATTESA/DISPONIBILITA' RISPETTO ALLE PRIORITA'
stats_df = df_cup_extraction.groupby(['branca','priorita'])['attesa_disp'].mean().unstack().sort_values(by='branca')
fig = stats_df.plot.bar(rot=90, legend=True, width=0.7, figsize=(32, 8))
fig.set_ylim(0,40)
fig.set_xlabel("Branca")
fig.set_ylabel("Mean waiting time")
fig.legend(bbox_to_anchor=(1.05, 1))
plt.savefig(file_path+'IMG/DISPONIBILITAmean(branca,priorita).png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
stats_df = df_cup_extraction.groupby(['branca','priorita'])['attesa_app'].mean().unstack().sort_values(by='branca')
fig = stats_df.plot.bar(rot=90, legend=True, width=0.7, figsize=(32, 8))
fig.set_ylim(0,120)
fig.set_xlabel("Branca")
fig.set_ylabel("Mean waiting time")
fig.legend(bbox_to_anchor=(1.05, 1))
plt.savefig(file_path+'IMG/ATTESAmean(branca,priorita).png', format='png', dpi=300, bbox_inches = "tight")
plt.show()
## ATTESA/DISPONIBILITA' RISPETTO ALLE PRIORITA' BOX
#stats_df = df_cup_extraction.groupby(['branca','priorita'])['attesa_disp']
#fig = stats_df.plot.bar(rot=90, legend=True, width=0.7, figsize=(32, 8))
#fig = sns.boxplot(x='branca', y='attesa_disp', hue='priorita', data=stats_df)
#fig.set_ylim(0,40)
#fig.set_xlabel("Branca")
#fig.set_ylabel("Mean waiting time")
#fig.legend(bbox_to_anchor=(1.05, 1))
#plt.savefig(file_path+'IMG/DISPONIBILITAmean(branca,priorita)_box.png', format='png', dpi=300, bbox_inches = "tight")
#plt.show()
#
#stats_df = df_cup_extraction.groupby(['branca','priorita'])['attesa_app'].unstack().sort_values(by='branca')
#fig = stats_df.plot.bar(rot=90, legend=True, width=0.7, figsize=(32, 8))
#fig.set_ylim(0,120)
#fig.set_xlabel("Branca")
#fig.set_ylabel("Mean waiting time")
#fig.legend(bbox_to_anchor=(1.05, 1))
#plt.savefig(file_path+'IMG/ATTESAmean(branca,priorita)_box.png', format='png', dpi=300, bbox_inches = "tight")
#plt.show()
#%% SALVATAGGIO SU FILES PICKLE
# Persist the cleaned extraction to a pickle file.  A ``with`` block
# guarantees the file handle is closed even if pickling raises, unlike
# the previous manual open()/close() pair.
print('Salvataggio su files PICKLE...', end="")
with open(file_path+"DATASET/df_cup_RF.pickle", "wb") as pickling_on:
    pickle.dump(df_cup_extraction, pickling_on)
#df_cup_diabe = df_cup_extraction[df_cup_extraction['branca']=='DIABETOLOGIA']
#print('Salvataggio su files PICKLE...', end="")
#pickling_on = open("DATASET/CLEAN/df_cup_diabe.pickle","wb")
#pickle.dump(df_cup_diabe, pickling_on)
#pickling_on.close()
#
#df_cup_cardio = df_cup_extraction[df_cup_extraction['branca']=='CARDIOLOGIA']
#print('Salvataggio su files PICKLE...', end="")
#pickling_on = open("DATASET/CLEAN/df_cup_cardio.pickle","wb")
#pickle.dump(df_cup_cardio, pickling_on)
#pickling_on.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
5979,
220,
513,
1105,
25,
2998,
25,
940,
13130,
198,
8645,
689,
21528,
13,
198,
182... | 2.185126 | 15,006 |
from pathlib import Path
import os
if __name__ == '__main__':
    import py_wake

    # The docs folder sits one level above the installed py_wake package.
    docs = Path(py_wake.__file__).parents[1] / 'docs'
    patterns = ['*.rst', 'notebooks/*.ipynb']
    if os.path.isdir(docs / 'api'):
        # 'api' directory present: run svg2eps over every doc source,
        # then hide the api pages.
        print("Switch to PDF mode")
        for pattern in patterns:
            for doc_file in docs.glob(pattern):
                svg2eps(doc_file)
        # NOTE(review): rename() gets a bare name, so the target resolves
        # against the current working directory -- confirm this is intended.
        (docs / 'api').rename("api_hide")
    else:
        # Otherwise run svg2eps with a replacer that maps '.eps' back
        # to '.svg', and restore the api pages.
        print("Switch to html mode")
        for pattern in patterns:
            for doc_file in docs.glob(pattern):
                svg2eps(doc_file, lambda s: s.replace('.eps', '.svg'))
        (docs / 'api_hide').rename("api")
| [
6738,
3108,
8019,
1330,
10644,
198,
11748,
28686,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1330,
12972,
62,
48530,
198,
220,
220,
220,
34165,
62,
6978,
796,
10644,
7,
9078,
62,
48530... | 2 | 319 |
def depth(n):
    """Return the number of nodes on the path from *n* up to the root.

    ``None`` has depth 0; a root node (one whose ``parent`` is ``None``)
    has depth 1.
    """
    count = 0
    node = n
    while node is not None:
        count += 1
        node = node.parent
    return count
def lca(a, b):
    """Return the lowest common ancestor of nodes *a* and *b*.

    Both nodes must belong to the same tree, linked upward through
    their ``parent`` attribute.
    """
    shallow, deep = a, b
    d_shallow, d_deep = depth(shallow), depth(deep)
    # Make ``shallow`` the node closer to the root.
    if d_shallow > d_deep:
        shallow, deep = deep, shallow
        d_shallow, d_deep = d_deep, d_shallow
    # Bring the deeper node up to the same level.
    for _ in range(d_deep - d_shallow):
        deep = deep.parent
    # Climb in lockstep until the two paths converge.
    while shallow != deep:
        shallow = shallow.parent
        deep = deep.parent
    return shallow
# Script entry point; ``main`` is defined elsewhere in this file.
if __name__ == '__main__':
    main()
| [
198,
198,
4299,
6795,
7,
77,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
6795,
552,
1769,
262,
6795,
286,
10139,
299,
13,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
288,
796,
657,
198,
220,
220,
220,
981,
299,
318,
... | 2.085526 | 304 |
# coding=utf-8
"""
test.test_stream
~~~~~~~~~~~~~~~~
Test stream.stream module.
:copyright: (c) 2016 by Ali Ghaffaari.
:license: MIT, see LICENSE for more details.
"""
import os
import gzip
import filecmp
from .context import stream
from . import vg_pb2
def read_alns1(fpath, **kwargs):
    """Yield Alignment messages read from a stream file.

    Demonstrates reading protobuf objects with the ``with`` statement.
    Here, as an example, the file is a GAM file containing Alignment
    messages defined in vg_pb2.

    Args:
        fpath (string): path of the file to be read.
    """
    with stream.open(fpath, 'rb', **kwargs) as handle:
        for raw in handle:
            message = vg_pb2.Alignment()
            message.ParseFromString(raw)
            yield message
def read_alns2(fpath, **kwargs):
    """Yield Alignment messages read without the ``with`` statement.

    Opens and closes the stream explicitly and counts group delimiters;
    the file is expected to hold exactly two groups.  Otherwise behaves
    like `read_alns1`.

    Args:
        fpath (string): path of the file to be read.
    """
    handle = stream.open(fpath, 'rb', group_delimiter=True, **kwargs)
    group_count = 0
    for raw in handle:
        if raw is None:  # ``None`` marks a group boundary.
            group_count += 1
        else:
            message = vg_pb2.Alignment()
            message.ParseFromString(raw)
            yield message
    handle.close()
    assert group_count == 2
def write_objs1(fpath, *objs_list, **kwargs):
    """Write protobuf messages into *fpath* using the ``with`` statement.

    The first half of the objects goes into one group and the second
    half into another.

    Args:
        fpath (string): path of the file to be written.
        objs_list (*protobuf.message.Message): list of objects to be written.
    """
    half = len(objs_list) // 2
    first, second = objs_list[:half], objs_list[half:]
    with stream.open(fpath, 'wb', **kwargs) as out:
        out.write(*first)
        out.write(*second)
def write_objs2(fpath, *objs_list, **kwargs):
    """Write protobuf messages into *fpath* without a ``with`` statement.

    Setting the buffer size to half the number of objects makes the
    stream flush into two groups, matching `write_objs1`.

    Args:
        fpath (string): path of the file to be written.
        objs_list (*protobuf.message.Message): list of objects to be written.
    """
    out = stream.open(fpath, 'wb', buffer_size=len(objs_list) // 2, **kwargs)
    out.write(*objs_list)
    out.close()
def test_low(**kwargs):
    """Round-trip test for the low-level read/write helpers."""
    here = os.path.dirname(os.path.realpath(__file__))
    gamfile = os.path.join(here, 'sample_reads.gam')
    gamfile_nof_alns = 12
    rw1_gamfile = os.path.join(here, 'rw1_sample_reads.gam')
    rw2_gamfile = os.path.join(here, 'rw2_sample_reads.gam')
    # Read the sample file and check the expected number of alignments.
    alns = list(read_alns1(gamfile))
    assert len(alns) == gamfile_nof_alns
    # Rewrite into a new file in two groups of 6 objects.
    write_objs1(rw1_gamfile, *alns, **kwargs)
    # Read back and make sure nothing was lost.
    re_alns = list(read_alns2(rw1_gamfile, **kwargs))
    assert len(re_alns) == len(alns)
    # Rewrite the re-read data; both generated files must match.
    write_objs2(rw2_gamfile, *re_alns, **kwargs)
    assert compare(rw1_gamfile, rw2_gamfile, **kwargs)
    # Clean up the generated files.
    os.remove(rw1_gamfile)
    os.remove(rw2_gamfile)
def test_high(**kwargs):
    """Round-trip test for the high-level ``parse``/``dump`` API."""
    here = os.path.dirname(os.path.realpath(__file__))
    gamfile = os.path.join(here, 'sample_reads.gam')
    gamfile_nof_alns = 12
    rw1_gamfile = os.path.join(here, 'rw1_sample_reads.gam')
    rw2_gamfile = os.path.join(here, 'rw2_sample_reads.gam')
    # Read the sample file and check the expected number of alignments.
    alns = list(stream.parse(gamfile, vg_pb2.Alignment))
    assert len(alns) == gamfile_nof_alns
    # Dump into a new file in two groups of 6 objects.
    stream.dump(rw1_gamfile, *alns, buffer_size=len(alns)//2, **kwargs)
    # Read back and make sure nothing was lost.
    re_alns = list(read_alns2(rw1_gamfile, **kwargs))
    assert len(re_alns) == len(alns)
    # Rewrite the re-read data; both generated files must match.
    write_objs2(rw2_gamfile, *re_alns, **kwargs)
    assert compare(rw1_gamfile, rw2_gamfile, **kwargs)
    # Clean up the generated files.
    os.remove(rw1_gamfile)
    os.remove(rw2_gamfile)
def test_low_no_gzip():
    """Run the low-level round-trip test with gzip compression disabled."""
    return test_low(gzip=False)
def test_high_no_gzip():
    """Run the high-level round-trip test with gzip compression disabled."""
    return test_high(gzip=False)
def compare_gzipped(first, second):
    """Compare the decompressed contents of two gzipped stream files.

    The gzip container embeds metadata such as the original file name,
    so two gzipped files can differ byte-for-byte even when their
    payloads are equal; therefore the payloads are decompressed before
    comparison.

    Unlike the previous implementation, the payloads are compared in
    memory: no temporary ``.ungz`` files are written, so nothing is
    left behind on disk if decompression fails halfway through.

    Args:
        first (string): path to the first stream file.
        second (string): path to the second stream file.

    Returns:
        bool: True if the decompressed contents are identical.
    """
    with gzip.open(first, 'rb') as fp1, gzip.open(second, 'rb') as fp2:
        return fp1.read() == fp2.read()
def compare(first, second, **kwargs):
    """Compare two stream files, decompressing them first when gzipped.

    Whether the files are gzipped is controlled by the ``gzip`` keyword
    argument (default ``True``).
    """
    gzipped = kwargs.get('gzip', True)
    if gzipped:
        return compare_gzipped(first, second)
    return filecmp.cmp(first, second)
# Run every round-trip test when this module is executed as a script.
if __name__ == '__main__':
    test_low()
    test_low_no_gzip()
    test_high()
    test_high_no_gzip()
| [
2,
19617,
28,
40477,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
1332,
13,
9288,
62,
5532,
198,
220,
220,
220,
220,
27156,
628,
220,
220,
220,
6208,
4269,
13,
5532,
8265,
13,
628,
220,
220,
220,
1058,
22163,
4766,
25,
357,
66,
... | 2.470403 | 2,534 |
import numpy as np
from ir_sim.world import motion_omni
| [
11748,
299,
32152,
355,
45941,
198,
6738,
4173,
62,
14323,
13,
6894,
1330,
6268,
62,
296,
8461,
628,
198,
220,
220,
220,
220,
628,
220,
220,
220,
220
] | 2.428571 | 28 |
# -*- coding: utf-8 -*-
# GUI-automation demo: drive Windows Notepad with pywinauto -- type a
# short text and save it as "pywinauto.txt".
# NOTE(review): the menu path ("Arquivo ->Salvar") and dialog name
# ("Salvar") are Portuguese, so this assumes a Portuguese-localized
# Windows install -- confirm.
from pywinauto import application
import time
# Launch a fresh Notepad instance.
app = application.Application()
app.start("Notepad.exe")
time.sleep(2)  # crude fixed wait for the window to appear
# Type the sample text into the editor pane.
app.Notepad.edit.type_keys("Este é um teste", with_spaces = True)
time.sleep(2)
# File -> Save, fill in the file name, and confirm the dialog.
app.Notepad.menu_select("Arquivo ->Salvar")
app.Salvar.edit.set_edit_text("pywinauto.txt")
app.Salvar.Salvar.click()
# Commented-out variant that would launch the calculator instead.
'''app = application.Application()
app.start("Calc.exe")'''
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
6738,
12972,
86,
1437,
9390,
1330,
3586,
201,
198,
11748,
640,
201,
198,
1324,
796,
3586,
13,
23416,
3419,
201,
198,
1324,
13,
9688,
7203,
3673,
47852,
13,
13499,
... | 2.473054 | 167 |
#!/usr/bin/env python2
import json
import logging
import logging.handlers
import random
import pytest
from openvisualizer.motehandler.moteprobe import openhdlc
# ============================ logging =========================================
from openvisualizer.utils import format_string_buf
# Rotating log file shared by this test module and the OpenHdlc logger.
LOGFILE_NAME = 'test_hdlc.log'
log = logging.getLogger('test_hdlc')
# Set to ERROR here; note the loop below re-levels 'test_hdlc' to DEBUG.
log.setLevel(logging.ERROR)
log.addHandler(logging.NullHandler())
# Keep up to 5 rotated files of 2 MiB each; mode 'w' truncates on start.
log_handler = logging.handlers.RotatingFileHandler(LOGFILE_NAME, maxBytes=2 * 1024 * 1024, backupCount=5, mode='w')
log_handler.setFormatter(logging.Formatter("%(asctime)s [%(name)s:%(levelname)s] %(message)s"))
# Route DEBUG output of both loggers into the rotating file.
for logger_name in ['test_hdlc', 'OpenHdlc']:
    temp = logging.getLogger(logger_name)
    temp.setLevel(logging.DEBUG)
    temp.addHandler(log_handler)
# ============================ fixtures ========================================
# Build 100 distinct random frames for every frame length in
# 1, 6, 11, ..., 96.  A parallel set of frame tuples makes the
# uniqueness check O(1) per candidate; the previous
# ``frame in RANDOM_FRAME`` rescanned the whole list each time,
# which was quadratic overall.
RANDOM_FRAME = []
_seen_frames = set()
for frame_len in range(1, 100, 5):
    for _run in range(100):
        while True:
            frame = [random.randint(0x00, 0xff) for _ in range(frame_len)]
            key = tuple(frame)
            if key not in _seen_frames:
                break
        _seen_frames.add(key)
        RANDOM_FRAME.append(frame)
del _seen_frames
# Serialise each frame once; the pytest fixture parametrizes over
# these JSON strings.
RANDOM_FRAME = [json.dumps(f) for f in RANDOM_FRAME]
@pytest.fixture(params=RANDOM_FRAME)
# ============================ helpers =========================================
# ============================ tests ===========================================
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
18931,
13,
4993,
8116,
198,
11748,
4738,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
1280,
41464,
7509,
13,
76,
1258,
30281,
13,
... | 2.8998 | 499 |
# Generated by Django 2.1.2 on 2018-11-04 18:49
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
17,
319,
2864,
12,
1157,
12,
3023,
1248,
25,
2920,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: abs"""
import akg.topi
import akg.tvm
import akg.utils as utils
@utils.check_input_type(akg.tvm.tensor.Tensor, (str, type(None)))
def Abs(in_data, target=utils.CCE):
    """
    Compute absolute value of a tensor.

    Args:
        in_data (tvm.tensor.Tensor): Tensor of type float16, float32, int8, uint8, int32.
        target (str): target platform; one of 'Ascend' (CCE), 'GPU', 'CPU'.

    Returns:
        tvm.tensor.Tensor of same type and shape as in_data.

    Supported Platforms:
        'Ascend', 'GPU', 'CPU'
    """
    utils.check_supported_target(target)
    utils.check_shape(in_data.shape)
    src_type = in_data.dtype
    if target == utils.CCE:
        utils.ops_dtype_check(src_type, utils.DtypeForDavinci.ALL_TYPES)
        # Integer inputs are computed in float16 and cast back afterwards.
        needs_cast = src_type in ("int8", "int32", "uint8")
        work = in_data
        if needs_cast:
            work = akg.tvm.compute(in_data.shape, lambda *indice: in_data(*indice).astype("float16"), name='type_cast')
        abs_val = akg.tvm.compute(work.shape, lambda *index: akg.tvm.abs(work(*index)), name='abs_value')
        result = abs_val
        if needs_cast:
            result = akg.tvm.compute(abs_val.shape, lambda *indice: abs_val(*indice).astype(src_type), name='res')
    else:
        # GPU/CPU path: float16 input is widened to float32 for the
        # computation and narrowed back to float16 afterwards.
        work = akg.topi.cast(in_data, 'float32') if src_type == 'float16' else in_data
        result = akg.topi.abs(work)
        if src_type == 'float16':
            result = akg.topi.cast(result, 'float16')
    return result
2,
15069,
12131,
12,
1238,
2481,
43208,
21852,
1766,
1539,
12052,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262... | 2.49066 | 803 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from dgl.nn.pytorch.glob import SumPooling, AvgPooling, MaxPooling
"""
GIN: Graph Isomorphism Networks
HOW POWERFUL ARE GRAPH NEURAL NETWORKS? (Keyulu Xu, Weihua Hu, Jure Leskovec and Stefanie Jegelka, ICLR 2019)
https://arxiv.org/pdf/1810.00826.pdf
"""
from layers.gin_layer import GINLayer, ApplyNodeFunc, MLP
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
198,
11748,
288,
4743,
198,
6738,
288,
4743,
13,
20471,
13,
9078,
13165,
354,
13,
4743,
672,
1330,
5060,
27201,
278,
11,... | 2.647059 | 153 |
#
# Copyright 2018 Fiberhome
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mogan.tests.functional.api import v1 as v1_test
| [
2,
198,
2,
15069,
2864,
29933,
11195,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
733... | 3.716763 | 173 |
words = input().split(", ")
words_lengths = {word: len(word) for word in words}
print_result(words_lengths) | [
198,
10879,
796,
5128,
22446,
35312,
7,
1600,
366,
8,
198,
198,
10879,
62,
13664,
82,
796,
1391,
4775,
25,
18896,
7,
4775,
8,
329,
1573,
287,
2456,
92,
198,
4798,
62,
20274,
7,
10879,
62,
13664,
82,
8
] | 2.794872 | 39 |
# -*- coding: utf-8 -*-
import numpy as np
import json
import algo.arranging.base as arr
import algo.arranging.metrics as metrics
model = research.model
if model.layers_count != 2:
raise ValueError("Ths research is only for 2-level hierarchical models!")
for metric in metrics.metrics_list:
research.report("Metric %s" % metric)
beta_range = np.linspace(-0.2, 1.2, 57)
SCC_chart = []
NDS1_chart = []
NDS2_chart = []
for beta in beta_range:
model.arrange_topics(mode="hierarchical", metric=metric, beta=beta)
SCC_chart.append(model.spectrum_crosses_count())
NDS1_chart.append(model.neighbor_distance_sum(metric=metric, layer=1))
NDS2_chart.append(model.neighbor_distance_sum(metric=metric, layer=2))
# research.report("%f %d %f" % (beta, SCC_chart[-1], NDS2_chart[-1]))
fig = research.get_figure(figsize=(12, 10))
from matplotlib import gridspec
gs = gridspec.GridSpec(3, 1, height_ratios=[1, 1, 1])
ax1 = fig.add_subplot(gs[0])
ax1.set_ylabel("SCC", fontsize=25)
ax1.plot(beta_range, SCC_chart)
ax2 = fig.add_subplot(gs[1], sharex=ax1)
ax2.set_ylabel(r"$NDS_1$", fontsize=25)
ax2.plot(beta_range, NDS1_chart)
ax3 = fig.add_subplot(gs[2], sharex=ax1)
ax3.set_xlabel(r"$\beta$", fontsize=25)
ax3.set_ylabel(r"$NDS_2$", fontsize=25)
ax3.plot(beta_range, NDS2_chart)
ax1.tick_params(labelsize=15)
ax2.tick_params(labelsize=15)
ax3.tick_params(labelsize=15)
fig.subplots_adjust(hspace=.15)
put_vertical(ax1, SCC_chart, [0.8])
put_vertical(ax2, NDS1_chart, [0.8])
put_vertical(ax3, NDS2_chart, [0.8])
# fig.suptitle(r"Hierarchical spectrum quality, depending on $\beta$",
# fontsize=20)
# axes.set_title(r"Hierarchical spectrum quality, depending on $\beta$")
# lgd = axes.legend(loc='center left', bbox_to_anchor=(1, 0.5))
research.report_picture(width=400, name="beta_%s_%s" %
(str(research.dataset), metric))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
33918,
198,
11748,
435,
2188,
13,
3258,
4924,
13,
8692,
355,
5240,
198,
11748,
435,
2188,
13,
3258,
4924,
13,
4164,
10466,
355... | 2.165422 | 937 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from typing import Tuple
import numpy as np
import torch
import torch.nn as nn
from .base import BackProp
class VanillaBackprop(BackProp):
"""
Produces gradients generated with vanilla back propagation from the image
https://github.com/utkuozbulak/pytorch-cnn-visualizations/blob/master/src/vanilla_backprop.py
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
628,
198,
6738,
19720,
1330,
309,
29291,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
20471... | 2.805755 | 139 |
'''
Tests the data merge functions and package.
.. moduleauthor:: Chris Fournier <chris.m.fournier@gmail.com>
'''
from __future__ import absolute_import
import unittest
import os
from segeval.data.tsv import (input_linear_mass_tsv, input_linear_positions_tsv)
from segeval.data.samples import HEARST_1997_STARGAZER
class TestTsv(unittest.TestCase):
'''
Test data merge functions.
'''
test_data_dir = os.path.split(__file__)[0]
def test_input_linear_mass_tsv(self):
'''
Test mass TSV file input.
'''
tsv_file = os.path.join(self.test_data_dir, 'hearst1997.tsv')
dataset = input_linear_mass_tsv(tsv_file)
self.assertEqual(dataset['hearst1997'],
HEARST_1997_STARGAZER['stargazer'])
def test_input_linear_positions_tsv(self):
'''
Test position TSV file input.
'''
tsv_file = os.path.join(self.test_data_dir, 'hearst1997_positions.csv')
dataset = input_linear_positions_tsv(tsv_file, delimiter=',')
self.assertEqual(dataset['hearst1997_positions'],
HEARST_1997_STARGAZER['stargazer'])
| [
7061,
6,
198,
51,
3558,
262,
1366,
20121,
5499,
290,
5301,
13,
198,
198,
492,
8265,
9800,
3712,
5180,
376,
1798,
959,
1279,
354,
2442,
13,
76,
13,
69,
1798,
959,
31,
14816,
13,
785,
29,
198,
7061,
6,
198,
6738,
11593,
37443,
834,
... | 2.166667 | 534 |
import numpy as np
from numba import jit
from ..constants import Constants as c
from .lagrange import calcLagrangeCoeffs
from .lagrange import applyLagrangeCoeffs
__all__ = [
"propagateUniversal",
]
MU = c.MU
@jit(["f8[:,:](f8[:,:], f8[:], f8[:], f8, i8, f8)"], nopython=True, cache=True)
def propagateUniversal(orbits, t0, t1, mu=MU, max_iter=100, tol=1e-14):
"""
Propagate orbits using the universal anomaly formalism.
Parameters
----------
orbits : `~numpy.ndarray` (N, 6)
Orbital state vectors (X_0) with position in units of AU and velocity in units of AU per day.
t0 : `~numpy.ndarray` (N)
Epoch in MJD at which orbits are defined.
t1 : `~numpy.ndarray` (M)
Epochs to which to propagate each orbit. If a single epoch is given, all orbits are propagated to this
epoch. If multiple epochs are given, then will propagate each orbit to that epoch.
mu : float, optional
Gravitational parameter (GM) of the attracting body in units of
AU**3 / d**2.
max_iter : int, optional
Maximum number of iterations over which to converge. If number of iterations is
exceeded, will return the value of the universal anomaly at the last iteration.
tol : float, optional
Numerical tolerance to which to compute universal anomaly using the Newtown-Raphson
method.
Returns
-------
orbits : `~numpy.ndarray` (N*M, 8)
Orbits propagated to each MJD with position in units of AU and velocity in units of AU per day.
The first two columns are the orbit ID (a zero-based integer value assigned to each unique input orbit)
and the MJD of each propagated state.
"""
new_orbits = []
num_orbits = orbits.shape[0]
for i in range(num_orbits):
for j, t in enumerate(t1):
r = np.ascontiguousarray(orbits[i, 0:3])
v = np.ascontiguousarray(orbits[i, 3:6])
dt = t - t0[i]
lagrange_coeffs, stumpff_coeffs, chi = calcLagrangeCoeffs(
r,
v,
dt,
mu=mu,
max_iter=max_iter,
tol=tol
)
r_new, v_new = applyLagrangeCoeffs(r, v, *lagrange_coeffs)
new_orbits.append([i, t, r_new[0], r_new[1], r_new[2], v_new[0], v_new[1], v_new[2]])
return np.array(new_orbits) | [
11748,
299,
32152,
355,
45941,
198,
6738,
997,
7012,
1330,
474,
270,
198,
198,
6738,
11485,
9979,
1187,
1330,
4757,
1187,
355,
269,
198,
6738,
764,
30909,
9521,
1330,
42302,
43,
363,
9521,
34,
2577,
487,
82,
198,
6738,
764,
30909,
952... | 2.310811 | 1,036 |
import pandas as pd
from bokeh.palettes import Reds
REGIONAL_LINKS = [{"name": "Россия", "link": "/"},
{"name": "Москва", "link": "/moscow"}]
# Categories
CATEGORIES = ["total", "died", "recovered", "swabs"]
CATEGORIES_STYLES = {
"total": {"color_class": "red-font", "text": "выявлено", "icon_class": "fa-exclamation"},
"died": {"color_class": "black-font", "text": "умерло", "icon_class": "fa-cross"},
"recovered": {"color_class": "green-font", "text": "выздоровело", "icon_class": "fa-heart"},
"swabs": {"color_class": "blue-font", "text": "тыс. тестов проведено", "icon_class": "fa-vial"}
}
LEGEND_MAP = ["выявлено", "умерло", "выздоровело", "активных"]
# Swabs categories
SWABS_CATEGORIES = ["swabs_clean", "swabs_derived_daily"]
SWABS_LEGEND_MAP = ["тестов всего", "выявлено"]
SWABS_RANGE = (10000, 56500000)
SWABS_ALPHA = 25
SWABS_START_DATE = "2020-03-20"
SWABS_SIZE = 6000
SWABS_DELAY = -1
# Colors
COLOR_RAMP = ["#073763", "#990800", "#38761d", "#417ab0", "#e50b00", "#85c26b"]
MAP_PALLETE = Reds[9]
AGES_COLOR_RAMP = ["#38761d", "#417ab0", "#073763", "#e50b00", "#990800"]
DISCHARGES_COLOR_RAMP = ["#990800", "#38761d"]
# Daily plots
DAILY_LEGEND_MAP = {"18-45": "в возрасте от 18 до 45 лет",
"46-65": "в возрасте от 46 до 65 лет",
"66-79": "в возрасте от 66 до 79 лет",
"80+": "в возрасте старше 80 лет",
"children": "детей",
"18-45%": "в возрасте от 18 до 45 лет",
"46-65%": "в возрасте от 46 до 65 лет",
"66-79%": "в возрасте от 66 до 79 лет",
"80+%": "в возрасте старше 80 лет",
"children%": "детей",
"died": "умерло",
"recovered": "выздоровело"}
DAILY_WIDTH = pd.Timedelta(hours=16)
DAILY_DISCHARGE_CATEGORIES = ["died", "recovered"]
DAILY_RANGE = (-9000, 9000)
# Tooltips
CASES_TOOLTIP = """<div class="plot-tooltip">
<h4>{city}</h4>
<div>
<span style="font-weight: bold;">Дата: </span>@date_str
</div>"""
CASES_TOOLTIP_FOOTER = """<div>
<span style="font-weight: bold;">{value_type}: </span>@{col}{fmt}
</div>
</div>
"""
MAP_TOOLTIP = """
<div class="plot-tooltip">
<div class="mb-2">
<h4>@REGION</h4>
</div>
<div>
<span style="font-weight: bold;">Выявлено: </span>@total{0,0}<sup>@total_diff{+0,0}</sup>
</div>
<div>
<span style="font-weight: bold;">Выздоровело: </span>@recovered{0,0}<sup>@recovered_diff{+0,0}</sup>
</div>
<div>
<span style="font-weight: bold;">Умерло: </span>@died{0,0}<sup>@died_diff{+0,0}</sup>
</div>
</div>
"""
SWABS_TOOLTIP = """<div class="plot-tooltip">
<h4>Россия</h4>
<div>
<span style="font-weight: bold;">Дата: </span>@date_str
</div>
<div>
<span style="font-weight: bold;">Всего проведено тестов: </span>@swabs_clean{0.0a}
</div>
<div>
<span style="font-weight: bold;">Проведено тестов за день: </span>@swabs_derived_daily{0.0a}
</div>
<div>
<span style="font-weight: bold;">% положительных (за все время): </span>@positive{0.0 %} <div class="square" style="opacity: @alpha"></div>
</div>
</div>
"""
DAILY_TOOLTIP_FOOTER = """<div>
<span style="font-weight: bold;">Выявлено всего: </span>@{total_col}{{0,0}}
</div>
<div>
<span style="font-weight: bold;">{value_type}: </span>@{col}{fmt}
</div>
</div>
"""
DISCHARGES_TOOLTIP_FOOTER = """<div>
<span style="font-weight: bold;">Выбыло: </span>@{total_col}{{0,0}}
</div>
<div>
<span style="font-weight: bold;">{value_type}: </span>@{col}{fmt}
</div>
</div>
"""
# Moscow specific
MSK_DIR = "moscow"
# Transport
MSK_TRANSPORT_DIR = "hospitals"
PUBLIC_TR_COLS = [
{"key": "metro", "name": "метро"},
{"key": "landlines", "name": "наземный транспорт"},
{"key": "intercity trains", "name": "электрички"}
]
PRIVATE_TR_COLS = [
{"key": "taxi", "name": "такси"},
{"key": "car sharing", "name": "каршеринг"},
{"key": "cars", "name": "личные автомобили"}
]
# Hospitals
MSK_HOSPITALS_DIR = "hospitals"
MSK_HOSPITALS = [{"key": "gkb40",
"name": "ГКБ № 40",
"link": "https://gkb40dzm.ru/",
"cmo": "Денис Николаевич Проценко",
"address": "ул. Сосенский стан, д. 8",
"fields": [{"key": "total", "name": "всего на лечении", "glyph": "bar", "alpha": 0.25},
{"key": "covid", "name": "подтверждено CoVID-19"},
{"key": "total_vent", "name": "на ИВЛ", "glyph": "line"},
{"key": "total_icu", "name": "в ОРИТ", "glyph": "line"},
{"key": "total_oxygen", "name": "с кислородной поддержкой", "glyph": "line"},
{"key": "pneumonia", "name": "внебольничная пневмония"}
]
},
{"key": "gkb15",
"name": "ГКБ № 15 им. О. М. Филатова",
"link": "http://gkb15.moscow/",
"cmo": "Валерий Иванович Вечорко",
"address": "ул. Вешняковская, д. 23",
"fields": [{"key": "total", "name": "всего на лечении", "glyph": "bar", "alpha": 0.25},
{"key": "covid", "name": "подтверждено CoVID-19", "glyph": "line"},
{"key": "total_vent", "name": "на ИВЛ", "glyph": "line"},
{"key": "total_icu", "name": "в ОРИТ", "glyph": "line"},
{"key": "total_oxygen", "name": "с кислородной поддержкой", "glyph": "line"},
{"key": "pneumonia", "name": "внебольничная пневмония", "glyph": "line"}
]
},
{"key": "ikb2",
"name": "ИКБ № 2",
"link": "https://www.ikb2.ru/",
"cmo": "Светлана Васильевна Краснова",
"address": "8-я ул. Соколиной горы, д. 15",
"fields": [{"key": "total", "name": "всего на лечении", "glyph": "bar", "alpha": 0.25},
{"key": "covid", "name": "CoVID-19 (всего)", "glyph": "line"},
{"key": "covid_confirmed", "name": "CoVID-19 (лабораторно подтвержденных)", "glyph": "line"},
{"key": "covid_icu", "name": "CoVID-19 в ОРИТ", "glyph": "line"},
{"key": "covid_vent", "name": "CoVID-19 на ИВЛ", "glyph": "line"},
{"key": "covid_children", "name": "детей с CoVID-19", "glyph": "line"}
]
},
{"key": "gkb50",
"name": "ГКБ им. С. И. Спасокукоцкого",
"link": "https://50gkb.ru/",
"cmo": "Алексей Владимирович Погонин",
"address": "ул. Вучетича, д. 21",
"fields": [{"key": "total", "name": "всего на лечении", "glyph": "bar", "alpha": 0.25},
{"key": "covid", "name": "внебольничная пневмония + CoVID-19", "glyph": "line"},
{"key": "pneumonia", "name": "внебольничная пневмония", "glyph": "line"}
]
},
{"key": "gkb67",
"name": "ГКБ № 67 им. Л. А. Ворохобова",
"link": "http://67gkb.ru/",
"cmo": "Андрей Сергеевич Шкода",
"address": "ул. Саляма Адиля, д. 2/44",
"fields": [{"key": "total", "name": "всего на лечении", "glyph": "bar", "alpha": 0.25},
{"key": "covid", "name": "подтверждено CoVID-19", "glyph": "line"},
{"key": "total_vent", "name": "на ИВЛ", "glyph": "line"},
{"key": "total_icu", "name": "в ОРИТ", "glyph": "line"}
]
},
{"key": "gkbi",
"name": "ГКБ им. Ф. И. Иноземцева",
"link": "https://inozemtcev.ru/",
"cmo": "Александр Евгеньевич Митичкин",
"address": "ул. Фортунатовская, д. 1",
"fields": [{"key": "total", "name": "всего на лечении", "glyph": "bar", "alpha": 0.25},
{"key": "covid", "name": "подтверждено CoVID-19", "glyph": "line"},
{"key": "total_vent", "name": "на ИВЛ", "glyph": "line"},
{"key": "total_icu", "name": "в ОРИТ", "glyph": "line"}
]
},
{"key": "nmsc",
"name": "НМХЦ им. Н. И. Пирогова",
"link": "http://www.pirogov-center.ru/",
"cmo": "Виталий Геннадьевич Гусаров",
"address": "ул. Нижняя Первомайская, д. 70",
"fields": [{"key": "total", "name": "всего на лечении", "glyph": "bar", "alpha": 0.25},
{"key": "covid", "name": "подтверждено CoVID-19", "glyph": "line"},
{"key": "total_vent", "name": "всего на ИВЛ", "glyph": "line"},
{"key": "total_icu", "name": "в ОРИТ", "glyph": "line"},
{"key": "covid_vent", "name": "подтвержденных CoVID-19 на ИВЛ", "glyph": "line"},
{"key": "covid_suspected", "name": "предполагаемых CoVID-19", "glyph": "line"}
]
},
{"key": "mccid",
"name": 'МКЦИБ "Вороновское"',
"link": "https://demikhova.ru/",
"cmo": "Сергей Николаевич Переходов",
"address": "Вороновское поселение, д. Голохвастово",
"fields": [{"key": "total", "name": "всего на лечении", "glyph": "bar", "alpha": 0.25},
{"key": "covid", "name": "подтверждено CoVID-19", "glyph": "line"},
{"key": "total_vent", "name": "всего на ИВЛ", "glyph": "line"},
{"key": "total_oxygen", "name": "с кислородной поддержкой", "glyph": "line"}
]
},
]
| [
11748,
19798,
292,
355,
279,
67,
198,
6738,
1489,
365,
71,
13,
18596,
23014,
1330,
14703,
198,
198,
31553,
2849,
1847,
62,
43,
17248,
50,
796,
685,
4895,
3672,
1298,
366,
140,
254,
15166,
21727,
21727,
18849,
40623,
1600,
366,
8726,
1... | 1.47581 | 7,131 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
from mr_database import MrDatabase
from mr_database import LogLevel
""" import of table classes """
from samples.table_schema_examples import Image
db = MrDatabase(os.path.join(os.path.abspath(os.path.join(__file__, os.pardir)), 'test_functionality.db'))
if __name__ == '__main__':
# enables logging at 'DEBUG' level
MrDatabase.logging(level=LogLevel.error)
# drop tables
db.drop_table(Image)
# create tables
db.create_table(Image)
for index, image_name in enumerate(os.listdir(os.path.join(os.path.dirname(__file__), 'sample_data'))):
image_path = os.path.realpath(os.path.join(os.path.dirname(__file__), 'sample_data', image_name))
with open(image_path, 'rb') as image_file:
image = Image()
image.id = index
image.md5 = Image.md5_file_object(image_file)
image.imageName = image_path
image.imageData = Image.read_blob_file(image_file)
db.insert_record(image)
image2: Image = db.select_record(Image, 'id=2')
with open(image2.imageName, 'wb') as file:
file.write(image2.imageData)
print('done')
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
198,
6738,
285,
81,
62,
48806,
1330,
1770,
38105,
198,
6738,
285,
81,
62,
48806,
1330,
5972,
497... | 2.390438 | 502 |
from rest_framework.serializers import ModelSerializer
from rest_framework import serializers
from . import models
| [
6738,
1334,
62,
30604,
13,
46911,
11341,
1330,
9104,
32634,
7509,
198,
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
198,
6738,
764,
1330,
4981,
628,
628
] | 4.407407 | 27 |
'''
Configy confguration container
'''
# pylint: disable=W0212,R0903
import os
import re
from copy import deepcopy
import yaml
class ConfigyError(Exception):
'''
Configy exception handler
'''
pass
env_pattern = re.compile(r".*?\${(.*?)}.*?")
yaml.add_implicit_resolver("!pathex", env_pattern)
yaml.add_constructor("!pathex", env_constructor)
class CDict(dict):
'''
Dict-type that allows accessing by attribute
'''
class ICDict(dict):
'''
Case-insensitive dict-type that allows accessing by attribute
'''
class ConfigContainer(object):
'''
Singleton containing configuration
'''
def _set_config(self, conf, case_sensitive=None):
'''
Private helper to set the config data to new dict
'''
if case_sensitive is None:
case_sensitive = self._case_sensitive
else:
self._case_sensitive = case_sensitive
if case_sensitive:
self._config = CDict(conf)
else:
self._config = ICDict(conf)
def _get_config(self):
'''
Private helper that gets the actual config data
'''
return self._config
def __getitem__(self, item):
'''
Override .get() to use config reference correctly
'''
return self._config[item]
def __getattr__(self, attr):
'''
Override getattr() so config.SOME_VALUE works transparently
'''
return self._config[attr]
def __iter__(self):
'''
Makes base config iterable
'''
return self._config.__iter__()
def keys(self):
'''
Makes base config mappable
'''
return self._config.keys()
config = ConfigContainer() # pylint: disable=C0103
def extend_config(conf, data):
'''
Extends the config by replacing the overwriting the dataset granularily.
'''
for key, val in data.items():
if isinstance(val, dict) and isinstance(conf.get(key, None), dict):
conf[key] = extend_config(conf[key], val)
else:
conf[key] = val
return conf
def load_file(name):
'''
Loads the given file by name as a dict object.
Returns None on error.
'''
if name:
try:
with open(name) as fil:
val = yaml.load(fil, Loader=yaml.FullLoader)
if isinstance(val, dict):
return val
elif val is None:
pass
else:
raise ConfigyError(
"File '%s' does not contain key-value pairs" % name)
except IOError:
raise ConfigyError("File '%s' does not exist" % name)
except yaml.error.YAMLError:
raise ConfigyError("File '%s' is not a valid YAML document" % name)
return None
def build_config(conf=None, env=None, defaults=None, data=None, case_sensitive=True):
'''
Builds the config for load_config. See load_config for details.
'''
# 1) data
if isinstance(data, dict):
res = deepcopy(data)
else:
res = {}
# 2) defaults
_res = load_file(defaults)
if _res:
res = extend_config(res, _res)
# 3) conf/env
if env:
conf = os.environ.get(env, conf)
_res = load_file(conf)
if _res:
res = extend_config(res, _res)
if not case_sensitive:
def recursive_lowkey(dic):
'''Recursively lowercases dict keys'''
_dic = {}
for key, val in dic.items():
if isinstance(val, dict):
val = recursive_lowkey(val)
_dic[key.lower()] = val
return _dic
res = recursive_lowkey(res)
return res
def load_config(conf=None, env=None, defaults=None, data=None, case_sensitive=True):
'''
Loads configuration and sets the config singleton.
In order of least precedence:
data
Manually provided defaults as dict
defaults
File-name of defaults to load
env
Overrides conf file-name based on existance of env var with this name.
If env-var points to non-existing or unparseable file, then conf is
loaded as per usual.
conf
Default configuration file if ``env`` doesn't exist.
case_sensitive
Defaults to True, set to False if you want case insensitive config
'''
config._set_config(build_config(conf, env, defaults, data), case_sensitive)
| [
7061,
6,
198,
16934,
88,
1013,
70,
3924,
9290,
198,
7061,
6,
198,
2,
279,
2645,
600,
25,
15560,
28,
54,
2999,
1065,
11,
49,
2931,
3070,
198,
11748,
28686,
198,
11748,
302,
198,
6738,
4866,
1330,
2769,
30073,
198,
11748,
331,
43695,
... | 2.264275 | 1,979 |
import base64
from filelock import FileLock
from kaggle_environments import make
import numpy as np
import os
from pathlib import Path
import pickle
import time
import torch
import torch.nn.functional as F
from torch.cuda import amp
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.jit import TracerWarning
from tqdm import tqdm
from typing import *
import warnings
from .alphagoose_data import AlphaGooseDataset
from hungry_geese.nns.models import FullConvActorCriticNetwork
from ...env import goose_env as ge
| [
11748,
2779,
2414,
198,
6738,
2393,
5354,
1330,
9220,
25392,
198,
6738,
479,
9460,
293,
62,
268,
12103,
1330,
787,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
2298,
293,
198,
11... | 3.621795 | 156 |
from rest_framework import serializers
from rest_framework.renderers import JSONRenderer, TemplateHTMLRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from ngallery.images.models import Image
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
6738,
1334,
62,
30604,
13,
10920,
19288,
1330,
19449,
49,
437,
11882,
11,
37350,
28656,
49,
437,
11882,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
1334,
62,
30604,
13,
... | 4.084746 | 59 |
__author__ = 'NovikovII'
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
alist = [{'a':[1,2,3], 'b':(1,2,3), 'c':{'a':'List','b':'Data'}, 'd':3.14}]
print(alist)
json_list = json.dumps(alist)
print(json_list)
json_list = json.dumps(alist, separators=(',',':'))
print(json_list)
json_list = json.dumps(alist, separators=(',',':')mc)
print(json_list)
json_list = json.dumps(alist, separators=(',',':'), indent=4, sort_keys=True)
print(json_list)
alist_2 = json.loads(json_list)
print(alist)
print(alist_2) | [
834,
9800,
834,
796,
705,
20795,
1134,
709,
3978,
6,
198,
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
33918,
198,
198,
49845,
796,
685,
90,
6,
... | 2.236052 | 233 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628,
628
] | 2.805556 | 36 |
#! /usr/bin/env python3
"""
* Copyright (c) 2019, Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import os.path as path
import re
from androidbpgenerator import INDENT, CCDefaults, ModuleInfo, Generator, NOVERBOSE
RUN_CALLABLE = True
if RUN_CALLABLE:
m = Main()
m.run() | [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
9,
15069,
357,
66,
8,
13130,
11,
8180,
10501,
198,
9,
198,
9,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
198,
9,
4866,
2... | 3.581081 | 370 |
import collections
import json
import logging
import gevent.queue
import requests
from metagrok import utils
from metagrok.showdown import actor
from metagrok.showdown.room import BattleRoom
from metagrok.showdown.battle_queue import BattleQueue
from metagrok.showdown.connection import ShowdownConnection
class Client(actor.Actor):
'''Contains primitives to communicate with a Pokemon Showdown server.
Features:
- Username challenge-response handling (of non-registered users so far)
- Battle challenge management
- Battle room management
args:
- [conf] username
'''
@property
@property
@property
def _battle_finalized(self, roomid):
'Called when a battle is finalized (fully finished).'
self.log('closing battle [%s]', roomid)
self._s.battles[roomid].stop()
self._s.battles[roomid].join()
del self._s.battles[roomid]
self.log('closed battle [%s]', roomid)
self._bq_send('battle-ended', None)
def _battle_accepted(self, roomid):
'Called when a battle has started between two players.'
self._bq_send('battle-started', None)
if self._s.mm_queue:
data = self._s.mm_queue.popleft()
else:
data = {
'player_ctor': self._player_ctor,
'id': roomid,
}
room = self.spawn(BattleRoom,
roomid = roomid,
player_ctor = data['player_ctor'],
conf = self._conf,
extra = data,
)
self._s.battles[roomid] = room
return room
| [
11748,
17268,
198,
11748,
33918,
198,
11748,
18931,
198,
198,
11748,
4903,
1151,
13,
36560,
198,
11748,
7007,
198,
198,
6738,
1138,
363,
305,
74,
1330,
3384,
4487,
198,
6738,
1138,
363,
305,
74,
13,
12860,
2902,
1330,
8674,
198,
6738,
... | 2.719852 | 539 |
from pathlib import Path
from autograde.cli.util import namespace_args, find_archives, traverse_archives
from autograde.util import logger
@namespace_args
def cmd_patch(patch: str, result: str, **_) -> int:
"""Patch result archive(s) with results from a different run"""
patch = Path(patch)
result = Path(result)
# load patches
patches = {a.results.checksum: a.results for a in traverse_archives(find_archives(patch))}
# inject patches
for archive in traverse_archives(find_archives(result), mode='a'):
if patch := patches.get(archive.results.checksum):
archive.inject_patch(patch)
else:
logger.warning(f'no patch for {archive.filename} found')
return 0
| [
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
1960,
519,
27585,
13,
44506,
13,
22602,
1330,
25745,
62,
22046,
11,
1064,
62,
48814,
11,
38138,
62,
48814,
198,
6738,
1960,
519,
27585,
13,
22602,
1330,
49706,
628,
198,
31,
14933,
10223,
... | 2.859375 | 256 |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
_author_ = 'lilu'
print('hello')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
62,
9800,
62,
796,
705,
75,
346,
84,
6,
198,
198,
4798,
10786,
31373,
11537,
198
] | 2.025 | 40 |
import random
import math
from sklearn.datasets import make_blobs
from src.data.utils.places_utils import get_random_image
from src.data.utils.lowpoly_utils import get_random_lowpoly_vehicle
# --- Scene lighting configuration (ranges sampled per render) ---
MIN_LIGHTS = 5
MAX_LIGHTS = 8
LIGHT_ENERGY_MIN = 0.4
LIGHT_ENERGY_MAX = 1
LIGHT_POSITION_VAR = 3
LIGHT_CAST_SHADOWS = True
LIGHT_TYPES = ['SUN']
LIGHT_COLOR_MIN = 1
LIGHT_NEGATIVE = False
# --- Object scale and mesh-transform configuration ---
OBJECT_SCALE_MIN = 20
OBJECT_SCALE_MAX = 40#0.2
OBJECT_TRANSFORMS = ['RANDOMIZE'] #['CAST', 'SOLIDIFY', 'SIMPLE_DEFORM', 'RANDOMIZE']
# Number of transforms applied per object (currently disabled: 0..0).
OBJECT_TRANSFORMS_MIN = 0
OBJECT_TRANSFORMS_MAX = 0
OBJECT_TRANSFORMS_CAST_FACTOR_MIN = -0.4
OBJECT_TRANSFORMS_CAST_FACTOR_MAX = 0.3
OBJECT_TRANSFORMS_CAST_TYPES = ['SPHERE', 'CUBOID']
OBJECT_TRANSFORMS_SOLIDIFY_THICKNESS_MIN = -0.18
OBJECT_TRANSFORMS_SOLIDIFY_THICKNESS_MAX = 0.4
OBJECT_TRANSFORMS_SIMPLE_DEFORM_ANGLE_MIN = -0.3
OBJECT_TRANSFORMS_SIMPLE_DEFORM_ANGLE_MAX = 0.3
OBJECT_TRANSFORMS_RANDOMIZE_FACTOR_MIN = -0.05
OBJECT_TRANSFORMS_RANDOMIZE_FACTOR_MAX = 0.05
BACKGROUND_CLASSES = ['cliff', 'hotel-outdoor', 'hangar-outdoor', 'bridge', 'moat-water', 'pond', 'hospital_room', 'mezzanine', 'ocean', 'apartment_building-outdoor', 'shoe_shop', 'bow_window-indoor', 'raceway', 'forest_path', 'ski_slope', 'building_facade', 'boathouse', 'hardware_store', 'barndoor', 'cafeteria', 'aqueduct', 'village', 'iceberg', 'lighthouse', 'discotheque', 'sky', 'alley', 'corral', 'tower', 'oast_house', 'amusement_park', 'balcony-exterior', 'slum', 'delicatessen', 'pasture', 'embassy', 'jacuzzi-indoor', 'balcony-interior', 'office_cubicles', 'pub-indoor', 'legislative_chamber', 'basketball_court-indoor', 'gymnasium-indoor', 'harbor', 'kitchen', 'chalet', 'watering_hole', 'shower', 'elevator_lobby', 'forest-broadleaf', 'beauty_salon', 'staircase', 'auditorium', 'train_interior', 'living_room', 'swimming_pool-outdoor', 'restaurant_kitchen', 'swamp', 'lecture_room', 'biology_laboratory', 'forest_road', 'heliport', 'gift_shop', 'bazaar-outdoor', 'ball_pit', 'hangar-indoor', 'canal-urban', 'bank_vault', 'hunting_lodge-outdoor', 'loading_dock', 'boxing_ring', 'entrance_hall', 'shed', 'mosque-outdoor', 'desert-vegetation', 'parking_lot', 'elevator_shaft', 'throne_room', 'kennel-outdoor', 'pantry', 'recreation_room', 'jail_cell', 'inn-outdoor', 'locker_room', 'lock_chamber', 'arcade', 'swimming_hole', 'cockpit', 'boat_deck', 'fire_station', 'mountain', 'chemistry_lab', 'rope_bridge', 'lagoon', 'airplane_cabin', 'downtown', 'tree_farm', 'islet', 'mountain_snowy', 'ruin', 'attic', 'ice_floe', 'car_interior', 'lake-natural', 'dorm_room', 'home_theater', 'greenhouse-indoor', 'bar', 'fishpond', 'florist_shop-indoor', 'mansion', 'bus_interior', 'dam', 'patio', 'athletic_field-outdoor', 'grotto', 'basement', 'shopfront', 'carrousel', 'archaelogical_excavation', 'gazebo-exterior', 'beer_hall', 'rainforest', 'windmill', 'kasbah', 'office', 'mountain_path', 'schoolhouse', 'palace', 'bedchamber', 'crevasse', 'canyon', 'plaza', 'archive', 'laundromat', 
'diner-outdoor', 'booth-indoor', 'natural_history_museum', 'pagoda', 'volleyball_court-outdoor', 'beer_garden', 'wheat_field', 'rice_paddy', 'art_studio', 'bedroom', 'banquet_hall', 'repair_shop', 'raft', 'glacier', 'army_base', 'playroom', 'football_field', 'subway_station-platform', 'waiting_room', 'field_road', 'waterfall', 'classroom', 'garage-outdoor', 'utility_room', 'dining_room', 'wave', 'youth_hostel', 'phone_booth', 'art_gallery', 'corridor', 'museum-outdoor', 'temple-asia', 'nursing_home', 'snowfield', 'bakery-shop', 'lobby', 'office_building', 'bamboo_forest', 'aquarium', 'galley', 'butte', 'bazaar-indoor', 'library-outdoor', 'field-wild', 'amusement_arcade', 'gas_station', 'wind_farm', 'beach', 'sauna', 'ballroom', 'wet_bar', 'food_court', 'greenhouse-outdoor', 'corn_field', 'courthouse', 'racecourse', 'cabin-outdoor', 'courtyard', 'hayfield', 'farm', 'martial_arts_gym', 'excavation', 'cottage', 'medina', 'ticket_booth', 'childs_room', 'clothing_store', 'zen_garden', 'candy_store', 'fire_escape', 'doorway-outdoor', 'orchestra_pit', 'drugstore', 'mausoleum', 'general_store-outdoor', 'clean_room', 'campsite', 'landing_deck', 'manufactured_home', 'highway', 'playground', 'water_tower', 'orchard', 'underwater-ocean_deep', 'physics_laboratory', 'pavilion', 'art_school', 'tundra', 'river', 'beach_house', 'cemetery', 'baseball_field', 'church-outdoor', 'artists_loft', 'skyscraper', 'museum-indoor', 'ice_shelf', 'landfill', 'science_museum', 'picnic_area', 'soccer_field', 'volcano', 'auto_factory', 'alcove', 'library-indoor', 'hospital', 'junkyard', 'crosswalk', 'pier', 'vegetable_garden', 'closet', 'promenade', 'botanical_garden', 'marsh', 'train_station-platform', 'bullring', 'desert_road', 'auto_showroom', 'trench', 'home_office', 'creek', 'department_store', 'conference_room', 'valley', 'pizzeria', 'oilrig', 'butchers_shop', 'hotel_room', 'igloo', 'yard', 'amphitheater', 'roof_garden', 'house', 'badlands', 'pet_shop', 'railroad_track', 'reception', 
'japanese_garden', 'castle', 'campus', 'television_room', 'bathroom', 'field-cultivated', 'lawn', 'hot_spring', 'burial_chamber', 'ice_cream_parlor', 'berth', 'restaurant', 'engine_room', 'dressing_room', 'sushi_bar', 'fountain', 'runway', 'toyshop', 'catacomb', 'construction_site', 'bowling_alley', 'canal-natural', 'topiary_garden', 'formal_garden', 'fabric_store', 'storage_room', 'general_store-indoor', 'airfield', 'park', 'vineyard', 'bookstore', 'barn', 'ski_resort', 'porch', 'jewelry_shop', 'arch', 'golf_course', 'viaduct', 'boardwalk', 'garage-indoor', 'industrial_area', 'bus_station-indoor', 'motel', 'sandbox', 'music_studio', 'dining_hall', 'street', 'elevator-door', 'computer_room', 'kindergarden_classroom', 'coffee_shop', 'television_studio', 'swimming_pool-indoor', 'desert-sand', 'stable', 'assembly_line', 'synagogue-outdoor', 'server_room', 'fastfood_restaurant', 'atrium-public', 'veterinarians_office', 'residential_neighborhood', 'nursery', 'coast', 'driveway', 'operating_room', 'parking_garage-indoor', 'tree_house', 'escalator-indoor', 'pharmacy', 'rock_arch', 'parking_garage-outdoor', 'restaurant_patio', 'conference_center']
# --- Background plane, camera and render-output configuration ---
BACKGROUND_SIZE_X = 6
BACKGROUND_SIZE_Y = 8
AMBIENT_COLOR = [0.5, 0.5, 0.5]
# Camera looks down the -X axis: positioned at x=5, focused on x=-5.
CAMERA_POSITION = [5, 0, 0]
CAMERA_FOCUS = [-5, 0, 0]
RENDER_SIZE_X = 1024
RENDER_SIZE_Y = 768
| [
11748,
4738,
198,
11748,
10688,
198,
6738,
1341,
35720,
13,
19608,
292,
1039,
1330,
787,
62,
2436,
8158,
198,
198,
6738,
12351,
13,
7890,
13,
26791,
13,
23625,
62,
26791,
1330,
651,
62,
25120,
62,
9060,
198,
6738,
12351,
13,
7890,
13,... | 2.563644 | 2,459 |
from django.contrib.auth.models import User
from core.views.config_user import ConfigUser
# Seed-data for a demo account; username is derived from the email below.
user1 = {
    'email': 'test@clarus-films.com',
    'password': 'demo',
    'first_name': "Clarus",
    'last_name': "Films GmbH",
}
email = user1['email']
password = user1['password']
first_name = user1['first_name']
last_name = user1['last_name']
# Derive a Django-safe username: '@'->'__', '.'->'_', '-'->'___'.
username = email.replace("@", "__")
username = username.replace(".", "_").replace("-", "___")
# Idempotent creation: only set the password on first creation.
user, created = User.objects.get_or_create(username=username, email=email, first_name=first_name, last_name=last_name)
if created:
    user.set_password(password)
    user.save()
    print("User {} saved.".format(email))
else:
    print("User {} already exists.".format(email))
# Provision the account: token, group, plan, per-user databases.
# NOTE(review): ConfigUser method semantics are defined in core.views.config_user — verify there.
config_user = ConfigUser(user)
config_user.generate_token(password)
config_user.create_group()
config_user.user_plan_and_account(plan='basic', account_type='admin')
# username.split('__')[1] is the domain part of the original email address.
config_user.create_databases('data_' + username.split('__')[1])
config_user.create_databases('results_' + username.split('__')[1])
config_user.add_user()
print("User {} configured.".format(email))
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
198,
6738,
4755,
13,
33571,
13,
11250,
62,
7220,
1330,
17056,
12982,
198,
198,
7220,
16,
796,
1391,
198,
220,
220,
220,
705,
12888,
10354,
705,
9288,
31,
565,
... | 2.798969 | 388 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Chris Griffith"
from pathlib import Path
import pkg_resources
# Plugin metadata for the HEVC NVENC encoder (consumed by fastflix).
name = "HEVC (NVENC)"
requires = "cuda-llvm"
video_extension = "mkv"
video_dimension_divisor = 1
# Absolute path to the bundled encoder icon.
# Fix: dropped the extraneous f-string prefix (the literal has no placeholders, F541).
icon = str(Path(pkg_resources.resource_filename(__name__, "../../data/encoders/icon_nvenc.png")).resolve())
enable_subtitles = True
enable_audio = True
enable_attachments = True
from fastflix.encoders.ffmpeg_hevc_nvenc.command_builder import build
from fastflix.encoders.ffmpeg_hevc_nvenc.settings_panel import NVENC as settings_panel
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
834,
9800,
834,
796,
366,
15645,
30469,
1,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
279,
10025,
62,
37540,
... | 2.745098 | 204 |
import socket,uuid


def get_mac_address():
    """Return this machine's MAC address formatted as 'aa:bb:cc:dd:ee:ff'.

    Fix: the original script called get_mac_address() without defining it
    anywhere (NameError at runtime) and imported uuid without using it.
    uuid.getnode() returns the MAC as a 48-bit integer; we extract it byte
    by byte from the most-significant octet down.
    """
    mac = uuid.getnode()
    return ':'.join('{:02x}'.format((mac >> shift) & 0xff) for shift in range(40, -1, -8))


if __name__ == '__main__':
    # Get the host name
    hostname = socket.gethostname()
    # Resolve the host name to an IP address
    ip = socket.gethostbyname(hostname)
    # Print host name, IP address and MAC address
    print(hostname, ip, get_mac_address())
11748,
17802,
11,
12303,
312,
198,
2,
5525,
236,
115,
20998,
244,
10310,
119,
17312,
118,
28938,
235,
198,
4774,
3672,
796,
17802,
13,
1136,
4774,
3672,
3419,
198,
2,
164,
236,
115,
20998,
244,
4061,
198,
541,
796,
17802,
13,
1136,
... | 1.948052 | 77 |
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import atexit
import datetime
import logging
import os
import threading
import time
from functools import wraps
import elasticsearch as es7
import elasticsearch6 as es6
import pytz
from elasticsearch import helpers as helpers7
from elasticsearch6 import helpers as helpers6
from fedlearner.common.common import Config, INDEX_NAME, INDEX_TYPE, \
get_es_template
class ElasticSearchHandler(Handler):
    """
    Emit documents to ElasticSearch
    """

    def _create_template_and_index(self, index_type):
        """
        Args:
            index_type: ES index type.

        Creates a template and an index on ES.
        """
        # Fix: this method reads instance state (self._es, self._version) and
        # takes `self`, so the original @staticmethod decorator was wrong and
        # would raise a TypeError when called on an instance; it was removed.
        assert index_type in INDEX_TYPE
        self._es.indices.put_template(
            name='{}-template'.format(INDEX_NAME[index_type]),
            body=get_es_template(index_type, self._version)
        )
        try:
            self._es.indices.create(index=INDEX_NAME[index_type])
            return
        # index may have been created by other jobs
        except (es6.exceptions.RequestError, es7.exceptions.RequestError) as e:
            # if due to other reasons, re-raise exception
            if e.info['error']['type'] != 'resource_already_exists_exception':
                raise e

    # NOTE(review): `Metrics` and `emit` are not defined in this excerpt —
    # presumably they come from elsewhere in the original module; verify.
    _metrics_client = Metrics()

    # Currently no actual differences among the methods below
    emit_counter = emit
    emit_store = emit
    emit_timer = emit
| [
2,
15069,
12131,
383,
10169,
14961,
1008,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846... | 2.83844 | 718 |
#!/usr/bin/python
#Aplicacao de administracao de classes de servicos - Versao 7.0
#Manipula uma lista de objetos Classe
import os
from classe import Classe
import pickle
path_home = os.getenv("HOME") #Capture the path of the HOME folder
path_home = '/home/bruno'  # hard-coded override of the HOME lookup above
filename=path_home+'/ryu/Bruno/classes.conf' #Name of the service-class configuration file
tx_max=1000000000 #Maximum network throughput in bps
if __name__=='__main__': #Main entry point
    classlist=[]
    # Load previously persisted classes, if the config file exists.
    # NOTE(review): retrieve(), persist() and configqos() are not defined in
    # this excerpt — presumably defined elsewhere in the original module.
    if os.path.isfile(filename):
        classlist=retrieve()
    '''
    print '\nCONFIGURACAO INICIAL:'
    #tx_max=int(raw_input('Digite a vazao maxima da rede em bps: '))
    tx_max = 9000000
    #media=int(raw_input('Digite a taxa media da classe de melhor esforco (be) em bps: '))
    media = 9000000
    #pico=int(raw_input('Digite a taxa de pico da classe de melhor esforco (be) em bps: '))
    pico = 9000000
    if len(classlist)==0:
        classlist.append(Classe(0,'be',media,pico))
    else:
        classlist[0]=Classe(0,'be',media,pico)
    '''
    '''
    while True:
        opcao=menu()
        #classlist.append(Classe(len(classlist),nome,media,pico))
        if opcao==1: #Incluir classe
            nome=raw_input('\nDigite o nome da classe: ')
            media=int(raw_input('Digite a taxa media em bps: '))
            pico=int(raw_input('Digite a taxa de pico em bps: '))
            classlist.append(Classe(len(classlist),nome,media,pico))
        if opcao==2: #Alterar classe
            c=search(classlist)
            if c==None:
                continue
            media=int(raw_input('Digite a taxa media em bps: '))
            pico=int(raw_input('Digite a taxa de pico em bps: '))
            c.media=media
            c.pico=pico
        if opcao==3: #Listar parametros de classe
            c=search(classlist)
            if c==None:
                continue
            c.imprime()
        if opcao==4: #Listar classes
            if isempty(classlist):
                continue
            for c in classlist:
                c.imprime()
        if opcao==9: #Sair
            break
    '''
    # Persist the class list and apply the QoS configuration (queues).
    persist(classlist)
    configqos(classlist)
    print('Aplicando a Filas!!')
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
32,
489,
291,
330,
5488,
390,
6863,
330,
5488,
390,
6097,
390,
37756,
418,
532,
18535,
5488,
767,
13,
15,
198,
2,
5124,
541,
4712,
334,
2611,
1351,
64,
390,
26181,
316,
418,
1012,
2161... | 2.236623 | 841 |
# -*- coding: utf-8 -*-
import argparse
from abc import abstractmethod
from argparse import ArgumentTypeError
from pre_commit_hooks.loaderon_hooks.util.check_failed_exception import CheckFailedException
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
1822,
29572,
198,
6738,
450,
66,
1330,
12531,
24396,
198,
6738,
1822,
29572,
1330,
45751,
6030,
12331,
198,
198,
6738,
662,
62,
41509,
62,
25480,
82,
13,
29356,
2... | 3.306452 | 62 |
#!/usr/bin/env python
# encoding: utf-8
from models.base.config import BaseConfig
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
6738,
4981,
13,
8692,
13,
11250,
1330,
7308,
16934,
198
] | 3.037037 | 27 |
# -*- coding: utf-8 -*-
#
# SPDX-FileCopyrightText: © 2013 The freestyle-hid Authors
# SPDX-License-Identifier: 0BSD
# Ensure it's present.
import setuptools_scm # noqa: F401
from setuptools import setup
setup()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
30628,
55,
12,
8979,
15269,
8206,
25,
10673,
2211,
383,
2030,
10992,
12,
49675,
46665,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
657,
21800,
198,
... | 2.721519 | 79 |
print(__doc__)

from collections import OrderedDict
from functools import partial
from time import time

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter

from sklearn import manifold, datasets

# Next line to silence pyflakes. This import is needed.
Axes3D

# Generate an S-curve dataset: n_points samples in 3-D with a 1-D color parameter.
n_points = 1000
X, color = datasets.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2

# Create figure
fig = plt.figure(figsize=(15, 8))
fig.suptitle("Manifold Learning with %i points, %i neighbors"
             % (1000, n_neighbors), fontsize=14)

# Add 3d scatter plot
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
# Fixed elevation/azimuth so the S-shape is clearly visible.
ax.view_init(4, -72)

plt.show()
| [
4798,
7,
834,
15390,
834,
8,
198,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
6738,
640,
1330,
640,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
285,
... | 2.613014 | 292 |
"""
Takes a gff file and for genes with
multiple mRNAs, only keeps the longest
(largest sum of exon lengths). Prints
out a new gff.
If a proteins fasta is also provided,
removes genes shorter than a given
cutoff
"""
from __future__ import print_function
import gffutils
import sys
from os import remove
from time import time
from Bio import SeqIO
# CLI arguments: input gff, output gff, and optionally a proteins fasta,
# a minimum protein length and the mRNA attribute carrying the protein name.
in_gff = sys.argv[1]
out_gff = sys.argv[2]
gene_mrna_out = out_gff + '.gene_to_mRNA'
if len(sys.argv) == 6:
    proteins_fasta = sys.argv[3]
    min_len = int(sys.argv[4])
    name_attribute = sys.argv[5] # mRNA attribute where the protein name is stored (must match with fasta)
else:
    proteins_fasta = None
# Build a temporary sqlite-backed gff database (unique timestamped filename).
db_path = "tmp_%s.sqlite3" % time()
gff_db = gffutils.create_db(in_gff, db_path, force=True, merge_strategy="create_unique")
gff = gffutils.FeatureDB(db_path)
prot_lens = {}
if proteins_fasta:
    # go over fasta and save protein lengths
    prot_lens = {rec.id: len(rec.seq) for rec in SeqIO.parse(proteins_fasta, "fasta")}
with open(out_gff, 'w') as fo, open(gene_mrna_out,'w') as fo2:
    for feature in gff.all_features():
        # Pass through any feature type we do not specifically handle.
        if feature.featuretype not in {'gene', 'mRNA', 'exon', 'CDS', 'five_prime_UTR', 'three_prime_UTR'}:
            print(str(feature), file=fo)
            continue
        if feature.featuretype != 'gene': # mRNA and exon features
            continue
        gene = feature
        print(str(gene), file=fo)
        mrnas = list(gff.children(gene, featuretype='mRNA'))
        if len(mrnas) == 0:
            continue
        # Pick the mRNA with the largest total exon length.
        longest_transcript = mrnas[0]
        max_len = 0
        for mrna in mrnas:
            exons = list(gff.children(mrna, featuretype='exon'))
            total_exons_len = sum([exon.end - exon.start for exon in exons])
            if total_exons_len > max_len:
                max_len = total_exons_len
                longest_transcript = mrna
        # Optionally skip genes whose protein product is shorter than min_len.
        if proteins_fasta:
            transcript_name = longest_transcript[name_attribute][0]
            if transcript_name not in prot_lens or prot_lens[transcript_name] < min_len:
                continue
        print(str(longest_transcript), file=fo)
        print("%s\t%s" %(gene['ID'][0], longest_transcript['ID'][0]),file=fo2)
        for x in gff.children(longest_transcript):
            print(str(x), file=fo)
# Clean up the temporary database.
remove(db_path)
| [
37811,
198,
51,
1124,
257,
308,
487,
2393,
290,
329,
10812,
351,
198,
48101,
285,
42336,
1722,
11,
691,
7622,
262,
14069,
198,
7,
28209,
2160,
286,
409,
261,
20428,
737,
12578,
82,
198,
448,
257,
649,
308,
487,
13,
198,
1532,
257,
... | 2.414716 | 897 |
import os
import numpy as np
import pytest
from multiml import logger
from multiml.saver import Saver
from multiml.task.keras.modules import MLPBlock
# Script entry point: run the Keras-utility test directly.
# NOTE(review): test_keras_util is not defined in this excerpt — presumably
# defined elsewhere in the original module; verify.
if __name__ == '__main__':
    test_keras_util()
| [
11748,
28686,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
198,
6738,
43104,
75,
1330,
49706,
198,
6738,
43104,
75,
13,
82,
8770,
1330,
311,
8770,
198,
6738,
43104,
75,
13,
35943,
13,
6122,
292,
13,
18170,
13... | 2.914286 | 70 |
"""Handle config files"""
import yaml
from pysyte.types.dictionaries import NameSpaces
| [
37811,
37508,
4566,
3696,
37811,
198,
198,
11748,
331,
43695,
198,
198,
6738,
279,
893,
88,
660,
13,
19199,
13,
67,
2867,
3166,
1330,
6530,
4561,
2114,
628,
628
] | 3.172414 | 29 |
# Generated by Django 2.2.12 on 2020-07-20 09:22
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
1065,
319,
12131,
12,
2998,
12,
1238,
7769,
25,
1828,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.875 | 32 |
import os
import argparse
import math
import time
import numpy as np
from tqdm import trange
from tqdm import tqdm
from modules.dataset import KittiDataset, euler_from_quaternion
from modules.load_dataset import *
from modules.box import box_encoding, box_decoding
from modules.model_gcn import *
from modules.visualizer import *
from modules.nms import *
from util.config_util import load_train_config, load_config
from util.metrics import recall_precisions, mAP
import torch
import torch.nn as nn
import torch.optim as optim
#import tensorboardX import SummaryWriter
import open3d as o3d
# NOTE(review): idiomatic form would be `if torch.cuda.is_available():`.
if torch.cuda.is_available() == True:
    print("torch cuda is available")

# Paths to training/config files and the KITTI dataset split.
TRAIN_CONFIG_PATH = "./configs/train_config"
CONFIG_PATH = "./configs/config"
DATASET_DIR = "./dataset/kitti"
DATASET_LIST = "../dataset/split.txt"

train_config = load_train_config(TRAIN_CONFIG_PATH)
config_complete = load_config(CONFIG_PATH)
# Use the 'train' sub-config if present, otherwise the whole config.
if 'train' in config_complete:
    config = config_complete['train']
else:
    config = config_complete

#=============================================================
# GCN Training Config
#=============================================================
model_args = {
    "feature_size": 6,
    "hidden_size": 512,
    "n_block": 3,
    "cls_layer_list": [512, 256, 64],
    "loc_layer_list": [512, 256, 128, 64],
    "pred_cls_num": 3,
    "pred_loc_len": 10,
    "sc_type": "sc",
    "concat_feature": False,
    "gcn_act_fn": "relu",
    "bias": True,
    "use_batch_norm": True,
    "dropout_rate": 0,
    "use_rd": True,
    "rd_act_fn": "relu",
}
loss_args = {
    #"cls_loss_type": "focal_sigmoid",
    "cls_loss_type": "softmax",
    "loc_loss_type": "huber_loss",
    "cls_loss_weight": 0.1,
    "loc_loss_weight": 10.0
}
# Optimizer / schedule hyper-parameters.
learning_rate = 0.008
decay_rate = 0.1
epoches = 2000
batch_size = 1
"""
device : None = CPU
device : cuda:$GPU_NUM = GPU
"""
device = None
#device = "cuda:0"
# Save a checkpoint every N epochs.
# NOTE(review): variable name looks like a typo for save_model_period; kept as-is.
save_model_perid = 10
base_model_folder = "./model"
model_name = "Test"
current_epoch = 0
"""
TRAIN : 학습시 [True] / 시각화시 [False]
TRAIN_VIS : 학습시 실시간 시각화 [True]
RAW_PRED : cls, loc를 모두 예측값으로 시각화 [True] / cls는 정답, loc만 예측으로 시각화 [False]
"""
TRAIN = False
TRAIN_VIS = True
RAW_PRED = False
#=============================================================
# input function ==============================================================
dataset = KittiDataset(
    os.path.join(DATASET_DIR, 'image/training/image_2'),
    os.path.join(DATASET_DIR, 'velodyne/training/velodyne/'),
    os.path.join(DATASET_DIR, 'calib/training/calib/'),
    os.path.join(DATASET_DIR, 'labels/training/label_2'),
    DATASET_LIST,
    num_classes=model_args["pred_cls_num"])
NUM_CLASSES = dataset.num_classes
BOX_ENCODING_LEN = model_args["pred_loc_len"]
#=============================================================
# Build the model; move it to GPU only when a CUDA device string is configured.
optimizer = None
model = GCN_Model(**model_args)
if device == "cuda:0":
    model = model.to(device)
NUM_TEST_SAMPLE = dataset.num_files
label_map = {'Background': 0, 'Car': 1, 'DontCare': 3}
if __name__ == "__main__":
    # Shuffle the sample indices once for this run.
    frame_idx_list = np.random.permutation(NUM_TEST_SAMPLE)
    VIS = None
    pre_model = ""
    # Resume from a checkpoint when current_epoch is non-zero.
    if current_epoch != 0:
        pre_model = os.path.join(base_model_folder, model_name, (f"model_state_{current_epoch}.pt"))

    if TRAIN:
        optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
        #optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
        if pre_model != "":
            model.load_state_dict(torch.load(pre_model))
        if TRAIN_VIS:
            VIS = Visualizer(dataset)
        save_path = os.path.join(base_model_folder, model_name)
        # NOTE(review): train() and detect() come from a star import above — verify origin.
        train(save_path, VIS, TRAIN_VIS, RAW_PRED, current_epoch, decay_rate)
    else:
        # Inference/visualization mode: load weights and render each frame.
        VIS = Visualizer(dataset)
        model.load_state_dict(torch.load(pre_model))
        #model.eval()
        for frame_idx in frame_idx_list:
            input_v, vertex_coord_list, detection_boxes_3d, cls_point = detect(frame_idx)
            gt_box_label_list = dataset.get_ply_label(frame_idx)
            VIS.set_cls_point(cls_point)
            VIS.set_base_pcd(vertex_coord_list[0], input_v[:, 1:])
            VIS.set_gt_box(gt_box_label_list)
            VIS.set_dt_box(detection_boxes_3d)
            #VIS.realtime_draw()
            VIS.scene_draw()
| [
11748,
28686,
201,
198,
11748,
1822,
29572,
201,
198,
11748,
10688,
201,
198,
11748,
640,
201,
198,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
6738,
256,
80,
36020,
1330,
491,
858,
201,
198,
6738,
256,
80,
36020,
1330,
256,
80... | 2.165778 | 2,063 |
# Prompt for the user's name and greet them, centering the name within a
# 20-character field padded with '=' characters.
nome = input('Qual é o seu nome? ')
saudacao = f'Prazer em te conhecer {nome:=^20}!'
print(saudacao)
77,
462,
796,
5128,
10786,
46181,
38251,
267,
384,
84,
299,
462,
30,
705,
8,
198,
4798,
7,
69,
6,
47,
3247,
263,
795,
573,
369,
258,
2189,
1391,
77,
462,
25,
28,
61,
1238,
92,
0,
11537
] | 2.105263 | 38 |
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score, accuracy_score
from sklearn.model_selection import train_test_split
class ClassificationComparator:
    '''
    Performs simple test to check and compare several classification
    methods
    '''
    # NOTE(review): the class body appears elided in this excerpt; the
    # comparison methods are presumably defined in the original module.
| [
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
1072,
11306,
1330,
14534,
34605,
8081,
44292,
198,
6738,
1341,
35720,
13,
29127,
62,
19849,
1330,
5972,
2569,
8081,
2234,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
277,
16,
... | 3.626263 | 99 |
import setuptools

# Use the project README as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()

# Package metadata for pysixdesk (SixTrack job-workflow interface).
setuptools.setup(
    name="pysixdesk",
    version="0.0.1",
    author='Xiaohan Lu,A. Mereghetti',
    author_email='luxh@ihep.ac.cn,Alessio.Mereghetti@cern.ch',
    description="A python interface to manage and control the workflow of SixTrack jobs",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/SixTrack/pysixdesk",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
| [
11748,
900,
37623,
10141,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
198,
198,
2617,
37623,
10141,
13,
40406,
7,
198,
... | 2.588015 | 267 |
#!/usr/bin/env python
#Communicate with end devices via LoRa.
#Communicate with server via MQTT(hbmqtt) and HTTP POST.
#Save data in the sqlite database.
#Parse JSON from MQTT and LoRa protocol.
#Communication module: LoRa.
#Communication method with device via LoRa.
#Uart port drive LoRa module.
#Parse JSON between device and gateway via LoRa channel.
#LoRa module: E32-TTL-100
#Pin specification:
#M0 <--> GPIO(OUT) #mode setting connct to GND is OK!(Low)
#M1 <--> GPIO(OUT) #mode setting connct to GND is OK!(Low)
#RXD <--> 8(TXD) #ttyS0
#TXD <--> 10(RXD) #ttyS0
#AUX <--> GPIO/INT(IN) #module status detecting
#VCC
#GND
#You need to install pyserial manually, install command is below:
#pip install pyserial
import serial
import time
import json

# Open the LoRa module's UART port (ttyS0) at 9600 baud with a short read timeout.
#ser = serial.Serial("/dev/ttyS0", 9600)
ser = serial.Serial("/dev/ttyS0", 9600, timeout=0.2)

# NOTE(review): main() is not defined in this excerpt — presumably defined
# elsewhere in the original module; verify.
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Close the serial port cleanly on Ctrl-C.
        # NOTE(review): idiomatic form would be `if ser is not None:`.
        if ser != None:
            ser.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
30813,
5344,
351,
886,
4410,
2884,
6706,
21762,
13,
198,
2,
30813,
5344,
351,
4382,
2884,
337,
48,
15751,
7,
71,
20475,
80,
926,
8,
290,
14626,
24582,
13,
198,
2,
16928,
1366,
2... | 2.474453 | 411 |
import argparse, os, json
import numpy as np
import matplotlib.pyplot as plt
import cv2
# CLI: read a pyxivo log, undistort every referenced image and save the results.
parser = argparse.ArgumentParser()
parser.add_argument('log', help='log file dumped by pyxivo to process')
parser.add_argument('out_dir', help='output directory to save undistorted images')
parser.add_argument('-debug', action='store_true', default=False, help='if set, show images')


if __name__ == '__main__':
    args = parser.parse_args()

    # Ensure the output directory exists before writing into it.
    if not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)

    # Remap tables (x-map, y-map) consumed by cv2.remap.
    # NOTE(review): createUndistortionLUT is not defined in this excerpt —
    # presumably defined elsewhere in the original module.
    lut = createUndistortionLUT()

    with open(args.log, 'r') as fid:
        data = json.load(fid)

    try:
        for each in data:
            image = cv2.imread(each['ImagePath'])
            ts = each['Timestamp']
            Tsb = np.array(each['TranslationXYZ'])
            Qsb = np.array(each['QuaternionWXYZ'])

            undistorted = cv2.remap(image, lut[0], lut[1], cv2.INTER_LINEAR)

            # One output image per log entry, named by its timestamp.
            outputPath = os.path.join(args.out_dir, '{}.png'.format(ts))
            cv2.imwrite(outputPath, undistorted)

            if args.debug:
                # Fix: the original window titles used '...'.format(ts) on
                # literals with no placeholders — a no-op call, now removed.
                cv2.imshow('distorted image', image)
                cv2.imshow('undistorted image', undistorted)
                cv2.waitKey(30)
    except KeyboardInterrupt:
        pass
    cv2.destroyAllWindows()
| [
11748,
1822,
29572,
11,
28686,
11,
33918,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
269,
85,
17,
628,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
... | 2.203046 | 591 |
import asyncio
from dataclasses import dataclass, field
from typing import Generic, Callable, Union
from .const import _T, logger, _ThingT, Logger
import typing
from asyncio_primitives import CustomCondition, utils as async_utils
from .utils.proxy import LambdaProxy
logger = logger.getChild('states')
@dataclass(eq=False)
class State(Generic[_T]):
    """
    Represents one of Thing's state, when called change, command or update,
    then Events are triggered accordingly, notifying all the subscribers

    :var check: callable, passed to rule-creator
    """
    # Parses raw (string) input into the state's value type.
    converter: Callable[[str], _T] = field(default_factory=float)
    value: _T = 0
    # Owning Thing and state name; filled in after construction (init=False).
    thing: Union[_ThingT] = field(default=None, init=False, repr=True)
    name: str = field(default=None, init=False, repr=True)
    # Condition lists notified on change / update / command events.
    changed: typing.List[CustomCondition] = field(default_factory=lambda : [CustomCondition()], init=False, repr=False)
    received_update: typing.List[CustomCondition] = field(default_factory=lambda : [CustomCondition()], init=False, repr=False)
    received_command: typing.List[CustomCondition] = field(default_factory=lambda : [CustomCondition()], init=False, repr=False)
    check: typing.Callable = field(default_factory=lambda :lambda :True)
    # Human-readable expression string used when composing proxies.
    _str: str = 'state'

    def make_proxy(self
                   , str_template: str
                   , value: typing.Union[typing.Callable[[_T], _T], _T] = None
                   , y = None
                   , check = None
                   , _and=True
                   ):
        """
        Make proxy for State. Replace value with lambda, adds conditions from other if other is State

        :param str_template: format template combining self ({x}) and other ({y}) expression strings
        :param value: replacement value (or callable) for the proxied state
        :param y: the other operand; its conditions are merged when it is a State
        :param check: optional check callable attached to the proxy
        :return: a LambdaProxy typed as this State's class
        """
        if isinstance(y, State):
            _other_str = y._str
        else:
            _other_str = y
        new_str = f'({str_template.format(x=self._str, y=_other_str)})'
        kwargs = {'_str': new_str}
        if check:
            # NOTE(review): `new_check` is undefined here — this branch raises
            # NameError when `check` is truthy; presumably a composed check was
            # intended. Verify against the original module before relying on it.
            kwargs['check'] = lambda x: new_check
            setattr(new_check, '_str', new_str)
        if value is not None:
            kwargs['value'] = value
        if isinstance(y, State):
            # Merge the other State's conditions, preserving order, no duplicates.
            kwargs.update(
                changed=self.changed + [x for x in y.changed if x not in self.changed]
                , received_update=self.received_update + [x for x in y.received_update if x not in self.received_update]
                , received_command=self.received_command + [x for x in y.received_command if x not in self.received_command]
            )
        return typing.cast(self.__class__, LambdaProxy(self, **kwargs))
| [
11748,
30351,
952,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
11,
2214,
198,
6738,
19720,
1330,
42044,
11,
4889,
540,
11,
4479,
198,
198,
6738,
764,
9979,
1330,
4808,
51,
11,
49706,
11,
4808,
51,
722,
51,
11,
5972,
1362,
1... | 2.392336 | 1,096 |
#!/usr/bin/env python3
"""Used to convert Python strings of JSON into human-readable JSON
Useful when building AWS Lambda functions"""
import json
from ast import literal_eval

from .common import main
def py_to_js(obj_str):
    """Evaluate string and return readable result"""
    parsed_obj = literal_eval(obj_str)
    return rprint(parsed_obj)
def rprint(obj, tabs=0):
    """Make 'obj' human readable in JSON context.

    Args:
        obj: value to render (dict, list, str, bool, int, float or None).
        tabs: kept for backward compatibility; indentation is handled by
            json.dumps, so this argument is ignored.

    Returns:
        A 4-space-indented JSON string.

    Fix: the hand-rolled renderer ended with parsed.replace("'", '"'),
    which corrupted any nested string containing an apostrophe, and it
    raised on None. json.dumps produces the same 4-space-indented layout
    (same ": " separator, trailing commas handled) while escaping strings
    correctly; ensure_ascii=False preserves non-ASCII text as before.
    """
    return json.dumps(obj, indent=4, ensure_ascii=False)
# CLI entry point: convert a single positional string argument to readable JSON.
if __name__ == "__main__": # pragma: no cover
    main(py_to_js, str, "Object to convert")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
38052,
284,
10385,
11361,
13042,
286,
19449,
656,
1692,
12,
46155,
19449,
198,
11041,
913,
618,
2615,
30865,
21114,
6814,
5499,
37811,
198,
198,
6738,
6468,
1330,
18875,
... | 2.215543 | 682 |
#
# This file is part of Zhockon Platform project.
#
# Copyright (C) 2009-2011 William Oliveira de Lagos <william.lagos1@gmail.com>
#
# Zhockon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Zhockon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zhockon. If not, see <http://www.gnu.org/licenses/>.
#
import os, sys, collections
class Map(object):
    """ Map wraps a dictionary. It is essentially an abstract class from which
    specific multimaps are subclassed. """
    # NOTE(review): `__repr__` is not defined in this excerpt, so this
    # assignment raises NameError at class-creation time; presumably a
    # __repr__ method exists in the original module. Verify.
    __str__ = __repr__

    def dict(self):
        """ Allows access to internal dictionary, if necessary. Caution: multimaps
        will break if keys are not associated with proper container."""
        return self._dict
class ListMultimap(Map):
""" ListMultimap is based on lists and allows multiple instances of same value. """
class DictMultimap(Map):
""" DictMultimap is based on dicts and allows fast tests for membership. """
| [
2,
198,
2,
770,
2393,
318,
636,
286,
10511,
735,
261,
19193,
1628,
13,
198,
2,
220,
198,
2,
15069,
357,
34,
8,
3717,
12,
9804,
3977,
30012,
8704,
390,
21003,
418,
1279,
10594,
1789,
13,
30909,
418,
16,
31,
14816,
13,
785,
29,
19... | 3.426434 | 401 |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
columns_names=["name","num_file","xDim","yDim","m2","num_mics","num_comb","freq","NMSE","SSIM","pattern","p_real","p_predicted","p_previous"]
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
28665,
82,
62,
14933,
28,
14692,
3672,
2430,
22510,
62,
7753,
2430,
87,
29271,
2430,
88,
2927... | 2.654321 | 81 |
from tonclient.decorators import result_as
from tonclient.module import TonModule
from tonclient.types import ParamsOfStart, RegisteredDebot, ParamsOfFetch, \
ParamsOfExecute, ParamsOfSend, ResponseHandler, ParamsOfInit, \
ResultOfFetch, ParamsOfRemove
class TonDebot(TonModule):
""" Free TON debot SDK API implementation """
@result_as(classname=RegisteredDebot)
def init(
self, params: ParamsOfInit,
callback: ResponseHandler) -> RegisteredDebot:
"""
Creates and instance of DeBot.
Downloads debot smart contract (code and data) from blockchain and
creates an instance of Debot Engine for it
:param params: See `types.ParamsOfInit`
:param callback: Callback for debot events
:return: See `types.RegisteredDebot`
"""
return self.request(
method='debot.init', callback=callback, **params.dict)
def start(self, params: ParamsOfStart):
"""
Starts the DeBot.
Downloads debot smart contract from blockchain and switches it to
context zero.
This function must be used by Debot Browser to start a dialog with
debot. While the function is executing, several Browser Callbacks can
be called, since the debot tries to display all actions from the
context 0 to the user.
When the debot starts SDK registers `BrowserCallbacks` AppObject.
Therefore when `debot.remove` is called the debot is being deleted and
the callback is called with `finish=true` which indicates that it will
never be used again
:param params: See `types.ParamsOfStart`
:return:
"""
return self.request(method='debot.start', **params.dict)
@result_as(classname=ResultOfFetch)
def fetch(self, params: ParamsOfFetch) -> ResultOfFetch:
"""
Fetches DeBot metadata from blockchain.
Downloads DeBot from blockchain and creates and fetches its metadata
:param params: See `types.ParamsOfFetch`
:return: See `types.ResultOfFetch`
"""
return self.request(method='debot.fetch', **params.dict)
def execute(self, params: ParamsOfExecute):
"""
Executes debot action.
Calls debot engine referenced by debot handle to execute input action.
Calls Debot Browser Callbacks if needed.
Chain of actions can be executed if input action generates a list of
subactions
:param params: See `types.ParamsOfExecute`
:return:
"""
return self.request(method='debot.execute', **params.dict)
def send(self, params: ParamsOfSend):
"""
Sends message to Debot.
Used by Debot Browser to send response on DInterface call or from
other Debots
:param params: See `types.ParamsOfSend`
:return:
"""
return self.request(method='debot.send', **params.dict)
def remove(self, params: ParamsOfRemove):
"""
Destroys debot handle.
Removes handle from Client Context and drops debot engine referenced
by that handle
:param params: See `types.ParamsOfRemove`
:return:
"""
return self.request(method='debot.remove', **params.dict)
| [
6738,
5680,
16366,
13,
12501,
273,
2024,
1330,
1255,
62,
292,
198,
6738,
5680,
16366,
13,
21412,
1330,
16859,
26796,
198,
6738,
5680,
16366,
13,
19199,
1330,
2547,
4105,
5189,
10434,
11,
27049,
16587,
313,
11,
2547,
4105,
5189,
37,
7569... | 2.629482 | 1,255 |
from django.db.models.fields.related import ForeignKey
try:
from south.modelsinspector import add_introspection_rules
has_south = True
except ImportError:
has_south = False
from smart_selects import form_fields
class ChainedForeignKey(ForeignKey):
"""
chains the choices of a previous combo box with this one
"""
class GroupedForeignKey(ForeignKey):
"""
Opt Grouped Field
"""
if has_south:
rules_grouped = [(
(GroupedForeignKey,),
[],
{
'to': ['rel.to', {}],
'group_field': ['group_field', {}],
},
)]
add_introspection_rules([], ["^smart_selects\.db_fields\.ChainedForeignKey"])
add_introspection_rules(rules_grouped, ["^smart_selects\.db_fields\.GroupedForeignKey"])
| [
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
25747,
13,
5363,
1330,
8708,
9218,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
5366,
13,
27530,
1040,
806,
273,
1330,
751,
62,
600,
305,
31308,
62,
38785,
198,
220,
220,
220,
468,
62,
... | 2.516026 | 312 |
# Generated by Django 3.0.7 on 2020-06-13 22:22
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
22,
319,
12131,
12,
3312,
12,
1485,
2534,
25,
1828,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
#!/usr/bin/env python
# coding: utf-8
from urllib.parse import urlparse
from pocsuite3.api import requests as req
from pocsuite3.api import register_poc
from pocsuite3.api import Output, POCBase
from pocsuite3.api import POC_CATEGORY, VUL_TYPE
import json
register_poc(TestPOC)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
29572,
198,
6738,
279,
420,
2385,
578,
18,
13,
15042,
1330,
7007,
355,
43089,
198,
6738,
279,
420,
... | 2.666667 | 105 |
from __future__ import unicode_literals
from django.template.defaultfilters import slugify
from django.db import models
from django.core.validators import validate_comma_separated_integer_list
from django.db import models
from servicecatalog.models import STATE, LIVE, ACCESS_DIRECTION, BOTH, PaymentMethod, Module, Contact
from contact_manager.models import Contact, ContactRole
DEV = 'd'
INTE = 'i'
QA = 'q'
CTEST = 'ct'
PROD = 'p'
ENVIRONMENT_OPTIONS = (
(DEV, 'Development'),
(INTE, 'Integration'),
(QA, 'Quality Assurance'),
(CTEST, 'Customer Test'),
(PROD, 'Production'),
)
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
6738,
42625,
14208,
13,
28243,
13,
12286,
10379,
1010,
1330,
31065,
1958,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
7295,
13,
12102,
2024,
... | 2.871429 | 210 |
"""
This script prepares SDF files which can then be used for machine learning.
This includes sanitizing, filtering molecules with bad functional groups and
unwanted elements, removing salts, filtering by Lipinski's rule of five and
unify different tautomers. OpenEye QUACPAC and ChemAxon Marvin are required.
"""
from argparse import ArgumentParser, Namespace
from io import StringIO
from os.path import basename
from subprocess import PIPE, Popen, DEVNULL, SubprocessError
from sys import argv
from typing import Optional, List
import numpy as np
import pandas as pd
from rdkit import Chem
from rdkit.Chem import PandasTools, SaltRemover, Descriptors, Lipinski, Crippen, Mol
__author__ = 'Marcel Baltruschat'
__copyright__ = 'Copyright © 2020'
__license__ = 'MIT'
__version__ = '1.1.0'
SMILES_COL = 'ISO_SMI'
TEMP_COL = 'temp'
PKA_COL = 'pKa'
DS_COL = 'original_dataset'
OID_COL = 'original_ID'
INDEX_NAME = 'ID'
DEFAULT_COLS = ['ROMol', PKA_COL, TEMP_COL, DS_COL, OID_COL]
TEMP_LOWER_CUT = 20
TEMP_UPPER_CUT = 25
# Selenium, Silicon and Boron
BAD_ELEMENTS = ['Se', 'Si', 'B']
BAD_ELEM_QUERY = Chem.MolFromSmarts(f'[{",".join(BAD_ELEMENTS)}]')
BFG = [
Chem.MolFromSmarts('[!#8][NX3+](=O)[O-]'), # "Classical" nitro group
Chem.MolFromSmarts('[$([NX3+]([O-])O),$([NX3+]([O-])[O-])]=[!#8]'), # Nitro group in tautomer form
]
ADDITIONAL_SALTS = [
Chem.MolFromSmarts('[H+]'),
Chem.MolFromSmarts('[I,N][I,N]'),
Chem.MolFromSmarts('[Cs+]'),
Chem.MolFromSmarts('F[As,Sb,P](F)(F)(F)(F)F'),
Chem.MolFromSmarts('[O-,OH][Cl+3]([O-,OH])([O-,OH])[O-,OH]')
]
PKA_LOWER_CUT = 2
PKA_UPPER_CUT = 12
LIPINSKI_RULES = [
(Descriptors.MolWt, 500),
(Lipinski.NumHDonors, 5),
(Lipinski.NumHAcceptors, 10),
(Crippen.MolLogP, 5),
]
def count_bad_elements(mol: Mol) -> int:
"""
Counts occurrences of bad elements
specified in `BAD_ELEM_QUERY` for a molecule.
Parameters
----------
mol : Mol
RDKit mol object
Returns
-------
int
Bad element count
"""
return len(mol.GetSubstructMatches(BAD_ELEM_QUERY))
def count_bfg(mol: Mol) -> int:
"""
Counts occurrences of bad functional groups
specified in `BFG` for a molecule.
Parameters
----------
mol : Mol
RDKit mol object
Returns
-------
int
Bad functional group count
"""
n = 0
for bfg in BFG:
if mol.HasSubstructMatch(bfg):
n += 1
return n
ADDITIONAL_FILTER_RULES = [
(count_bad_elements, 0), # Are there any bad elements (more than zero)
(count_bfg, 0), # Are there any bad functional groups (more than zero)
]
def parse_args() -> Namespace:
"""
Parses commandline parameters
Returns
-------
Namespace
Argparse Namespace object containing parsed commandline
parameters
"""
parser = ArgumentParser()
parser.add_argument('infile', metavar='INFILE')
parser.add_argument('outfile', metavar='OUTFILE')
parser.add_argument('--keep-props', '-kp', metavar='PROP1,PROP2,...', default=[], type=lambda x: x.split(','))
return parser.parse_args()
def check_on_remaining_salts(mol: Mol) -> Optional[Mol]:
"""
Checks if any salts are remaining in the given molecule.
Parameters
----------
mol : Mol
Returns
-------
Mol, optional
Input molecule if no salts were found, None otherwise
"""
if len(Chem.GetMolFrags(mol)) == 1:
return mol
return None
def check_sanitization(mol: Mol) -> Optional[Mol]:
"""
Checks if molecule is sanitizable.
Parameters
----------
mol : Mol
RDKit mol object
Returns
-------
Mol, optional
Sanitized molecule if possible, None otherwise
"""
try:
Chem.SanitizeMol(mol)
return mol
except ValueError:
return None
def cleaning(df: pd.DataFrame, keep_props: List[str]) -> pd.DataFrame:
"""
Cleans the input DataFrame by removing unwanted columns,
removing salts from all molecules and sanitize the molecules.
Parameters
----------
df : DataFrame
DataFrame containing a ROMol column with RDKit molecules
and all columns specified in "keep_props"
keep_props : List[str]
Property names that should be kept through this script
Returns
-------
DataFrame
Cleaned DataFrame
"""
df = df.loc[:, DEFAULT_COLS + keep_props]
salt_rm = SaltRemover.SaltRemover()
salt_rm.salts.extend(ADDITIONAL_SALTS)
df.ROMol = df.ROMol.apply(salt_rm.StripMol)
df.dropna(subset=['ROMol'], inplace=True)
df.ROMol = df.ROMol.apply(check_on_remaining_salts)
df.dropna(subset=['ROMol'], inplace=True)
df.ROMol = df.ROMol.apply(check_sanitization)
df.dropna(subset=['ROMol'], inplace=True)
return df
def filtering(df: pd.DataFrame) -> pd.DataFrame:
"""
Filters DataFrame rows by molecules contained in column
"ROMol" by Lipinski's rule of five, bad functional groups
and unwanted elements.
Parameters
----------
df : DataFrame
DataFrame containing a ROMol column with RDKit molecules
Returns
-------
DataFrame
Filtered DataFrame
"""
del_ix = []
lip = 0
for ix, row in df.iterrows():
violations = 0
for func, thres in LIPINSKI_RULES:
if func(row.ROMol) > thres:
violations += 1
if violations > 1:
del_ix.append(ix)
lip += 1
break
if lip > 0 and del_ix[-1] == ix:
continue
for func, thres in ADDITIONAL_FILTER_RULES:
if func(row.ROMol) > thres:
del_ix.append(ix)
break
print(f'Dropped {lip} mols because of more than one Lipinski rule violation')
print(f'Dropped {len(del_ix) - lip} mols through additional filtering')
return df.drop(index=del_ix)
def mols_to_sdbuffer(df: pd.DataFrame, props: List[str] = None) -> StringIO:
"""
Writes a DataFrame containing a ROMol column in SD format
to a StringIO buffer.
Parameters
----------
df : DataFrame
DataFrame that should be written to a buffer
props : List[str]
List of column names that should also be written
to the buffer
Returns
-------
StringIO
StringIO buffer containing data in SD format
"""
buffer = StringIO()
PandasTools.WriteSDF(df, buffer, properties=props, idName='RowID')
return buffer
def run_external(args: List[str], df: pd.DataFrame, reset_ix: bool = False) -> str:
"""
Calls an external program via subprocess and writes the given
DataFrame in SD format to stdin of the program. It returns
the stdout of the external program.
Parameters
----------
args : List[str]
List of arguments including the call of the desired program
that can be directly passed to the subprocess Popen constructor
df : DataFrame
DataFrame that should be piped to the external program in SD format
reset_ix : bool
If True, the DataFrame index will be reset before passing to the program.
Additionally the index will be written out as SD tag with the name of INDEX_NAME.
Returns
-------
str
Stdout of the external program
Raises
------
SubprocessError
If the called program exits with a non-zero exit code
"""
in_df = df.reset_index() if reset_ix else df
in_prop = [INDEX_NAME] if reset_ix else None
with mols_to_sdbuffer(in_df, in_prop) as buffer:
p = Popen(args, text=True, stdin=PIPE, stdout=PIPE, stderr=DEVNULL)
stdout, _ = p.communicate(buffer.getvalue())
if p.returncode != 0:
raise SubprocessError(f'{args[0]} ended with non-zero exit code {p.returncode}')
return stdout
def run_marvin_pka(df: pd.DataFrame) -> pd.DataFrame:
"""
Calculates pKa values at 25°C within the configured pH range with
ChemAxon Marvin for all molecules contained in `df`. Returns a new
DataFrame with the Marvin results merged into the input DataFrame.
Parameters
----------
df : DataFrame
DataFrame containing RDKit molecules
Returns
-------
DataFrame
Merged DataFrame containing the results from ChemAxon Marvin
"""
cmd_call = ['cxcalc', '--id', INDEX_NAME, 'pka', '-i', str(PKA_LOWER_CUT), '-x', str(PKA_UPPER_CUT),
'-T', '298.15', '-a', '4', '-b', '4']
res_df = pd.read_csv(StringIO(run_external(cmd_call, df, True)),
sep='\t').set_index(INDEX_NAME, verify_integrity=True)
res_df.index = res_df.index.astype(str)
df = df.merge(res_df, right_index=True, left_index=True)
for ix in df.index:
try:
if np.isnan(df.loc[ix, 'atoms']):
continue
except TypeError:
pass
ci = 0
for col in ['apKa1', 'apKa2', 'apKa3', 'apKa4', 'bpKa1', 'bpKa2', 'bpKa3', 'bpKa4']:
val = df.loc[ix, col]
if np.isnan(val):
continue
if val < PKA_LOWER_CUT or val > PKA_UPPER_CUT:
df.loc[ix, col] = np.nan
atoms = df.loc[ix, 'atoms'].split(',')
if len(atoms) == 1:
df.loc[ix, 'atoms'] = np.nan
else:
del atoms[ci]
df.loc[ix, 'atoms'] = ','.join(atoms)
ci -= 1
ci += 1
df.dropna(subset=['atoms'], inplace=True)
return df
def run_oe_tautomers(df: pd.DataFrame) -> pd.DataFrame:
"""
Unifies different tautomers with OpenEye QUACPAC/Tautomers.
Parameters
----------
df : DataFrame
DataFrame containing RDKit molecules
Returns
-------
DataFrame
DataFrame with tautomer canonized structures
"""
cmd_call = ['tautomers', '-maxtoreturn', '1', '-in', '.sdf', '-warts', 'false']
mols, ix, ix_to_drop = [], [], []
for line in run_external(cmd_call, df).split('\n'):
if not line:
continue
smi, idx = line.split(' ')
mol = Chem.MolFromSmiles(smi)
if not mol:
ix_to_drop.append(idx)
continue
mols.append(mol)
ix.append(idx)
if ix_to_drop:
df.drop(index=ix_to_drop, inplace=True)
ixs = set(ix)
if len(ix) != len(ixs):
print('WARNING: Duplicates in tautomers result, removing')
dropped = df.index.difference(ixs)
df.drop(index=dropped, inplace=True)
df.ROMol = mols
return df
def make_dataset_unique(df: pd.DataFrame) -> pd.DataFrame:
"""
Filters out duplicated structures and saves all single values
to a list for pKa, temperature, original dataset and original ID.
Parameters
----------
df : DataFrame
DataFrame containing RDKit molecules
Returns
-------
DataFrame
Aggregated DataFrame without duplicated structures
"""
df[SMILES_COL] = df.ROMol.apply(Chem.MolToSmiles, isomericSmiles=True, canonical=True)
grp = df.groupby(SMILES_COL)
df2 = grp.first()
list_cols = [PKA_COL, TEMP_COL, DS_COL, OID_COL]
for col in list_cols:
df2[col] = grp[col].agg(list)
df2.index.set_names(INDEX_NAME, inplace=True)
return df2
def filter_by_temperature(df: pd.DataFrame) -> pd.DataFrame:
"""
Filters out entries outside the temperature range
Parameters
----------
df : DataFrame
DataFrame containing RDKit molecules
Returns
-------
DataFrame
DataFrame without measurements with to high or to low temperatures
"""
df[TEMP_COL] = pd.to_numeric(df[TEMP_COL], errors='coerce')
return df.query(f'{TEMP_LOWER_CUT} <= temp <= {TEMP_UPPER_CUT}')
def read_dataset(infile: str) -> pd.DataFrame:
"""
Reads a SD file from specified path and returns a DataFrame as result.
Parameters
----------
infile : str
Path to SD file
Returns
-------
DataFrame
DataFrame containing the information from the specified SD file
"""
df = PandasTools.LoadSDF(infile, idName=OID_COL)
df.index = df.index.astype(str).set_names(INDEX_NAME)
if DS_COL not in df.columns:
df[DS_COL] = basename(infile)
return df
def main(args: Namespace) -> None:
"""
Main function of this script
Parameters
----------
args : Namespace
Namespace object containing the parsed commandline arguments
"""
df = read_dataset(args.infile)
print(f'Initial: {len(df)}')
df = cleaning(df, args.keep_props)
print(f'After cleaning: {len(df)}')
df = filtering(df)
print(f'After filtering: {len(df)}')
df = filter_by_temperature(df)
print(f'After temperature control: {len(df)}')
df = run_oe_tautomers(df)
print(f'After QuacPac tautomers: {len(df)}')
df = make_dataset_unique(df)
print(f'After unifying dataset: {len(df)}')
df = run_marvin_pka(df)
print(f'After Marvin pKa: {len(df)}')
PandasTools.WriteSDF(df, args.outfile, idName='RowID', properties=df.columns)
if __name__ == '__main__':
main(parse_args())
| [
37811,
198,
1212,
4226,
25978,
311,
8068,
3696,
543,
460,
788,
307,
973,
329,
4572,
4673,
13,
198,
1212,
3407,
5336,
270,
2890,
11,
25431,
17745,
351,
2089,
10345,
2628,
290,
198,
403,
86,
4126,
4847,
11,
10829,
37056,
11,
25431,
416,... | 2.349043 | 5,644 |
from .base_options import BaseOptions
| [
6738,
764,
8692,
62,
25811,
1330,
7308,
29046,
198
] | 4.222222 | 9 |
#ipython --pylab
import scipy
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
from netCDF4 import Dataset
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import pandas
import pylab
Folder = 'results'
species1 = ['Thunnus_obesus', 'Thunnus_albacares', 'Katsuwonus_pelamis', 'Thunnus_thynnus', 'Thunnus_orientalis', 'Thunnus_maccoyii']
#species2 = ['Thunnus obesus', 'Thunnus albacares', 'Katsuwonus pelamis', 'Thunnus thynnus', 'Thunnus orientalis', 'Thunnus maccoyii']
species2 = ['bigeye tuna', 'yellowfin tuna', 'skipjack tuna', 'Atlantic bluefin tuna', 'Pacific bluefin tuna', 'southern bluefin tuna']
bottomlist = [0.72, 0.43, 0.14]
width = 0.46
height = 0.24
g = [[0.02, bottomlist[0], width, height], [0.52, bottomlist[0], width, height],
[0.02, bottomlist[1], width, height], [0.52, bottomlist[1], width, height],
[0.02, bottomlist[2], width, height], [0.52, bottomlist[2], width, height]]
i = 0
while i<len(species1):
file = Folder + '/modelmean/modelmean.deltap50depth.' + species1[i] + '.nc'
file2 = 'data/IUCN/csv_5deg/IUCN_5deg_' + species1[i] + '.csv'
nc = Dataset(file,'r')
lats = nc.variables['LAT'][:]
lons = nc.variables['LON'][:]
depth = nc.variables['MODELMEAN'][:]
depth = depth.squeeze()
agree = pandas.read_csv(file2, names=['lons', 'lats'])
agree['lons2'] = np.where(agree['lons'] <= 20 , agree['lons'] + 360, agree['lons'])
agreelons = agree['lons2']
agreelats = agree['lats']
fig = plt.figure(1, figsize=(6,5.75))
axg1 = plt.axes(g[i])
m = Basemap(llcrnrlat=-80.,urcrnrlat=80.,projection='eck4',lon_0=205)
depth_cyclic, lons_cyclic = shiftgrid(20., depth, lons, start=True)
x, y = m(*np.meshgrid(lons_cyclic, lats))
a, b = m(pandas.DataFrame.as_matrix(agreelons), pandas.DataFrame.as_matrix(agreelats))
m.drawmapboundary(fill_color='#cccccc') #fill_color='0.5'
m.drawcoastlines()
m.fillcontinents(color='grey', lake_color='0.5')
levels=[-200,-150, -100, -50, 0, 50, 100, 150, 200]
im1 = m.contourf(x,y,depth_cyclic, levels, cmap=plt.cm.RdBu_r, extend='both')
im2 = m.scatter(a,b,s=1.2, marker='o', facecolor='0', lw=0)
plt.title(species2[i], fontsize=12)
# plt.suptitle("Model Mean P50 Depth Change")
# if i==5:
# cb_axes = plt.subplot2grid((4, 2), (0, 1), rowspan=3)
# cb = m.colorbar(im1,ax=cb_axes, size="30%")
# cb.set_ticks([-200,-150,-100,-50,0,50,100,150,200])
# cb.set_ticklabels([-200,'',-100,'',0,'',100,'',200])
i=i+1
cax = fig.add_axes([0.29, 0.06, 0.42, 0.03])
cb=fig.colorbar(im1, cax=cax, orientation='horizontal')
cb.set_ticklabels([-200,'',-100,'',0,'',100,'',200])
pylab.text(0.6, 1.3, 'compression', fontsize=12)
pylab.text(0.01, 1.3, 'expansion', fontsize=12)
pylab.text(-0.6, -0.8, 'Change in\nP$_{50}$ depth (m)', fontsize = 12)
outfig = 'graphs/modelmean_deltap50depthav.ps'
plt.savefig(outfig, dpi=300, bbox_inches=0)
| [
2,
541,
7535,
1377,
79,
2645,
397,
198,
11748,
629,
541,
88,
198,
6738,
285,
489,
62,
25981,
74,
896,
13,
12093,
368,
499,
1330,
6455,
368,
499,
11,
751,
15539,
291,
11,
6482,
25928,
198,
6738,
2010,
34,
8068,
19,
1330,
16092,
292... | 2.228462 | 1,300 |
import unittest
import time
import app.programs.hd
import app.programs.original
from app.state import state, ProgramNotFound
| [
11748,
555,
715,
395,
198,
11748,
640,
198,
198,
11748,
598,
13,
23065,
82,
13,
31298,
198,
11748,
598,
13,
23065,
82,
13,
14986,
198,
6738,
598,
13,
5219,
1330,
1181,
11,
6118,
3673,
21077,
628
] | 3.527778 | 36 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-16 09:49
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
17,
319,
2177,
12,
3312,
12,
1433,
7769,
25,
2920,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.736842 | 57 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: main.py
# This code is mainly borrowed
# from the official example codes of tensorpack library.
# https://github.com/ppwwyyxx/tensorpack/tree/master/examples
# Revised by Junsuk Choe <skykite@yonsei.ac.kr>
# Weakly Supervised Object Localization (WSOL) based on VGG-GAP
import cv2
import sys
import argparse
import numpy as np
import os
import multiprocessing
import tensorflow as tf
import random
from PIL import Image
from tensorpack import *
from tensorpack.dataflow import dataset
from tensorpack.tfutils import optimizer, gradproc
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.tfutils.summary import *
from tensorpack.utils.gpu import get_nr_gpu
from tensorpack.utils import viz
from tensorpack.tfutils.tower import get_current_tower_context
from utils import *
from utils_loc import *
from utils_args import *
from models_vgg import *
if __name__ == '__main__':
args = get_args()
nr_gpu = get_nr_gpu()
TOTAL_BATCH_SIZE = int(args.batch)
BATCH_SIZE = TOTAL_BATCH_SIZE // nr_gpu
args.batch = BATCH_SIZE
model = Model()
# For testing
log_dir = '/min-val-error-top1.index'
if args.gradcam: gradcam(args, model, cam, log_dir)
# For training
logdir = 'train_log/' + args.logdir
logger.set_logger_dir(logdir)
config = get_config(model, args)
if args.load:
args.load = 'pretrained/vgg16.npz'
config.session_init = get_model_loader(args.load)
launch_train_with_config(config,
SyncMultiGPUTrainerParameterServer(nr_gpu))
bundle_test(args, model, cam, log_dir)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
2,
9220,
25,
1388,
13,
9078,
198,
198,
2,
770,
2438,
318,
8384,
22546,
198,
2,
422,
262,
1743,
1672,
12416,
286,
11192,
... | 2.714286 | 602 |
import mx.DateTime, sys, pg, random
v = sys.argv[1]
ts = mx.DateTime.strptime(v, "%Y%m%d%H%M")
out = open("n0r%s.tfw" % (v,), "w")
out.write(
""" 0.0100000000000%s
0.00000
0.00000
-0.010000000000000%s
-126.000000
50.0000"""
% (v, random.randint(0, 1000))
)
out.close()
sys.exit(0)
mydb = pg.connect("postgis", "iemdb-postgis.local")
mydb.query("SET TIME ZONE 'GMT'")
sql = "SELECT * from nexrad_n0r_tindex WHERE datetime = '%s'" % (
ts.strftime("%Y-%m-%d %H:%M"),
)
sql2 = (
"INSERT into nexrad_n0r_tindex( the_geom, datetime, filepath) values \
('SRID=4326;MULTIPOLYGON(((-126 50,-66 50,-66 24,-126 24,-126 50)))', '%s', '/mesonet/ARCHIVE/data/%s/GIS/uscomp/n0r_%s.png')"
% (
ts.strftime("%Y-%m-%d %H:%M"),
ts.strftime("%Y/%m/%d"),
ts.strftime("%Y%m%d%H%M"),
)
)
rs = mydb.query(sql).dictresult()
if len(rs) == 0:
mydb.query(sql2)
| [
11748,
285,
87,
13,
10430,
7575,
11,
25064,
11,
23241,
11,
4738,
628,
198,
85,
796,
25064,
13,
853,
85,
58,
16,
60,
198,
912,
796,
285,
87,
13,
10430,
7575,
13,
2536,
457,
524,
7,
85,
11,
36521,
56,
4,
76,
4,
67,
4,
39,
4,
... | 1.921444 | 471 |
import pandas as pd
from yr.libyr import Yr
class YRNOLocation:
"""Locations for YRNO"""
ATX = 'USA/Texas/Austin'
TLL = 'Estonia/Harjumaa/Tallinn'
RKV = 'Estonia/Lääne-Virumaa/Rakvere'
class YrNoWeather:
"""Pulls weather data using Yr.no weather API"""
def __init__(self, location: str, timezone: str = 'US/Central'):
"""
Args:
location(str): location name `country/region/city`
timezone(str): time zone to record time
"""
self.location = location
self.tz = timezone
@staticmethod
def _process_data(data: dict) -> dict:
"""Process the raw data from YrNo API"""
return {
'from': pd.to_datetime(data['@from']),
'to': pd.to_datetime(data['@to']),
'summary': data['symbol']['@name'],
'precip-intensity': float(data['precipitation']['@value']),
'wind-bearing': float(data['windDirection']['@deg']),
'wind-speed': float(data['windSpeed']['@mps']),
'wind-summary': data['windSpeed']['@name'],
'temp-avg': float(data['temperature']['@value']),
'pressure': float(data['pressure']['@value'])
}
def get_current_summary(self) -> pd.DataFrame:
"""Collect the current weather data for the location"""
data = Yr(location_name=self.location).now()
cleaned = pd.DataFrame(self._process_data(data), index=[1])
return cleaned
def get_hourly_forecast(self) -> pd.DataFrame:
"""Creates a 48 hour forecast summary"""
data = Yr(location_name=self.location, forecast_link='forecast_hour_by_hour')
return pd.DataFrame([self._process_data(x) for x in data.forecast()])
def get_daily_forecast(self) -> pd.DataFrame:
"""Creates a 7-day forecast summary"""
data = Yr(location_name=self.location)
df = pd.DataFrame([self._process_data(x) for x in data.forecast()])
# We'll need to group by day and have high/low calculated for each metric
keep_cols = ['from', 'precipIntensity', 'windSpeed', 'temperature', 'pressure']
df = df[keep_cols].groupby(pd.Grouper(key='from', freq='D')).agg(
{x: ['min', 'max'] for x in keep_cols[1:]}
)
# Flatten the columns for the dataframe, but keep everything preserved from each level
df.columns = ['_'.join(col).strip() for col in df.columns.values]
return df
| [
11748,
19798,
292,
355,
279,
67,
198,
6738,
42635,
13,
8019,
2417,
1330,
575,
81,
628,
198,
4871,
575,
42336,
3535,
5040,
25,
198,
220,
220,
220,
37227,
43,
20968,
329,
575,
49,
15285,
37811,
198,
220,
220,
220,
5161,
55,
796,
705,
... | 2.271468 | 1,083 |
N = int(input())
before = ""
all = set()
check = True
for i in range(N):
s = input()
if i == 0:
before = s
all.add(s)
else:
if before[-1] == s[0]:
before = s
all.add(s)
else:
check = False
if check:
print("Yes" if len(all)==N else "No")
else:
print("No")
| [
45,
796,
493,
7,
15414,
28955,
198,
19052,
796,
13538,
198,
439,
796,
900,
3419,
198,
9122,
796,
6407,
198,
1640,
1312,
287,
2837,
7,
45,
2599,
198,
220,
220,
220,
264,
796,
5128,
3419,
198,
220,
220,
220,
611,
1312,
6624,
657,
25... | 1.834225 | 187 |
from .lf_set import LFSet
from .analysis import LFAnalysis
from .preprocess import preprocessor
from .prelabels import PreLabels
from .lf import labeling_function, ABSTAIN
from .continuous_scoring import continuous_scorer | [
6738,
764,
1652,
62,
2617,
1330,
47629,
7248,
198,
6738,
764,
20930,
1330,
47629,
32750,
198,
6738,
764,
3866,
14681,
1330,
662,
41341,
198,
6738,
764,
79,
2411,
397,
1424,
1330,
3771,
17822,
1424,
198,
6738,
764,
1652,
1330,
27393,
62,... | 3.810345 | 58 |
import os
import sys
from netaddr import *
from fabric.api import *
from fabfile.config import testbed
from fabfile.utils.host import *
from fabfile.utils.cluster import get_all_hostnames
#end get_storage_data_ip
#end get_storage_host_string
#end get_storage_disk_config
#end get_storage_disk_config
#end get_storage_local_disk_config
#end get_storage_local_disk_config
# NFS config parser
# Eg., nfs configuration. This is for NFS storage support for cinder.
# Cinder can create volumes from the NFS store.
# storage_node_config = {
# host1 : { 'disks' : ['/dev/sdd:/dev/sdc'], 'nfs' : ['11.1.0.1:/nfsvol'] },
# host2 : { 'disks' : ['/dev/sdd:/dev/sdc'], 'nfs' : ['11.1.0.3:/nfsvol'] },
# host3 : { 'disks' : ['/dev/sdb:/dev/sdf'] },
# host4 : { 'disks' : ['/dev/sdd:/dev/sdc'] },
# }
# The function will parse the above config and returns
# the list '11.1.0.1:/nfsvol' '11.1.0.3:/nfsvol'
# Note: The host entry is not needed.
#end get_storage_nfs_disk_config
#end get_storage_journal_config
#end get_storage_directory_config
# Chassis config parser
# Eg., chassis configuration. This has to be provided when more than one
# node is part of a single chassis. This will avoid replication of data
# between nodes in the same chassis to avoid data loss when chassis goes
# down
# storage_node_config = {
# host1 : { 'disks' : ['/dev/sdd:/dev/sdc'], 'chassis' : ['T0'] },
# host2 : { 'disks' : ['/dev/sdd:/dev/sdc'], 'chassis' : ['T0'] },
# host3 : { 'disks' : ['/dev/sdb:/dev/sdf'], 'chassis' : ['T1'] },
# host4 : { 'disks' : ['/dev/sdd:/dev/sdc'], 'chassis' : ['T1'] },
# }
# The function will parse the above config and returns
# the list 'host1:T0 host2:T0 host3:T1 host4:T1'
#end get_storage_chassis_config
#end get_cinder_ha_vip
# storage host with monitors config
#end get_storage_mon_hosts config
# Returns interal HA vip
#end get_cfg_ha_vip
# Returns replica size
#end get_storage_replica_size
# Return Storage cache tier
#end get_storage_cache_tier
# Return object storage
#end get_object_storage
# Return object storage pool
#end get_object_storage_pool
| [
11748,
28686,
198,
11748,
25064,
198,
6738,
2010,
29851,
1330,
1635,
198,
198,
6738,
9664,
13,
15042,
1330,
1635,
198,
198,
6738,
7843,
7753,
13,
11250,
1330,
1332,
3077,
198,
6738,
7843,
7753,
13,
26791,
13,
4774,
1330,
1635,
198,
6738... | 2.693299 | 776 |
import uvicorn
import app.main as main
uvicorn.run(main.app)
| [
11748,
334,
25531,
1211,
198,
11748,
598,
13,
12417,
355,
1388,
198,
198,
14795,
291,
1211,
13,
5143,
7,
12417,
13,
1324,
8,
198
] | 2.583333 | 24 |
from setuptools import setup, find_packages
setup(
name="pybitbay",
version="0.0.1",
description="python api for bitbay cryptocurrency exchange",
author="dominik heinisch",
author_email="dominikheinisch2@gmail.com",
url="https://github.com/dominikheinisch/pybitbay",
license='Apache 2.0',
packages=find_packages("pybitbay"),
install_requires=["pandas", "requests"],
python_requires=">=3.6",
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
9078,
2545,
24406,
1600,
198,
220,
220,
220,
2196,
2625,
15,
13,
15,
13,
16,
1600,
198,
220,
220,
220,
6764,
2625,
29412,
... | 2.614458 | 166 |
print("connect setup")
import socket
HOST, PORT = "192.168.100.1",9999 #"169.254.44.240", 9999 #
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("trying to establish a connection")
try:
sock.connect((HOST, PORT))
print("connect ready")
except:
print("CONNECTION FAILED.")
print("have you run the code on the raspberry pi?")
print("P.S. dont break the pi please")
#raise RuntimeEror
print(sock)
| [
4798,
7203,
8443,
9058,
4943,
198,
11748,
17802,
198,
198,
39,
10892,
11,
350,
9863,
796,
366,
17477,
13,
14656,
13,
3064,
13,
16,
1600,
24214,
1303,
1,
22172,
13,
24970,
13,
2598,
13,
16102,
1600,
860,
17032,
1303,
198,
82,
735,
79... | 2.652439 | 164 |
from random import randint, sample
from functools import reduce
N_CLASSES = 200
N_STUDENTS = 1000
# create a graph-file consisting of 200 courses and 1000 students
# edges are randomly generated between students and classes
classes = map(lambda ind : "class " + str(ind), range(N_CLASSES))
students = map(lambda ind : "student " + str(ind), range(N_STUDENTS))
sparse_fname = "large_graph_sparse.txt"
generate_graph_file(sparse_fname, 4)
mid_fname = "large_graph_mid.txt"
generate_graph_file(mid_fname, 8)
dense_fname = "large_graph_dense.txt"
generate_graph_file(dense_fname, 16)
| [
6738,
4738,
1330,
43720,
600,
11,
6291,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
198,
45,
62,
31631,
1546,
796,
939,
198,
45,
62,
2257,
8322,
15365,
796,
8576,
198,
198,
2,
2251,
257,
4823,
12,
7753,
17747,
286,
939,
10902,
290... | 2.881773 | 203 |
import lcddriver
import time
import array as arr
# Load the driver and set it to "display"
# If you use something from the driver library use the "display." prefix first
#display = lcddriver.lcd()
# Main body of code
# display.lcd_clear()
#try:
# while True:
#
# time.sleep(0.5)
# twinkle([3,4])
# time.sleep(0.5)
#except KeyboardInterrupt:
# print("CLEAN")
# display.lcd_clear()
| [
11748,
300,
66,
1860,
38291,
198,
11748,
640,
198,
11748,
7177,
355,
5240,
198,
198,
2,
8778,
262,
4639,
290,
900,
340,
284,
366,
13812,
1,
198,
2,
1002,
345,
779,
1223,
422,
262,
4639,
5888,
779,
262,
366,
13812,
526,
21231,
717,
... | 2.256281 | 199 |
###########################################################################
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
import json
import base64
import jsonpickle
import httplib2
import datetime
from io import BytesIO
from oauth2client.client import Credentials, OAuth2Credentials, GoogleCredentials, Storage, EXPIRY_FORMAT
from oauth2client.file import Storage as LocalStorage
from oauth2client.service_account import ServiceAccountCredentials
from oauth2client import _helpers
from googleapiclient.errors import HttpError
from apiclient.http import MediaIoBaseUpload
from apiclient import discovery
from google.cloud import storage
#from django.db import models
from django.utils import encoding
from setup import UI_SERVICE
| [
29113,
29113,
7804,
21017,
198,
2,
198,
2,
220,
15069,
2177,
3012,
3457,
13,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
345,
743,
407,
779,
428,
2393,
284... | 3.924855 | 346 |
import re
from copy import copy
| [
11748,
302,
198,
6738,
4866,
1330,
4866,
628,
628,
628,
628,
198
] | 3.333333 | 12 |
# Generated by Django 2.2.6 on 2020-04-06 15:53
from django.db import migrations, models
import django.utils.timezone
import foodieshoot.models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
21,
319,
12131,
12,
3023,
12,
3312,
1315,
25,
4310,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
26791,
13,
2435,
11340,
198,
11748,
205... | 2.979592 | 49 |