content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
class Solution:
"""
@param tickets:
@return: nothing
"""
## memory limitation
class Solution:
"""
@param tickets:
@return: nothing
"""
| [
4871,
28186,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
2488,
17143,
8587,
25,
220,
198,
220,
220,
220,
2488,
7783,
25,
2147,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
220,
628,
198,
198,
2235,
4088,
17385,
628,
198,
... | 2.452055 | 73 |
# Create the dictionary
mydict = {'archiveType':'coral',
'Publication':{'author':'J. Doe','title':'The most important record'}}
#print the keys
mydict.keys()
| [
2,
13610,
262,
22155,
198,
198,
1820,
11600,
796,
1391,
6,
17474,
6030,
10354,
6,
66,
6864,
3256,
198,
220,
220,
220,
220,
220,
220,
220,
705,
15202,
341,
10354,
90,
6,
9800,
10354,
6,
41,
13,
31780,
41707,
7839,
10354,
6,
464,
74... | 2.725806 | 62 |
from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponseRedirect
from main.models import *
| [
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
9575,
62,
75,
12582,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
7738,
1060,
198,
6738,
1388,
13,
27530,
1330,
1635,
628,
198
] | 3.416667 | 36 |
"""Generic utility for encoding and decoding binary packets."""
from collections import namedtuple
import struct
def defpacket(name: str, **kwargs):
"""Define a protocol packet."""
fmt: str = ">" + "".join(kwargs.values())
msg_type = namedtuple(name, kwargs.keys()) # type: ignore
return _MessageType
| [
37811,
46189,
10361,
329,
21004,
290,
39938,
13934,
24624,
526,
15931,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
11748,
2878,
628,
198,
4299,
825,
8002,
316,
7,
3672,
25,
965,
11,
12429,
46265,
22046,
2599,
198,
220,
220,
220,
37227... | 3.147059 | 102 |
import pytest
import sys
import os
os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../test_sentinel.conf'))
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../lib')))
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
# ========================================================================
| [
11748,
12972,
9288,
198,
11748,
25064,
198,
11748,
28686,
198,
418,
13,
268,
2268,
17816,
50,
3525,
1268,
3698,
62,
10943,
16254,
20520,
796,
28686,
13,
6978,
13,
27237,
6978,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
... | 2.810458 | 153 |
##########################################################################
#
# Copyright (c) 2016, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import imath
import IECore
import IECoreScene
import Gaffer
import GafferUI
import GafferScene
import GafferSceneUI
Gaffer.Metadata.registerNode(
GafferScene.ShaderTweaks,
"description",
"""
Makes modifications to shader parameter values.
""",
plugs = {
"shader" : [
"description",
"""
The type of shader to modify. This is actually the name
of an attribute which contains the shader network.
""",
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
"presetsPlugValueWidget:allowCustom", True,
"preset:None", "",
"layout:index", 0
],
"localise" : [
"description",
"""
Turn on to allow location-specific tweaks to be made to inherited
shaders. Shaders will be localised to locations matching the
node's filter prior to tweaking. The original inherited shader will
remain untouched.
""",
"layout:index", 1
],
"ignoreMissing" : [
"description",
"""
Ignores tweaks targeting missing parameters. When off, missing parameters
cause the node to error.
""",
"layout:index", 2
],
"tweaks" : [
"description",
"""
The tweaks to be made to the parameters of the shader.
Arbitrary numbers of user defined tweaks may be
added as children of this plug via the user
interface, or using the ShaderTweaks API via python.
""",
"plugValueWidget:type", "GafferUI.LayoutPlugValueWidget",
"layout:customWidget:footer:widgetType", "GafferSceneUI.ShaderTweaksUI._TweaksFooter",
"layout:customWidget:footer:index", -1,
"nodule:type", "GafferUI::CompoundNodule",
"noduleLayout:section", "left",
"noduleLayout:spacing", 0.2,
# Add + button for showing and hiding parameters in the GraphEditor
"noduleLayout:customGadget:addButton:gadgetType", "GafferSceneUI.ShaderTweaksUI.PlugAdder",
],
"tweaks.*" : [
"noduleLayout:visible", False, # Can be shown individually using PlugAdder above
],
}
)
##########################################################################
# Internal utilities
##########################################################################
##########################################################################
# _TweaksFooter
##########################################################################
##########################################################################
# PlugValueWidget context menu
##########################################################################
GafferUI.PlugValueWidget.popupMenuSignal().connect( __plugPopupMenu, scoped = False )
##########################################################################
# Nodule context menu
##########################################################################
GafferUI.GraphEditor.plugContextMenuSignal().connect( __graphEditorPlugContextMenu, scoped = False )
| [
29113,
29113,
7804,
2235,
198,
2,
198,
2,
220,
15069,
357,
66,
8,
1584,
11,
7412,
7117,
8495,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
220,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
198,
... | 3.43459 | 1,353 |
import bpy
import bmesh
import argparse
import sys
import math
from mathutils import Vector, Matrix
scene = bpy.context.scene
scene_data=[]
all_objects = [ob for ob in scene.objects if ob.layers[0] and ob.type=='MESH']
for ob in all_objects:
obdata = ob.data
ob_name_tokens= ob.name.split('.')
ob_data={
"model": ob.name if len(ob_name_tokens)==1 else ob_name_tokens[0],
"pos":[round(ob.location.x,2), round(ob.location.z,2), round(ob.location.y,2)],
"rotation":[round(math.degrees(ob.rotation_euler.x)/360,2)-0.5,round(math.degrees(ob.rotation_euler.z)/360,2)-0.5,round(math.degrees(ob.rotation_euler.y)/360,2)-0.5]
}
scene_data.append(ob_data)
print(scene_data)
| [
11748,
275,
9078,
198,
11748,
275,
76,
5069,
198,
11748,
1822,
29572,
198,
11748,
25064,
198,
11748,
10688,
198,
6738,
10688,
26791,
1330,
20650,
11,
24936,
198,
198,
29734,
796,
275,
9078,
13,
22866,
13,
29734,
198,
198,
29734,
62,
789... | 2.264331 | 314 |
import os
from collections import OrderedDict
import numpy as np
from ConfigSpace.hyperparameters import NumericalHyperparameter, CategoricalHyperparameter, OrdinalHyperparameter, \
Constant
from pandas import DataFrame
from cave.analyzer.base_analyzer import BaseAnalyzer
from cave.utils.helpers import get_config_origin
class OverviewTable(BaseAnalyzer):
"""
Meta data, i.e. number of instances and parameters as well as configuration budget. Statistics apply to the
best run, if multiple configurator runs are compared.
"""
def run(self):
""" Generate tables. """
scenario = self.runscontainer.scenario
# General infos
general_dict = self._general_dict(scenario)
html_table_general = DataFrame(data=OrderedDict([('General', general_dict)]))
html_table_general = html_table_general.reindex(list(general_dict.keys()))
html_table_general = html_table_general.to_html(escape=False, header=False, justify='left')
self.result["General"] = {"table": html_table_general,
"tooltip": "General information about the optimization scenario."}
# Run-specific / budget specific infos
for mode in ['parallel', 'budget']:
runspec_dict = self._runspec_dict(identify=mode)
if not runspec_dict:
continue
order_spec = list(list(runspec_dict.values())[0].keys()) # Get keys of any sub-dict for order
html_table_specific = DataFrame(runspec_dict)
html_table_specific = html_table_specific.reindex(order_spec)
html_table_specific = html_table_specific.to_html(escape=False, justify='left')
if mode == 'parallel':
self.result["Parallel Runs"] = {"table": html_table_specific,
"tooltip": "Information to individual parallel runs."}
if mode == 'budget':
self.result["Budgets"] = {"table": html_table_specific,
"tooltip": "Statistics related to the budgets used in this optimization."}
# ConfigSpace in tabular form
cs_dict = self._configspace(scenario.cs)
cs_table = DataFrame(data=cs_dict)
html_table_cs = cs_table.to_html(escape=False, justify='left', index=False)
self.result["Configuration Space"] = {"table": html_table_cs,
"tooltip": "The parameter configuration space. "
"(See github.com/automl/ConfigSpace)"}
return self.result
def _general_dict(self, scenario):
""" Generate the meta-information that holds for all runs (scenario info etc)
Parameters
----------
scenario: smac.Scenario
scenario file to get information from
"""
# general stores information that holds for all runs, runspec holds information on a run-basis
general = OrderedDict()
if len(self.runscontainer.get_budgets()) > 1:
general['# budgets'] = len(self.runscontainer.get_budgets())
if len(self.runscontainer.get_folders()) > 1:
general['# parallel runs'] = len(self.runscontainer.get_folders())
# Scenario related
general['# parameters'] = len(scenario.cs.get_hyperparameters())
general['Deterministic target algorithm'] = scenario.deterministic
general['Optimized run objective'] = scenario.run_obj
if scenario.cutoff or scenario.run_obj == 'runtime':
general['Cutoff'] = scenario.cutoff
if any([str(lim)!='inf' for lim in [scenario.wallclock_limit, scenario.ta_run_limit, scenario.algo_runs_timelimit]]):
general['Walltime budget'] = scenario.wallclock_limit
general['Runcount budget'] = scenario.ta_run_limit
general['CPU budget'] = scenario.algo_runs_timelimit
# Instances
num_train, num_test = [len([i for i in insts if i]) for insts in [scenario.train_insts, scenario.test_insts]]
if num_train > 0 or num_test > 0:
general['# instances (train/test)'] = "{} / {}".format(num_train, num_test)
# Features
num_feats = scenario.n_features if scenario.feature_dict else 0
num_dup_feats = 0
if scenario.feature_dict:
dup_feats = DataFrame(scenario.feature_array)
num_dup_feats = len(dup_feats[dup_feats.duplicated()]) # only contains train instances
if num_feats > 0:
general['# features (duplicates)'] = "{} ({})".format(num_feats, num_dup_feats)
general['----------'] = '----------'
combined_run = self.runscontainer.get_aggregated(False, False)[0]
combined_stats = self._stats_for_run(combined_run.original_runhistory,
combined_run.scenario,
combined_run.incumbent)
for k, v in combined_stats.items():
general[k] = v
return general
def _runspec_dict(self, identify='parallel'):
"""
identify-keyword specifies whether to use path or budget for name
"""
if identify not in ['parallel', 'budget']:
raise ValueError("illegal use of _runspec_dict")
if (identify == 'budget' and len(self.runscontainer.get_budgets()) <= 1 and
(self.runscontainer.get_budgets() is None or self.runscontainer.get_budgets()[0] == 0.0)):
return False
if (identify == 'parallel' and len(self.runscontainer.get_folders()) <= 1):
return False
runspec = OrderedDict()
runs = self.runscontainer.get_aggregated(keep_folders=identify=='parallel',
keep_budgets=identify=='budget')
for idx, run in enumerate(runs):
if identify == 'budget' and len(set(run.reduced_to_budgets)) != 1:
raise ValueError("Runs processed here should only have a single budget specified (%s)." %
run.reduced_to_budgets)
self.logger.debug("Path to folder for run no. {}: {}".format(idx, str(run.path_to_folder)))
name = os.path.basename(run.path_to_folder) if identify == 'parallel' else str(run.reduced_to_budgets[0])
runspec[name] = self._stats_for_run(run.original_runhistory,
run.scenario,
run.incumbent)
return runspec
def _configspace(self, cs):
""" Return configspace in table-format """
d = OrderedDict([("Parameter", []),
("Type", []),
("Range/Choices", []),
("Default", [])]
)
for hp in cs.get_hyperparameters():
d["Parameter"].append(hp.name)
d["Type"].append(type(hp).__name__)
if isinstance(hp, NumericalHyperparameter):
d["Range/Choices"].append("[{}, {}]{}".format(hp.lower, hp.upper, ' (log)' if hp.log else ''))
elif isinstance(hp, CategoricalHyperparameter):
d["Range/Choices"].append("{}".format(hp.choices))
elif isinstance(hp, OrdinalHyperparameter):
d["Range/Choices"].append("{}".format(hp.sequence))
elif isinstance(hp, Constant):
d["Range/Choices"].append("{}".format(hp.default_value))
else:
d["Range/Choices"].append("?")
d["Default"].append(hp.default_value)
return d
| [
11748,
28686,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
17056,
14106,
13,
49229,
17143,
7307,
1330,
399,
6975,
605,
38197,
17143,
2357,
11,
327,
2397,
12409,
38197,
17143,
2357,
11,
... | 2.192867 | 3,505 |
import pandas as pd
import numpy as np
import random
from sklearn.preprocessing import LabelEncoder
import nltk
from Stemmer import Stemmer
def dataPrepro(raw_text, y_enc):
'''
preprocess data, and tokenize
arg:
raw_test: pandes array, each line contains a text
y_enc: pandas array, each line is the label(1 for spam, 0 for ham)
returns:
data_tokenized: a processed and tokenized data(numpy array),
in the form like below:
[[feature1_value, feature2_value, feature3_value..., label],
...
[feature1_value, feature2_value, feature3_value..., label]]
each feature value defines whether a n-gram(unigram or bigram) is in the sentence
processed: the preprocessed text
'''
# replace e-mail address, url, money symbol, phone number and number
# with emailaddr, httpaddr, moneysymb, phonenum, and number
print('step1: replace emal,url,money symbol,phone number,number with their classes...')
processed = raw_text.str.replace(r'\b[\w\-.]+?@\w+?\.\w{2,4}\b',
' emailaddr ')
processed = processed.str.replace(r'(http[s]?\S+)|(\w+\.[A-Za-z]{2,4}\S*)',
' httpaddr ')
processed = processed.str.replace(r'£|\$', ' moneysymb ')
processed = processed.str.replace(
r'(\+\d{1,2}\s)?\d?[\-(.]?\d{3}\)?[\s.-]?\d{3}[\s.-]?\d{4}\b',
' phonenum ')
processed = processed.str.replace(r'(\s)?\d+(\.\d+)?(\s|\.|\,|\d|\?)', ' num ')
print('done')
# remove punctuations
print('step2: remove punctuations, spaces...')
processed = processed.str.replace(r'[^\w\d\s]', ' ')
processed = processed.str.replace(r'^\s+|\s+?$', '')
processed = processed.str.lower()
print('done')
# remove stop words
# here we define an inline function removeStopWord to generate text without stopwords
print('step3: remove stop words...')
stop_words = set(nltk.corpus.stopwords.words('english'))
processed = processed.apply(removeStopWord)
print('done')
# stemming
# we use our redefined simplified Stemmer to stem
print('step4: stemming...')
simple_porter = Stemmer()
processed = processed.apply(stemming)
print('done')
# replace some odd words by mannual concluded rules
print('step5: replaced with mannual rules...')
mannual_word_map = {
'aaooooright':'alright',
'aww':'aw',
'awww':'aw',
'baaaaaaaabe':'babe',
'baaaaabe':'babe',
'boooo':'boo',
'buzzzz':'buzz',
'daaaaa':'da',
'ffffffffff':'f',
'fffff':'f',
'ffffuuuuuuu':'fu',
'geeee':'gee',
'geeeee':'gee',
'hmm':'hm',
'hmmm':'hm',
'hmmmm':'hm',
'latelyxxx':'late',
'lololo':'lol',
'loooooool':'lol',
'lool':'lol',
'looovvve':'love',
'miiiiiiissssssssss':'miss',
'mmm':'mm',
'mmmm':'mm',
'mmmmm':'mm',
'mmmmmm':'mm',
'mmmmmmm':'mm',
'nooooooo':'no',
'noooooooo':'no',
'oooh':'ooh',
'oooooh':'ooh',
'ooooooh':'ooh',
'pleassssssseeeeee':'please',
'sooo':'soo',
'soooo':'soo',
'sooooo':'soo',
'ummmmmaah':'nmma',
'xxxxx':'xxxx',
'xxxxxx':'xxxx',
'xxxxxxx':'xxxx',
'xxxxxxxx':'xxxx',
'xxxxxxxxx':'xxxx',
'xxxxxxxxxxxxxx':'xxxx',
}
processed = processed.apply(mannualReplace)
print('done')
# replace rare word with <unk>
print('step6: replace rare words with <unk>...')
# replace number again
processed = processed.str.replace(r'\s\d+(\.\d+)?(\s|\.|\,|\d|\?)', ' number ')
vocab = {}
# building inventory
for sent in processed:
words = sent.split(' ')
for word in words:
if(word not in vocab.keys()):
vocab[word] = 1
else:
vocab[word] += 1
# sorted words by their frequency, from high to low
sorted_list = sorted(vocab.items(), key=lambda x: x[1], reverse=True)
# print(sorted_list[:-1000])
preserved_list = []
for i in range(len(sorted_list)):
preserved_list.append(sorted_list[i][0])
# print('size of vocab:',len(preserved_list))
# preserve the first 6000 words in preserved_list
preserved_list = preserved_list[:6000]
processed = processed.apply(replaceUNK)
print('done')
# To avoid over fitting, add some noise to the modal to increase robustness
print('step7: add noise....')
spam_list = []
ham_list = []
# seperate our current data to ham and spam list
for i in range(len(processed)):
if(y_enc[i] == 1):
spam_list.append(processed[i].split(' '))
else:
ham_list.append(processed[i].split(' '))
# using dynamic programming to define a function to calculate edit distance
# processing data
for i in range(len(processed)):
if i % 500 == 0:
print('proceeding data',i,'to',min(i+499,len(processed)))
sent = processed[i].split(' ')
if y_enc[i] == 1:
for s in spam_list:
edit_dist = editDistance(sent, s)
if (edit_dist > 0) and (edit_dist < 3):
index = random.randint(0, len(s)-1)
if index < len(sent):
sent[index] = s[index]
else:
sent.append(s[index])
processed[i] = ' '.join(sent)
break
else:
for s in ham_list:
edit_dist = editDistance(sent, s)
if (edit_dist > 0) and (edit_dist < 3):
index = random.randint(0, len(s)-1)
if index < len(sent):
sent[index] = s[index]
else:
sent.append(s[index])
processed[i] = ' '.join(sent)
break
print('done')
# then we begin to tokenize
print('tokenizing...')
# construct the mapping from n-grams to feature indecies
n_gram_map = {}
for sent in processed:
cnt = 0
sent = sent.split(' ')
for n in [1, 2]:
for i in range(len(sent)-n):
gram = ' '.join(sent[i:i+n])
if gram not in n_gram_map.keys():
n_gram_map[gram] = cnt
cnt += 1
# print(len(n_gram_map)) #there are totaly 31493 n-grams
# begin tokenizing
data_tokenized = []
for i in range(len(processed)):
feature_vec = [0] * 31494
sent = processed[i].split(' ')
for n in [1, 2]:
for i in range(len(sent) - n):
gram = ' '.join(sent[i:i + n])
feature_vec[n_gram_map[gram]] = 1
feature_vec[-1] = int(y_enc[i])
data_tokenized.append(feature_vec)
data_tokenized = np.array(data_tokenized)
print('done, the data size is:', data_tokenized.shape[0], 'the feature size is ', data_tokenized.shape[1] - 1)
return data_tokenized, processed
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
36052,
27195,
12342,
198,
11748,
299,
2528,
74,
198,
6738,
520,
368,
647,
1330,
520,
368,
647,
198,
... | 2.024833 | 3,584 |
from typing import Tuple, Optional
# дум:ROOT
| [
6738,
19720,
1330,
309,
29291,
11,
32233,
628,
220,
220,
220,
1303,
12466,
112,
35072,
43108,
25,
13252,
2394,
628
] | 2.6 | 20 |
"""Create a summary report of the BIDS dataset."""
from ._report import make_report
| [
37811,
16447,
257,
10638,
989,
286,
262,
347,
14255,
27039,
526,
15931,
198,
198,
6738,
47540,
13116,
1330,
787,
62,
13116,
198
] | 3.863636 | 22 |
import unittest
import math
import numpy as np
from functools import reduce
from unittest.case import expectedFailure
import nibabel
def euler2mat(z=0, y=0, x=0):
''' Return matrix for rotations around z, y and x axes
Uses the z, then y, then x convention above
Parameters
----------
z : scalar
Rotation angle in radians around z-axis (performed first)
y : scalar
Rotation angle in radians around y-axis
x : scalar
Rotation angle in radians around x-axis (performed last)
Returns
-------
M : array shape (3,3)
Rotation matrix giving same rotation as for given angles
Examples
--------
>>> zrot = 1.3 # radians
>>> yrot = -0.1
>>> xrot = 0.2
>>> M = euler2mat(zrot, yrot, xrot)
>>> M.shape == (3, 3)
True
The output rotation matrix is equal to the composition of the
individual rotations
>>> M1 = euler2mat(zrot)
>>> M2 = euler2mat(0, yrot)
>>> M3 = euler2mat(0, 0, xrot)
>>> composed_M = np.dot(M3, np.dot(M2, M1))
>>> np.allclose(M, composed_M)
True
You can specify rotations by named arguments
>>> np.all(M3 == euler2mat(x=xrot))
True
When applying M to a vector, the vector should column vector to the
right of M. If the right hand side is a 2D array rather than a
vector, then each column of the 2D array represents a vector.
>>> vec = np.array([1, 0, 0]).reshape((3,1))
>>> v2 = np.dot(M, vec)
>>> vecs = np.array([[1, 0, 0],[0, 1, 0]]).T # giving 3x2 array
>>> vecs2 = np.dot(M, vecs)
Rotations are counter-clockwise.
>>> zred = np.dot(euler2mat(z=np.pi/2), np.eye(3))
>>> np.allclose(zred, [[0, -1, 0],[1, 0, 0], [0, 0, 1]])
True
>>> yred = np.dot(euler2mat(y=np.pi/2), np.eye(3))
>>> np.allclose(yred, [[0, 0, 1],[0, 1, 0], [-1, 0, 0]])
True
>>> xred = np.dot(euler2mat(x=np.pi/2), np.eye(3))
>>> np.allclose(xred, [[1, 0, 0],[0, 0, -1], [0, 1, 0]])
True
Notes
-----
The direction of rotation is given by the right-hand rule (orient
the thumb of the right hand along the axis around which the rotation
occurs, with the end of the thumb at the positive end of the axis;
curl your fingers; the direction your fingers curl is the direction
of rotation). Therefore, the rotations are counterclockwise if
looking along the axis of rotation from positive to negative.
'''
Ms = []
if z:
cosz = math.cos(z)
sinz = math.sin(z)
Ms.append(np.array(
[[cosz, -sinz, 0],
[sinz, cosz, 0],
[0, 0, 1]]))
if y:
cosy = math.cos(y)
siny = math.sin(y)
Ms.append(np.array(
[[cosy, 0, siny],
[0, 1, 0],
[-siny, 0, cosy]]))
if x:
cosx = math.cos(x)
sinx = math.sin(x)
Ms.append(np.array(
[[1, 0, 0],
[0, cosx, -sinx],
[0, sinx, cosx]]))
if Ms:
return reduce(np.dot, Ms[::-1])
return np.eye(3)
def mat2euler(M, cy_thresh=None):
''' Discover Euler angle vector from 3x3 matrix
Uses the conventions above.
Parameters
----------
M : array-like, shape (3,3)
cy_thresh : None or scalar, optional
threshold below which to give up on straightforward arctan for
estimating x rotation. If None (default), estimate from
precision of input.
Returns
-------
z : scalar
y : scalar
x : scalar
Rotations in radians around z, y, x axes, respectively
Notes
-----
If there was no numerical error, the routine could be derived using
Sympy expression for z then y then x rotation matrix, which is::
[ cos(y)*cos(z), -cos(y)*sin(z), sin(y)],
[cos(x)*sin(z) + cos(z)*sin(x)*sin(y), cos(x)*cos(z) - sin(x)*sin(y)*sin(z), -cos(y)*sin(x)],
[sin(x)*sin(z) - cos(x)*cos(z)*sin(y), cos(z)*sin(x) + cos(x)*sin(y)*sin(z), cos(x)*cos(y)]
with the obvious derivations for z, y, and x
z = atan2(-r12, r11)
y = asin(r13)
x = atan2(-r23, r33)
Problems arise when cos(y) is close to zero, because both of::
z = atan2(cos(y)*sin(z), cos(y)*cos(z))
x = atan2(cos(y)*sin(x), cos(x)*cos(y))
will be close to atan2(0, 0), and highly unstable.
The ``cy`` fix for numerical instability below is from: *Graphics
Gems IV*, Paul Heckbert (editor), Academic Press, 1994, ISBN:
0123361559. Specifically it comes from EulerAngles.c by Ken
Shoemake, and deals with the case where cos(y) is close to zero:
See: http://www.graphicsgems.org/
The code appears to be licensed (from the website) as "can be used
without restrictions".
'''
M = np.asarray(M)
if cy_thresh is None:
try:
cy_thresh = np.finfo(M.dtype).eps * 4
except ValueError:
cy_thresh = _FLOAT_EPS_4
r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat
# cy: sqrt((cos(y)*cos(z))**2 + (cos(x)*cos(y))**2)
cy = math.sqrt(r33*r33 + r23*r23)
if cy > cy_thresh: # cos(y) not close to zero, standard form
z = math.atan2(-r12, r11) # atan2(cos(y)*sin(z), cos(y)*cos(z))
y = math.atan2(r13, cy) # atan2(sin(y), cy)
x = math.atan2(-r23, r33) # atan2(cos(y)*sin(x), cos(x)*cos(y))
else: # cos(y) (close to) zero, so x -> 0.0 (see above)
# so r21 -> sin(z), r22 -> cos(z) and
z = math.atan2(r21, r22)
y = math.atan2(r13, cy) # atan2(sin(y), cy)
x = 0.0
return z, y, x
def euler2quat(z=0, y=0, x=0):
''' Return quaternion corresponding to these Euler angles
Uses the z, then y, then x convention above
Parameters
----------
z : scalar
Rotation angle in radians around z-axis (performed first)
y : scalar
Rotation angle in radians around y-axis
x : scalar
Rotation angle in radians around x-axis (performed last)
Returns
-------
quat : array shape (4,)
Quaternion in w, x, y z (real, then vector) format
Notes
-----
We can derive this formula in Sympy using:
1. Formula giving quaternion corresponding to rotation of theta radians
about arbitrary axis:
http://mathworld.wolfram.com/EulerParameters.html
2. Generated formulae from 1.) for quaternions corresponding to
theta radians rotations about ``x, y, z`` axes
3. Apply quaternion multiplication formula -
http://en.wikipedia.org/wiki/Quaternions#Hamilton_product - to
formulae from 2.) to give formula for combined rotations.
'''
z = z/2.0
y = y/2.0
x = x/2.0
cz = math.cos(z)
sz = math.sin(z)
cy = math.cos(y)
sy = math.sin(y)
cx = math.cos(x)
sx = math.sin(x)
return np.array([
cx*cy*cz - sx*sy*sz,
cx*sy*sz + cy*cz*sx,
cx*cz*sy - sx*cy*sz,
cx*cy*sz + sx*cz*sy])
def quat2euler(q):
''' Return Euler angles corresponding to quaternion `q`
Parameters
----------
q : 4 element sequence
w, x, y, z of quaternion
Returns
-------
z : scalar
Rotation angle in radians around z-axis (performed first)
y : scalar
Rotation angle in radians around y-axis
x : scalar
Rotation angle in radians around x-axis (performed last)
Notes
-----
It's possible to reduce the amount of calculation a little, by
combining parts of the ``quat2mat`` and ``mat2euler`` functions, but
the reduction in computation is small, and the code repetition is
large.
'''
# delayed import to avoid cyclic dependencies
import nibabel.quaternions as nq
return mat2euler(nq.quat2mat(q))
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
6738,
555,
715,
395,
13,
7442,
1330,
2938,
50015,
198,
11748,
33272,
9608,
628,
198,
4299,
304,
18173,
17,
6759,
... | 2.22566 | 3,523 |
from boto3.session import Session
from rest_framework import status
from django.http import response
import botocore
import boto3
from core.settings import (
AWS_STORAGE_BUCKET_NAME,
AWS_SECRET_ACCESS_KEY,
AWS_ACCESS_KEY_ID,
)
from .errors import AWSDownloadError
| [
6738,
275,
2069,
18,
13,
29891,
1330,
23575,
198,
6738,
1334,
62,
30604,
1330,
3722,
198,
6738,
42625,
14208,
13,
4023,
1330,
2882,
198,
11748,
10214,
420,
382,
198,
11748,
275,
2069,
18,
198,
198,
6738,
4755,
13,
33692,
1330,
357,
19... | 2.926316 | 95 |
from django.forms import ModelForm
from autos.models import Make
# Create the form class.
| [
198,
6738,
42625,
14208,
13,
23914,
1330,
9104,
8479,
198,
6738,
44619,
13,
27530,
1330,
6889,
628,
198,
2,
13610,
262,
1296,
1398,
13,
198
] | 3.72 | 25 |
import pylab as pl
from PySpectrograph.Models import RSSModel
from PySpectrograph.Spectra import Spectrum
# create the spectrograph model
rss = RSSModel.RSSModel(grating_name="PG0900", gratang=15.875, camang=31.76496,
slit=1.50, xbin=2, ybin=2)
# print out some basic statistics
print(1e7 * rss.calc_bluewavelength(), 1e7 * rss.calc_centralwavelength(), 1e7 * rss.calc_redwavelength())
R = rss.calc_resolution(rss.calc_centralwavelength(), rss.alpha(), -rss.beta())
res = 1e7 * rss.calc_resolelement(rss.alpha(), -rss.beta())
print(R, res)
# set up the detector
ycen = rss.detector.get_ypixcenter()
d_arr = rss.detector.make_detector()[ycen, :]
w = 1e7 * rss.get_wavelength(d_arr)
# set up the artificial spectrum
sw, sf = pl.loadtxt('Ne.txt', usecols=(0, 1), unpack=True)
wrange = [1e7 * rss.calc_bluewavelength(), 1e7 * rss.calc_redwavelength()]
spec = Spectrum.Spectrum(sw, sf, wrange=wrange, dw=res / 10, stype='line', sigma=res)
# interpolate it over the same range as the detector
spec.interp(w)
# plot it
pl.figure()
pl.plot(spec.wavelength, d_arr * ((spec.flux) / spec.flux.max()))
pl.show()
| [
11748,
279,
2645,
397,
355,
458,
198,
198,
6738,
9485,
49738,
3828,
1470,
13,
5841,
1424,
1330,
25012,
17633,
198,
6738,
9485,
49738,
3828,
1470,
13,
49738,
430,
1330,
27217,
198,
198,
2,
2251,
262,
5444,
3828,
1470,
2746,
198,
42216,
... | 2.40552 | 471 |
"""
Classes related to working with XIA's Pixie16 electronics line
"""
| [
37811,
198,
9487,
274,
3519,
284,
1762,
351,
1395,
3539,
338,
46687,
1433,
17075,
1627,
198,
37811,
198
] | 3.944444 | 18 |
"""
Japanese language data.
This module contains a dict named 'hiragana' which maps hiragana
unicode characters to romaji pronunciations, as well as a
'romajiToHiragana' dict which maps romaji pronunciation to *lists* of
hiragana characters. There are multiple hiragana characters with the
same pronunciation, thus the multiple values per romaji in the
romajiToHiragana dict.
"""
# Hiragana.
hiragana = {
u'\u3042': 'A', u'\u3044': 'I', u'\u3046': 'U', u'\u3048': 'E',
u'\u3081': 'ME', u'\u3080': 'MU', u'\u3082': 'MO', u'\u3084': 'YA',
u'\u3086': 'YU', u'\u3089': 'RA', u'\u3088': 'YO', u'\u308b': 'RU',
u'\u308a': 'RI', u'\u308d': 'RO', u'\u308c': 'RE', u'\u308f': 'WA',
u'\u3091': 'WE', u'\u3090': 'WI', u'\u3093': 'N', u'\u3092': 'WO',
u'\u304b': 'KA', u'\u304a': 'O', u'\u304d': 'KI', u'\u304c': 'GA',
u'\u304f': 'KU', u'\u304e': 'GI', u'\u3051': 'KE', u'\u3050': 'GU',
u'\u3053': 'KO', u'\u3052': 'GE', u'\u3055': 'SA', u'\u3054': 'GO',
u'\u3057': 'SHI',u'\u3056': 'ZA', u'\u3059': 'SU', u'\u3058': 'JI',
u'\u305b': 'SE', u'\u305a': 'ZU', u'\u305d': 'SO', u'\u305c': 'ZE',
u'\u305f': 'TA', u'\u305e': 'ZO', u'\u3061': 'CHI', u'\u3060': 'DA',
u'\u3062': 'JI', u'\u3065': 'ZU', u'\u3064': 'TSU', u'\u3067': 'DE',
u'\u3066': 'TE', u'\u3069': 'DO', u'\u3068': 'TO', u'\u306b': 'NI',
u'\u306a': 'NA', u'\u306d': 'NE', u'\u306c': 'NU', u'\u306f': 'HA',
u'\u306e': 'NO', u'\u3071': 'PA', u'\u3070': 'BA', u'\u3073': 'BI',
u'\u3072': 'HI', u'\u3075': 'FU', u'\u3074': 'PI', u'\u3077': 'PU',
u'\u3076': 'BU', u'\u3079': 'BE', u'\u3078': 'HE', u'\u307b': 'HO',
u'\u307a': 'PE', u'\u307d': 'PO', u'\u307c': 'BO', u'\u307f': 'MI',
u'\u307e': 'MA'}
romajiToHiragana = {}
for k, v in hiragana.iteritems():
romajiToHiragana.setdefault(v, []).append(k)
# Katakana.
# katakana = {
# }
| [
37811,
198,
25324,
3303,
1366,
13,
198,
198,
1212,
8265,
4909,
257,
8633,
3706,
705,
71,
343,
363,
2271,
6,
543,
8739,
289,
343,
363,
2271,
198,
46903,
1098,
3435,
284,
9267,
26436,
9668,
49652,
602,
11,
355,
880,
355,
257,
198,
6,
... | 1.844708 | 1,011 |
from django.db import models
from django.contrib.auth.models import User
from uuid import uuid4
# Does it make sense to have a Game model that could be saved as an instance with Chars linked to it? Good to research
# NOTE(review): the field declarations below look like the body of a Django
# model class whose ``class`` header is not visible in this chunk, and
# CARD_CHOICES is not defined here -- presumably a module-level tuple of
# (value, label) pairs. Confirm against the full source file.
name = models.CharField(max_length=100)
card = models.IntegerField(choices=CARD_CHOICES)
effect = models.CharField(max_length=100)
# Would it make sense to have a separate monster model? Could have a more basic "living thing" model that other can inherit from.
# I don't think heros and traitors would need a distinct model, but then the standard character model might need some rarely used fields
# Game manual calls PCs "explorers"; both they and monsters could inherit from character
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
334,
27112,
1330,
334,
27112,
19,
198,
198,
2,
8314,
340,
787,
2565,
284,
423,
257,
3776,
2746,
326,
714,
... | 3.879781 | 183 |
#!/usr/bin/env python
# coding: utf-8
# DO NOT EDIT
# Autogenerated from the notebook statespace_structural_harvey_jaeger.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# # Detrending, Stylized Facts and the Business Cycle
#
# In an influential article, Harvey and Jaeger (1993) described the use of
# unobserved components models (also known as "structural time series
# models") to derive stylized facts of the business cycle.
#
# Their paper begins:
#
# "Establishing the 'stylized facts' associated with a set of time
# series is widely considered a crucial step
# in macroeconomic research ... For such facts to be useful they
# should (1) be consistent with the stochastic
# properties of the data and (2) present meaningful information."
#
# In particular, they make the argument that these goals are often better
# met using the unobserved components approach rather than the popular
# Hodrick-Prescott filter or Box-Jenkins ARIMA modeling techniques.
#
# statsmodels has the ability to perform all three types of analysis, and
# below we follow the steps of their paper, using a slightly updated
# dataset.
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
from IPython.display import display, Latex
# ## Unobserved Components
#
# The unobserved components model available in statsmodels can be written
# as:
#
# $$
# y_t = \underbrace{\mu_{t}}_{\text{trend}} +
# \underbrace{\gamma_{t}}_{\text{seasonal}} +
# \underbrace{c_{t}}_{\text{cycle}} + \sum_{j=1}^k \underbrace{\beta_j
# x_{jt}}_{\text{explanatory}} +
# \underbrace{\varepsilon_t}_{\text{irregular}}
# $$
#
# see Durbin and Koopman 2012, Chapter 3 for notation and additional
# details. Notice that different specifications for the different individual
# components can support a wide range of models. The specific models
# considered in the paper and below are specializations of this general
# equation.
#
# ### Trend
#
# The trend component is a dynamic extension of a regression model that
# includes an intercept and linear time-trend.
#
# $$
# \begin{align}
# \underbrace{\mu_{t+1}}_{\text{level}} & = \mu_t + \nu_t + \eta_{t+1}
# \qquad & \eta_{t+1} \sim N(0, \sigma_\eta^2) \\\\
# \underbrace{\nu_{t+1}}_{\text{trend}} & = \nu_t + \zeta_{t+1} &
# \zeta_{t+1} \sim N(0, \sigma_\zeta^2) \\
# \end{align}
# $$
#
# where the level is a generalization of the intercept term that can
# dynamically vary across time, and the trend is a generalization of the
# time-trend such that the slope can dynamically vary across time.
#
# For both elements (level and trend), we can consider models in which:
#
# - The element is included vs excluded (if the trend is included, there
# must also be a level included).
# - The element is deterministic vs stochastic (i.e. whether or not the
# variance on the error term is confined to be zero or not)
#
# The only additional parameters to be estimated via MLE are the variances
# of any included stochastic components.
#
# This leads to the following specifications:
#
# | |
# Level | Trend | Stochastic Level | Stochastic Trend |
# |----------------------------------------------------------------------|
# -------|-------|------------------|------------------|
# | Constant |
# ✓ | | | |
# | Local Level <br /> (random walk) |
# ✓ | | ✓ | |
# | Deterministic trend |
# ✓ | ✓ | | |
# | Local level with deterministic trend <br /> (random walk with drift) |
# ✓ | ✓ | ✓ | |
# | Local linear trend |
# ✓ | ✓ | ✓ | ✓ |
# | Smooth trend <br /> (integrated random walk) |
# ✓ | ✓ | | ✓ |
#
# ### Seasonal
#
# The seasonal component is written as:
#
# <span>$$
# \gamma_t = - \sum_{j=1}^{s-1} \gamma_{t+1-j} + \omega_t \qquad \omega_t
# \sim N(0, \sigma_\omega^2)
# $$</span>
#
# The periodicity (number of seasons) is `s`, and the defining character
# is that (without the error term), the seasonal components sum to zero
# across one complete cycle. The inclusion of an error term allows the
# seasonal effects to vary over time.
#
# The variants of this model are:
#
# - The periodicity `s`
# - Whether or not to make the seasonal effects stochastic.
#
# If the seasonal effect is stochastic, then there is one additional
# parameter to estimate via MLE (the variance of the error term).
#
# ### Cycle
#
# The cyclical component is intended to capture cyclical effects at time
# frames much longer than captured by the seasonal component. For example,
# in economics the cyclical term is often intended to capture the business
# cycle, and is then expected to have a period between "1.5 and 12 years"
# (see Durbin and Koopman).
#
# The cycle is written as:
#
# <span>$$
# \begin{align}
# c_{t+1} & = c_t \cos \lambda_c + c_t^* \sin \lambda_c + \tilde \omega_t
# \qquad & \tilde \omega_t \sim N(0, \sigma_{\tilde \omega}^2) \\\\
# c_{t+1}^* & = -c_t \sin \lambda_c + c_t^* \cos \lambda_c + \tilde
# \omega_t^* & \tilde \omega_t^* \sim N(0, \sigma_{\tilde \omega}^2)
# \end{align}
# $$</span>
#
# The parameter $\lambda_c$ (the frequency of the cycle) is an additional
# parameter to be estimated by MLE. If the seasonal effect is stochastic,
# then there is one additional parameter to estimate (the variance of the error
# term - note that both of the error terms here share the same variance, but
# are assumed to have independent draws).
#
# ### Irregular
#
# The irregular component is assumed to be a white noise error term. Its
# variance is a parameter to be estimated by MLE; i.e.
#
# $$
# \varepsilon_t \sim N(0, \sigma_\varepsilon^2)
# $$
#
# In some cases, we may want to generalize the irregular component to
# allow for autoregressive effects:
#
# $$
# \varepsilon_t = \rho(L) \varepsilon_{t-1} + \epsilon_t, \qquad
# \epsilon_t \sim N(0, \sigma_\epsilon^2)
# $$
#
# In this case, the autoregressive parameters would also be estimated via
# MLE.
#
# ### Regression effects
#
# We may want to allow for explanatory variables by including additional
# terms
#
# <span>$$
# \sum_{j=1}^k \beta_j x_{jt}
# $$</span>
#
# or for intervention effects by including
#
# <span>$$
# \begin{align}
# \delta w_t \qquad \text{where} \qquad w_t & = 0, \qquad t < \tau, \\\\
# & = 1, \qquad t \ge \tau
# \end{align}
# $$</span>
#
# These additional parameters could be estimated via MLE or by including
# them as components of the state space formulation.
#
# ## Data
#
# Following Harvey and Jaeger, we will consider the following time series:
#
# - US real GNP, "output",
# ([GNPC96](https://research.stlouisfed.org/fred2/series/GNPC96))
# - US GNP implicit price deflator, "prices",
# ([GNPDEF](https://research.stlouisfed.org/fred2/series/GNPDEF))
# - US monetary base, "money",
# ([AMBSL](https://research.stlouisfed.org/fred2/series/AMBSL))
#
# The time frame in the original paper varied across series, but was
# broadly 1954-1989. Below we use data from the period 1948-2008 for all
# series. Although the unobserved components approach allows isolating a
# seasonal component within the model, the series considered in the paper,
# and here, are already seasonally adjusted.
#
# All data series considered here are taken from [Federal Reserve Economic
# Data (FRED)](https://research.stlouisfed.org/fred2/). Conveniently, the
# Python library [Pandas](https://pandas.pydata.org/) has the ability to
# download data from FRED directly.
# Datasets
from pandas_datareader.data import DataReader

# Get the raw data
# (series downloaded from FRED for 1948Q1 through 2008Q1)
start = '1948-01'
end = '2008-01'
us_gnp = DataReader('GNPC96', 'fred', start=start, end=end)
us_gnp_deflator = DataReader('GNPDEF', 'fred', start=start, end=end)
# The monetary base series is higher frequency; resample to quarter-start
# ('QS') averages so all three series share a quarterly index.
us_monetary_base = DataReader('AMBSL', 'fred', start=start,
                              end=end).resample('QS').mean()
# Recession indicator series (0/1), resampled to quarters; used below for
# shading recession periods in plots.
recessions = DataReader('USRECQ', 'fred', start=start,
                        end=end).resample('QS').last().values[:, 0]

# Construct the dataframe
# All three series are logged before analysis.
dta = pd.concat(map(np.log, (us_gnp, us_gnp_deflator, us_monetary_base)),
                axis=1)
dta.columns = ['US GNP', 'US Prices', 'US monetary base']
dta.index.freq = dta.index.inferred_freq
# Matplotlib-friendly representation of the date index (used as the x-axis
# when shading recessions below).
dates = dta.index._mpl_repr()
# To get a sense of these three variables over the timeframe, we can plot
# them:
# Plot the data
ax = dta.plot(figsize=(13, 3))
ylim = ax.get_ylim()
ax.xaxis.grid()
# Shade recession periods: `recessions` is passed as fill_between's `where`
# argument, so the light band (spanning the y-range, minus a hair to stay
# inside the axes) appears only where the indicator is nonzero.
ax.fill_between(dates,
                ylim[0] + 1e-5,
                ylim[1] - 1e-5,
                recessions,
                facecolor='k',
                alpha=0.1)
# ## Model
#
# Since the data is already seasonally adjusted and there are no obvious
# explanatory variables, the generic model considered is:
#
# $$
# y_t = \underbrace{\mu_{t}}_{\text{trend}} +
# \underbrace{c_{t}}_{\text{cycle}} +
# \underbrace{\varepsilon_t}_{\text{irregular}}
# $$
#
# The irregular will be assumed to be white noise, and the cycle will be
# stochastic and damped. The final modeling choice is the specification to
# use for the trend component. Harvey and Jaeger consider two models:
#
# 1. Local linear trend (the "unrestricted" model)
# 2. Smooth trend (the "restricted" model, since we are forcing
# $\sigma_\eta = 0$)
#
# Below, we construct `kwargs` dictionaries for each of these model types.
# Notice that rather that there are two ways to specify the models. One way
# is to specify components directly, as in the table above. The other way is
# to use string names which map to various specifications.
# Model specifications
# These dicts are unpacked as keyword arguments to
# sm.tsa.UnobservedComponents when the models are constructed below.

# Unrestricted model, using string specification
unrestricted_model = {
    'level': 'local linear trend',
    'cycle': True,
    'damped_cycle': True,
    'stochastic_cycle': True
}

# Unrestricted model, setting components directly
# This is an equivalent, but less convenient, way to specify a
# local linear trend model with a stochastic damped cycle:
# unrestricted_model = {
#     'irregular': True, 'level': True, 'stochastic_level': True, 'trend':
#     True, 'stochastic_trend': True,
#     'cycle': True, 'damped_cycle': True, 'stochastic_cycle': True
# }

# The restricted model forces a smooth trend (the level equation's error
# variance sigma_eta^2 is fixed to zero).
restricted_model = {
    'level': 'smooth trend',
    'cycle': True,
    'damped_cycle': True,
    'stochastic_cycle': True
}

# Restricted model, setting components directly
# This is an equivalent, but less convenient, way to specify a
# smooth trend model with a stochastic damped cycle. Notice
# that the difference from the local linear trend model is that
# `stochastic_level=False` here.
# unrestricted_model = {
#     'irregular': True, 'level': True, 'stochastic_level': False,
#     'trend': True, 'stochastic_trend': True,
#     'cycle': True, 'damped_cycle': True, 'stochastic_cycle': True
# }
# }
# We now fit the following models:
#
# 1. Output, unrestricted model
# 2. Prices, unrestricted model
# 3. Prices, restricted model
# 4. Money, unrestricted model
# 5. Money, restricted model
# Output
# Fit each model by maximum likelihood. Powell's derivative-free method is
# used for the numerical optimization; disp=False silences optimizer output.
output_mod = sm.tsa.UnobservedComponents(dta['US GNP'], **unrestricted_model)
output_res = output_mod.fit(method='powell', disp=False)

# Prices
prices_mod = sm.tsa.UnobservedComponents(dta['US Prices'],
                                         **unrestricted_model)
prices_res = prices_mod.fit(method='powell', disp=False)

prices_restricted_mod = sm.tsa.UnobservedComponents(dta['US Prices'],
                                                    **restricted_model)
prices_restricted_res = prices_restricted_mod.fit(method='powell', disp=False)

# Money
money_mod = sm.tsa.UnobservedComponents(dta['US monetary base'],
                                        **unrestricted_model)
money_res = money_mod.fit(method='powell', disp=False)

money_restricted_mod = sm.tsa.UnobservedComponents(dta['US monetary base'],
                                                   **restricted_model)
money_restricted_res = money_restricted_mod.fit(method='powell', disp=False)

# Once we have fit these models, there are a variety of ways to display
# the information. Looking at the model of US GNP, we can summarize the fit
# of the model using the `summary` method on the fit object.
print(output_res.summary())
# For unobserved components models, and in particular when exploring
# stylized facts in line with point (2) from the introduction, it is often
# more instructive to plot the estimated unobserved components (e.g. the
# level, trend, and cycle) themselves to see if they provide a meaningful
# description of the data.
#
# The `plot_components` method of the fit object can be used to show plots
# and confidence intervals of each of the estimated states, as well as a
# plot of the observed data versus the one-step-ahead predictions of the
# model to assess fit.
# Shows each estimated state with confidence intervals, plus the observed
# data against the one-step-ahead predictions.
fig = output_res.plot_components(legend_loc='lower right', figsize=(15, 9))
# Finally, Harvey and Jaeger summarize the models in another way to
# highlight the relative importances of the trend and cyclical components;
# below we replicate their Table I. The values we find are broadly
# consistent with, but different in the particulars from, the values from
# their table.
# Create Table I
# Summarize the five fitted models in the style of Harvey & Jaeger's
# Table I: estimated variances (scaled by 1e7), the cycle damping factor,
# and the implied cycle period.
table_i = np.zeros((5, 6))

# Note: `start` and `end` are deliberately rebound here, from the date
# strings defined earlier to the actual endpoint timestamps of the data.
start = dta.index[0]
end = dta.index[-1]
time_range = '%d:%d-%d:%d' % (start.year, start.quarter, end.year, end.quarter)
models = [
    ('US GNP', time_range, 'None'),
    ('US Prices', time_range, 'None'),
    ('US Prices', time_range, r'$\sigma_\eta^2 = 0$'),
    ('US monetary base', time_range, 'None'),
    ('US monetary base', time_range, r'$\sigma_\eta^2 = 0$'),
]
index = pd.MultiIndex.from_tuples(
    models, names=['Series', 'Time range', 'Restrictions'])
parameter_symbols = [
    r'$\sigma_\zeta^2$',
    r'$\sigma_\eta^2$',
    r'$\sigma_\kappa^2$',
    r'$\rho$',
    r'$2 \pi / \lambda_c$',
    r'$\sigma_\varepsilon^2$',
]

# Use enumerate instead of a manually-maintained `i = 0 ... i += 1` counter.
for i, res in enumerate((output_res, prices_res, prices_restricted_res,
                         money_res, money_restricted_res)):
    if res.model.stochastic_level:
        # Unrestricted model: a trend variance parameter was estimated.
        (sigma_irregular, sigma_level, sigma_trend, sigma_cycle,
         frequency_cycle, damping_cycle) = res.params
    else:
        # Restricted model (sigma_eta^2 = 0): no trend variance parameter.
        (sigma_irregular, sigma_level, sigma_cycle, frequency_cycle,
         damping_cycle) = res.params
        sigma_trend = np.nan
    # Convert the estimated cycle frequency into a period.
    period_cycle = 2 * np.pi / frequency_cycle

    table_i[i, :] = [
        sigma_level * 1e7, sigma_trend * 1e7, sigma_cycle * 1e7, damping_cycle,
        period_cycle, sigma_irregular * 1e7
    ]

# Display floats with 4 significant digits; render NaN entries as '-'.
pd.set_option('float_format', lambda x: '%.4g' % np.round(x, 2)
              if not np.isnan(x) else '-')
table_i = pd.DataFrame(table_i, index=index, columns=parameter_symbols)
table_i
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
8410,
5626,
48483,
198,
2,
5231,
519,
877,
515,
422,
262,
20922,
2585,
10223,
62,
7249,
1523,
62,
9869,
3304,
62,
73,
3609,
1362,
13,
... | 2.680136 | 5,593 |
from abc import abstractmethod
from starfish.core.imagestack.imagestack import ImageStack
from starfish.core.morphology.binary_mask import BinaryMaskCollection
from starfish.core.pipeline.algorithmbase import AlgorithmBase
| [
6738,
450,
66,
1330,
12531,
24396,
198,
198,
6738,
3491,
11084,
13,
7295,
13,
48466,
395,
441,
13,
48466,
395,
441,
1330,
7412,
25896,
198,
6738,
3491,
11084,
13,
7295,
13,
24503,
1435,
13,
39491,
62,
27932,
1330,
45755,
45195,
36307,
... | 3.688525 | 61 |
from django.db import models
from datetime import datetime
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
4818,
8079,
1330,
4818,
8079,
628,
628
] | 3.875 | 16 |
# coding=utf-8
import logging
__author__ = 'Tyler Butler <tyler@tylerbutler.com>'
# Adapted from Marty Alchin: http://martyalchin.com/2008/jan/10/simple-plugin-framework/
#noinspection PyUnresolvedReferences,PyUnusedLocal
def load_plugins():
    """Discover and load every available plugin.

    Importing :mod:`engineer.plugins.bundled` pulls in the built-in
    plugins; iterating over ``find_plugins`` imports each externally
    registered plugin module as a side effect.
    """
    from engineer.plugins import bundled

    for plugin_name, plugin_module in find_plugins('engineer.plugins'):
        # find_plugins has already imported the module; nothing else to do.
        pass
#noinspection PyMissingConstructor,PyUnusedLocal
class PluginMount(type):
    """A metaclass used to identify :ref:`plugins`."""
    # NOTE(review): plugin-mount metaclasses conventionally collect their
    # subclasses into a registry in __init__/__new__; no such logic is
    # visible in this chunk (the inspection suppression above hints that a
    # constructor existed) -- confirm against the full source file.
class ThemeProvider(PluginMixin):
    """
    Base class for Theme :ref:`plugins`.

    ThemeProvider subclasses must provide a value for :attr:`~engineer.plugins.ThemeProvider.paths`.

    .. versionchanged:: 0.3.0
    """
    # Python 2-style metaclass hook: deriving from this base registers the
    # subclass as a plugin via the PluginMount metaclass.
    __metaclass__ = PluginMount

    paths = ()  # empty tuple
    """An iterable of absolute paths containing one or more :ref:`theme manifests <theme manifest>`."""
class PostProcessor(PluginMixin):
    """
    Base class for Post Processor :ref:`plugins`.

    PostProcessor subclasses should provide implementations for :meth:`~engineer.plugins.PostProcessor.preprocess` or
    :meth:`~engineer.plugins.PostProcessor.postprocess` (or both) as appropriate.
    """
    # Python 2-style metaclass hook: subclasses are registered as plugins.
    __metaclass__ = PluginMount

    @classmethod
    def preprocess(cls, post, metadata):
        """
        The ``preprocess`` method is called during the Post import process, before any post metadata defaults
        have been set.

        The preprocess method should use the ``content_preprocessed`` attribute to get/modify the content of *post*.
        This ensures that preprocessors from other plugins can be chained together.

        By default, the ``content_preprocessed`` value is used only
        for generating post HTML. It is not written back to the source post file. However, sometimes you may want
        to make a permanent change to the post content that is written out. In this case, you should call the
        :meth:`~engineer.models.Post.set_finalized_content` method, passing it the modified content. This
        method will ensure the data is written back to the source file by the :ref:`metadata finalization` plugin.

        This means that in order for a plugin to write preprocessed data back to the post file,
        the :attr:`~engineer.conf.EngineerConfiguration.FINALIZE_METADATA` setting must be
        enabled.

        Your plugin will also need to be explicitly granted the ``MODIFY_RAW_POST`` permission. See more
        detail in :ref:`plugin permissions`.

        In addition, the preprocess method can add/remove/update properties on the *post* object itself as needed.

        .. tip::
            Since the :attr:`~engineer.conf.EngineerConfiguration.FINALIZE_METADATA` setting must be enabled for
            plugins to write back to source post files, you should check this setting in addition to any other
            settings you may be using.

        :param post: The post being currently processed by Engineer.

        :param metadata: A dict of the post metadata contained in the post source file. It contains no
            default values - only the values contained within the post source file itself. The preprocess method can
            add, update, or otherwise manipulate metadata prior to it being processed by Engineer manipulating this
            parameter.

        :return: The *post* and *metadata* values should be returned (as a 2-tuple) by the method.
        """
        # Default implementation is a no-op pass-through so subclasses may
        # override only the hook they need.
        return post, metadata

    @classmethod
    def postprocess(cls, post):
        """
        The ``postprocess`` method is called after the post has been imported and processed as well as converted to
        HTML and output.

        :param post: The post being currently processed by Engineer.

        :return: The *post* parameter should be returned.
        """
        # Default implementation is a no-op pass-through.
        return post
class CommandPlugin(PluginMixin):
    """
    Base class for Command :ref:`plugins`.

    Command plugins add new commands to the :ref:`cmdline`. CommandPlugin subclasses must provide an implementation
    for :meth:`~engineer.plugins.CommandPlugin.add_command`, and can optionally override
    the :meth:`~engineer.plugins.CommandPlugin.active` classmethod to determine whether or not the plugin should
    actually be loaded.

    .. note::
        Because Engineer uses :mod:`argparse` for parsing out its commands, you should be somewhat familiar with
        it in order to implement a Command plugin.

    .. seealso:: :ref:`command plugin examples`
    """
    # Python 2-style metaclass hook: subclasses are registered as plugins.
    __metaclass__ = PluginMount

    @classmethod
    def active(cls):
        """
        If this method returns ``False``, the plugin will not run and any commands added by the plugin will not
        be available.

        This method can be overridden to make commands available only if certain criteria are met (for example,
        a custom :ref:`setting<settings>`).

        :return: A boolean value indicating whether or not the plugin is active and should run. Default
            implementation always returns ``True``.
        """
        return True

    @classmethod
    def add_command(cls, subparser, main_parser, common_parser):
        """
        This method is called by Engineer while it is building its :class:`~argparse.ArgumentParser`,
        allowing one to add addition parsers and subparsers to supplement the core :ref:`Engineer commands<cmdline>`.

        :param subparser:
            Since Engineer's built-in commands are subparsers, :meth:`~argparse.ArgumentParser.add_subparsers` is
            called to generate a subparser. :mod:`argparse` only supports
            calling :meth:`~argparse.ArgumentParser.add_subparsers` once, so the subparser object itself (the result
            of the initial :meth:`~argparse.ArgumentParser.add_subparsers` call Engineer made when building its
            parser) is passed in this parameter. This allows you to add either another top-level command by calling
            ``add_parser()`` then adding arguments directly, or to create further nested commands by adding a parser
            with additional subparsers within it.

        :param main_parser:
            The top level :class:`~argparse.ArgumentParser` used by Engineer. This is generally only useful if you're
            using an :mod:`argparse` wrapper library such as `argh <http://packages.python.org/argh/index.html>`_ in
            your plugin. Most wrapper libraries require the root :class:`~argparse.ArgumentParser` object to add their
            subparsers to. If you're using :mod:`argparse` directly, you can ignore this parameter and work with
            the ``subparser`` parameter exclusively.

        :param common_parser:
            Engineer provides several :ref:`common arguments<engineer>` for its commands. If you wish to makes these
            arguments available for your custom commands, you should pass ``common_parser`` in
            to ``add_parser()`` via the ``parents`` parameter.
        """
        # Abstract by convention: subclasses must override this method.
        raise NotImplementedError()
class JinjaEnvironmentPlugin(PluginMixin):
    """
    Base class for JinjaEnvironment :ref:`plugins`.

    JinjaEnvironment plugins can supplement the Jinja 2 environment with things like filters and global
    functions. These additions can then be used in your Jinja templates.

    .. versionadded:: 0.5.0
    """
    # Python 2-style metaclass hook: subclasses are registered as plugins.
    __metaclass__ = PluginMount

    filters = {}
    """
    A dict of filters to add to the Jinja environment. The key of each entry should be the name of the filter (as it
    will be used inside templates), while the value should be the filter function. If you require more custom logic
    to build the dict of filters, override the :meth:`~engineer.plugins.JinjaEnvironmentPlugin.get_filters` method.
    """

    globals = {}
    """
    A dict of functions to add to the Jinja environment globally. The key of each entry should be the name of the
    function (as it will be used inside templates), while the value should be the function itself. If you require more
    custom logic to build this dict, override the :meth:`~engineer.plugins.JinjaEnvironmentPlugin.get_globals` method.
    """

    # BUGFIX: the helpers below were missing and three bare ``@classmethod``
    # decorators were stacked on ``update_environment`` (a broken
    # classmethod-of-a-classmethod). Each decorator belongs to its own
    # method; ``update_environment`` relies on these two helpers.
    @classmethod
    def _add_filters(cls, jinja_env):
        # Register this plugin's filters with the environment's filter dict.
        jinja_env.filters.update(cls.get_filters())

    @classmethod
    def _add_globals(cls, jinja_env):
        # Register this plugin's global functions with the environment.
        jinja_env.globals.update(cls.get_globals())

    @classmethod
    def update_environment(cls, jinja_env):
        """
        For complete customization of the Jinja environment, subclasses can override this method.

        Subclasses should ensure that the base implementation is called first in their overridden implementation. For
        example:

        .. code-block:: python

            @classmethod
            def update_environment(cls, jinja_env):
                super(BundledFilters, cls).update_environment(jinja_env)
                # some other code here...

        :param jinja_env: The Jinja environment.
        """
        cls._add_filters(jinja_env)
        cls._add_globals(jinja_env)

    @classmethod
    def get_filters(cls):
        """
        If required, subclasses can override this method to return a dict of filters to add to the Jinja environment.
        The default implementation simply returns :attr:`~engineer.plugins.JinjaEnvironmentPlugin.filters`.
        """
        return cls.filters

    @classmethod
    def get_globals(cls):
        """
        If required, subclasses can override this method to return a dict of functions to add to the Jinja
        environment globally. The default implementation simply
        returns :attr:`~engineer.plugins.JinjaEnvironmentPlugin.globals`.
        """
        return cls.globals
| [
2,
19617,
28,
40477,
12,
23,
198,
11748,
18931,
198,
198,
834,
9800,
834,
796,
705,
46807,
17389,
1279,
774,
1754,
31,
774,
1754,
4360,
1754,
13,
785,
29,
6,
198,
198,
2,
30019,
276,
422,
29876,
978,
24658,
25,
2638,
1378,
13822,
... | 3.003406 | 3,230 |
import random
import discord
import asyncio
from config import DISCORD_TOKEN
client = discord.Client()


# NOTE(review): each @client.event decorator below should be immediately
# followed by an async event-handler definition (e.g. on_ready or
# on_message); the handler bodies appear to have been stripped from this
# chunk, leaving bare decorators that are not valid Python as written.
@client.event


@client.event

# Start the bot's event loop with the token imported from config.
client.run(DISCORD_TOKEN)
| [
11748,
4738,
198,
198,
11748,
36446,
198,
11748,
30351,
952,
198,
198,
6738,
4566,
1330,
13954,
34,
12532,
62,
10468,
43959,
198,
198,
16366,
796,
36446,
13,
11792,
3419,
628,
198,
31,
16366,
13,
15596,
628,
198,
198,
31,
16366,
13,
1... | 2.964286 | 56 |
from django.db import models
# Create your models here.
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
2,
13610,
534,
4981,
994,
13,
198
] | 3.5625 | 16 |
import os
import intake
import pandas as pd
import pytest
import xarray as xr
from intake_esm import config
# Absolute path of the directory containing this test module; presumably
# used to locate test fixtures (usage not visible in this chunk).
here = os.path.abspath(os.path.dirname(__file__))
@pytest.mark.parametrize(
'chunks, expected_chunks',
[
({'time': 1, 'lat': 2, 'lon': 2}, (1, 1, 2, 2)),
({'time': 2, 'lat': 1, 'lon': 1}, (1, 2, 1, 1)),
],
)
| [
11748,
28686,
198,
198,
11748,
10337,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
12972,
9288,
198,
11748,
2124,
18747,
355,
2124,
81,
198,
198,
6738,
10337,
62,
45798,
1330,
4566,
198,
198,
1456,
796,
28686,
13,
6978,
13,
397,
... | 2.171779 | 163 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Contains functionality for representing data in a tabular format by parsing the provided file or list of files.
For more information, see the article [Add & register
datasets](https://docs.microsoft.com/azure/machine-learning/how-to-create-register-datasets).
To get started working with a tabular dataset, see https://aka.ms/tabulardataset-samplenotebook.
"""
import warnings
from datetime import datetime, timedelta
from azureml._common.exceptions import AzureMLException
from azureml.data.constants import _PUBLIC_API, _DATASET_PROP_TIMESTAMP_FINE, _DATASET_PROP_TIMESTAMP_COARSE, \
_DEPRECATED_TIMESTAMP_NAME, _DEPRECATED_PARTITION_TIMESTAMP_NAME, _ACTION_TYPE_PROFILE, \
_LEGACY_DATASET_ID, _TIMESERIES_WITH_TIMESTAMP_COLUMN_ACTIVITY, \
_TIMESERIES_BEFORE_ACTIVITY, _TIMESERIES_AFTER_ACTIVITY, _TIMESERIES_BETWEEN_ACTIVITY, \
_TIMESERIES_RECENT_ACTIVITY, _HALF_SECOND, _PATITION_BY_ACTIVITY
from azureml.data.dataset_error_handling import _validate_has_data, _validate_has_columns, _try_execute
from azureml.data.abstract_dataset import AbstractDataset
from azureml.data._dataprep_helper import dataprep, get_dataflow_for_execution, get_dataflow_with_meta_flags
from azureml.data._dataset_rest_helper import _restclient, _custom_headers
from azureml.data._loggerfactory import track, _LoggerFactory, collect_datasets_usage
from azureml._base_sdk_common._docstring_wrapper import experimental
from azureml.exceptions import UserErrorException, DatasetTimestampMissingError
# Module-level logger cache; presumably lazily initialized by a
# `_get_logger` helper (referenced in the decorators below but not visible
# in this chunk) -- TODO confirm against the full source.
_logger = None
class TabularDataset(AbstractDataset):
"""Represents a tabular dataset to use in Azure Machine Learning.
A TabularDataset defines a series of lazily-evaluated, immutable operations to load data from the
data source into tabular representation. Data is not loaded from the source until TabularDataset
is asked to deliver data.
TabularDataset is created using methods like
:func:`azureml.data.dataset_factory.TabularDatasetFactory.from_delimited_files` from the
:class:`azureml.data.dataset_factory.TabularDatasetFactory` class.
For more information, see the article `Add & register
datasets <https://docs.microsoft.com/azure/machine-learning/how-to-create-register-datasets>`_.
To get started working with a tabular dataset, see https://aka.ms/tabulardataset-samplenotebook.
.. remarks::
A TabularDataset can be created from CSV, TSV, Parquet files, or SQL query using the ``from_*``
methods of the :class:`azureml.data.dataset_factory.TabularDatasetFactory` class. You can
perform subsetting operations on a TabularDataset like splitting, skipping, and filtering records.
The result of subsetting is always one or more new TabularDataset objects.
You can also convert a TabularDataset into other formats like a pandas DataFrame.
The actual data loading happens when TabularDataset is asked to deliver the data into another
storage mechanism (e.g. a Pandas Dataframe, or a CSV file).
TabularDataset can be used as input of an experiment run. It can also be registered to workspace
with a specified name and be retrieved by that name later.
"""
def __init__(self):
"""Initialize a TabularDataset object.
This constructor is not supposed to be invoked directly. Dataset is intended to be created using
:class:`azureml.data.dataset_factory.TabularDatasetFactory` class.
"""
super().__init__()
@property
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def timestamp_columns(self):
"""Return the timestamp columns.
:return: The column names for timestamp (used to be referred as fine_grain_timestamp) and partition_timestamp
(used to be referred as coarse grain timestamp) defined for the dataset.
:rtype: (str, str)
"""
timestamp = self._properties.get(_DATASET_PROP_TIMESTAMP_FINE, None)
partition_timestamp = self._properties.get(_DATASET_PROP_TIMESTAMP_COARSE, None)
return (timestamp, partition_timestamp)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def with_timestamp_columns(self, timestamp=None, partition_timestamp=None, validate=False, **kwargs):
"""Define timestamp columns for the dataset.
.. remarks::
The method defines columns to be used as timestamps. Timestamp columns on a dataset make it possible
to treat the data as time-series data and enable additional capabilities. When a dataset has
both ``timestamp (used to be referred as fine_grain_timestamp)`` and ``partition_timestamp (used to be
referred as coarse grain timestamp)`` specified, the two columns should represent the same timeline.
:param timestamp: The name of column as timestamp (used to be referred as fine_grain_timestamp) (optional).
The default is None(clear).
:type timestamp: str
:param partition_timestamp: The name of column partition_timestamp (used to be referred as coarse grain
timestamp) (optional). The default is None(clear).
:type partition_timestamp: str
:param validate: Indicates whether to validate if specified columns exist in dataset. The default is False.
Validation requires that the data source is accessible from the current compute.
:type validate: bool
:return: Returns a new TabularDataset with timestamp columns defined.
:rtype: azureml.data.TabularDataset
"""
fine_grain_timestamp = kwargs.get(_DEPRECATED_TIMESTAMP_NAME, None)
coarse_grain_timestamp = kwargs.get(_DEPRECATED_PARTITION_TIMESTAMP_NAME, None)
if fine_grain_timestamp:
warnings.warn("fine_grain_timestamp is deprecated, use timestamp.", DeprecationWarning)
if coarse_grain_timestamp:
warnings.warn("coarse_grain_timestamp is deprecated, use partition_timestamp.", DeprecationWarning)
if (timestamp or partition_timestamp) and (fine_grain_timestamp or coarse_grain_timestamp):
raise UserErrorException('fine_grain_timestamp and coarse_grain_timestamp have been replaced by '
'timestamp and partition_timestamp parameters and cannot be used together.')
if not timestamp and partition_timestamp:
raise UserErrorException('partition_timestamp can be assigned only if timestamp is assigned.')
if timestamp and timestamp == partition_timestamp:
raise UserErrorException('partition_timestamp cannot be the same as timestamp.')
if not fine_grain_timestamp and coarse_grain_timestamp:
raise UserErrorException('coarse_grain_timestamp can be assigned only if fine_grain_timestamp is '
'assigned.')
if fine_grain_timestamp and fine_grain_timestamp == coarse_grain_timestamp:
raise UserErrorException('coarse_grain_timestamp cannot be the same as fine_grain_timestamp.')
if validate:
self._validate_timestamp_columns([fine_grain_timestamp, coarse_grain_timestamp])
if timestamp:
fine_grain_timestamp = timestamp
coarse_grain_timestamp = partition_timestamp
if self._registration and self._registration.workspace:
collect_datasets_usage(_get_logger(), _TIMESERIES_WITH_TIMESTAMP_COLUMN_ACTIVITY,
[self], self._registration.workspace, "N/A")
dataset = TabularDataset._create(self._dataflow, self._properties, telemetry_info=self._telemetry_info)
if fine_grain_timestamp:
dataset._properties[_DATASET_PROP_TIMESTAMP_FINE] = fine_grain_timestamp
else:
if _DATASET_PROP_TIMESTAMP_FINE in self._properties:
del dataset._properties[_DATASET_PROP_TIMESTAMP_FINE]
if coarse_grain_timestamp:
dataset._properties[_DATASET_PROP_TIMESTAMP_COARSE] = coarse_grain_timestamp
else:
if _DATASET_PROP_TIMESTAMP_COARSE in self._properties:
del dataset._properties[_DATASET_PROP_TIMESTAMP_COARSE]
return dataset
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def to_pandas_dataframe(self, on_error='null', out_of_range_datetime='null'):
"""Load all records from the dataset into a pandas DataFrame.
:param on_error: How to handle any error values in the dataset, such as those produced by an error while
parsing values. Valid values are 'null' which replaces them with null; and 'fail' which will result in
an exception.
:param out_of_range_datetime: How to handle date-time values that are outside the range supported by Pandas.
Valid values are 'null' which replaces them with null; and 'fail' which will result in an exception.
:return: Returns a pandas DataFrame.
:rtype: pandas.DataFrame
"""
dataflow = get_dataflow_for_execution(self._dataflow, 'to_pandas_dataframe', 'TabularDataset')
df = _try_execute(lambda: dataflow.to_pandas_dataframe(on_error=on_error,
out_of_range_datetime=out_of_range_datetime),
'to_pandas_dataframe',
None if self.id is None else {'id': self.id, 'name': self.name, 'version': self.version})
fine_grain_timestamp = self._properties.get(_DATASET_PROP_TIMESTAMP_FINE, None)
if fine_grain_timestamp is not None and df.empty is False:
df.set_index(fine_grain_timestamp, drop=False, inplace=True)
df.index.rename(None, inplace=True)
return df
@experimental
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def to_dask_dataframe(self, sample_size=10000, dtypes=None, on_error='null', out_of_range_datetime='null'):
"""Return a Dask DataFrame that can lazily read the data in the dataset.
:param sample_size: The number of records to read to determine schema and types.
:param dtypes: An optional dict specifying the expected columns and their dtypes.
`sample_size` is ignored if this is provided.
:param on_error: How to handle any error values in the dataset,
such as those produced by an error while parsing values.
Valid values are 'null' which replaces them with null; and 'fail' which will result in an exception.
:param out_of_range_datetime: How to handle date-time values that are outside the range supported by Pandas.
Valid values are 'null' which replaces them with null; and 'fail' which will result in an exception.
:return: dask.dataframe.core.DataFrame
"""
dataflow = get_dataflow_for_execution(self._dataflow, 'to_dask_dataframe', 'TabularDataset')
dd = _try_execute(lambda: dataflow.to_dask_dataframe(sample_size=sample_size,
dtypes=dtypes,
on_error=on_error,
out_of_range_datetime=out_of_range_datetime),
'to_dask_dataframe',
None if self.id is None else {'id': self.id, 'name': self.name, 'version': self.version})
return dd
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def to_spark_dataframe(self):
"""Load all records from the dataset into a Spark DataFrame.
:return: Returns a Spark DataFrame.
:rtype: pyspark.sql.DataFrame
"""
dataflow = get_dataflow_for_execution(self._dataflow, 'to_spark_dataframe', 'TabularDataset')
return _try_execute(dataflow.to_spark_dataframe,
'to_spark_dataframe',
None if self.id is None else {'id': self.id, 'name': self.name, 'version': self.version})
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def skip(self, count):
"""Skip records from top of the dataset by the specified count.
:param count: The number of records to skip.
:type count: int
:return: Returns a new TabularDataset object representing a dataset with records skipped.
:rtype: azureml.data.TabularDataset
"""
return TabularDataset._create(
self._dataflow.skip(count), self._properties, telemetry_info=self._telemetry_info)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def take(self, count):
"""Take a sample of records from top of the dataset by the specified count.
:param count: The number of records to take.
:type count: int
:return: Returns a new TabularDataset object representing the sampled dataset.
:rtype: azureml.data.TabularDataset
"""
return TabularDataset._create(
self._dataflow.take(count), self._properties, telemetry_info=self._telemetry_info)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def take_sample(self, probability, seed=None):
"""Take a random sample of records in the dataset approximately by the probability specified.
:param probability: The probability of a record being included in the sample.
:type probability: float
:param seed: Optional seed to use for the random generator.
:type seed: int
:return: Returns a new TabularDataset object representing the sampled dataset.
:rtype: azureml.data.TabularDataset
"""
return TabularDataset._create(
self._dataflow.take_sample(probability, seed), self._properties, telemetry_info=self._telemetry_info)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def random_split(self, percentage, seed=None):
"""Split records in the dataset into two parts randomly and approximately by the percentage specified.
The first dataset contains approximately ``percentage`` of the total records and the second dataset the
remaining records.
:param percentage: The approximate percentage to split the dataset by. This must be a number between
0.0 and 1.0.
:type percentage: float
:param seed: Optional seed to use for the random generator.
:type seed: int
:return: Returns a tuple of new TabularDataset objects representing the two datasets after the split.
:rtype: (azureml.data.TabularDataset, azureml.data.TabularDataset)
"""
dataflow1, dataflow2 = self._dataflow.random_split(percentage, seed)
return (
TabularDataset._create(dataflow1, self._properties, telemetry_info=self._telemetry_info),
TabularDataset._create(dataflow2, self._properties, telemetry_info=self._telemetry_info)
)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def keep_columns(self, columns, validate=False):
"""Keep the specified columns and drops all others from the dataset.
If a timeseries column is dropped, the corresponding capabilities will be dropped for the
returned dataset as well.
:param columns: The name or a list of names for the columns to keep.
:type columns: typing.Union[str, builtin.list[str]]
:param validate: Indicates whether to validate if data can be loaded from the returned dataset.
The default is False. Validation requires that the data source is accessible from current compute.
:type validate: bool
:return: Returns a new TabularDataset object with only the specified columns kept.
:rtype: azureml.data.TabularDataset
"""
dataflow = self._dataflow.keep_columns(columns, validate_column_exists=False)
if validate:
_validate_has_data(dataflow,
('Cannot load any data from the dataset with only columns {} kept. Make sure the '
'specified columns exist in the current dataset.')
.format(columns if isinstance(columns, list) else [columns]))
dataset = TabularDataset._create(dataflow, self._properties, telemetry_info=self._telemetry_info)
if isinstance(columns, str):
columns = [columns]
ts_cols = self.timestamp_columns
trait_dropped = None
if ts_cols[0] is not None:
if ts_cols[0] not in columns:
dataset = dataset.with_timestamp_columns(None)
trait_dropped = 'fine_grain_timestamp, coarse_grain_timestamp'
elif ts_cols[1] is not None and ts_cols[1] not in columns:
dataset = dataset.with_timestamp_columns(ts_cols[0])
trait_dropped = 'coarse_grain_timestamp'
if trait_dropped is not None:
_get_logger().info('Dropping trait ({0}) on dataset (id={1}) during keep_columns.'
.format(trait_dropped, self.id))
return dataset
    @experimental
    @track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
    def partition_by(self, partition_keys, target, name=None, show_progress=True, partition_as_file_dataset=False):
        """Partitioned data will be copied and output to the destination specified by target.

        Creates the dataset from the outputted data path with partition format, registers the dataset if a
        name is provided, and returns the dataset for the new data path with partitions.

        .. code-block:: python

            ds = Dataset.get_by_name('test')  # indexed by country, state, partition_date

            # #1: call partition_by locally
            new_ds = ds.partition_by(name="repartitioned_ds", partition_keys=['country'],
                                     target=DataPath(datastore, "repartition"))
            partition_keys = new_ds.partition_keys  # ['country']

            # new_ds can be passed to PRS as input dataset

        :param partition_keys: Required, partition keys
        :type partition_keys: builtin.list[str]
        :param target: Required, the datastore path where the dataframe parquet data will be uploaded to.
            A guid folder will be generated under the target path to avoid conflict.
        :type target: azureml.data.datapath.DataPath, azureml.core.datastore.Datastore
            or tuple(azureml.core.datastore.Datastore, str) object
        :param name: Optional, The registration name.
        :type name: str
        :param show_progress: Optional, indicates whether to show progress of the upload in the console.
            Defaults to be True.
        :type show_progress: bool
        :param partition_as_file_dataset: Optional, indicates whether returns a filedataset or not.
            Defaults to be False.
        :type partition_as_file_dataset: bool
        :return: The saved or registered dataset.
        :rtype: azureml.data.TabularDataset
        """
        from uuid import uuid4
        from azureml.exceptions import UserErrorException
        from azureml.core import Dataset
        from azureml.data.data_reference import DataReference
        from azureml.data._dataset_factory_helper import get_progress_logger, parse_target
        from azureml.dataprep import FieldType
        from azureml.data.dataset_factory import TabularDatasetFactory
        import time
        starting_time = time.process_time()
        console = get_progress_logger(show_progress)
        console("Validating arguments.")
        # Every partition key must name an existing column, and keys must be unique.
        if len(partition_keys) == 0:
            raise UserErrorException("partition_keys cannot be empty")
        column_types = self._dataflow.dtypes
        invalid_keys = []
        for key in partition_keys:
            if key not in column_types:
                invalid_keys.append(key)
        if len(invalid_keys) != 0:
            raise UserErrorException("{0} are invalid partition keys".format(invalid_keys))
        if len(partition_keys) != len(set(partition_keys)):
            raise UserErrorException("partition_keys cannot have duplicates")
        console("Arguments validated.")
        # A fresh guid folder under the target path avoids clobbering earlier output.
        guid = uuid4()
        datastore, relative_path = parse_target(target)
        relative_path_with_guid = "/%s/%s/" % (relative_path, guid)
        # partition_format is used to parse key values back out of the folder names;
        # partition_path is the matching glob used to locate the written files.
        partition_format = relative_path_with_guid
        partition_path = relative_path_with_guid
        saved_dataset_key_column_types = {}
        for key in partition_keys:
            if column_types[key] == FieldType.DATE:
                # DATE keys are encoded into the folder name with an explicit format.
                partition_format = partition_format + '{' + key + ':yyyyMMddHHmmss}*/'
                del column_types[key]
            else:
                partition_format = partition_format + '{' + key + '}/'
            partition_path = partition_path + '*/'
            if key in column_types:
                saved_dataset_key_column_types[key] = column_types[key]
        partition_format = partition_format + '*.parquet'
        partition_path = partition_path + '*.parquet'
        console("Uploading file to {}".format(relative_path_with_guid))
        self._dataflow.write_to_parquet(partition_keys=partition_keys,
                                        directory_path=DataReference(datastore=datastore).
                                        path(relative_path_with_guid)).run_local()
        console("Successfully uploaded file to datastore.")
        console("Creating a new dataset.")
        if partition_as_file_dataset:
            saved_dataset = Dataset.File.\
                from_files(path=(datastore, partition_path), partition_format=partition_format)
        else:
            saved_dataset = TabularDatasetFactory.\
                from_parquet_files(path=(datastore, partition_path), partition_format=partition_format)
        # Re-apply the original column types for the partition key columns.
        saved_dataset = TabularDataset._create(saved_dataset._dataflow.
                                               set_column_types(saved_dataset_key_column_types),
                                               self._properties, telemetry_info=self._telemetry_info)
        console("Successfully created a new dataset.")
        if self._registration and self._registration.workspace:
            collect_datasets_usage(_get_logger(), _PATITION_BY_ACTIVITY,
                                   [self], self._registration.workspace, "N/A",
                                   {"execution_time": time.process_time() - starting_time,
                                    "number_of_partition_keys": len(partition_keys)})
        if name is None:
            return saved_dataset
        console("registering a new dataset.")
        registered_dataset = saved_dataset.register(datastore.workspace, name, create_new_version=True)
        console("Successfully created and registered a new dataset.")
        return registered_dataset
@experimental
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def filter(self, expression):
"""
Filter the data, leaving only the records that match the specified expression.
.. remarks::
Expressions are started by indexing the Dataset with the name of a column. They support a variety of
functions and operators and can be combined using logical operators. The resulting expression will be
lazily evaluated for each record when a data pull occurs and not where it is defined.
.. code-block:: python
dataset['myColumn'] > dataset['columnToCompareAgainst']
dataset['myColumn'].starts_with('prefix')
:param expression: The expression to evaluate.
:type expression: any
:return: The modified dataset (unregistered).
:rtype: azureml.data.TabularDataset
"""
dataflow = self._dataflow
dataflow = dataflow.filter(expression)
return TabularDataset._create(dataflow, self._properties, telemetry_info=self._telemetry_info)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def drop_columns(self, columns):
"""Drop the specified columns from the dataset.
If a timeseries column is dropped, the corresponding capabilities will be dropped for the
returned dataset as well.
:param columns: The name or a list of names for the columns to drop.
:type columns: typing.Union[str, builtin.list[str]]
:return: Returns a new TabularDataset object with the specified columns dropped.
:rtype: azureml.data.TabularDataset
"""
dataset = TabularDataset._create(
self._dataflow.drop_columns(columns), self._properties, telemetry_info=self._telemetry_info)
if isinstance(columns, str):
columns = [columns]
ts_cols = self.timestamp_columns
trait_dropped = None
if ts_cols[0] is not None:
if ts_cols[0] in columns:
dataset = dataset.with_timestamp_columns(None)
trait_dropped = 'fine_grain_timestamp, coarse_grain_timestamp'
elif ts_cols[1] is not None and ts_cols[1] in columns:
dataset = dataset.with_timestamp_columns(ts_cols[0])
trait_dropped = 'coarse_grain_timestamp'
if trait_dropped is not None:
_get_logger().info('Dropping trait ({0}) on dataset (id={1}) during drop_columns.'
.format(trait_dropped, self.id))
return dataset
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def to_parquet_files(self):
"""Convert the current dataset into a FileDataset containing Parquet files.
The resulting dataset will contain one or more Parquet files, each corresponding to a partition of data
from the current dataset. These files are not materialized until they are downloaded or read from.
:return: Returns a new FileDataset object with a set of Parquet files containing the data in this dataset.
:rtype: azureml.data.FileDataset
"""
from azureml.data.file_dataset import FileDataset
parquet_dataflow = self._dataflow.to_parquet_streams()
parquet_dataflow = get_dataflow_with_meta_flags(parquet_dataflow, file_projection='parquet')
return FileDataset._create(parquet_dataflow, telemetry_info=self._telemetry_info)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def to_csv_files(self, separator=','):
"""Convert the current dataset into a FileDataset containing CSV files.
The resulting dataset will contain one or more CSV files, each corresponding to a partition of data
from the current dataset. These files are not materialized until they are downloaded or read from.
:param separator: The separator to use to separate values in the resulting file.
:type separator: str
:return: Returns a new FileDataset object with a set of CSV files containing the data in this dataset.
:rtype: azureml.data.FileDataset
"""
from azureml.data.file_dataset import FileDataset
csv_dataflow = self._dataflow.to_csv_streams(separator=separator)
csv_dataflow = get_dataflow_with_meta_flags(csv_dataflow, file_projection='csv')
return FileDataset._create(csv_dataflow, telemetry_info=self._telemetry_info)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def time_before(self, end_time, include_boundary=True, validate=True):
"""Filter TabularDataset with time stamp columns before a specified end time.
:param end_time: Upper bound for filtering data.
:type end_time: datetime.datetime
:param include_boundary: Indicate if the row associated with the boundary time (``end_time``) should be
included.
:type include_boundary: bool
:param validate: Indicates whether to validate if specified columns exist in dataset. The default is True.
Validation requires that the data source is accessible from the current compute.
:type validate: bool
:return: A TabularDataset with the new filtered dataset.
:rtype: azureml.data.TabularDataset
"""
if self._registration and self._registration.workspace:
collect_datasets_usage(_get_logger(), _TIMESERIES_BEFORE_ACTIVITY,
[self], self._registration.workspace, "N/A")
return self._time_filter(self.time_before.__name__,
upper_bound=end_time,
include_boundary=include_boundary,
validate=validate)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def time_after(self, start_time, include_boundary=True, validate=True):
"""Filter TabularDataset with time stamp columns after a specified start time.
:param start_time: The lower bound for filtering data.
:type start_time: datetime.datetime
:param include_boundary: Indicate if the row associated with the boundary time (``start_time``) should be
included.
:type include_boundary: bool
:param validate: Indicates whether to validate if specified columns exist in dataset. The default is True.
Validation requires that the data source is accessible from the current compute.
:type validate: bool
:return: A TabularDataset with the new filtered dataset.
:rtype: azureml.data.TabularDataset
"""
if self._registration and self._registration.workspace:
collect_datasets_usage(_get_logger(), _TIMESERIES_AFTER_ACTIVITY,
[self], self._registration.workspace, "N/A")
return self._time_filter(self.time_after.__name__,
lower_bound=start_time,
include_boundary=include_boundary,
validate=validate)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def time_recent(self, time_delta, include_boundary=True, validate=True):
"""Filter TabularDataset to contain only the specified duration (amount) of recent data.
:param time_delta: The duration (amount) of recent data to retrieve.
:type time_delta: datetime.timedelta
:param include_boundary: Indicate if the row associated with the boundary time (``time_delta``)
should be included.
:type include_boundary: bool
:param validate: Indicates whether to validate if specified columns exist in dataset. The default is True.
Validation requires that the data source is accessible from the current compute.
:type validate: bool
:return: A TabularDataset with the new filtered dataset.
:rtype: azureml.data.TabularDataset
"""
if self._registration and self._registration.workspace:
collect_datasets_usage(_get_logger(), _TIMESERIES_RECENT_ACTIVITY,
[self], self._registration.workspace, "N/A")
start_time = datetime.now() - time_delta
return self._time_filter(self.time_recent.__name__,
lower_bound=start_time,
include_boundary=include_boundary,
validate=validate)
    @track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
    def time_between(self, start_time, end_time, include_boundary=True, validate=True):
        """Filter TabularDataset between a specified start and end time.

        :param start_time: The lower bound for filtering data.
        :type start_time: datetime.datetime
        :param end_time: The upper bound for filtering data.
        :type end_time: datetime.datetime
        :param include_boundary: Indicate if the row associated with the boundary time (``start_time`` and
            ``end_time``) should be included.
        :type include_boundary: bool
        :param validate: Indicates whether to validate if specified columns exist in dataset. The default is True.
            Validation requires that the data source is accessible from the current compute.
        :type validate: bool
        :return: A TabularDataset with the new filtered dataset.
        :rtype: azureml.data.TabularDataset
        """
        if self._registration and self._registration.workspace:
            collect_datasets_usage(_get_logger(), _TIMESERIES_BETWEEN_ACTIVITY,
                                   [self], self._registration.workspace, "N/A")
        return self._time_filter(self.time_between.__name__,
                                 lower_bound=start_time,
                                 upper_bound=end_time,
                                 include_boundary=include_boundary,
                                 validate=validate)
@experimental
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'})
def submit_profile_run(self, compute_target, experiment):
"""Submit an experimentation run to calculate data profile.
A data profile can be very useful to understand the input data, identify anomalies and missing values
by providing useful information about the data like column type, missing values, etc.
:param compute_target: The compute target to run the
profile calculation experiment on. Specify 'local' to use local compute.
See https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.computetarget
for more information on compute targets.
:type compute_target: typing.Union[str, azureml.core.compute.ComputeTarget]
:param experiment: The experiment object.
See https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.experiment.experiment
for more information on experiments.
:type experiment: azureml.core.experiment.Experiment
:return: An object of type DatasetProfileRun class.
:rtype: azureml.data.dataset_profile_run.DatasetProfileRun
"""
from azureml.core import Experiment, ComputeTarget
if not (isinstance(compute_target, ComputeTarget) or isinstance(compute_target, str)):
raise UserErrorException('Invalid type. compute_target should be either of type ComputeTarget or string '
'but was found to be of type {0}.'.format(type(compute_target)))
if not isinstance(experiment, Experiment):
raise UserErrorException('Invalid type. experiment should be of type azureml.core.Experiment but '
'was found to be of type {0}.'.format(type(experiment)))
from azureml.data.dataset_profile_run_config import DatasetProfileRunConfig
dprc = DatasetProfileRunConfig(self, compute_target=compute_target)
profile_run = experiment.submit(dprc)
profile_run.run.wait_for_completion(raise_on_error=True, wait_post_processing=True)
return profile_run
    @experimental
    @track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'})
    def get_profile(self, workspace=None):
        """Get data profile from the latest profile run submitted for this or the same dataset in the workspace.

        :param workspace: The workspace where profile run was submitted. Defaults to the workspace of this dataset.
            Required if dataset is not associated to a workspace.
            See https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.workspace.workspace
            for more information on workspaces.
        :type workspace: azureml.core.Workspace
        :return: Profile result from the latest profile run of type DatasetProfile.
        :rtype: azureml.data.dataset_profile.DatasetProfile
        :raises azureml.exceptions.AzureMLException: If no profile result exists or the stored result is corrupted.
        """
        workspace = self._ensure_workspace(workspace)
        saved_dataset_id = self._ensure_saved(workspace)
        # arguments [{'generate_preview': 'True', 'row_count': '1000'}] are added to ensure
        # that requestHash is same. The GenerateProfileWithPreview API add these arguments on service side.
        # If any changes are made there, this should also be changed.
        from azureml._restclient.models import ActionRequestDto
        request_dto = ActionRequestDto(
            action_type=_ACTION_TYPE_PROFILE,
            saved_dataset_id=saved_dataset_id,
            arguments={'generate_preview': 'True', 'row_count': '1000'})
        # Ask the service for the result of the most recent matching profile action.
        action_result_dto = _restclient(workspace).dataset.get_action_result(
            workspace.subscription_id,
            workspace.resource_group,
            workspace.name,
            dataset_id=_LEGACY_DATASET_ID,
            request=request_dto,
            custom_headers=_custom_headers)
        result_artifact_ids = action_result_dto.result_artifact_ids
        if result_artifact_ids is None or len(result_artifact_ids) == 0:
            raise AzureMLException('Unable to fetch profile results. Please submit a new profile run.')
        result_artifact = result_artifact_ids[0]
        from azureml._restclient.artifacts_client import ArtifactsClient
        # NOTE(review): the artifact id appears to be "<origin>/<container>/<path>";
        # maxsplit=2 keeps the remaining path segment intact — confirm against the artifacts API.
        content = ArtifactsClient(workspace.service_context).download_artifact_contents_to_string(
            *result_artifact.split("/", 2))
        try:
            from azureml.data.dataset_profile import DatasetProfile
            profile = DatasetProfile(saved_dataset_id, action_result_dto.run_id, action_result_dto.experiment_name,
                                     workspace, dataprep().DataProfile._from_json(content))
        except Exception:
            # Any deserialization failure is surfaced as a corrupted-result error.
            errormsg = 'Unable to fetch profile since profile result is corrupted. Please submit a new profile run.'
            _get_logger().error(errormsg)
            raise AzureMLException(errormsg)
        return profile
@experimental
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'})
def get_profile_runs(self, workspace=None):
"""Return previous profile runs associated with this or same dataset in the workspace.
:param workspace: The workspace where profile run was submitted. Defaults to the workspace of this dataset.
Required if dataset is not associated to a workspace.
See https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.workspace.workspace
for more information on workspaces.
:type workspace: azureml.core.Workspace
:return: iterator object of type azureml.core.Run.
:rtype: iter(azureml.core.Run)
"""
workspace = self._ensure_workspace(workspace)
from azureml._restclient.models import ActionRequestDto
request_dto = ActionRequestDto(
action_type=_ACTION_TYPE_PROFILE,
saved_dataset_id=self._ensure_saved(workspace),
arguments={'generate_preview': 'True', 'row_count': '1000'})
continuation_token = None
paginated_action_dto_list = []
index = 0
while index == 0 or continuation_token is not None:
paginated_action_dto = _restclient(workspace).dataset.list_actions_from_request(
workspace.subscription_id,
workspace.resource_group,
workspace.name,
dataset_id=_LEGACY_DATASET_ID,
request=request_dto,
count=1000,
custom_headers=_custom_headers,
continuation_token=continuation_token)
index = index + 1
for item in paginated_action_dto.value:
paginated_action_dto_list.append(item)
continuation_token = paginated_action_dto.continuation_token
if not paginated_action_dto_list:
raise AzureMLException('Unable to find any run information. Please submit a new profile run.')
run_list = []
for item in paginated_action_dto_list:
flag = True
# This is done to ensure backward compatibility. Earlier we do not persist
# run_id for local runs. Hence for older runs run_id is empty.
if item.run_id is None:
continue
from azureml.core import Experiment, get_run
experiment = Experiment(workspace, item.experiment_name)
try:
run = get_run(experiment, item.run_id)
except Exception:
flag = False
if flag:
run_list.append(run)
return iter(run_list)
| [
2,
20368,
22369,
12,
201,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
201,
198,
2,
20368,
22369,
12,
201,
198,
201,
198,
37811,
4264,
1299,
11244,
329,
10200,
1366,
287,
257,
7400,
934,
5794,
416,
32096,
262,
... | 2.402197 | 17,566 |
"""Functions for importing Losungen from the official download page"""
from typing import List
from zipfile import ZipFile
import datetime
import xml.etree.ElementTree as ET
import os
import re
import requests
import logging
from sqlalchemy.orm import Session
from losungen import SessionMaker
from losungen.models import TagesLosung
from losungen.repositories import TagesLosungRepository
LOSUNGEN_URL = "https://www.losungen.de/fileadmin/media-losungen/download"
LOSUNGEN_XML = "losungen.xml"
logger = logging.getLogger("telegram-losungen.importer")
def download_zip(year: int) -> bool:
    """Download the zipped XML file containing the Losungen of the given year.

    The archive is written to ``losungen.xml.zip`` in the working directory.

    :param year: The year whose Losungen archive should be fetched.
    :return: True if the archive was downloaded and written, False otherwise.
    """
    url = f"{LOSUNGEN_URL}/Losung_{year}_XML.zip"
    try:
        response = requests.get(url, allow_redirects=True)
        if response.status_code == 404:
            # No archive has been published for this year (yet).
            return False
        logger.info("Successfully downloaded %s", url)
    except requests.exceptions.RequestException:
        # logger.exception already records the traceback.
        logger.exception("Unable to download %s", url)
        return False
    # Use a context manager so the file handle is deterministically closed
    # (the previous open(...).write(...) leaked the handle until GC).
    with open(f"{LOSUNGEN_XML}.zip", "wb") as zip_file:
        zip_file.write(response.content)
    return True
def extract_zip(filename: str = f"{LOSUNGEN_XML}.zip") -> None:
    """Extract the first XML member of a Losungen zip archive to LOSUNGEN_XML.

    The archive file itself is deleted after extraction.

    :param filename: Path of the zip archive to unpack.
    """
    with ZipFile(filename) as archive:
        xml_members = [name for name in archive.namelist() if name.endswith(".xml")]
        with open(LOSUNGEN_XML, "wb") as xml_file:
            # As before, raises IndexError when the archive contains no XML file.
            xml_file.write(archive.read(xml_members[0]))
    os.remove(filename)
    logger.info("Successfully extracted %s", filename)
def import_xml(filename: str = LOSUNGEN_XML) -> None:
    """Import all Losungen contained in the given XML file into the database.

    :param filename: Path of the Losungen XML file to import.
    """
    db_session: Session = SessionMaker()
    repository = TagesLosungRepository(db_session)
    for daily_losung in _load_xml(filename):
        repository.add(daily_losung)
    db_session.commit()
def import_year(year: int = None) -> bool:
    """Download, extract and import the Losungen of a given year.

    Does nothing when the year is already present in the database.

    :param year: The year to import; defaults to the next calendar year.
    :return: True when the year's data is available (imported now or earlier),
        False when the archive could not be downloaded.
    """
    if year is None:
        year = datetime.date.today().year + 1
    db_session: Session = SessionMaker()
    already_present = TagesLosungRepository(db_session).get_by_year(year)
    db_session.close()
    if already_present:
        # Already imported on an earlier run.
        return True
    if not download_zip(year):
        logger.warning("Failed to download zip archive for %i", year)
        return False
    extract_zip()
    import_xml()
    logger.info("Successfully imported Losungen for %i", year)
    return True
def initial_import() -> None:
    """Import every zip archive available on the Losungen download page.

    Walks backwards from the current year and forwards from the next year,
    stopping in each direction at the first year without an archive.
    """
    this_year = datetime.date.today().year

    def _import_while_available(start: int, step: int) -> None:
        # Keep importing consecutive years until one is unavailable.
        year = start
        while import_year(year):
            year += step

    _import_while_available(this_year, -1)
    _import_while_available(this_year + 1, 1)
| [
37811,
24629,
2733,
329,
33332,
5401,
2150,
268,
422,
262,
1743,
4321,
2443,
37811,
198,
6738,
19720,
1330,
7343,
198,
6738,
19974,
7753,
1330,
38636,
8979,
198,
11748,
4818,
8079,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
... | 2.583706 | 1,117 |
import os
# set mkl thread count for numpy einsum/tensordot calls
# leave one CPU un used so we can still access this computer
import scipy.optimize
os.environ["MKL_NUM_THREADS"] = "{}".format(os.cpu_count() - 1)
# os.environ["MKL_NUM_THREADS"] = "40" # "{}".format(os.cpu_count() - 1)
import jax.numpy as jnp
from jax.config import config
config.update("jax_enable_x64", True)
# from jax.experimental import optimizers
from jax import jit, grad
from .adagrad import adagrad
import h5py
import numpy
import numpy.random
import numpy.linalg
from scipy.optimize import minimize
from uuid import uuid4
def thc_objective_jax(xcur, norb, nthc, eri):
    r"""Least-squares loss for a THC factorization, evaluated with jax numpy.

    Computes ``0.5 * \sum_{pqrs} (eri(pqrs) - G(pqrs))^2`` where
    ``G(pqrs) = \sum_{uv} X_{u,p} X_{u,q} Z_{uv} X_{v,r} X_{v,s}``.

    :param xcur: Flattened parameters: eta (nthc x norb) followed by Z (nthc x nthc).
    :param norb: number of orbitals
    :param nthc: THC-basis dimension
    :param eri: two-electron repulsion integrals in chemist notation
    :return: scalar loss value
    """
    n_eta = norb * nthc
    eta = xcur[:n_eta].reshape(nthc, norb)  # leaf tensor, nthc x norb
    core = xcur[n_eta:n_eta + nthc * nthc].reshape(nthc, nthc)  # central tensor
    # Symmetric outer product of the leaf tensor: einsum('mp,mq->pqm', eta, eta).
    leaf_outer = jnp.einsum("Pp,Pr->prP", eta, eta)
    approx = jnp.einsum('pqU,UV,rsV->pqrs', leaf_outer, core, leaf_outer, optimize=[(0, 1), (0, 1)])
    residual = eri - approx
    return 0.5 * jnp.sum(residual ** 2)
def thc_objective_grad_jax(xcur, norb, nthc, eri):
    """Gradient of the THC least-squares objective, jax-compatible.

    :param xcur: flat parameter vector holding the leaf tensor (eta)
        followed by the central tensor (Z)
    :param norb: number of orbitals
    :param nthc: THC-basis dimension
    :param eri: two-electron repulsion integrals in chemist notation
    :return: flat gradient, leaf-tensor block followed by central-tensor block
    """
    n_leaf = norb * nthc
    leaf = xcur[:n_leaf].reshape(nthc, norb)                      # nthc x norb
    core = xcur[n_leaf:n_leaf + nthc * nthc].reshape(nthc, nthc)  # nthc x nthc
    # Pairwise leaf products, equivalent to einsum('mp,mq->pqm', leaf, leaf).
    outer = jnp.einsum("Pp,Pr->prP", leaf, leaf)
    approx = jnp.einsum('pqU,UV,rsV->pqrs', outer, core, outer,
                        optimize=[(0, 1), (0, 1)])
    diff = eri - approx
    # Central-tensor gradient, O(norb^5).
    dcore = -jnp.einsum('pqrs,pqA,rsB->AB', diff, outer, outer,
                        optimize=[(0, 1), (0, 1)])
    # Leaf-tensor gradient; each einsum covers two of the four symmetric
    # index placements, hence the factor of 2. O(norb^5).
    dleaf = -2 * jnp.einsum('Tqrs,Gq,Gv,rsv->GT', diff, leaf, core, outer,
                            optimize=[(0, 3), (1, 2), (0, 1)])
    dleaf -= 2 * jnp.einsum('pqTs,pqu,uG,Gs->GT', diff, outer, core, leaf,
                            optimize=[(0, 1), (0, 2), (0, 1)])
    return jnp.hstack((dleaf.ravel(), dcore.ravel()))
def thc_objective(xcur, norb, nthc, eri, verbose=False):
    """Evaluate the THC least-squares loss with plain numpy.

    Loss: 0.5 * sum_{pqrs} (eri(pqrs) - G(pqrs))**2 where
    G(pqrs) = sum_{uv} X[u, p] X[u, q] Z[u, v] X[v, r] X[v, s].

    :param xcur: flat parameter vector holding the leaf tensor (eta)
        followed by the central tensor (Z)
    :param norb: number of orbitals
    :param nthc: THC-basis dimension
    :param eri: two-electron repulsion integrals in chemist notation
    :param verbose: optional (False) print of residual and infinity norm
    :return: scalar loss value
    """
    n_leaf = norb * nthc
    leaf = xcur[:n_leaf].reshape(nthc, norb)                      # nthc x norb
    core = xcur[n_leaf:n_leaf + nthc * nthc].reshape(nthc, nthc)  # nthc x nthc
    # Pairwise leaf products, equivalent to einsum('mp,mq->pqm', leaf, leaf).
    outer = numpy.einsum("Pp,Pr->prP", leaf, leaf)
    approx = numpy.einsum('pqU,UV,rsV->pqrs', outer, core, outer,
                          optimize=['einsum_path', (0, 1), (0, 1)])
    diff = eri - approx
    res = 0.5 * numpy.sum(diff ** 2)
    if verbose:
        print("res, max, lambda = {}, {}".format(res, numpy.max(numpy.abs(diff))))
    return res
def thc_objective_regularized(xcur, norb, nthc, eri, penalty_param, verbose=False):
    """THC least-squares loss plus a penalty on the THC lambda value.

    The fit term is 0.5 * sum_{pqrs} (eri(pqrs) - G(pqrs))**2 with
    G(pqrs) = sum_{uv} X[u, p] X[u, q] Z[u, v] X[v, r] X[v, s]; the penalty
    is penalty_param * lambda_z**2, where lambda_z is half the absolute sum
    of the norm-rescaled central tensor.

    :param xcur: flat parameter vector holding the leaf tensor (eta)
        followed by the central tensor (Z)
    :param norb: number of orbitals
    :param nthc: THC-basis dimension
    :param eri: two-electron repulsion integrals in chemist notation
    :param penalty_param: weight of the lambda regularization term
    :param verbose: optional (False) print of loss and lambda**2
    :return: scalar regularized loss value
    """
    n_leaf = norb * nthc
    leaf = xcur[:n_leaf].reshape(nthc, norb)                      # nthc x norb
    core = xcur[n_leaf:n_leaf + nthc * nthc].reshape(nthc, nthc)  # nthc x nthc
    outer = jnp.einsum("Pp,Pr->prP", leaf, leaf)
    approx = jnp.einsum('pqU,UV,rsV->pqrs', outer, core, outer,
                        optimize=[(0, 1), (0, 1)])
    diff = eri - approx
    # Gram matrix of the leaf vectors; its diagonal rescales the central
    # tensor to the normalized zeta of Eq. 11 & 12. No square roots because
    # a normalized THC vector (index mu and nu) appears on each side.
    gram = leaf.dot(leaf.T)                   # (nthc x norb)(norb x nthc)
    scale = jnp.diag(jnp.diag(gram))          # diagonal metric elements
    zeta_normalized = scale.dot(core).dot(scale)
    lambda_z = jnp.sum(jnp.abs(zeta_normalized)) * 0.5
    res = 0.5 * jnp.sum(diff ** 2) + penalty_param * (lambda_z ** 2)
    if verbose:
        print("res, max, lambda**2 = {}, {}".format(res, lambda_z ** 2))
    return res
def thc_objective_grad(xcur, norb, nthc, eri, verbose=False):
    """Gradient of the THC least-squares objective (numpy version).

    :param xcur: flat parameter vector holding the leaf tensor (eta)
        followed by the central tensor (Z)
    :param norb: number of orbitals
    :param nthc: THC-basis dimension
    :param eri: two-electron repulsion integrals in chemist notation
    :param verbose: optional (False) print of residual and infinity norm
    :return: flat gradient, leaf-tensor block followed by central-tensor block
    """
    n_leaf = norb * nthc
    leaf = numpy.array(xcur[:n_leaf]).reshape(nthc, norb)
    core = numpy.array(xcur[n_leaf:n_leaf + nthc * nthc]).reshape(nthc, nthc)
    # Pairwise leaf products, equivalent to einsum('mp,mq->pqm', leaf, leaf).
    outer = numpy.einsum("Pp,Pr->prP", leaf, leaf)
    approx = numpy.einsum('pqU,UV,rsV->pqrs', outer, core, outer,
                          optimize=['einsum_path', (0, 1), (0, 1)])
    diff = eri - approx
    res = 0.5 * numpy.sum(diff ** 2)
    if verbose:
        print("res, max, lambda = {}, {}".format(res, numpy.max(numpy.abs(diff))))
    # Central-tensor gradient, O(norb^5).
    dcore = -numpy.einsum('pqrs,pqA,rsB->AB', diff, outer, outer,
                          optimize=['einsum_path', (0, 1), (0, 1)])
    # Leaf-tensor gradient; each einsum covers two of the four symmetric
    # index placements, hence the factor of 2. O(norb^5).
    dleaf = -2 * numpy.einsum('Tqrs,Gq,Gv,rsv->GT', diff, leaf, core, outer,
                              optimize=['einsum_path', (0, 3), (1, 2), (0, 1)])
    dleaf -= 2 * numpy.einsum('pqTs,pqu,uG,Gs->GT', diff, outer, core, leaf,
                              optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
    return numpy.hstack((dleaf.ravel(), dcore.ravel()))
def thc_objective_and_grad(xcur, norb, nthc, eri, verbose=False):
    """THC least-squares loss together with its gradient (numpy version).

    Loss: 0.5 * sum_{pqrs} (eri(pqrs) - G(pqrs))**2 where
    G(pqrs) = sum_{uv} X[u, p] X[u, q] Z[u, v] X[v, r] X[v, s].

    :param xcur: flat parameter vector holding the leaf tensor (eta)
        followed by the central tensor (Z)
    :param norb: number of orbitals
    :param nthc: THC-basis dimension
    :param eri: two-electron repulsion integrals in chemist notation
    :param verbose: unused; kept for signature compatibility
    :return: (loss, flat gradient); the gradient stacks the leaf-tensor
        block before the central-tensor block
    """
    n_leaf = norb * nthc
    leaf = xcur[:n_leaf].reshape(nthc, norb)                      # nthc x norb
    core = xcur[n_leaf:n_leaf + nthc * nthc].reshape(nthc, nthc)  # nthc x nthc
    outer = numpy.einsum("Pp,Pr->prP", leaf, leaf)
    approx = numpy.einsum('pqU,UV,rsV->pqrs', outer, core, outer,
                          optimize=['einsum_path', (0, 1), (0, 1)])
    diff = eri - approx
    res = 0.5 * numpy.sum(diff ** 2)
    # Central-tensor gradient, O(norb^5).
    dcore = -numpy.einsum('pqrs,pqA,rsB->AB', diff, outer, outer,
                          optimize=['einsum_path', (0, 1), (0, 1)])
    # Leaf-tensor gradient, O(norb^4 * nthc); each einsum covers two of the
    # four symmetric index placements, hence the factor of 2.
    dleaf = -2 * numpy.einsum('Tqrs,Gq,Gv,rsv->GT', diff, leaf, core, outer,
                              optimize=['einsum_path', (0, 3), (1, 2), (0, 1)])
    dleaf -= 2 * numpy.einsum('pqTs,pqu,uG,Gs->GT', diff, outer, core, leaf,
                              optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
    return res, numpy.hstack((dleaf.ravel(), dcore.ravel()))
def cp_ls_cholesky_factor_objective(beta_gamma, norb, nthc, cholesky_factor, calcgrad=False):
    """Least-squares CP fit of a Cholesky factor B_{ab,x}.

    Objective: 0.5 * || B_{ab,x} - sum_R beta_{a,R} beta_{b,R} gamma_{x,R} ||^2.

    :param beta_gamma: flat parameters, beta (norb x nthc) followed by
        gamma (num_cholesky x nthc)
    :param norb: number of orbitals
    :param nthc: THC-basis dimension
    :param cholesky_factor: array reshaped as (norb, norb, num_cholesky)
    :param calcgrad: when True also return the stacked (beta, gamma) gradient
    :return: objective value, or (value, gradient) when calcgrad is True
    """
    nchol = cholesky_factor.shape[-1]
    n_beta = norb * nthc
    beta = beta_gamma[:n_beta].reshape((norb, nthc))
    gamma = beta_gamma[n_beta:n_beta + nthc * nchol].reshape((nchol, nthc))
    beta_outer = numpy.einsum('aR,bR->abR', beta, beta)
    approx = numpy.einsum('abR,XR->abX', beta_outer, gamma)
    delta = cholesky_factor - approx
    fval = 0.5 * numpy.sum(delta ** 2)
    if not calcgrad:
        return fval
    # dO/dbeta_{c,s} and dO/dgamma_{y,s}; the beta gradient carries a
    # factor of 2 from the two symmetric beta placements.
    grad_beta = -2 * numpy.einsum('Cbx,bS,xS->CS', delta, beta, gamma,
                                  optimize=['einsum_path', (0, 2), (0, 1)])
    grad_gamma = -numpy.einsum('abY,aS,bS->YS', delta, beta, beta,
                               optimize=['einsum_path', (1, 2), (0, 1)])
    return fval, numpy.hstack((grad_beta.ravel(), grad_gamma.ravel()))
if __name__ == "__main__":
    # Smoke test: build an exactly-representable THC problem, check the
    # numpy and jax code paths agree, then run an L-BFGS-B fit of the
    # regularized objective and report the final residual norm.
    numpy.random.seed(25)
    norb = 2
    nthc = 10
    penalty_param = 1.0E-6
    etaPp = numpy.random.randn(norb * nthc).reshape((nthc, norb))
    MPQ = numpy.random.randn(nthc**2).reshape((nthc, nthc))
    # Symmetrize the central tensor.
    MPQ = MPQ + MPQ.T

    CprP = numpy.einsum("Pp,Pr->prP", etaPp, etaPp)  # this is einsum('mp,mq->pqm', etaPp, etaPp)
    CprP_jax = jnp.einsum("Pp,Pr->prP", etaPp, etaPp)  # this is einsum('mp,mq->pqm', etaPp, etaPp)
    eri = numpy.einsum('pqU,UV,rsV->pqrs', CprP, MPQ, CprP, optimize=['einsum_path', (0, 1), (0, 1)])
    eri_jax = jnp.einsum('pqU,UV,rsV->pqrs', CprP, MPQ, CprP, optimize=[(0, 1), (0, 1)])
    assert numpy.allclose(eri_jax, eri)

    # Round-trip the parameters through the flat vector layout used by the
    # objective/gradient functions and verify nothing is scrambled.
    xcur = numpy.hstack((etaPp.ravel(), MPQ.ravel()))
    etaPp2 = xcur[:norb*nthc].reshape(nthc,norb)  # leaf tensor nthc x norb
    assert numpy.allclose(etaPp2, etaPp)
    MPQ2 = xcur[norb*nthc:norb*nthc+nthc*nthc].reshape(nthc,nthc)  # central tensor
    assert numpy.allclose(MPQ2, MPQ)
    CprP2 = numpy.einsum("Pp,Pr->prP", etaPp2, etaPp2)  # this is einsum('mp,mq->pqm', etaPp, etaPp)
    assert numpy.allclose(CprP2, CprP)
    Iapprox = numpy.einsum('pqU,UV,rsV->pqrs', CprP2, MPQ2, CprP2, optimize=['einsum_path', (0, 1), (0, 1)])
    assert numpy.allclose(Iapprox, eri)
    deri = eri - Iapprox

    res = thc_objective_regularized(xcur, norb, nthc, eri, penalty_param, verbose=True)
    print(res)
    # NOTE(review): argnums=[0] makes jax.grad return a one-element tuple;
    # scipy.optimize.minimize's jac= expects a flat array -- confirm this
    # works with the installed scipy/jax versions.
    thc_grad = grad(thc_objective_regularized, argnums=[0])
    print(thc_grad(jnp.array(xcur), norb, nthc, jnp.array(eri), penalty_param))
    res = scipy.optimize.minimize(thc_objective_regularized, jnp.array(xcur), args=(norb, nthc, jnp.array(eri), penalty_param), method='L-BFGS-B',
                                  jac=thc_grad, options={'disp': None, 'iprint': 98})
    print(res)
    # Rebuild the approximate ERI from the optimized parameters and report
    # the remaining residual norm.
    xcur = numpy.array(res.x)
    etaPp2 = xcur[:norb*nthc].reshape(nthc,norb)  # leaf tensor nthc x norb
    MPQ2 = xcur[norb*nthc:norb*nthc+nthc*nthc].reshape(nthc,nthc)  # central tensor
    CprP2 = numpy.einsum("Pp,Pr->prP", etaPp2, etaPp2)  # this is einsum('mp,mq->pqm', etaPp, etaPp)
    Iapprox = numpy.einsum('pqU,UV,rsV->pqrs', CprP2, MPQ2, CprP2, optimize=['einsum_path', (0, 1), (0, 1)])
    deri = eri - Iapprox
    print(jnp.linalg.norm(deri))
| [
11748,
28686,
198,
2,
900,
33480,
75,
4704,
954,
329,
299,
32152,
304,
1040,
388,
14,
83,
641,
585,
313,
3848,
198,
2,
2666,
530,
9135,
555,
973,
220,
523,
356,
460,
991,
1895,
428,
3644,
198,
11748,
629,
541,
88,
13,
40085,
1096,... | 1.984344 | 6,451 |
from project_template.main import return_true
| [
6738,
1628,
62,
28243,
13,
12417,
1330,
1441,
62,
7942,
628
] | 4.272727 | 11 |
import sklearn.cross_validation as cv
from sklearn import tree
import pandas as pd
import numpy as np
import os.path
import pprint
import matplotlib.pyplot as plt
import seaborn as sns
import runExperiments
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure import TanhLayer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.xml.networkwriter import NetworkWriter
from pybrain.tools.xml.networkreader import NetworkReader
def lossCalculation(model):
    """Evaluates the total loss on the dataset"""
    # Unpack the two-layer network parameters (weights and biases).
    w1, b1, w2, b2 = model["w1"], model['b1'], model["w2"], model["b2"]
    # NOTE(review): this body appears truncated -- `z1 = X.dot` is an
    # unfinished forward-pass expression and `X` is not defined anywhere in
    # this module, so the function cannot work as written. TODO: restore
    # the full implementation (forward pass + loss accumulation).
    z1 = X.dot
def normalize(lst):
    """Scale pixel coordinates into [0, 1]: x divided by 640, y by 720."""
    return [(x / 640.0, y / 720.0) for (x, y) in lst]
def createNet():
"""Create and seed the intial neural network"""
#CONSTANTS
nn_input_dim = 6 #[x_enemy1, y_enemy1, x_enemy2, y_enemy2, x_enemy3, y_enemy3]
nn_output_dim = 6 #[x_ally1, y_ally1, x_ally2, y_ally2, x_ally3, y_ally3]
allyTrainingPos, enemyTrainingPos = runExperiments.makeTrainingDataset()
ds = SupervisedDataSet(nn_input_dim, nn_output_dim)
#normalizes and adds it to the dataset
for i in range(0, len(allyTrainingPos)):
x = normalize(enemyTrainingPos[i])
y = normalize(allyTrainingPos[i])
x = [val for pair in x for val in pair]
y = [val for pair in y for val in pair]
ds.addSample(x, y)
for inpt, target in ds:
print inpt, target
net = buildNetwork(nn_input_dim, 30, nn_output_dim, bias=True, hiddenclass=TanhLayer)
trainer = BackpropTrainer(net, ds)
trainer.trainUntilConvergence()
NetworkWriter.writeToFile(net, "net.xml")
enemyTestPos = runExperiments.makeTestDataset()
print(net.activate([val for pair in normalize(enemyTestPos) for val in pair]))
return ds
def startTrials(ds, maxTrials=2, maxExperiments=2):
    """Run maxExperiments x maxTrials simulation rounds, retraining after each.

    Returns the Zergling hit-point strings harvested from the raw result
    file of every trial in which a Zergling survived.
    """
    hpCount = []
    for _experiment in range(maxExperiments):
        for _trial in range(maxTrials):
            enemyTestPos = runExperiments.makeTestDataset()
            net = NetworkReader.readFrom("net.xml")
            activation = net.activate([coord for pair in normalize(enemyTestPos) for coord in pair])
            # Pair up consecutive activations as (x, y) coordinates.
            pairIter = iter(activation)
            allyTestPos = zip(pairIter, pairIter)
            # undo normalization
            allyTestPos = map(lambda p: (abs(p[0] * 640), abs(p[1] * 720)), allyTestPos)
            print(allyTestPos)
            runExperiments.writeTestData(allyTestPos)
            runExperiments.run()
            with open("exp_results_raw.txt", "r") as resultsFile:
                lines = resultsFile.readlines()
            # A surviving Zergling marks a successful trial: keep the
            # sample for retraining and record its remaining hit points.
            if "Zerg_Zergling" in lines[1]:
                flatEnemy = [coord for pair in normalize(enemyTestPos) for coord in pair]
                flatAlly = [coord for pair in normalize(allyTestPos) for coord in pair]
                ds.addSample(flatEnemy, flatAlly)
                tail = lines[1].split("Zerg_Zergling")[-1]
                hpCount.append(tail.split(" ")[2])
            trainer = BackpropTrainer(net, ds)
            trainer.trainUntilConvergence()
    return hpCount
# Train the initial network, then run 10 experiments of 30 trials each and
# plot the recorded surviving-Zergling hit points per successful trial.
ds = createNet()
hpCount = startTrials(ds, 30, 10)
print(hpCount)
# NOTE(review): hpCount holds strings split out of the results file --
# confirm matplotlib handles them as intended (numeric conversion may be
# desirable before plotting).
fig, ax = plt.subplots()
ax.plot(range(0, len(hpCount)),hpCount)
plt.show()
11748,
1341,
35720,
13,
19692,
62,
12102,
341,
355,
269,
85,
198,
6738,
1341,
35720,
1330,
5509,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
13,
6978,
198,
11748,
279,
4798,
220,
198,
1... | 2.616484 | 1,189 |
import scipy.signal as signal
import torch
import torch.nn as nn
import numpy as np
import models
import gym
import wandb
def discounted_cumsum(rewards, reward_decay):
    """Return the reverse discounted cumulative sum of ``rewards``.

    out[t] = rewards[t] + reward_decay * out[t + 1], computed with a
    single IIR filter pass over the reversed sequence.
    Taken from https://stackoverflow.com/questions/47970683/vectorize-a-numpy-discount-calculation
    """
    reversed_rewards = rewards[::-1]
    filtered = signal.lfilter([1], [1, -reward_decay], x=reversed_rewards)
    return filtered[::-1]
| [
11748,
629,
541,
88,
13,
12683,
282,
355,
6737,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4981,
198,
11748,
11550,
198,
11748,
11569,
65,
198,
198,
4299,
29686,
62,
... | 2.517241 | 145 |
# Sample data for exercising the binary search ("busqueda binaria",
# defined elsewhere in this file).
# NOTE(review): binary search requires sorted input, but `lista` is not
# sorted -- presumably busqueda_binaria sorts it internally; verify
# against its implementation.
lista = [1,2,3,4,5,6,7,89,9,8,5,5,2,1]
print(busqueda_binaria(lista, 9))
198,
198,
4868,
64,
796,
685,
16,
11,
17,
11,
18,
11,
19,
11,
20,
11,
21,
11,
22,
11,
4531,
11,
24,
11,
23,
11,
20,
11,
20,
11,
17,
11,
16,
60,
198,
4798,
7,
10885,
421,
18082,
62,
8800,
10312,
7,
4868,
64,
11,
860,
4008... | 1.510204 | 49 |
import unittest
from exam_10apr.project.survivor import Survivor
# Run the Survivor test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
2814,
62,
940,
499,
81,
13,
16302,
13,
48846,
452,
273,
1330,
23740,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 2.674419 | 43 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
"""Tests for security api methods"""
import json
import jwt
from tests.integration_tests.base_tests import SupersetTestCase
from flask_wtf.csrf import generate_csrf
from superset.utils.urls import get_url_host
| [
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
220,
383,
7054,... | 3.823308 | 266 |
# Parse the input from the input file.
# Each line holds ten signal patterns and four output digits separated by
# " | "; every pattern is sorted alphabetically so that permuted segment
# strings compare equal later on.
input_file = 'example_input.txt'
inputs = []
outputs = []

# The file handle was previously named `input`, shadowing the builtin;
# iterate the file lazily instead of materializing it with readlines().
with open(input_file) as infile:
    for line in infile:
        parts = line.split(' | ')
        # Sort every string alphabetically to make further analysis easier
        inputs.append([''.join(sorted(digit)) for digit in parts[0].split()])
        outputs.append([''.join(sorted(digit)) for digit in parts[1].split()])
# Task 1: count occurrences of digits 1, 4, 7 and 8 in all outputs.
# They light 2, 4, 3 and 7 segments respectively -- the only segment
# counts that identify a digit uniquely.
count = sum(1 for output in outputs
            for digit in output
            if len(digit) in {2, 3, 4, 7})

print(f'Digits 1, 4, 7 and 8 appear {count} times in the output values.')
print()
# Task 2
# Decode every output into a 4 digit number by deducing, per display, which
# sorted segment-string stands for which digit.
decoded_outputs = [0] * len(outputs)
# NOTE(review): the loop variable `input` shadows the builtin of the same
# name; harmless here but worth renaming.
for i, input in enumerate(inputs):
    # Index of a sequence in decode_key will tell the number that sequence represents
    decode_key = [''] * 10
    # First the digits with a unique segment count (1, 4, 7, 8).
    decode_key[1] = [digit for digit in input if len(digit) == 2][0]
    decode_key[4] = [digit for digit in input if len(digit) == 4][0]
    decode_key[7] = [digit for digit in input if len(digit) == 3][0]
    decode_key[8] = [digit for digit in input if len(digit) == 7][0]
    # 3 is the only 5-segment digit containing both segments of 1.
    decode_key[3] = ([digit for digit in input if len(digit) == 5 and
                      all([decode_key[1][0] in digit, decode_key[1][1] in digit])][0])
    # 6 is the only 6-segment digit NOT containing all segments of 7.
    decode_key[6] = ([digit for digit in input if len(digit) == 6 and
                      not all([decode_key[7][0] in digit,
                               decode_key[7][1] in digit,
                               decode_key[7][2] in digit])
                      ][0])
    # 9 is the only 6-segment digit containing all segments of 3.
    decode_key[9] = ([digit for digit in input if len(digit) == 6 and
                      all([decode_key[3][0] in digit,
                           decode_key[3][1] in digit,
                           decode_key[3][2] in digit,
                           decode_key[3][3] in digit,
                           decode_key[3][4] in digit])
                      ][0])
    # 5 is the only 5-segment digit all of whose segments appear in 6.
    decode_key[5] = ([digit for digit in input if len(digit) == 5 and
                      all([digit[0] in decode_key[6],
                           digit[1] in decode_key[6],
                           digit[2] in decode_key[6],
                           digit[3] in decode_key[6],
                           digit[4] in decode_key[6]])
                      ][0])
    # 2 is the remaining 5-segment digit (3 and 5 are known by now).
    decode_key[2] = [digit for digit in input if len(digit) == 5 and digit not in decode_key][0]
    # 0 is the only pattern still unassigned.
    decode_key[0] = [digit for digit in input if digit not in decode_key][0]
    # Map each output pattern to its digit and join into a 4-digit number.
    decoded_outputs[i] = int(''.join([str(decode_key.index(digit)) for digit in outputs[i]]))

# Sum of all decoded outputs -- the puzzle answer.
output_sum = sum(decoded_outputs)
print(f'Sum of all decoded output values is {output_sum}.')
198,
2,
2547,
325,
262,
5128,
422,
262,
5128,
2393,
198,
15414,
62,
7753,
796,
705,
20688,
62,
15414,
13,
14116,
6,
198,
15414,
82,
796,
17635,
198,
22915,
82,
796,
17635,
198,
198,
4480,
1280,
7,
15414,
62,
7753,
8,
355,
5128,
25... | 2.374716 | 1,321 |
"""Unit tests for :mod:`pathcensus.nullmodels`."""
# pylint: disable=redefined-outer-name
import random
from itertools import product
import pytest
import numpy as np
from pathcensus.nullmodels import UBCM, UECM
from pathcensus.utils import rowsums, set_numba_seed
from pathcensus.utils import relclose
from tests.utils import make_er_graph, make_rgg, add_random_weights
from tests.utils import get_largest_component
# Graph families and RNG seeds used to parametrize the fixtures below.
FAMILY = ("erdos_renyi", "geometric")
SEEDS = (20, 40)
# Cartesian product: one graph fixture instantiation per (family, seed).
_params = list(product(FAMILY, SEEDS))
_methods = ("newton", "fixed-point")
# (model, solver-method) combinations for the UBCM and UECM fixtures.
_ubcm_params = list(product(["cm_exp", "cm"], _methods))
_uecm_params = list(product(["ecm_exp", "ecm"], _methods))
@pytest.fixture(scope="session", params=_params)
def small_graph(request):
    """Generate a small graph (ER or random geometric) per (family, seed)."""
    family, seed = request.param
    random.seed(seed)
    # Pick the generator for the requested family and keep only the
    # largest connected component.
    make_graph = make_rgg if family == "geometric" else make_er_graph
    graph = get_largest_component(make_graph(50, 5))
    return graph, seed
@pytest.fixture(scope="session", params=_ubcm_params)
def small_graph_ubcm(request, small_graph):
    """Fit a UBCM to a small graph for every (model, method) combination."""
    model, method = request.param
    graph, seed = small_graph
    ubcm = UBCM(graph)
    ubcm.fit(model, method)
    # Hand back the fitted model together with the seed and source graph.
    return ubcm, seed, graph
@pytest.fixture(scope="session", params=_uecm_params)
def small_graph_uecm(request, small_graph):
    """Fit a UECM to a small graph for every (model, method) combination.

    NOTE(review): in the original source this decorator dangled directly
    above ``TestUBCM`` with no function under it, which would have turned
    the test class itself into a fixture and left the ``small_graph_uecm``
    fixture required by ``TestUECM`` undefined. The body is reconstructed
    to mirror ``small_graph_ubcm``, matching the ``(uecm, seed, graph)``
    unpacking used by ``TestUECM`` -- confirm against the upstream file.
    """
    model, method = request.param
    graph, seed = small_graph
    uecm = UECM(graph)
    uecm.fit(model, method)
    return uecm, seed, graph
class TestUBCM:
    """Unit tests for Unweighted Binary Configuration Model."""
    def test_ubcm(self, small_graph_ubcm):
        """Test whether the expected degree sequence in UBCM approximates
        the observed sequence.
        """
        ubcm, *_ = small_graph_ubcm
        # Newton solutions are much tighter than fixed-point ones.
        rtol = 1e-6 if ubcm.fit_args["method"] == "newton" else 1e-3
        assert ubcm.is_fitted()
        assert ubcm.is_valid(rtol)
        P = ubcm.get_P(dense=True)
        # Row sums of the edge-probability matrix are the expected degrees.
        assert relclose(P.sum(axis=1), ubcm.D, rtol=rtol)
    def test_ubcm_sampling(self, small_graph_ubcm):
        """Test convergence of the average over degree sequences sampled
        from UBCM towards the observed sequence.
        """
        ubcm, seed, _ = small_graph_ubcm
        # NOTE(review): both branches yield 1e-1 -- the "else" value was
        # presumably meant to differ; confirm the intended tolerance.
        rtol = 1e-1 if ubcm.fit_args["method"] == "newton" else 1e-1
        D = ubcm.D
        E = np.zeros_like(D, dtype=float)
        n = 1000
        set_numba_seed(seed)
        # Average the degree sequences of n sampled graphs.
        for rand in ubcm.sample(n):
            E += rowsums(rand)
        E = E / n
        assert relclose(D, E, rtol=rtol)
    def test_ubcm_seed(self, small_graph_ubcm):
        """Test if setting random seed for sampling works correctly."""
        ubcm, seed, _ = small_graph_ubcm
        set_numba_seed(seed)
        A1 = ubcm.sample_one()
        set_numba_seed(seed)
        A2 = ubcm.sample_one()
        # Identical seeds must reproduce the exact same sparse matrix.
        assert (A1 != A2).count_nonzero() == 0
class TestUECM:
    """Unit tests for Unweighted Enhanced Configuration Model."""
    def test_uecm(self, small_graph_uecm):
        """Test whether the expected degree and strength sequences in UECM
        approximate the observed sequences.
        """
        uecm, *_ = small_graph_uecm
        # Newton solutions are tighter than fixed-point ones.
        rtol = 1e-1 if uecm.fit_args["method"] == "newton" else 2e-1
        assert uecm.is_fitted()
        assert uecm.is_valid(rtol)
        P = uecm.get_P(dense=True)
        W = uecm.get_W(dense=True)
        # Row sums of P give expected degrees; row sums of W give expected
        # strengths.
        assert relclose(P.sum(axis=1), uecm.D, rtol=rtol)
        assert relclose(W.sum(axis=1), uecm.S, rtol=rtol)
    def test_uecm_sampling(self, small_graph_uecm):
        """Test convergence of the averages over degree and strength
        sequences sampled from UECM towards the observed sequences.
        """
        uecm, seed, _ = small_graph_uecm
        rtol = 1e-1 if uecm.fit_args["method"] == "newton" else 2e-1
        D = uecm.D
        S = uecm.S
        ED = np.zeros_like(D, dtype=float)
        ES = np.zeros_like(S, dtype=float)
        n = 1000
        set_numba_seed(seed)
        for rand in uecm.sample(n):
            # Strengths first; then binarize in place to count degrees.
            ES += rowsums(rand)
            rand.data[:] = 1
            ED += rowsums(rand)
        ED /= n
        ES /= n
        assert relclose(D, ED, rtol=rtol)
        assert relclose(S, ES, rtol=rtol)
| [
37811,
26453,
5254,
329,
1058,
4666,
25,
63,
6978,
66,
7314,
13,
8423,
27530,
63,
526,
15931,
198,
2,
279,
2645,
600,
25,
15560,
28,
445,
18156,
12,
39605,
12,
3672,
198,
11748,
4738,
198,
6738,
340,
861,
10141,
1330,
1720,
198,
117... | 2.237991 | 1,832 |
try:
import matplotlib.pyplot as plt
except ModuleNotFoundError:
plt = None
import numpy as np
from scipy.interpolate import interp1d
from scipy.signal import medfilt
from rta.models.interpolant import Interpolant
from rta.models.spline import Spline
from rta.array_operations.dedupy import dedup_np
from rta.math.splines import beta as beta_spline
class RollingMedian(Interpolant):
    """Rolling-median interpolator.

    Idea is as straight as a hair of a Mongol: suppress the noise by
    fitting a rolling median and then interpolating every k-th median.
    Since all intermediate medians are computed anyway, their evaluation
    could be made more elaborate later on.
    """
    def __init__(self, ws=51, k=10):
        """Constructor.

        Args:
            ws (odd int): window size.
            k (int): each k-th median will be used for interpolation
        """
        self.ws = ws
        self.k = k
        self.params = {'ws': ws, 'k': k}

    def fit(self, x, y, sort=True):
        """Fit the model.

        Args:
            x (np.array): The control variable.
            y (np.array): The response variable.
            sort (bool): sort (x, y) by x before fitting.
        """
        if sort:
            order = np.argsort(x)
            x = x[order]
            y = y[order]
        self.medians = medfilt(y, self.ws)
        # Interpolate through every k-th median; outside the fitted range
        # the interpolant evaluates to 0 instead of raising.
        self.interpo = interp1d(x[::self.k], self.medians[::self.k],
                                bounds_error=False, fill_value=0)
        self.x = x
        self.y = y
#TODO: implement this.
# NOTE(review): class name carries a typo ("Rollling", three l's); renaming
# would break existing callers, so it is only flagged here.
class RolllingMedianSimple(RollingMedian):
    """Avoid calculating too many medians."""
    def fit(self, x, y, sort=True):
        """Fit the model.

        Args:
            x (np.array): The control variable.
            y (np.array): The response variable.
        """
        # TODO: implement a fit that computes only every k-th median
        # instead of the full rolling median of the parent class.
        pass
class RollingMedianSpline(Spline):
    """Beta-spline smoother driven by rolling medians.

    NOTE(review): only the constructor is visible in this excerpt; the
    fitting logic presumably follows elsewhere in the file.
    """
    def __init__(self, ws=51, n=100):
        """Constructor.

        Args:
            ws (odd int): window size.
            n (int): the number of nodes used for the beta spline (roughly correspond to 100/k-percentiles).
        """
        self.ws = ws
        self.n = n
        self.params = {'ws':ws, 'n':n}  # stored so that copy() can rebuild the instance
| [
28311,
25,
198,
220,
220,
220,
1330,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
16341,
19937,
3673,
21077,
12331,
25,
198,
220,
220,
220,
458,
83,
796,
6045,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
1... | 2.12406 | 1,064 |
import torch
import numpy as np
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
from . import losses
from util.metrics import PSNR
import pytorch_msssim
import random
import torch.nn.functional as F
| [
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
7736,
13,
9060,
62,
7742,
1330,
7412,
27201,
198,
6738,
764,
8692,
62,
19849,
1330,
7308,
17633,
198,
6738,
764,
1330,
7686,
198,
6738,
764,
1330,
9089,
198,
6738,
7736,
13,
... | 3.617647 | 68 |
import h5py
import os
import unittest
import numpy as np
| [
11748,
289,
20,
9078,
198,
11748,
28686,
628,
628,
198,
11748,
555,
715,
395,
198,
11748,
299,
32152,
355,
45941,
198
] | 2.904762 | 21 |
import tcod as libtcodpy
from random import randint
from components.ai import BasicMonster
from components.equipment import EquipmentSlots
from components.equippable import Equippable
from components.fighter import Fighter
from components.item import Item
from components.stairs import Stairs
from entity import Entity
from game_messages import Message
from item_functions import cast_confuse, cast_fireball, cast_lightning, heal
from map_objects.rectangle import Rect
from map_objects.tile import Tile
from random_utils import from_dungeon_level, random_choice_from_dict
from render_functions import RenderOrder
# Map-generation parameters -- presumably consumed by a BSP dungeon
# generator outside this excerpt; confirm against its implementation.
DEPTH = 10  # recursion depth of the BSP split -- TODO confirm
MIN_SIZE = 5  # minimum node/room dimension -- TODO confirm
FULL_ROOMS = False  # when True, rooms presumably fill their whole BSP leaf
MAP_WIDTH = 63
MAP_HEIGHT = 40
bsp_rooms = []  # accumulator for rooms produced during BSP traversal
# Create next floor and heal player
| [
11748,
256,
19815,
355,
9195,
83,
19815,
9078,
198,
6738,
4738,
1330,
43720,
600,
198,
198,
6738,
6805,
13,
1872,
1330,
14392,
40872,
198,
6738,
6805,
13,
4853,
4667,
1330,
22412,
11122,
1747,
198,
6738,
6805,
13,
4853,
3974,
540,
1330,... | 3.596154 | 208 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
from collections import defaultdict
from random import randint
import six
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1726... | 4.148148 | 54 |
from aocd import get_data
if __name__ == '__main__':
    # Fetch the personal puzzle input for Advent of Code 2018, day 5, via
    # the aocd helper (requires a configured session token).
    data = get_data(day=5, year=2018)
    inp = data
    # part1/part2 are defined elsewhere in this file (outside this excerpt).
    print(part1(inp))
    print(part2(inp))
| [
6738,
257,
420,
67,
1330,
651,
62,
7890,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1366,
796,
651,
62,
7890,
7,
820,
28,
20,
11,
614,
28,
7908,
8,
198,
220,
220,
220,
287,
... | 2.123288 | 73 |
from .client import *
from .image_client import *
from .format_client import *
| [
6738,
764,
16366,
1330,
1635,
198,
6738,
764,
9060,
62,
16366,
1330,
1635,
198,
6738,
764,
18982,
62,
16366,
1330,
1635,
198
] | 3.590909 | 22 |
import setuptools
# Use the README as the long description shown on PyPI.
with open("README.md", "r") as readme:
    long_description = readme.read()

setuptools.setup(
    name="swiftai",
    version="0.1",
    author="Aakash N S",
    author_email="opensource@swiftace.ai",
    description="Utilities and helper functions for Pytorch and FastAI deep learning libraries",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/aakashns/swiftai",
    # Automatically pick up all packages in the source tree.
    packages=setuptools.find_packages(),
    classifiers=(
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ),
)
11748,
900,
37623,
10141,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
4943,
355,
277,
71,
25,
198,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
198,
198,
2617,
37623,
10141,
13,
40406,
7,
198,
220,
1438,
... | 3 | 207 |
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import os
import logging
import nltk.data
from core import utils
from core.progressbar import ProgressBar, Percentage, Bar, ETA, FormatLabel, AnimatedMarker
import codecs
from modules.brain.mlbrain import MLBrain
from modules.machinelogic.imachinelogic.mlimachinelogic import MLInternalMachineLogicTrainer
from modules.nlp.mlnlp import MLNLP
from modules.concept.mlconcept import MLConcept
from modules.mlbendertrainingmodule import MLBenderTrainingModule
import shutil
import pickle
import json
"""
Training-Data-Structure:
class TrainingDataConverter
Copyright (c) 2019 Imdat Solak
Written: 2017-04-12 00:00 CET, ISO
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
198,
11748,
25064,
220,
198,
260,
2220,
7,
17597,
8,
198,
17597,
13,
2617,
12286,
12685,
... | 3.214286 | 238 |
#!/usr/bin/env python
"""
Copyright 2018 Johns Hopkins University (Author: Jesus Villalba)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
import os
import logging
from jsonargparse import ArgumentParser, namespace_to_dict
import math
import numpy as np
from hyperion.hyp_defs import float_cpu, config_logger
from hyperion.utils import Utt2Info
from hyperion.io import RandomAccessDataReaderFactory as DRF
if __name__ == "__main__":
    # CLI entry point: convert x-vector language-id logits into labels.
    parser = ArgumentParser(description="Transform xvector logits into labels")
    parser.add_argument("--list-file", required=True)
    parser.add_argument("--logits-file", required=True)
    parser.add_argument("--class-file", required=True)
    parser.add_argument("--output-file", required=True)
    parser.add_argument(
        "--sre21",
        default=False,
        action="store_true",
        help="If SRE21 only ENG/CMN/YUE are allowed",
    )
    parser.add_argument(
        "-v", "--verbose", dest="verbose", default=1, choices=[0, 1, 2, 3], type=int
    )
    args = parser.parse_args()
    # Verbosity only configures the logger; strip it before forwarding the
    # remaining arguments to estimate_lid_labels (defined elsewhere in
    # this file, outside this excerpt).
    config_logger(args.verbose)
    del args.verbose
    logging.debug(args)
    estimate_lid_labels(**namespace_to_dict(args))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
15069,
2864,
25824,
21183,
2059,
220,
357,
13838,
25,
5803,
9757,
282,
7012,
8,
198,
220,
24843,
362,
13,
15,
220,
357,
4023,
1378,
2503,
13,
43073,
13,
2398,
14,
677,
45... | 2.707483 | 441 |
from django.conf.urls import include
from django.urls import path
from rest_framework import routers
from .viewsets import UserViewSet
# Register the user endpoints at the router root; DefaultRouter also adds
# the browsable API index view.
ROUTER = routers.DefaultRouter()
ROUTER.register(r'', UserViewSet, basename='User')
# Expose every route generated by the router under this app's URL prefix.
urlpatterns = (
    path('', include(ROUTER.urls)),
)
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
2291,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
1334,
62,
30604,
1330,
41144,
198,
198,
6738,
764,
1177,
28709,
1330,
11787,
7680,
7248,
628,
198,
49,
2606,
5... | 2.957447 | 94 |
"""
Creating standalone Django apps is a PITA because you're not in a project, so
you don't have a settings.py file. I can never remember to define
DJANGO_SETTINGS_MODULE, so I run these commands which get the right env
automatically.
"""
import argparse
import os
import sys
from subprocess import call, check_output
NAME = os.path.basename(os.path.dirname(__file__))
ROOT = os.path.abspath(os.path.dirname(__file__))
os.environ['PYTHONPATH'] = os.pathsep.join([ROOT,
os.path.join(ROOT, 'examples')])
SETTINGS = (
'locmem_settings',
'settings',
'memcache_byid',
'custom_backend',
'redis_settings',
'redis_byid',
'django_redis_settings',
)
if __name__ == "__main__":
main()
| [
37811,
198,
32071,
27669,
37770,
6725,
318,
257,
49040,
32,
780,
345,
821,
407,
287,
257,
1628,
11,
523,
198,
5832,
836,
470,
423,
257,
6460,
13,
9078,
2393,
13,
220,
314,
460,
1239,
3505,
284,
8160,
198,
35028,
1565,
11230,
62,
284... | 2.403175 | 315 |
from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from allauth.utils import generate_unique_username
from profiles.models import Profile
from profiles.forms import UpgradeUserForm, CreateTempAcctForm
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
7738,
1060,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
8323,
5344,
11,
17594,
198,
6738,
42625,
14208,
... | 3.976471 | 85 |
from django.utils.importlib import import_module
| [
6738,
42625,
14208,
13,
26791,
13,
11748,
8019,
1330,
1330,
62,
21412,
628
] | 3.846154 | 13 |
# -*- coding: utf-8 -*-
from bhcrjyApp.AppUtils.HttpMessageTool import HttpUtils
from bhcrjyApp.app import loginCheck
from flask.blueprints import Blueprint
from flask import request, make_response
from flask import render_template, redirect, abort, url_for
bp = Blueprint('index', __name__, url_prefix='/')
@bp.route('/main_index', methods=['GET', 'POST'])
@loginCheck
def main_index():
"""
首页展示
:return:
"""
response = make_response(render_template('index/index.html', skipClass=url_for('looktax.skipClass')))
return response
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
275,
71,
6098,
73,
88,
4677,
13,
4677,
18274,
4487,
13,
43481,
12837,
25391,
1330,
367,
29281,
18274,
4487,
198,
6738,
275,
71,
6098,
73,
88,
4677,
13,
1324... | 2.743842 | 203 |
from osr2mp4.ImageProcess.Objects.FrameObject import FrameObject
| [
6738,
267,
27891,
17,
3149,
19,
13,
5159,
18709,
13,
10267,
82,
13,
19778,
10267,
1330,
25184,
10267,
628
] | 3.473684 | 19 |
# coding: utf-8
from __future__ import absolute_import
from swagger_server.models.inline_response2001 import InlineResponse2001
from swagger_server.models.inline_response2002 import InlineResponse2002
from . import BaseTestCase
from six import BytesIO
from flask import json
class TestConceptsController(BaseTestCase):
""" ConceptsController integration test stubs """
def test_get_concept_details(self):
"""
Test case for get_concept_details
"""
response = self.client.open('/api/concepts/{conceptId}'.format(conceptId='conceptId_example'),
method='GET')
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
def test_get_concepts(self):
"""
Test case for get_concepts
"""
query_string = [('keywords', 'keywords_example'),
('semgroups', 'semgroups_example'),
('pageNumber', 56),
('pageSize', 56)]
response = self.client.open('/api/concepts',
method='GET',
query_string=query_string)
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
if __name__ == '__main__':
import unittest
unittest.main()
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
1509,
7928,
62,
15388,
13,
27530,
13,
45145,
62,
26209,
14585,
1330,
554,
1370,
31077,
14585,
198,
6738,
1509,
7928,
62,
15388,
... | 2.226754 | 613 |
import os
from mock import patch
import pytest
from django.core.urlresolvers import reverse
from seahub.test_utils import BaseTestCase
TRAVIS = 'TRAVIS' in os.environ
| [
11748,
28686,
198,
6738,
15290,
1330,
8529,
198,
11748,
12972,
9288,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
9575,
198,
198,
6738,
384,
993,
549,
13,
9288,
62,
26791,
1330,
7308,
14402,
20448,
198,
198,
... | 3.053571 | 56 |
from inputs import get_gamepad
import serial
ser=serial.Serial('COM4',115200)
prevB=0
while True:
events = get_gamepad()
#X,Y [-32768,+32768]
#Z,RZ(RT,LT) [0,255]
local=ser.read_all()
if len(local)>0:
print(local.decode())
for event in events:
# print(event.ev_type, event.code, event.state)
if event.code=='ABS_X':
snd='X'+str(int(event.state/512))+'\r'
ser.write(snd.encode())
if event.code=='ABS_Y':
snd='Y'+str(int(event.state/512))+'\r'
ser.write(snd.encode())
if event.code=='ABS_RZ':
snd='R'+str(int(event.state))+'\r'
ser.write(snd.encode())
if event.code=='ABS_Z':
snd='L'+str(int(event.state))+'\r'
ser.write(snd.encode())
if event.code=='BTN_EAST':
if event.state==0 and prevB==1:
ser.close()
ser.open()
exit(0)
else:
prevB=event.state
| [
6738,
17311,
1330,
651,
62,
6057,
15636,
201,
198,
11748,
11389,
201,
198,
2655,
28,
46911,
13,
32634,
10786,
9858,
19,
3256,
15363,
2167,
8,
201,
198,
47050,
33,
28,
15,
201,
198,
4514,
6407,
25,
201,
198,
220,
220,
220,
2995,
796,... | 1.690438 | 617 |
# -*- coding: utf-8 -*-
import util.flatfiles.shared
from util.conversions import *
'''
helper methods for formatting CSS flatfiles
All methods use ljust to left justify the field.
The integer argument to ljust specifies that a field has a width of exactly that integer value: the field
is padded with whitespace if the contents do not subsume the entire width.
The slicing (truncating) mechanism is added at the end of each field as a safeguard to
ensure that a field has width no more than the integer value.
'''
# Generic format
'''
1 space
'''
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
7736,
13,
38568,
16624,
13,
28710,
198,
6738,
7736,
13,
1102,
47178,
1330,
1635,
198,
198,
7061,
6,
198,
2978,
525,
5050,
329,
33313,
17391,
6228,
16624,
198,
323... | 3.198113 | 212 |
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) 2021 Dhruv Agarwal and authors of arboEL.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import json
import math
import time
import torch
from torch.utils.data import (DataLoader, SequentialSampler)
import numpy as np
from tqdm import tqdm
import pickle
import faiss
from itertools import compress
from sklearn.cluster import KMeans
from sklearn.metrics.cluster import adjusted_rand_score, normalized_mutual_info_score
from scipy.sparse import coo_matrix
from scipy.sparse.csgraph import connected_components
from special_partition.special_partition import cluster_linking_partition
from collections import defaultdict
import blink.biencoder.data_process_mult as data_process
import blink.candidate_ranking.utils as utils
from blink.common.params import BlinkParser
from blink.biencoder.biencoder import BiEncoderRanker
from IPython import embed
if __name__ == "__main__":
parser = BlinkParser(add_model_args=True)
parser.add_eval_args()
args = parser.parse_args()
print(args)
main(args.__dict__)
| [
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
2,
15069,
357,
66,
8,
33448,
20529,
622,
85,
2449,
283,
16783,
290,
7035,
286,
610,
2127,
3698,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
770,
2723,
... | 3.278689 | 366 |
#!/usr/bin/python3
__author__ = "Colin Reese"
__copyright__ = "Copyright 2016, Interface Innovations"
__credits__ = ["Colin Reese"]
__license__ = "Apache 2.0"
__version__ = "1.0"
__maintainer__ = "Colin Reese"
__email__ = "support@interfaceinnovations.org"
__status__ = "Development"
if __name__ == "__main__":
takesnap() | [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
834,
9800,
834,
796,
366,
5216,
259,
39929,
1,
198,
834,
22163,
4766,
834,
796,
366,
15269,
1584,
11,
26491,
43405,
602,
1,
198,
834,
66,
20696,
834,
796,
14631,
5216,
259,
39929,
... | 2.710744 | 121 |
#########################################################
## generate the ensemble predictions for task 2 ##
## created by Isaac Keohane isaackeohane95@gmail.com ##
#########################################################
import os
import sys
module_path = os.path.abspath(os.path.join('../src'))
if module_path not in sys.path:
sys.path.append(module_path)
import numpy as np
import os
from matplotlib import pyplot as plt
from utils.file import load_from_json
from scripts.ourFuncs_task2 import generate_subset
from scripts.setup_ensemble import setup_ensemble
# load experiment configurations
trans_configs = load_from_json("../src/configs/demo/athena-mnist.json")
model_configs = load_from_json("../src/configs/demo/model-mnist.json")
data_configs = load_from_json("../src/configs/demo/data-mnist.json")
output_dir = "../ourDataFiles/ensembleOuts"
save_output = True
verbose = 10
#####################################################
### setup the ensemble pool of weak defenses
# This wdList can be changed to a list of indexes of weak defenses in the
# athena-mnist.json file to get a custom set of weak defenses used in the
# emsemble. Make sure to then set "customList" True and "useActi..." False
# both set to False makes it use all the transformations in trans_configs
wdList = []
useActiveList = False
customList = False
# run setup_ensemble to make an ensemble pool of weak defenses
athena = setup_ensemble(trans_configs=trans_configs,
model_configs=model_configs,
use_logits=False,
useActiveList=useActiveList,
customList=customList, wdList=wdList)
######################################################
### generate subset indexes for exmaples and save info file
# define the subset parameters
numberToSubset = 100
doRandom = True
totalNumData = 100
# generate subset indexes to grab benign samples
subset, subsetElse = generate_subset(totalSize=totalNumData,doSave=True,
number=numberToSubset,doRandom=doRandom,
opath=[r"../ourInfoSaves/ensPred_subset.npy",
r"../ourInfoSaves/ensPred_subsetElse.npy"])
# save info in a text file
if save_output:
info_file = open(r"../ourInfoSaves/infoFile_ensPred.txt","w")
info_file.write("Info file for ensemble predictions\n\n")
info_file.write("numberToSubset: {}, doRandom: {}\nsubset:\n".format(
numberToSubset, doRandom))
info_file.write("{}\n\n".format(subset))
info_file.write("useActiveList: {}\ncustomList: {}\nwdList: \n{}\n\n".format(
str(useActiveList), str(customList), wdList) )
info_file.write("dimensions of raw npy arrays: wd, input, class")
info_file.close()
############################################################################
## generate and collect probabilities of benign samples
bs_file = os.path.join(data_configs.get('dir'), data_configs.get('bs_file'))
x_bs = np.load(bs_file)
if(verbose>5): print("\nbenign sample data dimensions: {}\n".format(x_bs.shape))
totalNumData = x_bs.shape[0]
x_bs = [x_bs[i] for i in subset]
# grab predictions
preds = athena.predict(x=x_bs) # raw is False by default
preds_raw = athena.predict(x=x_bs,raw=True)
if(verbose>5): print("\n>>> Shape of benign ensemble predictions: {}\n".format(preds.shape))
if save_output:
np.save(output_dir+"/"+"ensemPredic_benign_raw.npy",preds_raw)
np.save(output_dir+"/"+"ensemPredic_benign.npy",preds)
###########################################################################
### generate and collect the probabilities for our advers. examples
ae_dir, ae_files = data_configs.get('ae_dir'), data_configs.get('ae_files')
for ae_file in ae_files:
ae_file1 = os.path.join(ae_dir, ae_file)
x_ae = np.load(ae_file1)
x_ae = [x_ae[i] for i in subset]
# grab predictions
preds = athena.predict(x=x_ae) # raw is False by default
preds_raw = athena.predict(x=x_ae,raw=True)
if save_output:
np.save(output_dir+"/"+"ensemPredic_raw_{}".format(ae_file),preds_raw)
np.save(output_dir+"/"+"ensemPredic_{}".format(ae_file),preds)
if(verbose>5): print("\n>>> Shape of ae ensemble {} predictions: {}\n".format(ae_file,preds.shape))
##################################################33
dirt = '/home/isaac/working_directory/misc/project-athena/data2_genAEs_weakD'
dirs = os.listdir(dirt)
results = []
results += [file for file in dirs]
for filename in results:
x_ae = np.load(dirt + '/' + filename)
x_ae = [x_ae[i] for i in subset]
# grab predictions
preds = athena.predict(x=x_ae) # raw is False by default
preds_raw = athena.predict(x=x_ae,raw=True)
if save_output:
np.save(output_dir+"/"+"ensemPredic_raw_{}".format(filename),preds_raw)
np.save(output_dir+"/"+"ensemPredic_{}".format(filename),preds)
if(verbose>5): print("\n>>> Shape of ae ensemble {} predictions: {}\n".format(filename,preds.shape))
| [
29113,
14468,
7804,
2,
198,
2235,
220,
220,
220,
7716,
262,
34549,
16277,
329,
4876,
362,
220,
220,
220,
220,
22492,
198,
2235,
220,
2727,
416,
19068,
3873,
1219,
1531,
318,
64,
330,
365,
1219,
1531,
3865,
31,
14816,
13,
785,
220,
2... | 2.590423 | 1,963 |
import tensorflow as tf
import keras
from keras.preprocessing import image
from keras.initializers import glorot_uniform
import numpy as np
import cv2
import os
import csv
IMG_SIZE = 224
color = {}
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
41927,
292,
198,
6738,
41927,
292,
13,
3866,
36948,
1330,
2939,
198,
6738,
41927,
292,
13,
36733,
11341,
1330,
26996,
313,
62,
403,
6933,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
... | 2.574713 | 87 |
#!/usr/bin/env python
####################
# Required Modules #
####################
# Generic/Built-in
import os
from typing import Dict
# Libs
import jsonschema
import tinydb
# Custom
from .base import TopicalRecords, AssociationRecords
from .config import SCHEMAS as schemas
##################
# Configurations #
##################
#############################################
# Data Storage Class - CollaborationRecords #
#############################################
###########
# Helpers #
###########
##################
# Core Functions #
##################
#######################################
# Data Storage Class - ProjectRecords #
#######################################
###########
# Helpers #
###########
##################
# Core Functions #
##################
###########################################
# Data Storage Class - ParticipantRecords #
###########################################
###########
# Helpers #
###########
##################
# Core Functions #
##################
##########################################
# Data Storage Class - ExperimentRecords #
##########################################
###########
# Helpers #
###########
##################
# Core Functions #
##################
###################################
# Data Storage Class - RunRecords #
###################################
###########
# Helpers #
###########
##################
# Core Functions #
##################
#################################################
# Data Storage Association class - Registration #
#################################################
class RegistrationRecords(AssociationRecords):
""" RegistrationRecords documents associative records as a means to allow
participants to interact with different projects and vice-versa.
Note: Associative records DO NOT have user-allocated IDs! They are
auto-generated to be used as foreign keys in other downstream
associative records. Registrations are associative records and
will not have a registration ID as part of its composite key.
Instead it will exist under the 'link' key.
"""
###########
# Helpers #
###########
##################
# Core Functions #
##################
###############################################
# Data Storage Association class - TagRecords #
###############################################
class TagRecords(AssociationRecords):
""" TagRecords documents the child associations of a participant with its
registered project, archiving data tags used to locate datasets to be
loaded during FL training.
Note: Associative records DO NOT have user-allocated IDs! They are
auto-generated to be used as foreign keys in other downstream
associative records. Tags are associative records and will not
have a tag ID as part of its composite key.
"""
###########
# Helpers #
###########
##################
# Core Functions #
##################
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
14468,
4242,
198,
2,
20906,
3401,
5028,
1303,
198,
14468,
4242,
198,
198,
2,
42044,
14,
39582,
12,
259,
198,
11748,
28686,
198,
6738,
19720,
1330,
360,
713,
198,
198,
2,
7980,
8... | 3.559956 | 909 |
"""
Author: Ehsan Sadrfaridpour
Date: Aug 24, 2018
Purpose: map other labels to -1 and 1 labels,
make sure the number of 1 labels are smaller than the number of -1 labels for MLSVM framework
Usage: define the preferred mapping in the label_map which is a dictionary.
The key is the old/current label(s) in the file which needs to change and the
value(s) are the new labels. For the labels which are ok, you can skip them from
adding them to this dictionary and they will be ignored from conversion.
"""
import pandas as pd
import os
ds_path = '/scratch2/esadrfa/mlsvm_data'
in_ds_fname = 'susy.csv'
out_ds_fname = 'susy_fixed_label.csv'
df = pd.read_csv(os.path.join(ds_path, ds_fname),
header=None, sep=' ', error_bad_lines=False, engine='c')
sep = ' '
label_map = {'0': '-1'}
out_file = open(os.path.join(ds_path, out_ds_fname), 'w')
with open(os.path.join(ds_path, in_ds_fname),'r') as in_file:
for idx, line in enumerate(in_file):
if not idx % 100000: print(idx, end=',')
curr_data = line.split(sep)
if(curr_data[0] in label_map):
curr_data[0] = label_map[curr_data[0]]
for item in curr_data:
out_file.write(item + sep)
# out_file.write('\n') # it has the \n already, this cause empty lines
out_file.close()
print('convert is finished successfully!')
| [
37811,
198,
13838,
25,
412,
11994,
272,
14668,
81,
16370,
312,
48681,
198,
10430,
25,
2447,
1987,
11,
2864,
198,
30026,
3455,
25,
3975,
584,
14722,
284,
532,
16,
290,
352,
14722,
11,
220,
198,
197,
15883,
1654,
262,
1271,
286,
352,
... | 2.438503 | 561 |
import os
from django.conf import global_settings
from django.contrib.auth import authenticate
from django.db.models import Q
from django.template import context
from django.test import TestCase
from django.test.utils import override_settings
@override_settings(
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(__file__), 'templates'),
),
USE_TZ=False, # required for loading the fixture
)
class AuthContextProcessorTests(TestCase):
"""
Tests for the ``django.contrib.auth.context_processors.auth`` processor
"""
urls = 'django.contrib.auth.tests.urls'
fixtures = ['context-processors-users.xml']
@override_settings(
MIDDLEWARE_CLASSES=global_settings.MIDDLEWARE_CLASSES,
TEMPLATE_CONTEXT_PROCESSORS=global_settings.TEMPLATE_CONTEXT_PROCESSORS,
)
def test_session_not_accessed(self):
"""
Tests that the session is not accessed simply by including
the auth context processor
"""
response = self.client.get('/auth_processor_no_attr_access/')
self.assertContains(response, "Session not accessed")
@override_settings(
MIDDLEWARE_CLASSES=global_settings.MIDDLEWARE_CLASSES,
TEMPLATE_CONTEXT_PROCESSORS=global_settings.TEMPLATE_CONTEXT_PROCESSORS,
)
def test_session_is_accessed(self):
"""
Tests that the session is accessed if the auth context processor
is used and relevant attributes accessed.
"""
response = self.client.get('/auth_processor_attr_access/')
self.assertContains(response, "Session accessed")
def test_user_attrs(self):
"""
Test that the lazy objects returned behave just like the wrapped objects.
"""
# These are 'functional' level tests for common use cases. Direct
# testing of the implementation (SimpleLazyObject) is in the 'utils'
# tests.
self.client.login(username='super', password='secret')
user = authenticate(username='super', password='secret')
response = self.client.get('/auth_processor_user/')
self.assertContains(response, "unicode: super")
self.assertContains(response, "id: 100")
self.assertContains(response, "username: super")
# bug #12037 is tested by the {% url %} in the template:
self.assertContains(response, "url: /userpage/super/")
# See if this object can be used for queries where a Q() comparing
# a user can be used with another Q() (in an AND or OR fashion).
# This simulates what a template tag might do with the user from the
# context. Note that we don't need to execute a query, just build it.
#
# The failure case (bug #12049) on Python 2.4 with a LazyObject-wrapped
# User is a fatal TypeError: "function() takes at least 2 arguments
# (0 given)" deep inside deepcopy().
#
# Python 2.5 and 2.6 succeeded, but logged internally caught exception
# spew:
#
# Exception RuntimeError: 'maximum recursion depth exceeded while
# calling a Python object' in <type 'exceptions.AttributeError'>
# ignored"
query = Q(user=response.context['user']) & Q(someflag=True)
# Tests for user equality. This is hard because User defines
# equality in a non-duck-typing way
# See bug #12060
self.assertEqual(response.context['user'], user)
self.assertEqual(user, response.context['user'])
| [
11748,
28686,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
3298,
62,
33692,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
8323,
5344,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
1195,
198,
6738,
42625,
14208,
13,
28... | 2.590643 | 1,368 |
""" Utility functions for creating Python scripts that expect to turn one file into another, or use stdin/stdout as part of a pipeline.
Copyright 2018 Ben Kehoe
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__version__ = "1.2.0"
import argparse
import sys
import six
DEFAULT_TO_BINARY_MODE = False
def main(processor,
loader=None,
dumper=None,
parser=None,
args=None,
pre_parse_hook=None,
post_parse_hook=None,
positional_args=None,
parse_known_args=None):
"""Setup the appropriate input and output based on the command line args and
run the given callable processor. The basic arguments allow the program to be
called in the following ways:
prog [-i input_file] [-o output_file]
prog input_file [-o output_file]
prog input_file output_file
The latter two formats can be disabled by specifying positional_args=False
If there is no input or output file given, it will read from stdin or write
to stdout, respectively.
An argparse.ArgumentParser can be provided, as can the arguments to be parsed.
By default, the input is read into a bytestring. If a callable loader is
provided, it is called with the file-like input stream and the parsed args
object and should return the input to pass to the processor.
The processor is called with the input (bytestring or output from loader) and
the parsed args object, and should return the output to write to the file,
normally a bytestring.
If the output of the processor can't be directly written to the output stream,
a callable dumper can be provided, which takes the output from processor, the
output stream, and the parsed args object.
By default, the files are opened in text mode. If binary is desired,
the module field DEFAULT_TO_BINARY_MODE can be set to true. If processor,
loader, or dumper have an attribute named binary, that will be used instead.
Errors are printed to stdout unless the -q flag is given.
"""
xformer = _FileTransformer(
parser=parser,
args_to_parse=args,
pre_parse_hook=pre_parse_hook,
post_parse_hook=post_parse_hook,
positional_args=positional_args,
parse_known_args=parse_known_args)
return xformer.run(processor, loader=loader, dumper=dumper)
def streaming_main(processor,
parser=None,
args=None,
pre_parse_hook=None,
post_parse_hook=None,
positional_args=None,
parse_known_args=None):
"""Identical to main(), but the processor takes as input the file-like
input stream and output stream, and the parsed args object."""
xformer = _FileTransformer(
parser=parser,
args_to_parse=args,
pre_parse_hook=pre_parse_hook,
post_parse_hook=post_parse_hook,
positional_args=positional_args,
parse_known_args=parse_known_args)
return xformer.stream(processor)
def get_io_functions_from_lib(lib, load_func_name='load', dump_func_name='dump', load_kwargs={}, dump_kwargs={}):
"""Helper to create loader and dumper functions for libraries"""
return loader, dumper
def get_pickle_io(load_kwargs={}, dump_kwargs={}, picklelib=None):
"""Returns a loader and dumper for Pickle files"""
return get_io_functions_from_lib(_get_lib(picklelib, 'pickle'), 'load', 'dump', load_kwargs=load_kwargs, dump_kwargs=dump_kwargs)
def get_json_io(load_kwargs={}, dump_kwargs={}, jsonlib=None):
"""Returns a loader and dumper for JSON"""
return get_io_functions_from_lib(_get_lib(jsonlib, 'json'), 'load', 'dump', load_kwargs=load_kwargs, dump_kwargs=dump_kwargs)
def get_yaml_io(load_kwargs={}, dump_kwargs={}, safe=False, yamllib=None):
"""Returns a loader and dumper for YAML"""
load_func_name = 'safe_load' if safe else 'load'
dump_func_name = 'safe_dump' if safe else 'dump'
loader, dumper = get_io_functions_from_lib(_get_lib(yamllib, 'yaml'), load_func_name, dump_func_name, load_kwargs=load_kwargs, dump_kwargs=dump_kwargs)
dumper.binary = False
return loader, dumper
| [
37811,
34030,
5499,
329,
4441,
11361,
14750,
326,
1607,
284,
1210,
530,
2393,
656,
1194,
11,
393,
779,
14367,
259,
14,
19282,
448,
355,
636,
286,
257,
11523,
13,
198,
198,
15269,
2864,
3932,
3873,
38979,
198,
198,
26656,
15385,
739,
2... | 2.834146 | 1,640 |
from __future__ import annotations
from typing import Any, Iterable, MutableMapping
from qtpy.QtWidgets import QTableWidget, QTableWidgetItem
from .object import BaseWidget, ContextMenuMixin, PyObjectBound | [
6738,
11593,
37443,
834,
1330,
37647,
198,
6738,
19720,
1330,
4377,
11,
40806,
540,
11,
13859,
540,
44,
5912,
198,
6738,
10662,
83,
9078,
13,
48,
83,
54,
312,
11407,
1330,
1195,
10962,
38300,
11,
1195,
10962,
38300,
7449,
198,
198,
67... | 3.678571 | 56 |
#!/usr/bin/env python3
import click
from anormbookmarker.model.__model__ import *
from anormbookmarker.model.BookmarkClassConstructor import tagbookmarks_table
from anormbookmarker.model.Word import WordMisSpelling
from anormbookmarker.test.test_enviroment import Tag
from anormbookmarker.test.test_enviroment import Bookmark
from kcl.sqlalchemy.model.Filename import Filename
#from kcl.sqlalchemy.model.FileRecord import Filename
#from kcl.sqlalchemy.model.FileRecord import Path
from kcl.sqlalchemy.visualization.sa_display import sa_display as kcl_sa_display
@click.command()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
3904,
198,
6738,
281,
579,
2070,
4102,
263,
13,
19849,
13,
834,
19849,
834,
1330,
1635,
198,
6738,
281,
579,
2070,
4102,
263,
13,
19849,
13,
10482,
4102,
9487,
42316,
... | 3.145946 | 185 |
import os
from sovrin_common.setup_util import Setup
BASE_DIR = os.path.join(os.path.expanduser("~"), ".sovrin")
Setup(BASE_DIR).setupClient() | [
11748,
28686,
198,
198,
6738,
523,
85,
12769,
62,
11321,
13,
40406,
62,
22602,
1330,
31122,
198,
198,
33,
11159,
62,
34720,
796,
28686,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
11201,
392,
7220,
7203,
93,
12340,
27071,
47272,
12769,... | 2.666667 | 54 |
#!/usr/bin/python3 -B
# Copyright 2022 Josh Pieper, jjp@pobox.com.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import enum
import moteus
import struct
SCALE_TYPES = [
ScaleType([moteus.Register.POSITION,
moteus.Register.ABS_POSITION,
moteus.Register.COMMAND_POSITION,
moteus.Register.COMMAND_STOP_POSITION,
moteus.Register.COMMAND_WITHIN_LOWER_BOUND],
0.01, 0.0001, 0.00001),
ScaleType([moteus.Register.VELOCITY,
moteus.Register.COMMAND_VELOCITY,
moteus.Register.COMMAND_VELOCITY_LIMIT],
0.1, 0.00025, 0.00001),
ScaleType([moteus.Register.TORQUE,
moteus.Register.COMMAND_FEEDFORWARD_TORQUE,
moteus.Register.COMMAND_POSITION_MAX_TORQUE,
moteus.Register.POSITION_FEEDFORWARD,
moteus.Register.POSITION_COMMAND,
moteus.Register.COMMAND_WITHIN_FEEDFORWARD_TORQUE,
moteus.Register.COMMAND_WITHIN_MAX_TORQUE],
0.5, 0.01, 0.001),
ScaleType([moteus.Register.Q_CURRENT,
moteus.Register.D_CURRENT,
moteus.Register.COMMAND_Q_CURRENT,
moteus.Register.COMMAND_D_CURRENT],
1.0, 0.1, 0.001),
ScaleType([moteus.Register.VOLTAGE,
moteus.Register.VOLTAGE_PHASE_A,
moteus.Register.VOLTAGE_PHASE_B,
moteus.Register.VOLTAGE_PHASE_C,
moteus.Register.VFOC_VOLTAGE,
moteus.Register.VOLTAGEDQ_D,
moteus.Register.VOLTAGEDQ_Q],
0.5, 0.1, 0.001),
ScaleType([moteus.Register.TEMPERATURE],
1.0, 0.1, 0.001),
ScaleType([moteus.Register.PWM_PHASE_A,
moteus.Register.PWM_PHASE_B,
moteus.Register.PWM_PHASE_C,
moteus.Register.COMMAND_KP_SCALE,
moteus.Register.COMMAND_KD_SCALE,
moteus.Register.COMMAND_WITHIN_KP_SCALE,
moteus.Register.COMMAND_WITHIN_KD_SCALE],
1.0 / 127.0, 1.0 / 32767.0, 1.0 / 2147483647.0),
ScaleType([moteus.Register.COMMAND_ACCEL_LIMIT],
0.05, 0.001, 0.00001),
ScaleType([moteus.Register.COMMAND_TIMEOUT,
moteus.Register.COMMAND_WITHIN_TIMEOUT],
0.01, 0.001, 0.000001),
]
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
532,
33,
198,
198,
2,
15069,
33160,
8518,
21690,
525,
11,
474,
34523,
31,
79,
672,
1140,
13,
785,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
36... | 1.927056 | 1,508 |
from setuptools import setup, find_packages
import codecs
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import package_information
if __name__ == '__main__':
# Setting up
setup(
name=package_information.name,
version=package_information.version,
author=package_information.author,
author_email=package_information.email_author,
description=package_information.description,
long_description_content_type="text/markdown",
long_description=open('README.md').read() + '\n\n' + open('CHANGELOG.md').read(),
url="https://github.com/dados-mg/dpkgckanmg",
packages=find_packages(),
install_requires=open('requirements.txt').read(),
keywords=['python', 'ckan'],
classifiers=[
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
],
entry_points="""
[console_scripts]
dpckan=dpckan.cli:cli
"""
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
11748,
40481,
82,
198,
11748,
28686,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
28686,
13,
6978,
13,
397,
2777,
776,
10786,
492,
6,
4008,
198,
11748,
... | 2.532009 | 453 |
"""
Predict next tools in the Galaxy workflows
using machine learning (recurrent neural network)
"""
import numpy as np
import argparse
import time
# machine learning library
import tensorflow as tf
from keras import backend as K
import keras.callbacks as callbacks
import extract_workflow_connections
import prepare_data
import optimise_hyperparameters
import utils
if __name__ == "__main__":
    # Entry point: parse CLI arguments, prepare Galaxy workflow data and
    # train the tool-prediction recurrent network; wall-clock duration of
    # the whole run is reported at exit.
    start_time = time.time()
    arg_parser = argparse.ArgumentParser()
    # input/output files
    arg_parser.add_argument("-wf", "--workflow_file", required=True, help="workflows tabular file")
    arg_parser.add_argument("-tu", "--tool_usage_file", required=True, help="tool usage file")
    arg_parser.add_argument("-om", "--output_model", required=True, help="trained model file")
    # data parameters
    arg_parser.add_argument("-cd", "--cutoff_date", required=True, help="earliest date for taking tool usage")
    arg_parser.add_argument("-pl", "--maximum_path_length", required=True, help="maximum length of tool path")
    arg_parser.add_argument("-ep", "--n_epochs", required=True, help="number of iterations to run to create model")
    arg_parser.add_argument("-oe", "--optimize_n_epochs", required=True, help="number of iterations to run to find best model parameters")
    arg_parser.add_argument("-me", "--max_evals", required=True, help="maximum number of configuration evaluations")
    arg_parser.add_argument("-ts", "--test_share", required=True, help="share of data to be used for testing")
    arg_parser.add_argument("-vs", "--validation_share", required=True, help="share of data to be used for validation")
    # neural network parameters
    arg_parser.add_argument("-bs", "--batch_size", required=True, help="size of the tranining batch i.e. the number of samples per batch")
    arg_parser.add_argument("-ut", "--units", required=True, help="number of hidden recurrent units")
    arg_parser.add_argument("-es", "--embedding_size", required=True, help="size of the fixed vector learned for each tool")
    arg_parser.add_argument("-dt", "--dropout", required=True, help="percentage of neurons to be dropped")
    arg_parser.add_argument("-sd", "--spatial_dropout", required=True, help="1d dropout used for embedding layer")
    arg_parser.add_argument("-rd", "--recurrent_dropout", required=True, help="dropout for the recurrent layers")
    arg_parser.add_argument("-lr", "--learning_rate", required=True, help="learning rate")
    arg_parser.add_argument("-ar", "--activation_recurrent", required=True, help="activation function for recurrent layers")
    arg_parser.add_argument("-ao", "--activation_output", required=True, help="activation function for output layers")
    arg_parser.add_argument("-cpus", "--num_cpus", required=True, help="number of cpus for parallelism")
# get argument values
args = vars(arg_parser.parse_args())
tool_usage_path = args["tool_usage_file"]
workflows_path = args["workflow_file"]
cutoff_date = args["cutoff_date"]
maximum_path_length = int(args["maximum_path_length"])
trained_model_path = args["output_model"]
n_epochs = int(args["n_epochs"])
optimize_n_epochs = int(args["optimize_n_epochs"])
max_evals = int(args["max_evals"])
test_share = float(args["test_share"])
validation_share = float(args["validation_share"])
batch_size = args["batch_size"]
units = args["units"]
embedding_size = args["embedding_size"]
dropout = args["dropout"]
spatial_dropout = args["spatial_dropout"]
recurrent_dropout = args["recurrent_dropout"]
learning_rate = args["learning_rate"]
activation_recurrent = args["activation_recurrent"]
activation_output = args["activation_output"]
num_cpus = int(args["num_cpus"])
config = {
'cutoff_date': cutoff_date,
'maximum_path_length': maximum_path_length,
'n_epochs': n_epochs,
'optimize_n_epochs': optimize_n_epochs,
'max_evals': max_evals,
'test_share': test_share,
'validation_share': validation_share,
'batch_size': batch_size,
'units': units,
'embedding_size': embedding_size,
'dropout': dropout,
'spatial_dropout': spatial_dropout,
'recurrent_dropout': recurrent_dropout,
'learning_rate': learning_rate,
'activation_recurrent': activation_recurrent,
'activation_output': activation_output
}
# Extract and process workflows
connections = extract_workflow_connections.ExtractWorkflowConnections()
workflow_paths, compatible_next_tools = connections.read_tabular_file(workflows_path)
# Process the paths from workflows
print("Dividing data...")
data = prepare_data.PrepareData(maximum_path_length, test_share)
train_data, train_labels, test_data, test_labels, data_dictionary, reverse_dictionary, class_weights, usage_pred = data.get_data_labels_matrices(workflow_paths, tool_usage_path, cutoff_date, compatible_next_tools)
# find the best model and start training
predict_tool = PredictTool(num_cpus)
# start training with weighted classes
print("Training with weighted classes and samples ...")
results_weighted = predict_tool.find_train_best_network(config, reverse_dictionary, train_data, train_labels, test_data, test_labels, n_epochs, class_weights, usage_pred, compatible_next_tools)
print()
print("Best parameters \n")
print(results_weighted["best_parameters"])
print()
utils.save_model(results_weighted, data_dictionary, compatible_next_tools, trained_model_path, class_weights)
end_time = time.time()
print()
print("Program finished in %s seconds" % str(end_time - start_time))
| [
37811,
198,
47,
17407,
1306,
4899,
287,
262,
9252,
670,
44041,
198,
3500,
4572,
4673,
357,
8344,
6657,
17019,
3127,
8,
198,
37811,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1822,
29572,
198,
11748,
640,
198,
198,
2,
4572,
4... | 2.90169 | 1,953 |
'''
Facial recognition data challenge
Data from Evgueni Ovtchinnikov: https://www.dropbox.com/sh/a62wxyw9fpzwt95/AABJE0CEAtqOuLXKo_sOTFMVa?dl=0
https://github.com/evgueni-ovtchinnikov
1. clean the dataset: select only images with more than one face
2. select 70% train 30% cross validation
'''
import numpy
from functools import reduce
import matplotlib.pyplot as plt
import pickle
__version__ = '0.1.0'
#from docopt import docopt
'''
args = docopt(__doc__, version=__version__)
file = args['<images>']
pref = args['<prefix>']
path = args['--path']
print('loading images from %s...' % (path + '/' + file))
images = numpy.load(file)
ni, ny, nx = images.shape
'''
# link images numbers to names
names = []  # person names, in file order
num = []    # start index (into the image stack) of each person's images
#index = numpy.ndarray((ni,), dtype = numpy.int16)
# the number following the name indicates at which index the images of
# the person start.
off = []    # NOTE(review): never used below — looks like leftover scaffolding
count = 0
# Parse "name start_index" pairs from the LFW names listing.
# NOTE(review): split(' ') assumes fields are separated by exactly one
# space; plain split() would be safer if the file is column-aligned — verify.
with open('lfw_names.txt') as fp:
    line = fp.readline()
    while line:
        theline = line.split(' ')
        names.append(theline[0])
        num.append(int(theline[1]))
        line = fp.readline()
# PCA matrix
u = numpy.load("lfwfp1140eigim.npy")
# coordinates
v = numpy.load("lfwfp1140coord.npy")
# total number of images
ni = v.shape[1]
# NOTE(review): count_img is not defined in this module — presumably it
# derives per-person image counts from consecutive start indices; confirm
# where it is imported from.
count = count_img(num)
# correct the last count
if ni - num[-1] > 1:
    count.append(ni-num[-1])
# Expand (name, start, count) into one entry per image, keeping only
# people with at least min_num_pics pictures.
names_repeat = []  # person name, repeated once per image
index_repeat = []  # global image index for each entry of names_repeat
name_index = {}    # NOTE(review): never populated below — leftover?
min_num_pics = 40
for i in range (len(count)):
    if count[i] >= min_num_pics:
        for j in range(count[i]):
            names_repeat.append(names[i])
            index_repeat.append(num[i] + j)
# Pick one person and display the PCA reconstruction of their first image.
# NOTE(review): the first assignment is a dead store — immediately
# overwritten by the next line.
select = 'Bill_Clinton'
select = 'Vladimir_Putin'
i = 0
while (names_repeat[i] != select):
    i+=1
nselect = reduce(lambda x,y: x + 1 if y == select else x, names_repeat,0)
# the selected person occupies entries [i, i+nselect-1]
nc, ny, nx = u.shape
index = index_repeat[i]
# Reconstruct the image from its PCA coordinates: image = U^T . coords.
PCA_image = numpy.dot(u.T, v.T[index])
PCA_image = numpy.reshape(PCA_image, (ny, nx))
plt.figure()
plt.title('PCA approximation of the image %d' % i)
plt.imshow(PCA_image.T, cmap = 'gray')
plt.show()
#n = nx*ny
#u = numpy.reshape(u, (nc, n))
# create the test set and cross validation set
# if a person has:
# n pics, train/cross validation split
# 2 , 1-1
# 3 , 2-1
# 4 , 3-1
# 5 , 3-2
# 6 , 4-2
# 7 , 5-2
# 8 , 5-3
# 9 , 6-3
# 10 , 70%-30%
# 11 , idem
# 12 ,
training_set_indices = []
cv_set_indices = []
face_index = 0

# Hand-tuned train/cv splits for small image counts (see the table in the
# comments above); anything larger falls back to a ~70/30 split.  The table
# is kept explicit because it differs from int(n * 0.7) at n = 4 and n = 7.
_SPLITS = {2: (1, 1), 3: (2, 1), 4: (3, 1), 5: (3, 2),
           6: (4, 2), 7: (5, 2), 8: (5, 3), 9: (6, 3)}

for select in names:
    if select not in names_repeat:
        continue
    # Locate the first per-image entry belonging to this person.
    first = 0
    while first < len(names_repeat) and names_repeat[first] != select:
        first += 1
    # Count how many images this person contributes.
    n_images = reduce(lambda acc, person: acc + 1 if person == select else acc,
                      names_repeat, 0)
    if n_images in _SPLITS:
        nts, ncv = _SPLITS[n_images]
    else:
        nts = int(n_images * 0.7)
        ncv = n_images - nts
    # First nts images go to training, the following ncv to cross validation;
    # each record is (name, global image index, per-person label).
    for offset in range(nts):
        training_set_indices.append((select, index_repeat[first + offset], face_index))
    for offset in range(ncv):
        cv_set_indices.append((select, index_repeat[first + nts + offset], face_index))
    face_index += 1
# Build the PCA-coordinate matrices for the training and cross-validation
# sets: one row of `neig` eigen-coordinates per selected image.
neig = v.shape[0]
training_set = numpy.zeros((len(training_set_indices), neig), dtype=v.dtype)
cv_set = numpy.zeros((len(cv_set_indices), neig), dtype=v.dtype)
for i, face in enumerate(training_set_indices):
    faceindex = face[1]
    training_set[i][:] = v.T[faceindex]
for i, face in enumerate(cv_set_indices):
    faceindex = face[1]
    cv_set[i][:] = v.T[faceindex]
# show that we are doing well
# (removed a dead store: select was first set to 'Vladimir_Putin' and then
# immediately overwritten with 'Colin_Powell')
select = 'Colin_Powell'
index = 0
while (not select == training_set_indices[index][0]):
    index += 1
PCA_image = numpy.dot(u.T, training_set[index])
PCA_image = numpy.reshape(PCA_image, (ny, nx))
plt.figure()
plt.title('PCA approximation of the image {}'.format(training_set_indices[index][0]))
plt.imshow(PCA_image.T, cmap = 'gray')
plt.show()
# save description of dataset
# Use context managers so the pickle files are flushed and closed reliably
# (the previous pickle.dump(..., open(...)) calls leaked file handles).
with open("training_set_indices.pkl", "wb") as out_fp:
    pickle.dump(training_set_indices, out_fp)
with open("cv_set_indices.pkl", "wb") as out_fp:
    pickle.dump(cv_set_indices, out_fp)
| [
7061,
6,
198,
37,
18150,
9465,
1366,
4427,
198,
198,
6601,
422,
4319,
5162,
43850,
440,
36540,
354,
3732,
1134,
709,
25,
3740,
1378,
2503,
13,
14781,
3524,
13,
785,
14,
1477,
14,
64,
5237,
86,
5431,
86,
24,
46428,
89,
46569,
3865,
... | 2.087575 | 2,318 |
'''OpenGL extension EXT.gpu_program_parameters
The official definition of this extension is available here:
http://oss.sgi.com/projects/ogl-sample/registry/EXT/gpu_program_parameters.txt
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
# Registry name of the extension wrapped by this (generated) module.
EXTENSION_NAME = 'GL_EXT_gpu_program_parameters'
# Bulk setter for program-environment parameters: uploads `count` 4-float
# vectors starting at `index` for program `target` (see the doc string).
glProgramEnvParameters4fvEXT = platform.createExtensionFunction(
'glProgramEnvParameters4fvEXT', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLuint, constants.GLsizei, arrays.GLfloatArray,),
doc = 'glProgramEnvParameters4fvEXT( GLenum(target), GLuint(index), GLsizei(count), GLfloatArray(params) ) -> None',
argNames = ('target', 'index', 'count', 'params',),
)
# Bulk setter for program-local parameters; same signature as the
# environment variant above.
glProgramLocalParameters4fvEXT = platform.createExtensionFunction(
'glProgramLocalParameters4fvEXT', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLuint, constants.GLsizei, arrays.GLfloatArray,),
doc = 'glProgramLocalParameters4fvEXT( GLenum(target), GLuint(index), GLsizei(count), GLfloatArray(params) ) -> None',
argNames = ('target', 'index', 'count', 'params',),
)
def glInitGpuProgramParametersEXT():
    '''Return boolean indicating whether this extension is available

    Queries OpenGL.extensions for EXTENSION_NAME; the result reflects the
    currently active GL context.
    '''
    return extensions.hasGLExtension( EXTENSION_NAME )
| [
7061,
6,
11505,
8763,
7552,
27489,
13,
46999,
62,
23065,
62,
17143,
7307,
198,
198,
464,
1743,
6770,
286,
428,
7552,
318,
1695,
994,
25,
198,
197,
4023,
1378,
793,
13,
82,
12397,
13,
785,
14,
42068,
14,
28678,
12,
39873,
14,
2301,
... | 3.155844 | 462 |
import re, unittest
from feature_reduction import *
# user pattern tests
# url pattern tests
# more url tests (if needed)
# https://mathiasbynens.be/demo/url-regex
# repeating pattern tests
# reduce tests
def _main():
    """Run every TestCase defined in this module via unittest's CLI runner."""
    unittest.main()


if __name__ == '__main__':
    _main()
| [
11748,
302,
11,
555,
715,
395,
198,
6738,
3895,
62,
445,
8110,
1330,
1635,
628,
197,
2,
2836,
3912,
5254,
628,
197,
2,
19016,
3912,
5254,
628,
197,
2,
517,
19016,
5254,
357,
361,
2622,
8,
198,
197,
2,
3740,
1378,
11018,
4448,
1492... | 2.977528 | 89 |
# Functions relevant for LISA-like orbits
#
# A LISA-like orbit has 3 satellites in a triangular configuration.
# Eccentricity and inclination combine to make the triangle
# tumble with a minimal amount of variation in the arm lengths
# See, e.g., K Rajesh Nayak et al, Class. Quantum Grav. 23, 1763 (2006).
import math
from constants import constants as k
import numpy as np
from scipy.optimize import newton
# Defaults: orbit average radius = 1.0 AU
#          orbit angular velocity average = omegaEarthPerDay1Body (assume only Sun)
# delta = 0.0
# Sigma is a phase that depends on whichSatellite = 1,2,3
# Returns a function of eccentric anomaly
# Numerically find the root of this function to get the eccentric anomaly
# $\Omega t - \sigma_k - \psi_k = e \sin\psi_k$
# $\Omega - \frac{d\psi_k}{dt} = e \cos\psi_k \frac{d\psi_k}{dt}$
# $\frac{d\psi_k}{dt} = \Omega / (1 + e \cos\psi_k)$
# Note: whichSatellite = 1,2,3 for the 3 satellites
| [
2,
40480,
5981,
329,
406,
22312,
12,
2339,
37015,
198,
2,
198,
2,
317,
406,
22312,
12,
2339,
13066,
468,
513,
20372,
287,
257,
46963,
8398,
13,
198,
2,
38308,
22317,
414,
290,
36793,
12082,
284,
787,
262,
22950,
220,
198,
2,
47978,
... | 2.507109 | 422 |
from Neural_Network import Neural_Network
if __name__ == '__main__':
    # Train a fresh network and persist it to disk.
    # NOTE(review): train_network and save_ann are neither defined nor
    # imported in this module — presumably they belong to Neural_Network;
    # as written these calls raise NameError. Verify the intended import.
    ann, model = train_network()
    save_ann(ann, model)
| [
6738,
47986,
62,
26245,
1330,
47986,
62,
26245,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1529,
11,
2746,
796,
4512,
62,
27349,
3419,
198,
220,
220,
220,
3613,
62,
1236,
7,
1236,
11... | 2.847826 | 46 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# (c) Copyright 2003-2015 HP Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: Don Welch
#
__version__ = '9.0'
__title__ = 'Printer/Fax Setup Utility'
__mod__ = 'hp-setup'
__doc__ = "Installs HPLIP printers and faxes in the CUPS spooler. Tries to automatically determine the correct PPD file to use. Allows the printing of a testpage. Performs basic fax parameter setup."
# Std Lib
import sys
import getopt
import time
import os.path
import re
import os
import gzip
try:
import readline
except ImportError:
pass
# Local
from base.g import *
from base import device, utils, tui, models, module, services, os_utils
from prnt import cups
from base.sixext.moves import input
from base.sixext import to_unicode, from_unicode_to_str
# Prefer the stdlib importlib; fall back to HPLIP's own dynamic-import
# helper on very old Pythons that lack it.
try:
    from importlib import import_module
except ImportError as e:
    log.debug(e)
    from base.utils import dyn_import_mod as import_module
pm = None  # NOTE(review): presumably a progress-meter handle set later — confirm
# Extracts the printer description from a PPD's *NickName: "..." line.
nickname_pat = re.compile(r'''\*NickName:\s*\"(.*)"''', re.MULTILINE)
USAGE = [ (__doc__, "", "name", True),
("Usage: %s [MODE] [OPTIONS] [SERIAL NO.|USB bus:device|IP|DEVNODE]" % __mod__, "", "summary", True),
utils.USAGE_MODE,
utils.USAGE_GUI_MODE,
utils.USAGE_INTERACTIVE_MODE,
utils.USAGE_SPACE,
utils.USAGE_OPTIONS,
("Automatic mode:", "-a or --auto (-i mode only)", "option", False),
("To specify the port on a multi-port JetDirect:", "--port=<port> (Valid values are 1\*, 2, and 3. \*default)", "option", False),
("No testpage in automatic mode:", "-x (-i mode only)", "option", False),
("To specify a CUPS printer queue name:", "-p<printer> or --printer=<printer> (-i mode only)", "option", False),
("To specify a CUPS fax queue name:", "-f<fax> or --fax=<fax> (-i mode only)", "option", False),
("Type of queue(s) to install:", "-t<typelist> or --type=<typelist>. <typelist>: print*, fax\* (\*default) (-i mode only)", "option", False),
("To specify the device URI to install:", "-d<device> or --device=<device> (--qt4 mode only)", "option", False),
("Remove printers or faxes instead of setting-up:", "-r or --rm or --remove", "option", False),
utils.USAGE_LANGUAGE,
utils.USAGE_LOGGING1, utils.USAGE_LOGGING2, utils.USAGE_LOGGING3,
utils.USAGE_HELP,
("[SERIAL NO.|USB ID|IP|DEVNODE]", "", "heading", False),
("USB bus:device (usb only):", """"xxx:yyy" where 'xxx' is the USB bus and 'yyy' is the USB device. (Note: The ':' and all leading zeros must be present.)""", 'option', False),
("", "Use the 'lsusb' command to obtain this information.", "option", False),
("IPs (network only):", 'IPv4 address "a.b.c.d" or "hostname"', "option", False),
("DEVNODE (parallel only):", '"/dev/parportX", X=0,1,2,...', "option", False),
("SERIAL NO. (usb and parallel only):", '"serial no."', "option", True),
utils.USAGE_EXAMPLES,
("Setup using GUI mode:", "$ hp-setup", "example", False),
("Setup using GUI mode, specifying usb:", "$ hp-setup -b usb", "example", False),
("Setup using GUI mode, specifying an IP:", "$ hp-setup 192.168.0.101", "example", False),
("One USB printer attached, automatic:", "$ hp-setup -i -a", "example", False),
("USB, IDs specified:", "$ hp-setup -i 001:002", "example", False),
("Network:", "$ hp-setup -i 66.35.250.209", "example", False),
("Network, Jetdirect port 2:", "$ hp-setup -i --port=2 66.35.250.209", "example", False),
("Parallel:", "$ hp-setup -i /dev/parport0", "example", False),
("USB or parallel, using serial number:", "$ hp-setup -i US12345678A", "example", False),
("USB, automatic:", "$ hp-setup -i --auto 001:002", "example", False),
("Parallel, automatic, no testpage:", "$ hp-setup -i -a -x /dev/parport0", "example", False),
("Parallel, choose device:", "$ hp-setup -i -b par", "example", False),
utils.USAGE_SPACE,
utils.USAGE_NOTES,
("1. If no serial number, USB ID, IP, or device node is specified, the USB and parallel busses will be probed for devices.", "", 'note', False),
("2. Using 'lsusb' to obtain USB IDs: (example)", "", 'note', False),
(" $ lsusb", "", 'note', False),
(" Bus 003 Device 011: ID 03f0:c202 Hewlett-Packard", "", 'note', False),
(" $ hp-setup --auto 003:011", "", 'note', False),
(" (Note: You may have to run 'lsusb' from /sbin or another location. Use '$ locate lsusb' to determine this.)", "", 'note', True),
("3. Parameters -a, -f, -p, or -t are not valid in GUI (-u) mode.", "", 'note', True),
utils.USAGE_SPACE,
utils.USAGE_SEEALSO,
("hp-makeuri", "", "seealso", False),
("hp-probe", "", "seealso", False),
]
# Shared hp-setup module object: owns the usage text, logging setup and the
# common command-line parsing for both GUI and interactive modes.
mod = module.Module(__mod__, __title__, __version__, __doc__, USAGE,
                    (INTERACTIVE_MODE, GUI_MODE),
                    (UI_TOOLKIT_QT3, UI_TOOLKIT_QT4, UI_TOOLKIT_QT5),
                    run_as_root_ok=True)
# Returns the leftover (opt, value) pairs plus the standard device/mode
# selections; device/printer handling is deferred (handle_device_printer=False)
# so this script can do its own queue-name logic below.
opts, device_uri, printer_name, mode, ui_toolkit, loc = \
    mod.parseStdOpts('axp:P:f:t:b:d:rq',
                     ['ttl=', 'filter=', 'search=', 'find=',
                      'method=', 'time-out=', 'timeout=',
                      'printer=', 'fax=', 'type=', 'port=',
                      'auto', 'device=', 'rm', 'remove'],
                     handle_device_printer=False)
# Defaults for all command-line switches; overridden by the option loop below.
selected_device_name = None
printer_name = None
fax_name = None
bus = None
setup_print = True
setup_fax = True
makeuri = None
auto = False
testpage_in_auto_mode = True
jd_port = 1                   # JetDirect port (1-3) for multi-port print servers
remove = False                # True: remove queues instead of installing them
ignore_plugin_check = False   # -q: skip the proprietary-plugin status check
# Apply the parsed command-line options on top of the defaults above.
for o, a in opts:
    if o == '-x':
        testpage_in_auto_mode = False

    elif o in ('-P', '-p', '--printer'):
        printer_name = a

    elif o in ('-f', '--fax'):
        fax_name = a

    elif o in ('-d', '--device'):
        device_uri = a

    elif o in ('-b', '--bus'):
        bus = [x.lower().strip() for x in a.split(',')]
        if not device.validateBusList(bus, False):
            mod.usage(error_msg=['Invalid bus name'])

    elif o in ('-t', '--type'):
        # -t takes a comma-separated list of queue types to set up.
        setup_fax, setup_print = False, False
        a = a.strip().lower()
        for aa in a.split(','):
            if aa.strip() not in ('print', 'fax'):
                mod.usage(error_msg=['Invalid type.'])

            if aa.strip() == 'print':
                setup_print = True

            elif aa.strip() == 'fax':
                if not prop.fax_build:
                    log.error("Cannot enable fax setup - HPLIP not built with fax enabled.")
                else:
                    setup_fax = True

    elif o == '--port':
        try:
            jd_port = int(a)
        except ValueError:
            mod.usage(error_msg=['Invalid port number. Must be between 1 and 3 inclusive.'])

    elif o in ('-a', '--auto'):
        auto = True

    elif o in ('-r', '--rm', '--remove'):
        remove = True

    # BUG FIX: was `o in ('-q')` — parentheses without a trailing comma make
    # a plain string, so the test was a substring check against '-q' rather
    # than a tuple membership test. Compare for equality instead.
    elif o == '-q':
        ignore_plugin_check = True
# The first positional argument (if any) identifies the device to set up:
# serial number, USB bus:device pair, IP/hostname, or /dev node.
try:
    param = mod.args[0]
except IndexError:
    param = ''
log.debug("param=%s" % param)
# -p (printer queue name) takes precedence over -f (fax queue name).
if printer_name is not None:
    selected_device_name = printer_name
else:
    if fax_name is not None:
        selected_device_name = fax_name
log.debug("selected_device_name=%s" % selected_device_name)
# Sanity-check GUI mode before any toolkit event loop is started.
# NOTE(review): clean_exit is not defined in the portion of this file
# reviewed here — presumably a module-level helper; confirm.
if mode == GUI_MODE:
    if selected_device_name is not None:
        log.warning("-p or -f option is not supported")
    if ui_toolkit == 'qt3':
        if not utils.canEnterGUIMode():
            log.error("%s requires GUI support (try running with --qt4). Also, try using interactive (-i) mode." % __mod__)
            clean_exit(1)
    else:
        if not utils.canEnterGUIMode4():
            log.error("%s requires GUI support (try running with --qt3). Also, try using interactive (-i) mode." % __mod__)
            clean_exit(1)
if mode == GUI_MODE:
if ui_toolkit == 'qt3':
try:
from qt import *
from ui import setupform
except ImportError:
log.error("Unable to load Qt3 support. Is it installed?")
clean_exit(1)
if remove:
log.warn("-r/--rm/--remove not supported in qt3 mode.")
app = QApplication(sys.argv)
QObject.connect(app, SIGNAL("lastWindowClosed()"), app, SLOT("quit()"))
if loc is None:
loc = user_conf.get('ui', 'loc', 'system')
if loc.lower() == 'system':
loc = str(QTextCodec.locale())
log.debug("Using system locale: %s" % loc)
if loc.lower() != 'c':
e = 'utf8'
try:
l, x = loc.split('.')
loc = '.'.join([l, e])
except ValueError:
l = loc
loc = '.'.join([loc, e])
log.debug("Trying to load .qm file for %s locale." % loc)
trans = QTranslator(None)
qm_file = 'hplip_%s.qm' % l
log.debug("Name of .qm file: %s" % qm_file)
loaded = trans.load(qm_file, prop.localization_dir)
if loaded:
app.installTranslator(trans)
else:
loc = 'c'
if loc == 'c':
log.debug("Using default 'C' locale")
else:
log.debug("Using locale: %s" % loc)
QLocale.setDefault(QLocale(loc))
prop.locale = loc
try:
locale.setlocale(locale.LC_ALL, locale.normalize(loc))
except locale.Error:
pass
try:
w = setupform.SetupForm(bus, param, jd_port)
except Error:
log.error("Unable to connect to HPLIP I/O. Please (re)start HPLIP and try again.")
clean_exit(1)
app.setMainWidget(w)
w.show()
app.exec_loop()
cups.releaseCupsInstance()
else: # qt4
# if utils.ui_status[1] == "PyQt4":
# try:
# from PyQt4.QtGui import QApplication, QMessageBox
# from ui4.setupdialog import SetupDialog
# except ImportError as e:
# log.error(e)
# clean_exit(1)
# elif utils.ui_status[1] == "PyQt5":
# try:
# from PyQt5.QtWidgets import QApplication, QMessageBox
# from ui5.setupdialog import SetupDialog
# except ImportError as e:
# log.error(e)
# clean_exit(1)
# else:
# log.error("Unable to load Qt support. Is it installed?")
# clean_exit(1)
QApplication, ui_package = utils.import_dialog(ui_toolkit)
ui = import_module(ui_package + ".setupdialog")
app = QApplication(sys.argv)
log.debug("Sys.argv=%s printer_name=%s param=%s jd_port=%s device_uri=%s remove=%s" % (sys.argv, printer_name, param, jd_port, device_uri, remove))
dlg = ui.SetupDialog(None, param, jd_port, device_uri, remove)
dlg.show()
try:
log.debug("Starting GUI Event Loop...")
app.exec_()
except KeyboardInterrupt:
clean_exit(0)
else: # INTERACTIVE_MODE
try:
try:
from base import password
except ImportError:
log.warn("Failed to import Password Object")
else:
cups.setPasswordCallback(password.showPasswordPrompt)
#Removing Queue
if remove:
tui.header("REMOVING PRINT/FAX QUEUE")
sts, printer_name, device_uri = mod.getPrinterName(selected_device_name,None,['hp','hpfax'])
selected_device_name = printer_name
log.info (log.bold("Removing '%s : %s' Queue"%(printer_name, device_uri)))
status, status_str = cups.cups_operation(cups.delPrinter, INTERACTIVE_MODE, '', None, selected_device_name)
if cups.IPP_OK == status:
log.info("Successfully deleted %s Print/Fax queue"%selected_device_name)
utils.sendEvent(EVENT_CUPS_QUEUES_REMOVED,device_uri, printer_name)
clean_exit(0)
else:
log.error("Failed to delete %s Print/Fax queue. Error : %s"%(selected_device_name,status_str))
clean_exit(1)
if not auto:
log.info("(Note: Defaults for each question are maked with a '*'. Press <enter> to accept the default.)")
log.info("")
# ******************************* MAKEURI
if param:
device_uri, sane_uri, fax_uri = device.makeURI(param, jd_port)
# ******************************* CONNECTION TYPE CHOOSER
if not device_uri and bus is None:
bus = tui.connection_table()
if bus is None:
clean_exit(0)
log.info("\nUsing connection type: %s" % bus[0])
log.info("")
# ******************************* DEVICE CHOOSER
if not device_uri:
log.debug("\nDEVICE CHOOSER setup_fax=%s, setup_print=%s" % (setup_fax, setup_print))
device_uri = mod.getDeviceUri(devices = device.probeDevices(bus))
if not device_uri:
clean_exit(0)
# ******************************* QUERY MODEL AND COLLECT PPDS
log.info(log.bold("\nSetting up device: %s\n" % device_uri))
log.info("")
print_uri = device_uri.replace("hpfax:", "hp:")
fax_uri = device_uri.replace("hp:", "hpfax:")
back_end, is_hp, bus, model, \
serial, dev_file, host, zc, port = \
device.parseDeviceURI(device_uri)
log.debug("Model=%s" % model)
mq = device.queryModelByURI(device_uri)
if not mq or mq.get('support-type', SUPPORT_TYPE_NONE) == SUPPORT_TYPE_NONE:
log.error("Unsupported printer model.")
clean_exit(1)
if mq.get('fax-type', FAX_TYPE_NONE) in (FAX_TYPE_NONE, FAX_TYPE_NOT_SUPPORTED) and setup_fax:
#log.warning("Cannot setup fax - device does not have fax feature.")
setup_fax = False
# ******************************* PLUGIN
norm_model = models.normalizeModelName(model).lower()
plugin = mq.get('plugin', PLUGIN_NONE)
if ignore_plugin_check is False and plugin > PLUGIN_NONE:
from installer import pluginhandler
pluginObj = pluginhandler.PluginHandle()
plugin_sts = pluginObj.getStatus()
if plugin_sts != pluginhandler.PLUGIN_INSTALLED:
if plugin_sts == pluginhandler.PLUGIN_VERSION_MISMATCH:
tui.header("UPDATING PLUGIN")
else:
tui.header("PLUG-IN INSTALLATION")
hp_plugin = utils.which('hp-plugin')
if hp_plugin:
cmd = "hp-plugin -i"
if os_utils.execute(cmd) != 0:
log.error("Failed to install Plugin.")
log.error("The device you are trying to setup requires a binary plug-in. Some functionalities may not work as expected without plug-ins. Please run 'hp-plugin' as normal user to install plug-ins.Visit http://hplipopensource.com for more infomation.")
clean_exit(1)
ppds = cups.getSystemPPDs()
default_model = utils.xstrip(model.replace('series', '').replace('Series', ''), '_')
installed_print_devices = device.getSupportedCUPSDevices(['hp'])
for d in list(installed_print_devices.keys()):
for p in installed_print_devices[d]:
log.debug("found print queue '%s'" % p)
installed_fax_devices = device.getSupportedCUPSDevices(['hpfax'])
for d in list(installed_fax_devices.keys()):
for f in installed_fax_devices[d]:
log.debug("found fax queue '%s'" % f)
# ******************************* PRINT QUEUE SETUP
if setup_print:
tui.header("PRINT QUEUE SETUP")
if not auto and print_uri in installed_print_devices:
log.warning("One or more print queues already exist for this device: %s." %
', '.join(installed_print_devices[print_uri]))
ok, setup_print = tui.enter_yes_no("\nWould you like to install another print queue for this device", 'n')
if not ok: clean_exit(0)
if setup_print:
if auto:
printer_name = default_model
printer_default_model = default_model
installed_printer_names = device.getSupportedCUPSPrinterNames(['hp'])
# Check for duplicate names
if (device_uri in installed_print_devices and printer_default_model in installed_print_devices[device_uri]) \
or (printer_default_model in installed_printer_names):
i = 2
while True:
t = printer_default_model + "_%d" % i
if (t not in installed_printer_names) and(device_uri not in installed_print_devices or t not in installed_print_devices[device_uri]):
printer_default_model += "_%d" % i
break
i += 1
if not auto:
if printer_name is None:
while True:
printer_name = input(log.bold("\nPlease enter a name for this print queue (m=use model name:'%s'*, q=quit) ?" % printer_default_model))
if printer_name.lower().strip() == 'q':
log.info("OK, done.")
clean_exit(0)
if not printer_name or printer_name.lower().strip() == 'm':
printer_name = printer_default_model
name_ok = True
for d in list(installed_print_devices.keys()):
for p in installed_print_devices[d]:
if printer_name == p:
log.error("A print queue with that name already exists. Please enter a different name.")
name_ok = False
break
for d in list(installed_fax_devices.keys()):
for f in installed_fax_devices[d]:
if printer_name == f:
log.error("A fax queue with that name already exists. Please enter a different name.")
name_ok = False
break
for c in printer_name:
if c in cups.INVALID_PRINTER_NAME_CHARS:
log.error("Invalid character '%s' in printer name. Please enter a name that does not contain this character." % c)
name_ok = False
if name_ok:
break
else:
printer_name = printer_default_model
log.info("Using queue name: %s" % printer_name)
default_model = utils.xstrip(model.replace('series', '').replace('Series', ''), '_')
log.info("Locating PPD file... Please wait.")
print_ppd = cups.getPPDFile2(mq, default_model, ppds)
enter_ppd = False
if print_ppd is None:
enter_ppd = True
log.error("Unable to find an appropriate PPD file.")
else:
print_ppd, desc = print_ppd
log.info("\nFound PPD file: %s" % print_ppd)
log.info("Description: %s" % desc)
#
if not auto:
log.info("\nNote: The model number may vary slightly from the actual model number on the device.")
ok, ans = tui.enter_yes_no("\nDoes this PPD file appear to be the correct one")
if not ok: clean_exit(0)
if not ans: enter_ppd = True
if enter_ppd:
enter_ppd = False
ok, enter_ppd = tui.enter_yes_no("\nWould you like to specify the path to the correct PPD file to use", 'n')
if not ok: clean_exit(0)
if enter_ppd:
ok = False
while True:
user_input = input(log.bold("\nPlease enter the full filesystem path to the PPD file to use (q=quit) :"))
if user_input.lower().strip() == 'q':
log.info("OK, done.")
clean_exit(0)
file_path = user_input
if os.path.exists(file_path) and os.path.isfile(file_path):
if file_path.endswith('.gz'):
nickname = gzip.GzipFile(file_path, 'r').read(4096)
else:
nickname = open(file_path, 'r').read(4096)
try:
desc = nickname_pat.search(nickname).group(1)
except AttributeError:
desc = ''
if desc:
log.info("Description for the file: %s" % desc)
else:
log.error("No PPD 'NickName' found. This file may not be a valid PPD file.")
ok, ans = tui.enter_yes_no("\nUse this file")
if not ok: clean_exit(0)
if ans: print_ppd = file_path
else:
log.error("File not found or not an appropriate (PPD) file.")
if ok:
break
else:
log.error("PPD file required. Setup cannot continue. Exiting.")
clean_exit(1)
if auto:
location, info = '', '%s Device (Automatically setup by HPLIP)'%(default_model.replace('_',' '))
else:
while True:
location = input(log.bold("Enter a location description for this printer (q=quit) ?"))
if location.strip().lower() == 'q':
log.info("OK, done.")
clean_exit(0)
# TODO: Validate chars
break
while True:
info = input(log.bold("Enter additonal information or notes for this printer (q=quit) ?"))
if info.strip().lower() == 'q':
log.info("OK, done.")
clean_exit(0)
# TODO: Validate chars
break
log.info(log.bold("\nAdding print queue to CUPS:"))
log.info("Device URI: %s" % print_uri)
log.info("Queue name: %s" % printer_name)
log.info("PPD file: %s" % print_ppd)
log.info("Location: %s" % location)
log.info("Information: %s" % info)
if not os.path.exists(print_ppd): # assume foomatic: or some such
add_prnt_args = (printer_name, print_uri, location, '', print_ppd, info)
else:
add_prnt_args = (printer_name, print_uri, location, print_ppd, '', info)
status, status_str = cups.cups_operation(cups.addPrinter, INTERACTIVE_MODE, '', None, *add_prnt_args)
log.debug("addPrinter() returned (%d, %s)" % (status, status_str))
log.debug(device.getSupportedCUPSDevices(['hp']))
if status != cups.IPP_OK:
log.error("Printer queue setup failed. Error : %s "%status_str)
clean_exit(1)
else:
# sending Event to add this device in hp-systray
utils.sendEvent(EVENT_CUPS_QUEUES_ADDED,print_uri, printer_name)
# Updating firmware download for supported devices.
if ignore_plugin_check is False and mq.get('fw-download', False):
try:
d = device.Device(print_uri)
except Error:
log.error("Error opening device. Firmware download is Failed.")
else:
if d.downloadFirmware():
log.info("Firmware download successful.\n")
else:
log.error("Firmware download is Failed.")
d.close()
# ******************************* FAX QUEUE SETUP
if setup_fax and not prop.fax_build:
log.error("Cannot setup fax - HPLIP not built with fax enabled.")
setup_fax = False
if setup_fax:
try:
from fax import fax
except ImportError:
# This can fail on Python < 2.3 due to the datetime module
setup_fax = False
log.warning("Fax setup disabled - Python 2.3+ required.")
log.info("")
if setup_fax:
tui.header("FAX QUEUE SETUP")
if not auto and fax_uri in installed_fax_devices:
log.warning("One or more fax queues already exist for this device: %s." % ', '.join(installed_fax_devices[fax_uri]))
ok, setup_fax = tui.enter_yes_no("\nWould you like to install another fax queue for this device", 'n')
if not ok: clean_exit(0)
if setup_fax:
if auto: # or fax_name is None:
fax_name = default_model + '_fax'
fax_default_model = default_model + '_fax'
installed_fax_names = device.getSupportedCUPSPrinterNames(['hpfax'])
# Check for duplicate names
if (fax_uri in installed_fax_devices and fax_default_model in installed_fax_devices[fax_uri]) \
or (fax_default_model in installed_fax_names):
i = 2
while True:
t = fax_default_model + "_%d" % i
if (t not in installed_fax_names) and (fax_uri not in installed_fax_devices or t not in installed_fax_devices[fax_uri]):
fax_default_model += "_%d" % i
break
i += 1
if not auto:
if fax_name is None:
while True:
fax_name = input(log.bold("\nPlease enter a name for this fax queue (m=use model name:'%s'*, q=quit) ?" % fax_default_model))
if fax_name.lower().strip() == 'q':
log.info("OK, done.")
clean_exit(0)
if not fax_name or fax_name.lower().strip() == 'm':
fax_name = fax_default_model
name_ok = True
for d in list(installed_print_devices.keys()):
for p in installed_print_devices[d]:
if fax_name == p:
log.error("A print queue with that name already exists. Please enter a different name.")
name_ok = False
break
for d in list(installed_fax_devices.keys()):
for f in installed_fax_devices[d]:
if fax_name == f:
log.error("A fax queue with that name already exists. Please enter a different name.")
name_ok = False
break
for c in fax_name:
if c in (' ', '#', '/', '%'):
log.error("Invalid character '%s' in fax name. Please enter a name that does not contain this character." % c)
name_ok = False
if name_ok:
break
else:
fax_name = fax_default_model
log.info("Using queue name: %s" % fax_name)
fax_ppd,fax_ppd_type,nick = cups.getFaxPPDFile(mq, fax_name)
if not fax_ppd:
log.error("Unable to find HP fax PPD file! Please check you HPLIP installation and try again.")
clean_exit(1)
if auto:
location, info = '', '%s Fax Device (Automatically setup by HPLIP)'%(default_model.replace('_',' '))
else:
while True:
location = input(log.bold("Enter a location description for this printer (q=quit) ?"))
if location.strip().lower() == 'q':
log.info("OK, done.")
clean_exit(0)
# TODO: Validate chars
break
while True:
info = input(log.bold("Enter additonal information or notes for this printer (q=quit) ?"))
if info.strip().lower() == 'q':
log.info("OK, done.")
clean_exit(0)
# TODO: Validate chars
break
log.info(log.bold("\nAdding fax queue to CUPS:"))
log.info("Device URI: %s" % fax_uri)
log.info("Queue name: %s" % fax_name)
log.info("PPD file: %s" % fax_ppd)
log.info("Location: %s" % location)
log.info("Information: %s" % info)
cups.setPasswordPrompt("You do not have permission to add a fax device.")
if not os.path.exists(fax_ppd): # assume foomatic: or some such
status, status_str = cups.addPrinter(fax_name, fax_uri,
location, '', fax_ppd, info)
else:
status, status_str = cups.addPrinter(fax_name, fax_uri,
location, fax_ppd, '', info)
log.debug("addPrinter() returned (%d, %s)" % (status, status_str))
log.debug(device.getSupportedCUPSDevices(['hpfax']))
if status != cups.IPP_OK:
log.error("Fax queue setup failed. Error : %s"%status_str)
clean_exit(1)
else:
# sending Event to add this device in hp-systray
utils.sendEvent(EVENT_CUPS_QUEUES_ADDED,fax_uri, fax_name)
# ******************************* FAX HEADER SETUP
tui.header("FAX HEADER SETUP")
if auto:
setup_fax = False
else:
while True:
user_input = input(log.bold("\nWould you like to perform fax header setup (y=yes*, n=no, q=quit) ?")).strip().lower()
if user_input == 'q':
log.info("OK, done.")
clean_exit(0)
if not user_input:
user_input = 'y'
setup_fax = (user_input == 'y')
if user_input in ('y', 'n', 'q'):
break
log.error("Please enter 'y' or 'n'")
if setup_fax:
d = fax.getFaxDevice(fax_uri, disable_dbus=True)
try:
d.open()
except Error:
log.error("Unable to communicate with the device. Please check the device and try again.")
else:
try:
tries = 0
ok = True
while True:
tries += 1
try:
current_phone_num = str(d.getPhoneNum())
current_station_name = to_unicode(d.getStationName())
except Error:
log.error("Could not communicate with device. Device may be busy. Please wait for retry...")
time.sleep(5)
ok = False
if tries > 12:
break
else:
ok = True
break
if ok:
while True:
if current_phone_num:
phone_num = input(log.bold("\nEnter the fax phone number for this device (c=use current:'%s'*, q=quit) ?" % current_phone_num))
else:
phone_num = input(log.bold("\nEnter the fax phone number for this device (q=quit) ?"))
if phone_num.strip().lower() == 'q':
log.info("OK, done.")
clean_exit(0)
if current_phone_num and (not phone_num or phone_num.strip().lower() == 'c'):
phone_num = current_phone_num
if len(phone_num) > 50:
log.error("Phone number length is too long (>50 characters). Please enter a shorter number.")
continue
ok = True
for x in phone_num:
if x not in '0123456789-(+) ':
log.error("Invalid characters in phone number. Please only use 0-9, -, (, +, and )")
ok = False
break
if not ok:
continue
break
while True:
if current_station_name:
station_name = input(log.bold("\nEnter the name and/or company for this device (c=use current:'%s'*, q=quit) ?"%from_unicode_to_str(current_station_name)))
else:
station_name = input(log.bold("\nEnter the name and/or company for this device (q=quit) ?"))
if station_name.strip().lower() == 'q':
log.info("OK, done.")
clean_exit(0)
if current_station_name and (not station_name or station_name.strip().lower() == 'c'):
station_name = current_station_name
### Here station_name can be unicode or utf-8 sequence.
### making sure to convert data to unicode for all the cases.
try:
station_name.encode('utf-8')
except (UnicodeEncodeError,UnicodeDecodeError):
station_name = station_name.decode('utf-8')
if len(station_name) > 50:
log.error("Name/company length is too long (>50 characters). Please enter a shorter name/company.")
continue
break
try:
d.setStationName(station_name)
d.setPhoneNum(phone_num)
except Error:
log.error("Could not communicate with device. Device may be busy.")
else:
log.info("\nParameters sent to device.")
finally:
d.close()
# ******************************* TEST PAGE
if setup_print:
print_test_page = False
tui.header("PRINTER TEST PAGE")
if auto:
if testpage_in_auto_mode:
print_test_page = True
else:
ok, print_test_page = tui.enter_yes_no("\nWould you like to print a test page")
if not ok: clean_exit(0)
if print_test_page:
path = utils.which('hp-testpage')
if printer_name:
param = "-p%s" % printer_name
else:
param = "-d%s" % print_uri
if len(path) > 0:
cmd = 'hp-testpage -i %s' % param
else:
cmd = 'python ./testpage.py -i %s' % param
os_utils.execute(cmd)
except KeyboardInterrupt:
log.error("User exit")
cups.releaseCupsInstance()
log.info("")
log.info("Done.")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
357,
66,
8,
15069,
5816,
12,
4626,
6574,
7712,
5834,
11,
406,
13,
47,
13,
198,
2,
198,
2,
770,
1... | 1.910537 | 19,807 |
#encoding=UTF-8
"""
@author ideawu@163.com
@link http://www.ideawu.net/
"""
import new, socket
from buffer import *
LINK_ROLE_SERVER = 1
LINK_ROLE_CLIENT = 2
LINK_ROLE_ACCEPT = 3
class LinkBase:
# TODO: accept_all(self):
""" 判断是否已经读就绪 """
""" 进行一次网络读操作 """
""" 进行一次网络写操作
@return
-1: 错误
0 : 建议调用者关闭连接
"""
""" 非阻塞发送(数据拷贝到发送缓冲) """
""" 非阻塞读取 """
""" 见 send_packet, 只传入要发送的报体 """
""" 见 recv_packet, 只返回报体部分 """
""" 非阻塞的 send_packet """
""" 非阻塞的 recv_packet """
""" 将报文写到发送缓冲里
@param urgent: 若为True, 则等待网络发送完毕才返回. 默认等待.
@return
-1: 错误
"""
| [
2,
12685,
7656,
28,
48504,
12,
23,
198,
37811,
198,
31,
9800,
1405,
707,
84,
31,
24136,
13,
785,
198,
31,
8726,
2638,
1378,
2503,
13,
485,
707,
84,
13,
3262,
14,
198,
37811,
198,
11748,
649,
11,
17802,
198,
6738,
11876,
1330,
1635... | 1.208511 | 470 |
from aws_cdk import (
aws_lambda as lambda_,
core,
)
if __name__ == "__main__":
main()
| [
6738,
3253,
82,
62,
10210,
74,
1330,
357,
198,
220,
220,
220,
3253,
82,
62,
50033,
355,
37456,
62,
11,
198,
220,
220,
220,
4755,
11,
198,
8,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
... | 2.145833 | 48 |
import dataclasses
from typing import List
@dataclasses.dataclass
class GasAtTemp:
"""Adsorption is in the selected loading units and composition is based on the compositionType of the isotherm """
InChIKey: str
name: str
composition: float
adsorption: float
@dataclasses.dataclass
class TemperaturePoint:
"""A single temperature point on an isotherm. It may contain data for multiple different gases if this is a
multicomponent isotherm. See the species_data field for adsorptions of each gas."""
pressure: float
species_data: List[GasAtTemp]
| [
11748,
4818,
330,
28958,
198,
6738,
19720,
1330,
7343,
628,
198,
31,
19608,
330,
28958,
13,
19608,
330,
31172,
198,
4871,
14345,
2953,
30782,
25,
198,
220,
220,
220,
37227,
2782,
82,
273,
1159,
318,
287,
262,
6163,
11046,
4991,
290,
1... | 3.293785 | 177 |
# -*- coding: utf-8 -*-
from qstrader.risk_manager.example import ExampleRiskManager
import os
import datetime
from qstrader.price_handler.yahoo_daily_csv_bar import YahooDailyCsvBarPriceHandler
from qstrader.compat import queue
# regime_hmm_backtest.py
import datetime
import pickle
import click
import numpy as np
from qstrader import settings
from qstrader.compat import queue
from qstrader.price_parser import PriceParser
from qstrader.price_handler.yahoo_daily_csv_bar import \
YahooDailyCsvBarPriceHandler
from qstrader.strategy import Strategies, DisplayStrategy
from qstrader.position_sizer.naive import NaivePositionSizer
from qstrader.risk_manager.example import ExampleRiskManager
from qstrader.portfolio_handler import PortfolioHandler
from qstrader.compliance.example import ExampleCompliance
from qstrader.execution_handler.ib_simulated import \
IBSimulatedExecutionHandler
from qstrader.statistics.tearsheet import TearsheetStatistics
from qstrader.trading_session.backtest import Backtest
from .regime_hmm_strategy import MovingAverageCrossStrategy
from qstrader.risk_manager.regime_hmm_risk_manager import RegimeHMMRiskManager | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
10662,
2536,
5067,
13,
19121,
62,
37153,
13,
20688,
1330,
17934,
49,
1984,
13511,
628,
198,
11748,
28686,
198,
11748,
4818,
8079,
198,
6738,
10662,
2536,
5067,
13,
... | 3.428144 | 334 |
"""<internal>"""
'''
zlib License
(C) 2020-2021 DeltaRazero
All rights reserved.
'''
# ***************************************************************************************
class _:
'<imports>'
import abc
from . import textio
from .misc import ptr_t
# ***************************************************************************************
class IMatcher (metaclass=_.abc.ABCMeta):
"""Common interface to a rule matcher object instance.
"""
# --- INTERFACE GETTERS --- #
@_.abc.abstractmethod
def GetVendorId(self) -> str:
"""Gets the lexer implementation identifier string (a.k.a. 'vendor ID').
Returns
-------
str
"""
pass
@_.abc.abstractmethod
def Match(self, ts: _.textio.ITextstream) -> _.ptr_t[str]:
"""Looks for a pattern match and returns string data in case of a match.
Returns
-------
ptr_t[str]
Nullable string object. Contains string data in case of a match, otherwise
NULL/None.
"""
pass
| [
37811,
27,
32538,
29,
37811,
198,
198,
7061,
6,
198,
89,
8019,
13789,
198,
198,
7,
34,
8,
12131,
12,
1238,
2481,
16978,
49,
1031,
3529,
198,
3237,
2489,
10395,
13,
198,
7061,
6,
198,
198,
2,
41906,
17174,
8412,
2466,
8162,
198,
19... | 2.711055 | 398 |
import base64
import json
import logging
from dmsapi import DMSSession
| [
11748,
2779,
2414,
198,
11748,
33918,
198,
11748,
18931,
198,
6738,
288,
907,
15042,
1330,
14848,
5432,
2521,
628
] | 3.789474 | 19 |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.test_utils.test_util import GenArgList
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
@autotest(n=1, check_graph=False)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@autotest(n=1, check_graph=False)
# PyTorch error if open auto_backward:
# element 0 of tensors does not require grad and does not have a grad_fn
@autotest(n=1, auto_backward=False, check_graph=False)
@autotest(n=1, auto_backward=False, check_graph=False)
@autotest(n=1, auto_backward=False, check_graph=False)
@autotest(n=1, auto_backward=False, check_graph=False)
@autotest(n=1, auto_backward=False, check_graph=False)
@autotest(n=1, auto_backward=False, check_graph=False)
if __name__ == "__main__":
unittest.main()
| [
37811,
198,
15269,
12131,
383,
1881,
37535,
46665,
13,
1439,
2489,
10395,
13,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 3.022587 | 487 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import keras as k
from keras.layers import Dense, Dropout, Activation, Concatenate
from keras.optimizers import SGD
import os
import argparse
import shutil
import math
import sys
import datetime
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from tensorflow.python.ops import variables
import logging
from sklearn.utils import class_weight
from sklearn.model_selection import train_test_split
from keras.callbacks import TensorBoard
from keras.models import Sequential
from keras.layers import Dense, Activation, Embedding, Merge, Flatten, Input, concatenate
from keras.regularizers import l1_l2
from keras.models import Model
from keras.optimizers import SGD
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_dir', type=str, default='',
help='Base directory for the model.')
parser.add_argument(
'--model_type', type=str, default='deep',
help="Valid model types: {'wide', 'deep', 'wide_deep'}.")
parser.add_argument(
'--train_epochs', type=int, default=800, help='Number of training epochs.')
parser.add_argument(
'--learning_rate', type=float, default=[0.01], nargs='+',
help='The learning_rate.')
parser.add_argument(
'--decay', type=float, default=0.000001,
help='The decay.')
parser.add_argument(
'--L1', type=float, default=0.0,
help='The l1 regularization coeff.')
parser.add_argument(
'--momentum', type=float, default=0.0,
help='The momentum.')
parser.add_argument(
'--L2', type=float, default=0.0,
help='The l2 regularization coeff.')
parser.add_argument(
'--batch_size', type=int, default=300, help='Number of examples per batch.')
parser.add_argument(
'--all_data', type=str, default='',
help='Path to the test data.')
parser.add_argument('--where', type=str, default='gpu', help='cpu of gpu')
parser.add_argument(
'--airport', type=int, default=0,
help='airport number.')
parser.add_argument(
'--root_dir', type=str, default='./', help='root directory')
# In[95]:
# In[145]:
# weightsVect = class_weight.compute_class_weight('balanced', [0,1,2,3,4,5,6], trainSet['ARRIVAL_DELAY_LABEL'])
# weightsVect
# # In[146]:
# weights = np.zeros(len(y_train))
# i=0
# for x in np.nditer(y_train):
# weights[i] = weightsVect[x]
# i+=1
if __name__ == '__main__':
FLAGS, unparsed = parser.parse_known_args()
print(FLAGS)
if FLAGS.where == 'gpu':
num_GPU = 1
num_CPU = 2
if FLAGS.where == 'cpu':
num_CPU = 2
num_GPU = 0
config = tf.ConfigProto(device_count = {'CPU' : num_CPU, 'GPU' : num_GPU})
session = tf.Session(config=config)
k.backend.set_session(session)
flights, trainSet, validationSet, testSet = setup_data(FLAGS.all_data, FLAGS.airport)
size = trainSet.shape[0]
val_size = validationSet.shape[0]
input_train_data = [trainSet['DESTINATION_AIRPORT'],
trainSet['TAIL_NUMBER'],
trainSet['FLIGHT_NUMBER'],
trainSet['AIRLINE'],
trainSet['DAY_OF_WEEK'],
trainSet['DAY'],
trainSet['MONTH'],
trainSet['SCHEDULED_ARRIVAL'].astype('float32').reshape((size, 1, 1)),
trainSet['SCHEDULED_DEPARTURE'].astype('float32').reshape((size, 1, 1)),
trainSet['DISTANCE'].astype('float32').reshape((size, 1, 1))]
input_val_train_data = [validationSet['DESTINATION_AIRPORT'],
validationSet['TAIL_NUMBER'],
validationSet['FLIGHT_NUMBER'],
validationSet['AIRLINE'],
validationSet['DAY_OF_WEEK'],
validationSet['DAY'],
validationSet['MONTH'],
validationSet['SCHEDULED_ARRIVAL'].astype('float32').reshape((val_size, 1, 1)),
validationSet['SCHEDULED_DEPARTURE'].astype('float32').reshape((val_size, 1, 1)),
validationSet['DISTANCE'].astype('float32').reshape((val_size, 1, 1))]
y_train = trainSet['ARRIVAL_DELAY_LABEL'].reshape((size, 1, 1))
y_validation = validationSet['ARRIVAL_DELAY_LABEL'].reshape((val_size, 1, 1))
for lr in FLAGS.learning_rate:
print('Fitting model with learning rate = ', lr)
sgd = SGD(lr=lr,
decay=FLAGS.decay,
momentum=FLAGS.momentum,
nesterov=True)
model = BuildFeedForwardNNClassifier([
flights['DISTANCE'].astype('float32'),
flights['SCHEDULED_DEPARTURE'].astype('float32'),
flights['SCHEDULED_ARRIVAL'].astype('float32')],
[
flights['MONTH'],
flights['DAY'],
flights['DAY_OF_WEEK'],
flights['AIRLINE'],
flights['FLIGHT_NUMBER'],
flights['TAIL_NUMBER'],
flights['DESTINATION_AIRPORT']],
flights['ARRIVAL_DELAY_LABEL'], 1.2, 1, 'sigmoid',sgd,
FLAGS.L1,
FLAGS.L2)
print(model.summary())
model_directory = FLAGS.model_dir+'_lr'+str(lr)
tbCallBack = TensorBoard(log_dir=model_directory,
histogram_freq=0,
write_graph=True,
write_images=False)
model.fit(x=input_train_data, y=y_train, callbacks=[tbCallBack], batch_size=FLAGS.batch_size,
epochs=FLAGS.train_epochs, validation_data=(input_val_train_data, y_validation), shuffle=True)
#/, sample_weight=weights
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
201,
198,
6738,
11593,
37443,
834,
1330,
7297,
201,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
201,
198,
201,
198,
201,
198,
11748,
11192,
273,
11125,
355,
48700,
201,
198,
11748,... | 1.962651 | 3,320 |
import os
import config
import logging
import time
from task_handler import CodeTask
import subprocess
from flask import Flask,request,render_template,url_for,jsonify
logging.basicConfig(level=logging.DEBUG)
app=Flask(__name__)
app.secret = config.KEY
@app.route('/')
@app.route('/compile')
@app.route('/about')
@app.route('/signup')
@app.route('/login')
if __name__=='__main__':
app.run()
| [
11748,
28686,
198,
11748,
4566,
198,
11748,
18931,
198,
11748,
640,
198,
6738,
4876,
62,
30281,
1330,
6127,
25714,
198,
11748,
850,
14681,
198,
198,
6738,
42903,
1330,
46947,
11,
25927,
11,
13287,
62,
28243,
11,
6371,
62,
1640,
11,
1775... | 2.727891 | 147 |
#!/usr/bin/env python
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail
from conference import ConferenceApi
from models import SpeakerDict
import logging
SPEAKER_IDENTIFIER = 1234
app = webapp2.WSGIApplication([
('/crons/set_announcement', SetAnnouncementHandler),
('/tasks/send_confirmation_email', SendConfirmationEmailHandler),
('/tasks/add_featured_speaker', AddFeaturedSpeaker),
], debug=True)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
3992,
1324,
17,
198,
6738,
23645,
13,
1324,
18392,
13,
15042,
1330,
598,
62,
738,
414,
198,
6738,
23645,
13,
1324,
18392,
13,
15042,
1330,
6920,
198,
6738,
4495,
1330,
8785,
32,... | 3.045752 | 153 |
import logging
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
from django_fsm import FSMField, transition, post_transition
from profiles.models import BillingGroup
from profiles.models.role_manager import RoleManager
from . import Service, InstanceState
from .state_hooks import HookManager
logger = logging.getLogger(__name__)
post_transition.connect(HookManager.trigger_hook_handler, sender=Instance)
@receiver(pre_save, sender=Instance)
@receiver(post_save, sender=Instance)
| [
11748,
18931,
198,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
... | 3.4375 | 208 |
import inspect
import unittest
from itertools import product
from functools import partial
from pytf.core import Test
# Unittest compatibility loader
| [
11748,
10104,
198,
11748,
555,
715,
395,
198,
198,
6738,
340,
861,
10141,
1330,
1720,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
198,
6738,
12972,
27110,
13,
7295,
1330,
6208,
628,
198,
198,
2,
791,
715,
395,
17764,
40213,
628,
19... | 3.738095 | 42 |
print(*solve([*map(str.rstrip, open('data.txt'))]), sep='')
| [
198,
198,
4798,
46491,
82,
6442,
26933,
9,
8899,
7,
2536,
13,
81,
36311,
11,
1280,
10786,
7890,
13,
14116,
6,
4008,
46570,
41767,
28,
7061,
8,
198
] | 2.214286 | 28 |
from news_website.operations.tools import NewsUrlCache
if __name__=='__main__':
test_cache = TestNewsUrlCache()
test_cache.test_initialization()
test_cache.test_push()
test_cache.test_add_existed_ele()
test_cache.test_full()
test_cache.test_practice()
| [
6738,
1705,
62,
732,
12485,
13,
3575,
602,
13,
31391,
1330,
3000,
28165,
30562,
628,
198,
361,
11593,
3672,
834,
855,
6,
834,
12417,
834,
10354,
198,
220,
220,
220,
1332,
62,
23870,
796,
6208,
9980,
28165,
30562,
3419,
198,
220,
220,
... | 2.632075 | 106 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from ffmpymedia import __author__, __version__, __copyright__, __package__
from os.path import join as joinpath
import unittest
from ffmpymedia.media import *
from tests import TEST_FILE_PATH
class TestMediaStream(unittest.TestCase):
"""
Testes de criação dos objetos de fluxo de mídia
"""
class TestMediaStreamTemplate(unittest.TestCase):
"""
Testes de criação dos objetos template de fluxo de mídia
"""
class TestMediaStreamTemplateAnalysis(unittest.TestCase):
"""
Testes das funcionalidades de análise dos templates de fluxos de mídia
"""
def test_empty_template_equal1(self):
"""
Deve retornar verdadeiro sempre pois o template não faz nenhuma exigência
"""
self.assertTrue(MediaStreamTemplate(**{'type': 'video'}) == MediaStream(**{'type': 'video',
'sample_format': 'yuv420p', 'width': '66718', 'height': '643816hsa',
'blablabla': 'sakjhfashkjf'}))
def test_template_equality(self):
"""
Testa um Template com todas as informações
"""
self.assertTrue(MediaStreamTemplate(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video',
'height': '1080'}) ==
MediaStream(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video', 'profile': 'Main',
'codec': 'mpeg2video', 'height': '1080'}))
def test_full_template_equal3(self):
"""
Testa Um media stream sem uma chave que esta no Template
"""
self.assertFalse(MediaStreamTemplate(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080'}) ==\
MediaStream(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'codec': 'mpeg2video', 'height': '1080'}))
def test_stream_difference_with_different_height(self):
"""
Testa um MediaFile diferente
"""
self.assertTrue(MediaStreamTemplate(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080'}).
difference(MediaStream(**{'sample_format': 'yuv420p', 'width': '1280', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '720'})))
def test_stream_difference_with_equal_streams(self):
"""
Testa a diferença
"""
self.assertFalse(MediaStreamTemplate(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080',
'metadata': {'title': 'Test with Metadata'},
'disposition': {"default": 1, "dub": 0, "original": 0, "comment": 0,
"lyrics": 0, "karaoke": 0, "forced": 1,
"hearing_impaired": 0, "visual_impaired": 0,
"clean_effects": 0, "attached_pic": 0}}).
difference(MediaStream(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080',
'metadata': {'title': 'Test with Metadata'},
'disposition': {"default": 1, "dub": 0, "original": 0, "comment": 0,
"lyrics": 0, "karaoke": 0, "forced": 1,
"hearing_impaired": 0, "visual_impaired": 0,
"clean_effects": 0, "attached_pic": 0}})))
def test_stream_difference_with_different_metadata(self):
"""
Testa um MediaFile diferente
"""
self.assertEqual(MediaStreamTemplate(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080',
'metadata': {'title': 'Test with Metadata'},
'disposition': {"default": 1, "dub": 0, "original": 0, "comment": 0,
"lyrics": 0, "karaoke": 0, "forced": 1,
"hearing_impaired": 0, "visual_impaired": 0,
"clean_effects": 0, "attached_pic": 0}}).
difference(MediaStream(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080',
'metadata': {'title': 'Different Metadata!'},
'disposition': {"default": 1, "dub": 0, "original": 0, "comment": 0,
"lyrics": 0, "karaoke": 0, "forced": 1,
"hearing_impaired": 0, "visual_impaired": 0,
"clean_effects": 0, "attached_pic": 0}}),
include_metadata=True),
{'metadata': {'title': ('Different Metadata!', 'Test with Metadata')}})
def test_stream_difference_with_different_dispositions(self):
"""
Testa a diferença
"""
self.assertEqual(MediaStreamTemplate(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080',
'metadata': {'title': 'Test with Metadata'},
'disposition': {"default": 1, "dub": 0, "original": 0, "comment": 0,
"lyrics": 0, "karaoke": 0, "forced": 1,
"hearing_impaired": 0, "visual_impaired": 0,
"clean_effects": 0, "attached_pic": 0}}).
difference(MediaStream(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080',
'metadata': {'title': 'Test with Metadata'},
'disposition': {"default": 0, "dub": 0, "original": 0, "comment": 0,
"lyrics": 0, "karaoke": 0, "forced": 1,
"hearing_impaired": 0, "visual_impaired": 0,
"clean_effects": 0, "attached_pic": 0}})),
{'disposition': {'default': (0, 1)}})
class TestMediaFileCreation(unittest.TestCase):
"""
Testes das funcionalidades da classe MediaFile
""" | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
31246,
3149,
88,
11431,
1330,
11593,
9800,
834,
11,
11593,
9641,
834,
11,
11593,
22163,
4766,
834,
11,
... | 1.689244 | 4,695 |
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.desk.reportview import build_match_conditions
from frappe.utils import flt, cint, getdate, now, date_diff
| [
2,
15069,
357,
66,
8,
2211,
11,
39313,
27768,
21852,
18367,
83,
13,
12052,
13,
290,
20420,
198,
2,
1114,
5964,
1321,
11,
3387,
766,
5964,
13,
14116,
198,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
11748,
5... | 3.472527 | 91 |
"""
Main plugin tests.
.. note:: Please to not run nosetests with this plugin for testing the plugin
itself. Also, mock.patch is not much help here as it would try to mock the
module that would be already imported by nose itself if plugin was installed
globally (it would take module from sys.modules).
"""
import unittest
from mock import Mock, patch
from nosewatch.plugin import WatchPlugin
| [
37811,
198,
13383,
13877,
5254,
13,
198,
198,
492,
3465,
3712,
4222,
284,
407,
1057,
43630,
316,
3558,
351,
428,
13877,
329,
4856,
262,
13877,
198,
220,
220,
2346,
13,
4418,
11,
15290,
13,
17147,
318,
407,
881,
1037,
994,
355,
340,
... | 4 | 101 |
from enum import Enum
from collections import deque, defaultdict
from functools import partial
class node_status(Enum):
"""List of possible visit status of graph nodes (graph traversal)."""
UNVISITED = 0
VISITED = 1
VISITING = 2
| [
6738,
33829,
1330,
2039,
388,
198,
6738,
17268,
1330,
390,
4188,
11,
4277,
11600,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
198,
4871,
10139,
62,
13376,
7,
4834,
388,
2599,
198,
220,
220,
220,
37227,
8053,
286,
1744,
3187,
3722,
... | 3.194805 | 77 |
from __future__ import absolute_import
from six import text_type
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.inspection import inspect
import sqlalchemy.orm as saorm
from . import entities as ents
from . import interfaces
class PermissionMixin(interfaces.HasPermissions, KegBouncerMixin):
"""A mixin that adds permission facilities to a SQLAlchemy declarative user entity.
A class which mixes this in must provide one of the following:
* An `id` column member which represents the primary key. The actual column may have any
name and any type.
* Or, a `primary_key_column` class variable that gives the name of the primary key column
as a string.
"""
# Instances will shadow this when populating their own cache.
_cached_permissions = None
@declared_attr
@declared_attr
def user_user_group_map(cls):
"""A linking (mapping) table between users and user groups."""
return ents.make_user_to_user_group_link(cls._primary_key_column(), cls.__tablename__)
@hybrid_property
@hybrid_property
def permissions_query(self):
"""A query that maps users to permissions through all possible avenues."""
return ents.joined_permission_query().join(
self.user_user_group_map,
sa.or_(
self.user_user_group_map.c.user_group_id
== ents.user_group_permission_map.c.user_group_id, # noqa
self.user_user_group_map.c.user_group_id
== ents.user_group_bundle_map.c.user_group_id # noqa
)
)
@hybrid_property
def permissions_with_user_id_query(self):
"""
Like `permissions_query` but adds a column called `user_id` that can be used to
filter/join on a particular user ID or user ID column.
"""
return self.permissions_query.add_columns(
self.user_mapping_column.label('user_id')
)
def get_all_permissions_without_cache(self):
"""Get all permissions that are joined to this User, whether directly, through permission
bundles, or through user groups.
Warning: Calling this method on a deleted entity may raise
:class:`sqlalchemy.orm.exc.ObjectDeletedError`.
"""
return frozenset(self.permissions_query.filter(
self.user_mapping_column == self._primary_key
))
def get_all_permissions(self):
"""Same as `get_all_permissions_without_cache` but uses a cached result after the first
call.
Warning: Calling this method on a deleted entity may raise
:class:`sqlalchemy.orm.exc.ObjectDeletedError`.
"""
self._cached_permissions = (self._cached_permissions
or self.get_all_permissions_without_cache())
return self._cached_permissions
def has_permissions(self, *tokens):
"""Returns True IFF every given permission token is present in the user's permission set.
Warning: Calling this method on a deleted entity may raise
:class:`sqlalchemy.orm.exc.ObjectDeletedError`.
"""
return frozenset(tokens) <= {x.token for x in self.get_all_permissions()}
def has_any_permissions(self, *tokens):
"""Returns True IFF any of the given permission tokens are present in the user's permission
set.
Warning: Calling this method on a deleted entity may raise
:class:`sqlalchemy.orm.exc.ObjectDeletedError`.
"""
return not frozenset(tokens).isdisjoint(x.token for x in self.get_all_permissions())
def make_password_mixin(history_entity_mixin=object, crypt_context=None):
    """Returns a mixin that adds password history and utility functions for working with passwords.

    :param history_entity_mixin: is an optional mixin to add to the password history entity.
                                 Supply a mixin if you want to include customized meta-information
                                 for each password in the history log.
    :param crypt_context: is an optional default :class:`CryptContext` object for hashing passwords.
                          If not supplied you must override the `get_crypt_context` method to
                          provide one.
    """
    # NOTE(review): `PasswordMixin` is not defined in the visible portion of this
    # file; presumably it is declared here (closing over the arguments above) in
    # code elided from this view -- confirm before refactoring.
    return PasswordMixin
def make_login_history_mixin(history_entity_mixin=object):
    """Returns a mixin that adds login history relationships.

    :param history_entity_mixin: an optional mixin to add to the login history entity. Supply a
                                 mixin if you want to include customized meta-information for each
                                 entry in the history log.
    """
    # NOTE(review): `LoginHistoryMixin` is not defined in the visible portion of
    # this file; presumably it is declared here (closing over the argument above)
    # in code elided from this view -- confirm before refactoring.
    return LoginHistoryMixin
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
2237,
1330,
2420,
62,
4906,
198,
11748,
44161,
282,
26599,
355,
473,
198,
6738,
44161,
282,
26599,
13,
2302,
13,
32446,
283,
876,
1330,
6875,
62,
35226,
198,
6738,
44161,
... | 2.573326 | 1,882 |
#
# ayame.exception
#
# Copyright (c) 2011-2021 Akinori Hattori <hattya@gmail.com>
#
# SPDX-License-Identifier: MIT
#
# Public names exported by this exception module (used by `import *`).
__all__ = ['AyameError', 'ComponentError', 'ConversionError', 'MarkupError',
           'RenderingError', 'ResourceError', 'RouteError', 'ValidationError']
| [
2,
198,
2,
38762,
480,
13,
1069,
4516,
198,
2,
198,
2,
220,
220,
15069,
357,
66,
8,
2813,
12,
1238,
2481,
48857,
10145,
48509,
10145,
1279,
5183,
774,
64,
31,
14816,
13,
785,
29,
198,
2,
198,
2,
220,
220,
30628,
55,
12,
34156,
... | 2.557522 | 113 |
"""
The evaluation entry point for WIDER Challenge 2019: Face Detection Accuracy+Runtime Track.
It will be the entrypoint for the evaluation docker once built.
Basically It downloads a list of images and run the face detector on each image.
Then the runtime and detection output will be reported to the evaluation system.
The participants are expected to implement a face detector class. The sample detector illustrates the interface.
Do not modify other part of the evaluation toolkit otherwise the evaluation will fail.
Author: Yuanjun Xiong
Contact: bitxiong@gmail.com
WIDER Challenge 2019
"""
import time
import sys
import logging
import numpy as np
from eval_kit.client import upload_eval_output, get_image_iter, get_job_id
# Emit INFO-level progress logs for the whole evaluation run.
logging.basicConfig(level=logging.INFO)
########################################################################################################
# please change these lines to include your own face detector extending the eval_kit.detector.FaceDetector base class.
# NOTE(review): "mmdetection" is appended to sys.path so `mm_detector` resolves from that checkout.
sys.path.append("mmdetection")
from mm_detector import MMDetector as WIDERTestFaceDetectorClass
########################################################################################################
def evaluate_runtime(detector_class, image_iter, job_id):
    """
    Please DO NOT modify this part of code or the eval_kit
    Modification of the evaluation toolkit could result in cancellation of your award.
    In this function we create the detector instance. And evaluate the wall time for performing face detection.

    :param detector_class: the FaceDetector subclass to instantiate (a class, not an instance).
    :param image_iter: iterable yielding (image_id, image) pairs to be evaluated.
    :param job_id: identifier of this evaluation job, forwarded to the upload call.
    """
    # initialize the detector
    logging.info("Initializing face detector.")
    try:
        detector = detector_class()
    except:
        # send errors to the eval frontend
        # (bare raise preserves the original traceback for the evaluation system)
        raise
    logging.info("Detector initialized.")
    # run the images one-by-one and get runtime
    overall_time = 0
    output_boxes = {}
    output_time = {}
    eval_cnt = 0
    logging.info("Starting runtime evaluation")
    for image_id, image in image_iter:
        # Wall-clock time is measured around the detector call only; the
        # bookkeeping below is excluded from the reported per-image runtime.
        time_before = time.time()
        try:
            boxes = detector.process_image(image)
            assert isinstance(boxes, np.ndarray)
            output_boxes[image_id] = boxes
        except:
            # send errors to the eval frontend
            logging.error("Image id failed: {}".format(image_id))
            raise
        elapsed = time.time() - time_before
        output_time[image_id] = elapsed
        logging.info("image {} run time: {}".format(image_id, elapsed))
        overall_time += elapsed
        eval_cnt += 1
        if eval_cnt % 100 == 0:
            # periodic progress heartbeat
            logging.info("Finished {} images".format(eval_cnt))
    logging.info("all image finished, uploading evaluation outputs for evaluation.")
    # send evaluation output to the server
    upload_eval_output(output_boxes, output_time, job_id)
if __name__ == '__main__':
    # Fetch the job id and the image stream from the evaluation service, then
    # time the contestant-supplied detector on every image and upload results.
    job_id = get_job_id()
    wider_test_image_iter = get_image_iter()
    evaluate_runtime(WIDERTestFaceDetectorClass, wider_test_image_iter, job_id)
| [
37811,
198,
464,
12660,
5726,
966,
329,
370,
41237,
13879,
13130,
25,
15399,
46254,
33222,
10,
41006,
17762,
13,
198,
198,
1026,
481,
307,
262,
5726,
4122,
329,
262,
12660,
36253,
1752,
3170,
13,
198,
31524,
632,
21333,
257,
1351,
286,
... | 3.221865 | 933 |
import cv2
import torch.nn.functional as F
import torch
import matplotlib.pyplot as plt
import cv2
def normalize(imgs, img_type, **kwargs):
    """Normalize a batch of inputs according to their modality tag.

    :param imgs: batch of inputs; assumed to be a torch tensor for "mask"/"class"
                 since ``.long()`` is called on it -- TODO confirm for callers.
    :param img_type: modality tag: "image", "mask", "class", or "pcd".
    :param kwargs: unused; accepted for call-site compatibility.
    :return: the normalized batch.
    """
    if img_type == "image":
        # Scale raw 8-bit pixel values into [0, 1].
        return imgs / 255.0
    if img_type in ("mask", "class"):
        # Label maps must be integer-typed (e.g. for loss computation).
        return imgs.long()
    # Point clouds ("pcd") and any unrecognized type pass through unchanged.
    # (The original "pcd" branch contained a redundant `imgs = imgs`
    # self-assignment and was identical to the fallthrough.)
    return imgs
11748,
269,
85,
17,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
28034,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
269,
85,
17,
628,
198,
4299,
3487,
1096,
7,
9600,
82,
11,
33705,
62,... | 2.353261 | 184 |
# Copyright (c) 2017-2021 Neogeo-Technologies.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.http import Http404
from functools import wraps
# Définition des exceptions
# =========================
# Utilitaires
# ===========
| [
2,
15069,
357,
66,
8,
2177,
12,
1238,
2481,
21227,
469,
78,
12,
25574,
5823,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
... | 3.660377 | 212 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Another useless python script
@author: ratin
"""
#TODO: Implement Authentication System
#TODO: Implement feature to save in Mongo the books which are asked often
#TODO: Implement feature to ask for the number of books sent in the API
import datetime
import sys
from json import dumps
from flask import Flask, request
from flask.json import jsonify
from flask_cors import CORS, cross_origin
from flask_restful import Api, Resource
app = Flask(__name__)
# Allow cross-origin requests from any origin to every /api/* route.
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
# api = Api(app , errors={
# 'NotFound': {
# 'message': "Something is missing.",
# 'status': 404,
# }
# }
# )
# class Serve(Resource):
# def __init__(self,book_type="novel"):
# self.message= "Working"
# self.git_name = request.args.get("git_name")
# self.git_email = request.args.get("git_email")
# self.data = request.args.get('data')
# self.error = None
# def do(self):
# from Art import GithubArt
# self.message = "done"
# temp = GithubArt(self.git_email,self.git_name)
# temp.cnv_DataURL_image(self.data)
# temp.cleanup()
# # self.message = temp.everything()
# # temp.finish()
# def get(self):
# self.do()
# print("==================")
# print(self.data)
# print("==================")
# print(type(self.data))
# if self.error:
# return {
# "error": self.error,
# "data" : "",
# 'name' : "",
# 'email': "",
# "error": "",
# "time": str(datetime.datetime.now())[:]
# }
# else:
# return {
# "data": self.data,
# 'name': self.git_name,
# 'email': self.git_email,
# "error": self.error,
# "time": str(datetime.datetime.now())[:]
# }
# api.add_resource(Serve, "/api")
@app.route('/test/', methods=['GET','POST'])
@cross_origin()
if __name__ == "__main__":
    # Development server entry point; debug=True must not be used in production.
    app.run(port="5002",debug=True)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
6610,
13894,
21015,
4226,
198,
31,
9800,
25,
4227,
259,
198,
37811,
198,
2,
51,
3727,
46,
25,
48282,
... | 2.045923 | 1,067 |