content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import os
import pandas as pd
from phenotrex.io.flat import load_genotype_file, DEFAULT_TRAIT_SIGN_MAPPING
from phenotrex.io.serialization import load_classifier
from phenotrex.ml.shap_handler import ShapHandler
try:
from phenotrex.transforms import fastas_to_grs
except ModuleNotFoundError:
from phenotrex.util.helpers import fail_missing_dependency as fastas_to_grs
def predict(
    fasta_files=tuple(),
    genotype=None,
    classifier=None,
    min_proba=0.5,
    out_explain_per_sample=None,
    out_explain_summary=None,
    shap_n_samples=None,
    n_max_explained_features=None,
    deepnog_threshold=None,
    verb=False,
):
    """
    Predict phenotype from a set of (possibly gzipped) DNA or protein FASTA files
    or a single genotype file. Optionally, compute SHAP explanations individually and/or summarily
    for the predicted samples.

    NB: Genotype computation is highly expensive and performed on the fly on FASTA files.
    For increased speed when predicting multiple phenotypes, create a .genotype file to reuse
    with the command `compute-genotype`.

    NB: As opposed to XGB models where they are trivially available, computing SHAP explanations
    on SVM models entails training a model-agnostic KernelExplainer which is highly costly (dozens
    to hundreds of seconds per sample if using a somewhat reasonable value for `shap_n_samples`).

    :param fasta_files: An iterable of fasta file paths
    :param genotype: A genotype file path
    :param classifier: A pickled classifier file path
    :param min_proba: Confidence threshold of the phenotrex prediction below which
                      predictions will be masked by 'N/A'.
    :param out_explain_per_sample: Where to save the most influential features by SHAP for each
                                   predicted sample.
    :param out_explain_summary: Where to save the SHAP summary of the predictions.
    :param shap_n_samples: The n_samples parameter -
                           only used by models which incorporate a `shap.KernelExplainer`.
    :param n_max_explained_features: How many of the most influential features by SHAP to consider.
    :param deepnog_threshold: Confidence threshold of deepnog annotations below which annotations
                              will be discarded.
    :param verb: Whether to show progress of fasta file annotation.
    :raises RuntimeError: if neither FASTA files nor a genotype file were supplied, or if
                          SHAP output was requested from a model that cannot produce it.
    """
    if not len(fasta_files) and genotype is None:
        raise RuntimeError('Must supply FASTA file(s) and/or single genotype file for prediction.')
    if len(fasta_files):
        grs_from_fasta = fastas_to_grs(
            fasta_files, confidence_threshold=deepnog_threshold, n_threads=None, verb=verb
        )
    else:
        grs_from_fasta = []
    grs_from_file = load_genotype_file(genotype) if genotype is not None else []
    # Genotype records from both sources are predicted together.
    gr = grs_from_fasta + grs_from_file
    model = load_classifier(filename=classifier, verb=verb)

    if out_explain_per_sample is not None or out_explain_summary is not None:
        try:
            fs, sv, bv = model.get_shap(
                gr, n_samples=shap_n_samples, n_features=n_max_explained_features
            )
        except TypeError as err:
            # Chain the original error for debuggability.
            raise RuntimeError(
                'This TrexClassifier is not capable of generating SHAP explanations.'
            ) from err
        except MemoryError:
            # Exit immediately with a catchable exit code. os._exit() never
            # returns, so no re-raise is possible (the original `raise e`
            # after this call was unreachable dead code and was removed).
            os._exit(137)
        sh = ShapHandler.from_clf(model)
        sh.add_feature_data(
            sample_names=[x.identifier for x in gr], features=fs, shaps=sv, base_value=bv
        )
        if out_explain_per_sample is not None:
            # One SHAP force table per sample, concatenated row-wise.
            shap_df = pd.concat([
                sh.get_shap_force(x.identifier, n_max_features=n_max_explained_features) for x in gr
            ], axis=0)
            shap_df.to_csv(out_explain_per_sample, sep='\t', index=False)
        if out_explain_summary is not None:
            sum_df = sh.get_shap_summary(n_max_explained_features)
            sum_df.to_csv(out_explain_summary, sep='\t', index=False)

    preds, probas = model.predict(X=gr)
    # Invert the trait-sign mapping: class index -> human-readable label.
    translate_output = {
        trait_id: trait_sign for trait_sign, trait_id in DEFAULT_TRAIT_SIGN_MAPPING.items()
    }
    print(f"# Trait: {model.trait_name}")
    print("Identifier\tTrait present\tConfidence")
    for record, result, probability in zip(gr, preds, probas):
        # Mask low-confidence predictions instead of reporting them.
        if probability[result] < min_proba:
            result_disp = "N/A"
        else:
            result_disp = translate_output[result]
        print(f"{record.identifier}\t{result_disp}\t{str(round(probability[result], 4))}")
| [
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
6566,
313,
21510,
13,
952,
13,
38568,
1330,
3440,
62,
5235,
8690,
62,
7753,
11,
5550,
38865,
62,
51,
3861,
2043,
62,
46224,
62,
44,
24805,
2751,
198,
6738,
6566,
31... | 2.504647 | 1,829 |
import sqlite3
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this fragment — presumably
    # defined in the truncated portion of the file. TODO confirm.
    main()
| [
11748,
44161,
578,
18,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.333333 | 24 |
import collections
import heapq
import numpy as np
ans = solveWithBackTracking(7, 7) | [
11748,
17268,
198,
11748,
24575,
80,
198,
11748,
299,
32152,
355,
45941,
220,
628,
198,
198,
504,
796,
8494,
3152,
7282,
2898,
5430,
7,
22,
11,
767,
8
] | 3.142857 | 28 |
import json
from cryptojwt.utils import as_unicode
from cryptojwt.jws.utils import alg2keytype
from cryptojwt.jws.jws import JWS
from cryptojwt.jws.jws import factory
from fedoidcmsg import MetadataStatement
from fedoidcmsg.bundle import jwks_to_keyjar
from oidcmsg.oidc import JsonWebToken
from cryptojwt.jwt import JWT
def self_sign_jwks(keyjar, iss, kid='', lifetime=3600):
    """Build a signed JWT whose payload carries the issuer's own JWKS.

    The token is signed with one of the keys contained in that JWKS.

    :param keyjar: A KeyJar instance with at least one private signing key
    :param iss: Issuer of the JWT; should be the owner of the keys
    :param kid: Key ID selecting a specific key; one is picked at random
        when left empty.
    :param lifetime: Validity period of the signed JWT in seconds
    :return: A signed JWT
    """
    exported_jwks = keyjar.export_jwks(issuer=iss)
    signer = JWT(keyjar, iss=iss, lifetime=lifetime)
    return signer.pack(payload={'jwks': exported_jwks}, owner=iss, kid=kid)
def verify_self_signed_jwks(sjwt):
    """Verify the signature of a signed JWT that carries a JWKS.

    The JWT is expected to be signed by one of the keys inside its own
    'jwks' claim::

        'jwks': {
            'keys': [ ]
        }

    :param sjwt: Signed Jason Web Token
    :return: Dictionary with 'jwks' (the JWKS) and 'iss' (the issuer of
        the JWT)
    """
    parsed = factory(sjwt)
    payload = json.loads(as_unicode(parsed.jwt.part[1]))
    issuer = payload['iss']
    inner_jwks = payload['jwks']
    key_jar = jwks_to_keyjar(inner_jwks, issuer)
    headers = parsed.jwt.headers
    # Prefer the key named in the header; otherwise any signing key works.
    if 'kid' in headers:
        keys = key_jar.get_signing_key(owner=issuer, kid=headers['kid'])
    else:
        keys = key_jar.get_signing_key(owner=issuer)
    verified = parsed.verify_compact(sjwt, keys)
    return {'jwks': verified['jwks'], 'iss': issuer}
def request_signed_by_signing_keys(keyjar, msreq, iss, lifetime, kid=''):
    """
    A metadata statement signing request with 'signing_keys' signed by one
    of the keys in 'signing_keys'.

    If *msreq* has no 'signing_keys' entry, the issuer's JWKS is exported
    from *keyjar* and inserted (NB: this mutates *msreq* in place).

    :param keyjar: A KeyJar instance with the private signing key
    :param msreq: Metadata statement signing request. A MetadataStatement
        instance.
    :param iss: Issuer of the signing request also the owner of the signing
        keys.
    :param lifetime: Validity period of the signed JWT in seconds
    :param kid: Optional key ID selecting a specific signing key
    :return: Signed JWT where the body is the metadata statement
    """
    try:
        # EAFP: only verify the provided signing keys load into a KeyJar.
        jwks_to_keyjar(msreq['signing_keys'], iss)
    except KeyError:
        # No signing keys supplied — fall back to the issuer's own JWKS.
        jwks = keyjar.export_jwks(issuer=iss)
        msreq['signing_keys'] = jwks
    _jwt = JWT(keyjar, iss=iss, lifetime=lifetime)
    return _jwt.pack(owner=iss, kid=kid, payload=msreq.to_dict())
def verify_request_signed_by_signing_keys(smsreq):
    """Verify a JWT that is signed with a key carried inside the JWT itself.

    :param smsreq: Signed Metadata Statement signing request
    :return: Dictionary with 'ms' (the signed request as a
        MetadataStatement) and 'iss' (the issuer of the JWT).
    """
    parsed = factory(smsreq)
    payload = json.loads(as_unicode(parsed.jwt.part[1]))
    issuer = payload['iss']
    key_jar = jwks_to_keyjar(payload['signing_keys'], issuer)
    headers = parsed.jwt.headers
    # Prefer the key named in the header; otherwise any signing key works.
    if 'kid' in headers:
        keys = key_jar.get_signing_key(owner=issuer, kid=headers['kid'])
    else:
        keys = key_jar.get_signing_key(owner=issuer)
    verified = parsed.verify_compact(smsreq, keys)
    # Strip the JWT-specific claims (and 'kid') so only the metadata
    # statement content remains; pop() with a default ignores absentees.
    for claim in list(JsonWebToken.c_param.keys()) + ['kid']:
        verified.pop(claim, None)
    return {'ms': MetadataStatement(**verified), 'iss': issuer}
| [
11748,
33918,
198,
198,
6738,
8194,
13210,
46569,
13,
26791,
1330,
355,
62,
46903,
1098,
198,
6738,
8194,
13210,
46569,
13,
73,
18504,
13,
26791,
1330,
435,
70,
17,
2539,
4906,
198,
6738,
8194,
13210,
46569,
13,
73,
18504,
13,
73,
185... | 2.340708 | 1,582 |
from PyQt4 import QtGui
import bisect
| [
6738,
9485,
48,
83,
19,
1330,
33734,
8205,
72,
198,
11748,
47457,
478,
628
] | 2.785714 | 14 |
import re
# Register state keyed by register name; MAX_VAL tracks the running maximum
# value ever held by any register.
registers = {}
MAX_VAL = 0
# NOTE(review): non-raw string containing regex escapes (\d); works but a raw
# string r"..." would be conventional.
instruction_re = re.compile("([a-z]+) (inc|dec) (-*\d+) if ([a-z]+) (.+ -*\d+)")
with open("inputs/day8.txt") as f:
    for line in f:
        line = line.strip()
        (register, command, value, condition_register, condition) = instruction_re.match(line).groups()
        # NOTE(review): get_register/update_register are not defined in this
        # fragment — presumably defined in the truncated portion. TODO confirm.
        expression = "%d %s" % (get_register(condition_register), condition)
        # HACK: eval() on file-derived text — acceptable for a puzzle input,
        # unsafe on untrusted data.
        if eval(expression):
            update_register(register, value, command)
print("part1:", max(registers.values()))
print("part2:", MAX_VAL)
11748,
302,
198,
198,
2301,
6223,
796,
23884,
198,
22921,
62,
23428,
796,
657,
198,
198,
8625,
2762,
62,
260,
796,
302,
13,
5589,
576,
7203,
26933,
64,
12,
89,
60,
28988,
357,
1939,
91,
12501,
8,
13841,
9,
59,
67,
28988,
611,
2956... | 2.415179 | 224 |
import unittest
import pandas as pd
from pandas.testing import assert_frame_equal
from zenml.preprocessing import parse_date
from zenml.utils import array_to_series
if __name__ == '__main__':
    # Discover and run the test cases defined in this module.
    unittest.main()
| [
11748,
555,
715,
395,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
19798,
292,
13,
33407,
1330,
6818,
62,
14535,
62,
40496,
198,
6738,
1976,
268,
4029,
13,
3866,
36948,
1330,
21136,
62,
4475,
198,
6738,
1976,
268,
4029,
13,
26791,... | 3.014085 | 71 |
import pandas as pd
from data.process_data.process_data import process_data
# Input/output locations for the Ta-Feng grocery data set.
TA_FENG_PATH = 'data/tafeng/tafeng.csv'
TA_FENG_OUTPUT = 'tafeng'
# Column names in the raw CSV.
TA_FENG_USER_COL = 'CUSTOMER_ID'
TA_FENG_TIME_COL = 'TRANSACTION_DT'
TA_FENG_ITEM_COL = 'PRODUCT_ID'
TA_FENG_ITEM_TYPE_COL = 'PRODUCT_SUBCLASS'
# Date format of the TRANSACTION_DT column.
TA_FENG_TIME_FORMAT = '%m/%d/%Y'
# Filtering thresholds for items, per-user item counts, and per-user set counts.
TA_FENG_MIN_ITEM_CNT = 10
TA_FENG_MIN_USER_ITEM_CNT = 10
TA_FENG_MAX_USER_ITEM_CNT = 1000
TA_FENG_MIN_USER_SET_CNT = 4
TA_FENG_MAX_USER_SET_CNT = 100
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this fragment — presumably
    # defined in the truncated portion of the file. TODO confirm.
    main()
| [
11748,
19798,
292,
355,
279,
67,
198,
6738,
1366,
13,
14681,
62,
7890,
13,
14681,
62,
7890,
1330,
1429,
62,
7890,
198,
198,
5603,
62,
37,
26808,
62,
34219,
796,
705,
7890,
14,
83,
1878,
1516,
14,
83,
1878,
1516,
13,
40664,
6,
198,... | 2.011858 | 253 |
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2018-2020 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""A widget dedicated to display scatter plots
It is based on a :class:`~silx.gui.plot.PlotWidget` with additional tools
for scatter plots.
"""
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "14/06/2018"
import logging
import weakref
import numpy
from . import items
from . import PlotWidget
from . import tools
from .tools.profile import ScatterProfileToolBar
from .ColorBar import ColorBarWidget
from .ScatterMaskToolsWidget import ScatterMaskToolsWidget
from ..widgets.BoxLayoutDockWidget import BoxLayoutDockWidget
from .. import qt, icons
from ...utils.proxy import docstring
from ...utils.weakref import WeakMethodProxy
_logger = logging.getLogger(__name__)
class ScatterView(qt.QMainWindow):
"""Main window with a PlotWidget and tools specific for scatter plots.
:param parent: The parent of this widget
:param backend: The backend to use for the plot (default: matplotlib).
See :class:`~silx.gui.plot.PlotWidget` for the list of supported backend.
:type backend: Union[str,~silx.gui.plot.backends.BackendBase.BackendBase]
"""
_SCATTER_LEGEND = ' '
"""Legend used for the scatter item"""
def __createEmptyScatter(self):
    """Add and return a fresh, empty scatter item used to hold the data.

    :rtype: ~silx.gui.plot.items.Scatter
    """
    plot_widget = self.getPlotWidget()
    plot_widget.addScatter(x=(), y=(), value=(), legend=self._SCATTER_LEGEND)
    item = plot_widget._getItem(kind='scatter', legend=self._SCATTER_LEGEND)
    # Keep the item non-selectable so it does not interfere with
    # profile interaction.
    item._setSelectable(False)
    return item
def _pickScatterData(self, x, y):
    """Get data and index and value of top most scatter plot at position (x, y)

    Results are memoized on the last queried position, so repeated calls
    for the same (x, y) reuse the cached pick.

    :param float x: X position in plot coordinates
    :param float y: Y position in plot coordinates
    :return: A (dataIndex, x, y, value) tuple for the picked point, or None
    """
    pickingPos = x, y
    # Only re-pick when the queried position changed since the last call.
    if self.__lastPickingPos != pickingPos:
        self.__pickingCache = None
        self.__lastPickingPos = pickingPos
        plot = self.getPlotWidget()
        if plot is not None:
            pixelPos = plot.dataToPixel(x, y)
            if pixelPos is not None:
                # Start from top-most item
                result = plot._pickTopMost(
                    pixelPos[0], pixelPos[1],
                    lambda item: isinstance(item, items.Scatter))
                if result is not None:
                    item = result.getItem()
                    if item.getVisualization() is items.Scatter.Visualization.BINNED_STATISTIC:
                        # Get highest index of closest points
                        selected = result.getIndices(copy=False)[::-1]
                        dataIndex = selected[numpy.argmin(
                            (item.getXData(copy=False)[selected] - x)**2 +
                            (item.getYData(copy=False)[selected] - y)**2)]
                    else:
                        # Get last index
                        # with matplotlib it should be the top-most point
                        dataIndex = result.getIndices(copy=False)[-1]
                    self.__pickingCache = (
                        dataIndex,
                        item.getXData(copy=False)[dataIndex],
                        item.getYData(copy=False)[dataIndex],
                        item.getValueData(copy=False)[dataIndex])
    return self.__pickingCache
def _getPickedIndex(self, x, y):
    """Return the data index of the top-most scatter at (x, y), or '-'.

    :param float x: X position in plot coordinates
    :param float y: Y position in plot coordinates
    """
    picked = self._pickScatterData(x, y)
    if picked is None:
        return '-'
    return picked[0]
def _getPickedX(self, x, y):
    """Return x snapped to the picked scatter point when one is close enough.

    :param float x:
    :param float y:
    :rtype: float
    """
    picked = self._pickScatterData(x, y)
    return picked[1] if picked is not None else x
def _getPickedY(self, x, y):
    """Return y snapped to the picked scatter point when one is close enough.

    :param float x:
    :param float y:
    :rtype: float
    """
    picked = self._pickScatterData(x, y)
    return picked[2] if picked is not None else y
def _getPickedValue(self, x, y):
    """Return the data value of the top-most scatter at (x, y), or '-'.

    :param float x: X position in plot coordinates
    :param float y: Y position in plot coordinates
    """
    picked = self._pickScatterData(x, y)
    if picked is None:
        return '-'
    return picked[3]
def _mouseInPlotArea(self, x, y):
    """Clamp mouse pixel coordinates to the plot area bounds.

    :param float x: X position in pixels
    :param float y: Y position in pixels
    :return: (x, y) clipped to the plot area, in pixels
    """
    bounds = self.getPlotWidget().getPlotBoundsInPixels()
    left, top, width, height = bounds
    clippedX = numpy.clip(x, left, left + width - 1)
    clippedY = numpy.clip(y, top, top + height - 1)
    return clippedX, clippedY
def getPlotWidget(self):
    """Return the :class:`~silx.gui.plot.PlotWidget` this window is based on.

    :rtype: ~silx.gui.plot.PlotWidget
    """
    # self._plot is callable (weakref-style proxy) and yields the widget.
    widget = self._plot()
    return widget
def getPositionInfoWidget(self):
    """Returns the widget display mouse coordinates information.

    :rtype: ~silx.gui.plot.tools.PositionInfo
    """
    # NOTE(review): self._positionInfo is presumably created in __init__,
    # which is not visible in this fragment.
    return self._positionInfo
def getMaskToolsWidget(self):
    """Returns the widget controlling mask drawing.

    :rtype: ~silx.gui.plot.ScatterMaskToolsWidget
    """
    # NOTE(review): self._maskToolsWidget is presumably created in __init__,
    # which is not visible in this fragment.
    return self._maskToolsWidget
def getInteractiveModeToolBar(self):
    """Returns QToolBar controlling interactive mode.

    :rtype: ~silx.gui.plot.tools.InteractiveModeToolBar
    """
    # NOTE(review): attribute presumably created in __init__ (not visible here).
    return self._interactiveModeToolBar
def getScatterToolBar(self):
    """Returns QToolBar providing scatter plot tools.

    :rtype: ~silx.gui.plot.tools.ScatterToolBar
    """
    # NOTE(review): attribute presumably created in __init__ (not visible here).
    return self._scatterToolBar
def getScatterProfileToolBar(self):
    """Returns QToolBar providing scatter profile tools.

    :rtype: ~silx.gui.plot.tools.profile.ScatterProfileToolBar
    """
    # NOTE(review): attribute presumably created in __init__ (not visible here).
    return self._profileToolBar
def getOutputToolBar(self):
    """Returns QToolBar containing save, copy and print actions.

    :rtype: ~silx.gui.plot.tools.OutputToolBar
    """
    # NOTE(review): attribute presumably created in __init__ (not visible here).
    return self._outputToolBar
def setColormap(self, colormap=None):
    """Apply *colormap* to the displayed scatter and as the plot default.

    :param ~silx.gui.colors.Colormap colormap:
        The description of the colormap.
    """
    scatter = self.getScatterItem()
    scatter.setColormap(colormap)
    # Also set the plot default so the colormap survives PlotWidget-level
    # calls such as clear().
    self.getPlotWidget().setDefaultColormap(colormap)
def getColormap(self):
    """Return the colormap object currently in use.

    :rtype: ~silx.gui.colors.Colormap
    """
    scatter = self.getScatterItem()
    return scatter.getColormap()
# Control displayed scatter plot
def setData(self, x, y, value, xerror=None, yerror=None, alpha=None, copy=True):
    """Set the data of the scatter plot.

    To reset the scatter plot, set x, y and value to None.

    :param Union[numpy.ndarray,None] x: X coordinates.
    :param Union[numpy.ndarray,None] y: Y coordinates.
    :param Union[numpy.ndarray,None] value:
        The data corresponding to the value of the data points.
    :param xerror: Values with the uncertainties on the x values.
        If it is an array, it can either be a 1D array of
        same length as the data or a 2D array with 2 rows
        of same length as the data: row 0 for positive errors,
        row 1 for negative errors.
    :type xerror: A float, or a numpy.ndarray of float32.
    :param yerror: Values with the uncertainties on the y values
    :type yerror: A float, or a numpy.ndarray of float32. See xerror.
    :param alpha: Values with the transparency (between 0 and 1)
    :type alpha: A float, or a numpy.ndarray of float32
    :param bool copy: True make a copy of the data (default),
        False to use provided arrays.
    """
    # None means "reset": replace with empty tuples before forwarding.
    if x is None:
        x = ()
    if y is None:
        y = ()
    if value is None:
        value = ()
    self.getScatterItem().setData(
        x=x, y=y, value=value, xerror=xerror, yerror=yerror, alpha=alpha, copy=copy)
@docstring(items.Scatter)
def getScatterItem(self):
    """Returns the plot item displaying the scatter data.

    This allows to set the style of the displayed scatter.

    :rtype: ~silx.gui.plot.items.Scatter
    """
    plot = self.getPlotWidget()
    scatter = plot._getItem(kind='scatter', legend=self._SCATTER_LEGEND)
    if scatter is None:  # Resilient to call to PlotWidget API (e.g., clear)
        # Recreate the item if a clear() removed it.
        scatter = self.__createEmptyScatter()
    return scatter
# Convenient proxies
@docstring(PlotWidget)
@docstring(PlotWidget)
@docstring(PlotWidget)
@docstring(PlotWidget)
@docstring(PlotWidget)
@docstring(ScatterMaskToolsWidget)
@docstring(ScatterMaskToolsWidget)
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
11900,
29113,
29113,
7804,
2235,
198,
2,
198,
2,
15069,
357,
66,
8,
2864,
12,
42334,
3427,
16065,
354,
10599,
1313,
47532,
29118,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,... | 2.481855 | 4,464 |
import pytest
from redbot.core import modlog
__all__ = ["mod"]
@pytest.fixture
| [
11748,
12972,
9288,
198,
6738,
2266,
13645,
13,
7295,
1330,
953,
6404,
198,
198,
834,
439,
834,
796,
14631,
4666,
8973,
628,
198,
31,
9078,
9288,
13,
69,
9602,
198
] | 2.733333 | 30 |
from datetime import date
import pyimgur
| [
6738,
4818,
8079,
1330,
3128,
198,
11748,
12972,
19791,
198
] | 4.1 | 10 |
from unittest.case import TestCase
from probability.distributions import Lomax
| [
6738,
555,
715,
395,
13,
7442,
1330,
6208,
20448,
198,
198,
6738,
12867,
13,
17080,
2455,
507,
1330,
406,
296,
897,
628
] | 3.681818 | 22 |
import math
import os
import numpy as np
from pathlib import Path
from data_functions import *
from random_forest import *
# printing modifiers
PRINT_BOLD_COLOR = "\033[1m"  # ANSI escape: bold text
PRINT_DISABLE_COLOR = "\033[0m"  # ANSI escape: reset all attributes
def clear_file(file_name):
    """Truncate *file_name* to zero length, creating it if it does not exist.

    :param file_name: path of the file to clear
    """
    # Opening in "w" mode truncates; the context manager guarantees the
    # handle is closed even if an error occurs.
    with open(file_name, "w"):
        pass
def print_and_write(message, file_name):
    """Print *message* to the console and append it (plus newline) to *file_name*.

    :param message: text to emit
    :param file_name: path of the log file to append to
    """
    print(message)
    # Context manager ensures the handle is closed even if write() fails
    # (the original open/write/close leaked the handle on error).
    with open(file_name, "a") as file:
        file.write(message + "\n")
def main():
    """Train and evaluate random forests on several data sets.

    For each data set: trains classifiers over a grid of (tree count,
    splitting-feature count) parameters, logs per-model feature importances
    and test accuracy to a per-data-set text file, and writes an accuracy
    table to an Excel file.
    """
    # show the title
    print(PRINT_BOLD_COLOR + "Random Forest Classifier" + PRINT_DISABLE_COLOR + "\n")
    # path to the data sets
    parent_dir_path = os.path.dirname(os.getcwd())#str(Path(__file__).parents[1])
    data_dir_path = parent_dir_path + "/Data/"
    results_dir_path = parent_dir_path + "/Results/"
    # data sets to use
    data_set_names = ["contact-lenses", "labor", "hepatitis", "breast-cancer", "car"]
    # read the data sets as data frames
    # NOTE(review): read_data_sets/pre_process_data_frame/split_data_frame and
    # the classifier helpers come from the star imports above; pd is
    # presumably provided the same way — confirm.
    data_frames = read_data_sets(data_dir_path, data_set_names)
    # pre-process the data sets
    data_frames = [pre_process_data_frame(data_frame, False) for data_frame in data_frames]
    for data_set_name, data_frame in zip(data_set_names, data_frames):
        # prepare the file where to write the results
        data_set_file_name = results_dir_path + data_set_name + ".txt"
        clear_file(data_set_file_name)
        print_and_write("Data set: {}".format(data_set_name), data_set_file_name)
        # proportion to define the size of bootstrap samples
        bootstrap_sample_proportion = 1
        # proportion of data to use as test (at least 5 rows)
        test_proportion = max(0.1, 5 / len(data_frame))
        # split the data in training and test
        x_train, y_train, x_test, y_test = split_data_frame(data_frame, test_proportion)
        # total number of features
        feature_num = len(data_frame.columns)
        log_feature_num = round(math.log2(feature_num) + 1)
        root_feature_num = round(math.sqrt(feature_num))
        # combinations of parameters for the random forest (number of trees and number of splitting features)
        parameter_combinations = list()
        for tree_num in [50, 100]:
            # candidate splitting-feature counts: fixed values plus the
            # log2- and sqrt-derived heuristics (deduplicated)
            split_feature_nums = [1, 3]
            if log_feature_num not in split_feature_nums:
                split_feature_nums.append(log_feature_num)
            if root_feature_num not in split_feature_nums:
                split_feature_nums.append(root_feature_num)
            for split_feature_num in split_feature_nums:
                parameter_combinations.append((tree_num, split_feature_num))
        parameter_combinations.sort()
        accuracies = list()
        feature_lists = list()
        # build a model for each set of parameters
        for (tree_num, split_feature_num) in parameter_combinations:
            print_and_write("\tTraining model for {} data set (number of trees: {}, number of splitting features: {})".format(data_set_name, tree_num, split_feature_num), data_set_file_name)
            # use the training set to build the classifier and get the features ordered by importance
            random_forest, features_by_importance = build_random_forest_classifier(x_train, y_train, tree_num, split_feature_num, bootstrap_sample_proportion)
            # show the features in order of importance
            print_and_write("\t\tFeatures (in order of importance):", data_set_file_name)
            for i, feature in enumerate(features_by_importance):
                print_and_write("\t\t\t({}) {}".format(i+1, feature), data_set_file_name)
            # evaluate the test set to assess the classifier's accuracy
            accuracy, _ = evaluate_random_forest(random_forest, x_test, y_test)
            accuracies.append(accuracy)
            feature_lists.append(features_by_importance)
            # show the accuracy
            print_and_write("\t\tTest accuracy: {}".format(round(accuracy, 3)), data_set_file_name)
        # compute the mean accuracy
        mean_accuracy = float(np.mean(accuracies))
        accuracy_std = float(np.std(accuracies))
        print_and_write("\tMean accuracy (among parameter setups): {} ± {}\n\n".format(round(mean_accuracy, 3), round(accuracy_std, 3)), data_set_file_name)
        # save the accuracy information in a separate tabular file
        results_data_frame = pd.DataFrame({"Tree num": [parameters[0] for parameters in parameter_combinations], "Split feature num": [parameters[1] for parameters in parameter_combinations], "Accuracy": accuracies, "Features by importance": feature_lists})
        accuracy_file_path = results_dir_path + data_set_name + "_accuracy.xlsx"
        clear_file(accuracy_file_path)
        writer = pd.ExcelWriter(accuracy_file_path)
        results_data_frame.to_excel(writer, "Matrix")
        writer.save()
if __name__ == "__main__":
    # Script entry point.
    main()
| [
11748,
10688,
198,
11748,
28686,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
1366,
62,
12543,
2733,
1330,
1635,
198,
6738,
4738,
62,
29623,
1330,
1635,
628,
198,
2,
13570,
37395,
198,
198,
480... | 2.574807 | 1,945 |
from os import getenv
from typing import List, Tuple
from subprocess import check_output
# Global configuration describing the git repositories/containers to build
# and the test parameters. Empty-string fields are presumably filled in at
# runtime by code in the truncated portion of this file — TODO confirm.
CONFIG = {
    "REPOS": {
        # Katzenpost authority image/repo coordinates.
        "AUTH": {
            "CONTAINER": "hashcloak/authority",
            "REPOSITORY": "https://github.com/katzenpost/authority",
            "BRANCH": "master",
            "GITHASH": "",
            "NAMEDTAG": "",
            "HASHTAG": "",
        },
        # Katzenpost server image/repo coordinates.
        "SERVER" : {
            "CONTAINER": "hashcloak/server",
            "REPOSITORY": "https://github.com/katzenpost/server",
            "BRANCH": "master",
            "GITHASH": "",
            "NAMEDTAG": "",
            "HASHTAG": "",
        },
        # Meson image coordinates.
        "MESON": {
            "CONTAINER": "hashcloak/meson",
            "BRANCH": "",
            "GITHASH": "",
            "NAMEDTAG": "",
            "HASHTAG": "",
        },
    },
    # Test-run parameters (private keys, topology sizes, retry attempts).
    "TEST": {
        "PKS": {
            "ETHEREUM": "",
            "BINANCE": ""
        },
        "CLIENTCOMMIT": "master",
        "NODES": 2,
        "PROVIDERS": 2,
        "ATTEMPTS": 3,
    },
    "LOG": "",
    "WARPED": "true",
    "BUILD": "",
}
# Number of leading characters kept from git commit hashes.
HASH_LENGTH=7
def get_remote_git_hash(repositoryURL: str, branchOrTag: str) -> str:
    """Return the abbreviated (7-char) commit hash of a ref in a remote repo.

    :param repositoryURL: URL of the git repository
    :param branchOrTag: branch or tag name to resolve
    """
    output = check_output(["git", "ls-remote", repositoryURL, branchOrTag])
    full_hash = output.decode().split('\t')[0]
    return full_hash[:HASH_LENGTH]
def get_local_repo_info() -> Tuple[str, str]:
    """Return (branch, abbreviated commit hash) of the local checkout.

    Values come from git, then are overridden by Travis CI environment
    variables when running there (pull-request builds use the PR refs).
    """
    branch = check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"]).decode().strip()
    commit = check_output(["git", "rev-parse", "HEAD"]).decode().strip()
    if getenv('TRAVIS_EVENT_TYPE') == "pull_request":
        branch = getenv('TRAVIS_PULL_REQUEST_BRANCH', branch)
        commit = getenv('TRAVIS_PULL_REQUEST_SHA', commit)
    else:
        branch = getenv('TRAVIS_BRANCH', branch)
        commit = getenv('TRAVIS_COMMIT', commit)
    return branch, commit[:HASH_LENGTH]
def expand_dict(dictionary: dict, separator="_") -> List[str]:
    """Flatten the keys of a (possibly nested) dictionary into path strings.

    Nested keys are joined with *separator* (default '_'), e.g.
    {'a': {'b': 1}, 'c': 2} -> ['a_b', 'c'].

    :param dictionary: mapping to flatten
    :param separator: string inserted between nested key segments
    :return: list of joined key paths
    """
    flattened = []
    for key, value in dictionary.items():
        if isinstance(value, dict):
            # Bug fix: propagate the separator into the recursion — it was
            # previously dropped, so levels below the first always used '_'.
            flattened.extend(
                key + separator + item for item in expand_dict(value, separator)
            )
        else:
            flattened.append(key)
    return flattened
def set_nested_value(dictionary: dict, value: str, keys: List[str]) -> None:
    """Assign *value* at the position described by *keys* inside a nested dict.

    Silently does nothing when the key path cannot be followed.
    """
    if not keys or not dictionary:
        return
    node = dictionary
    # Walk down to the parent of the final key, bailing out if the path breaks.
    for key in keys[:-1]:
        node = node.get(key)
        if not node:
            return
    node[keys[-1]] = value
def get_nested_value(dictionary: dict, *args: List[str]) -> str:
    """Walk *args* as a key path through a nested dict.

    :param dictionary: mapping to read from
    :param args: sequence of keys, applied outermost first
    :return: the value found, or None when the path breaks off
    """
    if not args:
        return None
    current = dictionary
    for key in args:
        # A falsy container or key ends the walk, matching the recursive form.
        if not current or not key:
            return None
        current = current.get(key)
    return current
| [
6738,
28686,
1330,
651,
24330,
198,
6738,
19720,
1330,
7343,
11,
309,
29291,
198,
6738,
850,
14681,
1330,
2198,
62,
22915,
198,
198,
10943,
16254,
796,
1391,
198,
220,
220,
220,
366,
35316,
2640,
1298,
1391,
198,
220,
220,
220,
220,
2... | 2.114919 | 1,488 |
#!/usr/bin/python3
""" User class module """
from models.base_model import BaseModel
class User(BaseModel):
    """ class User which inherits from class BaseModel """
    # Class-level defaults; instances shadow these with per-user values.
    email = ""
    # NOTE(review): appears to hold the password as a plain string —
    # confirm hashing happens elsewhere.
    password = ""
    first_name = ""
    last_name = ""
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
37811,
11787,
1398,
8265,
37227,
198,
198,
6738,
4981,
13,
8692,
62,
19849,
1330,
7308,
17633,
628,
198,
4871,
11787,
7,
14881,
17633,
2599,
198,
220,
220,
220,
37227,
1398,
11787,
543,
1... | 3 | 81 |
##############
## Script listens to serial port and writes contents into a file
import serial # sudo pip install pyserial should work
from datetime import datetime
# Serial source and output configuration.
serial_port = '/dev/cu.usbmodem1411';
baud_rate = 9600; #In arduino, Serial.begin(baud_rate)
write_to_file_path = "week.txt";
output_file = open(write_to_file_path, "w+");
ser = serial.Serial(serial_port, baud_rate)
# First line of the output file is the start time as a Unix timestamp.
output_file.write(str(datetime.now().timestamp()) + "\n")
count = 0
# NOTE(review): this loop never terminates and output_file is never flushed
# or closed, so buffered data may be lost on interrupt — consider
# line-buffering (buffering=1) or periodic flush().
while True:
    count += 1;
    line = ser.readline();
    line = line.decode("utf-8") #ser.readline returns a binary, convert to string
    print(line);
    # Every third line is followed by an extra newline in the file.
    if (count % 3 == 0):
        output_file.write(line + "\n");
    else:
        output_file.write(line);
| [
7804,
4242,
2235,
198,
2235,
12327,
35019,
284,
11389,
2493,
290,
6797,
10154,
656,
257,
2393,
198,
198,
11748,
11389,
220,
1303,
21061,
7347,
2721,
279,
893,
48499,
815,
670,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
46911,
62... | 2.698113 | 265 |
#!/usr/bin/env python3
from flask import Flask, render_template, request, send_from_directory, url_for, redirect
from flask_basicauth import BasicAuth
from flask_apscheduler import APScheduler
import requests
import json
import copy
import atexit
import time
import timeago
from datetime import datetime
import paho.mqtt.client as mqtt
import meshtastic
from meshtastic import remote_hardware, portnums_pb2, remote_hardware_pb2
from pubsub import pub
import configparser
from pkg_resources import get_distribution, DistributionNotFound
import os.path
# Resolve the installed package version; fall back when not installed.
try:
    _dist = get_distribution('iMesh-Dashboard')
except DistributionNotFound:
    __version__ = 'Unknown version'
else:
    __version__ = _dist.version
# Application data directory and configuration file.
dataPath = '/usr/local/iMeshDashboard'
config = configparser.ConfigParser()
config.read(dataPath+'/conf/app.conf')
# NOTE(review): mid-file import — conventionally this belongs at the top.
from waitress import serve
# Module-level state shared by the route handlers (defined further down).
oldReceivedNodes = dict()
receivedNodes = dict()
myNodeInfo = dict()
mapNodes = []
positionBeacon = False
# NOTE(review): opening the serial interface at import time is a side
# effect; importing this module requires the device to be present.
interface = meshtastic.SerialInterface()
# MQTT client with credentials from the [MQTT] config section.
client = mqtt.Client()
client.username_pw_set(username=config['MQTT']['username'], password=config['MQTT']['password'])
# Flask app with HTTP basic auth from the [AUTH] config section.
app = Flask(__name__, template_folder=dataPath+'/templates')
app.config['BASIC_AUTH_USERNAME'] = config['AUTH']['username']
app.config['BASIC_AUTH_PASSWORD'] = config['AUTH']['password']
basic_auth = BasicAuth(app)
appData = {"version":__version__}
@app.route('/js/<path:path>')
@app.route('/css/<path:path>')
@app.route('/img/<path:path>')
@app.route('/')
@app.route('/lh')
@app.route('/map')
@app.route('/private/config')
@basic_auth.required
@app.route('/getNodes')
@app.route('/getNodeInfo')
@app.route('/sendMessage', methods=['POST'])
@basic_auth.required
@app.route('/setNode', methods=['POST'])
@basic_auth.required
@app.route('/setGpio', methods=['POST'])
@basic_auth.required
@app.route('/getGpio')
@basic_auth.required
@app.route('/login', methods=['GET', 'POST'])
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this fragment — presumably
    # defined in the truncated portion of the file. TODO confirm.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
2581,
11,
3758,
62,
6738,
62,
34945,
11,
19016,
62,
1640,
11,
18941,
198,
6738,
42903,
62,
12093,
3970,
1071,
1330,
14392,
30515,... | 2.797721 | 702 |
import numpy as np
from railrl.envs.contextual import ContextualEnv
from railrl.policies.base import Policy
from railrl.samplers.data_collector import MdpPathCollector
| [
11748,
299,
32152,
355,
45941,
198,
6738,
6787,
45895,
13,
268,
14259,
13,
22866,
723,
1330,
30532,
723,
4834,
85,
198,
6738,
6787,
45895,
13,
79,
4160,
444,
13,
8692,
1330,
7820,
198,
6738,
6787,
45895,
13,
37687,
489,
364,
13,
7890,... | 3.25 | 52 |
from collections import defaultdict
from datetime import datetime
from typing import Dict, List, Optional, Tuple
from ..models import CategoryModel, ProjectModel
from . import subscribe
@subscribe
| [
6738,
17268,
1330,
4277,
11600,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
11,
32233,
11,
309,
29291,
198,
198,
6738,
11485,
27530,
1330,
21743,
17633,
11,
4935,
17633,
198,
6738,
764,
1330,
123... | 4.1875 | 48 |
import pandas as pd
from .classes import Dependencies
def normalize(dependencies, df):
"""
Normalizes the dependency relationships in dependencies into new
groups by breaking up all partial and transitive dependencies.
Arguments:
dependencies (Dependencies) : the dependencies to be split up
Returns:
new_groups (list[Dependencies]) : list of new dependencies objects
representing the new groups
"""
dependencies.remove_implied_extroneous()
no_part_deps = remove_part_deps(dependencies, df)
no_trans_deps = []
for grp in no_part_deps:
no_trans_deps += remove_trans_deps(grp, df)
return no_trans_deps
class DepDF(object):
"""
Represents dataframe and functional dependencies between columns in it.
Used in the normalization process.
Attributes:
deps
df
parent
children
index
"""
def __init__(self, deps, df, index, parent=None):
"""
Creates a DepDF.
Arguments:
deps (Dependencies) : dependenies among the df
df (pd.DataFrame) : dataframe for the object
index (list[str]) : index columns for dataframe
parent (DepDF, optional) : parent DepDF object
"""
self.deps = deps
self.df = df
self.parent = parent
self.children = []
self.index = index
def return_dfs(self):
"""
Returns the dataframes stored in self and all its descendents.
Returns:
dfs (list[pd.DataFrame]) : dataframes
"""
if self.children == []:
return [self.df]
result = [self.df]
for child in self.children:
result += child.return_dfs()
return result
def make_indexes(depdf):
"""
Goes through depdf, and all of its descendents, and if any have primary keys
of more than one attribute, creates a new index column, and replaces the
old primary key columns with the new column in the parent df.
Arguments:
depdf (DepDF) : depDF to make indexes for
"""
prim_key = depdf.deps.get_prim_key()
if len(prim_key) > 1:
depdf.df.insert(0, '_'.join(prim_key), range(0, len(depdf.df)))
depdf.index = ['_'.join(prim_key)]
# now need to replace it in the parent df...
if depdf.parent is not None:
add = [None] * len(depdf.parent.df)
indices = depdf.parent.df.groupby(prim_key).indices
for name in indices:
mask = None
for i in range(len(prim_key)):
m = depdf.df[prim_key[i]] == name[i]
if mask is None:
mask = m
else:
mask = mask & m
new_val = depdf.df[mask]['_'.join(prim_key)].item()
for index in indices[name]:
add[index] = new_val
depdf.parent.df.drop(columns=prim_key, inplace=True)
depdf.parent.df.insert(len(depdf.parent.df.columns), '_'.join(prim_key), add)
for child in depdf.children:
make_indexes(child)
def normalize_dataframe(depdf):
"""
Normalizes the dataframe represetned by depdf, created descendents
as needed.
Arguments:
depdf (DepDF) : depdf to normalize
"""
part_deps = depdf.deps.find_partial_deps()
filter(part_deps, depdf.df)
if part_deps != []:
split_on = find_most_comm(part_deps, depdf.deps, depdf.df)
split_up(split_on, depdf)
return
trans_deps = depdf.deps.find_trans_deps()
filter(trans_deps, depdf.df)
if trans_deps != []:
split_on = find_most_comm(trans_deps, depdf.deps, depdf.df)
split_up(split_on, depdf)
return
def split_up(split_on, depdf):
"""
Breaks off a depdf and forms its child. Recursively calls normalize on
the original depdf, and its newly formed child.
Arguments:
split_on (list[str]) : attributes to split the dataframe on
depdf (DepDF) : the depdf ot split
"""
parent_deps, child_deps = split_on_dep(split_on, depdf.deps)
child = DepDF(child_deps, form_child(depdf.df, child_deps), split_on, depdf)
depdf.deps = parent_deps
depdf.df = depdf.df.drop(columns=list(set(depdf.df.columns).difference(parent_deps.all_attrs())))
depdf.children.append(child)
normalize_dataframe(depdf)
normalize_dataframe(child)
def form_child(df, deps):
"""
Returns a new dataframe based off of the dependencies in deps.
Arguments:
df (pd.DataFrame) : dataframe to create new dataframe from
deps (Dependencies) : dependencies to base new dataframe off of
"""
attrs = deps.all_attrs()
drops = set(df.columns).difference(attrs)
new_df = df.drop(columns=list(drops))
new_df = drop_primary_dups(new_df, deps.get_prim_key())
return new_df
def remove_part_deps(dependencies, df):
"""
Breaks up the dependency relations in dependencies into new groups of
relations so that there are no more partial dependencies.
Arguments:
dependencies (Dependncies) : the dependencies to be split up
Returns:
new_groups (list[Dependencies]) : list of new dependencies objects
representing the new groups with no partial depenencies
"""
part_deps = dependencies.find_partial_deps()
filter(part_deps, df)
if part_deps == []:
return [dependencies]
new_deps = split_on_dep(find_most_comm(part_deps, dependencies), dependencies)
return remove_part_deps(new_deps[0], df) + remove_part_deps(new_deps[1], df)
def remove_trans_deps(dependencies, df):
"""
Breaks up the dependency relations in dependencies into new groups of
relations so that there are no more transitive dependencies.
Arguments:
dependencies (Dependencies) : the dependencies to be split up
Returns:
new_groups (list[Dependencies]): list of new dependencies objects
representing the new groups with no transitive depenencies
"""
trans_deps = dependencies.find_trans_deps()
filter(trans_deps, df)
if trans_deps == []:
return [dependencies]
new_deps = split_on_dep(find_most_comm(trans_deps, dependencies), dependencies)
return remove_trans_deps(new_deps[0], df) + remove_trans_deps(new_deps[1], df)
def find_most_comm(deps, dependencies, df=None):
"""
Given a list of dependency relations, finds the most common set of
LHS attributes. If more than one LHS set occurs the same amount of
times, chooses the set with the least number of attributes.
Arguments:
deps (list[(set[str], str)]) : list of tuples representing relations
where the lhs is a set of attribute names, and the rhs is an attribute.
Returns:
most_comm (set[str]) : the most common lhs set of attributes
"""
positions = {}
priority_lst = []
for lhs, rhs in deps:
if frozenset(lhs) in positions:
ind = positions[frozenset(lhs)]
score = priority_lst[ind][0] + 1
while ind != 0 and priority_lst[ind - 1][0] < score:
priority_lst[ind] = priority_lst[ind - 1]
positions[frozenset(priority_lst[ind - 1][1])] = ind
ind -= 1
priority_lst[ind] = (score, lhs)
positions[frozenset(lhs)] = ind
else:
priority_lst.append((1, lhs))
positions[frozenset(lhs)] = len(priority_lst) - 1
# IF THEY ARE THE SAME, CHOOSE ONE WITH SHORTEST LENGHT
options = [item[1] for item in priority_lst if item[0] == priority_lst[0][0]]
max_lhs = choose_index(options, df)
# max_lhs = priority_lst[0][1]
# scr = priority_lst[0][0]
# i = 1
# while i < len(priority_lst) and priority_lst[i][0] == scr:
# if len(priority_lst[i][1]) < len(max_lhs):
# max_lhs = priority_lst[i][1]
# i += 1
for i in range(len(max_lhs)):
for key in dependencies.get_prim_key():
if dependencies.equiv_attrs(max_lhs[i], key):
max_lhs[i] = key
return max_lhs
def split_on_dep(lhs_dep, dependencies):
"""
Given the LHS attributes of a dependency, breaks up the dependency
relations in dependencies into two groups so that the LHS given is
the primary key of the new group. The old group keeps the same
primary key.
Arguments:
lhs_dep (list[str]) : set of attributes to be the new group's
primary key
dependencies (Dependencies) : dependency relations to be split up
Returns:
new_groups ((Dependencies, Dependencies)) : the new groups
"""
new_deps = {}
old_deps = dependencies.serialize()
new_rhs = set()
# new primary key
for attr in lhs_dep:
new_deps[attr] = old_deps[attr][:]
for rhs in list(old_deps.keys()):
for lhs in old_deps[rhs]:
if set(lhs).issubset(lhs_dep):
# if lhs_dep in old_deps[rhs]:
new_deps[rhs] = old_deps[rhs]
old_deps.pop(rhs)
new_rhs.add(rhs)
break
for rhs in old_deps:
for lhs in old_deps[rhs][:]:
if len(new_rhs.intersection(lhs)) != 0:
old_deps[rhs].remove(lhs)
old_rhs = set(list(old_deps.keys()))
for attr in lhs_dep:
old_rhs.remove(attr)
for rhs in new_deps:
for lhs in new_deps[rhs][:]:
if len(old_rhs.intersection(lhs)) != 0:
new_deps[rhs].remove(lhs)
return (Dependencies(old_deps, dependencies.get_prim_key()), Dependencies(new_deps, lhs_dep))
def drop_primary_dups(df, prim_key):
"""
Drops all duplicates based off of the columns in prim_key. If there isn't a
unique value for the other columns, for every unique instance of columns in
prim_key, keeps the "mode" of the unique instances' occurance.
Arguments:
df (pd.DataFrame) : dataframe to drop duplicates of
prim_key (list[str]) : columns that form the primary key of the dataframe
Returns:
new_df (pd.DataFrame) : dataframe with duplicates dropped
"""
df_lst = []
if df.drop_duplicates(prim_key).shape[0] == df.shape[0]:
return df
groups = df.groupby(prim_key)
for name, group in groups:
df_lst.append(group.mode().iloc[0])
# new_df = new_df.append(group.mode().iloc[0], ignore_index=True)
result = (pd.DataFrame(df_lst, columns=df.columns)).reset_index(drop=True)
return result.astype(dict(df.dtypes))
def choose_index(keys, df):
"""
Chooses key from a list of keys. Order of priority:
1) shortest length
2) has "id" in some form in name of an attribute
3) has attribute furthest to the left in table
Arguments:
keys (list[set[str]]) : list of keys to choose from
df (pd.DataFrame) : pandas dataframe keys are for
Returns:
index (list[str]) : chosen key
"""
sort_key = sorted(keys, key=len)
m = len(sort_key[0])
options = [key for key in sort_key if len(key) == m]
for key in options:
for attr in key:
if "_id" in attr.lower() or " id" in attr.lower() or "id _" in attr.lower() or "id " in attr.lower():
return list(key)
if df is None:
return list(options[0])
for col in df.columns:
includes = [option for option in options if col in option]
if len(includes) == 1:
return list(includes[0])
if len(includes) > 1:
options = includes
return list(options[0])
def filter(keys, df):
"""
Filters out any keys that contain attributes that are not strings, ints, or
categories from a list of relations.
Arguments:
keys (list[(list[str], str)]) : relationships to filter out
df (pd.DataFrame) : dataframe attributes in keys are from
"""
for key, rhs in keys[:]:
for attr in key:
if df[attr].dtypes.name not in set(['category', 'int64', 'object']):
keys.remove((key, rhs))
break
| [
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
764,
37724,
1330,
37947,
3976,
628,
198,
4299,
3487,
1096,
7,
45841,
3976,
11,
47764,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
14435,
4340,
262,
20203,
6958,
287,
20086,
656... | 2.315919 | 5,264 |
from typing import Any, Union, List
from rdict.macros import INDEX_SPECIAL_CHARACTER
| [
6738,
19720,
1330,
4377,
11,
4479,
11,
7343,
198,
198,
6738,
374,
11600,
13,
20285,
4951,
1330,
24413,
6369,
62,
48451,
12576,
62,
38019,
2246,
5781,
628,
628,
628
] | 3.137931 | 29 |
"""
Space time dataset import functions
Usage:
.. code-block:: python
import grass.temporal as tgis
input="/tmp/temp_1950_2012.tar.gz"
output="temp_1950_2012"
directory="/tmp"
title="My new dataset"
descr="May new shiny dataset"
location=None
link=True
exp=True
overr=False
create=False
tgis.import_stds(input, output, directory, title, descr, location,
link, exp, overr, create, "strds")
(C) 2012-2013 by the GRASS Development Team
This program is free software under the GNU General Public
License (>=v2). Read the file COPYING that comes with GRASS
for details.
:authors: Soeren Gebbert
"""
import os
import os.path
import tarfile
from .core import get_current_mapset, get_tgis_message_interface
from .register import register_maps_in_space_time_dataset
from .factory import dataset_factory
import grass.script as gscript
from grass.exceptions import CalledModuleError
proj_file_name = "proj.txt"
init_file_name = "init.txt"
list_file_name = "list.txt"
# This global variable is for unique vector map export,
# since single vector maps may have several layer
# and therefore several attribute tables
imported_maps = {}
############################################################################
############################################################################
############################################################################
############################################################################
############################################################################
def import_stds(input, output, directory, title=None, descr=None, location=None,
link=False, exp=False, overr=False, create=False,
stds_type="strds", base=None, set_current_region=False,
memory=300):
"""Import space time datasets of type raster and vector
:param input: Name of the input archive file
:param output: The name of the output space time dataset
:param directory: The extraction directory
:param title: The title of the new created space time dataset
:param descr: The description of the new created
space time dataset
:param location: The name of the location that should be created,
maps are imported into this location
:param link: Switch to link raster maps instead importing them
:param exp: Extend location extents based on new dataset
:param overr: Override projection (use location's projection)
:param create: Create the location specified by the "location"
parameter and exit.
Do not import the space time datasets.
:param stds_type: The type of the space time dataset that
should be imported
:param base: The base name of the new imported maps, it will be
extended using a numerical index.
:param memory: Cache size for raster rows, used in r.in.gdal
"""
old_state = gscript.raise_on_error
gscript.set_raise_on_error(True)
# Check if input file and extraction directory exits
if not os.path.exists(input):
gscript.fatal(_("Space time raster dataset archive <%s> not found")
% input)
if not create and not os.path.exists(directory):
gscript.fatal(_("Extraction directory <%s> not found") % directory)
tar = tarfile.open(name=input, mode='r')
# Check for important files
msgr = get_tgis_message_interface()
msgr.message(_("Checking validity of input file (size: %0.1f MB). Make take a while..."
% (os.path.getsize(input)/(1024*1024.0))))
members = tar.getnames()
# Make sure that the basenames of the files are used for comparison
member_basenames = [os.path.basename(name) for name in members]
if init_file_name not in member_basenames:
gscript.fatal(_("Unable to find init file <%s>") % init_file_name)
if list_file_name not in member_basenames:
gscript.fatal(_("Unable to find list file <%s>") % list_file_name)
if proj_file_name not in member_basenames:
gscript.fatal(_("Unable to find projection file <%s>") % proj_file_name)
msgr.message(_("Extracting data..."))
tar.extractall(path=directory)
tar.close()
# We use a new list file name for map registration
new_list_file_name = list_file_name + "_new"
# Save current working directory path
old_cwd = os.getcwd()
# Switch into the data directory
os.chdir(directory)
# Check projection information
if not location:
temp_name = gscript.tempfile()
temp_file = open(temp_name, "w")
proj_name = os.path.abspath(proj_file_name)
# We need to convert projection strings generated
# from other programs than g.proj into
# new line format so that the grass file comparison function
# can be used to compare the projections
proj_name_tmp = temp_name + "_in_projection"
proj_file = open(proj_name, "r")
proj_content = proj_file.read()
proj_content = proj_content.replace(" +", "\n+")
proj_content = proj_content.replace("\t+", "\n+")
proj_file.close()
proj_file = open(proj_name_tmp, "w")
proj_file.write(proj_content)
proj_file.close()
p = gscript.start_command("g.proj", flags="j", stdout=temp_file)
p.communicate()
temp_file.close()
if not gscript.compare_key_value_text_files(temp_name, proj_name_tmp,
sep="="):
if overr:
gscript.warning(_("Projection information does not match. "
"Proceeding..."))
else:
diff = ''.join(gscript.diff_files(temp_name, proj_name))
gscript.warning(_("Difference between PROJ_INFO file of "
"imported map and of current location:"
"\n{diff}").format(diff=diff))
gscript.fatal(_("Projection information does not match. "
"Aborting."))
# Create a new location based on the projection information and switch
# into it
old_env = gscript.gisenv()
if location:
try:
proj4_string = open(proj_file_name, 'r').read()
gscript.create_location(dbase=old_env["GISDBASE"],
location=location,
proj4=proj4_string)
# Just create a new location and return
if create:
os.chdir(old_cwd)
return
except Exception as e:
gscript.fatal(_("Unable to create location %(l)s. Reason: %(e)s")
% {'l': location, 'e': str(e)})
# Switch to the new created location
try:
gscript.run_command("g.mapset", mapset="PERMANENT",
location=location,
dbase=old_env["GISDBASE"])
except CalledModuleError:
gscript.fatal(_("Unable to switch to location %s") % location)
# create default database connection
try:
gscript.run_command("t.connect", flags="d")
except CalledModuleError:
gscript.fatal(_("Unable to create default temporal database "
"in new location %s") % location)
try:
# Make sure the temporal database exists
from .core import init
init()
fs = "|"
maplist = []
mapset = get_current_mapset()
list_file = open(list_file_name, "r")
new_list_file = open(new_list_file_name, "w")
# get number of lines to correctly form the suffix
max_count = -1
for max_count, l in enumerate(list_file):
pass
max_count += 1
list_file.seek(0)
# Read the map list from file
line_count = 0
while True:
line = list_file.readline()
if not line:
break
line_list = line.split(fs)
# The filename is actually the base name of the map
# that must be extended by the file suffix
filename = line_list[0].strip().split(":")[0]
if base:
mapname = "%s_%s" % (base, gscript.get_num_suffix(line_count + 1,
max_count))
mapid = "%s@%s" % (mapname, mapset)
else:
mapname = filename
mapid = mapname + "@" + mapset
row = {}
row["filename"] = filename
row["name"] = mapname
row["id"] = mapid
row["start"] = line_list[1].strip()
row["end"] = line_list[2].strip()
new_list_file.write("%s%s%s%s%s\n" % (mapname, fs, row["start"],
fs, row["end"]))
maplist.append(row)
line_count += 1
list_file.close()
new_list_file.close()
# Read the init file
fs = "="
init = {}
init_file = open(init_file_name, "r")
while True:
line = init_file.readline()
if not line:
break
kv = line.split(fs)
init[kv[0]] = kv[1].strip()
init_file.close()
if "temporal_type" not in init or \
"semantic_type" not in init or \
"number_of_maps" not in init:
gscript.fatal(_("Key words %(t)s, %(s)s or %(n)s not found in init"
" file.") % {'t': "temporal_type",
's': "semantic_type",
'n': "number_of_maps"})
if line_count != int(init["number_of_maps"]):
gscript.fatal(_("Number of maps mismatch in init and list file."))
format_ = "GTiff"
type_ = "strds"
if "stds_type" in init:
type_ = init["stds_type"]
if "format" in init:
format_ = init["format"]
if stds_type != type_:
gscript.fatal(_("The archive file is of wrong space time dataset"
" type"))
# Check the existence of the files
if format_ == "GTiff":
for row in maplist:
filename = row["filename"] + ".tif"
if not os.path.exists(filename):
gscript.fatal(_("Unable to find GeoTIFF raster file "
"<%s> in archive.") % filename)
elif format_ == "AAIGrid":
for row in maplist:
filename = row["filename"] + ".asc"
if not os.path.exists(filename):
gscript.fatal(_("Unable to find AAIGrid raster file "
"<%s> in archive.") % filename)
elif format_ == "GML":
for row in maplist:
filename = row["filename"] + ".xml"
if not os.path.exists(filename):
gscript.fatal(_("Unable to find GML vector file "
"<%s> in archive.") % filename)
elif format_ == "pack":
for row in maplist:
if type_ == "stvds":
filename = str(row["filename"].split(":")[0]) + ".pack"
else:
filename = row["filename"] + ".pack"
if not os.path.exists(filename):
gscript.fatal(_("Unable to find GRASS package file "
"<%s> in archive.") % filename)
else:
gscript.fatal(_("Unsupported input format"))
# Check the space time dataset
id = output + "@" + mapset
sp = dataset_factory(type_, id)
if sp.is_in_db() and gscript.overwrite() is False:
gscript.fatal(_("Space time %(t)s dataset <%(sp)s> is already in"
" the database. Use the overwrite flag.") %
{'t': type_, 'sp': sp.get_id()})
# Import the maps
if type_ == "strds":
if format_ == "GTiff" or format_ == "AAIGrid":
_import_raster_maps_from_gdal(maplist, overr, exp, location,
link, format_, set_current_region,
memory)
if format_ == "pack":
_import_raster_maps(maplist, set_current_region)
elif type_ == "stvds":
if format_ == "GML":
_import_vector_maps_from_gml(
maplist, overr, exp, location, link)
if format_ == "pack":
_import_vector_maps(maplist)
# Create the space time dataset
if sp.is_in_db() and gscript.overwrite() is True:
gscript.info(_("Overwrite space time %(sp)s dataset "
"<%(id)s> and unregister all maps.") %
{'sp': sp.get_new_map_instance(None).get_type(),
'id': sp.get_id()})
sp.delete()
sp = sp.get_new_instance(id)
temporal_type = init["temporal_type"]
semantic_type = init["semantic_type"]
relative_time_unit = None
if temporal_type == "relative":
if "relative_time_unit" not in init:
gscript.fatal(_("Key word %s not found in init file.") %
("relative_time_unit"))
relative_time_unit = init["relative_time_unit"]
sp.set_relative_time_unit(relative_time_unit)
gscript.verbose(_("Create space time %s dataset.") %
sp.get_new_map_instance(None).get_type())
sp.set_initial_values(temporal_type=temporal_type,
semantic_type=semantic_type, title=title,
description=descr)
sp.insert()
# register the maps
fs = "|"
register_maps_in_space_time_dataset(
type=sp.get_new_map_instance(None).get_type(),
name=output, file=new_list_file_name, start="file",
end="file", unit=relative_time_unit, dbif=None, fs=fs,
update_cmd_list=False)
os.chdir(old_cwd)
except:
raise
# Make sure the location is switched back correctly
finally:
if location:
# Switch to the old location
try:
gscript.run_command("g.mapset", mapset=old_env["MAPSET"],
location=old_env["LOCATION_NAME"],
gisdbase=old_env["GISDBASE"])
except CalledModuleError:
gscript.warning(_("Switching to original location failed"))
gscript.set_raise_on_error(old_state)
| [
37811,
198,
14106,
640,
27039,
1330,
5499,
198,
198,
28350,
25,
198,
198,
492,
2438,
12,
9967,
3712,
21015,
628,
220,
220,
220,
1330,
8701,
13,
11498,
35738,
355,
256,
70,
271,
628,
220,
220,
220,
5128,
35922,
22065,
14,
29510,
62,
... | 2.087342 | 7,213 |
from flask import Flask
import os
app = Flask(__name__) | [
6738,
42903,
1330,
46947,
198,
11748,
28686,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8
] | 3.294118 | 17 |
#!/usr/bin/env python
# coding: utf-8
# zdy.py
'''
Squid+站大爷搭建代理IP池
Author: Nathan
Blog: www.xnathan.com
Github: github.com/xNathan
'''
from gevent import monkey
monkey.patch_all()
import os
import time
import logging
import requests
from gevent.pool import Pool
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s: - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
# 使用StreamHandler输出到屏幕
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger.addHandler(ch)
# Squid的配置文件语法
# 将请求转发到父代理
PEER_CONF = "cache_peer %s parent %s 0 no-query weighted-round-robin weight=1 connect-fail-limit=2 allow-miss max-conn=5\n"
# 可用代理
GOOD_PROXIES = []
pool = Pool(50)
def check_proxy(proxy):
"""验证代理是否可用
:param proxy list:[ip, port]"""
global GOOD_PROXIES
ip, port = proxy
_proxies = {
'http': '{}:{}'.format(ip, port)
}
try:
res = requests.get(
'http://1212.ip138.com/ic.asp', proxies=_proxies, timeout=10)
assert ip in res.content
logger.info('[GOOD] - {}:{}'.format(ip, port))
GOOD_PROXIES.append(proxy)
except Exception, e:
logger.error('[BAD] - {}:{}, {}'.format(ip, port, e))
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
1976,
9892,
13,
9078,
198,
7061,
6,
198,
22266,
312,
10,
44165,
247,
32014,
163,
230,
115,
162,
238,
255,
161,
119,
118,
47987,
49426,
228,
... | 2.024316 | 658 |
# The MIT License (MIT)
#
# Copyright (c) 2016 Ivan Gavrilin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.animation as anim
import datetime
mpl.rcParams['axes.facecolor'] = '#d3d3d3'
mpl.rcParams['axes.edgecolor'] = '#303030'
mpl.rcParams['axes.grid'] = 'True'
mpl.rcParams['grid.color'] = '#323232'
mpl.rcParams['grid.linewidth'] = 0.5
mpl.rcParams['patch.facecolor'] = 'blue'
mpl.rcParams['patch.edgecolor'] = '#eeeeee'
mpl.rcParams['figure.facecolor'] = '#e3e3e3'
mpl.rcParams['font.family'] = 'sans-serif'
#plt.style.use('fivethirtyeight')
from .stream import Stream
_all_windows = []
| [
2,
383,
17168,
13789,
357,
36393,
8,
198,
2,
198,
2,
15069,
357,
66,
8,
1584,
21798,
402,
615,
22379,
259,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
2,
286,
428... | 3.133333 | 540 |
# Numpy
#
from numpy import linspace, arange
# Numpy array
from numpy import array, append, zeros, ones, where
# Numpy common math function
from numpy import exp, sqrt, arctan2, cos, sin, angle, radians, sign, log, ceil
# Numpy constant
from numpy import pi
from pandas import infer_freq
# def set_QubitRegister():
if __name__ == "__main__":
import matplotlib.pyplot as plt
import scipy.fft as spfft
dt = 1.
print("register IQMixerChannel")
mixerInfo = IQMixerChannel()
OPS = QubitOperationSequence(100, 1.)
print(f"set new operation")
op1 = PulseBuilder(20,dt)
op1.arbXYGate([pi,0])
op2 = PulseBuilder(50,dt)
op2.rotXY([1,0.25,5,0])
op3 = PulseBuilder(20,dt)
op3.idle([0])
print("register operation to sequence")
OPS.set_operation([op3, op2])
print("calculate XY waveform of the sequence")
OPS.generate_sequenceWaveform(mixerInfo=mixerInfo)
xyWf = OPS.xywaveform
print("calculate IQ waveform of the sequence")
iqWf = OPS.iqwaveform
plot1 = plt.figure(1)
timeAxis = get_timeAxis(xyWf)
plt.plot(timeAxis, xyWf["data"].real)
plt.plot(timeAxis, xyWf["data"].imag)
plt.plot(timeAxis, iqWf["data"].real)
plt.plot(timeAxis, iqWf["data"].imag)
plot2 = plt.figure(2)
plt.plot(xyWf["data"].real, xyWf["data"].imag)
plt.plot(iqWf["data"].real, iqWf["data"].imag)
#plot3 = plt.figure(3)
fq = 5e9
pmixer = mixerInfo.phaseBalance
fIF = mixerInfo.ifFreq/1e3
# plt.plot(timeAxis, cos(2*pi*fq*timeAxis) )
# xymix = xyWf["data"].real*cos(2*pi*fq*timeAxis) +xyWf["data"].imag*cos(2*pi*fq*timeAxis +abs(radians(pmixer)) )
# plt.plot(timeAxis, xymix)
# iqmix = iqWf["data"].real*cos(2*pi*(fq+fIF)*timeAxis) +iqWf["data"].imag*cos(2*pi*(fq+fIF)*timeAxis +radians(pmixer) )
# plt.plot(timeAxis, iqmix)
# data_points = len(timeAxis)
# f_points = data_points//2
# faxis = spfft.fftfreq(data_points,iqWf["dt"])[0:f_points]
# plot4 = plt.figure(4)
# xyvector = spfft.fft(xymix)[0:f_points]/len(timeAxis)
# plt.plot(faxis, abs(xyvector))
# iqvector = spfft.fft(iqmix)[0:f_points]/len(timeAxis)
# plt.plot(faxis, 10*log(abs(iqvector)))
plt.show()
| [
2,
399,
32152,
198,
2,
220,
198,
6738,
299,
32152,
1330,
300,
1040,
10223,
11,
610,
858,
198,
2,
399,
32152,
7177,
198,
6738,
299,
32152,
1330,
7177,
11,
24443,
11,
1976,
27498,
11,
3392,
11,
810,
198,
2,
399,
32152,
2219,
10688,
... | 2.090741 | 1,080 |
from loguru import logger
import subprocess
import os
PROJECT_PATH = os.path.dirname(os.path.dirname(__file__))
VIDEO_PATH = os.path.join(PROJECT_PATH, "demo.mp4")
from test_cutter import test_default as cutter_default
from test_cutter import RESULT_DIR as CUTTER_RESULT_DIR
# prepare
cutter_default()
| [
6738,
2604,
14717,
1330,
49706,
198,
11748,
850,
14681,
198,
11748,
28686,
198,
198,
31190,
23680,
62,
34219,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008,
198,
42937,
62,
34219,
79... | 2.87037 | 108 |
from .base import AuthenticatedHandler
| [
6738,
764,
8692,
1330,
31885,
3474,
25060,
628
] | 5 | 8 |
import os
import glob
current_path = os.path.dirname(__file__)
__all__ = [os.path.basename(x) for x in glob.glob(os.path.join(current_path, "*"))
if os.path.isdir(x) and not os.path.basename(x).startswith("_")]
if __name__ == '__main__':
print(__all__)
| [
11748,
28686,
198,
11748,
15095,
198,
198,
14421,
62,
6978,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
8,
198,
834,
439,
834,
796,
685,
418,
13,
6978,
13,
12093,
12453,
7,
87,
8,
329,
2124,
287,
15095,
13,
4743,
672... | 2.339286 | 112 |
""" Clustering users with k-means algorithm
:Author: Yassmine Chebaro <yassmnine.chebaro@mssm.edu>
:Date: 2019-09-25
:License: MIT
"""
# Import Libraries
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.cluster import KMeans
from sklearn.metrics import mean_squared_error
import itertools
import os
from collections import Counter
import unittest
from clustering_users import ClusteringUsersStep1, ClusteringUsersStep2, GetAllClusters
| [
37811,
1012,
436,
1586,
2985,
351,
479,
12,
1326,
504,
11862,
198,
198,
25,
13838,
25,
575,
562,
3810,
2580,
5657,
78,
1279,
88,
562,
10295,
500,
13,
2395,
5657,
78,
31,
76,
824,
76,
13,
15532,
29,
198,
25,
10430,
25,
13130,
12,
... | 3.053571 | 168 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.891892 | 37 |
def define(hub):
'''
Define how the onchanges requisite should run
'''
return {
'result': True,
'changes': True,
}
| [
4299,
8160,
7,
40140,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
2896,
500,
703,
262,
319,
36653,
37088,
815,
1057,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
1441,
1391,
198,
220,
220,
220,
220,
220,
220,
22... | 2.152778 | 72 |
number = int(input())
n_to_count = int(input())
counter = 0
while number > 0:
result = number % 2
number //= 2
if result == n_to_count:
counter += 1
print(counter)
| [
17618,
796,
493,
7,
15414,
28955,
198,
77,
62,
1462,
62,
9127,
796,
493,
7,
15414,
28955,
198,
24588,
796,
657,
198,
198,
4514,
1271,
1875,
657,
25,
198,
220,
220,
220,
1255,
796,
1271,
4064,
362,
198,
220,
220,
220,
1271,
3373,
2... | 2.447368 | 76 |
# coding=UTF8
# Python TicTacToe game with Tk GUI and minimax AI
# Author: Maurits van der Schee <maurits@vdschee.nl>
# Repo: https://github.com/mevdschee/python-tictactoe
"""
Run this file to play manually with any agent.
"""
from Tkinter import Tk, Button, PhotoImage
from PIL import ImageTk, Image
from tkFont import Font
from copy import deepcopy
import numpy as np
from MCTS import MCTS
from utils import *
from tictactoe.TicTacToeGame import TicTacToeGame, display
from tictactoe.TicTacToePlayers import *
from tictactoe.tensorflow.NNet import NNetWrapper as NNet
BOARD_SIZE = 3
g = TicTacToeGame(BOARD_SIZE)
# nnet players
nn = NNet(g)
nn.load_checkpoint('./temp/', 'best.pth.tar')
args1 = dotdict({'numMCTSSims': 50, 'cpuct':1.0})
mcts1 = MCTS(g, nn, args1)
nnp = lambda x: np.argmax(mcts1.getActionProb(x, temp=0))
if __name__ == '__main__':
GUI().mainloop()
| [
2,
19617,
28,
48504,
23,
198,
198,
2,
11361,
309,
291,
51,
330,
2514,
68,
983,
351,
309,
74,
25757,
290,
10356,
897,
9552,
198,
2,
6434,
25,
18867,
896,
5719,
4587,
10011,
68,
1279,
2611,
333,
896,
31,
20306,
15952,
68,
13,
21283,... | 2.480337 | 356 |
"""Utilities for EveryVoter project"""
| [
37811,
18274,
2410,
329,
3887,
53,
19543,
1628,
37811,
198
] | 3.9 | 10 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'designer/ui_binningWindow.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
5178,
7822,
7560,
422,
3555,
334,
72,
2393,
705,
26124,
263,
14,
9019,
62,
8800,
768,
27703,
13,
9019,
6,
198,
2,
198,
2,
15622,
416,
25,
9485,
48,
83,
20,... | 2.865169 | 89 |
import cv2
import os
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
video_path = '/home/shixi/C3D-keras/datasets/ucf101/'
save_path = '/home/shixi/C3D-keras/datasets/ucfimgs/'
action_list = os.listdir(video_path)
for action in action_list:
if not os.path.exists(save_path+action):
os.mkdir(save_path+action)
video_list = os.listdir(video_path+action)
for video in video_list:
prefix = video.split('.')[0]
if not os.path.exists(save_path+action+'/'+prefix):
os.mkdir(save_path+action+'/'+prefix)
save_name = save_path + action + '/' + prefix + '/'
#save_name = save_path + prefix + '/'
video_name = video_path+action+'/'+video
name = video_name.split('.')[1]
if name == "avi":
cap = cv2.VideoCapture(video_name)
fps = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
fps_count = 0
for i in range(fps):
ret, frame = cap.read()
if ret:
cv2.imwrite(save_name + str(10000 + fps_count) + '.jpg', frame)
fps_count += 1
if name == "gif":
im = Image.open(video_name)
#当打开一个序列文件时,PIL库自动加载第一帧。你可以使用seek()函数tell()函数在不同帧之间移动。实现保存
try:
while True:
current = im.tell()
img = im.convert('RGB') #为了保存为jpg格式,需要转化。否则只能保存png
img.save(save_name+'/'+str(10000+current)+'.jpg')
im.seek(current + 1)
except EOFError:
pass
| [
11748,
269,
85,
17,
198,
11748,
28686,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
350,
4146,
1330,
7412,
8979,
198,
5159,
8979,
13,
35613,
62,
5446,
4944,
34,
11617,
62,
3955,
25552,
796,
6407,
198,
198,
15588,
62,
6978,
796,
31051,... | 1.71536 | 931 |
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common import raise_exception_from_futures
from cuml.dask.common.comms import worker_state, CommsContext
from cuml.dask.common.input_utils import to_output
from cuml.dask.common.part_utils import flatten_grouped_results
from dask.distributed import wait
from cuml.dask.common.base import BaseEstimator
from cuml.dask.common.input_utils import DistributedDataHandler
| [
2,
15069,
357,
66,
8,
13130,
11,
15127,
23929,
44680,
6234,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262... | 3.443662 | 284 |
from fastapi import APIRouter, HTTPException
import pandas as pd
import plotly.express as px
import os
router = APIRouter()
DATA_FILEPATH1 = os.path.join(os.path.dirname(__file__), "..", "..","data", "current_pop_data_final.csv")
DATA_FILEPATH2 = os.path.join(os.path.dirname(__file__), "..", "..","data", "historical_pop_data_final.csv")
@router.get('/population/{city_id}')
async def pop_to_dict(city_id: int):
"""
Pull demographic data for specific city, state, and year
### Query Parameters:
- `city_id`: [city_id], unique numeric mapping (ex: 0 returns Anchorage, AK)
### Response
Dictionary object
"""
rt_dict = {}
rt_data_dict = {}
df = pd.read_csv(DATA_FILEPATH1, encoding='utf-8')
dataframe = df[df['city_id']==city_id]
rt_data = dataframe.to_numpy()
rt_data_dict["total_pop"] = rt_data[0][4]
rt_data_dict["land_area"] = rt_data[0][5]
rt_data_dict["pop_density"] = rt_data[0][6]
rt_data_dict["male_pop"] = rt_data[0][7]
rt_data_dict["female_pop"] = rt_data[0][8]
rt_data_dict["age_under_20"] = rt_data[0][9]
rt_data_dict["age_20-29"] = rt_data[0][10]
rt_data_dict["age_30-39"] = rt_data[0][11]
rt_data_dict["age_40-49"] = rt_data[0][12]
rt_data_dict["age_50-59"] = rt_data[0][13]
rt_data_dict["age_above_60"] = rt_data[0][14]
rt_dict["data"] = rt_data_dict
rt_dict["viz"] = citypopviz(city=rt_data[0][1], state=rt_data[0][2])
return rt_dict
def citypopviz(city, state,metric = 'total_pop'):
"""
Visualize historical population metrics from 2010 to 2018 for one city
### Query Parameters:
- `metric`: 'total_pop', 'land_area', 'pop_density', 'male_pop', 'female_pop',
'age_under_20', 'age_20-29', 'age_30-39', 'age_40-49', 'age_50-59', or 'age_above_60';
default='total_pop',case sensitive, total/male/female pop in thousands, land area
in sq mi, pop_density in person/sqmi, age demographics in percentages
- `city`: [city name], case sensitive(ex: Birmingham)
- `state `: [state abbreviation], 2-letters; case sensitive (ex: AL)
### Response
JSON string to render with react-plotly.js
"""
df = pd.read_csv(DATA_FILEPATH2, encoding='utf-8')
subset = df[(df.city == city) & (df.state == state)]
fig = px.line(subset, x='year', y=metric, title=f'{metric} in {city},{state}')
return fig.to_json()
| [
6738,
3049,
15042,
1330,
3486,
4663,
39605,
11,
14626,
16922,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
11748,
7110,
306,
13,
42712,
355,
279,
87,
201,
198,
11748,
28686,
201,
198,
201,
198,
472,
353,
796,
3486,
4663,
39605... | 2.223222 | 1,111 |
# Digit fifth powers
#
# Problem 30
#
# Surprisingly there are only three numbers that can be written as the sum of fourth powers of their digits:
#
# 1634 = 14 + 64 + 34 + 44
# 8208 = 84 + 24 + 04 + 84
# 9474 = 94 + 44 + 74 + 44
# As 1 = 14 is not a sum it is not included.
#
# The sum of these numbers is 1634 + 8208 + 9474 = 19316.
#
# Find the sum of all the numbers that can be written as the sum of fifth powers of their digits.
# inspired by silverfish's comment:
# the sum of the digits is not dependent on the order of these!
# -> create only unique lists of digits
pow5_sums = [(sum([d**5 for d in digit_representation]), "".join(map(str, digit_representation)))
for digit_representation in gen_candidates()]
identical = [(a, b) for a, b in pow5_sums if sorted(list(str(a))) == sorted(list(b))]
print(sum([a for a, _ in identical])) # 443839
| [
2,
7367,
270,
8150,
5635,
198,
2,
198,
2,
20647,
1542,
198,
2,
198,
2,
47183,
612,
389,
691,
1115,
3146,
326,
460,
307,
3194,
355,
262,
2160,
286,
5544,
5635,
286,
511,
19561,
25,
198,
2,
198,
2,
1467,
2682,
796,
1478,
1343,
559... | 2.996575 | 292 |
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Client side of the metrics task manager RPC API.
"""
import oslo_messaging as messaging
from oslo_config import cfg
from delfin import rpc
CONF = cfg.CONF
class TaskAPI(object):
"""Client side of the metrics task rpc API.
API version history:
1.0 - Initial version.
"""
RPC_API_VERSION = '1.0'
| [
2,
15069,
33448,
383,
311,
3727,
32,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198... | 3.365314 | 271 |
import cv2
import numpy as np
from keras.models import load_model
from statistics import mode
from utils.datasets import get_labels
from utils.inference import detect_faces
from utils.inference import draw_text
from utils.inference import draw_bounding_box
from utils.inference import apply_offsets
from utils.inference import load_detection_model
from utils.preprocessor import preprocess_input
import tensorflow as tf
# parameters for loading data and images
emotion_model_path = './models/model.hdf5'
emotion_labels = get_labels('fer2013')
# hyper-parameters for bounding boxes shape
frame_window = 10
emotion_offsets = (20, 40)
# loading models
face_cascade = cv2.CascadeClassifier('./models/face_box.xml')
emotion_classifier = load_model(emotion_model_path)
graph = tf.get_default_graph()
# getting input model shapes for inference
emotion_target_size = emotion_classifier.input_shape[1:3]
# starting lists for calculating modes
emotion_window = []
# Select video or webcam feed
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
41927,
292,
13,
27530,
1330,
3440,
62,
19849,
198,
6738,
7869,
1330,
4235,
198,
6738,
3384,
4487,
13,
19608,
292,
1039,
1330,
651,
62,
23912,
1424,
198,
6738,
3384,
44... | 3.266447 | 304 |
_NOOP=const(0)
_DIGIT0=const(1)
_DIGIT1=const(2)
_DIGIT2=const(3)
_DIGIT3=const(4)
_DIGIT4=const(5)
_DIGIT5=const(6)
_DIGIT6=const(7)
_DIGIT7=const(8)
_DECODEMODE=const(9)
_INTENSITY=const(10)
_SCANLIMIT=const(11)
_SHUTDOWN=const(12)
_DISPLAYTEST=const(15)
| [
62,
15285,
3185,
28,
9979,
7,
15,
8,
198,
62,
35,
3528,
2043,
15,
28,
9979,
7,
16,
8,
198,
62,
35,
3528,
2043,
16,
28,
9979,
7,
17,
8,
198,
62,
35,
3528,
2043,
17,
28,
9979,
7,
18,
8,
198,
62,
35,
3528,
2043,
18,
28,
997... | 1.708609 | 151 |
from flask import Flask, request, render_template, redirect, url_for
from decouple import config
from flask_pymongo import PyMongo
from flask_cors import CORS
from apps.city_spelling_matcher import *
from apps.charts import *
from apps.pred_models import *
from apps.data_rangle import *
from flask import json
from flask import jsonify
from flask_sqlalchemy import SQLAlchemy
import netaddr
from sklearn.neighbors import KDTree
| [
6738,
42903,
1330,
46947,
11,
2581,
11,
8543,
62,
28243,
11,
18941,
11,
19016,
62,
1640,
198,
6738,
875,
43846,
1330,
4566,
198,
6738,
42903,
62,
79,
4948,
25162,
1330,
9485,
44,
25162,
198,
6738,
42903,
62,
66,
669,
1330,
327,
20673,... | 3.553719 | 121 |
import pytest,torch
from fastai.basics import have_min_pkg_version
from fastai.gen_doc.doctest import this_tests
from fastai.text.models.qrnn import ForgetMultGPU, BwdForgetMultGPU, forget_mult_CPU, QRNN, QRNNLayer
@pytest.mark.cuda
@pytest.mark.cpp
@pytest.mark.cuda
@pytest.mark.cpp
# bug in pytorch=1.0.1, fixed in 1.0.2 https://github.com/pytorch/pytorch/issues/18189
@pytest.mark.skipif(not have_min_pkg_version("torch", "1.0.2"), reason="requires torch>=1.0.2")
| [
11748,
12972,
9288,
11,
13165,
354,
198,
6738,
3049,
1872,
13,
12093,
873,
1330,
423,
62,
1084,
62,
35339,
62,
9641,
198,
6738,
3049,
1872,
13,
5235,
62,
15390,
13,
4598,
310,
395,
1330,
428,
62,
41989,
198,
6738,
3049,
1872,
13,
52... | 2.465969 | 191 |
import sys
import torch
import torchvision
from embryovision.torchnn import Classifier
| [
11748,
25064,
198,
198,
11748,
28034,
198,
11748,
28034,
10178,
198,
198,
6738,
20748,
709,
1166,
13,
13165,
1349,
77,
1330,
5016,
7483,
628,
198
] | 3.64 | 25 |
#!/usr/bin/env python
from Bio import SeqIO
from Bio import Seq
import sys
inFile = open(sys.argv[1])
for rec in SeqIO.parse(inFile, "fasta"):
i = 0
l = len(rec.seq)
while (i < l):
j=i
while (j < l and rec.seq[j] == 'N'):
j += 1
if (j - i > 100000):
print rec.id + "\t" + str(i) + "\t" + str(j)
i = j+1
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
16024,
1330,
1001,
80,
9399,
198,
6738,
16024,
1330,
1001,
80,
198,
198,
11748,
25064,
198,
198,
259,
8979,
796,
1280,
7,
17597,
13,
853,
85,
58,
16,
12962,
198,
1640,
664... | 1.842365 | 203 |
# Copyright 2020 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import configparser
import pathlib
import shlex
from appimagebuilder.context import AppInfo
| [
2,
220,
15069,
220,
12131,
31078,
22593,
47828,
1155,
64,
198,
2,
198,
2,
220,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
198,
2,
220,
4866,
286,
428,
3788,
290,
3917,
10314,
3696,
357,
1169,
... | 4.252941 | 170 |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLX import _types as _cs
# End users want this...
from OpenGL.raw.GLX._types import *
from OpenGL.raw.GLX import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLX_SGIX_pbuffer'
GLX_ACCUM_BUFFER_BIT_SGIX=_C('GLX_ACCUM_BUFFER_BIT_SGIX',0x00000080)
GLX_AUX_BUFFERS_BIT_SGIX=_C('GLX_AUX_BUFFERS_BIT_SGIX',0x00000010)
GLX_BACK_LEFT_BUFFER_BIT_SGIX=_C('GLX_BACK_LEFT_BUFFER_BIT_SGIX',0x00000004)
GLX_BACK_RIGHT_BUFFER_BIT_SGIX=_C('GLX_BACK_RIGHT_BUFFER_BIT_SGIX',0x00000008)
GLX_BUFFER_CLOBBER_MASK_SGIX=_C('GLX_BUFFER_CLOBBER_MASK_SGIX',0x08000000)
GLX_DAMAGED_SGIX=_C('GLX_DAMAGED_SGIX',0x8020)
GLX_DEPTH_BUFFER_BIT_SGIX=_C('GLX_DEPTH_BUFFER_BIT_SGIX',0x00000020)
GLX_EVENT_MASK_SGIX=_C('GLX_EVENT_MASK_SGIX',0x801F)
GLX_FRONT_LEFT_BUFFER_BIT_SGIX=_C('GLX_FRONT_LEFT_BUFFER_BIT_SGIX',0x00000001)
GLX_FRONT_RIGHT_BUFFER_BIT_SGIX=_C('GLX_FRONT_RIGHT_BUFFER_BIT_SGIX',0x00000002)
GLX_HEIGHT_SGIX=_C('GLX_HEIGHT_SGIX',0x801E)
GLX_LARGEST_PBUFFER_SGIX=_C('GLX_LARGEST_PBUFFER_SGIX',0x801C)
GLX_MAX_PBUFFER_HEIGHT_SGIX=_C('GLX_MAX_PBUFFER_HEIGHT_SGIX',0x8017)
GLX_MAX_PBUFFER_PIXELS_SGIX=_C('GLX_MAX_PBUFFER_PIXELS_SGIX',0x8018)
GLX_MAX_PBUFFER_WIDTH_SGIX=_C('GLX_MAX_PBUFFER_WIDTH_SGIX',0x8016)
GLX_OPTIMAL_PBUFFER_HEIGHT_SGIX=_C('GLX_OPTIMAL_PBUFFER_HEIGHT_SGIX',0x801A)
GLX_OPTIMAL_PBUFFER_WIDTH_SGIX=_C('GLX_OPTIMAL_PBUFFER_WIDTH_SGIX',0x8019)
GLX_PBUFFER_BIT_SGIX=_C('GLX_PBUFFER_BIT_SGIX',0x00000004)
GLX_PBUFFER_SGIX=_C('GLX_PBUFFER_SGIX',0x8023)
GLX_PRESERVED_CONTENTS_SGIX=_C('GLX_PRESERVED_CONTENTS_SGIX',0x801B)
GLX_SAMPLE_BUFFERS_BIT_SGIX=_C('GLX_SAMPLE_BUFFERS_BIT_SGIX',0x00000100)
GLX_SAVED_SGIX=_C('GLX_SAVED_SGIX',0x8021)
GLX_STENCIL_BUFFER_BIT_SGIX=_C('GLX_STENCIL_BUFFER_BIT_SGIX',0x00000040)
GLX_WIDTH_SGIX=_C('GLX_WIDTH_SGIX',0x801D)
GLX_WINDOW_SGIX=_C('GLX_WINDOW_SGIX',0x8022)
@_f
@_p.types(_cs.GLXPbufferSGIX,ctypes.POINTER(_cs.Display),_cs.GLXFBConfigSGIX,_cs.c_uint,_cs.c_uint,ctypes.POINTER(_cs.c_int))
@_f
@_p.types(None,ctypes.POINTER(_cs.Display),_cs.GLXPbufferSGIX)
@_f
@_p.types(None,ctypes.POINTER(_cs.Display),_cs.GLXDrawable,ctypes.POINTER(_cs.c_ulong))
@_f
@_p.types(None,ctypes.POINTER(_cs.Display),_cs.GLXPbufferSGIX,_cs.c_int,ctypes.POINTER(_cs.c_uint))
@_f
@_p.types(None,ctypes.POINTER(_cs.Display),_cs.GLXDrawable,_cs.c_ulong)
| [
7061,
6,
16541,
519,
877,
515,
416,
35555,
62,
8612,
378,
4226,
11,
466,
407,
4370,
0,
7061,
6,
201,
198,
6738,
30672,
1330,
3859,
355,
4808,
79,
11,
26515,
201,
198,
2,
6127,
5270,
3544,
428,
201,
198,
6738,
30672,
13,
1831,
13,
... | 1.884091 | 1,320 |
if __name__ == '__main__':
p = HEAP()
q = [1, 2, 3]
e = []
while True:
a = str(input())
q = []
qq = []
senten = ""
kk = 0
k = 1
if a[0] == ";":
break
if a[0] == "+": p.insert(int(a[1:len(a)]))
if a[0] == "-": print(p.rem())
if a[0] == "?": p.pri()
if a[0] == "=": p.tarsim(1)
if a[0] == "*":
qq = p.heapsort()
kk = len(qq) - 1
if len(qq) == 0:
print("Empty")
else:
while kk >= 0:
senten = senten + str(qq[kk]) + " "
kk -= 1
print(senten)
if a[0] == "[":
s = []
p = HEAP()
a = str(input())
while a != "]":
s.append(int(a))
a = str(input())
p.newsort(s)
# print( s[0:len(s)])
if a[0] == "!":
p = HEAP()
| [
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
201,
198,
201,
198,
220,
220,
220,
279,
796,
11179,
2969,
3419,
201,
198,
220,
220,
220,
10662,
796,
685,
16,
11,
362,
11,
513,
60,
201,
198,
220,
220,
220,
304,
796,
17635... | 1.451117 | 716 |
"""Base class for collections interactive tests.
"""
import difflib
import os
from typing import Generator
import pytest
from ....defaults import FIXTURES_COLLECTION_DIR
from ..._common import copytree
from ..._common import retrieve_fixture_for_step
from ..._common import update_fixtures
from ..._interactions import SearchFor
from ..._interactions import UiTestStep
from ..._tmux_session import TmuxSession
EXPECTED_COLLECTIONS = ["ansible.builtin", "company_name.coll_1", "company_name.coll_2"]
base_steps = (
UiTestStep(user_input=":1", comment="Browse company_name.coll_1 plugins window"),
UiTestStep(user_input=":0", comment="lookup_1 plugin docs window"),
UiTestStep(user_input=":back", comment="Back to browse company_name.coll_1 plugins window"),
UiTestStep(user_input=":1", comment="mod_1 plugin docs window"),
UiTestStep(user_input=":back", comment="Back to browse company_name.coll_1 plugins window"),
UiTestStep(user_input=":2", comment="role_full details window"),
UiTestStep(user_input=":back", comment="Back to browse company_name.coll_1 plugins window"),
UiTestStep(user_input=":3", comment="role_minimal details window"),
UiTestStep(user_input=":back", comment="Back to browse company_name.coll_1 plugins window"),
UiTestStep(
user_input=":back",
comment="Back to ansible-navigator collections browse window",
present=EXPECTED_COLLECTIONS,
),
UiTestStep(user_input=":2", comment="Browse company_name.coll_2 plugins window"),
UiTestStep(user_input=":0", comment="lookup_2 plugin docs window"),
UiTestStep(user_input=":back", comment="Back to browse company_name.coll_2 plugins window"),
UiTestStep(user_input=":1", comment="mod_2 plugin docs window"),
UiTestStep(user_input=":back", comment="Back to browse company_name.coll_2 plugins window"),
UiTestStep(
user_input=":back",
comment="Back to ansible-navigator collections browse window",
present=EXPECTED_COLLECTIONS,
),
# Try some things that should not work but not fail (#1061 and #1062)
UiTestStep(
user_input=":collections --ee FFFFF",
comment="Provide an invalid ee value",
present=["Errors were encountered while parsing the last command"],
search_within_response=SearchFor.WARNING,
),
# Dismiss the warning
UiTestStep(
user_input="Enter",
comment="ansible-navigator collections browse window",
present=EXPECTED_COLLECTIONS,
),
# and repeat some basic browsing
UiTestStep(user_input=":1", comment="Browse company_name.coll_1 plugins window"),
UiTestStep(user_input=":0", comment="lookup_1 plugin docs window"),
UiTestStep(user_input=":back", comment="Back to browse company_name.coll_1 plugins window"),
UiTestStep(
user_input=":back",
comment="Back to ansible-navigator collections browse window",
present=EXPECTED_COLLECTIONS,
),
UiTestStep(
user_input=":0",
comment="Browse ansible.builtin plugins window",
present=["yum_repository"],
),
UiTestStep(
user_input=":0",
comment="Browse ansible.builtin.add_host module",
present=["ansible.builtin.add_host"],
),
)
class BaseClass:
"""Base class for interactive ``collections`` tests."""
update_fixtures = False
pane_height = 2000
pane_width = 200
@pytest.fixture(scope="module", name="tmux_session")
def fixture_tmux_session(
self,
request: pytest.FixtureRequest,
os_independent_tmp: str,
) -> Generator[TmuxSession, None, None]:
"""Tmux fixture for this module.
:param request: The request for this fixture
:param os_independent_tmp: An OS independent tmp directory
:yields: A tmux session
"""
tmp_coll_dir = os.path.join(os_independent_tmp, request.node.name, "")
os.makedirs(tmp_coll_dir, exist_ok=True)
copytree(
FIXTURES_COLLECTION_DIR,
os.path.join(tmp_coll_dir, "collections"),
dirs_exist_ok=True,
)
params = {
"setup_commands": [
f"cd {tmp_coll_dir}",
f"export ANSIBLE_COLLECTIONS_PATH={tmp_coll_dir}",
"export ANSIBLE_DEVEL_WARNING=False",
"export ANSIBLE_DEPRECATION_WARNINGS=False",
],
"pane_height": self.pane_height,
"pane_width": self.pane_width,
"unique_test_id": request.node.nodeid,
}
with TmuxSession(**params) as tmux_session:
yield tmux_session
def test(
self,
os_independent_tmp: str,
request: pytest.FixtureRequest,
step: UiTestStep,
tmux_session: TmuxSession,
):
"""Run the tests for ``collections``, mode and ``ee`` set in child class.
:param os_independent_tmp: An OS independent tmp directory
:param request: The request for this fixture
:param step: The UI test step
:param tmux_session: A tmux session
"""
if step.search_within_response is SearchFor.HELP:
search_within_response = ":help help"
elif step.search_within_response is SearchFor.PROMPT:
search_within_response = tmux_session.cli_prompt
elif step.search_within_response is SearchFor.WARNING:
search_within_response = "Warning"
else:
raise ValueError("test mode not set")
received_output = tmux_session.interaction(
value=step.user_input,
search_within_response=search_within_response,
)
received_output = [
line.replace(os_independent_tmp, "FIXTURES_COLLECTION_DIR") for line in received_output
]
fixtures_update_requested = (
self.update_fixtures
or os.environ.get("ANSIBLE_NAVIGATOR_UPDATE_TEST_FIXTURES") == "true"
)
if fixtures_update_requested:
update_fixtures(
request,
step.step_index,
received_output,
step.comment,
additional_information={
"present": step.present,
"absent": step.absent,
"compared_fixture": not any((step.present, step.absent)),
},
)
page = " ".join(received_output)
if step.present:
assert all(present in page for present in step.present)
if step.absent:
assert not any(look_not in page for look_not in step.absent)
if not any((step.present, step.absent)):
expected_output = retrieve_fixture_for_step(request, step.step_index)
assert expected_output == received_output, "\n" + "\n".join(
difflib.unified_diff(expected_output, received_output, "expected", "received"),
)
| [
37811,
14881,
1398,
329,
17268,
14333,
5254,
13,
198,
37811,
198,
11748,
814,
8019,
198,
11748,
28686,
198,
198,
6738,
19720,
1330,
35986,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
19424,
12286,
82,
1330,
44855,
51,
29514,
62,
25154,
... | 2.361102 | 2,941 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import sys
import time
import types
import eventlet
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
import six
from six import reraise as raise_
from heat.common.i18n import _
from heat.common.i18n import _LI
LOG = logging.getLogger(__name__)
# Whether TaskRunner._sleep actually does an eventlet sleep when called.
ENABLE_SLEEP = True
wallclock = time.time
def task_description(task):
"""
Return a human-readable string description of a task suitable for logging
the status of the task.
"""
name = task.__name__ if hasattr(task, '__name__') else None
if isinstance(task, types.MethodType):
if name is not None and hasattr(task, '__self__'):
return '%s from %s' % (name, task.__self__)
elif isinstance(task, types.FunctionType):
if name is not None:
return six.text_type(name)
return repr(task)
class Timeout(BaseException):
"""
Timeout exception, raised within a task when it has exceeded its allotted
(wallclock) running time.
This allows the task to perform any necessary cleanup, as well as use a
different exception to notify the controlling task if appropriate. If the
task suppresses the exception altogether, it will be cancelled but the
controlling task will not be notified of the timeout.
"""
def __init__(self, task_runner, timeout):
"""
Initialise with the TaskRunner and a timeout period in seconds.
"""
message = _('%s Timed out') % six.text_type(task_runner)
super(Timeout, self).__init__(message)
# Note that we don't attempt to handle leap seconds or large clock
# jumps here. The latter are assumed to be rare and the former
# negligible in the context of the timeout. Time zone adjustments,
# Daylight Savings and the like *are* handled. PEP 418 adds a proper
# monotonic clock, but only in Python 3.3.
self._endtime = wallclock() + timeout
def trigger(self, generator):
"""Trigger the timeout on a given generator."""
try:
generator.throw(self)
except StopIteration:
return True
else:
# Clean up in case task swallows exception without exiting
generator.close()
return False
class ExceptionGroup(Exception):
'''
Container for multiple exceptions.
This exception is used by DependencyTaskGroup when the flag
aggregate_exceptions is set to True and it's re-raised again when all tasks
are finished. This way it can be caught later on so that the individual
exceptions can be acted upon.
'''
class TaskRunner(object):
"""
Wrapper for a resumable task (co-routine).
"""
def __init__(self, task, *args, **kwargs):
"""
Initialise with a task function, and arguments to be passed to it when
it is started.
The task function may be a co-routine that yields control flow between
steps.
"""
assert callable(task), "Task is not callable"
self._task = task
self._args = args
self._kwargs = kwargs
self._runner = None
self._done = False
self._timeout = None
self.name = task_description(task)
def __str__(self):
"""Return a human-readable string representation of the task."""
text = 'Task %s' % self.name
return encodeutils.safe_encode(text)
def __unicode__(self):
"""Return a human-readable string representation of the task."""
text = 'Task %s' % self.name
return encodeutils.safe_decode(text)
def _sleep(self, wait_time):
"""Sleep for the specified number of seconds."""
if ENABLE_SLEEP and wait_time is not None:
LOG.debug('%s sleeping' % six.text_type(self))
eventlet.sleep(wait_time)
def __call__(self, wait_time=1, timeout=None):
"""
Start and run the task to completion.
The task will first sleep for zero seconds, then sleep for `wait_time`
seconds between steps. To avoid sleeping, pass `None` for `wait_time`.
"""
self.start(timeout=timeout)
# ensure that zero second sleep is applied only if task
# has not completed.
if not self.done() and wait_time:
self._sleep(0)
self.run_to_completion(wait_time=wait_time)
def start(self, timeout=None):
"""
Initialise the task and run its first step.
If a timeout is specified, any attempt to step the task after that
number of seconds has elapsed will result in a Timeout being
raised inside the task.
"""
assert self._runner is None, "Task already started"
assert not self._done, "Task already cancelled"
LOG.debug('%s starting' % six.text_type(self))
if timeout is not None:
self._timeout = Timeout(self, timeout)
result = self._task(*self._args, **self._kwargs)
if isinstance(result, types.GeneratorType):
self._runner = result
self.step()
else:
self._runner = False
self._done = True
LOG.debug('%s done (not resumable)' % six.text_type(self))
def step(self):
"""
Run another step of the task, and return True if the task is complete;
False otherwise.
"""
if not self.done():
assert self._runner is not None, "Task not started"
if self._timeout is not None and self._timeout.expired():
LOG.info(_LI('%s timed out'), six.text_type(self))
self._done = True
self._timeout.trigger(self._runner)
else:
LOG.debug('%s running' % six.text_type(self))
try:
next(self._runner)
except StopIteration:
self._done = True
LOG.debug('%s complete' % six.text_type(self))
return self._done
def run_to_completion(self, wait_time=1):
"""
Run the task to completion.
The task will sleep for `wait_time` seconds between steps. To avoid
sleeping, pass `None` for `wait_time`.
"""
while not self.step():
self._sleep(wait_time)
def cancel(self, grace_period=None):
"""Cancel the task and mark it as done."""
if self.done():
return
if not self.started() or grace_period is None:
LOG.debug('%s cancelled' % six.text_type(self))
self._done = True
if self.started():
self._runner.close()
else:
timeout = TimedCancel(self, grace_period)
if self._timeout is None or timeout < self._timeout:
self._timeout = timeout
def started(self):
"""Return True if the task has been started."""
return self._runner is not None
def done(self):
"""Return True if the task is complete."""
return self._done
def __nonzero__(self):
"""Return True if there are steps remaining."""
return not self.done()
def wrappertask(task):
"""
Decorator for a task that needs to drive a subtask.
This is essentially a replacement for the Python 3-only "yield from"
keyword (PEP 380), using the "yield" keyword that is supported in
Python 2. For example::
@wrappertask
def parent_task(self):
self.setup()
yield self.child_task()
self.cleanup()
"""
@six.wraps(task)
return wrapper
class DependencyTaskGroup(object):
"""
A task which manages a group of subtasks that have ordering dependencies.
"""
def __init__(self, dependencies, task=lambda o: o(),
reverse=False, name=None, error_wait_time=None,
aggregate_exceptions=False):
"""
Initialise with the task dependencies and (optionally) a task to run on
each.
If no task is supplied, it is assumed that the tasks are stored
directly in the dependency tree. If a task is supplied, the object
stored in the dependency tree is passed as an argument.
If an error_wait_time is specified, tasks that are already running at
the time of an error will continue to run for up to the specified
time before being cancelled. Once all remaining tasks are complete or
have been cancelled, the original exception is raised.
If aggregate_exceptions is True, then execution of parallel operations
will not be cancelled in the event of an error (operations downstream
of the error will be cancelled). Once all chains are complete, any
errors will be rolled up into an ExceptionGroup exception.
"""
self._runners = dict((o, TaskRunner(task, o)) for o in dependencies)
self._graph = dependencies.graph(reverse=reverse)
self.error_wait_time = error_wait_time
self.aggregate_exceptions = aggregate_exceptions
if name is None:
name = '(%s) %s' % (getattr(task, '__name__',
task_description(task)),
six.text_type(dependencies))
self.name = name
def __repr__(self):
"""Return a string representation of the task."""
text = '%s(%s)' % (type(self).__name__, self.name)
return encodeutils.safe_encode(text)
def __call__(self):
"""Return a co-routine which runs the task group."""
raised_exceptions = []
while any(six.itervalues(self._runners)):
try:
for k, r in self._ready():
r.start()
yield
for k, r in self._running():
if r.step():
del self._graph[k]
except Exception:
exc_info = sys.exc_info()
if self.aggregate_exceptions:
self._cancel_recursively(k, r)
else:
self.cancel_all(grace_period=self.error_wait_time)
raised_exceptions.append(exc_info)
except: # noqa
with excutils.save_and_reraise_exception():
self.cancel_all()
if raised_exceptions:
if self.aggregate_exceptions:
raise ExceptionGroup(v for t, v, tb in raised_exceptions)
else:
exc_type, exc_val, traceback = raised_exceptions[0]
raise_(exc_type, exc_val, traceback)
def _ready(self):
"""
Iterate over all subtasks that are ready to start - i.e. all their
dependencies have been satisfied but they have not yet been started.
"""
for k, n in six.iteritems(self._graph):
if not n:
runner = self._runners[k]
if runner and not runner.started():
yield k, runner
def _running(self):
"""
Iterate over all subtasks that are currently running - i.e. they have
been started but have not yet completed.
"""
running = lambda k_r: k_r[0] in self._graph and k_r[1].started()
return six.moves.filter(running, six.iteritems(self._runners))
class PollingTaskGroup(object):
    """
    A task which manages a group of subtasks.

    When the task is started, all of its subtasks are also started. The task
    completes when all subtasks are complete.

    Once started, the subtasks are assumed to be only polling for completion
    of an asynchronous operation, so no attempt is made to give them equal
    scheduling slots.
    """

    def __init__(self, tasks, name=None):
        """Initialise with a list of tasks.

        :param tasks: an iterable of callables, each of which is run in its
            own TaskRunner.
        :param name: an optional human-readable name; defaults to the
            comma-separated descriptions of the subtasks.
        """
        self._tasks = list(tasks)
        if name is None:
            name = ', '.join(task_description(t) for t in self._tasks)
        self.name = name

    @staticmethod
    def _args(arg_lists):
        """Return a list containing the positional args for each subtask.

        Each element of *arg_lists* holds one positional argument's values
        across all subtasks; the result transposes that into one tuple of
        positional args per subtask, truncated to the shortest input.
        """
        # list() is required here: on Python 3 zip() returns an iterator,
        # and from_task_with_args() calls len() on this result.
        return list(zip(*arg_lists))

    @staticmethod
    def _kwargs(kwarg_lists):
        """Return a list containing the keyword args for each subtask."""
        keygroups = (six.moves.zip(itertools.repeat(name),
                                   arglist)
                     for name, arglist in six.iteritems(kwarg_lists))
        return [dict(kwargs) for kwargs in six.moves.zip(*keygroups)]

    @classmethod
    def from_task_with_args(cls, task, *arg_lists, **kwarg_lists):
        """
        Return a new PollingTaskGroup where each subtask is identical except
        for the arguments passed to it.

        Each argument to use should be passed as a list (or iterable) of
        values such that one is passed in the corresponding position for each
        subtask. The number of subtasks spawned depends on the length of the
        argument lists.

        For example::

            PollingTaskGroup.from_task_with_args(my_task,
                                                 [1, 2, 3],
                                                 alpha=['a', 'b', 'c'])

        will start three TaskRunners that will run::

            my_task(1, alpha='a')
            my_task(2, alpha='b')
            my_task(3, alpha='c')

        respectively.

        If multiple arguments are supplied, each list should be of the same
        length. In the case of any discrepancy, the length of the shortest
        argument list will be used, and any extra arguments discarded.
        """
        args_list = cls._args(arg_lists)
        kwargs_list = cls._kwargs(kwarg_lists)

        # When only one kind of argument was supplied, pad the other so that
        # the zip() below yields one (args, kwargs) pair per subtask.
        if kwarg_lists and not arg_lists:
            args_list = [[]] * len(kwargs_list)
        elif arg_lists and not kwarg_lists:
            kwargs_list = [{}] * len(args_list)

        task_args = six.moves.zip(args_list, kwargs_list)
        tasks = (functools.partial(task, *a, **kwa) for a, kwa in task_args)

        return cls(tasks, name=task_description(task))

    def __repr__(self):
        """Return a string representation of the task group."""
        text = '%s(%s)' % (type(self).__name__, self.name)
        return encodeutils.safe_encode(text)

    def __call__(self):
        """Return a co-routine which runs the task group."""
        runners = [TaskRunner(t) for t in self._tasks]

        try:
            for r in runners:
                r.start()

            while runners:
                yield
                # Drop the leading run of already-complete runners; polling
                # stops at the first subtask that is still in progress, so
                # later runners are stepped again on subsequent cycles.
                runners = list(itertools.dropwhile(lambda r: r.step(),
                                                   runners))
        except: # noqa
            # Cancel all remaining subtasks before propagating the error.
            with excutils.save_and_reraise_exception():
                for r in runners:
                    r.cancel()
| [
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
... | 2.387211 | 6,490 |
my_list = [7, 5, 3, 3, 2]
i = int(input('Ввидите новый элемент рейтинга: '))
my_list.append(i)
my_list.sort()
my_list.reverse()
print(my_list)
| [
1820,
62,
4868,
796,
685,
22,
11,
642,
11,
513,
11,
513,
11,
362,
60,
198,
72,
796,
493,
7,
15414,
10786,
140,
240,
38857,
18849,
43666,
18849,
20375,
16843,
12466,
121,
25443,
110,
45035,
140,
117,
220,
141,
235,
30143,
16843,
4310... | 1.576087 | 92 |
"""
Library for Adaptive Design of Experiments.
-- kandasamy@cs.cmu.edu
"""
from .exd.goal_oriented_exd import GoalOrientedExperimentDesigner
from .exd.worker_manager import get_default_worker_manager
from .policies.policy_utils import load_options_for_policy
| [
37811,
198,
220,
10074,
329,
30019,
425,
8495,
286,
8170,
6800,
13,
198,
220,
1377,
479,
392,
292,
14814,
31,
6359,
13,
11215,
84,
13,
15532,
198,
37811,
198,
198,
6738,
764,
1069,
67,
13,
35231,
62,
17107,
62,
1069,
67,
1330,
25376... | 3.093023 | 86 |
import argparse
import datetime
import h5py
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
_main()
| [
11748,
1822,
29572,
198,
11748,
4818,
8079,
198,
11748,
289,
20,
9078,
198,
11748,
2603,
29487,
8019,
13,
19581,
355,
285,
19581,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
628,
628... | 2.836066 | 61 |
# This file is a part of WUnderground PWS Python API
# Copyright (c) 2019 Marco Aceti <mail@marcoaceti.it>
# See LICENSE file for more details about licensing and copying.
| [
2,
770,
2393,
318,
257,
636,
286,
370,
9203,
2833,
350,
19416,
11361,
7824,
198,
2,
15069,
357,
66,
8,
13130,
16556,
4013,
316,
72,
1279,
4529,
31,
3876,
1073,
23253,
72,
13,
270,
29,
198,
2,
4091,
38559,
24290,
2393,
329,
517,
33... | 3.411765 | 51 |
from output_reader import Output_Reader
from parslet import Parslet
from report import Report
from report import Plan
import re | [
6738,
5072,
62,
46862,
1330,
25235,
62,
33634,
198,
6738,
13544,
1616,
1330,
23042,
1616,
198,
6738,
989,
1330,
6358,
198,
6738,
989,
1330,
5224,
198,
11748,
302
] | 4.535714 | 28 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-01-29 21:10
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
1314,
319,
13130,
12,
486,
12,
1959,
2310,
25,
940,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.754386 | 57 |
import sys
import os
import libsbml
from tqdm import tqdm
'''
Usage: annotate_links_from_mp.py <path_model-polisher_sbml-file> <path_input_sbml-file> <path_output_sbml-file>
Extracts annotations form ModelPolisher file and adds it to previous file.
'''
if __name__ == '__main__':
main(sys.argv)
| [
11748,
25064,
198,
11748,
28686,
198,
11748,
9195,
36299,
4029,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
7061,
6,
198,
28350,
25,
24708,
378,
62,
28751,
62,
6738,
62,
3149,
13,
9078,
1279,
6978,
62,
19849,
12,
16104,... | 2.657895 | 114 |
#! /usr/bin/env python3
"""
http://oj.leetcode.com/problems/populating-next-right-pointers-in-each-node/
Since Apr-10-2014 18:33
"""
# Definition for a binary tree node
# @param root, a tree node
# @return nothing
if __name__ == '__main__':
s = Solution()
n1 = TreeNode(1)
n2 = TreeNode(2)
n3 = TreeNode(3)
n4 = TreeNode(4)
n5 = TreeNode(5)
n6 = TreeNode(6)
n7 = TreeNode(7)
n1.left = n2
n1.right = n3
n2.left = n4
n2.right = n5
n3.left = n6
n3.right = n7
s.connect(n1)
locals = locals().copy()
for i in range(1, 8):
next = locals['n%s' % i].next
print(next) if next is None else print(next.val)
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
198,
4023,
1378,
13210,
13,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
12924,
8306,
12,
19545,
12,
3506,
12,
47809,
12,
259,
12,
27379,
12,
17440,
14,
198,
198... | 2.09009 | 333 |
# 主函数
import os
# 自写
import Tools
# 设置需要爬取的页面
BASE_URL = 'http://www.9ku.com'
CHILD_URLS = ['/douyin/bang.htm','/douyin/new.htm','/douyin/zhongwen.htm','/douyin/bgm.htm',
'/laoge/500shou.htm','/laoge/80.htm','/laoge/70.htm',
'/wangluo/zuixin.htm','/wangluo/haoting.htm']
SAVE_PATH = os.path.abspath(os.curdir)+'/audios'
COUNT = 0
PAGES = 20
# http://www.9ku.com/x1/music/by_new.php?act=t_hits&page=3 最新
# http://www.9ku.com/x1/music/by_new.php?act=t_new&page= 页码
if __name__ == '__main__':
for child_url in CHILD_URLS:
path = SAVE_PATH + child_url.split(".")[0] + '/'
if not os.path.exists(path):
os.makedirs(path)
COUNT +=Tools.getAudios(Tools.getAudioIds(BASE_URL,child_url),path)
print("共计下载:"+ str(COUNT)) | [
2,
220,
10310,
119,
49035,
121,
46763,
108,
198,
198,
11748,
28686,
198,
2,
5525,
229,
103,
37863,
247,
198,
11748,
20003,
628,
198,
2,
5525,
106,
122,
163,
121,
106,
165,
250,
222,
17358,
223,
163,
230,
105,
20998,
244,
21410,
165,... | 1.723312 | 459 |
# Generated by Django 3.0.3 on 2020-11-03 07:48
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
18,
319,
12131,
12,
1157,
12,
3070,
8753,
25,
2780,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import json
import math
import collections
import sys
scores = []
elements = []
end_res = {"files": []}
| [
11748,
33918,
198,
11748,
10688,
198,
11748,
17268,
198,
11748,
25064,
198,
198,
1416,
2850,
796,
17635,
198,
68,
3639,
796,
17635,
198,
437,
62,
411,
796,
19779,
16624,
1298,
17635,
92,
628,
628,
198
] | 3.114286 | 35 |
# -*- coding: utf-8 -*-
from pony import orm
import pony.orm.dbproviders.sqlite
import datetime
#orm.set_sql_debug(True)
db = orm.Database()
@db.on_connect(provider='sqlite')
db.bind(provider='sqlite', filename='checkytdb.sqlite', create_db=True)
db.generate_mapping(create_tables=True)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
6738,
26902,
1330,
393,
76,
201,
198,
11748,
26902,
13,
579,
13,
9945,
15234,
4157,
13,
25410,
578,
201,
198,
11748,
4818,
8079,
201,
198,
2,
579,
13,
2617,
62,
... | 2.26087 | 138 |
from unittest.mock import patch, MagicMock
from dataclasses import dataclass
@patch("flaskerize.generate._generate")
@patch("flaskerize.generate._generate")
@patch("flaskerize.generate._generate")
@patch("flaskerize.generate._generate")
@patch("flaskerize.generate._generate")
@patch("flaskerize.generate._generate")
| [
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
11,
6139,
44,
735,
198,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
628,
198,
31,
17147,
7203,
2704,
2093,
263,
1096,
13,
8612,
378,
13557,
8612,
378,
4943,
628,
198,
31,
17147,... | 2.737705 | 122 |
import numpy as np
from phi.flow import Scene
from control.control_training import ControlTraining
from control.pde.value import IncrementPDE
# Data generation
from control.sequences import StaggeredSequence
if len(Scene.list('~/phi/data/value')) == 0:
scene = Scene.create('~/phi/data/value')
for frame in range(32):
scene.write_sim_frame([np.zeros([1])+frame], ['data'], frame)
app = ControlTraining(n=8,
pde=IncrementPDE(),
datapath='~/phi/data/value',
val_range=range(1),
train_range=None,
obs_loss_frames=[-1],
trainable_networks=[],
sequence_class=StaggeredSequence,
batch_size=1)
print("Training app was set up. The values of 'scalar.data' should be equal to the frame index. 'fieldeffect_xxx_.field.data' should be 1 for frame>=1.")
for fieldname in app.fieldnames:
if 'Sim' in fieldname:
value = app.get_field(fieldname)
print("%s = %s" % (fieldname, value[..., 0]))
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
872,
72,
13,
11125,
1330,
28315,
198,
198,
6738,
1630,
13,
13716,
62,
34409,
1330,
6779,
44357,
198,
6738,
1630,
13,
79,
2934,
13,
8367,
1330,
10791,
434,
5760,
36,
628,
198,
2,
6060,
5... | 2.159763 | 507 |
import threading
from queue import Queue
from spider import Spider
from domain import *
from general import *
PROJECT_NAME = 'sebastianwalter'
HOMEPAGE = 'http://sebastianwalter.org'
DOMAIN_NAME = get_domain_name(HOMEPAGE)
QUEUE_FILE = PROJECT_NAME + '/queue.txt'
CRAWLED_FILE = PROJECT_NAME + '/crawled.txt'
NUMBER_OF_THREADS = 2
thread_queue = Queue()
# very first spider, creates list and folder and gets all urls from the staring page
Spider(PROJECT_NAME, HOMEPAGE, DOMAIN_NAME)
# Create worker threads (will die when main exists); _ means I do not care about the value it self
# Do the next job in the queue
# Each queue link is a new job
# Check if there are items in the queue, if so crawl them
create_workers()
crawl() | [
11748,
4704,
278,
198,
6738,
16834,
1330,
4670,
518,
198,
6738,
19230,
1330,
12648,
198,
6738,
7386,
1330,
1635,
198,
6738,
2276,
1330,
1635,
198,
198,
31190,
23680,
62,
20608,
796,
705,
325,
65,
459,
666,
16783,
353,
6,
198,
39069,
4... | 3.167382 | 233 |
# MIDI input and output ports, on windows you'll have to
# start them manually using LoopMIDI or another software
MIDI_INPUT_PORT: str = "magenta_in"
MIDI_OUTPUT_PORT: str = "magenta_out"
# The server GUI for the app
WS_SERVER_HOST: str = "127.0.0.1"
WS_SERVER_PORT: int = 5000
| [
2,
33439,
5128,
290,
5072,
14090,
11,
319,
9168,
345,
1183,
423,
284,
198,
2,
923,
606,
14500,
1262,
26304,
44,
2389,
40,
393,
1194,
3788,
198,
44,
2389,
40,
62,
1268,
30076,
62,
15490,
25,
965,
796,
366,
19726,
29188,
62,
259,
1,... | 2.682692 | 104 |
from direct.directnotify import DirectNotifyGlobal
from . import CatalogItem
from . import CatalogItemList
from .CatalogFurnitureItem import CatalogFurnitureItem, nextAvailableCloset, getAllClosets, get50ItemCloset, getMaxClosets, get50ItemTrunk
from .CatalogAnimatedFurnitureItem import CatalogAnimatedFurnitureItem
from .CatalogClothingItem import CatalogClothingItem, getAllClothes
from .CatalogChatItem import CatalogChatItem, getChatRange
from .CatalogEmoteItem import CatalogEmoteItem
from .CatalogWallpaperItem import CatalogWallpaperItem, getWallpapers
from .CatalogFlooringItem import CatalogFlooringItem, getFloorings
from .CatalogMouldingItem import CatalogMouldingItem, getAllMouldings
from .CatalogWainscotingItem import CatalogWainscotingItem, getAllWainscotings
from .CatalogWindowItem import CatalogWindowItem
from .CatalogPoleItem import nextAvailablePole, getAllPoles
from .CatalogPetTrickItem import CatalogPetTrickItem, getAllPetTricks
from .CatalogGardenItem import CatalogGardenItem
from .CatalogToonStatueItem import CatalogToonStatueItem
from .CatalogRentalItem import CatalogRentalItem
from .CatalogGardenStarterItem import CatalogGardenStarterItem
from .CatalogNametagItem import CatalogNametagItem
from .CatalogAccessoryItem import CatalogAccessoryItem
from direct.actor import Actor
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
import types
import random
import time
from pandac.PandaModules import *
MetaItems = {100: getAllClothes(101, 102, 103, 104, 105, 106, 107, 108, 109, 109, 111, 115, 201, 202, 203, 204, 205, 206, 207, 208, 209, 209, 211, 215),
300: getAllClothes(301, 302, 303, 304, 305, 308, 401, 403, 404, 405, 407, 451, 452, 453),
2000: getChatRange(0, 1999),
2010: getChatRange(2000, 2999),
2020: getChatRange(3000, 3999),
2030: getChatRange(4000, 4999),
2040: getChatRange(6000, 6999),
2050: getChatRange(7000, 7999),
2900: getChatRange(10000, 10002, 10005, 10005, 10007, 10008, 10010, 10099),
2910: getChatRange(11000, 11005, 11008, 11008, 11012, 11015, 11017, 11019, 11021, 11022),
2920: getChatRange(12000, 12049),
2921: getChatRange(12050, 12099),
2930: getChatRange(13000, 13099),
2940: getChatRange(14000, 14099),
3000: getWallpapers(1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700, 1800, 1900, 2000, 2100),
3010: getWallpapers(2200, 2300, 2400, 2500, 2600, 2700, 2800),
3020: getWallpapers(2900, 3000, 3100, 3200, 3300, 3400, 3500, 3600),
3030: getWallpapers(3700, 3800, 3900),
3500: getAllWainscotings(1000, 1010),
3510: getAllWainscotings(1020),
3520: getAllWainscotings(1030),
3530: getAllWainscotings(1040),
4000: getFloorings(1000, 1010, 1020, 1030, 1040, 1050, 1060, 1070, 1080, 1090, 1100),
4010: getFloorings(1110, 1120, 1130),
4020: getFloorings(1140, 1150, 1160, 1170, 1180, 1190),
4500: getAllMouldings(1000, 1010),
4510: getAllMouldings(1020, 1030, 1040),
4520: getAllMouldings(1070),
5000: getAllPetTricks()}
MetaItemChatKeysSold = (2000,
2010,
2020,
2030,
2040,
2050,
2900,
2910,
2920,
2921,
2930)
MonthlySchedule = ((7,
1,
8,
31,
(CatalogAccessoryItem(101),
CatalogAccessoryItem(103),
CatalogAccessoryItem(117),
CatalogAccessoryItem(118),
CatalogAccessoryItem(123),
CatalogAccessoryItem(124),
CatalogAccessoryItem(125),
CatalogAccessoryItem(126),
CatalogAccessoryItem(127),
CatalogAccessoryItem(128),
CatalogAccessoryItem(129),
CatalogAccessoryItem(130),
CatalogAccessoryItem(202),
CatalogAccessoryItem(204),
CatalogAccessoryItem(205),
CatalogAccessoryItem(206),
CatalogAccessoryItem(208),
CatalogAccessoryItem(209),
CatalogAccessoryItem(210),
CatalogAccessoryItem(302),
CatalogAccessoryItem(308),
CatalogAccessoryItem(309),
CatalogAccessoryItem(310),
CatalogAccessoryItem(317),
CatalogAccessoryItem(402),
CatalogAccessoryItem(403),
CatalogAccessoryItem(405),
CatalogAccessoryItem(406),
CatalogAccessoryItem(407),
CatalogAccessoryItem(408),
CatalogAccessoryItem(409),
CatalogAccessoryItem(410),
CatalogAccessoryItem(411),
CatalogAccessoryItem(412),
CatalogAccessoryItem(413))),
(9,
1,
10,
31,
(CatalogAccessoryItem(306),
CatalogAccessoryItem(318),
CatalogAccessoryItem(121),
CatalogAccessoryItem(212),
CatalogAccessoryItem(214),
CatalogAccessoryItem(312),
CatalogAccessoryItem(150),
CatalogAccessoryItem(151),
CatalogAccessoryItem(147),
CatalogAccessoryItem(422),
CatalogAccessoryItem(141),
CatalogAccessoryItem(146),
CatalogAccessoryItem(444),
CatalogAccessoryItem(122),
CatalogAccessoryItem(430),
CatalogAccessoryItem(145),
CatalogAccessoryItem(132),
CatalogAccessoryItem(161),
CatalogAccessoryItem(134),
CatalogAccessoryItem(149),
CatalogAccessoryItem(207),
CatalogAccessoryItem(215),
CatalogAccessoryItem(216),
CatalogAccessoryItem(417),
CatalogAccessoryItem(222),
CatalogAccessoryItem(321),
CatalogAccessoryItem(322),
CatalogAccessoryItem(307),
CatalogAccessoryItem(135),
CatalogAccessoryItem(174))),
(11,
1,
12,
31,
(CatalogAccessoryItem(434),
CatalogAccessoryItem(435),
CatalogAccessoryItem(441),
CatalogAccessoryItem(446),
CatalogAccessoryItem(429),
CatalogAccessoryItem(110),
CatalogAccessoryItem(148),
CatalogAccessoryItem(443),
CatalogAccessoryItem(426),
CatalogAccessoryItem(439),
CatalogAccessoryItem(143),
CatalogAccessoryItem(313),
CatalogAccessoryItem(311),
CatalogAccessoryItem(437),
CatalogAccessoryItem(415),
CatalogAccessoryItem(167),
CatalogAccessoryItem(157),
CatalogAccessoryItem(106),
CatalogAccessoryItem(109),
CatalogAccessoryItem(421),
CatalogAccessoryItem(401),
CatalogAccessoryItem(447),
CatalogAccessoryItem(213),
CatalogAccessoryItem(330))),
(1,
1,
2,
29,
(CatalogAccessoryItem(440),
CatalogAccessoryItem(425),
CatalogAccessoryItem(158),
CatalogAccessoryItem(431),
CatalogAccessoryItem(420),
CatalogAccessoryItem(155),
CatalogAccessoryItem(419),
CatalogAccessoryItem(436),
CatalogAccessoryItem(428),
CatalogAccessoryItem(304),
CatalogAccessoryItem(301),
CatalogAccessoryItem(416),
CatalogAccessoryItem(414),
CatalogAccessoryItem(164),
CatalogAccessoryItem(323),
CatalogAccessoryItem(108),
CatalogAccessoryItem(139),
CatalogAccessoryItem(316),
CatalogAccessoryItem(131),
CatalogAccessoryItem(170),
CatalogAccessoryItem(221),
CatalogAccessoryItem(225))),
(3,
1,
4,
30,
(CatalogAccessoryItem(305),
CatalogAccessoryItem(303),
CatalogAccessoryItem(144),
CatalogAccessoryItem(120),
CatalogAccessoryItem(116),
CatalogAccessoryItem(217),
CatalogAccessoryItem(218),
CatalogAccessoryItem(219),
CatalogAccessoryItem(445),
CatalogAccessoryItem(418),
CatalogAccessoryItem(432),
CatalogAccessoryItem(427),
CatalogAccessoryItem(423),
CatalogAccessoryItem(137),
CatalogAccessoryItem(163),
CatalogAccessoryItem(165),
CatalogAccessoryItem(153),
CatalogAccessoryItem(319),
CatalogAccessoryItem(154),
CatalogAccessoryItem(159),
CatalogAccessoryItem(162),
CatalogAccessoryItem(315),
CatalogAccessoryItem(160),
CatalogAccessoryItem(102))),
(5,
1,
6,
30,
(CatalogAccessoryItem(119),
CatalogAccessoryItem(136),
CatalogAccessoryItem(169),
CatalogAccessoryItem(140),
CatalogAccessoryItem(168),
CatalogAccessoryItem(138),
CatalogAccessoryItem(220),
CatalogAccessoryItem(433),
CatalogAccessoryItem(442),
CatalogAccessoryItem(424),
CatalogAccessoryItem(404),
CatalogAccessoryItem(156),
CatalogAccessoryItem(142),
CatalogAccessoryItem(152),
CatalogAccessoryItem(133),
CatalogAccessoryItem(166),
CatalogAccessoryItem(211),
CatalogAccessoryItem(314),
CatalogAccessoryItem(320),
CatalogAccessoryItem(173),
CatalogAccessoryItem(328),
CatalogAccessoryItem(329))),
(10,
3,
11,
2,
((3, 2900),
CatalogChatItem(10003),
CatalogClothingItem(1001, 0),
CatalogClothingItem(1002, 0),
CatalogWallpaperItem(10100),
CatalogWallpaperItem(10200),
CatalogFurnitureItem(10000),
CatalogFurnitureItem(10010),
CatalogNametagItem(9))),
(10,
3,
11,
2,
(CatalogClothingItem(1744, 0),
CatalogClothingItem(1745, 0),
CatalogClothingItem(1748, 0),
CatalogClothingItem(1771, 0),
CatalogClothingItem(1774, 0),
CatalogClothingItem(1775, 0),
CatalogClothingItem(1743, 0),
CatalogClothingItem(1746, 0),
CatalogClothingItem(1747, 0),
CatalogClothingItem(1112, 0),
CatalogClothingItem(1113, 0),
CatalogClothingItem(1114, 0),
CatalogClothingItem(1115, 0),
CatalogClothingItem(1116, 0),
CatalogClothingItem(1117, 0),
CatalogClothingItem(1118, 0),
CatalogClothingItem(1119, 0),
CatalogClothingItem(1120, 0),
CatalogClothingItem(1121, 0),
CatalogClothingItem(1122, 0),
CatalogClothingItem(1123, 0),
CatalogClothingItem(1124, 0),
CatalogClothingItem(1125, 0),
CatalogClothingItem(1126, 0),
CatalogClothingItem(1127, 0),
CatalogAccessoryItem(171),
CatalogAccessoryItem(172),
CatalogAccessoryItem(224),
CatalogAccessoryItem(324),
CatalogAccessoryItem(325),
CatalogAccessoryItem(326),
CatalogAccessoryItem(327),
CatalogAccessoryItem(448),
CatalogAccessoryItem(449),
CatalogClothingItem(1801, 0))),
(2,
1,
2,
28,
((3, 2920),
(2, 2921),
CatalogClothingItem(1200, 0),
CatalogClothingItem(1201, 0),
CatalogClothingItem(1202, 0),
CatalogClothingItem(1203, 0),
CatalogClothingItem(1204, 0),
CatalogClothingItem(1205, 0),
CatalogWallpaperItem(12000),
CatalogWallpaperItem(12100),
CatalogWallpaperItem(12200),
CatalogWallpaperItem(12300),
CatalogWainscotingItem(1030, 0),
CatalogWainscotingItem(1030, 1),
CatalogMouldingItem(1060, 0),
CatalogMouldingItem(1060, 1),
CatalogClothingItem(1206, 0),
CatalogClothingItem(1207, 0),
CatalogClothingItem(1208, 0),
CatalogClothingItem(1209, 0),
CatalogClothingItem(1210, 0),
CatalogClothingItem(1211, 0),
CatalogClothingItem(1212, 0),
CatalogFurnitureItem(1670),
CatalogFurnitureItem(1680),
CatalogFurnitureItem(1450),
CatalogMouldingItem(1100, 0),
CatalogMouldingItem(1110, 0),
CatalogMouldingItem(1120, 0))),
(3,
1,
3,
20,
((3, 2930),
CatalogClothingItem(1300, 0),
CatalogClothingItem(1301, 0),
CatalogClothingItem(1302, 0),
CatalogClothingItem(1303, 0),
CatalogClothingItem(1304, 0),
CatalogClothingItem(1305, 0),
CatalogClothingItem(1306, 0),
CatalogWallpaperItem(13000),
CatalogWallpaperItem(13100),
CatalogWallpaperItem(13200),
CatalogWallpaperItem(13300),
CatalogFlooringItem(11000),
CatalogFlooringItem(11010))),
(5,
25,
6,
25,
(CatalogClothingItem(1400, 0), CatalogClothingItem(1401, 0), CatalogClothingItem(1402, 0))),
(8,
1,
8,
31,
(CatalogClothingItem(1403, 0),
CatalogClothingItem(1404, 0),
CatalogClothingItem(1405, 0),
CatalogClothingItem(1406, 0))),
(9,
24,
10,
24,
(CatalogFurnitureItem(450),
CatalogAnimatedFurnitureItem(460),
CatalogAnimatedFurnitureItem(270),
CatalogAnimatedFurnitureItem(990))),
(6,
15,
8,
15,
2010,
2010,
((4, 2940),)),
(9,
1,
9,
30,
(CatalogGardenItem(135, 1),)),
(1,
1,
1,
31,
(CatalogGardenItem(135, 1),)),
(4,
1,
4,
30,
(CatalogGardenItem(135, 1),)),
(6,
1,
6,
30,
(CatalogGardenItem(135, 1),)),
(6,
26,
7,
16,
(CatalogClothingItem(1500, 0),
CatalogClothingItem(1501, 0),
CatalogClothingItem(1502, 0),
CatalogClothingItem(1503, 0))),
(12,
4,
1,
4,
((3, 2910),)),
(12,
4,
1,
4,
(CatalogFurnitureItem(680),
CatalogFurnitureItem(681),
CatalogGardenItem(130, 1),
CatalogGardenItem(131, 1),
CatalogAnimatedFurnitureItem(10020),
CatalogFurnitureItem(10030, 0))),
(12,
4,
1,
4,
(CatalogWallpaperItem(11000),
CatalogWallpaperItem(11100),
CatalogFlooringItem(10010),
CatalogMouldingItem(1090, 0),
CatalogClothingItem(1100, 0),
CatalogClothingItem(1101, 0),
CatalogClothingItem(1104, 0),
CatalogClothingItem(1105, 0),
CatalogClothingItem(1108, 0),
CatalogClothingItem(1109, 0),
CatalogClothingItem(1802, 0))),
(12,
11,
1,
4,
(CatalogFurnitureItem(1040),
CatalogFurnitureItem(1050),
CatalogWallpaperItem(11200),
CatalogFlooringItem(10000),
CatalogMouldingItem(1080, 0),
CatalogMouldingItem(1085, 0),
CatalogClothingItem(1102, 0),
CatalogClothingItem(1103, 0),
CatalogClothingItem(1106, 0),
CatalogClothingItem(1107, 0),
CatalogClothingItem(1110, 0),
CatalogClothingItem(1111, 0))),
(6,
9,
7,
15,
2010,
2010,
(CatalogClothingItem(1751, 0),)),
(6,
14,
7,
15,
2010,
2010,
(CatalogClothingItem(1754, 0), CatalogClothingItem(1755, 0), CatalogClothingItem(1756, 0))),
(7,
21,
8,
17,
2010,
2010,
(CatalogClothingItem(1749, 0),
CatalogClothingItem(1750, 0),
CatalogClothingItem(1757, 0),
CatalogClothingItem(1758, 0))),
(8,
25,
9,
21,
2010,
2010,
(CatalogClothingItem(1763, 0),)),
(6,
5,
7,
1,
(CatalogClothingItem(1768, 0), CatalogClothingItem(1769, 0))),
(1,
1,
12,
31,
(CatalogGardenItem(100, 1),
CatalogGardenItem(101, 1),
CatalogGardenItem(103, 1),
CatalogGardenItem(104, 1),
CatalogToonStatueItem(105, endPoseIndex=108),
CatalogRentalItem(1, 2880, 1000),
CatalogGardenStarterItem(),
CatalogNametagItem(100),
CatalogNametagItem(0),
CatalogClothingItem(1608, 0, 720),
CatalogClothingItem(1605, 0, 720),
CatalogClothingItem(1602, 0, 720),
CatalogClothingItem(1607, 0, 540),
CatalogClothingItem(1604, 0, 540),
CatalogClothingItem(1601, 0, 540),
CatalogClothingItem(1606, 0, 360),
CatalogClothingItem(1603, 0, 360),
CatalogClothingItem(1600, 0, 360),
CatalogEmoteItem(20, 90),
CatalogEmoteItem(21, 180),
CatalogEmoteItem(22, 360),
CatalogEmoteItem(23, 540),
CatalogEmoteItem(24, 720))),
(5,
26,
6,
30,
2013,
2013,
(CatalogAccessoryItem(175),)),
(8,
27,
9,
5,
2013,
2013,
((3, 2900),
CatalogChatItem(10003),
CatalogClothingItem(1001, 0),
CatalogClothingItem(1002, 0),
CatalogWallpaperItem(10100),
CatalogWallpaperItem(10200),
CatalogFurnitureItem(10000),
CatalogFurnitureItem(10010),
CatalogNametagItem(9),
CatalogClothingItem(1744, 0),
CatalogClothingItem(1745, 0),
CatalogClothingItem(1748, 0),
CatalogClothingItem(1771, 0),
CatalogClothingItem(1774, 0),
CatalogClothingItem(1775, 0),
CatalogClothingItem(1743, 0),
CatalogClothingItem(1746, 0),
CatalogClothingItem(1747, 0),
CatalogClothingItem(1112, 0),
CatalogClothingItem(1113, 0),
CatalogClothingItem(1114, 0),
CatalogClothingItem(1115, 0),
CatalogClothingItem(1116, 0),
CatalogClothingItem(1117, 0),
CatalogClothingItem(1118, 0),
CatalogClothingItem(1119, 0),
CatalogClothingItem(1120, 0),
CatalogClothingItem(1121, 0),
CatalogClothingItem(1122, 0),
CatalogClothingItem(1123, 0),
CatalogClothingItem(1124, 0),
CatalogClothingItem(1125, 0),
CatalogClothingItem(1126, 0),
CatalogClothingItem(1127, 0),
CatalogAccessoryItem(171),
CatalogAccessoryItem(172),
CatalogAccessoryItem(224),
CatalogAccessoryItem(324),
CatalogAccessoryItem(325),
CatalogAccessoryItem(326),
CatalogAccessoryItem(327),
CatalogAccessoryItem(448),
CatalogAccessoryItem(449),
CatalogClothingItem(1801, 0),
CatalogAccessoryItem(175))),
(9,
3,
9,
12,
2013,
2013,
((3, 2910),
CatalogFurnitureItem(680),
CatalogFurnitureItem(681),
CatalogGardenItem(130, 1),
CatalogGardenItem(131, 1),
CatalogAnimatedFurnitureItem(10020),
CatalogFurnitureItem(10030, 0),
CatalogWallpaperItem(11000),
CatalogWallpaperItem(11100),
CatalogFlooringItem(10010),
CatalogMouldingItem(1090, 0),
CatalogClothingItem(1100, 0),
CatalogClothingItem(1101, 0),
CatalogClothingItem(1104, 0),
CatalogClothingItem(1105, 0),
CatalogClothingItem(1108, 0),
CatalogClothingItem(1109, 0),
CatalogClothingItem(1802, 0),
CatalogFurnitureItem(1040),
CatalogFurnitureItem(1050),
CatalogWallpaperItem(11200),
CatalogFlooringItem(10000),
CatalogMouldingItem(1080, 0),
CatalogMouldingItem(1085, 0),
CatalogClothingItem(1102, 0),
CatalogClothingItem(1103, 0),
CatalogClothingItem(1106, 0),
CatalogClothingItem(1107, 0),
CatalogClothingItem(1110, 0),
CatalogClothingItem(1111, 0))),
(8,
20,
9,
19,
2013,
2013,
(CatalogAccessoryItem(101),
CatalogAccessoryItem(103),
CatalogAccessoryItem(117),
CatalogAccessoryItem(118),
CatalogAccessoryItem(123),
CatalogAccessoryItem(124),
CatalogAccessoryItem(125),
CatalogAccessoryItem(126),
CatalogAccessoryItem(127),
CatalogAccessoryItem(128),
CatalogAccessoryItem(129),
CatalogAccessoryItem(130),
CatalogAccessoryItem(202),
CatalogAccessoryItem(204),
CatalogAccessoryItem(205),
CatalogAccessoryItem(206),
CatalogAccessoryItem(208),
CatalogAccessoryItem(209),
CatalogAccessoryItem(210),
CatalogAccessoryItem(302),
CatalogAccessoryItem(308),
CatalogAccessoryItem(309),
CatalogAccessoryItem(310),
CatalogAccessoryItem(317),
CatalogAccessoryItem(402),
CatalogAccessoryItem(403),
CatalogAccessoryItem(405),
CatalogAccessoryItem(406),
CatalogAccessoryItem(407),
CatalogAccessoryItem(408),
CatalogAccessoryItem(409),
CatalogAccessoryItem(410),
CatalogAccessoryItem(411),
CatalogAccessoryItem(412),
CatalogAccessoryItem(413),
CatalogAccessoryItem(306),
CatalogAccessoryItem(318),
CatalogAccessoryItem(121),
CatalogAccessoryItem(212),
CatalogAccessoryItem(214),
CatalogAccessoryItem(312),
CatalogAccessoryItem(150),
CatalogAccessoryItem(151),
CatalogAccessoryItem(147),
CatalogAccessoryItem(422),
CatalogAccessoryItem(141),
CatalogAccessoryItem(146),
CatalogAccessoryItem(444),
CatalogAccessoryItem(122),
CatalogAccessoryItem(430),
CatalogAccessoryItem(145),
CatalogAccessoryItem(132),
CatalogAccessoryItem(161),
CatalogAccessoryItem(134),
CatalogAccessoryItem(149),
CatalogAccessoryItem(207),
CatalogAccessoryItem(215),
CatalogAccessoryItem(216),
CatalogAccessoryItem(417),
CatalogAccessoryItem(222),
CatalogAccessoryItem(321),
CatalogAccessoryItem(322),
CatalogAccessoryItem(307),
CatalogAccessoryItem(135),
CatalogAccessoryItem(174),
CatalogAccessoryItem(434),
CatalogAccessoryItem(435),
CatalogAccessoryItem(441),
CatalogAccessoryItem(446),
CatalogAccessoryItem(429),
CatalogAccessoryItem(110),
CatalogAccessoryItem(148),
CatalogAccessoryItem(443),
CatalogAccessoryItem(426),
CatalogAccessoryItem(439),
CatalogAccessoryItem(143),
CatalogAccessoryItem(313),
CatalogAccessoryItem(311),
CatalogAccessoryItem(437),
CatalogAccessoryItem(415),
CatalogAccessoryItem(167),
CatalogAccessoryItem(157),
CatalogAccessoryItem(106),
CatalogAccessoryItem(109),
CatalogAccessoryItem(421),
CatalogAccessoryItem(401),
CatalogAccessoryItem(447),
CatalogAccessoryItem(213),
CatalogAccessoryItem(330),
CatalogAccessoryItem(440),
CatalogAccessoryItem(425),
CatalogAccessoryItem(158),
CatalogAccessoryItem(431),
CatalogAccessoryItem(420),
CatalogAccessoryItem(155),
CatalogAccessoryItem(419),
CatalogAccessoryItem(436),
CatalogAccessoryItem(428),
CatalogAccessoryItem(304),
CatalogAccessoryItem(301),
CatalogAccessoryItem(416),
CatalogAccessoryItem(414),
CatalogAccessoryItem(164),
CatalogAccessoryItem(323),
CatalogAccessoryItem(108),
CatalogAccessoryItem(139),
CatalogAccessoryItem(316),
CatalogAccessoryItem(131),
CatalogAccessoryItem(170),
CatalogAccessoryItem(221),
CatalogAccessoryItem(225),
CatalogAccessoryItem(305),
CatalogAccessoryItem(303),
CatalogAccessoryItem(144),
CatalogAccessoryItem(120),
CatalogAccessoryItem(116),
CatalogAccessoryItem(217),
CatalogAccessoryItem(218),
CatalogAccessoryItem(219),
CatalogAccessoryItem(445),
CatalogAccessoryItem(418),
CatalogAccessoryItem(432),
CatalogAccessoryItem(427),
CatalogAccessoryItem(423),
CatalogAccessoryItem(137),
CatalogAccessoryItem(163),
CatalogAccessoryItem(165),
CatalogAccessoryItem(153),
CatalogAccessoryItem(319),
CatalogAccessoryItem(154),
CatalogAccessoryItem(159),
CatalogAccessoryItem(162),
CatalogAccessoryItem(315),
CatalogAccessoryItem(160),
CatalogAccessoryItem(102),
CatalogAccessoryItem(119),
CatalogAccessoryItem(136),
CatalogAccessoryItem(169),
CatalogAccessoryItem(140),
CatalogAccessoryItem(168),
CatalogAccessoryItem(138),
CatalogAccessoryItem(220),
CatalogAccessoryItem(433),
CatalogAccessoryItem(442),
CatalogAccessoryItem(424),
CatalogAccessoryItem(404),
CatalogAccessoryItem(156),
CatalogAccessoryItem(142),
CatalogAccessoryItem(152),
CatalogAccessoryItem(133),
CatalogAccessoryItem(166),
CatalogAccessoryItem(211),
CatalogAccessoryItem(314),
CatalogAccessoryItem(320),
CatalogAccessoryItem(173),
CatalogAccessoryItem(328),
CatalogAccessoryItem(329))))
WeeklySchedule = ((100,
(5, 2000),
3000,
3500,
4000,
4500,
CatalogEmoteItem(5),
CatalogFurnitureItem(210, 0),
CatalogFurnitureItem(220, 0)),
(100,
(5, 2000),
CatalogFurnitureItem(1400),
3000,
3500,
4000,
4500,
CatalogFurnitureItem(600),
CatalogFurnitureItem(610),
CatalogClothingItem(116, 0),
CatalogClothingItem(216, 0)),
(300,
(5, 2000),
CatalogFurnitureItem(1410),
3000,
3500,
4000,
4500,
CatalogFurnitureItem(1100),
CatalogFurnitureItem(1020),
CatalogClothingItem(408, 0),
5000),
(100,
(5, 2000),
CatalogWindowItem(40),
3000,
3500,
4000,
4500,
CatalogFurnitureItem(110),
CatalogFurnitureItem(100),
nextAvailablePole,
nextAvailableCloset),
(100,
(5, 2000),
CatalogFurnitureItem(1420),
CatalogEmoteItem(9),
3000,
3500,
4000,
4500,
CatalogFurnitureItem(700),
CatalogFurnitureItem(710)),
(300,
(5, 2000),
3000,
3500,
4000,
4500,
CatalogFurnitureItem(410),
CatalogAnimatedFurnitureItem(490),
CatalogFurnitureItem(1000),
CatalogClothingItem(117, 0),
CatalogClothingItem(217, 0),
nextAvailableCloset),
(100,
(5, 2000),
CatalogFurnitureItem(1430),
3000,
3500,
4000,
4500,
CatalogFurnitureItem(1510),
CatalogFurnitureItem(1610),
5000,
CatalogNametagItem(1)),
(100,
(5, 2000),
CatalogWindowItem(70),
3000,
3500,
4000,
4500,
CatalogFurnitureItem(1210),
CatalogClothingItem(409, 0),
nextAvailablePole,
nextAvailableCloset),
(300,
(5, 2000),
CatalogEmoteItem(13),
3000,
3500,
4000,
4500,
CatalogFurnitureItem(1200),
CatalogFurnitureItem(900)),
(100,
(5, 2000),
3000,
3500,
4000,
4500,
CatalogFurnitureItem(910),
CatalogFurnitureItem(1600),
CatalogClothingItem(118, 0),
CatalogClothingItem(218, 0),
nextAvailableCloset),
(100,
(5, 2000),
3000,
3500,
4000,
4500,
CatalogFurnitureItem(800),
CatalogFurnitureItem(1010),
CatalogClothingItem(410, 0),
5000),
(300,
(5, 2000),
3000,
3500,
4000,
4500,
CatalogFurnitureItem(620),
nextAvailablePole,
nextAvailableCloset),
(300,
(5, 2000),
3000,
3500,
4000,
4500,
CatalogClothingItem(119, 0),
CatalogClothingItem(219, 0)),
(100,
(2, 2000),
(3, 2010),
3010,
3510,
4010,
4510,
CatalogFurnitureItem(1110),
CatalogFurnitureItem(630),
CatalogFurnitureItem(1630),
CatalogEmoteItem(11),
CatalogNametagItem(11),
nextAvailableCloset),
(100,
(2, 2000),
(3, 2010),
3010,
3510,
4010,
4510,
CatalogFurnitureItem(230),
CatalogFurnitureItem(920),
CatalogFurnitureItem(1440)),
(300,
(2, 2000),
(3, 2010),
3010,
3510,
4010,
4510,
CatalogFurnitureItem(420),
CatalogAnimatedFurnitureItem(480),
CatalogFurnitureItem(120),
CatalogClothingItem(120, 0),
CatalogClothingItem(220, 0),
nextAvailablePole,
5000,
nextAvailableCloset),
(100,
(2, 2000),
(3, 2010),
3010,
3510,
4010,
4510,
CatalogFurnitureItem(1700),
CatalogFurnitureItem(640),
CatalogWindowItem(50)),
(100,
(2, 2000),
(3, 2010),
3010,
3510,
4010,
4510,
CatalogFurnitureItem(1120),
CatalogFurnitureItem(930),
CatalogFurnitureItem(1500),
CatalogEmoteItem(6),
nextAvailableCloset),
(300,
(2, 2000),
(3, 2010),
3010,
3510,
4010,
4510,
CatalogFurnitureItem(430),
CatalogAnimatedFurnitureItem(491),
CatalogFurnitureItem(1620),
CatalogFurnitureItem(1442)),
(100,
(2, 2000),
(3, 2010),
3010,
3510,
4010,
4510,
CatalogFurnitureItem(610),
CatalogFurnitureItem(940),
CatalogClothingItem(121, 0),
CatalogClothingItem(221, 0),
nextAvailablePole,
5000),
(100,
(2, 2000),
(3, 2010),
3010,
3510,
4010,
4510,
CatalogFurnitureItem(1710),
CatalogFurnitureItem(1030),
CatalogWindowItem(60),
CatalogNametagItem(7)),
(300,
(2, 2000),
(3, 2010),
3010,
3510,
4010,
4510,
CatalogFurnitureItem(1130),
CatalogFurnitureItem(130),
CatalogEmoteItem(8)),
(100,
(2, 2000),
(3, 2010),
3010,
3510,
4010,
4510,
CatalogFurnitureItem(1530),
CatalogFurnitureItem(1640),
CatalogFurnitureItem(1441)),
(100,
(2, 2000),
(3, 2010),
3010,
3510,
4010,
4510,
CatalogFurnitureItem(300),
CatalogFurnitureItem(1220),
nextAvailablePole,
5000),
(300,
(2, 2000),
(3, 2010),
3010,
3510,
4010,
4510,
CatalogFurnitureItem(810),
CatalogFurnitureItem(1230),
CatalogFurnitureItem(1443)),
(300,
(2, 2000),
(3, 2010),
3010,
3510,
4010,
4510,
CatalogFurnitureItem(310),
CatalogFurnitureItem(1520),
CatalogFurnitureItem(1650),
CatalogWindowItem(80),
CatalogClothingItem(222, 0)),
(100,
(1, 2000),
(2, 2010),
(3, 2020),
3020,
3530,
4020,
4520,
CatalogFurnitureItem(1240),
CatalogFurnitureItem(1661),
CatalogEmoteItem(5)),
(100,
(1, 2000),
(2, 2010),
(3, 2020),
3020,
3530,
4020,
4520,
CatalogFurnitureItem(1800),
CatalogFurnitureItem(240),
CatalogFurnitureItem(1200),
CatalogNametagItem(12)),
(300,
(1, 2000),
(2, 2010),
(3, 2020),
3020,
3530,
4020,
4520,
CatalogFurnitureItem(145),
CatalogClothingItem(123, 0),
CatalogClothingItem(224, 0),
nextAvailablePole,
5000),
(100,
(1, 2000),
(2, 2010),
(3, 2020),
3020,
3530,
4020,
4520,
CatalogWindowItem(100),
CatalogFurnitureItem(1810)),
(100,
(1, 2000),
(2, 2010),
(3, 2020),
3020,
3530,
4020,
4520,
CatalogFurnitureItem(650),
CatalogFurnitureItem(1900)),
(300,
(1, 2000),
(2, 2010),
(3, 2020),
3020,
3530,
4020,
4520,
CatalogFurnitureItem(1725)),
(100,
(1, 2000),
(2, 2010),
(3, 2020),
3020,
3530,
4020,
4520,
CatalogWindowItem(90),
CatalogClothingItem(124, 0),
CatalogClothingItem(411, 0),
nextAvailablePole),
(100,
(1, 2000),
(2, 2010),
(3, 2020),
3020,
3530,
4020,
4520,
CatalogFurnitureItem(140),
CatalogFurnitureItem(1020),
CatalogEmoteItem(13)),
(300,
(1, 2000),
(2, 2010),
(3, 2020),
3020,
3530,
4020,
4520,
CatalogFurnitureItem(950),
CatalogFurnitureItem(1660),
CatalogClothingItem(310, 0),
CatalogNametagItem(2)),
(100,
(1, 2000),
(2, 2010),
(3, 2020),
3020,
3530,
4020,
4520,
CatalogFurnitureItem(400),
CatalogAnimatedFurnitureItem(470),
CatalogFurnitureItem(660),
CatalogFurnitureItem(1200),
5000),
(100,
(1, 2000),
(2, 2010),
(3, 2020),
3020,
3530,
4020,
4520,
CatalogFurnitureItem(1910),
nextAvailablePole,
CatalogFurnitureItem(1000)),
(300,
(1, 2000),
(2, 2010),
(3, 2020),
3020,
3530,
4020,
4520,
CatalogFurnitureItem(1720),
CatalogEmoteItem(9)),
(300,
(1, 2000),
(2, 2010),
(3, 2020),
3020,
3530,
4020,
4520,
CatalogWindowItem(110),
CatalogClothingItem(311, 0)),
(100,
(1, 2010),
(2, 2020),
(3, 2030),
3020,
3530,
4020,
4520,
CatalogWindowItem(120),
CatalogClothingItem(125, 0),
5000),
(300,
(1, 2010),
(2, 2020),
(3, 2030),
3020,
3530,
4020,
4520,
CatalogClothingItem(412, 0),
CatalogClothingItem(312, 0),
CatalogFurnitureItem(1920)),
(100,
(1, 2010),
(2, 2020),
(3, 2030),
3020,
3530,
4020,
4520,
nextAvailablePole,
CatalogWallpaperItem(3900),
CatalogFurnitureItem(980),
CatalogNametagItem(13)),
(300,
(1, 2010),
(2, 2020),
(3, 2030),
3020,
3530,
4020,
4520,
CatalogClothingItem(130, 0),
CatalogFurnitureItem(150)),
(100,
(1, 2010),
(2, 2020),
(3, 2030),
3020,
3530,
4020,
4520,
CatalogClothingItem(128, 0),
CatalogWallpaperItem(3700),
CatalogFurnitureItem(160)),
(300,
(1, 2010),
(2, 2020),
(3, 2030),
3020,
3530,
4020,
4520,
CatalogClothingItem(313, 0),
CatalogClothingItem(413, 0),
CatalogFurnitureItem(960),
CatalogEmoteItem(7)),
(100,
(1, 2010),
(2, 2020),
(3, 2030),
3020,
3530,
4020,
4520,
nextAvailablePole,
CatalogFurnitureItem(1930),
CatalogFurnitureItem(670)),
(300,
(1, 2010),
(2, 2020),
(3, 2030),
3020,
3530,
4020,
4520,
CatalogClothingItem(126, 0),
CatalogFurnitureItem(1970),
5000),
(100,
(1, 2010),
(2, 2020),
(3, 2030),
3020,
3530,
4020,
4520,
CatalogFurnitureItem(720),
CatalogFurnitureItem(970)),
(300,
(1, 2010),
(2, 2020),
(3, 2030),
3020,
3530,
4020,
4520,
CatalogClothingItem(127, 0),
CatalogFurnitureItem(1950),
CatalogNametagItem(4)),
(100,
(1, 2010),
(2, 2020),
(3, 2030),
3020,
3530,
4020,
4520,
nextAvailablePole,
CatalogFurnitureItem(1940),
CatalogWindowItem(130)),
(300,
(1, 2010),
(2, 2020),
(3, 2030),
3020,
3530,
4020,
4520,
CatalogWallpaperItem(3800),
CatalogClothingItem(129, 0),
CatalogEmoteItem(10)),
(100,
(1, 2010),
(2, 2020),
(3, 2030),
3020,
3530,
4020,
4520,
CatalogFurnitureItem(250),
CatalogFurnitureItem(1960),
nextAvailablePole),
Sale(CatalogFurnitureItem(210, 0), CatalogFurnitureItem(220, 0), CatalogFurnitureItem(1100), CatalogFurnitureItem(110), CatalogFurnitureItem(100), CatalogFurnitureItem(700), CatalogFurnitureItem(710), CatalogFurnitureItem(410), CatalogAnimatedFurnitureItem(490), CatalogFurnitureItem(1210), CatalogFurnitureItem(1200), CatalogFurnitureItem(800), CatalogFurnitureItem(1110), CatalogFurnitureItem(230), CatalogFurnitureItem(420), CatalogAnimatedFurnitureItem(480), CatalogFurnitureItem(120), CatalogFurnitureItem(1700), CatalogFurnitureItem(1120), CatalogFurnitureItem(430), CatalogAnimatedFurnitureItem(491), CatalogFurnitureItem(1130), CatalogFurnitureItem(130), CatalogFurnitureItem(300), CatalogFurnitureItem(1220), CatalogFurnitureItem(810), CatalogFurnitureItem(1230), CatalogFurnitureItem(310), CatalogFurnitureItem(1240), CatalogFurnitureItem(240), CatalogFurnitureItem(145), CatalogFurnitureItem(1725), CatalogFurnitureItem(140), CatalogFurnitureItem(950), CatalogFurnitureItem(1720)),
Sale(CatalogClothingItem(116, 0), CatalogClothingItem(216, 0), CatalogClothingItem(408, 0), CatalogClothingItem(117, 0), CatalogClothingItem(217, 0), CatalogClothingItem(409, 0), CatalogClothingItem(118, 0), CatalogClothingItem(218, 0), CatalogClothingItem(410, 0), CatalogClothingItem(119, 0), CatalogClothingItem(219, 0), CatalogClothingItem(120, 0), CatalogClothingItem(220, 0), CatalogClothingItem(121, 0), CatalogClothingItem(221, 0), CatalogClothingItem(222, 0), CatalogClothingItem(123, 0), CatalogClothingItem(224, 0), CatalogClothingItem(411, 0), CatalogClothingItem(311, 0), CatalogClothingItem(310, 0)),
Sale(CatalogWindowItem(40), CatalogWindowItem(70), CatalogWindowItem(50), CatalogWindowItem(60), CatalogWindowItem(80), CatalogWindowItem(100), CatalogWindowItem(90), CatalogWindowItem(110)),
Sale(CatalogEmoteItem(5), CatalogEmoteItem(9), CatalogEmoteItem(13), CatalogEmoteItem(11), CatalogEmoteItem(6), CatalogEmoteItem(8), CatalogNametagItem(10)),
Sale(CatalogFurnitureItem(600), CatalogFurnitureItem(610), CatalogFurnitureItem(620), CatalogFurnitureItem(630), CatalogFurnitureItem(640), CatalogFurnitureItem(650), CatalogFurnitureItem(660), CatalogFurnitureItem(900), CatalogFurnitureItem(910), CatalogFurnitureItem(920), CatalogFurnitureItem(930), CatalogFurnitureItem(940), CatalogFurnitureItem(1000), CatalogFurnitureItem(1010), CatalogFurnitureItem(1020), CatalogFurnitureItem(1030), CatalogFurnitureItem(1400), CatalogFurnitureItem(1410), CatalogFurnitureItem(1420), CatalogFurnitureItem(1430), CatalogFurnitureItem(1440), CatalogFurnitureItem(1441), CatalogFurnitureItem(1442), CatalogFurnitureItem(1443), CatalogFurnitureItem(1500), CatalogFurnitureItem(1510), CatalogFurnitureItem(1520), CatalogFurnitureItem(1530), CatalogFurnitureItem(1600), CatalogFurnitureItem(1610), CatalogFurnitureItem(1620), CatalogFurnitureItem(1630), CatalogFurnitureItem(1640), CatalogFurnitureItem(1650), CatalogFurnitureItem(1660), CatalogFurnitureItem(1661), CatalogFurnitureItem(1710), CatalogFurnitureItem(1800), CatalogFurnitureItem(1810), CatalogFurnitureItem(1900), CatalogFurnitureItem(1910)),
(300,
(1, 2020),
(2, 2030),
(3, 2040),
CatalogFurnitureItem(730),
nextAvailablePole),
(100,
(1, 2020),
(2, 2030),
(3, 2040),
CatalogFurnitureItem(260)),
(300,
(1, 2020),
(2, 2030),
(3, 2040),
CatalogFurnitureItem(440),
CatalogAnimatedFurnitureItem(492),
5000),
(100,
(1, 2020),
(2, 2030),
(3, 2040),
CatalogFurnitureItem(170),
CatalogFurnitureItem(1250)),
(300,
(1, 2020),
(2, 2030),
(3, 2040),
CatalogFurnitureItem(1140),
nextAvailablePole),
(100,
(1, 2020),
(2, 2030),
(3, 2040),
CatalogFurnitureItem(2010),
CatalogNametagItem(8)),
(300,
(1, 2020),
(2, 2030),
(3, 2040),
CatalogFurnitureItem(2000),
5000),
(100,
(1, 2020),
(2, 2030),
(3, 2040),
CatalogFurnitureItem(3000)),
(300,
(1, 2030),
(2, 2040),
(3, 2050),
CatalogClothingItem(131, 0),
CatalogClothingItem(225, 0),
nextAvailablePole),
(300,
(1, 2030),
(2, 2040),
(3, 2050),
CatalogFurnitureItem(105)),
(300,
(1, 2030),
(2, 2040),
(3, 2050),
CatalogFurnitureItem(205)),
(300,
(1, 2030),
(2, 2040),
(3, 2050),
CatalogFurnitureItem(625)),
(300,
(1, 2030),
(2, 2040),
(3, 2050),
nextAvailablePole,
CatalogEmoteItem(12),
CatalogNametagItem(5)),
(300,
(1, 2030),
(2, 2040),
(3, 2050),
CatalogClothingItem(314, 0),
CatalogClothingItem(414, 0)),
(300,
(1, 2030),
(2, 2040),
(3, 2050),
CatalogFurnitureItem(715)),
(300,
(1, 2030),
(2, 2040),
(3, 2050),
CatalogFurnitureItem(1015),
CatalogNametagItem(6)),
(300,
(1, 2030),
(2, 2040),
(3, 2050),
CatalogFurnitureItem(1215),
nextAvailablePole),
(300,
(1, 2030),
(2, 2040),
(3, 2050),
CatalogEmoteItem(14)),
(300,
(1, 2030),
(2, 2040),
(3, 2050),
CatalogFurnitureItem(1260)),
(300,
(1, 2030),
(2, 2040),
(3, 2050),
CatalogFurnitureItem(705),
CatalogNametagItem(3)),
(300,
(1, 2030),
(2, 2040),
(3, 2050),
nextAvailablePole))
| [
6738,
1277,
13,
12942,
1662,
1958,
1330,
4128,
3673,
1958,
22289,
198,
6738,
764,
1330,
44515,
7449,
198,
6738,
764,
1330,
44515,
7449,
8053,
198,
6738,
764,
49015,
37,
700,
8089,
7449,
1330,
44515,
37,
700,
8089,
7449,
11,
1306,
10493,... | 2.651882 | 13,257 |
import numpy as np
import pandas as pd
import traceback
from flask import Flask, request, jsonify, render_template
import pickle
app = Flask(__name__)
model = pickle.load(open("model.pkl", "rb"))
modelcol = pickle.load(open("modelcol.pkl", "rb"))
@app.route('/')
@app.route('/predict', methods = ["POST"])
if __name__ == "__main__":
app.run(debug=True)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
12854,
1891,
198,
6738,
42903,
1330,
46947,
11,
2581,
11,
33918,
1958,
11,
8543,
62,
28243,
198,
11748,
2298,
293,
198,
1324,
796,
46947,
7,
834,
3672,
8... | 2.740458 | 131 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'liying'
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
834,
9800,
834,
796,
705,
4528,
1112,
6,
628
] | 2.09375 | 32 |
# File containing the driver workspace usecase.
from typing import List
from cabrenter.entities.cab import Cab
from cabrenter.use_cases.driver_workspace_repo_interface import \
DriverWorkspaceRepoInterface
| [
2,
9220,
7268,
262,
4639,
44573,
779,
7442,
13,
198,
6738,
19720,
1330,
7343,
198,
198,
6738,
16212,
918,
353,
13,
298,
871,
13,
66,
397,
1330,
15976,
198,
6738,
16212,
918,
353,
13,
1904,
62,
33964,
13,
26230,
62,
5225,
10223,
62,
... | 3.491803 | 61 |
#!/usr/bin/env python3
import uuid
from pathlib import Path
import cv2
import depthai as dai
from collections import deque
import numpy as np
from common.camera_info import H_CR
from common.config import NNConfig
# Create pipeline
pipeline = dai.Pipeline()
# Define sources and outputs
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
# rgbCenter = pipeline.createColorCamera()
featureTrackerLeft = pipeline.create(dai.node.FeatureTracker)
featureTrackerRight = pipeline.create(dai.node.FeatureTracker)
# featureTrackerCenter = pipeline.create(dai.node.FeatureTracker)
camRgb = pipeline.createColorCamera()
spatialDetectionNetwork = pipeline.createYoloSpatialDetectionNetwork()
stereo = pipeline.createStereoDepth()
xoutPassthroughFrameLeft = pipeline.create(dai.node.XLinkOut)
xoutTrackedFeaturesLeft = pipeline.create(dai.node.XLinkOut)
xoutPassthroughFrameRight = pipeline.create(dai.node.XLinkOut)
xoutTrackedFeaturesRight = pipeline.create(dai.node.XLinkOut)
# xoutPassthroughFrameCenter = pipeline.create(dai.node.XLinkOut)
# xoutTrackedFeaturesCenter = pipeline.create(dai.node.XLinkOut)
xinTrackedFeaturesConfig = pipeline.create(dai.node.XLinkIn)
xoutRgb = pipeline.createXLinkOut()
camRgb.preview.link(xoutRgb.input)
xoutNN = pipeline.createXLinkOut()
xoutPassthroughFrameLeft.setStreamName("passthroughFrameLeft")
xoutTrackedFeaturesLeft.setStreamName("trackedFeaturesLeft")
xoutPassthroughFrameRight.setStreamName("passthroughFrameRight")
xoutTrackedFeaturesRight.setStreamName("trackedFeaturesRight")
# xoutPassthroughFrameCenter.setStreamName("passthroughFrameCenter")
# xoutTrackedFeaturesCenter.setStreamName("trackedFeaturesCenter")
xinTrackedFeaturesConfig.setStreamName("trackedFeaturesConfig")
# Properties
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
# rgbCenter.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
# rgbCenter.setBoardSocket(dai.CameraBoardSocket.RGB)
camRgb.setPreviewSize(416, 416)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb.setInterleaved(False)
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
stereo.setConfidenceThreshold(255)
stereo.setRectifyEdgeFillColor(0) # Black, to better see the cutout
model_name = "infiniteRecharge2020sandbox"
model_dir = Path(__file__).parent / Path(f"../resources/nn/") / model_name
blob_path = model_dir / Path(model_name).with_suffix(f".blob")
config_path = model_dir / Path(model_name).with_suffix(f".json")
nn_config = NNConfig(config_path)
LABELS = nn_config.labels
spatialDetectionNetwork.setBlobPath(str(blob_path))
spatialDetectionNetwork.setConfidenceThreshold(nn_config.confidence)
spatialDetectionNetwork.setNumClasses(nn_config.metadata["classes"])
spatialDetectionNetwork.setCoordinateSize(nn_config.metadata["coordinates"])
spatialDetectionNetwork.setAnchors(nn_config.metadata["anchors"])
spatialDetectionNetwork.setAnchorMasks(nn_config.metadata["anchor_masks"])
spatialDetectionNetwork.setIouThreshold(nn_config.metadata["iou_threshold"])
spatialDetectionNetwork.input.setBlocking(False)
spatialDetectionNetwork.setBoundingBoxScaleFactor(0.5)
spatialDetectionNetwork.setDepthLowerThreshold(100)
spatialDetectionNetwork.setDepthUpperThreshold(5000)
xoutRgb.setStreamName("rgb")
xoutNN.setStreamName("detections")
# Linking
monoLeft.out.link(featureTrackerLeft.inputImage)
featureTrackerLeft.passthroughInputImage.link(xoutPassthroughFrameLeft.input)
featureTrackerLeft.outputFeatures.link(xoutTrackedFeaturesLeft.input)
xinTrackedFeaturesConfig.out.link(featureTrackerLeft.inputConfig)
monoRight.out.link(featureTrackerRight.inputImage)
featureTrackerRight.passthroughInputImage.link(xoutPassthroughFrameRight.input)
featureTrackerRight.outputFeatures.link(xoutTrackedFeaturesRight.input)
xinTrackedFeaturesConfig.out.link(featureTrackerRight.inputConfig)
# rgbCenter.video.link(featureTrackerCenter.inputImage)
# featureTrackerCenter.passthroughInputImage.link(xoutPassthroughFrameCenter.input)
# featureTrackerCenter.outputFeatures.link(xoutTrackedFeaturesCenter.input)
# xinTrackedFeaturesConfig.out.link(featureTrackerCenter.inputConfig)
# By default the least mount of resources are allocated
# increasing it improves performance
monoLeft.out.link(stereo.left)
monoRight.out.link(stereo.right)
camRgb.preview.link(spatialDetectionNetwork.input)
spatialDetectionNetwork.out.link(xoutNN.input)
stereo.depth.link(spatialDetectionNetwork.inputDepth)
numShaves = 2
numMemorySlices = 2
featureTrackerLeft.setHardwareResources(numShaves, numMemorySlices)
featureTrackerRight.setHardwareResources(numShaves, numMemorySlices)
# featureTrackerCenter.setHardwareResources(numShaves, numMemorySlices)
featureTrackerConfig = featureTrackerRight.initialConfig.get()
print("Press 's' to switch between Lucas-Kanade optical flow and hardware accelerated motion estimation!")
detections = []
# Connect to device and start pipeline
with dai.Device(pipeline) as device:
# Output queues used to receive the results
passthroughImageLeftQueue = device.getOutputQueue("passthroughFrameLeft", 8, False)
outputFeaturesLeftQueue = device.getOutputQueue("trackedFeaturesLeft", 8, False)
passthroughImageRightQueue = device.getOutputQueue("passthroughFrameRight", 8, False)
outputFeaturesRightQueue = device.getOutputQueue("trackedFeaturesRight", 8, False)
# passthroughImageCenterQueue = device.getOutputQueue("passthroughFrameCenter", 8, False)
# outputFeaturesCenterQueue = device.getOutputQueue("trackedFeaturesCenter", 8, False)
previewQueue = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
detectionNNQueue = device.getOutputQueue(name="detections", maxSize=4, blocking=False)
inputFeatureTrackerConfigQueue = device.getInputQueue("trackedFeaturesConfig")
leftWindowName = "left"
leftFeatureDrawer = FeatureTrackerDrawer("Feature tracking duration (frames)", leftWindowName)
rightWindowName = "right"
rightFeatureDrawer = FeatureTrackerDrawer("Feature tracking duration (frames)", rightWindowName)
# centerWindowName = "center"
# centerFeatureDrawer = FeatureTrackerDrawer("Feature tracking duration (frames)", centerWindowName)
while True:
frame = previewQueue.get().getCvFrame()
inDet = detectionNNQueue.tryGet()
if inDet is not None:
detections = inDet.detections
bboxes = []
height = frame.shape[0]
width = frame.shape[1]
for detection in detections:
bboxes.append({
'id': uuid.uuid4(),
'label': detection.label,
'confidence': detection.confidence,
'x_min': int(detection.xmin * width),
'x_max': int(detection.xmax * width),
'y_min': int(detection.ymin * height),
'y_max': int(detection.ymax * height),
'depth_x': detection.spatialCoordinates.x / 1000,
'depth_y': detection.spatialCoordinates.y / 1000,
'depth_z': detection.spatialCoordinates.z / 1000,
})
for detection in bboxes:
cv2.rectangle(frame, (detection['x_min'], detection['y_min']), (detection['x_max'], detection['y_max']), (0, 255, 0), 2)
cv2.putText(frame, "x: {}".format(round(detection['depth_x'], 2)), (detection['x_min'], detection['y_min'] + 30), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "y: {}".format(round(detection['depth_y'], 2)), (detection['x_min'], detection['y_min'] + 50), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "z: {}".format(round(detection['depth_z'], 2)), (detection['x_min'], detection['y_min'] + 70), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "conf: {}".format(round(detection['confidence'], 2)), (detection['x_min'], detection['y_min'] + 90), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "label: {}".format(LABELS[detection['label']], 1), (detection['x_min'], detection['y_min'] + 110), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.imshow("Frame", frame)
inPassthroughFrameLeft = passthroughImageLeftQueue.get()
passthroughFrameLeft = inPassthroughFrameLeft.getFrame()
leftFrame = cv2.cvtColor(passthroughFrameLeft, cv2.COLOR_GRAY2BGR)
if bboxes:
for detection in bboxes:
y_min = int(detection['y_min'] * (2.0 / 3.0))
y_max = int(detection['y_max'] * (2.0 / 3.0))
x_min = int(detection['x_min'] * (2.0 / 3.0))
x_max = int(detection['x_max'] * (2.0 / 3.0))
# leftFrame = leftFrame[y_min:y_max, x_min:x_max]
inPassthroughFrameRight = passthroughImageRightQueue.get()
passthroughFrameRight = inPassthroughFrameRight.getFrame()
rightFrame = cv2.cvtColor(passthroughFrameRight, cv2.COLOR_GRAY2BGR)
# inPassthroughFrameCenter = passthroughImageCenterQueue.get()
# passthroughFrameCenter = inPassthroughFrameCenter.getFrame()
# centerFrame = cv2.cvtColor(passthroughFrameCenter, cv2.COLOR_GRAY2BGR)
trackedFeaturesLeft = outputFeaturesLeftQueue.get().trackedFeatures
leftFeatureDrawer.trackFeaturePath(trackedFeaturesLeft, bboxes)
leftFeatureDrawer.drawFeatures(leftFrame)
trackedFeaturesRight = outputFeaturesRightQueue.get().trackedFeatures
rightFeatureDrawer.trackFeaturePath(trackedFeaturesRight, bboxes)
rightFeatureDrawer.drawFeatures(rightFrame)
# trackedFeaturesCenter = outputFeaturesCenterQueue.get().trackedFeatures
# centerFeatureDrawer.trackFeaturePath(trackedFeaturesCenter)
# centerFeatureDrawer.drawFeatures(centerFrame)
# Show the frame
for detection in bboxes:
# x_shift = int(detection['depth_z'] * 1.25)
# scale = detection['depth_z'] * 1.25
# bboxNorm = frameNorm(leftFrame, (detection['x_min'] / 416, detection['y_min'] / 416, detection['x_max'] / 416, detection['y_max'] / 416))
# bboxNorm = np.array([432 + detection['x_min'],
# 152 + detection['y_min'],
# 432 + detection['x_max'],
# 152 + detection['y_max']])
# bboxNorm = np.round(bboxNorm * scale)
# x1 = detection['x_min']
# x2 = detection['x_max']
# y1 = detection['y_min']
# y2 = detection['y_max']
#
# pts = np.float32([[x1, y1], [x1, y2], [x2, y2], [x2, y1]]).reshape(-1, 1, 2)
# dst = cv2.perspectiveTransform(pts, H_CR)
# leftFrame = cv2.polylines(leftFrame, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
x_shift = int(detection['depth_z'] * 47.4257) + 286 + int(detection['depth_x'] * -100)
y_shift = int(detection['depth_z'] * 40)
x1 = x_shift + int((detection['x_min'] / 416 * 720))
x2 = x_shift + int((detection['x_max'] / 416 * 720))
y1 = y_shift + int((detection['y_min'] / 416 * 720))
y2 = y_shift + int((detection['y_max'] / 416 * 720))
bboxNorm = (x1, y1, x2, y2)
cv2.rectangle(leftFrame, (bboxNorm[0], bboxNorm[1]), (bboxNorm[2], bboxNorm[3]), (0, 255, 0), 2)
x_shift = int(detection['depth_z'] * 47.4257) + 143 + int(detection['depth_x'] * -100)
x1 = x_shift + int((detection['x_min'] / 416 * 720))
x2 = x_shift + int((detection['x_max'] / 416 * 720))
bboxNorm = (x1, y1, x2, y2)
cv2.rectangle(rightFrame, (bboxNorm[0], bboxNorm[1]), (bboxNorm[2], bboxNorm[3]), (0, 255, 0), 2)
cv2.imshow(leftWindowName, leftFrame)
cv2.imshow(rightWindowName, rightFrame)
# cv2.imshow(centerWindowName, centerFrame)
key = cv2.waitKey(1)
if key == ord('q'):
break
elif key == ord('s'):
if featureTrackerConfig.motionEstimator.type == dai.FeatureTrackerConfig.MotionEstimator.Type.LUCAS_KANADE_OPTICAL_FLOW:
featureTrackerConfig.motionEstimator.type = dai.FeatureTrackerConfig.MotionEstimator.Type.HW_MOTION_ESTIMATION
print("Switching to hardware accelerated motion estimation")
else:
featureTrackerConfig.motionEstimator.type = dai.FeatureTrackerConfig.MotionEstimator.Type.LUCAS_KANADE_OPTICAL_FLOW
print("Switching to Lucas-Kanade optical flow")
cfg = dai.FeatureTrackerConfig()
cfg.set(featureTrackerConfig)
inputFeatureTrackerConfigQueue.send(cfg) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
334,
27112,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
269,
85,
17,
198,
11748,
6795,
1872,
355,
288,
1872,
198,
6738,
17268,
1330,
390,
4188,
198,
198,
11748,
299... | 2.500675 | 5,183 |
# -*- coding: utf-8 -*-
import os
from zhconv import convert_for_mw
POETRY_DIRECTORY = './add_poetry/'
if __name__ == "__main__":
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
6738,
1976,
71,
42946,
1330,
10385,
62,
1640,
62,
76,
86,
198,
198,
16402,
2767,
18276,
62,
17931,
23988,
15513,
796,
705,
19571,
2860,
62,
7501,
1197... | 2.212121 | 66 |
import random
from enterprise import MontgomeryScott as Scotty
scotty = Scotty()
part_one()
part_two() | [
11748,
4738,
198,
6738,
13953,
1330,
21532,
19040,
355,
7130,
774,
198,
198,
1416,
313,
774,
796,
7130,
774,
3419,
628,
220,
220,
220,
220,
198,
3911,
62,
505,
3419,
198,
198,
3911,
62,
11545,
3419
] | 3.055556 | 36 |
import numpy as np
import pandas as pd
from sklearn.ensemble import BaggingClassifier, AdaBoostClassifier
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from IMLearn.metalearners import AdaBoost
from IMLearn.learners.classifiers import DecisionStump
import powerpuff.agoda_cancellation_prediction as agoda_cancellation_prediction
if __name__ == "__main__":
testBagging()
testEx4AdaBoost()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
13,
1072,
11306,
1330,
347,
16406,
9487,
7483,
11,
47395,
45686,
9487,
7483,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
277,
16,
62,
26675,... | 3.173077 | 156 |
from django.shortcuts import render
from django.http import HttpResponse
from scms import models
# Create your views here.
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
629,
907,
1330,
4981,
198,
198,
2,
13610,
534,
5009,
994,
13,
198
] | 3.757576 | 33 |
# encoding=UTF-8
"""convert-requirements-to-conda-yml.py"""
import sys
import argparse
import pkg_resources
import requests
FEEDSTOCK_URL = 'https://github.com/conda-forge/{package}-feedstock'
YML_TEMPLATE = """channels:
- conda-forge
- defaults
dependencies:
{conda_dependencies}
{pip_dependencies}
"""
SCM_MAP = {
'hg': 'mercurial',
'git': 'git',
}
def build_environment_from_requirements(cli_args):
"""Build a conda environment.yml from requirements.txt files.
This script assumes a couple of rules about what should be installed by
conda and what should be installed by pip:
1. If the requirement is installed from git or hg, it should be
installed by pip.
2. If the requirement is followed by the commend '# pip-only', it
should be installed by pip.
3. If the requirement is available on conda-forge, it should be
installed by conda.
4. Otherwise, it should be installed by pip.
Arguments:
cli_args (list): A list of command-line arguments.
Returns:
``None``
"""
parser = argparse.ArgumentParser(description=(
'Convert a ser of pip requirements.txt files into an environment '
'file for use by `conda create`.'
), prog=__file__)
parser.add_argument('req', nargs='+',
help='A requirements.txt file to analyze')
args = parser.parse_args(cli_args)
requirements_files = args.req
pip_requirements = set([])
# conda likes it when you list pip if you're using pip.
conda_requirements = set(['pip'])
for requirement_file in requirements_files:
for line in open(requirement_file):
line = line.strip()
# Blank line or comment
if len(line) == 0 or line.startswith('#'):
continue
# Checked out from scm
if line.startswith(tuple(SCM_MAP.keys())):
pip_requirements.add(line)
conda_requirements.add(SCM_MAP[line.split('+')[0]])
continue
requirement = pkg_resources.Requirement.parse(line)
conda_forge_url = FEEDSTOCK_URL.format(
package=requirement.project_name.lower())
if (requests.get(conda_forge_url).status_code == 200 and not
line.endswith('# pip-only')):
conda_requirements.add(line)
else:
pip_requirements.add(line)
conda_deps_string = '\n'.join(['- %s' % dep for dep in
sorted(conda_requirements,
key=lambda x: x.lower())])
pip_deps_string = '- pip:\n' + '\n'.join([' - %s' % dep for dep in
sorted(pip_requirements,
key=lambda x: x.lower())])
print(YML_TEMPLATE.format(
conda_dependencies=conda_deps_string,
pip_dependencies=pip_deps_string))
if __name__ == '__main__':
build_environment_from_requirements(sys.argv[1:])
# TODO: resolve dependencies by calling conda?
| [
2,
21004,
28,
48504,
12,
23,
198,
37811,
1102,
1851,
12,
8897,
18883,
12,
1462,
12,
66,
13533,
12,
88,
4029,
13,
9078,
37811,
198,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
11748,
279,
10025,
62,
37540,
198,
198,
11748,
7007,
... | 2.185495 | 1,434 |
from sqlalchemy import Column, UniqueConstraint
from sqlalchemy import String, Integer, Text
from authlib.oauth1 import (
ClientMixin,
TemporaryCredentialMixin,
TokenCredentialMixin,
)
| [
6738,
44161,
282,
26599,
1330,
29201,
11,
30015,
3103,
2536,
2913,
198,
6738,
44161,
282,
26599,
1330,
10903,
11,
34142,
11,
8255,
198,
6738,
6284,
8019,
13,
12162,
1071,
16,
1330,
357,
198,
220,
220,
220,
20985,
35608,
259,
11,
198,
... | 3 | 67 |
# Higher Class Language compiler for PYVM
# transpiles a tiny language to ppvm for assembler
#
# By: Ari Stehney
#
import sys
import os
import time
import crayons
at_compiler_tags = dict()
at_stack = dict()
at_functions = dict()
at_includes = dict()
filename1 = sys.argv[1]
filename2 = sys.argv[2]
print(crayons.cyan("opening..."))
#os.getcwd()+"/"
if os.path.isfile(filename1):
with open(filename1,mode="r") as fn1:
codeIn = fn1.read()
print(crayons.cyan("compiling..."))
outCode = CompileCode(codeIn)
print(crayons.cyan("making output..."))
with open(filename2,mode="w") as fn2:
fn2.write(outCode)
else:
print(crayons.red("tc-pvm: Input file not found: "+filename1))
| [
2,
16038,
5016,
15417,
17050,
329,
350,
56,
15996,
198,
2,
1007,
79,
2915,
257,
7009,
3303,
284,
9788,
14761,
329,
11156,
1754,
198,
2,
198,
2,
2750,
25,
6069,
2441,
71,
1681,
198,
2,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
... | 2.472414 | 290 |
from multiselect_core import Multiselect
fruits=["Apple","Pear","Apricot","Banana","Orange","Raspberry","Blueberry","Kiwi","Pineapple"]
fruit_multiselect=[{"key":"Apple","value":False},{"key":"Pear","value":False},{"key":"Apricot","value":True},{"key":"Banana","value":False},
{"key":"Orange","value":True},{"key":"Raspberry","value":True},{"key":"Kiwi","value":False},{"key":"Pineapple","value":False}]
multiselect=Multiselect(fruit_multiselect) #1st way how to initialize
multiselect2=Multiselect.init_from_options(fruits) #2nd way how to initialize
multiselect2.tick_by_indices([2,4,5])
multiselect3=Multiselect.init_from_options(fruits) #3rd way how to initialize
multiselect3.tick_all_by_key("Apricot")
multiselect3.tick_all_by_key("Orange")
multiselect3.tick_all_by_key("Raspberry")
multiselect4=Multiselect.init_from_options(fruits) #4th way how to initialize
multiselect4.tick_all_by_keys(["Apricot","Orange","Raspberry"])
print(multiselect.data)
#[{'key': 'Apple', 'value': False}, {'key': 'Pear', 'value': False}, {'key': 'Apricot', 'value': True}, {'key': 'Banana', 'value': False}, {'key': 'Orange', 'value': True}, {'key': 'Raspberry', 'value': True}, {'key': 'Kiwi', 'value': False}, {'key': 'Pineapple', 'value': False}]
print(multiselect.keys())
#[Apple', 'Pear', 'Apricot', 'Banana', 'Orange', 'Raspberry', 'Kiwi', 'Pineapple']
print(multiselect.values())
#[False, False, True, False, True, True, False, False]
print(multiselect)
#<Multiselect object> [{'key': 'Apple', 'value': False}, {'key': 'Pear', 'value': False}, {'key': 'Apricot', 'value': True}, {'key': 'Banana', 'value': False}, {'key': 'Orange', 'value': True}, {'key': 'Raspberry', 'value': True}, {'key': 'Kiwi', 'value': False}, {'key': 'Pineapple', 'value': False}]
print(multiselect["Apple"])
#False
print(multiselect.items())
#[('Apple', False), ('Pear', False), ('Apricot', True), ('Banana', False), ('Orange', True), ('Raspberry', True), ('Kiwi', False), ('Pineapple', False)]
print(multiselect.get_ticked_indices())
#[2, 4, 5]
print(multiselect.get_unticked_indices())
#[0, 1, 3, 6, 7]
multiselect["Apple"]=True | [
6738,
1963,
786,
801,
62,
7295,
1330,
7854,
786,
801,
198,
198,
69,
50187,
28,
14692,
16108,
2430,
46262,
2430,
13680,
291,
313,
2430,
30457,
2271,
2430,
40141,
2430,
49,
17653,
2430,
14573,
8396,
2430,
42,
14246,
72,
2430,
47,
500,
1... | 2.683417 | 796 |
import collections
import random
import game.content
import gears
import pbge
from game.content import gharchitecture
from game.content.ghplots import missionbuilder
from game.ghdialogue import context
from pbge.dialogue import Offer, ContextTag
from pbge.plots import Plot
from . import dd_customobjectives
from game import memobrowser
Memo = memobrowser.Memo
# *****************************************
# *** ROAD EDGE PLOT DESCRIPTORS ***
# *****************************************
#
# For the basic "road is dangerous because of giant robots raiding convoys" plots, I'm going to use the random
# story technique I call Propp's Ratchet to generate the missions leading up to the boss fight.
E_MOTIVE = "DZREPR_MOTIVE"
DZRE_MOTIVE_UNKNOWN = "DZRE_EGOAL_UNKNOWN"
DZRE_MOTIVE_PROFIT = "DZRE_MOTIVE_PROFIT"
DZRE_MOTIVE_CONQUEST = "DZRE_MOTIVE_CONQUEST"
DZRE_MOTIVE_TREASURE = "DZRE_MOTIVE_TREASURE" # There's some kind of hidden treasure they're after. Lostech?
E_ACE = "DZREPR_ACE"
DZRE_ACE_UNKNOWN = "DZRE_ACE_UNKNOWN"
DZRE_ACE_HIDDENBASE = "DZRE_ACE_HIDDENBASE"
DZRE_ACE_ZEUSCANNON = "DZRE_ACE_ZEUSCANNON"
DZRE_ACE_SPONSOR = "DZRE_ACE_SPONSOR"
E_TOWN = "DZREPR_TOWN"
DZRE_TOWN_NEUTRAL = "DZRE_TOWN_NEUTRAL"
DZRE_TOWN_AGAINST = "DZRE_TOWN_AGAINST"
DZRE_TOWN_AFRAID = "DZRE_TOWN_AFRAID"
DZRE_TOWN_DEVASTATED = "DZRE_TOWN_DEVASTATED"
DZRE_TOWN_INSPIRED = "DZRE_TOWN_INSPIRED"
E_MISSION_NUMBER = "DZREPR_MISSION_NUMBER"
E_MISSION_WINS = "DZREPR_MISSION_WINS"
# *****************************************
# *** ROAD EDGE RATCHET MISSIONS ***
# *****************************************
#
# The missions leading up to the boss fight against the bandits or whoever.
# *******************************************
# *** ROAD EDGE RATCHET CONCLUSION ***
# *******************************************
# ***************************************
# *** ROAD EDGE RATCHET SETUPS ***
# ***************************************
#
# The base plot that launches the initial missions and eventually sends a win signal to the roadedge plot.
# Mostly, what this plot has to do is provide backstory and set the start_mission property to True.
# ******************************
# *** DZRE_BanditProblem ***
# ******************************
# ******************************
# *** DZRE_InvaderProblem ***
# ******************************
def _typecheck():
""" Make sure there is no mixup i.e. E_ACE: E_MOTIVE_CONQUEST
"""
for c in get_all_subclasses(DZREPR_BaseMission):
# Ensure correctness.
keys_correct(c.__name__, 'REQUIRES', c.REQUIRES)
keys_correct(c.__name__, 'CHANGES', c.CHANGES)
label_correct(c.__name__, c.LABEL, c.REQUIRES)
for k in c.CHANGES.keys():
assert k in c.REQUIRES, "{}.CHANGES: key {} not in .REQUIRES".format(c.__name__, k)
_typecheck()
| [
11748,
17268,
198,
11748,
4738,
198,
198,
11748,
983,
13,
11299,
198,
11748,
28713,
198,
11748,
279,
65,
469,
198,
6738,
983,
13,
11299,
1330,
24997,
998,
5712,
495,
198,
6738,
983,
13,
11299,
13,
456,
489,
1747,
1330,
4365,
38272,
19... | 2.619643 | 1,120 |
from .gamedata import getTeams, getScores, adjustScores
from .pwr import PWRsystems
from .regression import Regression
from .simulate import simulateBracket, simulateGame, simulateGamelog
from .teams import Team, Teams
from .tiebreak import getPlayoffSeeding
from .util import playoff_game_ids
from joblib import Parallel, delayed
import pandas as pd
import numpy as np
#simulates nfl playoffs
| [
6738,
764,
70,
2434,
1045,
1330,
651,
6767,
4105,
11,
651,
3351,
2850,
11,
4532,
3351,
2850,
201,
198,
6738,
764,
79,
18351,
1330,
350,
18564,
10057,
82,
201,
198,
6738,
764,
2301,
2234,
1330,
3310,
2234,
201,
198,
6738,
764,
14323,
... | 3.080882 | 136 |
''' Problem Tower of Hanoi:
About: Tower of Hanoi is a mathematical puzzle where we have three rods and n disks. The objective of the puzzle is to move the entire stack to another rod, obeying the following simple rules:
1) Only one disk can be moved at a time.
2) Each move consists of taking the upper disk from one of the stacks and placing it on top of another stack i.e. a disk can only be moved if it is the uppermost disk on a stack.
3) No disk may be placed on top of a smaller disk
4) That is we cant have a bigger disk over smaller disk'''
'''Sources: https://www.geeksforgeeks.org/c-program-for-tower-of-hanoi/
Sources: https://www.khanacademy.org/computing/computer-science/algorithms/towers-of-hanoi/e/move-three-disks-in-towers-of-hanoi'''
# Khan Academy link provides excellent visualization of problem
# Used a recursive approach
# Sample inputs
print('Enter number of disks :',end='')
disks = input() # Sample input
tower_of_hanoi('A','B','C',int(disks)) #Passing value to the function
| [
7061,
6,
20647,
8765,
286,
367,
5733,
72,
25,
198,
8585,
25,
8765,
286,
367,
5733,
72,
318,
257,
18069,
15027,
810,
356,
423,
1115,
32858,
290,
299,
32505,
13,
383,
9432,
286,
262,
15027,
318,
284,
1445,
262,
2104,
8931,
284,
1194,
... | 3.328947 | 304 |
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\whims\whim_modifiers.py
# Compiled at: 2020-10-08 02:43:22
# Size of source mod 2**32: 3151 bytes
from protocolbuffers.DistributorOps_pb2 import SetWhimBucks
from date_and_time import create_time_span
from game_effect_modifier.base_game_effect_modifier import BaseGameEffectModifier
from game_effect_modifier.game_effect_type import GameEffectType
from sims4.tuning.tunable import HasTunableSingletonFactory, AutoFactoryInit, TunableRange, TunableRate, TunableSimMinute
from sims4.tuning.tunable_base import RateDescriptions
import alarms
| [
2,
34318,
2349,
21,
2196,
513,
13,
22,
13,
19,
198,
2,
11361,
18022,
8189,
513,
13,
22,
357,
2091,
5824,
8,
198,
2,
4280,
3361,
3902,
422,
25,
11361,
513,
13,
22,
13,
24,
357,
31499,
14,
85,
18,
13,
22,
13,
24,
25,
1485,
66,... | 2.864151 | 265 |
"""
This example is largely adapted from https://github.com/pytorch/examples/blob/master/imagenet/main.py
Before you can run this example, you will need to download the ImageNet dataset manually from the
`official website <http://image-net.org/download>`_ and place it into a folder `path/to/imagenet`.
Train on ImageNet with default parameters:
.. code-block: bash
python imagenet.py --data-path /path/to/imagenet
or show all options you can change:
.. code-block: bash
python imagenet.py --help
"""
import os
from argparse import ArgumentParser, Namespace
from collections import OrderedDict
import torch
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import torch.utils.data.distributed
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
import pytorch_lightning as pl
from pytorch_lightning.core import LightningModule
if __name__ == '__main__':
run_cli()
| [
37811,
198,
1212,
1672,
318,
5688,
16573,
422,
3740,
1378,
12567,
13,
785,
14,
9078,
13165,
354,
14,
1069,
12629,
14,
2436,
672,
14,
9866,
14,
320,
11286,
316,
14,
12417,
13,
9078,
198,
198,
8421,
345,
460,
1057,
428,
1672,
11,
345,... | 3.362776 | 317 |
"""Backpropagation-like methods for interpretability
Wrapper around:
- (pytorch) Captum [1]
- (keras) DeepExplain [2]
References:
[1] https://captum.ai/
[2] https://arxiv.org/abs/1711.06104
"""
from captum.attr import IntegratedGradients, Saliency, DeepLift,\
DeepLiftShap, GradientShap, InputXGradient
from tensorflow.keras import backend as K
from deepexplain.tensorflow import DeepExplain
from tensorflow.keras.models import Model
from deepexplain.tensorflow.methods import attribution_methods
from copy import deepcopy
import warnings
from captum.attr import visualization as viz
import numpy as np
from matplotlib.colors import Normalize
from ...core import DataType, Task
from ..base.base_interpreter import BaseInterpreter
from depiction.models.torch.core import TorchModel
from depiction.models.keras.core import KerasModel
class BackPropeter(BaseInterpreter):
"""Backpropagation-like Explainability Method
Wrapper for Captum and DeepExplain implementations.
"""
SUPPORTED_TASK = {Task.CLASSIFICATION}
SUPPORTED_DATATYPE = {DataType.TABULAR, DataType.IMAGE, DataType.TEXT}
METHODS = {
'torch': {
'integrated_grads': IntegratedGradients,
'saliency': Saliency,
'deeplift': DeepLift,
'deeplift_shap': DeepLiftShap,
'gradient_shap': GradientShap,
'inputxgrad': InputXGradient
},
'keras': _preprocess_att_methods_keras()
}
@classmethod
def __init__(self, model, method, **method_kwargs):
"""
Constructor for backpropagation-like methods.
Reference:
https://captum.ai/api/attribution.html
Args:
model (TorchModel or KerasModel): model to explain
method (str): method to use
method_kwargs: keyword args to pass on to the explainer constrcutor.
Please refer to the the specific algorithm (following the above link)
to see and understand the available arguments.
"""
super(BackPropeter, self).__init__(model)
self._model = model
self._method = method
if isinstance(self._model, TorchModel):
self._check_supported_method('torch', method)
self._explainer = self.METHODS['torch'][method](self._model._model, **method_kwargs)
elif isinstance(self._model, KerasModel):
self._check_supported_method('keras', method)
else:
raise ValueError('Model not supported! At the moment we only support {}.'
'\nPlease check again in the future!'.format(self.METHODS.keys()))
def interpret(self, samples, target_layer=-1, show_in_notebook=False,
explanation_configs={},
vis_configs={}):
"""Explain instance and return PP or PN with metadata. If pyTorch (captum) is used,
the convergence delta is NOT returned by default.
Args:
samples (tensor or tuple of tensors): Samples to explain
target_layer (int): for KerasModel, specify the target layer.
Following example in: https://github.com/marcoancona/DeepExplain/blob/master/examples/mint_cnn_keras.ipynb
interpret_kwargs (optinal): optional arguments to pass to the explainer for attribution
Returns:
tensor (or tuple of tensors) containing attributions
"""
if isinstance(self._model, TorchModel):
if self._explainer.has_convergence_delta() and 'return_convergence_delta' not in explanation_configs:
explanation_configs['return_convergence_delta'] = False
explanation = self._explainer.attribute(inputs=self._model._prepare_sample(samples), **explanation_configs)
if show_in_notebook:
if 'return_convergence_delta' in explanation_configs and explanation_configs['return_convergence_delta']:
exp = explanation[0]
else:
exp = explanation
exp = np.transpose(exp.detach().numpy()[0], (1,2,0))
normalizer = Normalize()
if 'method' not in vis_configs:
vis_configs['method'] = 'masked_image'
viz.visualize_image_attr(exp, normalizer(samples[0]), **vis_configs)
return explanation
else:
with DeepExplain(session=K.get_session()) as de:
input_tensor = self._model._model.inputs
smpls = samples if isinstance(samples, list) else [samples]
if self._method in {'occlusion', 'shapley_sampling'}:
warnings.warn('For perturbation methods, multiple inputs (modalities) are not supported.', UserWarning)
smpls = smpls[0]
input_tensor = input_tensor[0]
model = Model(inputs=input_tensor, outputs=self._model._model.outputs)
target_tensor = model(input_tensor)
if show_in_notebook:
warnings.warn('Sorry! Visualization not implemented yet!', UserWarning)
return de.explain(self._method, T=target_tensor, X=input_tensor, xs=smpls, **explanation_configs)
| [
37811,
7282,
22930,
363,
341,
12,
2339,
5050,
329,
6179,
1799,
198,
198,
36918,
2848,
1088,
25,
198,
12,
357,
9078,
13165,
354,
8,
6790,
388,
685,
16,
60,
198,
12,
357,
6122,
292,
8,
10766,
18438,
391,
685,
17,
60,
198,
198,
19927... | 2.289587 | 2,324 |
# coding: utf-8
"""
Honeywell Home
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ThermostatSettingsSpecialMode(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'auto_changeover_active': 'bool',
'emergency_heat_active': 'bool'
}
attribute_map = {
'auto_changeover_active': 'autoChangeoverActive',
'emergency_heat_active': 'emergencyHeatActive'
}
def __init__(self, auto_changeover_active=None, emergency_heat_active=None): # noqa: E501
"""ThermostatSettingsSpecialMode - a model defined in OpenAPI""" # noqa: E501
self._auto_changeover_active = None
self._emergency_heat_active = None
self.discriminator = None
if auto_changeover_active is not None:
self.auto_changeover_active = auto_changeover_active
if emergency_heat_active is not None:
self.emergency_heat_active = emergency_heat_active
@property
def auto_changeover_active(self):
"""Gets the auto_changeover_active of this ThermostatSettingsSpecialMode. # noqa: E501
:return: The auto_changeover_active of this ThermostatSettingsSpecialMode. # noqa: E501
:rtype: bool
"""
return self._auto_changeover_active
@auto_changeover_active.setter
def auto_changeover_active(self, auto_changeover_active):
"""Sets the auto_changeover_active of this ThermostatSettingsSpecialMode.
:param auto_changeover_active: The auto_changeover_active of this ThermostatSettingsSpecialMode. # noqa: E501
:type: bool
"""
self._auto_changeover_active = auto_changeover_active
@property
def emergency_heat_active(self):
"""Gets the emergency_heat_active of this ThermostatSettingsSpecialMode. # noqa: E501
:return: The emergency_heat_active of this ThermostatSettingsSpecialMode. # noqa: E501
:rtype: bool
"""
return self._emergency_heat_active
@emergency_heat_active.setter
def emergency_heat_active(self, emergency_heat_active):
"""Sets the emergency_heat_active of this ThermostatSettingsSpecialMode.
:param emergency_heat_active: The emergency_heat_active of this ThermostatSettingsSpecialMode. # noqa: E501
:type: bool
"""
self._emergency_heat_active = emergency_heat_active
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ThermostatSettingsSpecialMode):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
21788,
4053,
5995,
628,
220,
220,
220,
1400,
6764,
2810,
357,
27568,
416,
4946,
15042,
35986,
3740,
1378,
12567,
13,
785,
14,
9654,
499,
270,
10141,
14,
9654,
1504... | 2.375406 | 1,846 |
# (c) Copyright IBM Corp. 2010, 2018. All Rights Reserved.
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use, line-too-long
"""Function implementation
test with: resilient-circuits selftest -l fn_digital_shadows_search
"""
import logging
import json
import requests
from requests.auth import HTTPBasicAuth
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
LOG.addHandler(logging.StreamHandler())
def get_config_option(option_name, opts, optional=False):
"""Given option_name, checks if it is in appconfig. Raises ValueError if a mandatory option is missing"""
option = opts.get(option_name)
if not option and optional is False:
err = "'{0}' is mandatory and is not set in the app.config file. You must set this value to run this function".format(option_name)
raise ValueError(err)
else:
return option
def selftest_function(opts):
"""
Placeholder for selftest function. An example use would be to test package api connectivity.
Suggested return values are be unimplemented, success, or failure.
"""
options = opts.get("fn_digital_shadows_search", {})
api_key = get_config_option("ds_api_key", options)
api_secret = get_config_option("ds_api_secret", options)
base_url = get_config_option("ds_base_url", options)
headers = {'content-type': 'application/json; charset=utf-8', 'Accept': 'application/json'}
basic_auth = HTTPBasicAuth(api_key, api_secret)
url = "{0}{1}".format(base_url, "/api/search/find")
try:
res = requests.post(
url,
json.dumps({"query": "8.8.8.8"}),
auth=basic_auth,
headers=headers,
verify=True)
res.raise_for_status()
if res.status_code == 200:
return {"state": "success"}
return {
"state": "failure",
"reason": "Status Code {0}".format(res.status_code)
}
except Exception as exp:
LOG.error(exp)
return {
"state": "failure",
"reason": exp
}
| [
2,
357,
66,
8,
15069,
19764,
11421,
13,
3050,
11,
2864,
13,
1439,
6923,
33876,
13,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
23864,
2611,
279,
2645,
600,
25,
15560,
28,
403,
1484,
12,
49140,
11,
645,... | 2.504785 | 836 |
import click
from rezide.utils import interfaces
from rezide.utils import sway
@click.command()
| [
11748,
3904,
198,
198,
6738,
302,
89,
485,
13,
26791,
1330,
20314,
198,
6738,
302,
89,
485,
13,
26791,
1330,
20009,
628,
198,
198,
31,
12976,
13,
21812,
3419,
198
] | 3.333333 | 30 |
import unittest
from click.testing import CliRunner
import gksdud
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
3904,
13,
33407,
1330,
1012,
72,
49493,
198,
198,
11748,
308,
591,
67,
463,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419... | 2.659091 | 44 |
#! /usr/bin/env python2
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2013. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: khmer-project@idyll.org
#
import sys
MAX_SIZE = 50000
if __name__ == '__main__':
filename = sys.argv[1]
prefix = sys.argv[2]
partition_sizes = {}
# first, read in all the cluster sizes
fp = open(filename)
for n, x in enumerate(read_partition_file(fp)):
if n % 100000 == 0:
print '...', n
name, partition_id, readcount, surrendered, seq = x
if not surrendered:
partition_sizes[partition_id] = readcount
# sort by # of reads in each cluster
divvy = sorted(partition_sizes.items(), key=lambda y: y[1])
# divvy up into different groups, based on having MAX_SIZE sequences
# in each group.
total = 0
group = set()
group_n = 0
group_d = {}
for partition_id, n_reads in divvy:
group.add(partition_id)
total += n_reads
if total > MAX_SIZE:
for partition_id in group:
group_d[partition_id] = group_n
group_n += 1
group = set()
total = 0
print '%d groups' % group_n
# open a bunch of output files for the different groups
group_fps = {}
for n in range(group_n):
fp = open('%s.group%d.fa' % (prefix, n), 'w')
group_fps[n] = fp
surrendered_fp = open('%s.surrender.fa' % prefix, 'w')
# write 'em all out!
fp = open(filename)
for n, x in enumerate(read_partition_file(fp)):
if n % 100000 == 0:
print '...x2', n
name, partition_id, readcount, surrendered, seq = x
if surrendered:
outfp = surrendered_fp
else:
group_n = group_d[partition_id]
outfp = group_fps[group_n]
outfp.write('>%s %s %s\n%s\n' % (name, partition_id, readcount, seq))
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
44081,
647,
11,
2638,
1378,
12567,
13,
785,
14,
2004,
12,
23912,
14,
14636,
647,
47454,
290,
318,
198,
2,
15069,
357,
34,
8,
7055,
1812... | 2.241071 | 896 |
from collections import UserList
from typing import *
from .iterables import *
from .math import *
# (sample mean, sample standard deviation)
@overload
@overload
def mean(X: list[int | float], weights: Optional[list[int | float]] = None):
'''return the population mean = sample mean of X in O(n)'''
if weights == None:
acc = 0.0
for x in X:
acc += x
return acc / len(X)
else:
acc = 0.0
for i in range(len(X)):
acc += X[i] * weights[i]
return acc
def stdev(X: list[int | float], u: float) -> float:
'''return the sample standard deviation of X given the sample mean u in O(n)'''
acc = 0.0
for x in X:
acc += (x - u)**2
return (acc / (len(X) - 1))**0.5
# NTP stuff
def mode(X: list[int | float]) -> float:
'''return an estimated in-distribution mode of a sorted X in O(n)'''
u = mean(X)
A = LinkedList(X)
for n in range(len(X) - 1, 0, -1):
# remove farthest neighbor of the mean
a, a_distance = A[0], abs(A[0] - u)
b, b_distance = A[-1], abs(A[-1] - u)
if a_distance >= b_distance:
u -= (a - u) / n
A.popleft()
else:
u -= (b - u) / n
A.pop()
return A[0]
# d > 1
# https://stackoverflow.com/questions/59672100/how-to-find-farthest-neighbors-in-euclidean-space
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.386.8193&rep=rep1&type=pdf
# https://en.wikipedia.org/wiki/Priority_queue
# https://en.wikipedia.org/wiki/R-tree
# https://en.wikipedia.org/wiki/Ball_tree
# infinite streams
def _quantile(X: list[float], p: float) -> float:
'''return a linearly interpolated quantile of a sorted X in O(1)'''
a = p * (len(X) - 1)
i = int(a)
j = -int(-a // 1)
return ((i + 1) - a) * X[i] + (a - i) * X[j]
def pQuantile(X: list[float], p: float) -> Optional[float]:
'''return an estimated p-quantile of X in O(n) via P-Squared algorithm'''
g = pSquare(p)
y = None
for x in X:
y = g.next(x)
return y
# visual analysis
def sortedplot(*Y: Union[NamedList, list], **kwargs):
'''plot the Y as a sorted plot in O(n log n)'''
import matplotlib.pyplot as plt
LINESTYLES = [
(0, (3, 2, 3, 2, 3, 4)), # --- ---
(0, (1, 1, 1, 1, 1, 3)), # *** ***
(0, (3, 2, 1, 2, 3, 4)), # -*- -*-
(0, (1, 2, 1, 2, 3, 4)), # **- **-
(0, (1, 2, 3, 2, 1, 4)), # *-* *-*
]
fig, ax = plt.subplots()
Y_named = [NamedList(y.name if isinstance(y, NamedList) else f'{i}', y) for i, y in enumerate(Y)]
Y_named = sorted(Y_named, key=lambda y: mean(y.data))
X = None
for i, y in enumerate(Y_named):
X = [j / (len(y) - 1) for j in range(len(y))]
ax.plot(X, sorted(y), linestyle=LINESTYLES[i % len(LINESTYLES)], linewidth=1.8, label=y.name)
p05 = [_quantile(sorted(y.data), 0.05) for y in Y_named]
p95 = [_quantile(sorted(y.data), 0.95) for y in Y_named]
kwargs_default = {
'title': 'Sorted plot',
'xlabel': 'p-quantile',
'ylabel': 'y',
'xticks': [0.05, 0.95],
'ylim': (min(p05), max(p95)),
}
for k, v in kwargs_default.items():
if k not in kwargs: kwargs[k] = v
ax.set(**kwargs)
ax.grid()
ax.legend(prop={'size': 12})
plt.show()
# combinatorics
@overload
@overload
# todo: step?
def V(n: int | float, k: int | float) -> int | float:
'''return variations = (n choose k) * k! in O(k)'''
if isinstance(n, int) and isinstance(k, int):
return P(n)**2 // (P(k) * P(n - k))
else:
return P(n)**2 / (P(k) * P(n - k))
@overload
@overload
def P(n: int | float):
'''return permutations = n! in O(log n)'''
if isinstance(n, int):
return round(Gamma(n + 1))
else:
return Gamma(n + 1)
@overload
@overload
def C(n: float, k: float) -> float:
'''return combinations = (n choose k) in O(k)'''
if isinstance(n, int) and isinstance(k, int):
return P(n) // (P(k) * P(n - k))
else:
return P(n) / (P(k) * P(n - k))
if __name__ == '__main__':
X = sorted([0, .24, .25, 1])
print(mode(X), X)
phi = (1 + 5**.5) / 2
X = sorted([(0.5 + i * 1 / phi) % 1 for i in range(6)])
print(mode(X), X)
print(mean(X), stdev(X, mean(X)))
#sortedplot([0.8, 1, 1.1], [0.75, 0.75, 0.75], X)
Z = [0.02, 0.15, 0.74, 0.83, 3.39, 22.37, 10.15, 15.43, 38.62, 15.92, 34.60, 10.28, 1.47, 0.40, 0.05, 11.39, 0.27, 0.42, 0.09, 11.37]
print(pQuantile(Z, 0.5)) # correct answer: 6.931, pSquared answer: 4.440634353260338, population median: 2.43
r = FractalRand(2)
X = []
for i in range(100000):
X.append(r.next())
sortedplot(NamedList('fractal_rand() [not normalized]', X))
| [
6738,
17268,
1330,
11787,
8053,
198,
6738,
19720,
1330,
1635,
198,
6738,
764,
2676,
2977,
1330,
1635,
198,
6738,
764,
11018,
1330,
1635,
198,
198,
2,
357,
39873,
1612,
11,
6291,
3210,
28833,
8,
198,
31,
2502,
2220,
198,
198,
31,
2502,... | 2.194742 | 2,054 |
from django.contrib import admin
from .models import Post, Project, Author
admin.site.register(Post)
admin.site.register(Project)
admin.site.register(Author)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
2947,
11,
4935,
11,
6434,
198,
198,
28482,
13,
15654,
13,
30238,
7,
6307,
8,
198,
28482,
13,
15654,
13,
30238,
7,
16775,
8,
198,
28482,
13,
15654,
13,
3023... | 3.382979 | 47 |
import json
import os
import subprocess
import sys
import time
from shutil import copyfile
from enum import Enum
import boto3
import ray
from ray.tune import run_experiments
from .tf_serving_utils import export_tf_serving, natural_keys, change_permissions_recursive
from .configuration_list import ConfigurationList
from .sage_cluster_communicator import SageClusterCommunicator
from .docker_utils import get_ip_from_host
TERMINATION_SIGNAL = "JOB_TERMINATED"
INTERMEDIATE_DIR = "/opt/ml/output/intermediate"
CHECKPOINT_DIR = "/opt/ml/input/data/checkpoint"
MODEL_OUTPUT_DIR = "/opt/ml/model"
class Cluster(Enum):
"""
Used when training is done in heterogeneous mode, i.e. 2 SageMaker jobs are launched with
different instance types. Usually, primary cluster has a single GPU instance responsible
for Neural Network training and secondary cluster has CPU instances for rollouts.
For single machine or homogeneous cluster, primary is the default type.
"""
Primary = "primary"
Secondary = "secondary"
class SageMakerRayLauncher(object):
"""Base class for SageMaker RL applications using Ray-RLLib.
Customers should sub-class this, fill in the required methods, and
call .train_main() to start a training process.
Example::
def create_environment(env_config):
# Import must happen inside the method so workers re-import
import roboschool
return gym.make('RoboschoolHumanoid-v1')
class MyLauncher(SageMakerRayLauncher):
def register_env_creator(self):
register_env("RoboschoolHumanoid-v1", create_environment)
def get_experiment_config(self):
return {
"training": {
"env": "RoboschoolHumanoid-v1",
"run": "PPO",
...
}
}
if __name__ == "__main__":
MyLauncher().train_main()
"""
def register_env_creator(self):
"""Sub-classes must implement this.
"""
raise NotImplementedError("Subclasses should implement this to call ray.tune.registry.register_env")
def customize_experiment_config(self, config):
"""Applies command-line hyperparameters to the config.
"""
# TODO: use ConfigList from Coach launcher, and share customization code.
hyperparams_dict = json.loads(os.environ.get("SM_HPS", "{}"))
# Set output dir to intermediate
# TODO: move this to before customer-specified so they can override
hyperparams_dict["rl.training.local_dir"] = INTERMEDIATE_DIR
hyperparams_dict["rl.training.checkpoint_at_end"] = True
hyperparams_dict["rl.training.checkpoint_freq"] = config['training'].get('checkpoint_freq', 10)
self.hyperparameters = ConfigurationList() # TODO: move to shared
for name, value in hyperparams_dict.items():
# self.map_hyperparameter(name, val) #TODO
if name.startswith("rl."):
# self.apply_hyperparameter(name, value) #TODO
self.hyperparameters.store(name, value)
# else:
# raise ValueError("Unknown hyperparameter %s" % name)
self.hyperparameters.apply_subset(config, "rl.")
return config
def launch(self):
"""Actual entry point into the class instance where everything happens.
Lots of delegating to classes that are in subclass or can be over-ridden.
"""
self.register_env_creator()
# All worker nodes will block at this step during training
ray_cluster_config = self.ray_init_config()
if not self.is_master_node:
return
# Start the driver on master node
ray.init(**ray_cluster_config)
experiment_config = self.get_experiment_config()
experiment_config = self.customize_experiment_config(experiment_config)
experiment_config = self.set_up_checkpoint(experiment_config)
print("Important! Ray with version <=0.7.2 may report \"Did not find checkpoint file\" even if the",
"experiment is actually restored successfully. If restoration is expected, please check",
"\"training_iteration\" in the experiment info to confirm."
)
run_experiments(experiment_config)
all_workers_host_names = self.get_all_host_names()[1:]
# If distributed job, send TERMINATION_SIGNAL to all workers.
if len(all_workers_host_names) > 0:
self.sage_cluster_communicator.create_s3_signal(TERMINATION_SIGNAL)
algo = experiment_config["training"]["run"]
env_string = experiment_config["training"]["config"]["env"]
# default is tensorflow
use_pytorch = False
if ray.__version__ >= "1.0.0":
if experiment_config["training"]["config"]["framework"] == "torch":
use_pytorch = True
else:
use_pytorch = experiment_config["training"]["config"].get("use_pytorch", False)
self.save_checkpoint_and_serving_model(algorithm=algo,
env_string=env_string,
use_pytorch=use_pytorch)
@classmethod
def train_main(cls):
"""main function that kicks things off
"""
launcher = cls()
launcher.launch()
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
11748,
640,
198,
6738,
4423,
346,
1330,
4866,
7753,
198,
6738,
33829,
1330,
2039,
388,
198,
198,
11748,
275,
2069,
18,
198,
198,
11748,
26842,
198,
6738,
2... | 2.41619 | 2,273 |
# -*- coding: UTF-8 -*-
logger.info("Loading 3 objects to table cal_dailyplannerrow...")
# fields: id, seqno, designation, start_time, end_time
loader.save(create_cal_dailyplannerrow(1,1,['AM', 'Vormittags', 'Avant-midi'],None,time(12,0,0)))
loader.save(create_cal_dailyplannerrow(2,2,['PM', 'Nachmittags', 'Apr\xe8s-midi'],time(12,0,0),None))
loader.save(create_cal_dailyplannerrow(3,3,['All day', 'Ganztags', 'Journ\xe9e enti\xe8re'],None,None))
loader.flush_deferred_objects()
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
6404,
1362,
13,
10951,
7203,
19031,
513,
5563,
284,
3084,
2386,
62,
29468,
11578,
1008,
808,
9313,
8,
198,
2,
7032,
25,
4686,
11,
33756,
3919,
11,
22566,
11,
923,
62,
243... | 2.405 | 200 |
import os
from django.shortcuts import render
from django.shortcuts import render, redirect, get_object_or_404, render_to_response
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib import messages
from django.http import HttpResponse, HttpResponseRedirect
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from django.db.models import Count, Sum, Q
from django.db.models.functions import Concat
from django.db.models import Value as V
import json
from employees.models import *
from facilities.models import *
from fleet_management.exporter import *
from fleet_management.importer import *
import threading
import re
from forms import StockItemForm, StockTransactionForm, StockItemImportForm
from forms import StockFilterForm, StockTransactionFilterForm
@login_required
@csrf_exempt
@login_required
@login_required
@login_required
@login_required
@login_required
@login_required | [
11748,
28686,
198,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
11,
651,
62,
15252,
62,
273,
62,
26429,
11,
8543,
62,
1462,
62,
26209,
198,
6738,
42625,
14208,
... | 3.577703 | 296 |
import click
import recipes.sync_recipe_utils as sr
import solvebio as sb
__version__ = '1.0.0'
@click.group()
@click.option('--access-token', help='Manually provide a SolveBio Access Token')
@click.option('--api-host', help='Override the default SolveBio API host')
@click.option('--api-key', help='Manually provide a SolveBio API key')
@click.pass_context
@sync_recipes.command()
@click.argument('recipes_file', nargs=1)
@click.option('--name', help='Name of the recipe')
@click.option('--all', is_flag=True,
help='Apply the selected mode to all recipes in YAML file')
@click.pass_context
@sync_recipes.command()
@click.argument('recipes_file', nargs=1, required=False)
@click.option('--name', help='Name of the recipe')
@click.option('--all', is_flag=True,
help='Apply the selected mode to all recipes in YAML file')
@click.pass_context
@sync_recipes.command()
@click.argument('recipes_file', nargs=1)
@click.option('--account-recipes', is_flag=True, help='Export recipes for logged in account')
@click.option('--public-recipes', is_flag=True, help='Export public recipes')
@click.pass_context
if __name__ == '__main__':
sync_recipes()
| [
11748,
3904,
198,
11748,
14296,
13,
27261,
62,
29102,
431,
62,
26791,
355,
19677,
198,
11748,
8494,
65,
952,
355,
264,
65,
198,
198,
834,
9641,
834,
796,
705,
16,
13,
15,
13,
15,
6,
628,
198,
31,
12976,
13,
8094,
3419,
198,
31,
... | 2.859903 | 414 |
# Copyright (C) 2018 Garth N. Wells
#
# SPDX-License-Identifier: MIT
"""This module provides a model for a monitoring station, and tools
for manipulating/modifying station data
"""
class MonitoringStation:
"""This class represents a river level monitoring station"""
# Task 1F
def typical_range_consistent(self):
"""Returns a boolean value True, if the first for range is less than the second
Returns a boolean value False, if there is no typical range, or if the first is greater than the second"""
if self.typical_range is None:
return False
return self.typical_range[0] < self.typical_range[1]
# Task 2B
def relative_water_level(self):
"""Returns a ratio of a monitoring station's current level as a fraction of its typical range"""
if not self.typical_range_consistent() or self.latest_level is None:
return None
low = self.typical_range[0]
dif = self.typical_range[1]-self.typical_range[0]
ratio = round((self.latest_level-low)/dif, 4)
return ratio
# Task 1F
def inconsistent_typical_range_stations(stations):
"""Builds a list of stations with inconsistent typical ranges, i.e. no range given, or the second
value being greater than the first, and returns it"""
inconsistent_stations = []
for i in stations:
if i.typical_range_consistent() is False:
inconsistent_stations.append(i.name)
inconsistent_stations.sort()
return inconsistent_stations
| [
2,
15069,
357,
34,
8,
2864,
7164,
400,
399,
13,
18292,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
198,
37811,
1212,
8265,
3769,
257,
2746,
329,
257,
9904,
4429,
11,
290,
4899,
198,
1640,
29349,
14,
4666,
403... | 2.817185 | 547 |
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import src.pattern.PatternBuilder as PB
test_build3()
| [
11748,
25064,
198,
11748,
28686,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
5305,
6978,
7,
834,
7753,
834,
35514,
198,
198,
11748,
12351,
13,
33279,
... | 2.785714 | 56 |