content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import preprocessing
matplotlib.use("Agg")
import datetime
import torch
from finrl.config import config
from finrl.marketdata.yahoodownloader import YahooDownloader
from finrl.preprocessing.preprocessors import FeatureEngineer
from finrl.preprocessing.data import data_split
from finrl.env.env_stocktrading import StockTradingEnv
from finrl.env.lxc_env_stocktrading import lxcStockTradingEnv
from finrl.model.models import DRLAgent
from finrl.trade.backtest import backtest_stats as BackTestStats
from stable_baselines3 import A2C | [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
1341,
35720,
1330,
662,
36948,
198,
198,
6759,
29487,
8019,
13,... | 3.390374 | 187 |
from functools import partial
import community as community_louvain
import torch
import torch.nn.functional as F
from ogb.graphproppred.mol_encoder import AtomEncoder, BondEncoder
from torch_geometric.data import Batch
from torch_geometric.nn import avg_pool
from torch_geometric.utils.convert import to_networkx
import gnns.ogbmol_conv
from gnns.gcn_wparent import GCNConvWParent
class HierarchicalGraphNet(torch.nn.Module):
"""The Hierarchical GraphNet
TODO: update docstring
"""
| [
6738,
1257,
310,
10141,
1330,
13027,
198,
198,
11748,
2055,
355,
2055,
62,
75,
280,
85,
391,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
267,
22296,
13,
34960,
1676,
381,
445,
13,
43132,
62,
12685,... | 3.093168 | 161 |
# PURPOSE:script to get coverage of test suite for Defects4J defects
# INPUT: script requires <path-to-defects4j> as command line argument
# OUTPUT: output of the script is Defects4JCoverage.csv that lists Project, DefectId, and StatementCoverage for all the defects of Defects4J
# HOW TO RUN: run the script using command: python getCoverageDetails.py <path-to-defects4j>
# REQUIREMENTS AND DEPENDENCIES: script requires Defects4J installed on system"
import os
import commands
import sys
if len(sys.argv) < 2:
print "ERROR: Please provide path to Defects4J directory"
sys.exit()
defects4jpath = str(sys.argv[1]) # path to Defects4J
outputfile = open("Defects4JCoverage.csv", 'w')
outputfile.write("Project,DefectId,StatementCoverage\n")
projects = ["Chart", "Lang", "Math", "Time"]
noofdefects = {}
noofdefects["Chart"] = 26
noofdefects["Lang"] = 65
noofdefects["Math"] = 106
noofdefects["Time"] = 27
for proj in projects:
for i in range(1,noofdefects[proj]+1):
command = defects4jpath + "/framework/bin/defects4j checkout -p " + proj + " -v " + str(i) + "b -w /tmp/" + proj.lower() + "_" + str(i) + "_buggy"
print command
checkoutput = commands.getoutput(command)
if checkoutput:
os.chdir("/tmp/" + proj.lower() + "_" + str(i) + "_buggy")
command = defects4jpath + "/framework/bin/defects4j coverage"
print command
covoutput = commands.getoutput(command)
print covoutput
lines = covoutput.split('\n')
found=0
for l in lines:
if l.find("Line coverage:")!=-1 :
found=1
stmtcoverage = l[l.find(":")+2:len(l)]
if found==1:
outline = proj + "," + str(i) + "," + str(stmtcoverage)
outputfile.write(outline)
outputfile.write('\n')
outputfile.close()
| [
2,
33079,
48933,
25,
12048,
284,
651,
5197,
286,
1332,
18389,
329,
2896,
478,
82,
19,
41,
22448,
198,
2,
3268,
30076,
25,
4226,
4433,
1279,
6978,
12,
1462,
12,
4299,
478,
82,
19,
73,
29,
355,
3141,
1627,
4578,
198,
2,
16289,
30076... | 2.472651 | 713 |
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2021, Francisco Palm
#
# Licensed under the terms of the MIT license
# ----------------------------------------------------------------------------
"""
Spyder Pomodoro Timer Preferences Page.
"""
from qtpy.QtWidgets import QGridLayout, QGroupBox, QVBoxLayout
from spyder.api.preferences import PluginConfigPage
from spyder.api.translations import get_translation
from spyder_pomodoro_timer.spyder.config import POMODORO_DEFAULT
_ = get_translation("spyder_pomodoro_timer.spyder")
# --- PluginConfigPage API
# ------------------------------------------------------------------------
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
16529,
10541,
198,
2,
15069,
10673,
33448,
11,
6033,
18358,
198,
2,
198,
2,
49962,
739,
262,
2846,
286,
262,
17168,
5964,
198,
2,
16529,
10541,
198,
37811,
198,
456... | 4.016949 | 177 |
import argparse
arg_lists = []
parser = argparse.ArgumentParser()
data_arg = add_argument_group('data')
data_arg.add_argument('--path', type=str, default='frames/')
data_arg.add_argument('--img_fmt', type=str, default='jpg')
data_arg.add_argument('--model', type=str, default='output.pth')
data_arg.add_argument('--run', type=int, default= 2) # example 1=2x 2=4x 3=8x ...
startnum=0
args = parser.parse_args()
import modules.generate
while args.runtimes>startnum:
generate.interpolation(batch_size=4, temp_img = args.path, fp16=True, modelp=args.model,img_fmt=args.img_fmt)
startnum+=1
| [
11748,
1822,
29572,
628,
198,
853,
62,
20713,
796,
17635,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
628,
198,
7890,
62,
853,
796,
751,
62,
49140,
62,
8094,
10786,
7890,
11537,
198,
7890,
62,
853,
13,
2860,
62,
49140,... | 2.657778 | 225 |
import featurize, load_data, numpy as np
import sklearn.linear_model
from scipy import stats
import statsmodels.api as sm
import sys
from sklearn import preprocessing
import pandas as pd
data = load_data.load_data()
data.tempo /= np.max(data.tempo)
audio_features1 = ['mode','tempo','danceability','acousticness','instrumentalness']
topics=["topic-0-nice-bit", "topic-1-sir-dear", "topic-2-christmas-la", "topic-3-dad-mom", "topic-4-sir-colonel", "topic-5-um-work", "topic-6-president-mr.", "topic-7-japanese-dawson", "topic-8-unsub-garcia", "topic-9-game-team", "topic-10-sir-captain", "topic-11-mr.-court", "topic-12-boat-water", "topic-13-leave-understand", "topic-14-fuck-shit", "topic-15-war-country", "topic-16-years-world", "topic-17-plane-move", "topic-18-captain-ship", "topic-19-police-kill", "topic-20-bit-mum", "topic-21-ah-aah", "topic-22-'t-narrator", "topic-23-sighs-chuckles", "topic-24-ya-'em", "topic-25-remember-feel", "topic-26-boy-huh", "topic-27-mr.-sir", "topic-28-dr.-doctor", "topic-29-father-lord", "topic-30-money-business", "topic-31-alright-lt", "topic-32-sir-brother", "topic-33-school-class", "topic-34-vic-jax", "topic-35-gibbs-mcgee", "topic-36-monsieur-madame", "topic-37-baby-yo", "topic-38-agent-security", "topic-39-kill-dead", "topic-40-music-show", "topic-41-ofthe-thankyou", "topic-42-dude-cool", "topic-43-spanish-el", "topic-44-eat-nice", "topic-45-murder-killed", "topic-46-car-drive", "topic-47-town-horse", "topic-48-film-movie", "topic-49-woman-married"]
genre_features = [d for d in list(data.columns) if 'genre_' in d]
feats = topics + audio_features1 + genre_features
# e.g. python regress_topics_on_audio.py danceability
run_regression_cem(sys.argv[1], feats) | [
11748,
2218,
333,
1096,
11,
3440,
62,
7890,
11,
299,
32152,
355,
45941,
198,
11748,
1341,
35720,
13,
29127,
62,
19849,
198,
6738,
629,
541,
88,
1330,
9756,
198,
11748,
9756,
27530,
13,
15042,
355,
895,
198,
11748,
25064,
198,
6738,
13... | 2.528719 | 679 |
import argparse
import json
import sys
import os
import torch
import misc_utils as utils
"""
Arg parse
opt = parse_args()
"""
opt = parse_args()
opt.device = 'cuda:' + opt.gpu_ids if torch.cuda.is_available() and opt.gpu_ids != '-1' else 'cpu'
if opt.opt:
with open(opt.opt, 'r') as f:
a = json.load(f)
for k, v in a.items():
setattr(opt, k, v)
if opt.debug:
opt.save_freq = 1
opt.eval_freq = 1
opt.log_freq = 1
if opt.tag != 'cache':
pid = f'[PID:{os.getpid()}]'
with open('run_log.txt', 'a') as f:
f.writelines(utils.get_time_str(fmt="%Y-%m-%d %H:%M:%S") + ' ' + pid + ' ' + get_command_run() + '\n')
# utils.print_args(opt)
| [
11748,
1822,
29572,
198,
11748,
33918,
198,
11748,
25064,
198,
11748,
28686,
198,
198,
11748,
28034,
198,
198,
11748,
12747,
62,
26791,
355,
3384,
4487,
198,
198,
37811,
198,
220,
220,
220,
20559,
21136,
198,
220,
220,
220,
2172,
796,
2... | 2.082111 | 341 |
#
# Copyright 2008-2012 NVIDIA Corporation
# Copyright 2009-2010 University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""Basic syntactic rewrites for Copperhead compiler.
This module implements the rewrite passes used by the Copperhead
compiler to transform the input program into a more easily analyzed
form. The routines in this module assume that the syntax trees they are
given are well-formed, but they do not generally make any assumptions
about type soundness.
The rewrites provided by this module are fairly standard and operate on
the source program without consideration for any parallelism.
Supported rewrites include:
o Closure conversion
o Lambda lifting
o Single assignment conversion
"""
import coresyntax as S
import pltools
from utility import flatten
import copy
import coretypes as T
def single_assignment_conversion(stmt, env={}, exceptions=set()):
'Rename locally declared variables so that each is bound exactly once'
rewrite = SingleAssignmentRewrite(env, exceptions)
return rewrite.rewrite(stmt)
class LambdaLifter(S.SyntaxRewrite):
"""
Convert every expression of the form:
lambda x1,...,xn: E
into a reference to a proceduce __lambdaN and add
def __lambdaN(x1,...,xn): return E
to the procedure list.
This rewriter assumes that closure conversion has already been
performed. In other words, there are no freely occurring
local variables in the body of the lambda expression.
"""
class ProcedureFlattener(S.SyntaxRewrite):
"""
Flatten the list of defined procedures so that no definition is
nested within another procedure. This should only be applied after
closure conversion and lambda lifting are complete.
"""
# XXX If things other than procedures become allowed as top-level
# forms, make sure that they are handled here.
# XXX Most of the code in this rewriter simply serves to track
# variables defined in the current scope. That should be
# abstracted into a more generic base class that could be used
# elsewhere.
# XXX This rewrite rule -- coupled with the rule for _Procedure in
# _ClosureConverter -- is an ugly hack for rewriting calls to
# procedures. We should find a more elegant solution!
def closure_conversion(ast, globals=None):
"""
Detect and explicitly tag all variables in the given syntax tree
which are lexically closed over by lambdas or nested procedure
definitions.
A variable occurring within a lambda/procedure is considered to form
a closure if:
- it is not bound as a formal parameter of the lambda/procedure
- it is bound in the containing scope of the lambda/procedure
Such variables are lifted into arguments to explicit "closure"
forms, and are passed as explicit arguments to the nested
lambda/procedure.
e.g., lambda x: lambda y: x =>
lambda x: closure([x], lambda y, _K0: _K0)
Global variables (if any) defined in the globals parameter are never
closed over, since they are globally visible.
The copperhead.interlude module provide a native Python
implementation of the Copperhead closure() expression.
"""
converter = _ClosureConverter(globals=globals)
converted = converter.rewrite(ast)
return converted
class ConditionalProtector(S.SyntaxRewrite):
"""
Convert every expression of the form:
E1 if P else E2
into the equivalent form:
((lambda: E1) if P else (lambda: E2))()
The purpose of this rewriter is to protect the branches of the
conditional during later phases of the compiler. It guarantees that
exactly one of E1/E2 will ever be evaluated.
"""
| [
2,
198,
2,
220,
220,
15069,
3648,
12,
6999,
15127,
10501,
198,
2,
220,
15069,
3717,
12,
10333,
2059,
286,
3442,
198,
2,
220,
198,
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
19... | 3.334866 | 1,305 |
from Analisis_Ascendente.Instrucciones.instruccion import Instruccion
| [
6738,
1052,
27315,
271,
62,
32,
1416,
437,
21872,
13,
6310,
622,
535,
295,
274,
13,
8625,
622,
535,
295,
1330,
2262,
622,
535,
295,
201,
198,
201,
198
] | 2.517241 | 29 |
from fathom import Point, ORIGIN
from fathom.tikz import Canvas
import fathom.geometry as geo
import fathom.layout as layout
import fathom.tikz.colors as colors
from itertools import *
BRANCH = 3
| [
6738,
277,
32910,
1330,
6252,
11,
43901,
1268,
198,
6738,
277,
32910,
13,
83,
1134,
89,
1330,
1680,
11017,
198,
11748,
277,
32910,
13,
469,
15748,
355,
40087,
198,
11748,
277,
32910,
13,
39786,
355,
12461,
198,
11748,
277,
32910,
13,
... | 3.078125 | 64 |
# Could eventually remove this code: Is this needed in unit tests?
"""
Object definitions that are used for testing.
"""
from typing import Iterator, Tuple, Dict
import numpy as np
from ..datatypes.common import StateIdAndCandidate
from ..datatypes.hp_ranges import HyperparameterRanges_Impl, \
HyperparameterRangeContinuous, HyperparameterRangeInteger, \
HyperparameterRangeCategorical, HyperparameterRanges
from ..datatypes.scaling import LogScaling, LinearScaling
from ..datatypes.tuning_job_state import TuningJobState
from ..gpautograd.constants import MCMCConfig, OptimizationConfig
from ..gpautograd.gp_regression import GaussianProcessRegression
from ..gpautograd.gpr_mcmc import GPRegressionMCMC
from ..gpautograd.kernel import Matern52, KernelFunction
from ..gpautograd.warping import WarpedKernel, Warping
from ..tuning_algorithms.base_classes import CandidateGenerator, dictionarize_objective
class RepeatedCandidateGenerator(CandidateGenerator):
"""Generates candidates from a fixed set. Used to test the deduplication logic."""
# Example black box function, with adjustable location of global minimum.
# Potentially could catch issues with optimizer, e.g. if the optimizer
# ignoring somehow candidates on the edge of search space.
# A simple quadratic function is used.
| [
2,
10347,
4191,
4781,
428,
2438,
25,
1148,
428,
2622,
287,
4326,
5254,
30,
198,
198,
37811,
198,
10267,
17336,
326,
389,
973,
329,
4856,
13,
198,
37811,
198,
198,
6738,
19720,
1330,
40806,
1352,
11,
309,
29291,
11,
360,
713,
198,
11... | 3.554348 | 368 |
from django.db.models.signals import post_delete, post_init, post_save
from django.dispatch import receiver
from .audit_log import log
@receiver(post_delete)
@receiver(post_init)
@receiver(post_save)
| [
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
12683,
874,
1330,
1281,
62,
33678,
11,
1281,
62,
15003,
11,
1281,
62,
21928,
198,
6738,
42625,
14208,
13,
6381,
17147,
1330,
9733,
198,
198,
6738,
764,
3885,
270,
62,
6404,
1330,
2604,
628,... | 2.875 | 72 |
from setuptools import setup
setup(
name='pyutils',
version='0.0.1rc',
packages=['pyutils'],
url='https://github.com/DANS-repo/pyutils',
license='Apache License Version 2.0',
author='hvdb',
author_email='',
description='A collection of utility methods, primarily written for use in notebooks',
install_requires=['pandas']
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
9078,
26791,
3256,
198,
220,
220,
220,
2196,
11639,
15,
13,
15,
13,
16,
6015,
3256,
198,
220,
220,
220,
10392,
28,
17816,
9078,
26791,
6,
4357... | 2.734848 | 132 |
import zajem_podatkov_orodja as orodja
import csv
import os
# naredimo slovar slovarjev za ustavljanje podatkov iz csv datoteke
slovar = {}
for leto in range(1994, 2020):
slovar[leto] = {}
# noter ustavimo podatke
with open(os.path.join(orodja.mapa, 'esc.csv'), newline='') as dat:
reader = csv.DictReader(dat, delimiter=';')
for vrstica in reader:
leto, država, točke = int(vrstica['Year']), vrstica['To country'], int(vrstica['Points '])
if leto < 1994 or vrstica['(semi-) final'] != 'f': # upoštevam le finalna tekmovanja od 1994 naprej
continue
elif država == 'Serbia & Montenegro': # te države, ki je obstajala le nekaj let, ne bom posebej obravnaval
continue
elif 'Macedonia' in država: # da se izognem 'F.Y.R. Macedonia' in 'North Macedonia'
država = 'Macedonia'
elif vrstica['From country'] not in slovar[leto]: # dodamo še države, ki se niso uvrstile v finale. Te bodo imele 0 točk
slovar[leto][vrstica['From country']] = 0
if država not in slovar[leto]:
slovar[leto][država] = točke
else:
slovar[leto][država] += točke
# preuredimo slovar slovarjev v seznam slovarjev
seznam_podatkov = []
for leto in slovar:
for država in slovar[leto]:
if leto >= 2016: # leta 2016 so spremenili točkovni sistem, tako da se štejejo dvojne točke
točke = slovar[leto][država] // 2
else:
točke = slovar[leto][država]
seznam_podatkov.append({'leto': leto, 'država': država, 'točke': točke})
orodja.zapisi_csv(seznam_podatkov, ['leto', 'država', 'točke'], os.path.join(orodja.mapa, 'uvrstitve.csv')) | [
11748,
1976,
1228,
368,
62,
33320,
265,
21862,
62,
273,
375,
6592,
355,
267,
14892,
6592,
198,
11748,
269,
21370,
198,
11748,
28686,
628,
198,
2,
299,
1144,
25147,
1017,
709,
283,
1017,
709,
283,
73,
1990,
1976,
64,
334,
301,
615,
7... | 1.963387 | 874 |
import numpy
N, M, P = map(int, input().split())
arr_n = numpy.array([input().strip().split() for _ in range(N)], int)
arr_m = numpy.array([input().strip().split() for _ in range(M)], int)
ar_N = numpy.reshape(arr_n, (N, P))
ar_M = numpy.reshape(arr_m, (M, P))
print(numpy.concatenate((ar_N, ar_M), axis = 0)) | [
11748,
299,
32152,
198,
198,
45,
11,
337,
11,
350,
796,
3975,
7,
600,
11,
5128,
22446,
35312,
28955,
198,
198,
3258,
62,
77,
796,
299,
32152,
13,
18747,
26933,
15414,
22446,
36311,
22446,
35312,
3419,
329,
4808,
287,
2837,
7,
45,
8,... | 2.284672 | 137 |
import requests
from scraping.quotes.pages.quotes_page import QuotesPage
page_content = requests.get('http://quotes.toscrape.com').content
page = QuotesPage(page_content)
for quote in page.quotes:
print(quote)
| [
11748,
7007,
198,
198,
6738,
46743,
13,
421,
6421,
13,
31126,
13,
421,
6421,
62,
7700,
1330,
2264,
6421,
9876,
198,
198,
7700,
62,
11299,
796,
7007,
13,
1136,
10786,
4023,
1378,
421,
6421,
13,
83,
17500,
13484,
13,
785,
27691,
11299,
... | 3.013889 | 72 |
r"""
Modules performing small, commonly used tasks throughout the package.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from math import ceil
import numpy as np
from warnings import warn
from astropy.table import Table
from ..custom_exceptions import HalotoolsError
__all__ = ['SampleSelector']
def compute_conditional_percentiles(**kwargs):
r"""
In bins of the ``prim_haloprop``, compute the rank-order percentile
of the input ``table`` based on the value of ``sec_haloprop``.
Parameters
----------
table : astropy table, optional
a keyword argument that stores halo catalog being used to make mock galaxy population
If a `table` is passed, the `prim_haloprop_key` and `sec_haloprop_key` keys
must also be passed. If not passing a `table`, you must directly pass the
`prim_haloprop` and `sec_haloprop` keyword arguments.
prim_haloprop_key : string, optional
Name of the column of the input ``table`` that will be used to access the
primary halo property. `compute_conditional_percentiles` bins the ``table`` by
``prim_haloprop_key`` when computing the result.
sec_haloprop_key : string, optional
Name of the column of the input ``table`` that will be used to access the
secondary halo property. `compute_conditional_percentiles` bins the ``table`` by
``prim_haloprop_key``, and in each bin uses the value stored in ``sec_haloprop_key``
to compute the ``prim_haloprop``-conditioned rank-order percentile.
prim_haloprop : array_like, optional
Array storing the primary halo property used to bin the input points.
If a `prim_haloprop` is passed, you must also pass a `sec_haloprop`.
sec_haloprop : array_like, optional
Array storing the secondary halo property used to define the conditional percentiles
in each bin of `prim_haloprop`.
prim_haloprop_bin_boundaries : array, optional
Array defining the boundaries by which we will bin the input ``table``.
Default is None, in which case the binning will be automatically determined using
the ``dlog10_prim_haloprop`` keyword.
dlog10_prim_haloprop : float, optional
Logarithmic spacing of bins of the mass-like variable within which
we will assign secondary property percentiles. Default is 0.2.
Examples
--------
>>> from halotools.sim_manager import FakeSim
>>> fakesim = FakeSim()
>>> result = compute_conditional_percentiles(table = fakesim.halo_table, prim_haloprop_key = 'halo_mvir', sec_haloprop_key = 'halo_vmax')
Notes
-----
The sign of the result is such that in bins of the primary property,
*smaller* values of the secondary property
receive *smaller* values of the returned percentile.
"""
if 'table' in kwargs:
table = kwargs['table']
try:
prim_haloprop_key = kwargs['prim_haloprop_key']
prim_haloprop = table[prim_haloprop_key]
sec_haloprop_key = kwargs['sec_haloprop_key']
sec_haloprop = table[sec_haloprop_key]
except KeyError:
msg = ("\nWhen passing an input ``table`` to the ``compute_conditional_percentiles`` method,\n"
"you must also pass ``prim_haloprop_key`` and ``sec_haloprop_key`` keyword arguments\n"
"whose values are column keys of the input ``table``\n")
raise HalotoolsError(msg)
else:
try:
prim_haloprop = kwargs['prim_haloprop']
sec_haloprop = kwargs['sec_haloprop']
except KeyError:
msg = ("\nIf not passing an input ``table`` to the ``compute_conditional_percentiles`` method,\n"
"you must pass a ``prim_haloprop`` and ``sec_haloprop`` arguments\n")
raise HalotoolsError(msg)
def compute_prim_haloprop_bins(dlog10_prim_haloprop=0.05, **kwargs):
r"""
Parameters
----------
prim_haloprop : array
Array storing the value of the primary halo property column of the ``table``
passed to ``compute_conditional_percentiles``.
prim_haloprop_bin_boundaries : array, optional
Array defining the boundaries by which we will bin the input ``table``.
Default is None, in which case the binning will be automatically determined using
the ``dlog10_prim_haloprop`` keyword.
dlog10_prim_haloprop : float, optional
Logarithmic spacing of bins of the mass-like variable within which
we will assign secondary property percentiles. Default is 0.2.
Returns
--------
output : array
Numpy array of integers storing the bin index of the prim_haloprop bin
to which each halo in the input table was assigned.
"""
try:
prim_haloprop = kwargs['prim_haloprop']
except KeyError:
msg = ("The ``compute_prim_haloprop_bins`` method "
"requires the ``prim_haloprop`` keyword argument")
raise HalotoolsError(msg)
try:
prim_haloprop_bin_boundaries = kwargs['prim_haloprop_bin_boundaries']
except KeyError:
lg10_min_prim_haloprop = np.log10(np.min(prim_haloprop))-0.001
lg10_max_prim_haloprop = np.log10(np.max(prim_haloprop))+0.001
num_prim_haloprop_bins = (lg10_max_prim_haloprop-lg10_min_prim_haloprop)/dlog10_prim_haloprop
prim_haloprop_bin_boundaries = np.logspace(
lg10_min_prim_haloprop, lg10_max_prim_haloprop,
num=int(ceil(num_prim_haloprop_bins)))
# digitize the masses so that we can access them bin-wise
output = np.digitize(prim_haloprop, prim_haloprop_bin_boundaries)
# Use the largest bin for any points larger than the largest bin boundary,
# and raise a warning if such points are found
Nbins = len(prim_haloprop_bin_boundaries)
if Nbins in output:
msg = ("\n\nThe ``compute_prim_haloprop_bins`` function detected points in the \n"
"input array of primary halo property that were larger than the largest value\n"
"of the input ``prim_haloprop_bin_boundaries``. All such points will be assigned\n"
"to the largest bin.\nBe sure that this is the behavior you expect for your application.\n\n")
warn(msg)
output = np.where(output == Nbins, Nbins-1, output)
return output
compute_prim_haloprop_bins_dict = {}
compute_prim_haloprop_bins_dict['prim_haloprop'] = prim_haloprop
try:
compute_prim_haloprop_bins_dict['prim_haloprop_bin_boundaries'] = (
kwargs['prim_haloprop_bin_boundaries'])
except KeyError:
pass
try:
compute_prim_haloprop_bins_dict['dlog10_prim_haloprop'] = kwargs['dlog10_prim_haloprop']
except KeyError:
pass
prim_haloprop_bins = compute_prim_haloprop_bins(**compute_prim_haloprop_bins_dict)
output = np.zeros_like(prim_haloprop)
# sort on secondary property only with each mass bin
bins_in_halocat = set(prim_haloprop_bins)
for ibin in bins_in_halocat:
indices_of_prim_haloprop_bin = np.where(prim_haloprop_bins == ibin)[0]
num_in_bin = len(sec_haloprop[indices_of_prim_haloprop_bin])
# Find the indices that sort by the secondary property
ind_sorted = np.argsort(sec_haloprop[indices_of_prim_haloprop_bin])
percentiles = np.zeros(num_in_bin)
percentiles[ind_sorted] = (np.arange(num_in_bin) + 1.0) / float(num_in_bin)
# place the percentiles into the catalog
output[indices_of_prim_haloprop_bin] = percentiles
return output
class SampleSelector(object):
r""" Container class for commonly used sample selections.
"""
@staticmethod
def host_halo_selection(return_subhalos=False, **kwargs):
r""" Method divides sample in to host halos and subhalos, and returns
either the hosts or the hosts and the subs depending
on the value of the input ``return_subhalos``.
"""
table = kwargs['table']
mask = table['halo_upid'] == -1
if return_subhalos is False:
return table[mask]
else:
return table[mask], table[~mask]
@staticmethod
def property_range(lower_bound=-float("inf"), upper_bound=float("inf"),
return_complement=False, host_halos_only=False, subhalos_only=False, **kwargs):
r""" Method makes a cut on an input table column based on an input upper and lower bound, and
returns the cut table.
Parameters
----------
table : Astropy Table object, keyword argument
key : string, keyword argument
Column name that will be used to apply the cut
lower_bound : float, optional keyword argument
Minimum value for the input column of the returned table. Default is :math:`-\infty`.
upper_bound : float, optional keyword argument
Maximum value for the input column of the returned table. Default is :math:`+\infty`.
return_complement : bool, optional keyword argument
If True, `property_range` gives the table elements that do not pass the cut
as the second return argument. Default is False.
host_halos_only : bool, optional keyword argument
If true, `property_range` will use the `host_halo_selection` method to
make an additional cut on the sample so that only host halos are returned.
Default is False
subhalos_only : bool, optional keyword argument
If true, `property_range` will use the `host_halo_selection` method to
make an additional cut on the sample so that only subhalos are returned.
Default is False
Returns
-------
cut_table : Astropy Table object
Examples
---------
To demonstrate the `property_range` method, we will start out by loading
a table of halos into memory using the `FakeSim` class:
>>> from halotools.sim_manager import FakeSim
>>> halocat = FakeSim()
>>> halos = halocat.halo_table
To make a cut on the halo catalog to select halos in a specific mass range:
>>> halo_sample = SampleSelector.property_range(table = halos, key = 'halo_mvir', lower_bound = 1e12, upper_bound = 1e13)
To apply this same cut, and also only select host halos passing the cut, we use the ``host_halos_only`` keyword:
>>> host_halo_sample = SampleSelector.property_range(table = halos, key = 'halo_mvir', lower_bound = 1e12, upper_bound = 1e13, host_halos_only=True)
The same applies if we only want subhalos returned only now we use the ``subhalos_only`` keyword:
>>> subhalo_sample = SampleSelector.property_range(table = halos, key = 'halo_mvir', lower_bound = 1e12, upper_bound = 1e13, subhalos_only=True)
"""
table = kwargs['table']
# First apply the host halo cut, if applicable
if (host_halos_only is True) & (subhalos_only is True):
raise KeyError("You cannot simultaneously select only host halos and only subhalos")
elif host_halos_only is True:
table = SampleSelector.host_halo_selection(table=table)
elif subhalos_only is True:
hosts, table = SampleSelector.host_halo_selection(table=table, return_subhalos=True)
key = kwargs['key']
mask = (table[key] >= lower_bound) & (table[key] <= upper_bound)
if return_complement is True:
return table[mask], table[np.invert(mask)]
else:
return table[mask]
@staticmethod
def split_sample(**kwargs):
r""" Method divides a sample into subsamples based on the percentile ranking of a given property.
Parameters
----------
table : Astropy Table object, keyword argument
key : string, keyword argument
Column name that will be used to define the percentiles
percentiles : array_like
Sequence of percentiles used to define the returned subsamples. If ``percentiles``
has more than one element, the elements must be monotonically increasing.
If ``percentiles`` is length-N, there will be N+1 returned subsamples.
Returns
-------
subsamples : list
Examples
--------
To demonstrate the `split_sample` method, we will start out by loading
a table of halos into memory using the `FakeSim` class:
>>> from halotools.sim_manager import FakeSim
>>> halocat = FakeSim()
>>> halos = halocat.halo_table
We can easily use `split_sample` to divide the sample into a high-Vmax and low-Vmax subsamples:
>>> sample_below_median, sample_above_median = SampleSelector.split_sample(table = halos, key = 'halo_vmax', percentiles = 0.5)
Likewise, we can do the same thing to divide the sample into quartiles:
>>> lowest, lower, higher, highest = SampleSelector.split_sample(table = halos, key = 'halo_zhalf', percentiles = [0.25, 0.5, 0.75])
The following alternative syntax is also supported:
>>> subsample_collection = SampleSelector.split_sample(table = halos, key = 'halo_zhalf', percentiles = [0.25, 0.5, 0.75])
>>> lowest, lower, higher, highest = subsample_collection
"""
table = kwargs['table']
if not isinstance(table, Table):
raise TypeError("Input table must be an Astropy Table instance")
key = kwargs['key']
if key not in list(table.keys()):
raise KeyError("The ``{0}`` key does not appear in the table you are trying \n"
"to split into subsamples".format(key))
table.sort(key)
percentiles = kwargs['percentiles']
percentiles = np.array(percentiles)
if np.shape(percentiles) == ():
percentiles = np.array([percentiles])
num_total = len(table)
if len(percentiles) >= num_total:
raise ValueError("Input length of percentiles must be less than input table length")
indices = percentiles*num_total
indices = np.insert(indices, 0, 0)
percentiles = np.insert(percentiles, 0, 0)
indices = indices.astype(int)
indices = np.append(indices, len(table))
percentiles = np.append(percentiles, 1.0)
d = np.diff(indices)
d[-1] -= 1
if 0 in d:
print("Raise exception: too many percentile bins")
idx_too_few = np.nanargmin(d)
raise ValueError("The input percentiles spacing is too fine.\n"
"For example, there are no table elements in the percentile range (%.2f, %.2f)" %
(percentiles[idx_too_few], percentiles[idx_too_few+1]))
result = np.zeros(len(indices)-1, dtype=object)
for i, first_idx, last_idx in zip(list(range(len(result))), indices[:-1], indices[1:]):
result[i] = table[first_idx:last_idx]
return result
| [
81,
37811,
198,
5841,
5028,
9489,
1402,
11,
8811,
973,
8861,
3690,
262,
5301,
13,
198,
37811,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
1... | 2.50156 | 6,091 |
from flask import Flask, send_from_directory, session, request
import os
app = Flask(__name__)
from route.top import top
from route.prj import prj
app.register_blueprint(top)
app.register_blueprint(prj)
# sessionを有効にするための秘密鍵
app.secret_key = os.environ.get('SECRET_KEY')
# limit upload file size : 1MB
app.config['MAX_CONTENT_LENGTH'] = 1 * 1024 * 1024
@app.route('/favicon.ico')
if __name__ == '__main__':
app.debug = True
app.run(host='127.0.0.1',port=5000)
| [
6738,
42903,
1330,
46947,
11,
3758,
62,
6738,
62,
34945,
11,
6246,
11,
2581,
198,
11748,
28686,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
198,
6738,
6339,
13,
4852,
1330,
1353,
198,
6738,
6339,
13,
1050,
73,
1330,
778,
... | 2.473684 | 190 |
import dis
import unittest
import sys
from typing import List
from typing import Optional
from crosshair.diff_behavior import BehaviorDiff
from crosshair.diff_behavior import diff_behavior
from crosshair.fnutil import walk_qualname
from crosshair.fnutil import FunctionInfo
from crosshair.options import AnalysisOptions
from crosshair.options import DEFAULT_OPTIONS
from crosshair.util import debug
from crosshair.util import set_debug
foo1 = FunctionInfo.from_fn(_foo1)
foo2 = FunctionInfo.from_fn(_foo2)
foo3 = FunctionInfo.from_fn(_foo3)
if __name__ == "__main__":
if ("-v" in sys.argv) or ("--verbose" in sys.argv):
set_debug(True)
unittest.main()
| [
11748,
595,
198,
11748,
555,
715,
395,
198,
11748,
25064,
198,
6738,
19720,
1330,
7343,
198,
6738,
19720,
1330,
32233,
198,
198,
6738,
3272,
27108,
13,
26069,
62,
46571,
1330,
20181,
28813,
198,
6738,
3272,
27108,
13,
26069,
62,
46571,
... | 3.076577 | 222 |
import pytest
import pyogg
import os
from config import Config
# FIXME: This shouldn't be a source of error, but it currently is.
# This works in macOS and probably Linux, but not Windows.
# def test_unicode_filename(pyogg_config: Config):
# filename = str(
# pyogg_config.rootdir
# / "examples/unicode filename 🎵.opus"
# )
# opus_file = pyogg.OpusFile(filename)
| [
11748,
12972,
9288,
198,
11748,
12972,
10332,
198,
11748,
28686,
198,
198,
6738,
4566,
1330,
17056,
628,
220,
220,
220,
220,
198,
2,
44855,
11682,
25,
770,
6584,
470,
307,
257,
2723,
286,
4049,
11,
475,
340,
3058,
318,
13,
198,
2,
7... | 2.649007 | 151 |
from dataclasses import dataclass
from hashlib import md5
MAX_PACKET_SIZE: int = 64
DATA_SIZE: int = 43
CONTINUATION_PREFIX: bytes = b"\xFE\xFD"
SEQ_LIM = 2**32
FINISHER_DATA = b"TEKCAP TSAL"
@dataclass
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
198,
6738,
12234,
8019,
1330,
45243,
20,
198,
198,
22921,
62,
47,
8120,
2767,
62,
33489,
25,
493,
796,
5598,
198,
26947,
62,
33489,
25,
493,
796,
5946,
198,
37815,
1268,
52,
6234,
... | 2.285714 | 91 |
import argparse
import math
import h5py
import numpy as np
import tensorflow as tf
import socket
from scipy import stats
from IPython import embed
import provider
from model import *
# from test_utils import *
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(ROOT_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import indoor3d_util
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--verbose', action='store_true', help='if specified, output color-coded seg obj files')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=4096, help='Point number [default: 4096]')
parser.add_argument('--bandwidth', type=float, default=1., help='Bandwidth for meanshift clustering [default: 1.]')
parser.add_argument('--input_list', type=str, default='data/test_hdf5_file_list_Area5.txt', help='Input data list file')
parser.add_argument('--model_path', type=str, default='log/model.ckpt', help='Path of model')
FLAGS = parser.parse_args()
BATCH_SIZE = 1
NUM_POINT = FLAGS.num_point
GPU_INDEX = FLAGS.gpu
MODEL_PATH = FLAGS.model_path
TEST_FILE_LIST = FLAGS.input_list
BANDWIDTH = FLAGS.bandwidth
output_verbose = FLAGS.verbose
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
OUTPUT_DIR = os.path.join(LOG_DIR, 'test_results')
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
os.system('cp inference_merge.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_inference.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
MAX_NUM_POINT = 4096
NUM_CLASSES = 13
NEW_NUM_CLASSES = 13
HOSTNAME = socket.gethostname()
print("ROOTDIR", ROOT_DIR)
ROOM_PATH_LIST = [os.path.join(ROOT_DIR,line.rstrip()) for line in open(os.path.join(ROOT_DIR, FLAGS.input_list))]
len_pts_files = len(ROOM_PATH_LIST)
if __name__ == "__main__":
test()
LOG_FOUT.close()
| [
11748,
1822,
29572,
201,
198,
11748,
10688,
201,
198,
11748,
289,
20,
9078,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
11192,
273,
11125,
355,
48700,
201,
198,
11748,
17802,
201,
198,
6738,
629,
541,
88,
1330,
9756,
201,
... | 2.47876 | 871 |
# Importing the libraries
import numpy as np
import pandas as pd
import keras
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
import time
# Getting the dataset
X = pd.read_csv("2021VAERSDATA.csv").iloc[:5000, 0: 18].values
Y = pd.read_csv("2021VAERSVAX.csv").iloc[:5000, 0: 5].values
# Ultimate Array (Initialization)
total_data = []
# Matching the patients with the paramaters
for i in range(0, len(X)): # Length of X as X is shorter
primary_key_X = X[i, 0]
selected_data = []
for j in range(0, len(Y)):
primary_key_Y = Y[j, 0]
if primary_key_X == primary_key_Y:
selected_data = [primary_key_Y, Y[j, 2], Y[j, 4], X[i, 17]]
total_data.append(selected_data)
# # Preprocessing the input dataset
# total_data = np.array(total_data, dtype=object)
# Splitting independant to dependant
indep_value = []
dep_value = []
for i in range(0, 4930): # len(total_data)):
curr_element = total_data[i]
if len(curr_element) <= 3:
continue
else:
indep_value.append(curr_element[3])
dep_value.append(curr_element[0:3])
# Applying the Machine Learning Model
indep_value = np.array(indep_value).reshape(-1, 1)
dep_value = np.array(dep_value)
training_set = np.concatenate((dep_value, indep_value), axis=1)
# Further Preprocessing of data
# Removing the nan and U
new_training_set = []
for setv in training_set:
if setv[3] == "Y" or setv[3] == "N":
new_training_set.append(setv)
else:
continue
new_training_set = np.array(new_training_set)
training_set = new_training_set
# Processing the Vaccine Types
vac_type_le = LabelEncoder()
training_set[:, 1] = vac_type_le.fit_transform(training_set[:, 1])
re_covid_le = LabelEncoder()
training_set[:, 3] = re_covid_le.fit_transform(training_set[:, 3])
training_set = np.array(training_set)
le = LabelEncoder()
training_set[:, 2] = le.fit_transform(training_set[:, 2])
# Removing the patient ID
training_set = training_set[:, 1:]
# Extracting the labels
vac_type_label_mapping = dict(
zip(vac_type_le.classes_, vac_type_le.transform(vac_type_le.classes_)))
re_covid_label_mapping = dict(
zip(re_covid_le.classes_, re_covid_le.transform(re_covid_le.classes_)))
le_mapping = dict(
zip(le.classes_, le.transform(le.classes_)))
# Converting values to dep and indep
dep_training = training_set[:, 1].reshape(-1, 1) # !Y
indep_training = np.concatenate(
(training_set[:, 0].reshape(-1, 1), training_set[:, 2].reshape(-1, 1)), axis=1) # !X
# Scaling the indep data
sc = StandardScaler()
sc.fit_transform(indep_training)
print(indep_training.shape)
# Applying the Machine Learning model
nn = Sequential()
# input_shape=(indep_training.shape)))
nn.add(Dense(units=120, activation='relu'))
nn.add(Dense(units=60, activation='relu'))
# nn.add(Dense(units=15, activation='relu'))
nn.add(Dense(units=1))
nn.compile(optimizer="adam", loss="mse",
metrics=[tf.keras.metrics.MeanSquaredError()])
nn.fit(indep_training, dep_training, batch_size=100, epochs=4000)
# # Printing the Labels
print(le_mapping)
print(vac_type_label_mapping)
print(re_covid_label_mapping)
#! Moderna (2)
moderna_optim = nn.predict_classes(np.array(([2, 0], [2, 1])))
print(moderna_optim)
#! Pfizer (2)
pfizer_optim = nn.predict_classes(np.array(([3, 0], [3, 1])))
print(pfizer_optim)
| [
2,
17267,
278,
262,
12782,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
11748,
41927,
292,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
6738,
1341,
35720... | 2.384363 | 1,522 |
from django.urls import path, include
from . import views
urlpatterns=[
path("" , views.index, name="index"),
path("signin", views.signin , name="signin" ),
path("login", views.login , name="login"),
path("logout", views.logout , name="logout"),
path("taskcreation", views.task_creation , name="creation"),
path("taskdisplay", views.task_display , name="display"),
path("zzincrezz", views.increase , name="inc")
] | [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
2291,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
41888,
198,
220,
220,
220,
3108,
7203,
1,
837,
5009,
13,
9630,
11,
1438,
2625,
9630,
12340,
198,
220,
220,
220,
3108,
720... | 2.927152 | 151 |
#!/usr/bin/env python3
import os
import logging
import sys
try:
import multiprocessing
except:
pass
# nose requires multiprocessing and logging to be initialized before the setup
# call, or we'll get a spurious crash on exit.
from setuptools import setup, find_packages
from setuptools.dist import Distribution
is_release = False
if "--release" in sys.argv:
is_release = True
sys.argv.remove("--release")
base = os.path.dirname(os.path.abspath(__file__))
def read(fname):
'''Utility function to read the README file.'''
return open(os.path.join(os.path.dirname(__file__), fname)).read()
# figure out what the install will need
install_requires = [
"setuptools >=0.5",
"flask",
"pyyaml",
"requests",
"schematics",
"python-dateutil",
"requests-futures",
"httpretty",
"aiohttp>=2.3.0",
"aiotask_context",
"pytest-aiohttp",
"pytest-asyncio",
'cryptography',
'python-jose[cryptography]',
'jinja2'
]
setup(
name="rest-helpers",
setup_requires=["vcver"],
vcver={
"is_release": is_release,
"path": base
},
url="https://github.com/WillFr/rest-helpers",
author="Guillaume Grosbois",
author_email="grosbois.guillaume@gmail.com",
description="A set of method to help creating rest services",
packages=find_packages(),
long_description=read('README.md'),
long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python :: 3.5",
"Operating System :: OS Independent"],
package_data={"rest_helpers": ["templates/swagger-ui.html"]},
install_requires=install_requires,
include_package_data=True,
tests_require=[ "mock >=0.7.2",
"coverage",
"httpretty",
"httmock",
"pytest-aiohttp",
"pytest-cov"] + install_requires
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
28686,
198,
11748,
18931,
198,
11748,
25064,
198,
28311,
25,
198,
220,
220,
220,
1330,
18540,
305,
919,
278,
198,
16341,
25,
198,
220,
220,
220,
1208,
198,
2,
9686,
4... | 2.347985 | 819 |
import abc
from .window import Window
from gbvision.models.system import EMPTY_PIPELINE
from gbvision.utils.recorders.recorder import Recorder
class RecordingWindow(Window, abc.ABC):
"""
A basic window that records the stream it receives
:param recording_pipeline: a drawing pipeline to run on the recorded frame, usually you will want this to be the
same as the drawing pipeline
"""
| [
11748,
450,
66,
198,
198,
6738,
764,
17497,
1330,
26580,
198,
6738,
308,
65,
10178,
13,
27530,
13,
10057,
1330,
38144,
9936,
62,
47,
4061,
3698,
8881,
198,
6738,
308,
65,
10178,
13,
26791,
13,
8344,
6361,
13,
8344,
2875,
1330,
3311,
... | 3.349593 | 123 |
from django.contrib import admin
from .models import Transaction
# Register your models here.
admin.site.register(Transaction,TransactionAdmin) | [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
45389,
198,
2,
17296,
534,
4981,
994,
13,
628,
198,
28482,
13,
15654,
13,
30238,
7,
48720,
11,
48720,
46787,
8
] | 4.264706 | 34 |
# tests the --template argument of voila
import pytest
@pytest.fixture
@pytest.mark.gen_test
| [
2,
5254,
262,
1377,
28243,
4578,
286,
7608,
10102,
198,
11748,
12972,
9288,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
31,
9078,
9288,
13,
4102,
13,
5235,
62,
9288,
198
] | 2.939394 | 33 |
from abc import ABCMeta, abstractmethod | [
6738,
450,
66,
1330,
9738,
48526,
11,
12531,
24396
] | 4.333333 | 9 |
#!/usr/bin/env python
#############################################################################
##
# This file is part of Taurus
##
# http://taurus-scada.org
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Taurus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Taurus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Taurus. If not, see <http://www.gnu.org/licenses/>.
##
#############################################################################
"""This module contains a pure Qt widget that displays an image"""
from __future__ import absolute_import
from taurus.external.qt import Qt
__all__ = ["QPixmapWidget"]
__docformat__ = 'restructuredtext'
class QPixmapWidget(Qt.QWidget):
"""This widget displays an image (pixmap). By default the pixmap is
scaled to the widget size and the aspect ratio is kept.
The default alignment of the pixmap inside the widget space is horizontal
left, vertical center."""
DefaultAlignment = Qt.Qt.AlignLeft | Qt.Qt.AlignVCenter
DefaultAspectRatioMode = Qt.Qt.KeepAspectRatio
DefaultTransformationMode = Qt.Qt.SmoothTransformation
def paintEvent(self, paintEvent):
"""Overwrite the paintEvent from QWidget to draw the pixmap"""
pixmap = self._getPixmap()
w, h = self.width(), self.height()
painter = Qt.QPainter(self)
painter.setRenderHint(Qt.QPainter.Antialiasing)
pw, ph = pixmap.width(), pixmap.height()
align = self._alignment
hAlign = align & Qt.Qt.AlignHorizontal_Mask
vAlign = align & Qt.Qt.AlignVertical_Mask
x, y = 0, 0
if hAlign & Qt.Qt.AlignHCenter:
x = (w - pw) // 2
elif hAlign & Qt.Qt.AlignRight:
x = w - pw
if vAlign & Qt.Qt.AlignVCenter:
y = (h - ph) // 2
elif vAlign & Qt.Qt.AlignBottom:
y = h - ph
x, y = max(0, x), max(0, y)
painter.drawPixmap(x, y, pixmap)
@classmethod
#-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
# QT property definition
#-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
def getPixmap(self):
"""Returns the pixmap.Returns None if no pixmap is set.
:return: the current pixmap
:rtype: PyQt4.Qt.QPixmap"""
return self._pixmap
def setPixmap(self, pixmap):
"""Sets the pixmap for this widget. Setting it to None disables pixmap
:param pixmap: the new pixmap
:type pixmap: PyQt4.Qt.QPixmap"""
# make sure to make a copy because of bug in PyQt 4.4. This is actually
# copying the internal bitmap, just the qpixmap, so there is no performance
# penalty here
self._pixmap = Qt.QPixmap(pixmap)
self._setDirty()
self.update()
def resetPixmap(self):
"""Resets the pixmap for this widget."""
self.setPixmap(Qt.QPixmap())
def getAspectRatioMode(self):
"""Returns the aspect ratio to apply when drawing the pixmap.
:return: the current aspect ratio
:rtype: PyQt4.Qt.AspectRatioMode"""
return self._pixmapAspectRatioMode
def setAspectRatioMode(self, aspect):
"""Sets the aspect ratio mode to apply when drawing the pixmap.
:param pixmap: the new aspect ratio mode
:type pixmap: PyQt4.Qt.AspectRatioMode"""
self._pixmapAspectRatioMode = aspect
self._setDirty()
self.update()
def resetAspectRatioMode(self):
"""Resets the aspect ratio mode to KeepAspectRatio"""
self.setAspectRatioMode(self.DefaultAspectRatioMode)
def getTransformationMode(self):
"""Returns the transformation mode to apply when drawing the pixmap.
:return: the current transformation mode
:rtype: PyQt4.Qt.TransformationMode"""
return self._pixmapTransformationMode
def setTransformationMode(self, transformation):
"""Sets the transformation mode to apply when drawing the pixmap.
:param pixmap: the new transformation mode
:type pixmap: PyQt4.Qt.TransformationMode"""
self._pixmapTransformationMode = transformation
self._setDirty()
self.update()
def resetTransformationMode(self):
"""Resets the transformation mode to SmoothTransformation"""
self.setTransformationMode(self.DefaultTransformationMode)
def getAlignment(self):
"""Returns the alignment to apply when drawing the pixmap.
:return: the current alignment
:rtype: PyQt4.Qt.Alignment"""
return self._alignment
def setAlignment(self, alignment):
"""Sets the alignment to apply when drawing the pixmap.
:param pixmap: the new alignment
:type pixmap: PyQt4.Qt.Alignment"""
self._alignment = Qt.Qt.Alignment(alignment)
self.update()
def resetAlignment(self):
"""Resets the transformation mode to Qt.Qt.AlignLeft | Qt.Qt.AlignVCenter"""
self.setAlignment(self.DefaultAlignment)
#: This property holds the widget's pixmap
#:
#: **Access functions:**
#:
#: * :meth:`QPixmapWidget.getPixmap`
#: * :meth:`QPixmapWidget.setPixmap`
#: * :meth:`QPixmapWidget.resetLedStatus`
pixmap = Qt.pyqtProperty("QPixmap", getPixmap, setPixmap,
resetPixmap, doc="the widget's pixmap")
#: This property holds the widget's pixmap aspect ratio mode
#:
#: **Access functions:**
#:
#: * :meth:`QPixmapWidget.getAspectRatioMode`
#: * :meth:`QPixmapWidget.setAspectRatioMode`
#: * :meth:`QPixmapWidget.resetAspectRatioMode`
aspectRatioMode = Qt.pyqtProperty("Qt::AspectRatioMode", getAspectRatioMode,
setAspectRatioMode, resetAspectRatioMode,
doc="the widget's pixmap aspect ratio mode")
#: This property holds the widget's pixmap transformation mode
#:
#: **Access functions:**
#:
#: * :meth:`QPixmapWidget.getTransformationMode`
#: * :meth:`QPixmapWidget.setTransformationMode`
#: * :meth:`QPixmapWidget.resetTransformationMode`
transformationMode = Qt.pyqtProperty("Qt::TransformationMode", getTransformationMode,
setTransformationMode, resetTransformationMode,
doc="the widget's pixmap transformation mode")
#: This property holds the widget's pixmap alignment
#:
#: **Access functions:**
#:
#: * :meth:`QPixmapWidget.getAlignment`
#: * :meth:`QPixmapWidget.setAlignment`
#: * :meth:`QPixmapWidget.resetAlignment`
alignment = Qt.pyqtProperty("Qt::Alignment", getAlignment, setAlignment,
resetAlignment, doc="the widget's pixmap alignment")
def demo():
"QPixmap Widget"
from .demo import qpixmapwidgetdemo # after futurize stage1
return qpixmapwidgetdemo.main()
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
29113,
29113,
7804,
4242,
2,
198,
2235,
198,
2,
770,
2393,
318,
636,
286,
309,
22302,
198,
2235,
198,
2,
2638,
1378,
83,
22302,
12,
1416,
4763,
13,
2398,
198,
2235,
198,
2,
15... | 2.350593 | 3,206 |
#!/usr/bin/env python
import unittest
import xml.dom.minidom
from dominic import xpath
class TestAbbreviations(unittest.TestCase):
"""Section 2.5: Abbreviated Syntax"""
if __name__ == '__main__':
unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
555,
715,
395,
198,
11748,
35555,
13,
3438,
13,
1084,
312,
296,
198,
6738,
7462,
291,
1330,
2124,
6978,
198,
198,
4871,
6208,
4826,
4679,
8903,
602,
7,
403,
715,
395,
13,... | 2.593023 | 86 |
from itertools import tee, zip_longest
from .rangedict import RangeDict
def pairwise(iterable, longest=True):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return (zip_longest if longest else zip)(a, b)
| [
6738,
340,
861,
10141,
1330,
30479,
11,
19974,
62,
6511,
395,
198,
198,
6738,
764,
34457,
713,
1330,
13667,
35,
713,
628,
198,
198,
4299,
5166,
3083,
7,
2676,
540,
11,
14069,
28,
17821,
2599,
198,
220,
220,
220,
366,
82,
4613,
357,
... | 2.345794 | 107 |
import asyncio
import io
import sys
import aiohttp
import aioredis
import attr
from PIL import Image
import imagehash
import numpy as np
"""Posts with these tags will be excluded from indexing.
"""
exclude_tags = [
"loli",
"shota",
"bestiality",
"guro",
"shadman"
]
"""A helper dictionary to convert from single-character ratings to more human-friendly names.
"""
friendly_ratings = {
's': 'Safe',
'q': 'Questionable',
'e': 'Explicit'
}
@attr.s(frozen=True)
async def search_index(redis, imhash, min_threshold=64):
"""Search the index for images with nearby hashes.
Args:
redis (aioredis.Redis): A Redis interface.
imhash (ndarray): An image hash to look up. Must be of type `uint8`.
min_threshold (int): A minimum distance threshold for filtering results.
The result list will only contain images with a result less than
this value.
Returns:
A list of (hash, distance) tuples, sorted by increasing distance.
"""
h_bytes = imhash.tobytes()
keys = []
for idx, val in enumerate(h_bytes):
keys.append(construct_hash_idx_key(idx, val))
hashes = await redis.sunion(*keys)
_t = []
for h in hashes:
arr = np.frombuffer(h, dtype=np.uint8)
dist = hamming_dist(arr, imhash)
if dist < min_threshold:
_t.append((h, dist))
return sorted(_t, key=lambda o: o[1])
async def get_indexed_tags(redis):
"""Get all tags monitored for indexing.
Args:
redis (aioredis.Redis): a Redis interface.
Returns:
A list of monitored tags as `str` objects.
"""
return await redis.lrange('indexed_tags', 0, -1, encoding='utf-8')
async def add_indexed_tag(redis, tag):
"""Add a new tag to be monitored for indexing.
Args:
redis (aioredis.Redis): A Redis interface.
tag (str or bytes): The tag to monitor.
Returns:
The total number of indexed tags (incl. the added tag).
"""
return await redis.lpush('indexed_tags', tag)
async def get_tag_queue_length(redis, tag):
"""Get the current fetch queue length for a given indexed tag.
Args:
redis (aioredis.Redis): A Redis interface.
tag (str or bytes): The indexed tag to inspect.
Returns:
The total number of images awaiting indexing for the tag.
"""
return await redis.llen('index_queue:'+tag)
def diff_hash(img):
"""Compute the difference hash of an image.
Returns:
A `uint8` ndarray.
"""
h = imagehash.dhash(img)
arr = np.packbits(np.where(h.hash.flatten(), 1, 0))
return arr
def avg_hash(img):
"""Compute the average hash of an image.
Returns:
A `uint8` ndarray.
"""
h = imagehash.average_hash(img)
arr = np.packbits(np.where(h.hash.flatten(), 1, 0))
return arr
def combined_hash(img):
"""Compute a combined perceptual hash for an image.
Currently, this is just the concatenation of the dHash and the avgHash.
Returns:
A `uint8` ndarray.
"""
h1 = imagehash.dhash(img)
h1 = np.packbits(np.where(h1.hash.flatten(), 1, 0))
h2 = imagehash.average_hash(img)
h2 = np.packbits(np.where(h2.hash.flatten(), 1, 0))
return np.concatenate((h1, h2))
def hamming_dist(h1, h2):
"""Compute the Hamming distance between two uint8 arrays.
"""
return np.count_nonzero(np.unpackbits(np.bitwise_xor(h1, h2)))
| [
11748,
30351,
952,
198,
11748,
33245,
198,
11748,
25064,
198,
198,
11748,
257,
952,
4023,
198,
11748,
257,
72,
1850,
271,
198,
11748,
708,
81,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
2939,
17831,
198,
11748,
299,
32152,
355,
45941... | 2.287342 | 1,580 |
from psqlextra.models import PostgresPartitionedModel
from .strategy import PostgresPartitioningStrategy
class PostgresPartitioningConfig:
"""Configuration for partitioning a specific model according to the
specified strategy."""
__all__ = ["PostgresPartitioningConfig"]
| [
6738,
26692,
80,
293,
742,
430,
13,
27530,
1330,
2947,
34239,
7841,
653,
276,
17633,
198,
198,
6738,
764,
2536,
4338,
1330,
2947,
34239,
7841,
653,
278,
13290,
4338,
628,
198,
4871,
2947,
34239,
7841,
653,
278,
16934,
25,
198,
220,
22... | 3.688312 | 77 |
""" Example: Shows how to create, train and use an FAQ matcher. """
import urllib3
import csv
import feersum_nlu
from feersum_nlu.rest import ApiException
from examples import feersumnlu_host, feersum_nlu_auth_token
# Configure API key authorization: APIKeyHeader
configuration = feersum_nlu.Configuration()
# configuration.api_key['AUTH_TOKEN'] = feersum_nlu_auth_token
configuration.api_key['X-Auth-Token'] = feersum_nlu_auth_token # Alternative auth key header!
configuration.host = feersumnlu_host
api_instance = feersum_nlu.FaqMatchersApi(feersum_nlu.ApiClient(configuration))
instance_name = 'test_faq'
# text_input_0 = feersum_nlu.TextInput("Can I give my baby tea?",
# lang_code=None) # optional language hint.
caller_name = 'example_caller'
print()
# The testing samples.
text_sample_list = []
with open('testing_samples.csv',
'r', newline='') as csvfile:
csv_reader = csv.reader(csvfile,
delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL)
for row in csv_reader:
if len(row) >= 3:
lang_code = row[2] if row[2] != '' else None
else:
lang_code = None
text_sample_list.append(feersum_nlu.LabelledTextSample(text=row[1],
label=row[0],
lang_code=lang_code))
try:
# print("Get the parameters:")
# api_response = api_instance.faq_matcher_get_params(instance_name)
# print(" type(api_response)", type(api_response))
# print(" api_response", api_response)
# print()
#
# print("Update the parameters:")
# model_params = \
# feersum_nlu.ModelParams(threshold=1.1)
# api_response = api_instance.faq_matcher_set_params(instance_name, model_params)
# print(" type(api_response)", type(api_response))
# print(" api_response", api_response)
# print()
#
# print("Get the details of specific named loaded FAQ matcher:")
# api_response = api_instance.faq_matcher_get_details(instance_name)
# print(" type(api_response)", type(api_response))
# print(" api_response", api_response)
# cm_labels = api_response.cm_labels
# print()
print("Match a question:")
correct = 0
total = 0
for sample in text_sample_list:
if True: # sample.lang_code == 'eng':
text_input = feersum_nlu.TextInput(sample.text,
lang_code=None) # optional language hint.
api_response = api_instance.faq_matcher_retrieve(instance_name, text_input, x_caller=caller_name)
top_k = 2
response_label_set = set()
for i in range(min(top_k, len(api_response))):
response_label_set.add(api_response[i].label[:20])
if sample.label[:20] in response_label_set:
correct = correct + 1
print('.', sample.text, ", ", sample.label)
else:
print('x', sample.text, ", ", sample.label)
total = total + 1
# print(" type(api_response)", type(api_response))
# print(" api_response", api_response)
# print()
print("accuracy =", correct / total)
except ApiException as e:
print("Exception when calling an FAQ matcher operation: %s\n" % e)
except urllib3.exceptions.HTTPError as e:
print("Connection HTTPError! %s\n" % e)
| [
37811,
17934,
25,
25156,
703,
284,
2251,
11,
4512,
290,
779,
281,
18749,
2603,
2044,
13,
37227,
201,
198,
201,
198,
11748,
2956,
297,
571,
18,
201,
198,
11748,
269,
21370,
201,
198,
201,
198,
11748,
730,
364,
388,
62,
77,
2290,
201,... | 2.099654 | 1,736 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628
] | 2.043478 | 23 |
from time import sleep
from selenium.webdriver import Chrome
from selenium.webdriver.remote.webelement import WebElement
from Pages.BasicHomePage import BasicHomePage
| [
6738,
640,
1330,
3993,
201,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
1330,
13282,
220,
201,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
47960,
13,
732,
1350,
1732,
1330,
5313,
20180,
201,
198,
6738,
28221,
13,
26416,
16060,... | 2.878788 | 66 |
#
# Copyright (c) 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ovirtcli.platform import vnc, spice
from ovirtcli.command.command import OvirtCommand
from cli.messages import Messages
| [
2,
198,
2,
15069,
357,
66,
8,
1946,
2297,
10983,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
26... | 3.722513 | 191 |
# -*- coding: UTF-8 -*-
from larksuiteoapi.api import Request, FormData, FormDataFile, set_timeout, set_path_params, set_query_params, \
set_is_response_stream, set_response_stream, set_tenant_key, set_need_help_desk_auth
from larksuiteoapi import Config, ACCESS_TOKEN_TYPE_TENANT, ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_APP, \
APP_TICKET_KEY_PREFIX, DOMAIN_FEISHU, LEVEL_ERROR, LEVEL_DEBUG
# for Cutome APP(企业自建应用)
app_settings = Config.new_internal_app_settings(app_id='cli_a04677****8d01b',
app_secret='XcplX2QLU7X******VJKHd6Yzvt',
verification_token='', encrypt_key='',
help_desk_id='696874*****390932',
help_desk_token='ht-c82db92*******f5cf6e569aa')
# for redis store and logger(level=debug)
# conf = test_config_with_redis_store(DOMAIN_FEISHU, app_settings)
# for memory store and logger(level=debug)
conf = Config("https://open.feishu-boe.cn", app_settings, log_level=LEVEL_DEBUG)
if __name__ == '__main__':
test_ticket_detail()
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
6738,
300,
5558,
84,
578,
78,
15042,
13,
15042,
1330,
19390,
11,
5178,
6601,
11,
5178,
6601,
8979,
11,
900,
62,
48678,
11,
900,
62,
6978,
62,
37266,
11,
900,
62,
2... | 1.950931 | 591 |
from collections import OrderedDict
import torch
import torch.nn as nn
from rc_class.base_model_rc_ridge import Base_model_rc_ridge
| [
6738,
17268,
1330,
14230,
1068,
35,
713,
201,
198,
201,
198,
11748,
28034,
201,
198,
11748,
28034,
13,
20471,
355,
299,
77,
201,
198,
201,
198,
6738,
48321,
62,
4871,
13,
8692,
62,
19849,
62,
6015,
62,
12818,
1330,
7308,
62,
19849,
... | 2.84 | 50 |
"""Prepare Trans10K dataset"""
import os
import torch
import numpy as np
import logging
from PIL import Image
from IPython import embed
from ...config import cfg
from .seg_data_base import SegmentationDataset
from .trans10k_with_fakemix import TransSegmentationWithFakeMix
| [
37811,
37534,
533,
3602,
940,
42,
27039,
37811,
198,
11748,
28686,
198,
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
18931,
198,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
6101,
7535,
1330,
11525,
198,
198,
6738,
2644,
... | 3.493671 | 79 |
import sqlite3
from . import sql
from . import views
from . import exceptions
from . import makes
class DoxygenSQLite3(object):
    """Facade over a Doxygen-generated SQLite3 index database.

    Exposes view factories (``topmost``, ``kinds``, ``make_compound_tree``)
    that build ``views.ListView`` objects backed by ``sql.Statement`` queries,
    plus ``extend``/``compile`` hooks so users can bolt their own factories
    and manual sections onto an instance.
    """
    # Root index-page compound; populated elsewhere (not visible in this chunk) -- TODO confirm.
    indexpage = None
    # sqlite3 connection to the Doxygen index DB; populated elsewhere -- TODO confirm.
    connection = None
    # Type/kind metadata lookup; populated elsewhere -- TODO confirm.
    types = None
    # extend and compile are the same logic, but are separate for API semantics/readability.
    def extend(self, func):
        """
        Extend this object's API via a user-specified function.

        ``func`` is called with this instance and may attach new attributes::

            def add_api_methods(api):
                api.class_doc = api.make_compound_tree(
                    ["class"],
                    api.relations.get("methods")
                )
            manual.extend(add_api_methods)

        Returns whatever ``func`` returns.
        """
        return func(self)
    def compile(self, func):
        """
        Build manual sections via a user-specified function.

        Same mechanics as :meth:`extend` (``func`` receives this instance);
        kept as a separate name purely for call-site readability::

            def add_sections(man):
                man.mount(
                    "example",
                    man.class_doc(name="Example_Test")
                )
            manual.compile(add_sections)

        Returns whatever ``func`` returns.
        """
        return func(self)
    # ---------------------------------- #
    # View factories; used to extend the API and generate manual sections.
    def topmost(self, kinds, brief_description, search_relation=None):
        """
        Generate a view that will find compounds of 'kinds' that have no parent.

        Note: I thought I could build something similar to the default HTML
        manual by throwing the page, class, and group kinds through topmost;
        I was profoundly wrong. There are a number of small caveats regarding
        what appears in those lists and which relations dictate the hierarchy
        it encodes.
        """
        # "No parent" is expressed as: rowid absent from inner_outer for these kinds.
        return views.ListView(
            sql.Statement(self, self._def)._where(
                "base.kind in ('{kinds}') and base.rowid not in (select distinct rowid from inner_outer where [kind:1] in ('{kinds}'))".format(
                    kinds="','".join(kinds)
                )
            ),
            brief_description,
            search_relation=search_relation,
        )
    def kinds(self, kinds, brief_description, search_relation=None):
        """Generate a view that will find elements of 'kinds'.

        Like :meth:`topmost` but without the no-parent restriction.
        """
        return views.ListView(
            sql.Statement(self, self._def)._where(
                "base.kind in ('{kinds}')".format(kinds="','".join(kinds))
            ),
            brief_description,
            search_relation=search_relation,
        )
    def make_compound_tree(self, kinds, search_relation):
        """
        Generate a factory that itself generates views locked on a certain compound.
        Easier to grasp with an example of how it can be used at a higher layer::

            man.class_doc = man.make_compound_tree(
                ["class"],
                man.relations.get("methods")
            )
            man.mount(
                "example",
                man.class_doc(name="Example_Test")
            )

        First, this creates a view-factory named 'class_doc'. It generates views that will search class compounddefs for one matching a consumer-specified property, lock onto the matched class doc, and support enumerating that class's methods.
        Second, it uses the new view factory to generate a view that targets the Example_Test class, and mounts it as a manual section named 'example'.
        """
        # NOTE(review): `compound_tree` is not defined anywhere in this chunk;
        # presumably a nested factory function was defined here in the full
        # source (otherwise this is a NameError at call time) -- verify upstream.
        return compound_tree
    # TODO: I'm skipping a potentially useful method for scaffolding a doc section based on searching a directory for compounds. I took 4 quick swings at this functionality that all ran into intractable problems. I don't want to force a solution, and I don't want to get bogged down in another big segment of functionality before launching.
    #
    # That said, I do want to preserve progress ths far in case it is useful.
    #
    # Here's a basic query that lets you search a directory by name and enumerate its compounds:
    # select def.* from contains join def on contains.inner_rowid=def.rowid where contains.outer_rowid in (select file.rowid from contains join compounddef on contains.outer_rowid=compounddef.rowid join def file on file.rowid=contains.inner_rowid where compounddef.name='obj');
    #
    # This list-view-based model kinda works for the first couple steps of a deeper search:
    # >>> x.doc_search("std")
    # >>> x.doc_search("std obj_armour")
    #
    # But the last part falls flat on its face:
    #
    # >>> x.doc_search("std obj_armour query_ego")
    # doxy_db.exceptions.MalformedQuery: ('Malformed query', "SELECT [innercompounds].* FROM def base JOIN contains as relative ON base.rowid=relative.outer_rowid JOIN def [innercompounds] ON [innercompounds].rowid=relative.inner_rowid WHERE contains.outer_rowid in (select file.rowid from contains join compounddef on contains.outer_rowid=compounddef.rowid join def as file on file.rowid=contains.inner_rowid where compounddef.name='obj') AND base.rowid=? AND innercompounds.name=?", (381, 'query_ego'))
    #
    # This generated query is broken in like 3 places:
    #
    # - the outer contains.outer_rowid would need to be 'relative.outer_rowid'
    # - the first join contains as relative needs to be on relative.inner_rowid
    # - and, most critically, the query needs additional layers to even begin to actually query members of the intended compound.
    #
    # At a conceptual level, I think this approach runs into a few problems (there might be a less-disruptive approach...):
    # - We need to extend the search_relation concept to an additional layer of depth in order to support first jumping from the directory compound to the appropriate sub-compound and then again to its members
    #     - we could in theory just make the feature a little more rigid, and don't allow a restrictable list? or, use inner_compound, but add a custom 'where kind=?' to the query and let people specify a text kind--but we'll also need to overload the parts of the search/find process that lean on relations for depth search.
    # - I'm not entirely sure if the minimal query wrapper API I built is actually capable of handling queries nesting through this many joins or nested selects very robustly
    #     - and it's probably a fool's-errand to try to develop it to that level of edge-case support
    #     - one potential out might be a more basic raw-query mode. The point of the wrapper is to make easier to build up queries that reference each other's parts, but if this task really is an edge case, that scaffolding isn't essential.
    # - It probably needs its own view or view abstraction; there's just too much edge-case stuff.
    #
    # def directory(self, name, search_relation=None):
    #     # TODO Not quite sure where, but somewhere I need to test and/or doc this behavior.
    #     if search_relation is None:
    #         search_relation = self.relations.get("innercompounds")
    #     return views.ListView(
    #         sql.Statement(self, self._def)
    #         ._select("def.*")
    #         ._from("contains")
    #         ._where("contains.outer_rowid in (select file.rowid from contains join compounddef on contains.outer_rowid=compounddef.rowid join def as file on file.rowid=contains.inner_rowid where compounddef.name='{}')".format(name))
    #         ._join(conditions="def on contains.inner_rowid=def.rowid"),
    #         "Contents of directory {}".format(name),
    #         search_relation=search_relation
    #     )
    #
    # Some other notes I had on this concept elsewhere:
    # - we could cheat and substring fns:
    #   select * from compounddef where kind='class' and id_file in (select rowid from files where name like 'obj/doxy_guide%');
    #   man.mount("std", struct().where(id_file=file().where(name like path%)))
    # - I could imagine a chainable API like: classes(actions).members(**optionally kind="function")
| [
11748,
44161,
578,
18,
198,
198,
6738,
764,
1330,
44161,
198,
6738,
764,
1330,
5009,
198,
6738,
764,
1330,
13269,
198,
6738,
764,
1330,
1838,
628,
198,
4871,
360,
23536,
5235,
17861,
578,
18,
7,
15252,
2599,
198,
220,
220,
220,
37227,... | 2.869083 | 2,704 |
"""
Setup file for awesome-bib-builder
"""
import codecs
import os
from setuptools import find_packages, setup
# Get __version__ from _meta.py
_meta_file = os.path.join("src", "_meta.py")
with open(_meta_file) as f:
exec(f.read())
DISTNAME = "awesome-bib-builder"
DESCRIPTION = "Tool for generating an awesome README from bib files."
with codecs.open("src/README.md", encoding="utf-8") as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = __author__
MAINTAINER_EMAIL = __email__
URL = "https://github.com/batflyer/awesome-bayes-net"
LICENSE = __license__
DOWNLOAD_URL = "https://github.com/batflyer/awesome-bayes-net"
VERSION = __version__
CLASSIFIERS = [
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.7",
]
INSTALL_REQUIRES = ["liquidpy==0.0.6", "bibtexparser==1.1.0"]
EXTRAS_REQUIRE = {"tests": ["pytest", "pytest-cov"], "docs": ["sphinx"]}
setup(
name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
packages=find_packages(),
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
)
| [
37811,
198,
40786,
2393,
329,
7427,
12,
65,
571,
12,
38272,
198,
37811,
198,
198,
11748,
40481,
82,
198,
11748,
28686,
198,
198,
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
198,
198,
2,
3497,
11593,
9641,
834,
422,
4808,... | 2.589695 | 524 |
from enum import Enum

# Default XML namespace for the enums generated from the SMIL 2.0 schema.
# NOTE(review): presumably consumed by an XML-binding generator (e.g. xsdata)
# for the definitions that follow -- confirm against the rest of the module.
__NAMESPACE__ = "http://www.w3.org/2001/SMIL20/"
| [
6738,
33829,
1330,
2039,
388,
198,
198,
834,
45,
29559,
47,
11598,
834,
796,
366,
4023,
1378,
2503,
13,
86,
18,
13,
2398,
14,
14585,
14,
12310,
4146,
1238,
30487,
628
] | 2.354839 | 31 |
from snmpagent_unity.commands import user, community, crypto, service

# Every CLI command class exposed by the agent, in display order.
CMD = [
    user.AddUser, user.UpdateUser, user.DeleteUser, user.ListUsers,
    community.CreateCommunity, community.DeleteCommunity,
    crypto.Encrypt, crypto.Decrypt,
    service.Start, service.Stop, service.Restart,
]
# Name -> command-class lookup for dispatching by command name.
CMD_DICT = {command.name: command for command in CMD}
| [
6738,
3013,
3149,
25781,
62,
9531,
13,
9503,
1746,
1330,
2836,
11,
2055,
11,
21473,
11,
2139,
198,
198,
34,
12740,
796,
685,
7220,
13,
4550,
12982,
11,
2836,
13,
10260,
12982,
11,
2836,
13,
38727,
12982,
11,
2836,
13,
8053,
14490,
1... | 2.864407 | 118 |
#!/user/bin/env python
# Author: Wenbo Duan
# Email: pv19120@bristol.ac.uk
# Time: 09/12/2021
# File: utils.py
# Supplementary codes for recording results into .txt files for both network and capital budget problems in dynamic programming.
import pandas as pd
import itertools
import re
from copy import deepcopy
def record_process(value_table, decision_table, n_stage, current_stage):
    """Display and append the current DP tables for the network problem.

    Pads ``value_table``/``decision_table`` out to ``n_stage`` columns,
    renders them as a combined ``(decision, value)`` pandas table, appends the
    pretty-printed text to ``lognetwork.txt``, and -- once the back recursion
    reaches ``current_stage == 1`` -- writes the full solution narrative
    (every optimal route plus the optimal cost) to ``solutionnetwork.txt``.

    NOTE(review): both input lists are padded/mutated in place.
    """
    # beautify the format
    # fill the list by negative element
    # -1 is used as a "no entry" marker so every row has n_stage columns.
    for i, value_list in enumerate(value_table[:-1]):
        value_table[i] = [
            *value_table[i],
            *[-1] * (n_stage - len(value_list)),
        ]
    for i, decision_list in enumerate(decision_table):
        decision_table[i] = [
            *decision_table[i], *[-1] * (n_stage - len(decision_list))
        ]
    # filling the table by negative element
    null_fill = [-1] * n_stage
    for _ in range(n_stage - len(value_table)):
        value_table.insert(0, null_fill)
    for _ in range(n_stage - len(decision_table) - 1):
        decision_table.insert(0, null_fill)
    decision_table.append([0] * n_stage)
    # sythesis decision and value as a whole table: cell = (decision, value)
    display_table = []
    for i in range(len(decision_table)):
        display_table.append(list(zip(decision_table[i], value_table[i])))
    # format the display table
    for i in display_table:
        i.reverse()
    df = pd.DataFrame(display_table)
    df = df.transpose()
    # rename column
    header = [
        "(d{0}(S), V{0}(S))".format(i) for i in range(len(display_table))
    ]
    df.columns = header
    # rename row (row labels count down so state 0 sits at the bottom)
    index_mapper = {}
    for i in range(df.shape[0]):
        index_mapper[i] = str(df.shape[0] - i - 1)
    df = df.rename(index_mapper, axis='index')
    df.loc["stage"] = ["{}".format(i) for i in range(df.shape[0])]
    # Turn the frame into ASCII art: blank out (-1, -1) filler cells and add
    # '|' separators after closing parens via the two regex passes below.
    txt = str(df).replace(
        "(-1, -1)",
        "________|")  # .replace("))","*").replace(")",")|").replace("*","))|")
    txt = re.sub(r'(?<=\d)\)', ")|", txt)
    txt = re.sub(r'\)\)', "))|", txt)
    txt = txt.replace("(0, 0)", "      0")
    with open("lognetwork.txt", 'a+') as f:
        f.write(
            "____________________________________________________________________________________\n"
        )
        f.write(txt)
        f.write(
            "\n____________________________________________________________________________________"
        )
        f.write("\n\n\n")
    print(
        "\n\n\n____________________________________________________________________________________"
    )
    print(txt)
    # Count tie cells: assumes ties are rendered with the literal substring
    # 'or' (e.g. "U or D") -- TODO confirm; would also match 'or' occurring
    # anywhere else in the table text.
    routes_n = len(re.findall(r'or', txt))
    if current_stage == 1:
        # when the back recursion is finished, create the output file
        with open("solutionnetwork.txt", "a+") as f:
            f.write(
                "The optimal decisions and associated values table is:\n\n ")
            f.write(txt)
            f.write("\n\nAs illustrated from the table:\n")
            if routes_n == 0:
                f.write("- There is 1 optimal route\n")
            else:
                f.write("- There are {} optimal routes\n".format(routes_n + 1))
            f.write("- The optimal cost is {}\n".format(value_table[0][0]))
        # extra text clearing indicating the nature of the solution
        if routes_n > 0:
            # Enumerate every distinct way of resolving the tie cells:
            # each tie independently picks "U" or "D", giving 2**routes_n routes.
            route_map = list(
                set(
                    itertools.permutations(
                        [*["U"] * routes_n, *["D"] * routes_n], routes_n)))
            assert len(route_map) == 2**routes_n, "check your routmap!"
            for i, route in enumerate(route_map):
                with open("solutionnetwork.txt", "a+") as f:
                    f.write("\n- Optimal route {}\n".format(i + 1))
                multi_decision_count = 0
                stage = 0
                state = 0
                # Walk the route forward, substituting this route's choice
                # whenever the table holds a tie ("... or ...") cell.
                for _ in range(len(decision_table) - 1):
                    next_decision = decision_table[stage][state]
                    if "or" in next_decision:
                        next_decision = route[multi_decision_count]
                        multi_decision_count += 1
                    if next_decision == "U":
                        with open("solutionnetwork.txt", "a+") as f:
                            f.write(
                                "Turning {} from node {} to node {}\n".format(
                                    "UP", (stage, state),
                                    (stage + 1, state + 1)))
                        state += 1
                    else:
                        with open("solutionnetwork.txt", "a+") as f:
                            f.write(
                                "Turning {} from node {} to node {}\n".format(
                                    "DOWN", (stage, state),
                                    (stage + 1, state)))
                    stage += 1
                with open("solutionnetwork.txt", "a+") as f:
                    f.write("At a total cost of {}\n".format(
                        value_table[0][0]))
        else:
            # Single optimal route: walk it forward directly.
            stage = 0
            state = 0
            with open("solutionnetwork.txt", "a+") as f:
                f.write("\n- Optimal route:\n")
            for _ in range(len(decision_table) - 1):
                next_decision = decision_table[stage][state]
                if next_decision == "U":
                    with open("solutionnetwork.txt", "a+") as f:
                        f.write("Turning {} from node {} to node {}\n".format(
                            "UP", (stage, state), (stage + 1, state + 1)))
                    state += 1
                else:
                    with open("solutionnetwork.txt", "a+") as f:
                        f.write("Turning {} from node {} to node {}\n".format(
                            "DOWN", (stage, state), (stage + 1, state)))
                stage += 1
            with open("solutionnetwork.txt", "a+") as f:
                f.write("At a total cost of {}\n".format(value_table[0][0]))
        # Echo the accumulated solution file to stdout.
        with open("solutionnetwork.txt", "r") as f:
            contents = f.read()
        print(
            "\n\n\n Analyzing the final result...\n####################################################"
        )
        print(contents)
def record_process_bud(value_table: pd.DataFrame, stage: int):
    """Append a snapshot of the capital-budget DP table to ``logcapbud.txt``.

    The caller's frame is deep-copied first, so ``value_table`` is never
    mutated. Columns are relabelled in (decision, value) pairs
    ``d_1(S), V_1(S), d_2(S), V_2(S), ...``, zero cells are blanked out as
    ``______``, and all-NaN rows are dropped before writing.
    """
    snapshot = deepcopy(value_table)
    n_stages = snapshot.shape[1] // 2
    # Interleave the decision/value column labels, pair by pair.
    columns = []
    for k in range(1, n_stages + 1):
        columns.append("d_{}(S)".format(k))
        columns.append("V_{}(S)".format(k))
    snapshot.columns = columns
    snapshot = snapshot.replace(0, "______").dropna(how="all")
    # Each call to this function logs one completed stage (stage counts by 2).
    with open("logcapbud.txt", "a+") as log:
        log.write("Stage {} completed\n".format(stage // 2 + 1))
        log.write(str(snapshot))
        log.write("\n\n\n")
def record_result_bud(value_table: pd.DataFrame, plan: pd.DataFrame,
                      budget: int):
    """Write the final capital-budget solution narrative to ``solutioncapbud.txt``.

    Renders the decision/value table, then walks the stages backwards from
    the full ``budget``, looking up the optimal plan per subsidiary via
    ``_detect_multiple`` and logging each step, the decision sequence, and
    the expected return.

    NOTE(review): ``_detect_multiple`` is defined elsewhere in the module
    (not visible in this chunk) -- presumably it decodes a table cell into a
    plan index and records tie alternatives; verify its contract.
    """
    table = deepcopy(value_table)
    _budget = deepcopy(budget)
    # Relabel columns as interleaved (d_i(S), V_i(S)) pairs.
    length = table.shape[1] // 2
    head = list(
        zip(["d_{}(S)".format(i + 1) for i in range(length)],
            ["V_{}(S)".format(i + 1) for i in range(length)]))
    head = [head_name for tup in head for head_name in tup]
    table.columns = head
    table = table.replace(0, "______")
    stages = value_table.shape[1] // 2
    with open("solutioncapbud.txt", "a+") as f:
        f.write("The optimal decisions and associated values table is:\n\n")
        f.write(str(table))
        f.write("\n\n")
        f.write(
            " \nwe can find the solution to the original problem by working backwards through the table.\n\n"
        )
        f.write(
            "Since the capital available for the {} stages are {}m Pounds, we can:\n"
            .format(stages, _budget))
    # Analysis result:
    # NOTE(review): multi_path_list is never appended to in this function, so
    # the "Multi paths" branch below appears unreachable from here -- unless
    # _detect_multiple mutates it as a side effect; verify.
    multi_path_list = []  # [(stage, another path), (stage, another path)]
    decision_route = []
    # Walk stages from last to first, spending the remaining budget.
    for i in range(stages * 2, 0, -2):
        stage_dispaly = i // 2  # i.e. 1,2,3
        stage_index = i - 2  # i.e. 0,2,4
        cell_value = table.loc[_budget, head[stage_index]]
        last_plan = _detect_multiple(cell_value, stage_index)
        decision_route.append(last_plan + 1)
        last_cost = int(plan.loc[last_plan, stage_index])
        last_buget = _budget - last_cost
        with open("solutioncapbud.txt", "a+") as f:
            f.write("- Looking up d_{}({}) and find the optimal decision {}\n".
                    format(stage_dispaly, _budget, last_plan + 1))
            if stage_dispaly != 1:
                f.write(
                    "- Implementing plan {} for subsdiray {}, leaving state {}-{}={} for subsdiray {}\n"
                    .format(last_plan + 1, stage_dispaly, _budget, last_cost,
                            last_buget, stage_dispaly - 1))
        _budget = last_buget
    with open("solutioncapbud.txt", "a+") as f:
        f.write("- This is gives decision sequence d = {}".format(
            list(reversed(decision_route))))
        f.write("\n- The expected returns would be {}m Pounds\n\n".format(
            value_table.iloc[-1, -1]))
    # Multi paths: replay the walk once per recorded tie, forcing the
    # alternative decision at the flagged stage.
    if len(multi_path_list) != 0:
        with open("solutioncapbud.txt", "a+") as f:
            f.write("\n\nAlternatively:\n\n")
        __budget = deepcopy(budget)
        for index, flag in enumerate(multi_path_list):
            _stage_index, _decision = flag
            decision_route = []
            for i in range(stages * 2, 0, -2):
                stage_dispaly = i // 2  # i.e. 1,2,3
                stage_index = i - 2  # i.e. 0,2,4
                cell_value = table.loc[__budget, head[stage_index]]
                if _stage_index == stage_index:
                    last_plan = _decision
                    # NOTE(review): popping while enumerating the same list
                    # skips elements -- confirm this is intended.
                    multi_path_list.pop(index)
                else:
                    last_plan = _detect_multiple(cell_value, stage_index)
                decision_route.append(last_plan + 1)
                last_cost = int(plan.loc[last_plan, stage_index])
                last_buget = __budget - last_cost
                with open("solutioncapbud.txt", "a+") as f:
                    f.write(
                        "- Looking up d_{}({}) and find the optimal decision {}\n"
                        .format(stage_dispaly, __budget, last_plan + 1))
                    if stage_dispaly != 1:
                        f.write(
                            "- Implementing plan {} for subsdiray {}, leaving state {}-{}={} for subsdiray {}\n"
                            .format(last_plan + 1, stage_dispaly, __budget,
                                    last_cost, last_buget, stage_dispaly - 1))
                __budget = last_buget
            with open("solutioncapbud.txt", "a+") as f:
                f.write("- This is gives decision sequence d = {}".format(
                    list(reversed(decision_route))))
                f.write(
                    "\n- The expected returns would be {}m Pounds\n\n".format(
                        value_table.iloc[-1, -1]))
if __name__ == "__main__":
pass | [
2,
48443,
7220,
14,
8800,
14,
24330,
21015,
201,
198,
2,
6434,
25,
31164,
2127,
360,
7258,
201,
198,
2,
9570,
25,
279,
85,
1129,
10232,
31,
65,
1585,
349,
13,
330,
13,
2724,
201,
198,
2,
3862,
25,
7769,
14,
1065,
14,
1238,
2481,... | 1.953315 | 5,912 |
##################################################
# All functions related to applying sliding window on a dataset
##################################################
# Author: Marius Bock
# Email: marius.bock(at)uni-siegen.de
##################################################
import numpy as np
def sliding_window_seconds(data, length_in_seconds=1, sampling_rate=50, overlap_ratio=None):
    """
    Return a sliding window measured in seconds over a data array.

    :param data: dataframe
        Input array; numpy arrays and pandas dataframes both work
    :param length_in_seconds: int, default: 1
        Window length in seconds
    :param sampling_rate: int, default: 50
        Sampling rate in hertz as integer value
    :param overlap_ratio: int, default: None
        Overlap between consecutive windows, as an integer percentage
    :return: tuple of windows and indices (or None when the overlap is invalid)
    """
    win_len = int(length_in_seconds * sampling_rate)
    overlapping_elements = 0
    if overlap_ratio is not None:
        overlapping_elements = int((overlap_ratio / 100) * win_len)
        if overlapping_elements >= win_len:
            print('Number of overlapping elements exceeds window size.')
            return
    # Consecutive windows start this many samples apart.
    step = win_len - overlapping_elements
    windows = []
    indices = []
    # Matches the original `while curr < len(data) - win_len` loop: a window
    # ending exactly at len(data) is NOT emitted.
    for start in range(0, len(data) - win_len, step):
        windows.append(data[start:start + win_len])
        indices.append([start, start + win_len])
    return np.array(windows), np.array(indices)
def sliding_window_samples(data, samples_per_window, overlap_ratio):
    """
    Return a sliding window measured in number of samples over a data array.

    :param data: dataframe
        Input array, can be numpy or pandas dataframe
    :param samples_per_window: int
        Window length as number of samples per window
    :param overlap_ratio: int or None
        Overlap is meant as percentage and should be an integer value;
        None means no overlap
    :return: dataframe, list
        Tuple of windows and indices (or None when the overlap is invalid)
    """
    windows = []
    indices = []
    curr = 0
    win_len = int(samples_per_window)
    # BUG FIX: previously this was only assigned inside the `if` below, so
    # overlap_ratio=None raised NameError on the first loop iteration.
    overlapping_elements = 0
    if overlap_ratio is not None:
        overlapping_elements = int((overlap_ratio / 100) * (win_len))
        if overlapping_elements >= win_len:
            print('Number of overlapping elements exceeds window size.')
            return
    while curr < len(data) - win_len:
        windows.append(data[curr:curr + win_len])
        indices.append([curr, curr + win_len])
        curr = curr + win_len - overlapping_elements
    try:
        result_windows = np.array(windows)
        result_indices = np.array(indices)
    except ValueError:
        # Ragged windows cannot form a rectangular array; fall back to an
        # object array. NOTE(review): this fallback assumes 2-D input
        # (data.shape[1]) -- confirm callers never pass 1-D ragged data.
        result_windows = np.empty(shape=(len(windows), win_len, data.shape[1]), dtype=object)
        result_indices = np.array(indices)
        for i in range(0, len(windows)):
            result_windows[i] = windows[i]
            result_indices[i] = indices[i]
    return result_windows, result_indices
def apply_sliding_window(data_x, data_y, sliding_window_size, unit, sampling_rate, sliding_window_overlap):
    """
    Transform a dataset into windows of a specific size and overlap,
    subject by subject (column 0 of data_x is the subject id, so windows
    never cross a subject boundary).

    :param data_x: numpy float array
        Array containing the features (can be 2D)
    :param data_y: numpy float array
        Array containing the corresponding labels to the dataset (is 1D)
    :param sliding_window_size: integer or float
        Size of each window (either in seconds or units)
    :param unit: string, ['units', 'seconds']
        Unit in which the sliding window is measured
    :param sampling_rate: integer
        Number of hertz in which the dataset is sampled
    :param sliding_window_overlap: integer
        Amount of overlap between the sliding windows (percentage, e.g. 20 is 20%)
    :return: windowed features and one label per window (label of the last sample)
    """
    # Attach labels as a trailing column so features and labels are windowed
    # with identical boundaries.
    full_data = np.concatenate((data_x, data_y[:, None]), axis=1)
    output_x = None
    output_y = None
    for subject_id in np.unique(full_data[:, 0]):
        subject_rows = full_data[full_data[:, 0] == subject_id]
        subject_x = subject_rows[:, :-1]
        subject_y = subject_rows[:, -1]
        if unit == 'units':
            tmp_x, _ = sliding_window_samples(subject_x, sliding_window_size, sliding_window_overlap)
            tmp_y, _ = sliding_window_samples(subject_y, sliding_window_size, sliding_window_overlap)
        elif unit == 'seconds':
            tmp_x, _ = sliding_window_seconds(subject_x, sliding_window_size, sampling_rate, sliding_window_overlap)
            tmp_y, _ = sliding_window_seconds(subject_y, sliding_window_size, sampling_rate, sliding_window_overlap)
        output_x = tmp_x if output_x is None else np.concatenate((output_x, tmp_x), axis=0)
        output_y = tmp_y if output_y is None else np.concatenate((output_y, tmp_y), axis=0)
    # Keep only the label of each window's final sample.
    final_labels = [[window[-1]] for window in output_y]
    return output_x, np.array(final_labels).flatten()
| [
29113,
14468,
2235,
198,
2,
1439,
5499,
3519,
284,
11524,
22292,
4324,
319,
257,
27039,
198,
29113,
14468,
2235,
198,
2,
6434,
25,
1526,
3754,
347,
735,
198,
2,
9570,
25,
1667,
3754,
13,
65,
735,
7,
265,
8,
35657,
12,
44524,
5235,
... | 2.626409 | 1,863 |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# hide
# skip
from fastai.vision.all import *
# BUG FIX: the line below was a bare IPython shell command
# ("! [-e / content] & & pip install - Uqq fastai"), which is a SyntaxError in
# a plain .py module -- in jupytext scripts shell magics must stay commented
# out. Reconstructed (de-garbled) and disabled:
# ! [ -e /content ] && pip install -Uqq fastai  # upgrade fastai on colab
# # Tutorial - Assemble the data on the pets dataset
#
# > Using `Datasets`, `Pipeline`, `TfmdLists` and `Transform` in computer vision
# ## Overview
# In this tutorial, we look in depth at the middle level API for collecting data in computer vision. First we will see how to use:
#
# - `Transform` to process the data
# - `Pipeline` to compose transforms
#
# Those are just functions with added functionality. For dataset processing, we will look in a second part at
#
# - `TfmdLists` to apply one `Pipeline` of `Transform`s on a collection of items
# - `Datasets` to apply several `Pipeline` of `Transform`s on a collection of items in parallel and produce tuples
#
# The general rule is to use `TfmdLists` when your transforms will output the tuple (input,target) and `Datasets` when you build separate `Pipeline`s for each of your input(s)/target(s).
#
# After this tutorial, you might be interested by the [siamese tutorial](http://docs.fast.ai/tutorial.siamese) that goes even more in depth in the data APIs, showing you how to write your custom types and how to customize the behavior of `show_batch` and `show_results`.
# ## Processing data
# Cleaning and processing data is one of the most time-consuming things in machine learning, which is why fastai tries to help you as much as it can. At its core, preparing the data for your model can be formalized as a sequence of transformations you apply to some raw items. For instance, in a classic image classification problem, we start with filenames. We have to open the corresponding images, resize them, convert them to tensors, maybe apply some kind of data augmentation, before we are ready to batch them. And that's just for the inputs of our model, for the targets, we need to extract the label of our filename and convert it to an integer.
#
# This process needs to be somewhat reversible, because we often want to inspect our data to double check what we feed the model actually makes sense. That's why fastai represents all those operations by `Transform`s, which you can sometimes undo with a `decode` method.
# ### Transform
# First we'll have a look at the basic steps using a single MNIST image. We'll start with a filename, and see step by step how it can be converted in to a labelled image that can be displayed and used for modeling. We use the usual `untar_data` to download our dataset (if necessary) and get all the image files:
# untar_data downloads/extracts on first run and returns the dataset root path.
source = untar_data(URLs.MNIST_TINY) / 'train'
items = get_image_files(source)
fn = items[0]
# Bare expression: displays the value in a notebook, no effect as a script.
fn
# We'll look at each `Transform` needed in turn. Here's how we can open an image file:
img = PILImage.create(fn)
img
# Then we can convert it to a `C*H*W` tensor (for channel x height x width, which is the convention in PyTorch):
tconv = ToTensor()
img = tconv(img)
img.shape, type(img)
# Now that's done, we can create our labels. First extracting the text label:
# parent_label uses the parent directory name as the label.
lbl = parent_label(fn)
lbl
# And then converting to an int for modeling:
tcat = Categorize(vocab=['3', '7'])
lbl = tcat(lbl)
lbl
# We use `decode` to reverse transforms for display. Reversing the `Categorize` transform result in a class name we can display:
lbld = tcat.decode(lbl)
lbld
# ### Pipeline
# We can compose our image steps using `Pipeline`:
pipe = Pipeline([PILImage.create, tconv])
img = pipe(fn)
img.shape
# A `Pipeline` can decode and show an item.
pipe.show(img, figsize=(1, 1), cmap='Greys')
# The show method works behind the scenes with types. Transforms will make sure the type of an element they receive is preserved. Here `PILImage.create` returns a `PILImage`, which knows how to show itself. `tconv` converts it to a `TensorImage`, which also knows how to show itself.
type(img)
# Those types are also used to enable different behaviors depending on the input received (for instance you don't do data augmentation the same way on an image, a segmentation mask or a bounding box).
# ## Loading the pets dataset using only `Transform`
# Let's see how to use `fastai.data` to process the Pets dataset. If you are used to writing your own PyTorch `Dataset`s, what will feel more natural is to write everything in one `Transform`. We use *source* to refer to the underlying source of our data (e.g. a directory on disk, a database connection, a network connection, etc). Then we grab the items.
source = untar_data(URLs.PETS) / "images"
items = get_image_files(source)
# We'll use this function to create consistently sized tensors from image files:
# Before we can create a `Transform`, we need a type that knows how to show itself (if we want to use the show method). Here we define a `TitledImage`:
# Let's check it works:
img = resized_image(items[0])
TitledImage(img, 'test title').show()
# ### Using decodes for showing processed data
# To decode data for showing purposes (like de-normalizing an image or converting back an index to its corresponding class), we implement a <code>decodes</code> method inside a `Transform`.
# The `Transform` opens and resizes the images on one side, label it and convert that label to an index using `o2i` on the other side. Inside the <code>decodes</code> method, we decode the index using the `vocab`. The image is left as is (we can't really show a filename!).
#
# To use this `Transform`, we need a label function. Here we use a regex on the `name` attribute of our filenames:
labeller = using_attr(RegexLabeller(pat=r'^(.*)_\d+.jpg$'), 'name')
# Then we gather all the possible labels, uniqueify them and ask for the two correspondences (vocab and o2i) using `bidir=True`. We can then use them to build our pet transform.
vals = list(map(labeller, items))
vocab, o2i = uniqueify(vals, sort=True, bidir=True)
pets = PetTfm(vocab, o2i, labeller)
# We can check how it's applied to a filename:
x, y = pets(items[0])
x.shape, y
# And we can decode our transformed version and show it:
dec = pets.decode([x, y])
dec.show()
# Note that like `__call__ ` and <code>encodes</code>, we implemented a <code>decodes</code> method but we actually call `decode` on our `Transform`.
#
# Also note that our <code>decodes</code> method received the two objects (x and y). We said in the previous section `Transform` dispatch over tuples (for the encoding as well as the decoding) but here it took our two elements as a whole and did not try to decode x and y separately. Why is that? It's because we pass a list `[x,y]` to decodes. `Transform`s dispatch over tuples, but tuples only. And as we saw as well, to prevent a `Transform` from dispatching over a tuple, we just have to make it an `ItemTransform`:
dec = pets.decode(pets(items[0]))
dec.show()
# ### Setting up the internal state with a setups
# Now let's make our `ItemTransform` automatically set up its state from the data. This way, when we combine together our `Transform` with the data, it will automatically get set up without us having to do anything. This is very easy to do: just copy the lines we had before to build the categories inside the transform in a <code>setups</code> method:
# Now we can create our `Transform`, call its setup, and it will be ready to be used:
pets = PetTfm()
pets.setup(items)
x, y = pets(items[0])
x.shape, y
# And like before, there is no problem to decode it:
dec = pets.decode((x, y))
dec.show()
# ### Combining our `Transform` with data augmentation in a `Pipeline`.
# We can take advantage of fastai's data augmentation transforms if we give the right type to our elements. Instead of returning a standard `PIL.Image`, if our transform returns the fastai type `PILImage`, we can then use any fastai's transform with it. Let's just return a `PILImage` for our first element:
# We can then combine that transform with `ToTensor`, `Resize` or `FlipItem` to randomly flip our image in a `Pipeline`:
tfms = Pipeline([PetTfm(), Resize(224), FlipItem(p=1), ToTensor()])
# Calling `setup` on a `Pipeline` will set each transform in order:
tfms.setup(items)
# To check the setup was done properly, we want to see if we did build the vocab. One cool trick of `Pipeline` is that when asking for an attribute, it will look through each of its `Transform`s for that attribute and give you the result (or the list of results if the attribute is in multiple transforms):
tfms.vocab
# Then we can call our pipeline:
x, y = tfms(items[0])
x.shape, y
# We can see `ToTensor` and `Resize` were applied to the first element of our tuple (which was of type `PILImage`) but not the second. We can even have a look at our element to check the flip was also applied:
tfms.show(tfms(items[0]))
# `Pipeline.show` will call decode on each `Transform` until it gets a type that knows how to show itself. The library considers a tuple as knowing how to show itself if all its parts have a `show` method. Here it does not happen before reaching `PetTfm` since the second part of our tuple is an int. But after decoding the original `PetTfm`, we get a `TitledImage` which has a `show` method.
#
# It's a good point to note that the `Transform`s of the `Pipeline` are sorted by their internal `order` attribute (with a default of `order=0`). You can always check the order in which the transforms are in a `Pipeline` by looking at its representation:
tfms
# Even if we define `tfms` with `Resize` before `FlipItem`, we can see they have been reordered because we have:
FlipItem.order, Resize.order
# To customize the order of a `Transform`, just set `order = ...` before the `__init__` (it's a class attribute). Let's make `PetTfm` of order -5 to be sure it's always run first:
# Then we can mess up the order of the transforms in our `Pipeline` but it will fix itself:
tfms = Pipeline([Resize(224), PetTfm(), FlipItem(p=1), ToTensor()])
tfms
# Now that we have a good `Pipeline` of transforms, let's add it to a list of filenames to build our dataset. A `Pipeline` combined with a collection is a `TfmdLists` in fastai.
# ## `TfmdLists` and `Datasets`
# The main difference between `TfmdLists` and `Datasets` is the number of `Pipeline`s you have: `TfmdLists` take one `Pipeline` to transform a list (like we currently have) whereas `Datasets` combines several `Pipeline`s in parallel to create a tuple from one set of raw items, for instance a tuple (input, target).
# ### One pipeline makes a `TfmdLists`
# Creating a `TfmdLists` just requires a list of items and a list of transforms that will be combined in a `Pipeline`:
tls = TfmdLists(items, [Resize(224), PetTfm(), FlipItem(p=0.5), ToTensor()])
x, y = tls[0]
x.shape, y
# We did not need to pass anything to `PetTfm` thanks to our setup method: the `Pipeline` was automatically setup on the `items` during the initialization, so `PetTfm` has created its vocab like before:
tls.vocab
# We can ask the `TfmdLists` to show the items we got:
tls.show((x, y))
# Or we have a shortcut with `show_at`:
show_at(tls, 0)
# ### Training and validation sets
# `TfmdLists` has an 's' in its name because it can represent several transformed lists: your training and validation sets. To use that functionality, we just need to pass `splits` to the initialization. `splits` should be a list of lists of indices (one list per set). To help create splits, we can use all the *splitters* of the fastai library:
splits = RandomSplitter(seed=42)(items)
splits
tls = TfmdLists(items, [Resize(224), PetTfm(), FlipItem(p=0.5), ToTensor()], splits=splits)
# Then your `tls` get a train and valid attributes (it also had them before, but the valid was empty and the train contained everything).
show_at(tls.train, 0)
# An interesting thing is that unless you pass `train_setup=False`, your transforms are setup on the training set only (which is best practices): the `items` received by <code>setups</code> are just the elements of the training set.
# ### Getting to `DataLoaders`
# From a `TfmdLists`, getting a `DataLoaders` object is very easy, you just have to call the `dataloaders` method:
dls = tls.dataloaders(bs=64)
# And `show_batch` will just *work*:
dls.show_batch()
# You can even add augmentation transforms, since we have a proper fastai typed image. Just remember to add the `IntToFloatTensor` transform that deals with the conversion of int to float (augmentation transforms of fastai on the GPU require float tensors). When calling `TfmdLists.dataloaders`, you pass the `batch_tfms` to `after_batch` (and potential new `item_tfms` to `after_item`):
dls = tls.dataloaders(bs=64, after_batch=[IntToFloatTensor(), *aug_transforms()])
dls.show_batch()
# ### Using `Datasets`
# `Datasets` applies a list of list of transforms (or list of `Pipeline`s) lazily to items of a collection, creating one output per list of transforms/`Pipeline`. This makes it easier for us to separate out steps of a process, so that we can re-use them and modify the process more easily. This is what lays the foundation of the data block API: we can easily mix and match types as inputs or outputs as they are associated to certain pipelines of transforms.
#
# For instance, let's write our own `ImageResizer` transform with two different implementations for images or masks:
# Specifying the type-annotations makes it so that our transform does nothing to things that are neither `PILImage` nor `PILMask`, and resizes images with `self.resample`, masks with the nearest neighbor interpolation. To create a `Datasets`, we then pass two pipelines of transforms, one for the input and one for the target:
tfms = [[PILImage.create, ImageResizer(128), ToTensor(), IntToFloatTensor()],
[labeller, Categorize()]]
dsets = Datasets(items, tfms)
# We can check that inputs and outputs have the right types:
t = dsets[0]
type(t[0]), type(t[1])
# We can decode and show using `dsets`:
x, y = dsets.decode(t)
x.shape, y
dsets.show(t)
# And we can pass our train/validation split like in `TfmdLists`:
dsets = Datasets(items, tfms, splits=splits)
# But we are not using the fact that `Transform`s dispatch over tuples here. `ImageResizer`, `ToTensor` and `IntToFloatTensor` could be passed as transforms over the tuple. This is done in `.dataloaders` by passing them to `after_item`. They won't do anything to the category but will only be applied to the inputs.
tfms = [[PILImage.create], [labeller, Categorize()]]
dsets = Datasets(items, tfms, splits=splits)
dls = dsets.dataloaders(bs=64, after_item=[ImageResizer(128), ToTensor(), IntToFloatTensor()])
# And we can check it works with `show_batch`:
dls.show_batch()
# If we just wanted to build one `DataLoader` from our `Datasets` (or the previous `TfmdLists`), you can pass it directly to `TfmdDL`:
dsets = Datasets(items, tfms)
dl = TfmdDL(dsets, bs=64, after_item=[ImageResizer(128), ToTensor(), IntToFloatTensor()])
# ### Segmentation
# By using the same transforms in `after_item` but a different kind of targets (here segmentation masks), the targets are automatically processed as they should with the type-dispatch system.
cv_source = untar_data(URLs.CAMVID_TINY)
cv_items = get_image_files(cv_source / 'images')
cv_splitter = RandomSplitter(seed=42)
cv_split = cv_splitter(cv_items)
tfms = [[PILImage.create], [cv_label, PILMask.create]]
cv_dsets = Datasets(cv_items, tfms, splits=cv_split)
dls = cv_dsets.dataloaders(bs=64, after_item=[ImageResizer(128), ToTensor(), IntToFloatTensor()])
dls.show_batch(max_n=4)
# ## Adding a test dataloader for inference
# Let's take back our pets dataset...
tfms = [[PILImage.create], [labeller, Categorize()]]
dsets = Datasets(items, tfms, splits=splits)
dls = dsets.dataloaders(bs=64, after_item=[ImageResizer(128), ToTensor(), IntToFloatTensor()])
# ...and imagine we have some new files to classify.
path = untar_data(URLs.PETS)
tst_files = get_image_files(path / "images")
len(tst_files)
# We can create a dataloader that takes those files and applies the same transforms as the validation set with `DataLoaders.test_dl`:
tst_dl = dls.test_dl(tst_files)
tst_dl.show_batch(max_n=9)
# **Extra:**
# You can call `learn.get_preds` passing this newly created dataloaders to make predictions on our new images!
# What is really cool is that after you finished training your model, you can save it with `learn.export`, this is also going to save all the transforms that need to be applied to your data. In inference time you just need to load your learner with `load_learner` and you can immediately create a dataloader with `test_dl` to use it to generate new predictions!
# ## fin -
| [
2,
11420,
198,
2,
474,
929,
88,
353,
25,
198,
2,
220,
220,
474,
929,
88,
5239,
25,
198,
2,
220,
220,
220,
220,
17519,
25,
20966,
2047,
65,
11,
9078,
198,
2,
220,
220,
220,
220,
6626,
62,
265,
62,
33878,
25,
2081,
198,
2,
220... | 3.2966 | 5,118 |
# Compose this config from the shared base files. The instance-segmentation
# dataset is commented out: mask heads are disabled below (mask_head=None),
# so the plain COCO detection dataset is used instead.
_base_ = [
    "../_base_/models/htc_without_semantic_swin_fpn.py",
    # "../_base_/datasets/coco_instance.py",
    "../_base_/datasets/coco_detection.py",
    "../_base_/schedules/schedule_1x.py",
    "../_base_/default_runtime.py",
]
model = dict(
backbone=dict(
type="CBSwinTransformer",
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=7,
ape=False,
drop_path_rate=0.3,
patch_norm=True,
use_checkpoint=False,
),
neck=dict(type="CBFPN", in_channels=[128, 256, 512, 1024]),
roi_head=dict(
bbox_head=[
dict(
type="ConvFCBBoxHead",
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
# class multi
num_classes=163,
bbox_coder=dict(
type="DeltaXYWHBBoxCoder",
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.1, 0.1, 0.2, 0.2],
),
reg_class_agnostic=True,
reg_decoded_bbox=True,
# single gpu
norm_cfg=dict(type="BN", requires_grad=True),
loss_cls=dict(
type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
),
loss_bbox=dict(type="GIoULoss", loss_weight=10.0),
),
dict(
type="ConvFCBBoxHead",
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
# class multi
num_classes=163,
bbox_coder=dict(
type="DeltaXYWHBBoxCoder",
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.05, 0.05, 0.1, 0.1],
),
reg_class_agnostic=True,
reg_decoded_bbox=True,
# single gpu
norm_cfg=dict(type="BN", requires_grad=True),
loss_cls=dict(
type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
),
loss_bbox=dict(type="GIoULoss", loss_weight=10.0),
),
dict(
type="ConvFCBBoxHead",
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
# class multi
num_classes=163,
bbox_coder=dict(
type="DeltaXYWHBBoxCoder",
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.033, 0.033, 0.067, 0.067],
),
reg_class_agnostic=True,
reg_decoded_bbox=True,
# single gpu
norm_cfg=dict(type="BN", requires_grad=True),
loss_cls=dict(
type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
),
loss_bbox=dict(type="GIoULoss", loss_weight=10.0),
),
],
mask_roi_extractor=None,
mask_head=None,
),
test_cfg=dict(
rcnn=dict(
score_thr=0.001,
nms=dict(type="soft_nms"),
)
),
)
# Per-channel image normalization: the standard ImageNet mean/std values;
# to_rgb=True converts the loaded BGR image to RGB before normalizing.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True
)
# augmentation strategy originates from HTC
train_pipeline = [
    dict(type="LoadImageFromFile"),
    # Boxes only: masks and semantic segmentation are not loaded, matching
    # the disabled mask heads in the model above.
    dict(type="LoadAnnotations", with_bbox=True, with_mask=False, with_seg=False),
    # Multi-scale training: shorter side sampled from the 400-1400 range at
    # a long side of 1600, keeping aspect ratio.
    dict(
        type="Resize",
        img_scale=[(1600, 400), (1600, 1400)],
        multiscale_mode="range",
        keep_ratio=True,
    ),
    dict(type="RandomFlip", flip_ratio=0.5),
    dict(type="Normalize", **img_norm_cfg),
    dict(type="Pad", size_divisor=32),
    # NOTE(review): SegRescale appears to be a no-op here since with_seg=False
    # above -- confirm it can be dropped.
    dict(type="SegRescale", scale_factor=1 / 8),
    dict(type="DefaultFormatBundle"),
    dict(
        type="Collect",
        # keys=["img", "gt_bboxes", "gt_labels", "gt_masks", "gt_semantic_seg"],
        keys=[
            "img",
            "gt_bboxes",
            "gt_labels",
        ],
    ),
]
# Inference pipeline: single scale (1600x1400), flip test-time augmentation
# disabled.
test_pipeline = [
    dict(type="LoadImageFromFile"),
    dict(
        type="MultiScaleFlipAug",
        img_scale=(1600, 1400),
        flip=False,
        transforms=[
            dict(type="Resize", keep_ratio=True),
            dict(type="RandomFlip"),
            dict(type="Normalize", **img_norm_cfg),
            dict(type="Pad", size_divisor=32),
            dict(type="ImageToTensor", keys=["img"]),
            dict(type="Collect", keys=["img"]),
        ],
    ),
]
dataset_type = "CocoDataset"
# classes = ("음식",)
classes = (
"기타",
"가래떡",
"어묵볶음",
"쌀밥",
"배추김치",
"라면류",
"닭찜",
"육류튀김",
"김치찌개",
"케이크류",
"잡곡밥",
"두부",
"제육볶음",
"열무김치",
"보리밥",
"기타빵류",
"돼지등갈비찜",
"치킨류",
"중식면류",
"달걀찜",
"조미김",
"감자볶음",
"미역국",
"김밥",
"국수류",
"기타반찬",
"김치찜",
"기타김치",
"스파게티류",
"기타떡",
"토마토",
"치즈",
"기타구이",
"등심스테이크",
"볶음밥류",
"참외",
"버섯볶음",
"샐러드",
"연근조림",
"죽류",
"기타소스/기타장류",
"돼지고기 수육",
"덮밥",
"젓갈",
"돈까스",
"시금치나물",
"포도",
"앙금빵류",
"상추",
"들깻잎",
"육류전",
"달걀프라이",
"채소류튀김",
"코다리찜",
"기타불고기",
"돼지고기구이",
"버거류",
"된장국",
"채소",
"떡볶이",
"낙지볶음",
"비빔밥",
"사과",
"피자류",
"숙주나물",
"애호박볶음",
"멸치볶음",
"생선구이",
"깻잎장아찌",
"콩조림",
"카레(커리)",
"돼지고기채소볶음",
"바나나",
"파프리카",
"고사리나물",
"미역줄기볶음",
"콩나물국",
"소불고기",
"떠먹는요구르트",
"햄",
"소고기구이",
"버섯구이",
"오이",
"된장찌개",
"무생채",
"어패류튀김",
"키위",
"리조또",
"오징어볶음",
"샌드위치류",
"만두류",
"과자",
"채소류전",
"시리얼",
"순두부찌개",
"귤",
"딸기",
"기타스테이크",
"잡채",
"오리불고기",
"취나물",
"가지볶음",
"삶은달걀",
"크림빵류",
"부침류",
"어패류전",
"한과류",
"소갈비찜",
"메추리알 장조림",
"안심스테이크",
"단호박찜",
"식빵류",
"시래기나물",
"아귀찜",
"김치볶음",
"우엉조림",
"감",
"돼지불고기",
"고기장조림",
"두부조림",
"오징어채볶음",
"즉석밥",
"오삼불고기",
"현미밥",
"파김치",
"페이스트리(파이)류",
"총각김치",
"닭가슴살",
"해물찜",
"도넛류",
"마시는요구르트",
"돼지갈비찜",
"함박스테이크",
"오징어찜",
"오이나물",
"컵/액체류용기",
"삶은브로콜리",
"청국장찌개",
"그라탕",
"적류",
"소고기채소볶음",
"조기찜",
"제품사진",
"기타해조류",
"기타장아찌/절임류",
"기타나물/숙채/생채/무침류",
"기타조림",
"기타국/찌개/탕",
"기타튀김",
"기타볶음",
"기타난류",
"기타찜",
"기타면류",
"견과류",
"기타채소류",
"기타과실류",
"크래커",
"기타전/적/부침류",
"기타밥류",
"기타죽/스프류",
"도토리묵무침",
"튀김빵류",
"기타과자류",
)
# Dataset locations: images under data_root, COCO-format annotation JSON
# files (163train/163val/163test) under anno_root.
data_root = "/home/jovyan/data/filtered-food3"
anno_root = "/home/jovyan/workspace/ml_mg/json_data/"
# Batch size per GPU; also used below to scale the learning rate.
samples_per_gpu = 1
data = dict(
    workers_per_gpu=16,
    samples_per_gpu=samples_per_gpu,
    train=dict(
        type=dataset_type,
        img_prefix=data_root,
        classes=classes,
        ann_file=anno_root + "163train.json",
        pipeline=train_pipeline,
    ),
    # Validation and test both use the inference pipeline.
    val=dict(
        type=dataset_type,
        img_prefix=data_root,
        classes=classes,
        ann_file=anno_root + "163val.json",
        pipeline=test_pipeline,
    ),
    test=dict(
        type=dataset_type,
        img_prefix=data_root,
        classes=classes,
        ann_file=anno_root + "163test.json",
        pipeline=test_pipeline,
    ),
)
# AdamW; _delete_=True replaces the base-config optimizer entirely.
# Learning rate scales linearly with batch size (base 1e-4 at 2 samples/GPU).
optimizer = dict(
    _delete_=True,
    type="AdamW",
    lr=0.0001 * (samples_per_gpu / 2),
    betas=(0.9, 0.999),
    weight_decay=0.05,
    # Exempt positional embeddings, relative-position bias tables and norm
    # layers from weight decay.
    paramwise_cfg=dict(
        custom_keys={
            "absolute_pos_embed": dict(decay_mult=0.0),
            "relative_position_bias_table": dict(decay_mult=0.0),
            "norm": dict(decay_mult=0.0),
        }
    ),
)
# Step LR decay at epochs 16 and 19.
lr_config = dict(step=[16, 19])
runner = dict(type="EpochBasedRunnerAmp", max_epochs=40)
# do not use mmdet version fp16
fp16 = None
# Distributed optimizer hook with its own mixed-precision handling
# (use_fp16=True) instead of mmdet's fp16 config above.
optimizer_config = dict(
    type="DistOptimizerHook",
    update_interval=1,
    grad_clip=None,
    coalesce=True,
    bucket_size_mb=-1,
    use_fp16=True,
)
# Log every iteration to the text logger and to Weights & Biases.
log_config = dict(
    interval=1,
    hooks=[
        dict(type="TextLoggerHook", reset_flag=True),
        dict(
            type="WandbLoggerHook",
            init_kwargs=dict(
                project="mmdetection",
                name="163_class_htc_cbv2_swin_base22k_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco",
            ),
        ),
    ],
)
evaluation = dict(  # The config to build the evaluation hook, refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/evaluation/eval_hooks.py#L7 for more details.
    interval=5, metric=["bbox"]  # Evaluation interval (epochs); bbox mAP only.
)
workflow = [("train", 1)]
# workflow = [("train", 5), ("val", 1)]
# workflow = [("val", 1)]
# Resume (weights + optimizer/scheduler state) from the latest checkpoint.
resume_from = "/home/jovyan/workspace/ml_mg/cbnetev2/work_dirs/163_htc_cbv2_swin_base_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco/latest.pth"
# load_from = "/home/jovyan/workspace/ml_mg/cbnetev2/work_dirs/htc_cbv2_swin_base_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco/latest.pth"
# pretrained
# load_from = "/home/jovyan/workspace/ml_mg/cbnetev2/checkpoints/htc_cbv2_swin_base22k_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco.pth"
| [
62,
8692,
62,
796,
685,
198,
220,
220,
220,
366,
40720,
62,
8692,
62,
14,
27530,
14,
4352,
66,
62,
19419,
62,
43616,
5109,
62,
2032,
259,
62,
69,
21999,
13,
9078,
1600,
198,
220,
220,
220,
1303,
366,
40720,
62,
8692,
62,
14,
196... | 1.332638 | 7,191 |
import unittest
from getkey.platforms import PlatformTest
| [
11748,
555,
715,
395,
198,
198,
6738,
651,
2539,
13,
24254,
82,
1330,
19193,
14402,
628
] | 3.75 | 16 |
# Every target in this package is visible to the whole workspace.
package(default_visibility = ["//visibility:public"])

# Expose the symlinked Node.js / npm / Yarn package trees and their
# executables to other Bazel packages.
exports_files([
    "node_pkg_link",
    "node_pkg_link/bin/node",
    "npm_pkg_link",
    "yarn_pkg_link",
    "yarn_pkg_link/bin/yarn",
])

# Convenience alias so the Yarn launcher can also be referenced under a
# .js label.
alias(
    name = "yarn_pkg_link/bin/yarn.js",
    actual = ":yarn_pkg_link/bin/yarn",
)
| [
26495,
7,
12286,
62,
4703,
2247,
796,
14631,
1003,
4703,
2247,
25,
11377,
8973,
8,
198,
198,
1069,
3742,
62,
16624,
26933,
198,
220,
366,
17440,
62,
35339,
62,
8726,
1600,
198,
220,
366,
77,
4426,
62,
35339,
62,
8726,
1600,
198,
220... | 2.168 | 125 |
# -*-coding: utf-8 -*-
# NOTE: Python 2 script (uses the print statement). Prints Korean-language
# car/driver/passenger statistics.

# Fleet and demand figures.
cars = 100
space_in_a_car = 4
drivers = 30
passengers = 90

# Derived quantities.
cars_not_driven = cars - drivers
cars_driven = drivers
carpool_capacity = cars_driven * space_in_a_car
# Integer division under Python 2 (90 / 30 == 3).
average_passengers_per_car = passengers / cars_driven

# Output (Korean): total cars, drivers, idle cars, carpool capacity,
# passengers, and average passengers per car.
print "자동차", cars, "대가 있습니다."
print "운전자는", drivers, "명 뿐입니다."
print "오늘은 빈 차가", cars_not_driven, "대일 것입니다."
print "오늘은", carpool_capacity, "명을 태울 수 있습니다."
print "함께 탈 사람은", passengers, "명 있습니다."
print "차마다", average_passengers_per_car, "명 정도씩 타야 합니다."
2,
532,
9,
12,
66,
7656,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37993,
796,
1802,
198,
13200,
62,
259,
62,
64,
62,
7718,
796,
604,
198,
36702,
796,
1542,
198,
6603,
9302,
796,
4101,
198,
37993,
62,
1662,
62,
15808,
796,
5... | 1.49848 | 329 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\plot_display_dialog.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
5178,
7822,
7560,
422,
3555,
334,
72,
2393,
45302,
59,
29487,
62,
13812,
62,
38969,
519,
13,
9019,
6,
198,
2,
198,
2,
15622,
416,
25,
9485,
48,
83,
20,
124... | 3.026087 | 115 |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 19:16:54 2020
@author: Abhishek Mukherjee
"""
#Merge overlapping intervals...
#Leetcode problem
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
3825,
2365,
220,
642,
678,
25,
1433,
25,
4051,
12131,
201,
198,
201,
198,
31,
9800,
25,
2275,
14363,
258,
74,
31509,
372,
34589,
201,... | 2.4 | 65 |
import torch
import torch.nn as nn
import numpy as np
from typing import Tuple
class Representation(nn.Module):
    """Encoder network: maps a raw environment observation to the model's
    hidden (latent) state.

    observation -> hidden state

    NOTE(review): the Representation/Prediction/Dynamics trio matches the
    MuZero network decomposition -- confirm against the training loop.
    """
class Prediction(nn.Module):
    """Prediction head: maps a hidden state to a policy distribution and a
    value estimate.

    hidden state -> policy + value
    """
class Dynamics(nn.Module):
    """Dynamics network: advances the hidden state by one step given an
    action, also predicting the immediate reward.

    hidden state + action -> next hidden state + reward
    """
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
19720,
1330,
309,
29291,
628,
628,
198,
198,
4871,
10858,
341,
7,
20471,
13,
26796,
2599,
198,
220,
220,
220,
37227,
198,
220,
22... | 3.034783 | 115 |
# djangodocker/settings/production.py
from os import environ
from .base import *
# Current mode
# This module is the *production* settings file (settings/production.py),
# so it must not announce itself as Development.
MODE = 'Production'

# SECURITY WARNING: don't run with debug turned on in production!
# Debug pages leak settings, stack traces and environment details, so the
# production settings must force it off (the original had DEBUG = True here,
# contradicting the warning above).
DEBUG = False
| [
2,
42625,
648,
375,
12721,
14,
33692,
14,
25493,
13,
9078,
198,
6738,
28686,
1330,
551,
2268,
198,
6738,
764,
8692,
1330,
1635,
198,
198,
2,
9236,
4235,
198,
49058,
796,
705,
41206,
6,
198,
198,
2,
10729,
4261,
9050,
39410,
25,
836,... | 3.431034 | 58 |
import numpy as np
def write_results(info_dict: dict) -> None:
    """Write a human-readable summary of an ETC (exposure-time-calculator)
    run to ``<MainDirectory>/results/results_summary.txt``.

    info_dict holds every input and result of the simulation; the branch
    taken in several sections depends on info_dict['etc_type'], one of
    'time', 'snr' or 'mag'.
    """
    # NOTE(review): the handle is closed manually at the very end; a `with`
    # block would be safer if an intermediate write raised.
    f = open('%s/results/results_summary.txt' % info_dict['MainDirectory'], 'w')
    # --- Simulation mode and target object ---
    f.write ('Information about the simulation:\n')
    f.write ('---------------------------------\n')
    if info_dict['etc_type'] == 'time':
        f.write ('Compute the time required to observe the object with a SNR=%.2f \n' % info_dict['SNR'])
        if info_dict['object_type'] == "magnitude":
            f.write ('Object: magnitude of %.2f\n' % info_dict['object_magnitude'])
        else:
            f.write ('Object: %s/%s\n' % (info_dict['object_folder'],info_dict['object_file']))
    elif info_dict['etc_type'] == 'snr':
        f.write ('Compute the SNR reached when observing the object during %d exposure(s) of %.2f seconds\n' % (info_dict['Nexp'],info_dict['total_exposure_time']/info_dict['Nexp']))
        if info_dict['object_type'] == "magnitude":
            f.write ('Object: magnitude of %.2f\n' % info_dict['object_magnitude'])
        else:
            f.write ('Object: %s/%s\n' % (info_dict['object_folder'],info_dict['object_file']))
    elif info_dict['etc_type'] == 'mag':
        f.write ('Compute the magnitude reached when observing during %d exposure(s) of %.2f seconds with a SNR=%.2f\n' % (info_dict['Nexp'],info_dict['total_exposure_time']/info_dict['Nexp'],info_dict['SNR']))
    # --- Passband ---
    f.write ('\nInformation about Passband:\n')
    f.write ('----------------------------\n')
    f.write ('Filter: %s %s\n' % (info_dict['filter_folder'],info_dict['filter_band']))
    f.write ('Cut_on: %.f angstroms\n' % info_dict['Passband_cuton'])
    f.write ('Effective wavelength: %.f angstroms\n' % info_dict['effWavelength'])
    f.write ('Cut_off: %.f angstroms\n' % info_dict['Passband_cutoff'])
    # --- Site / observing conditions ---
    f.write ('\nInformation about Local conditions:\n')
    f.write ('----------------------------\n')
    f.write ('Site: %s\n' % info_dict['sky_site'])
    f.write ('Seeing at zenith: %.2f\n' % info_dict['seeing_zenith'])
    f.write ('Elevation: %.2f degrees\n' % info_dict['elevation'])
    f.write ('Airmass: %.2f\n' % info_dict['airmass'])
    f.write ('Moon age: %.2f\n' % info_dict['moon_age'])
    # --- Mean efficiencies (detailed per-component, or system-level only) ---
    # ("EFFICENCIES" typo is kept: it is part of the report output text.)
    if info_dict['detailed_trans']==1:
        f.write ('\nMEAN EFFICENCIES:\n')
        f.write ('------------------\n')
        f.write ('Obscuration: %.2f \n' % (1.-info_dict['obstruction']))
        f.write ('Telescope: %.2f (+obs: %.2f) \n' % (info_dict['trans_mean_tel'],info_dict['trans_mean_tel']*(1.-info_dict['obstruction'])))
        f.write ('Instrument: %.2f \n' % info_dict['trans_mean_inst'])
        f.write ('Optics (tel+inst): %.2f (+obs: %.2f) \n' % (info_dict['trans_mean_optics'],info_dict['trans_mean_optics']*(1.-info_dict['obstruction'])))
        f.write ('Filter: %.2f \n' % info_dict['trans_mean_filter'])
        f.write ('Atmosphere: %.2f \n' % info_dict['trans_mean_atm'])
        f.write ('Camera: %.2f \n' % info_dict['trans_mean_cam'])
        f.write ('System: %.2f (+obs: %.2f)\n' % (info_dict['trans_mean_system'],info_dict['trans_mean_system']*(1-info_dict['obstruction'])))
    elif info_dict['detailed_trans'] == 0:
        f.write ('\nMEAN EFFICENCIES:\n')
        f.write ('------------------\n')
        f.write ('Obscuration: %.2f \n' % (1.-info_dict['obstruction']))
        f.write ('System: %.2f (+obs: %.2f)\n' % (info_dict['trans_mean_system'],info_dict['trans_mean_system']*(1-info_dict['obstruction'])))
    f.write ('\nZeropoint: %.2f (%s mag) \n' % (info_dict['zeropoint'],info_dict['photometry_system']))
    # --- Main result, phrased according to the computation mode ---
    if info_dict['etc_type'] == 'snr':
        f.write ('\n\nA magnitude (%s system) of %.2f in %s band within a total exposure time of %.2f seconds splited in %d exposure(s), implies a total SNR of :\n\n' %(info_dict['photometry_system'],info_dict['mag'] ,info_dict['filter_band'],info_dict['exptime'],info_dict['Nexp']))
        f.write ('\t - Integrated SNR over %d pixels: %.2f \n\n' % (info_dict['npix'], info_dict['SNR']))
        f.write ('\nA magnitude (%s system) of %.2f in %s band within a total exposure time of %.2f seconds splited in %d exposure(s), implies a SNR for the central pixel of of :\n\n' %(info_dict['photometry_system'],info_dict['mag_pix'],info_dict['filter_band'],info_dict['DIT_pix']*info_dict['Nexp'],info_dict['Nexp']))
        f.write ('\t - SNR of the central pixel: %.2f \n\n' % info_dict['SNR_pix'])
    elif info_dict['etc_type'] == 'time':
        f.write ('\n\nA magnitude (%s system) of %.2f in %s band with a total SNR of %.2f requires:\n\n' %(info_dict['photometry_system'],info_dict['mag'],info_dict['filter_band'],info_dict['SNR']))
        f.write ('\t - a Total exposure time of : %.2f \n\n' % (info_dict['DIT']*info_dict['Nexp']))
        f.write ('\n\nA magnitude (%s system) of %.2f in %s band with a SNR of %.2f for the central pixel requires:\n\n' %(info_dict['photometry_system'],info_dict['mag_pix'],info_dict['filter_band'],info_dict['SNR']))
        f.write ('\t - a Total exposure time of : %.2f \n\n' % (info_dict['DIT_pix'] * info_dict['Nexp']))
    elif info_dict['etc_type'] == 'mag':
        f.write ('\n\nFor a total SNR=%.2f in a total exposure time of %.2f (sec) in %d exposure(s) we reach:\n\n' %(info_dict['SNR'],info_dict['exptime'], info_dict['Nexp']))
        f.write ('\t - a magnitude (%s system) of: %.2f in %s band\n\n' % (info_dict['photometry_system'],info_dict['mag'],info_dict['filter_band']))
        f.write ('\n\nFor the central pixel a SNR=%.2f in a total exposure time of %.2f (sec) in %d exposure(s) we reach:\n\n' %(info_dict['SNR'], info_dict['DIT_pix']*info_dict['Nexp'], info_dict['Nexp']))
        f.write ('\t - a magnitude (%s system) of: %.2f in %s band\n\n' % (info_dict['photometry_system'],info_dict['mag_pix'],info_dict['filter_band']))
    #f.write ('\nFull well capacity of 1 pixel: %.2f (electrons)\nInverse gain of %.2f e/ADU and %d bits implies a maximum number of electrons to be digitized of %.2f (electrons) \n' % (info_dict['cameras'][info_dict['channel']]['FWC'],info_dict['cameras'][info_dict['channel']]['gain'],info_dict['cameras'][info_dict['channel']]['bits'],info_dict['cameras'][info_dict['channel']]['gain']*(2.**(info_dict['cameras'][info_dict['channel']]['bits'])-1)))
    f.write ('\nFull well capacity of 1 pixel: %.2f (electrons)' % (info_dict['cameras'][info_dict['channel']]['FWC']))
    # --- Electron budget and saturation check: central pixel only ---
    f.write ('\n\n--------- One pixel only------------------\n')
    f.write ('\nPhoto-electrons created: central pix for %d exposure(s) of %.2f sec \n' % (info_dict['Nexp'],info_dict['DIT_pix']))
    f.write ('\tby:\n')
    f.write ('\t- Object: %10.2f (electrons)\n' % info_dict['Ftot_el_pix'])
    f.write ('\t- Sky: %10.2f (electrons)\n' % (info_dict['Sky_CountRate']*info_dict['DIT_pix']))
    f.write ('\t- Readout: %10.2f (electrons)\n' % info_dict['cameras'][info_dict['channel']]['RN'])
    f.write ('\t- Dark current: %10.2f (electrons)\n' % (info_dict['cameras'][info_dict['channel']]['DC']*info_dict['DIT_pix']))
    f.write ('\t- Digitization: %10.2f (electrons)\n' % info_dict['dig_noise'])
    # SNR recomputed in place from the electron budget above (CCD equation).
    f.write ('\nSNR: -central pixel: %.2f\n' % (np.sqrt(info_dict['Nexp'])*info_dict['Ftot_el_pix']/np.sqrt(info_dict['Ftot_el_pix'] + info_dict['factor_ima'] * ((info_dict['cameras'][info_dict['channel']]['RN']**2. + info_dict['dig_noise']**2.) + info_dict['DIT_pix'] * ( info_dict['cameras'][info_dict['channel']]['DC'] + info_dict['Sky_CountRate'] )))))
    f.write ('\nTotal of electrons collected in the central pixel during an exposure time of %d seconds: %.2f \n' % (info_dict['DIT_pix'], info_dict['N_el_tot_pix1'] ))
    # Saturation verdict: full-well, digitization ceiling, then linearity.
    if info_dict['N_el_tot_pix1'] > info_dict['cameras'][info_dict['channel']]['FWC']:
        f.write ('--> Central pixel saturated: number of electrons > Full well Capacity\n')
    elif info_dict['N_el_tot_pix1'] > info_dict['cameras'][info_dict['channel']]['gain']*(2.**(info_dict['cameras'][info_dict['channel']]['bits'])-1):
        f.write ('--> Central pixel saturated: number of electrons > number of digitizations\n')
    elif info_dict['N_el_tot_pix1'] > 1./2*info_dict['cameras'][info_dict['channel']]['FWC']:
        f.write ('--> Number of electrons in central pixel > 1/2 of Full well Capacity. Risk of non-linear response.\n')
    else:
        f.write ('--> No saturation\n')
    # --- Electron budget and saturation check: integrated over npix pixels ---
    f.write ('\n\n\n--------- Integrated over %d pixels------------------\n' % info_dict['npix'])
    f.write ('\nPhoto-electrons created: brightest pix | total of %d pixels, %d exposure(s) of %.2f sec \n' % (info_dict['npix'],info_dict['Nexp'],info_dict['DIT']))
    f.write ('\tby:\n')
    f.write ('\t- Object: %10.2f | %10.2f (electrons)\n' % (info_dict['Ftot_el']*info_dict['f_pix']/info_dict['f_PSF'], info_dict['Ftot_el']))
    f.write ('\t- Sky: %10.2f | %10.2f (electrons)\n' % (info_dict['Sky_CountRate']*info_dict['DIT'],(info_dict['Sky_CountRate'] * info_dict['npix']* info_dict['DIT'] * info_dict['Nexp'])))
    f.write ('\t- Readout: %10.2f | %10.2f (electrons)\n' % (info_dict['cameras'][info_dict['channel']]['RN'],(info_dict['cameras'][info_dict['channel']]['RN'] * info_dict['npix'] * info_dict['Nexp'])))
    f.write ('\t- Dark current: %10.2f | %10.2f (electrons)\n' % (info_dict['cameras'][info_dict['channel']]['DC']*info_dict['DIT'],(info_dict['cameras'][info_dict['channel']]['DC'] * info_dict['DIT'] * info_dict['npix'] * info_dict['Nexp'])))
    f.write ('\t- Digitization: %10.2f | %10.2f (electrons)\n' % (info_dict['dig_noise'], (info_dict['dig_noise'] * info_dict['npix'] * info_dict['Nexp'])))
    #f.write ('\nTotal noise: %.2f \n' % (np.sqrt(Ftot_el * f_PSF * DIT *Nexp + Nexp*factor_ima * npix*((RN**2. + DigN**2.) + DIT * ( DC + BN )))))
    f.write ('\nSNR: -central pixel: %.2f\n' % (np.sqrt(info_dict['Nexp'])*info_dict['Ftot_el']*info_dict['f_pix']/info_dict['f_PSF']/np.sqrt(info_dict['Ftot_el']*info_dict['f_pix']/info_dict['f_PSF'] + info_dict['factor_ima'] * ((info_dict['cameras'][info_dict['channel']]['RN']**2. + info_dict['dig_noise']**2.) + info_dict['DIT'] * ( info_dict['cameras'][info_dict['channel']]['DC'] + info_dict['Sky_CountRate'] )))))
    f.write ('     -integrated over %d pixels: %.2f\n' % (info_dict['npix'],(np.sqrt(info_dict['Nexp']) * info_dict['Ftot_el']) / np.sqrt(info_dict ['Ftot_el'] + info_dict['factor_ima'] * info_dict['npix']*((info_dict['cameras'][info_dict['channel']]['RN']**2. + info_dict['dig_noise']**2.) + info_dict['DIT'] * ( info_dict['cameras'][info_dict['channel']]['DC'] + info_dict['Sky_CountRate'] )))))
    f.write ('\nTotal of electrons collected in the brightest pixel during an exposure time of %d seconds: %.2f \n' % (info_dict['DIT'], info_dict['N_el_tot_pix2']))
    if info_dict['N_el_tot_pix2'] > info_dict['cameras'][info_dict['channel']]['FWC']:
        f.write ('--> Brightest pixel saturated: number of electrons > Full well Capacity \n')
    elif info_dict['N_el_tot_pix2'] > info_dict['cameras'][info_dict['channel']]['gain']*(2.**(info_dict['cameras'][info_dict['channel']]['bits'])-1):
        f.write ('--> Brightest pixel saturated: number of electrons > number of digitizations\n')
    elif info_dict['N_el_tot_pix2'] > 1./2*info_dict['cameras'][info_dict['channel']]['FWC']:
        f.write ('--> Number of electrons in brightest pixel > 1/2 of Full well Capacity. Risk of non-linear response.\n')
    else:
        f.write ('--> No saturation\n')
    f.write ('\nDead time: %.2f sec \n(%.2f sec for dithering, the %.2f sec for the readout are not taken into account)\n' % (info_dict['deadtime_tot'],info_dict['T_dithering'],info_dict['cameras'][info_dict['channel']]['ReadoutTime']))
    f.close()
| [
11748,
299,
32152,
355,
45941,
198,
198,
4299,
3551,
62,
43420,
7,
10951,
62,
11600,
2599,
198,
220,
220,
220,
37227,
19430,
262,
2482,
287,
257,
2393,
37227,
628,
220,
220,
220,
277,
796,
1280,
10786,
4,
82,
14,
43420,
14,
43420,
6... | 2.306909 | 5,109 |
import pytest
from rlp.sedes import big_endian_int
from rlp.sedes.lists import CountableList
from rlp import SerializationError
| [
11748,
12972,
9288,
198,
6738,
374,
34431,
13,
36622,
274,
1330,
1263,
62,
437,
666,
62,
600,
198,
6738,
374,
34431,
13,
36622,
274,
13,
20713,
1330,
2764,
540,
8053,
198,
6738,
374,
34431,
1330,
23283,
1634,
12331,
628
] | 3.307692 | 39 |
"""
Tools for asking the ESP about any alarms that have been raised,
and telling the user about them if so.
The top alarmbar shows little QPushButtons for each alarm that is currently active.
If the user clicks a button, they are shown the message text and a "snooze" button
for that alarm.
There is a single physical snooze button which is manipulated based on which alarm
the user has selected.
"""
import sys
from communication import rpi
from PyQt5 import QtCore, QtWidgets
BITMAP = {1 << x: x for x in range(32)}
ERROR = 0
WARNING = 1
class SnoozeButton:
"""
Takes care of snoozing alarms.
Class members:
- _esp32: ESP32Serial object for communication
- _alarm_h: AlarmHandler
- _alarmsnooze: QPushButton that user will press
- _code: The alarm code that the user is currently dealing with
- _mode: Whether the current alarm is an ERROR or a WARNING
"""
def __init__(self, esp32, alarm_h, alarmsnooze):
"""
Constructor
Arguments: see relevant class members
"""
self._esp32 = esp32
self._alarm_h = alarm_h
self._alarmsnooze = alarmsnooze
self._alarmsnooze.hide()
self._code = None
self._mode = None
self._alarmsnooze.clicked.connect(self._on_click_snooze)
self._alarmsnooze.setStyleSheet(
'background-color: rgb(0,0,205); color: white; font-weight: bold;')
self._alarmsnooze.setMaximumWidth(150)
def set_code(self, code):
"""
Sets the alarm code
Arguments:
- code: Integer alarm code
"""
self._code = code
self._alarmsnooze.setText('Snooze %s' % str(BITMAP[self._code]))
def set_mode(self, mode):
"""
Sets the mode.
Arguments:
- mode: ALARM or WARNING
"""
self._mode = mode
def show(self):
"""
Shows the snooze alarm button
"""
self._alarmsnooze.show()
def _on_click_snooze(self):
"""
The callback function called when the alarm snooze button is clicked.
"""
if self._mode not in [WARNING, ERROR]:
raise Exception('mode must be alarm or warning.')
# Reset the alarms/warnings in the ESP
# If the ESP connection fails at this
# time, raise an error box
if self._mode == ERROR:
self._esp32.snooze_hw_alarm(self._code)
self._alarm_h.snooze_alarm(self._code)
else:
self._esp32.reset_warnings()
self._alarm_h.snooze_warning(self._code)
class AlarmButton(QtWidgets.QPushButton):
"""
The alarm and warning buttons shown in the top alarmbar.
Class members:
- _mode: Whether this alarm is an ERROR or a WARNING
- _code: The integer code for this alarm.
- _errstr: Test describing this alarm.
- _label: The QLabel to populate with the error message, if the user
clicks our button.
- _snooze_btn: The SnoozeButton to manipulate if the user clicks our
button.
"""
def _on_click_event(self):
"""
The callback function called when the user clicks on an alarm button
"""
# Set the label showing the alarm name
style = """QLabel {
background-color: %s;
color: white;
font-weight: bold;
}""" % self._bkg_color
self._label.setStyleSheet(style)
self._label.setText(self._errstr)
self._label.show()
self._activate_snooze_btn()
def _activate_snooze_btn(self):
"""
Activates the snooze button that will silence this alarm
"""
self._snooze_btn.set_mode(self._mode)
self._snooze_btn.set_code(self._code)
self._snooze_btn.show()
class AlarmHandler:
"""
This class starts a QTimer dedicated to checking is there are any errors
or warnings coming from ESP32
Class members:
- _esp32: ESP32Serial object for communication
- _alarm_time: Timer that will periodically ask the ESP about any alarms
- _err_buttons: {int: AlarmButton} for any active ERROR alarms
- _war_buttons: {int: AlarmButton} for any active WARNING alarms
- _alarmlabel: QLabel showing text of the currently-selected alarm
- _alarmstack: Stack of QPushButtons for active alarms
- _alarmsnooze: QPushButton for snoozing an alarm
- _snooze_btn: SnoozeButton that manipulates _alarmsnooze
"""
def __init__(self, config, esp32, alarmbar, hwfail_func):
"""
Constructor
Arguments: see relevant class members.
"""
self._esp32 = esp32
self._alarm_timer = QtCore.QTimer()
self._alarm_timer.timeout.connect(self.handle_alarms)
self._alarm_timer.start(config["alarminterval"] * 1000)
self._err_buttons = {}
self._war_buttons = {}
self._hwfail_func = hwfail_func
self._hwfail_codes = [1 << code for code in config['hwfail_codes']]
self._alarmlabel = alarmbar.findChild(QtWidgets.QLabel, "alarmlabel")
self._alarmstack = alarmbar.findChild(QtWidgets.QHBoxLayout, "alarmstack")
self._alarmsnooze = alarmbar.findChild(QtWidgets.QPushButton, "alarmsnooze")
self._snooze_btn = SnoozeButton(self._esp32, self, self._alarmsnooze)
def handle_alarms(self):
"""
The callback method which is called periodically to check if the ESP raised any
alarm or warning.
"""
# Retrieve alarms and warnings from the ESP
esp32alarm = self._esp32.get_alarms()
esp32warning = self._esp32.get_warnings()
#
# ALARMS
#
if esp32alarm:
errors = esp32alarm.strerror_all()
alarm_codes = esp32alarm.get_alarm_codes()
for alarm_code, err_str in zip(alarm_codes, errors):
if alarm_code in self._hwfail_codes:
self._hwfail_func(err_str)
print("Critical harware failure")
if alarm_code not in self._err_buttons:
btn = AlarmButton(ERROR, alarm_code, err_str,
self._alarmlabel, self._snooze_btn)
self._alarmstack.addWidget(btn)
self._err_buttons[alarm_code] = btn
#
# WARNINGS
#
if esp32warning:
errors = esp32warning.strerror_all()
warning_codes = esp32warning.get_alarm_codes()
for warning_code, err_str in zip(warning_codes, errors):
if warning_code not in self._war_buttons:
btn = AlarmButton(
WARNING, warning_code, err_str, self._alarmlabel, self._snooze_btn)
self._alarmstack.addWidget(btn)
self._war_buttons[warning_code] = btn
def snooze_alarm(self, code):
"""
Graphically snoozes alarm corresponding to 'code'
Arguments:
- code: integer alarm code
"""
if code not in self._err_buttons:
raise Exception('Cannot snooze code %s as alarm button doesn\'t exist.' % code)
self._err_buttons[code].deleteLater()
del self._err_buttons[code]
self._alarmlabel.setText('')
self._alarmlabel.setStyleSheet('QLabel { background-color: black; }')
self._alarmsnooze.hide()
def snooze_warning(self, code):
"""
Graphically snoozes warning corresponding to 'code'
Arguments:
- code: integer alarm code
"""
if code not in self._war_buttons:
raise Exception('Cannot snooze code %s as warning button doesn\'t exist.' % code)
self._war_buttons[code].deleteLater()
del self._war_buttons[code]
self._alarmlabel.setText('')
self._alarmlabel.setStyleSheet('QLabel { background-color: black; }')
self._alarmsnooze.hide()
class CriticalAlarmHandler:
"""
Handles severe communication and hardware malfunction errors.
These errors have a low chance of recovery, but this class handles irrecoverable as well as
potentially recoverable errors (with options to retry).
"""
def __init__(self, mainparent, esp32):
"""
Main constructor. Grabs necessary widgets from the main window
Arguments:
- mainparent: Reference to the mainwindow widget.
- esp32: Reference to the ESP32 interface.
"""
self._esp32 = esp32
self._toppane = mainparent.toppane
self._criticalerrorpage = mainparent.criticalerrorpage
self._bottombar = mainparent.bottombar
self._criticalerrorbar = mainparent.criticalerrorbar
self._mainparent = mainparent
self.nretry = 0
self._label_criticalerror = mainparent.findChild(QtWidgets.QLabel, "label_criticalerror")
self._label_criticaldetails = mainparent.findChild(
QtWidgets.QLabel,
"label_criticaldetails")
self._button_retrycmd = mainparent.findChild(QtWidgets.QPushButton, "button_retrycmd")
def show_critical_error(self, text, details=""):
"""
Shows the critical error in the mainwindow.
This includes changing the screen to red and displaying a big message to this effect.
"""
self._label_criticalerror.setText(text)
self._toppane.setCurrentWidget(self._criticalerrorpage)
self._bottombar.setCurrentWidget(self._criticalerrorbar)
self._label_criticaldetails.setText(details)
rpi.start_alarm_system()
self._mainparent.repaint()
input("Hang on wait reboot")
def call_system_failure(self, details=""):
"""
Calls a system failure and sets the mainwindow into a state that is irrecoverable without
maintenance support.
"""
self._button_retrycmd.hide()
disp_msg = "*** SYSTEM FAILURE ***\nCall the Maintenance Service"
details = str(details).replace("\n", "")
self.show_critical_error(disp_msg, details=details)
| [
37811,
198,
33637,
329,
4737,
262,
9428,
546,
597,
36302,
326,
423,
587,
4376,
11,
198,
392,
5149,
262,
2836,
546,
606,
611,
523,
13,
198,
198,
464,
1353,
10436,
5657,
2523,
1310,
1195,
49222,
1537,
27288,
329,
1123,
10436,
326,
318,
... | 2.337014 | 4,347 |
choice=input("是否需要输入新的学生信息(Yes/Y表示需要需要录入)?")
studentList=[]
if choice.upper()=="YES" or choice.upper()=="Y":
isError=False
student={}
student["name"]=input("请输入姓名:")
student["ID"]=input("请输入学号:")
score1=float(input("请输入语文成绩:"))
if score1 <= 100 and score1 >= 0:
student["score1"]=score1
else:
print("输入的语文成绩有错误!")
isError=True
score2=float(input("请输入数学成绩:"))
if score2 <= 100 and score2 >= 0:
student["score2"]=score2
else:
print("输入的数学成绩有错误!")
isError=True
score3=float(input("请输入英语成绩:"))
if score3 <= 100 and score3 >= 0:
student["score3"]=score3
else:
print("输入的英语成绩有错误!")
isError=True
if isError==False:
student["total"]=student["score1"]+student["score2"]+student["score3"]
studentList.append(student)
print(student["name"]+"的成绩录入成功!")
else:
print("输入有误,录入成绩失败!") | [
25541,
28,
15414,
7203,
42468,
28938,
99,
165,
250,
222,
17358,
223,
164,
122,
241,
17739,
98,
23877,
108,
21410,
27764,
99,
37955,
46479,
94,
162,
223,
107,
7,
5297,
14,
56,
26193,
101,
163,
97,
118,
165,
250,
222,
17358,
223,
165,... | 1.522727 | 616 |
from PySide2.QtWidgets import QWidget
from PySide2.QtCore import Qt, QPoint
from PySide2.QtGui import QPainter, QPen, QColor, QFont
import os
import math
from Assets.mathematical_scripts.util import getElectrodesList
#Function which load electrodes position, all are based on the up-left-quarter electrodes positions
#Function which load the connection from the PSC Matrix Size : Nelec x Nelec
#Stuff to draw head, connection, electrodes using QPainter
| [
6738,
9485,
24819,
17,
13,
48,
83,
54,
312,
11407,
1330,
1195,
38300,
201,
198,
6738,
9485,
24819,
17,
13,
48,
83,
14055,
1330,
33734,
11,
1195,
12727,
201,
198,
6738,
9485,
24819,
17,
13,
48,
83,
8205,
72,
1330,
1195,
38490,
353,
... | 3.0375 | 160 |
import os
from datetime import datetime
import jinja2
from flask import Flask, redirect, render_template
from flask_cors import CORS
from raven.contrib.flask import Sentry
from werkzeug.middleware.proxy_fix import ProxyFix
from api import (admin_api, copy_study_api, dashboard_api, data_access_api, mobile_api,
other_researcher_apis, participant_administration, push_notifications_api, study_api,
survey_api)
from api.tableau_api.views import SummaryStatisticDailyStudyView
from api.tableau_api.web_data_connector import WebDataConnector
from authentication.admin_authentication import is_logged_in
from config.settings import SENTRY_ELASTIC_BEANSTALK_DSN, SENTRY_JAVASCRIPT_DSN
from libs.security import set_secret_key
from libs.sentry import normalize_sentry_dsn
from pages import (admin_pages, data_access_web_form, forest_pages, login_pages, mobile_pages,
participant_pages, survey_designer, system_admin_pages)
# Flask App
app = Flask(__name__, static_folder="frontend/static")
app.jinja_loader = jinja2.ChoiceLoader(
[app.jinja_loader, jinja2.FileSystemLoader("frontend/templates")]
)
set_secret_key(app)
app.wsgi_app = ProxyFix(app.wsgi_app)
CORS(app)
# Flask Blueprints
app.register_blueprint(login_pages.login_pages)
app.register_blueprint(mobile_api.mobile_api)
app.register_blueprint(admin_pages.admin_pages)
app.register_blueprint(mobile_pages.mobile_pages)
app.register_blueprint(system_admin_pages.system_admin_pages)
app.register_blueprint(forest_pages.forest_pages)
app.register_blueprint(survey_designer.survey_designer)
app.register_blueprint(admin_api.admin_api)
app.register_blueprint(participant_administration.participant_administration)
app.register_blueprint(survey_api.survey_api)
app.register_blueprint(study_api.study_api)
app.register_blueprint(data_access_api.data_access_api)
app.register_blueprint(data_access_web_form.data_access_web_form)
app.register_blueprint(other_researcher_apis.other_researcher_apis)
app.register_blueprint(copy_study_api.copy_study_api)
app.register_blueprint(dashboard_api.dashboard_api)
app.register_blueprint(push_notifications_api.push_notifications_api)
app.register_blueprint(participant_pages.participant_pages)
SummaryStatisticDailyStudyView.register_urls(app)
WebDataConnector.register_urls(app)
# Jinja
app.jinja_env.globals['current_year'] = datetime.now().strftime('%Y')
# Sentry is not required, that was too much of a hassle
if SENTRY_ELASTIC_BEANSTALK_DSN:
sentry = Sentry(app, dsn=normalize_sentry_dsn(SENTRY_ELASTIC_BEANSTALK_DSN))
@app.route("/<page>.html")
# this would be called every page load in the context processor
DERIVED_DSN = normalize_sentry_dsn(SENTRY_JAVASCRIPT_DSN)
@app.context_processor
# Extra Production settings
if not __name__ == '__main__':
# Points our custom 404 page (in /frontend/templates) to display on a 404 error
@app.errorhandler(404)
# Extra Debugging settings
if __name__ == '__main__':
app.run(host='0.0.0.0', port=int(os.getenv("PORT", "8080")), debug=True)
| [
11748,
28686,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
11748,
474,
259,
6592,
17,
198,
6738,
42903,
1330,
46947,
11,
18941,
11,
8543,
62,
28243,
198,
6738,
42903,
62,
66,
669,
1330,
327,
20673,
198,
6738,
37735,
13,
3642,
82... | 2.838951 | 1,068 |
"""
Modulo que transforma la rúbrica en excel en un formato fácil de pasar a u-cursos
"""
from typing import Tuple
from pathlib import Path
import os
import pandas as pd
INDICE_NOMBRE_ALUMNO = 0
SUBSEC_CODIGO_FUENTE = ("Funcionalidad", "Diseño")
SECCIONES = ("Código Fuente", "Coverage", "Javadoc", "Resumen")
NOTA = "Nota"
COMENTARIOS = ("Comentarios", "Corrector")
SEC_ADICIONALES = "Adicionales"
COVERAGE = "Porcentaje de coverage"
ROOT = Path(os.path.dirname(os.path.realpath(__file__)))
def get_total(puntaje: str):
""" Borra el substring `Total: ` del puntaje """
return puntaje.replace("Total: ", "").replace(",", ".")
def excel_a_string(excel_filename: str) -> Tuple[str, str]:
""" Convierte la rúbrica a una tupla fácil de pasar a un archivo .txt
:param excel_filename: el nombre del excel con la rúbrica
:return: una tupla con el nombre del alumno y los comentarios de revisión
"""
revision = ""
nombre_alumno = ""
nota = ""
a = pd.read_excel(excel_filename, header=None)
for index, row in a.iterrows():
if index == INDICE_NOMBRE_ALUMNO:
nombre_alumno = f"{row[1]}"
item = row[0]
# Puntajes totales de las subsecciones
if item in SUBSEC_CODIGO_FUENTE:
revision += "\n" + "=" * 80 + f"\n{item}: {row[2]} / {get_total(row[3])}\n" \
+ "=" * 80 + "\n"
# Puntajes totales de las secciones
elif item in SECCIONES:
revision += "\n" + "#" * 80 + f"\n{item}: {row[2]} / {get_total(row[3])}\n" \
+ "#" * 80 + "\n"
# Nota final
elif item == NOTA:
nota = f"{row[3]}"
# Notas del corrector
elif item in COMENTARIOS:
revision += f"\n{item}: {row[1]}"
# Descuentos adicionales
elif item == SEC_ADICIONALES:
revision += "\n" + "#" * 80 + f"\n{item}: {row[2]}\n" + "#" * 80 + "\n"
# Detalle de los descuentos
elif index > 1 and row[2] != 0:
if item == COVERAGE:
if row[3] != 0:
revision += f"\n{item}: {row[2] * 100}% = {row[3]}"
else:
revision += f"\n{row[0]}: {row[1]}x{row[2]} = {row[3]}"
if not nombre_alumno:
raise Exception("Falta nombre del alumno!!")
return nombre_alumno, f"Alumno: {nombre_alumno}\nNota: {nota}\n\n{revision}"
if __name__ == '__main__':
NOMBRE_ALUMNO, REVISION = excel_a_string(f"Rubrica_T2.xlsx")
with open(f"Comentarios {NOMBRE_ALUMNO}.txt", "w+",
encoding='utf-8') as comentarios_alumno:
comentarios_alumno.write(REVISION)
| [
37811,
198,
5841,
43348,
8358,
6121,
64,
8591,
374,
21356,
65,
30997,
551,
27336,
551,
555,
1296,
5549,
277,
6557,
2856,
390,
38836,
283,
257,
334,
12,
66,
1834,
418,
198,
37811,
198,
6738,
19720,
1330,
309,
29291,
198,
6738,
3108,
80... | 2.007564 | 1,322 |
import re
| [
11748,
302,
628,
628,
198
] | 2.8 | 5 |
"""
StratoDem Analytics : __test_snapshot_test_case
Principal Author(s) : Michael Clawar
Secondary Author(s) :
Description :
Notes :
March 27, 2018
"""
import dash_html_components as html
from snapshot_test import DashSnapshotTestCase
| [
37811,
198,
13290,
5549,
11522,
30437,
1058,
11593,
9288,
62,
45380,
9442,
62,
9288,
62,
7442,
198,
42904,
8521,
6434,
7,
82,
8,
1058,
3899,
30358,
283,
198,
12211,
560,
6434,
7,
82,
8,
1058,
198,
11828,
1058,
198,
198,
16130,
1058,
... | 3.226667 | 75 |
# Generated by Django 2.2.5 on 2019-09-25 13:37
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
20,
319,
13130,
12,
2931,
12,
1495,
1511,
25,
2718,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import logging
import numpy as np
import os
from jiminy import pyprofile
import sys
from jiminy import error
from jiminy.vncdriver import server_messages
logger = logging.getLogger(__name__)
# # TODO: we don't seem to be able to have multiple independent
# # windows at once
# def update_rectangle(self, x, y, width, height, data):
# self._update_rgbarray(x, y, width, height, update)
# def copy_rectangle(self, src_x, src_y, x, y, width, height):
# assert self._window
# rectangle = self.texture.get_region(src_x, self._height-height-src_y, width, height)
# self.texture.blit_into(rectangle.get_image_data(), x, self._height-height-y, 0)
# def fill_rectangle(self, x, y, width, height, color):
# import pyglet
# # While this technically works, it's super slow
# update = np.frombuffer(color, dtype=np.uint8)
# r, g, b = update[self._color_cycle]
# image_pattern = pyglet.image.SolidColorImagePattern(color=(r, g, b, 0))
# image = image_pattern.create_image(width, height)
# self.texture.blit_into(image, x, self._height-height-y, 0)
# def commit(self):
# self._window.clear()
# self._window.switch_to()
# self.texture.blit(0, 0)
# self._is_updated = True
| [
11748,
18931,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
6738,
474,
320,
3541,
1330,
12972,
13317,
198,
11748,
25064,
198,
198,
6738,
474,
320,
3541,
1330,
4049,
198,
6738,
474,
320,
3541,
13,
85,
10782,
26230,
1330,
43... | 2.471698 | 530 |
"""Amplitude statistical module."""
import numpy as np
import scipy.stats
def mean(amplitude_values):
"""Calculate the mean value of the values.
Args:
amplitude_values (numpy.ndarray):
Array of floats representing signal values.
Returns:
float:
`mean` value of the input array.
"""
return np.mean(amplitude_values)
def std(amplitude_values):
"""Compute the arithmetic mean value of the values.
Args:
amplitude_values (numpy.ndarray):
Array of floats representing signal values.
Returns:
float:
`std` value of the input array.
"""
return np.std(amplitude_values)
def var(amplitude_values):
"""Compute the variance value of the values.
Args:
amplitude_values (numpy.ndarray):
Array of floats representing signal values.
Returns:
float:
`std` value of the input array.
"""
return np.var(amplitude_values)
def rms(amplitude_values):
"""Compute the RMS (Root Mean Square) of the values.
Args:
amplitude_values (numpy.ndarray):
Array of floats representing signal values.
Returns:
float:
RMS of the input array.
"""
return np.sqrt((np.array(amplitude_values) ** 2).mean())
def crest_factor(amplitude_values):
"""Compute the ratio of the peak to the RMS.
Used for estimating the amount of impact wear in a bearing.
Args:
amplitude_values (numpy.ndarray):
Array of floats representing signal values.
Returns:
float:
The crest factor of the inputted values.
"""
peak = max(np.abs(amplitude_values))
return peak / rms(amplitude_values)
def skew(amplitude_values):
"""Compute the sample skewness of an array of values.
Args:
amplitude_values (numpy.ndarray):
Array of floats representing signal values.
Returns:
float:
The skewness value of the input array.
"""
return scipy.stats.skew(amplitude_values)
def kurtosis(amplitude_values, fisher=True, bias=True):
"""Compute the kurtosis ,Fisher or Pearson, of an array of values.
Args:
amplitude_values (numpy.ndarray):
Array of floats representing signal values.
fisher (bool):
If ``True``, Fisher’s definition is used (normal ==> 0.0). If ``False``,
Pearson’s definition is used (normal ==> 3.0). Defaults to ``True``.
bias (bool):
If ``False``, then the calculations are corrected for statistical bias.
Defaults to ``True``.
Returns:
float:
The kurtosis value of the input array. If all values are equal, return
`-3` for Fisher's definition and `0` for Pearson's definition.
"""
return scipy.stats.kurtosis(amplitude_values, fisher=fisher, bias=bias)
| [
37811,
5840,
489,
3984,
13905,
8265,
526,
15931,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
34242,
628,
198,
4299,
1612,
7,
321,
489,
3984,
62,
27160,
2599,
198,
220,
220,
220,
37227,
9771,
3129,
378,
262,
... | 2.485909 | 1,171 |
"""add change.outdated
Revision ID: 7ef7dfa2ca3a
Revises: 37a702b7f58e
Create Date: 2016-08-09 08:59:04.441926
"""
# revision identifiers, used by Alembic.
revision = '7ef7dfa2ca3a'
down_revision = '37a702b7f58e'
import warnings
from alembic import op
import sqlalchemy as sa
from hubtty.dbsupport import sqlite_alter_columns
| [
37811,
2860,
1487,
13,
448,
8715,
198,
198,
18009,
1166,
4522,
25,
767,
891,
22,
7568,
64,
17,
6888,
18,
64,
198,
18009,
2696,
25,
5214,
64,
36680,
65,
22,
69,
3365,
68,
198,
16447,
7536,
25,
1584,
12,
2919,
12,
2931,
8487,
25,
... | 2.455882 | 136 |
from __future__ import absolute_import, division, print_function
import unittest
import ctypes
import sys
from blaze.compute import ckernel
from blaze.py2help import skipIf
from dynd import nd, ndt, _lowlevel
# On 64-bit windows python 2.6 appears to have
# ctypes bugs in the C calling convention, so
# disable these tests.
win64_py26 = (sys.platform == 'win32' and
ctypes.sizeof(ctypes.c_void_p) == 8 and
sys.version_info[:2] <= (2, 6))
if __name__ == '__main__':
unittest.main()
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
198,
11748,
555,
715,
395,
198,
11748,
269,
19199,
198,
11748,
25064,
198,
198,
6738,
31259,
13,
5589,
1133,
1330,
269,
33885,
198,
6738,
31259,
13,
90... | 2.661538 | 195 |
import cv2
from glob import glob
import numpy as np
from tqdm import tqdm
im = cv2.imread('/home/storage/lsy/fashion/FashionAI_Keypoint_Detection/wu_train/Images/blouse/ff210d1818f907693a03a6ea2eb39f77.jpg')
for fn in tqdm(glob('/home/storage/lsy/fashion/FashionAI_Keypoint_Detection/r1_train/Images/blouse/*.jpg')):
im2 = cv2.imread(fn)
if im.shape == im2.shape:
if np.all(im==im2):
print(fn)
| [
11748,
269,
85,
17,
201,
198,
6738,
15095,
1330,
15095,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
201,
198,
201,
198,
320,
796,
269,
85,
17,
13,
320,
961,
10786,
14,
11195,
14,
... | 2.086124 | 209 |
#!/usr/bin/env python
import os
import sys
from os.path import abspath, dirname
if __name__ == "__main__":
project_dir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, project_dir)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zc_test_app.settings")
from django.core.management import execute_from_command_line
from django import setup
setup()
execute_from_command_line(sys.argv)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
6738,
28686,
13,
6978,
1330,
2352,
6978,
11,
26672,
3672,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
2... | 2.63354 | 161 |
dirs = [
"./src/mobicoin-cli-res.rc",
"./src/mobicoin-cli.cpp",
"./src/mobicoind-res.rc",
"./src/mobicoind.cpp",
"./src/qt/locale/mobicoin_bar.ts",
"./src/qt/locale/mobicoin_bg.ts",
"./src/qt/locale/mobicoin_ca.ts",
"./src/qt/locale/mobicoin_cmn.ts",
"./src/qt/locale/mobicoin_cs.ts",
"./src/qt/locale/mobicoin_da.ts",
"./src/qt/locale/mobicoin_de.ts",
"./src/qt/locale/mobicoin_el.ts",
"./src/qt/locale/mobicoin_en.ts",
"./src/qt/locale/mobicoin_eo.ts",
"./src/qt/locale/mobicoin_es.ts",
"./src/qt/locale/mobicoin_fi.ts",
"./src/qt/locale/mobicoin_fr.ts",
"./src/qt/locale/mobicoin_hu_HU.ts",
"./src/qt/locale/mobicoin_it.ts",
"./src/qt/locale/mobicoin_lv_LV.ts",
"./src/qt/locale/mobicoin_nb.ts",
"./src/qt/locale/mobicoin_nl.ts",
"./src/qt/locale/mobicoin_pl.ts",
"./src/qt/locale/mobicoin_pt.ts",
"./src/qt/locale/mobicoin_pt_BR.ts",
"./src/qt/locale/mobicoin_ru.ts",
"./src/qt/locale/mobicoin_sk.ts",
"./src/qt/locale/mobicoin_sv.ts",
"./src/qt/locale/mobicoin_tr.ts",
"./src/qt/locale/mobicoin_vi.ts",
"./src/qt/locale/mobicoin_zh_CN.ts",
"./src/qt/locale/mobicoin_zh_HK.ts",
"./src/qt/mobicoin.cpp",
"./src/qt/mobicoin.qrc",
"./src/qt/mobicoinstrings.cpp",
"./src/qt/res/icons/mobicoin.icns",
"./src/qt/res/images/mobicoin_logo_horizontal.png",
"./src/qt/res/mobicoin-qt-res.rc",
"./src/test/test_mobicoin.cpp",
]
import os
src = "mobicoin"
dst = "unpay"
for s in dirs:
d = s.replace(src,dst)
cmd = "git mv "+ s + " " + d
print cmd
os.system(cmd)
| [
15908,
82,
796,
685,
198,
1911,
14,
10677,
14,
39949,
3713,
259,
12,
44506,
12,
411,
13,
6015,
1600,
198,
1911,
14,
10677,
14,
39949,
3713,
259,
12,
44506,
13,
20322,
1600,
198,
1911,
14,
10677,
14,
39949,
3713,
521,
12,
411,
13,
... | 1.918848 | 764 |
from tamr_unify_client.models.base_resource import BaseResource
class AttributeConfiguration(BaseResource):
"""The configurations of Unify Attributes.
See https://docs.tamr.com/reference#the-attribute-configuration-object
"""
@classmethod
@property
def relative_id(self):
""":type: str"""
return self._data.get("relativeId")
@property
def id(self):
""":type: str"""
return self._data.get("id")
@property
def relative_attribute_id(self):
""":type: str"""
return self._data.get("relativeAttributeId")
@property
def attribute_role(self):
""":type: str"""
return self._data.get("attributeRole")
@property
def similarity_function(self):
""":type: str"""
return self._data.get("similarityFunction")
@property
def enabled_for_ml(self):
""":type: bool"""
return self._data.get("enabledForMl")
@property
def tokenizer(self):
""":type: str"""
return self._data.get("tokenizer")
@property
def numeric_field_resolution(self):
""":type: array (?) """
return self._data.get("numericFieldResolution")
@property
def attribute_name(self):
""":type: str"""
return self._data.get("attributeName")
| [
6738,
21885,
81,
62,
403,
1958,
62,
16366,
13,
27530,
13,
8692,
62,
31092,
1330,
7308,
26198,
628,
198,
4871,
3460,
4163,
38149,
7,
14881,
26198,
2599,
198,
220,
220,
220,
37227,
464,
25412,
286,
791,
1958,
49213,
13,
628,
220,
220,
... | 2.506641 | 527 |
from pypy.tool.memusage import log2gnumeric
log = """
[1000] ...
[2000] {gc-collect
.----------- Full collection ------------------
| used before collection:
| in ArenaCollection: 500 bytes
| raw_malloced: 100 bytes
| used after collection:
| in ArenaCollection: 300 bytes
| raw_malloced: 50 bytes
| number of major collects: 1
`----------------------------------------------
[3000] gc-collect}
[4000] {gc-collect
.----------- Full collection ------------------
| used before collection:
| in ArenaCollection: 600 bytes
| raw_malloced: 200 bytes
| used after collection:
| in ArenaCollection: 400 bytes
| raw_malloced: 100 bytes
| number of major collects: 1
`----------------------------------------------
[5000] gc-collect}
...
...
[6000] {translation-task
starting annotate
...
...
[7000] translation-task}
[8000] {translation-task
starting rtype_lltype
...
...
[9000] translation-task}
...
[a000] ...
"""
log = log.replace('\n', '')
| [
6738,
279,
4464,
88,
13,
25981,
13,
11883,
26060,
1330,
2604,
17,
4593,
39223,
198,
198,
6404,
796,
37227,
198,
58,
12825,
60,
2644,
198,
58,
11024,
60,
1391,
36484,
12,
33327,
198,
198,
13,
32284,
6462,
4947,
34400,
438,
198,
91,
9... | 2.543981 | 432 |
x = 206
y = 42
if x < y:
print(str(x) + ' is greater than ' + str(y))
| [
87,
796,
27253,
198,
88,
796,
5433,
198,
361,
2124,
1279,
331,
25,
198,
220,
220,
220,
3601,
7,
2536,
7,
87,
8,
1343,
705,
318,
3744,
621,
705,
1343,
965,
7,
88,
4008,
198
] | 2.114286 | 35 |
import torch
from torch import nn
from torch.nn import functional as F
from PIL import Image
import numpy as np
import json
from detectron2.data import MetadataCatalog
from detectron2.structures import ImageList, Instances, BitMasks
from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
from .gt_generate import GenerateGT
from .loss import sigmoid_focal_loss, weighted_dice_loss
from .head import build_position_head, build_kernel_head, build_feature_encoder, build_thing_generator, build_stuff_generator
from .backbone_utils import build_semanticfpn, build_backbone
from .utils import topk_score, multi_apply
__all__ = ["PanopticFCN"]
@META_ARCH_REGISTRY.register()
class PanopticFCN(nn.Module):
"""
Implement PanopticFCN the paper :paper:`Fully Convolutional Networks for Panoptic Segmentation`.
"""
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* "image": Tensor, image in (C, H, W) format.
* "instances": Instances
* "sem_seg": semantic segmentation ground truth.
* Other information that's included in the original dicts, such as:
"height", "width" (int): the output resolution of the model, used in inference.
Returns:
list[dict]:
each dict is the results for one image. The dict contains the following keys:
* "instances": Instances results.
* "sem_seg": Semantic Segmentation results.
* "panoptic_seg": available when `MODEL.INFERENCE.COMBINE.ENABLE`.
See the return value of
:func:`combine_thing_and_stuff` for its format.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [self.normalizer(x) for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
features = self.backbone(images.tensor)
encode_feat = self.semantic_fpn(features)
encode_feat = self.feature_encoder(encode_feat)
features_in = [features[_feat] for _feat in self.in_feature]
pred_centers, pred_regions, pred_weights = multi_apply(self.forward_single_level, features_in)
if self.training:
gt_dict = self.get_ground_truth.generate(batched_inputs, images, pred_weights, encode_feat)
return self.losses(pred_centers, pred_regions, pred_weights, encode_feat, gt_dict)
else:
return self.inference(batched_inputs, images, pred_centers, pred_regions, pred_weights, encode_feat)
def losses(self, pred_centers, pred_regions, pred_weights, encode_feat, gt_dict):
"""
Calculate losses of prediction with generated gt dict.
Args:
pred_centers: prediction for object centers
pred_regions: prediction for stuff regions
pred_weights: generated kernel weights for things and stuff
encode_feat: encoded high-resolution feature
gt_dict(dict): a dict contains all information of gt
gt_dict = {
"center": gt gaussian scoremap for things,
"inst": gt instance target for things,
"index": gt index for things,
"index_mask": gt index mask for things,
"class": gt classes for things,
"sem_scores": gt semantic score map for stuff,
"sem_labels":gt semantic target for stuff,
"sem_index": gt index for stuff,
"sem_masks": gt index mask for stuff,
}
Returns:
loss(dict): a dict contains all information of loss function
loss = {
"loss_pos_th": position loss for things,
"loss_pos_st": position loss for stuff,
"loss_seg_th": segmentation loss for things,
"loss_seg_st": segmentation loss for stuff,
}
"""
feat_shape = encode_feat.shape
encode_feat = encode_feat.reshape(*feat_shape[:2], -1)
loss_pos_ths, loss_pos_sts, idx_feat_th, weighted_values, idx_feat_st, thing_nums, stuff_nums = \
multi_apply(self.loss_single_level, pred_centers,
pred_regions, pred_weights,
gt_dict["center"], gt_dict["inst"],
gt_dict["index_mask"], gt_dict["class"],
gt_dict["sem_scores"], gt_dict["sem_masks"],
gt_dict["sem_index"])
thing_num = sum(thing_nums)
stuff_num = sum(stuff_nums)
idx_feat_th = torch.cat(idx_feat_th, dim=2)
weighted_values = torch.cat(weighted_values, dim=1)
idx_feat_st = torch.cat(idx_feat_st, dim=1)
idx_feat_st = idx_feat_st.reshape(-1, *idx_feat_st.shape[2:])
thing_pred, _ = self.thing_generator(encode_feat, feat_shape, idx_feat_th, thing_num)
stuff_pred, _ = self.stuff_generator(encode_feat, feat_shape, idx_feat_st, stuff_num)
# for thing
thing_gt_idx = [_gt[:,:thing_nums[_idx]] for _idx, _gt in enumerate(gt_dict["index_mask"])]
thing_gt_idx = torch.cat(thing_gt_idx, dim=1)
thing_gt_idx = thing_gt_idx.reshape(-1).bool()
thing_gt_num = int(thing_gt_idx.sum())
thing_gt = [_gt[:,:thing_nums[_idx],...] for _idx, _gt in enumerate(gt_dict["inst"])]
thing_gt = torch.cat(thing_gt, dim=1)
loss_thing = weighted_dice_loss(thing_pred, thing_gt,
gt_num=thing_gt_num,
index_mask=thing_gt_idx,
instance_num=thing_num,
weighted_val=weighted_values,
weighted_num=self.weighted_num,
mode="thing",
reduction="sum")
# for stuff
stuff_gt_idx = [_gt[:,:stuff_nums[_idx]] for _idx, _gt in enumerate(gt_dict["sem_index"])]
stuff_gt_idx = torch.cat(stuff_gt_idx, dim=1)
stuff_gt_idx = stuff_gt_idx.reshape(-1).bool()
stuff_gt_num = int(stuff_gt_idx.sum())
stuff_gt = [_gt[:,:stuff_nums[_idx],...] for _idx, _gt in enumerate(gt_dict["sem_labels"])]
stuff_gt = torch.cat(stuff_gt, dim=1)
loss_stuff = weighted_dice_loss(stuff_pred, stuff_gt,
gt_num=stuff_gt_num,
index_mask=stuff_gt_idx,
instance_num=stuff_num,
weighted_val=1.0,
weighted_num=1,
mode="stuff",
reduction="sum")
loss = {}
# position loss
loss["loss_pos_th"] = self.pos_weight * sum(loss_pos_ths) / max(thing_gt_num, 1)
loss["loss_pos_st"] = self.pos_weight * sum(loss_pos_sts) / max(feat_shape[0],1)
# segmentation loss
loss["loss_seg_th"] = self.seg_weight * loss_thing / max(thing_gt_num, 1)
loss["loss_seg_st"] = self.seg_weight * loss_stuff / max(stuff_gt_num, 1)
return loss
@torch.no_grad()
@torch.no_grad()
def inference(self, batch_inputs, images, pred_centers, pred_regions, pred_weights, encode_feat):
"""
Panoptic FCN inference process.
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper`
image: ImageList in detectron2.structures
pred_centers: prediction for object centers
pred_regions: prediction for stuff regions
pred_weights: generated kernel weights for things and stuff
encode_feat: encoded high-resolution feature
Returns:
processed_results(dict): a dict contains all predicted results
processed_results={
"sem_seg": prediction of stuff for semantic segmentation eval,
"instances": prediction of things for instance segmentation eval,
"panoptic_seg": prediction of both for panoptic segmentation eval.
}
"""
results = batch_inputs
processed_results = []
for img_idx, result_img in enumerate(results):
if "instances" in result_img.keys():
img_shape = result_img["instances"].image_size
else:
img_shape = result_img["image"].shape[-2:]
ori_shape = (result_img["height"], result_img["width"])
encode_feat = encode_feat[img_idx].unsqueeze(0)
feat_shape = encode_feat.shape
encode_feat = encode_feat.reshape(*feat_shape[:2], -1)
result_instance = None
pred_regions = [_pred[img_idx].unsqueeze(0) for _pred in pred_regions]
pred_weights = [_pred[img_idx].unsqueeze(0) for _pred in pred_weights]
pred_centers = [_pred[img_idx].unsqueeze(0) for _pred in pred_centers]
pool_size = [3,3,3,5,5]
idx_feat_th, class_ths, score_ths, thing_num, idx_feat_st, score_sts, class_sts, stuff_num = \
multi_apply(self.inference_single_level, pred_centers,\
pred_regions, pred_weights, pool_size)
thing_num = sum(thing_num)
if thing_num == 0:
result_instance = Instances(ori_shape, pred_masks=[], pred_boxes=[],
pred_classes=[], scores=[])
else:
class_ths = [_class for _class in class_ths if len(_class)>0]
score_ths = [_score for _score in score_ths if len(_score)>0]
idx_feat_th = [_feat for _feat in idx_feat_th if len(_feat)>0]
class_ths = torch.cat(class_ths, dim=0)
score_ths = torch.cat(score_ths, dim=0)
idx_feat_th = torch.cat(idx_feat_th, dim=2)
keep = torch.argsort(score_ths, descending=True)
idx_feat_th = idx_feat_th[:,:,keep]
score_ths = score_ths[keep]
class_ths = class_ths[keep]
stuff_num = sum(stuff_num)
if stuff_num == 0:
class_sts, idx_feat_st, score_sts = [], [], []
else:
score_sts = [_score for _score in score_sts if len(_score)>0]
class_sts = [_cate_sem for _cate_sem in class_sts if len(_cate_sem)>0]
idx_feat_st = [_feat for _feat in idx_feat_st if len(_feat)>0]
score_sts = torch.cat(score_sts, dim=0)
class_sts = torch.cat(class_sts, dim=0)
idx_feat_st = torch.cat(idx_feat_st, dim=0)
pred_thing, [class_ths, score_ths] = self.thing_generator(encode_feat, feat_shape, idx_feat_th, thing_num, class_ths, score_ths)
pred_stuff, [class_sts, score_sts] = self.stuff_generator(encode_feat, feat_shape, idx_feat_st, stuff_num, class_sts, score_sts)
pred_stuff = pred_stuff.sigmoid()
if result_instance is None:
result_instance, pred_mask, class_ths, score_ths = self.process_inst(
class_ths, score_ths, pred_thing, img_shape, ori_shape)
else:
pred_mask, class_ths, score_ths = None, None, None
if self.sem_with_thing or self.cfg.MODEL.POSITION_HEAD.STUFF.ALL_CLASSES:
sem_classes = self.sem_classes
else:
sem_classes = self.sem_classes + 1
pred_stuff = F.interpolate(pred_stuff, scale_factor=self.common_stride, mode="bilinear",
align_corners=False)[...,:img_shape[0],:img_shape[1]]
pred_stuff = F.interpolate(pred_stuff, size=ori_shape, mode="bilinear", align_corners=False)[0]
pred_sem_seg = torch.zeros(sem_classes, *pred_stuff.shape[-2:], device=self.device)
pred_sem_seg[class_sts] += pred_stuff
processed_results.append({"sem_seg": pred_sem_seg, "instances": result_instance})
if self.panoptic_combine:
result_panoptic = self.combine_thing_and_stuff(
[pred_mask, class_ths, score_ths],
pred_sem_seg.argmax(dim=0),
self.panoptic_overlap_thrs,
self.panoptic_stuff_limit,
self.panoptic_inst_thrs)
processed_results[-1]["panoptic_seg"] = result_panoptic
return processed_results
@torch.no_grad()
def process_inst(self, classes, scores, pred_inst, img_shape, ori_shape):
"""
Simple process generate prediction of Things.
Args:
classes: predicted classes of Things
scores: predicted scores of Things
pred_inst: predicted instances of Things
img_shape: input image shape
ori_shape: original image shape
Returns:
result_instance: preserved results for Things
pred_mask: preserved binary masks for Things
classes: preserved object classes
scores: processed object scores
"""
pred_inst = pred_inst.sigmoid()[0]
pred_mask = pred_inst > self.inst_thres
# object rescore.
sum_masks = pred_mask.sum((1, 2)).float() + 1e-6
seg_score = (pred_inst * pred_mask.float()).sum((1, 2)) / sum_masks
scores *= seg_score
keep = torch.argsort(scores, descending=True)
pred_inst = pred_inst[keep]
pred_mask = pred_mask[keep]
scores = scores[keep]
classes = classes[keep]
sum_masks = sum_masks[keep]
# object score filter.
keep = scores >= 0.05
if keep.sum() == 0:
result_instance = Instances(ori_shape, pred_masks=[], pred_boxes=[],
pred_classes=[], scores=[])
return result_instance, pred_mask, None, None
pred_inst = pred_inst[keep]
scores = scores[keep]
classes = classes[keep]
# sort and keep top_k
keep = torch.argsort(scores, descending=True)
keep = keep[:self.center_top_num]
pred_inst = pred_inst[keep]
scores = scores[keep].reshape(-1)
classes = classes[keep].reshape(-1).to(torch.int32)
pred_inst = F.interpolate(pred_inst.unsqueeze(0),
scale_factor=self.common_stride,
mode="bilinear",
align_corners=False)[...,:img_shape[0],:img_shape[1]]
pred_inst = F.interpolate(pred_inst,
size=ori_shape,
mode="bilinear",
align_corners=False)[0]
pred_mask = pred_inst > self.inst_thres
pred_bitinst = BitMasks(pred_mask)
result_instance = Instances(ori_shape,
pred_masks=pred_bitinst,
pred_boxes=pred_bitinst.get_bounding_boxes(),
pred_classes=classes,
scores=scores)
return result_instance, pred_mask, classes, scores
@torch.no_grad()
def combine_thing_and_stuff(
self,
thing_results,
stuff_results,
overlap_threshold,
stuff_area_limit,
inst_threshold,
):
"""
Implement a simple combining logic following
"combine_semantic_and_instance_predictions.py" in panopticapi
to produce panoptic segmentation outputs.
Args:
thing_results: prediction of Things
stuff_results: prediction of Stuff
overlap_threshold: overlap threshold for Things combination
stuff_area_limit: stuff area threshold for Stuff combination
inst_threshold: instances confidence threshold
Returns:
panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment.
segments_info (list[dict]): Describe each segment in `panoptic_seg`.
Each dict contains keys "id", "category_id", "isthing".
"""
pred_thing, thing_cate, thing_score = thing_results
panoptic_seg = torch.zeros_like(stuff_results, dtype=torch.int32)
current_segment_id = 0
segments_info = []
if thing_cate is not None:
print("thing_cate", thing_cate)
keep = thing_score >= inst_threshold
if keep.sum() > 0:
pred_thing = pred_thing[keep]
thing_cate = thing_cate[keep]
thing_score = thing_score[keep]
# Add instances one-by-one, check for overlaps with existing ones
for _idx, (_mask, _cate, _score) in enumerate(zip(pred_thing, thing_cate, thing_score)):
mask_area = _mask.sum().item()
intersect = _mask & (panoptic_seg > 0)
intersect_area = intersect.sum().item()
if mask_area==0 or intersect_area * 1.0 / mask_area > overlap_threshold:
continue
if intersect_area > 0:
_mask = _mask & (panoptic_seg == 0)
current_segment_id += 1
panoptic_seg[_mask] = current_segment_id
thing_category_id = _cate.item()
category_id = self.meta.thing_train_id2contiguous_id[thing_category_id]
# print("category_id_th", category_id)
segments_info.append(
{
"id": current_segment_id,
"isthing": True,
"score": _score.item(),
"category_id": category_id,
"instance_id": _idx,
})
# import pdb; pdb.set_trace()
stuff_labels = torch.unique(stuff_results)
for stuff_label in stuff_labels:
stuff_category_id = stuff_label.item()
# if stuff_category_id==0 or stuff_category_id==13: #this condition is experimental because we got key error:0 on the following line
# continue
category_id = self.meta.stuff_train_id2contiguous_id[stuff_category_id]
if self.cfg.MODEL.POSITION_HEAD.STUFF.WITH_THING:
if stuff_label == 0: # 0 is a special "thing" class
continue
if self.cfg.MODEL.POSITION_HEAD.STUFF.ALL_CLASSES:
if category_id in self.meta.thing_train_id2contiguous_id.values():
continue
mask = (stuff_results == stuff_label) & (panoptic_seg == 0)
mask_area = mask.sum()
if mask_area < stuff_area_limit:
continue
current_segment_id += 1
panoptic_seg[mask] = current_segment_id
segments_info.append(
{
"id": current_segment_id,
"isthing": False,
"category_id": category_id,
"area": mask_area.item(),
})
return panoptic_seg, segments_info
| [
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
13,
20471,
1330,
10345,
355,
376,
198,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
33918,
198,
198,
6738,
4886,
1313,
17,
13,
7890,
... | 1.963721 | 10,061 |
#
# A graph G = (V,E) is a set of nodes V and a set of edges E
#
# Returns a list of edges sorted by weight in decreasing order
| [
2,
198,
2,
317,
4823,
402,
796,
357,
53,
11,
36,
8,
318,
257,
900,
286,
13760,
569,
290,
257,
900,
286,
13015,
412,
198,
2,
628,
220,
1303,
16409,
257,
1351,
286,
13015,
23243,
416,
3463,
287,
24030,
1502,
628
] | 3.219512 | 41 |
# # California COVID-19 Hospitalizations
#
# COVID-19 hospitalizations in California by county.
#
# ## Requirements
# ! conda install pygraphviz --yes --quiet
# ! pip install jupyter pandas ploomber matplotlib --quiet
# ## Data sources
#
# COVID-19: https://data.chhs.ca.gov/dataset/california-covid-19-hospital-data-and-case-statistics
#
# Population: https://data.ca.gov/dataset/california-population-projection-by-county-age-gender-and-ethnicity
#
# ## Data cleaning
# +
import pandas as pd
from pathlib import Path
from IPython.display import Image
import matplotlib.pyplot as plt
import matplotlib as mpl
from ploomber import DAG
from ploomber.tasks import DownloadFromURL, PythonCallable
from ploomber.products import File
# +
# matplotlib config
plt.style.use('ggplot')
mpl.rcParams['axes.titlesize'] = 20
mpl.rcParams['axes.labelsize'] = 14
mpl.rcParams['xtick.labelsize'] = 16
mpl.rcParams['ytick.labelsize'] = 16
mpl.rcParams['figure.figsize'] = [8.0, 8.0]
mpl.rcParams['figure.dpi'] = 120
# -
# we will save everything in output/
ROOT = Path('output/')
ROOT.mkdir(exist_ok=True)
# we use ploomber to get the organize our tasks
dag = DAG()
# +
# first two tasks just download the data
SOURCE = 'https://data.chhs.ca.gov/dataset/6882c390-b2d7-4b9a-aefa-2068cee63e47/resource/6cd8d424-dfaa-4bdd-9410-a3d656e1176e/download/covid19data.csv'
download = DownloadFromURL(SOURCE, File(ROOT / 'raw.csv'), dag, name='raw')
SOURCE_POP = 'https://data.ca.gov/dataset/7a8c03d3-ed86-498a-acdb-8ea09ccb4130/resource/2c217b79-4625-4ab2-86b3-6fc5d66f0409/download/population-estimates-and-projections-by-county-age-and-sex-california-1970-2050.csv'
download_pop = DownloadFromURL(SOURCE_POP, File(
ROOT / 'raw_pop'), dag, name='raw_pop')
# -
# we then join the downloaded data to normalize using population by county
def _join(upstream, product):
"""Join California COVID-19 hospitalizations with population data
"""
df = pd.read_csv(str(upstream['raw']))
df['Most Recent Date'] = pd.to_datetime(df['Most Recent Date'])
idx_m_recent = df.groupby('County Name')['Most Recent Date'].idxmax()
m_recent_total = df.iloc[idx_m_recent][['Most Recent Date', 'County Name',
'Total Count Confirmed']]
m_recent_total['county'] = m_recent_total['County Name'].str.upper()
pop = pd.read_csv(str(upstream['raw_pop']))
pop_by_county = pop[pop.year == 2020].groupby('county')[
['pop_total']].sum()
m_recent = pop_by_county.merge(m_recent_total, on='county')
m_recent['Total count per 100k population'] = (m_recent['Total Count Confirmed']
/ m_recent['pop_total'] * 100_000)
m_recent.to_csv(str(product), index=False)
# +
join = PythonCallable(_join, File(ROOT / 'joined.csv'), dag, name='joined')
# the joined data depends on the raw data
(download + download_pop) >> join
# -
# summary table
dag.status()
# plot. NOTE: pygraphviz is required to plot, easiest way to install is via "conda install pygraphviz"
path = dag.plot()
Image(filename=path)
# run all tasks
dag.build()
# ## Hospitalizations per 100,000 people
# load joined data
m_recent = pd.read_csv(str(dag['joined']))
(m_recent[['County Name', 'Total count per 100k population']]
.set_index('County Name')
.sort_values(by='Total count per 100k population', ascending=False)
.head(10)
.plot(kind='bar', title='Normalized cases by county (top 10)'))
| [
2,
1303,
3442,
7375,
11008,
12,
1129,
9256,
4582,
198,
2,
198,
2,
7375,
11008,
12,
1129,
4436,
4582,
287,
3442,
416,
7968,
13,
198,
2,
198,
2,
22492,
24422,
198,
198,
2,
5145,
1779,
64,
2721,
12972,
34960,
85,
528,
1377,
8505,
137... | 2.534307 | 1,370 |
import datetime
import warnings
import weakref
import dateutil.parser
import numpy as np
| [
11748,
4818,
8079,
198,
11748,
14601,
198,
11748,
4939,
5420,
198,
198,
11748,
3128,
22602,
13,
48610,
198,
11748,
299,
32152,
355,
45941,
628,
198
] | 3.68 | 25 |
# -*- coding: utf-8 -*-
import tensorflow as tf
import tf_euler
if __name__ == '__main__':
print("begin....")
tf_euler.initialize_embedded_graph('ppi') # 图数据目录
source = tf_euler.sample_node(128, tf_euler.ALL_NODE_TYPE)
source.set_shape([128])
model = DeepWalk(tf_euler.ALL_NODE_TYPE, [0, 1], 56944, 256)
_, loss, metric_name, metric = model(source)
global_step = tf.train.get_or_create_global_step()
train_op = tf.train.GradientDescentOptimizer(0.2).minimize(loss, global_step)
tf.logging.set_verbosity(tf.logging.INFO)
with tf.train.MonitoredTrainingSession(
hooks=[
tf.train.LoggingTensorHook({'step': global_step,
'loss': loss, metric_name: metric}, 100),
tf.train.StopAtStepHook(2000)
]) as sess:
while not sess.should_stop():
sess.run(train_op)
| [
171,
119,
123,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
48700,
62,
68,
18173,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,... | 2.07907 | 430 |
import numpy as np
import cv2
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from model.config import arguments
from model.model import BaseNet
from dataset.dataset import FlowerData
import matplotlib.pyplot as plt
# model = BaseNet(num_class=2)
# model = model.half()
# stat_dict = torch.load('./checkpoint/base_epoch%.4d.pth' % 100)
# model.load_state_dict(stat_dict, strict=False)
fig1, ax1 = plt.subplots()
fig2, ax2 = plt.subplots()
ax1.plot([1, 2], [3, 4])
ax2.plot([3, 4], [1, 2])
# fig1.show()
# fig2.show()
plt.show()
print()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
28034,
13,
20471,
1330,
10345,
355,
376,
198,
1174... | 2.625498 | 251 |
#! /usr/bin/env python3
# ------------
# tray_icon.py
# ------------
# Author: Daniel Sim (foxhead128)
# License: See LICENSE.md for more details.
# Description: This module contains a system tray icon class, used by Nimbus
# as it runs in the background.
# Import everything we need.
import sys
import subprocess
import common
import browser
import translate
import settings
import session
from translate import tr
# Extremely specific imports from PyQt5/PySide.
# We give PyQt5 priority because it supports Qt5.
if not common.pyqt4:
from PyQt5.QtCore import pyqtSignal, Qt, QTimer, QSize
Signal = pyqtSignal
from PyQt5.QtGui import QCursor
from PyQt5.QtWidgets import QWidget, QApplication, QMenu, QAction, QSystemTrayIcon, QDesktopWidget, QMessageBox, QToolButton, QToolBar, QLabel
else:
try:
from PyQt4.QtCore import pyqtSignal, Qt, QTimer, QSize
Signal = pyqtSignal
from PyQt4.QtGui import QWidget, QCursor, QApplication, QMenu, QAction, QSystemTrayIcon, QDesktopWidget, QMessageBox, QToolButton, QToolBar, QLabel
except:
from PySide.QtCore import Signal, Qt, QTimer, QSize
from PySide.QtGui import QWidget, QCursor, QApplication, QMenu, QAction, QSystemTrayIcon, QDesktopWidget, QMessageBox, QToolButton, QToolBar, QLabel
# System tray icon.
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
220,
10541,
198,
2,
26473,
62,
4749,
13,
9078,
198,
2,
220,
10541,
198,
2,
6434,
25,
220,
220,
220,
220,
220,
7806,
3184,
357,
12792,
2256,
12762,
8,
198,
2,
13789,... | 2.764463 | 484 |
###############################################################################
# Done: READ the code below. TRACE (by hand) the execution of the code,
# predicting what will get printed. Then run the code
# and compare your prediction to what actually was printed.
# Then mark this _TODO_ as DONE and commit-and-push your work.
#
###############################################################################
main()
# hello snow white how are things?
# goodbye bashful see you later!
# Ciao
# Bai Bai
# hello grumpy how are things?
# hello sleepy how are things?
# hello magic mirror how are things?
# goodbye cruel queen see you later?
# Ciao
# Bai Bai
| [
29113,
29113,
7804,
4242,
21017,
198,
2,
24429,
25,
220,
20832,
262,
2438,
2174,
13,
7579,
11598,
357,
1525,
1021,
8,
262,
9706,
286,
262,
2438,
11,
198,
2,
25539,
644,
481,
651,
10398,
13,
220,
3244,
1057,
262,
2438,
198,
2,
290,
... | 4.188679 | 159 |
RETENTION_SQL = """
SELECT
datediff(%(period)s, {trunc_func}(toDateTime(%(start_date)s)), reference_event.event_date) as base_interval,
datediff(%(period)s, reference_event.event_date, {trunc_func}(toDateTime(event_date))) as intervals_from_base,
COUNT(DISTINCT event.target) count
FROM (
{returning_event_query}
) event
JOIN (
{target_event_query}
) reference_event
ON (event.target = reference_event.target)
WHERE {trunc_func}(event.event_date) > {trunc_func}(reference_event.event_date)
GROUP BY base_interval, intervals_from_base
ORDER BY base_interval, intervals_from_base
"""
RETENTION_BREAKDOWN_SQL = """
SELECT
target_event.breakdown_values AS breakdown_values,
datediff(
%(period)s,
target_event.event_date,
dateTrunc(%(period)s, toDateTime(returning_event.event_date))
) AS intervals_from_base,
COUNT(DISTINCT returning_event.target) AS count
FROM
({returning_event_query}) AS returning_event
JOIN ({target_event_query}) target_event
ON returning_event.target = target_event.target
WHERE
dateTrunc(%(period)s, returning_event.event_date) >
dateTrunc(%(period)s, target_event.event_date)
GROUP BY
breakdown_values,
intervals_from_base
ORDER BY
breakdown_values,
intervals_from_base
"""
REFERENCE_EVENT_SQL = """
SELECT DISTINCT
{trunc_func}(e.timestamp) as event_date,
pdi.person_id as person_id,
e.uuid as uuid,
e.event as event
from events e JOIN ({GET_TEAM_PERSON_DISTINCT_IDS}) pdi on e.distinct_id = pdi.distinct_id
where toDateTime(e.timestamp) >= toDateTime(%(reference_start_date)s) AND toDateTime(e.timestamp) <= toDateTime(%(reference_end_date)s)
AND e.team_id = %(team_id)s {target_query} {filters}
"""
REFERENCE_EVENT_UNIQUE_SQL = """
SELECT DISTINCT
min({trunc_func}(e.timestamp)) as event_date,
pdi.person_id as person_id,
argMin(e.uuid, {trunc_func}(e.timestamp)) as min_uuid,
argMin(e.event, {trunc_func}(e.timestamp)) as min_event
from events e JOIN ({GET_TEAM_PERSON_DISTINCT_IDS}) pdi on e.distinct_id = pdi.distinct_id
WHERE e.team_id = %(team_id)s {target_query} {filters}
GROUP BY person_id HAVING
event_date >= toDateTime(%(reference_start_date)s) AND event_date <= toDateTime(%(reference_end_date)s)
"""
RETENTION_PEOPLE_SQL = """
SELECT DISTINCT person_id
FROM events e join ({GET_TEAM_PERSON_DISTINCT_IDS}) pdi on e.distinct_id = pdi.distinct_id
where toDateTime(e.timestamp) >= toDateTime(%(start_date)s) AND toDateTime(e.timestamp) <= toDateTime(%(end_date)s)
AND e.team_id = %(team_id)s AND person_id IN (
SELECT person_id FROM ({reference_event_query}) as persons
) {target_query} {filters}
LIMIT 100 OFFSET %(offset)s
"""
INITIAL_INTERVAL_SQL = """
SELECT datediff(%(period)s, {trunc_func}(toDateTime(%(start_date)s)), event_date) event_date,
count(DISTINCT target) FROM (
{reference_event_sql}
) GROUP BY event_date ORDER BY event_date
"""
INITIAL_BREAKDOWN_INTERVAL_SQL = """
SELECT
target_event.breakdown_values AS breakdown_values,
count(DISTINCT target_event.target)
FROM ({reference_event_sql}) AS target_event
GROUP BY breakdown_values ORDER BY breakdown_values
"""
| [
26087,
45589,
62,
17861,
796,
37227,
198,
46506,
198,
220,
220,
220,
14567,
733,
7,
4,
7,
41007,
8,
82,
11,
1391,
2213,
19524,
62,
20786,
92,
7,
1462,
10430,
7575,
7,
4,
7,
9688,
62,
4475,
8,
82,
36911,
4941,
62,
15596,
13,
1559... | 2.434004 | 1,341 |
from platform import python_version
from typing import Optional
import click
from click.core import Context, Option
from .base import CONTEXT_SETTINGS, _DEFAULT_RESULT_TYPE, run_test
from ...config.meta import __TITLE__, __VERSION__
from ...models import ResultType
def pyspj_entry(name: str, spj, result_type: str = _DEFAULT_RESULT_TYPE,
version: Optional[str] = None, author: Optional[str] = None, email: Optional[str] = None):
"""
Create your pyspj CLI entry.
:param name: Name of the special judge.
:param spj: Special judge function or string.
:param result_type: Type of result, default is ``FREE``.
:param version: Version information, default is ``None``.
:param author: Author of this special judge, default is ``None``.
:param email: Author email of this special judge, default is ``None``.
:return: A click function, can be used directly to create a CLI program.
"""
result_type = ResultType.loads(result_type)
@click.command(context_settings=CONTEXT_SETTINGS,
help=f"{name.capitalize()} - test a pair of given input and output.")
@click.option('-v', '--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True,
help="Show special judge's version information.")
@click.option('-i', '--input', 'input_content', type=str, help='Input content of special judge.')
@click.option('-o', '--output', 'output_content', type=str, help='Output content of special judge')
@click.option('-I', '--input_file', type=click.Path(exists=True, dir_okay=False, readable=True),
help='Input file of special judge (if -i is given, this will be ignored).')
@click.option('-O', '--output_file', type=click.Path(exists=True, dir_okay=False, readable=True),
help='Output file of special judge (if -o is given, this will be ignored).')
@click.option('-V', '--value', type=str, multiple=True,
help='Attached values for special judge (do not named as "stdin" or "stdout").')
@click.option('-p', '--pretty', type=bool, is_flag=True,
help='Use pretty mode to print json result.')
return _built_cli
| [
6738,
3859,
1330,
21015,
62,
9641,
198,
6738,
19720,
1330,
32233,
198,
198,
11748,
3904,
198,
6738,
3904,
13,
7295,
1330,
30532,
11,
16018,
198,
198,
6738,
764,
8692,
1330,
22904,
13918,
62,
28480,
51,
20754,
11,
4808,
7206,
38865,
62,
... | 2.784906 | 795 |
print(''' Vendetron 3000
El mejor programa para los vendedores en Panama!!
Izzzz Criminal!!!
''')
#Listas []
#Duplas ()
#Diccionarios {}
ventas_globales = 0.0
lista_vendedores = []
while True:
registro = {}
nombre = input("\nNombre: ") #Se introduce el nombre del vendedor.
while True:
try:
venta_mensual = float(input("Venta Mensual: ")) #Se va a leer las ventas.
break
except:
print("Tas metiendo demencia vivo!!!")
if venta_mensual > 100_000.00:
print("Eres un vendedor estrella!!!")
elif venta_mensual > 90000.00:
print("Te falta poco para la meta!!!")
elif venta_mensual > 50000:
print("Sobreviviste :)")
elif venta_mensual > 0.00:
print("Larga a vender mas sinverguenza")
else:
print("Que haces aqui? DESPEDIDO!!!")
registro["nombre"] = nombre
registro["venta"] = venta_mensual
lista_vendedores.append(registro)
opcion = input("Desea continuar (S/N)")
if opcion == 'N' or opcion == 'n':
break
print("Lista de Vendedores: ")
for x in lista_vendedores:
print(str(x["nombre"]) + ", ")
ventas_globales += float(x["venta"])
print("Ventas Globales:", str(ventas_globales))
print("Venta Maxima:" + str(max([x['venta'] for x in lista_vendedores])))
| [
4798,
7,
7061,
6,
44220,
316,
1313,
20343,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
2574,
502,
73,
273,
1430,
64,
31215,
22346,
410,
1631,
2850,
551,
23519,
3228,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
314,
301... | 2.128571 | 630 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Gaussian Forces Driver """
import unittest
from test.chemistry import QiskitChemistryTestCase
from qiskit.chemistry.drivers import GaussianForcesDriver, Molecule
from qiskit.chemistry import QiskitChemistryError
class TestDriverGaussianForces(QiskitChemistryTestCase):
"""Gaussian Forces Driver tests."""
def test_driver_jcf(self):
""" Test the driver works with job control file """
try:
driver = GaussianForcesDriver(
['#p B3LYP/6-31g Freq=(Anharm) Int=Ultrafine SCF=VeryTight',
'',
'CO2 geometry optimization B3LYP/6-31g',
'',
'0 1',
'C -0.848629 2.067624 0.160992',
'O 0.098816 2.655801 -0.159738',
'O -1.796073 1.479446 0.481721',
'',
''
])
result = driver.run()
self._check_driver_result(result)
except QiskitChemistryError:
self.skipTest('GAUSSIAN driver does not appear to be installed')
def test_driver_molecule(self):
""" Test the driver works with Molecule """
try:
driver = GaussianForcesDriver(
molecule=Molecule(geometry=[('C', [-0.848629, 2.067624, 0.160992]),
('O', [0.098816, 2.655801, -0.159738]),
('O', [-1.796073, 1.479446, 0.481721])],
multiplicity=1,
charge=0),
basis='6-31g')
result = driver.run()
self._check_driver_result(result)
except QiskitChemistryError:
self.skipTest('GAUSSIAN driver does not appear to be installed')
def test_driver_logfile(self):
""" Test the driver works with logfile (Gaussian does not need to be installed) """
driver = GaussianForcesDriver(
logfile=self.get_resource_path('test_driver_gaussian_log.txt'))
result = driver.run()
self._check_driver_result(result)
if __name__ == '__main__':
unittest.main()
| [
2,
770,
2438,
318,
636,
286,
1195,
1984,
270,
13,
198,
2,
198,
2,
357,
34,
8,
15069,
19764,
12131,
13,
198,
2,
198,
2,
770,
2438,
318,
11971,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
13,
921,
743,
198,
2,
7330,
257,
486... | 2.1251 | 1,247 |
"""MAVLink log parsing utilities."""
import argparse
from pymavlink.dialects.v10 import ceaufmg as mavlink
from pymavlink import mavutil
import numpy as np
def main():
"""Parse a MAVLink log."""
parser = argparse.ArgumentParser(description=main.__doc__)
parser.add_argument("--condition", default=None,
help="message filter condition")
parser.add_argument("--no-timestamps", dest="notimestamps",
action='store_true', help="Log doesn't have timestamps")
parser.add_argument("--dialect", default="ceaufmg", help="MAVLink dialect")
parser.add_argument("log", metavar="LOG")
args = parser.parse_args()
conn = mavutil.mavlink_connection(args.log, dialect=args.dialect,
notimestamps=args.notimestamps)
conn._link = None
while True:
msg = conn.recv_match(condition=args.condition)
if msg is None:
break
elif msg.get_type() == 'BAD_DATA':
continue
else:
header = msg.get_header()
timestamp = msg._timestamp or 0
fields = [getattr(msg, name) for name in msg.fieldnames]
print(header.msgId, header.srcSystem, header.srcComponent,
timestamp, *fields)
if __name__ == '__main__':
main()
| [
37811,
5673,
53,
11280,
2604,
32096,
20081,
526,
15931,
628,
198,
11748,
1822,
29572,
198,
198,
6738,
279,
4948,
615,
8726,
13,
38969,
478,
82,
13,
85,
940,
1330,
2906,
559,
69,
11296,
355,
285,
615,
8726,
198,
6738,
279,
4948,
615,
... | 2.232945 | 601 |
import enum
import itertools
from collections import defaultdict
from django.db import models
from django.core.validators import MinValueValidator
from utils.django.models import EnumField
| [
11748,
33829,
198,
11748,
340,
861,
10141,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
7295,
13,
12102,
2024,
1330,
1855,
11395,
47139,
1352,
198,
198,
6738,
3384,
... | 3.679245 | 53 |
"""Bioprinter: print images with colored bacteria and yeast.
This implements the `bioprint` function, which takes an image and writes a CSV
file that the Labcyte Echo dispenser can use to print the pictures on a plate
using yeast, coli, ...
Written by Valentin for the Edinburgh Genome Foundry.
Original idea and Matlab code by Mike Shen:
https://github.com/mshen5/BioPointillism
"""
# __all__ = []
from .bioprinter import bioprint
from .version import __version__
| [
37811,
33,
14922,
81,
3849,
25,
3601,
4263,
351,
16396,
11492,
290,
20146,
13,
198,
198,
1212,
23986,
262,
4600,
8482,
404,
22272,
63,
2163,
11,
543,
2753,
281,
2939,
290,
6797,
257,
44189,
198,
7753,
326,
262,
3498,
948,
660,
21455,
... | 3.560606 | 132 |
import numpy as np | [
11748,
299,
32152,
355,
45941
] | 3.6 | 5 |
"""
AUTOMATION SCRIPT: Supervided Pretrained Autoencoders for Inference in Networks
@author: Gunjan Mahindre
Version: 1.0
Date last modified: Sept. 27 2020
Description:
Run the main autoencoder code for various percentages of deletion
Run the code over "iter" number of iterations
Calculates Mean error, Absolute Hop Distance Error (AHDE) averaged over all iterations.
"""
"""
1. create windows
iterate through the windows
2. create these networks.. do 3 Power law
3. train
4. test - only over observed entries - on virgili network
% = 60, 80, 90, 99, 99.5, 99.9
plot : each window as seperate graph..
cross check whether the window of actual average node degree performs best..
"""
# IMPORT MODULES REQUIRED
print ('here 0')
import os
import random
import numpy as np
import pandas as pd
# import RobustDeepAutoencoder as auto
from RobustDeepAutoencoder import *
from RobustDeepAutoencoder import RDAE
import DeepAE as DAE
import networkx as nx
from evaluate import eval_result
# # ---------------------------------------------------
# # 1. create windows
# windows = [[5,7,9]]
# print ('here 1')
# w = windows[0]
# print ("current window:---------------------------------------------------------------------------------------- ", w)
# # create Directory for this window----------------
# directory = str(w[0]) + '_' + str(w[1]) + '_' + str(w[2])
# # Parent Directory path
# parent_dir = "/content/drive/MyDrive/PhD work/Projects/ensemble/virgili results/fine tuning/"
# # Path
# path = os.path.join(parent_dir, directory)
# Create the directory
# os.mkdir(path)
# print("Directory '% s' created" % directory)
# # ---------------------------------------------------
path = '/content/drive/MyDrive/PhD work/Projects/ensemble/virgili results/fine tuning/'
# path = '/content/drive/MyDrive/PhD work/Projects/ensemble/train bombing results/fine tuning/'
# data_path = '/content/drive/MyDrive/PhD work/data/undirected networks/train bombing/'
data_path = '/content/drive/MyDrive/PhD work/data/undirected networks/virgili emails/'
print ("RESULTS FOR SUPERVISED AUTOENCODERS ")
mean_results = []
abs_results = []
m_STD_results = []
a_STD_results = []
frac_list = [20, 40, 60, 80, 90, 99, 99.5, 99.9]
# frac_list = [20]
print ('here 2')
for fraction in frac_list:
# main_code(fraction, w)
print ("Fraction--------------------------------", fraction)
# for the given fraction----
# run option 1
# window for this option
w1 = [4,5,6]
print ("current window:---------------------------------------------------------------------------------------- ", w1)
hadamard_test1 = main_code(fraction, w1, path, data_path)
hadamard_test1 = np.array(hadamard_test1)
# save the corrected result matrix
print('done with option 1')
# # run option 2
# # window for this option
w2 = [6,7,8]
print ("current window:---------------------------------------------------------------------------------------- ", w2)
hadamard_test2 = main_code(fraction, w2, path, data_path)
hadamard_test2 = np.array(hadamard_test2)
# # save the corrected result matrix
print('done with option 2')
# # check if the same entries are being deleted.. so that we can average these entries===they ARE same :)
# if np.sum(hadamard_test1 - hadamard_test2) == 0:
# print ('same')
# # run option 3
# # window for this option
w3 = [8,9,10]
print ("current window:---------------------------------------------------------------------------------------- ", w3)
hadamard_test3 = main_code(fraction, w3, path, data_path)
hadamard_test3 = np.array(hadamard_test3)
# # save the corrected result matrix
print('done with option 3')
# average the three result matrices
# evaluate this final result
[mean_err, abs_err, mean_std, abs_std] = eval_result(path, w1, w2, w3, hadamard_test1, fraction, data_path)
print(mean_err, abs_err, mean_std, abs_std)
# # append the result to our variables :)
mean_results.append(mean_err)
abs_results.append(abs_err)
m_STD_results.append(mean_std)
a_STD_results.append(abs_std)
# save each result in a text file
filename = '/mean_error.txt'
np.savetxt(path + filename, mean_results)
filename = '/abs_error.txt'
np.savetxt(path + filename, abs_results)
filename = '/mean_STD.txt'
np.savetxt(path + filename, m_STD_results)
filename = '/abs_STD.txt'
np.savetxt(path + filename, a_STD_results)
print (frac_list)
exit() | [
37811,
201,
198,
39371,
2662,
6234,
6374,
46023,
25,
3115,
85,
1384,
37123,
13363,
5231,
6571,
19815,
364,
329,
554,
4288,
287,
27862,
201,
198,
201,
198,
31,
9800,
25,
6748,
13881,
8882,
521,
260,
201,
198,
14815,
25,
352,
13,
15,
... | 2.93904 | 1,542 |
"""Known test tags so that common tags can safely reused in :func:`bitbots_test.decorators.tag`"""
INTERACTIVE = "interactive"
"Tests which require user interaction in order to be run"
WEBOTS = "webots"
"Tests which require webots"
| [
37811,
29870,
1332,
15940,
523,
326,
2219,
15940,
460,
11512,
46823,
287,
1058,
20786,
25,
63,
2545,
42478,
62,
9288,
13,
12501,
273,
2024,
13,
12985,
63,
37811,
198,
198,
41358,
10659,
9306,
796,
366,
3849,
5275,
1,
198,
1,
51,
3558,... | 3.25 | 72 |
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 20 01:59:55 2020
@author: abhi0
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
3825,
8621,
1160,
5534,
25,
3270,
25,
2816,
12131,
201,
198,
201,
198,
31,
9800,
25,
450,
5303,
15,
201,
198,
37811,
201
] | 2.069767 | 43 |
#!/usr/bin/env python
#coding:utf-8
#realtime time log
import time
import redis
import json
import urllib2
rc = redis.Redis(host='192.168.1.15',port=6379)
f = open("/var/log/nginx/access.log", "r")
f.seek(0, 2)
while True:
offset = f.tell()
line = f.readline()
if not line:
f.seek(offset)
time.sleep(0.1)
else:
#ip = re.search(r'(?<![\.\d])(?:\d{1,3}\.){3}\d{1,3}(?![\.\d])',line).group()
ip=line.split(' ')[0]
actime=line.split(' ')[3].split('/')[2].split(':',1)[1]
web=line.split(' ')[6]
res=line.split('"')[3]
client=line.split('"')[5]
#print ip,actime,web
rc.publish("fm110",[ipool(ip),actime,web,res,client])
f.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
66,
7656,
25,
40477,
12,
23,
198,
2,
5305,
2435,
640,
2604,
198,
11748,
640,
198,
11748,
2266,
271,
198,
11748,
33918,
198,
11748,
2956,
297,
571,
17,
198,
6015,
796,
2266,
271,
... | 1.9 | 380 |
"""booleanvalue.py
Created on: May 19, 2017
Author: Jeroen van der Heijden <jeroen@transceptor.technology>
"""
from .value import Value
| [
37811,
2127,
21052,
8367,
13,
9078,
198,
198,
41972,
319,
25,
1737,
678,
11,
2177,
198,
220,
220,
220,
6434,
25,
449,
3529,
268,
5719,
4587,
679,
2926,
6559,
1279,
73,
3529,
268,
31,
7645,
49492,
13,
45503,
29,
198,
37811,
198,
6738... | 2.958333 | 48 |
"""Support for 2-D geometric operations.
Mathematics for poses, frames, and coordinate transformations derived from Peter Corke's
"Robotics, Vision, and Control: Fundamental Algorithms in MATLAB".
"""
from collections import namedtuple
from itertools import chain
import numpy as np
from components.messaging import Broadcaster
from components.util import between, within, iter_first_not_none, min_first
# Coord should be a numpy array representing a column vector.
# Angle should be in radians from the frame's +x axis.
Pose = namedtuple("Pose", ["Coord", "Angle"])
# Angles
def normalize_angle(angle):
"""Converts an angle in radians to an angle with -pi < value <= pi.
This encompasses the output range of the arctan function."""
negative = angle % -(2 * np.pi)
return negative - (2 * np.pi * int(negative / np.pi))
def positive_angle(angle):
"""Converts an angle in radians to an angle with 0 <= value < 2 * pi."""
return angle % (2 * np.pi)
# Vector representations
def to_vector(*values):
"""Converts the input values into a column vector."""
return np.array([[value] for value in values])
def vector_to_tuple(vector):
"""Converts a column vector into a tuple."""
return tuple(row[0] for row in vector)
def vectors_to_flat(vectors):
"""Converts iterable of column vectors to flat tuple of alternating coords."""
return tuple(chain.from_iterable(vector_to_tuple(vector) for vector in vectors))
def homogeneous_form(vector):
"""Returns the homogeneous form of a 2-D column vector."""
return np.vstack([vector, [1]])
def point_form(homogeneous_vector):
"""Returns the 2-D column vector of two elements from the homogeneous form."""
return homogeneous_vector[0:2, 0:1]
def direction_vector(angle):
"""Converts an angle from the +x axis into a unit direction vector."""
return to_vector(np.cos(angle), np.sin(angle))
def to_angle(direction):
"""Convers a direction vector into an angle in radians."""
return np.arctan2(direction[1], direction[0])[0]
# Transformation matrices
def rotation_matrix(angle):
"""Converts an angle from the +x axis into a 2-D rotation matrix."""
return np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]])
def transformation(pose, x_scale=1, y_scale=1):
"""Returns the homogeneous transformation matrix of a frame to its reference."""
scale_mat = np.array([[x_scale, 0], [0, y_scale]])
rot_mat = rotation_matrix(pose.Angle)
rot_scale_mat = np.dot(scale_mat, rot_mat)
transl = pose.Coord
return np.vstack([np.hstack([rot_scale_mat, transl]), [0, 0, 1]])
def transformation_inverse(pose, x_scale=1, y_scale=1):
"""Returns the homogeneous transformation matrix into a frame from its reference."""
scale_mat = np.array([[x_scale, 0], [0, y_scale]])
rot_mat = rotation_matrix(pose.Angle)
rot_scale_mat = np.dot(scale_mat, rot_mat).transpose()
transl = pose.Coord
return np.vstack([np.hstack([rot_scale_mat, -1 * np.dot(rot_scale_mat, transl)]), [0, 0, 1]])
def compose(transformation_one, transformation_two):
"""Returns the transformation that is the composition of the two inputs."""
return np.dot(transformation_one, transformation_two)
# Transformations
def transform(matrix, frame_coords):
"""Transforms the non-homogeneous 2-D column vector using the homogeneous transformation matrix."""
return point_form(np.dot(matrix, homogeneous_form(frame_coords)))
def transform_x(matrix, frame_x):
"""Converts x-coord in the frame to x-coord in the parent's frame."""
return transform(matrix, to_vector(frame_x, 0))[0][0]
def transform_y(matrix, frame_y):
"""Converts y-coord in the frame to y-coord in the parent's frame."""
return transform(matrix, to_vector(0, frame_y))[1][0]
def transform_all(matrix, vectors):
"""Transforms every vector in a tuple into the parent's frame."""
return tuple(transform(matrix, vector) for vector in vectors)
def rotate_pose(pose, rotation_center, angle):
"""Rotates the pose about the specified point by the specified angle."""
center_to_pose = pose.Coord - rotation_center
center_to_direction = direction_vector(pose.Angle) + center_to_pose
rotation = transformation(Pose(to_vector(0, 0), angle))
transformed_pose = transform(rotation, center_to_pose)
transformed_direction = transform(rotation, center_to_direction)
transformed_angle = to_angle(transformed_direction - transformed_pose)
return Pose(transformed_pose + rotation_center, transformed_angle)
# Geometric primitives
def line_intersection(first_point, first_direction, second_point, second_direction):
"""Finds the intersection (if any) between two lines defined by their points and directions.
Uses the algorithm outlined in Gareth Rees's answer at
http://stackoverflow.com/questions/563198
"""
cross_direction = np.cross(first_direction.flatten(), second_direction.flatten())
difference_point = second_point - first_point
if cross_direction == 0:
return None # Lines are collinear or parallel
second_location = float(np.cross(difference_point, first_direction, axis=0)) / cross_direction
first_location = float(np.cross(difference_point, second_direction, axis=0)) / cross_direction
return (first_location, second_location)
def ray_segment_intersection(ray_point, ray_angle, segment_left, segment_right):
"""Finds the intersection (if any) between the ray and the segment defined by two endpoints.
Uses the algorithm outlined in Gareth Rees's answer at
http://stackoverflow.com/questions/14307158
"""
intersection = line_intersection(ray_point, direction_vector(ray_angle),
segment_left, segment_right - segment_left)
if intersection is None or intersection[0] < 0 or not between(0, 1, intersection[1]):
return None
else:
return intersection[0]
def perpendicular_to_line(point, line_left, line_right):
"""Finds the vector from the point to the nearest point on the line.
Uses the formula from Pablo's answer at http://stackoverflow.com/questions/5227373
"""
line_direction = line_right - line_left
line_direction = line_direction / float(np.linalg.norm(line_direction))
vector_projection = line_direction * np.vdot((point - line_left), line_direction)
return line_left + vector_projection - point
def segment_transformation(from_left, from_right, to_left, to_right):
"""Finds a transformation to move the "from" segment so that it overlaps the "to" line.
The transformation will rotate the "from" vector the minimum angle to become parallel with the
line defined by the "to" line segment, and it will translate the "from" vector the
minimum distance to become collinear with the line defined by the "to" line segment.
Arguments:
All arguments must be given as points in a common parent frame.
from_left: column vector of the "left" end of the line segment to be transformed.
from_right: column vector of the "right" end of the line segment to be transformed.
to_left: column vector of the "left" end of the line segment defining the target line.
The line segment will be rotated so that its "left" segment is closer to to_left.
to_right: column vector of the "right" end of the line segment defining the target line.
The line segment will be rotated so that its "right" segment is closer to to_right.
Return:
A 3-tuple of the center of rotation, the angle to rotate about that point,
and a vector of the subsequent translation.
"""
midpoint = 0.5 * (from_right + from_left) # this will be the center of rotation
return (midpoint,
to_angle(to_right - to_left) - to_angle(from_right - from_left),
perpendicular_to_line(midpoint, to_left, to_right))
class Frame(object):
"""Mix-in to support coordinate transformations from a frame."""
def get_transformation(self):
"""Returns the transformation matrix for efficient composition of transformations."""
(x_scale, y_scale) = self._get_scaling()
return transformation(self.get_pose(), x_scale, y_scale)
def get_transformation_inverse(self):
"""Returns the inverse transformation matrix."""
(x_scale, y_scale) = self._get_scaling()
return transformation_inverse(self.get_pose(), x_scale, y_scale)
# Abstract methods
def get_pose(self):
"""Returns the pose of the Frame relative to its parent Frame."""
pass
def _get_scaling(self):
"""Returns a 2-tuple of the x and y scaling relative to its parent Frame."""
return (1, 1)
class MobileFrame(Frame, Broadcaster):
"""Interface for a mobile Frame."""
# Abstract methods
def reset_pose(self):
"""Resets the frame to its initial pose."""
pass
class Rectangle(Frame):
"""Models a rectangular shape."""
# Implementation of parent abstract methods
def get_center(self):
"""Returns the center of the Wall."""
return self._center
def get_corners(self):
"""Returns a 4-tuple of the corners as column vectors."""
return (self._sides["East"][0], self._sides["North"][0],
self._sides["West"][0], self._sides["South"][0])
def get_side(self, side_name):
"""Returns the specified side."""
return self._sides[side_name]
def in_rectangle(self, coords):
"""Checks whether the coordinate, given as a column vector, is in the Rectangle."""
transformed = transform(self.get_transformation_inverse(), coords)
point = vector_to_tuple(transformed)
return (within(self.__bounds[0], self.__bounds[2], point[0])
and within(self.__bounds[1], self.__bounds[3], point[1]))
def nearest_side(self, coords):
"""Finds the nearest side to the coordinate given in the parent frame as a column vector.
Returns the side as the name of the nearest side.
To identify the nearest side, uses the algorithm outlined in Raymond Manzoni's answer at
http://math.stackexchange.com/questions/194550/
"""
transformed = transform(self.get_transformation_inverse(), coords)
point = vector_to_tuple(transformed)
slope = abs(float(self.__bounds[3] - self.__bounds[1])
/ (self.__bounds[2] - self.__bounds[0]))
if point[1] >= slope * abs(point[0]):
return "North"
elif point[1] <= -slope * abs(point[0]):
return "South"
elif slope * point[0] > abs(point[1]):
return "East"
elif slope * point[0] < -abs(point[1]):
return "West"
def ray_distance_to(self, ray_point, ray_angle, side=None):
"""Returns the distance to the Rectangle from the given ray, if the ray intersects.
The ray should be given in the parent frame as a column vector and an angle.
Returns a 2-tuple of the actual distance and the name of the intersecting side.
If a side is specified, finds the ray distance to that side, rather than the distance
to the first side the ray intersects.
"""
matrix = self.get_transformation()
if side is not None:
distance = ray_segment_intersection(ray_point, ray_angle,
*transform_all(matrix, self.get_side(side)))
return (distance, side)
distances = tuple((ray_segment_intersection(ray_point, ray_angle,
*transform_all(matrix, side)), side_name)
for (side_name, side) in self._sides.items())
try:
return min_first(iter_first_not_none(distances))
except ValueError:
return (None, None)
def point_distance_to(self, point):
"""Returns the distance to the Rectangle from the given point in the parent frame.
Returns a 2-tuple of the distance between the point and the nearest side (as a line)
and the name of the nearest side.
Uses the formula presented in Wolfram MathWorld's "Point-Line Distance--2-Dimensional"
"""
transformed = transform(self.get_transformation_inverse(), point)
side_name = self.nearest_side(transformed)
side = self.get_side(side_name)
distance = (np.linalg.norm(np.cross((side[1] - side[0]).flatten(),
(side[0] - transformed).flatten()))
/ np.linalg.norm(side[1] - side[0]))
return (distance, side_name)
def ray_distance_to(rectangles, coords, angle):
"""Determines the first rectangle hit by the specified ray, and the distance along the ray.
Arguments:
rectangles: an iterable of Rectangles to check.
coords: the origin of the ray, as a column vector, in the parent frame of the rectangels.
angle: the direction of the ray, in radians, in the parent frame of the rectangles.
Return:
If a rectangle is hit by the ray, a 3-tuple of the distance to that rectangle, the
name of the side of the rectangle hit by the ray, and the id of the rectangle.
Otherwise, returns None.
"""
distances = [rectangle.ray_distance_to(coords, angle) + (rectangle.get_id(),)
for rectangle in rectangles]
try:
return min_first(iter_first_not_none(distances))
except ValueError:
return None
| [
37811,
15514,
329,
362,
12,
35,
38445,
4560,
13,
198,
19044,
10024,
873,
329,
17313,
11,
13431,
11,
290,
20435,
38226,
10944,
422,
5613,
2744,
365,
338,
198,
1,
14350,
23891,
11,
19009,
11,
290,
6779,
25,
49983,
978,
7727,
907,
287,
... | 2.790482 | 4,854 |
from aiohttp.web import HTTPException
from aiommy.json import dumps
JSON_ERROR_KEY = 'error'
| [
6738,
257,
952,
4023,
13,
12384,
1330,
14626,
16922,
198,
6738,
257,
29005,
1820,
13,
17752,
1330,
45514,
198,
198,
40386,
62,
24908,
62,
20373,
796,
705,
18224,
6,
628,
198
] | 3.096774 | 31 |
nome = input('Digite seu nome inteiro: ').split()
print('Primeiro nome: ', nome[0])
print('Último nome: ', nome[-1])
| [
77,
462,
796,
5128,
10786,
19511,
578,
384,
84,
299,
462,
493,
68,
7058,
25,
705,
737,
35312,
3419,
198,
4798,
10786,
26405,
7058,
299,
462,
25,
46083,
299,
462,
58,
15,
12962,
198,
4798,
10786,
127,
248,
2528,
25147,
299,
462,
25,
... | 2.294118 | 51 |