content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import pytest
from lauztat.calculators import FrequentistCalculator, AsymptoticCalculator
from lauztat.config import Config
from lauztat.parameters import POI
from lauztat.hypotests import Discovery
import numpy as np
import os
import matplotlib as mpl
mpl.use('Agg')
pwd = os.path.dirname(__file__)
bounds = (0.1, 3.0)
# Data and signal
np.random.seed(0)
tau = -2.0
beta = -1/tau
data = np.random.exponential(beta, 300)
peak = np.random.normal(1.2, 0.1, 25)
data = np.concatenate((data, peak))
data = data[(data > bounds[0]) & (data < bounds[1])]
| [
11748,
12972,
9288,
198,
198,
6738,
300,
559,
89,
83,
265,
13,
9948,
3129,
2024,
1330,
22192,
298,
396,
9771,
3129,
1352,
11,
1081,
4948,
457,
6210,
9771,
3129,
1352,
198,
6738,
300,
559,
89,
83,
265,
13,
11250,
1330,
17056,
198,
67... | 2.488789 | 223 |
# Generated by Django 3.0.9 on 2020-08-11 20:50
from django.db import migrations, models
import djmoney.models.fields
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
24,
319,
12131,
12,
2919,
12,
1157,
1160,
25,
1120,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
26316,
13,
27530,
13,
25747,
628
] | 3 | 40 |
from PySide2 import QtCore, QtGui
from dwpicker.geometry import (
DIRECTIONS, get_topleft_rect, get_bottomleft_rect, get_topright_rect,
get_bottomright_rect, get_left_side_rect, get_right_side_rect,
get_top_side_rect, get_bottom_side_rect, proportional_rect)
from dwpicker.painting import (
draw_selection_square, draw_manipulator, get_hovered_path, draw_shape)
from dwpicker.languages import execute_code
from dwpicker.selection import select_targets
EXCECUTION_WARNING = (
"""Code execution failed for shape: "{name}"
{error}.
""")
| [
198,
198,
6738,
9485,
24819,
17,
1330,
33734,
14055,
11,
33734,
8205,
72,
198,
198,
6738,
288,
24142,
15799,
13,
469,
15748,
1330,
357,
198,
220,
220,
220,
42242,
11053,
11,
651,
62,
83,
643,
701,
62,
2554,
11,
651,
62,
22487,
9464,... | 2.837563 | 197 |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 1.3.35
#
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _ubigraph
import new
new_instancemethod = new.instancemethod
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
import types
try:
_object = types.ObjectType
_newclass = 1
except AttributeError:
_newclass = 0
del types
ubigraph_new_vertex = _ubigraph.ubigraph_new_vertex
ubigraph_new_edge = _ubigraph.ubigraph_new_edge
ubigraph_remove_vertex = _ubigraph.ubigraph_remove_vertex
ubigraph_remove_edge = _ubigraph.ubigraph_remove_edge
ubigraph_new_vertex_w_id = _ubigraph.ubigraph_new_vertex_w_id
ubigraph_new_edge_w_id = _ubigraph.ubigraph_new_edge_w_id
ubigraph_clear = _ubigraph.ubigraph_clear
ubigraph_set_vertex_attribute = _ubigraph.ubigraph_set_vertex_attribute
ubigraph_change_vertex_style = _ubigraph.ubigraph_change_vertex_style
ubigraph_new_vertex_style = _ubigraph.ubigraph_new_vertex_style
ubigraph_new_vertex_style_w_id = _ubigraph.ubigraph_new_vertex_style_w_id
ubigraph_set_vertex_style_attribute = _ubigraph.ubigraph_set_vertex_style_attribute
ubigraph_set_edge_attribute = _ubigraph.ubigraph_set_edge_attribute
ubigraph_change_edge_style = _ubigraph.ubigraph_change_edge_style
ubigraph_new_edge_style = _ubigraph.ubigraph_new_edge_style
ubigraph_new_edge_style_w_id = _ubigraph.ubigraph_new_edge_style_w_id
ubigraph_set_edge_style_attribute = _ubigraph.ubigraph_set_edge_style_attribute
| [
2,
770,
2393,
373,
6338,
7560,
416,
12672,
3528,
357,
4023,
1378,
2503,
13,
2032,
328,
13,
2398,
737,
198,
2,
10628,
352,
13,
18,
13,
2327,
198,
2,
198,
2,
2094,
470,
13096,
428,
2393,
11,
13096,
262,
12672,
3528,
7071,
2427,
13,
... | 2.846429 | 560 |
import os
import luigi
from luigi.contrib.external_program import ExternalProgramTask
from wespipeline import utils
class GetProgram(ExternalProgramTask):
"""Task user for downloading and giving execution permissions to the 2bit program.
The task gives execute permissions to the conversion utility for 2bit files
to be converted to fa files which can then be used for aligning the sequences.
The source for the program is ftp://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/twoBitToFa.
Parameters:
none
Output:
A `luigi.LocalTarget` for the executable.
"""
class TwoBitToFa(ExternalProgramTask):
"""Task user for Converting 2bit files to the fa format.
The task will use a local executable or require the task for obtaining it, and
use with the reference genome.
Parameters:
ref_url (str): Url for the resource with the reference genome.
reference_local_file (str): Path for the reference genome 2bit file. If given
the ``ref_url`` parameter will be ignored.
Output:
A `luigi.LocalTarget` for the reference genome fa file.
"""
ref_url = luigi.Parameter()
reference_local_file = luigi.Parameter(default='')
class GetReferenceFa(utils.MetaOutputHandler, luigi.WrapperTask):
"""Task user for obtaining the reference genome .fa file.
This task will retrieve an external genome or use a provided local one, and convert
it from 2bit format to .fa if neccessary.
Parameters:
ref_url (str): Url for the resource with the reference genome.
reference_local_file (str): Path for the reference genome 2bit file. If given
the ``ref_url`` parameter will be ignored.
from2bit (bool): Non case sensitive boolean indicating wether the reference genome
if in 2bit format. Defaults to false.
Output:
A `luigi.LocalTarget` for the reference genome fa file.
"""
reference_local_file = luigi.Parameter(default='')
ref_url = luigi.Parameter(default='')
from2bit = luigi.BoolParameter()
class PicardDict(ExternalProgramTask):
"""Task user for creating a dict file with the reference genome .fa file with the picard utility.
Parameters:
None
Output:
A `luigi.LocalTarget` for the .fai index file for the reference genome .
"""
class FaidxIndex(ExternalProgramTask):
"""Task user for indexing the reference genome .fa file with the samtools faidx utility.
Aligning the reference genome helps reducing access time drastically.
Parameters:
None
Output:
A `luigi.LocalTarget` for the .fai index file for the reference genome .
"""
class BwaIndex(ExternalProgramTask):
"""Task user for indexing the reference genome .fa file with the bwa index utility.
Aligning the reference genome helps reducing access time drastically.
Parameters:
None
Output:
A set of five files are result of indexing the reference genome. The extensions
for each of the files are '.amb', '.ann', '.bwt', '.pac', '.sa'.
"""
class ReferenceGenome(utils.MetaOutputHandler, luigi.Task):
"""Higher level task for retrieving the reference genome.
It is given preference to local files over downloading the reference. However the
indexing of the reference genome is always done using ``GloablParams.exp_name`` and
``GlobalParams.base_dir`` for determining filenames and location for newer files
respectively.
The indexing is done using both Samtools and Bwa toolkits.
Parameters:
reference_local_file (str) : Optional string indicating the location for the reference genome. If set, it will not be downloaded.
ref_url (str) : Url for the download of the reference genome.
from2bit (bool) : A boolean [True, False] indicating whether the reference genome must be converted from 2bit.
Output:
A dict mapping keys to `luigi.LocalTarget` instances for each of the processed files.
The following keys are available:
'faidx' : Local file with the index, result of indexing with Samtools.
'bwa' : Set of five files, result of indexing the reference genome with Bwa.
'fa' : Local file with the reference genome.
"""
reference_local_file = luigi.Parameter(default='',
description='Optional string indicating the location for the reference genome. If set, it will not be downloaded.')
ref_url = luigi.Parameter(default='',
description="Url for the download of the reference genome.")
from2bit = luigi.BoolParameter(parsing=luigi.BoolParameter.EXPLICIT_PARSING,
description="A boolean indicating whether the reference genome must be converted from 2bit. Defaults to false.")
if __name__ == '__main__':
luigi.run(['ReferenceGenome',
'--ReferenceGenome-ref-url', 'ftp://hgdownload.cse.ucsc.edu/goldenPath/hg19/bigZips/hg19.2bit',
'--ReferenceGenome-from2bit', 'True',
'--utils.GlobalParams-base-dir', path.abspath(path.curdir),
'--utils.GlobalParams-log-dir', path.abspath(path.curdir),
'--utils.GlobalParams-exp-name', 'hg19'])
| [
11748,
28686,
198,
11748,
300,
84,
25754,
198,
6738,
300,
84,
25754,
13,
3642,
822,
13,
22615,
62,
23065,
1330,
34579,
15167,
25714,
198,
6738,
266,
9774,
541,
4470,
1330,
3384,
4487,
220,
198,
198,
4871,
3497,
15167,
7,
41506,
15167,
... | 2.969474 | 1,769 |
#!/usr/bin/env python
#
# Copyright (c) 2005-2009 Jaroslav Gresula
#
# Distributed under the MIT license (See accompanying file
# LICENSE.txt or copy at http://jagpdf.org/LICENSE.txt)
#
import os
import math
import jagpdf
import jag.testlib as testlib
if __name__ == "__main__":
test_main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
357,
66,
8,
5075,
12,
10531,
15374,
26388,
402,
411,
4712,
198,
2,
198,
2,
4307,
6169,
739,
262,
17168,
5964,
357,
6214,
19249,
2393,
198,
2,
38559,
24290,
13,
14... | 2.820755 | 106 |
import time
import numpy as np
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
import smtplib
from_addr = "reposter@sina.com"
password = ""
| [
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
3053,
13,
76,
524,
13,
5239,
1330,
337,
3955,
2767,
2302,
198,
6738,
3053,
13,
76,
524,
13,
16680,
541,
433,
1330,
337,
3955,
3620,
586,
541,
433,
198,
6738,
3053,
13,
25677... | 2.945205 | 73 |
from typing import Tuple
import numpy as np
def log_spaced(max_num: int, points_per_decate: int = 15) -> np.ndarray:
"""Generate an array of log spaced integers smaller than L"""
decades = np.log10(max_num)
series: np.ndarray = np.unique(
np.logspace(
start=0,
stop=decades,
num=int(decades * points_per_decate),
base=10,
endpoint=False,
).astype(int)
)
return series
| [
6738,
19720,
1330,
309,
29291,
198,
198,
11748,
299,
32152,
355,
45941,
628,
198,
4299,
2604,
62,
2777,
2286,
7,
9806,
62,
22510,
25,
493,
11,
2173,
62,
525,
62,
12501,
378,
25,
493,
796,
1315,
8,
4613,
45941,
13,
358,
18747,
25,
... | 2.131818 | 220 |
with open('./flag') as f:
flag = f.read().strip()
print('''
________ .__ ______
/ _____/ __ __ ____ ______ ______ ____ ____ | | ___________ / __ \
/ \ ___| | \_/ __ \ / ___// ___// ___\/ __ \| | _/ __ \_ __ \> <
\ \_\ \ | /\ ___/ \___ \ \___ \\\\ \__\ ___/| |_\ ___/| | \/ -- \
\______ /____/ \___ >____ >____ >\___ >___ >____/\___ >__| \______ /
\/ \/ \/ \/ \/ \/ \/ \/
— Can you guess the flag?
''')
guess = ''
while guess != flag:
guess = input('> ')
if len(guess) != len(flag):
print('Invalid length.')
continue
out = ''
for j,k in zip(guess, flag):
if j == k:
out += j
elif j in flag:
out += '*'
else:
out += '.'
print(out)
print('%s is the correct flag.' % flag)
| [
4480,
1280,
7,
4458,
14,
32109,
11537,
355,
277,
25,
198,
220,
220,
220,
6056,
796,
277,
13,
961,
22446,
36311,
3419,
198,
198,
4798,
7,
7061,
6,
198,
220,
220,
2602,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
2... | 1.74954 | 543 |
from typing import List
from random import randint
from utils import create_random_list, measure
@measure
if __name__ == "__main__":
main()
| [
6738,
19720,
1330,
7343,
198,
6738,
4738,
1330,
43720,
600,
198,
6738,
3384,
4487,
1330,
2251,
62,
25120,
62,
4868,
11,
3953,
628,
198,
198,
31,
1326,
5015,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
... | 3.170213 | 47 |
from google.appengine.ext import ndb
import webapp2
import db_models
import json
| [
6738,
23645,
13,
1324,
18392,
13,
2302,
1330,
299,
9945,
198,
11748,
3992,
1324,
17,
198,
11748,
20613,
62,
27530,
198,
11748,
33918,
198
] | 3.375 | 24 |
# Generated by Django 3.1.13 on 2021-11-11 09:28
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
1485,
319,
33448,
12,
1157,
12,
1157,
7769,
25,
2078,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.875 | 32 |
from typing import Iterable
from django.utils.translation import gettext_lazy
from rest_framework_friendly_errors import settings
def validation_failed_dict(
items: Iterable,
dict_code: int = 1001,
dict_message: str = 'Validation Failed'
):
"""Generate dict for failed validation in format of DRF-friendly-errors.
Attributes:
items: Items to put into dict. In format: [(code, field, message), ...]
Returns:
dict: In format of DRF
"""
return {
'code': dict_code,
'message': dict_message,
'errors': [
{'code': i[0], 'field': i[1], 'message': i[2]} for i in items
]
}
def username_duplicated(message_trans_key: str = 'username_duplicated'):
"""Return failed validation dict for duplicated username.
Attributes:
message_trans_key: Error message as key used for translation function (gettext).
Returns:
dict: In format of DRF
"""
item = (
settings.FRIENDLY_VALIDATOR_ERRORS['UniqueValidator'],
'username',
gettext_lazy(message_trans_key)
)
return validation_failed_dict([item])
| [
6738,
19720,
1330,
40806,
540,
198,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
651,
5239,
62,
75,
12582,
198,
6738,
1334,
62,
30604,
62,
13120,
62,
48277,
1330,
6460,
628,
198,
4299,
21201,
62,
47904,
62,
11600,
7,
198,
220,... | 2.598639 | 441 |
import subprocess
from benchmark_runner.common.logger.logger_time_stamp import logger
from benchmark_runner.common.ssh.ssh_exceptions import SSHSubprocessError
from benchmark_runner.main.environment_variables import environment_variables
class SSH:
"""
This class run local SSH commands
"""
def run(self, cmd: str, is_check: bool = False, background: bool = False):
"""
This method run shell commands
:param background: run ssh in background
:param cmd:
:param is_check:run check command
:return:
"""
try:
if background:
output = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
elif is_check:
output = subprocess.check_output(
cmd,
stderr=subprocess.STDOUT,
shell=True, timeout=self.timeout,
universal_newlines=False)
# execute cmd
else:
output = subprocess.getoutput(cmd)
return output
except subprocess.CalledProcessError as err:
logger.error(f'subprocess Status : FAIL: {err.returncode} {err.output}')
raise SSHSubprocessError()
except Exception as err:
raise err
| [
198,
11748,
850,
14681,
198,
6738,
18335,
62,
16737,
13,
11321,
13,
6404,
1362,
13,
6404,
1362,
62,
2435,
62,
301,
696,
1330,
49706,
198,
6738,
18335,
62,
16737,
13,
11321,
13,
45824,
13,
45824,
62,
1069,
11755,
1330,
33825,
7004,
146... | 2.217391 | 598 |
import os
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
setup(
name = "multicube",
version = "0.0.0",
author = "Vlas Sokolov",
author_email = "vlas145@gmail.com",
description = ("Tools for processing multiple component spectra."),
long_description=read('README.md'),
license = "MIT",
url = "https://github.com/vlas-sokolov/multicube",
packages = ["multicube", "examples"],
)
| [
11748,
28686,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
1456,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
796,
366,
... | 2.601156 | 173 |
#!/usr/bin/env python3
#################################################################################
# #
# Program purpose: Test whether two lines are parallel. #
# Program Author : Happi Yvan <ivensteinpoker@gmail.com> #
# Creation Date : September 19, 2019 #
# #
#################################################################################
if __name__ == '__main__':
# Read points for first line
pt_a = read_point("Enter point A [as x y] for line A: ")
pt_b = read_point("Enter point B [as x y] for line A: ")
line_a = Line_2D(point_a=pt_a, point_b=pt_b)
print(f"{'-' * 30}")
# Read points for second line
pt_c = read_point("Enter point A [as x y] for line B: ")
pt_d = read_point("Enter point B [as x y] for line B: ")
line_b = Line_2D(point_a=pt_c, point_b=pt_d)
if is_parallel(main_line_a=line_a, main_line_b=line_b):
print(f"Lines are parallel")
else:
print(f"Lines are NOT parallel")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
29113,
29113,
14468,
2,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
22... | 2.038462 | 598 |
from ctypes import *
import ctypes
SharedBuffer = SharedBuffer_def(10,1.0,b"Hola")
print("Size of Shared Buffer : %d \n"%(sizeof(SharedBuffer)))
SHM_name = "testSHM3"
from multiprocessing import shared_memory, resource_tracker
shm = shared_memory.SharedMemory(name=SHM_name, size=sizeof(SharedBuffer), create=False)
# resource_tracker.unregister(shm._name, 'shared_memory')
# SharedBuffer.from_buffer_copy(shm.buf)
ctypes.memmove(ctypes.addressof(SharedBuffer), shm.buf.tobytes(), sizeof(SharedBuffer))
print(SharedBuffer.A)
print(SharedBuffer.B)
print(SharedBuffer.C)
SharedBuffer.A = 1000
# ctypes.memcpy(shm.buf, SharedBuffer, sizeof(SharedBuffer))
shm.buf[:] = SharedBuffer[:]
shm.close()
shm.unlink() | [
6738,
269,
19199,
1330,
1635,
198,
11748,
269,
19199,
198,
198,
2484,
1144,
28632,
796,
39403,
28632,
62,
4299,
7,
940,
11,
16,
13,
15,
11,
65,
1,
39,
5708,
4943,
198,
198,
4798,
7203,
10699,
286,
39403,
47017,
1058,
220,
4064,
67,
... | 2.599278 | 277 |
#!/usr/bin/env python3
"""
Benchmark on the struct module for FASTCALL.
http://bugs.python.org/issue29300
Created at 2017-02-02 by Victor STINNER.
"""
import pyperf
runner = pyperf.Runner()
runner.timeit('int.to_bytes(1, 4, "little")',
'to_bytes(1, 4, "little")',
setup='to_bytes = int.to_bytes',
duplicate=100)
runner.timeit('struct.pack("i", 1)',
'pack("i", 1)',
setup='import struct; pack = struct.pack',
duplicate=100)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
44199,
4102,
319,
262,
2878,
8265,
329,
376,
1921,
4825,
7036,
13,
198,
198,
4023,
1378,
32965,
13,
29412,
13,
2398,
14,
21949,
1959,
6200,
198,
198,
41972,
379,
2177,
... | 2.102459 | 244 |
import contextlib
import textwrap
import secrets
import subprocess
import pathlib
import functools
import pytest
import portend
import requests
import jaraco.envs
import munch
from twine import settings
@pytest.fixture()
@pytest.fixture()
def make_settings(pypirc):
"""Returns a factory function for settings.Settings with defaults."""
default_pypirc = """
[pypi]
username:foo
password:bar
"""
return _settings
class DevPiEnv(jaraco.envs.ToxEnv):
"""
Run devpi using tox:testenv:devpi.
"""
name = 'devpi'
username = 'foober'
@property
@property
@pytest.fixture(scope='session')
dist_names = [
'twine-1.5.0.tar.gz',
'twine-1.5.0-py2.py3-none-any.whl',
'twine-1.6.5.tar.gz',
'twine-1.6.5-py2.py3-none-any.whl',
]
@pytest.fixture(params=dist_names)
| [
11748,
4732,
8019,
198,
11748,
2420,
37150,
198,
11748,
13141,
198,
11748,
850,
14681,
198,
11748,
3108,
8019,
198,
11748,
1257,
310,
10141,
198,
198,
11748,
12972,
9288,
198,
11748,
2493,
437,
198,
11748,
7007,
198,
11748,
17379,
10602,
... | 2.34903 | 361 |
from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.DistributionRequirement import (
DistributionRequirement)
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from coalib.settings.Setting import typed_list
@linter(executable='cppcheck',
use_stdout=False,
use_stderr=True,
output_format='regex',
output_regex=r'(?P<line>\d+):(?P<severity>[a-zA-Z]+):'
r'(?P<origin>[a-zA-Z]+):(?P<message>.*)',
severity_map={'error': RESULT_SEVERITY.MAJOR,
'warning': RESULT_SEVERITY.NORMAL,
'style': RESULT_SEVERITY.INFO})
class CPPCheckBear:
"""
Report possible security weaknesses for C/C++.
For more information, consult <https://github.com/danmar/cppcheck>.
"""
LANGUAGES = {'C', 'C++'}
REQUIREMENTS = {DistributionRequirement('cppcheck')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Security', 'Unused Code', 'Unreachable Code', 'Smell'}
@staticmethod
def create_arguments(filename, file, config_file,
enable: typed_list(str)=[]):
"""
:param enable:
Choose specific issues to report. Issues that can be
reported are: all, warning, style, performance,
portability, information, unusedFunction,
missingInclude
"""
args = ('--template={line}:{severity}:{id}:{message}',)
if enable:
args += ('--enable=' + ','.join(enable),)
return args + (filename,)
| [
6738,
5655,
571,
13,
33227,
8019,
13,
397,
8709,
507,
13,
43,
3849,
1330,
300,
3849,
198,
6738,
20203,
62,
27604,
13,
8897,
18883,
13,
20344,
3890,
16844,
24615,
1330,
357,
198,
220,
220,
220,
27484,
16844,
24615,
8,
198,
6738,
5655,
... | 2.247967 | 738 |
# Necessary imports. Provides library functions to ease writing tests.
from lib import prebuild, testcase, SUBMITTY_INSTALL_DIR
import subprocess
import os
import glob
############################################################################
# COPY THE ASSIGNMENT FROM THE SAMPLE ASSIGNMENTS DIRECTORIES
SAMPLE_ASSIGNMENT_CONFIG = SUBMITTY_INSTALL_DIR + "/more_autograding_examples/cpp_custom/config"
SAMPLE_SUBMISSIONS = SUBMITTY_INSTALL_DIR + "/more_autograding_examples/cpp_custom/submissions"
@prebuild
############################################################################
@testcase
@testcase
@testcase
@testcase
@testcase
@testcase
| [
2,
19652,
408,
560,
17944,
13,
47081,
5888,
5499,
284,
10152,
3597,
5254,
13,
198,
6738,
9195,
1330,
662,
11249,
11,
1332,
7442,
11,
28932,
36393,
9936,
62,
38604,
7036,
62,
34720,
198,
198,
11748,
850,
14681,
198,
11748,
28686,
198,
... | 3.528796 | 191 |
# -*- coding: utf-8 -*-
import os
import sphinx_rtd_theme
# sys.path.insert(0, os.path.abspath('.'))
def get_version():
"""
:return: # The short X.Y version.
"""
return ".".join(os.environ.get('MFMODULE_VERSION',
'unknown.unknown').split('.')[0:-1])
def get_release():
"""
:return: the full version, including alpha/beta/rc tags
"""
return os.environ.get('MFMODULE_VERSION', 'unknown')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.autosectionlabel',
'sphinx.ext.todo',
'sphinx_automodapi.automodapi',
'sphinx_automodapi.smart_resolver',
]
# A dictionary of values to pass into the template engine’s context for all pages
html_context = {
# Enable the "Edit in GitHub link within the header of each page. See https://docs.readthedocs.io/en/stable/vcs.html
'display_github': True,
# Set the following variables to generate the resulting github URL for each page.
'github_user': 'metwork-framework',
'github_repo': 'mfext',
'github_version': 'integration',
# Path in the checkout to the docs root
'conf_py_path': '/doc/',
# Changes how to view files when using display_github, display_gitlab, etc.
# When using GitHub or GitLab this can be: blob (default), edit, or raw.
# See https://sphinx-rtd-theme.readthedocs.io/en/latest/configuring.html#confval-vcs_pageview_mode
# Warning : the parameter is theme_vcs_pageview_mode and not vcs_pageview_mode as mentionned in the the documentation
'theme_vcs_pageview_mode': 'edit'
}
# True to prefix each section label with the name of the document it is in,
# followed by a colon. For example, index:Introduction for a section called Introduction that appears in document index.rst.
# Useful for avoiding ambiguity when the same section heading appears in different documents.
autosectionlabel_prefix_document = True
# If set, autosectionlabel chooses the sections for labeling by its depth.
# For example, when set 1 to autosectionlabel_maxdepth, labels are generated only for top level sections,
# and deeper sections are not labeled. It defaults to None (disabled).
autosectionlabel_maxdepth = 3
# The output format for Graphviz when building HTML files. This must be either 'png' or 'svg'
graphviz_output_format = 'svg'
# This must be a string that specifies the name of the directory the automodsumm generated documentation ends up in.
# This directory path should be relative to the documentation root (e.g., same place as index.rst). Defaults to 'api'.
automodapi_toctreedirnm = 'api'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
#source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'mfext'
copyright = u'2017-2019, MetWork'
author = u'MetWork'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = get_version()
# The full version, including alpha/beta/rc tags.
release = get_release()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Emits a warning or not for each TO DO entries. The default is False.
todo_emit_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_style = 'css/override_theme.css'
html_logo = '_images/logo-metwork.png'
html_favicon = '_images/metwork.ico'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'mfextdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'mfext.tex',
u'mfext Documentation',
u'MetWork', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mfext', u'mfext Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'mfext', u'mfext Documentation',
author, 'mfext', 'One line description of project.',
'Miscellaneous'),
]
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = True
napoleon_use_param = False
autoclass_content = 'both'
def build_intersphinx_mapping_url(current_module, module):
"""
Guess and build the documentation url for a Metwork module
:param current_module: the current Metwork module name
:param module: the Metwork module name (e.g. mfext, mfdata, ...)
:return: the documentation url oof the module
"""
current_version = get_version()
# By default, we choose the 'release' url
url = "http://metwork-framework.org/pub/metwork/releases/docs/release_{}/{}".format(current_version, module)
if current_version.startswith("integration"):
url = "http://metwork-framework.org/pub/metwork/continuous_integration/docs/integration/{}".format(module)
elif current_version.startswith("dev"):
# CAUTION: here we assume the version (i.e. git branch) of documentation development starts with 'dev'
url = "{}/_build/html".format(os.path.abspath(os.path.dirname(__file__))).replace(current_module, module)
return url
intersphinx_mapping = {
'mfext': (build_intersphinx_mapping_url(project, 'mfext'), None),
'mfadmin': (build_intersphinx_mapping_url(project, 'mfadmin'), None),
'mfbase': (build_intersphinx_mapping_url(project, 'mfbase'), None),
}
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
11748,
599,
20079,
87,
62,
81,
8671,
62,
43810,
198,
2,
25064,
13,
6978,
13,
28463,
7,
15,
11,
28686,
13,
6978,
13,
397,
2777,
776,
10786,
26... | 3.031128 | 2,827 |
import os
from datetime import datetime
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
virtualenv_path = '/usr/lib/ckan'
def test_dynamic_menu(host):
"""Test menu.json is prepopulated with old modification time
https://github.com/GSA/catalog-app/issues/76"""
dynamic_menu = host.file('/var/tmp/ckan/dynamic_menu/menu.json')
assert dynamic_menu.exists
assert dynamic_menu.user == 'www-data'
assert dynamic_menu.group == 'www-data'
assert dynamic_menu.mode == 0o644
# We do a loose assertion on mtime to avoid timezone issues
assert dynamic_menu.mtime < datetime(2020, 1, 2)
| [
11748,
28686,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
11748,
1332,
10745,
430,
13,
26791,
13,
504,
856,
62,
16737,
198,
198,
9288,
10745,
430,
62,
4774,
82,
796,
1332,
10745,
430,
13,
26791,
13,
504,
856,
62,
16737,
13,
2... | 2.716912 | 272 |
__author__ = 'traff'
import threading
import os
import sys
import tempfile
from _prof_imports import Stats, FuncStat, Function
try:
execfile=execfile #Not in Py3k
except NameError:
#We must redefine it in Py3k if it's not already there
| [
834,
9800,
834,
796,
705,
9535,
487,
6,
198,
198,
11748,
4704,
278,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
20218,
7753,
198,
6738,
4808,
5577,
62,
320,
3742,
1330,
20595,
11,
11138,
66,
17126,
11,
15553,
198,
198,
28311,
25... | 2.94186 | 86 |
#!/usr/bin/python3
from gui import Gui
if __name__ == '__main__':
Gui.run()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
6738,
11774,
1330,
1962,
72,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1962,
72,
13,
5143,
3419,
198
] | 2.25 | 36 |
import bs4
import json
import os
import requests
from utils import (get_content, get_soup, create_dir, write_image, save_json)
BASE_URL = 'https://www.shonenjump.com'
RENSAI_URL = BASE_URL + '/j/rensai/'
ARCHIVES_URL = RENSAI_URL + 'archives.html'
LIST_URL = RENSAI_URL + 'list/'
if __name__ == '__main__':
shonenjump() | [
11748,
275,
82,
19,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
7007,
198,
198,
6738,
3384,
4487,
1330,
357,
1136,
62,
11299,
11,
651,
62,
82,
10486,
11,
2251,
62,
15908,
11,
3551,
62,
9060,
11,
3613,
62,
17752,
8,
198,
198,
... | 2.451128 | 133 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains different graphic items
"""
from __future__ import print_function, division, absolute_import
from Qt.QtCore import Qt, Signal, QObject, QPoint, QPointF
from Qt.QtWidgets import QGraphicsItem
from Qt.QtGui import QColor
class BaseGraphicsItem(QGraphicsItem, object):
"""
Base class for graphics items
"""
itemChanged = Signal()
itemDeleted = Signal()
@property
@enabled.setter
@property
@selected.setter
@property
@hovered.setter
@property
@position.setter
@property
@width.setter
@property
@height.setter
@property
@property
@property
@property
@property
@property
@render_effects.setter
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
26796,
326,
4909,
1180,
13028,
3709,
198,
37811,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
... | 2.715278 | 288 |
import datetime
import numpy as np
import networkx as nx
import concurrent.futures
from functools import partial
PRINT_TIME=False
def disc(samples1, samples2, kernel, is_parallel=True, *args, **kwargs):
''' Discrepancy between 2 samples '''
d = 0
if not is_parallel:
for s1 in samples1:
for s2 in samples2:
d += kernel(s1, s2, *args, **kwargs)
else:
# with concurrent.futures.ProcessPoolExecutor() as executor:
# for dist in executor.map(kernel_parallel_worker, [
# (s1, samples2, partial(kernel, *args, **kwargs)) for s1 in samples1
# ]):
# d += dist
with concurrent.futures.ThreadPoolExecutor() as executor:
for dist in executor.map(kernel_parallel_worker, [
(s1, samples2, partial(kernel, *args, **kwargs)) for s1 in samples1
]):
d += dist
d /= len(samples1) * len(samples2)
return d
def compute_mmd(samples1, samples2, kernel, is_hist=True, *args, **kwargs):
''' MMD between two samples '''
print("--- MMD of sample1: {}, sample2:{}.---".format(len(samples1),len(samples2)))
# normalize histograms into pmf
if is_hist:
samples1 = [s1 / np.sum(s1) for s1 in samples1]
samples2 = [s2 / np.sum(s2) for s2 in samples2]
# print('===============================')
# print('s1: ', disc(samples1, samples1, kernel, *args, **kwargs))
# print('--------------------------')
# print('s2: ', disc(samples2, samples2, kernel, *args, **kwargs))
# print('--------------------------')
# print('cross: ', disc(samples1, samples2, kernel, *args, **kwargs))
# print('===============================')
return disc(samples1, samples1, kernel, *args, **kwargs) + \
disc(samples2, samples2, kernel, *args, **kwargs) - \
2 * disc(samples1, samples2, kernel, *args, **kwargs)
def degree_stats(graph_ref_list, graph_pred_list, is_parallel=True):
''' Compute the distance between the degree distributions of two unordered sets of graphs.
Args:
graph_ref_list, graph_target_list: two lists of networkx graphs to be evaluated
'''
sample_ref = []
sample_pred = []
# in case an empty graph is generated
graph_pred_list_remove_empty = [
G for G in graph_pred_list if not G.number_of_nodes() == 0
]
prev = datetime.datetime.now()
if is_parallel:
with concurrent.futures.ThreadPoolExecutor() as executor:
for deg_hist in executor.map(degree_worker, graph_ref_list):
sample_ref.append(deg_hist)
with concurrent.futures.ThreadPoolExecutor() as executor:
for deg_hist in executor.map(degree_worker, graph_pred_list_remove_empty):
sample_pred.append(deg_hist)
else:
for i in range(len(graph_ref_list)):
degree_temp = np.array(nx.degree_histogram(graph_ref_list[i]))
sample_ref.append(degree_temp)
for i in range(len(graph_pred_list_remove_empty)):
degree_temp = np.array(nx.degree_histogram(graph_pred_list_remove_empty[i]))
sample_pred.append(degree_temp)
# mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=gaussian_emd)
# mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=emd)
mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=gaussian_tv, sigma=2.0)
# mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=gaussian)
elapsed = datetime.datetime.now() - prev
if PRINT_TIME:
print('Time computing degree mmd: ', elapsed)
return mmd_dist
| [
11748,
4818,
8079,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
3127,
87,
355,
299,
87,
198,
11748,
24580,
13,
69,
315,
942,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
4805,
12394,
62,
34694,
28,
25101,
628,
628,
198,
198,
4299... | 2.482004 | 1,417 |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 25 10:42:21 2017
@author: Keerthi
"""
#from subseq_gen import subseq_rule
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
30030,
5979,
1679,
838,
25,
3682,
25,
2481,
2177,
201,
198,
201,
198,
31,
9800,
25,
3873,
263,
400,
72,
201,
198,
37811,
201,
198,
... | 1.541985 | 131 |
from .metrics.sequence_labeling import classification_report, precision_recall_fscore_support | [
6738,
764,
4164,
10466,
13,
43167,
62,
18242,
278,
1330,
17923,
62,
13116,
11,
15440,
62,
8344,
439,
62,
69,
26675,
62,
11284
] | 4.043478 | 23 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from argcomplete.completers import FilesCompleter
from knack.arguments import CLIArgumentType
from azure.cli.core.commands.parameters import \
(get_resource_name_completion_list, file_type, get_location_type, get_three_state_flag,
get_enum_type)
from azure.cli.command_modules.backup._validators import \
(datetime_type)
# ARGUMENT DEFINITIONS
allowed_container_types = ['AzureIaasVM']
allowed_workload_types = ['VM']
vault_name_type = CLIArgumentType(help='Name of the Recovery services vault.', options_list=['--vault-name', '-v'], completer=get_resource_name_completion_list('Microsoft.RecoveryServices/vaults'))
container_name_type = CLIArgumentType(help='Name of the container.', options_list=['--container-name', '-c'])
item_name_type = CLIArgumentType(help='Name of the backed up item.', options_list=['--item-name', '-i'])
policy_name_type = CLIArgumentType(help='Name of the backup policy.', options_list=['--policy-name', '-p'])
job_name_type = CLIArgumentType(help='Name of the job.', options_list=['--name', '-n'])
rp_name_type = CLIArgumentType(help='Name of the recovery point.', options_list=['--rp-name', '-r'])
# pylint: disable=too-many-statements
| [
2,
16529,
1783,
10541,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
5964,
1321,
13,
198,
2,
16529,
1783,
10541,
198,... | 3.339019 | 469 |
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 02 17:32:15 2014
@author: jaco_da
"""
import unittest
from sqpdfo.runtime import *
from sqpdfo.sqpdfo_global_variables import set_threshold, set_check_condition
from sqpdfo.sqpdfo import sqpdfo_
from numpy import array, zeros, arange, double
from numpy.testing import assert_almost_equal
from sqpdfo import helper
import tests.benchmarks
class Test_run_sqpdfo(unittest.TestCase):
"""
Reminder :
This class is a test for run_sqpdfo
these tests are run with whichmodel=0 and pquad=(n+1)(n+2)/2 in sqpdfo_() !!
"""
def test_run_sqpdfo_prob1(self):
"""
Test which compare python and matlab results
"""
args = tests.benchmarks.get(1)
set_check_condition(0)
set_threshold(1e-08)
options = helper.dummyUnionStruct()
options.hess_approx='model'
options.bfgs_restart=0
options.algo_descent='Powell'
options.tol_grad=1e-05
options.tol_feas=1e-05
options.tol_bnds=1e-05
options.dxmin=1e-6
options.miter=500
options.msimul=500
options.verbose=0
options.whichmodel = 'subbasis'
options.final_degree = 'quadratic'
x,lm,info=sqpdfo_(options, *args)
self.assertTrue(compare_array(x, array([[1.950000000000000,0.262499999999991]]), 1e-5, 1e-5))
self.assertTrue(compare_array(lm, array([[-0.6375,0, 0.7375]]), self.abs_tol, self.rel_tol))
self.assertTrue(compare_array(info.g, array([[-0.1, -2.95]]), self.abs_tol, self.rel_tol))
self.assertTrue(compare_array(info.ae, array([[ 1., 4.]]), self.abs_tol, self.rel_tol))
self.assertEqual(info.niter,4)
self.assertAlmostEqual(double(info.ce), -3.819167204710539e-14,places=5)
self.assertEqual(info.flag,0)
self.assertTrue(compare_array(info.nsimul, array([[0, 9, 0, 0 ]]), self.abs_tol, self.rel_tol))
self.assertAlmostEqual(info.f,-3.909687499999972,places=10)
self.assertEqual(info.compl,0)
self.assertTrue(compare_array(info.glag, 1e-10*array([0.76387451919401,-0.381947806715743]), self.abs_tol, self.rel_tol))
def test_run_sqpdfo_prob2(self):
"""
Test which compare python and matlab results
"""
args = tests.benchmarks.get(2)
set_check_condition(0)
set_threshold(1e-08)
options = helper.dummyUnionStruct()
options.hess_approx='model'
options.bfgs_restart=0
options.algo_descent='Powell'
options.tol_grad=1e-05
options.tol_feas=1e-05
options.tol_bnds=1e-05
options.dxmin=1e-6
options.miter=500
options.msimul=500
options.verbose=0
options.whichmodel = 'subbasis'
options.final_degree = 'quadratic'
x,lm,info=sqpdfo_(options, *args)
self.assertTrue(compare_array(x, array([[ 0.333326758778846, 0.666659126169760]]), self.abs_tol, self.rel_tol))
self.assertTrue(compare_array(lm, array([[0,0, -1.333312643708242]]), self.abs_tol, self.rel_tol))
self.assertTrue(compare_array(info.g, array([[ 1.333307035124744, 1.333318252334031]]), self.abs_tol, self.rel_tol))
self.assertTrue(compare_array(info.ae, array([[ 1.000000000014289, 1.000000000017430]]), self.abs_tol, self.rel_tol))
self.assertEqual(info.niter,5)
self.assertAlmostEqual(double(info.ce), -1.411505139448099e-05,places=10)
self.assertEqual(info.flag,0)
self.assertTrue(compare_array(info.nsimul, array([[0, 11, 0, 0]]), self.abs_tol, self.rel_tol))
self.assertAlmostEqual(info.f, 0.666647846741449,places=10)
self.assertEqual(info.compl,0)
self.assertTrue(compare_array(info.glag, 1e-05*array([ -0.560839309260430,0.560883753109032]), self.abs_tol, self.rel_tol))
#
def test_run_sqpdfo_prob3(self):
"""
Test which compare python and matlab results
"""
set_check_condition(0)
args = tests.benchmarks.get(3)
set_threshold(1e-08)
options = helper.dummyUnionStruct()
options.hess_approx='model'
options.bfgs_restart=0
options.algo_descent='Powell'
options.tol_grad=1e-05
options.tol_feas=1e-05
options.tol_bnds=1e-05
options.dxmin=1e-6
options.miter=500
options.msimul=500
options.verbose=0
options.whichmodel = 'subbasis'
options.final_degree = 'quadratic'
x,lm,info=sqpdfo_(options, *args)
self.assertTrue(compare_array(x, array([[-0.5,0,0.5]]), self.abs_tol, self.rel_tol))
self.assertTrue(compare_array(lm, array([[ 0, -0.000005713064576 ,0, 1.999997749517402, -0.999996152071198]]), self.abs_tol, self.abs_tol))
self.assertTrue(compare_array(info.g, array([[ -1.000001597463464, 0.000000267687146 , 0.999990706694728]]), self.abs_tol, self.rel_tol))
self.assertTrue(compare_array(info.ae, array([[ 1.000000000001365, 1.000000000001130, 0.999999999995923],[ 0.999999999985469 , 1.999999999999835, 2.999999999990382 ]]), self.abs_tol, self.rel_tol))
self.assertEqual(info.niter,4)
self.assertTrue(compare_array(info.ce, array([[0.222044604925031e-15, -0.111022302462516e-15]]),self.abs_tol, self.rel_tol))
self.assertEqual(info.flag,0)
self.assertTrue(compare_array(info.nsimul, array([[0, 11, 0, 0]]), self.abs_tol, self.rel_tol))
self.assertAlmostEqual(info.f, 0.500000000000000,places=10)
self.assertEqual(info.compl,0)
self.assertTrue(compare_array(info.glag, 1e-07*array([0.062859997207454, 0.188580686583393,-0.188580611126810]), self.abs_tol, self.rel_tol))
def test_run_sqpdfo_prob4(self):
"""
Test which compare python and matlab results
"""
set_check_condition(0)
args = tests.benchmarks.get(4)
set_threshold(1e-08)
options = helper.dummyUnionStruct()
options.hess_approx='model'
options.bfgs_restart=0
options.algo_descent='Powell'
options.tol_grad=1e-05
options.tol_feas=1e-05
options.tol_bnds=1e-05
options.dxmin=1e-6
options.miter=500
options.msimul=500
options.verbose=0
options.whichmodel = 'subbasis'
options.final_degree = 'quadratic'
x,lm,info=sqpdfo_(options, *args)
self.assertTrue(compare_array(x, array([[ -0.5, 0.0, 0.5, 1.0]]), self.abs_tol, 1e-5))
self.assertTrue(compare_array(lm, array([[ 0,0,0,0,1.999999758015728,-0.999999892175830,-0.333333335490867]]), self.abs_tol, 1e-6))
self.assertTrue(compare_array(info.g, array([[ -1.0, 0.0 , 1.0, 1.0]]), 1e-4, 1e-4))
self.assertTrue(compare_array(info.ae, array([[1.00000000000119, 1.00000000000153, 1.00000000000242, 1.09566597368092e-12],[1.00000000000310, 1.99999999999671, 3.00000000000154, 5.36746539189640e-12],[1.43689077548211e-12, 3.68227391508966e-12, -3.71857711253322e-12, 2.99999999015165]]), self.abs_tol, 1e-6))
self.assertTrue(compare_array(info.ce, array([ 0., 0., 0.]), 1e-4, 1e-4))
self.assertEqual(info.flag,0)
self.assertAlmostEqual(info.f, 1.5, places=6)
self.assertEqual(info.compl,0)
self.assertTrue(compare_array(info.glag, array([0.,0.,0.,0.]), 1e-4, 1e-4))
def test_run_sqpdfo_prob5(self):
"""
Test which compare python and matlab results
"""
set_check_condition(0)
set_threshold(1e-08)
options = helper.dummyUnionStruct()
options.hess_approx='model'
options.bfgs_restart=0
options.algo_descent='Powell'
options.tol_grad=1e-05
options.tol_feas=1e-05
options.tol_bnds=1e-05
options.dxmin=1e-6
options.miter=500
options.msimul=500
options.verbose=0
options.whichmodel = 'subbasis'
options.final_degree = 'quadratic'
x,lm,info=sqpdfo_(options, *tests.benchmarks.get(5))
assert_almost_equal(x, array([ -1.717135373541669,1.595700197732079,1.827260995778992,0.763703525309065,0.763584463389690]), decimal=3)
assert_almost_equal(lm, array([[ 0,0,0,0,0, 0.040162755804678,-0.037957678618516, 0.005222725990309]]).T, decimal=3)
assert_almost_equal(info.g.T, array([[ 0.091732656086263, -0.098713648653029, -0.086204099493362, -0.206254630841206, -0.206286791111375]]), decimal=4)
assert_almost_equal(info.ae, array([[-3.43427074714240, 3.19140039544429, 3.65452199155318, 1.52740705050108 ,1.52716892687853],
[-1.50249321407029e-11, 1.82726099577928 ,1.59570019773225 ,-3.81792231695518 ,-3.81851762654697],
[8.84566167319830, 7.63877736315284, 2.39648693851626e-11 ,3.91573621289396e-11, 1.95315487528657e-11]]), decimal=3)
assert_almost_equal(info.ce.T, 1.0e-07 *array([[ 0.661051284822634, -0.005370299760443, -0.042228007757217]]), decimal=5)
self.assertEqual(info.flag,0)
assert_almost_equal(info.f, 0.053949845718415, decimal=7)
self.assertEqual(info.compl,0)
assert_almost_equal(info.glag, array([[0.,0.,0.,0.,0.]]).T, decimal=4)
if __name__ == '__main__':
unittest.main() | [
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
4280,
7816,
1596,
25,
2624,
25,
1314,
1946,
198,
198,
31,
9800,
25,
474,
10602,
62,
6814,
198,
37811,
198,
198,
11748,
555,
715,
39... | 2.001943 | 4,633 |
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from pysnap import Snapshot
snapshots = Snapshot()
snapshots['TestJestConfig.test_read[BIN] 1'] = {'/srv/unittests/node_modules/.bin/jest'}
snapshots['TestJestConfig.test_read[CONFIG_FILE] 1'] = {'jest.config.json'}
snapshots['TestJestConfig.test_read[CONFIG_FILE_PATH] 1'] = {'/srv/unittests/project/jest.config.json'}
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
11495,
1477,
24879,
25,
410,
16,
532,
3740,
1378,
42469,
13,
4743,
14,
89,
34,
19,
88,
52,
66,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
19... | 2.432584 | 178 |
# File: Q (Python 2.4)
from pandac.PandaModules import *
mainStoryTier = [
'MainStory',
'Chapter 1',
'Chapter 2',
'Chapter 3']
mainStoryTier2 = [
'RavenCoveStory']
weaponUnlockTier = [
'WeaponDoll',
'WeaponDagger',
'WeaponGrenade',
'WeaponStaff']
level4WeaponTier = [
'PirateLore',
'FeatsOfStrength',
'GoodForBusiness',
'VoodooDollUnlockL4',
'VoodooStaffUnlockL4']
level5WeaponTier = [
'VoodooDollUnlockL5',
'VoodooStaffUnlockL5',
'DaggerUnlockL5',
'PistolUnlockL5',
'CutlassUnlockL5']
level6WeaponTier = [
'']
teleportUnlockTier = [
'TeleportTotem',
'TPT_PortRoyalUnlock',
'TPT_CubaUnlock',
'TPT_PadresDelFuegoUnlock']
treasureCollectionTier = [
'TreasureRogues',
'TreasureRogues2',
'TreasureTeeth',
'TreasureTeeth2',
'TreasureMedals',
'TreasureMedals2',
'TreasureRings',
'TreasureRings2',
'TreasureChess',
'TreasureChess2',
'TreasureFigurines',
'TreasureFigurines2']
firstTier = mainStoryTier
storyTier = mainStoryTier2
secondTier = weaponUnlockTier
thirdTier = level4WeaponTier
fourthTier = level5WeaponTier
fifthTier = level6WeaponTier
sixthTier = teleportUnlockTier
seventhTier = treasureCollectionTier
| [
2,
9220,
25,
1195,
357,
37906,
362,
13,
19,
8,
198,
198,
6738,
19798,
330,
13,
47,
5282,
5841,
5028,
1330,
1635,
198,
12417,
11605,
35252,
796,
685,
198,
220,
220,
220,
705,
13383,
11605,
3256,
198,
220,
220,
220,
705,
14126,
352,
... | 2.36862 | 529 |
# -*- coding: utf-8 -*-
"""Generate a default configuration-file section for fn_sentinelone"""
def config_section_data():
"""
Produce add the default configuration section to app.config,
for fn_sentinelone when called by `resilient-circuits config [-c|-u]`
"""
config_data = None
config_data = u"""[fn_sentinelone]
# SentinelOne server
sentinelone_server=
# SentinelOne REST API version
api_version=2.1
# SentinelOne API token
api_token=
# SentinelOne poller settings
# Poller interval in seconds, comment out or set to 0 to disable poller
polling_interval=60
# Poller timeback time first time, in minutes
polling_lookback=120
# Comma separated list of SentinelOne account Ids to query for threats by poller
account_ids=
# Comma separated list of SentinelOne site Ids to query for threats by poller
site_ids=
# Optional "query" parameter for querying threats in SentinelOne
query_param=
# Optional incidentStatuses parameter for querying threats from SentinelOne
incident_statuses=resolved,in_progress,unresolved
# Optional "limit" parameter: limit the number of threats returned from querying threats from SentinelOne
limit=25
# Optional sortBy parameter used when querying SentinelOne threats
sort_by=createdDate
# Optional sortOrder for SentinelOne threat query results. Possible values: asc or desc
sort_order=desc
# Optional: send SOAR incident URL live link via threat note to SentinelOne
send_soar_link_to_sentinelone=true
# Custom templates to replace the default map of SentinelOne threat fields to SOAR incident fields
#create_incident_template=
#update_incident_template=
#close_incident_template=
# Optional path to SSL certificate
#verify=false | /path/toclient_certificate.pem
# Optional proxy settings
#http_proxy=
#https_proxy=
"""
return config_data
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
8645,
378,
257,
4277,
8398,
12,
7753,
2665,
329,
24714,
62,
34086,
20538,
505,
37811,
628,
198,
4299,
4566,
62,
5458,
62,
7890,
33529,
198,
220,
220,
220,
... | 3.522505 | 511 |
import sys
import os
import warnings
from random import randint
from collections import OrderedDict
import numpy as np
import pytest
from mmgroup import MM0, MMSpace, MMV
from mmgroup.generators import gen_leech3_op_vector_word
from mmgroup.clifford12 import leech3matrix_echelon
from mmgroup.clifford12 import leech3matrix_sub_diag
from mmgroup.clifford12 import leech3matrix_load
from mmgroup.clifford12 import leech3matrix_kernel_vector
from mmgroup.clifford12 import leech3matrix_compress
from mmgroup.clifford12 import xsp2co1_from_vect_mod3
from mmgroup.clifford12 import leech2matrix_add_eqn
from mmgroup.clifford12 import leech2matrix_solve_eqn
from mmgroup.clifford12 import leech3matrix_watermark
from mmgroup.clifford12 import leech3matrix_watermark_perm_num
from mmgroup.mat24 import MAT24_ORDER
from mmgroup.mm15 import op_word_tag_A
def rand_matrix_mod_n(n, dtype = np.int64):
"""Create random 24 x 24 matrix of intgers modulo n"""
m = np.zeros((24, 24), dtype = dtype)
for i in range(24):
for j in range(24):
m[i,j] = randint(0,n-1)
return m
def print_mod3(m):
"""Print 24 x 24 matrix of integers modulo 3"""
m1 = m % 3
for i in range(24):
print("".join(map(str, m1[i] % 3)))
print("")
def matrix_to_rep_modp(m, p):
"""Convert 24 x 24 matrix to monster rep, part A, modulo p
The function returns the first block of a vector of the monster
rep modulo p. Ths block corresponds to the entries of the
vector with tag 'A'.
"""
assert p in (3, 15)
if p == 3:
m0 = (m - 1) % 3 + 1
b = np.zeros(24, dtype = np.uint64)
for i in range(24):
b[i] = sum(m0[i,j] << (2*j) for j in range(24))
elif p == 15:
m0 = m % 3 + 3 * rand_matrix_mod_n(5)
b = np.zeros(2*24, dtype = np.uint64)
for i in range(24):
r = sum((int(m[i,j]) % 3 ) << (4*j) for j in range(24))
b[2*i] = r & 0xffffffffffffffff
b[2*i + 1] = r >> 64
else:
err = "Module Leech3matrix does not support p=%s"
raise ValueError(err, str(p))
return b
def from_array(m, load_mode = None):
"""Load 24 x 24 matrix 'm' to array of type 'leech3matrix'
If load_mode is 3 or 15 then matrix is converted to a vector of
of the monter rep using function ``matrix_to_rep_modp(m, p)``
with ``p = load_mode``.
By default we convert the matrix directly.
The function returnes an numpy array that can be processed
with the function in file ``leech3matrix.c``.
"""
a = np.zeros(72, dtype = np.uint64)
if load_mode in (3, 15):
b = matrix_to_rep_modp(m, load_mode)
leech3matrix_load(load_mode, b, a)
else:
for i in range(24):
s = int(sum(int(m[i,j]) % 3 << 4*j for j in range(24)))
a[3*i] = s & 0xffffffffffffffff
a[3*i+1] = (s >> 64) & 0xffffffff
return a
def as_array(a, high = 0, dtype = np.int64):
"""Convert array of type 'leech3matrix' to 24 x 24 matrix"""
m = np.zeros((24, 24), dtype = dtype)
jj = [96 * bool(high) + 4 * j for j in range(24)]
jj = [(j >> 6, j & 0x3f) for j in jj]
for i in range(24):
for j, (ofs, shift) in enumerate(jj):
m[i,j] = ((int(a[3*i + ofs]) >> shift) & 3) % 3
return m
#######################################################################
# Test function leech3matrix_echelon
#######################################################################
@pytest.mark.qstate
#######################################################################
# Test function leech3matrix_kernel_vector
#######################################################################
INT_BITS = 64
@pytest.mark.qstate
#######################################################################
# Test functions leech2matrix_add_eqn and leech2matrix_solve_eqn
#######################################################################
@pytest.mark.qstate
#######################################################################
# Test leech3matrix_watermark and leech3matrix_watermark_perm_num
#######################################################################
MMV15 = MMV(15)
MM = MM0
WATERMARK_TESTS = 10
WATERMARK_MIN_SUCCESS = 4
@pytest.mark.qstate
| [
11748,
25064,
198,
11748,
28686,
198,
11748,
14601,
198,
198,
6738,
4738,
1330,
43720,
600,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
198,
6738,
8085,
8094,
1330,
... | 2.504619 | 1,732 |
import jwt
from django.conf import settings
from datetime import datetime, timedelta
JWT_SECRET = settings.SECRET_KEY
JWT_ALGORITHM = settings.JWT_ALGORITHM
JWT_EXP_DELTA_SECONDS = settings.JWT_EXP_DELTA_SECONDS
COOKIE_MAX_AGE = 365 * 24 * 60 * 60
| [
11748,
474,
46569,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
198,
41,
39386,
62,
23683,
26087,
796,
6460,
13,
23683,
26087,
62,
20373,
198,
41,
39386,
62,
1847,
38,
... | 2.171875 | 128 |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
from azure.core.credentials import AccessToken
from devtools_testutils import (
AzureTestCase,
AzureMgmtPreparer,
FakeResource,
ResourceGroupPreparer,
)
from devtools_testutils.cognitiveservices_testcase import CognitiveServicesAccountPreparer
from azure_devtools.scenario_tests import ReplayableTest
class FakeTokenCredential(object):
"""Protocol for classes able to provide OAuth tokens.
:param str scopes: Lets you specify the type of access needed.
"""
@pytest.fixture(scope="session")
| [
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
16529,
45537,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
198,
2,
... | 3.940909 | 220 |
from models2 import Department, Employee, db
from app2 import app
db.drop_all()
db.create_all()
d1 = Department(dept_code="mktg", dept_name="Marketing",phone="897-9999")
d2 = Department(dept_code="acct", dept_name="Accounting",phone="111-5429")
river = Employee(name="River Bottom", state="NY", dept_code="mktg")
summer = Employee(name="Summer Winter", state="OR", dept_code="mktg")
joaquin = Employee(name="Joaquin Pheonix", dept_code="acct")
db.session.add(d1)
db.session.add(d2)
db.session.commit()
db.session.add(river)
db.session.add(joaquin)
db.session.add(summer)
db.session.commit() | [
6738,
4981,
17,
1330,
2732,
11,
36824,
11,
20613,
198,
6738,
598,
17,
1330,
598,
198,
198,
9945,
13,
14781,
62,
439,
3419,
198,
9945,
13,
17953,
62,
439,
3419,
198,
198,
67,
16,
796,
2732,
7,
2934,
457,
62,
8189,
2625,
76,
21841,
... | 2.651786 | 224 |
import torch
from torch.utils.data import DataLoader, Dataset
from PIL import Image
from torchvision import transforms
import torchvision.transforms.functional as FT
import os
from random import randint
import numpy as np
import random
from utils import decode_segmap
opj = os.path.join
if __name__ == '__main__':
ds = myds()
dl = DataLoader(ds)
a = next(iter(dl))
for i in a:
print(np.unique(i.detach().numpy()))
| [
11748,
28034,
201,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
11,
16092,
292,
316,
201,
198,
6738,
350,
4146,
1330,
7412,
201,
198,
6738,
28034,
10178,
1330,
31408,
201,
198,
11748,
28034,
10178,
13,
7645,
23914,
13,
4512... | 2.518717 | 187 |
from django.test import TestCase, Client
from django.conf import settings
# Create your tests here.
from feeds.models import Source, Post, Enclosure, WebProxy
from feeds.utils import read_feed, find_proxies, get_proxy, fix_relative
from django.utils import timezone
from django.urls import reverse
from datetime import timedelta
import mock
import os
import requests_mock
TEST_FILES_FOLDER = os.path.join(os.path.dirname(os.path.abspath(__file__)),"testdata")
BASE_URL = 'http://feed.com/'
@requests_mock.Mocker()
@requests_mock.Mocker()
@requests_mock.Mocker()
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
11,
20985,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
198,
2,
13610,
534,
5254,
994,
13,
198,
6738,
21318,
13,
27530,
1330,
8090,
11,
2947,
11,
2039,
17966,
11,
5313,
44148,
1... | 2.300341 | 293 |
import datetime
import json
import discord
from discord.ext import commands
| [
11748,
4818,
8079,
198,
11748,
33918,
198,
198,
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
628,
198
] | 4.157895 | 19 |
# Copyright (C) 2020-2022 The opuntiaOS Project Authors.
# + Contributed by Nikita Melekhin <nimelehin@gmail.com>
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from Parser.Parser import Parser
from Generator.IRManager import IRManager
from Generator.BinWriter import BinWriter
import argparse | [
2,
15069,
357,
34,
8,
12131,
12,
1238,
1828,
383,
1034,
2797,
544,
2640,
4935,
46665,
13,
198,
2,
220,
1343,
2345,
6169,
416,
11271,
5350,
2185,
293,
14636,
259,
1279,
77,
524,
293,
20079,
31,
14816,
13,
785,
29,
198,
2,
220,
198,... | 3.394231 | 104 |
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
# class ProductScraperItem(scrapy.Item):
# # define the fields for your item here like:
# # name = scrapy.Field()
# pass
| [
2,
2896,
500,
994,
262,
4981,
329,
534,
15881,
276,
3709,
198,
2,
198,
2,
4091,
10314,
287,
25,
198,
2,
3740,
1378,
31628,
13,
1416,
2416,
88,
13,
2398,
14,
268,
14,
42861,
14,
4852,
873,
14,
23814,
13,
6494,
198,
198,
11748,
15... | 2.79 | 100 |
#!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from google.api_core.client_options import ClientOptions
# [START datalabeling_create_annotation_spec_set_beta]
def create_annotation_spec_set(project_id):
"""Creates a data labeling annotation spec set for the given
Google Cloud project.
"""
from google.cloud import datalabeling_v1beta1 as datalabeling
client = datalabeling.DataLabelingServiceClient()
# [END datalabeling_create_annotation_spec_set_beta]
# If provided, use a provided test endpoint - this will prevent tests on
# this snippet from triggering any action by a real human
if 'DATALABELING_ENDPOINT' in os.environ:
opts = ClientOptions(api_endpoint=os.getenv('DATALABELING_ENDPOINT'))
client = datalabeling.DataLabelingServiceClient(client_options=opts)
# [START datalabeling_create_annotation_spec_set_beta]
project_path = client.project_path(project_id)
annotation_spec_1 = datalabeling.types.AnnotationSpec(
display_name='label_1',
description='label_description_1'
)
annotation_spec_2 = datalabeling.types.AnnotationSpec(
display_name='label_2',
description='label_description_2'
)
annotation_spec_set = datalabeling.types.AnnotationSpecSet(
display_name='YOUR_ANNOTATION_SPEC_SET_DISPLAY_NAME',
description='YOUR_DESCRIPTION',
annotation_specs=[annotation_spec_1, annotation_spec_2]
)
response = client.create_annotation_spec_set(
project_path, annotation_spec_set)
# The format of the resource name:
# project_id/{project_id}/annotationSpecSets/{annotationSpecSets_id}
print('The annotation_spec_set resource name: {}'.format(response.name))
print('Display name: {}'.format(response.display_name))
print('Description: {}'.format(response.description))
print('Annotation specs:')
for annotation_spec in response.annotation_specs:
print('\tDisplay name: {}'.format(annotation_spec.display_name))
print('\tDescription: {}\n'.format(annotation_spec.description))
return response
# [END datalabeling_create_annotation_spec_set_beta]
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
'--project-id',
help='Project ID. Required.',
required=True
)
args = parser.parse_args()
create_annotation_spec_set(args.project_id)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
15069,
13130,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
... | 2.852535 | 1,085 |
"""
1772. Sort Features by Popularity
Medium
You are given a string array features where features[i] is a single word that represents the name of a feature of the latest product you are working on. You have made a survey where users have reported which features they like. You are given a string array responses, where each responses[i] is a string containing space-separated words.
The popularity of a feature is the number of responses[i] that contain the feature. You want to sort the features in non-increasing order by their popularity. If two features have the same popularity, order them by their original index in features. Notice that one response could contain the same feature multiple times; this feature is only counted once in its popularity.
Return the features in sorted order.
Example 1:
Input: features = ["cooler","lock","touch"], responses = ["i like cooler cooler","lock touch cool","locker like touch"]
Output: ["touch","cooler","lock"]
Explanation: appearances("cooler") = 1, appearances("lock") = 1, appearances("touch") = 2. Since "cooler" and "lock" both had 1 appearance, "cooler" comes first because "cooler" came first in the features array.
Example 2:
Input: features = ["a","aa","b","c"], responses = ["a","a aa","a a a a a","b a"]
Output: ["a","aa","b","c"]
Constraints:
1 <= features.length <= 104
1 <= features[i].length <= 10
features contains no duplicates.
features[i] consists of lowercase letters.
1 <= responses.length <= 102
1 <= responses[i].length <= 103
responses[i] consists of lowercase letters and spaces.
responses[i] contains no two consecutive spaces.
responses[i] has no leading or trailing spaces.
"""
| [
37811,
198,
1558,
4761,
13,
33947,
17571,
416,
22623,
414,
198,
31205,
198,
198,
1639,
389,
1813,
257,
4731,
7177,
3033,
810,
3033,
58,
72,
60,
318,
257,
2060,
1573,
326,
6870,
262,
1438,
286,
257,
3895,
286,
262,
3452,
1720,
345,
3... | 3.885781 | 429 |
# Remove duplicate lines from FINALOP.txt, writing the unique lines (in their
# original order) to OUTPUT.txt.  A line counts as a duplicate when the MD5
# digest of its right-stripped text has already been seen.
import hashlib
import os

# All data files live in the project directory.
os.chdir('C:/Users/Sai Charan/Desktop/Studies/522-Advance_Data_Mining/PROJECT')

output_file_path = "OUTPUT.txt"
input_file_path = "FINALOP.txt"

# MD5 digests of every line already emitted.
completed_lines_hash = set()

# Context managers close both files even on error; the original code leaked
# the output handle (and never closed the input file), so the last buffered
# writes could be lost.
with open(input_file_path, "r") as input_file, \
        open(output_file_path, "w") as output_file:
    for line in input_file:
        # rstrip() so lines differing only in trailing whitespace or in a
        # missing final newline are treated as the same line.
        hash_value = hashlib.md5(line.rstrip().encode('utf-8')).hexdigest()
        if hash_value not in completed_lines_hash:
            output_file.write(line)
            completed_lines_hash.add(hash_value)
11748,
12234,
8019,
198,
11748,
28686,
198,
198,
418,
13,
354,
15908,
10786,
34,
14079,
14490,
14,
50,
1872,
3178,
272,
14,
36881,
14,
45833,
14,
49542,
12,
2782,
19259,
62,
6601,
62,
44,
3191,
14,
31190,
23680,
11537,
198,
198,
22915... | 3.451477 | 474 |
#=========================================================================
'''
Project:Lecture - Structural Wind Engineering WS16-17
Chair of Structural Analysis @ TUM - A. Michalski, R. Wuchner, M. Pentek
MDoF system solver using direct time integration - Generalized-Alpha Schemem,
a monolithic formulation
Author: mate.pentek@tum.de
Based upon the original implementation for a SDoF system by M. Andre described in:
Formulation of the Generalized-Alpha method for LAGRANGE. Technical Report, Chair
of Structural Analysis @TUM, 2012.
See J. Chung, G.M. Hulbert: A time integration algorithm for structural dynamics
with improved numerical dissipation: the generalized-aplha mehod. ASME J. Appl.
Mech., 60:371-375,1993.
Description: This is a solver for direct numerical time integration for a 2DoF system.
It assumes linear DOFs with a Generalized alpha scheme with fixed dt.
Created on: 15.03.2016
Last update: 15.03.2016
'''
#===============================================================================
# StructureMDOF class for a MultiDegreeOfFreedom dynamic system
import numpy as np
import os
# constructor of the class
# ========================================================================
| [
2,
23926,
2559,
28,
198,
7061,
6,
198,
16775,
25,
43,
478,
495,
532,
32112,
1523,
3086,
14044,
25290,
1433,
12,
1558,
198,
220,
220,
220,
220,
220,
220,
220,
9369,
286,
32112,
1523,
14691,
2488,
309,
5883,
532,
317,
13,
2843,
874,
... | 3.583333 | 360 |
# Write_a_function
# Created by JKChang
# 14/08/2018, 10:58
# Tag:
# Description: https://www.hackerrank.com/challenges/write-a-function/problem
# In the Gregorian calendar three criteria must be taken into account to identify leap years:
# The year can be evenly divided by 4, is a leap year, unless:
# The year can be evenly divided by 100, it is NOT a leap year, unless:
# The year is also evenly divisible by 400. Then it is a leap year.
year = int(input())
print(is_leap(year))
| [
2,
19430,
62,
64,
62,
8818,
198,
2,
15622,
416,
449,
42,
1925,
648,
198,
2,
1478,
14,
2919,
14,
7908,
11,
838,
25,
3365,
198,
2,
17467,
25,
198,
2,
12489,
25,
3740,
1378,
2503,
13,
31153,
8056,
962,
13,
785,
14,
36747,
34120,
... | 3.246667 | 150 |
"""Tests of the optimization trajectory comparison workflow.
Scientific Machine Learning Benchmark:
A benchmark of regression models in chem- and materials informatics.
2020, Citrine Informatics.
"""
import pytest
pytest.importorskip("sklearn")
import smlb
def test_optimization_trajectories():
    """Ensure that a simple optimization workflow can be run."""
    from smlb.datasets.synthetic import Friedman1979Data
    from smlb.learners import RandomForestRegressionSklearn
    from smlb.optimizers import RandomOptimizer
    from smlb.workflows import OptimizationTrajectoryComparison

    # Small synthetic regression problem and a 100-point training sample.
    data = Friedman1979Data(dimensions=5)
    vector_sampler = smlb.RandomVectorSampler(size=100, rng=0)
    train = vector_sampler.fit(data).apply(data)

    # Random-forest learner with naive uncertainty estimates, fit up-front.
    model = RandomForestRegressionSklearn(uncertainties="naive", rng=0)
    model.fit(train)

    # Score candidates by their probability of improving on a target of 2.
    scorer = smlb.ProbabilityOfImprovement(target=2, goal="minimize")
    rnd_optimizer = RandomOptimizer(num_samples=30, rng=0)
    plot = smlb.OptimizationTrajectoryPlot(optimizer_names=["random1", "random2"])

    # Passing the same optimizer twice checks multi-optimizer handling.
    workflow = OptimizationTrajectoryComparison(
        data=data,
        model=model,
        scorer=scorer,
        optimizers=[rnd_optimizer, rnd_optimizer],
        evaluations=[plot],
        num_trials=3,
    )
    workflow.run()
| [
37811,
51,
3558,
286,
262,
23989,
22942,
7208,
30798,
13,
198,
198,
23010,
811,
10850,
18252,
25187,
4102,
25,
198,
32,
18335,
286,
20683,
4981,
287,
4607,
12,
290,
5696,
4175,
23372,
13,
198,
42334,
11,
15792,
7640,
554,
18982,
873,
... | 2.880753 | 478 |
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from app.api.config import SQLALCHEMY_DATABASE_URI
from app.api.models import Base

# Create our sqlite3 database
engine = create_engine(
    SQLALCHEMY_DATABASE_URI,
    # By default SQLite will only allow one thread to communicate with it,
    # assuming that each thread would handle an independent request. But in
    # FastAPI, using normal functions (def) more than one thread could
    # interact with the database for the same request, so we need to make
    # SQLite know that it should allow that.
    connect_args={"check_same_thread": False}
)

# Session factory bound to the engine; autocommit/autoflush are disabled so
# callers control transaction boundaries explicitly.
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

# Create all tables stored in `app.api.models` metadata
Base.metadata.create_all(bind=engine)
| [
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
6246,
10297,
198,
198,
6738,
598,
13,
15042,
13,
11250,
1330,
16363,
1847,
3398,
3620,
56,
62,
35,
1404,
6242,
11159,
62,
47269,
198,
6738,
5... | 3.374502 | 251 |
"""TD3-BC agent configuration."""
import dataclasses
from typing import Optional
from acme.adders import reverb as adders_reverb
import optax
@dataclasses.dataclass
class TD3BCConfig:
  """TD3-BC configuration.

  Hyper-parameters for the TD3-BC agent; optimizers left as ``None`` fall
  back to whatever defaults the agent builder supplies.
  """

  # Optimizers for the policy and critic networks (None = agent default).
  policy_optimizer: Optional[optax.GradientTransformation] = None
  critic_optimizer: Optional[optax.GradientTransformation] = None
  # Discount factor applied to future rewards.
  discount: float = 0.99
  batch_size: int = 256
  # target network soft update rate
  tau: float = 0.005
  # coefficient controlling relative importance of BC and TD loss
  alpha: float = 2.5
  # Noise added to target-policy actions, clipped to [-noise_clip, noise_clip].
  noise_clip: float = 0.5
  policy_noise: float = 0.2
  # Update the policy once every `policy_update_period` critic updates.
  policy_update_period: int = 2

  # Replay options
  replay_table_name: str = adders_reverb.DEFAULT_PRIORITY_TABLE
  # samples_per_insert: float = 256 * 4
  # Rate to be used for the SampleToInsertRatio rate limitter tolerance.
  # See a formula in make_replay_tables for more details.
  # samples_per_insert_tolerance_rate: float = 0.1
  min_replay_size: int = 1
  max_replay_size: int = 1000000
  prefetch_size: Optional[int] = None
| [
37811,
21016,
18,
12,
2749,
5797,
8398,
526,
15931,
198,
11748,
4818,
330,
28958,
198,
6738,
19720,
1330,
32233,
198,
198,
6738,
936,
1326,
13,
45940,
1330,
302,
19011,
355,
751,
364,
62,
260,
19011,
198,
11748,
2172,
897,
628,
198,
3... | 2.869333 | 375 |
"""
separate personal standard groups.
Revision ID: 2a6c63397399
Revises: 9fd4589cc82c
Create Date: 2018-05-23 17:17:51.205891
"""
import sqlalchemy as sa
from alembic import op
from alembic.context import get_context # noqa: F401
from sqlalchemy.dialects.postgresql.base import PGDialect
from sqlalchemy.orm.session import sessionmaker
Session = sessionmaker()
# revision identifiers, used by Alembic.
revision = '2a6c63397399'
down_revision = '9fd4589cc82c'
branch_labels = None
depends_on = None
# OLD/NEW values must be different
OLD_GROUP_USERS = 'user'
NEW_GROUP_USERS = 'users'
OLD_GROUP_ADMIN = 'admin'
NEW_GROUP_ADMIN = 'administrators'
OLD_USER_USERS = OLD_GROUP_USERS
OLD_USER_ADMIN = OLD_GROUP_ADMIN
# Lightweight table constructs (not full ORM models) declaring only the
# columns this migration needs to read and update.
users = sa.table(
    "users",
    sa.column("id", sa.Integer),
    sa.column("user_name", sa.String),
)

groups = sa.table(
    "groups",
    sa.column("id", sa.Integer),
    sa.column("group_name", sa.String),
    sa.column("member_count", sa.Integer)
)

# Association table linking users to the groups they belong to.
users_groups = sa.table(
    "users_groups",
    sa.column("user_id", sa.Integer),
    sa.column("group_id", sa.Integer),
)
def get_users_groups(db_session):
    """
    Fetch current db users and groups.

    Returns a 6-tuple ``(old_user_admin, old_user_users, old_group_admin,
    old_group_users, new_group_admin, new_group_users)`` where each element
    is the matching row, or ``None`` when no such row exists.
    """
    # Materialize the results: the proxy returned by execute() is
    # forward-only, but each result set is scanned several times below.
    # (The original code iterated each proxy twice, so the second scan
    # always ran on an exhausted iterator and came up empty.)
    all_users = list(db_session.execute(sa.select([users])))
    all_groups = list(db_session.execute(sa.select([groups])))

    def first_match(rows, attr, value):
        # First row whose `attr` equals `value`, or None when absent.
        for row in rows:
            if getattr(row, attr) == value:
                return row
        return None

    return (
        first_match(all_users, "user_name", OLD_USER_ADMIN),
        first_match(all_users, "user_name", OLD_USER_USERS),
        first_match(all_groups, "group_name", OLD_GROUP_ADMIN),
        first_match(all_groups, "group_name", OLD_GROUP_USERS),
        first_match(all_groups, "group_name", NEW_GROUP_ADMIN),
        first_match(all_groups, "group_name", NEW_GROUP_USERS),
    )
def upgrade_migrate(old_group, old_user, new_group, new_name, db_session):
    """
    Migrates a user and its personal user-group to a standard group.
    Reassigns the user references to link to the new standard group.

    :param old_group: personal group row to migrate (or None)
    :param old_user: personal user row to drop once migrated (or None)
    :param new_group: pre-existing standard group row (or None)
    :param new_name: name the standard group must end up with
    :param db_session: active SQLAlchemy session for this migration
    """
    if new_group is None and old_group is not None:
        # just rename the group, no need to adjust references
        old_group.group_name = new_name
    elif new_group is None and old_group is None:
        # create missing group, no group reference to modify
        db_session.execute(groups.insert().values(group_name=new_name, member_count=0))
    elif new_group is not None and old_group is not None:
        # both groups exist, must transfer references
        # NOTE(review): execute() returns a forward-only result; it is
        # iterated by the for-loop AND re-scanned by the comprehension
        # below, so the inner scan runs on an exhausted iterator — confirm
        # and wrap in list() if duplicate detection is expected to work.
        all_usr_grp = db_session.execute(sa.select([users_groups]))
        for usr_grp in all_usr_grp:
            if usr_grp.group_id == old_group.id:
                # if user_id/group_id combination already exists, delete duplicate that would be generated by transfer
                match_new_grp = [ug for ug in all_usr_grp if ug.group_id == new_group.id]
                if len(match_new_grp) > 0:
                    # NOTE(review): usr_grp is a plain result row, not an ORM
                    # instance — session.delete() expects a mapped object;
                    # verify this actually removes the association row.
                    db_session.delete(usr_grp)
                # otherwise transfer reference to new group
                else:
                    usr_grp.group_id = new_group.id
    # remove not required 'user-group'
    if old_user is not None:
        for usr_grp in db_session.execute(sa.select([users_groups])):
            if usr_grp.user_id == old_user.id:
                db_session.delete(usr_grp)
        db_session.delete(old_user)
def downgrade_migrate(old_group, old_user, new_group, old_name, db_session):
    """
    Migrates a standard group back to the original user and corresponding personal user-group.
    Reassigns the user references to link to the old personal group.

    :param old_group: personal group row to restore (or None)
    :param old_user: personal user row to restore (or None)
    :param new_group: standard group row to dismantle (or None)
    :param old_name: original personal user/group name
    :param db_session: active SQLAlchemy session for this migration
    """
    if old_group is None:
        # create missing group
        # NOTE(review): `Group` is neither imported nor defined in this
        # module (the `# noqa` hides the lint error) — this branch would
        # raise NameError if reached; confirm the intended model import.
        old_group = Group(group_name=old_name) # noqa
        db_session.add(old_group)
    if old_group is not None and new_group is not None:
        # transfer user-group references
        # NOTE(review): unlike upgrade_migrate, these selects pass the table
        # directly rather than wrapped in a list — verify this form is
        # supported by the SQLAlchemy version pinned for this migration.
        all_usr_grp = db_session.execute(sa.select(users_groups))
        for usr_grp in all_usr_grp:
            if usr_grp.group_id == new_group.id:
                # if user_id/group_id combination already exists, delete duplicate that would be generated by transfer
                match_old_grp = [ug for ug in all_usr_grp if ug.group_id == old_group.id]
                if len(match_old_grp) > 0:
                    db_session.delete(usr_grp)
                # otherwise transfer back reference to old group
                else:
                    db_session.execute(users_groups.update()
                                       .where(usr_grp.c.group_id == new_group.c.id)
                                       .values(group_id=old_group.id))
    if new_group is not None:
        db_session.delete(new_group)
    if old_user is None:
        # recreate the personal user with a placeholder email
        email = "{}@mail.com".format(old_name)
        db_session.execute(users.insert().values(user_name=old_name, email=email))
        old_user = db_session.execute(sa.select(users).where(users.c.user_name == old_name)).fetchone()
    # re-link the personal user to its personal group
    db_session.execute(users_groups.insert().values(group_id=old_group.id, user_id=old_user.id))
def clean_user_groups(db_session):
    """
    Ensures that each user is the only one pointing to its corresponding personal user-group.
    Invalid user references are dropped.
    """
    all_users = db_session.execute(sa.select([users]))
    all_groups = db_session.execute(sa.select([groups]))
    all_usr_grp = db_session.execute(sa.select([users_groups]))
    # id -> name lookup tables for validating each association row
    all_usr_dict = dict([(usr.id, usr.user_name) for usr in all_users])
    all_grp_dict = dict([(grp.id, grp.group_name) for grp in all_groups])
    for usr_grp in all_usr_grp:
        # delete any missing user/group references (pointing to nothing...)
        # NOTE(review): usr_grp is a result row, not an ORM instance —
        # session.delete() expects a mapped object; verify these deletes
        # actually take effect.
        if usr_grp.user_id not in all_usr_dict.keys() or usr_grp.group_id not in all_grp_dict.keys():
            db_session.delete(usr_grp)
            continue
        # delete any user/personal-group reference of different names
        grp_name = all_grp_dict[usr_grp.group_id]
        usr_name = all_usr_dict[usr_grp.user_id]
        # "personal" group = a group whose name matches some user's name
        is_personal_group = usr_name in all_grp_dict.values()
        if is_personal_group and grp_name != usr_name:
            db_session.delete(usr_grp)
| [
37811,
198,
25512,
378,
2614,
3210,
2628,
13,
198,
198,
18009,
1166,
4522,
25,
362,
64,
21,
66,
21,
29626,
4790,
2079,
198,
18009,
2696,
25,
860,
16344,
2231,
4531,
535,
6469,
66,
198,
16447,
7536,
25,
2864,
12,
2713,
12,
1954,
1596... | 2.35485 | 2,804 |
"""
Read and write data of go-eCharger wallbox.
"""
import os
import logging
import requests
import plugin_collection
log = logging.getLogger("GoEcharger")
class goeDevice():
''' Phases
0b00ABCDEF
A ... phase 3, in front of the contactor
B ... phase 2 in front of the contactor
C ... phase 1 in front of the contactor
D ... phase 3 after the contactor
E ... phase 2 after the contactor
F ... phase 1 after the contactor
pha | 0b00001000: Phase 1 is available
pha | 0b00111000: Phase1-3 is available
'''
'''
The following parameters can only be read:
version rbc rbt car err cbl pha tmp dws adi uby eto wst nrg fwv sse eca ecr
ecd ec4 ec5 ec6 ec7 ec8 ec9 ec1 rca rcr rcd rc4 rc5 rc6 rc7 rc8 rc9 rc1
The following parameters can be set:
amp ast alw stp dwo wss wke wen tof tds lbr aho afi ama al1 al2 al3 al4 al5
cid cch cfi lse ust wak r1x dto nmo rna rnm rne rn4 rn5 rn6 rn7 rn8 rn9 rn1
''' | [
37811,
198,
5569,
290,
3551,
1366,
286,
467,
12,
68,
28316,
263,
3355,
3524,
13,
198,
37811,
198,
11748,
28686,
198,
11748,
18931,
198,
11748,
7007,
198,
198,
11748,
13877,
62,
43681,
198,
198,
6404,
796,
18931,
13,
1136,
11187,
1362,
... | 2.362768 | 419 |
import math
import mindspore
from mindspore.common.parameter import Parameter
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore.common.initializer import initializer, HeUniform, Uniform, _calculate_fan_in_and_fan_out | [
11748,
10688,
198,
11748,
2000,
2777,
382,
198,
6738,
2000,
2777,
382,
13,
11321,
13,
17143,
2357,
1330,
25139,
2357,
198,
11748,
2000,
2777,
382,
13,
20471,
355,
299,
77,
198,
11748,
2000,
2777,
382,
13,
2840,
355,
39628,
198,
6738,
... | 3.219178 | 73 |
import uuid
import time
import enum
| [
11748,
334,
27112,
198,
11748,
640,
198,
11748,
33829,
198
] | 3.6 | 10 |
import torch
import numpy as np
from typing import Any
from typing import Dict
from typing import Union
from typing import Optional
from cftool.ml import Metrics
from cftool.misc import is_numeric
from cftool.misc import timing_context
from ...misc.toolkit import *
from ..base import ModelBase
from ...types import tensor_dict_type
from ...protocol import TrainerState
from ...modules.transform.core import SplitFeatures
@ModelBase.register("ddr")
@ModelBase.register_pipe("ddr")
# utilities
# core
# API
@ModelBase.register("ddr_q")
@ModelBase.register("ddr_cdf")
__all__ = ["CDF", "DDR", "Quantile"]
| [
11748,
28034,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
19720,
1330,
4377,
198,
6738,
19720,
1330,
360,
713,
198,
6738,
19720,
1330,
4479,
198,
6738,
19720,
1330,
32233,
198,
6738,
269,
701,
970,
13,
4029,
1330,
3395,
104... | 3.176768 | 198 |
# -*- coding: utf-8
from django.apps import AppConfig
import os
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
198,
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
198,
11748,
28686,
628
] | 2.826087 | 23 |
from dataclasses import dataclass
from viewdom import html
from viewdom_wired import component
@component()
@dataclass
@component()
@dataclass
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
198,
6738,
1570,
3438,
1330,
27711,
198,
6738,
1570,
3438,
62,
44236,
1330,
7515,
628,
198,
31,
42895,
3419,
198,
31,
19608,
330,
31172,
628,
198,
31,
42895,
3419,
198,
31,
19608,
3... | 3.363636 | 44 |
import PIL.Image as Image
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from ..main import image_analysis | [
11748,
350,
4146,
13,
5159,
355,
7412,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
11192,
273,
11125,
62,
40140,
355,
12575,
198,
198,
6738,
11485,
12417,
1330,
2939,
62,
20930
] | 3.473684 | 38 |
import datetime
from task_base import TaskBase
# Cohen-Sutherland implementation | [
11748,
4818,
8079,
198,
6738,
4876,
62,
8692,
1330,
15941,
14881,
198,
198,
2,
17154,
12,
50,
45384,
7822
] | 4.263158 | 19 |
import serial.rs485
import libscrc
import sys
sPort = 'COM4'
req_init = "000300fc0000842b000300fc0000842b"
req_ids_loop = ["ff4306064246ff4306064246", "ff5006003381",
"ff500601f241", "ff500602b240", "ff5006037380", "ff5006043242", "ff500605f382"]
req_data = bytes.fromhex("030029001b")
req_bat_active = bytes.fromhex("ff4112434242313132383130")
current_bat_id = 2
bat_ids = []
bat_dupes = []
additional_reqs = []
if len(sys.argv) == 2:
sPort = sys.argv[1]
s = serial.Serial(
port=sPort,
baudrate=115200,
# parity=serial.PARITY_MARK,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=0.005
)
s.rs485_mode = serial.rs485.RS485Settings(True, False, False, None, None)
s.write(bytes.fromhex(req_init))
for i in range(0, 2):
for req in req_ids_loop:
print("->", req)
s.write(bytes.fromhex(req))
cc = s.readline()
if len(cc) > 0:
print("<-", cc.hex())
if cc.hex().startswith("ff70"):
# received id from battery
bat_id = cc[12:15]
bat_id_hex = bat_id.hex()
if bat_id_hex in bat_dupes: # skip duplicates
print("skipping duplicate id ", bat_id_hex)
continue
bat_dupes.append(bat_id_hex)
req = req_bat_active + bat_id + bytes([current_bat_id])
bat_ids.append(current_bat_id)
current_bat_id += 1
req = req + libscrc.modbus(req).to_bytes(2, 'little')
s.write(req)
print("-> ", req.hex(), s.readline().hex(), "# received battery id", bat_id_hex,
"assigning bus id", current_bat_id - 1)
print(s.readline().hex(), "# response of set active bat id")
if len(cc) >= 58 and cc.hex().startswith("0203"):
evaluate_battery_status()
for reqid in bat_ids:
req = bytes([reqid]) + req_data
req = req + libscrc.modbus(req).to_bytes(2, 'little')
print("->", req.hex(), "# request data from bus id", reqid)
s.write(req)
cc = s.readline()
for i in range(0, 2):
cc += s.readline()
print("<-", cc.hex(), "# battery data")
if len(cc) >= 58:
evaluate_battery_status()
s.close()
| [
11748,
11389,
13,
3808,
32642,
198,
11748,
9195,
1416,
6015,
198,
11748,
25064,
198,
198,
82,
13924,
796,
705,
9858,
19,
6,
198,
42180,
62,
15003,
796,
366,
830,
6200,
16072,
2388,
23,
3682,
65,
830,
6200,
16072,
2388,
23,
3682,
65,
... | 1.998295 | 1,173 |
from turtle import *
import turtle

# Screen used as the drawing surface for the compass rose.
CoderMohit = turtle.Screen()
CoderMohit.bgcolor('white')

# One labelled spoke every 15 degrees: aim the turtle at the heading,
# draw outwards, label the angle, then retrace back to the centre.
angle = 0
while angle < 360:
    setheading(angle)
    forward(100)
    write(str(angle) + '`')
    backward(100)
    angle += 15

CoderMohit.mainloop()
6738,
28699,
1330,
1635,
201,
198,
11748,
28699,
201,
198,
34,
12342,
38443,
270,
796,
28699,
13,
23901,
3419,
201,
198,
201,
198,
34,
12342,
38443,
270,
13,
35904,
8043,
10786,
11186,
11537,
201,
198,
201,
198,
201,
198,
1640,
9848,
... | 2.346154 | 104 |
# -*- coding: utf-8 -*-
"""
Execution module to handle MetalK8s sysctl.
"""
import configparser
import pathlib
from salt.exceptions import CommandExecutionError
import salt.utils.files
__virtualname__ = "metalk8s_sysctl"
# Order in this list defines the precedence
SYSCTL_CFG_DIRECTORIES = [
"/run/sysctl.d",
"/etc/sysctl.d",
"/usr/local/lib/sysctl.d",
"/usr/lib/sysctl.d",
"/lib/sysctl.d",
]
# This file is applied last no matter what
SYSCTL_DEFAULT_CFG = "/etc/sysctl.conf"
def _get_sysctl_files(config):
    """
    Return every sysctl configuration file, ordered the way the system
    reads them.

    The file passed as `config` is injected into the list (it may not
    exist yet).  If `config` lives outside the authorized directories, or
    is shadowed by a same-named file with higher precedence, it simply
    does not appear in the result — the system would ignore it anyway.
    """
    target = pathlib.Path(config).resolve()
    # Map file name -> full path; setdefault keeps the first (highest
    # precedence) directory that provides a given file name.
    by_name = {}
    for directory in SYSCTL_CFG_DIRECTORIES:
        base = pathlib.Path(directory)
        if base == target.parent:
            by_name.setdefault(target.name, str(target))
        for candidate in base.glob("*.conf"):
            by_name.setdefault(candidate.name, str(candidate))
    # The system applies files sorted by name; the legacy sysctl.conf is
    # always read last.
    ordered = [by_name[name] for name in sorted(by_name)]
    ordered.append(SYSCTL_DEFAULT_CFG)
    return ordered
def has_precedence(name, value, config, strict=False):
    """
    Read all sysctl configuration file to check if the passed `name` and
    `value` are not overwritten by an already existing sysctl configuration
    file.

    If `strict` is set, check that the final value comes from the passed
    `config` and not another sysctl configuration file (even if the value is
    equal to `value`).

    Raises CommandExecutionError when `config` is shadowed, misplaced, or
    when a higher-precedence file redefines `name`.
    """
    sysctl_files = _get_sysctl_files(config)
    # Ignore files before the `config` one.
    try:
        sysctl_files = sysctl_files[sysctl_files.index(config) + 1 :]
    except ValueError:
        # If the file is not in the list, it means it's overwritten by an
        # other sysctl configuration file with higher precedence.
        config_name = pathlib.PurePath(config).name
        for sysctl_file in sysctl_files:
            sysctl_name = pathlib.PurePath(sysctl_file).name
            if sysctl_name == config_name:
                raise CommandExecutionError(  # pylint: disable=raise-missing-from
                    "'{0}' has a higher precedence and overrides '{1}'".format(
                        sysctl_file, config
                    )
                )
        # The target file is not in a directory checked by the system
        raise CommandExecutionError(  # pylint: disable=raise-missing-from
            "{0} is not a correct path for a sysctl configuration "
            "file, please use one of the following:\n- {1}".format(
                config, "\n- ".join(SYSCTL_CFG_DIRECTORIES)
            )
        )
    parser = configparser.ConfigParser(interpolation=None)
    # Normalize whitespace so formatting differences do not count as a
    # different value.
    epured_value = " ".join(str(value).split())
    for sysctl_file in sysctl_files:
        # sysctl files have no section headers, but configparser requires
        # one — inject a fake "[global]" section before the file contents.
        with salt.utils.files.fopen(sysctl_file, "r") as sysctl_fd:
            parser.read_file(["[global]", *sysctl_fd], source=sysctl_file)
        sysctl = dict(parser.items("global"))
        # Drop the section so entries do not leak into the next file's parse.
        parser.remove_section("global")
        if name in sysctl and (
            strict or " ".join(sysctl[name].split()) != epured_value
        ):
            raise CommandExecutionError(
                "'{0}' redefines '{1}' with value '{2}'".format(
                    sysctl_file, name, sysctl[name]
                )
            )
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
23002,
1009,
8265,
284,
5412,
12136,
42,
23,
82,
25064,
34168,
13,
198,
37811,
198,
198,
11748,
4566,
48610,
198,
11748,
3108,
8019,
198,
198,
6738,
8268,
13... | 2.411726 | 1,535 |
import rospy
from rospy import TransportException
from tf2_msgs.msg import TFMessage
from tf2_ros import TransformListener, InvalidArgumentException, TimeoutException, Buffer
from tf2_server.srv import RequestTransformStream, RequestTransformStreamRequest, RequestTransformStreamResponse
| [
11748,
686,
2777,
88,
198,
6738,
686,
2777,
88,
1330,
19940,
16922,
198,
6738,
48700,
17,
62,
907,
14542,
13,
19662,
1330,
24958,
12837,
198,
6738,
48700,
17,
62,
4951,
1330,
26981,
33252,
11,
17665,
28100,
1713,
16922,
11,
3862,
448,
... | 4.25 | 68 |
from .form import BarcodeForm
from .reader import InputDeviceReader
| [
6738,
764,
687,
1330,
2409,
8189,
8479,
198,
6738,
764,
46862,
1330,
23412,
24728,
33634,
628,
198
] | 4.117647 | 17 |
""" Plugin related endpoints """
| [
37811,
42636,
3519,
886,
13033,
37227,
198
] | 4.714286 | 7 |
import os, aiohttp
import urllib.parse
from bs4 import BeautifulSoup
| [
11748,
28686,
11,
257,
952,
4023,
198,
11748,
2956,
297,
571,
13,
29572,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
220,
220,
220,
220,
198,
220,
220,
220,
220,
198
] | 2.393939 | 33 |
#! /usr/bin/env python
#
#
# Calculates coordinates of window corners of given raster dataset.
# It's just a simple helper for testing and debugging WKT Raster.
#
##############################################################################
# Copyright (C) 2009 Mateusz Loskot <mateusz@loskot.net>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
##############################################################################
from osgeo import gdal
from osgeo import osr
import osgeo.gdalconst as gdalc
import sys
if len(sys.argv) != 6:
print "Usage: window.py <raster> <x> <y> <xsize> <ysize>"
print "\traster - GDAL supported dataset"
print "\tx - column - 1..N where N is raster X dimension"
print "\ty - row - 1..N where N is raster Y dimension"
print "\txsize - x-dimension of requested window (xsize <= xsize of raster - x)"
print "\tysize - y-dimension of requested window (ysize <= ysize of raster - y)"
sys.exit(0)
infile = sys.argv[1]
inxoff = int(sys.argv[2])
inyoff = int(sys.argv[3])
inxsize = int(sys.argv[4])
inysize = int(sys.argv[5])
print "=== INPUT ==="
print "File: %s" % infile
print "Window:"
print "- upper-left: %d x %d" % (inxoff, inyoff)
print "- dimensions: %d x %d" % (inxsize, inysize)
ds = gdal.Open(infile, gdalc.GA_ReadOnly);
if ds is None:
sys.exit('Cannot open input file: ' + str(infile))
xsize = ds.RasterXSize
ysize = ds.RasterYSize
print "=== RASTER ==="
print "- dimensions: %d x %d" % (xsize, ysize)
if inxsize > xsize or inysize > ysize or inxoff > xsize or inyoff > ysize:
print "Invalid size of input window"
sys.exit(1)
gt = ds.GetGeoTransform()
res = ( gt[1], gt[5] ) # X/Y pixel resolution
ulp = ( gt[0], gt[3] ) # X/Y upper left pixel corner
rot = ( gt[2], gt[4] ) # X-/Y-axis rotation
if is_georeferenced(gt):
print "- pixel size:", res
print "- upper left:", ulp
print "- rotation :", rot
else:
print "No georeferencing is available"
sys.exit(1)
print "=== WINDOW ==="
print "- upper-left :", calculate_corner(gt, inxoff, inyoff)
print "- lower-left :", calculate_corner(gt, inxoff, ysize)
print "- upper-right:", calculate_corner(gt, xsize, inyoff)
print "- lower-right:", calculate_corner(gt, xsize, ysize)
print "- center :", calculate_corner(gt, xsize/2, ysize/2)
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
198,
2,
27131,
689,
22715,
286,
4324,
14371,
286,
1813,
374,
1603,
27039,
13,
198,
2,
632,
338,
655,
257,
2829,
31904,
329,
4856,
290,
28769,
370,
42176,
371,
1603,
13... | 2.893137 | 1,020 |
#!/usr/bin/env python3
"""
@Filename: detections.py
@Author: dulanj
@Time: 19/02/2022 09:33
"""
import logging
import os
import time
import cv2
from sports_event_detection.extras.params import database_update_frequency, save_video
from sports_event_detection.utils.storage import Storage
from sports_event_detection.utils.video_reader import VideoReader
from sports_event_detection.utils.video_writer import SEDVideoWriter
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
31,
35063,
25,
220,
220,
220,
4886,
507,
13,
9078,
198,
31,
13838,
25,
220,
220,
220,
220,
220,
288,
377,
272,
73,
198,
31,
7575,
25,
220,
220,
220,
220,
220,
220... | 3 | 147 |
"""This program will flip a coin repeatedly, keeping track of heads and tails.
By Ted Silbernagel
"""
import random
from typing import Any
if __name__ == '__main__':
print('This program will flip a coin and keep track of how many heads/tails you got.')
flip_coin()
| [
37811,
1212,
1430,
481,
14283,
257,
10752,
7830,
11,
5291,
2610,
286,
6665,
290,
30514,
13,
198,
3886,
11396,
4243,
33900,
363,
417,
198,
37811,
198,
198,
11748,
4738,
198,
6738,
19720,
1330,
4377,
628,
628,
198,
361,
11593,
3672,
834,
... | 3.558442 | 77 |
#!/usr/bin/env python
"""Compress the hashes list using Golomb coding."""
import math
import sys
from bitstring import BitArray, Bits, BitStream
import gflags
FLAGS = gflags.FLAGS
gflags.DEFINE_integer("hash_length", 8, "Length of each hash in bytes.")
gflags.DEFINE_integer("two_power", 47, "Power of 2 for M (M=2**two_power).")
def read_hashes(from_file, hash_length):
    """Read a flat binary file of fixed-width hashes into a list.

    Any trailing partial hash (fewer than hash_length bytes) is dropped.
    """
    with open(from_file, "rb") as fin:
        blob = fin.read()
    usable = (len(blob) // hash_length) * hash_length
    return [blob[offset:offset + hash_length]
            for offset in range(0, usable, hash_length)]
def golomb_encode(hashes_list, hash_length, M):
    """Given a sorted list of fixed-size values, compress it by
    using Golomb coding to represent the difference between the values.

    Each delta N is written as q = N // M in unary (q ones, then a zero)
    followed by r = N % M in plain binary using log2(M) bits; M must be a
    power of two.  Returns the encoded bytes (last byte zero-padded).
    """
    hash_len_bits = hash_length * 8
    # Must be sorted for deltas to be small and easily compressable.
    assert sorted(hashes_list) == hashes_list
    # Must not contain duplicates.
    assert len(hashes_list) == len(set(hashes_list))
    # M is the tunable parameter.
    m_bits = int(math.log(M, 2))
    # Make sure that M is a power of 2.
    assert M > 0 and not (M & (M - 1))
    # First item in the output bit array is the first hash value.
    outarray = BitArray(bytes = hashes_list[0], length=hash_len_bits)
    # Set to true when the diff value / M == 0.
    # If no such value exists then the chosen M is too small, so warn.
    min_is_zero = False
    prev = BitArray(bytes = hashes_list[0], length=hash_len_bits)
    for curr_hash in hashes_list[1:]:
        curr = BitArray(bytes=curr_hash, length=hash_len_bits)
        # Delta against the previous hash, interpreted as a big unsigned int.
        N = curr.uint - prev.uint
        q = int(math.floor(N / M))
        r = N % M
        # Unary-encode q: q ones followed by a terminating zero.
        if q == 0:
            outarray.append(Bits(bin='0b0'))
            min_is_zero = True
        else:
            outarray.append(Bits(bin=bin(2**q - 1) + '0'))
        # Write r using plain binary representation.
        outarray.append(Bits(uint=r, length=m_bits))
        prev = curr
    if not min_is_zero:
        print "Inefficient encoding: Minimum is not zero."
    return outarray.tobytes()
def uncompress_golomb_coding(coded_bytes, hash_length, M):
    """Given a bytestream produced by golomb_encode, uncompress it back
    into the list of fixed-size hash values."""
    ret_list = []
    instream = BitStream(
        bytes=coded_bytes, length=len(coded_bytes) * 8)
    hash_len_bits = hash_length * 8
    m_bits = int(math.log(M, 2))
    # First item is a full hash value.
    prev = instream.read("bits:%d" % hash_len_bits)
    ret_list.append(prev.tobytes())
    # Stop when fewer than m_bits remain: anything shorter is the zero
    # padding added when the encoder rounded up to a whole byte.
    while (instream.bitpos + m_bits) <= instream.length:
        # Read Unary-encoded value (count of ones before the zero).
        read_prefix = 0
        curr_bit = instream.read("uint:1")
        while curr_bit == 1:
            read_prefix += 1
            curr_bit = instream.read("uint:1")
        assert curr_bit == 0
        # Read r, assuming M bits were used to represent it.
        r = instream.read("uint:%d" % m_bits)
        # Reconstruct the delta N = q * M + r and add it to the previous value.
        curr_diff = read_prefix * M + r
        curr_value_int = prev.uint + curr_diff
        curr_value = Bits(uint=curr_value_int, length=hash_len_bits)
        ret_list.append(curr_value.tobytes())
        prev = curr_value
    return ret_list
def main(input_file, output_file):
    """Reads and compresses the hashes."""
    # The hash list must be sorted so consecutive deltas are small, which is
    # what makes Golomb coding of the differences effective.
    hashes = read_hashes(input_file, FLAGS.hash_length)
    hashes.sort()
    # M = 2**two_power is the Golomb tunable parameter.
    golomb_coded_bytes = golomb_encode(
        hashes, FLAGS.hash_length, 2**FLAGS.two_power)
    print "With M=2**%d, Golomb-coded data size is %d, compression ratio %f" % (
        FLAGS.two_power,
        len(golomb_coded_bytes),
        len(golomb_coded_bytes) / float(len(hashes) * FLAGS.hash_length))
    with open(output_file, 'wb') as f:
        f.write(golomb_coded_bytes)
    # Round-trip check: decoding must reproduce the original sorted hashes.
    uncompressed_hashes = uncompress_golomb_coding(
        golomb_coded_bytes, FLAGS.hash_length, 2**FLAGS.two_power)
    print "Original hashes: %d Uncompressed: %d" % (
        len(hashes), len(uncompressed_hashes))
    assert uncompressed_hashes == hashes
if __name__ == '__main__':
    # FLAGS() parses the gflags-style options and returns the remaining argv.
    sys.argv = FLAGS(sys.argv)
    if len(sys.argv) < 3:
        sys.stderr.write(
            "Usage: %s <input hashes file> <compressed output file>\n"
            " <input hashes file> Is the truncated, uncompressed hashes "
            "list.\n"
            " <compressed output file> is the output, Golomb-coded file.\n" %
            sys.argv[0])
        sys.exit(1)
    main(sys.argv[1], sys.argv[2])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
7293,
601,
262,
46621,
1351,
1262,
26849,
2381,
19617,
526,
15931,
198,
198,
11748,
10688,
198,
11748,
25064,
198,
198,
6738,
1643,
8841,
1330,
4722,
19182,
11,
44733,
11,
4722,
1... | 2.313333 | 1,950 |
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import os
import sys
import glob
import atexit
import readline
import traceback
import requests
import json
import argparse
from viper.common.out import print_error
from viper.common.colors import cyan, magenta, white, bold, blue
from viper.core.plugins import __modules__
from viper.core.ui.commands import Commands
from viper.core.database import Database
from viper.core.config import Config
from requests_toolbelt.multipart.encoder import MultipartEncoder
cfg = Config()
post_headers = {'Accept': 'application/json',
'Content-Type': 'application/json'}
try:
input = raw_input
except NameError:
pass
if __name__=='__main__':
    # Command-line interface: the sha256 of the sample to work on is
    # mandatory; host/port default to a local Viper API instance.
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--sha256', help='sha256 hash to work on pls', action='store', required=True)
    parser.add_argument('-H', '--host', help='Viper API hostname', default='localhost', action='store', required=False)
    parser.add_argument('-p', '--port', help='Viper API port', default='5556', action='store', required=False)
    # Fix: the original declared ``global arg`` here, which is a no-op at
    # module scope (module-level names are already global); removed.
    arg = parser.parse_args()
    host = arg.host
    port = arg.port
    # Console is defined elsewhere in this module.
    c = Console()
    c.start(sha256=arg.sha256)
2,
770,
2393,
318,
636,
286,
34517,
532,
3740,
1378,
12567,
13,
785,
14,
8903,
525,
12,
30604,
14,
8903,
525,
198,
2,
4091,
262,
2393,
705,
43,
2149,
24290,
6,
329,
23345,
7170,
13,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
... | 3 | 431 |
import requests
import json
from urllib.request import urlopen
from bs4 import BeautifulSoup
from multiprocessing import Pool
from model import Reviews
"""from
VARIABLES
"""
header={
'Accept':'*/*',
'Accept-Encoding':'gzip, deflate',
'Accept-Language':'en-US,en;q=0.5',
'Content-Length':'58',
'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8',
'Cookie':'PHPSESSID=an57n15lrqu18lrsthhqdef123; fbcity=1; zl=en; fbtrack=ba9e1871dc9a7e04c3c7f8bb4940e794; ueg=1; __utma=141625785.1460912619.1412698053.1412698053.1412698053.1; __utmb=141625785.6.10.1412698053; __utmc=141625785; __utmz=141625785.1412698053.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); dpr=1',
'Host':'www.zomato.com',
'Referer':'https://www.zomato.com/ncr/fork-you-hauz-khas-village-delhi/reviews',
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:32.0) Gecko/20100101 Firefox/32.0',
'X-NewRelic-ID':'VgcDUF5SGwEDV1RWAgg=',
'X-Requested-With':'XMLHttpRequest'
}
url='https://www.zomato.com/php/social_load_more.php'
class Zomato(object):
"""docstring for Zomato"""
test_url="https://www.zomato.com/ncr/alishas-kitchen-aaya-nagar-new-delhi"
z= Zomato(test_url)
z.get_data()
| [
11748,
7007,
198,
11748,
33918,
198,
6738,
2956,
297,
571,
13,
25927,
1330,
19016,
9654,
198,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
6738,
18540,
305,
919,
278,
1330,
19850,
198,
6738,
2746,
1330,
20871,
198,
37811,
6738,
... | 2.033926 | 619 |
import datetime
import pandas as pd
from vc.data_io import files
# Object to hold Brazilian cities temperature data.
| [
11748,
4818,
8079,
201,
198,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
6738,
410,
66,
13,
7890,
62,
952,
1330,
3696,
201,
198,
201,
198,
201,
198,
2,
9515,
284,
1745,
17036,
4736,
5951,
1366,
13,
201,
198
] | 3.097561 | 41 |
# Copyright 2021 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
from recipe_engine.post_process import StepSuccess, LogEquals, StepTextEquals
from recipe_engine.post_process import StepException, StepFailure
from recipe_engine.post_process import DropExpectation
PYTHON_VERSION_COMPATIBILITY = 'PY2+3'
DEPS = [
'step',
]
| [
2,
15069,
33448,
383,
406,
9598,
40,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
198,
2,
326,
460,
307,
1043,
287,
262,
38559,
24290,
2393,
13,
... | 3.353846 | 130 |
import os
from typing import List
from PyQt5.QtGui import QFont, QPixmap
from PyQt5.QtWidgets import (
QTabWidget, QGridLayout, QLabel,
QPushButton, QDialog, QMessageBox,
QStyleFactory
)
from PyQt5.QtCore import Qt, pyqtSignal
import emoji
from serial import SerialException
from mindpong.view.utils import (
BACKGROUND_COLORS, IMAGES_PATH, MINDPONG_TITLE,
get_image_file)
from mindpong.view.widgets.scalablearrow import ScalableArrow
from mindpong.view.widgets.mathquestions import MathQuestions
from mindpong.model.game import GameState
PINGPONG_FILE_NAME = 'ball.png'
RED_PLAYER_FILE_NAME = 'red_player.png'
BLUE_PLAYER_FILE_NAME = 'blue_player.png'
| [
11748,
28686,
198,
6738,
19720,
1330,
7343,
198,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
8205,
72,
1330,
1195,
23252,
11,
1195,
47,
844,
8899,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
357,
198,
220,
220,
2... | 2.705179 | 251 |
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.elastic.metrics import (
health_stats_for_version,
node_system_stats_for_version,
pshard_stats_for_version,
stats_for_version,
)
@pytest.mark.unit
@pytest.mark.parametrize(
'version, jvm_rate, expected_metric_count',
[
pytest.param([0, 90, 0], False, 133, id='v0.90'),
pytest.param([0, 90, 5], False, 134, id='v0.90.5'),
pytest.param([0, 90, 10], False, 132, id='v0.90.10'),
pytest.param([1, 0, 0], False, 140, id='v1'),
pytest.param([1, 3, 0], False, 142, id='v1.3.0'),
pytest.param([1, 4, 0], False, 162, id='v1.4.0'),
pytest.param([1, 5, 0], False, 165, id='v1.5.0'),
pytest.param([1, 6, 0], False, 173, id='v1.6.0'),
pytest.param([2, 0, 0], False, 172, id='v2.0.0'),
pytest.param([2, 1, 0], False, 177, id='v2.1.0'),
pytest.param([5, 0, 0], False, 180, id='v5'),
pytest.param([6, 3, 0], False, 180, id='v6.3.0'),
pytest.param([7, 2, 0], False, 177, id='v7.2.0'),
pytest.param([0, 90, 0], True, 133, id='v0.90'),
pytest.param([0, 90, 5], True, 134, id='v0.90.5'),
pytest.param([0, 90, 10], True, 136, id='v0.90.10'),
pytest.param([1, 0, 0], True, 144, id='jmx-rate-v1'),
pytest.param([1, 3, 0], True, 146, id='jmx-rate-v1.3.0'),
pytest.param([1, 4, 0], True, 166, id='jmx-rate-v1.4.0'),
pytest.param([1, 5, 0], True, 169, id='jmx-rate-v1.5.0'),
pytest.param([1, 6, 0], True, 177, id='jmx-rate-v1.6.0'),
pytest.param([2, 0, 0], True, 176, id='jmx-rate-v2.0.0'),
pytest.param([2, 1, 0], True, 181, id='jmx-rate-v2.1.0'),
pytest.param([5, 0, 0], True, 184, id='jmx-rate-v5'),
pytest.param([6, 3, 0], True, 184, id='jmx-rate-v6.3.0'),
pytest.param([7, 2, 0], True, 181, id='jmx-rate-v7.2.0'),
],
)
@pytest.mark.unit
@pytest.mark.parametrize(
'version, expected_metric_count',
[
pytest.param([0, 90, 0], 23, id='v0.90'),
pytest.param([0, 90, 5], 23, id='v0.90.5'),
pytest.param([0, 90, 10], 23, id='v0.90.10'),
pytest.param([1, 0, 0], 34, id='v1'),
pytest.param([1, 3, 0], 34, id='v1.3.0'),
pytest.param([1, 4, 0], 34, id='v1.4.0'),
pytest.param([1, 5, 0], 34, id='v1.5.0'),
pytest.param([1, 6, 0], 34, id='v1.6.0'),
pytest.param([2, 0, 0], 34, id='v2.0.0'),
pytest.param([2, 1, 0], 34, id='v2.1.0'),
pytest.param([5, 0, 0], 34, id='v5'),
pytest.param([6, 3, 0], 34, id='v6.3.0'),
pytest.param([7, 2, 0], 36, id='v7.2.0'),
],
)
@pytest.mark.unit
@pytest.mark.parametrize(
'version, expected_metric_count',
[
pytest.param([0, 90, 0], 8, id='v0.90'),
pytest.param([0, 90, 5], 8, id='v0.90.5'),
pytest.param([0, 90, 10], 8, id='v0.90.10'),
pytest.param([1, 0, 0], 8, id='v1'),
pytest.param([1, 3, 0], 8, id='v1.3.0'),
pytest.param([1, 4, 0], 8, id='v1.4.0'),
pytest.param([1, 5, 0], 8, id='v1.5.0'),
pytest.param([1, 6, 0], 8, id='v1.6.0'),
pytest.param([2, 0, 0], 8, id='v2.0.0'),
pytest.param([2, 1, 0], 8, id='v2.1.0'),
pytest.param([5, 0, 0], 9, id='v5'),
pytest.param([6, 3, 0], 9, id='v6.3.0'),
pytest.param([7, 2, 0], 9, id='v7.2.0'),
],
)
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.parametrize(
'version, expected_metric_count',
[
pytest.param([0, 90, 0], 7, id='v0.90'),
pytest.param([0, 90, 5], 7, id='v0.90.5'),
pytest.param([0, 90, 10], 7, id='v0.90.10'),
pytest.param([1, 0, 0], 9, id='v1'),
pytest.param([1, 3, 0], 9, id='v1.3.0'),
pytest.param([1, 4, 0], 9, id='v1.4.0'),
pytest.param([1, 5, 0], 9, id='v1.5.0'),
pytest.param([1, 6, 0], 9, id='v1.6.0'),
pytest.param([2, 0, 0], 9, id='v2.0.0'),
pytest.param([2, 1, 0], 9, id='v2.1.0'),
pytest.param([5, 0, 0], 13, id='v5'),
pytest.param([6, 3, 0], 13, id='v6.3.0'),
pytest.param([7, 2, 0], 13, id='v7.2.0'),
],
)
| [
2,
357,
34,
8,
16092,
324,
519,
11,
3457,
13,
2864,
12,
25579,
198,
2,
1439,
2489,
10395,
198,
2,
49962,
739,
257,
513,
12,
565,
682,
347,
10305,
3918,
5964,
357,
3826,
38559,
24290,
8,
198,
11748,
12972,
9288,
198,
198,
6738,
481... | 1.825692 | 2,312 |
# -*- coding: utf-8 -*-
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Kite install utilities test."""
# Local imports
import os
import re
import sys
# Third-party imports
import pytest
# Local imports
from spyder.plugins.completion.kite.utils.install import (
KiteInstallationThread, DOWNLOADING_INSTALLER, DOWNLOADING_SCRIPT,
INSTALLING, FINISHED)
from spyder.plugins.completion.kite.utils.status import (
check_if_kite_installed, check_if_kite_running)
# Time to wait until the installation finishes
# (6 minutes in milliseconds)
INSTALL_TIMEOUT = 360000
@pytest.mark.slow
@pytest.mark.first
@pytest.mark.skip(reason="Fail on CIs and it's too heavy to run locally")
def test_kite_install(qtbot):
    """Test the correct execution of the installation process of kite."""
    install_manager = KiteInstallationThread(None)
    installation_statuses = []
    # NOTE(review): the callbacks connected below (installation_status,
    # error_msg, download_progress, finished) are not defined in this
    # excerpt -- presumably local helpers elided from this view; confirm.
    install_manager.sig_installation_status.connect(installation_status)
    install_manager.sig_error_msg.connect(error_msg)
    install_manager.sig_download_progress.connect(download_progress)
    install_manager.finished.connect(finished)
    # Block until the installer thread signals completion, or fail after the
    # 6-minute INSTALL_TIMEOUT.
    with qtbot.waitSignal(install_manager.finished, timeout=INSTALL_TIMEOUT):
        install_manager.install()
    # Check that kite was installed and is running
    qtbot.waitUntil(
        lambda: check_if_kite_installed() and check_if_kite_running(),
        timeout=5000)
if __name__ == "__main__":
pytest.main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
10673,
23688,
1082,
4935,
25767,
669,
198,
2,
49962,
739,
262,
2846,
286,
262,
17168,
13789,
198,
2,
357,
3826,
13997,
1082,
14,
834,
15003,
834,
13,
90... | 2.974659 | 513 |
from neurogym.wrappers.monitor import Monitor
from neurogym.wrappers.noise import Noise
from neurogym.wrappers.pass_reward import PassReward
from neurogym.wrappers.pass_action import PassAction
from neurogym.wrappers.reaction_time import ReactionTime
from neurogym.wrappers.side_bias import SideBias
from neurogym.wrappers.block import RandomGroundTruth
from neurogym.wrappers.block import ScheduleAttr
from neurogym.wrappers.block import ScheduleEnvs
from neurogym.wrappers.block import TrialHistoryV2
# Registry mapping a wrapper's public name (with version tag) to its import
# path, in the "module.path:ClassName" entry-point format.
# NOTE(review): the block wrappers imported above (RandomGroundTruth,
# ScheduleAttr, ScheduleEnvs, TrialHistoryV2) are not registered here --
# confirm whether that omission is intentional.
ALL_WRAPPERS = {'Monitor-v0': 'neurogym.wrappers.monitor:Monitor',
                'Noise-v0': 'neurogym.wrappers.noise:Noise',
                'PassReward-v0': 'neurogym.wrappers.pass_reward:PassReward',
                'PassAction-v0': 'neurogym.wrappers.pass_action:PassAction',
                'ReactionTime-v0':
                'neurogym.wrappers.reaction_time:ReactionTime',
                'SideBias-v0': 'neurogym.wrappers.side_bias:SideBias',
                }
| [
6738,
7669,
1360,
76,
13,
29988,
11799,
13,
41143,
1330,
18289,
198,
6738,
7669,
1360,
76,
13,
29988,
11799,
13,
3919,
786,
1330,
30964,
198,
6738,
7669,
1360,
76,
13,
29988,
11799,
13,
6603,
62,
260,
904,
1330,
6251,
48123,
198,
6738... | 2.373786 | 412 |
import json
import os
import shutil
import subprocess
from pathlib import Path
import pytest
from .helpers import create, get_umamba, random_string
@pytest.fixture(scope="session")
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
850,
14681,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
764,
16794,
364,
1330,
2251,
11,
651,
62,
388,
31842,
11,
4738,
62,
8841,
... | 3.315789 | 57 |
from time import sleep
import socket
import threading
'''import dominate
def create_doc():
doc = dominate.document(title="My favorite page")
with doc:
with tags.div():
tags.attr(cls="body")
tags.p("lorem ipsum")
'''
if __name__ == "__main__":
main()
| [
6738,
640,
1330,
3993,
198,
11748,
17802,
198,
11748,
4704,
278,
198,
7061,
6,
11748,
17863,
198,
198,
4299,
2251,
62,
15390,
33529,
198,
220,
220,
220,
2205,
796,
17863,
13,
22897,
7,
7839,
2625,
3666,
4004,
2443,
4943,
198,
220,
220... | 2.387097 | 124 |
from lexibank_deepadungpalaung import Dataset
from lingpy import *
from lingpy.compare.partial import Partial
# Load the wordlist from the CLDF metadata shipped with the lexibank dataset.
wl = Wordlist.from_cldf(Dataset().cldf_dir.joinpath('cldf-metadata.json'))
i = 0
# Sanity check: report any row whose token string contains an empty segment.
for idx, tokens in wl.iter_rows('tokens'):
    #print(idx, tokens)
    for segment in tokens.n:
        if not segment:
            print(idx, tokens)
input('all fine')
# Column selection and renaming for the lingpy Partial loader.
columns=('concept_name', 'language_id',
         'value', 'form', 'segments', 'language_glottocode', 'cogid_cognateset_id'
         )
namespace=(('concept_name', 'concept'), ('language_id',
           'doculect'), ('segments', 'tokens'), ('language_glottocode',
           'glottolog'), ('concept_concepticon_id', 'concepticon'),
           ('language_latitude', 'latitude'), ('language_longitude',
           'longitude'), ('cognacy', 'cognacy'),
           ('cogid_cognateset_id', 'cog'))
# Reload the data with partial-cognate support.
part = Partial.from_cldf(Dataset().cldf_dir.joinpath('cldf-metadata.json'),
                         columns=columns, namespace=namespace) #25
input('loaded data')
# Turn the expert cognate-set labels into numeric ids under 'cogid'.
part.renumber('cog') #26
from lingpy.evaluate.acd import bcubes #10
# Threshold sweep with "strict" cognate ids: print B-cubed precision, recall
# and F-score against the expert 'cogid' for t = 0.00 .. 0.95.
for i in range(20): #27
    t = 0.05 * i
    ts = 't_'+str(i)
    part.partial_cluster(method='sca', threshold=t, ref=ts)
    part.add_cognate_ids(ts, ts+'id', idtype='strict')
    p, r, f = bcubes(part, 'cogid', ts+'id', pprint=False)
    print('{0:.2f} {1:.4} {2:.4f} {3:.2f}'.format(t, p, r, f))
# Same sweep with "loose" cognate ids.
for i in range(20): #30
    t = 0.05 * i
    ts = 't_'+str(i)
    part.partial_cluster(method='sca', threshold=t, ref=ts)
    part.add_cognate_ids(ts, ts+'id', idtype='loose')
    p, r, f = bcubes(part, 'cogid', ts+'id', pprint=False)
    print('{0:.2f} {1:.4} {2:.4f} {3:.2f}'.format(t, p, r, f))
# Align the partial cognates and dump the annotated wordlist to TSV.
alms = Alignments(part, ref='cogids')
alms.align()
alms.output('tsv', filename='../output/deepadung-wordlist-new2', ignore='all', prettify=False)
| [
6738,
31191,
571,
962,
62,
22089,
324,
2150,
18596,
1942,
70,
1330,
16092,
292,
316,
198,
6738,
18459,
9078,
1330,
1635,
198,
6738,
18459,
9078,
13,
5589,
533,
13,
47172,
1330,
43689,
198,
198,
40989,
796,
9678,
4868,
13,
6738,
62,
66... | 2.067686 | 916 |
import couchdb
import logging
log = logging.getLogger(__name__)
# merges d2 in d1, keeps values from d1
# taken from scilifelab
def merge(d1, d2):
    """Recursively merge dictionary d2 into dictionary d1, in place.

    Nested dictionaries are merged key by key. When the same key maps to
    differing non-dict values in both, d1's value is kept.

    :param d1: Dictionary object that receives the merge (mutated in place).
    :param d2: Dictionary object whose entries are merged in (not modified).
    :returns: d1, for call-chaining convenience.
    """
    for key in d2:
        if key in d1:
            if isinstance(d1[key], dict) and isinstance(d2[key], dict):
                merge(d1[key], d2[key])
            elif d1[key] == d2[key]:
                pass  # same leaf value
            else:
                # Fix: the original passed two positional strings to
                # log.debug, so the second string was treated as a
                # %-formatting argument for a message with no placeholders
                # and the "{key}" placeholder was never filled. Use lazy
                # %-style logging arguments instead.
                log.debug("Values for key %s in d1 and d2 differ, "
                          "using d1's value", key)
        else:
            d1[key] = d2[key]
    return d1
| [
11748,
18507,
9945,
198,
11748,
18931,
198,
198,
6404,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628,
628,
198,
2,
4017,
3212,
288,
17,
287,
288,
16,
11,
7622,
3815,
422,
288,
16,
198,
2,
2077,
422,
629,
346,
361,
... | 2.060209 | 382 |
import os
import random
from pathlib import Path, PosixPath
from typing import Any, List, Set
import pytest
from pytest_envvars.django_utils import get_base_envvars, is_django_project
def pytest_addoption(parser):
    """Register the plugin's command-line flags and pytest.ini keys."""
    group = parser.getgroup("envvars")
    # Command-line flags handled by the plugin.
    group.addoption(
        "--validate-envvars",
        action="store_true",
        dest="validate_envvars",
        default=False,
        help="Validate envvars mocks",
    )
    group.addoption(
        "--envvars-value",
        dest="envvars_value",
        default=False,
        type=int,
        choices=[0, 1],
        help="Select value of envvars",
    )
    # pytest.ini keys: both are newline-separated lists.
    for ini_name, ini_help in (
        ('pytestenvvars__env_files',
         'A line separated list of env files to parse'),
        ('pytestenvvars__dont_randomize_envvars',
         'A line separated list of envvars not to be randomized'),
    ):
        parser.addini(ini_name, type='linelist', help=ini_help)
parser.addini(
'pytestenvvars__env_files',
type='linelist',
help='A line separated list of env files to parse',
)
parser.addini(
'pytestenvvars__dont_randomize_envvars',
type='linelist',
help='A line separated list of envvars not to be randomized',
)
def set_randomized_env_vars_from_list(
    envvar_value_list: List[str],
    ignored_django_envvars: Set[str],
    ignored_envvars: Set[str],
    randomize: bool = False,
    envvars_value: Any = None,
) -> List[tuple]:
    """Parse ``NAME=value`` lines, export them, and optionally randomize.

    Comment lines (starting with ``#``) and blank lines are skipped. Every
    parsed pair is written to ``os.environ``: either the literal file value
    (when not randomizing, or when the name is protected), or a pinned /
    random '0'/'1' value.

    Params:
        envvar_value_list (list): lines with envvars and values, eg. ['FOO=123', 'BAR=432']
        ignored_django_envvars (set): Django's own envvars (only relevant in Django projects)
        ignored_envvars (set): envvars listed as protected in the configuration file
        randomize (bool): True to randomize values, False to keep file values
        envvars_value (Any or None): fixed value for every randomized envvar, or None to pick randomly
    Returns:
        List(tuples): parsed (envvar, value) pairs, eg. [('FOO', '0'), ('BAR', '0')]
    """
    parsed_pairs: List[tuple] = []
    for raw_line in envvar_value_list:
        if raw_line.startswith("#") or not raw_line.strip():
            continue

        name, _, file_value = raw_line.partition('=')
        name = name.strip()
        file_value = file_value.strip()
        parsed_pairs.append((name, file_value))

        keep_file_value = (
            randomize is False
            or name in ignored_django_envvars
            or name in ignored_envvars
        )
        if keep_file_value:
            os.environ[name] = file_value
        else:
            # A pinned value wins; otherwise flip a coin between '0' and '1'.
            os.environ[name] = envvars_value if envvars_value else random.choice(['0', '1'])

    return parsed_pairs
def get_fullpath_filenames(filenames: List[str]) -> Set[PosixPath]:
    """Resolve filenames to absolute paths of matching regular files.

    Recursively searches the current working directory tree for every name
    in ``filenames``.

    Params:
        filenames (list): List of strings with filenames
    Returns:
        Set[PosixPath]: A set with one or more PosixPath objects
    """
    resolved: Set[PosixPath] = set()
    search_root = Path()
    for name in filenames:
        resolved.update(
            candidate.absolute()
            for candidate in search_root.rglob(name)
            if candidate.is_file()
        )
    return resolved
@pytest.hookimpl(tryfirst=True)
def pytest_load_initial_conftests(args, early_config, parser):
    """Parse the configured env files and populate os.environ early.

    Runs before initial conftests are loaded, so every envvar listed in the
    ``pytestenvvars__env_files`` files is already set (randomized or pinned)
    when test code first imports its settings.
    """
    options = parser.parse(args)

    # --envvars-value pins every randomized envvar to the same '0'/'1'.
    pinned_value = None
    if options.envvars_value is not False:
        pinned_value = str(options.envvars_value)

    # Django's own envvars must never be randomized inside a Django project.
    django_envvars = get_base_envvars() if is_django_project() else set()
    protected_envvars = early_config.getini(
        "pytestenvvars__dont_randomize_envvars")

    env_files = early_config.getini("pytestenvvars__env_files")
    for env_file in get_fullpath_filenames(env_files):
        # utf-8-sig transparently drops a BOM when the file carries one.
        with open(env_file, 'r', encoding='utf-8-sig') as handle:
            set_randomized_env_vars_from_list(
                handle.readlines(),
                django_envvars,
                protected_envvars,
                options.validate_envvars,
                pinned_value,
            )
    # very useful in unit tests...
    # os.environ['PYTEST_ENVVARS_DEBUG'] = f"{envvars} - {ignored_envvars}"
| [
11748,
28686,
198,
11748,
4738,
198,
6738,
3108,
8019,
1330,
10644,
11,
18574,
844,
15235,
198,
6738,
19720,
1330,
4377,
11,
7343,
11,
5345,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
12972,
9288,
62,
24330,
85,
945,
13,
28241,
14208... | 2.304275 | 1,801 |
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import re
from qf_lib.common.enums.expiration_date_field import ExpirationDateField
from qf_lib.common.tickers.tickers import BloombergTicker
from qf_lib.containers.futures.future_tickers.future_ticker import FutureTicker
from qf_lib.containers.series.qf_series import QFSeries
class BloombergFutureTicker(FutureTicker, BloombergTicker):
    """Representation of a Future Ticker, designed to be used by the BloombergDataProvider.

    Parameters
    ----------
    name: str
        Field which contains a name (or a short description) of the FutureTicker.
    family_id: str
        Used to to verify if a specific BloombergTicker belongs to a certain futures family and to the active
        Ticker string, which can be further used by the data provider to download the chain of corresponding Tickers.
        The family ID pattern - e.g. for Cotton, an exemplary ticker string is of the following
        form: "CTZ9 Comdty". The "Z9" part denotes the month and year codes - this is the only variable part of the
        ticker. Thus, in order to verify if a ticker belongs to the cotton family, it should be in form of "CT{} Comdty".
        For all other ticker families, the family_id should be in the form of specific ticker with the month and
        year codes replaced with the "{}" placeholder.
    N: int
        Used to identify which specific Ticker should be considered by the Backtester, while using the general
        Future Ticker class. For example N parameter set to 1, denotes the front future contract.
    days_before_exp_date: int
        Number of days before the expiration day of each of the contract, when the “current” specific contract
        should be substituted with the next consecutive one.
    point_value: int
        Used to define the size of the contract.
    designated_contracts: str
        It is a string, which represents all month codes, that are being downloaded and stored
        in the chain of future contracts. Any specific order of letters is not required. E.g. providing this
        parameter value equal to "HMUZ", would restrict the future chain to only the contracts, which expire in
        March, June, September and December, even if contracts for any other months exist and are returned by the
        BloombergDataProvider get_futures_chain_tickers function.
    """

    def get_active_ticker(self) -> BloombergTicker:
        """ Returns the active ticker. """
        # "A" substituted for the month/year codes denotes the active
        # (continuous front) contract in Bloomberg's naming scheme.
        specific_ticker_string = self.family_id.format("A")
        return BloombergTicker.from_string(specific_ticker_string)

    def _get_futures_chain_tickers(self):
        """
        Function used to download the expiration dates of the futures contracts, in order to return afterwards current
        futures tickers. It uses the list of month codes of designated contracts and filter out these, that should not
        be considered by the future ticker.
        """
        # Expiration-date fields per ticker, as a DataFrame keyed by ticker.
        futures_chain_tickers_df = self._data_provider.get_futures_chain_tickers(self,
                                                                                 ExpirationDateField.all_dates())[self]
        # Get the minimum date
        futures_chain_tickers = futures_chain_tickers_df.min(axis=1)
        # Swap index and values: the resulting series is indexed by the
        # (earliest) expiration date, with the tickers as values.
        futures_chain_tickers = QFSeries(data=futures_chain_tickers.index, index=futures_chain_tickers.values)
        futures_chain_tickers.index = pd.to_datetime(futures_chain_tickers.index)

        # Filter out the non-designated contracts
        # A contract is kept when its symbol starts with the family prefix
        # (the part of family_id before "{}") plus a designated month code.
        seed = self.family_id.split("{}")[0]
        designated_contracts_seeds = tuple(seed + month_code for month_code in self.designated_contracts)
        futures_chain_tickers = futures_chain_tickers[futures_chain_tickers.apply(
            lambda t: t.ticker.startswith(designated_contracts_seeds)
        )]

        return futures_chain_tickers

    def belongs_to_family(self, specific_ticker: BloombergTicker) -> bool:
        """
        Function, which takes a specific BloombergTicker, and verifies if it belongs to the family of futures contracts,
        identified by the FutureTicker.

        Returns
        -------
        bool
        """
        def ticker_to_family_id(t: BloombergTicker) -> str:
            """
            Returns a custom ID, used to identify futures contracts families.

            The function parses the contract symbol string and substitutes the characters which identify the month
            (1 letter) and year of contract expiration (1-2 digits at the end of the first word in the string) with the
            {} placeholder, e.g. in case of 'CTH16 Comdty', it returns 'CT{} Comdty'.
            """
            # NOTE(review): the trailing "\.*" matches zero or more literal
            # dots (".*" was probably intended); harmless for symbols like
            # "CTH16 Comdty" since it can match the empty string.
            groups = re.search(r'^.+([A-Z]\d{1,2}) \.*', t.ticker).groups()
            month_year_part = groups[0]
            return t.ticker.replace(month_year_part, "{}")

        try:
            family_id = ticker_to_family_id(specific_ticker)
            return self.family_id == family_id
        except AttributeError:
            # Raised when re.search returned None (symbol does not match the
            # pattern) or the argument has no ``ticker`` attribute.
            return False
| [
2,
220,
220,
220,
220,
15069,
1584,
12,
25579,
327,
28778,
784,
3427,
12275,
329,
19229,
4992,
198,
2,
198,
2,
220,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
2... | 2.82009 | 2,001 |
from PyInstaller.utils.hooks import (
collect_data_files,
copy_metadata,
collect_submodules
)
# Bundle kivymd's package metadata and data files, and collect every kivymd
# submodule (the library imports widgets dynamically at runtime).
datas = copy_metadata('kivymd')
hiddenimports = collect_submodules('kivymd')
# Fix: extend instead of re-assigning -- the original second assignment to
# ``datas`` discarded the metadata entries collected above.
datas += collect_data_files('kivymd')
6738,
9485,
15798,
263,
13,
26791,
13,
25480,
82,
1330,
357,
198,
220,
220,
220,
2824,
62,
7890,
62,
16624,
11,
220,
198,
220,
220,
220,
4866,
62,
38993,
11,
198,
220,
220,
220,
2824,
62,
7266,
18170,
198,
8,
198,
198,
19608,
292,... | 2.611765 | 85 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: Jialiang Shi
from sonarqube.utils.rest_client import RestClient
from sonarqube.utils.config import (
API_USERS_SEARCH_ENDPOINT,
API_USERS_CREATE_ENDPOINT,
API_USERS_UPDATE_ENDPOINT,
API_USERS_CHANGE_PASSWORD_ENDPOINT,
API_USERS_GROUPS_ENDPOINT,
API_USERS_DEACTIVATE_ENDPOINT,
API_USERS_UPDATE_LOGIN_ENDPOINT,
)
from sonarqube.utils.common import PAGE_GET, POST
class SonarQubeUsers(RestClient):
    """
    SonarQube users Operations
    """

    # NOTE(review): methods decorated with @POST/@PAGE_GET below have
    # intentionally empty bodies -- the decorator appears to build and send
    # the HTTP request from the function signature; confirm in
    # sonarqube.utils.common.

    # Page size used for the users search endpoint.
    MAX_SEARCH_NUM = 200

    def __init__(self, **kwargs):
        """
        :param kwargs:
        """
        super(SonarQubeUsers, self).__init__(**kwargs)

    @PAGE_GET(API_USERS_SEARCH_ENDPOINT, item="users")
    def search_users(self, q=None):
        """
        SINCE 3.6
        Get a list of active users.

        :param q: Filter on login, name and email
        :return:
        """

    def create_user(
        self, login, name, email=None, password=None, local="true", scmAccount=None
    ):
        """
        SINCE 3.7
        Create a user.

        :param login: User login
        :param name: User name
        :param email: User email
        :param password: User password. Only mandatory when creating local user, otherwise it should not be set
        :param local: Specify if the user should be authenticated from SonarQube server or from an external
          authentication system. Password should not be set when local is set to false.
          Possible values are for: true, false, yes, no. default value is true.
        :param scmAccount: List of SCM accounts. To set several values, the parameter must be called once for each value.
        :return: request response
        """
        # Unlike the decorated endpoints, this method builds the payload by
        # hand because the optional parameters are only sent when provided.
        params = {"login": login, "name": name, "local": local}
        if email:
            params.update({"email": email})

        # A password is only meaningful for local (non-external) accounts.
        if local == "true" and password:
            params.update({"password": password})

        if scmAccount:
            params.update({"scmAccount": scmAccount})

        return self._post(API_USERS_CREATE_ENDPOINT, params=params)

    @POST(API_USERS_UPDATE_ENDPOINT)
    def update_user(self, login, name=None, email=None, scmAccount=None):
        """
        SINCE 3.7
        Update a user.

        :param login: User login
        :param name: User name
        :param email: User email
        :param scmAccount: SCM accounts.
        :return: request response
        """

    @POST(API_USERS_CHANGE_PASSWORD_ENDPOINT)
    def change_user_password(self, login, password, previousPassword=None):
        """
        SINCE 5.2
        Update a user's password. Authenticated users can change their own password,
        provided that the account is not linked to an external authentication system.
        Administer System permission is required to change another user's password.

        :param login: User login
        :param password: New password
        :param previousPassword: Previous password. Required when changing one's own password.
        :return:
        """

    @POST(API_USERS_DEACTIVATE_ENDPOINT)
    def deactivate_user(self, login):
        """
        SINCE 3.7
        Deactivate a user.

        :param login: User login
        :return: request response
        """

    @PAGE_GET(API_USERS_GROUPS_ENDPOINT, item="groups")
    def search_groups_user_belongs_to(self, login, q=None, selected="selected"):
        """
        SINCE 5.2
        Lists the groups a user belongs to.

        :param login:
        :param q: Limit search to group names that contain the supplied string.
        :param selected: Depending on the value, show only selected items (selected=selected), deselected items
          (selected=deselected), or all items with their selection status (selected=all).Possible values are for:
          * all
          * deselected
          * selected
          default value is selected.
        :return:
        """

    @POST(API_USERS_UPDATE_LOGIN_ENDPOINT)
    def update_user_login(self, login, newLogin):
        """
        SINCE 7.6
        Update a user login. A login can be updated many times.

        :param login: The current login (case-sensitive)
        :param newLogin: The new login. It must not already exist.
        :return:
        """
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
2488,
13838,
25,
449,
498,
15483,
16380,
198,
6738,
3367,
283,
421,
1350,
13,
26791,
13,
2118,
62,
16366,
1330,
8324,
... | 2.404136 | 1,789 |
import pandas as pd
import numpy as np
from keras.utils import np_utils
class PhaseDataset(object):
    """
    Builds balanced, shuffled train/test splits of seismic-phase feature rows.

    Rows come from a pandas DataFrame ``self.df`` with one detection per row:
    the feature columns listed in ``x_indices`` plus the label column
    ``CLASS_PHASE``.

    NOTE(review): ``self.df``, ``self.random_state``, ``self.dataset_train``
    and ``self.dataset_test`` are referenced below but never initialized in
    the code shown here — presumably an ``__init__(filename=..., ...)`` exists
    elsewhere (the module's ``__main__`` block constructs
    ``PhaseDataset(filename=...)``).  Confirm.
    """
    # The four phase labels expected in the CLASS_PHASE column.
    phases = ['regP', 'regS', 'tele', 'N']
    # Feature columns used as model inputs (x).
    x_indices = ['PER', 'RECT', 'PLANS', 'INANG1', 'INANG3', 'HMXMN', 'HVRATP', 'HVRAT', 'NAB', 'TAB',
                 'HTOV1', 'HTOV2', 'HTOV3', 'HTOV4', 'HTOV5', 'SLOW']
    # Label column (y).
    y_indices = ['CLASS_PHASE']
    # Phase label -> integer class id, used for one-hot encoding below.
    phase_index = {'regP':0, 'regS':1, 'tele':2, 'N':3}
    def get_dataset(self, stations, phase_list, split_ratio=0.75, manual=False):
        """
        Build balanced train/test arrays for the requested stations/phases.

        :param stations: list of station codes, example: ['URZ', 'LPAZ']
        :param phase_list: mapping from output-class name to the source phase
            labels merged into it, example:
            {'P':['regP'], 'S':['regS'], 'T':['tele'], 'N':['N']}
            or
            {'PST':['regP', 'regS', 'tele'], 'N':['N']}
        :param split_ratio: fraction of each class placed in the train split.
        :param manual: when False, rows with SOURCE == 'M' are excluded.
        :return: (train_x, train_y, test_x, test_y); the y arrays are one-hot
            encoded with one column per key of ``phase_list``.
        """
        # Boolean mask selecting rows belonging to any requested station.
        dataset = None
        for s in stations:
            dataset_new = (self.df['STA'] == s)
            if dataset is None:
                dataset = dataset_new
            else:
                dataset = dataset | dataset_new
        # Unless manual picks are wanted, drop manually sourced rows.
        if not manual:
            dataset = (self.df['SOURCE'] != 'M') & dataset
        # Partition the selection by phase label and count each class.
        dataset_phases = {}
        dataset_count = {}
        for p in PhaseDataset.phases:
            dataset_phases[p] = self.df[(self.df['CLASS_PHASE'] == p) & dataset]
            dataset_count[p] = len(dataset_phases[p])
        print(dataset_count['regP'], dataset_count['regS'], dataset_count['tele'], dataset_count['N'])
        # Balance: down-sample each signal phase to the smallest of the three;
        # noise ('N') gets three times that amount.
        sample_PST_count = min(dataset_count['regP'], dataset_count['regS'], dataset_count['tele'])
        sample_N_count = 3*sample_PST_count
        for p in PhaseDataset.phases:
            if p == 'N':
                dataset_phases[p] = dataset_phases[p].sample(sample_N_count, random_state=self.random_state)
            else:
                dataset_phases[p] = dataset_phases[p].sample(sample_PST_count, random_state=self.random_state)
        ds = {}
        train_x = None
        train_y = None
        test_x = None
        test_y = None
        # Merge source phases into each output class, shuffle, split, and
        # accumulate the per-class pieces into the returned arrays.
        for pl in sorted(phase_list):
            ds[pl] = None
            for p in phase_list[pl]:
                if ds[pl] is None:
                    ds[pl] = dataset_phases[p]
                else:
                    ds[pl] = pd.concat([ds[pl], dataset_phases[p]])
            # frac=1 => full shuffle with the class's random_state.
            ds[pl] = ds[pl].sample(frac=1, random_state=self.random_state)
            print("ds {}:{}".format(pl, ds[pl].shape))
            train_length = int(split_ratio*len(ds[pl]))
            self.dataset_train[pl] = ds[pl][:train_length]
            self.dataset_test[pl] = ds[pl][train_length:]
            if train_x is None:
                train_x = self.dataset_train[pl][PhaseDataset.x_indices].values
            else:
                train_x = np.concatenate((train_x, self.dataset_train[pl][PhaseDataset.x_indices].values))
            if train_y is None:
                train_y = [PhaseDataset.phase_index[y[0]]
                           for y in self.dataset_train[pl][PhaseDataset.y_indices].values.tolist()]
            else:
                train_y = np.concatenate((train_y,
                                          [PhaseDataset.phase_index[y[0]]
                                           for y in self.dataset_train[pl][PhaseDataset.y_indices].values.tolist()]))
            # NOTE(review): test_x is built with .tolist() while train_x keeps
            # the ndarray — presumably intentional, confirm downstream usage.
            if test_x is None:
                test_x = self.dataset_test[pl][PhaseDataset.x_indices].values.tolist()
            else:
                test_x = np.concatenate((test_x, self.dataset_test[pl][PhaseDataset.x_indices].values.tolist()))
            if test_y is None:
                test_y = [PhaseDataset.phase_index[y[0]]
                          for y in self.dataset_test[pl][PhaseDataset.y_indices].values.tolist()]
            else:
                test_y = np.concatenate((test_y,
                                         [PhaseDataset.phase_index[y[0]]
                                          for y in self.dataset_test[pl][PhaseDataset.y_indices].values.tolist()]))
        # One-hot encode the integer class ids (one column per output class).
        train_y = np_utils.to_categorical(train_y, len(phase_list))
        test_y = np_utils.to_categorical(test_y, len(phase_list))
        return train_x, train_y, test_x, test_y
if __name__ == "__main__":
    # Demo run: load the tiny feature file and build a balanced split for
    # the LPAZ station, one output class per phase label.
    dataset = PhaseDataset(filename="data/phase/ml_features_tiny.csv")
    class_map = {'P': ['regP'], 'S': ['regS'], 'T': ['tele'], 'N': ['N']}
    train_x, train_y, test_x, test_y = dataset.get_dataset(
        stations=["LPAZ"], phase_list=class_map,
        split_ratio=0.75, manual=False)
    print(len(train_x), len(train_y), len(test_x), len(test_y))
    print(train_x)
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
41927,
292,
13,
26791,
1330,
45941,
62,
26791,
198,
198,
4871,
18983,
27354,
292,
316,
7,
15252,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1898... | 1.81983 | 2,592 |
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2020, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
"""
.. _volttroncentral-agent:
The VolttronCentral(VCA) agent is used to manage remote VOLTTRON instances.
The VCA exposes a JSON-RPC based web api and a web enabled visualization
framework. The web enabled framework is known as VOLTTRON
Central Management Console (VCMC).
In order for an instance to be able to be managed by VCMC a
:class:`vcplatform.agent.VolttronCentralPlatform` must be executing on the
instance. If there is a :class:`vcplatform.agent.VolttronCentralPlatform`
running on the same instance as VCA it will be automatically registered as a
managed instance. Otherwise, there are two different paths to registering an
instance with VCA.
1. Through the web api a call to the JSON-RPC method register_instance.
2. From an external platform through pub/sub. this secondary method is
preferred when deploying instances in the field that need to "phone home"
to VCA after being deployed.
"""
import datetime
import logging
import os
import os.path as p
import sys
from collections import namedtuple
import gevent
from volttron.platform import jsonapi
from volttron.platform import jsonrpc
from volttron.platform.agent import utils
from volttron.platform.agent.known_identities import (
VOLTTRON_CENTRAL, PLATFORM_HISTORIAN, AUTH)
from volttron.platform.agent.utils import (
get_aware_utc_now, get_messagebus)
from volttron.platform.jsonrpc import (
INVALID_REQUEST, METHOD_NOT_FOUND,
UNHANDLED_EXCEPTION, UNAUTHORIZED,
UNAVAILABLE_PLATFORM, INVALID_PARAMS,
UNAVAILABLE_AGENT, INTERNAL_ERROR)
from volttron.platform.vip.agent import Agent, RPC, Unreachable
from .authenticate import Authenticate
from .platforms import Platforms, PlatformHandler
from .sessions import SessionHandler
# must be after importing of utils which imports grequest.
import requests
__version__ = "5.2"
utils.setup_logging()
# Module-wide logger for the volttron central agent.
_log = logging.getLogger(__name__)
# Web root is going to be relative to the volttron central agents
# current agent's installed path
DEFAULT_WEB_ROOT = p.abspath(p.join(p.dirname(__file__), 'webroot/'))
# Lightweight record describing a managed VOLTTRON platform.
Platform = namedtuple('Platform', ['instance_name', 'serverkey', 'vip_address'])
# Arguments every routed json-rpc call is expected to carry.
RequiredArgs = namedtuple('RequiredArgs', ['id', 'session_user',
                                           'platform_uuid'])
class VolttronCentralAgent(Agent):
    """ Agent for managing many volttron instances from a central web ui.

    During the
    """

    def __init__(self, webroot=DEFAULT_WEB_ROOT, users=None,
                 topic_replace_list=None, **kwargs):
        """ Creates a `VolttronCentralAgent` object to manage instances.

        Each instance that is registered must contain a running
        `VolttronCentralPlatform`.  Through this conduit the
        `VolttronCentralAgent` is able to communicate securely and
        efficiently.

        :param webroot: path to the static web content served for VCMC.
        :param users: mapping username -> user record (password, groups);
            defaults to an empty mapping.
        :param topic_replace_list: topic replacement rules; defaults to [].
        :param kwargs: passed through to the base ``Agent``.
        :return:
        """
        _log.info("{} constructing...".format(self.__class__.__name__))
        super(VolttronCentralAgent, self).__init__(enable_web=True, **kwargs)
        # Normalize here rather than in the signature: mutable default
        # arguments ({} / []) are shared across calls and were a latent bug.
        if users is None:
            users = {}
        if topic_replace_list is None:
            topic_replace_list = []
        # Create default configuration to be used in case of problems in the
        # packaged agent configuration file.
        self._default_config = dict(
            webroot=os.path.abspath(webroot),
            users=users,
            topic_replace_list=topic_replace_list
        )
        self.vip.config.set_default("config", self._default_config)
        # Start using config store.
        self.vip.config.subscribe(self._configure,
                                  actions=["NEW", "UPDATE"],
                                  pattern="config")
        #
        # # During the configuration update/new/delete action this will be
        # # updated to the current configuration.
        # self.runtime_config = None
        #
        # # Start using config store.
        # self.vip.config.set_default("config", config)
        # self.vip.config.subscribe(self.configure_main,
        #                           actions=['NEW', 'UPDATE', 'DELETE'],
        #                           pattern="config")
        #
        # # Use config store to update the settings of a platform's configuration.
        # self.vip.config.subscribe(self.configure_platforms,
        #                           actions=['NEW', 'UPDATE', 'DELETE'],
        #                           pattern="platforms/*")
        #
        # # mapping from the real topic into the replacement.
        # self.replaced_topic_map = {}
        #
        # # mapping from md5 hash of address to the actual connection to the
        # # remote instance.
        # self.vcp_connections = {}
        #
        # # Current sessions available to the
        # self.web_sessions = None
        #
        # # Platform health based upon device driver publishes
        # self.device_health = defaultdict(dict)
        #
        # # Used to hold scheduled reconnection event for vcp agents.
        # self._vcp_reconnect_event = None
        #
        # # the registered socket endpoints so we can send out management
        # # events to all the registered session.
        # Websocket endpoints currently registered for management events.
        self._websocket_endpoints = set()
        # Registry of the platforms this VC instance manages.
        self._platforms = Platforms(self)
        self._platform_scan_event = None
        # Sessions that have been authenticated with the system.
        self._authenticated_sessions = None
    def _configure(self, config_name, action, contents):
        """
        The main configuration for volttron central.  This is where validation
        will occur.

        Note this method is called:
            1. When the agent first starts (with the params from packaged agent
               file)
            2. When 'store' is called through the volttron-ctl config command
               line with 'config' as the name.

        Required Configuration:
        The volttron central requires a user mapping.

        :param config_name: name of the configuration entry ("config").
        :param action: "NEW" or "UPDATE" (per the subscription filter).
        :param contents: the stored configuration dictionary.
        """
        # Layer the stored contents over the packaged defaults.
        config = self._default_config.copy()
        config.update(contents)
        users = config.get("users", None)
        # Any sessions built against the previous user set are now invalid.
        if self._authenticated_sessions:
            self._authenticated_sessions.clear()
        if users is None:
            users = {}
            _log.warning("No users are available for logging in!")
        # Unregister all routes for vc and then re-add down below.
        self.vip.web.unregister_all_routes()
        self._authenticated_sessions = SessionHandler(Authenticate(users))
        # JSON-RPC api entry point plus the static web content.
        self.vip.web.register_endpoint(r'/vc/jsonrpc', self.jsonrpc)
        self.vip.web.register_path(r'^/vc/.*',
                                   config.get('webroot'))
        # Start scanning for new platforms connections as well as for
        # disconnects that happen.
        gevent.spawn_later(1, self._scan_platform_connect_disconnect)
@staticmethod
def _scan_platform_connect_disconnect(self):
"""
Scan the local bus for peers that start with 'vcp-'. Handle the
connection and disconnection events here.
"""
if self._platform_scan_event is not None:
# This won't hurt anything if we are canceling ourselves.
self._platform_scan_event.cancel()
# Identities of all platform agents that are connecting to us should
# have an identity of platform.md5hash.
connected_platforms = set([x for x in self.vip.peerlist().get(timeout=5)
if x.startswith('vcp-') or x.endswith('.platform.agent')])
_log.debug("Connected: {}".format(connected_platforms))
disconnected = self._platforms.get_platform_vip_identities() - connected_platforms
for vip_id in disconnected:
self._handle_platform_disconnect(vip_id)
not_known = connected_platforms - self._platforms.get_platform_vip_identities()
for vip_id in not_known:
self._handle_platform_connection(vip_id)
next_platform_scan = VolttronCentralAgent._get_next_time_seconds()
# reschedule the next scan.
self._platform_scan_event = self.core.schedule(
next_platform_scan, self._scan_platform_connect_disconnect)
def open_authenticate_ws_endpoint(self, fromip, endpoint):
"""
Callback method from when websockets are opened. The endpoint must
be '/' delimited with the second to last section being the session
of a logged in user to volttron central itself.
:param fromip:
:param endpoint:
A string representing the endpoint of the websocket.
:return:
"""
_log.debug("OPENED ip: {} endpoint: {}".format(fromip, endpoint))
try:
session = endpoint.split('/')[-2]
except IndexError:
_log.error("Malformed endpoint. Must be delimited by '/'")
_log.error(
'Endpoint must have valid session in second to last position')
return False
if not self._authenticated_sessions.check_session(session, fromip):
_log.error("Authentication error for session!")
return False
_log.debug('Websocket allowed.')
self._websocket_endpoints.add(endpoint)
return True
@RPC.export
@RPC.export
def get_publickey(self):
"""
RPC method allowing the caller to retrieve the publickey of this agent.
This method is available for allowing :class:`VolttronCentralPlatform`
agents to allow this agent to be able to connect to its instance.
:return: The publickey of this volttron central agent.
:rtype: str
"""
return self.core.publickey
    def _to_jsonrpc_obj(self, jsonrpcstr):
        """ Convert a json-rpc document into a JsonRpcData named tuple.

        :param jsonrpcstr: a string (or json-serializable dict) holding a
            json-rpc 2.0 request.  (The original docstring referred to a
            nonexistent ``data`` parameter.)
        :return: the parsed ``jsonrpc.JsonRpcData``.
        """
        return jsonrpc.JsonRpcData.parse(jsonrpcstr)
    def jsonrpc(self, env, data):
        """ The main entry point for ^jsonrpc data

        This method will only accept rpcdata.  The first time this method
        is called, per session, it must be using get_authorization.  That
        will return a session token that must be included in every
        subsequent request.  The session is tied to the ip address
        of the caller.

        :param object env: Environment dictionary for the request.
        :param object data: The JSON-RPC 2.0 method to call.
        :return object: An JSON-RPC 2.0 response.
        """
        if env['REQUEST_METHOD'].upper() != 'POST':
            return jsonrpc.json_error('NA', INVALID_REQUEST,
                                      'Invalid request method, only POST allowed')
        try:
            rpcdata = self._to_jsonrpc_obj(data)
            _log.info('rpc method: {}'.format(rpcdata.method))
            if rpcdata.method == 'get_authorization':
                # Authentication url
                # This does not need to be local, however for now we are going to
                # make it so assuming only one level of authentication.
                auth_url = "{url_scheme}://{HTTP_HOST}/authenticate".format(
                    url_scheme=env['wsgi.url_scheme'],
                    HTTP_HOST=env['HTTP_HOST'])
                user = rpcdata.params['username']
                args = {'username': rpcdata.params['username'],
                        'password': rpcdata.params['password'],
                        'ip': env['REMOTE_ADDR']}
                # NOTE(review): verify=False disables TLS certificate
                # verification on this loopback auth call — confirm this is
                # intentional and the target is always the local instance.
                resp = requests.post(auth_url, json=args, verify=False)
                if resp.ok and resp.text:
                    claims = self.vip.web.get_user_claims(jsonapi.loads(resp.text)["access_token"])
                    # Because the web-user.json has the groups under a key and the
                    # groups is just passed into the session we need to make sure
                    # we pass in the proper thing to the _add_sesion function.
                    assert 'groups' in claims
                    authentication_token = resp.text
                    sess = authentication_token
                    self._authenticated_sessions._add_session(user=user,
                                                              groups=claims['groups'],
                                                              token=authentication_token,
                                                              ip=env['REMOTE_ADDR'])
                else:
                    # Fall back to the local user store.
                    sess = self._authenticated_sessions.authenticate(**args)
                if not sess:
                    _log.info('Invalid username/password for {}'.format(
                        rpcdata.params['username']))
                    return jsonrpc.json_error(
                        rpcdata.id, UNAUTHORIZED,
                        "Invalid username/password specified.")
                _log.info('Session created for {}'.format(
                    rpcdata.params['username']))
                # Management websocket bound to this session token.
                # _ws_closed/_received_data are defined elsewhere in the class.
                self.vip.web.register_websocket(
                    "/vc/ws/{}/management".format(sess),
                    self.open_authenticate_ws_endpoint,
                    self._ws_closed,
                    self._received_data)
                # NOTE(review): this 'Session created' log line duplicates the
                # one above — presumably accidental.
                _log.info('Session created for {}'.format(
                    rpcdata.params['username']))
                gevent.sleep(1)
                return jsonrpc.json_result(rpcdata.id, sess)
            # Every non-auth request must carry a valid session token that
            # matches the caller's ip.
            token = rpcdata.authorization
            ip = env['REMOTE_ADDR']
            _log.debug('REMOTE_ADDR: {}'.format(ip))
            session_user = self._authenticated_sessions.check_session(token, ip)
            _log.debug('SESSION_USER IS: {}'.format(session_user))
            if not session_user:
                _log.debug("Session Check Failed for Token: {}".format(token))
                return jsonrpc.json_error(rpcdata.id, UNAUTHORIZED,
                                          "Invalid authentication token")
            _log.debug('RPC METHOD IS: {}'.format(rpcdata.method))
            # Route any other method that isn't
            result_or_error = self._route_request(session_user,
                                                  rpcdata.id, rpcdata.method,
                                                  rpcdata.params)
        except AssertionError:
            return jsonrpc.json_error(
                'NA', INVALID_REQUEST, 'Invalid rpc data {}'.format(data))
        except Unreachable:
            return jsonrpc.json_error(
                rpcdata.id, UNAVAILABLE_PLATFORM,
                "Couldn't reach platform with method {} params: {}".format(
                    rpcdata.method,
                    rpcdata.params))
        except Exception as e:
            return jsonrpc.json_error(
                'NA', UNHANDLED_EXCEPTION, str(e)
            )
        return self._get_jsonrpc_response(rpcdata.id, result_or_error)
def _get_jsonrpc_response(self, id, result_or_error):
""" Wrap the response in either a json-rpc error or result.
:param id:
:param result_or_error:
:return:
"""
if isinstance(result_or_error, dict):
if 'jsonrpc' in result_or_error:
return result_or_error
if result_or_error is not None and isinstance(result_or_error, dict):
if 'error' in result_or_error:
error = result_or_error['error']
_log.debug("RPC RESPONSE ERROR: {}".format(error))
return jsonrpc.json_error(id, error['code'], error['message'])
return jsonrpc.json_result(id, result_or_error)
    def _get_agents(self, instance_uuid, groups):
        """ Retrieve the list of agents on a specific platform.

        Each returned agent dict is annotated with vc_can_start/stop/restart
        flags derived from the caller's groups: admins may manage everything
        except starting/stopping the platform agent and volttron central
        themselves; non-admins may only restart nothing (all flags False).

        NOTE(review): ``self._platform_connections`` is not initialized in the
        visible ``__init__`` (only a commented-out ``vcp_connections`` is) —
        presumably set elsewhere; confirm this method is still reachable.

        :param instance_uuid: uuid of the managed platform.
        :param groups: group names of the requesting session user.
        :return: list of agent dicts with vc_can_* flags added.
        """
        _log.debug('_get_agents with groups: {}'.format(groups))
        connected_to_pa = self._platform_connections[instance_uuid]
        agents = connected_to_pa.agent.vip.rpc.call(
            'platform.agent', 'list_agents').get(timeout=30)
        for a in agents:
            if 'admin' in groups:
                # The platform agent and vc itself must stay running; allow
                # restart only.
                if "platformagent" in a['name'] or \
                        "volttroncentral" in a['name']:
                    a['vc_can_start'] = False
                    a['vc_can_stop'] = False
                    a['vc_can_restart'] = True
                else:
                    a['vc_can_start'] = True
                    a['vc_can_stop'] = True
                    a['vc_can_restart'] = True
            else:
                # Handle the permissions that are not admin.
                a['vc_can_start'] = False
                a['vc_can_stop'] = False
                a['vc_can_restart'] = False
        _log.debug('Agents returned: {}'.format(agents))
        return agents
def set_setting(self, session_user, params):
"""
Sets or removes a setting from the config store. If the value is None
then the item will be removed from the store. If there is an error in
saving the value then a jsonrpc.json_error object is returned.
:param session_user: Unused
:param params: Dictionary that must contain 'key' and 'value' keys.
:return: A 'SUCCESS' string or a jsonrpc.json_error object.
"""
if 'key' not in params or not params['key']:
return jsonrpc.json_error(params['message_id'],
INVALID_PARAMS,
'Invalid parameter key not set')
if 'value' not in params:
return jsonrpc.json_error(params['message_id'],
INVALID_PARAMS,
'Invalid parameter key not set')
config_key = "settings/{}".format(params['key'])
value = params['value']
if value is None:
try:
self.vip.config.delete(config_key)
except KeyError:
pass
else:
# We handle empt string here because the config store doesn't allow
# empty strings to be set as a config store. I wasn't able to
# trap the ValueError that is raised on the server side.
if value == "":
return jsonrpc.json_error(params['message_id'],
INVALID_PARAMS,
'Invalid value set (empty string?)')
self.vip.config.set(config_key, value)
return 'SUCCESS'
def get_setting(self, session_user, params):
"""
Retrieve a value from the passed setting key. The params object must
contain a "key" to return from the settings store.
:param session_user: Unused
:param params: Dictionary that must contain a 'key' key.
:return: The value or a jsonrpc error object.
"""
config_key = "settings/{}".format(params['key'])
try:
value = self.vip.config.get(config_key)
except KeyError:
return jsonrpc.json_error(params['message_id'],
INVALID_PARAMS,
'Invalid key specified')
else:
return value
def get_setting_keys(self, session_user, params):
"""
Returns a list of all of the settings keys so the caller can know
what settings to request.
:param session_user: Unused
:param params: Unused
:return: A list of settings available to the caller.
"""
prefix = "settings/"
keys = [x[len(prefix):] for x in self.vip.config.list()
if x.startswith(prefix)]
return keys or []
def send_management_message(self, type, data={}):
"""
Send a message to any socket that has connected to the management
socket.
The payload sent to the client is like the following::
{
"type": "UPDATE_DEVICE_STATUS",
"data": "this is data that was passed"
}
:param type:
A string defining a unique type for sending to the websockets.
:param data:
An object that str can be called on.
:type type: str
:type data: serializable
"""
management_sockets = [s for s in self._websocket_endpoints
if s.endswith("management")]
# Nothing to send if we don't have any management sockets open.
if len(management_sockets) <= 0:
return
if data is None:
data = {}
payload = dict(
type=type,
data=str(data)
)
payload = jsonapi.dumps(payload)
for s in management_sockets:
self.vip.web.send(s, payload)
def _route_request(self, session_user, id, method, params):
""" Handle the methods volttron central can or pass off to platforms.
:param session_user:
The authenticated user's session info.
:param id:
JSON-RPC id field.
:param method:
:param params:
:return:
"""
_log.debug(
'inside _route_request {}, {}, {}'.format(id, method, params))
self.send_management_message(method)
method_split = method.split('.')
# The last part of the jsonrpc method is the actual method to be called.
method_check = method_split[-1]
# These functions will be sent to a platform.agent on either this
# instance or another. All of these functions have the same interface
# and can be collected into a dictionary rather than an if tree.
platform_methods = dict(
# bacnet related
start_bacnet_scan=self._handle_bacnet_scan,
publish_bacnet_props=self._handle_bacnet_props,
# config store related
store_agent_config="store_agent_config",
get_agent_config="get_agent_config",
delete_agent_config="delete_agent_config",
list_agent_configs="get_agent_config_list",
# management related
list_agents="get_agent_list",
get_devices="get_devices",
status_agents="status_agents"
)
# These methods are specifically to be handled by the platform not any
# agents on the platform that is why we have the length requirement.
#
# The jsonrpc method looks like the following
#
# platform.uuid.<dynamic entry>.method_on_vcp
if method_check in platform_methods:
platform_uuid = None
if isinstance(params, dict):
platform_uuid = params.pop('platform_uuid', None)
if platform_uuid is None:
if method_split[0] == 'platforms' and method_split[1] == 'uuid':
platform_uuid = method_split[2]
if not platform_uuid:
return err("Invalid platform_uuid specified as parameter"
.format(platform_uuid),
INVALID_PARAMS)
if not self._platforms.is_registered(platform_uuid):
return err("Unknown or unavailable platform {} specified as "
"parameter".format(platform_uuid),
UNAVAILABLE_PLATFORM)
try:
_log.debug('Calling {} on platform {}'.format(
method_check, platform_uuid
))
class_method = platform_methods[method_check]
platform = self._platforms.get_platform(platform_uuid)
# Determine whether the method to call is on the current class
# or on the platform object.
if isinstance(class_method, str):
method_ref = getattr(platform, class_method)
else:
method_ref = class_method
# Put the platform_uuid in the params so it can be used
# inside the method
params['platform_uuid'] = platform_uuid
except AttributeError or KeyError:
return jsonrpc.json_error(id, INTERNAL_ERROR,
"Attempted calling function "
"{} was unavailable".format(
class_method
))
except ValueError:
return jsonrpc.json_error(id, UNAVAILABLE_PLATFORM,
"Couldn't connect to platform "
"{}".format(platform_uuid))
else:
# pass the id through the message_id parameter.
if not params:
params = dict(message_id=id)
else:
params['message_id'] = id
# Methods will all have the signature
# method(session, params)
#
return method_ref(session_user, params)
vc_methods = dict(
register_management_endpoint=self._handle_management_endpoint,
list_platforms=self._platforms.get_platform_list,
list_performance=self._platforms.get_performance_list,
# Settings
set_setting=self.set_setting,
get_setting=self.get_setting,
get_setting_keys=self.get_setting_keys,
# Setup mode
enable_setup_mode=self._enable_setup_mode,
disable_setup_mode=self._disable_setup_mode
)
if method in vc_methods:
if not params:
params = dict(message_id=id)
else:
params['message_id'] = id
response = vc_methods[method](session_user, params)
_log.debug("Response is {}".format(response))
return response # vc_methods[method](session_user, params)
if method == 'register_instance':
if isinstance(params, list):
return self._register_instance(*params)
else:
return self._register_instance(**params)
elif method == 'unregister_platform':
return self.unregister_platform(params['instance_uuid'])
elif 'historian' in method:
has_platform_historian = PLATFORM_HISTORIAN in \
self.vip.peerlist().get(timeout=30)
if not has_platform_historian:
return err(
'The VOLTTRON Central platform historian is unavailable.',
UNAVAILABLE_AGENT)
_log.debug('Trapping platform.historian to vc.')
_log.debug('has_platform_historian: {}'.format(
has_platform_historian))
if 'historian.query' in method:
return self.vip.rpc.call(
PLATFORM_HISTORIAN, 'query', **params).get(timeout=30)
elif 'historian.get_topic_list' in method:
return self.vip.rpc.call(
PLATFORM_HISTORIAN, 'get_topic_list').get(timeout=30)
# This isn't known as a proper method on vc or a platform.
if len(method_split) < 3:
return err('Unknown method {}'.format(method))
if method_split[0] != 'platforms' or method_split[1] != 'uuid':
return err('Invalid format for instance must start with '
'platforms.uuid')
instance_uuid = method_split[2]
_log.debug('Instance uuid is: {}'.format(instance_uuid))
if not self._platforms.is_registered(instance_uuid):
return err('Unknown platform {}'.format(instance_uuid))
platform_method = '.'.join(method_split[3:])
_log.debug("Platform method is: {}".format(platform_method))
platform = self._platforms.get_platform(instance_uuid)
if not platform:
return jsonrpc.json_error(id,
UNAVAILABLE_PLATFORM,
"cannot connect to platform."
)
if platform_method.startswith('install'):
if 'admin' not in session_user['groups']:
return jsonrpc.json_error(
id, UNAUTHORIZED,
"Admin access is required to install agents")
return platform.route_to_agent_method(id, platform_method, params)
def _validate_config_params(self, config):
"""
Validate the configuration parameters of the default/updated parameters.
This method will return a list of "problems" with the configuration.
If there are no problems then an empty list is returned.
:param config: Configuration parameters for the volttron central agent.
:type config: dict
:return: The problems if any, [] if no problems
:rtype: list
"""
problems = []
webroot = config.get('webroot')
if not webroot:
problems.append('Invalid webroot in configuration.')
elif not os.path.exists(webroot):
problems.append(
'Webroot {} does not exist on machine'.format(webroot))
users = config.get('users')
if not users:
problems.append('A users node must be specified!')
else:
has_admin = False
try:
for user, item in users.items():
if 'password' not in item.keys():
problems.append('user {} must have a password!'.format(
user))
elif not item['password']:
problems.append('password for {} is blank!'.format(
user
))
if 'groups' not in item:
problems.append('missing groups key for user {}'.format(
user
))
elif not isinstance(item['groups'], list):
problems.append('groups must be a list of strings.')
elif not item['groups']:
problems.append(
'user {} must belong to at least one group.'.format(
user))
# See if there is an adminstator present.
if not has_admin and isinstance(item['groups'], list):
has_admin = 'admin' in item['groups']
except AttributeError:
problems.append('invalid user node.')
if not has_admin:
problems.append("One user must be in the admin group.")
return problems
def main(argv=sys.argv):
    """
    Main method called by the eggsecutable.

    :param argv: command line arguments (unused beyond the default).
    :return: None

    NOTE(review): ``init_volttron_central`` is not defined in this portion of
    the file — presumably a factory declared elsewhere in the module; confirm.
    """
    utils.vip_main(init_volttron_central, identity=VOLTTRON_CENTRAL,
                   version=__version__)
if __name__ == '__main__':
    # Entry point for script
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        # Allow a clean Ctrl-C shutdown without a traceback.
        pass
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
22935,
90,
198,
2,
43907,
25,
900,
277,
12685,
28,
40477,
12,
23,
10117,
28,
29412,
1509,
28,
19,
40379,
28,
19,
39747,
28,
19,
2123,
25,
198,
2,
198,
2,
15069,
12131,
... | 2.184738 | 15,162 |
import pytest
from ...utilities import colors
| [
11748,
12972,
9288,
198,
198,
6738,
2644,
315,
2410,
1330,
7577,
628,
628
] | 3.846154 | 13 |
import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import Int16
from cv_bridge import CvBridge
import cv2
import numpy as np
# pip install pillow
from PIL import Image
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
if __name__ == '__main__':
    #rospy.init_node('rostensorflow')
    # NOTE(review): RosTensorFlow is not defined in this chunk — presumably
    # the class is declared above in the file; confirm.  Also note that the
    # module-level `from PIL import Image` shadows the earlier
    # `from sensor_msgs.msg import Image` import.
    tensor = RosTensorFlow()
    tensor.main()
    exit()
    #rospy.spin()
| [
11748,
686,
2777,
88,
198,
6738,
12694,
62,
907,
14542,
13,
19662,
1330,
7412,
198,
6738,
14367,
62,
907,
14542,
13,
19662,
1330,
2558,
1433,
198,
6738,
269,
85,
62,
9458,
1330,
327,
85,
37385,
198,
11748,
269,
85,
17,
198,
11748,
2... | 2.588957 | 163 |
# Rework of DESAFIO 051: read the first term and the ratio of an arithmetic
# progression, show its first 10 terms with a while loop, then keep asking
# how many further terms to show until the user enters 0.
term = int(input('Primeiro termo:'))
ratio = int(input('Razão pa:'))
more = 1          # sentinel: 0 means the user wants to stop
pending = 10      # terms left to print in the current batch (incl. the first)
shown = 1         # total count of terms printed
# NOTE(review): the '!' suffix looks copied from a factorial exercise, but it
# is kept so the output stays identical.
print('{}!'.format(term), end=' = ')
print(term, end=' => ')
while more != 0:
    # Emit the remaining terms of this batch.
    while pending != 1:
        term += ratio
        print(term, end=' => ')
        pending -= 1
        shown += 1
    print('PAUSE')
    more = int(input('''Que continuar?
Digite 0 parar encerrar.
Se não quantas vezes quer fazer:'''))
    pending = more + 1
print('''FIM
Teve {} termos'''.format(shown))
| [
2,
6524,
64,
50041,
267,
22196,
8579,
9399,
657,
4349,
11,
22096,
78,
267,
6994,
7058,
3381,
78,
304,
257,
374,
1031,
28749,
390,
334,
2611,
8147,
11,
198,
2,
749,
25192,
78,
28686,
838,
6994,
72,
4951,
3381,
418,
12379,
4371,
28749... | 2.145594 | 261 |
# -*- coding: utf-8 -*-
from memcached_status import view
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
1066,
66,
2317,
62,
13376,
1330,
1570,
198
] | 2.521739 | 23 |
# The web functionality
import pork | [
2,
383,
3992,
11244,
198,
198,
11748,
18128
] | 4.5 | 8 |
# -*- coding: utf-8 -*-
# @Time : 2021/1/3
# @Author : Lart Pang
# @GitHub : https://github.com/lartpang
import os
import re
from openpyxl import Workbook, load_workbook
from openpyxl.utils import get_column_letter
from openpyxl.worksheet.worksheet import Worksheet
# Thanks:
# - Python_Openpyxl: https://www.cnblogs.com/programmer-tlh/p/10461353.html
# - Python之re模块: https://www.cnblogs.com/shenjianping/p/11647473.html
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
220,
220,
220,
1058,
33448,
14,
16,
14,
18,
198,
2,
2488,
13838,
220,
1058,
406,
433,
350,
648,
198,
2,
2488,
38,
270,
16066,
220,
1058,
3740,
1378,
... | 2.413408 | 179 |
# Copyright 2018, Erlang Solutions Ltd, and S2HC Sweden AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import traceback
from typing import Type
import gevent
from gevent import socket, select
from gevent.queue import Queue, Empty
from gevent.server import StreamServer
from pyrlang.async_support.base_engine import BaseEngine, BaseQueue
from pyrlang.async_support.base_protocol import BaseProtocol
LOG = logging.getLogger("pyrlang")
class GeventEngine(BaseEngine):
""" Compatibility driver for Gevent.
Create it before creating Node and pass as argument 'engine' like so:
.. code-block:: python
e = GeventEngine()
node = Node(name="py@127.0.0.1", cookie="COOKIE", engine=e)
"""
def queue_new(self) -> BaseQueue:
""" Create Gevent queue adapter """
return GeventQueue()
def connect_with(self, protocol_class: Type[BaseProtocol], host_port: tuple,
protocol_args: list, protocol_kwargs: dict
) -> (BaseProtocol, socket.socket):
""" Helper which creates a new connection and feeds the data stream into
a protocol handler class.
:rtype: tuple(protocol_class, gevent.socket)
:param protocol_class: A handler class which has handler functions like
on_connected, consume, and on_connection_lost
:param protocol_kwargs: Keyword args to pass to the handler constructor
:param protocol_args: Args to pass to the handler constructor
:param host_port: (host,port) tuple where to connect
"""
LOG.info("Will connect to %s", host_port)
sock = socket.create_connection(address=host_port)
handler = protocol_class(*protocol_args, **protocol_kwargs)
handler.on_connected(host_port)
LOG.info("Connection to %s established", host_port)
try:
g = gevent.spawn(_read_loop, proto=handler, sock=sock)
g.start()
except Exception as e:
LOG.error("Exception: %s", traceback.format_exc())
return handler, sock
def spawn(self, loop_fn):
""" Spawns a task which will call loop_fn repeatedly while it
returns False, else will stop. """
greenlet = gevent.spawn(lambda: _generic_gevent_loop(loop_fn))
greenlet.start()
#
# Helpers for serving incoming connections and reading from the connected socket
#
def _call_later_helper(t, fn):
""" Sleeps T amount of seconds then calls a callable fn and dies. """
gevent.sleep(t)
fn()
def make_serve_loop(protocol_class: Type[BaseProtocol],
protocol_args: list,
protocol_kwargs: dict):
""" A basic connection handler that takes an accepted connection and feeds
the data stream into the receiver protocol handler class.
:param protocol_kwargs: Keyword args to pass to the handler class constructor
:param protocol_args: Args to pass to the handler class constructor
:type protocol_class: class
:param protocol_class: A handler class which has handler functions like
on_connected, consume, and on_connection_lost
:return: A handler function suitable for passing to StreamServer
"""
return _serve_loop
| [
2,
15069,
2864,
11,
5256,
17204,
23555,
12052,
11,
290,
311,
17,
16045,
10710,
9564,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
... | 2.793154 | 1,373 |
from tkinter import *
from quiz_brain import QuizBrain
THEME_COLOR = "#375362"
FONT = ("Arial", 20, "italic")
# def call_check_answer(self) -> str:
# is_correct = self.quiz.check_answer()
# if is_correct:
# print('You are right!')
# return 'True'
# else:
# print('You are wrong!')
# return 'False'
| [
6738,
256,
74,
3849,
1330,
1635,
198,
6738,
38964,
62,
27825,
1330,
2264,
528,
44687,
198,
198,
4221,
3620,
36,
62,
46786,
796,
25113,
22318,
35667,
1,
198,
37,
35830,
796,
5855,
32,
4454,
1600,
1160,
11,
366,
1287,
291,
4943,
628,
... | 2.088398 | 181 |
from pyboltzmann.generic_classes import *
from .binary_tree import BinaryTree
__all__ = ["CombinatorialClass",
"ZeroAtomClass",
"LAtomClass",
"UAtomClass",
"ProdClass",
"SetClass",
"DerivedClass",
"LDerivedClass",
"UDerivedClass",
"BinaryTree",
"network",
"three_connected_graph"
] | [
6738,
12972,
25593,
89,
9038,
13,
41357,
62,
37724,
1330,
1635,
198,
6738,
764,
39491,
62,
21048,
1330,
45755,
27660,
198,
834,
439,
834,
796,
14631,
20575,
20900,
498,
9487,
1600,
220,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220... | 1.815451 | 233 |
# Ke Chen
# knutchen@ucsd.edu
# HTS-AT: A HIERARCHICAL TOKEN-SEMANTIC AUDIO TRANSFORMER FOR SOUND CLASSIFICATION AND DETECTION
# Convert the DESED dataset
import numpy as np
import os
import librosa
import config
from utils import float32_to_int16
import soundfile as sf
if __name__ == '__main__':
main()
| [
2,
3873,
12555,
198,
2,
638,
315,
6607,
31,
1229,
21282,
13,
15532,
198,
2,
367,
4694,
12,
1404,
25,
317,
36210,
1137,
31315,
20151,
5390,
43959,
12,
50,
3620,
8643,
2149,
41260,
9399,
44069,
21389,
1137,
7473,
311,
15919,
42715,
3064... | 2.845455 | 110 |