content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!/usr/bin/env python3
from test_app import cache_pool
from sim.pretty import dictList2Table
if __name__ == "__main__":
test()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
201,
198,
201,
198,
6738,
1332,
62,
1324,
1330,
12940,
62,
7742,
201,
198,
201,
198,
6738,
985,
13,
37784,
1330,
8633,
8053,
17,
10962,
201,
198,
201,
198,
201,
198,
201,
198,
361,
... | 2.393443 | 61 |
# -*- coding: utf-8 -*-
"""Base Site class."""
from typing import ClassVar
from typing import Dict
from mrsimulator.utils.parseable import Parseable
from pydantic import validator
from .isotope import Isotope
from .tensors import AntisymmetricTensor
from .tensors import SymmetricTensor
__author__ = "Deepansh Srivastava"
__email__ = "srivastava.89@osu.edu"
class Site(Parseable):
"""Base class representing a single-site nuclear spin interaction tensor parameters.
The single-site nuclear spin interaction tensors include the nuclear shielding
and the electric quadrupolar tensor.
.. rubric:: Attribute Documentation
Attributes
----------
isotope: str (optional).
A string expressed as an atomic number followed by an isotope symbol, eg.,
`'13C'`, `'17O'`. The default value is `'1H'`.
Example
-------
>>> site = Site(isotope='2H')
isotropic_chemical_shift: float (optional).
The isotropic chemical shift of the site in ppm. The default value is 0.
Example
-------
>>> site.isotropic_chemical_shift = 43.3
shielding_symmetric: :ref:`sy_api` or equivalent dict object (optional).
The attribute represents the parameters of the irreducible second-rank traceless
symmetric part of the nuclear shielding tensor. The default value is None.
The allowed attributes of the :ref:`sy_api` class for `shielding_symmetric` are
``zeta``, ``eta``, ``alpha``, ``beta``, and ``gamma``, where ``zeta`` is the
shielding anisotropy, in ppm, and ``eta`` is the shielding asymmetry parameter
defined using the Haeberlen convention. The Euler angles ``alpha``, ``beta``,
and ``gamma`` are in radians.
Example
-------
>>> site.shielding_symmetric = {'zeta': 10, 'eta': 0.5}
>>> # or equivalently
>>> site.shielding_symmetric = SymmetricTensor(zeta=10, eta=0.5)
shielding_antisymmetric: :ref:`asy_api` or equivalent dict object (optional).
The attribute represents the parameters of the irreducible first-rank
antisymmetric part of the nuclear shielding tensor. The default value is None.
The allowed attributes of the :ref:`asy_api` class for
`shielding_antisymmetric` are ``zeta``, ``alpha``, and ``beta``, where ``zeta``
is the anisotropy parameter, in ppm, of the anti-symmetric first-rank tensor.
The angles ``alpha`` and ``beta`` are in radians.
Example
-------
>>> site.shielding_antisymmetric = {'zeta': 20}
>>> # or equivalently
>>> site.shielding_antisymmetric = AntisymmetricTensor(zeta=20)
quadrupolar: :ref:`sy_api` or equivalent dict object (optional).
The attribute represents the parameters of the traceless irreducible second-rank
symmetric part of the electric-field gradient tensor. The default value is None.
The allowed attributes of the :ref:`sy_api` class for `quadrupolar` are ``Cq``,
``eta``, ``alpha``, ``beta``, and ``gamma``, where ``Cq`` is the quadrupolar
coupling constant, in Hz, and ``eta`` is the quadrupolar asymmetry parameter.
The Euler angles ``alpha``, ``beta``, and ``gamma`` are in radians.
Example
-------
>>> site.quadrupolar = {'Cq': 3.2e6, 'eta': 0.52}
>>> # or equivalently
>>> site.quadrupolar = SymmetricTensor(Cq=3.2e6, eta=0.52)
name: str (optional).
The name or id of the site. The default value is None.
Example
-------
>>> site.name = '2H-0'
>>> site.name
'2H-0'
label: str (optional).
The label for the site. The default value is None.
Example
-------
>>> site.label = 'Quad site'
>>> site.label
'Quad site'
description: str (optional).
A description of the site. The default value is None.
Example
-------
>>> site.description = 'An example Quadrupolar site.'
>>> site.description
'An example Quadrupolar site.'
Example
-------
The following are a few examples of the site object.
>>> site1 = Site(
... isotope='33S',
... isotropic_chemical_shift=20, # in ppm
... shielding_symmetric={
... "zeta": 10, # in ppm
... "eta": 0.5
... },
... quadrupolar={
... "Cq": 5.1e6, # in Hz
... "eta": 0.5
... }
... )
Using SymmetricTensor objects.
>>> site1 = Site(
... isotope='13C',
... isotropic_chemical_shift=20, # in ppm
... shielding_symmetric=SymmetricTensor(zeta=10, eta=0.5),
... )
"""
isotope: str = "1H"
isotropic_chemical_shift: float = 0.0
shielding_symmetric: SymmetricTensor = None
shielding_antisymmetric: AntisymmetricTensor = None
quadrupolar: SymmetricTensor = None
property_unit_types: ClassVar[Dict] = {"isotropic_chemical_shift": "dimensionless"}
property_default_units: ClassVar[Dict] = {"isotropic_chemical_shift": "ppm"}
property_units: Dict = {"isotropic_chemical_shift": "ppm"}
@validator("quadrupolar")
@validator("shielding_symmetric", "shielding_antisymmetric")
@validator("isotope", always=True)
@classmethod
def parse_dict_with_units(cls, py_dict: dict):
"""Parse the physical quantity from a dictionary representation of the Site
object, where the physical quantity is expressed as a string with a number and
a unit.
Args:
dict py_dict: A required python dict object.
Returns:
:ref:`site_api` object.
Example
-------
>>> site_dict = {
... "isotope": "13C",
... "isotropic_chemical_shift": "20 ppm",
... "shielding_symmetric": {"zeta": "10 ppm", "eta":0.5}
... }
>>> site1 = Site.parse_dict_with_units(site_dict)
"""
prop_mapping = {
"shielding_symmetric": SymmetricTensor,
"shielding_antisymmetric": AntisymmetricTensor,
"quadrupolar": SymmetricTensor,
}
for k, v in prop_mapping.items():
if k in py_dict:
py_dict[k] = v.parse_dict_with_units(py_dict[k])
return super().parse_dict_with_units(py_dict)
# Deprecated
# def to_freq_dict(self, B0):
# """
# Serialize the Site object to a JSON compliant python dictionary object, where
# the attribute value is a number expressed in the attribute's default unit.
# The default unit for the attributes with respective dimensionalities is:
# - frequency: ``Hz``
# - angle: ``rad``
# Args:
# float B0: A required macroscopic magnetic flux density in units of T.
# Return:
# Python dict object.
# Example
# -------
# >>> pprint(site1.to_freq_dict(B0=9.4))
# {'description': None,
# 'isotope': '13C',
# 'isotropic_chemical_shift': -2013.1791999999998,
# 'label': None,
# 'name': None,
# 'quadrupolar': None,
# 'shielding_antisymmetric': None,
# 'shielding_symmetric': {'alpha': None,
# 'beta': None,
# 'eta': 0.5,
# 'gamma': None,
# 'zeta': -1006.5895999999999}}
# """
# temp_dict = self.dict(exclude={"isotope"})
# temp_dict["isotope"] = self.isotope.symbol
# larmor_frequency = -self.isotope.gyromagnetic_ratio * B0 # in MHz
# for k in ["shielding_symmetric", "shielding_antisymmetric", "quadrupolar"]:
# if getattr(self, k):
# temp_dict[k] = getattr(self, k).to_freq_dict(larmor_frequency)
# if k == "shielding_symmetric":
# temp_dict[k].pop("Cq")
# temp_dict["isotropic_chemical_shift"] *= larmor_frequency
# temp_dict.pop("property_units")
# return temp_dict
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
14881,
14413,
1398,
526,
15931,
198,
6738,
19720,
1330,
5016,
19852,
198,
6738,
19720,
1330,
360,
713,
198,
198,
6738,
285,
3808,
320,
8927,
13,
26791,
13,
29572,
... | 2.338395 | 3,490 |
projects = {
'cmip6': {
'start_dir': '/badc/cmip6/data/cmip6',
'base': '/badc/cmip6/data',
'prefacets': 3,
'facets': 'badc cmip6 data mip_era activity_id institution_id source_id experiment_id member_id table_id variable_id grid_label version'.split(),
'facet_picks': {'ensemble': 'r1i1p1'},
'wildcards': ['institution', 'model']
},
'c3s-cmip5': {
'start_dir': '/group_workspaces/jasmin2/cp4cds1/vol1/data/c3s-cmip5',
'base': '/group_workspaces/jasmin2/cp4cds1/vol1/data',
'var_index': -2,
'freq_index': 5,
'prefacets': 6,
'facets': '_ _ _ _ _ activity product institute model experiment frequency realm mip_table ensemble_member variable version'.split(),
'facet_picks': {'ensemble': 'r1i1p1'},
'wildcards': ['institution', 'model']
}
}
| [
42068,
796,
1391,
198,
220,
220,
220,
705,
11215,
541,
21,
10354,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
705,
9688,
62,
15908,
10354,
31051,
14774,
66,
14,
11215,
541,
21,
14,
7890,
14,
11215,
541,
21,
3256,
198,
220,
220,
... | 2.122549 | 408 |
s = input()
answer = len(s)
k = s.count('a')
for i in range(len(s)):
cnt = 0
for j in range(k):
if s[(i+j)%len(s)] == 'b':
cnt+=1
if cnt < answer:
answer = cnt
print(answer) | [
82,
796,
5128,
3419,
198,
41484,
796,
18896,
7,
82,
8,
198,
74,
796,
264,
13,
9127,
10786,
64,
11537,
198,
1640,
1312,
287,
2837,
7,
11925,
7,
82,
8,
2599,
198,
220,
220,
220,
269,
429,
796,
657,
198,
220,
220,
220,
329,
474,
... | 1.805085 | 118 |
# coding: utf-8
"""
Jamf Pro API
## Overview This is a sample Jamf Pro server which allows for usage without any authentication. The Jamf Pro environment which supports the Try it Out functionality does not run the current beta version of Jamf Pro, thus any newly added endpoints will result in an error and should be used soley for documentation purposes. # noqa: E501
The version of the OpenAPI document: 10.25.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import jamf
from jamf.api.jamf_pro_notifications_preview_api import JamfProNotificationsPreviewApi # noqa: E501
from jamf.rest import ApiException
class TestJamfProNotificationsPreviewApi(unittest.TestCase):
"""JamfProNotificationsPreviewApi unit test stubs"""
def test_notifications_alerts_get(self):
"""Test case for notifications_alerts_get
Get Notifications for user and site # noqa: E501
"""
pass
def test_notifications_alerts_id_delete(self):
"""Test case for notifications_alerts_id_delete
DEPRECATED - USE \"alerts/{type}/{id}\" INSTEAD. Deletes only Patch Management notifications. # noqa: E501
"""
pass
def test_notifications_alerts_type_id_delete(self):
"""Test case for notifications_alerts_type_id_delete
Delete Notifications # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
9986,
69,
1041,
7824,
628,
220,
220,
220,
22492,
28578,
770,
318,
257,
6291,
9986,
69,
1041,
4382,
543,
3578,
329,
8748,
1231,
597,
18239,
13,
383,
9986,
69,
104... | 2.881323 | 514 |
import sys
# sys.path.append(r"F:\Work\Maptor\venv\Model")
from ClassificationModel import ClassificationModel
| [
11748,
25064,
198,
2,
25064,
13,
6978,
13,
33295,
7,
81,
1,
37,
7479,
12468,
59,
44,
2373,
273,
59,
574,
85,
59,
17633,
4943,
198,
6738,
40984,
17633,
1330,
40984,
17633,
628
] | 3.393939 | 33 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mediapipe/calculators/image/image_clone_calculator.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from mediapipe.framework import calculator_pb2 as mediapipe_dot_framework_dot_calculator__pb2
mediapipe_dot_framework_dot_calculator__options__pb2 = mediapipe_dot_framework_dot_calculator__pb2.mediapipe_dot_framework_dot_calculator__options__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mediapipe/calculators/image/image_clone_calculator.proto',
package='mediapipe',
syntax='proto2',
serialized_pb=_b('\n8mediapipe/calculators/image/image_clone_calculator.proto\x12\tmediapipe\x1a$mediapipe/framework/calculator.proto\"\x92\x01\n\x1bImageCloneCalculatorOptions\x12\x1c\n\routput_on_gpu\x18\x01 \x01(\x08:\x05\x66\x61lse2U\n\x03\x65xt\x12\x1c.mediapipe.CalculatorOptions\x18\xc6\xe6\xe0\xb1\x01 \x01(\x0b\x32&.mediapipe.ImageCloneCalculatorOptions')
,
dependencies=[mediapipe_dot_framework_dot_calculator__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_IMAGECLONECALCULATOROPTIONS = _descriptor.Descriptor(
name='ImageCloneCalculatorOptions',
full_name='mediapipe.ImageCloneCalculatorOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='output_on_gpu', full_name='mediapipe.ImageCloneCalculatorOptions.output_on_gpu', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
_descriptor.FieldDescriptor(
name='ext', full_name='mediapipe.ImageCloneCalculatorOptions.ext', index=0,
number=372781894, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None),
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=110,
serialized_end=256,
)
DESCRIPTOR.message_types_by_name['ImageCloneCalculatorOptions'] = _IMAGECLONECALCULATOROPTIONS
ImageCloneCalculatorOptions = _reflection.GeneratedProtocolMessageType('ImageCloneCalculatorOptions', (_message.Message,), dict(
DESCRIPTOR = _IMAGECLONECALCULATOROPTIONS,
__module__ = 'mediapipe.calculators.image.image_clone_calculator_pb2'
# @@protoc_insertion_point(class_scope:mediapipe.ImageCloneCalculatorOptions)
))
_sym_db.RegisterMessage(ImageCloneCalculatorOptions)
_IMAGECLONECALCULATOROPTIONS.extensions_by_name['ext'].message_type = _IMAGECLONECALCULATOROPTIONS
mediapipe_dot_framework_dot_calculator__options__pb2.CalculatorOptions.RegisterExtension(_IMAGECLONECALCULATOROPTIONS.extensions_by_name['ext'])
# @@protoc_insertion_point(module_scope)
| [
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
16957,
499,
3757,
14,
9948,
3129,
2024,
14,
9060,
14,
9060,
62,
21018,
62,
9948,
3129,
1352,
13,
1676,
1462,
198,
198,
11748,
25064,
198,
... | 2.571757 | 1,303 |
"""Abseil compiler options.
This is the source of truth for Abseil compiler options. To modify Abseil
compilation options:
(1) Edit the appropriate list in this file based on the platform the flag is
needed on.
(2) Run `<path_to_absl>/copts/generate_copts.py`.
The generated copts are consumed by configure_copts.bzl and
AbseilConfigureCopts.cmake.
"""
# /Wall with msvc includes unhelpful warnings such as C4711, C4710, ...
MSVC_BIG_WARNING_FLAGS = [
"/W3",
]
LLVM_TEST_DISABLE_WARNINGS_FLAGS = [
"-Wno-c99-extensions",
"-Wno-deprecated-declarations",
"-Wno-missing-noreturn",
"-Wno-missing-prototypes",
"-Wno-missing-variable-declarations",
"-Wno-null-conversion",
"-Wno-shadow",
"-Wno-shift-sign-overflow",
"-Wno-sign-compare",
"-Wno-unused-function",
"-Wno-unused-member-function",
"-Wno-unused-parameter",
"-Wno-unused-private-field",
"-Wno-unused-template",
"-Wno-used-but-marked-unused",
"-Wno-zero-as-null-pointer-constant",
# gtest depends on this GNU extension being offered.
"-Wno-gnu-zero-variadic-macro-arguments",
]
MSVC_DEFINES = [
"/DNOMINMAX", # Don't define min and max macros (windows.h)
# Don't bloat namespace with incompatible winsock versions.
"/DWIN32_LEAN_AND_MEAN",
# Don't warn about usage of insecure C functions.
"/D_CRT_SECURE_NO_WARNINGS",
"/D_SCL_SECURE_NO_WARNINGS",
# Introduced in VS 2017 15.8, allow overaligned types in aligned_storage
"/D_ENABLE_EXTENDED_ALIGNED_STORAGE",
]
COPT_VARS = {
"ABSL_GCC_FLAGS": [
"-Wall",
"-Wextra",
"-Wcast-qual",
"-Wconversion-null",
"-Wformat-security",
"-Wmissing-declarations",
"-Woverlength-strings",
"-Wpointer-arith",
"-Wundef",
"-Wunused-local-typedefs",
"-Wunused-result",
"-Wvarargs",
"-Wvla", # variable-length array
"-Wwrite-strings",
# Don't define min and max macros (Build on Windows using gcc)
"-DNOMINMAX",
],
"ABSL_GCC_TEST_FLAGS": [
"-Wno-conversion-null",
"-Wno-deprecated-declarations",
"-Wno-missing-declarations",
"-Wno-sign-compare",
"-Wno-unused-function",
"-Wno-unused-parameter",
"-Wno-unused-private-field",
],
"ABSL_LLVM_FLAGS": [
"-Wall",
"-Wextra",
"-Wcast-qual",
"-Wconversion",
"-Wfloat-overflow-conversion",
"-Wfloat-zero-conversion",
"-Wfor-loop-analysis",
"-Wformat-security",
"-Wgnu-redeclared-enum",
"-Winfinite-recursion",
"-Winvalid-constexpr",
"-Wliteral-conversion",
"-Wmissing-declarations",
"-Woverlength-strings",
"-Wpointer-arith",
"-Wself-assign",
"-Wshadow-all",
"-Wstring-conversion",
"-Wtautological-overlap-compare",
"-Wundef",
"-Wuninitialized",
"-Wunreachable-code",
"-Wunused-comparison",
"-Wunused-local-typedefs",
"-Wunused-result",
"-Wvla",
"-Wwrite-strings",
# Warnings that are enabled by group warning flags like -Wall that we
# explicitly disable.
"-Wno-float-conversion",
"-Wno-implicit-float-conversion",
"-Wno-implicit-int-float-conversion",
"-Wno-implicit-int-conversion",
"-Wno-shorten-64-to-32",
"-Wno-sign-conversion",
# Don't define min and max macros (Build on Windows using clang)
"-DNOMINMAX",
],
"ABSL_LLVM_TEST_FLAGS":
LLVM_TEST_DISABLE_WARNINGS_FLAGS,
"ABSL_CLANG_CL_FLAGS":
(MSVC_BIG_WARNING_FLAGS + MSVC_DEFINES),
"ABSL_CLANG_CL_TEST_FLAGS":
LLVM_TEST_DISABLE_WARNINGS_FLAGS,
"ABSL_MSVC_FLAGS":
MSVC_BIG_WARNING_FLAGS + MSVC_DEFINES + [
# Increase the number of sections available in object files
"/bigobj",
"/wd4005", # macro-redefinition
"/wd4068", # unknown pragma
# qualifier applied to function type has no meaning; ignored
"/wd4180",
# conversion from 'type1' to 'type2', possible loss of data
"/wd4244",
# conversion from 'size_t' to 'type', possible loss of data
"/wd4267",
# The decorated name was longer than the compiler limit
"/wd4503",
# forcing value to bool 'true' or 'false' (performance warning)
"/wd4800",
],
"ABSL_MSVC_TEST_FLAGS": [
"/wd4018", # signed/unsigned mismatch
"/wd4101", # unreferenced local variable
"/wd4503", # decorated name length exceeded, name was truncated
"/wd4996", # use of deprecated symbol
"/DNOMINMAX", # disable the min() and max() macros from <windows.h>
],
"ABSL_MSVC_LINKOPTS": [
# Object file doesn't export any previously undefined symbols
"-ignore:4221",
],
# "HWAES" is an abbreviation for "hardware AES" (AES - Advanced Encryption
# Standard). These flags are used for detecting whether or not the target
# architecture has hardware support for AES instructions which can be used
# to improve performance of some random bit generators.
"ABSL_RANDOM_HWAES_ARM64_FLAGS": ["-march=armv8-a+crypto"],
"ABSL_RANDOM_HWAES_ARM32_FLAGS": ["-mfpu=neon"],
"ABSL_RANDOM_HWAES_X64_FLAGS": [
"-maes",
"-msse4.1",
],
"ABSL_RANDOM_HWAES_MSVC_X64_FLAGS": [],
}
| [
37811,
4826,
325,
346,
17050,
3689,
13,
198,
198,
1212,
318,
262,
2723,
286,
3872,
329,
2275,
325,
346,
17050,
3689,
13,
220,
1675,
13096,
2275,
325,
346,
198,
5589,
10520,
3689,
25,
628,
220,
357,
16,
8,
5312,
262,
5035,
1351,
287,... | 2.164384 | 2,555 |
import kerastuner
from autokeras.engine import tuner as tuner_module
class Hyperband(tuner_module.AutoTuner, kerastuner.Hyperband):
"""KerasTuner Hyperband with preprocessing layer tuning."""
pass
| [
11748,
41927,
459,
38886,
198,
198,
6738,
1960,
11020,
292,
13,
18392,
1330,
6278,
263,
355,
6278,
263,
62,
21412,
628,
198,
4871,
15079,
3903,
7,
28286,
263,
62,
21412,
13,
27722,
51,
38886,
11,
41927,
459,
38886,
13,
38197,
3903,
25... | 3.104478 | 67 |
# Copyright (c) 2017 Civic Knowledge. This file is licensed under the terms of the
# MIT License, included in this distribution as LICENSE
"""
CLI program for managing packages
"""
import sys
from os.path import basename, dirname, exists, splitext
from pathlib import Path
import requests
from metapack import Downloader, MetapackDoc
from metapack.cli.core import (
MetapackCliMemo,
err,
list_rr,
prt,
warn,
write_doc
)
from metapack.util import ensure_dir
from metapack_jupyter.core import edit_notebook, set_cell_source
from .hugo import convert_hugo
from .wordpress import convert_wordpress
downloader = Downloader.get_instance()
| [
2,
15069,
357,
66,
8,
2177,
31593,
20414,
13,
770,
2393,
318,
11971,
739,
262,
2846,
286,
262,
198,
2,
17168,
13789,
11,
3017,
287,
428,
6082,
355,
38559,
24290,
198,
198,
37811,
198,
5097,
40,
1430,
329,
11149,
10392,
198,
37811,
1... | 3.077982 | 218 |
#!/usr/bin/env python
#
# MIT License
#
# (C) Copyright 2020-2022 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
import base64
import json
import logging
import os
import re
import time
import kubernetes.client
import kubernetes.config
import oauthlib.oauth2.rfc6749.errors
import requests
import requests_oauthlib
DEFAULT_KEYCLOAK_BASE = 'http://keycloak.services:8080/keycloak'
DEFAULT_CLUSTER_KEYCLOAK_BASE = (
'https://api-gateway.default.svc.cluster.local/keycloak')
DEFAULT_KEYCLOAK_MASTER_ADMIN_CLIENT_ID = 'admin-cli'
DEFAULT_KEYCLOAK_MASTER_ADMIN_USERNAME = 'admin'
DEFAULT_KEYCLOAK_MASTER_ADMIN_PASSWORD = 'adminpwd'
# Namespaces must be JSON serializable via json.loads()
DEFAULT_ADMIN_CLIENT_ID = 'admin-client'
DEFAULT_ADMIN_CLIENT_SECRET_NAME = 'admin-client-auth'
DEFAULT_ADMIN_CLIENT_SECRET_NAMESPACES = json.dumps(['default'])
DEFAULT_SYSTEM_COMPUTE_CLIENT_ID = 'system-compute-client'
DEFAULT_SYSTEM_COMPUTE_CLIENT_SECRET_NAME = 'system-compute-client-auth'
DEFAULT_SYSTEM_COMPUTE_CLIENT_SECRET_NAMESPACES = json.dumps(['default'])
DEFAULT_SYSTEM_PXE_CLIENT_ID = 'system-pxe-client'
DEFAULT_SYSTEM_PXE_CLIENT_SECRET_NAME = 'system-pxe-client-auth'
DEFAULT_SYSTEM_PXE_CLIENT_SECRET_NAMESPACES = json.dumps(['default'])
DEFAULT_SYSTEM_NEXUS_CLIENT_ID = 'system-nexus-client'
DEFAULT_SYSTEM_NEXUS_CLIENT_SECRET_NAME = 'system-nexus-client-auth'
DEFAULT_SYSTEM_NEXUS_CLIENT_SECRET_NAMESPACES = json.dumps(['default'])
DEFAULT_GATEKEEPER_CLIENT_ID = 'gatekeeper'
DEFAULT_GATEKEEPER_CLIENT_SECRET_NAME = 'keycloak-gatekeeper-client'
DEFAULT_GATEKEEPER_CLIENT_SECRET_NAMESPACES = json.dumps(['services'])
DEFAULT_GATEKEEPER_REDIRECT_URIS = []
DEFAULT_WLM_CLIENT_ID = 'wlm-client'
DEFAULT_WLM_CLIENT_SECRET_NAME = 'wlm-client-auth'
DEFAULT_WLM_CLIENT_SECRET_NAMESPACES = json.dumps(['default'])
DEFAULT_OIDC_CLIENT_ID = 'kubernetes-api-oidc-client'
LOGGER = logging.getLogger('keycloak_setup')
class KeycloakClient(object):
"""Class to assist in Keycloak Client Creation.
*Preparing to Create Client*
After init, set keycloak client request attributes
using the following properties (bool):
- standard_flow_enabled (default False)
- implicit_flow_enabled (default False)
- direct_access_grants_enabled (default False)
- service_accounts_enabled (default False)
- public_client (default False)
- create_roles_for_public_client (default True)
- create_monitor_read_only_role (default False)
- authorization_services_enabled (default False)
Noting there is no request validation (e.g., combination of
flows enabled, etc).
You can use the set_req_attr() method to add
additional attributes (e.g., ProtocolMappers).
All request attributes are only honored by the create() method, and
.create() is not intended/designed to be re-entrant. Also note the absence
of any other CRUD-like operations against clients.
*Creating a Client*
Once the object is configured as desired, run .create(). Note that .create()
attempts to handle the scenario where the client already exists.
*Creating K8S Client Secret*
A K8S secret will be created/updated if the k8s_secret_name is set.
The secret data attributes that get created by default are client-id
and client-secret. Use the set_k8s_secret_attr() method to add additional
attributes to the secret. The secret will be created in each namespace
specified by k8s_secret_namespaces. To create the secret(s) in K8S,
call the .create_k8s_secrets() method after a successful .create().
*Exception Handling*
Natively raises the exceptions listed below. Allows pass-through
exceptions to propagate unhandled via interaction with KeycloakSetup
object.
:param KeycloakSetup kas: KeycloakSetup object
:param str realm: keycloak realm
:param str client_id: Oauth client ID (not the keycloak client 'UUID')
:param str k8s_secret_name: name of k8s secret to create
:param collections.iterable k8s_secret_namespace: namespaces where secret should be created
:raises ValueError: on bad parameter use
:raises TypeError: on bad parameter type"""
@property
@property
@property
# given scope, not concerned about overriding
# build-in id() function. Note that this is the
# OAuth client-id, not the UUID that keycloak assigns.
@property
@property
# Core KeyCloak client create attributes
# standardFlowEnabled
@property
@standard_flow_enabled.setter
# implicitFlowEnabled
@property
@implicit_flow_enabled.setter
# directAccessGrantsEnabled
@property
@direct_access_grants_enabled.setter
# serviceAccountsEnabled
@property
@service_accounts_enabled.setter
# authorizationServicesEnabled
@property
@authorization_services_enabled.setter
# createRolesForPublicClient
@property
@create_roles_for_public_client.setter
# createMonitorReadOnlyRole
@property
@create_monitor_read_only_role.setter
# publicClient
@property
@public_client.setter
# client_roles (none by default)
@property
@client_roles.setter
def client_roles(self, v):
"""Expects ["client-role",...]
Adds the above new client role(s) to the new client.
"""
if not isinstance(v, list):
raise TypeError
for role in v:
if not issubclass(type(role), (str)):
raise TypeError(
f'Expecting a string for the client role {role!r}.')
self._client_roles = v
# Allow setting of extended attributes
# for client create request and k8s secret
# create
def set_req_attr(self, attr, value):
"""Set an extended create attribute,
attr must be a valid dictionary key"""
self._kc_ext_attr[attr] = value
def set_k8s_secret_attr(self, attr, value):
"""Set an extended attribute on K8S secret,
attr must be a valid dictionary key"""
self._k8s_secret_ext_attr[attr] = value
def create(self):
"""Attempt to create the client via keycloak. Retrieve the keycloak
id if client created or client already exists.
:raises ValueError: if attempt to override reserved client request attributes."""
LOGGER.info('Create Keycloak client %s', self.id)
config = {
'clientId': self.id,
'standardFlowEnabled': self.standard_flow_enabled,
'implicitFlowEnabled': self.implicit_flow_enabled,
'directAccessGrantsEnabled': self.direct_access_grants_enabled,
'serviceAccountsEnabled': self.service_accounts_enabled,
'publicClient': self.public_client,
'authorizationServicesEnabled': self.authorization_services_enabled
}
# Verify the extended attributes don't contain
# the reserved fields, above. Add them to config
# otherwise.
if set(config.keys()).intersection(set(self._kc_ext_attr.keys())):
raise ValueError(
"cannot override reserved kc client create request attrs")
config.update(self._kc_ext_attr)
# Attempt to create the client
create_url = '{}/admin/realms/{}/clients'.format(
self.kas.keycloak_base, self.realm)
response = self.kas.kc_master_admin_client.post(
create_url, json=config)
if response.status_code == 201:
LOGGER.info('Created client %s', self.id)
elif response.status_code == 409:
LOGGER.info('Keycloak client %s already exists', self.id)
else:
response.raise_for_status()
self._url = self.kas.calc_client_url(self.id)
if not self._url:
raise Exception(f"Failed to fetch URL for client {self.id}!")
# Create any required service account roles
self.add_service_account_roles()
# Create any requested client roles
for client_role in self.client_roles:
self.create_role(client_role)
def create_k8s_secrets(self):
"""Create K8S Secrets for the client, if secret name is set. Must
be called after .create() for client URL to be set."""
if self._k8s_secret_name is None:
LOGGER.info(
"k8s secret name not set for client {}, not creating..".format(self.id))
else:
secret_data = {
'client-id': None,
'client-secret': None
}
# verify extended secret attributes don't contain
# the reserved fields, above. Add them to secret
# def otherwise.
if set(secret_data.keys()).intersection(set(self._k8s_secret_ext_attr.keys())):
raise ValueError("cannot override reserved k8s secret attrs")
secret_data.update(self._k8s_secret_ext_attr)
secret_data['client-id'] = self.id
if self._url is None:
raise ValueError(
"attempting to set role but client URL is not set.")
LOGGER.info('Fetching %s secret...', self.id)
response = self.kas.kc_master_admin_client.get(
'{}/client-secret'.format(self._url))
response.raise_for_status()
secret_data['client-secret'] = response.json()['value']
for namespace in self._k8s_secret_namespaces:
k8s_apply_secret(namespace, self._k8s_secret_name, secret_data)
def create_role(self, role):
"""Create role for the client. Must be called after .create()
for client URL to be set."""
if not issubclass(type(role), (str,)):
raise TypeError("invalid role")
if re.match('([ -~]+)$', str(role)) is None:
raise ValueError("invalid role")
if self._url is None:
raise ValueError(
"attempting to set role but client URL is not set.")
LOGGER.info('Creating %s role in %s client...', role, self._url)
request_data = {
'name': role,
}
response = self.kas.kc_master_admin_client.post(
'{}/roles'.format(self._url), json=request_data)
if response.status_code == 201:
LOGGER.info('%s role created in %s', role, self._url)
elif response.status_code == 409:
LOGGER.info('%s role already exists in %s', role, self._url)
else:
response.raise_for_status()
    def add_service_account_roles(self):
        """Add any requested service account roles to the client.

        The operation is idempotent. Expects
        self._service_account_client_roles to map client names to lists of
        role names owned by that client; each such role is granted to this
        client's service-account user via the Keycloak role-mappings API.
        """
        LOGGER.info('Requested service account roles for client %s: %s',
                    self._id, self._service_account_client_roles)
        role_dict = self._service_account_client_roles
        if not role_dict:
            LOGGER.info('No additional service account roles will be added.')
            return
        # Get the ID of this new client's user entry.
        client_user_name = f'service-account-{self._id}'
        client_user_id = ''
        url = f'{self.kas.keycloak_base}/admin/realms/{self.realm}/users?username={client_user_name}'
        response = self.kas.kc_master_admin_client.get(url)
        LOGGER.info("User ID query %s reply was: %s", url, response)
        LOGGER.debug("The full response was: %s", response.text)
        # Raise for HTTP errors (400-600) here. Note that the response code will be 200 if zero or more
        # users were found for the requested username.
        response.raise_for_status()
        # Loop the returned list of users checking for an exact match.
        # This handles cases where multiple users might be returned because the match is not
        # exact and can't be set to exact. The list will be empty if no user is found.
        for user in response.json():
            username = user['username']
            LOGGER.debug("Found user %s", username)
            if username == client_user_name:
                client_user_id = user['id']
                # NOTE(review): "requetsed" typo below is in a runtime log
                # string, so it is intentionally left untouched here.
                LOGGER.info(
                    "Found the requetsed user %s with the ID: %s", username, user["id"])
                break
        # If we don't find the client user we can not go further in this process. Log it and
        # return.
        if not client_user_id:
            LOGGER.error("Unable to complete adding service account roles since we did not find "
                         "the expected user name %s for the cleint user.", client_user_name)
            return
        # Iterate the list of clients that have roles we need to add.
        for client in role_dict:
            # Get the client's ID from the client name (specified by clientId)
            url = f'{self.kas.keycloak_base}/admin/realms/{self.realm}/clients?clientId={client}'
            response = self.kas.kc_master_admin_client.get(url)
            LOGGER.info("Role client ID query %s reply was: %s", url, response)
            LOGGER.debug("The full response was: %s", response.text)
            # Raise for HTTP errors (400-600) here.
            response.raise_for_status()
            # If the client was not found the list will be empty. In this case just continue on
            # to the next requested client (if any).
            if not response.json():
                LOGGER.error("Did not find the client: %s Unable to add any requested client role for "
                             "this client.", client)
                continue
            client_id = response.json()[0]['id']
            LOGGER.info("The client %s has a client_id=%s", client, client_id)
            # Get the list of requested client roles
            requested_roles = role_dict[client]
            # Get the ID for each client role and assign to the service-account-${client} user by
            # ID (client_user_id determined above)
            client_role_list = []
            LOGGER.info("The roles %s on the client %s were requested",
                        requested_roles, client)
            for client_role in requested_roles:
                LOGGER.info("Getting the role ID for %s", client_role)
                url = f'{self.kas.keycloak_base}/admin/realms/{self.realm}/clients/{client_id}/roles/{client_role}'
                response = self.kas.kc_master_admin_client.get(url)
                LOGGER.info("Role ID query %s reply was: %s", url, response)
                LOGGER.debug("The full response was: %s", response.text)
                # Raise for HTTP errors (400-600) here.
                # If the client_id or client_role is not found the response will be 404.
                if response.status_code == 404:
                    LOGGER.error(
                        'Was not able to find the client role %s', client_role)
                response.raise_for_status()
                client_role_id = response.json()['id']
                LOGGER.info("The client role %s has a client_role_id=%s",
                            client_role, client_role_id)
                # Role-representation payload expected by the role-mappings API.
                client_role_entry = {
                    'id': client_role_id,
                    'name': client_role,
                    'clientRole': True
                }
                LOGGER.info("Preparing to add the client role %s",
                            client_role_entry)
                client_role_list.append(client_role_entry)
            # Post the client role list to the users endpoint
            # client_user_id == the user entry ID for this client's service account user
            # client_id == the client ID of the client owning any role(s) to be added to the service account user
            url = f'{self.kas.keycloak_base}/admin/realms/{self.realm}/users/{client_user_id}/role-mappings/clients/{client_id}'
            response = self.kas.kc_master_admin_client.post(
                url, json=client_role_list)
            LOGGER.info("Role mapping post %s reply was: %s", url, response)
            # Raise for HTTP errors (400-600) here.
            response.raise_for_status()
            # This should be a 204 for an insert or update and is idempotent so
            # responses other than this are considered an error and should be reviewed.
            if response.status_code == 204:
                LOGGER.info('Created client role mapping.')
            else:
                LOGGER.error('Unexpected response code of %s while trying to add one or more '
                             'service account roles to the client user ID \'%s\'. '
                             'The client is \'%s\'. The client role list is: %s',
                             response.status_code, client_user_id, client_id, client_role_list)
# Script entry point. NOTE(review): main() is presumably defined earlier in
# this module (not visible in this chunk) -- confirm before running.
if __name__ == '__main__':
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
17168,
13789,
198,
2,
198,
2,
357,
34,
8,
15069,
12131,
12,
1238,
1828,
30446,
15503,
6400,
446,
14973,
7712,
18470,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
14... | 2.451166 | 7,290 |
import nuke
| [
11748,
299,
4649,
198
] | 3 | 4 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 Christopher C. Strelioff <chris.strelioff@gmail.com>
#
# Distributed under terms of the MIT license.
"""
consumer_key.py
=================
A Python 3 example of using the discogs-client with a consumer key and secret.
"""
import discogs_client as dc

try:
    # make sure there is a config.py file
    import config
except ImportError:
    # BUG FIX: the original used a bare `except:`, which would also have
    # masked unrelated problems (e.g. a SyntaxError inside config.py).
    # Only a missing module should trigger this message.
    raise Exception('Could not import config.py -- please create this file!')

# try to access the consumer key and secret
ckey = config.consumer_key
csecret = config.consumer_secret

default_ckey = (ckey == 'your-consumer-key-here')
default_csecret = (csecret == 'your-consumer-secret-here')

if default_ckey and default_csecret:
    raise Exception('Please set variables consumer_key and '
                    'consumer_secret in config.py!\n'
                    '--obtain consumer key and secret at:'
                    ' https://www.discogs.com/settings/developers')

uagent = ('ConsumerKeyExample/0.1 '
          '+https://github.com/cstrelioff/discogs-python3-examples')

print('\n===\n'
      'user agent: {0}\n'
      'consumer key: {1}\n'
      'consumer secret: {2}\n'
      '==='.format(uagent, ckey, csecret))

# set key, secret when initializing Client
d = dc.Client(uagent, consumer_key=ckey, consumer_secret=csecret)

# use OOB flow to authorize
request_token, request_secret, authorize_url = d.get_authorize_url()

print('\n===\n'
      'request_token: {0}\n'
      'request_secret: {1}\n'
      '==='.format(request_token, request_secret))

# - user verifies at specified url
print('\n** Please browse to the following URL:\n\n'
      '\t{0}\n\n'.format(authorize_url))

# request verfication code from user
oauth_verifier = input('** Please enter verification code: ')

access_token, access_secret = d.get_access_token(oauth_verifier)

print('\n===\n'
      ' * oauth_token: {0}\n'
      ' * oauth_secret: {1}\n\n'
      'Authentication complete. Future requests for this user '
      'can be signed with the above tokens--\n'
      'written to access_tokens.py.'.format(access_token, access_secret))

# write tokens to file
with open('access_tokens.py', 'w') as f:
    f.write('# oauth token and secret\n')
    f.write('access_token=\'{}\'\n'.format(access_token))
    f.write('access_secret=\'{}\''.format(access_secret))

# Again, following the example here (sanme as user_token.py):
#
#   https://github.com/discogs/discogs_client#user-token-authentication
#
user = d.identity()

print('\n* Identity')
print('You are {0} ({1}) from {2}.'.format(user.name, user.username,
                                           user.location))
print('Your wantlist has {} items.'.format(len(user.wantlist)))

print('\n* Fetching data')
results = d.search('Stockholm By Night', type='release')
print('results.pages:', results.pages)
artist = results[0].artists[0]
print('artist.name:', artist.name)

print('\n* Artist ID')
artist_id = artist.id
print('artist.id: {}'.format(artist_id))
print('d.artist({}): {}'.format(artist_id, d.artist(artist_id)))

print('\n* Drill down')
releases = d.search('Bit Shifter', type='artist')[0].releases[1].\
    versions[0].labels[0].releases
print('len(releases): {}'.format(len(releases)))
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
43907,
25,
69,
12685,
28,
40477,
12,
23,
198,
2,
198,
2,
15069,
10673,
1853,
12803,
327,
13,
520,
2411,
952,
... | 2.597278 | 1,249 |
def factorial(n):
    """Print the prime factorization of n.

    Writes the prime factors of n (2 <= n <= 10**9) to stdout in
    non-decreasing order, separated by spaces.

    (Despite its name, this routine performs prime factorization rather
    than a factorial computation; the name is kept for compatibility.)
    """
    factors = []
    divisor = 2
    while divisor * divisor <= n:
        # Strip out every power of the current divisor before moving on,
        # which guarantees only primes are ever appended.
        while n % divisor == 0:
            factors.append(divisor)
            n //= divisor
        divisor += 1
    if n > 1:
        # Whatever remains is a single prime factor > sqrt(original n).
        factors.append(n)
    print(*factors)
if __name__ == '__main__':
    # Read n from stdin and print its prime factorization.
    n = int(input())
    factorial(n)
| [
4299,
1109,
5132,
7,
77,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
12466,
240,
12466,
113,
43666,
18849,
22177,
21727,
20375,
38857,
16843,
22177,
22177,
25443,
117,
220,
21727,
20375,
21169,
25443,
118,
16843,
12466,
112,
1614... | 1.379603 | 353 |
import web
import socketserver

PORT = 8000
Handler = web.testHTTPRequestHandler

# BUG FIX: the original assigned a misspelled attribute
# (`allow_resue_address`) AFTER httpd.serve_forever(), which blocks forever,
# so the line never executed and had no effect even if reached. Setting the
# correctly spelled class attribute before the server binds lets the port be
# reused immediately after a restart.
socketserver.TCPServer.allow_reuse_address = True

httpd = socketserver.TCPServer(("", PORT), Handler)
print("serving at port", PORT)
httpd.serve_forever()
| [
11748,
3992,
198,
11748,
37037,
18497,
198,
15490,
796,
38055,
198,
25060,
796,
3992,
13,
9288,
40717,
18453,
25060,
198,
4023,
67,
796,
37037,
18497,
13,
4825,
3705,
18497,
7,
7203,
1600,
350,
9863,
828,
32412,
8,
198,
4798,
7203,
3129... | 3.106667 | 75 |
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
| [
6738,
220,
844,
27349,
62,
2118,
9078,
13,
8692,
1330,
7308,
198,
6738,
220,
844,
27349,
62,
2118,
9078,
13,
16624,
1330,
13283,
628
] | 3.375 | 24 |
import os
import numpy as np
import gym
from shadowhand_gym.pybullet import PyBullet
from gym import utils, spaces
from typing import List, Optional, Tuple, Union
from abc import ABC
def get_data_path() -> str:
    """Return the absolute data path (this module's directory).

    The original returned ``os.path.join(os.path.dirname(__file__))`` --
    a single-argument join is a no-op, and ``__file__`` may be relative,
    so the result was not necessarily absolute as documented.
    ``abspath`` guarantees it.

    Returns:
        str: Absolute data path.
    """
    return os.path.dirname(os.path.abspath(__file__))
class Task:
    """Abstract interface for a goal-conditioned task (to be completed)."""

    def get_goal(self):
        """Return the goal currently being pursued."""
        raise NotImplementedError

    def get_obs(self):
        """Return the task-specific observation."""
        raise NotImplementedError

    def get_achieved_goal(self):
        """Return the goal actually reached so far."""
        raise NotImplementedError

    def reset(self):
        """Reset the task, e.g. by sampling a fresh goal."""
        return None

    def seed(self, seed):
        """Seed this task's private RNG via gym's seeding helper."""
        self.np_random, seed = utils.seeding.np_random(seed)

    def is_success(self, achieved_goal, desired_goal):
        """Tell whether the achieved goal matches the desired one."""
        raise NotImplementedError

    def compute_reward(self, achieved_goal, desired_goal, info):
        """Return the reward for the achieved/desired goal pair."""
        raise NotImplementedError
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11550,
198,
198,
6738,
9082,
4993,
62,
1360,
76,
13,
9078,
15065,
1616,
1330,
9485,
33481,
1616,
198,
198,
6738,
11550,
1330,
3384,
4487,
11,
9029,
198,
6738,
19720,
1330,
73... | 2.716327 | 490 |
import search
import string
from math import(cos, pi)

# A sample map problem
# sumner_map = search.UndirectedGraph(dict(
#    Portland=dict(Mitchellville=7, Fairfield=17, Cottontown=18),
#    Cottontown=dict(Portland=18),
#    Fairfield=dict(Mitchellville=21, Portland=17),
#    Mitchellville=dict(Portland=7, Fairfield=21),
# ))
#
# sumner_puzzle = search.GraphProblem('Cottontown', 'Mitchellville', sumner_map)
#
# sumner_puzzle.label = 'Sumner'
# sumner_puzzle.description = '''
# An abbreviated map of Sumner County, TN.
# This map is unique, to the best of my knowledge.
# '''
#=========================================================================
#=========================================================================

# Undirected road map of the Norfolk, VA area; edge weights are distances.
# NOTE(review): 'Newportnews' (lowercase n) under Yorktown and 'Chesapeak'
# under Moyock look like typos for 'NewportNews' / 'Chesapeake'; as written
# they create extra, asymmetric nodes -- confirm and fix deliberately.
norfolk_map = search.UndirectedGraph(dict(
    Norfolk=dict(Suffolk=50,Chesapeake=15,VirginiaBeach=35),
    Suffolk=dict(Norfolk=50,Chesapeake=35,Hampton=60,Moyock=150,Sunbury=120),
    Chesapeake=dict(Suffolk=35,Norfolk=15,VirginiaBeach=40,Moyock=120),
    VirginiaBeach=dict(Norfolk=35,Chesapeake=40),
    Hampton=dict(Norfolk=30,Suffolk=60,NewportNews=15),
    NewportNews=dict(Hampton=15,Jamestown=35,Williamsburg=30,Yorktown=15),
    Jamestown=dict(NewportNews=35,Williamsburg=15),
    Williamsburg=dict(Jamestown=15,NewportNews=30,Yorktown=20),
    Yorktown=dict(Williamsburg=20,Newportnews=15),
    Sunbury=dict(Suffolk=120, Moyock=45),
    Moyock=dict(Suffolk=150,Chesapeak=120),
))

norfolk_puzzle = search.GraphProblem('Jamestown', 'Yorktown', norfolk_map)

norfolk_puzzle.label = 'Norfolk'
norfolk_puzzle.description = 'This is a map of the Norfolk, VA area.' \
    'This map is unique to the best of my' \
    'knowledge.'
#=========================================================================
#=========================================================================

# Simplified Romania map (single-letter city names), per Russell & Norvig.
romania_map = search.UndirectedGraph(dict(
    A=dict(Z=75,S=140,T=118),
    Z=dict(O=71,A=75),
    S=dict(O=151,R=80,F=99),
    T=dict(A=118,L=111),
    O=dict(Z=71,S=151),
    L=dict(T=111,M=70),
    M=dict(L=70,D=75),
    D=dict(M=75,C=120),
    R=dict(S=80,C=146,P=97),
    C=dict(R=146,P=138,D=120),
    F=dict(S=99,B=211),
    P=dict(R=97,C=138,B=101),
    B=dict(G=90,P=101,F=211),
))

romania_puzzle = search.GraphProblem('A', 'B', romania_map)

romania_puzzle.label = 'Romania'
romania_puzzle.description = '''
The simplified map of Romania, per
Russall & Norvig, 3rd Ed., p. 68.
'''

# A trivial Problem definition
#swiss_puzzle = search.GraphProblem('A', 'Z', sumner_map)
# NOTE(review): LightSwitch is never defined or imported in this module, so
# this line raises NameError at import time. The class definition appears to
# have been deleted or lives in another module -- restore it before use.
switch_puzzle = LightSwitch('off')
switch_puzzle.label = 'Light Switch'
#===========================================================================================
#===========================================================================================
# class TrueOrFalse(search.Problem):
#     def actions(self, state):
#         return ['true', 'false']
#
#     def result(self, state, action):
#         if action == 'true':
#             return 'true'
#         else:
#             return 'false'
#
#     def goal_test(self, state):
#         return state == 'true'
#
#     def h(self, node):
#         state = node.state
#         if self.goal_test(state):
#             return 0
#         else:
#             return 1
#
# #swiss_puzzle = search.GraphProblem('A', 'Z', sumner_map)
# trueorfalse_puzzle = TrueOrFalse('false')
# trueorfalse_puzzle.label = 'True or False'

# 3x3 grid of cells A1..C3; edge weights encode movement cost between cells.
cheese_map = search.UndirectedGraph(dict(
    A1=dict(A2=10,A3=20,B1=10,B2=20,B3=30,C1=20,C2=30,C3=40),
    A2=dict(A1=10,A3=10,B1=20,B2=10,B3=20,C1=30,C2=20,C3=30),
    A3=dict(A1=20,A2=10,B1=30,B2=20,B3=10,C1=40,C2=30,C3=20),
    B1=dict(A1=10,A2=20,A3=30,B2=10,B3=10,C1=10,C2=20,C3=30),
    B2=dict(A2=10,A3=20,B1=10,A1=20,B3=10,C1=20,C2=10,C3=20),
    B3=dict(A2=20,A3=10,B1=20,B2=10,A1=30,C1=30,C2=20,C3=10),
    C1=dict(A2=20,A3=40,B1=10,B2=20,B3=30,A1=20,C2=10,C3=20),
    C2=dict(A2=10,A3=20,B1=20,B2=10,B3=20,C1=10,A1=30,C3=10),
    C3=dict(A2=30,A3=20,B1=30,B2=20,B3=10,C1=20,C2=10,A1=40),
))

import random
# NOTE(review): guess_letter and guess_number are undefined -- NameError at
# import time. Presumably they pick a random row letter ('A'-'C') and column
# number ('1'-'3') to form a random goal cell; define or import them.
a = guess_letter()
b = guess_number()
print(a + b)
cheese_puzzle = search.GraphProblem('A1', a+b , cheese_map)
cheese_puzzle.label = 'Cheese Puzzle'
#===========================================================================================
#===========================================================================================

# Registry of puzzles consumed by the course test driver.
mySearches = [
    # swiss_puzzle,
    # sumner_puzzle,
    romania_puzzle,
    switch_puzzle,
    norfolk_puzzle,
    #trueorfalse_puzzle,
    cheese_puzzle,
]
| [
11748,
2989,
198,
11748,
4731,
198,
6738,
10688,
1330,
7,
6966,
11,
31028,
8,
198,
198,
2,
317,
6291,
3975,
1917,
198,
2,
2160,
1008,
62,
8899,
796,
2989,
13,
31319,
1060,
276,
37065,
7,
11600,
7,
198,
2,
220,
220,
220,
10727,
28,... | 2.300403 | 1,984 |
import numpy as np
def get_confusion(ids1, ids2):
"""Get confusion matrix
Parameters
----------
ids1: numpy.array or list
id list in the first annotation
ids2: numpy.array or list
id list in the second annotation
Return
------
(confuse_mat, ids1_uniq, ids2_uniq)
confuse_mat[i, j]:
number of samples have ids1 == ids1_uniq[i]
and ids2 == id2_uniq[j]
"""
if type(ids1) == list: ids1 = np.array(ids1)
if type(ids2) == list: ids2 = np.array(ids2)
ids1_uniq = np.unique(ids1)
ids2_uniq = np.unique(ids2)
confuse_mat = np.zeros((len(ids1_uniq), len(ids2_uniq)), dtype=int)
for i, _id1 in enumerate(ids1_uniq):
for j, _id2 in enumerate(ids2_uniq):
confuse_mat[i, j] = np.sum((ids1 == _id1) * (ids2 == _id2))
return confuse_mat, ids1_uniq, ids2_uniq
| [
11748,
299,
32152,
355,
45941,
198,
198,
4299,
651,
62,
10414,
4241,
7,
2340,
16,
11,
220,
2340,
17,
2599,
198,
220,
220,
220,
37227,
3855,
10802,
17593,
198,
220,
220,
220,
220,
198,
220,
220,
220,
40117,
198,
220,
220,
220,
24200,... | 1.97807 | 456 |
from replit import clear
import os
import pyfiglet
from art import logo,item,text

os.system('clear')
print(text)
print("\n")
print(logo)
print("\n")
print(" The Item Up For Auctions Is :")
print(item)
print("\n")
print("Welcome to the secret auction program.")
bids = {}
# NOTE(review): add_to_dict is never defined or imported in this file, so
# this call raises NameError. It is presumably meant to prompt each bidder
# and fill `bids` (name -> amount); until it exists, the max() below would
# also fail on the empty dict.
add_to_dict()

# Winner = the key whose value is the highest bid amount.
print("The highest bid amount is : $", max((bids.values())), " by : ",
      (list(bids.keys())[list(bids.values()).index(max((bids.values())))]))

# Render the winner's name and winning amount as large ASCII-art banners.
result = pyfiglet.figlet_format(str(list(bids.keys())[list(bids.values()).index(max((bids.values())))]),font="doom")
result1 = pyfiglet.figlet_format(f"$ {str(max((bids.values())))}",font="doom")
print(result,"\n")
print(result1,"\n")
6738,
2186,
270,
1330,
1598,
198,
11748,
28686,
198,
11748,
12972,
5647,
1616,
198,
6738,
1242,
1330,
11112,
11,
9186,
11,
5239,
198,
418,
13,
10057,
10786,
20063,
11537,
198,
4798,
7,
5239,
8,
198,
4798,
7203,
59,
77,
4943,
198,
4798... | 2.533333 | 270 |
import re
from collections import defaultdict
from typing import Literal, Union
from timefred import color as c
from timefred.error import EmptySheet, NoActivities
from timefred.store import store, Entry, Activity
from timefred.time.timeutils import arrows2rel_time
from timefred.time.xarrow import XArrow
# @break_on_exc | [
11748,
302,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
19720,
1330,
25659,
1691,
11,
4479,
198,
198,
6738,
640,
39193,
1330,
3124,
355,
269,
198,
6738,
640,
39193,
13,
18224,
1330,
33523,
3347,
316,
11,
1400,
25526,
871,
198,
6738,... | 3.681818 | 88 |
"""
Copyright (c) 2014 Dan Obermiller
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
You should have received a copy of the MIT License along with this program.
If not, see <http://opensource.org/licenses/MIT>
"""
try:
import cStringIO as IO
except ImportError:
import StringIO as IO
finally:
from contextlib import closing, contextmanager
import select
import socket
import sys
import threading
import time
import unittest
from testfixtures import LogCapture
import IRC_sockselect as IRC
# NOTE(review): this @contextmanager decorator has no function definition
# under it -- the decorated helper appears to have been lost, and a decorator
# directly followed by `if` is a SyntaxError, so this module cannot even be
# parsed as-is. `test_sockselect` is also not defined in this chunk.
@contextmanager


if __name__ == '__main__':
    # Run the socket-select test case with a plain-text runner on stdout.
    suite = unittest.TestLoader().loadTestsFromTestCase(test_sockselect)
    unittest.TextTestRunner(sys.stdout, verbosity=1).run(suite)
| [
37811,
198,
15269,
357,
66,
8,
1946,
6035,
33753,
76,
4665,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
1659,
428,
3788,
290,
3917,
10314,
3696,
357,
1169,
366,
25423,
12340,... | 3.383858 | 508 |
import gym
import torch
import json
import os
import yaml
import numpy as np
from tqdm import trange
import maml_rl.envs
from maml_rl.metalearners import MAMLTRPO
from maml_rl.baseline import LinearFeatureBaseline
from maml_rl.samplers import CurriculumTaskSampler
from maml_rl.utils.helpers import get_policy_for_env, get_input_size
from maml_rl.utils.reinforcement_learning import get_returns
import wandb
# Number of trajectories sampled per curriculum stage.
NUM_TRAJ_PER_ITER = 3000
#CURRICULUM = [0] * NUM_TRAJ_PER_ITER + [1] * NUM_TRAJ_PER_ITER + [2] * NUM_TRAJ_PER_ITER + [3] * NUM_TRAJ_PER_ITER + [4] * NUM_TRAJ_PER_ITER + [5] * NUM_TRAJ_PER_ITER + [6] * NUM_TRAJ_PER_ITER + [7] * NUM_TRAJ_PER_ITER
# Two-stage curriculum: stage 0 for the first NUM_TRAJ_PER_ITER batches,
# then stage 1 (the longer 8-stage schedule above is kept for reference).
CURRICULUM = [0] * NUM_TRAJ_PER_ITER + [1] * NUM_TRAJ_PER_ITER
OFFSET = NUM_TRAJ_PER_ITER

if __name__ == '__main__':
    import argparse
    import multiprocessing as mp

    parser = argparse.ArgumentParser(description='Reinforcement learning with '
        'Model-Agnostic Meta-Learning (MAML) - Train')

    parser.add_argument('--config', type=str, required=True,
        help='path to the configuration file.')

    # Miscellaneous
    misc = parser.add_argument_group('Miscellaneous')
    misc.add_argument('--output-folder', type=str,
        help='name of the output folder')
    misc.add_argument('--seed', type=int, default=None,
        help='random seed')
    misc.add_argument('--num-workers', type=int, default=mp.cpu_count() - 1,
        help='number of workers for trajectories sampling (default: '
            '{0})'.format(mp.cpu_count() - 1))
    misc.add_argument('--use-cuda', action='store_true',
        help='use cuda (default: false, use cpu). WARNING: Full upport for cuda '
            'is not guaranteed. Using CPU is encouraged.')
    misc.add_argument('--wandb', action='store_true', help='whether to log to wandb')
    misc.add_argument('--name', default='default_name', help='default name for wandb_log')

    args = parser.parse_args()
    # Use CUDA only when both available and explicitly requested.
    args.device = ('cuda' if (torch.cuda.is_available()
                              and args.use_cuda) else 'cpu')
    print("Using %s" %args.device)

    with open(args.config, 'r') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)

    # Optional experiment tracking via Weights & Biases.
    if args.wandb:
        run = wandb.init(
            project="Intelligent Agent",
            name=args.name,
            config=config
        )

    # NOTE(review): main() is not defined in this part of the file -- it is
    # presumably defined elsewhere in the module; confirm before running.
    main(args)
| [
11748,
11550,
198,
11748,
28034,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
331,
43695,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
256,
80,
36020,
1330,
491,
858,
198,
198,
11748,
285,
43695,
62,
45895,
13,
268,
14259,
198,
6... | 2.412863 | 964 |
import os
import sublime
import subprocess
import re
| [
11748,
28686,
198,
11748,
41674,
198,
11748,
850,
14681,
198,
11748,
302,
198
] | 4.076923 | 13 |
""" A detector model which wraps around a feature extraction backbone, fpn, and RetinaNet
head.This allows for easy interchangeability during experimentation and a reliable way to
load saved models. """
import collections
import dataclasses
from typing import List
import yaml
import torch
import torchvision
from core import pull_assets
from third_party.efficientdet import bifpn, efficientnet
from third_party.vovnet import vovnet
from third_party.models import (
fpn,
postprocess,
regression,
anchors,
retinanet_head,
)
# NOTE(review): the class this @dataclass decorator was applied to
# (presumably `BiFPN_Params`, constructed below) is missing. A decorator
# followed directly by an assignment is a SyntaxError, so this module cannot
# be imported as-is -- restore the dataclass definition.
@dataclasses.dataclass


# Per-model BiFPN parameters for each EfficientDet compound-scaling level.
# Field meanings are presumably (resolution, channels, BiFPN repeats,
# head repeats) -- TODO confirm against the missing BiFPN_Params definition.
_EFFICIENT_DETS = {
    "bifpn-b0": BiFPN_Params(512, 64, 3, 3),
    "bifpn-b1": BiFPN_Params(640, 88, 4, 3),
    "bifpn-b2": BiFPN_Params(768, 112, 5, 3),
    "bifpn-b3": BiFPN_Params(896, 160, 6, 4),
    "bifpn-b4": BiFPN_Params(1024, 224, 7, 4),
    "bifpn-b5": BiFPN_Params(1280, 288, 7, 4),
}
| [
37811,
317,
31029,
2746,
543,
27521,
1088,
257,
3895,
22236,
32774,
11,
277,
21999,
11,
290,
4990,
1437,
7934,
198,
2256,
13,
1212,
3578,
329,
2562,
26478,
1799,
1141,
29315,
290,
257,
9314,
835,
284,
198,
2220,
7448,
4981,
13,
37227,
... | 2.58457 | 337 |
"""Package for verifying the robustness of decision trees and their ensembles."""
| [
37811,
27813,
329,
45505,
262,
12373,
1108,
286,
2551,
7150,
290,
511,
551,
4428,
829,
526,
15931,
198
] | 4.555556 | 18 |
import os
import sys
import qdarkstyle
import darkdetect
from PyQt5 import QtWidgets, QtCore, QtGui
from app.gui.consolepanel import MainConsolePanel
from app.gui.histogrampanel import HistogramPanel
from app.gui.plottingpanel import PlottingPanel
from app.resources import resources
from app.gui.fittingpanel import FittingPanel
from app.util import qt_constants
from app.gui.dialogs.dialog_misc import PermissionsMessageDialog
from app.model import services
# noinspection PyArgumentList
class MainWindow(QtWidgets.QMainWindow):
    """
    MainWindow widget for the program. Creates the default arrangement of panels.
    """

    @QtCore.pyqtSlot()
    def _set_default_panels(self):
        """
        Sets the default panels of the main window for µSR analysis.
        """
        # Console panel docked on the left.
        self.addDockWidget(qt_constants.LeftDockWidgetArea, MainConsolePanel())

        # Wrap the tab widget (self._tabs, created elsewhere) in a dock
        # widget with an empty title bar so it cannot be floated or closed.
        temp_docking_widget = QtWidgets.QDockWidget()
        temp_docking_widget.setWidget(self._tabs)
        temp_docking_widget.setTitleBarWidget(QtWidgets.QWidget())
        self.addDockWidget(qt_constants.RightDockWidgetArea, temp_docking_widget, qt_constants.Horizontal)

        # Plotting support panel also on the left dock area.
        self.addDockWidget(qt_constants.LeftDockWidgetArea, self._tabs.plotting_panel.createSupportPanel())

    def set_status_message(self, message):
        """
        Sets the status message for the main window.

        :param message: the message to be displayed.
        """
        # NOTE(review): setStatusTip() sets the hover status-tip, not the
        # status-bar text; statusBar().showMessage(message) may have been
        # intended -- confirm before changing.
        self.setStatusTip(message)
# noinspection PyArgumentList
class StyleFile:
    """
    A class to parse the variable file and store the updated QSS file.
    """

    @staticmethod
    def _parse_var_file(var_file):
        """
        Gets all the variables and stores them in a dictionary.

        :param var_file: Path to the variable file: whitespace-separated
            tokens where variable names start with '@' and values with '#'.
        :return qss_vars: A dictionary mapping '@name' -> '#value'.
        """
        # Context manager closes the handle deterministically (the original
        # leaked the file object returned by open()).
        with open(var_file) as f:
            tokens = f.read().split()
        keys = [token for token in tokens if token[0] == '@']
        values = [token for token in tokens if token[0] == '#']
        return {k: v for k, v in zip(keys, values)}

    @staticmethod
    def _parse_qss_file(qss_file, qss_vars):
        """
        Replaces all the variables in the qss file with their actual values.

        :param qss_file: Path to the file with the QSS custom styles.
        :param qss_vars: The variables as a dictionary ('@name' -> value).
        :return qss_updated_file: The updated stylesheet as a string.
        """
        with open(qss_file) as f:
            qss_read_file = f.read()

        qss_updated_file = ""
        current_char = 0
        # Scan character by character; whenever a known '@variable' starts
        # at the cursor, emit its value and jump past the variable name.
        while current_char < len(qss_read_file):
            for key in qss_vars.keys():
                len_key = len(key)
                if qss_read_file[current_char:current_char + len_key] == key:
                    qss_updated_file += qss_vars[key]
                    current_char += len_key
                    break
            else:
                qss_updated_file += qss_read_file[current_char]
                current_char += 1

        return qss_updated_file
| [
11748,
28686,
198,
11748,
25064,
198,
198,
11748,
10662,
21953,
7635,
198,
11748,
3223,
15255,
478,
198,
6738,
9485,
48,
83,
20,
1330,
33734,
54,
312,
11407,
11,
33734,
14055,
11,
33734,
8205,
72,
198,
198,
6738,
598,
13,
48317,
13,
4... | 2.373169 | 1,297 |
import pytest
import sys
from os.path import dirname as d
from os.path import abspath, join
root_dir = d(d(abspath(__file__)))
sys.path.append(root_dir)
from app import create_app
@pytest.fixture
| [
11748,
12972,
9288,
198,
11748,
25064,
198,
6738,
28686,
13,
6978,
1330,
26672,
3672,
355,
288,
198,
6738,
28686,
13,
6978,
1330,
2352,
6978,
11,
4654,
198,
15763,
62,
15908,
796,
288,
7,
67,
7,
397,
2777,
776,
7,
834,
7753,
834,
22... | 2.814286 | 70 |
from environments.dynamodb.migrator import IdentityMigrator
from projects.serializers import ProjectSerializer
| [
6738,
12493,
13,
67,
4989,
375,
65,
13,
76,
3692,
1352,
1330,
27207,
44,
3692,
1352,
198,
6738,
4493,
13,
46911,
11341,
1330,
4935,
32634,
7509,
628,
198
] | 4.035714 | 28 |
from .book import Book
from .chapter import Chapter
from .alpha_book import AlphabeticalBook | [
6738,
764,
2070,
1330,
4897,
198,
6738,
764,
43582,
1330,
7006,
198,
6738,
764,
26591,
62,
2070,
1330,
45695,
605,
10482
] | 4.380952 | 21 |
from itertools import compress

"""
This doesn't really seem all that useful.
"""

addresses = [
    "5142 N CLARK",
    "5148 N CLARK",
    "5800 E 58TH",
    "2122 N CLARK",
    "5645 N RAVENSWOOD",
    "1060 W ADDISON",
    "4801 N BROADWAY",
    "1039 W GRAINVILLE",
]

counts = [0, 3, 10, 4, 1, 7, 6, 1]

# Boolean selector: True wherever the corresponding count exceeds five.
more5 = [count > 5 for count in counts]

# compress() keeps only the addresses whose selector entry is truthy.
cp = list(compress(addresses, more5))
| [
6738,
340,
861,
10141,
1330,
27413,
198,
198,
37811,
198,
1212,
1595,
470,
1107,
1283,
477,
326,
4465,
13,
198,
37811,
198,
198,
2860,
16746,
796,
685,
198,
220,
220,
220,
366,
20,
23726,
399,
7852,
14175,
1600,
198,
220,
220,
220,
... | 2.177143 | 175 |
import torch
from mmdet.core import bbox2result, bbox_mapping_back
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
from .two_stage import TwoStageDetector
from ..losses import CtdetLoss
from .ctdet_decetor import ctdet_decode, post_process, merge_outputs
@DETECTORS.register_module() | [
11748,
28034,
198,
198,
6738,
8085,
15255,
13,
7295,
1330,
275,
3524,
17,
20274,
11,
275,
3524,
62,
76,
5912,
62,
1891,
198,
6738,
11485,
38272,
1330,
38267,
9782,
20673,
198,
6738,
764,
29762,
62,
14247,
1330,
14206,
29391,
11242,
9250... | 3.138614 | 101 |
# © Port Scanner- Made by Yuval Simon. For bogan.cool
import socket, subprocess, sys, time, os, threading
from colorama import Fore
| [
2,
10673,
4347,
20937,
1008,
12,
14446,
416,
10605,
2100,
11288,
13,
1114,
275,
9632,
13,
24494,
198,
198,
11748,
17802,
11,
850,
14681,
11,
25064,
11,
640,
11,
28686,
11,
4704,
278,
198,
6738,
3124,
1689,
1330,
4558,
628,
198
] | 3.292683 | 41 |
from spacy.lang.en import English
from spacy.tokens import Token

nlp = English()

# Register the "is_country" Token attribute extension with default value False.
Token.set_extension("is_country", default=False)

# Process the text and set the "is_country" attribute to True for the
# token "Spain" (index 3 in "I live in Spain.").
doc = nlp("I live in Spain.")
doc[3]._.is_country = True

# Print the token text and the "is_country" attribute for every token.
print([(token.text, token._.is_country) for token in doc])
| [
6738,
599,
1590,
13,
17204,
13,
268,
1330,
3594,
198,
6738,
599,
1590,
13,
83,
482,
641,
1330,
29130,
198,
198,
21283,
79,
796,
3594,
3419,
198,
198,
2,
13811,
430,
8591,
1070,
641,
72,
18840,
390,
379,
2455,
78,
1619,
29130,
11,
... | 2.988372 | 172 |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.sheet
from abc import abstractproperty, ABC
class RangeSelectionArguments(ABC):
    """
    Service Class

    contains the arguments for starting the range selection.

    **since**

        OOo 2.0.3

    See Also:
        `API RangeSelectionArguments <https://api.libreoffice.org/docs/idl/ref/servicecom_1_1sun_1_1star_1_1sheet_1_1RangeSelectionArguments.html>`_
    """
    # UNO namespace metadata emitted by the Cheetah code generator; used to
    # map this Python ABC back onto the com.sun.star service it mirrors.
    __ooo_ns__: str = 'com.sun.star.sheet'
    __ooo_full_ns__: str = 'com.sun.star.sheet.RangeSelectionArguments'
    __ooo_type_name__: str = 'service'

    @abstractproperty
    def CloseOnMouseRelease(self) -> bool:
        """
        specifies if the range selection is finished when the mouse button is released, after selecting cells.
        """

    @abstractproperty
    def InitialValue(self) -> str:
        """
        contains the initial value for the range descriptor.
        """

    @abstractproperty
    def SingleCellMode(self) -> bool:
        """
        specifies if the range selection is limited to a single cell only.

        If TRUE, the selection is restricted to a single cell. If FALSE, multiple adjoining cells can be selected. The default value is FALSE.

        **since**

            OOo 2.0.3
        """

    @abstractproperty
    def Title(self) -> str:
        """
        contains a title for the operation.
        """
__all__ = ['RangeSelectionArguments']
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
198,
2,
15069,
33160,
1058,
33,
6532,
12,
22405,
12,
12041,
25,
19935,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
4943,
198,
2,
345,
743,... | 2.780804 | 771 |
# encoding: utf-8
##################################################
# This script shows an example of comparison operators.
# First, it shows different options to create functions used to simplify your code.
# These functions are used as other functions embedded in python.
#
##################################################
#
##################################################
# Author: Diego Pajarito
# Copyright: Copyright 2019, IAAC
# Credits: [Institute for Advanced Architecture of Catalonia - IAAC, Advanced Architecture group]
# License: Apache License Version 2.0
# Version: 1.0.0
# Maintainer: Diego Pajarito
# Email: diego.pajarito@iaac.net
# Status: development
##################################################
# We are going to use random library
import random
# Let's write our code
print('You can use the - + - operator to concatenate text')
text = 'text fragment - '
result = text + text + text + '\n'
print(text + result)

print('These are few examples of python functions\n')
# For instance the - random integer - function from - random - library
number = random.randint(0, 10)
# Or the - print - function
print('Yes!! print is built-in function to print characters in the screen')
# Also functions for transforming variable type. Here the function - str -
print('Although this is a number can be printed as character: ' + str(number))

# There are also functions for building complex data structures
print('You can use the - range(n) - function to create a list with n numbers')
text = 'range(5) = '
# list() materializes the range so the print below really shows the numbers;
# in Python 3 a bare range object would print as "range(0, 5)".
result = list(range(5))
print(text + str(result))

print('You can use the - sum() - function to add up numbers')
# The label must describe the computation that is shown: sum(range(5)) is 10,
# so the old label '3 + 9 = ' printed the factually wrong line "3 + 9 = 10".
text = 'sum(range(5)) = '
result = sum(result)
print(text + str(result))
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
29113,
14468,
2235,
198,
2,
770,
4226,
2523,
281,
1672,
286,
7208,
12879,
13,
198,
2,
3274,
11,
340,
2523,
1180,
3689,
284,
2251,
5499,
973,
284,
30276,
534,
2438,
13,
198,
2,
2312,
5499,
... | 3.906393 | 438 |
import time

# NOTE(review): this chunk was recovered from a dump with leading indentation
# stripped. The nesting below was reconstructed so that every
# "invalid input" branch re-prompts (otherwise both `else` branches would be
# unreachable) — confirm against the original file.

# Interactive bank-teller assistant ("Gringotts"). All user-facing strings
# are Chinese and are preserved verbatim.
needhelp = None
# Prompt until the customer answers '需要' (need help) or '不需要' (no help).
while needhelp not in ['需要', '不需要']:
    needhelp = input('小精灵:您好,欢迎来到古灵阁,请问您需要帮助吗?需要or不需要?')
    if needhelp == '需要':
        anyhelp = None
        # Prompt until a valid service number (1/2/3) is entered.
        while anyhelp not in ['1', '2', '3']:
            anyhelp = input('小精灵:请问您需要什么帮助呢?1 存取款;2 货币兑换;3 咨询')
            if anyhelp == '1':
                # Deposits/withdrawals: direct the customer to that counter.
                print('小精灵:推荐您去存取款窗!')
            elif anyhelp == '2':
                # Currency exchange: 1 galleon = 51.3 RMB.
                print('小精灵:金加隆和人民币的兑换率为1:51.3,即一金加隆=51.3人民币')
                exchange = input('小精灵:请问您需要兑换多少金加隆呢?')
                print('小精灵:好的,我知道了,您需要兑换'+exchange+'金加隆。')
                time.sleep(2)
                # Amount due = galleons * 51.3 (raises ValueError on non-digits).
                print('小精灵:那么,您需要付给我'+str(int(exchange)*51.3)+'人民币。')
            elif anyhelp == '3':
                # Consulting: direct the customer to the consulting window.
                print('小精灵:推荐您去咨询窗口!')
            else:
                # Invalid service number: ask again.
                print('小精灵:对不起,您的输入有误,请重新输入!')
    elif needhelp == '不需要':
        print('小精灵:好的,再见。')
    else:
        # Neither valid answer: ask again.
        print('小精灵:对不起,您的输入有误,请重新确认!')
time.sleep(3)
print("交易结束!")
11748,
640,
201,
198,
31227,
16794,
796,
6045,
201,
198,
4514,
761,
16794,
407,
287,
37250,
165,
250,
222,
17358,
223,
3256,
705,
38834,
165,
250,
222,
17358,
223,
6,
5974,
201,
198,
220,
220,
220,
761,
16794,
796,
5128,
10786,
22887,... | 0.983437 | 966 |
# Generated by Django 3.0.4 on 2020-04-19 04:53
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
19,
319,
12131,
12,
3023,
12,
1129,
8702,
25,
4310,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import io
import json
import os
import torch
from torchvision import datasets, transforms
import numpy as np
from PIL import Image
def process_image(image_path):
    """
    Scales, center-crops, and normalizes an image for a PyTorch model.

    :param image_path: Path to an image file readable by PIL.
    :return: torch.Tensor of shape (1, 3, 224, 224) — a single CHW image,
             scaled to [0, 1] and normalized with the ImageNet mean/std.
    """
    # Image.open() opens the file lazily and keeps the handle alive; use a
    # context manager so the handle is always released (the original code
    # leaked it until garbage collection).
    with Image.open(image_path) as img:
        # Shrink in place so the image fits inside 256x256, keeping aspect.
        # NOTE(review): thumbnail() bounds the *longer* side to 256, while the
        # standard ImageNet pipeline resizes the *shorter* side to 256. Kept
        # as-is so the output matches what the model was trained/used with.
        scale_size = 256, 256
        img.thumbnail(scale_size, Image.LANCZOS)

        # Center-crop a 224x224 region.
        crop_size = 224
        width, height = img.size  # Get dimensions
        left = (width - crop_size) / 2
        top = (height - crop_size) / 2
        right = (width + crop_size) / 2
        bottom = (height + crop_size) / 2
        image = img.crop((left, top, right, bottom))

    # Scale pixels to [0, 1], then normalize per channel with the ImageNet
    # statistics pretrained torchvision models expect.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    image_array = np.array(image) / 255
    image = (image_array - mean) / std

    # Reorder HWC -> CHW, then prepend a batch dimension of 1.
    image = image.transpose((2, 0, 1))
    return torch.from_numpy(image).unsqueeze_(0)
| [
11748,
33245,
198,
11748,
33918,
198,
11748,
28686,
198,
198,
11748,
28034,
198,
6738,
28034,
10178,
1330,
40522,
11,
31408,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
350,
4146,
1330,
7412,
628,
198,
198,
4299,
1429,
62,
9060,
7,
9... | 2.620513 | 390 |
import MainAnalysis as ma
import ExpFile as exp
import MatplotlibPlotters as mp
import numpy as np
# Point the experiment-file loader at the 23 June 2021 dataset and open run 25.
exp.setPath('23','June','2021')
ee = exp.ExpFile(25)

# Run the full standardImages() analysis of run 25 taken with the 'mako'
# camera, with 2-D Gaussian picture fits enabled (fitPics=True). The key axis
# is broadcast to the (50, 41) picture grid and flattened so there is one key
# value per picture. The unpacked names mirror standardImages()' return order.
key, rawData, dataMinusBg, dataMinusAvg, avgPic,\
pictureFitParams, pictureFitErrors, plottedData,\
v_params, v_errs, h_params, h_errs, intRawData = \
    ma.standardImages(25,loadType='mako',cameraType='mako',
                      key = np.broadcast_to( ee.get_key()[1],(50,41)).flatten(),
                      fitPics=True)
# Earlier variant without camera type / fits, kept for reference:
# ma.standardImages(25,loadType='mako',key = np.broadcast_to( ee.get_key()[1],(50,41)).flatten())
11748,
8774,
32750,
355,
17266,
198,
11748,
5518,
8979,
355,
1033,
198,
11748,
6550,
29487,
8019,
43328,
1010,
355,
29034,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11201,
13,
2617,
15235,
10786,
1954,
41707,
15749,
41707,
1238,
2481,
... | 2.347107 | 242 |
from sqlalchemy.sql import func
from project import db
class ImageTile(db.Model):
    """Describes the image tile database table and allows querying for image tile information"""

    __tablename__ = 'tiles'

    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # WMS/XYZ tile coordinates: z is the zoom level, x/y the tile column/row
    # at that zoom.
    z_coord = db.Column(db.Integer, nullable=False)
    x_coord = db.Column(db.Integer, nullable=False)
    y_coord = db.Column(db.Integer, nullable=False)
    # Filesystem path of the stored tile image (max 128 chars).
    path = db.Column(db.String(128), nullable=False)

    @staticmethod
    def find_path_by_coords(z_coord, x_coord, y_coord):
        """
        Query the database for the image path (str) and return it.

        Input:  z_coord -- int; WMS tile layer z coordinate value (zoom level)
                x_coord -- int; WMS tile layer x coordinate value (see deg_to_num in utils.py for conversion)
                y_coord -- int; WMS tile layer y coordinate value (see deg_to_num in utils.py for conversion)
        Output: string; Path to the image
        """
        # .scalar() returns the single selected column of the single matching
        # row, or None when no tile exists at these coordinates.
        return db.session.query(ImageTile.path).filter_by(z_coord=z_coord,
                                                          x_coord=x_coord,
                                                          y_coord=y_coord).scalar()
| [
198,
6738,
44161,
282,
26599,
13,
25410,
1330,
25439,
198,
6738,
1628,
1330,
20613,
628,
198,
198,
4871,
7412,
35103,
7,
9945,
13,
17633,
2599,
198,
220,
220,
220,
37227,
24564,
22090,
262,
2939,
17763,
6831,
3084,
290,
3578,
42517,
111... | 2.264706 | 544 |
import os
from django.contrib.messages import constants as messages
# --- Paths ---------------------------------------------------------------
# Project base directory: three dirname() calls walk up from this settings
# module to the repository root.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# Create the static/media directories on import so collectstatic and media
# uploads never fail on a fresh checkout.
STATIC_DIR = os.path.join(BASE_DIR, 'static')
if not os.path.exists(STATIC_DIR):
    os.makedirs(STATIC_DIR)

MEDIA_DIR = os.path.join(BASE_DIR, 'media')
if not os.path.exists(MEDIA_DIR):
    os.makedirs(MEDIA_DIR)

# --- Applications --------------------------------------------------------
INSTALLED_APPS = [
    'app',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'sass_processor',
    'compressor',
    'svg',
    'django_user_agents',
    'wagtail.contrib.forms',
    'wagtail.contrib.redirects',
    'wagtail.embeds',
    'wagtail.sites',
    'wagtail.users',
    'wagtail.snippets',
    'wagtail.documents',
    'wagtail.images',
    'wagtail.search',
    'wagtail.admin',
    'wagtail.core',
    'modelcluster',
    'taggit',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django_user_agents.middleware.UserAgentMiddleware',
    'wagtail.core.middleware.SiteMiddleware',
    'wagtail.contrib.redirects.middleware.RedirectMiddleware',
]

ROOT_URLCONF = 'uwhvz.urls'

# Two template engines: Jinja2 for the project's own templates, the Django
# engine (APP_DIRS=True) for third-party apps such as the admin and wagtail.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.jinja2.Jinja2',
        'DIRS': [os.path.join(BASE_DIR, 'app', 'templates', 'jinja2')],
        'APP_DIRS': False,
        'OPTIONS': {
            'environment': 'uwhvz.jinja2.environment',
            'extensions': [
                'sass_processor.jinja2.ext.SassSrc',
                'wagtail.core.jinja2tags.core',
                'wagtail.admin.jinja2tags.userbar',
                'wagtail.images.jinja2tags.images',
                'compressor.contrib.jinja2ext.CompressorExtension',
            ],
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'uwhvz.wsgi.application'

USER_AGENTS_CACHE = 'default'

# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
# NOTE(review): the empty list disables all password-strength validation —
# confirm this is intentional for production deployments.
AUTH_PASSWORD_VALIDATORS = []

# Custom user model and default authentication backend.
AUTH_USER_MODEL = 'app.User'
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
)

WAGTAIL_FRONTEND_LOGIN_URL = '/accounts/login'
LOGIN_REDIRECT_URL = '/dashboard/player'
LOGOUT_REDIRECT_URL = '/'
ATOMIC_REQUESTS = True
PASSWORD_HASHERS = [
    'django.contrib.auth.hashers.Argon2PasswordHasher',
]

# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = STATIC_DIR
MEDIA_URL = '/media/'
MEDIA_ROOT = MEDIA_DIR
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'sass_processor.finders.CssFinder',
    'compressor.finders.CompressorFinder',
]

WAGTAIL_SITE_NAME = 'UW Humans vs Zombies'
SASS_PRECISION = 8
SASS_OUTPUT_STYLE = 'compact'
SASS_PROCESSOR_ENABLED = True

# Map Django message levels onto CSS classes (Bootstrap alert names).
MESSAGE_TAGS = {
    messages.DEBUG: 'dark',
    messages.INFO: 'info',
    messages.SUCCESS: 'success',
    messages.WARNING: 'warning',
    messages.ERROR: 'danger',
}

# If set to True then user signups will be restricted to those who have a signup token.
# If set to False then users will be able to signup freely without token.
TOKEN_RESTRICTED_SIGNUPS = True
| [
11748,
28686,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
37348,
1095,
1330,
38491,
355,
6218,
198,
198,
33,
11159,
62,
34720,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
1590... | 2.178043 | 2,095 |
# Cake-taking exercise: a rows x cols cake is eaten piece by piece.
# Input: the two dimensions, then either a piece count per line or 'STOP'.
rows = int(input())
cols = int(input())
cake_pieces = rows * cols

taken_total = 0
last_take = 0

# Keep asking while the most recent single take is smaller than the cake.
while last_take < cake_pieces:
    command = input()
    if command == 'STOP':
        # Customer stopped voluntarily: report what remains.
        print(f'{cake_pieces - taken_total} pieces are left.')
        break
    last_take = int(command)
    taken_total += last_take
    if taken_total > cake_pieces:
        # Over-ordered: report the shortfall as a positive number.
        print(f'No more cake left! You need {taken_total - cake_pieces} pieces more.')
        break
| [
13664,
62,
1659,
62,
30560,
796,
493,
7,
15414,
28955,
198,
10394,
62,
1659,
62,
30560,
796,
493,
7,
15414,
28955,
198,
198,
23350,
62,
7857,
796,
4129,
62,
1659,
62,
30560,
1635,
9647,
62,
1659,
62,
30560,
198,
34154,
62,
83,
1685,... | 2.42963 | 270 |
import populationUtils
import datautils
# NOTE(review): the code that reads these globals is outside this chunk; the
# per-variable notes below are inferred from the names — confirm against usage.

# values for change (genetic-algorithm tuning knobs)
populationNum = 50  # individuals per generation
geneNumber = 60  # genes per individual
index = 10  # How many full runs for the average
crossover_rate = 0.7  # Starting rates
mutation_rate = 0.9

# Values for holding population info (mutable run state, reset per run)
current_population = []  # individuals of the current generation
new_population = []  # offspring built for the next generation
old_fitness = 0  # fitness recorded for the previous epoch
new_fitness = 0  # fitness of the current epoch
epoch = 0  # generation counter within one run
last_epoch_update = 0  # epoch at which fitness last changed
test_values = []  # per-epoch samples collected during a run
temp_list = []  # scratch buffer
averaged_values = []  # per-epoch values averaged over all runs
epoch_list = []  # final epoch reached by each run
total_epoch = 0  # sum of final epochs across runs
avg_epoch = 0  # total_epoch averaged over the runs
| [
11748,
3265,
18274,
4487,
198,
11748,
1366,
26791,
198,
198,
2,
3815,
329,
1487,
198,
39748,
33111,
796,
2026,
198,
70,
1734,
15057,
796,
3126,
198,
9630,
796,
838,
220,
1303,
1374,
867,
1336,
4539,
329,
262,
2811,
198,
198,
66,
23954... | 2.941935 | 155 |
# basicpackage/foo.py
# Demo submodule: importing it runs the print below (an import side effect)
# and exposes the module attribute `a`.
a = 10

print("inside 'basicpackage/foo.py' with a variable in it")
| [
2,
4096,
26495,
14,
21943,
13,
9078,
198,
198,
64,
796,
838,
198,
198,
4798,
7203,
48787,
705,
35487,
26495,
14,
21943,
13,
9078,
6,
351,
257,
7885,
287,
340,
4943,
628
] | 2.875 | 32 |
import numpy as np
from skimage.color import rgb2gray
| [
11748,
299,
32152,
355,
45941,
201,
198,
6738,
1341,
9060,
13,
8043,
1330,
46140,
17,
44605,
201,
198,
201
] | 3 | 19 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .environment import (
ContainerImage,
Environment,
VmImage,
)
from .event import (
Event,
)
from .execution import (
Execution,
ExecutionTemplate,
)
from .instance import (
Instance,
ReservationAffinity,
)
from .instance_config import (
InstanceConfig,
)
from .managed_service import (
CreateRuntimeRequest,
DeleteRuntimeRequest,
GetRuntimeRequest,
ListRuntimesRequest,
ListRuntimesResponse,
ReportRuntimeEventRequest,
ResetRuntimeRequest,
StartRuntimeRequest,
StopRuntimeRequest,
SwitchRuntimeRequest,
)
from .runtime import (
EncryptionConfig,
LocalDisk,
LocalDiskInitializeParams,
Runtime,
RuntimeAcceleratorConfig,
RuntimeAccessConfig,
RuntimeMetrics,
RuntimeShieldedInstanceConfig,
RuntimeSoftwareConfig,
VirtualMachine,
VirtualMachineConfig,
)
from .schedule import (
Schedule,
)
from .service import (
CreateEnvironmentRequest,
CreateExecutionRequest,
CreateInstanceRequest,
CreateScheduleRequest,
DeleteEnvironmentRequest,
DeleteExecutionRequest,
DeleteInstanceRequest,
DeleteScheduleRequest,
GetEnvironmentRequest,
GetExecutionRequest,
GetInstanceHealthRequest,
GetInstanceHealthResponse,
GetInstanceRequest,
GetScheduleRequest,
IsInstanceUpgradeableRequest,
IsInstanceUpgradeableResponse,
ListEnvironmentsRequest,
ListEnvironmentsResponse,
ListExecutionsRequest,
ListExecutionsResponse,
ListInstancesRequest,
ListInstancesResponse,
ListSchedulesRequest,
ListSchedulesResponse,
OperationMetadata,
RegisterInstanceRequest,
ReportInstanceInfoRequest,
ResetInstanceRequest,
RollbackInstanceRequest,
SetInstanceAcceleratorRequest,
SetInstanceLabelsRequest,
SetInstanceMachineTypeRequest,
StartInstanceRequest,
StopInstanceRequest,
TriggerScheduleRequest,
UpdateInstanceConfigRequest,
UpdateShieldedInstanceConfigRequest,
UpgradeInstanceInternalRequest,
UpgradeInstanceRequest,
)
__all__ = (
'ContainerImage',
'Environment',
'VmImage',
'Event',
'Execution',
'ExecutionTemplate',
'Instance',
'ReservationAffinity',
'InstanceConfig',
'CreateRuntimeRequest',
'DeleteRuntimeRequest',
'GetRuntimeRequest',
'ListRuntimesRequest',
'ListRuntimesResponse',
'ReportRuntimeEventRequest',
'ResetRuntimeRequest',
'StartRuntimeRequest',
'StopRuntimeRequest',
'SwitchRuntimeRequest',
'EncryptionConfig',
'LocalDisk',
'LocalDiskInitializeParams',
'Runtime',
'RuntimeAcceleratorConfig',
'RuntimeAccessConfig',
'RuntimeMetrics',
'RuntimeShieldedInstanceConfig',
'RuntimeSoftwareConfig',
'VirtualMachine',
'VirtualMachineConfig',
'Schedule',
'CreateEnvironmentRequest',
'CreateExecutionRequest',
'CreateInstanceRequest',
'CreateScheduleRequest',
'DeleteEnvironmentRequest',
'DeleteExecutionRequest',
'DeleteInstanceRequest',
'DeleteScheduleRequest',
'GetEnvironmentRequest',
'GetExecutionRequest',
'GetInstanceHealthRequest',
'GetInstanceHealthResponse',
'GetInstanceRequest',
'GetScheduleRequest',
'IsInstanceUpgradeableRequest',
'IsInstanceUpgradeableResponse',
'ListEnvironmentsRequest',
'ListEnvironmentsResponse',
'ListExecutionsRequest',
'ListExecutionsResponse',
'ListInstancesRequest',
'ListInstancesResponse',
'ListSchedulesRequest',
'ListSchedulesResponse',
'OperationMetadata',
'RegisterInstanceRequest',
'ReportInstanceInfoRequest',
'ResetInstanceRequest',
'RollbackInstanceRequest',
'SetInstanceAcceleratorRequest',
'SetInstanceLabelsRequest',
'SetInstanceMachineTypeRequest',
'StartInstanceRequest',
'StopInstanceRequest',
'TriggerScheduleRequest',
'UpdateInstanceConfigRequest',
'UpdateShieldedInstanceConfigRequest',
'UpgradeInstanceInternalRequest',
'UpgradeInstanceRequest',
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
12131,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2... | 3.068692 | 1,514 |
# Generated by Django 2.1.4 on 2018-12-06 19:53
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
19,
319,
2864,
12,
1065,
12,
3312,
678,
25,
4310,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# coding=utf-8
import sys
import Leap
import time
import pandas as pd
import glob
# NOTE(review): this is Python 2 code (bare `print` statements below); it
# will not run under Python 3 without changes.

# One buffer per recorded parameter; the count can be changed (the original
# comment referenced 12 parameters, the code uses 51).
data = [[] for _ in range(51)]
init_time = time.time()

if __name__ == "__main__":
    # main()
    # Prints one blank line per buffer (Python 2 bare `print`) — presumably
    # leftover debugging output; verify before removing.
    for i in data:
        print
    # Write the buffers to ./CSV_1/200.csv, one buffer per column, with no
    # index or header row.
    df = pd.DataFrame(data=data)
    df = df.transpose()
    df.to_csv("./CSV_1/%s.csv" % (str(200)), sep=',', index=False, header=False)
    # path_file_number=glob.glob('D:/case/test/testcase/checkdata/*.py')  # or: list files under a specified directory
    path_file_number=len(glob.glob(pathname='/Users/lujie/Desktop/GestureRecognize-with-Leap-Motion /CSV_1/*.csv'))  # count of CSV files currently in the output folder
    print path_file_number
2,
19617,
28,
40477,
12,
23,
198,
11748,
25064,
198,
11748,
33927,
198,
11748,
640,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
15095,
198,
198,
7890,
796,
16410,
60,
329,
4808,
287,
2837,
7,
4349,
15437,
220,
1303,
1105,
32573,... | 1.9 | 320 |
"""
This module handles player movement and state for task 4.1 from Coding Campus 2018 Python course
(Dungeon Game)
"""
import random
import logging
import threading
from copy import copy
from dungeon_game.dungeon_map import DungeonMap
import dungeon_game.utils as utils
import dungeon_game.log as log
import dungeon_game.config as config
from dungeon_game.decorators import log_decorator, debug_log_decorator
logger = logging.getLogger(log.LOGGER_NAME)
| [
37811,
201,
198,
1212,
8265,
17105,
2137,
3356,
290,
1181,
329,
4876,
604,
13,
16,
422,
327,
7656,
25005,
2864,
11361,
1781,
201,
198,
7,
30128,
6281,
3776,
8,
201,
198,
37811,
201,
198,
201,
198,
11748,
4738,
201,
198,
11748,
18931,
... | 3.22449 | 147 |
import dask
import numpy as np
import pytest
import xarray as xr
from vcm.cubedsphere.regridz import (
_mask_weights,
regrid_vertical,
)
@pytest.mark.parametrize(
[
"p_in_shape",
"p_in_chunks",
"f_in_shape",
"f_in_chunks",
"p_out_shape",
"p_out_chunks",
"expected_shape",
],
[
((4, 6), (1, -1), (4, 5), (1, -1), (4, 6), (1, -1), (4, 5)),
((4, 6), (1, 1), (4, 5), (2, -1), (4, 6), (3, 1), (4, 5)),
((4, 6), (1, 1), (4, 5), None, (4, 6), (3, 1), (4, 5)),
((4, 4, 6), (1, 1, -1), (4, 4, 5), (2, 3, -1), (4, 4, 6), (3, 2, 1), (4, 4, 5)),
((4, 4, 6), (1, 1, -1), (4, 4, 5), (2, 3, -1), (4, 4, 3), (3, 2, 1), (4, 4, 2)),
],
ids=[
"2d-contiguous-chunks",
"2d-non-contiguous-chunks",
"2d-one-unchunked-array",
"3d-non-contiguous-chunks",
"3d-non-contiguous-chunks-new-nlevels",
],
)
| [
11748,
288,
2093,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
11748,
2124,
18747,
355,
2124,
81,
198,
6738,
410,
11215,
13,
66,
549,
276,
2777,
1456,
13,
260,
25928,
89,
1330,
357,
198,
220,
220,
220,
4808,
27932... | 1.683186 | 565 |
# ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2018-2021 www.open3d.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ----------------------------------------------------------------------------
import open3d as o3d
| [
2,
16529,
10541,
198,
2,
532,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
4946,
18,
35,
25,
7324,
13,
9654,
18,
67,
13,
2398,
220,
220,
220,
220,
220,
220,... | 3.78553 | 387 |
from datetime import date, datetime
from typing import Any, Callable, Mapping
import pytz
# Module-private sentinel: a unique object compared by identity, letting
# callers distinguish "argument omitted" from an explicitly passed None.
_SENTINEL = object()
| [
6738,
4818,
8079,
1330,
3128,
11,
4818,
8079,
198,
6738,
19720,
1330,
4377,
11,
4889,
540,
11,
337,
5912,
198,
198,
11748,
12972,
22877,
198,
198,
62,
50,
3525,
1268,
3698,
796,
2134,
3419,
628,
198
] | 3.194444 | 36 |
from django.contrib import admin

from .models import Wish


# Make the Wish model manageable through the Django admin site, using the
# default ModelAdmin options.
admin.site.register(Wish)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
764,
27530,
1330,
23447,
628,
198,
28482,
13,
15654,
13,
30238,
7,
54,
680,
8,
198
] | 3.222222 | 27 |
from faf_lua_editor import FAFLuaEditor
import os
if __name__ == "__main__":
    # Roots of the FAForever checkout whose unit/projectile .lua scripts
    # will be rewritten in place.
    rootdirs = ["../FAForever_GitHub/units", "../FAForever_GitHub/projectiles"]

    # Recursively collect every .lua file under the configured roots.
    filepaths = []
    for rootdir in rootdirs:
        for subdir, _dirs, files in os.walk(rootdir):
            for filename in files:
                if filename.endswith(".lua"):
                    filepaths.append(os.path.join(subdir, filename))

    editor = FAFLuaEditor()

    # SBOZhanaseeBomb01_script.lua uses triple-colon syntax the editor does
    # not understand, so exclude it. (The original popped only the first
    # match while iterating by index; filtering is equivalent and also safe
    # if several copies exist.)
    filepaths = [p for p in filepaths if 'SBOZhanaseeBomb01_script.lua' not in p]

    nr_files_to_edit = len(filepaths)
    print(nr_files_to_edit)

    # Best-effort pass: keep going when a single file fails and report all
    # failures at the end. `except Exception` (instead of the original bare
    # `except:`) no longer swallows KeyboardInterrupt/SystemExit.
    failed_files = []
    for file_path in filepaths:
        try:
            print("\nOpening ", file_path)
            editor.upvalue_moho_functions_in_file(file_path)
        except Exception:
            failed_files.append(file_path)
            print("FAILED editing/upvaluing ", file_path)

    if failed_files:
        print("\nfailed and hence skipped files:\n", failed_files)
    else:
        print("\nEditing/Optimizing worked for ALL %d files!" % nr_files_to_edit)

# ---------
# Notes for the moho_sim_reference changes:
#
# Removed all the User and Core functions as we probably don't want to touch them anyway
#
# Removed all the "base" functions and all the functions with "_c_" in the name (only "_c_CreateEntity"
# and "_c_CreateShield") as touching either of them can hard crash the game for some reason
#
# | [
6738,
277,
1878,
62,
40211,
62,
35352,
1330,
9677,
3697,
6413,
17171,
198,
11748,
28686,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1303,
6808,
15908,
82,
796,
14631,
40720,
7708,
16351,
332,
... | 2.299569 | 1,392 |
''' This module defines the class UpdateNodesName. UpdateNodesName class is designed
to retrieve the node name and update the name on the Graphic model object.
The available methods include:
* update_protein_names
Description: retrieve names from Uniprot and update protein nodes
How to run this module
$ cd [git repo]/code/reasoningtool/kg-construction
$ python3 UpdateNodesName.py
'''
# BEGIN config.json format
# {
# "url":"bolt://localhost:7687"
# "username":"xxx",
# "password":"xxx"
# }
# END config.json format
__author__ = 'Deqing Qu'
__copyright__ = 'Oregon State University'
__credits__ = ['Deqing Qu', 'Stephen Ramsey']
__license__ = 'MIT'
__version__ = '0.1.0'
__maintainer__ = ''
__email__ = ''
__status__ = 'Prototype'
from Neo4jConnection import Neo4jConnection
from QueryUniprotExtended import QueryUniprotExtended
import json
import sys
from time import time
from QueryMyGene import QueryMyGene
import requests_cache
import re, os
# configure requests package to use the "orangeboard.sqlite" cache
#requests_cache.install_cache('orangeboard')
# specifiy the path of orangeboard database
pathlist = os.path.realpath(__file__).split(os.path.sep)
RTXindex = pathlist.index("RTX")
dbpath = os.path.sep.join([*pathlist[:(RTXindex+1)],'data','orangeboard'])
requests_cache.install_cache(dbpath)
# --- Script body: for every protein node, copy its Gene Ontology (GO)
# annotations into graph relationships (is_capable_of / expressed_in). ---

t = time()

# Read Neo4j connection settings; the expected config.json format is
# documented at the top of this module.
f = open('config.json', 'r')
config_data = f.read()
f.close()
config = json.loads(config_data)

mg = QueryMyGene()
# NOTE(review): `_driver` is a private attribute of Neo4jConnection; each
# call below opens a fresh session.
conn = Neo4jConnection(config['url'], config['username'], config['password'])

# Lookup tables from node id -> node UUID. The transaction callbacks
# (get_proteins, get_molfunc, ...) are defined elsewhere in this file.
protein_dict = conn._driver.session().read_transaction(get_proteins)
molfunc_dict = conn._driver.session().read_transaction(get_molfunc)
cellcomp_dict = conn._driver.session().read_transaction(get_cellcomp)
seed_node_uuid = conn._driver.session().read_transaction(get_seed_node_uuid)

i = 0  # relationships inserted so far (progress is printed every 100)
for protein_curie_id, protein_uuid in protein_dict.items():
    protein_id = protein_curie_id.replace("UniProtKB:", "")
    gene_ont_info_dict = mg.get_gene_ontology_ids_for_uniprot_id(protein_id)
    for gene_ont_id, gene_ont_dict in gene_ont_info_dict.items():
        if gene_ont_dict['ont'] == 'molecular_function':
            # protein -[is_capable_of]-> molecular_function
            if gene_ont_id in molfunc_dict:
                molfunc_uuid = molfunc_dict[gene_ont_id]
                if i % 100 == 0:
                    print("have inserted: " + str(i) + " relationships")
                i += 1
                # NOTE(review): the Cypher statement is assembled by string
                # concatenation; an id containing a quote would break it.
                # Prefer driver query parameters.
                cypher_query = "MATCH (a:protein),(b:molecular_function) WHERE a.id = \'" + protein_curie_id + "\' AND b.id=\'" + gene_ont_id + "\' CREATE (a)-[r:is_capable_of { is_defined_by: \'RTX\', predicate: \'is_capable_of\', provided_by: \'gene_ontology\', relation: \'is_capable_of\', seed_node_uuid: \'" + seed_node_uuid + "\', source_node_uuid: \'" + protein_uuid + "\', target_node_uuid: \'" + molfunc_uuid + "\'} ]->(b) RETURN type(r)"
                # print(cypher_query)
                conn._driver.session().write_transaction(lambda tx: tx.run(cypher_query))
                # print(cypher_query)
        else:
            # Any other ontology: protein -[expressed_in]-> cellular_component
            if gene_ont_id in cellcomp_dict:
                cellcomp_uuid = cellcomp_dict[gene_ont_id]
                if i % 100 == 0:
                    print("have inserted: " + str(i) + " relationships")
                i += 1
                cypher_query = "MATCH (a:protein),(b:cellular_component) WHERE a.id = \'" + protein_curie_id + "\' AND b.id=\'" + gene_ont_id + "\' CREATE (a)-[r:expressed_in { is_defined_by: \'RTX\', predicate: \'expressed_in\', provided_by: \'gene_ontology\', relation: \'expressed_in\', seed_node_uuid: \'" + seed_node_uuid + "\', source_node_uuid: \'" + protein_uuid + "\', target_node_uuid: \'" + cellcomp_uuid + "\'} ]->(b) RETURN type(r)"
                # print(cypher_query)
                conn._driver.session().write_transaction(lambda tx: tx.run(cypher_query))
                # print(cypher_query)

conn.close()
| [
7061,
6,
770,
8265,
15738,
262,
1398,
10133,
45,
4147,
5376,
13,
10133,
45,
4147,
5376,
1398,
318,
3562,
198,
1462,
19818,
262,
10139,
1438,
290,
4296,
262,
1438,
319,
262,
43029,
2746,
2134,
13,
198,
464,
1695,
5050,
2291,
25,
198,
... | 2.394495 | 1,635 |
########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from dsl_parser.tests import scaling
| [
7804,
198,
2,
15069,
357,
66,
8,
1584,
402,
13827,
4561,
2114,
21852,
12052,
13,
1439,
2489,
10395,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
4... | 3.647059 | 187 |
from __future__ import absolute_import
from weblib.logs import default_logging # noqa
from grab.error import (GrabError, DataNotFound, GrabNetworkError, # noqa
GrabMisuseError, GrabTimeoutError)
from grab.upload import UploadContent, UploadFile # noqa
from grab.base import Grab # noqa
# Human-readable package version; VERSION_NUMERIC is the same value as a
# comparable integer tuple, e.g. (0, 6, 41).
__version__ = '0.6.41'
VERSION_NUMERIC = tuple(map(int, __version__.split('.')))
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
356,
2436,
571,
13,
6404,
82,
1330,
4277,
62,
6404,
2667,
220,
1303,
645,
20402,
198,
198,
6738,
5552,
13,
18224,
1330,
357,
48400,
12331,
11,
6060,
3673,
21077,
11,
2533... | 2.783217 | 143 |
import logging
import logging.config
import multiprocessing
import os
import queue
import sqlite3
import sys
import threading
import webbrowser
import winsound
from configparser import ConfigParser
from datetime import datetime
from tkinter import Image
import cv2
import dlib
import numpy
import telegram
from PyQt5.QtCore import pyqtSignal, QThread, QTimer, Qt, QRegExp
from PyQt5.QtGui import QIcon, QImage, QPixmap, QRegExpValidator, QTextCursor
from PyQt5.QtWidgets import QMainWindow, QApplication, QMessageBox, QDialog
from PyQt5.uic import loadUi
from PyQt5.uic.properties import QtGui
from PIL import Image, ImageDraw, ImageFont
# Face-detection worker thread.
# Toggle face tracking (face-tracking CheckBox click handler).
# Toggle face recognition (face-recognition CheckBox click handler).
# Toggle the alarm system (alarm-system CheckBox click handler).
# Toggle debug mode (debug-mode CheckBox click handler).
# Set the confidence threshold (confidence-threshold slider handler).
# Set the auto-alarm threshold (auto-alarm-threshold slider handler).
# Histogram equalization (histogram-equalization button click handler).
# "Update configuration" button click handler.
# TelegramBot: test whether the connection succeeded.
# CoreUI implementation class.
if __name__ == '__main__':
    # Configure logging from the bundled config file, then start the Qt
    # application and hand control to its event loop. CoreUI is presumably
    # the main-window class defined earlier in this file — confirm.
    logging.config.fileConfig('./config/logging.cfg')
    app = QApplication(sys.argv)
    window = CoreUI()
    window.show()
    # exec_() blocks until the window closes; its status code becomes the
    # process exit code.
    sys.exit(app.exec_())
| [
11748,
18931,
198,
11748,
18931,
13,
11250,
198,
11748,
18540,
305,
919,
278,
198,
11748,
28686,
198,
11748,
16834,
198,
11748,
44161,
578,
18,
198,
11748,
25064,
198,
11748,
4704,
278,
198,
11748,
3992,
40259,
198,
11748,
7864,
633,
198,... | 1.562147 | 708 |
import turtle
from time import sleep

# Simple projectile animation: a ball moves with a constant horizontal step
# and a vertical step that shrinks each frame, tracing a parabola.
fps = 30            # frames per second
gravity = 10/fps    # current vertical step per frame (decreases every frame)
force = 5           # constant horizontal step per frame

screen = turtle.Screen()
# turs[0] draws the visible ball; turs[1] is a second pen kept 10 px above.
turs = [turtle.Turtle(), turtle.Turtle()]
screen.tracer(0)    # disable auto-refresh; screen.update() is called per frame
turs[0].color('black', 'red')
for tur in turs:
    tur.pensize(2)
    tur.hideturtle()
turs[0].goto(-150,150)
turs[1].goto(-150,160)
turs[1].clear()

# Animate 50 frames.
for x in range(50):
    # Move both pens by the current velocity.
    for tur in turs:
        tur.goto(tur.pos()[0] + force, tur.pos()[1] + gravity)
    # Redraw the ball at its new position.
    turs[0].clear()
    turs[0].begin_fill()
    turs[0].circle(20)
    turs[0].end_fill()
    screen.update()
    # Decrease the vertical step each frame: positive at first (rising),
    # then negative (falling) — constant gravity on an initial upward speed.
    gravity -= 10/fps
    sleep(1/fps)

screen.exitonclick()
11748,
28699,
198,
6738,
640,
1330,
3993,
198,
198,
29647,
796,
1542,
198,
46453,
796,
838,
14,
29647,
198,
3174,
796,
642,
198,
198,
9612,
796,
28699,
13,
23901,
3419,
198,
83,
1834,
796,
685,
83,
17964,
13,
51,
17964,
22784,
28699,
... | 2.17037 | 270 |
import requests
import random
from html import unescape
from botcommands.get_members import get_members
def get_chuck(name=None, channel_members=None):
"""{'owners': [{'uid': 'f4089cdf5fc8ebe433d5b9f49b66d619', 'username': 'pastorhudson', 'fullName': 'Ron Hudson'}],
'admins': [], 'writers': [{'uid': '7d368eae6a7292b4215ca46da021b919', 'username': 'sakanakami', 'fullName': 'Joe Eafrati'},
{'uid': 'b3543e75d25a5b5b45e551c3cccf0e19', 'username': 'ihuman', 'fullName': 'Ethan Connor'}],
'readers': [], 'bots': [{'uid': 'a5465087aede61be961a6bb3bf964f19', 'username': 'morethanmarvin',
'fullName': ''}, {'uid': 'ae0918c8e416811c7e6a4a737d39b119', 'username': 'marvn', 'fullName': ''}],
'restrictedBots': [{'uid': '228b5863ba9c6a97c2103f7d5f59bf19', 'username': 'rssbot', 'fullName': 'RSS Bot'}]}"""
observations = [
"This is very jouvinile.",
"I can't even. . . ",
"I'm so sorry. . . I was created. . ."
"Does this make you happy? Because it doesn't make me happy.",
"I hope you don't plan on doing this all day. . .",
"Would you grow up already?",
]
joke = ""
if not name:
joke_names = channel_members
todays_joker = "@" + random.choice(joke_names)
joke = request_joke(todays_joker)
elif name == 'bomb':
joke_names = channel_members
for joker in joke_names:
joke += "\n".join([request_joke("@" + joker) + "\n"])
else:
joke = request_joke(name)
msg = "`" + random.choice(observations) + '`\n\n'
msg += joke
return msg
| [
11748,
7007,
198,
11748,
4738,
198,
6738,
27711,
1330,
555,
41915,
198,
6738,
10214,
9503,
1746,
13,
1136,
62,
30814,
1330,
651,
62,
30814,
628,
198,
198,
4299,
651,
62,
354,
1347,
7,
3672,
28,
14202,
11,
6518,
62,
30814,
28,
14202,
... | 2.19891 | 734 |
"""keywords command for osxphotos CLI"""
import json
import click
import yaml
import osxphotos
from .common import DB_ARGUMENT, DB_OPTION, JSON_OPTION, get_photos_db
from .list import _list_libraries
@click.command()
@DB_OPTION
@JSON_OPTION
@DB_ARGUMENT
@click.pass_obj
@click.pass_context
def keywords(ctx, cli_obj, db, json_, photos_library):
"""Print out keywords found in the Photos library."""
# below needed for to make CliRunner work for testing
cli_db = cli_obj.db if cli_obj is not None else None
db = get_photos_db(*photos_library, db, cli_db)
if db is None:
click.echo(ctx.obj.group.commands["keywords"].get_help(ctx), err=True)
click.echo("\n\nLocated the following Photos library databases: ", err=True)
_list_libraries()
return
photosdb = osxphotos.PhotosDB(dbfile=db)
keywords = {"keywords": photosdb.keywords_as_dict}
if json_ or cli_obj.json:
click.echo(json.dumps(keywords, ensure_ascii=False))
else:
click.echo(yaml.dump(keywords, sort_keys=False, allow_unicode=True))
| [
37811,
2539,
10879,
3141,
329,
28686,
87,
24729,
43749,
37811,
198,
198,
11748,
33918,
198,
198,
11748,
3904,
198,
11748,
331,
43695,
198,
198,
11748,
28686,
87,
24729,
198,
198,
6738,
764,
11321,
1330,
20137,
62,
1503,
38,
5883,
3525,
... | 2.553191 | 423 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2017, 2021 by Brendt Wohlberg <brendt@ieee.org>
# All rights reserved. BSD 3-clause License.
# This file is part of the SPORCO-CUDA package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""Usage example: CUDA utility functions"""
from __future__ import print_function
from builtins import range
import sporco_cuda.util as cu
ndev = cu.device_count()
print('Found %d CUDA device(s)' % ndev)
if ndev > 0:
print('Current device id: %d' % cu.current_device())
mbc = 1024.0**2
print('Id Model Total memory Free Memory')
for n in range(ndev):
cu.current_device(n)
mf, mt = cu.memory_info()
nm = cu.device_name(n)
print('%2d %-20s %8.0f MB %8.0f MB' % (n, nm, mt/mbc, mf/mbc))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
34,
8,
2177,
11,
33448,
416,
19252,
83,
370,
48988,
3900,
1279,
4679,
358,
83,
31,
494,
1453,
13,
23... | 2.411444 | 367 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
from typing import Any, List, Tuple, Union
import torch
from detectron2.layers import interpolate
class Keypoints:
"""
Stores keypoint annotation data. GT Instances have a `gt_keypoints` property
containing the x,y location and visibility flag of each keypoint. This tensor has shape
(N, K, 3) where N is the number of instances and K is the number of keypoints per instance.
The visibility flag follows the COCO format and must be one of three integers:
* v=0: not labeled (in which case x=y=0)
* v=1: labeled but not visible
* v=2: labeled and visible
"""
def __init__(self, keypoints: Union[torch.Tensor, np.ndarray, List[List[float]]]):
"""
Arguments:
keypoints: A Tensor, numpy array, or list of the x, y, and visibility of each keypoint.
The shape should be (N, K, 3) where N is the number of
instances, and K is the number of keypoints per instance.
"""
device = keypoints.device if isinstance(keypoints, torch.Tensor) else torch.device("cpu")
keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=device)
assert keypoints.dim() == 3 and keypoints.shape[2] == 3, keypoints.shape
self.tensor = keypoints
@property
def to_heatmap(self, boxes: torch.Tensor, heatmap_size: int) -> torch.Tensor:
"""
Arguments:
boxes: Nx4 tensor, the boxes to draw the keypoints to
Returns:
heatmaps:
A tensor of shape (N, K) containing an integer spatial label
in the range [0, heatmap_size**2 - 1] for each keypoint in the input.
valid:
A tensor of shape (N, K) containing whether each keypoint is in the roi or not.
"""
return _keypoints_to_heatmap(self.tensor, boxes, heatmap_size)
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Keypoints":
"""
Create a new `Keypoints` by indexing on this `Keypoints`.
The following usage are allowed:
1. `new_kpts = kpts[3]`: return a `Keypoints` which contains only one instance.
2. `new_kpts = kpts[2:10]`: return a slice of key points.
3. `new_kpts = kpts[vector]`, where vector is a torch.ByteTensor
with `length = len(kpts)`. Nonzero elements in the vector will be selected.
Note that the returned Keypoints might share storage with this Keypoints,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return Keypoints([self.tensor[item]])
return Keypoints(self.tensor[item])
# TODO make this nicer, this is a direct translation from C2 (but removing the inner loop)
def _keypoints_to_heatmap(
keypoints: torch.Tensor, rois: torch.Tensor, heatmap_size: int
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Encode keypoint locations into a target heatmap for use in SoftmaxWithLoss across space.
Maps keypoints from the half-open interval [x1, x2) on continuous image coordinates to the
closed interval [0, heatmap_size - 1] on discrete image coordinates. We use the
continuous-discrete conversion from Heckbert 1990 ("What is the coordinate of a pixel?"):
d = floor(c) and c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate.
Arguments:
keypoints: tensor of keypoint locations in of shape (N, K, 3).
rois: Nx4 tensor of rois in xyxy format
heatmap_size: integer side length of square heatmap.
Returns:
heatmaps: A tensor of shape (N, K) containing an integer spatial label
in the range [0, heatmap_size**2 - 1] for each keypoint in the input.
valid: A tensor of shape (N, K) containing whether each keypoint is in
the roi or not.
"""
if rois.numel() == 0:
return rois.new().long(), rois.new().long()
offset_x = rois[:, 0]
offset_y = rois[:, 1]
scale_x = heatmap_size / (rois[:, 2] - rois[:, 0])
scale_y = heatmap_size / (rois[:, 3] - rois[:, 1])
offset_x = offset_x[:, None]
offset_y = offset_y[:, None]
scale_x = scale_x[:, None]
scale_y = scale_y[:, None]
x = keypoints[..., 0]
y = keypoints[..., 1]
x_boundary_inds = x == rois[:, 2][:, None]
y_boundary_inds = y == rois[:, 3][:, None]
x = (x - offset_x) * scale_x
x = x.floor().long()
y = (y - offset_y) * scale_y
y = y.floor().long()
x[x_boundary_inds] = heatmap_size - 1
y[y_boundary_inds] = heatmap_size - 1
valid_loc = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size)
vis = keypoints[..., 2] > 0
valid = (valid_loc & vis).long()
lin_ind = y * heatmap_size + x
heatmaps = lin_ind * valid
return heatmaps, valid
@torch.no_grad()
def heatmaps_to_keypoints(maps: torch.Tensor, rois: torch.Tensor) -> torch.Tensor:
"""
Extract predicted keypoint locations from heatmaps.
Args:
maps (Tensor): (#ROIs, #keypoints, POOL_H, POOL_W). The predicted heatmap of logits for
each ROI and each keypoint.
rois (Tensor): (#ROIs, 4). The box of each ROI.
Returns:
Tensor of shape (#ROIs, #keypoints, 4) with the last dimension corresponding to
(x, y, logit, score) for each keypoint.
When converting discrete pixel indices in an NxN image to a continuous keypoint coordinate,
we maintain consistency with :meth:`Keypoints.to_heatmap` by using the conversion from
Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate.
"""
offset_x = rois[:, 0]
offset_y = rois[:, 1]
widths = (rois[:, 2] - rois[:, 0]).clamp(min=1)
heights = (rois[:, 3] - rois[:, 1]).clamp(min=1)
widths_ceil = widths.ceil()
heights_ceil = heights.ceil()
num_rois, num_keypoints = maps.shape[:2]
xy_preds = maps.new_zeros(rois.shape[0], num_keypoints, 4)
width_corrections = widths / widths_ceil
height_corrections = heights / heights_ceil
keypoints_idx = torch.arange(num_keypoints, device=maps.device)
for i in range(num_rois):
outsize = (int(heights_ceil[i]), int(widths_ceil[i]))
roi_map = interpolate(maps[[i]], size=outsize, mode="bicubic", align_corners=False).squeeze(
0
) # #keypoints x H x W
# softmax over the spatial region
max_score, _ = roi_map.view(num_keypoints, -1).max(1)
max_score = max_score.view(num_keypoints, 1, 1)
tmp_full_resolution = (roi_map - max_score).exp_()
tmp_pool_resolution = (maps[i] - max_score).exp_()
# Produce scores over the region H x W, but normalize with POOL_H x POOL_W,
# so that the scores of objects of different absolute sizes will be more comparable
roi_map_scores = tmp_full_resolution / tmp_pool_resolution.sum((1, 2), keepdim=True)
w = roi_map.shape[2]
pos = roi_map.view(num_keypoints, -1).argmax(1)
x_int = pos % w
y_int = (pos - x_int) // w
assert (
roi_map_scores[keypoints_idx, y_int, x_int]
== roi_map_scores.view(num_keypoints, -1).max(1)[0]
).all()
x = (x_int.float() + 0.5) * width_corrections[i]
y = (y_int.float() + 0.5) * height_corrections[i]
xy_preds[i, :, 0] = x + offset_x[i]
xy_preds[i, :, 1] = y + offset_y[i]
xy_preds[i, :, 2] = roi_map[keypoints_idx, y_int, x_int]
xy_preds[i, :, 3] = roi_map_scores[keypoints_idx, y_int, x_int]
return xy_preds
| [
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
1439,
6923,
33876,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
6738,
19720,
1330,
4377,
11,
7343,
11,
309,
29291,
11,
4479,
201,
198,
11748,
28034,
201,
198,
201,... | 2.292384 | 3,427 |
# pylint: disable=C0103,R0912
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#
#Redistribution and use in source and binary forms, with or without
#
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This module provides the target for running the unit tests.
"""
import os
import sys
from distutils.cmd import Command
from datafinder_distutils.configuration import BuildConfiguration
__version__ = "$Revision-Id:$"
_UNITTEST_OUTPUT_DIR = "build/unittest"
_NOSE_DEFAULT_SCRIPT = "nosetests-script.py"
class test(Command):
""" Runs all unit tests. """
description = "Runs all unit tests."
user_options = [("nosecommand=",
None,
"Path and name of the nose command."),
("outputformat=",
None,
"Specifies the output format of the test results." \
+ "Formats: xml, standard out. Default: standard out."),
("coveragecommand=",
None,
"Optionally, path and name of the coverage command."),
("coverageoutputformat=",
None,
"Specifies the output format of the coverage report." \
+ "Formats: xml, html. Default: html")]
def __init__(self, distribution):
""" Constructor. """
self.verbose = None
self.nosecommand = None
self.outputformat = None
self.coveragecommand = None
self.coverageoutputformat = None
self.__buildConfiguration = BuildConfiguration()
Command.__init__(self, distribution)
def initialize_options(self):
""" Definition of command options. """
self.nosecommand = _NOSE_DEFAULT_SCRIPT
self.outputformat = None
self.coveragecommand = "coverage"
self.coverageoutputformat = None
self.verbose = False
def finalize_options(self):
""" Set final values of options. """
self.verbose = self.distribution.verbose
if sys.platform == "win32" and self.nosecommand == _NOSE_DEFAULT_SCRIPT:
self.nosecommand = os.path.join(os.path.normpath(sys.exec_prefix), "Scripts", self.nosecommand)
def run(self):
""" Perform command actions. """
# Run sub commands
for commandName in self.get_sub_commands():
self.run_command(commandName)
# Run tests
testdir = os.path.join("test", "unittest")
if self.outputformat == "xml":
noseOptions = "--with-xunit --xunit-file=" + _UNITTEST_OUTPUT_DIR + "/xunit.xml %s"
else:
noseOptions = "--verbosity=2 -d %s"
noseCommand = self.nosecommand + " " + noseOptions % (testdir)
if not self.coverageoutputformat is None:
noseCommand = self.coveragecommand \
+ " run --branch --source=src/datafinder,test/unittest/datafinder_test " \
+ noseCommand
else:
noseCommand = "%s %s" % (sys.executable, noseCommand)
if self.verbose:
print(noseCommand)
os.system(noseCommand)
if not self.coverageoutputformat is None:
if self.coverageoutputformat == "html":
coverageCommand = "%s %s --omit=*gen* -d %s" % (self.coveragecommand,
self.coverageoutputformat,
_UNITTEST_OUTPUT_DIR)
else: # xml
coverageCommand = "%s %s --omit=*gen*" % (self.coveragecommand, self.coverageoutputformat)
if self.verbose:
print(coverageCommand)
os.system(coverageCommand)
def _runGenTarget(self):
""" Checks whether the gen build target is available. Within a source
distribution this may not the case. """
return os.path.exists(os.path.join(self.__buildConfiguration.distutilTargetPackagePath,
"gen.py"))
sub_commands = [("_prepare", None), ("gen", _runGenTarget)]
| [
2,
279,
2645,
600,
25,
15560,
28,
34,
486,
3070,
11,
49,
2931,
1065,
201,
198,
2,
720,
35063,
3,
220,
201,
198,
2,
720,
30515,
669,
3,
201,
198,
2,
4586,
32068,
25,
720,
10430,
3,
720,
6935,
1967,
3,
720,
18009,
1166,
12,
7390... | 2.244742 | 2,615 |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from textless.data.cpc_feature_reader import CpcFeatureReader
from textless.data.hubert_feature_reader import HubertFeatureReader
from textless.data.kmeans_quantizer import KMeansQuantizer
from textless.checkpoint_manager import CHECKPOINT_MANAGER
from textless.vocoders.tacotron2.vocoder import TacotronVocoder
DENSE_MODELS = {
"hubert-base-ls960": HubertFeatureReader,
"cpc-big-ll6k": CpcFeatureReader,
}
QUANTIZER_MODELS = {
"kmeans": KMeansQuantizer,
}
# TODO: add kwargs everywhere
| [
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
287,
262,
198,
2,
38559,
24290,
2393,
287,
262,
6808,
8619,
286,
428,
2723,
5509,
13,
198,
198,
... | 2.978166 | 229 |
# Standard library.
import logging
import math
import os
import pickle
import random
import subprocess
import urllib
import urllib.request

# Third-party.
import dask.array
import h5py
import numpy as np
import torch
from pytorch_pretrained_bert import BertTokenizer, BertModel
# URLs of the Moses preprocessing perl scripts, pinned to a fixed commit of
# the mosesdecoder repository so downloads are reproducible.

# Per-language list of prefixes (abbreviations etc.) after which a period
# does not end a sentence; consumed by the Moses tokenizer.
nonbreaking_url = (
    'https://raw.githubusercontent.com/moses-smt/mosesdecoder'
    '/ef028446f3640e007215b4576a4dc52a9c9de6db/scripts/share'
    '/nonbreaking_prefixes/nonbreaking_prefix')
# Script that normalizes punctuation (quotes, dashes, spacing, ...).
normalize_punct_url = (
    'https://raw.githubusercontent.com/moses-smt/mosesdecoder'
    '/ef028446f3640e007215b4576a4dc52a9c9de6db/scripts/tokenizer'
    '/normalize-punctuation.perl')
# Script that strips non-printing characters.
remove_nonprint_url = (
    'https://raw.githubusercontent.com/moses-smt/mosesdecoder'
    '/ef028446f3640e007215b4576a4dc52a9c9de6db/scripts/tokenizer'
    '/remove-non-printing-char.perl')
# The Moses word-level tokenizer itself.
tokenizer_url = (
    'https://raw.githubusercontent.com/moses-smt/mosesdecoder'
    '/ef028446f3640e007215b4576a4dc52a9c9de6db/scripts/tokenizer'
    '/tokenizer.perl')

# Module-level logger shared by the corpus classes below.
logger = logging.getLogger('fr2en')
class LanguageCorpus:
    """
    This is the most basic corpus and base class for other corpora.

    It uses a perl script from Moses to tokenize and `subword-nmt` to form
    BPE vocabulary. These are standard tools for preprocessing, see e.g.
    https://github.com/pytorch/fairseq/blob/master/examples/translation/prepare-wmt14en2de.sh # noqa: E501

    It outputs sequences of integers indexing into the vocabulary.

    Moses is available at https://github.com/moses-smt/mosesdecoder.
    """

    # Root directories for corpora and downloaded Moses scripts, resolved
    # relative to this source file (two levels up, then 'data').
    data_dir = os.path.join(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'data')

    moses_dir = os.path.join(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
        'data', 'moses')

    def __init__(self,
                 name,
                 shuffle=True,
                 max_length=200):
        """
        Args:
            name: subdirectory of `data_dir` where all artifacts are written.
            shuffle: whether `create` shuffles sentence pairs before saving.
            max_length: maximum length of a sentence in BPE tokens.
        """
        self.name = name
        self.shuffle = shuffle
        self.max_length = max_length
        # Ensure the directory for downloaded tokenizer scripts exists.
        os.makedirs(os.path.join(self.moses_dir, 'tokenizer'), exist_ok=True)

    def _clean(self, datafiles, max_size=None, use_cache=False):
        """
        Downloads Moses perl scripts if necessary, and uses them to normalize
        punctuation and remove non-printable characters.

        Args:
            datafiles: list of dicts mapping language code -> input file path.
            max_size: optional cap on the number of sentences per language.
            use_cache: reuse previously cleaned files if present.

        Returns:
            (out_path, langs): common path prefix of the cleaned files and the
            list of language codes seen.
        """
        # Download the cleaning scripts if they are not cached locally.
        normpunct_fn = normalize_punct_url.split('/')[-1]
        normpunct_path = os.path.join(self.moses_dir, 'tokenizer', normpunct_fn)
        remnon_fn = remove_nonprint_url.split('/')[-1]
        remnon_path = os.path.join(self.moses_dir, 'tokenizer', remnon_fn)
        if not os.path.isfile(normpunct_path):
            urllib.request.urlretrieve(
                normalize_punct_url, filename=normpunct_path)
        if not os.path.isfile(remnon_path):
            urllib.request.urlretrieve(
                remove_nonprint_url, filename=remnon_path)
        # Prepare an output directory.
        out_path = os.path.join(self.data_dir, self.name, 'cleaned')
        os.makedirs(os.path.join(self.data_dir, self.name), exist_ok=True)
        # Concatenate datasets for each language into tmp.<lang>.
        # NOTE(review): paths are interpolated unquoted into shell commands;
        # paths containing spaces or shell metacharacters will break this.
        langs = set()
        for dataset in datafiles:
            for lang in dataset:
                langs.add(lang)
                if not use_cache or not os.path.isfile(f'{out_path}.{lang}'):
                    os.system(f'cat {dataset[lang]} >> tmp.{lang}')
        # Clean datasets for each language.
        for lang in langs:
            if not use_cache or not os.path.isfile(f'{out_path}.{lang}'):
                logger.info(f'Cleaning {lang} combined dataset.')
                # A huge default effectively means "no limit".
                max_size = 100000000000 if max_size is None else max_size
                os.system(f'head -n {max_size} tmp.{lang} '
                          f'| perl {normpunct_path} {lang} '
                          f'| perl {remnon_path} > {out_path}.{lang}')
                os.system(f'rm -rf tmp.{lang}')
            else:
                logger.info(
                    f'Using previously cleaned dataset {out_path}.{lang}.')
        return out_path, list(langs)

    def _tokenize(self, data_path, langs, use_cache=False):
        """
        Tokenizes the cleaned text with the Moses tokenizer perl script
        (word-level tokenization; BPE is applied later in `_encode`).

        Returns the common path prefix of the tokenized files.
        """
        # Download the tokenizer script if missing.
        tokenizer_fn = tokenizer_url.split('/')[-1]
        tokenizer_path = os.path.join(self.moses_dir, 'tokenizer', tokenizer_fn)
        if not os.path.isfile(tokenizer_path):
            urllib.request.urlretrieve(tokenizer_url, filename=tokenizer_path)
        # The tokenizer needs per-language nonbreaking-prefix files.
        nonbreaking_dir = \
            os.path.join(self.moses_dir, 'share', 'nonbreaking_prefixes')
        os.makedirs(nonbreaking_dir, exist_ok=True)
        nonbreaking_fn = nonbreaking_url.split('/')[-1]
        nonbreaking_path = os.path.join(nonbreaking_dir, nonbreaking_fn)
        for lang in langs:
            if not os.path.isfile(f'{nonbreaking_path}.{lang}'):
                urllib.request.urlretrieve(
                    f'{nonbreaking_url}.{lang}',
                    filename=f'{nonbreaking_path}.{lang}')
        tok_path = os.path.join(self.data_dir, self.name, 'tokens')
        for lang in langs:
            if not use_cache or not os.path.isfile(f'{tok_path}.{lang}'):
                logger.info(f'Tokenizing dataset {data_path}.{lang}.')
                os.system(
                    f'cat {data_path}.{lang} '
                    f'| perl {tokenizer_path} -threads 8 -a -l {lang} '
                    f'> {tok_path}.{lang}')
            else:
                logger.info(
                    f'Using previously tokenized dataset {data_path}.{lang}')
        return tok_path

    def _encode(self, tok_path, langs, joint_vocab_size, use_cache=False):
        """
        Tokenizes sentences using `subword-nmt` and converts them to sequences
        of integers.

        Learns a joint BPE over both languages, builds a joint vocabulary with
        four special tokens ([PAD], [UNK], [CLS], [SEP]) prepended, applies
        the BPE codes, and maps every subword to its vocabulary index.  Each
        sentence becomes [CLS] + tokens + [SEP] + padding, truncated to
        `self.max_length + 1`.
        """
        vocab_path = os.path.join(self.data_dir, self.name, 'vocab')
        freqs_path = os.path.join(self.data_dir, self.name, 'freqs')
        codes_path = os.path.join(self.data_dir, self.name, 'bpe_codes')
        bpe_path = os.path.join(self.data_dir, self.name, 'int_toks')
        # Learn joint BPE codes and per-language frequency files.
        if (not use_cache or not os.path.isfile(f'{freqs_path}.{langs[0]}')
                or not os.path.isfile(codes_path)):
            logging.info('Learning joint BPE.')
            learn_cmd = (
                'subword-nmt learn-joint-bpe-and-vocab '
                f'--input {tok_path}.{langs[0]} {tok_path}.{langs[1]} '
                f'-s {joint_vocab_size // 2} -o {codes_path} '
                f'--write-vocabulary '
                f'{freqs_path}.{langs[0]} {freqs_path}.{langs[1]}')
            os.system(learn_cmd)
        else:
            logging.info('Using previously learned joint BPE.')
        logging.info(f'Preparing joint vocabulary of size at most '
                     f'{joint_vocab_size + 4}.')
        # NOTE(review): `_filter_sents` is not defined in this class or
        # anywhere visible in this file — presumably defined elsewhere;
        # confirm before running.
        self._filter_sents(tok_path, langs, use_cache)
        # Add special tokens to frequencies (word plus num of occurrences).
        freqs = ['[PAD] 1000', '[UNK] 1000', '[CLS] 1000', '[SEP] 1000']
        # Interleave the two languages' frequency lists, skipping duplicates,
        # so the joint vocabulary covers both.
        with open(f'{freqs_path}.{langs[0]}', 'r') as f_freqs, \
                open(f'{freqs_path}.{langs[1]}', 'r') as g_freqs:
            line1 = f_freqs.readline()
            line2 = g_freqs.readline()
            seen = set()
            while line1 and line2:
                f1 = line1.split()
                f2 = line2.split()
                # Skip malformed lines and words already taken.
                # NOTE(review): if a file ends while skipping, readline()
                # returns '' forever and this inner loop does not terminate;
                # assumed not to happen on well-formed freqs files — confirm.
                while len(f1) < 2 or f1[0] in seen:
                    line1 = f_freqs.readline()
                    f1 = line1.split()
                seen.add(f1[0])
                while len(f2) < 2 or f2[0] in seen:
                    line2 = g_freqs.readline()
                    f2 = line2.split()
                seen.add(f2[0])
                freqs.append(line1.strip())
                freqs.append(line2.strip())
                line1 = f_freqs.readline()
                line2 = g_freqs.readline()
        # Keep the most frequent entries plus the four special tokens.
        freqs = freqs[:joint_vocab_size + 4]
        with open(f'{freqs_path}.txt', 'w') as f_freqs:
            f_freqs.write('\n'.join(freqs))
        # word -> integer index mapping.
        wtoi = {
            word.split()[0]: idx for idx, word in enumerate(freqs)
        }
        # Save vocabulary.
        with open(f'{vocab_path}.txt', 'w') as f_vocab:
            f_vocab.write('\n'.join(
                word.split()[0] for idx, word in enumerate(freqs)))
        bpe_toks = {}
        for lang in langs:
            # Apply the BPE codes.
            if not use_cache or not os.path.isfile(f'{bpe_path}.{lang}'):
                logging.info(f'Applying BPE for language {lang}.')
                with open(f'{tok_path}.filtered.{lang}', 'r') as f_in:
                    apply_cmd = [
                        'subword-nmt', 'apply-bpe', '-c', codes_path,
                        '--vocabulary', f'{freqs_path}.txt',
                    ]
                    bpe_sents = subprocess.check_output(
                        apply_cmd, stdin=f_in).decode('utf-8').split('\n')
                # [CLS] + indices (+[UNK] for OOV) + [SEP] + [PAD]...,
                # truncated to max_length + 1.
                bpe_toks[lang] = [
                    ([wtoi['[CLS]']] + [wtoi[word]
                                        if word in wtoi else wtoi['[UNK]']
                                        for word in sent.split()]
                     + [wtoi['[SEP]']]
                     + [wtoi['[PAD]']] * (
                         self.max_length - len(sent.split()) - 1)
                     )[:self.max_length + 1]
                    for sent in bpe_sents if sent.split()
                ]
                with open(f'{bpe_path}.{lang}', 'wb') as f_bpe:
                    pickle.dump(bpe_toks[lang], f_bpe)
            else:
                logging.info(f'Using previously calculated BPE tokenization '
                             f'for {lang}.')
                with open(f'{bpe_path}.{lang}', 'rb') as f_bpe:
                    bpe_toks[lang] = pickle.load(f_bpe)
        return bpe_toks

    def _save(self, data, valid_size, dtype='int32'):
        """
        Saves the datasets to HDF5 files, one per language.

        The last `valid_size` sentences form the validation split; the rest
        form the training split.  Returns the list of HDF5 file paths.
        """
        h5path = os.path.join(self.data_dir, self.name)
        for lang in data:
            h5file = f'{h5path}/{lang}.h5'
            logging.info(f'Saving {lang} dataset to {h5file}')
            # Bug fix: with valid_size == 0 the original slices were
            # data[lang][:-0] == [] (empty training set!) and
            # data[lang][-0:] == everything.  Treat 0 as "no validation
            # split" instead.
            if valid_size:
                train_data = data[lang][:-valid_size]
                valid_data = data[lang][-valid_size:]
            else:
                train_data = data[lang]
                valid_data = data[lang][:0]
            with h5py.File(h5file, 'w') as f:
                train_ds = f.create_dataset(
                    'train', data=train_data, dtype=np.int32)
                train_ds.attrs['dtype'] = dtype
                valid_ds = f.create_dataset(
                    'valid', data=valid_data, dtype=np.int32)
                valid_ds.attrs['dtype'] = dtype
        return [f'{h5path}/{lang}.h5' for lang in data]

    def _shuffle(self, toks):
        """Shuffles the sentences in `toks`, keeping languages aligned."""
        logging.info('Shuffling datasets.')
        new_toks = {}
        # Zip the per-language lists so each sentence pair moves as a unit.
        toks_list = list(zip(*[toks[lang] for lang in toks]))
        random.shuffle(toks_list)
        d = list(zip(*toks_list))
        for i, lang in enumerate(toks):
            new_toks[lang] = d[i]
        return new_toks

    def create(self, datafiles, joint_vocab_size, max_size=None, valid_size=0,
               use_cache=False):
        """Creates train and validation datasets from files `datafiles`."""
        out_path, langs = self._clean(datafiles, max_size, use_cache)
        tok_path = self._tokenize(out_path, langs, use_cache)
        bpe_toks = self._encode(tok_path, langs, joint_vocab_size, use_cache)
        if self.shuffle:
            bpe_toks = self._shuffle(bpe_toks)
        return self._save(bpe_toks, valid_size, dtype='int32')
class BertCorpus(LanguageCorpus):
    """
    This is a `LanguageCorpus` which uses BERT's multilingual BPE vocabulary
    to tokenize.

    BERT's multilingual vocabulary supports 100 languages in one, so it has
    approximately 114,000 tokens.
    """

    def _encode(self, raw_text_path, langs, use_cache=False):
        """
        Encodes sentences listed one per line in file `raw_text_path` as seqs
        of integers indexing into the BERT multilingual vocabulary.

        Returns (toks, lengths): per-language padded index sequences and the
        per-sentence token counts (before BOS/EOS/PAD were added).

        NOTE(review): this override has a different signature from the parent
        `LanguageCorpus._encode` (no `joint_vocab_size`); the parent `create`
        cannot call it — subclasses use their own `create`.
        """
        # NOTE(review): `_filter_sents` is not defined anywhere visible in
        # this file — presumably provided elsewhere; confirm.
        self._filter_sents(raw_text_path, langs, use_cache)
        # Load saved tokenized data if we cached it during a previous run.
        int_tok_path = os.path.join(self.data_dir, self.name, f'int_tok.pickle')
        if use_cache and os.path.isfile(int_tok_path):
            logging.info(f'Loading BPE tokenized data from {int_tok_path}.')
            try:
                with open(int_tok_path, 'rb') as f:
                    return pickle.load(f)
            except Exception as e:
                logging.warning(
                    f'Loading cached BPE tokenized int data failed: {str(e)}.')
        # Load Bert tokenizer.
        logging.info(f'Encoding data as BPE token indices.')
        # WARNING: If you change the tokenizer, then make sure the above
        # hard-coded bos, eos and pad token indices are correct.
        # NOTE(review): `self.bos`/`self.eos`/`self.pad` are read below but
        # never assigned in this class or its visible base — presumably set
        # elsewhere; confirm.
        tokenizer = BertTokenizer.from_pretrained(
            'bert-base-multilingual-cased', do_lower_case=False)
        # Tokenize the sentences in the given files.
        lengths = {}
        ts = {}
        for lang in langs:
            with open(f'{raw_text_path}.filtered.{lang}', 'r') as f:
                logging.info(f'Converting {lang} text to BPE token indices.')
                ts[lang] = [
                    tokenizer.convert_tokens_to_ids(
                        tokenizer.tokenize(sent))[:self.max_length]
                    for sent in f
                ]
                lengths[lang] = [len(sent) for sent in ts[lang]]
        # Vectors will have length `max_len + 1` to account for BOS.
        max_len = max([ll for lang in langs for ll in lengths[lang]])
        toks = {}
        for lang in langs:
            logging.info(f'Adding BOS, EOS and PAD tokens for {lang}.')
            toks[lang] = [
                ([self.bos] + sent + [self.eos]
                 + [self.pad] * (max_len - len(sent) - 1))[:max_len + 1]
                for sent in ts[lang]
            ]
        # Save vocabulary to file. (It will be called `vocab.txt`.)
        vocab_dir = os.path.join(self.data_dir, self.name)
        tokenizer.save_vocabulary(vocab_dir)
        # Save BPE tokenized data so we do not have to recompute if we rerun.
        with open(int_tok_path, 'wb') as f:
            logging.info(f'Saving BPE tokenized data to {int_tok_path}.')
            pickle.dump((toks, lengths), f)
        return toks, lengths

    def _save_with_lens(self, data, lens, valid_size, dtype='int32'):
        """
        Saves the datasets to one HDF5 file per language together with
        the list of the sentence lengths.

        This separates `valid_size` sentences from the end of the training
        dataset to form the validation set.  Returns the HDF5 file paths.
        """
        # Bug fix: with valid_size == 0 the original computed [:-0] == []
        # (empty training set) and [-0:] == everything.  Treat 0 as "no
        # validation split".
        train_slice = slice(None, -valid_size) if valid_size else slice(None)
        valid_slice = slice(-valid_size, None) if valid_size else slice(0, 0)
        h5path = os.path.join(self.data_dir, self.name)
        for lang in data:
            with h5py.File(f'{h5path}/{lang}.h5', 'w') as f:
                train_ds = f.create_dataset(
                    'train', data=data[lang][train_slice])
                train_ds.attrs['dtype'] = dtype
                train_lens_ds = f.create_dataset(
                    'train_lens', data=lens[lang][train_slice])
                train_lens_ds.attrs['dtype'] = dtype
                valid_ds = f.create_dataset(
                    'valid', data=data[lang][valid_slice])
                valid_ds.attrs['dtype'] = dtype
                valid_lens_ds = f.create_dataset(
                    'valid_lens', data=lens[lang][valid_slice])
                valid_lens_ds.attrs['dtype'] = dtype
        return [f'{h5path}/{lang}.h5' for lang in data]

    def _shuffle_with_lens(self, toks, lens):
        """
        Shuffles datasets which have associated sentence length lists.

        Returns new (toks, lens) dicts; the inputs are not mutated, so
        callers must use the return value.
        """
        logging.info('Shuffling datasets.')
        new_toks, new_lens = {}, {}
        # Zip token lists and length lists together so every sentence keeps
        # its length and its translation after shuffling.
        toks_lens = (
            [toks[lang] for lang in toks] + [lens[lang] for lang in lens])
        toks_lens = list(zip(*toks_lens))
        random.shuffle(toks_lens)
        d = list(zip(*toks_lens))
        for i, lang in enumerate(toks):
            new_toks[lang] = d[i]
            new_lens[lang] = d[i + len(toks)]
        return new_toks, new_lens

    def create(self, datafiles, max_size=None, valid_size=0, use_cache=False):
        """Creates train and validation datasets from files `datafiles`."""
        out_path, langs = self._clean(datafiles, max_size, use_cache)
        toks, lens = self._encode(out_path, langs)
        if self.shuffle:
            toks, lens = self._shuffle_with_lens(toks, lens)
        return self._save_with_lens(toks, lens, valid_size, dtype='int32')
class DropNthTokenCorpus(BertCorpus):
    """
    This is a corpus where every nth word has been dropped. The BOS token
    and the first token of the sentence are never dropped. The remaining
    non-padding tokens are always terminated by a EOS token.

    This keeps `n` versions of each sentence where the token dropping
    starts at different offsets.
    """

    def _subsample(self, toks, lens):
        """
        Discards every nth token from `toks`.

        For each sentence, `self.n` variants are produced (one per drop
        offset `k`).  NOTE(review): `self.n` is never assigned in this class
        or its visible bases — presumably set by a constructor defined
        elsewhere; confirm.
        """
        logging.info(f'Discarding every {self.n}th token.')
        max_len = min([len(toks[lang][0]) for lang in toks])
        # Dropping 1 of every n positions leaves this many slots (+1 spare).
        new_max_len = (max_len - max_len // self.n) + 1
        new_toks = {lang: [] for lang in toks}
        new_lens = {lang: [] for lang in lens}
        for lang in toks:
            for sent, ll in zip(toks[lang], lens[lang]):
                for k in range(self.n):
                    # Keep token i unless (i - 1) % n == k; position 1 (the
                    # first real token after BOS) is always kept.  Positions
                    # ll+1..ll+2 are forced to EOS so the terminator survives
                    # even if it was one of the dropped positions.
                    new_sent = [
                        self.eos if ll + 1 <= i and i <= ll + 2 else w
                        for i, w in enumerate(sent)
                        if ((i - 1) % self.n != k or i == 1)
                    ]
                    new_sent = \
                        new_sent + [self.pad] * (new_max_len - len(new_sent))
                    new_toks[lang].append(new_sent)
                    # New content length = old length minus the number of
                    # dropped content positions at this offset.
                    new_lens[lang].append(
                        ll - (ll + self.n - k - 1) // self.n + int(k == 0))
        return new_toks, new_lens

    def create(self, datafiles, max_size=None, valid_size=0, use_cache=False):
        """
        Create the dataset from `datafiles` by dropping every nth token.
        """
        out_path, langs = self._clean(datafiles, max_size, use_cache)
        toks, lens = self._encode(out_path, langs)
        if self.shuffle:
            # Bug fix: `_shuffle_with_lens` builds and returns new
            # containers; the original discarded its return value, so
            # shuffling silently had no effect.
            toks, lens = self._shuffle_with_lens(toks, lens)
        toks, lens = self._subsample(toks, lens)
        return self._save_with_lens(toks, lens, valid_size, dtype='int32')
class KeepRandomPercentCorpus(BertCorpus):
    """
    This is a corpus which contains each sentence from `tok` starting
    with BOS and the first token of the sentence and with `p` percent total
    tokens randomly kept. The rest of the tokens are discarded.

    The indices of discarded tokens agree across languages.
    """

    def _terminate(self, sent):
        """
        Ensure `sent`'s content is EOS-terminated and return its content
        length (tokens excluding BOS and the terminator, matching the
        convention that lens exclude BOS/EOS).
        """
        for i, tok in enumerate(sent):
            if tok == self.eos:
                # Already terminated; tokens 1..i-1 are content.
                return i - 1
            if tok == self.pad:
                # EOS was dropped; turn the first PAD into EOS.
                sent[i] = self.eos
                return i - 1
        # Neither EOS nor PAD present: the window is entirely content.
        # Leave the tokens unchanged (matches the original behavior) and
        # count everything after BOS as content.
        return len(sent) - 1

    def _subsample(self, toks, lens):
        """
        Keep `self.p` percent tokens from every sentence. Removed tokens
        can be padding as well as part of the sentence.

        NOTE(review): `self.p` is never assigned in this class or its
        visible bases — presumably set by a constructor elsewhere; confirm.
        """
        logging.info(f'Keeping random set of {self.p * 100}% of tokens.')
        max_len = min([len(toks[lang][0]) for lang in toks])
        n = math.ceil(max_len * self.p)
        new_toks = {lang: [] for lang in toks}
        new_lens = {lang: [] for lang in lens}
        lang1, lang2 = tuple(new_toks.keys())
        for sent1, l1, sent2, l2 in zip(toks[lang1], lens[lang1],
                                        toks[lang2], lens[lang2]):
            # Choose the same kept indices for both languages so the
            # subsampled sentence pair stays aligned.  BOS and the first
            # real token (positions 0 and 1) are never dropped.
            indices = list(range(2, max_len))
            random.shuffle(indices)
            indices = sorted(indices[:n])
            new_sent1 = sent1[:2] + [sent1[i] for i in indices]
            new_sent2 = sent2[:2] + [sent2[i] for i in indices]
            # Add back the EOS token if it was dropped and record the new
            # content length.  Bug fix: the original appended to `new_lens`
            # only when a PAD token was found, so `new_lens` drifted out of
            # sync with `new_toks` whenever EOS survived or the window held
            # no PAD; now a length is recorded for every sentence.
            new_lens[lang1].append(self._terminate(new_sent1))
            new_lens[lang2].append(self._terminate(new_sent2))
            new_toks[lang1].append(new_sent1)
            new_toks[lang2].append(new_sent2)
        return new_toks, new_lens

    def create(self, datafiles, max_size=None, valid_size=0, use_cache=False):
        """
        Create the dataset from `datafiles` by keeping `p` percent of
        the input/output tokens.
        """
        out_path, langs = self._clean(datafiles, max_size, use_cache)
        toks, lens = self._encode(out_path, langs)
        if self.shuffle:
            # Bug fix: `_shuffle_with_lens` returns new containers; the
            # original discarded its return value, so shuffling was a no-op.
            toks, lens = self._shuffle_with_lens(toks, lens)
        toks, lens = self._subsample(toks, lens)
        return self._save_with_lens(toks, lens, valid_size, dtype='int32')
class WindowedCorpus(BertCorpus):
"""
This is a corpus formed by selecting a window of tokens of length
`window_size` from another corpus.
The window is applied at two positions.
1. At the beginning of the sentence. This ensures the model learns how
to begin a sentence.
2. Starting at the middle of the sentence. This requires knowledge of the
length of each sentence.
"""
def _window(self, toks, lens, window_size):
"""
Selects two windows of size `window_size` from each sentence in
`toks` that has length given in `lens`.
"""
# lens do not include BOS or EOS tokens.
new_toks = {}
new_lens = {}
for lang in toks:
new_toks[lang] = []
new_lens[lang] = []
for i, sent in enumerate(toks[lang]):
n = lens[lang][i]
new_toks[lang].append(sent[:window_size])
new_lens[lang].append(min(n, window_size - 1))
new_toks[lang].append(sent[n // 2:n // 2 + window_size])
new_lens[lang].append(min(n - n // 2, n // 2 + window_size))
return new_toks, new_lens
def create(self, datafiles, max_size=None, window_size=25, valid_size=0,
use_cache=False):
"""
Create a dataset from `datafiles` by randomly selecting a window
of `window_size` tokens from every sentence.
"""
out_path, langs = self._clean(datafiles, max_size, use_cache)
toks, lens = self._encode(out_path, langs)
if self.shuffle:
toks, lens = self._shuffle_with_lens(toks, lens)
toks, lens = self._window(toks, lens)
return self._save_with_lens(toks, lens, valid_size, dtype='int32')
class EmbeddingCorpus(BertCorpus):
"""
This class represents a corpus composed of embedding vectors. Presumably
any model training on it would use MSE as the loss function.
"""
def _embed(self, toks):
"""
This converts the lists of integers in `toks` are converted to
embedding vectors using BERT's multlingual case model.
"""
def apply_emb(x):
"""
This function applies the BERT embedding layer to `x`. It is
called by the mapping function. It must be a sub-function so
it has access to `bert_emb`.
`dask.array.map_blocks()` requires the mapping function to
always return an array with the same shape as the calling array's
`chunksize`.
"""
emb = np.array(bert_emb(torch.LongTensor(x)).data, dtype=np.float32)
if x.shape[0] < chunk_size:
# This is a technical step to prevent returning too few rows.
dims = (chunk_size - x.shape[0], max_length, self.emb_size)
return np.concatenate((emb, np.zeros(dims, dtype=np.float32)))
return emb
bert_model = BertModel.from_pretrained('bert-base-multilingual-cased')
bert_model.eval()
bert_emb = bert_model.embeddings.word_embeddings
embs = {}
chunk_size = 1024
for lang in toks:
max_length = len(toks[lang][0])
toks[lang] = dask.array.from_array(
np.array(toks[lang], dtype=np.int32),
chunks=(chunk_size, max_length))
logger.info(f'Calculating embeddings for language {lang}.')
embs[lang] = toks[lang].map_blocks(
apply_emb,
chunks=(chunk_size, max_length, self.emb_size),
dtype=np.float32,
new_axis=[2])
self.bos_emb = np.array(
bert_emb(torch.tensor([self.bos])).data[0], dtype=np.float32)
self.eos_emb = np.array(
bert_emb(torch.tensor([self.eos])).data[0], dtype=np.float32)
self.pad_emb = np.array(
bert_emb(torch.tensor([self.pad])).data[0], dtype=np.float32)
return embs
def _save(self, embs, valid_size):
"""Saves the dask arrays in `embs` to HDF5 files."""
h5path = os.path.join(self.data_dir, self.name)
h5files = []
for lang in embs:
h5file = f'{h5path}/{lang}.h5'
h5files.append(h5file)
embs[lang][:-valid_size].to_hdf5(h5file, 'train')
embs[lang][-valid_size:].to_hdf5(h5file, 'valid')
with h5py.File(h5file, 'w') as f:
f['train'].attrs['dtype'] = 'float32'
f['valid'].attrs['dtype'] = 'float32'
return h5files
def create(self, datafiles, max_size=None, valid_size=0, use_cache=False):
"""Creates train and validation datasets from files `datafiles`."""
out_path, langs = self._clean(datafiles, max_size, use_cache)
toks, _ = self._encode(out_path, langs)
if self.shuffle:
toks = self._shuffle(toks)
embs = self._embed(toks)
# Save the datasets to an hdf5 file on disk.
return self._save(embs, valid_size)
class LowResolutionEmbeddingCorpus(EmbeddingCorpus):
"""
This is a corpus of BERT embedding vectors which have been averaged
by a sliding window of size `window_size` moving `window_step` tokens
each step.
The EOS and PAD tokens are preserved *without* averaging.
Presumably any model training on this dataset would use MSE as the
loss function.
"""
def _avg_embs(self, embs, lengths):
"""
Averages the embeddings of `embs` which represent sentences with
lengths given by `lengths`.
"""
def eos_and_pad(emb):
"""
Restore EOS marker and PAD tokens after it. This is called by
`apply_along_axis()`.
This must be a subfunction of `_avg_embs()` so that it has access
to the `max_length` variable.
"""
n = int(round(emb[0]))
row = n // max_len
col = n % max_len
if row >= len(lengths):
return emb
elif lengths[row] == col - 1:
return eos_emb
elif (col - 1 > lengths[row]
and col <= lengths[row] + self.window_size):
return pad_emb
return emb
logger.info('Calcuating average embeddings.')
bos = (self.bos_emb.reshape((1, 1, self.emb_size))
.repeat(embs.shape[0], axis=0))
avg_embs = dask.array.concatenate(
[bos] + [
embs[:, i:i + self.window_size, :].mean(
axis=1, keepdims=True)
for i in range(1, embs.shape[1], self.window_step)
], axis=1).astype(np.float32)
# Add a coordinate to the front of every embedding vector containing
# a number that determines the sentence and token of the vector.
# This is the only way to get that info to `eos_and_pad`.
eos_emb = np.concatenate([[-1], self.eos_emb])
pad_emb = np.concatenate([[-1], self.pad_emb])
max_len = int(avg_embs.shape[1])
indices = dask.array.arange(avg_embs.shape[0] * max_len)
indices = indices.reshape((avg_embs.shape[0], max_len, 1))
avg_embs = dask.array.concatenate([indices, avg_embs], axis=2)
avg_embs = avg_embs.rechunk((1024, max_len, len(eos_emb)))
# The dask version of `apply_along_axis()` is broken or does not behave
# like the numpy version, so we have to use `map_blocks()`.
logger.info('Fixing EOS and PAD tokens.')
avg_embs = avg_embs.map_blocks(
lambda b: np.apply_along_axis(eos_and_pad, 2, b),
chunks=(1024, max_len, len(eos_emb)),
dtype=np.float32)
# Drop indices that were stored as first embedding dimension.
avg_embs = avg_embs[:, :, 1:]
return avg_embs
def _save(self, avg_embs, valid_size):
"""
Saves the dask arrays containing averaged embeddings to HDF5 files.
"""
h5path = os.path.join(self.data_dir, self.name)
h5files = []
for lang in avg_embs:
logger.info(f'Saving HDF5 file for language {lang}.')
h5file = f'{h5path}/{lang}.h5'
h5files.append(h5file)
logging.info(f'Saving {lang} dataset to {h5file}')
with h5py.File(h5file, 'w') as f_out:
# Use `store()`. `to_hdf5` produces empty fie for some reason.
train = avg_embs[lang][:-valid_size]
train_ds = f_out.require_dataset(
'train', shape=train.shape, dtype=train.dtype)
train_ds.attrs['dtype'] = 'float32'
dask.array.store(train, train_ds)
valid = avg_embs[lang][-valid_size:]
valid_ds = f_out.require_dataset(
'valid', shape=valid.shape, dtype=valid.dtype)
valid_ds.attrs['dtype'] = 'float32'
dask.array.store(valid, valid_ds)
def create(self, datafiles, max_size=None, valid_size=0, use_cache=False):
"""Creates train and validation datasets from files `datafiles`."""
out_path, langs = self._clean(datafiles, max_size, use_cache)
toks, lens = self._encode(out_path, langs)
if self.shuffle:
toks, lens = self._shuffle_with_lens(toks, lens)
embs = self._embed(toks)
avg_embs = {}
for lang in langs:
avg_embs[lang] = self._avg_embs(embs[lang], lens[lang])
return self._save(avg_embs, valid_size)
| [
11748,
288,
2093,
13,
18747,
198,
11748,
289,
20,
9078,
198,
11748,
18931,
198,
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
2298,
293,
198,
6738,
12972,
13165,
354,
62,
5310,
13363,
62,
4835,
1330,
2... | 2.007766 | 15,194 |
__author__ = 'argi'
import cv2
import numpy as np
| [
834,
9800,
834,
796,
705,
853,
72,
6,
198,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
628
] | 2.47619 | 21 |
# coding: u8
from tornado.util import ObjectDict
from sqlalchemy import create_engine
from sqlalchemy import (Column, Integer, Text, String, Boolean)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.attributes import InstrumentedAttribute
import settings
import utils
params = dict(
encoding='utf8',
echo=False,
pool_recycle=7200,
)
conn_str = 'sqlite:///%s' % settings.DB_PATH
engine = create_engine(conn_str, **params)
db_factory = lambda: sessionmaker(bind=engine)()
_Base = declarative_base()
if __name__ == '__main__':
metadata = Base.metadata
metadata.create_all(engine)
db = db_factory()
db.merge(User(id=1, name='admin', pwd=utils.md5('AdminDemo')))
db.commit()
db.close()
| [
2,
19617,
25,
334,
23,
198,
198,
6738,
33718,
13,
22602,
1330,
9515,
35,
713,
198,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
6738,
44161,
282,
26599,
1330,
357,
39470,
11,
34142,
11,
8255,
11,
10903,
11,
41146,
8,
19... | 2.809859 | 284 |
from file import File
exemple()
| [
6738,
2393,
1330,
9220,
198,
220,
220,
220,
220,
220,
220,
220,
220,
198,
220,
220,
220,
220,
198,
1069,
368,
1154,
3419,
198,
220,
220
] | 1.846154 | 26 |
from django.shortcuts import render
from django.http import HttpResponse
# todolist/Homepage View
# Contact Page View
# About us Page View
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
198,
2,
284,
67,
349,
396,
14,
16060,
7700,
3582,
198,
198,
2,
14039,
7873,
3582,
198,
198,
2,
7994,
514,
7873,
3582,
... | 3.25 | 44 |
# -*- coding: utf-8 -*-
"""
We are creating a DEM contour of 25m from a provided file.
The folder paths have been change to empty quotes for privacy and flexibility.
@author: dknight2
"""
# Importing the proper modules
import arcpy
from arcpy import env
from arcpy.sa import *
# Allow overwriting the output for convenience.
arcpy.env.overwriteOutput = True
# Set the workspace environment
env.workspace = r""
# Set the variables for the Contour tool
inRaster = ""
outContours = r""
contourInterval = 25
baseContour = 0
# Set up the Try/Except method to handle errors
try:
#Check out the Spatial Analyst Extension
arcpy.CheckOutExtension("Spatial_Analyst")
# Run the Contour tool
Contour(inRaster, outContours, contourInterval, baseContour)
# Check in the Spatial Analyst Extension
arcpy.CheckInExtension("Spatial_Analyst")
# Report completion message
print ("Contour complete!")
except:
# Report the error message
print ("Could not complete the Contour. Please adjust inputs and try again")
# Get any messages that the contour tool generated
arcpy.AddMessage(arcpy.GetMessages())
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
1135,
389,
4441,
257,
40101,
542,
454,
286,
1679,
76,
422,
257,
2810,
2393,
13,
201,
198,
464,
9483,
13532,
423,
587,
1487,
284,
6565,
13386,
329... | 2.806452 | 434 |
fig, ax = plt.subplots(figsize=(10,6))
gr1 = sns.lineplot(data=log_returns)
display(gr1)
%fs ls "/FileStore/"
%fs rm -r "/FileStore/import-stage"
#Create image and save to blob
plt.subplots(figsize=(10,6))
sns.lineplot(data=log_returns)
plt.savefig('/dbfs/FileStore/import-stage/gr2.png')
dbutils.fs.cp('dbfs:/FileStore/import-stage/gr2.png', "/mnt/global-cube")
%fs ls "mnt/global-cube" | [
5647,
11,
7877,
796,
458,
83,
13,
7266,
489,
1747,
7,
5647,
7857,
16193,
940,
11,
21,
4008,
201,
198,
2164,
16,
796,
3013,
82,
13,
1370,
29487,
7,
7890,
28,
6404,
62,
7783,
82,
8,
201,
198,
13812,
7,
2164,
16,
8,
201,
198,
201... | 2.189189 | 185 |
"""Functions shared between air and ground runtimes"""
import logging
import math
import struct
import time
from queue import Queue
from threading import Thread
from typing import Tuple
import pynmea2
from whitevest.lib.atomic_buffer import AtomicBuffer
from whitevest.lib.atomic_value import AtomicValue
from whitevest.lib.configuration import Configuration
from whitevest.lib.const import TELEMETRY_STRUCT_STRING, TESTING_MODE
if not TESTING_MODE:
from whitevest.lib.hardware import init_gps
def handle_exception(message: str, exception: Exception):
"""Log an exception"""
logging.error(message)
logging.exception(exception)
def write_queue_log(outfile, new_data_queue: Queue, max_lines: int = 1000) -> int:
"""If there is data in the queue, write it to the file"""
i = 0
while not new_data_queue.empty() and i < max_lines:
info = new_data_queue.get()
row_str = ",".join([str(v) for v in info])
logging.debug(row_str)
outfile.write(row_str + "\n")
i += 1
return i
def take_gps_reading(sio, gps_value: AtomicValue) -> bool:
"""Grab the most recent data from GPS feed"""
line = sio.readline()
gps = pynmea2.parse(line)
if isinstance(gps, pynmea2.types.talker.GGA):
gps_value.update(
(
gps.latitude if gps else 0.0,
gps.longitude if gps else 0.0,
float(gps.gps_qual) if gps else 0.0,
float(gps.num_sats) if gps else 0.0,
)
)
return True
return False
def gps_reception_loop(sio, gps_value: AtomicValue, continue_running: AtomicValue):
"""Loop forever reading GPS data and passing it to an atomic value"""
if not sio:
return
while continue_running.get_value():
try:
take_gps_reading(sio, gps_value)
except Exception as ex: # pylint: disable=broad-except
handle_exception("GPS reading failure", ex)
time.sleep(0)
def create_gps_thread(
configuration: Configuration, value: AtomicValue, continue_running: AtomicValue
):
"""Create a thread for tracking GPS"""
return Thread(
target=gps_reception_loop,
args=(
init_gps(configuration),
value,
continue_running,
),
daemon=True,
)
# pylint: disable=too-many-arguments
def digest_next_sensor_reading(
start_time: float,
data_queue: Queue,
current_readings: AtomicBuffer,
gps_value,
altimeter_value,
magnetometer_accelerometer_value,
) -> float:
"""Grab the latest values from all sensors and put the data in the queue and atomic store"""
now = time.time()
info = (
now - start_time,
*altimeter_value,
*magnetometer_accelerometer_value,
*gps_value,
)
if not data_queue.full():
data_queue.put(info)
current_readings.put(info)
return now
def write_sensor_log(
start_time: float,
outfile,
data_queue: Queue,
continue_running: AtomicValue,
continue_logging: AtomicValue,
):
"""Write the queue to the log until told to stop"""
lines_written = 0
last_queue_check = time.time()
while continue_running.get_value() and continue_logging.get_value():
try:
new_lines_written = write_queue_log(outfile, data_queue, 300)
if new_lines_written > 0:
lines_written += new_lines_written
if last_queue_check + 10.0 < time.time():
last_queue_check = time.time()
elapsed = last_queue_check - start_time
logging.info(
"Lines written: %d in %s seconds with %d ready",
lines_written,
elapsed,
data_queue.qsize(),
)
time.sleep(7)
except Exception as ex: # pylint: disable=broad-except
handle_exception("Telemetry log line writing failure", ex)
def transmit_latest_readings(
pcnt_to_limit: AtomicValue,
rfm9x,
last_check: float,
readings_sent: int,
start_time: float,
current_readings: AtomicBuffer,
) -> Tuple[int, float]:
"""Get the latest value from the sensor store and transmit it as a byte array"""
infos = current_readings.read()
if len(infos) < 2:
return readings_sent, last_check
info1 = infos[0]
info2 = infos[int(math.ceil(len(infos) / 2))]
if not info1 or not info2:
return readings_sent, last_check
info = (*info1, *info2)
clean_info = [float(i) for i in info]
encoded = struct.pack(
"d" + TELEMETRY_STRUCT_STRING + TELEMETRY_STRUCT_STRING,
*(pcnt_to_limit.get_value(), *clean_info)
)
current_readings.clear()
logging.debug("Transmitting %d bytes", len(encoded))
rfm9x.send(encoded)
readings_sent += 1
if last_check > 0 and last_check + 10.0 < time.time():
last_check = time.time()
logging.info(
"Transmit rate: %f/s",
float(readings_sent) / float(last_check - start_time),
)
return readings_sent, last_check
| [
37811,
24629,
2733,
4888,
1022,
1633,
290,
2323,
1057,
22355,
37811,
198,
11748,
18931,
198,
11748,
10688,
198,
11748,
2878,
198,
11748,
640,
198,
6738,
16834,
1330,
4670,
518,
198,
6738,
4704,
278,
1330,
14122,
198,
6738,
19720,
1330,
30... | 2.286093 | 2,265 |
__version_tuple__ = (4, 3, 0, 'post.1')
__version__ = '4.3.0-post.1'
| [
834,
9641,
62,
83,
29291,
834,
796,
357,
19,
11,
513,
11,
657,
11,
705,
7353,
13,
16,
11537,
198,
834,
9641,
834,
796,
705,
19,
13,
18,
13,
15,
12,
7353,
13,
16,
6,
198
] | 1.916667 | 36 |
import re
cache_regex = re.compile(r"^v[\w-]+m[0-9a-fA-F]+$")
| [
11748,
302,
198,
198,
23870,
62,
260,
25636,
796,
302,
13,
5589,
576,
7,
81,
1,
61,
85,
58,
59,
86,
12,
48688,
76,
58,
15,
12,
24,
64,
12,
69,
32,
12,
37,
48688,
3,
4943,
628,
198
] | 1.666667 | 39 |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
#
####
# File: bin2csv.py
# Project: Sonstige_Uebungen
#-----
# Created Date: Sunday 09.02.2020, 16:47
# Author: Apop85
#-----
# Last Modified: Sunday 09.02.2020, 16:48
#-----
# Copyright (c) 2020 Apop85
# This software is published under the MIT license.
# Check http://www.opensource.org/licenses/MIT for further informations
#-----
# Description:
####
filename=r"C:\Users\rbald\Desktop\Values.csv"
file_writer=open(filename, "w", encoding="UTF-8")
for i in range(0,256):
new_line=str(i)+";"+str(f'{i:08b}')+'\n'
print(new_line)
file_writer.write(new_line)
file_writer.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
2,
220,
198,
4242,
198,
2,
9220,
25,
9874,
17,
40664,
13,
9078,
198,
2,
4935,
25,
6295,
301,
10045,
62,
52,
... | 2.501946 | 257 |
import math
import sys
import os
import requests
from requests_toolbelt.multipart.encoder import (
MultipartEncoder,
MultipartEncoderMonitor,
)
import hashlib
import zipfile
from . import exceptions
import json
import pkg_resources
from urllib.error import HTTPError
import types
class PathBuilder(object):
"""
Used to get routes to certain resources in the cravat-store download area.
Returns path string in either url or file format.
"""
_valid_path_types = set(["url", "file"])
class ProgressStager(object):
"""
Calls stage_handler when total_size passes a total_size/total_stages
increment.
stage_handler must handle the following positional arguments:
cur_stage, total_stages, cur_size, total_size
"""
def stream_multipart_post(url, fields, stage_handler=None, stages=50, **kwargs):
"""
Post the fields in fields to the url in url using a streamed
multipart/form-data request. Optionally pass in a callback function which
is called when the uploaded size passes each of total_size/stages.
"""
encoder = MultipartEncoder(fields=fields)
stager = ProgressStager(
encoder.len, total_stages=stages, stage_handler=stage_handler
)
monitor = MultipartEncoderMonitor(encoder, stager_caller)
headers = {"Content-Type": monitor.content_type}
r = requests.post(url, data=monitor, headers=headers, **kwargs)
return r
def stream_to_file(
url, fpath, stage_handler=None, stages=50, install_state=None, **kwargs
):
"""
Stream the content at a url to a file. Optionally pass in a callback
function which is called when the uploaded size passes each of
total_size/stages.
"""
try:
r = requests.get(url, stream=True, timeout=(3, None))
except requests.exceptions.ConnectionError:
r = types.SimpleNamespace()
r.status_code = 503
if r.status_code == 200:
total_size = int(r.headers.get("content-length", 0))
chunk_size = 8192
stager = ProgressStager(
total_size, total_stages=stages, stage_handler=stage_handler
)
with open(fpath, "wb") as wf:
for chunk in r.iter_content(chunk_size):
if install_state is not None and install_state["kill_signal"] == True:
raise exceptions.KillInstallException()
wf.write(chunk)
stager.increase_cur_size(len(chunk))
return r
def file_checksum(path):
"""
Get the md5 checksum of a file.
"""
if os.path.isdir(path):
raise IsADirectoryError(path)
hasher = hashlib.md5()
with open(path, "rb") as f:
for chunk in iter(lambda: f.read(256 * hasher.block_size), b""):
hasher.update(chunk)
return hasher.hexdigest()
def add_to_zipfile(full_path, zf, start=os.curdir, compress_type=zipfile.ZIP_DEFLATED):
"""
Recursively add files to a zipfile. Optionally making the path within
the zipfile relative to a base_path, default is curdir.
"""
rel_path = os.path.relpath(full_path, start=start)
zf.write(full_path, arcname=rel_path, compress_type=compress_type)
if os.path.isdir(full_path):
for item_name in os.listdir(full_path):
item_path = os.path.join(full_path, item_name)
add_to_zipfile(item_path, zf, start=start, compress_type=compress_type)
def nest_value_in_dict(d, v, keys):
"""
Put the value v, into dictionary d at the location defined by the list of
keys in keys.
Ex: d = {'a':{'b':{'c':1}}}, v = 2, keys = ['a','b','d']
results in:
d = {'a':{'b':{'c':1,'d':2}}}
"""
top_key = keys[0]
if len(keys) == 1:
d[top_key] = v
else:
if top_key not in d:
d[top_key] = {}
nest_value_in_dict(d[top_key], v, keys[1:])
def verify_against_manifest(dirpath, manifest):
"""
Verify that the files in manifest exist and have the right cksum.
Return True if all pass, throw FileIntegrityError otherwise.
"""
correct = True
for item_name, v in manifest.items():
item_path = os.path.join(dirpath, item_name)
if os.path.exists(item_path):
if type(v) == dict:
correct = os.path.isdir(item_path) and verify_against_manifest(
item_path, v
)
else:
correct = v == file_checksum(item_path)
else:
correct = False
if not (correct):
raise (exceptions.FileIntegrityError(item_path))
return correct
| [
11748,
10688,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
7007,
198,
6738,
7007,
62,
25981,
37976,
13,
16680,
541,
433,
13,
12685,
12342,
1330,
357,
198,
220,
220,
220,
7854,
541,
433,
27195,
12342,
11,
198,
220,
220,
220,
7854,
... | 2.40607 | 1,911 |
import inspect
import copy
from aorist import *
from aorist_recipes import programs
from common import endpoints
"""
Defining schema
"""
attributes = [
Attribute(KeyStringIdentifier("id")),
Attribute(StringIdentifier("author")),
Attribute(StringIdentifier("subreddit")),
Attribute(POSIXTimestamp("created_utc")),
Attribute(FreeText("title")),
Attribute(FreeText("selftext", nullable=True)),
]
"""
A row in our table is a struct.
"""
subreddit_datum = RowStruct(
name="subreddit",
attributes=attributes,
)
tmp_dir = "tmp/subreddits"
"""
Data will be replicated to Hive
"""
local = HiveTableStorage(
location=HiveLocation(MinioLocation(name='reddit')),
encoding=Encoding(NewlineDelimitedJSONEncoding()),
layout=TabularLayout(StaticTabularLayout()),
)
"""
Declaring where our subreddits live, i.e. in PushShift
"""
subreddits = ['france']#, 'newzealand']
tabular_schema = default_tabular_schema(
subreddit_datum, subreddit_datum.name, attributes
)
assets = {x: StaticDataTable(
name=x,
schema=DataSchema(tabular_schema),
setup=StorageSetup(RemoteStorageSetup(
remote=Storage(RemoteStorage(
location=RemoteLocation(
PushshiftAPILocation(
subreddit=x
)
),
layout=APIOrFileLayout(
APILayout(
PushshiftSubredditPostsAPILayout()
),
),
encoding=Encoding(
NewlineDelimitedJSONEncoding()
),
)),
)),
tag=x,
) for x in subreddits}
embedding = FasttextEmbedding(
name="embedding",
comment="Fasttext embedding of size 128",
schema=DataSchema(FasttextEmbeddingSchema(
dim=128,
source_schema=tabular_schema,
text_attribute_name="selftext",
)),
setup=StorageSetup(LocalStorageSetup(
Storage(local),
'/tmp/fasttext_embedding',
)),
source_assets=[Asset(v) for v in assets.values()],
)
assets = {
k: Asset(v) for (k, v) in assets.items()
}
assets['embedding'] = Asset(embedding)
"""
Creating the dataset
"""
subreddits = DataSet(
name="subreddits",
description="""
A selection of small region-based Subreddits to demonstrate
collecting Reddit data via [Pushshift](https://pushshift.io/).
""",
source_path=__file__,
datum_templates=[DatumTemplate(subreddit_datum)],
assets=assets,
access_policies=[],
)
"""
Dataset will be replicated.
"""
subreddits = subreddits.replicate_to_local(
Storage(local), tmp_dir, Encoding(CSVEncoding())
)
universe = Universe(
name="my_cluster",
datasets=[subreddits],
endpoints=endpoints,
)
universe.compute_uuids()
result = dag(
universe,
["UploadFasttextToMinio"],
"airflow",
programs,
dialect_preferences=[
R(),
Python([]),
Bash(),
Presto(),
],
)
print(result)
| [
11748,
10104,
198,
11748,
4866,
198,
6738,
257,
273,
396,
1330,
1635,
198,
6738,
257,
273,
396,
62,
8344,
18636,
1330,
4056,
198,
6738,
2219,
1330,
886,
13033,
198,
198,
37811,
198,
7469,
3191,
32815,
198,
37811,
198,
1078,
7657,
796,
... | 2.408867 | 1,218 |
"""
Register routes for the app
"""
from flask import Blueprint
from flask_restful import Api
from ridemyway import resources as r
from ridemyway.utils.response import ERRORS
v1 = Blueprint('v1', __name__)
api = Api(v1, catch_all_404s=True, errors=ERRORS)
add = api.add_resource
# Add routes here
add(r.All, '/all') # GET
add(r.Rides, '/rides') # GET, POST
add(r.Ride, '/rides/<int:ride_id>') # GET
add(r.Request, '/rides/<int:ride_id>/requests') # POST
| [
37811,
198,
220,
220,
220,
17296,
11926,
329,
262,
598,
198,
37811,
198,
198,
6738,
42903,
1330,
39932,
198,
6738,
42903,
62,
2118,
913,
1330,
5949,
72,
198,
6738,
5755,
3065,
1014,
1330,
4133,
355,
374,
198,
6738,
5755,
3065,
1014,
1... | 2.054745 | 274 |
import csv
import os
from collections import OrderedDict
path = ""
csv_file_name = "Train.csv"
filter_feature_name = "UserID"
directory = "data_sets\\"
user_delimiter = ','
if not os.path.exists(directory):
os.makedirs(directory)
users = {}
features = OrderedDict()
with open(csv_file_name, 'r') as f:
first_line = f.readline()
for item in first_line.split(user_delimiter):
item = item.strip()
features[item] = 0
with open(csv_file_name) as csvfile:
reader = csv.DictReader(csvfile)
index = 1
train_rows_list = []
for row in reader:
if index == 1:
index = index + 1
continue
if row[filter_feature_name] not in users.keys():
users[row[filter_feature_name]] = [row]
else:
users[row[filter_feature_name]].append(row)
index = index + 1
for user in users.keys():
with open(directory+user+'.csv', 'w') as csvfile:
writer = csv.DictWriter(csvfile, delimiter=',', lineterminator='\n', fieldnames=features)
writer.writeheader()
for row in users[user]:
featDict = OrderedDict()
for feat in features:
featDict[feat] = row[feat]
writer.writerow(featDict) | [
11748,
269,
21370,
198,
11748,
28686,
198,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6978,
796,
13538,
198,
40664,
62,
7753,
62,
3672,
796,
366,
44077,
13,
40664,
1,
198,
24455,
62,
30053,
62,
3672,
796,
366,
12982,
2389,
1,
... | 2.522727 | 440 |
import os
import numpy as np
import tensorflow as tf
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from src.layers.conv2d import Conv2DLayer
from src.layers.integration import IntegrationLayer
from src.layers.maxpool import MaxPoolLayer
from src.layers.functional import make_leaky_rectified_actfn, flatten, fully_connected
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
11192,
273,
11125,
13,
3642,
822,
13,
30604,
13,
29412,
13,
30604,
1330,
26954,
62,
26791,
198,
198,
6738,
12351,
13,
75,
6962,
13,
... | 3.514851 | 101 |
import typing as t
| [
11748,
19720,
355,
256,
198
] | 3.8 | 5 |
# -*- coding: utf-8 -*-
# The planned path the robot will take. This consists of a set of waypoints.
from collections import deque
# Construct a new planner object and set defaults.
# This method appends another planned path to the end of this
# path. The end of this path must be the same cell as the
# start of the other path
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
383,
6027,
3108,
262,
9379,
481,
1011,
13,
770,
10874,
286,
257,
900,
286,
835,
13033,
13,
198,
6738,
17268,
1330,
390,
4188,
628,
220,
220,
220,
1303,
28407,
... | 3.47 | 100 |
#!/usr/bin/env python3
import sys
if __name__ == '__main__':
try:
rot_file(sys.argv[1])
except IndexError:
print('No filename given. Nothing to do.')
except FileNotFoundError:
print(f'Could not open "{sys.argv[1]}". Sorry about that.')
except PermissionError:
print('Missing permission to write output file. How sad.')
except OSError:
print('Something terrible happened. Maybe the disk is full.')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
25064,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1949,
25,
198,
220,
220,
220,
220,
220,
220,
220,
5724,
62,
... | 2.572222 | 180 |
"""
@author: krakowiakpawel9@gmail.com
@site: e-smartdata.org
"""
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
from dash.dependencies import Input, Output
from datetime import datetime
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div([
html.Button(id='btn-1', children='Button1', n_clicks_timestamp=0),
html.Button(id='btn-2', children='Button2', n_clicks_timestamp=0),
html.Button(id='btn-3', children='Button3', n_clicks_timestamp=0),
html.Div(id='div-1')
])
@app.callback(
Output('div-1', 'children'),
[Input('btn-1', 'n_clicks_timestamp'),
Input('btn-2', 'n_clicks_timestamp'),
Input('btn-3', 'n_clicks_timestamp')]
)
if __name__ == '__main__':
app.run_server(debug=True) | [
37811,
198,
31,
9800,
25,
479,
17716,
322,
32994,
79,
707,
417,
24,
31,
14816,
13,
785,
198,
31,
15654,
25,
304,
12,
27004,
7890,
13,
2398,
198,
37811,
198,
198,
11748,
14470,
198,
11748,
14470,
62,
7295,
62,
5589,
3906,
355,
288,
... | 2.508333 | 360 |
# Created on 2017-8-31
# Usage: Lower Limit of Energy Requirements (LLER) for area of interest.
#--------------------------------------------------------------------------------------------------------------
#Import all necessary module dependencies
import arcpy, numpy, csv, os, string, re, shutil, glob
from arcpy.sa import *
arcpy.env.overwriteOutput = True
#arcpy.env.outputCoordinateSystem = arcpy.SpatialReference(3857)
def Encode(x):
"""Encodes values into 'utf-8' format"""
if isinstance(x, unicode):
return x.encode("utf-8", 'ignore')
else:
return str(x)
def ExcludeFields(table, types=[]):
"""Return a list of fields minus those with specified field types"""
fieldNames = []
fds = arcpy.ListFields(table)
for f in fds:
if f.type not in types:
fieldNames.append(f.name)
return fieldNames
def ExportToCSV(fc, output):
"""Export Data to a CSV file"""
# create the output writer
outWriter = csv.writer(open(output, 'wb'), dialect='excel')
excludeTypes = ['Geometry', 'OID']
fields = ExcludeFields(fc, excludeTypes)
# Create Search Cursor: the following only works with ArcGIS 10.1+
with arcpy.da.SearchCursor(fc, fields) as cursor:
outWriter.writerow(cursor.fields)
for row in cursor:
row = [v.decode('utf8') if isinstance(v, str) else v for v in row]
outWriter.writerow([unicode(s).encode("utf-8") for s in row])
del cursor
#Set whether tool is licensed
if __name__ == '__main__':
    # NOTE(review): isLicensed(), nutritionMetrics() and g_ESRI_variable_1..3
    # are not defined in this chunk -- presumably injected by ESRI's script
    # export tooling; confirm they exist at runtime.
    isLicensed()
    #Get the values of the input parameters
    AOI = arcpy.GetParameterAsText(0)
    continent = arcpy.GetParameterAsText(1)
    year = arcpy.GetParameterAsText(2)
    maleStature = arcpy.GetParameterAsText(3)
    femaleStature = arcpy.GetParameterAsText(4)
    #This is the path to the mosaic database with demographic information
    if continent == "Africa":
        mosaicDB = g_ESRI_variable_1
    elif continent == "Asia":
        mosaicDB = g_ESRI_variable_2
    else:
        mosaicDB = g_ESRI_variable_3
    arcpy.AddMessage("The Selected Continent is " + continent)
    #Run the Nutrition Metrics tool
    try:
        nutritionMetrics(AOI, year, maleStature, femaleStature, mosaicDB)
        #Remove the intermediate output folder
        arcpy.AddMessage("Removing Intermediate Output Folder ...")
        os.chdir("..")
        shutil.rmtree(os.path.join(arcpy.env.scratchFolder, "intermediate"))
    # BUG FIX: the handler used ``sys.exc_info()[1]`` but ``sys`` is never
    # imported in this file, so any caught error raised a NameError instead of
    # being reported. Bind the exception directly.
    except Exception as e:
        arcpy.AddError('An error occurred: {}'.format(e.args[0]))
| [
2,
15622,
319,
2177,
12,
23,
12,
3132,
198,
2,
29566,
25,
16048,
27272,
286,
6682,
24422,
357,
3069,
1137,
8,
329,
1989,
286,
1393,
13,
198,
2,
10097,
3880,
26171,
198,
198,
2,
20939,
477,
3306,
8265,
20086,
198,
11748,
10389,
9078,... | 2.755507 | 908 |
"""
This module is intended to generate and confirm tokens
"""
from itsdangerous import URLSafeTimedSerializer
from blog.config import Config
def generate_confirmation_token(email):
    """Generates e-mail confirmation token"""
    return URLSafeTimedSerializer(Config.SECRET_KEY).dumps(
        email, salt=Config.SECURITY_PASSWORD_SALT)
def confirm_token(token, max_age=600):
    """Confirms an e-mail confirmation token.

    :param token: token produced by :func:`generate_confirmation_token`
    :param max_age: maximum token age in seconds before it is rejected
    :return: the e-mail address on success; ``False`` if the token is invalid,
             tampered with, or older than ``max_age``
    """
    serializer = URLSafeTimedSerializer(Config.SECRET_KEY)
    try:
        email = serializer.loads(token,
                                 salt=Config.SECURITY_PASSWORD_SALT,
                                 # BUG FIX: max_age was hard-coded to 600,
                                 # silently ignoring the parameter.
                                 max_age=max_age)
    # BUG FIX: bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
    # only bad/expired signatures should yield False.
    except Exception:
        return False
    return email
| [
37811,
198,
1212,
8265,
318,
5292,
284,
7716,
290,
6216,
16326,
198,
37811,
198,
198,
6738,
663,
38537,
516,
1330,
37902,
6561,
8635,
14967,
276,
32634,
7509,
198,
6738,
4130,
13,
11250,
1330,
17056,
628,
198,
4299,
7716,
62,
10414,
362... | 2.444444 | 288 |
from __future__ import annotations
import typing
from typing import Optional, List, Iterable
from collections import namedtuple
import logging
from blatann.gatt.gatts_attribute import GattsAttribute, GattsAttributeProperties
from blatann.gatt.managers import GattsOperationManager
from blatann.nrf import nrf_types, nrf_events
from blatann import gatt
from blatann.bt_sig.uuids import DescriptorUuid
from blatann.uuid import Uuid
from blatann.waitables.event_waitable import IdBasedEventWaitable, EventWaitable
from blatann.exceptions import InvalidOperationException, InvalidStateException
from blatann.event_type import EventSource, Event
from blatann.event_args import *
from blatann.services.ble_data_types import BleDataStream
from blatann.gatt import PresentationFormat
if typing.TYPE_CHECKING:
from blatann.device import BleDevice
from blatann.peer import Peer
logger = logging.getLogger(__name__)
_security_mapping = {
gatt.SecurityLevel.NO_ACCESS: nrf_types.BLEGapSecModeType.NO_ACCESS,
gatt.SecurityLevel.OPEN: nrf_types.BLEGapSecModeType.OPEN,
gatt.SecurityLevel.JUST_WORKS: nrf_types.BLEGapSecModeType.ENCRYPTION,
gatt.SecurityLevel.MITM: nrf_types.BLEGapSecModeType.MITM,
}
class GattsUserDescriptionProperties(GattsAttributeProperties):
    """
    Properties used to configure the User Description characteristic descriptor.

    The most basic, set-once, read-only usage of this is ``GattsUserDescriptionProperties("my description")``
    """
    def __init__(self, value: Union[bytes, str],
                 write: bool = False,
                 security_level: gatt.SecurityLevel = gatt.SecurityLevel.OPEN,
                 max_length: int = 0,
                 variable_length: bool = False):
        """
        :param value: The value to set the user description to
        :param write: Whether or not the client can write/update the user description
        :param security_level: The security level for reads/writes
        :param max_length: The max length the user description can be set to.
                           If not supplied or less than len(value), will use the greater of the two
        :param variable_length: Whether or not this description can vary in length
        """
        # Normalize to bytes up front; strings are always stored UTF-8 encoded.
        encoded = value.encode("utf8") if isinstance(value, str) else value
        # The descriptor must be able to hold at least the initial value.
        length = max(max_length, len(encoded))
        super(GattsUserDescriptionProperties, self).__init__(
            True, write, security_level, length, variable_length, False, False)
        self.value = encoded
class GattsCharacteristicProperties(gatt.CharacteristicProperties):
    """
    Properties for Gatt Server characteristics.

    Adds no members of its own -- appears to exist as the server-side alias of
    :class:`gatt.CharacteristicProperties`; confirm before collapsing it.
    """
class GattsCharacteristic(gatt.Characteristic):
    """
    Represents a single characteristic within a service. This class is usually not instantiated directly; it
    is added to a service through :meth:`GattsService.add_characteristic`
    """
    # Record type for a queued chunk of a long write: ``offset`` is presumably
    # the byte offset into the value, ``data`` the chunk payload -- confirm
    # against the write-queue handling (not visible in this chunk).
    _QueuedChunk = namedtuple("QueuedChunk", ["offset", "data"])
    # NOTE(review): no __init__ is visible here even though the methods below
    # rely on attributes such as _value_attr, _properties and _attrs -- confirm
    # the constructor was not lost when this file was assembled.
    """
    Public Methods
    """
    def set_value(self, value, notify_client=False) -> Optional[IdBasedEventWaitable[GattsCharacteristic, NotificationCompleteEventArgs]]:
        """
        Sets the value of the characteristic.

        :param value: The value to set to. Must be an iterable type such as a str, bytes, or list of uint8 values,
                      or a BleDataStream object.
                      Length must be less than or equal to the characteristic's max length.
                      If a string is given, it will be encoded using the string_encoding property of the characteristic.
        :param notify_client: Flag whether or not to notify the client. If indications and notifications are not set up
                              for the characteristic, will raise an InvalidOperationException
        :raises: InvalidOperationException if value length is too long, or notify client set and characteristic
                 is not notifiable
        :raises: InvalidStateException if the client is not currently subscribed to the characteristic
        :return: If notify_client is true, this method will return the waitable for when the notification is sent to the client
        """
        # Validate the notify request before mutating the value so a failing
        # call leaves the characteristic unchanged.
        if notify_client and not self.notifiable:
            raise InvalidOperationException("Cannot notify client. "
                                            "{} not set up for notifications or indications".format(self.uuid))
        self._value_attr.set_value(value)
        # Notification is skipped while a client read is in progress --
        # presumably to avoid changing the value mid-read; TODO confirm.
        if notify_client and self.client_subscribed and not self._value_attr.read_in_process:
            return self.notify(None)
    def notify(self, data) -> IdBasedEventWaitable[GattsCharacteristic, NotificationCompleteEventArgs]:
        """
        Notifies the client with the data provided without setting the data into the characteristic value.
        If data is not provided (None), will notify with the currently-set value of the characteristic

        :param data: Optional data to notify the client with. If supplied, must be an iterable type such as a
                     str, bytes, or list of uint8 values, or a BleDataStream object.
                     Length must be less than or equal to the characteristic's max length.
                     If a string is given, it will be encoded using the string_encoding property of the characteristic.
        :raises: InvalidStateException if the client is not subscribed to the characteristic
        :raises: InvalidOperationException if the characteristic is not configured for notifications/indications
        :return: An EventWaitable that will trigger when the notification is successfully sent to the client. The waitable
                 also contains the ID of the sent notification which is used in the on_notify_complete event
        """
        # NOTE(review): ``value`` is computed in the two branches below but
        # never used -- the raw ``data`` is forwarded to the notification
        # manager. Looks like the converted ``value`` should be passed instead;
        # confirm against the notification manager's expectations before
        # changing behavior.
        if isinstance(data, BleDataStream):
            value = data.value
        if isinstance(data, str):
            value = data.encode(self.string_encoding)
        if not self.notifiable:
            raise InvalidOperationException("Cannot notify client. "
                                            "{} not set up for notifications or indications".format(self.uuid))
        if not self.client_subscribed:
            raise InvalidStateException("Client is not subscribed, cannot notify client")
        notification_id = self._notification_manager.notify(self, self._value_attr.handle,
                                                            self._on_notify_complete, data)
        return IdBasedEventWaitable(self._on_notify_complete, notification_id)
    def add_descriptor(self, uuid: Uuid, properties: GattsAttributeProperties,
                       initial_value=b"", string_encoding="utf8") -> GattsAttribute:
        """
        Creates and adds a descriptor to the characteristic

        .. note:: Due to limitations of the BLE stack, the CCCD, SCCD, User Description, Extended Properties,
           and Presentation Format descriptors cannot be added through this method. They must be added through the
           ``GattsCharacteristicProperties`` fields when creating the characteristic.

        :param uuid: The UUID of the descriptor to add, and cannot be the UUIDs of any of the reserved descriptor UUIDs in the note
        :param properties: The properties of the descriptor
        :param initial_value: The initial value to set the descriptor to
        :param string_encoding: The string encoding to use, if a string is set
        :return: the descriptor that was created and added to the characteristic
        """
        if isinstance(initial_value, str):
            initial_value = initial_value.encode(string_encoding)
        self.ble_device.uuid_manager.register_uuid(uuid)
        # Map the blatann security level onto softdevice access modes;
        # operations the properties disallow get NO_ACCESS.
        security = _security_mapping[properties.security_level]
        read_perm = security if properties.read else nrf_types.BLEGapSecModeType.NO_ACCESS
        write_perm = security if properties.write else nrf_types.BLEGapSecModeType.NO_ACCESS
        # Attribute storage must be able to hold at least the initial value.
        max_len = max(len(initial_value), properties.max_len)
        metadata = nrf_types.BLEGattsAttrMetadata(read_perm, write_perm, properties.variable_length,
                                                  read_auth=properties.read_auth, write_auth=properties.write_auth)
        attr = nrf_types.BLEGattsAttribute(uuid.nrf_uuid, metadata, max_len, initial_value)
        # Register with the softdevice first; its assigned handle is then
        # wrapped in the high-level GattsAttribute object.
        self.ble_device.ble_driver.ble_gatts_descriptor_add(self._value_attr.handle, attr)
        attr = GattsAttribute(self.ble_device, self.peer, self, uuid, attr.handle,
                              properties, initial_value, string_encoding)
        self._attrs.append(attr)
        return attr
def add_constant_value_descriptor(self, uuid: Uuid, value: bytes,
security_level=gatt.SecurityLevel.OPEN) -> GattsAttribute:
"""
Adds a descriptor to the characteristic which is a constant, read-only value that cannot be updated
after this call. This is a simplified parameter set built on top of :meth:`add_descriptor` for this common use-case.
.. note:: See note on :meth:`add_descriptor()` for limitations on descriptors that can be added through this method.
:param uuid: The UUID of the descriptor to add
:param value: The value to set the descriptor to
:param security_level: The security level for the descriptor
:return: The descriptor that was created and added to the characteristic
"""
props = GattsAttributeProperties(read=True, write=False, security_level=security_level,
max_length=len(value), variable_length=False, write_auth=False, read_auth=False)
return self.add_descriptor(uuid, props, value)
"""
Properties
"""
@property
def max_length(self) -> int:
"""
**Read Only**
The max possible the value the characteristic can be set to
"""
return self._properties.max_len
@property
def notifiable(self) -> bool:
"""
**Read Only**
Gets if the characteristic is set up to asynchonously notify clients via notifications or indications
"""
return self._properties.indicate or self._properties.notify
@property
def value(self) -> bytes:
"""
**Read Only**
Gets the current value of the characteristic.
Value is updated using :meth:`set_value`
"""
return self._value
@property
def client_subscribed(self) -> bool:
"""
**Read Only**
Gets if the client is currently subscribed (notify or indicate) to this characteristic
"""
return self.peer and self.cccd_state != gatt.SubscriptionState.NOT_SUBSCRIBED
@property
def attributes(self) -> Iterable[GattsAttribute]:
"""
**Read Only**
Gets all of the attributes and descriptors associated with this characteristic
"""
return tuple(self._attrs)
@property
def user_description(self) -> Optional[GattsAttribute]:
"""
**Read Only**
Gets the User Description attribute for the characteristic if set in the properties.
If the user description was not configured for the characteristic, returns ``None``
"""
return self._user_desc_attr
@property
def sccd(self) -> Optional[GattsAttribute]:
"""
**Read Only**
Gets the Server Characteristic Configuration Descriptor (SCCD) attribute if set in the properties.
If the SCCD was not configured for the characteristic, returns ``None``
"""
return self._sccd_attr
@property
def presentation_format(self) -> Optional[PresentationFormat]:
"""
**Read Only**
Gets the presentation format that was set for the characteristic.
If the presentation format was not configured for the characteristic, returns ``None``
"""
return self._presentation_format
@property
def string_encoding(self) -> str:
"""
The default method for encoding strings into bytes when a string is provided as a value
:getter: Gets the string encoding in use
:setter: Sets the string encoding to use
"""
return self._value_attr.string_encoding
@string_encoding.setter
"""
Events
"""
@property
def on_write(self) -> Event[GattsCharacteristic, WriteEventArgs]:
"""
Event generated whenever a client writes to this characteristic.
:return: an Event which can have handlers registered to and deregistered from
"""
return self._on_write
@property
def on_read(self) -> Event[GattsCharacteristic, None]:
"""
Event generated whenever a client requests to read from this characteristic. At this point, the application
may choose to update the value of the characteristic to a new value using set_value.
A good example of this is a "system time" characteristic which reports the applications system time in seconds.
Instead of updating this characteristic every second, it can be "lazily" updated only when read from.
NOTE: if there are multiple handlers subscribed to this and each set the value differently, it may cause
undefined behavior.
:return: an Event which can have handlers registered to and deregistered from
"""
return self._on_read
@property
def on_subscription_change(self) -> Event[GattsCharacteristic, SubscriptionStateChangeEventArgs]:
"""
Event that is generated whenever a client changes its subscription state of the characteristic
(notify, indicate, none).
:return: an Event which can have handlers registered to and deregistered from
"""
return self._on_sub_change
@property
def on_notify_complete(self) -> Event[GattsCharacteristic, NotificationCompleteEventArgs]:
"""
Event that is generated when a notification or indication sent to the client successfully
:return: an event which can have handlers registered to and deregistered from
"""
return self._on_notify_complete
"""
Event Handling
"""
class GattsService(gatt.Service):
    """
    Represents a registered GATT service that lives locally on the device.
    This class is usually not instantiated directly and is instead created through :meth:`GattsDatabase.add_service`.
    """
    @property
    def characteristics(self) -> List[GattsCharacteristic]:
        """
        **Read Only**
        Gets the list of characteristics in this service.
        Characteristics are added through :meth:`add_characteristic`
        """
        # Exposes the live list (not a copy); mutating it directly bypasses
        # add_characteristic and the softdevice registration it performs.
        return self._characteristics
    def add_characteristic(self, uuid: Uuid, properties: GattsCharacteristicProperties,
                           initial_value=b"", prefer_indications=True, string_encoding="utf8"):
        """
        Adds a new characteristic to the service

        :param uuid: The UUID of the characteristic to add
        :param properties: The characteristic's properties
        :param initial_value: The initial value of the characteristic. May be a string, bytearray, or list of ints
        :type initial_value: str or list or bytearray
        :param prefer_indications: Flag for choosing indication/notification if a characteristic has
                                   both indications and notifications available
        :param string_encoding: The encoding method to use when a string value is provided (utf8, ascii, etc.)
        :return: The characteristic just added to the service
        :rtype: GattsCharacteristic
        """
        if isinstance(initial_value, str):
            initial_value = initial_value.encode(string_encoding)
        # Register UUID
        self.ble_device.uuid_manager.register_uuid(uuid)
        # Create property structure
        props = nrf_types.BLEGattCharacteristicProperties(
            broadcast=properties.broadcast,
            read=properties.read,
            write_wo_resp=properties.write_no_response,
            write=properties.write,
            notify=properties.notify,
            indicate=properties.indicate,
            auth_signed_wr=False
        )
        char_md = nrf_types.BLEGattsCharMetadata(props)
        # Create cccd metadata if notify/indicate enabled
        if properties.notify or properties.indicate:
            char_md.cccd_metadata = nrf_types.BLEGattsAttrMetadata(write_permissions=_security_mapping[properties.security_level])
        if properties.sccd:
            char_md.sccd_metadata = nrf_types.BLEGattsAttrMetadata()
        if properties.presentation:
            pf = nrf_types.BLEGattsPresentationFormat(properties.presentation.format, properties.presentation.exponent,
                                                      properties.presentation.unit, properties.presentation.namespace,
                                                      properties.presentation.description)
            char_md.presentation_format = pf
        if properties.user_description:
            user_desc = properties.user_description
            # Write access to the user description is only granted when the
            # properties ask for it; reads keep the configured security level.
            user_desc_sec = _security_mapping[user_desc.security_level]
            user_desc_sec_w = user_desc_sec if user_desc.write else nrf_types.BLEGapSecModeType.NO_ACCESS
            char_md.user_desc_metadata = nrf_types.BLEGattsAttrMetadata(user_desc_sec, user_desc_sec_w,
                                                                        user_desc.variable_length,
                                                                        user_desc.read_auth, user_desc.write_auth)
            char_md.user_description = user_desc.value
            char_md.user_description_max_len = user_desc.max_len
            char_md.extended_props.writable_aux = user_desc.write
        security = _security_mapping[properties.security_level]
        attr_metadata = nrf_types.BLEGattsAttrMetadata(security, security, properties.variable_length,
                                                       read_auth=True, write_auth=True)
        attribute = nrf_types.BLEGattsAttribute(uuid.nrf_uuid, attr_metadata, properties.max_len, initial_value)
        handles = nrf_types.BLEGattsCharHandles()  # Populated by the driver call below
        self.ble_device.ble_driver.ble_gatts_characteristic_add(self.start_handle, char_md, attribute, handles)
        c = GattsCharacteristic(self.ble_device, self.peer, uuid, properties,
                                handles.value_handle, handles.cccd_handle, handles.sccd_handle, handles.user_desc_handle,
                                self._notification_manager, initial_value, prefer_indications, string_encoding)
        self.characteristics.append(c)
        return c
class GattsDatabase(gatt.GattDatabase):
    """
    Represents the entire GATT server that lives locally on the device which clients read from and write to
    """
    @property
    def services(self) -> List[GattsService]:
        """
        **Read Only**
        The list of services registered in the database
        """
        # Live list (not a copy); services are added via add_service().
        return self._services
def iter_services(self) -> Iterable[GattsService]:
"""
Iterates through all of the registered services in the database
:return: Generator of the database's services
"""
for s in self.services:
yield s
    def add_service(self, uuid: Uuid, service_type=gatt.ServiceType.PRIMARY) -> GattsService:
        """
        Adds a service to the local database

        :param uuid: The UUID for the service
        :param service_type: The type of service (primary or secondary)
        :return: The added and newly created service
        """
        # Register UUID
        self.ble_device.uuid_manager.register_uuid(uuid)
        handle = nrf_types.BleGattHandle()
        # Call code to add service to driver
        self.ble_device.ble_driver.ble_gatts_service_add(service_type.value, uuid.nrf_uuid, handle)
        service = GattsService(self.ble_device, self.peer, uuid, service_type, self._notification_manager, handle.handle)
        service.start_handle = handle.handle
        # Each new service provisionally claims handles up to 0xFFFF; the
        # previously-added service's range is truncated to end just before
        # this one starts.
        service.end_handle = 0xFFFF
        if self.services:
            self.services[-1].end_handle = service.start_handle-1
        self.services.append(service)
        return service
    def clear_pending_notifications(self):
        """
        Clears all pending notifications that are queued to be sent to the client
        """
        # Delegates to the shared notification manager used by all services.
        self._notification_manager.clear_all()
    def _on_rw_auth_request(self, driver, event):
        """
        Driver callback for read/write authorize requests.

        Only *execute write* operations (commit or cancel of a queued write) are
        answered here, with an unconditional success reply; all other requests
        are ignored by this handler.

        :param driver: the BLE driver that raised the event
        :type event: nrf_events.GattsEvtReadWriteAuthorizeRequest
        """
        if not event.write:
            return
        # execute writes can span multiple services and characteristics. Should only reply at the top-level here
        if event.write.write_op not in [nrf_events.BLEGattsWriteOperation.exec_write_req_now,
                                        nrf_events.BLEGattsWriteOperation.exec_write_req_cancel]:
            return
        params = nrf_types.BLEGattsAuthorizeParams(nrf_types.BLEGattStatusCode.success, False)
        reply = nrf_types.BLEGattsRwAuthorizeReplyParams(write=params)
        self.ble_device.ble_driver.ble_gatts_rw_authorize_reply(event.conn_handle, reply)
| [
6738,
11593,
37443,
834,
1330,
37647,
201,
198,
11748,
19720,
201,
198,
6738,
19720,
1330,
32233,
11,
7343,
11,
40806,
540,
201,
198,
6738,
17268,
1330,
3706,
83,
29291,
201,
198,
11748,
18931,
201,
198,
201,
198,
201,
198,
6738,
21451,... | 2.499715 | 8,759 |
#!/usr/bin/env python
'''
Network interface configuration control tool.
Usage:
iftool [options] configure [--host=<host>] <template> [<destination>]
Options:
-h, --help display this message and exit.
-V, --version display version and exit.
-v, --verbose display extra output.
-y, --yes required for changes to take affect and files to
be written.
--overwrite required to replace existing files.
Commands:
configure Generate network configuration files described by
<template>. <destination> is where to place the
files [default: /etc/sysconfig/network-scripts] and
<host> is the server to write the files for
[default: `hostname`].
'''
__author__ = 'Joe Baldwin'
__author_email__ = 'joe@joebaldwin.com'
__credit__ = 'Adknowledge, Inc.'
__license__ = 'MIT'
import os
import socket
import sys
import docopt
import ipaddress
import jinja2
import yaml
VERBOSE = False
YES = False
OVERWRITE = False
def say(text='', newline=True, stream=None):
    '''
    Write text to stream (default sys.stdout) with optional trailing newline,
    flushing the stream when it supports it.
    '''
    out = sys.stdout if stream is None else stream
    out.write(str(text))
    if newline:
        out.write(os.linesep)
    try:
        out.flush()
    except AttributeError:
        pass
def verbose(text='', newline=True, stream=None):
    '''
    Call say() but only when --verbose switch has been supplied.
    '''
    if not VERBOSE:
        return
    say(text, newline, stream)
class CustomLoader(yaml.Loader):
    '''
    I am a YAML loader that supports !include.
    '''
    # NOTE(review): the ``include`` constructor method referenced below is not
    # present in this chunk -- as written, the next line raises AttributeError
    # at import time. Confirm the method was not lost (it should construct the
    # scalar node into a path and recursively yaml.load that file).
CustomLoader.add_constructor(u'!include', CustomLoader.include)
def configure(arguments):
    '''
    Generate network configuration files for a PowerMTA server.

    arguments: the docopt argument dict; '--host' defaults to the local
    hostname and '<destination>' to the current working directory when absent.

    NOTE(review): configure_device/configure_routes/configure_rules/
    configure_interfaces are not defined in this chunk -- presumably defined
    alongside this function originally; confirm they still exist.
    '''
    arguments['--host'] = arguments['--host'] or socket.gethostname()
    arguments['<destination>'] = arguments['<destination>'] or os.getcwd()
    # Parse the YAML template with the !include-aware loader.
    with open(arguments['<template>']) as file:  # NOTE: shadows the Python 2 builtin 'file'
        configuration = yaml.load(file, CustomLoader)
    # Device configuration is only emitted when the template enables vlan.
    if configuration['global']['vlan']:
        configure_device(configuration, arguments)
    configure_routes(configuration, arguments)
    configure_rules(configuration, arguments)
    configure_interfaces(configuration, arguments)
def main(argv=None):
    '''
    Main program entry point: parse command-line arguments, latch the global
    switches, and dispatch to the requested command.
    '''
    global VERBOSE, YES, OVERWRITE
    arguments = docopt.docopt(__doc__, argv)
    VERBOSE = arguments['--verbose']
    YES = arguments['--yes']
    OVERWRITE = arguments['--overwrite']
    if arguments['configure']:
        configure(arguments)
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
7061,
6,
198,
26245,
7071,
8398,
1630,
2891,
13,
198,
198,
28350,
25,
198,
220,
220,
220,
611,
25981,
685,
25811,
60,
17425,
685,
438,
4774,
28,
27,
4774,
37981,
1279,
28243,
29,
685... | 2.470222 | 1,125 |
from sympy.physics.quantum.operatorset import (
operators_to_state, state_to_operators
)
from sympy.physics.quantum.cartesian import (
XOp, XKet, PxOp, PxKet, XBra, PxBra
)
from sympy.physics.quantum.state import Ket, Bra
from sympy.physics.quantum.operator import Operator
from sympy.physics.quantum.spin import (
JxKet, JyKet, JzKet, JxBra, JyBra, JzBra,
JxOp, JyOp, JzOp, J2Op
)
from sympy.utilities.pytest import raises
from sympy.utilities.pytest import XFAIL
@XFAIL
| [
6738,
10558,
88,
13,
746,
23154,
13,
40972,
388,
13,
3575,
2024,
316,
1330,
357,
198,
220,
220,
220,
12879,
62,
1462,
62,
5219,
11,
1181,
62,
1462,
62,
3575,
2024,
198,
8,
198,
198,
6738,
10558,
88,
13,
746,
23154,
13,
40972,
388,... | 2.361905 | 210 |
from __future__ import division, print_function
import os
import torch
import pickle as pkl
from tqdm import tqdm
from PIL import Image
import numpy as np
from pdb import set_trace
from util import util
from util import obj_io
if __name__ == '__main__':
    import argparse
    # loading cfg file
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-indir', '--input_dir', type=str, help='path of test dir')
    parser.add_argument(
        '-outdir', '--output_dir', type=str, default="../results/", help='path of test dir')
    args = parser.parse_args()
    # Iteration count for the (commented-out) SMPL optimization path below.
    iternum=50
    input_dir = args.input_dir
    output_dir = args.output_dir
    #! NOTE: We recommend using this when accurate SMPL estimation is available (e.g., through external optimization / annotation)
    # NOTE(review): main_test_with_gt_smpl is not defined or imported in this
    # chunk -- presumably provided elsewhere in the file; confirm before running.
    main_test_with_gt_smpl(input_dir,
                           output_dir,
                           pretrained_checkpoint='./results/pamir_geometry/checkpoints/latest.pt',
                           pretrained_gcmr_checkpoint='./results/gcmr_pretrained/gcmr_2020_12_10-21_03_12.pt')
    # #! Otherwise, use this function to predict and optimize a SMPL model for the input image
    # main_test_wo_gt_smpl_with_optm(input_image_dir,
    #                                output_dir,
    #                                pretrained_checkpoint='./results/pamir_geometry/checkpoints/latest.pt',
    #                                pretrained_gcmr_checkpoint='./results/gcmr_pretrained/gcmr_2020_12_10-21_03_12.pt',
    #                                iternum=iternum)
    # main_test_texture(output_dir,
    #                   output_dir,
    #                   pretrained_checkpoint_pamir='./results/pamir_geometry/checkpoints/latest.pt',
    #                   pretrained_checkpoint_pamirtex='./results/pamir_texture/checkpoints/latest.pt')
| [
6738,
11593,
37443,
834,
1330,
7297,
11,
3601,
62,
8818,
198,
198,
11748,
28686,
198,
11748,
28034,
198,
11748,
2298,
293,
355,
279,
41582,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
... | 2.121041 | 884 |
import boto3
from os import environ
from src.scripts.io_s3 import s3_download, s3_upload
from src.pipelines.pipelines import concat_csv_stage_one, concat_csv_stage_two
s3_client = boto3.client("s3")
s3_bucket = environ.get("ARTIFACT_BUCKET")
def concat_stage_one(event, context):
    """Concatenate Sagemaker predictions with original features csv file.

    :param event: lambda event with ``csv_input_path``, ``csv_label`` and
        ``pipeline_version`` keys
    :param context: lambda context (unused)
    :return: the S3 key of the uploaded concatenated csv, or ``None`` when the
        upload fails
    """
    csv_input_path = event["csv_input_path"]
    csv_label = event["csv_label"]
    pipeline_version = event["pipeline_version"]
    # Base name of the input csv without the ".csv" extension.
    filename = csv_input_path.split("/")[-1].split(".csv")[0]
    # BUG FIX: ``filename`` was computed but never interpolated -- the paths
    # contained the literal placeholder text "(unknown)".
    csv_output_path = f"data_{csv_label}/csv_transforms/csv_output/{filename}_inference_{pipeline_version}.csv.out"
    concat_upload_path = f"data_{csv_label}/csv_transforms/csv_readable"
    features_filepath = f"/tmp/{filename}.csv"
    predictions_filepath = f"/tmp/output_{filename}.csv"
    s3_download(s3_client, s3_bucket, csv_input_path, features_filepath)
    s3_download(s3_client, s3_bucket, csv_output_path, predictions_filepath)
    # The "xc" label is handled as a multiclass model; the others are not.
    if csv_label == "xc":
        concat_csv_filepath = concat_csv_stage_one(features_filepath, predictions_filepath, csv_label, multiclass=True)
    else:
        concat_csv_filepath = concat_csv_stage_one(features_filepath, predictions_filepath, csv_label)
    resp_upload = s3_upload(s3_client, s3_bucket, concat_csv_filepath, concat_upload_path)
    if resp_upload:
        bucket_path = f"{concat_upload_path}/{concat_csv_filepath.split('/')[-1]}"
        return bucket_path
def concat_stage_two(event, context):
    """Concatenate all Sagemaker predictions with original features csv file.

    Downloads the four per-label stage-one outputs for ``event["query_date"]``,
    merges them, and uploads the combined csv.

    :param event: lambda event with a ``query_date`` key
    :param context: lambda context (unused)
    :return: the S3 key of the uploaded merged csv, or ``None`` when the
        upload fails
    """
    query_date = event["query_date"]
    concat_upload_path = "csv_rds/csv_readable"
    csv_filenames = {}
    csv_labels = ["1x", "12", "x2", "xc"]
    for csv_label in csv_labels:
        filename = f"concat_{csv_label}_csv_{query_date}.csv"
        # BUG FIX: interpolate ``filename`` -- the paths contained the literal
        # placeholder text "(unknown)".
        bucket_filepath = f"data_{csv_label}/csv_transforms/csv_readable/{filename}"
        download_path = f"/tmp/{filename}"
        resp_download = s3_download(s3_client, s3_bucket, bucket_filepath, download_path)
        if resp_download:
            csv_filenames.update({csv_label: download_path})
    # NOTE: a failed download above leaves its label out of csv_filenames and
    # the lookups below raise KeyError -- i.e. all four inputs are required.
    concat_csv_filepath = concat_csv_stage_two(
        csv_filenames["1x"], csv_filenames["12"], csv_filenames["x2"], csv_filenames["xc"]
    )
    resp_upload = s3_upload(s3_client, s3_bucket, concat_csv_filepath, concat_upload_path)
    if resp_upload:
        bucket_path = f"{concat_upload_path}/{concat_csv_filepath.split('/')[-1]}"
        return bucket_path
| [
11748,
275,
2069,
18,
198,
6738,
28686,
1330,
551,
2268,
198,
6738,
12351,
13,
46521,
13,
952,
62,
82,
18,
1330,
264,
18,
62,
15002,
11,
264,
18,
62,
25850,
198,
6738,
12351,
13,
79,
541,
20655,
13,
79,
541,
20655,
1330,
1673,
265... | 2.556144 | 944 |
#!/usr/bin/python
#
# Parses the PLC configuration file /etc/planetlab/plc_config, which
# is bootstrapped by Boot Manager, but managed by us.
#
# Mark Huang <mlhuang@cs.princeton.edu>
# Copyright (C) 2006 The Trustees of Princeton University
#
import os
class Config:
    """
    Parses Python configuration files; all variables in the file are
    assigned to class attributes.
    """
    # NOTE(review): the implementation (an __init__ that executes the config
    # file and copies its variables onto the instance -- relied upon by the
    # __dict__ iteration in the __main__ block below) is not present in this
    # chunk. Confirm it was not lost; as written the class has no behavior.
if __name__ == '__main__':
    from pprint import pprint
    # Dump every configuration variable parsed from the PLC config file,
    # skipping the __builtins__ entry injected by the config evaluation.
    # NOTE(review): dict.iteritems() is Python 2 only -- this script cannot
    # run under Python 3 without switching to .items().
    for (k,v) in Config().__dict__.iteritems():
        if k not in ['__builtins__']:
            pprint ( (k,v), )
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
198,
2,
23042,
274,
262,
350,
5639,
8398,
2393,
1220,
14784,
14,
47427,
23912,
14,
489,
66,
62,
11250,
11,
543,
198,
2,
318,
6297,
12044,
1496,
416,
18892,
9142,
11,
475,
5257,
416,
51... | 2.742718 | 206 |
# -*- coding: utf-8 -*-
import numpy as np
from numpy.linalg import norm
from scipy.sparse import isspmatrix as isspmatrix_np
from scipy.sparse.linalg import eigsh
from scipy.linalg import eigh
def calc_eig_res(A, M, eigvals, eigvecs):
    """
    Returns the residuals of the generalized eigenvalue problem

    ``A * x[i] = w[i] * M * x[i]``

    according to

    ``res[i] = || (A - eigvals[i] * M) @ eigvecs[i] || / || A @ eigvecs[i] ||``

    where ``eigvecs`` stores the eigenvectors column-wise (as returned by
    ``eigh``/``eigsh``).
    """
    # BUG FIX: ``rfunc`` was referenced but not defined (the helper was lost);
    # reconstructed here from the residual formula in the docstring.
    def rfunc(i):
        v = eigvecs[:, i]
        return norm((A - eigvals[i] * M) @ v) / norm(A @ v)
    return list(map(rfunc, range(len(eigvals))))
def normalize_eigenvectors(vecs, A):
    """
    Returns the eigenvectors normalized to the matrix `A`, i.e. each column
    ``v`` of `vecs` is scaled so that ``v.T @ A @ v == 1``.
    """
    N = vecs.shape[-1]
    # BUG FIX: ``qprod`` was referenced but not defined (the helper was lost);
    # reconstructed as the A-weighted norm sqrt(v.T @ A @ v) of column i.
    def qprod(i):
        v = vecs[:, i]
        return np.sqrt(v @ A @ v)
    foo, rng = lambda i: vecs[:, i] / qprod(i), range(N)
    return np.stack(list(map(foo, rng))).T
def eig_dense(A, *args, M=None, normalize=False, nmode='A',
              return_residuals=False, **kwargs):
    """
    Returns all eigenvalues and eigenvectors of a dense Hermitian matrix
    (optionally the generalized problem ``A x = w M x`` when `M` is given).

    Sparse inputs are densified and the computation is delegated to
    ``scipy.linalg.eigh``; extra keyword arguments are forwarded to it.

    Parameters
    ----------
    nmode : str, Optional
        'A' or 'M': which matrix the eigenvectors are normalized against
        when `normalize` is True (dynamic vs. stability analysis).
    normalize : bool, optional
        Controls normalization of the eigenvectors. Default is False.
    return_residuals : bool, optional
        When True, also return the residuals of the eigensolution.
    """
    def _densify(mat):
        # eigh needs dense storage; convert scipy sparse matrices on the fly.
        return mat.todense() if isspmatrix_np(mat) else mat

    if M is None:
        vals, vecs = eigh(_densify(A), **kwargs)
    else:
        vals, vecs = eigh(_densify(A), b=_densify(M), **kwargs)
    if normalize:
        if nmode not in ('A', 'M'):
            raise NotImplementedError()
        vecs = normalize_eigenvectors(vecs, A if nmode == 'A' else M)
    if return_residuals:
        return vals, vecs, calc_eig_res(A, M, vals, vecs)
    return vals, vecs
def eig_sparse(A, *args, k=10, M=None, normalize=False, which='SM',
               maxiter=None, nmode='A', return_residuals=False, **kwargs):
    """
    Returns `k` eigenvalues and eigenvectors of a sparse Hermitian matrix
    (optionally the generalized problem ``A x = w M x`` when `M` is given),
    computed with ``scipy.sparse.linalg.eigsh`` (ARPACK / Arnoldi iteration,
    see references [1, 2]). Extra keyword arguments are forwarded.

    Parameters
    ----------
    k : int, Optional
        Number of eigenpairs to calculate. Default is 10.
    nmode : str, Optional
        'A' or 'M': which matrix the eigenvectors are normalized against
        when `normalize` is True (dynamic vs. stability analysis).
    normalize : bool, optional
        Controls normalization of the eigenvectors. Default is False.
    which : str, ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI'], optional
        Which `k` eigenvectors and eigenvalues to find:
            'LM' : largest magnitude
            'SM' : smallest magnitude
            'LR' : largest real part
            'SR' : smallest real part
            'LI' : largest imaginary part
            'SI' : smallest imaginary part
        Default is 'SM'.
    return_residuals : bool, optional
        When True, also return the residuals of the eigensolution.

    References
    ----------
    .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
    .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
       Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
       Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
    """
    vals, vecs = eigsh(A=A, k=k, M=M, which=which, maxiter=maxiter, **kwargs)
    if normalize:
        if nmode == 'A':
            ref = A
        elif nmode == 'M':
            ref = M
        else:
            raise NotImplementedError()
        vecs = normalize_eigenvectors(vecs, ref)
    if return_residuals:
        return vals, vecs, calc_eig_res(A, M, vals, vecs)
    return vals, vecs
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
13,
75,
1292,
70,
1330,
2593,
198,
6738,
629,
541,
88,
13,
82,
29572,
1330,
1189,
4426,
265,
8609,
355,
1189,
442... | 2.238388 | 1,787 |
'''NXOS implementation for bgp disable/enable triggers'''
# import python
import time
# Genie Libs
from genie.libs.sdk.libs.utils.mapping import Mapping
from genie.libs.sdk.triggers.disableenable.disableenable import \
TriggerDisableEnable
# Which key to exclude for BGP Ops comparison
bgp_exclude = ['maker', 'bgp_session_transport', 'route_refresh',
'bgp_negotiated_capabilities', 'notifications', 'capability',
'keepalives', 'total', 'total_bytes', 'up_time', 'last_reset',
'bgp_negotiated_keepalive_timers', 'updates', 'opens',
'bgp_table_version', 'holdtime', 'keepalive_interval',
'distance_internal_as', 'distance_extern_as', 'totals',
'reset_reason', 'holdtime', 'keepalive_interval']
class TriggerDisableEnableBgp(TriggerDisableEnable):
"""Disable and enable feature BGP when there is bgp instance(s)."""
__description__ = """Disable and enable feature BGP when there is bgp instance(s).
trigger_datafile:
Mandatory:
timeout:
max_time (`int`): Maximum wait time for the trigger,
in second. Default: 180
interval (`int`): Wait time between iteration when looping is needed,
in second. Default: 15
method (`str`): Method to recover the device configuration,
Support methods:
'checkpoint': Rollback the configuration by
checkpoint (nxos),
archive file (iosxe),
load the saved running-config file on disk (iosxr)
Optional:
tgn_timeout (`int`): Maximum wait time for all traffic threads to be
restored to the reference rate,
in second. Default: 60
tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
in second. Default: 10
timeout_recovery:
Buffer recovery timeout make sure devices are recovered at the end
of the trigger execution. Used when previous timeouts have been exhausted.
max_time (`int`): Maximum wait time for the last step of the trigger,
in second. Default: 180
interval (`int`): Wait time between iteration when looping is needed,
in second. Default: 15
steps:
1. Learn BGP Ops object and store the BGP instance(s)
if has any, otherwise, SKIP the trigger
2. Save the current device configurations through "method" which user uses
3. Disable BGP feature with command "no feature bgp"
via BGP Conf object
4. Verify the state of feature hsrp is "disabled"
5. Recover the device configurations to the one in step 2
6. Verify the state of feature hsrp is "enabled" and
learn BGP Ops again and verify it is the same as the Ops in step 1
"""
# Mapping of Information between Ops and Conf
# Also permit to dictates which key to verify
mapping = Mapping(requirements={'ops.bgp.bgp.Bgp':{
'requirements':[['info', 'instance', '(?P<instance>.*)',
'bgp_id', '(?P<bgp_id>.*)']],
'kwargs':{'attributes':['info']},
'exclude': bgp_exclude}},
config_info={'conf.bgp.Bgp':{
'requirements':[['device_attr', '{uut}', 'enabled', True]],
'verify_conf':False,
'kwargs':{'mandatory':{'bgp_id': '(?P<bgp_id>.*)'}}}},
verify_ops={'ops.bgp.bgp.Bgp':{
'requirements':[['info', 'instance', '(?P<instance>.*)', 'vrf',
'(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
'bgp_session_transport', 'connection', 'state',
'(^[I|i]dle.*|^[A|a]ctive.*|^[0-9].*)'],
['info', 'instance', '(?P<instance>.*)', 'vrf',
'(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
'bgp_session_transport', 'session_state',
'(^[I|i]dle.*|^[A|a]ctive.*|^[0-9].*)']],
'kwargs':{'attributes':['info']},
'exclude': bgp_exclude}},
num_values={'instance':1, 'bgp_id':1})
# feature name
# used for creating checkpoint name and log information
feature_name = 'bgp'
| [
7061,
6,
45,
55,
2640,
7822,
329,
275,
31197,
15560,
14,
21633,
20022,
7061,
6,
198,
198,
2,
1330,
21015,
198,
11748,
640,
198,
198,
2,
49405,
7980,
82,
198,
6738,
2429,
494,
13,
8019,
82,
13,
21282,
74,
13,
8019,
82,
13,
26791,
... | 1.85802 | 2,768 |
# EXAMPLE PYTHON MODULE
# Define some variables:
numberone = 1
ageofqueen = 78
# define some functions
# define a class
| [
2,
7788,
2390,
16437,
350,
56,
4221,
1340,
33893,
198,
2,
2896,
500,
617,
9633,
25,
198,
17618,
505,
796,
352,
198,
496,
1659,
4188,
268,
796,
8699,
628,
198,
2,
8160,
617,
5499,
628,
198,
198,
2,
8160,
257,
1398,
198
] | 2.97619 | 42 |
# ColorMixer
# A simple HSV color mixer with three sliders.
import ui
import clipboard
from random import random
from console import hud_alert
import colorsys
v = ui.load_view('ColorMixerHSV')
slider_action(v['slider1'])
if ui.get_screen_size()[1] >= 768:
# iPad
v.present('popover')
else:
# iPhone
v.present()
| [
2,
5315,
35608,
263,
198,
2,
317,
2829,
18070,
53,
3124,
33938,
351,
1115,
1017,
4157,
13,
198,
198,
11748,
334,
72,
198,
11748,
47999,
198,
6738,
4738,
1330,
4738,
198,
6738,
8624,
1330,
289,
463,
62,
44598,
198,
11748,
7577,
893,
... | 2.66129 | 124 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from neutron.conf.policies import base
COLLECTION_PATH = '/subnets'
RESOURCE_PATH = '/subnets/{id}'
ACTION_POST = [
{'method': 'POST', 'path': COLLECTION_PATH},
]
ACTION_PUT = [
{'method': 'PUT', 'path': RESOURCE_PATH},
]
ACTION_DELETE = [
{'method': 'DELETE', 'path': RESOURCE_PATH},
]
ACTION_GET = [
{'method': 'GET', 'path': COLLECTION_PATH},
{'method': 'GET', 'path': RESOURCE_PATH},
]
rules = [
policy.DocumentedRuleDefault(
'create_subnet',
base.RULE_ADMIN_OR_NET_OWNER,
'Create a subnet',
ACTION_POST
),
policy.DocumentedRuleDefault(
'create_subnet:segment_id',
base.RULE_ADMIN_ONLY,
'Specify ``segment_id`` attribute when creating a subnet',
ACTION_POST
),
policy.DocumentedRuleDefault(
'create_subnet:service_types',
base.RULE_ADMIN_ONLY,
'Specify ``service_types`` attribute when creating a subnet',
ACTION_POST
),
policy.DocumentedRuleDefault(
'get_subnet',
base.policy_or(base.RULE_ADMIN_OR_OWNER,
'rule:shared'),
'Get a subnet',
ACTION_GET
),
policy.DocumentedRuleDefault(
'get_subnet:segment_id',
base.RULE_ADMIN_ONLY,
'Get ``segment_id`` attribute of a subnet',
ACTION_GET
),
policy.DocumentedRuleDefault(
'update_subnet',
base.RULE_ADMIN_OR_NET_OWNER,
'Update a subnet',
ACTION_PUT
),
policy.DocumentedRuleDefault(
'update_subnet:segment_id',
base.RULE_ADMIN_ONLY,
'Update ``segment_id`` attribute of a subnet',
ACTION_PUT
),
policy.DocumentedRuleDefault(
'update_subnet:service_types',
base.RULE_ADMIN_ONLY,
'Update ``service_types`` attribute of a subnet',
ACTION_PUT
),
policy.DocumentedRuleDefault(
'delete_subnet',
base.RULE_ADMIN_OR_NET_OWNER,
'Delete a subnet',
ACTION_DELETE,
),
]
| [
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
220,
257,
4866,
286,
262,
1378... | 2.322321 | 1,120 |
# Generated by Django 2.1.5 on 2019-03-13 20:35
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
20,
319,
13130,
12,
3070,
12,
1485,
1160,
25,
2327,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |