hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cc37dfa33a663a02ada364c1ee333a355dde9511 | 1,655 | py | Python | pyvisdk/do/weekly_task_scheduler.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/weekly_task_scheduler.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/weekly_task_scheduler.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def WeeklyTaskScheduler(vim, *args, **kwargs):
    '''The WeeklyTaskScheduler data object sets the time for weekly task execution.
    You can set the schedule for task execution on one or more days during the
    week, and you complete the schedule by setting the inherited properties for the
    hour and minute.By default the scheduler executes the task according to the
    specified day(s) every week. If you set the interval to a value greater than 1,
    the task will execute at the specified weekly interval. (For example, an
    interval of 2 will cause the task to execute on the specified days every 2
    weeks.)'''

    obj = vim.client.factory.create('{urn:vim25}WeeklyTaskScheduler')

    # do some validation checking...
    # Bug fix: the message previously claimed "at least 11" while the check
    # requires 10, and it reported only len(args) even though keyword
    # arguments also satisfy the requirement.
    total_args = len(args) + len(kwargs)
    if total_args < 10:
        raise IndexError('Expected at least 10 arguments got: %d' % total_args)

    required = [ 'friday', 'monday', 'saturday', 'sunday', 'thursday', 'tuesday', 'wednesday',
        'hour', 'minute', 'interval' ]
    optional = [ 'activeTime', 'expireTime', 'dynamicProperty', 'dynamicType' ]

    # Positional arguments are assigned in required-then-optional order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    # Keyword arguments must name a known property.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
| 40.365854 | 124 | 0.653172 |
f679b579d3321d256521b0996472668f3ab5880b | 742 | py | Python | examples/src/Presentations/Properties/CheckPasswordExample.py | aspose-slides/Aspose.Slides-for-Python-via-.NET | c55ad5c71f942598f1e67e22a52cbcd1cb286467 | [
"MIT"
] | null | null | null | examples/src/Presentations/Properties/CheckPasswordExample.py | aspose-slides/Aspose.Slides-for-Python-via-.NET | c55ad5c71f942598f1e67e22a52cbcd1cb286467 | [
"MIT"
] | null | null | null | examples/src/Presentations/Properties/CheckPasswordExample.py | aspose-slides/Aspose.Slides-for-Python-via-.NET | c55ad5c71f942598f1e67e22a52cbcd1cb286467 | [
"MIT"
] | null | null | null | import aspose.slides as slides
def props_check_password():
    """Demonstrate checking a presentation's open-password.

    Loads lightweight presentation info (without opening the slides) and
    reports whether two candidate passwords match the file's password.
    """
    # Path for the source presentation (unused outDir removed).
    dataDir = "./examples/data/"

    # Query metadata via the IPresentationInfo interface; this does not
    # load the presentation contents.
    presentationInfo = slides.PresentationFactory.instance.get_presentation_info(dataDir + "props_ppt_with_password.ppt")

    isPasswordCorrect = presentationInfo.check_password("my_password")
    print("The password \"my_password\" for the presentation is " + str(isPasswordCorrect))

    isPasswordCorrect = presentationInfo.check_password("pass1")
    print("The password \"pass1\" for the presentation is " + str(isPasswordCorrect))
| 46.375 | 121 | 0.756065 |
ec328de9d817cb86073ffd3e50c4ded94949752d | 2,071 | py | Python | trees/interview_questions/binary_search_tree_check.py | vcelis/python-data-structures-algorithms-interviews | 41e21968e3fd35a4a04b45ce68a0a6c952cfca92 | [
"MIT"
] | null | null | null | trees/interview_questions/binary_search_tree_check.py | vcelis/python-data-structures-algorithms-interviews | 41e21968e3fd35a4a04b45ce68a0a6c952cfca92 | [
"MIT"
] | null | null | null | trees/interview_questions/binary_search_tree_check.py | vcelis/python-data-structures-algorithms-interviews | 41e21968e3fd35a4a04b45ce68a0a6c952cfca92 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''Binary Search Tree Check
Given a binary tree, check whether it’s a binary search tree or not.
'''
from nose.tools import assert_equal
class BinaryTree():
    """A binary tree node storing a value (``key``) and two child links."""

    def __init__(self, root_obj):
        self.key = root_obj
        self.left_child = None
        self.right_child = None

    def insert_left(self, new_node):
        """Insert ``new_node`` as the left child, pushing the old left subtree down."""
        child = BinaryTree(new_node)
        child.left_child = self.left_child
        self.left_child = child

    def insert_right(self, new_node):
        """Insert ``new_node`` as the right child, pushing the old right subtree down."""
        child = BinaryTree(new_node)
        child.right_child = self.right_child
        self.right_child = child

    def get_left_child(self):
        """Return the left subtree, or None."""
        return self.left_child

    def get_right_child(self):
        """Return the right subtree, or None."""
        return self.right_child

    def get_root_val(self):
        """Return the value stored at this node."""
        return self.key

    def set_root_val(self, new_obj):
        """Replace the value stored at this node."""
        self.key = new_obj
def inorder(tree, result):
    """Append the inorder traversal of ``tree`` to ``result`` and return it."""
    if not tree:
        return result
    inorder(tree.get_left_child(), result)
    result.append(tree.get_root_val())
    inorder(tree.get_right_child(), result)
    return result
def is_binary_tree(tree):
    """Return True iff an inorder walk of ``tree`` yields a sorted sequence."""
    values = inorder(tree, [])
    return values == sorted(values)
class TestIsBinaryTree():
    """Exercises a binary-search-tree checker against two sample trees."""

    def test(self, sol):
        # First tree is NOT a BST:
        #         1
        #       2   3
        #      4 5
        not_bst = BinaryTree('1')
        not_bst.insert_left('2')
        not_bst.insert_right('3')
        not_bst.get_left_child().insert_left('4')
        not_bst.get_left_child().insert_right('5')
        assert_equal(sol(not_bst), False)

        # Second tree IS a BST:
        #         4
        #       2   5
        #      1 3
        bst = BinaryTree('4')
        bst.insert_left('2')
        bst.insert_right('5')
        bst.get_left_child().insert_left('1')
        bst.get_left_child().insert_right('3')
        print(inorder(bst, []))
        assert_equal(sol(bst), True)

        print('ALL TEST CASES PASSED')
if __name__ == '__main__':
    # Run the demo test suite against the reference solution.
    runner = TestIsBinaryTree()
    runner.test(is_binary_tree)
| 24.081395 | 68 | 0.574119 |
88a1ab42c92b04c55d81f6d737f7f09fb9d01b5b | 5,066 | py | Python | tensorflow_transform/tf_metadata/dataset_schema.py | devidipak/transform | 56efe455b29fa3d0a29ce2f8872adc41ed6012c3 | [
"Apache-2.0"
] | null | null | null | tensorflow_transform/tf_metadata/dataset_schema.py | devidipak/transform | 56efe455b29fa3d0a29ce2f8872adc41ed6012c3 | [
"Apache-2.0"
] | null | null | null | tensorflow_transform/tf_metadata/dataset_schema.py | devidipak/transform | 56efe455b29fa3d0a29ce2f8872adc41ed6012c3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""In-memory representation of the schema of a dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
from tensorflow_transform.tf_metadata import schema_utils
from tensorflow.python.util import deprecation
from tensorflow_metadata.proto.v0 import schema_pb2
class Schema(object):
  """The schema of a dataset.

  This is an in-memory representation that may be serialized and deserialized to
  and from a variety of disk representations.

  Args:
    column_schemas: (optional) A dict from logical column names to
        `ColumnSchema`s.
  """

  def __init__(self, column_schemas):
    # Each column schema is a (domain, spec) pair; split it into the feature
    # spec mapping and the domain mapping expected by schema_utils.  Columns
    # without a domain are omitted from `domains`.
    feature_spec = {name: spec
                    for name, (_, spec) in column_schemas.items()}
    domains = {name: domain
               for name, (domain, _) in column_schemas.items()
               if domain is not None}
    # The canonical internal representation is a schema_pb2.Schema proto.
    self._schema_proto = schema_utils.schema_from_feature_spec(
        feature_spec, domains)

  def __eq__(self, other):
    # Equality delegates to proto equality; returning NotImplemented for
    # unrelated types lets Python try the reflected comparison.
    if isinstance(other, self.__class__):
      return self._schema_proto == other._schema_proto  # pylint: disable=protected-access
    return NotImplemented

  def __ne__(self, other):
    # Needed for Python 2, where __ne__ is not derived from __eq__.
    return not self == other

  def __repr__(self):
    return '{}({})'.format(self.__class__.__name__, repr(self._schema_proto))

  def as_feature_spec(self):
    """Returns a representation of this Schema as a feature spec.

    A feature spec (for a whole dataset) is a dictionary from logical feature
    names to one of `FixedLenFeature`, `SparseFeature` or `VarLenFeature`.

    Returns:
      A representation of this Schema as a feature spec.
    """
    # schema_as_feature_spec returns (feature_spec, domains); take index 0.
    return schema_utils.schema_as_feature_spec(
        self._schema_proto)[0]

  def domains(self):
    """Returns the domains for this feature spec."""
    # schema_as_feature_spec returns (feature_spec, domains); take index 1.
    return schema_utils.schema_as_feature_spec(
        self._schema_proto)[1]

  # Implement reduce so that the proto is serialized using proto serialization
  # instead of the default pickling.
  def __getstate__(self):
    return self._schema_proto.SerializeToString()

  def __setstate__(self, state):
    self._schema_proto = schema_pb2.Schema()
    self._schema_proto.MergeFromString(state)
@deprecation.deprecated(
    None,
    'ColumnSchema is a deprecated, use from_feature_spec to create a `Schema`')
def ColumnSchema(domain, axes, representation):  # pylint: disable=invalid-name
  """Legacy constructor for a column schema.

  Translates the legacy (domain, axes, representation) triple into the
  (int_domain, feature_spec) pair consumed by `Schema`.
  """
  # Resolve the dtype and optional integer domain from the legacy domain.
  if isinstance(domain, tf.DType):
    dtype, int_domain = domain, None
  elif isinstance(domain, schema_pb2.IntDomain):
    dtype, int_domain = tf.int64, domain
  else:
    raise TypeError('Invalid domain: {}'.format(domain))

  # Map the legacy representation onto a tf.parse_example feature spec.
  if isinstance(representation, FixedColumnRepresentation):
    spec = tf.FixedLenFeature(axes, dtype, representation.default_value)
  elif isinstance(representation, ListColumnRepresentation):
    spec = tf.VarLenFeature(dtype)
  else:
    raise TypeError('Invalid representation: {}'.format(representation))

  return int_domain, spec
def IntDomain(dtype, min_value=None, max_value=None, is_categorical=None):  # pylint: disable=invalid-name
  """Legacy constructor for an IntDomain."""
  # Only tf.int64 is representable as an IntDomain.
  if dtype != tf.int64:
    raise ValueError('IntDomain must be called with dtype=tf.int64')
  return schema_pb2.IntDomain(
      min=min_value, max=max_value, is_categorical=is_categorical)
class FixedColumnRepresentation(collections.namedtuple(
    'FixedColumnRepresentation', ['default_value'])):
  """Legacy representation of a fixed-length column.

  ``default_value`` defaults to None, meaning the column has no default.
  """

  def __new__(cls, default_value=None):
    base = super(FixedColumnRepresentation, cls)
    return base.__new__(cls, default_value)
# Legacy marker for variable-length (list) columns; carries no fields.
ListColumnRepresentation = collections.namedtuple(
    'ListColumnRepresentation', [])
def from_feature_spec(feature_spec, domains=None):
  """Convert a feature_spec to a Schema.

  Args:
    feature_spec: a features specification in the format expected by
        tf.parse_example(), i.e.
        `{name: FixedLenFeature(...), name: VarLenFeature(...), ...'
    domains: a dictionary whose keys are a subset of the keys of `feature_spec`
        and values are an schema_pb2.IntDomain, schema_pb2.StringDomain or
        schema_pb2.FloatDomain.

  Returns:
    A Schema representing the provided set of columns.
  """
  if domains is None:
    domains = {}
  # Pair every feature with its (possibly absent) domain.
  column_schemas = {}
  for name, spec in feature_spec.items():
    column_schemas[name] = (domains.get(name), spec)
  return Schema(column_schemas)
| 33.773333 | 106 | 0.726806 |
3c9f59ac5503670d6b8e2165afd5d7a25b00dde7 | 852 | py | Python | instacat/urls.py | merhan-mohamed/CREATE-INSTAGRAM-LIKE-APP-IN-PYTHON-DJANGO-Find-projects | 94e92faff4970b7d7e9e5e7c8c1e000a00a41ce7 | [
"MIT"
] | 1 | 2022-02-04T19:20:08.000Z | 2022-02-04T19:20:08.000Z | instacat/urls.py | merhan-mohamed/CREATE-INSTAGRAM-LIKE-APP-IN-PYTHON-DJANGO-Find-projects | 94e92faff4970b7d7e9e5e7c8c1e000a00a41ce7 | [
"MIT"
] | 2 | 2020-05-01T08:16:32.000Z | 2020-07-21T12:55:15.000Z | instacat/urls.py | merhan-mohamed/CREATE-INSTAGRAM-LIKE-APP-IN-PYTHON-DJANGO-Find-projects | 94e92faff4970b7d7e9e5e7c8c1e000a00a41ce7 | [
"MIT"
] | null | null | null | """instacat URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path , include
# Auto-import each installed app's admin module so its models register
# with the Django admin site.
admin.autodiscover()

urlpatterns = [
    path('admin/', admin.site.urls),          # Django admin interface
    path('Insta/', include('Insta.urls'))     # delegate /Insta/ to the Insta app
]
| 31.555556 | 78 | 0.681925 |
c7d8a0edd3b3bf9645b20ca5967a8abf480617ab | 5,932 | py | Python | nightlightpi/config.py | jmb/NightLightPi | 82f5d37a35e3457e31ca100524011908e5b33c4d | [
"MIT"
] | 2 | 2018-10-01T21:45:22.000Z | 2020-07-26T09:07:09.000Z | nightlightpi/config.py | jmb/NightLightPi | 82f5d37a35e3457e31ca100524011908e5b33c4d | [
"MIT"
] | 4 | 2017-09-29T19:19:07.000Z | 2019-10-08T05:15:29.000Z | nightlightpi/config.py | jmb/NightLightPi | 82f5d37a35e3457e31ca100524011908e5b33c4d | [
"MIT"
] | 4 | 2017-10-08T22:08:25.000Z | 2019-10-20T06:03:47.000Z | # -*- coding: utf-8; -*-
"""Handle configuration file loading and fallback behaviors.
This module exports the load_config method which will attempt to load
application configuration from disk. It will first check to see if an
environment variable has been set to indicate the path of the config
file. If the environment variable is not set, it will assume the user
is running on a Raspberry PI using Linux and will attempt to find the
configuration file at /etc/nightlightpi/nightlightpi.yaml.
Example:
conf = load_config()
if conf.some_value:
print("some_value was set in the config!")
"""
__all__ = ["load_config"]
from os import environ
from os.path import join
from pkg_resources import resource_filename
from pykwalify.core import Core
from pykwalify.errors import SchemaError
from yaml import safe_load
from nightlightpi.errorstrings import MISSING_CONFIG_VALUE
ETCPATH = join("/", "etc", "nightlightpi", "nightlightpi.yaml")
ENVCONFIGPATH = "NIGHTLIGHTPICONFIG"
def load_config():
    """Load configuration from disk, returned as a Config instance."""
    config_path = environ.get(ENVCONFIGPATH, ETCPATH)
    data = load_valid_yaml(config_path)
    conf = Config()
    try:
        _set_config_values(conf, data)
    except KeyError as err:
        # A missing key means the YAML lacked a required setting.
        raise RuntimeError(MISSING_CONFIG_VALUE.format(err.args[0]))
    return conf
def load_valid_yaml(path):
    """Return a dict deserialized from the file located at path.

    The data will be validated against the schema defined in conf-schema.yaml.
    """
    schema = resource_filename("nightlightpi", "conf-schema.yaml")
    validator = Core(source_file=path, schema_files=[schema])
    return validator.validate()
# TODO: This could be simplified by using YAML SafeLoader and using object
# deserialization. (wkmanire 2017-10-10)
def _set_config_values(conf, data):
    """Copy data from the YAML configuration data into the Config instance."""
    # NOTE(review): _set_time_values exists but is never invoked here, so
    # conf.timing is never populated from the YAML -- confirm whether the
    # 'timing' section should be wired in.
    _set_mqtt_values(conf.mqtt, data)
    _set_led_strip_values(conf.led_strip, data)
    _set_inputs_values(conf.inputs, data)
    _set_temperature_values(conf.temperature, data)
    _set_display_mode_values(conf.off_mode, "Off", data)
    _set_display_mode_values(conf.temp_mode, "Temperature", data)
    _set_display_mode_values(conf.rainbow_mode, "Rainbow", data)
def _set_mqtt_values(mqtt, data):
mqtt_data = data["mqtt"]
mqtt.brightness_topic = mqtt_data["brightness_topic"]
mqtt.display_topic = mqtt_data["display_topic"]
mqtt.enable = mqtt_data["enable"]
mqtt.humidity_topic = mqtt_data["humidity_topic"]
mqtt.light_topic = mqtt_data["light_topic"]
mqtt.password = mqtt_data["password"]
mqtt.port = mqtt_data["port"]
mqtt.server = mqtt_data["server"]
mqtt.temperature_topic = mqtt_data["temperature_topic"]
mqtt.user = mqtt_data["user"]
def _set_led_strip_values(led_strip, data):
led_strip_data = data["led_strip"]
led_strip.length = led_strip_data["length"]
led_strip.light = led_strip_data["light"]
led_strip.max_brightness = led_strip_data["max_brightness"]
led_strip.brightness = led_strip_data["brightness"]
def _set_inputs_values(inputs, data):
inputs_data = data["inputs"]
inputs.button_light = inputs_data["button_light"]
inputs.button_display = inputs_data["button_display"]
def _set_temperature_values(temp_config, data):
temp_config_data = data["temperature"]
temp_config.sensor_ranges = temp_config_data["sensor_ranges"]
temp_config.sensor_type = temp_config_data["sensor_type"]
colours = list()
for c in temp_config_data["sensor_colours"]:
colours.append((c["r"], c["g"], c["b"]))
temp_config.sensor_colours = colours
def _set_time_values(timing_config, data):
timing_data = data["timing"]
timing_config.speed_in_seconds = timing_data["speed_in_seconds"]
timing_config.menu_button_pressed_time_in_seconds = timing_data["menu_button_pressed_time_in_seconds"]
timing_config.menu_display = timing_data["menu_display"]
def _set_display_mode_values(mode, name, data):
for mode_data in data["display_modes"]:
if mode_data["name"] == name:
mode.name = mode_data["name"]
mode.menu = mode_data["menu"]
mode.background = mode_data["background"]
class Config:
    """Provide configuration for the MQTT server and RPi attached device.

    This is a composite configuration class built up from other
    configuration objects.
    """

    def __init__(self):
        self.mqtt = MQTTConfig()                 # broker connection and topics
        self.led_strip = LEDStripConfig()        # LED strip settings
        self.inputs = InputsConfig()             # button input settings
        self.temperature = TemperatureConfig()   # temperature sensor settings
        # NOTE(review): timing is never populated by _set_config_values --
        # confirm whether the 'timing' section should be wired in.
        self.timing = TimingConfig()
        self.off_mode = DisplayModeConfig()
        self.temp_mode = DisplayModeConfig()
        self.rainbow_mode = DisplayModeConfig()
class MQTTConfig:
    """Connection settings and topic names for the MQTT broker.

    All attributes are None until populated by _set_mqtt_values.
    """

    def __init__(self):
        self.enable = None             # whether MQTT publishing is enabled
        self.server = None             # broker host
        self.port = None               # broker port
        self.user = None
        self.password = None
        self.temperature_topic = None
        self.humidity_topic = None
        self.display_topic = None
        self.light_topic = None
        self.brightness_topic = None
class LEDStripConfig:
    """LED strip settings; populated by _set_led_strip_values."""

    def __init__(self):
        self.length = None           # 'length' value from the YAML led_strip section
        self.light = None
        self.max_brightness = None
        self.brightness = None
class InputsConfig:
    """Button input settings; populated by _set_inputs_values.

    Bug fix: the attributes were previously declared with plural names
    (``buttons_light``/``buttons_display``) while ``_set_inputs_values``
    assigns the singular names, so the declared attributes were never
    populated.  The singular names are now declared; the old plural names
    are kept as aliases for backward compatibility with any existing
    readers.
    """

    def __init__(self):
        # Populated from the YAML 'inputs' section.
        self.button_light = None
        self.button_display = None
        # Legacy plural aliases (never populated by the loader); kept so
        # existing code reading them does not raise AttributeError.
        self.buttons_light = None
        self.buttons_display = None
class TemperatureConfig:
    """Temperature sensor settings with defaults for an AM2302 sensor."""

    def __init__(self):
        self.sensor_ranges = None    # populated by _set_temperature_values
        self.sensor_colours = None   # becomes a list of (r, g, b) tuples after loading
        self.sensor_type = "AM2302"  # default sensor model
        self.pin = 22                # presumably the sensor's GPIO pin -- confirm wiring
        self.update_seconds = 60     # presumably the polling interval -- confirm usage
class TimingConfig:
    """Timing parameters; filled by _set_time_values.

    NOTE(review): _set_time_values is never called from _set_config_values,
    so these may remain None at runtime -- confirm.
    """

    def __init__(self):
        self.speed_in_seconds = None
        self.menu_button_pressed_time_in_seconds = None
        self.menu_display = None
class DisplayModeConfig:
    """One display-mode entry (name, menu, background) from the YAML.

    Populated by _set_display_mode_values.
    """

    def __init__(self):
        self.name = None
        self.menu = None
        self.background = None
| 30.265306 | 106 | 0.710216 |
7f9ff188f20997e0a7864f8c3b5866169f47372a | 1,609 | py | Python | salt/modules/sapcarmod.py | aleksei-burlakov/salt-shaptools | a3e16de2380f3255e92c5e6e1b5c4f28acc52e8b | [
"Apache-2.0"
] | 9 | 2019-10-31T23:30:35.000Z | 2022-02-04T02:41:59.000Z | salt/modules/sapcarmod.py | aleksei-burlakov/salt-shaptools | a3e16de2380f3255e92c5e6e1b5c4f28acc52e8b | [
"Apache-2.0"
] | 42 | 2019-04-25T12:07:40.000Z | 2022-03-31T14:19:23.000Z | salt/modules/sapcarmod.py | aleksei-burlakov/salt-shaptools | a3e16de2380f3255e92c5e6e1b5c4f28acc52e8b | [
"Apache-2.0"
] | 10 | 2019-05-08T22:23:24.000Z | 2021-07-21T07:49:58.000Z | # -*- coding: utf-8 -*-
'''
Module to provide SAP tools functionality to Salt
.. versionadded:: pending
:maintainer: Simranpal Singh <sisingh@suse.com>
:maturity: alpha
:depends: ``shaptools`` Python module
:platform: all
:configuration: This module requires the shaptools package
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals
from salt import exceptions
# Import third party libs
try:
from shaptools import saputils
HAS_SAPUTILS = True
except ImportError: # pragma: no cover
HAS_SAPUTILS = False
__virtualname__ = 'sapcar'
def __virtual__():  # pragma: no cover
    '''
    Only load this module if shaptools python module is installed
    '''
    if not HAS_SAPUTILS:
        return (
            False,
            'The sapcar execution module failed to load: the shaptools python'
            ' library is not available.')
    return __virtualname__
def extract(
        sapcar_exe,
        sar_file,
        output_dir=None,
        options=None):
    '''
    Extract a SAP sar archive.

    Wraps shaptools' extract_sapcar_file; any SapUtilsError is re-raised as
    a salt CommandExecutionError.

    sapcar_exe
        Path to the SAPCAR executable file. SAPCAR is a SAP tool to extract SAP SAR format archives
        (docstring previously named this parameter ``sapcar_exe_file``)

    sar_file
        Path to the sar file to be extracted

    output_dir
        Location where to extract the SAR file

    options:
        Additional parameters to the SAPCAR tool
    '''
    try:
        return saputils.extract_sapcar_file(
            sapcar_exe=sapcar_exe, sar_file=sar_file, output_dir=output_dir, options=options)
    except saputils.SapUtilsError as err:
        raise exceptions.CommandExecutionError(err)
| 24.753846 | 100 | 0.68179 |
835f251026a762d06eef5c926f17ceb315a07f29 | 6,534 | py | Python | tools/inference_for_coco.py | tyunist/FPN_Tensorflow | ec8b746e539994b28a7d949040c597c543c5f8d5 | [
"MIT"
] | 374 | 2018-12-02T06:59:44.000Z | 2022-03-15T10:34:00.000Z | tools/inference_for_coco.py | tyunist/FPN_Tensorflow | ec8b746e539994b28a7d949040c597c543c5f8d5 | [
"MIT"
] | 157 | 2018-12-02T07:37:39.000Z | 2022-03-16T09:49:11.000Z | tools/inference_for_coco.py | tyunist/FPN_Tensorflow | ec8b746e539994b28a7d949040c597c543c5f8d5 | [
"MIT"
] | 141 | 2018-12-12T11:57:59.000Z | 2022-02-28T13:12:58.000Z | # -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os, sys
import tensorflow as tf
import time
import cv2
import pickle
import numpy as np
sys.path.append("../")
sys.path.insert(0, '/home/yjr/PycharmProjects/Faster-RCNN_TF/data/lib_coco/PythonAPI')
from data.io.image_preprocess import short_side_resize_for_inference_data
from libs.configs import cfgs
from libs.networks import build_whole_network
from libs.val_libs import voc_eval
from libs.box_utils import draw_box_in_img
from libs.label_name_dict.coco_dict import LABEL_NAME_MAP, classes_originID
from help_utils import tools
from data.lib_coco.PythonAPI.pycocotools.coco import COCO
import json
os.environ["CUDA_VISIBLE_DEVICES"] = cfgs.GPU_GROUP
def eval_with_plac(det_net, imgId_list, coco, out_json_root, draw_imgs=False):
    """Run detection over COCO images and write one JSON result file per image.

    Fix: the per-image result file is now written via a ``with`` block so it
    is closed even if json.dump raises (previously open/close could leak).

    NOTE(review): reads images from a hard-coded test2017 directory below --
    confirm the path when reusing.
    """

    # 1. preprocess img
    img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3])  # is RGB. not GBR
    img_batch = tf.cast(img_plac, tf.float32)
    img_batch = short_side_resize_for_inference_data(img_tensor=img_batch,
                                                     target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN,
                                                     length_limitation=cfgs.IMG_MAX_LENGTH)
    img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)
    img_batch = tf.expand_dims(img_batch, axis=0)

    # Build the detection graph once; inference is driven through the placeholder.
    detection_boxes, detection_scores, detection_category = det_net.build_whole_detection_network(
        input_img_batch=img_batch,
        gtboxes_batch=None)

    init_op = tf.group(
        tf.global_variables_initializer(),
        tf.local_variables_initializer()
    )

    restorer, restore_ckpt = det_net.get_restorer()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        sess.run(init_op)
        if not restorer is None:
            restorer.restore(sess, restore_ckpt)
            print('restore model')

        for i, imgid in enumerate(imgId_list):
            imgname = coco.loadImgs(ids=[imgid])[0]['file_name']
            raw_img = cv2.imread(os.path.join("/home/yjr/DataSet/COCO/2017/test2017", imgname))

            raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]

            start = time.time()
            resized_img, detected_boxes, detected_scores, detected_categories = \
                sess.run(
                    [img_batch, detection_boxes, detection_scores, detection_category],
                    feed_dict={img_plac: raw_img[:, :, ::-1]}  # cv is BGR. But need RGB
                )
            end = time.time()

            # Optionally dump visualizations of high-scoring detections.
            if draw_imgs:
                show_indices = detected_scores >= cfgs.SHOW_SCORE_THRSHOLD
                show_scores = detected_scores[show_indices]
                show_boxes = detected_boxes[show_indices]
                show_categories = detected_categories[show_indices]
                final_detections = draw_box_in_img.draw_boxes_with_label_and_scores(np.squeeze(resized_img, 0),
                                                                                    boxes=show_boxes,
                                                                                    labels=show_categories,
                                                                                    scores=show_scores)
                cv2.imwrite(cfgs.TEST_SAVE_PATH + '/' + str(imgid) + '.jpg',
                            final_detections[:, :, ::-1])

            # Rescale boxes from the resized image back to raw image coordinates.
            xmin, ymin, xmax, ymax = detected_boxes[:, 0], detected_boxes[:, 1], \
                                     detected_boxes[:, 2], detected_boxes[:, 3]

            resized_h, resized_w = resized_img.shape[1], resized_img.shape[2]

            xmin = xmin * raw_w / resized_w
            xmax = xmax * raw_w / resized_w

            ymin = ymin * raw_h / resized_h
            ymax = ymax * raw_h / resized_h

            # COCO format expects [x, y, width, height].
            boxes = np.transpose(np.stack([xmin, ymin, xmax-xmin, ymax-ymin]))
            dets = np.hstack((detected_categories.reshape(-1, 1),
                              detected_scores.reshape(-1, 1),
                              boxes))

            a_img_detect_result = []
            for a_det in dets:
                label, score, bbox = a_det[0], a_det[1], a_det[2:]
                cat_id = classes_originID[LABEL_NAME_MAP[label]]
                if score < 0.00001:
                    continue
                det_object = {"image_id": imgid,
                              "category_id": cat_id,
                              "bbox": bbox.tolist(),
                              "score": float(score)}
                a_img_detect_result.append(det_object)

            # Write this image's detections; 'with' guarantees the file is
            # closed even if json.dump raises.
            with open(os.path.join(out_json_root, 'each_img', str(imgid)+'.json'), 'w') as f:
                json.dump(a_img_detect_result, f)  # , indent=4

            # Free per-image arrays eagerly to keep peak memory down.
            del a_img_detect_result
            del dets
            del boxes
            del resized_img
            del raw_img
            tools.view_bar('{} image cost {}s'.format(imgid, (end - start)), i + 1, len(imgId_list))
def eval(num_imgs):
    """Evaluate on COCO test-dev and merge per-image JSONs into one results file.

    Fix: the per-image JSON files are now read via a ``with`` block so each
    handle is closed even if json.load raises (previously open/close could
    leak inside the loop).

    NOTE(review): this function shadows the builtin ``eval``; renaming it
    would also require changing the __main__ call, so it is left as-is.
    """
    # annotation_path = '/home/yjr/DataSet/COCO/2017/test_annotations/image_info_test2017.json'
    annotation_path = '/home/yjr/DataSet/COCO/2017/test_annotations/image_info_test-dev2017.json'
    # annotation_path = '/home/yjr/DataSet/COCO/2017/annotations/instances_train2017.json'
    print("load coco .... it will cost about 17s..")
    coco = COCO(annotation_path)
    imgId_list = coco.getImgIds()
    if num_imgs != np.inf:
        imgId_list = imgId_list[: num_imgs]

    faster_rcnn = build_whole_network.DetectionNetwork(base_network_name=cfgs.NET_NAME,
                                                       is_training=False)
    save_dir = os.path.join(cfgs.EVALUATE_DIR, cfgs.VERSION)
    eval_with_plac(det_net=faster_rcnn, coco=coco, imgId_list=imgId_list, out_json_root=save_dir,
                   draw_imgs=True)
    print("each img over**************")

    # Merge every per-image result file into a single COCO-format JSON.
    final_detections = []
    with open(os.path.join(save_dir, 'coco2017test_results.json'), 'w') as wf:
        for imgid in imgId_list:
            with open(os.path.join(save_dir, 'each_img', str(imgid)+'.json')) as f:
                tmp_list = json.load(f)
            final_detections.extend(tmp_list)
            del tmp_list
        json.dump(final_detections, wf)
if __name__ == '__main__':
    # np.inf means "evaluate every image in the annotation set".
    eval(np.inf)
5d60d7c525bbffd6e1ab1b7eb020b48d41605955 | 6,705 | py | Python | baselines/arch/layoutvae/bboxVAE.py | atmacvit/meronymnet | 47e1a7caadc0f770439bb26a93b885f790f62804 | [
"MIT"
] | 1 | 2021-11-02T05:13:12.000Z | 2021-11-02T05:13:12.000Z | baselines/arch/layoutvae/bboxVAE.py | atmacvit/meronymnet | 47e1a7caadc0f770439bb26a93b885f790f62804 | [
"MIT"
] | 1 | 2021-12-17T14:29:18.000Z | 2021-12-17T14:29:18.000Z | baselines/arch/layoutvae/bboxVAE.py | atmacvit/meronymnet | 47e1a7caadc0f770439bb26a93b885f790f62804 | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow.keras.layers import Concatenate, Dense, LSTM, Input, Layer
from tensorflow.keras import Model
import numpy as np
def ConditioningInputModel(n_labels, is_class_condition=False):
    """Build the conditioning sub-network for the autoregressive bbox VAE.

    Inputs: the multi-label count vector, the current label one-hot, the
    previous-boxes LSTM encoding (128-d) and, optionally, a class vector.
    Each input is embedded with Dense layers, concatenated, and projected
    to a 128-d conditioning vector.

    NOTE(review): the optional class input is hard-coded to 10 dims while
    the labels use n_labels -- confirm this matches the dataset.
    """
    label_set_count = Input(shape=(n_labels,))
    curr_label = Input(shape=(n_labels,))
    previous_bbox_encoding = Input(shape=(128,))
    if is_class_condition==True:
        class_input = Input(shape=(10, ))
    # Two-layer embeddings for label counts and the current label.
    i1 = Dense(128, activation='relu')(label_set_count)
    i1 = Dense(128, activation='relu')(i1)
    i2 = Dense(128, activation='relu')(curr_label)
    i2 = Dense(128, activation='relu')(i2)
    # Single-layer embedding for the previous-boxes encoding.
    i3 = Dense(128, activation='relu')(previous_bbox_encoding)
    if is_class_condition==True:
        i4 = Dense(128, activation='relu')(class_input)
        i4 = Dense(128, activation='relu')(i4)
    if is_class_condition==True:
        output = Concatenate()([i1, i2, i3, i4])
    else:
        output = Concatenate()([i1, i2, i3])
    # Final projection to the 128-d conditioning vector.
    output = Dense(128)(output)
    if is_class_condition==True:
        input_list = [label_set_count, curr_label, previous_bbox_encoding, class_input]
    else:
        input_list = [label_set_count, curr_label, previous_bbox_encoding]
    return Model(inputs=input_list, outputs=output)
def Encoder():
    """Build the recognition network: (gt bbox, conditioning) -> (z_mean, z_log_var)."""
    gt_bbox = Input(shape=(4,))
    conditioning_input = Input(shape=(128,))
    # Embed the ground-truth box, then merge with the conditioning vector.
    bbox_features = Dense(128, activation='relu')(gt_bbox)
    bbox_features = Dense(128)(bbox_features)
    merged = Concatenate()([bbox_features, conditioning_input])
    merged = Dense(32, activation='relu')(merged)
    # Separate heads for the Gaussian parameters of q(z | bbox, cond).
    z_mean = Dense(32)(merged)
    z_log_var = Dense(32)(merged)
    return Model(inputs=[gt_bbox, conditioning_input], outputs=[z_mean, z_log_var])
def Decoder():
    """Build the generator network: (conditioning, z) -> bbox in [0, 1]^4."""
    conditioning_input = Input(shape=(128,))
    latent_dim = Input(shape=(32,))
    hidden = Concatenate()([conditioning_input, latent_dim])
    hidden = Dense(128, activation='relu')(hidden)
    hidden = Dense(64, activation='relu')(hidden)
    # Sigmoid keeps the predicted box coordinates normalized to [0, 1].
    bbox = Dense(4, activation='sigmoid')(hidden)
    return Model(inputs=[conditioning_input, latent_dim], outputs=bbox)
class Sampling(Layer):
    """Uses (z_mean, z_log_var) to sample z via the reparameterization trick."""

    def call(self, inputs):
        mean, log_var = inputs
        shape = tf.shape(mean)
        # Draw epsilon ~ N(0, I) with the same (batch, dim) shape.
        epsilon = tf.keras.backend.random_normal(shape=(shape[0], shape[1]))
        return mean + tf.exp(0.5 * log_var) * epsilon
def kl_divergence(p_mean, p_log_var, q_mean, q_log_var):
    """Elementwise KL(p || q) between diagonal Gaussians given log-variances."""
    ratio = (tf.exp(p_log_var) + tf.square(p_mean - q_mean)) / tf.exp(q_log_var)
    return 0.5 * (q_log_var - p_log_var + ratio - 1)
def Prior():
    """Build the conditional prior network: conditioning -> (z_mean, z_log_var)."""
    conditioning_input = Input(shape=(128,))
    hidden = Dense(32, activation='relu')(conditioning_input)
    z_mean = Dense(32)(hidden)
    z_log_var = Dense(32)(hidden)
    return Model(inputs=conditioning_input, outputs=[z_mean, z_log_var])
class BBoxVAE(tf.keras.Model):
    """Autoregressive conditional VAE over per-label bounding boxes.

    At each of the 24 label steps, a conditioning vector (label counts,
    current label one-hot, LSTM encoding of previously generated boxes,
    optionally a class vector) parameterizes a prior p(z|c); during training
    an encoder q(z|bbox, c) is also used, and the decoder maps (c, z) to a
    box.  The loss (MSE + 0.005 * KL) is attached via add_loss.
    """

    def __init__(self, is_class_condition=False):
        super(BBoxVAE, self).__init__()
        self.encoder = Encoder()
        self.decoder = Decoder()
        self.prior = Prior()
        # 24 labels hard-coded to match the dataset used below.
        self.condition_model = ConditioningInputModel(24, is_class_condition)
        self.rnn_layer = LSTM(128, return_state=True)
        self.vae_sampling = Sampling()
        self.class_cond = is_class_condition

    def call(self, input, training=True):
        label_set = input[0] # label set with counts i.e 1 Multi-label encoding (batch_size, 24)
        bbox_input = input[1]  # ground-truth bounding boxes (batch_size, 24, 4)
        if self.class_cond==True:
            class_input = input[2]
        print("Training value = " + str(training))
        num_labels = label_set.shape[1] # time-steps for LSTM
        batch_size = label_set.shape[0]
        prev_bounding_boxes_encoding = tf.zeros((batch_size, 128)) # zeros initially
        state_h = None
        state_c = None
        bbox_outputs = []
        kl_losses = []
        for i in range(num_labels): # num_labels = number of timesteps
            # Build a one-hot for the current label step.
            # NOTE(review): creating tf.Variables inside call() on every step
            # is unusual for keras models -- confirm this behaves as intended
            # under graph tracing.
            one_hot = tf.Variable(tf.zeros((batch_size, 24)))
            ones = tf.Variable(tf.ones((batch_size,)))
            one_hot[:, i].assign(ones)
            curr_label = one_hot
            if self.class_cond==False:
                conditioning_info = self.condition_model([label_set, curr_label, prev_bounding_boxes_encoding])
            else:
                conditioning_info = self.condition_model([label_set, curr_label, prev_bounding_boxes_encoding, class_input])
            # Conditional prior p(z | c).
            z_mean_c, z_log_var_c = self.prior(conditioning_info)
            ground_truth_bbox = bbox_input[:, i, :]
            # Recognition model q(z | bbox, c).
            z_mean, z_log_var = self.encoder([ground_truth_bbox, conditioning_info])
            # KL(q || p) between posterior and conditional prior.
            kl_loss = kl_divergence(z_mean, z_log_var, z_mean_c, z_log_var_c)
            kl_losses.append(kl_loss)
            if training==True:
                # Training: sample z from the posterior.
                z = self.vae_sampling([z_mean, z_log_var])
            else:
                # Inference: sample z from the conditional prior.
                z = self.vae_sampling([z_mean_c, z_log_var_c])
            current_step_bbox = self.decoder([conditioning_info, z])
            # Feed (label, predicted box) into the LSTM to update the
            # encoding of previously generated boxes.
            lstm_input = tf.concat([curr_label, current_step_bbox], axis=1)
            lstm_input = tf.expand_dims(lstm_input, axis=1) # (batch_size, 1, features) for single timestep execution
            if i == 0:
                prev_bounding_boxes_encoding, state_h, state_c = self.rnn_layer(lstm_input)
            else:
                prev_bounding_boxes_encoding, state_h, state_c = self.rnn_layer(lstm_input,
                                                                                initial_state=[state_h, state_c])
            prev_bounding_boxes_encoding = tf.squeeze(prev_bounding_boxes_encoding)
            bbox_outputs.append(current_step_bbox)
        bbox_outputs = tf.stack(bbox_outputs, axis=1)
        mse = tf.keras.losses.MeanSquaredError()
        bbox_loss = mse(bbox_input, bbox_outputs)
        print("Bounding Box loss = " + str(bbox_loss))
        kl_losses = tf.stack(kl_losses, axis=1)
        kl_loss_final = tf.reduce_mean(kl_losses)
        print("Mean KL Divergence = " + str(kl_loss_final))
        # KL term only contributes during training (weight 0.005).
        if training==True:
            self.add_loss(bbox_loss + 0.005 * kl_loss_final)
        else:
            self.add_loss(bbox_loss)
        return bbox_outputs
if __name__ == "__main__":
  # Smoke test: one forward pass on random inputs.
  model = BBoxVAE()
  x1 = tf.random.normal((32, 24))      # label-set batch (batch_size, 24)
  x2 = tf.random.normal((32, 24, 4))   # ground-truth boxes (batch_size, 24, 4)
  model([x1, x2])
| 37.458101 | 193 | 0.644295 |
892109cc3c001173bd3a73fca24209f66a23bf61 | 841 | py | Python | main.py | waynenilsen/VectorStrength | e42ef8a513e6770ac56a6cbc80e19cdd404e0ec6 | [
"MIT"
] | null | null | null | main.py | waynenilsen/VectorStrength | e42ef8a513e6770ac56a6cbc80e19cdd404e0ec6 | [
"MIT"
] | null | null | null | main.py | waynenilsen/VectorStrength | e42ef8a513e6770ac56a6cbc80e19cdd404e0ec6 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import database
from flask import Flask, render_template, request, abort
app = Flask(__name__)

@app.route("/")
def hello():
    """Serve the sensor dashboard page."""
    return render_template('sensors.html')
@app.route('/data', methods=['GET', 'POST'])
def post_data():
    """Record posted sensor readings or serve the raw database file.

    POST: store the JSON request body via ``database.add_record``.
    GET: return the SQLite database file from the static directory.
    """
    if request.method == 'POST':
        data = request.get_json()
        database.add_record(data)
        return "OK"
    elif request.method == 'GET':
        return app.send_static_file('database.db')
    else:
        # Unreachable with the declared methods, kept as a safety net.
        abort(404)
if __name__ == '__main__':
    import sys

    # With no CLI argument, use Flask's built-in development server;
    # otherwise serve the WSGI app through Tornado on the given port.
    # (Previously the Tornado branch also ran after the dev server
    # returned, crashing on the missing sys.argv[1].)
    if len(sys.argv) == 1:
        app.run('0.0.0.0', 5000)
    else:
        from tornado.wsgi import WSGIContainer
        from tornado.httpserver import HTTPServer
        from tornado.ioloop import IOLoop

        http_server = HTTPServer(WSGIContainer(app))
        http_server.listen(int(sys.argv[1]))
        IOLoop.instance().start()
| 22.72973 | 56 | 0.652794 |
db96b587b7e0e7b6f0ba69a3cd404dd5a4e57a47 | 11,434 | py | Python | okl4_kernel/okl4_2.1.1-patch.9/tools/pyelf/weaver/rootprogram_xml.py | CyberQueenMara/baseband-research | e1605537e10c37e161fff1a3416b908c9894f204 | [
"MIT"
] | 77 | 2018-12-31T22:12:09.000Z | 2021-12-31T22:56:13.000Z | okl4_kernel/okl4_2.1.1-patch.9/tools/pyelf/weaver/rootprogram_xml.py | CyberQueenMara/baseband-research | e1605537e10c37e161fff1a3416b908c9894f204 | [
"MIT"
] | null | null | null | okl4_kernel/okl4_2.1.1-patch.9/tools/pyelf/weaver/rootprogram_xml.py | CyberQueenMara/baseband-research | e1605537e10c37e161fff1a3416b908c9894f204 | [
"MIT"
] | 24 | 2019-01-20T15:51:52.000Z | 2021-12-25T18:29:13.000Z | ##############################################################################
# Copyright (c) 2007 Open Kernel Labs, Inc. (Copyright Holder).
# All rights reserved.
#
# 1. Redistribution and use of OKL4 (Software) in source and binary
# forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# (a) Redistributions of source code must retain this clause 1
# (including paragraphs (a), (b) and (c)), clause 2 and clause 3
# (Licence Terms) and the above copyright notice.
#
# (b) Redistributions in binary form must reproduce the above
# copyright notice and the Licence Terms in the documentation and/or
# other materials provided with the distribution.
#
# (c) Redistributions in any form must be accompanied by information on
# how to obtain complete source code for:
# (i) the Software; and
# (ii) all accompanying software that uses (or is intended to
# use) the Software whether directly or indirectly. Such source
# code must:
# (iii) either be included in the distribution or be available
# for no more than the cost of distribution plus a nominal fee;
# and
# (iv) be licensed by each relevant holder of copyright under
# either the Licence Terms (with an appropriate copyright notice)
# or the terms of a licence which is approved by the Open Source
# Initative. For an executable file, "complete source code"
# means the source code for all modules it contains and includes
# associated build and other files reasonably required to produce
# the executable.
#
# 2. THIS SOFTWARE IS PROVIDED ``AS IS'' AND, TO THE EXTENT PERMITTED BY
# LAW, ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED. WHERE ANY WARRANTY IS
# IMPLIED AND IS PREVENTED BY LAW FROM BEING DISCLAIMED THEN TO THE
# EXTENT PERMISSIBLE BY LAW: (A) THE WARRANTY IS READ DOWN IN FAVOUR OF
# THE COPYRIGHT HOLDER (AND, IN THE CASE OF A PARTICIPANT, THAT
# PARTICIPANT) AND (B) ANY LIMITATIONS PERMITTED BY LAW (INCLUDING AS TO
# THE EXTENT OF THE WARRANTY AND THE REMEDIES AVAILABLE IN THE EVENT OF
# BREACH) ARE DEEMED PART OF THIS LICENCE IN A FORM MOST FAVOURABLE TO
# THE COPYRIGHT HOLDER (AND, IN THE CASE OF A PARTICIPANT, THAT
# PARTICIPANT). IN THE LICENCE TERMS, "PARTICIPANT" INCLUDES EVERY
# PERSON WHO HAS CONTRIBUTED TO THE SOFTWARE OR WHO HAS BEEN INVOLVED IN
# THE DISTRIBUTION OR DISSEMINATION OF THE SOFTWARE.
#
# 3. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR ANY OTHER PARTICIPANT BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Processing of the rootprogram XML tag.
"""
from elf.core import PreparedElfFile, UnpreparedElfFile
from elf.constants import ET_EXEC
from weaver import MergeError
from weaver.ezxml import ParsedElement, Element, bool_attr, str_attr
from weaver.segments_xml import collect_patches, collect_elf_segments, \
Segment_el, Patch_el, Heap_el, start_to_value
from weaver.prog_pd_xml import collect_environment_element, \
collect_thread, Environment_el
from weaver.memobjs_xml import Stack_el, collect_memsection_element
import weaver.image
import weaver.bootinfo
# EzXML schema for the optional <extension> child of <rootprogram>: an
# extra ELF file (or a raw start symbol/address) loaded into the root
# program's protection domain.
Extension_el = Element("extension", Segment_el, Patch_el,
                       name = (str_attr, "required"),
                       file = (str_attr, "optional"),
                       start = (str_attr, "optional"),
                       direct = (bool_attr, "optional"),
                       pager = (str_attr, "optional"),
                       physpool = (str_attr, "optional"))

# EzXML schema for the <rootprogram> element: the ELF file and the
# default virtual/physical pools are mandatory; pager and direct
# mapping are optional overrides.
RootProgram_el = Element("rootprogram", Segment_el, Patch_el,
                         Extension_el, Environment_el,
                         Stack_el, Heap_el,
                         file = (str_attr, "required"),
                         physpool = (str_attr, "required"),
                         virtpool = (str_attr, "required"),
                         pager = (str_attr, "optional"),
                         direct = (bool_attr, "optional"))
def get_symbol(elf, symbol, may_not_exist = False):
    """Look up ``symbol`` in ``elf`` and return ``(address, size_in_bytes)``.

    Returns None when the symbol is absent; a warning is printed unless
    ``may_not_exist`` is true.  A zero-sized symbol is reported as one
    machine word.
    """
    sym = elf.find_symbol(symbol)
    if not sym:
        if may_not_exist:
            return None
        else:
            print "warn: cannot find symbol ", symbol
            return None
    address = sym.get_value()
    bytes = sym.get_size()
    if bytes == 0:
        # Size unknown in the symbol table: assume one word
        # (elf.wordsize is in bits).
        bytes = elf.wordsize / 8
    return (address, bytes)
def collect_rootprogram_element(parsed, ignore_name, namespace, image, machine, bootinfo, pools):
    """Process the <rootprogram> element of the XML specification.

    Loads the root program's ELF file, records its segments, patches,
    extensions, object environment, main thread and heap into ``image``
    and ``bootinfo``, and registers the resulting protection domain as
    the root server PD.
    """
    # Find the tag
    root_program_el = parsed.find_child("rootprogram")
    assert(root_program_el is not None)
    # New namespace for objects living in the root program's PD.
    rp_namespace = namespace.add_namespace('rootprogram')
    pd = weaver.bootinfo.RootServerPD('rootserver', rp_namespace,
                                      image, machine, pools)
    # Declare the default memory pools.
    def_virtpool = getattr(root_program_el, "virtpool", None)
    def_physpool = getattr(root_program_el, "physpool", None)
    def_pager = getattr(root_program_el, "pager", None)
    def_direct = getattr(root_program_el, "direct", None)
    bootinfo.set_system_default_attrs(def_virtpool,
                                      def_physpool,
                                      image,
                                      def_pager,
                                      def_direct)
    elf = UnpreparedElfFile(filename=root_program_el.file)
    if elf.elf_type != ET_EXEC:
        raise MergeError, "All the merged ELF files must be of EXEC type."
    # Record the entry point of the root program so that the kernel
    # can start it.
    image.kconfig.set_rootserver_entry(elf.entry_point)
    pd.set_default_pools(image, bootinfo)
    # Collect the object environment
    env = collect_environment_element(root_program_el.find_child('environment'),
                                      rp_namespace, machine, pools, image, bootinfo)
    segment_els = root_program_el.find_children("segment")
    collect_elf_segments(elf,
                         image.ROOT_PROGRAM,
                         segment_els,
                         root_program_el.file,
                         rp_namespace,
                         image,
                         machine,
                         pools)
    # Record any patches being made to the program.
    patch_els = root_program_el.find_children("patch")
    # RootProgram_el.extra_patches is an optional hook: other modules may
    # append (symbol, offset, bytes, value) tuples to be patched here.
    for p in getattr(RootProgram_el, "extra_patches", []):
        addr = get_symbol(elf, p[0], True)
        if addr == None:
            continue
        addr = int(addr[0])+ int(p[1])
        new_patch = Patch_el(address=hex(addr), bytes=p[2], value=p[3])
        patch_els.append(new_patch)
    collect_patches(elf, patch_els, root_program_el.file, image)
    for extension_el in root_program_el.find_children("extension"):
        if not ignore_name.match(extension_el.name):
            collect_extension_element(extension_el,
                                      pd,
                                      rp_namespace,
                                      elf,
                                      image,
                                      machine,
                                      bootinfo,
                                      pools)
    # Collect the main thread. The root program can only have one
    # thread, so this call chiefly is used to collect information
    # about the stack.
    #
    # The stack is not set up as a memsection, so it is not put in the
    # object environment.
    thread = collect_thread(elf, root_program_el, ignore_name, rp_namespace,
                            image, machine, pools,
                            entry = elf.entry_point,
                            name = 'rootprogram',
                            namespace_thread_name = "main")
    pd.add_thread(thread)
    # Collect the heap. Is there no element, create a fake one for
    # the collection code to use.
    #
    # The heap is not set up as a memsection, so it is not put in the
    # object environment.
    heap_el = root_program_el.find_child('heap')
    if heap_el is None:
        heap_el = ParsedElement('heap')
    heap_ms = collect_memsection_element(heap_el, ignore_name,
                                         rp_namespace, image, machine,
                                         pools)
    pd.attach_heap(heap_ms)
    image.add_group(0, [heap_ms.get_ms()])
    pd.add_environment(env)
    bootinfo.add_rootserver_pd(pd)
def collect_extension_element(extension_el, pd, namespace, rp_elf, image, machine, bootinfo, pools):
    """Process one <extension> element of the root program.

    Either loads the extension's own ELF file and attaches its segments
    to the root server PD, or (when only ``start`` is given) resolves
    the entry point against the root program's ELF.  The entry point is
    recorded in ``bootinfo``.
    """
    # New namespace for objects living in the extension.
    extn_namespace = namespace.add_namespace(extension_el.name)
    elf = None
    start = None
    name = None
    physpool = getattr(extension_el, 'physpool', None)
    pager = getattr(extension_el, 'pager', None)
    direct = getattr(extension_el, 'direct', None)
    # Push the overriding pools for the extension.
    image.push_attrs(physical = physpool,
                     pager = pager,
                     direct = direct)
    if hasattr(extension_el, "file"):
        elf = UnpreparedElfFile(filename=extension_el.file)
        if elf.elf_type != ET_EXEC:
            raise MergeError, "All the merged ELF files must be of EXEC type."
        segment_els = extension_el.find_children("segment")
        segs = collect_elf_segments(elf,
                                    image.EXTENSION,
                                    segment_els,
                                    extension_el.name,
                                    extn_namespace,
                                    image,
                                    machine,
                                    pools)
        segs_ms = [bootinfo.record_segment_info(extension_el.name,
                                                seg, image, machine, pools) for seg in segs]
        for seg_ms in segs_ms:
            pd.attach_memsection(seg_ms)
        # Record any patches being made to the program.
        patch_els = extension_el.find_children("patch")
        collect_patches(elf, patch_els, extension_el.file, image)
        start = elf.entry_point
        name = extension_el.file
    if hasattr(extension_el, "start"):
        start = extension_el.start
        name = extension_el.name
        # If no file is supplied, look for symbols in the root
        # program.
        if elf is None:
            elf = rp_elf
    elf = elf.prepare(elf.wordsize, elf.endianess)
    # ``start`` may be a symbol name or a number; resolve to an address.
    start = start_to_value(start, elf)
    bootinfo.add_elf_info(name = name,
                          elf_type = image.EXTENSION,
                          entry_point = start)
    image.pop_attrs()
| 41.729927 | 100 | 0.606699 |
59f863ed9958597bb869596437ab39082a0e5bb7 | 2,114 | py | Python | src/deeplearning/QuantumDataset.py | philipco/mcm-bidirectional-compression | 64f9d1cb2f302e948d8331477e5ef8f4fc7d872f | [
"MIT"
] | null | null | null | src/deeplearning/QuantumDataset.py | philipco/mcm-bidirectional-compression | 64f9d1cb2f302e948d8331477e5ef8f4fc7d872f | [
"MIT"
] | null | null | null | src/deeplearning/QuantumDataset.py | philipco/mcm-bidirectional-compression | 64f9d1cb2f302e948d8331477e5ef8f4fc7d872f | [
"MIT"
] | null | null | null | """
Created by Philippenko, 13rd May 2021.
"""
import itertools
import torch
from torch.utils.data import Dataset
from src.utils.Utilities import get_project_root, create_folder_if_not_existing
from src.utils.data.RealDatasetPreparation import prepare_quantum
class QuantumDataset(Dataset):
    """Torch dataset over the (non-IID) quantum data split across 20 workers.

    Labels are remapped from {-1, 1} to {0, 1}.  Per worker, the first
    10% of samples become the test set and the next 10% the validation
    set; the remainder is the per-worker training pool whose index
    ranges are exposed in ``self.split``.
    """
    def __init__(self, train=True):
        # ``train``: when True, serve validation + training samples;
        # otherwise serve the held-out test samples.
        root = get_project_root()
        # NOTE(review): the folder created is ...N21 while the pickle
        # path below uses ...N20 — confirm this mismatch is intended.
        create_folder_if_not_existing("{0}/pickle/quantum-non-iid-N21".format(root))
        X, Y, dim_notebook = prepare_quantum(20, data_path="{0}/pickle/".format(root), pickle_path="{0}/pickle/quantum-non-iid-N20".format(root), iid=False)
        # Remap labels: -1 -> 0, everything else -> 1.
        for y in Y:
            for i in range(len(y)):
                if y[i].item() == -1:
                    y[i] = 0
                else:
                    y[i] = 1
        test_data, test_labels = [], []
        eval_data, eval_labels = [], []
        last_index = 0
        split = []
        for i in range(len(X)):
            x, y = X[i], Y[i]
            n = int(len(x) * 10 / 100)
            test_data += x[:n]
            test_labels += y[:n]
            eval_data += x[n:2*n]
            eval_labels += y[n:2*n]
            # NOTE(review): only the test slice [:n] is removed here, so
            # the eval slice [n:2n] also remains inside the training
            # pool X[i] — confirm this overlap is intentional.
            X[i], Y[i] = X[i][n:], Y[i][n:]
            # Contiguous index range owned by worker i in the
            # concatenated training pool.
            split.append(list(range(last_index, last_index + len(X[i]))))
            last_index += len(X[i])
        self.train = train
        if self.train:
            # Validation samples come first; ind_val marks where the
            # workers' training data begins, and split is shifted to
            # account for that prefix.
            self.data = eval_data + list(itertools.chain.from_iterable(X[:20]))
            self.labels = eval_labels + list(itertools.chain.from_iterable(Y[:20]))
            self.ind_val = len(eval_data)
            self.split = [[s[i] + len(eval_data) for i in range(len(s))] for s in split]
        else:
            self.data = test_data
            self.labels = test_labels
    def __len__(self):
        # Number of samples currently served (train+val or test).
        return len(self.labels)
    def __getitem__(self, index: int):
        # Features as float tensor, label as int64 for loss functions.
        return self.data[index].float(), self.labels[index].type(torch.LongTensor)
if __name__ == '__main__':
    # Quick sanity check of both dataset variants.
    dataset = QuantumDataset(train=True)
    print(len(dataset))
    print(dataset[100])
    dataset = QuantumDataset(train=False)
    print(len(dataset))
    print(dataset[100])
3b1716217017764bfa156dbe791b5396f73a6bca | 1,506 | py | Python | server.py | kanishk98/transaction-scheduler | 778f3f59704623ad53c008d28ea64e2e3665aa1e | [
"MIT"
] | null | null | null | server.py | kanishk98/transaction-scheduler | 778f3f59704623ad53c008d28ea64e2e3665aa1e | [
"MIT"
] | 1 | 2018-10-07T21:46:33.000Z | 2018-10-08T04:56:08.000Z | server.py | kanishk98/transaction-scheduler | 778f3f59704623ad53c008d28ea64e2e3665aa1e | [
"MIT"
] | null | null | null | from flask import Flask, request
import json
from request_handler import send_to_scheduler
import fifo
import ldsf
from datamodel import Operation, Item, Schedule
import jsonpickle
import time
# Module-level state: collected operations (unused below) and the Flask app.
operations = []
app = Flask(__name__)
@app.route('/add-operation', methods=['POST'])
def add_operation():
    """Forward each posted operation to the (BLDSF) scheduler and reply
    with the first line of the timing file it writes.

    NOTE(review): the trailing ``return 'OK'`` is unreachable — the
    ``with`` block always returns (or raises) first.
    """
    arr = json.loads(request.data)
    print('Request received')
    for data in arr:
        i = data['item']
        var = i['variable']
        k = i['kind']
        item = Item(k, var)
        operation = Operation(item.kind, item, data['tid'])
        send_to_scheduler(operation, len(arr))
    with open('./bldsf_time.txt', 'r') as f:
        return f.readline()
    return 'OK'
@app.route('/add-operation-fifo', methods=['POST'])
def add_operation_fifo():
    """Run the posted operations through the FIFO scheduler and return
    the time reported for the last operation.

    NOTE(review): if the request body is an empty list, ``time_taken``
    is never bound and the final lines raise NameError — confirm
    callers always send at least one operation.
    """
    arr = json.loads(request.data)
    print('Request received')
    for data in arr:
        i = data['item']
        var = i['variable']
        k = i['kind']
        item = Item(k, var)
        operation = Operation(item.kind, item, data['tid'])
        time_taken = fifo.organise_operations(operation)
    print('TIME TAKEN FOR FIFO: ' + str(time_taken) + ' units')
    return str(time_taken)
@app.route('/add-operation-ldsf', methods=['POST'])
def add_operation_ldsf():
    """Run the posted operations through the LDSF scheduler and reply
    with the first line of the timing file it writes.

    NOTE(review): the trailing ``return 'OK'`` is unreachable — the
    ``with`` block always returns (or raises) first.
    """
    arr = json.loads(request.data)
    print('Request received')
    for data in arr:
        i = data['item']
        var = i['variable']
        k = i['kind']
        item = Item(k, var)
        operation = Operation(item.kind, item, data['tid'])
        ldsf.organise_operations(operation, len(arr))
    with open('./ldsf_time.txt', 'r') as f:
        return f.readline()
    return 'OK'
# Threaded so concurrent scheduler requests can be handled; debug mode on.
app.run(debug=True, threaded=True)
6d6ea046537ab7c86e729213e35002ea3ad32eb1 | 1,144 | py | Python | benchmarks/utils.py | insolor/pymorphy2 | 92d546f042ff14601376d3646242908d5ab786c1 | [
"MIT"
] | 859 | 2015-01-05T00:48:23.000Z | 2022-03-19T07:42:23.000Z | benchmarks/utils.py | insolor/pymorphy2 | 92d546f042ff14601376d3646242908d5ab786c1 | [
"MIT"
] | 106 | 2015-01-03T12:21:56.000Z | 2022-03-30T11:07:46.000Z | benchmarks/utils.py | insolor/pymorphy2 | 92d546f042ff14601376d3646242908d5ab786c1 | [
"MIT"
] | 118 | 2015-01-05T21:10:35.000Z | 2022-03-15T14:29:29.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, division
import time
import timeit
import gc
def measure(func, inner_iterations=1, repeats=5):
    """
    Runs func ``repeats`` times and returns the fastest speed
    (inner loop iterations per second). Use ``inner_iterations`` to specify
    the number of inner loop iterations.

    Use this function for long-running functions.

    Garbage collection is disabled while timing to reduce jitter and is
    re-enabled even if ``func`` raises.
    """
    times = []
    gc.disable()
    try:
        for _ in range(repeats):
            start = time.time()
            func()
            times.append(time.time() - start)
    finally:
        # Always restore the collector, even when ``func`` raises.
        gc.enable()
    return inner_iterations/min(times)
def bench(stmt, setup, op_count=1, repeats=3, runs=5):
    """
    Benchmark ``stmt``: execute it ``repeats`` times per run for ``runs``
    runs, pick the fastest run, and return its speed in operations per
    second (one run performs ``op_count * repeats`` operations).
    """
    timer = timeit.Timer(stmt, setup)
    run_times = [timer.timeit(repeats) for _ in range(runs)]
    best = min(run_times)
    return op_count * repeats / best
def format_bench(name, result, description='K words/sec'):
    """Render one benchmark line: right-aligned name, rate to 3 decimals."""
    return "{0:>25}:\t{1:.3f}{2}".format(name, result, description)
| 26 | 75 | 0.648601 |
bd54505836347ab30425ef887fda27a22860ea85 | 1,037 | py | Python | setup.py | hephex/asyncache | 27ec0ad90ccb2a86ffaa8dcf55bb388059b484a0 | [
"MIT"
] | 44 | 2018-12-14T00:03:40.000Z | 2022-03-31T07:42:37.000Z | setup.py | hephex/asyncache | 27ec0ad90ccb2a86ffaa8dcf55bb388059b484a0 | [
"MIT"
] | 2 | 2020-09-28T10:55:42.000Z | 2020-10-27T01:23:49.000Z | setup.py | hephex/asyncache | 27ec0ad90ccb2a86ffaa8dcf55bb388059b484a0 | [
"MIT"
] | 2 | 2020-11-27T15:16:57.000Z | 2022-02-13T06:12:00.000Z | from setuptools import find_packages, setup
# Distribution metadata for the asyncache package (cachetools helpers
# usable with async functions).
setup(
    name="asyncache",
    version="0.1.1",
    url="https://github.com/hephex/asyncache",
    license="MIT",
    author="Hephex",
    description="Helpers to use cachetools with async functions",
    long_description=open("README.rst").read(),
    keywords="cache caching memoize memoizing memoization async",
    packages=find_packages(exclude=["tests", "tests.*"]),
    install_requires=["cachetools>=2.1"],
    # Trove classifiers describing the supported environments.
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Other Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
| 37.035714 | 71 | 0.641273 |
58acfa03d5c545e4d28f6806649dbe09bdc5025f | 1,452 | py | Python | Jokenpo.py | Graziela-Silva/Jogos-Games | 48d8dd45f8471cd3ec1a79747183987ceb0a4630 | [
"MIT"
] | null | null | null | Jokenpo.py | Graziela-Silva/Jogos-Games | 48d8dd45f8471cd3ec1a79747183987ceb0a4630 | [
"MIT"
] | null | null | null | Jokenpo.py | Graziela-Silva/Jogos-Games | 48d8dd45f8471cd3ec1a79747183987ceb0a4630 | [
"MIT"
] | null | null | null | import random
print('*'*25)
print('Vamos jogar jokenpô!!')
opcoes = ['Pedra','Papel','Tesoura']
computador = random.choice(opcoes).lower()
usuario = str(input('Sua escolha: ')).lower().strip()
while usuario != 'pedra' and usuario != 'papel' and usuario != 'tesoura':
print('No jokenpô as opções de escolha são apenas: Pedra, Papel ou Tesoura')
usuario = str(input('Escolha uma opção válida: ')).lower().strip()
if computador == usuario:
print('\nPensamos igual!! \nO jogo deu Empate!')
print(f'Eu escolhi: {computador} \nVocê escolheu: {usuario}')
elif computador == 'pedra' and usuario == 'tesoura':
print('\nHAHAHA!! Eu venci!!')
print(f'Eu escolhi: {computador} \nVocê escolheu: {usuario}')
elif computador == 'tesoura' and usuario == 'papel':
print('\nHAHAHA!! Eu venci!!')
print(f'Eu escolhi: {computador} \nVocê escolheu: {usuario}')
elif computador == 'papel' and usuario == 'pedra':
print('\nHAHAHA!! Eu venci!!')
print(f'Eu escolhi: {computador} \nVocê escolheu: {usuario}')
elif computador == 'pedra' and usuario == 'papel':
print('\nParabéns!! Você venceu!!')
print(f'Eu escolhi: {computador} \nVocê escolheu: {usuario}')
elif computador == 'papel' and usuario == 'tesoura':
print('\nParabéns!! Você venceu!!')
print(f'Eu escolhi: {computador} \nVocê escolheu: {usuario}')
else:
print('\nParabéns!! Você venceu!!')
print(f'Eu escolhi: {computador} \nVocê escolheu: {usuario}') | 46.83871 | 80 | 0.663223 |
033f3f8d2b25f6c01d93ddd8b9ea1c8a9fa70504 | 7,139 | py | Python | python/ccxt/async/paymium.py | morgwn-shaw/bttb | a0e8dac53f233f747ad1c50c13a1d4b2d0ca14a5 | [
"MIT"
] | 3 | 2017-11-19T22:08:29.000Z | 2018-02-21T11:14:41.000Z | python/ccxt/async/paymium.py | morgwn-shaw/bttb | a0e8dac53f233f747ad1c50c13a1d4b2d0ca14a5 | [
"MIT"
] | null | null | null | python/ccxt/async/paymium.py | morgwn-shaw/bttb | a0e8dac53f233f747ad1c50c13a1d4b2d0ca14a5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from ccxt.async.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
class paymium (Exchange):
    """ccxt (async flavour) adapter for the Paymium BTC/EUR exchange."""

    def describe(self):
        # Static exchange metadata merged over the Exchange base defaults.
        return self.deep_extend(super(paymium, self).describe(), {
            'id': 'paymium',
            'name': 'Paymium',
            'countries': ['FR', 'EU'],
            'rateLimit': 2000,
            'version': 'v1',
            'hasCORS': True,
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27790564-a945a9d4-5ff9-11e7-9d2d-b635763f2f24.jpg',
                'api': 'https://paymium.com/api',
                'www': 'https://www.paymium.com',
                'doc': [
                    'https://github.com/Paymium/api-documentation',
                    'https://www.paymium.com/page/developers',
                ],
            },
            'api': {
                'public': {
                    'get': [
                        'countries',
                        'data/{id}/ticker',
                        'data/{id}/trades',
                        'data/{id}/depth',
                        'bitcoin_charts/{id}/trades',
                        'bitcoin_charts/{id}/depth',
                    ],
                },
                'private': {
                    'get': [
                        'merchant/get_payment/{UUID}',
                        'user',
                        'user/addresses',
                        'user/addresses/{btc_address}',
                        'user/orders',
                        'user/orders/{UUID}',
                        'user/price_alerts',
                    ],
                    'post': [
                        'user/orders',
                        'user/addresses',
                        'user/payment_requests',
                        'user/price_alerts',
                        'merchant/create_payment',
                    ],
                    'delete': [
                        'user/orders/{UUID}/cancel',
                        'user/price_alerts/{id}',
                    ],
                },
            },
            'markets': {
                'BTC/EUR': {'id': 'eur', 'symbol': 'BTC/EUR', 'base': 'BTC', 'quote': 'EUR'},
            },
            'fees': {
                'trading': {
                    'maker': 0.0059,
                    'taker': 0.0059,
                },
            },
        })

    async def fetch_balance(self, params={}):
        """Fetch account balances from /user.

        The API reports per-currency fields named 'balance_<cur>'
        (available) and 'locked_<cur>' (held in orders).
        """
        balances = await self.privateGetUser()
        result = {'info': balances}
        for c in range(0, len(self.currencies)):
            currency = self.currencies[c]
            lowercase = currency.lower()
            account = self.account()
            balance = 'balance_' + lowercase
            locked = 'locked_' + lowercase
            if balance in balances:
                account['free'] = balances[balance]
            if locked in balances:
                account['used'] = balances[locked]
            account['total'] = self.sum(account['free'], account['used'])
            result[currency] = account
        return self.parse_balance(result)

    async def fetch_order_book(self, symbol, params={}):
        """Fetch the order book; bids are re-sorted highest price first."""
        orderbook = await self.publicGetDataIdDepth(self.extend({
            'id': self.market_id(symbol),
        }, params))
        result = self.parse_order_book(orderbook, None, 'bids', 'asks', 'price', 'amount')
        result['bids'] = self.sort_by(result['bids'], 0, True)
        return result

    async def fetch_ticker(self, symbol, params={}):
        """Fetch the ticker; quoteVolume is derived as baseVolume * vwap."""
        ticker = await self.publicGetDataIdTicker(self.extend({
            'id': self.market_id(symbol),
        }, params))
        # 'at' is in seconds; ccxt timestamps are milliseconds.
        timestamp = ticker['at'] * 1000
        vwap = float(ticker['vwap'])
        baseVolume = float(ticker['volume'])
        quoteVolume = baseVolume * vwap
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': float(ticker['high']),
            'low': float(ticker['low']),
            'bid': float(ticker['bid']),
            'ask': float(ticker['ask']),
            'vwap': vwap,
            'open': float(ticker['open']),
            'close': None,
            'first': None,
            'last': float(ticker['price']),
            'change': None,
            'percentage': float(ticker['variation']),
            'average': None,
            'baseVolume': baseVolume,
            'quoteVolume': quoteVolume,
            'info': ticker,
        }

    def parse_trade(self, trade, market):
        """Convert a raw trade into ccxt's unified trade structure.

        The traded amount lives in a currency-named field, e.g.
        'traded_btc' for a BTC-base market.
        """
        timestamp = int(trade['created_at_int']) * 1000
        volume = 'traded_' + market['base'].lower()
        return {
            'info': trade,
            'id': trade['uuid'],
            'order': None,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': market['symbol'],
            'type': None,
            'side': trade['side'],
            'price': trade['price'],
            'amount': trade[volume],
        }

    async def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch public trades for ``symbol``."""
        market = self.market(symbol)
        response = await self.publicGetDataIdTrades(self.extend({
            'id': market['id'],
        }, params))
        return self.parse_trades(response, market)

    async def create_order(self, market, type, side, amount, price=None, params={}):
        """Place an order via POST /user/orders.

        NOTE(review): ``price`` is attached only when ``type`` is
        'market' — this looks inverted (limit orders normally carry the
        price); confirm against the Paymium API documentation.
        """
        order = {
            'type': self.capitalize(type) + 'Order',
            'currency': self.market_id(market),
            'direction': side,
            'amount': amount,
        }
        if type == 'market':
            order['price'] = price
        response = await self.privatePostUserOrders(self.extend(order, params))
        return {
            'info': response,
            'id': response['uuid'],
        }

    async def cancel_order(self, id, symbol=None, params={}):
        """Cancel an order.

        NOTE(review): privatePostCancelOrder is not declared in the
        'api' map above (the declared endpoint is
        DELETE user/orders/{UUID}/cancel) — confirm this method name.
        """
        return await self.privatePostCancelOrder(self.extend({
            'orderNumber': id,
        }, params))

    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the request: public calls use a query string; private
        calls send a JSON body signed with HMAC(secret, nonce + url + body)."""
        url = self.urls['api'] + '/' + self.version + '/' + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if api == 'public':
            if query:
                url += '?' + self.urlencode(query)
        else:
            body = self.json(params)
            nonce = str(self.nonce())
            auth = nonce + url + body
            headers = {
                'Api-Key': self.apiKey,
                'Api-Signature': self.hmac(self.encode(auth), self.secret),
                'Api-Nonce': nonce,
                'Content-Type': 'application/json',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}

    async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Perform the HTTP request and raise on an 'errors' payload."""
        response = await self.fetch2(path, api, method, params, headers, body)
        if 'errors' in response:
            raise ExchangeError(self.id + ' ' + self.json(response))
        return response
| 37.376963 | 126 | 0.470094 |
3b5d2bebe768720085d85710dec7af5d6db3a64a | 4,765 | py | Python | ariadne/contrib/tracing/opentracing.py | d-danilkin/ariadne | 62b1363752394439f21d8be1b48074b4ba280493 | [
"BSD-3-Clause"
] | 1,778 | 2018-07-09T09:54:24.000Z | 2022-03-31T18:22:56.000Z | ariadne/contrib/tracing/opentracing.py | d-danilkin/ariadne | 62b1363752394439f21d8be1b48074b4ba280493 | [
"BSD-3-Clause"
] | 639 | 2018-07-12T12:39:25.000Z | 2022-03-28T04:02:52.000Z | ariadne/contrib/tracing/opentracing.py | d-danilkin/ariadne | 62b1363752394439f21d8be1b48074b4ba280493 | [
"BSD-3-Clause"
] | 154 | 2018-08-10T18:50:49.000Z | 2022-03-31T17:48:14.000Z | import cgi
import os
from functools import partial
from inspect import isawaitable
from typing import Any, Callable, Dict, Optional, Union
from graphql import GraphQLResolveInfo
from opentracing import Scope, Tracer, global_tracer
from opentracing.ext import tags
from starlette.datastructures import UploadFile
from ...types import ContextValue, Extension, Resolver
from .utils import format_path, should_trace
ArgFilter = Callable[[Dict[str, Any], GraphQLResolveInfo], Dict[str, Any]]
class OpenTracingExtension(Extension):
    """Ariadne extension reporting GraphQL resolver execution to OpenTracing.

    A root span wraps the whole request; each traced resolver opens a
    child span tagged with the parent type, field path and (filtered)
    arguments.
    """

    _arg_filter: Optional[ArgFilter]
    _root_scope: Optional[Scope]  # set in request_started, None before
    _tracer: Tracer

    def __init__(self, *, arg_filter: Optional[ArgFilter] = None):
        # ``arg_filter`` may redact/transform resolver kwargs before they
        # are attached to spans (e.g. to hide credentials).
        self._arg_filter = arg_filter
        self._tracer = global_tracer()
        self._root_scope = None

    def request_started(self, context: ContextValue):
        """Open the root span covering the whole GraphQL request."""
        self._root_scope = self._tracer.start_active_span("GraphQL Query")
        self._root_scope.span.set_tag(tags.COMPONENT, "graphql")

    def request_finished(self, context: ContextValue):
        """Close the root span opened in request_started."""
        self._root_scope.close()

    async def resolve(
        self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs
    ):
        """Wrap a resolver call in a span (skipped for default resolvers)."""
        if not should_trace(info):
            result = next_(parent, info, **kwargs)
            if isawaitable(result):
                result = await result
            return result

        with self._tracer.start_active_span(info.field_name) as scope:
            span = scope.span
            span.set_tag(tags.COMPONENT, "graphql")
            span.set_tag("graphql.parentType", info.parent_type.name)

            # Dotted path of the field within the response, e.g. "user.posts.0".
            graphql_path = ".".join(
                map(str, format_path(info.path))  # pylint: disable=bad-builtin
            )
            span.set_tag("graphql.path", graphql_path)

            if kwargs:
                filtered_kwargs = self.filter_resolver_args(kwargs, info)
                for kwarg, value in filtered_kwargs.items():
                    span.set_tag(f"graphql.param.{kwarg}", value)

            result = next_(parent, info, **kwargs)
            if isawaitable(result):
                result = await result
            return result

    def filter_resolver_args(
        self, args: Dict[str, Any], info: GraphQLResolveInfo
    ) -> Dict[str, Any]:
        """Return a loggable copy of resolver args, run through the filter."""
        args_to_trace = copy_args_for_tracing(args)
        if not self._arg_filter:
            return args_to_trace

        return self._arg_filter(args_to_trace, info)
class OpenTracingExtensionSync(OpenTracingExtension):
    """Synchronous variant: same tracing, but never awaits the resolver."""

    def resolve(
        self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs
    ):  # pylint: disable=invalid-overridden-method
        if not should_trace(info):
            result = next_(parent, info, **kwargs)
            return result

        with self._tracer.start_active_span(info.field_name) as scope:
            span = scope.span
            span.set_tag(tags.COMPONENT, "graphql")
            span.set_tag("graphql.parentType", info.parent_type.name)

            # Dotted path of the field within the response.
            graphql_path = ".".join(
                map(str, format_path(info.path))  # pylint: disable=bad-builtin
            )
            span.set_tag("graphql.path", graphql_path)

            if kwargs:
                filtered_kwargs = self.filter_resolver_args(kwargs, info)
                for kwarg, value in filtered_kwargs.items():
                    span.set_tag(f"graphql.param.{kwarg}", value)

            result = next_(parent, info, **kwargs)
            return result
def opentracing_extension(*, arg_filter: Optional[ArgFilter] = None):
    """Return an ``OpenTracingExtension`` factory pre-bound to ``arg_filter``."""
    extension_factory = partial(OpenTracingExtension, arg_filter=arg_filter)
    return extension_factory
def opentracing_extension_sync(*, arg_filter: Optional[ArgFilter] = None):
    """Return an ``OpenTracingExtensionSync`` factory pre-bound to ``arg_filter``."""
    extension_factory = partial(OpenTracingExtensionSync, arg_filter=arg_filter)
    return extension_factory
def copy_args_for_tracing(value: Any) -> Any:
    """Deep-copy resolver arguments into a span-safe structure.

    Dicts and lists are copied recursively; uploaded files are replaced
    by their string description (never their contents); everything else
    is passed through unchanged.
    """
    if isinstance(value, dict):
        return {k: copy_args_for_tracing(v) for k, v in value.items()}
    if isinstance(value, list):
        return [copy_args_for_tracing(v) for v in value]
    if isinstance(value, (UploadFile, cgi.FieldStorage)):
        return repr_upload_file(value)
    return value
def repr_upload_file(upload_file: "Union[UploadFile, cgi.FieldStorage]") -> str:
    """Return a loggable description of an uploaded file.

    Only the type, MIME type, payload size and filename are included —
    never the file contents.  (Previously the computed ``filename`` was
    unused and the literal ``(unknown)`` was emitted instead.)
    """
    filename = upload_file.filename
    # FieldStorage exposes the MIME type as ``type``; Starlette's
    # UploadFile as ``content_type``.
    if isinstance(upload_file, cgi.FieldStorage):
        mime_type = upload_file.type
    else:
        mime_type = upload_file.content_type

    if upload_file.file is None and isinstance(upload_file, cgi.FieldStorage):
        # FieldStorage keeps small payloads in ``value`` instead of a file.
        size = len(upload_file.value) if upload_file.value is not None else 0
    else:
        # Measure the stream without consuming it: seek to end, then rewind.
        file_ = upload_file.file
        file_.seek(0, os.SEEK_END)
        size = file_.tell()
        file_.seek(0)

    return (
        f"{type(upload_file)}(mime_type={mime_type}, size={size}, "
        f"filename={filename})"
    )
| 34.280576 | 87 | 0.656243 |
5a7ee169ab706b3dd4e65494eb39d6df9932d6a2 | 772 | py | Python | dictances/pearson.py | DavidBerdik/dictances | 7b804b62032bbdecc8e73946cf74b171681fe4f5 | [
"MIT"
] | 30 | 2018-08-30T16:00:14.000Z | 2022-03-14T14:36:17.000Z | dictances/pearson.py | DavidBerdik/dictances | 7b804b62032bbdecc8e73946cf74b171681fe4f5 | [
"MIT"
] | 6 | 2019-06-18T15:37:23.000Z | 2021-04-15T12:40:42.000Z | dictances/pearson.py | DavidBerdik/dictances | 7b804b62032bbdecc8e73946cf74b171681fe4f5 | [
"MIT"
] | 6 | 2019-02-10T23:22:25.000Z | 2020-10-01T16:25:40.000Z | """Return the nth power distance beetween a and b."""
from math import sqrt
from typing import Dict
def pearson(a: Dict, b: Dict) -> float:
    """Return the Pearson correlation distance between ``a`` and ``b``.

    Computed as ``1 - r`` where ``r`` is the Pearson correlation coefficient of
    the two sparse value dictionaries; keys missing from ``b`` contribute zero
    to the cross-product term.  (The previous docstring, "nth power distance",
    was a copy-paste error.)

    Raises ZeroDivisionError when either dictionary is empty or has zero
    variance (all values equal).
    """
    bget = b.get
    ab = 0  # sum of products over keys present (with truthy values) in both dicts
    n_mul = 0  # number of keys contributing to ``ab``
    a_sum = 0
    b_sum = 0
    a_sum2 = 0
    b_sum2 = 0
    for k, a_val in a.items():
        b_val = bget(k)
        a_sum += a_val
        a_sum2 += a_val**2
        # NOTE: a zero value in ``b`` is treated the same as a missing key here,
        # matching the original behaviour.
        if b_val:
            n_mul += 1
            ab += a_val * b_val
    # Only the values of ``b`` are needed for its sums; iterating items() and
    # discarding the key was wasteful.
    for b_val in b.values():
        b_sum += b_val
        b_sum2 += b_val**2
    len_a = len(a)
    len_b = len(b)
    a_mean = a_sum / len_a
    b_mean = b_sum / len_b
    return 1 - (ab - n_mul * a_mean * b_mean) / (
        sqrt(a_sum2 - len_a * a_mean**2) * sqrt(b_sum2 - len_b * b_mean**2)
    )
| 22.057143 | 117 | 0.537565 |
b6648e6d55217c4be576e9045d14df741e7ea8c3 | 167 | py | Python | examples/more_custom_filters/filter_patriots2.py | jsnavarr/dactyl | 70adf8c562161a96d4ab99964541027dfae329e0 | [
"MIT"
] | 61 | 2017-03-22T09:21:02.000Z | 2021-12-16T21:23:41.000Z | examples/more_custom_filters/filter_patriots2.py | jsnavarr/dactyl | 70adf8c562161a96d4ab99964541027dfae329e0 | [
"MIT"
] | 61 | 2017-02-22T01:07:25.000Z | 2022-02-24T01:08:46.000Z | examples/more_custom_filters/filter_patriots2.py | jsnavarr/dactyl | 70adf8c562161a96d4ab99964541027dfae329e0 | [
"MIT"
] | 28 | 2017-05-29T13:20:06.000Z | 2022-03-26T16:42:20.000Z | ## 'Patriots' custom filter v2
def filter_markdown(md, **kwargs):
    """Dactyl content filter: replace 'patriots'/'Patriots' with the codename."""
    # Extra filter kwargs are accepted and ignored, as Dactyl passes them to
    # every filter.  Both capitalizations are handled in one chained expression.
    return md.replace("patriots", "la-li-lu-le-lo").replace(
        "Patriots", "La-Li-Lu-Le-Lo"
    )
| 27.833333 | 50 | 0.646707 |
2796cce0579b36c39e1d1c9286a159b59b02008d | 1,136 | py | Python | api_request/request.py | felixwortmann/tweeKaLytics | 1930147d5e09f559c1c5490b32ef8821c86decbe | [
"MIT"
] | 2 | 2020-11-05T08:58:02.000Z | 2020-11-18T16:38:49.000Z | api_request/request.py | felixwortmann/tweeKaLytics | 1930147d5e09f559c1c5490b32ef8821c86decbe | [
"MIT"
] | null | null | null | api_request/request.py | felixwortmann/tweeKaLytics | 1930147d5e09f559c1c5490b32ef8821c86decbe | [
"MIT"
] | null | null | null | import tweepy
import kafka_publish
# Twitter API credentials are read from local files kept out of version
# control; each file holds a single token string.
with open("api_key.txt", "r") as f:
    api_key = f.read()
with open("api_key_secret.txt", "r") as f:
    api_key_secret = f.read()
with open("access_token.txt", "r") as f:
    access_token = f.read()
with open("access_token_secret.txt", "r") as f:
    access_token_secret = f.read()
# OAuth 1.0a setup: consumer key/secret plus the user access token pair.
auth = tweepy.OAuthHandler(api_key, api_key_secret)
auth.set_access_token(access_token,access_token_secret)
api = tweepy.API(auth)
# Stream Listener
class MyStreamListener(tweepy.StreamListener):
    # Stream listener that forwards every raw tweet payload to Kafka.
    def on_data(self, raw_data):
        # Hand the raw JSON payload straight to the Kafka producer.
        kafka_publish.send_json(raw_data)
    def on_status(self, status):
        print(status.text)
    def on_error(self, status_code):
        print(status_code)
        # 420 is Twitter's rate-limit code; returning False disconnects the stream.
        if status_code == 420:
            return False
class MyStream():
    # Thin wrapper around tweepy.Stream that runs keyword filtering asynchronously.
    def __init__(self, auth, listener):
        self.stream = tweepy.Stream(auth=auth, listener=listener)
    def start(self, track):
        # is_async=True keeps the filter running on a background thread.
        self.stream.filter(track=track, is_async=True)
# Wire listener and stream together and start tracking election keywords.
listener = MyStreamListener()
stream = MyStream(api.auth, listener)
stream.start(['trump','election','biden'])
| 22.27451 | 65 | 0.683979 |
885e63f2061633ae94044890120bfccc5eaa0526 | 2,426 | py | Python | bigml/tests/test_29_library.py | javinp/python | bdec1e206ed028990503ed4bebcbc7023d3ff606 | [
"Apache-2.0"
] | 1 | 2021-06-20T11:51:22.000Z | 2021-06-20T11:51:22.000Z | bigml/tests/test_29_library.py | javinp/python | bdec1e206ed028990503ed4bebcbc7023d3ff606 | [
"Apache-2.0"
] | null | null | null | bigml/tests/test_29_library.py | javinp/python | bdec1e206ed028990503ed4bebcbc7023d3ff606 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Creating and updating scripts
"""
from world import world, setup_module, teardown_module
import create_library_steps as library_create
class TestLibrary(object):
    # Integration test: create a whizzml library, update an attribute, and
    # verify both the source code and the updated attribute.
    # NOTE: Python 2 code (print statements); test_scenario1's docstring is
    # printed at runtime, so it must not be reworded.
    def setup(self):
        """
        Debug information
        """
        print "\n-------------------\nTests in: %s\n" % __name__
    def teardown(self):
        """
        Debug information
        """
        print "\nEnd of tests in: %s\n-------------------\n" % __name__
    def test_scenario1(self):
        """
            Scenario: Successfully creating a whizzml library:
                Given I create a whizzml library from a excerpt of code "<source_code>"
                And I wait until the library is ready less than <time_1> secs
                And I update the library with "<param>", "<param_value>"
                And I wait until the library is ready less than <time_2> secs
                Then the library code is "<source_code>" and the value of "<param>" is "<param_value>"
                Examples:
                | source_code | time_1 | time_2 | param | param_value
                | (define (mu x) (+ x 1)) | 10 | 10 | name | my library
        """
        print self.test_scenario1.__doc__
        # Each example row: [source_code, time_1, time_2, param, param_value]
        examples = [
            ['(define (mu x) (+ x 1))', '10', '10', 'name', 'my library']]
        for example in examples:
            print "\nTesting with:\n", example
            library_create.i_create_a_library(self, example[0])
            library_create.the_library_is_finished(self, example[1])
            library_create.i_update_a_library(self, example[3], example[4])
            library_create.the_library_is_finished(self, example[2])
            library_create.the_library_code_and_attributes(self, example[0], example[3], example[4])
| 39.129032 | 102 | 0.609646 |
3cc0dcfb008cd5aebc479de1c41092efe8906f13 | 1,259 | py | Python | pnc_cli/builds.py | vibe13/pnc-cli | 9020462cac5254bdd40cc7d8fa239433242cce45 | [
"Apache-2.0"
] | 2 | 2016-05-18T15:01:34.000Z | 2016-08-11T14:04:17.000Z | pnc_cli/builds.py | vibe13/pnc-cli | 9020462cac5254bdd40cc7d8fa239433242cce45 | [
"Apache-2.0"
] | 47 | 2016-06-23T19:58:40.000Z | 2020-03-10T17:58:11.000Z | pnc_cli/builds.py | vibe13/pnc-cli | 9020462cac5254bdd40cc7d8fa239433242cce45 | [
"Apache-2.0"
] | 21 | 2016-05-30T20:34:17.000Z | 2021-09-07T13:22:20.000Z | __author__ = 'thauser'
from argh import arg
from six import iteritems
import logging
from pnc_cli import swagger_client
from pnc_cli import utils
import pnc_cli.cli_types as types
from pnc_cli.pnc_api import pnc_api
@arg("-p", "--page-size", help="Limit the amount of builds returned")
@arg("--page-index", help="Select the index of page", type=int)
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_builds(page_size=200, page_index=0, sort="", q=""):
    """
    List all builds
    :param page_size: number of builds returned per query
    :param sort: RSQL sorting query
    :param q: RSQL query
    :return:
    """
    # checked_api_call returns None on failure; propagate that as an implicit None.
    response = utils.checked_api_call(
        pnc_api.builds_running, 'get_all',
        page_size=page_size, page_index=page_index, sort=sort, q=q)
    if not response:
        return None
    return response.content
@arg("id", help="Running BuildRecord ID to cancel.", type=types.existing_running_build)
def cancel_running_build(id):
    """
    Cancel running build with ID
    """
    # Format the cancelled build's payload as JSON; None when the call failed.
    data = cancel_running_build_raw(id)
    if not data:
        return None
    return utils.format_json(data)
def cancel_running_build_raw(id):
    # Returns the cancelled build's content, or None when the API call fails.
    response = utils.checked_api_call(pnc_api.builds_running, 'cancel', id=id)
    return response.content if response else None
0663ffca3af7bad61f14c999230b3aa3884b61f7 | 36,887 | py | Python | noxfile.py | agraul/salt-1 | b6665030d91fb7045467b4dc408169d5127aa9be | [
"Apache-2.0"
] | null | null | null | noxfile.py | agraul/salt-1 | b6665030d91fb7045467b4dc408169d5127aa9be | [
"Apache-2.0"
] | null | null | null | noxfile.py | agraul/salt-1 | b6665030d91fb7045467b4dc408169d5127aa9be | [
"Apache-2.0"
] | null | null | null | """
noxfile
~~~~~~~
Nox configuration script
"""
# pylint: disable=resource-leakage,3rd-party-module-not-gated
import datetime
import glob
import os
import shutil
import sys
import tempfile
# fmt: off
# Guard against direct execution: this file is only meaningful under nox.
if __name__ == "__main__":
    sys.stderr.write(
        "Do not execute this file directly. Use nox instead, it will know how to handle this file\n"
    )
    sys.stderr.flush()
    exit(1)
# fmt: on
import nox  # isort:skip
from nox.command import CommandFailed  # isort:skip
IS_PY3 = sys.version_info > (2,)
# Be verbose when running under a CI context
CI_RUN = (
    os.environ.get("JENKINS_URL")
    or os.environ.get("CI")
    or os.environ.get("DRONE") is not None
)
# Silence pip's progress output on local runs only.
PIP_INSTALL_SILENT = CI_RUN is False
SKIP_REQUIREMENTS_INSTALL = "SKIP_REQUIREMENTS_INSTALL" in os.environ
EXTRA_REQUIREMENTS_INSTALL = os.environ.get("EXTRA_REQUIREMENTS_INSTALL")
# Global Path Definitions
REPO_ROOT = os.path.abspath(os.path.dirname(__file__))
SITECUSTOMIZE_DIR = os.path.join(REPO_ROOT, "tests", "support", "coverage")
IS_DARWIN = sys.platform.lower().startswith("darwin")
IS_WINDOWS = sys.platform.lower().startswith("win")
IS_FREEBSD = sys.platform.lower().startswith("freebsd")
# Python versions to run against
_PYTHON_VERSIONS = ("3", "3.5", "3.6", "3.7", "3.8", "3.9")
# Nox options
# Reuse existing virtualenvs
nox.options.reuse_existing_virtualenvs = True
# Don't fail on missing interpreters
nox.options.error_on_missing_interpreters = False
# Change current directory to REPO_ROOT
os.chdir(REPO_ROOT)
# Timestamped log file so successive runs do not clobber each other.
RUNTESTS_LOGFILE = os.path.join(
    "artifacts",
    "logs",
    "runtests-{}.log".format(datetime.datetime.now().strftime("%Y%m%d%H%M%S.%f")),
)
# Prevent Python from writing bytecode
os.environ["PYTHONDONTWRITEBYTECODE"] = "1"
def find_session_runner(session, name, **kwargs):
    """Find the nox session whose signature matches *name* and all *kwargs*.

    Used by the thin wrapper sessions to resolve their parametrized
    counterpart; errors out the session if no match is found.
    """
    for s, _ in session._runner.manifest.list_all_sessions():
        if name not in s.signatures:
            continue
        for signature in s.signatures:
            # Every requested key=value pair must appear textually in the signature.
            for key, value in kwargs.items():
                param = "{}={!r}".format(key, value)
                if IS_PY3:
                    # Under Python2 repr unicode string are always "u" prefixed, ie, u'a string'.
                    param = param.replace("u'", "'")
                if param not in signature:
                    break
            else:
                # All kwargs matched this signature.
                return s
            continue
    session.error(
        "Could not find a nox session by the name {!r} with the following keyword arguments: {!r}".format(
            name, kwargs
        )
    )
def _create_ci_directories():
for dirname in ("logs", "coverage", "xml-unittests-output"):
path = os.path.join("artifacts", dirname)
if not os.path.exists(path):
os.makedirs(path)
def _get_session_python_version_info(session):
    """Return the session interpreter's version as a tuple, e.g. ``(3, 7, 4)``.

    The result is cached on the runner; on a cache miss the version is probed
    by running ``python -c`` inside the session's virtualenv.
    """
    try:
        version_info = session._runner._real_python_version_info
    except AttributeError:
        old_install_only_value = session._runner.global_config.install_only
        try:
            # Force install only to be false for the following chunk of code
            # For additional information as to why see:
            # https://github.com/theacodes/nox/pull/181
            session._runner.global_config.install_only = False
            session_py_version = session.run(
                "python",
                "-c",
                'import sys; sys.stdout.write("{}.{}.{}".format(*sys.version_info))',
                silent=True,
                log=False,
            )
            version_info = tuple(
                int(part) for part in session_py_version.split(".") if part.isdigit()
            )
            # Cache on the runner so the probe only happens once per session.
            session._runner._real_python_version_info = version_info
        finally:
            session._runner.global_config.install_only = old_install_only_value
    return version_info
def _get_session_python_site_packages_dir(session):
    """Return the session virtualenv's site-packages directory (cached on the runner)."""
    try:
        site_packages_dir = session._runner._site_packages_dir
    except AttributeError:
        old_install_only_value = session._runner.global_config.install_only
        try:
            # Force install only to be false for the following chunk of code
            # For additional information as to why see:
            # https://github.com/theacodes/nox/pull/181
            session._runner.global_config.install_only = False
            site_packages_dir = session.run(
                "python",
                "-c",
                "import sys; from distutils.sysconfig import get_python_lib; sys.stdout.write(get_python_lib())",
                silent=True,
                log=False,
            )
            # Cache so the subprocess probe only runs once per session.
            session._runner._site_packages_dir = site_packages_dir
        finally:
            session._runner.global_config.install_only = old_install_only_value
    return site_packages_dir
def _get_pydir(session):
    """Return the ``py<major>.<minor>`` directory name for the session interpreter."""
    major_minor = _get_session_python_version_info(session)
    # Enforce the minimum supported interpreter versions before building the name.
    if major_minor < (3, 5):
        session.error("Only Python >= 3.5 is supported")
    if IS_WINDOWS and major_minor < (3, 6):
        session.error("Only Python >= 3.6 is supported on Windows")
    return "py{}.{}".format(*major_minor)
def _install_system_packages(session):
    """
    Because some python packages are provided by the distribution and cannot
    be pip installed, and because we don't want the whole system python packages
    on our virtualenvs, we copy the required system python packages into
    the virtualenv
    """
    version_info = _get_session_python_version_info(session)
    # Check both "3" and "3.x" style dist-packages directories.
    py_version_keys = ["{}".format(*version_info), "{}.{}".format(*version_info)]
    session_site_packages_dir = _get_session_python_site_packages_dir(session)
    session_site_packages_dir = os.path.relpath(session_site_packages_dir, REPO_ROOT)
    for py_version in py_version_keys:
        dist_packages_path = "/usr/lib/python{}/dist-packages".format(py_version)
        if not os.path.isdir(dist_packages_path):
            continue
        # Only the apt bindings are copied; they cannot be pip-installed.
        for aptpkg in glob.glob(os.path.join(dist_packages_path, "*apt*")):
            src = os.path.realpath(aptpkg)
            dst = os.path.join(session_site_packages_dir, os.path.basename(src))
            if os.path.exists(dst):
                session.log("Not overwritting already existing %s with %s", dst, src)
                continue
            session.log("Copying %s into %s", src, dst)
            if os.path.isdir(src):
                shutil.copytree(src, dst)
            else:
                shutil.copyfile(src, dst)
def _get_pip_requirements_file(session, transport, crypto=None, requirements_type="ci"):
    """Return the path to the static requirements file for this platform/python.

    Candidates are probed in order: ``<transport>-<platform>.txt`` (only when
    ``crypto`` is None), then ``<platform>.txt``, then ``<platform>-crypto.txt``.
    The first existing file wins; otherwise the session errors out.  On Linux,
    distribution-provided python packages are copied into the virtualenv first.

    The four per-platform branches of the original were identical except for
    the platform name; they are collapsed into a single candidate loop.
    """
    assert requirements_type in ("ci", "pkg")
    pydir = _get_pydir(session)
    if IS_WINDOWS:
        platform_name = "windows"
    elif IS_DARWIN:
        platform_name = "darwin"
    elif IS_FREEBSD:
        platform_name = "freebsd"
    else:
        # Linux virtualenvs need the system apt bindings copied in.
        _install_system_packages(session)
        platform_name = "linux"
    requirements_dir = os.path.join("requirements", "static", requirements_type, pydir)
    candidates = []
    if crypto is None:
        candidates.append("{}-{}.txt".format(transport, platform_name))
    candidates.append("{}.txt".format(platform_name))
    candidates.append("{}-crypto.txt".format(platform_name))
    for candidate in candidates:
        _requirements_file = os.path.join(requirements_dir, candidate)
        if os.path.exists(_requirements_file):
            return _requirements_file
    session.error(
        "Could not find a {} requirements file for {}".format(platform_name, pydir)
    )
def _upgrade_pip_setuptools_and_wheel(session, upgrade=True):
    """Bootstrap pinned pip/setuptools/wheel into the session's virtualenv.

    Returns False (and installs nothing) when SKIP_REQUIREMENTS_INSTALL is set
    in the environment, True otherwise.
    """
    if SKIP_REQUIREMENTS_INSTALL:
        session.log(
            "Skipping Python Requirements because SKIP_REQUIREMENTS_INSTALL was found in the environ"
        )
        return False
    install_command = ["python", "-m", "pip", "install", "--progress-bar=off"]
    if upgrade:
        install_command.append("-U")
    # Pinned ranges known to work with this repository's setup.py.
    install_command += ["pip>=20.2.4,<21.2", "setuptools<50.*", "wheel"]
    session.run(*install_command, silent=PIP_INSTALL_SILENT)
    return True
def _install_requirements(
    session, transport, *extra_requirements, requirements_type="ci"
):
    """Install the static requirements for *transport* into the session.

    Returns True when installation proceeded, False when the
    pip/setuptools/wheel bootstrap was skipped.

    NOTE: the original file defined this function twice; the first, truncated
    definition was dead code (immediately shadowed by this one) and returned
    None instead of False.  It has been removed.
    """
    if not _upgrade_pip_setuptools_and_wheel(session):
        return False

    # Install requirements
    requirements_file = _get_pip_requirements_file(
        session, transport, requirements_type=requirements_type
    )
    install_command = ["--progress-bar=off", "-r", requirements_file]
    session.install(*install_command, silent=PIP_INSTALL_SILENT)

    if extra_requirements:
        install_command = ["--progress-bar=off"]
        install_command += list(extra_requirements)
        session.install(*install_command, silent=PIP_INSTALL_SILENT)

    if EXTRA_REQUIREMENTS_INSTALL:
        session.log(
            "Installing the following extra requirements because the"
            " EXTRA_REQUIREMENTS_INSTALL environment variable was set: %s",
            EXTRA_REQUIREMENTS_INSTALL,
        )
        # We pass --constraint in this step because in case any of these extra dependencies has a requirement
        # we're already using, we want to maintain the locked version
        install_command = ["--progress-bar=off", "--constraint", requirements_file]
        install_command += EXTRA_REQUIREMENTS_INSTALL.split()
        session.install(*install_command, silent=PIP_INSTALL_SILENT)

    return True
def _run_with_coverage(session, *test_cmd, env=None):
    """Run *test_cmd* under coverage and always emit combined XML reports.

    Subprocesses are also traced via COVERAGE_PROCESS_START plus a
    sitecustomize on PYTHONPATH; the .coverage DB is moved into
    artifacts/coverage at the end so CI can archive it.
    """
    if SKIP_REQUIREMENTS_INSTALL is False:
        session.install(
            "--progress-bar=off", "coverage==5.2", silent=PIP_INSTALL_SILENT
        )
    session.run("coverage", "erase")
    # Ensure our sitecustomize dir is FIRST on PYTHONPATH so coverage starts
    # in every spawned python process.
    python_path_env_var = os.environ.get("PYTHONPATH") or None
    if python_path_env_var is None:
        python_path_env_var = SITECUSTOMIZE_DIR
    else:
        python_path_entries = python_path_env_var.split(os.pathsep)
        if SITECUSTOMIZE_DIR in python_path_entries:
            python_path_entries.remove(SITECUSTOMIZE_DIR)
        python_path_entries.insert(0, SITECUSTOMIZE_DIR)
        python_path_env_var = os.pathsep.join(python_path_entries)
    if env is None:
        env = {}
    env.update(
        {
            # The updated python path so that sitecustomize is importable
            "PYTHONPATH": python_path_env_var,
            # The full path to the .coverage data file. Makes sure we always write
            # them to the same directory
            "COVERAGE_FILE": os.path.abspath(os.path.join(REPO_ROOT, ".coverage")),
            # Instruct sub processes to also run under coverage
            "COVERAGE_PROCESS_START": os.path.join(REPO_ROOT, ".coveragerc"),
        }
    )
    try:
        session.run(*test_cmd, env=env)
    finally:
        # Always combine and generate the XML coverage report
        try:
            session.run("coverage", "combine")
        except CommandFailed:
            # Sometimes some of the coverage files are corrupt which would trigger a CommandFailed
            # exception
            pass
        # Generate report for salt code coverage
        session.run(
            "coverage",
            "xml",
            "-o",
            os.path.join("artifacts", "coverage", "salt.xml"),
            "--omit=tests/*",
            "--include=salt/*",
        )
        # Generate report for tests code coverage
        session.run(
            "coverage",
            "xml",
            "-o",
            os.path.join("artifacts", "coverage", "tests.xml"),
            "--omit=salt/*",
            "--include=tests/*",
        )
        # Move the coverage DB to artifacts/coverage in order for it to be archived by CI
        shutil.move(".coverage", os.path.join("artifacts", "coverage", ".coverage"))
def _runtests(session):
    """Abort the session, pointing the user at the equivalent pytest session.

    runtests.py support was removed from Salt; every runtests-* session now
    funnels here.
    """
    pytest_session = session._runner.global_config.sessions[0].replace(
        "runtests", "pytest"
    )
    session.error(
        """\n\nruntests.py support has been removed from Salt. Please try `nox -e '{0}'` """
        """or `nox -e '{0}' -- --help` to know more about the supported CLI flags.\n"""
        "For more information, please check "
        "https://docs.saltproject.io/en/latest/topics/development/tests/index.html#running-the-tests\n..".format(
            pytest_session
        )
    )
@nox.session(python=_PYTHON_VERSIONS, name="runtests-parametrized")
@nox.parametrize("coverage", [False, True])
@nox.parametrize("transport", ["zeromq", "tcp"])
@nox.parametrize("crypto", [None, "m2crypto", "pycryptodome"])
def runtests_parametrized(session, coverage, transport, crypto):
    """
    DO NOT CALL THIS NOX SESSION DIRECTLY
    """
    # Deprecated shim: _runtests() always errors with the pytest equivalent.
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS)
@nox.parametrize("coverage", [False, True])
def runtests(session, coverage):
    """
    runtests.py session with zeromq transport and default crypto
    """
    # Deprecated shim: _runtests() always errors with the pytest equivalent.
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-tcp")
@nox.parametrize("coverage", [False, True])
def runtests_tcp(session, coverage):
    """
    runtests.py session with TCP transport and default crypto
    """
    # Deprecated shim: _runtests() always errors with the pytest equivalent.
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-zeromq")
@nox.parametrize("coverage", [False, True])
def runtests_zeromq(session, coverage):
    """
    runtests.py session with zeromq transport and default crypto
    """
    # Deprecated shim: _runtests() always errors with the pytest equivalent.
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-m2crypto")
@nox.parametrize("coverage", [False, True])
def runtests_m2crypto(session, coverage):
    """
    runtests.py session with zeromq transport and m2crypto
    """
    # Deprecated shim: _runtests() always errors with the pytest equivalent.
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-tcp-m2crypto")
@nox.parametrize("coverage", [False, True])
def runtests_tcp_m2crypto(session, coverage):
    """
    runtests.py session with TCP transport and m2crypto
    """
    # Deprecated shim: _runtests() always errors with the pytest equivalent.
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-zeromq-m2crypto")
@nox.parametrize("coverage", [False, True])
def runtests_zeromq_m2crypto(session, coverage):
    """
    runtests.py session with zeromq transport and m2crypto
    """
    # Deprecated shim: _runtests() always errors with the pytest equivalent.
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-pycryptodome")
@nox.parametrize("coverage", [False, True])
def runtests_pycryptodome(session, coverage):
    """
    runtests.py session with zeromq transport and pycryptodome
    """
    # Deprecated shim: _runtests() always errors with the pytest equivalent.
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-tcp-pycryptodome")
@nox.parametrize("coverage", [False, True])
def runtests_tcp_pycryptodome(session, coverage):
    """
    runtests.py session with TCP transport and pycryptodome
    """
    # Deprecated shim: _runtests() always errors with the pytest equivalent.
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-zeromq-pycryptodome")
@nox.parametrize("coverage", [False, True])
def runtests_zeromq_pycryptodome(session, coverage):
    """
    runtests.py session with zeromq transport and pycryptodome
    """
    # Deprecated shim: _runtests() always errors with the pytest equivalent.
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-cloud")
@nox.parametrize("coverage", [False, True])
def runtests_cloud(session, coverage):
    """
    runtests.py cloud tests session
    """
    # Deprecated shim: _runtests() always errors with the pytest equivalent.
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-tornado")
@nox.parametrize("coverage", [False, True])
def runtests_tornado(session, coverage):
    """
    runtests.py tornado tests session
    """
    # Deprecated shim: _runtests() always errors with the pytest equivalent.
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="pytest-parametrized")
@nox.parametrize("coverage", [False, True])
@nox.parametrize("transport", ["zeromq", "tcp"])
@nox.parametrize("crypto", [None, "m2crypto", "pycryptodome"])
def pytest_parametrized(session, coverage, transport, crypto):
    """
    DO NOT CALL THIS NOX SESSION DIRECTLY
    """
    # Install requirements
    if _install_requirements(session, transport):
        if crypto:
            # A specific crypto backend was requested: remove all crypto
            # packages first so only the requested one ends up importable.
            session.run(
                "pip",
                "uninstall",
                "-y",
                "m2crypto",
                "pycrypto",
                "pycryptodome",
                "pycryptodomex",
                silent=True,
            )
            # Constrain to the locked crypto requirements for this platform.
            install_command = [
                "--progress-bar=off",
                "--constraint",
                _get_pip_requirements_file(session, transport, crypto=True),
            ]
            install_command.append(crypto)
            session.install(*install_command, silent=PIP_INSTALL_SILENT)
    cmd_args = [
        "--rootdir",
        REPO_ROOT,
        "--log-file={}".format(RUNTESTS_LOGFILE),
        "--log-file-level=debug",
        "--show-capture=no",
        "-ra",
        "-s",
        "--transport={}".format(transport),
    ] + session.posargs
    _pytest(session, coverage, cmd_args)
@nox.session(python=_PYTHON_VERSIONS)
@nox.parametrize("coverage", [False, True])
def pytest(session, coverage):
    """
    pytest session with zeromq transport and default crypto
    """
    # Delegates to the matching parametrized session (crypto=None, zeromq).
    session.notify(
        find_session_runner(
            session,
            "pytest-parametrized-{}".format(session.python),
            coverage=coverage,
            crypto=None,
            transport="zeromq",
        )
    )
@nox.session(python=_PYTHON_VERSIONS, name="pytest-tcp")
@nox.parametrize("coverage", [False, True])
def pytest_tcp(session, coverage):
    """
    pytest session with TCP transport and default crypto
    """
    # Delegates to the matching parametrized session (crypto=None, tcp).
    session.notify(
        find_session_runner(
            session,
            "pytest-parametrized-{}".format(session.python),
            coverage=coverage,
            crypto=None,
            transport="tcp",
        )
    )
@nox.session(python=_PYTHON_VERSIONS, name="pytest-zeromq")
@nox.parametrize("coverage", [False, True])
def pytest_zeromq(session, coverage):
    """
    pytest session with zeromq transport and default crypto
    """
    # Delegates to the matching parametrized session (crypto=None, zeromq).
    session.notify(
        find_session_runner(
            session,
            "pytest-parametrized-{}".format(session.python),
            coverage=coverage,
            crypto=None,
            transport="zeromq",
        )
    )
@nox.session(python=_PYTHON_VERSIONS, name="pytest-m2crypto")
@nox.parametrize("coverage", [False, True])
def pytest_m2crypto(session, coverage):
    """
    pytest session with zeromq transport and m2crypto
    """
    # Delegates to the matching parametrized session (m2crypto, zeromq).
    session.notify(
        find_session_runner(
            session,
            "pytest-parametrized-{}".format(session.python),
            coverage=coverage,
            crypto="m2crypto",
            transport="zeromq",
        )
    )
@nox.session(python=_PYTHON_VERSIONS, name="pytest-tcp-m2crypto")
@nox.parametrize("coverage", [False, True])
def pytest_tcp_m2crypto(session, coverage):
    """
    pytest session with TCP transport and m2crypto
    """
    # Delegates to the matching parametrized session (m2crypto, tcp).
    session.notify(
        find_session_runner(
            session,
            "pytest-parametrized-{}".format(session.python),
            coverage=coverage,
            crypto="m2crypto",
            transport="tcp",
        )
    )
@nox.session(python=_PYTHON_VERSIONS, name="pytest-zeromq-m2crypto")
@nox.parametrize("coverage", [False, True])
def pytest_zeromq_m2crypto(session, coverage):
    """
    pytest session with zeromq transport and m2crypto
    """
    # Delegates to the matching parametrized session (m2crypto, zeromq).
    session.notify(
        find_session_runner(
            session,
            "pytest-parametrized-{}".format(session.python),
            coverage=coverage,
            crypto="m2crypto",
            transport="zeromq",
        )
    )
@nox.session(python=_PYTHON_VERSIONS, name="pytest-pycryptodome")
@nox.parametrize("coverage", [False, True])
def pytest_pycryptodome(session, coverage):
    """
    pytest session with zeromq transport and pycryptodome
    """
    # Delegates to the matching parametrized session (pycryptodome, zeromq).
    session.notify(
        find_session_runner(
            session,
            "pytest-parametrized-{}".format(session.python),
            coverage=coverage,
            crypto="pycryptodome",
            transport="zeromq",
        )
    )
@nox.session(python=_PYTHON_VERSIONS, name="pytest-tcp-pycryptodome")
@nox.parametrize("coverage", [False, True])
def pytest_tcp_pycryptodome(session, coverage):
    """
    pytest session with TCP transport and pycryptodome
    """
    # Delegates to the matching parametrized session (pycryptodome, tcp).
    session.notify(
        find_session_runner(
            session,
            "pytest-parametrized-{}".format(session.python),
            coverage=coverage,
            crypto="pycryptodome",
            transport="tcp",
        )
    )
@nox.session(python=_PYTHON_VERSIONS, name="pytest-zeromq-pycryptodome")
@nox.parametrize("coverage", [False, True])
def pytest_zeromq_pycryptodome(session, coverage):
    """
    pytest session with zeromq transport and pycryptodome
    """
    # Delegates to the matching parametrized session (pycryptodome, zeromq).
    session.notify(
        find_session_runner(
            session,
            "pytest-parametrized-{}".format(session.python),
            coverage=coverage,
            crypto="pycryptodome",
            transport="zeromq",
        )
    )
@nox.session(python=_PYTHON_VERSIONS, name="pytest-cloud")
@nox.parametrize("coverage", [False, True])
def pytest_cloud(session, coverage):
    """
    pytest cloud tests session
    """
    pydir = _get_pydir(session)
    # Cloud requirements conflict on 3.5; bail out early.
    if pydir == "py3.5":
        session.error(
            "Due to conflicting and unsupported requirements the cloud tests only run on Py3.6+"
        )
    # Install requirements
    if _upgrade_pip_setuptools_and_wheel(session):
        requirements_file = os.path.join(
            "requirements", "static", "ci", pydir, "cloud.txt"
        )
        install_command = ["--progress-bar=off", "-r", requirements_file]
        session.install(*install_command, silent=PIP_INSTALL_SILENT)
    # --run-expensive plus the "cloud" keyword filter selects only cloud tests.
    cmd_args = [
        "--rootdir",
        REPO_ROOT,
        "--log-file={}".format(RUNTESTS_LOGFILE),
        "--log-file-level=debug",
        "--show-capture=no",
        "-ra",
        "-s",
        "--run-expensive",
        "-k",
        "cloud",
    ] + session.posargs
    _pytest(session, coverage, cmd_args)
@nox.session(python=_PYTHON_VERSIONS, name="pytest-tornado")
@nox.parametrize("coverage", [False, True])
def pytest_tornado(session, coverage):
    """
    pytest tornado tests session
    """
    # Install requirements
    if _upgrade_pip_setuptools_and_wheel(session):
        _install_requirements(session, "zeromq")
        # Pin the specific tornado/pyzmq combination these tests target.
        session.install(
            "--progress-bar=off", "tornado==5.0.2", silent=PIP_INSTALL_SILENT
        )
        session.install(
            "--progress-bar=off", "pyzmq==17.0.0", silent=PIP_INSTALL_SILENT
        )
    cmd_args = [
        "--rootdir",
        REPO_ROOT,
        "--log-file={}".format(RUNTESTS_LOGFILE),
        "--log-file-level=debug",
        "--show-capture=no",
        "-ra",
        "-s",
    ] + session.posargs
    _pytest(session, coverage, cmd_args)
def _pytest(session, coverage, cmd_args):
    """Shared pytest driver: sets up artifact dirs/env and runs pytest,
    optionally under coverage.  The rerun-failed logic after the except is
    deliberately unreachable (kept disabled via the re-raise)."""
    # Create required artifacts directories
    _create_ci_directories()
    env = {"CI_RUN": "1" if CI_RUN else "0"}
    if IS_DARWIN:
        # Don't nuke our multiprocessing efforts objc!
        # https://stackoverflow.com/questions/50168647/multiprocessing-causes-python-to-crash-and-gives-an-error-may-have-been-in-progr
        env["OBJC_DISABLE_INITIALIZE_FORK_SAFETY"] = "YES"
    if CI_RUN:
        # We'll print out the collected tests on CI runs.
        # This will show a full list of what tests are going to run, in the right order, which, in case
        # of a test suite hang, helps us pinpoint which test is hanging
        session.run(
            "python", "-m", "pytest", *(cmd_args + ["--collect-only", "-qqq"]), env=env
        )
    try:
        if coverage is True:
            _run_with_coverage(
                session,
                "python",
                "-m",
                "coverage",
                "run",
                "-m",
                "pytest",
                "--showlocals",
                *cmd_args,
                env=env
            )
        else:
            session.run("python", "-m", "pytest", *cmd_args, env=env)
    except CommandFailed:  # pylint: disable=try-except-raise
        # Not rerunning failed tests for now
        raise
    # pylint: disable=unreachable
    # Re-run failed tests
    session.log("Re-running failed tests")
    for idx, parg in enumerate(cmd_args):
        if parg.startswith("--junitxml="):
            cmd_args[idx] = parg.replace(".xml", "-rerun-failed.xml")
    cmd_args.append("--lf")
    if coverage is True:
        _run_with_coverage(
            session,
            "python",
            "-m",
            "coverage",
            "run",
            "-m",
            "pytest",
            "--showlocals",
            *cmd_args
        )
    else:
        session.run("python", "-m", "pytest", *cmd_args, env=env)
    # pylint: enable=unreachable
class Tee:
    """
    Python class to mimic linux tee behaviour
    """

    def __init__(self, first, second):
        self._first = first
        self._second = second

    def write(self, b):
        # Mirror every write to both streams, flushing each one immediately
        # so interleaved output stays ordered.
        chunk_written = self._first.write(b)
        self._first.flush()
        self._second.write(b)
        self._second.flush()

    def fileno(self):
        # Callers needing a real file descriptor get the first stream's.
        return self._first.fileno()
def _lint(
    session, rcfile, flags, paths, tee_output=True, upgrade_setuptools_and_pip=True
):
    """Run pylint with *rcfile*/*flags* over *paths*.

    When *tee_output* is True, pylint's stdout is mirrored to the console and
    captured to a temp file so it can optionally be written to the path named
    by the PYLINT_REPORT environment variable.
    """
    if _upgrade_pip_setuptools_and_wheel(session, upgrade=upgrade_setuptools_and_pip):
        requirements_file = os.path.join(
            "requirements", "static", "ci", _get_pydir(session), "lint.txt"
        )
        install_command = ["--progress-bar=off", "-r", requirements_file]
        session.install(*install_command, silent=PIP_INSTALL_SILENT)
    if tee_output:
        session.run("pylint", "--version")
        pylint_report_path = os.environ.get("PYLINT_REPORT")
    cmd_args = ["pylint", "--rcfile={}".format(rcfile)] + list(flags) + list(paths)
    cmd_kwargs = {"env": {"PYTHONUNBUFFERED": "1"}}
    if tee_output:
        # Capture pylint's output while still echoing it live via Tee.
        stdout = tempfile.TemporaryFile(mode="w+b")
        cmd_kwargs["stdout"] = Tee(stdout, sys.__stdout__)
    lint_failed = False
    try:
        session.run(*cmd_args, **cmd_kwargs)
    except CommandFailed:
        lint_failed = True
        raise
    finally:
        # Regardless of outcome, flush the captured output (and the report file).
        if tee_output:
            stdout.seek(0)
            contents = stdout.read()
            if contents:
                if IS_PY3:
                    contents = contents.decode("utf-8")
                else:
                    contents = contents.encode("utf-8")
                sys.stdout.write(contents)
                sys.stdout.flush()
                if pylint_report_path:
                    # Write report
                    with open(pylint_report_path, "w") as wfh:
                        wfh.write(contents)
                    session.log("Report file written to %r", pylint_report_path)
            stdout.close()
def _lint_pre_commit(session, rcfile, flags, paths):
    """Run _lint() from inside a pre-commit managed virtualenv.

    Validates we really are inside a pre-commit venv, then patches the nox
    runner to reuse that venv instead of creating its own.
    """
    if "VIRTUAL_ENV" not in os.environ:
        session.error(
            "This should be running from within a virtualenv and "
            "'VIRTUAL_ENV' was not found as an environment variable."
        )
    if "pre-commit" not in os.environ["VIRTUAL_ENV"]:
        session.error(
            "This should be running from within a pre-commit virtualenv and "
            "'VIRTUAL_ENV'({}) does not appear to be a pre-commit virtualenv.".format(
                os.environ["VIRTUAL_ENV"]
            )
        )
    from nox.virtualenv import VirtualEnv

    # Let's patch nox to make it run inside the pre-commit virtualenv
    try:
        session._runner.venv = VirtualEnv(  # pylint: disable=unexpected-keyword-arg
            os.environ["VIRTUAL_ENV"],
            interpreter=session._runner.func.python,
            reuse_existing=True,
            venv=True,
        )
    except TypeError:
        # This is still nox-py2
        session._runner.venv = VirtualEnv(
            os.environ["VIRTUAL_ENV"],
            interpreter=session._runner.func.python,
            reuse_existing=True,
        )
    # Skip the pip bootstrap: pre-commit already manages this venv's packages.
    _lint(
        session,
        rcfile,
        flags,
        paths,
        tee_output=False,
        upgrade_setuptools_and_pip=False,
    )
@nox.session(python="3")
def lint(session):
"""
Run PyLint against Salt and it's test suite. Set PYLINT_REPORT to a path to capture output.
"""
session.notify("lint-salt-{}".format(session.python))
session.notify("lint-tests-{}".format(session.python))
@nox.session(python="3", name="lint-salt")
def lint_salt(session):
"""
Run PyLint against Salt. Set PYLINT_REPORT to a path to capture output.
"""
flags = ["--disable=I"]
if session.posargs:
paths = session.posargs
else:
paths = ["setup.py", "noxfile.py", "salt/", "tasks/"]
_lint(session, ".pylintrc", flags, paths)
@nox.session(python="3", name="lint-tests")
def lint_tests(session):
"""
Run PyLint against Salt and it's test suite. Set PYLINT_REPORT to a path to capture output.
"""
flags = ["--disable=I"]
if session.posargs:
paths = session.posargs
else:
paths = ["tests/"]
_lint(session, ".pylintrc", flags, paths)
@nox.session(python=False, name="lint-salt-pre-commit")
def lint_salt_pre_commit(session):
"""
Run PyLint against Salt. Set PYLINT_REPORT to a path to capture output.
"""
flags = ["--disable=I"]
if session.posargs:
paths = session.posargs
else:
paths = ["setup.py", "noxfile.py", "salt/"]
_lint_pre_commit(session, ".pylintrc", flags, paths)
@nox.session(python=False, name="lint-tests-pre-commit")
def lint_tests_pre_commit(session):
"""
Run PyLint against Salt and it's test suite. Set PYLINT_REPORT to a path to capture output.
"""
flags = ["--disable=I"]
if session.posargs:
paths = session.posargs
else:
paths = ["tests/"]
_lint_pre_commit(session, ".pylintrc", flags, paths)
@nox.session(python="3")
@nox.parametrize("clean", [False, True])
@nox.parametrize("update", [False, True])
@nox.parametrize("compress", [False, True])
def docs(session, compress, update, clean):
"""
Build Salt's Documentation
"""
session.notify("docs-html-{}(compress={})".format(session.python, compress))
session.notify(
find_session_runner(
session,
"docs-man-{}".format(session.python),
compress=compress,
update=update,
clean=clean,
)
)
@nox.session(name="docs-html", python="3")
@nox.parametrize("clean", [False, True])
@nox.parametrize("compress", [False, True])
def docs_html(session, compress, clean):
"""
Build Salt's HTML Documentation
"""
if _upgrade_pip_setuptools_and_wheel(session):
requirements_file = os.path.join(
"requirements", "static", "ci", _get_pydir(session), "docs.txt"
)
install_command = ["--progress-bar=off", "-r", requirements_file]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
os.chdir("doc/")
if clean:
session.run("make", "clean", external=True)
session.run("make", "html", "SPHINXOPTS=-W", external=True)
if compress:
session.run("tar", "-cJvf", "html-archive.tar.xz", "_build/html", external=True)
os.chdir("..")
@nox.session(name="docs-man", python="3")
@nox.parametrize("clean", [False, True])
@nox.parametrize("update", [False, True])
@nox.parametrize("compress", [False, True])
def docs_man(session, compress, update, clean):
"""
Build Salt's Manpages Documentation
"""
if _upgrade_pip_setuptools_and_wheel(session):
requirements_file = os.path.join(
"requirements", "static", "ci", _get_pydir(session), "docs.txt"
)
install_command = ["--progress-bar=off", "-r", requirements_file]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
os.chdir("doc/")
if clean:
session.run("make", "clean", external=True)
session.run("make", "man", "SPHINXOPTS=-W", external=True)
if update:
session.run("rm", "-rf", "man/", external=True)
session.run("cp", "-Rp", "_build/man", "man/", external=True)
if compress:
session.run("tar", "-cJvf", "man-archive.tar.xz", "_build/man", external=True)
os.chdir("..")
@nox.session(name="invoke", python="3")
def invoke(session):
"""
Run invoke tasks
"""
if _upgrade_pip_setuptools_and_wheel(session):
_install_requirements(session, "zeromq")
requirements_file = os.path.join(
"requirements", "static", "ci", _get_pydir(session), "invoke.txt"
)
install_command = ["--progress-bar=off", "-r", requirements_file]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
cmd = ["inv"]
files = []
# Unfortunately, invoke doesn't support the nargs functionality like argpase does.
# Let's make it behave properly
for idx, posarg in enumerate(session.posargs):
if idx == 0:
cmd.append(posarg)
continue
if posarg.startswith("--"):
cmd.append(posarg)
continue
files.append(posarg)
if files:
cmd.append("--files={}".format(" ".join(files)))
session.run(*cmd)
@nox.session(name="changelog", python="3")
@nox.parametrize("draft", [False, True])
def changelog(session, draft):
"""
Generate salt's changelog
"""
if _upgrade_pip_setuptools_and_wheel(session):
requirements_file = os.path.join(
"requirements", "static", "ci", _get_pydir(session), "changelog.txt"
)
install_command = ["--progress-bar=off", "-r", requirements_file]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
town_cmd = ["towncrier", "--version={}".format(session.posargs[0])]
if draft:
town_cmd.append("--draft")
session.run(*town_cmd)
| 32.499559 | 135 | 0.614173 |
a33899d106180f039e66d3105c7e9403fb8a4f90 | 607 | py | Python | Session1_2018/constructStringFromBinaryString.py | vedantc6/LCode | 43aec4da9cc22ef43e877a16dbee380b98d9089f | [
"MIT"
] | 1 | 2018-09-21T10:51:15.000Z | 2018-09-21T10:51:15.000Z | Session1_2018/constructStringFromBinaryString.py | vedantc6/LCode | 43aec4da9cc22ef43e877a16dbee380b98d9089f | [
"MIT"
] | null | null | null | Session1_2018/constructStringFromBinaryString.py | vedantc6/LCode | 43aec4da9cc22ef43e877a16dbee380b98d9089f | [
"MIT"
] | null | null | null | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def tree2str(self, t):
        """
        Serialize a binary tree to a preorder string where each subtree is
        wrapped in parentheses, omitting empty "()" pairs that do not affect
        the one-to-one mapping (i.e. a missing right child is dropped, but a
        missing left child must keep its "()" when a right child exists).

        :type t: TreeNode
        :rtype: str
        """
        if not t:
            return ""
        val = str(t.val)  # dropped the original's redundant `+ ""`
        if t.left is None and t.right is None:
            return val
        if t.right is None:
            # No right subtree: its "()" can be omitted without ambiguity.
            return val + "(" + self.tree2str(t.left) + ")"
        # Right child present: the left "()" must be kept even when empty.
        return val + "(" + self.tree2str(t.left) + ")" + "(" + self.tree2str(t.right) + ")"
e92cd5fcdbfc3bbf5f3dda51183a5499fdd4e400 | 2,530 | py | Python | aliyun-python-sdk-polardb/aliyunsdkpolardb/request/v20170801/ModifyDBEndpointAddressRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-polardb/aliyunsdkpolardb/request/v20170801/ModifyDBEndpointAddressRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-polardb/aliyunsdkpolardb/request/v20170801/ModifyDBEndpointAddressRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ModifyDBEndpointAddressRequest(RpcRequest):
    """RPC request wrapper for the PolarDB ``ModifyDBEndpointAddress`` API
    (product ``polardb``, version ``2017-08-01``).

    Each getter/setter pair below maps directly onto a query parameter of
    the same name.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'polardb', '2017-08-01', 'ModifyDBEndpointAddress', 'polardb')

    def get_ResourceOwnerId(self):
        """Return the ``ResourceOwnerId`` query parameter."""
        params = self.get_query_params()
        return params.get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):
        """Set the ``ResourceOwnerId`` query parameter."""
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_ConnectionStringPrefix(self):
        """Return the ``ConnectionStringPrefix`` query parameter."""
        params = self.get_query_params()
        return params.get('ConnectionStringPrefix')

    def set_ConnectionStringPrefix(self, ConnectionStringPrefix):
        """Set the ``ConnectionStringPrefix`` query parameter."""
        self.add_query_param('ConnectionStringPrefix', ConnectionStringPrefix)

    def get_ResourceOwnerAccount(self):
        """Return the ``ResourceOwnerAccount`` query parameter."""
        params = self.get_query_params()
        return params.get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):
        """Set the ``ResourceOwnerAccount`` query parameter."""
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_DBClusterId(self):
        """Return the ``DBClusterId`` query parameter."""
        params = self.get_query_params()
        return params.get('DBClusterId')

    def set_DBClusterId(self, DBClusterId):
        """Set the ``DBClusterId`` query parameter."""
        self.add_query_param('DBClusterId', DBClusterId)

    def get_OwnerAccount(self):
        """Return the ``OwnerAccount`` query parameter."""
        params = self.get_query_params()
        return params.get('OwnerAccount')

    def set_OwnerAccount(self, OwnerAccount):
        """Set the ``OwnerAccount`` query parameter."""
        self.add_query_param('OwnerAccount', OwnerAccount)

    def get_NetType(self):
        """Return the ``NetType`` query parameter."""
        params = self.get_query_params()
        return params.get('NetType')

    def set_NetType(self, NetType):
        """Set the ``NetType`` query parameter."""
        self.add_query_param('NetType', NetType)

    def get_DBEndpointId(self):
        """Return the ``DBEndpointId`` query parameter."""
        params = self.get_query_params()
        return params.get('DBEndpointId')

    def set_DBEndpointId(self, DBEndpointId):
        """Set the ``DBEndpointId`` query parameter."""
        self.add_query_param('DBEndpointId', DBEndpointId)

    def get_OwnerId(self):
        """Return the ``OwnerId`` query parameter."""
        params = self.get_query_params()
        return params.get('OwnerId')

    def set_OwnerId(self, OwnerId):
        """Set the ``OwnerId`` query parameter."""
        self.add_query_param('OwnerId', OwnerId)
ff84f24de8f807761a937bcbb3464776ce397a70 | 21,562 | py | Python | netbox/netbox/settings.py | pierrechev/netbox | 312246fec2d12bfb1816c88e62a1b9729efd9092 | [
"Apache-2.0"
] | null | null | null | netbox/netbox/settings.py | pierrechev/netbox | 312246fec2d12bfb1816c88e62a1b9729efd9092 | [
"Apache-2.0"
] | null | null | null | netbox/netbox/settings.py | pierrechev/netbox | 312246fec2d12bfb1816c88e62a1b9729efd9092 | [
"Apache-2.0"
] | null | null | null | import importlib
import logging
import os
import platform
import re
import socket
import warnings
from urllib.parse import urlsplit
from django.contrib.messages import constants as messages
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.validators import URLValidator
#
# Environment setup
#
VERSION = '3.0.2-dev'

# Hostname
HOSTNAME = platform.node()

# Set the base directory two levels up
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Validate Python version. Compare (major, minor) as *integers*:
# platform.python_version_tuple() returns strings, and the original
# lexicographic comparison ('3', '10') < ('3', '7') is True, which would
# wrongly reject Python 3.10+.
if tuple(int(part) for part in platform.python_version_tuple()[:2]) < (3, 7):
    raise RuntimeError(
        f"NetBox requires Python 3.7 or higher (current: Python {platform.python_version()})"
    )
#
# Configuration import
#
# Import configuration parameters
try:
    from netbox import configuration
except ModuleNotFoundError as e:
    # Distinguish "configuration.py itself is missing" (a setup error with a
    # friendly message) from an import error raised *inside* configuration.py,
    # which is re-raised unchanged so the real traceback stays visible.
    if getattr(e, 'name') == 'configuration':
        raise ImproperlyConfigured(
            "Configuration file is not present. Please define netbox/netbox/configuration.py per the documentation."
        )
    raise
# Warn on removed config parameters so stale settings don't silently linger
if hasattr(configuration, 'CACHE_TIMEOUT'):
    warnings.warn(
        "The CACHE_TIMEOUT configuration parameter was removed in v3.0.0 and no longer has any effect."
    )
if hasattr(configuration, 'RELEASE_CHECK_TIMEOUT'):
    warnings.warn(
        "The RELEASE_CHECK_TIMEOUT configuration parameter was removed in v3.0.0 and no longer has any effect."
    )
# Enforce required configuration parameters: fail fast at import time rather
# than with an obscure AttributeError later.
for parameter in ['ALLOWED_HOSTS', 'DATABASE', 'SECRET_KEY', 'REDIS']:
    if not hasattr(configuration, parameter):
        raise ImproperlyConfigured(
            "Required parameter {} is missing from configuration.py.".format(parameter)
        )
# Set required parameters
# Required parameters (their presence was enforced above, so plain getattr
# without a default is safe here).
ALLOWED_HOSTS = getattr(configuration, 'ALLOWED_HOSTS')
DATABASE = getattr(configuration, 'DATABASE')
REDIS = getattr(configuration, 'REDIS')
SECRET_KEY = getattr(configuration, 'SECRET_KEY')
# Set optional parameters: each falls back to the documented default when not
# defined in configuration.py.
ADMINS = getattr(configuration, 'ADMINS', [])
ALLOWED_URL_SCHEMES = getattr(configuration, 'ALLOWED_URL_SCHEMES', (
    'file', 'ftp', 'ftps', 'http', 'https', 'irc', 'mailto', 'sftp', 'ssh', 'tel', 'telnet', 'tftp', 'vnc', 'xmpp',
))
BANNER_BOTTOM = getattr(configuration, 'BANNER_BOTTOM', '')
BANNER_LOGIN = getattr(configuration, 'BANNER_LOGIN', '')
BANNER_TOP = getattr(configuration, 'BANNER_TOP', '')
BASE_PATH = getattr(configuration, 'BASE_PATH', '')
if BASE_PATH:
    BASE_PATH = BASE_PATH.strip('/') + '/'  # Enforce trailing slash only
CHANGELOG_RETENTION = getattr(configuration, 'CHANGELOG_RETENTION', 90)
CORS_ORIGIN_ALLOW_ALL = getattr(configuration, 'CORS_ORIGIN_ALLOW_ALL', False)
CORS_ORIGIN_REGEX_WHITELIST = getattr(configuration, 'CORS_ORIGIN_REGEX_WHITELIST', [])
CORS_ORIGIN_WHITELIST = getattr(configuration, 'CORS_ORIGIN_WHITELIST', [])
CUSTOM_VALIDATORS = getattr(configuration, 'CUSTOM_VALIDATORS', {})
DATE_FORMAT = getattr(configuration, 'DATE_FORMAT', 'N j, Y')
DATETIME_FORMAT = getattr(configuration, 'DATETIME_FORMAT', 'N j, Y g:i a')
DEBUG = getattr(configuration, 'DEBUG', False)
DEVELOPER = getattr(configuration, 'DEVELOPER', False)
DOCS_ROOT = getattr(configuration, 'DOCS_ROOT', os.path.join(os.path.dirname(BASE_DIR), 'docs'))
EMAIL = getattr(configuration, 'EMAIL', {})
ENFORCE_GLOBAL_UNIQUE = getattr(configuration, 'ENFORCE_GLOBAL_UNIQUE', False)
EXEMPT_VIEW_PERMISSIONS = getattr(configuration, 'EXEMPT_VIEW_PERMISSIONS', [])
GRAPHQL_ENABLED = getattr(configuration, 'GRAPHQL_ENABLED', True)
HTTP_PROXIES = getattr(configuration, 'HTTP_PROXIES', None)
INTERNAL_IPS = getattr(configuration, 'INTERNAL_IPS', ('127.0.0.1', '::1'))
LOGGING = getattr(configuration, 'LOGGING', {})
LOGIN_REQUIRED = getattr(configuration, 'LOGIN_REQUIRED', False)
LOGIN_TIMEOUT = getattr(configuration, 'LOGIN_TIMEOUT', None)
MAINTENANCE_MODE = getattr(configuration, 'MAINTENANCE_MODE', False)
MAPS_URL = getattr(configuration, 'MAPS_URL', 'https://maps.google.com/?q=')
MAX_PAGE_SIZE = getattr(configuration, 'MAX_PAGE_SIZE', 1000)
# Filesystem roots are normalized to have no trailing slash.
MEDIA_ROOT = getattr(configuration, 'MEDIA_ROOT', os.path.join(BASE_DIR, 'media')).rstrip('/')
METRICS_ENABLED = getattr(configuration, 'METRICS_ENABLED', False)
NAPALM_ARGS = getattr(configuration, 'NAPALM_ARGS', {})
NAPALM_PASSWORD = getattr(configuration, 'NAPALM_PASSWORD', '')
NAPALM_TIMEOUT = getattr(configuration, 'NAPALM_TIMEOUT', 30)
NAPALM_USERNAME = getattr(configuration, 'NAPALM_USERNAME', '')
PAGINATE_COUNT = getattr(configuration, 'PAGINATE_COUNT', 50)
LOGIN_PERSISTENCE = getattr(configuration, 'LOGIN_PERSISTENCE', False)
PLUGINS = getattr(configuration, 'PLUGINS', [])
PLUGINS_CONFIG = getattr(configuration, 'PLUGINS_CONFIG', {})
PREFER_IPV4 = getattr(configuration, 'PREFER_IPV4', False)
RACK_ELEVATION_DEFAULT_UNIT_HEIGHT = getattr(configuration, 'RACK_ELEVATION_DEFAULT_UNIT_HEIGHT', 22)
RACK_ELEVATION_DEFAULT_UNIT_WIDTH = getattr(configuration, 'RACK_ELEVATION_DEFAULT_UNIT_WIDTH', 220)
REMOTE_AUTH_AUTO_CREATE_USER = getattr(configuration, 'REMOTE_AUTH_AUTO_CREATE_USER', False)
REMOTE_AUTH_BACKEND = getattr(configuration, 'REMOTE_AUTH_BACKEND', 'netbox.authentication.RemoteUserBackend')
REMOTE_AUTH_DEFAULT_GROUPS = getattr(configuration, 'REMOTE_AUTH_DEFAULT_GROUPS', [])
REMOTE_AUTH_DEFAULT_PERMISSIONS = getattr(configuration, 'REMOTE_AUTH_DEFAULT_PERMISSIONS', {})
REMOTE_AUTH_ENABLED = getattr(configuration, 'REMOTE_AUTH_ENABLED', False)
REMOTE_AUTH_HEADER = getattr(configuration, 'REMOTE_AUTH_HEADER', 'HTTP_REMOTE_USER')
RELEASE_CHECK_URL = getattr(configuration, 'RELEASE_CHECK_URL', None)
REPORTS_ROOT = getattr(configuration, 'REPORTS_ROOT', os.path.join(BASE_DIR, 'reports')).rstrip('/')
RQ_DEFAULT_TIMEOUT = getattr(configuration, 'RQ_DEFAULT_TIMEOUT', 300)
SCRIPTS_ROOT = getattr(configuration, 'SCRIPTS_ROOT', os.path.join(BASE_DIR, 'scripts')).rstrip('/')
SESSION_FILE_PATH = getattr(configuration, 'SESSION_FILE_PATH', None)
SESSION_COOKIE_NAME = getattr(configuration, 'SESSION_COOKIE_NAME', 'sessionid')
SHORT_DATE_FORMAT = getattr(configuration, 'SHORT_DATE_FORMAT', 'Y-m-d')
SHORT_DATETIME_FORMAT = getattr(configuration, 'SHORT_DATETIME_FORMAT', 'Y-m-d H:i')
SHORT_TIME_FORMAT = getattr(configuration, 'SHORT_TIME_FORMAT', 'H:i:s')
STORAGE_BACKEND = getattr(configuration, 'STORAGE_BACKEND', None)
STORAGE_CONFIG = getattr(configuration, 'STORAGE_CONFIG', {})
TIME_FORMAT = getattr(configuration, 'TIME_FORMAT', 'g:i a')
TIME_ZONE = getattr(configuration, 'TIME_ZONE', 'UTC')
# Validate the release-check repository URL, if one was configured. (Only the
# URL is validated here; the former timeout parameter was removed in v3.0.0.)
if RELEASE_CHECK_URL:
    validator = URLValidator(
        message=(
            "RELEASE_CHECK_URL must be a valid API URL. Example: "
            "https://api.github.com/repos/netbox-community/netbox"
        )
    )
    try:
        validator(RELEASE_CHECK_URL)
    except ValidationError as err:
        raise ImproperlyConfigured(str(err))
#
# Database
#
# Only PostgreSQL is supported
# When Prometheus metrics are enabled, swap in the instrumented PostgreSQL
# backend so database query metrics get exported; otherwise use Django's
# stock PostgreSQL backend.
if METRICS_ENABLED:
    _db_engine = 'django_prometheus.db.backends.postgresql'
else:
    _db_engine = 'django.db.backends.postgresql'
DATABASE.update({'ENGINE': _db_engine})
DATABASES = {
    'default': DATABASE,
}
#
# Media storage
#
if STORAGE_BACKEND is not None:
    DEFAULT_FILE_STORAGE = STORAGE_BACKEND
    # django-storages backends need extra wiring: their settings normally live
    # as top-level Django settings, but NetBox exposes them via STORAGE_CONFIG.
    if STORAGE_BACKEND.startswith('storages.'):
        try:
            import storages.utils
        except ModuleNotFoundError as e:
            # Only translate a missing 'storages' package into a friendly
            # configuration error; anything else is re-raised unchanged.
            if getattr(e, 'name') == 'storages':
                raise ImproperlyConfigured(
                    f"STORAGE_BACKEND is set to {STORAGE_BACKEND} but django-storages is not present. It can be "
                    f"installed by running 'pip install django-storages'."
                )
            raise e
        # Monkey-patch django-storages to fetch settings from STORAGE_CONFIG
        def _setting(name, default=None):
            """Resolve a django-storages setting, preferring STORAGE_CONFIG
            over this module's globals."""
            if name in STORAGE_CONFIG:
                return STORAGE_CONFIG[name]
            return globals().get(name, default)
        storages.utils.setting = _setting
# STORAGE_CONFIG without a backend is almost certainly a mistake; warn rather
# than silently ignoring it.
if STORAGE_CONFIG and STORAGE_BACKEND is None:
    warnings.warn(
        "STORAGE_CONFIG has been set in configuration.py but STORAGE_BACKEND is not defined. STORAGE_CONFIG will be "
        "ignored."
    )
#
# Redis
#
# Background task queuing
if 'tasks' not in REDIS:
    raise ImproperlyConfigured(
        "REDIS section in configuration.py is missing the 'tasks' subsection."
    )
TASKS_REDIS = REDIS['tasks']
TASKS_REDIS_HOST = TASKS_REDIS.get('HOST', 'localhost')
TASKS_REDIS_PORT = TASKS_REDIS.get('PORT', 6379)
TASKS_REDIS_SENTINELS = TASKS_REDIS.get('SENTINELS', [])
# Sentinel mode is inferred from the presence of a non-empty SENTINELS list.
TASKS_REDIS_USING_SENTINEL = all([
    isinstance(TASKS_REDIS_SENTINELS, (list, tuple)),
    len(TASKS_REDIS_SENTINELS) > 0
])
TASKS_REDIS_SENTINEL_SERVICE = TASKS_REDIS.get('SENTINEL_SERVICE', 'default')
TASKS_REDIS_SENTINEL_TIMEOUT = TASKS_REDIS.get('SENTINEL_TIMEOUT', 10)
TASKS_REDIS_PASSWORD = TASKS_REDIS.get('PASSWORD', '')
TASKS_REDIS_DATABASE = TASKS_REDIS.get('DATABASE', 0)
TASKS_REDIS_SSL = TASKS_REDIS.get('SSL', False)
TASKS_REDIS_SKIP_TLS_VERIFY = TASKS_REDIS.get('INSECURE_SKIP_TLS_VERIFY', False)
# Caching (a separate Redis database/connection from the task queue)
if 'caching' not in REDIS:
    raise ImproperlyConfigured(
        "REDIS section in configuration.py is missing caching subsection."
    )
CACHING_REDIS_HOST = REDIS['caching'].get('HOST', 'localhost')
CACHING_REDIS_PORT = REDIS['caching'].get('PORT', 6379)
CACHING_REDIS_DATABASE = REDIS['caching'].get('DATABASE', 0)
CACHING_REDIS_PASSWORD = REDIS['caching'].get('PASSWORD', '')
CACHING_REDIS_SENTINELS = REDIS['caching'].get('SENTINELS', [])
CACHING_REDIS_SENTINEL_SERVICE = REDIS['caching'].get('SENTINEL_SERVICE', 'default')
# Scheme selects TLS ('rediss') vs plaintext ('redis') for the cache URL.
CACHING_REDIS_PROTO = 'rediss' if REDIS['caching'].get('SSL', False) else 'redis'
CACHING_REDIS_SKIP_TLS_VERIFY = REDIS['caching'].get('INSECURE_SKIP_TLS_VERIFY', False)
# django-redis cache configuration. The base config targets a single Redis
# host; when sentinels are configured the LOCATION and client class are
# swapped for their sentinel-aware counterparts below.
CACHES = {
    'default': {
        'BACKEND': 'django_redis.cache.RedisCache',
        'LOCATION': f'{CACHING_REDIS_PROTO}://{CACHING_REDIS_HOST}:{CACHING_REDIS_PORT}/{CACHING_REDIS_DATABASE}',
        'OPTIONS': {
            'CLIENT_CLASS': 'django_redis.client.DefaultClient',
            'PASSWORD': CACHING_REDIS_PASSWORD,
        }
    }
}
if CACHING_REDIS_SENTINELS:
    DJANGO_REDIS_CONNECTION_FACTORY = 'django_redis.pool.SentinelConnectionFactory'
    CACHES['default']['LOCATION'] = f'{CACHING_REDIS_PROTO}://{CACHING_REDIS_SENTINEL_SERVICE}/{CACHING_REDIS_DATABASE}'
    CACHES['default']['OPTIONS']['CLIENT_CLASS'] = 'django_redis.client.SentinelClient'
    CACHES['default']['OPTIONS']['SENTINELS'] = CACHING_REDIS_SENTINELS
if CACHING_REDIS_SKIP_TLS_VERIFY:
    # BUG FIX: 'CONNECTION_POOL_KWARGS' is never created in OPTIONS above, so
    # indexing it directly raised KeyError whenever INSECURE_SKIP_TLS_VERIFY
    # was enabled. Create the sub-dict on demand instead.
    CACHES['default']['OPTIONS'].setdefault('CONNECTION_POOL_KWARGS', {})
    CACHES['default']['OPTIONS']['CONNECTION_POOL_KWARGS']['ssl_cert_reqs'] = False
#
# Sessions
#
if LOGIN_TIMEOUT is not None:
    # Django default is 1209600 seconds (14 days)
    SESSION_COOKIE_AGE = LOGIN_TIMEOUT
# When LOGIN_PERSISTENCE is enabled the session cookie is refreshed on every
# request, effectively implementing a sliding expiration window.
SESSION_SAVE_EVERY_REQUEST = bool(LOGIN_PERSISTENCE)
if SESSION_FILE_PATH is not None:
    # Store sessions on disk instead of in the database.
    SESSION_ENGINE = 'django.contrib.sessions.backends.file'
#
# Email: map the EMAIL dict from configuration.py onto Django's flat
# EMAIL_* settings, applying defaults where the key is absent.
#
EMAIL_HOST = EMAIL.get('SERVER')
EMAIL_HOST_USER = EMAIL.get('USERNAME')
EMAIL_HOST_PASSWORD = EMAIL.get('PASSWORD')
EMAIL_PORT = EMAIL.get('PORT', 25)
EMAIL_SSL_CERTFILE = EMAIL.get('SSL_CERTFILE')
EMAIL_SSL_KEYFILE = EMAIL.get('SSL_KEYFILE')
EMAIL_SUBJECT_PREFIX = '[NetBox] '
EMAIL_USE_SSL = EMAIL.get('USE_SSL', False)
EMAIL_USE_TLS = EMAIL.get('USE_TLS', False)
EMAIL_TIMEOUT = EMAIL.get('TIMEOUT', 10)
SERVER_EMAIL = EMAIL.get('FROM_EMAIL')
#
# Django
#
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
    'corsheaders',
    'debug_toolbar',
    'graphiql_debug_toolbar',
    'django_filters',
    'django_tables2',
    'django_prometheus',
    'graphene_django',
    'mptt',
    'rest_framework',
    'taggit',
    'timezone_field',
    'circuits',
    'dcim',
    'ipam',
    'extras',
    'tenancy',
    'users',
    'utilities',
    'virtualization',
    'django_rq',  # Must come after extras to allow overriding management commands
    'drf_yasg',
]
# Middleware (order matters: prometheus wraps everything, CORS must run
# before the common middleware, and the netbox middleware runs last before
# the prometheus "after" hook).
MIDDLEWARE = [
    'graphiql_debug_toolbar.middleware.DebugToolbarMiddleware',
    'django_prometheus.middleware.PrometheusBeforeMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'netbox.middleware.ExceptionHandlingMiddleware',
    'netbox.middleware.RemoteUserMiddleware',
    'netbox.middleware.LoginRequiredMiddleware',
    'netbox.middleware.APIVersionMiddleware',
    'netbox.middleware.ObjectChangeMiddleware',
    'django_prometheus.middleware.PrometheusAfterMiddleware',
]
ROOT_URLCONF = 'netbox.urls'
TEMPLATES_DIR = BASE_DIR + '/templates'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATES_DIR],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.template.context_processors.media',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'netbox.context_processors.settings_and_registry',
            ],
        },
    },
]
# Set up authentication backends: the remote-auth backend (configurable) is
# tried first, then NetBox's object-permission backend.
AUTHENTICATION_BACKENDS = [
    REMOTE_AUTH_BACKEND,
    'netbox.authentication.ObjectPermissionBackend',
]
# Internationalization
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_TZ = True
# WSGI
WSGI_APPLICATION = 'netbox.wsgi.application'
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
USE_X_FORWARDED_HOST = True
X_FRAME_OPTIONS = 'SAMEORIGIN'
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = BASE_DIR + '/static'
STATIC_URL = f'/{BASE_PATH}static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'project-static', 'dist'),
    os.path.join(BASE_DIR, 'project-static', 'img'),
    ('docs', os.path.join(BASE_DIR, 'project-static', 'docs')),  # Prefix with /docs
)
# Media
MEDIA_URL = '/{}media/'.format(BASE_PATH)
# Disable default limit of 1000 fields per request. Needed for bulk deletion of objects. (Added in Django 1.10.)
DATA_UPLOAD_MAX_NUMBER_FIELDS = None
# Messages: map Django's ERROR level onto Bootstrap's "danger" CSS class.
MESSAGE_TAGS = {
    messages.ERROR: 'danger',
}
# Authentication URLs
LOGIN_URL = '/{}login/'.format(BASE_PATH)
CSRF_TRUSTED_ORIGINS = ALLOWED_HOSTS
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
# Exclude potentially sensitive models from wildcard view exemption. These may still be exempted
# by specifying the model individually in the EXEMPT_VIEW_PERMISSIONS configuration parameter.
EXEMPT_EXCLUDE_MODELS = (
    ('auth', 'group'),
    ('auth', 'user'),
    ('users', 'objectpermission'),
)
#
# Django Prometheus
#
PROMETHEUS_EXPORT_MIGRATIONS = False
#
# Django filters
#
# Label/value used by django-filters to represent a null choice in filter forms.
FILTERS_NULL_CHOICE_LABEL = 'None'
FILTERS_NULL_CHOICE_VALUE = 'null'
#
# Django REST framework (API)
#
REST_FRAMEWORK_VERSION = VERSION.rsplit('.', 1)[0]  # Use major.minor as API version
REST_FRAMEWORK = {
    'ALLOWED_VERSIONS': [REST_FRAMEWORK_VERSION],
    'COERCE_DECIMAL_TO_STRING': False,
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.SessionAuthentication',
        'netbox.api.authentication.TokenAuthentication',
    ),
    'DEFAULT_FILTER_BACKENDS': (
        'django_filters.rest_framework.DjangoFilterBackend',
    ),
    'DEFAULT_METADATA_CLASS': 'netbox.api.metadata.BulkOperationMetadata',
    'DEFAULT_PAGINATION_CLASS': 'netbox.api.pagination.OptionalLimitOffsetPagination',
    'DEFAULT_PERMISSION_CLASSES': (
        'netbox.api.authentication.TokenPermissions',
    ),
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
        'netbox.api.renderers.FormlessBrowsableAPIRenderer',
    ),
    'DEFAULT_VERSION': REST_FRAMEWORK_VERSION,
    'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.AcceptHeaderVersioning',
    'PAGE_SIZE': PAGINATE_COUNT,
    'SCHEMA_COERCE_METHOD_NAMES': {
        # Default mappings
        'retrieve': 'read',
        'destroy': 'delete',
        # Custom operations
        'bulk_destroy': 'bulk_delete',
    },
    'VIEW_NAME_FUNCTION': 'utilities.api.get_view_name',
}
#
# Graphene
#
GRAPHENE = {
    # Avoids naming collision on models with 'type' field; see
    # https://github.com/graphql-python/graphene-django/issues/185
    'DJANGO_CHOICE_FIELD_ENUM_V3_NAMING': True,
}
#
# drf_yasg (OpenAPI/Swagger): schema-generation inspectors. Custom NetBox
# inspectors are listed before the drf_yasg defaults so they take precedence.
#
SWAGGER_SETTINGS = {
    'DEFAULT_AUTO_SCHEMA_CLASS': 'utilities.custom_inspectors.NetBoxSwaggerAutoSchema',
    'DEFAULT_FIELD_INSPECTORS': [
        'utilities.custom_inspectors.CustomFieldsDataFieldInspector',
        'utilities.custom_inspectors.JSONFieldInspector',
        'utilities.custom_inspectors.NullableBooleanFieldInspector',
        'utilities.custom_inspectors.ChoiceFieldInspector',
        'utilities.custom_inspectors.SerializedPKRelatedFieldInspector',
        'drf_yasg.inspectors.CamelCaseJSONFilter',
        'drf_yasg.inspectors.ReferencingSerializerInspector',
        'drf_yasg.inspectors.RelatedFieldInspector',
        'drf_yasg.inspectors.ChoiceFieldInspector',
        'drf_yasg.inspectors.FileFieldInspector',
        'drf_yasg.inspectors.DictFieldInspector',
        'drf_yasg.inspectors.SerializerMethodFieldInspector',
        'drf_yasg.inspectors.SimpleFieldInspector',
        'drf_yasg.inspectors.StringDefaultFieldInspector',
    ],
    'DEFAULT_FILTER_INSPECTORS': [
        'drf_yasg.inspectors.CoreAPICompatInspector',
    ],
    'DEFAULT_INFO': 'netbox.urls.openapi_info',
    'DEFAULT_MODEL_DEPTH': 1,
    'DEFAULT_PAGINATOR_INSPECTORS': [
        'utilities.custom_inspectors.NullablePaginatorInspector',
        'drf_yasg.inspectors.DjangoRestResponsePagination',
        'drf_yasg.inspectors.CoreAPICompatInspector',
    ],
    'SECURITY_DEFINITIONS': {
        'Bearer': {
            'type': 'apiKey',
            'name': 'Authorization',
            'in': 'header',
        }
    },
    'VALIDATOR_URL': None,
}
#
# Django RQ (Webhooks backend)
#
if TASKS_REDIS_USING_SENTINEL:
    # NOTE(review): the sentinel branch does not set 'DEFAULT_TIMEOUT' while
    # the direct-connection branch below does — presumably django-rq's own
    # default applies in sentinel mode; confirm whether this is intentional.
    RQ_PARAMS = {
        'SENTINELS': TASKS_REDIS_SENTINELS,
        'MASTER_NAME': TASKS_REDIS_SENTINEL_SERVICE,
        'DB': TASKS_REDIS_DATABASE,
        'PASSWORD': TASKS_REDIS_PASSWORD,
        'SOCKET_TIMEOUT': None,
        'CONNECTION_KWARGS': {
            'socket_connect_timeout': TASKS_REDIS_SENTINEL_TIMEOUT
        },
    }
else:
    RQ_PARAMS = {
        'HOST': TASKS_REDIS_HOST,
        'PORT': TASKS_REDIS_PORT,
        'DB': TASKS_REDIS_DATABASE,
        'PASSWORD': TASKS_REDIS_PASSWORD,
        'SSL': TASKS_REDIS_SSL,
        'SSL_CERT_REQS': None if TASKS_REDIS_SKIP_TLS_VERIFY else 'required',
        'DEFAULT_TIMEOUT': RQ_DEFAULT_TIMEOUT,
    }
# All three queues share the same connection parameters; they differ only in
# scheduling priority.
RQ_QUEUES = {
    'high': RQ_PARAMS,
    'default': RQ_PARAMS,
    'low': RQ_PARAMS,
}
#
# NetBox internal settings
#
# Pagination
# Sanity-check the configured page size against the maximum, then build the
# sorted, de-duplicated list of selectable per-page values (ensuring the
# configured PAGINATE_COUNT is always offered as an option).
if MAX_PAGE_SIZE and PAGINATE_COUNT > MAX_PAGE_SIZE:
    raise ImproperlyConfigured(
        f"PAGINATE_COUNT ({PAGINATE_COUNT}) must be less than or equal to MAX_PAGE_SIZE ({MAX_PAGE_SIZE}), if set."
    )
PER_PAGE_DEFAULTS = sorted({25, 50, 100, 250, 500, 1000} | {PAGINATE_COUNT})
#
# Plugins
#
for plugin_name in PLUGINS:
    # Import plugin module
    try:
        plugin = importlib.import_module(plugin_name)
    except ModuleNotFoundError as e:
        # Only a missing plugin *package* becomes a friendly configuration
        # error; import errors raised inside the plugin propagate unchanged.
        if getattr(e, 'name') == plugin_name:
            raise ImproperlyConfigured(
                "Unable to import plugin {}: Module not found. Check that the plugin module has been installed within the "
                "correct Python environment.".format(plugin_name)
            )
        raise e
    # Determine plugin config and add to INSTALLED_APPS.
    try:
        plugin_config = plugin.config
        INSTALLED_APPS.append("{}.{}".format(plugin_config.__module__, plugin_config.__name__))
    except AttributeError:
        raise ImproperlyConfigured(
            "Plugin {} does not provide a 'config' variable. This should be defined in the plugin's __init__.py file "
            "and point to the PluginConfig subclass.".format(plugin_name)
        )
    # Validate user-provided configuration settings and assign defaults.
    # An absent entry is normalized to an empty dict first.
    if plugin_name not in PLUGINS_CONFIG:
        PLUGINS_CONFIG[plugin_name] = {}
    plugin_config.validate(PLUGINS_CONFIG[plugin_name], VERSION)
    # Append any middleware the plugin declares (must be a list or tuple)
    plugin_middleware = plugin_config.middleware
    if plugin_middleware and type(plugin_middleware) in (list, tuple):
        MIDDLEWARE.extend(plugin_middleware)
    # Create RQ queues dedicated to the plugin.
    # The plugin name is used as a prefix for queue names defined in the
    # plugin config, e.g. mysuperplugin.mysuperqueue1
    if type(plugin_config.queues) is not list:
        raise ImproperlyConfigured(
            "Plugin {} queues must be a list.".format(plugin_name)
        )
    RQ_QUEUES.update({
        f"{plugin_name}.{queue}": RQ_PARAMS for queue in plugin_config.queues
    })
| 34.609952 | 123 | 0.720944 |
a5dea0a02d280400410abf5e87de51e5335ee980 | 4,215 | py | Python | mmcls/datasets/cifar.py | anthracene/mmclassification | 4b46fd6dc75d26b5604fdec75f6cc49e1d96d2a7 | [
"Apache-2.0"
] | 1 | 2020-07-20T09:52:50.000Z | 2020-07-20T09:52:50.000Z | mmcls/datasets/cifar.py | anthracene/mmclassification | 4b46fd6dc75d26b5604fdec75f6cc49e1d96d2a7 | [
"Apache-2.0"
] | null | null | null | mmcls/datasets/cifar.py | anthracene/mmclassification | 4b46fd6dc75d26b5604fdec75f6cc49e1d96d2a7 | [
"Apache-2.0"
] | 1 | 2020-08-04T05:47:52.000Z | 2020-08-04T05:47:52.000Z | import os
import os.path
import pickle
import numpy as np
from .base_dataset import BaseDataset
from .builder import DATASETS
from .utils import check_integrity, download_and_extract_archive
@DATASETS.register_module()
class CIFAR10(BaseDataset):
    """`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.

    Downloads and verifies the pickled CIFAR-10 batches (md5-checked), then
    exposes them as a list of annotation dicts via ``load_annotations``.
    This implementation is modified from
    https://github.com/pytorch/vision/blob/master/torchvision/datasets/cifar.py # noqa: E501
    """
    base_folder = 'cifar-10-batches-py'
    url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
    filename = "cifar-10-python.tar.gz"
    tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
    # (archive member name, expected md5) pairs used for integrity checking
    train_list = [
        ['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
        ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
        ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
        ['data_batch_4', '634d18415352ddfa80567beed471001a'],
        ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
    ]
    test_list = [
        ['test_batch', '40351d587109b95175f43aff81a1287e'],
    ]
    meta = {
        'filename': 'batches.meta',
        'key': 'label_names',
        'md5': '5ff9c542aee3614f3951f8cda6e48888',
    }
    def load_annotations(self):
        """Download the dataset if needed and return a list of annotation
        dicts, each with an ``img`` (32x32 HWC ndarray) and a ``gt_label``
        (int64 ndarray scalar)."""
        if not self._check_integrity():
            download_and_extract_archive(
                self.url,
                self.data_prefix,
                filename=self.filename,
                md5=self.tgz_md5)
        # Select the train or test batch list based on the dataset mode.
        if not self.test_mode:
            downloaded_list = self.train_list
        else:
            downloaded_list = self.test_list
        self.imgs = []
        self.gt_labels = []
        # load the pickled numpy arrays
        for file_name, checksum in downloaded_list:
            file_path = os.path.join(self.data_prefix, self.base_folder,
                                     file_name)
            with open(file_path, 'rb') as f:
                entry = pickle.load(f, encoding='latin1')
                self.imgs.append(entry['data'])
                # CIFAR-10 batches use 'labels'; CIFAR-100 uses 'fine_labels'
                # (this method is inherited by CIFAR100 below).
                if 'labels' in entry:
                    self.gt_labels.extend(entry['labels'])
                else:
                    self.gt_labels.extend(entry['fine_labels'])
        self.imgs = np.vstack(self.imgs).reshape(-1, 3, 32, 32)
        self.imgs = self.imgs.transpose((0, 2, 3, 1))  # convert to HWC
        self._load_meta()
        data_infos = []
        for img, gt_label in zip(self.imgs, self.gt_labels):
            gt_label = np.array(gt_label, dtype=np.int64)
            info = {'img': img, 'gt_label': gt_label}
            data_infos.append(info)
        return data_infos
    def _load_meta(self):
        """Load the (md5-verified) metadata file and populate ``CLASSES``
        and the ``class_to_idx`` name-to-index mapping."""
        path = os.path.join(self.data_prefix, self.base_folder,
                            self.meta['filename'])
        if not check_integrity(path, self.meta['md5']):
            raise RuntimeError(
                'Dataset metadata file not found or corrupted.' +
                ' You can use download=True to download it')
        with open(path, 'rb') as infile:
            data = pickle.load(infile, encoding='latin1')
            self.CLASSES = data[self.meta['key']]
        self.class_to_idx = {
            _class: i
            for i, _class in enumerate(self.CLASSES)
        }
    def _check_integrity(self):
        """Return True only if every train and test batch file exists under
        ``data_prefix`` and matches its expected md5 checksum."""
        root = self.data_prefix
        for fentry in (self.train_list + self.test_list):
            filename, md5 = fentry[0], fentry[1]
            fpath = os.path.join(root, self.base_folder, filename)
            if not check_integrity(fpath, md5):
                return False
        return True
@DATASETS.register_module()
class CIFAR100(CIFAR10):
    """`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.

    Reuses all of :class:`CIFAR10`'s loading logic; only the archive
    location, checksums and metadata layout differ.
    """

    base_folder = 'cifar-100-python'
    url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
    filename = "cifar-100-python.tar.gz"
    tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
    train_list = [
        ['train', '16019d7e3df5f24257cddd939b257f8d'],
    ]
    test_list = [
        ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
    ]
    meta = {
        'filename': 'meta',
        'key': 'fine_label_names',
        'md5': '7973b15100ade9c7d40fb424638fde48',
    }
| 32.929688 | 93 | 0.598577 |
9e0a51f633abca6393dc4f759652e23c29f306a5 | 8,582 | py | Python | podaac/podaac_utils.py | wongvh07/SPAC4C | d8186bd4dab25472f3a45a7b0464aa95553c92f9 | [
"Apache-2.0"
] | null | null | null | podaac/podaac_utils.py | wongvh07/SPAC4C | d8186bd4dab25472f3a45a7b0464aa95553c92f9 | [
"Apache-2.0"
] | null | null | null | podaac/podaac_utils.py | wongvh07/SPAC4C | d8186bd4dab25472f3a45a7b0464aa95553c92f9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016-2019 California Institute of Technology.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bs4 import BeautifulSoup
import requests
import xml.etree.ElementTree as ET
try:
from . import podaac as p
except:
import podaac as p
class PodaacUtils:
    '''Convenience helpers for listing PO.DAAC datasets and granules.'''

    def __init__(self):
        # Base endpoint of the PO.DAAC web services.
        self.URL = 'https://podaac.jpl.nasa.gov/ws/'

    def list_all_available_extract_granule_dataset_ids(self):
        '''Return an up-to-date list of granule dataset ids that can be
        used with the granule extraction service.

        :returns: a list of granule dataset ids (utf-8 encoded bytes).
        '''
        page = requests.get(self.URL + 'extract/granule/index.html')
        markup = BeautifulSoup(page.text, 'html.parser')
        table = markup.find("table", {"id": "tblDataset"})
        data_rows = table.find_all('tr')[1:]  # skip the header row
        return [row.find_all('td')[0].text.encode('utf-8')
                for row in data_rows]

    def list_all_available_extract_granule_dataset_short_names(self):
        '''Return an up-to-date list of granule dataset short names that
        can be used with the granule extraction service.

        :returns: a list of granule dataset short names (utf-8 encoded bytes).
        '''
        page = requests.get(self.URL + 'extract/granule/index.html')
        markup = BeautifulSoup(page.text, 'html.parser')
        table = markup.find("table", {"id": "tblDataset"})
        data_rows = table.find_all('tr')[1:]  # skip the header row
        return [row.find_all('td')[1].text.encode('utf-8')
                for row in data_rows]

    def list_all_available_granule_search_dataset_ids(self):
        '''Return an up-to-date list of all level-1 granule dataset ids
        (everything except the level-2 datasets).

        :returns: a list of granule dataset ids.
        '''
        atom_entry = '{http://www.w3.org/2005/Atom}entry'
        id_tag = '{https://podaac.jpl.nasa.gov/opensearch/}datasetId'
        # The catalogue is paged; fetch both 400-item pages.
        pages = (
            requests.get(
                self.URL + 'search/dataset/?format=atom&itemsPerPage=400').text,
            requests.get(
                self.URL + 'search/dataset?startIndex=400&itemsPerPage=400&format=atom').text,
        )
        dataset_ids = [
            entry.find(id_tag).text
            for page in pages
            for entry in ET.fromstring(page.encode('utf-8')).findall(atom_entry)
        ]
        level2_ids = self.list_available_granule_search_level2_dataset_ids()
        return list(set(dataset_ids) - set(level2_ids))

    def list_all_available_granule_search_dataset_short_names(self):
        '''Return an up-to-date list of all level-1 granule dataset short
        names (everything except the level-2 datasets).

        :returns: a list of granule dataset short names.
        '''
        atom_entry = '{http://www.w3.org/2005/Atom}entry'
        name_tag = '{https://podaac.jpl.nasa.gov/opensearch/}shortName'
        pages = (
            requests.get(
                self.URL + 'search/dataset/?format=atom&itemsPerPage=400').text,
            requests.get(
                self.URL + 'search/dataset?startIndex=400&itemsPerPage=400&format=atom').text,
        )
        short_names = [
            entry.find(name_tag).text
            for page in pages
            for entry in ET.fromstring(page.encode('utf-8')).findall(atom_entry)
        ]
        level2_names = \
            self.list_available_granule_search_level2_dataset_short_names()
        return list(set(short_names) - set(level2_names))

    def list_available_granule_search_level2_dataset_ids(self):
        '''Return an up-to-date list of level-2 granule dataset ids.

        :returns: a list of granule dataset ids.
        '''
        response = requests.get('https://podaac.jpl.nasa.gov/l2ssIngest/datasets')
        return [item["persistentId"] for item in response.json()["datasets"]]

    def list_available_granule_search_level2_dataset_short_names(self):
        '''Return an up-to-date list of level-2 granule dataset short names.

        :returns: a list of granule dataset short names.
        '''
        response = requests.get('https://podaac.jpl.nasa.gov/l2ssIngest/datasets')
        return [item["shortName"] for item in response.json()["datasets"]]

    def list_level4_dataset_ids(self):
        '''Return an up-to-date list of level-4 dataset ids.

        :returns: a list of level-4 dataset ids.
        '''
        podaac = p.Podaac()
        feed = podaac.dataset_search(process_level='4', items_per_page='400')
        root = ET.fromstring(feed.encode('utf-8'))
        return [
            entry.find('{https://podaac.jpl.nasa.gov/opensearch/}datasetId').text
            for entry in root.findall('{http://www.w3.org/2005/Atom}entry')
        ]

    def list_level4_dataset_short_names(self):
        '''Return an up-to-date list of level-4 dataset short names.

        :returns: a list of level-4 dataset short names.
        '''
        podaac = p.Podaac()
        feed = podaac.dataset_search(process_level='4', items_per_page='400')
        root = ET.fromstring(feed.encode('utf-8'))
        return [
            entry.find('{https://podaac.jpl.nasa.gov/opensearch/}shortName').text
            for entry in root.findall('{http://www.w3.org/2005/Atom}entry')
        ]

    def mine_granules_from_granule_search(self, granule_search_response=''):
        '''Extract granule names from a podaac.granule_search() response.

        :param granule_search_response: the output of a podaac.granule_search()
        :type path: :mod:`string`

        :returns: a list of granule name strings.
        '''
        tag = '<title>'
        tokens = granule_search_response.strip().split()
        # Keep only whitespace-separated tokens that carry a granule title;
        # the feed's own "PO.DAAC" title is filtered out.
        titles = [str(t) for t in tokens
                  if tag in t and 'PO.DAAC' not in t]
        return [t.replace('<title>', '').replace('</title>', '')
                for t in titles]
| 37.806167 | 89 | 0.639711 |
d15863c8e7c1ab5da04d1a6b4c43eae22cfb31df | 4,445 | py | Python | pyOCD/coresight/fpb.py | dragoniteArm/pyOCD_CC3220sf | d871dfc66a6185da7ed77a0b557fc933f5c499bc | [
"Apache-2.0"
] | null | null | null | pyOCD/coresight/fpb.py | dragoniteArm/pyOCD_CC3220sf | d871dfc66a6185da7ed77a0b557fc933f5c499bc | [
"Apache-2.0"
] | null | null | null | pyOCD/coresight/fpb.py | dragoniteArm/pyOCD_CC3220sf | d871dfc66a6185da7ed77a0b557fc933f5c499bc | [
"Apache-2.0"
] | 1 | 2021-09-25T05:04:28.000Z | 2021-09-25T05:04:28.000Z | """
mbed CMSIS-DAP debugger
Copyright (c) 2015-2017 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ..core.target import Target
from .component import CoreSightComponent
from ..debug.breakpoints.provider import (Breakpoint, BreakpointProvider)
import logging
class HardwareBreakpoint(Breakpoint):
    ## @brief One FPB comparator slot acting as a hardware breakpoint.
    def __init__(self, comp_register_addr, provider):
        super(HardwareBreakpoint, self).__init__(provider)
        # Address of the FP_COMPx register that backs this breakpoint.
        self.comp_register_addr = comp_register_addr
        self.type = Target.BREAKPOINT_HW
class FPB(BreakpointProvider, CoreSightComponent):
    ## Flash Patch and Breakpoint unit register addresses.
    FP_CTRL = 0xE0002000
    FP_CTRL_KEY = 1 << 1
    FP_COMP0 = 0xE0002008

    @classmethod
    def factory(cls, ap, cmpid, address):
        fpb = cls(ap, cmpid, address)
        assert ap.core
        ap.core.connect(fpb)
        return fpb

    def __init__(self, ap, cmpid=None, addr=None):
        CoreSightComponent.__init__(self, ap, cmpid, addr)
        BreakpointProvider.__init__(self)
        assert self.address == FPB.FP_CTRL, "Unexpected FPB base address 0x%08x" % self.address
        self.hw_breakpoints = []
        self.nb_code = 0
        self.nb_lit = 0
        self.num_hw_breakpoint_used = 0
        self.enabled = False

    ## @brief Inits the FPB.
    #
    # Reads the number of hardware breakpoints available on the core and
    # disables the FPB (Flash Patch and Breakpoint Unit); it is re-enabled
    # when the first breakpoint is set.
    def init(self):
        fpcr = self.ap.readMemory(FPB.FP_CTRL)
        # NUM_CODE is split across FP_CTRL bits [14:12] and [7:4].
        self.nb_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF)
        self.nb_lit = (fpcr >> 7) & 0xf
        logging.info("%d hardware breakpoints, %d literal comparators",
                     self.nb_code, self.nb_lit)
        self.hw_breakpoints.extend(
            HardwareBreakpoint(FPB.FP_COMP0 + 4 * slot, self)
            for slot in range(self.nb_code))

        # Leave the unit disabled until a breakpoint is requested, and
        # clear every comparator register.
        self.disable()
        for bp in self.hw_breakpoints:
            self.ap.writeMemory(bp.comp_register_addr, 0)

    def bp_type(self):
        return Target.BREAKPOINT_HW

    def enable(self):
        self.ap.writeMemory(FPB.FP_CTRL, FPB.FP_CTRL_KEY | 1)
        self.enabled = True
        logging.debug('fpb has been enabled')

    def disable(self):
        self.ap.writeMemory(FPB.FP_CTRL, FPB.FP_CTRL_KEY | 0)
        self.enabled = False
        logging.debug('fpb has been disabled')

    def available_breakpoints(self):
        return len(self.hw_breakpoints) - self.num_hw_breakpoint_used

    ## @brief Set a hardware breakpoint at a specific location in flash.
    def set_breakpoint(self, addr):
        if not self.enabled:
            self.enable()

        if addr >= 0x20000000:
            # Hardware breakpoints are only supported in the range
            # 0x00000000 - 0x1fffffff on cortex-m devices.
            logging.error('Breakpoint out of range 0x%X', addr)
            return None

        if self.available_breakpoints() == 0:
            logging.error('No more available breakpoint!!, dropped bp at 0x%X', addr)
            return None

        for slot in self.hw_breakpoints:
            if slot.enabled:
                continue
            slot.enabled = True
            # BP_MATCH selects which halfword of the word address fires.
            bp_match = (2 << 30) if addr & 0x2 else (1 << 30)
            self.ap.writeMemory(slot.comp_register_addr,
                                addr & 0x1ffffffc | bp_match | 1)
            slot.addr = addr
            self.num_hw_breakpoint_used += 1
            return slot
        return None

    ## @brief Remove a hardware breakpoint at a specific location in flash.
    def remove_breakpoint(self, bp):
        for slot in self.hw_breakpoints:
            if slot.enabled and slot.addr == bp.addr:
                slot.enabled = False
                self.ap.writeMemory(slot.comp_register_addr, 0)
                self.num_hw_breakpoint_used -= 1
                return
| 36.138211 | 98 | 0.64207 |
f295f70d549fc20a5523a0a4afdcf910a93b9a97 | 329 | py | Python | lab_6.py | anas21-meet/meet2019y1lab6 | dd73f213b98863411474ce842d3961b476523a2e | [
"MIT"
] | null | null | null | lab_6.py | anas21-meet/meet2019y1lab6 | dd73f213b98863411474ce842d3961b476523a2e | [
"MIT"
] | null | null | null | lab_6.py | anas21-meet/meet2019y1lab6 | dd73f213b98863411474ce842d3961b476523a2e | [
"MIT"
def fizzbuzz(n):
    """Return the FizzBuzz sequence for the numbers 1 .. n-1.

    Multiples of both 3 and 5 become 'FizzBuzz', multiples of only 3
    become 'Fizz', multiples of only 5 become 'Buzz'; every other number
    is kept unchanged.
    """
    sequence = []
    for value in range(1, n):
        by_three = value % 3 == 0
        by_five = value % 5 == 0
        if by_three and by_five:
            sequence.append('FizzBuzz')
        elif by_three:
            sequence.append('Fizz')
        elif by_five:
            sequence.append('Buzz')
        else:
            sequence.append(value)
    return sequence


x = fizzbuzz(9)
print(x)
| 19.352941 | 37 | 0.556231 |
9070acf41a2d76dcf675461bc7a74ab3323e9d2c | 4,861 | py | Python | custom_components/lkcomu_interrao/const.py | avbor/HomeAssistantConf | 1f0fe16c8e3f3dcea7cc350f3fb9c233b6a22614 | [
"Unlicense"
] | 35 | 2021-02-25T06:30:42.000Z | 2022-03-09T20:18:47.000Z | custom_components/lkcomu_interrao/const.py | avbor/HomeAssistantConf | 1f0fe16c8e3f3dcea7cc350f3fb9c233b6a22614 | [
"Unlicense"
] | 22 | 2021-06-16T09:32:55.000Z | 2022-03-27T10:26:34.000Z | custom_components/lkcomu_interrao/const.py | avbor/HomeAssistantConf | 1f0fe16c8e3f3dcea7cc350f3fb9c233b6a22614 | [
"Unlicense"
] | 19 | 2021-02-20T05:29:58.000Z | 2022-02-05T16:22:30.000Z | """Constants for lkcomu_interrao integration"""
from typing import Final
# Integration/domain identifier used as the key throughout Home Assistant.
DOMAIN: Final = "lkcomu_interrao"

# Attribution templates; "%s" is substituted with the provider name.
ATTRIBUTION_EN: Final = "Data acquired from %s"
ATTRIBUTION_RU: Final = "Данные получены с %s"

# --- Entity state attribute names ---
ATTR_ACCOUNT_CODE: Final = "account_code"
ATTR_ACCOUNT_ID: Final = "account_id"
ATTR_ADDRESS: Final = "address"
ATTR_AGENT: Final = "agent"
ATTR_AMOUNT: Final = "amount"
ATTR_BENEFITS: Final = "benefits"
ATTR_CALL_PARAMS: Final = "call_params"
ATTR_CHARGED: Final = "charged"
ATTR_CLEAR: Final = "clear"
ATTR_COMMENT: Final = "comment"
ATTR_CORRECT: Final = "correct"
ATTR_COST: Final = "cost"
ATTR_DESCRIPTION: Final = "description"
ATTR_DETAILS: Final = "details"
ATTR_END: Final = "end"
ATTR_FULL_NAME: Final = "full_name"
ATTR_GROUP: Final = "group"
ATTR_IGNORE_INDICATIONS: Final = "ignore_indications"
ATTR_IGNORE_PERIOD: Final = "ignore_period"
ATTR_INCREMENTAL: Final = "incremental"
ATTR_INDICATIONS: Final = "indications"
ATTR_INITIAL: Final = "initial"
ATTR_INSTALL_DATE: Final = "install_date"
ATTR_INSURANCE: Final = "insurance"
ATTR_INVOICE_ID: Final = "invoice_id"
ATTR_LAST_INDICATIONS_DATE: Final = "last_indications_date"
ATTR_LAST_PAYMENT_AMOUNT: Final = "last_payment_amount"
ATTR_LAST_PAYMENT_DATE: Final = "last_payment_date"
ATTR_LAST_PAYMENT_STATUS: Final = "last_payment_status"
ATTR_LIVING_AREA: Final = "living_area"
ATTR_METER_CATEGORY: Final = "meter_category"
ATTR_METER_CODE: Final = "meter_code"
ATTR_METER_MODEL: Final = "meter_model"
ATTR_MODEL: Final = "model"
ATTR_PAID: Final = "paid"
ATTR_PAID_AT: Final = "paid_at"
ATTR_PENALTY: Final = "penalty"
ATTR_PERIOD: Final = "period"
ATTR_PREVIOUS: Final = "previous"
ATTR_PROVIDER_NAME: Final = "provider_name"
ATTR_PROVIDER_TYPE: Final = "provider_type"
ATTR_REASON: Final = "reason"
ATTR_RECALCULATIONS: Final = "recalculations"
ATTR_REMAINING_DAYS: Final = "remaining_days"
ATTR_RESULT: Final = "result"
ATTR_SERVICE_NAME: Final = "service_name"
ATTR_SERVICE_TYPE: Final = "service_type"
ATTR_START: Final = "start"
ATTR_STATUS: Final = "status"
ATTR_SUBMIT_PERIOD_ACTIVE: Final = "submit_period_active"
ATTR_SUBMIT_PERIOD_END: Final = "submit_period_end"
ATTR_SUBMIT_PERIOD_START: Final = "submit_period_start"
ATTR_SUCCESS: Final = "success"
ATTR_SUM: Final = "sum"
ATTR_TOTAL: Final = "total"
ATTR_TOTAL_AREA: Final = "total_area"
ATTR_UNIT: Final = "unit"

# --- YAML / config-entry option keys ---
CONF_ACCOUNTS: Final = "accounts"
CONF_DEV_PRESENTATION: Final = "dev_presentation"
CONF_LAST_INVOICE: Final = "last_invoice"
CONF_LAST_PAYMENT: Final = "last_payment"
CONF_LOGOS: Final = "logos"
CONF_METERS: Final = "meters"
CONF_NAME_FORMAT: Final = "name_format"
CONF_USER_AGENT: Final = "user_agent"

# --- Keys for integration data stored in hass.data ---
DATA_API_OBJECTS: Final = DOMAIN + "_api_objects"
DATA_ENTITIES: Final = DOMAIN + "_entities"
DATA_FINAL_CONFIG: Final = DOMAIN + "_final_config"
DATA_PROVIDER_LOGOS: Final = DOMAIN + "_provider_logos"
DATA_UPDATE_DELEGATORS: Final = DOMAIN + "_update_delegators"
DATA_UPDATE_LISTENERS: Final = DOMAIN + "_update_listeners"
DATA_YAML_CONFIG: Final = DOMAIN + "_yaml_config"

# --- Default entity-name format templates (English / Russian) ---
# Placeholders are filled with the FORMAT_VAR_* values defined below.
DEFAULT_NAME_FORMAT_EN_ACCOUNTS: Final = "{provider_code_upper} {account_code} {type_en_cap}"
DEFAULT_NAME_FORMAT_EN_METERS: Final = "{provider_code_upper} {account_code} {type_en_cap} {code}"
DEFAULT_NAME_FORMAT_EN_LAST_INVOICE: Final = "{provider_code_upper} {account_code} {type_en_cap}"
DEFAULT_NAME_FORMAT_EN_LAST_PAYMENT: Final = "{provider_code_upper} {account_code} {type_en_cap}"
DEFAULT_NAME_FORMAT_RU_ACCOUNTS: Final = "{provider_code_upper} {account_code} {type_ru_cap}"
DEFAULT_NAME_FORMAT_RU_METERS: Final = "{provider_code_upper} {account_code} {type_ru_cap} {code}"
DEFAULT_NAME_FORMAT_RU_LAST_INVOICE: Final = "{provider_code_upper} {account_code} {type_ru_cap}"
DEFAULT_NAME_FORMAT_RU_LAST_PAYMENT: Final = "{provider_code_upper} {account_code} {type_ru_cap}"

DEFAULT_MAX_INDICATIONS: Final = 3
DEFAULT_SCAN_INTERVAL: Final = 60 * 60 # 1 hour

# --- Supported personal-cabinet API backends ---
API_TYPE_DEFAULT: Final = "moscow"
API_TYPE_NAMES: Final = {
    "altai": "ЛК Алтай (АО «АлтайЭнергосбыт»)",
    "bashkortostan": "ЛКК ЭСКБ (Башэлектросбыт)",
    "moscow": "ЕЛК ЖКХ (АО «Мосэнергосбыт», МосОблЕИРЦ, ПАО «Россети Московский регион»)",
    "oryol": "ЛКК Орел (ООО «Орловский энергосбыт»)",
    "saratov": "ЛК Саратов (ПАО «Саратовэнерго»)",
    "sevesk": "ЕЛК Вологда (Северная сбытовая компания)",
    "tambov": "ЛК ТЭСК (Тамбовская энергосбытовая компания)",
    "tomsk": "ЕЛК Томск (Томскэнергосбыт / Томск РТС)",
    "volga": "ЛКК ЭСВ (Энергосбыт Волга)",
}

SUPPORTED_PLATFORMS: Final = ("sensor", "binary_sensor")

# --- Placeholder names usable inside name-format templates ---
FORMAT_VAR_ACCOUNT_CODE: Final = "account_code"
FORMAT_VAR_ACCOUNT_ID: Final = "account_id"
FORMAT_VAR_CODE: Final = "code"
FORMAT_VAR_ID: Final = "id"
FORMAT_VAR_PROVIDER_CODE: Final = "provider_code"
FORMAT_VAR_PROVIDER_NAME: Final = "provider_name"
FORMAT_VAR_TYPE_EN: Final = "type_en"
FORMAT_VAR_TYPE_RU: Final = "type_ru"
| 40.173554 | 98 | 0.778029 |
db05d75421e5f06cb2b44f3b9c5a4a660b0ca393 | 913 | py | Python | invenio_config_tugraz/ext.py | utnapischtim/invenio-config-tugraz | 0a37a8015e107ccbe6b389e9d568030c9826ff46 | [
"MIT"
] | null | null | null | invenio_config_tugraz/ext.py | utnapischtim/invenio-config-tugraz | 0a37a8015e107ccbe6b389e9d568030c9826ff46 | [
"MIT"
] | null | null | null | invenio_config_tugraz/ext.py | utnapischtim/invenio-config-tugraz | 0a37a8015e107ccbe6b389e9d568030c9826ff46 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Mojib Wali.
#
# invenio-config-tugraz is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""invenio module that adds tugraz configs."""
from flask_babelex import gettext as _
from . import config
class InvenioConfigTugraz(object):
    """invenio-config-tugraz extension."""

    def __init__(self, app=None):
        """Extension initialization; registers on *app* when one is given."""
        if app:
            self.init_app(app)

    def init_app(self, app):
        """Flask application initialization."""
        self.init_config(app)
        app.extensions['invenio-config-tugraz'] = self

    def init_config(self, app):
        """Copy every INVENIO_CONFIG_TUGRAZ_* default into the app config.

        Existing application values win: only missing keys are set.
        """
        prefix = 'INVENIO_CONFIG_TUGRAZ_'
        for key in dir(config):
            if key.startswith(prefix):
                app.config.setdefault(key, getattr(config, key))
| 26.852941 | 73 | 0.647317 |
89ea0e9845ca5b5dee5cbbc6eab1feba4c30406b | 607 | py | Python | django_joblog/migrations/0004_alter_state_choices.py | defgsus/django-joblog | 88467f951d7ebc586c69e421cab39e4caa395cca | [
"MIT"
] | null | null | null | django_joblog/migrations/0004_alter_state_choices.py | defgsus/django-joblog | 88467f951d7ebc586c69e421cab39e4caa395cca | [
"MIT"
] | null | null | null | django_joblog/migrations/0004_alter_state_choices.py | defgsus/django-joblog | 88467f951d7ebc586c69e421cab39e4caa395cca | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2019-04-29 20:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-declares the choices of ``JobLogModel.state``."""

    dependencies = [
        ('django_joblog', '0003_alter_state_choices'),
    ]

    operations = [
        migrations.AlterField(
            model_name='joblogmodel',
            name='state',
            field=models.CharField(
                choices=[
                    ('running', '▶ running'),
                    ('finished', '✔ finished'),
                    ('error', '❌ error'),
                    ('blocked', '🖐 blocked'),
                    ('vanished', '⊙ vanished'),
                ],
                db_index=True,
                default='running',
                editable=False,
                max_length=64,
                verbose_name='state',
            ),
        ),
    ]
| 31.947368 | 262 | 0.616145 |
0e9594f02a486f41d99a6acd8345d5013aaaa039 | 163 | py | Python | Coursera/Week.5/Task.8.py | v1nnyb0y/Coursera.BasePython | bbfb3184dc27a4cdb16b087123890991afbc5506 | [
"MIT"
] | null | null | null | Coursera/Week.5/Task.8.py | v1nnyb0y/Coursera.BasePython | bbfb3184dc27a4cdb16b087123890991afbc5506 | [
"MIT"
] | null | null | null | Coursera/Week.5/Task.8.py | v1nnyb0y/Coursera.BasePython | bbfb3184dc27a4cdb16b087123890991afbc5506 | [
"MIT"
'''
Remarkable numbers - 1

Print every two-digit number that equals twice the product of its
decimal digits (only 36 qualifies: 3 * 6 * 2 == 36).
'''


def remarkable_numbers():
    """Return the two-digit numbers equal to twice their digit product."""
    matches = []
    for i in range(10, 100):
        digits = str(i)
        # "power" in the original: product of the two digits, doubled.
        if int(digits[0]) * int(digits[1]) * 2 == i:
            matches.append(i)
    return matches


for i in remarkable_numbers():
    print(i)
| 18.111111 | 47 | 0.521472 |
c38f49a5244546a8bc04200a2fb529d7602685f1 | 10,253 | py | Python | docs/conf.py | spinleft/M-LOOP | 32cefcde89811edf7b9df8c8831b7ee99a3e7401 | [
"MIT"
] | null | null | null | docs/conf.py | spinleft/M-LOOP | 32cefcde89811edf7b9df8c8831b7ee99a3e7401 | [
"MIT"
] | null | null | null | docs/conf.py | spinleft/M-LOOP | 32cefcde89811edf7b9df8c8831b7ee99a3e7401 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# M-LOOP documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 24 11:34:47 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax'
]
# Napoleon settings
napoleon_google_docstring = True
napoleon_include_private_with_doc = True
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'M-LOOP'
copyright = '2016, Michael R Hush'
author = 'Michael R Hush'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.1'
# The full version, including alpha/beta/rc tags.
release = '3.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
# exclude_patterns = ['_templates']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Custom sidebar templates, maps document names to template names.
html_sidebars = { '**': ['about.html','navigation.html','relations.html', 'searchbox.html'], }
#'globaltoc.html',
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'logo':'M-LOOP_logo.png',
'logo_name':True,
'description':'Machine-Learning Online Optimization Package',
'github_user':'michaelhush',
'github_repo':'M-LOOP',
'github_banner':True,
'font_family':"Arial, Helvetica, sans-serif",
'head_font_family':"Arial, Helvetica, sans-serif",
'analytics_id':'UA-83520804-1'}
#'github_button':True,
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = 'M-LOOP v3.1.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '_static/M-LOOP_logo.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/M-LOOP_logo.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'M-LOOPdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'M-LOOP.tex', 'M-LOOP Documentation',
'Michael R Hush', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = 'M-LOOP_logo.pdf'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'm-loop', 'M-LOOP Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    # (source start file, target name, title, author,
    #  dir menu entry, description, category).
    # The description previously still carried Sphinx's quickstart
    # placeholder ('One line description of project.').
    (master_doc, 'M-LOOP', 'M-LOOP Documentation',
     author, 'M-LOOP',
     'Machine-Learning Online Optimization Package.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32.446203 | 95 | 0.721545 |
87213dc33a8a88e10ad452b178ee5c73cbb15631 | 5,145 | py | Python | lldb/test/API/functionalities/swift-runtime-reporting/objc-inference/TestObjcInference.py | LaudateCorpus1/llvm-project | ff2e0f0c1112558b3f30d8afec7c9882c33c79e3 | [
"Apache-2.0"
] | 765 | 2015-12-03T16:44:59.000Z | 2022-03-07T12:41:10.000Z | lldb/test/API/functionalities/swift-runtime-reporting/objc-inference/TestObjcInference.py | LaudateCorpus1/llvm-project | ff2e0f0c1112558b3f30d8afec7c9882c33c79e3 | [
"Apache-2.0"
] | 3,180 | 2019-10-18T01:21:21.000Z | 2022-03-31T23:25:41.000Z | lldb/test/API/functionalities/swift-runtime-reporting/objc-inference/TestObjcInference.py | LaudateCorpus1/llvm-project | ff2e0f0c1112558b3f30d8afec7c9882c33c79e3 | [
"Apache-2.0"
] | 284 | 2015-12-03T16:47:25.000Z | 2022-03-12T05:39:48.000Z | # TestObjcInference.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Test support of Swift Runtime Reporting for @objc inference.
"""
import lldb
import lldbsuite.test.decorators as decorators
import lldbsuite.test.lldbtest as lldbtest
import lldbsuite.test.lldbutil as lldbutil
import os
import unittest2
import json
class SwiftRuntimeReportingObjcInferenceTestCase(lldbtest.TestBase):
    """Check that Swift Runtime Reporting stops on implicit @objc entrypoints
    and surfaces an '@objc' fix-it for each deprecated member function."""

    mydir = lldbtest.TestBase.compute_mydir(__file__)

    @decorators.swiftTest
    @decorators.skipIfLinux
    def test_swift_runtime_reporting(self):
        self.build()
        self.do_test()

    def setUp(self):
        lldbtest.TestBase.setUp(self)
        self.main_source = "main.swift"
        self.main_source_spec = lldb.SBFileSpec(self.main_source)
        self.line_method = lldbtest.line_number(self.main_source, '// method line')
        self.line_method2 = lldbtest.line_number(self.main_source, '// method2 line')

    def _check_implicit_objc_stop(self, thread, method_name, fixit_line):
        """Verify the current stop is an implicit-objc-entrypoint report for
        -[a.MyClass <method_name>] carrying one '@objc' fix-it at fixit_line.

        The original test repeated this block verbatim for each method; it is
        factored out so both stops are verified identically.
        """
        self.expect("thread list",
                    substrs=['stopped', 'stop reason = implicit Objective-C entrypoint'])
        self.assertEqual(
            self.dbg.GetSelectedTarget().process.GetSelectedThread().GetStopReason(),
            lldb.eStopReasonInstrumentation)
        # 'thread info -s' prints two header lines followed by a JSON payload.
        self.expect("thread info -s",
                    substrs=["instrumentation_class", "issue_type", "description"])
        output_lines = self.res.GetOutput().split('\n')
        data = json.loads('\n'.join(output_lines[2:]))
        self.assertEqual(data["instrumentation_class"], "SwiftRuntimeReporting")
        self.assertEqual(data["issue_type"], "implicit-objc-entrypoint")
        self.assertEqual(
            data["description"],
            "implicit Objective-C entrypoint -[a.MyClass %s] is deprecated "
            "and will be removed in Swift 4" % method_name)
        self.assertEqual(len(data["notes"]), 1)
        note = data["notes"][0]
        self.assertEqual(note["description"],
                         "add '@objc' to expose this Swift declaration to Objective-C")
        self.assertEqual(len(note["fixits"]), 1)
        fixit = note["fixits"][0]
        self.assertTrue(fixit["filename"].endswith(self.main_source))
        self.assertEqual(fixit["start_line"], fixit_line)
        self.assertEqual(fixit["end_line"], fixit_line)
        self.assertEqual(fixit["start_col"], 3)
        self.assertEqual(fixit["end_col"], 3)
        self.assertEqual(fixit["replacement"], "@objc ")
        historical_threads = thread.GetStopReasonExtendedBacktraces(
            lldb.eInstrumentationRuntimeTypeSwiftRuntimeReporting)
        self.assertEqual(historical_threads.GetSize(), 1)

    def do_test(self):
        exe = self.getBuildArtifact("a.out")
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, lldbtest.VALID_TARGET)
        self.runCmd("run")
        thread = target.process.GetSelectedThread()
        # First stop: memberfunc; second stop after 'continue': memberfunc2.
        self._check_implicit_objc_stop(thread, "memberfunc", self.line_method)
        self.runCmd("continue")
        self._check_implicit_objc_stop(thread, "memberfunc2", self.line_method2)
| 45.131579 | 122 | 0.665112 |
332c1948ac22997e1b4d06db767c5f41f132275d | 3,107 | py | Python | data/p2DJ/New/program/qiskit/QC/startQiskit_QC126.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p2DJ/New/program/qiskit/QC/startQiskit_QC126.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p2DJ/New/program/qiskit/QC/startQiskit_QC126.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=2
# total number=10
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle circuit O_f over n control qubits plus one target.

    For every n-bit string on which f returns "1", a multi-controlled X is
    applied to the target, sandwiched between X gates that map the string's
    zero bits onto the all-ones control pattern.
    """
    ctrl = QuantumRegister(n, "ofc")
    tgt = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(ctrl, tgt, name="Of")
    for value in range(2 ** n):
        bits = np.binary_repr(value, n)
        if f(bits) != "1":
            continue
        # Positions whose bit is '0' must be inverted so the mct fires
        # exactly on this input string (and are inverted back afterwards).
        zero_positions = [j for j in range(n) if bits[j] == "0"]
        for j in zero_positions:
            oracle.x(ctrl[j])
        oracle.mct(ctrl, tgt[0], None, mode='noancilla')
        for j in zero_positions:
            oracle.x(ctrl[j])
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble a Deutsch/Deutsch-Jozsa-style circuit for oracle f on n qubits.

    H-sandwiches the oracle between Hadamard layers on the inputs; the
    trailing single-qubit gates carry machine-generated `# number=` markers
    (mutation ids) and should be kept intact.
    """
    # circuit begin
    input_qubit = QuantumRegister(n, "qc")
    target = QuantumRegister(1, "qt")
    prog = QuantumCircuit(input_qubit, target)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(target)
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[1]) # number=1
    prog.h(target)
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [target])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    #for i in range(n):
    #    prog.measure(input_qubit[i], classicals[i])
    prog.y(input_qubit[1]) # number=2
    prog.y(input_qubit[1]) # number=4
    prog.y(input_qubit[1]) # number=3
    prog.cx(input_qubit[1],input_qubit[0]) # number=7
    prog.x(input_qubit[0]) # number=8
    prog.cx(input_qubit[1],input_qubit[0]) # number=9
    prog.x(input_qubit[0]) # number=6
    # circuit end
    return prog
if __name__ == '__main__':
    # Build a 2-qubit instance with the oracle f(rep) = last bit of rep.
    n = 2
    f = lambda rep: rep[-1]
    # f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
    # f = lambda rep: "0"
    prog = make_circuit(n, f)
    sample_shot =2800
    # Run on the real IBMQ 'ibmq_belem' backend (requires stored credentials).
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = provider.get_backend("ibmq_belem")
    circuit1 = transpile(prog,FakeVigo())
    # NOTE(review): the two X gates on qubit 3 cancel each other out; this
    # looks like a generated mutation artifact — confirm before removing.
    circuit1.x(qubit=3)
    circuit1.x(qubit=3)
    circuit1.measure_all()
    prog = circuit1
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Persist counts, circuit depth and the circuit text for later analysis.
    writefile = open("../data/startQiskit_QC126.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| 27.990991 | 82 | 0.622787 |
3e9247d8f63888251ebda0de00ca145ac66c5b48 | 106 | py | Python | safe/safe.py | tom-010/exception-safe | 0804f095452669af0b99a24fe9d3564fa55a8272 | [
"Apache-2.0"
] | null | null | null | safe/safe.py | tom-010/exception-safe | 0804f095452669af0b99a24fe9d3564fa55a8272 | [
"Apache-2.0"
] | null | null | null | safe/safe.py | tom-010/exception-safe | 0804f095452669af0b99a24fe9d3564fa55a8272 | [
"Apache-2.0"
] | null | null | null | def safe(cb, default=None):
try:
return cb()
except Exception as e:
return default | 21.2 | 27 | 0.584906 |
828e13f03dc884b594a50aaa459c86b0ba020a32 | 977 | py | Python | 2020-04-20-ML-linear-regression/2020-04-20-ML-linear-regression.py | jetorz/Data2Science | 771b581aba9962d006a0dceb39eb6bf4f35e139e | [
"MIT"
] | 1 | 2020-05-07T13:47:37.000Z | 2020-05-07T13:47:37.000Z | 2020-04-20-ML-linear-regression/2020-04-20-ML-linear-regression.py | Future-SuperStar/Data2Science | 771b581aba9962d006a0dceb39eb6bf4f35e139e | [
"MIT"
] | null | null | null | 2020-04-20-ML-linear-regression/2020-04-20-ML-linear-regression.py | Future-SuperStar/Data2Science | 771b581aba9962d006a0dceb39eb6bf4f35e139e | [
"MIT"
] | 2 | 2020-06-26T09:57:15.000Z | 2021-12-10T03:27:23.000Z | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
# Load the two-column CSV: column 0 is the single feature, column 1 the target.
data = np.loadtxt('ex1data1.txt', delimiter=',')
X = data[:, 0].reshape(-1, 1); y = data[:, 1]
# Random hold-out split using the library's default proportions.
X_train, X_test, y_train, y_test = train_test_split(X, y)
lr = LinearRegression().fit(X_train, y_train)
# NOTE(review): the next two bare expressions are no-ops outside a
# notebook/REPL; they only echo values interactively.
lr.coef_
lr.intercept_
print("lr.coef_: {}".format(lr.coef_))
print("lr.intercept_: {}".format(lr.intercept_))
print("Training set score: {:.2f}".format(lr.score(X_train, y_train)))
print("Test set score: {:.2f}".format(lr.score(X_test, y_test)))
def regLine(coef, inter, xmin, xmax):
    """Return the fitted line's y-values at xmin and xmax as (ymin, ymax)."""
    return inter + coef * xmin, inter + coef * xmax
# Plot the raw points together with the fitted regression line endpoints.
fig = plt.figure(figsize=(8, 8),dpi=100)
ax = fig.add_subplot()
ax.scatter(X, y)
# NOTE(review): X has shape (n, 1), so builtin min/max yield 1-element
# arrays here; matplotlib broadcasts them, but X.min()/X.max() scalars
# would be cleaner — confirm before changing.
xmin = min(X)
xmax = max(X)
ymin, ymax = regLine(lr.coef_, lr.intercept_, xmin, xmax)
ax.plot([xmin, xmax], [ymin, ymax]) | 27.914286 | 70 | 0.703173 |
e517db25c6a8882db390ac07d8553ce05ef76fb6 | 3,854 | py | Python | jsonprocessor/MemberProcessor.py | abhishek9sharma/MeetupDataCollection | dc528a4490e3b288022fe2943045e2f9df8a2b8b | [
"MIT"
] | null | null | null | jsonprocessor/MemberProcessor.py | abhishek9sharma/MeetupDataCollection | dc528a4490e3b288022fe2943045e2f9df8a2b8b | [
"MIT"
] | null | null | null | jsonprocessor/MemberProcessor.py | abhishek9sharma/MeetupDataCollection | dc528a4490e3b288022fe2943045e2f9df8a2b8b | [
"MIT"
] | null | null | null | __author__ = 'abhisheksh'
from jsonprocessor.TransformationUtil import TransformHelper as thlp
import pandas as pd
import os
import pandas.io.common
import gc
class ProcessSingleGroupMembers:
    """Transform one Meetup group's raw members CSV into normalized frames.

    Loads `<...>_<groupid>_...` member dumps, flattens each row through
    TransformHelper, and splits the result into three DataFrames: member
    attributes, member topics, and member join/visit dates. Call
    ProcessSingleGroupMembersInfo() then WriteConvertedCSV().
    """
    def __init__(self,memberfilecsvin,opfolder=None):
        # memberfilecsvin: path to the raw members CSV for one group.
        # opfolder: output folder; defaulted lazily in WriteConvertedCSV.
        self.member_file_group=memberfilecsvin
        self.opfolder=opfolder
        self.emptyfile = False
        self.exceptionoccured=False
        try:
            self.group_members_org=pd.read_csv(memberfilecsvin)
        except pandas.io.common.EmptyDataError:
            # NOTE(review): exceptionoccured is (re)set to False here too,
            # so it can never become True — confirm this is intentional.
            self.emptyfile=True
            self.exceptionoccured=False
        # Derive the group id from the '_'-separated file name, stripping
        # any directory prefix from the chosen token.
        self.csvinfileinfo=memberfilecsvin.split('_')
        if(len(self.csvinfileinfo)>2):
            self.group_id = str(str(self.csvinfileinfo[1]).split('/')[-1])
        else:
            self.group_id = str(str(self.csvinfileinfo[0]).split('/')[-1])
        #self.group_id = str(memberfilecsvin.split('_')[0]).split('/')[-1]
        #self.group_id=str(memberfilecsvin.split('_')[0]).split('/')[-1]
        # Frames populated by ProcessSingleGroupMembersInfo().
        self.group_members_df_all=None
        self.group_members_topics_df=None
        self.group_members_joined_df=None
        self.thlp=thlp()
    def ProcessSingleGroupMembersInfo(self):
        """Flatten member rows; split out topics and join/visit columns.

        NOTE(review): assumes the input CSV was non-empty — when __init__
        caught EmptyDataError, self.group_members_org is unset and this
        method raises AttributeError.
        """
        groupd_member_listDict=[]
        group_members_topics_list=[]
        #group_members_joined_list=[]
        for idx,row in self.group_members_org.iterrows():
            rowdict={}
            for c in self.group_members_org.columns:
            #for c in ['category']:
                if(c=='topics'):
                    #pass
                    try:
                        # SECURITY: eval() on a CSV field executes arbitrary
                        # expressions from the data file; ast.literal_eval
                        # would be the safe alternative for untrusted input.
                        topics_member=eval(row[c])
                        for t in topics_member:
                            t['member_id']=row['id']
                            #Possible FIX for Integer OVerflow
                            #t['member_id']=str(row['id'])
                            #print(t)
                            group_members_topics_list.append(t)
                    except:
                        print("Exception Occured while processing topics of members with id " + str(row['id']))
                #elif(c in ['joined','visited']):
                #    pass
                else:
                    data=row[c]
                    try:
                        self.thlp.TransformData(rowdict,data,c)
                    except:
                        print("Exception Occured for " + str(idx))
            groupd_member_listDict.append(rowdict)
        #self.group_members_only=pd.DataFrame(groupd_member_listDict)
        self.group_members_df_all=pd.DataFrame(groupd_member_listDict)
        self.group_members_topics_df=pd.DataFrame(group_members_topics_list)
        # Slice join/visit timestamps into their own frame.
        # NOTE(review): assigning into this slice triggers pandas'
        # SettingWithCopyWarning; a .copy() would silence it.
        self.group_members_joined_df=self.group_members_df_all[['id','joined','visited']]
        self.group_members_joined_df['groupid']=self.group_id
        self.group_members_df_all=self.group_members_df_all.drop(['joined','visited'],axis=1)
        print(" Members Processed " + str(len(groupd_member_listDict)) + " Topics Found " + str(len(group_members_topics_list)))
        print(" Shape Transformed from " + str(self.group_members_org.shape) + " to " + str(self.group_members_df_all.shape))
        gc.collect()
    def WriteConvertedCSV(self):
        """Write the three converted frames as CSVs named by group id."""
        if(self.opfolder is None):
            self.opfolder='../DL/Data/CSVFormat/Members/Members_Groups/'
        self.group_members_df_all.to_csv(self.opfolder+str(self.group_id)+'_members_converted.csv',index=False)
        self.group_members_topics_df.to_csv(self.opfolder+str(self.group_id)+'_group_members_topics.csv',index=False)
        self.group_members_joined_df.to_csv(self.opfolder+str(self.group_id)+'_group_members.csv',index=False)
       # self.group_members_df_all = None
       # self.group_members_topics_df = None
       # self.group_members_joined_df = None
       # gc.collect()
| 33.807018 | 129 | 0.608459 |
fc68a491354252905993c0fc973d288671149384 | 8,266 | py | Python | Janaagraha Bot/venv/Lib/site-packages/google/api/config_change_pb2.py | CFGIndia20/team-19 | e2b27ad8009303d262c2dc60551d6fcc4645b3b5 | [
"MIT"
] | 11 | 2021-09-19T06:32:44.000Z | 2022-03-14T19:09:46.000Z | Janaagraha Bot/venv/Lib/site-packages/google/api/config_change_pb2.py | CFGIndia20/team-19 | e2b27ad8009303d262c2dc60551d6fcc4645b3b5 | [
"MIT"
] | null | null | null | Janaagraha Bot/venv/Lib/site-packages/google/api/config_change_pb2.py | CFGIndia20/team-19 | e2b27ad8009303d262c2dc60551d6fcc4645b3b5 | [
"MIT"
] | 2 | 2020-09-22T06:01:26.000Z | 2020-09-22T18:23:49.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/config_change.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/api/config_change.proto",
package="google.api",
syntax="proto3",
serialized_options=b"\n\016com.google.apiB\021ConfigChangeProtoP\001ZCgoogle.golang.org/genproto/googleapis/api/configchange;configchange\242\002\004GAPI",
serialized_pb=b'\n\x1egoogle/api/config_change.proto\x12\ngoogle.api"\x97\x01\n\x0c\x43onfigChange\x12\x0f\n\x07\x65lement\x18\x01 \x01(\t\x12\x11\n\told_value\x18\x02 \x01(\t\x12\x11\n\tnew_value\x18\x03 \x01(\t\x12+\n\x0b\x63hange_type\x18\x04 \x01(\x0e\x32\x16.google.api.ChangeType\x12#\n\x07\x61\x64vices\x18\x05 \x03(\x0b\x32\x12.google.api.Advice"\x1d\n\x06\x41\x64vice\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t*O\n\nChangeType\x12\x1b\n\x17\x43HANGE_TYPE_UNSPECIFIED\x10\x00\x12\t\n\x05\x41\x44\x44\x45\x44\x10\x01\x12\x0b\n\x07REMOVED\x10\x02\x12\x0c\n\x08MODIFIED\x10\x03\x42q\n\x0e\x63om.google.apiB\x11\x43onfigChangeProtoP\x01ZCgoogle.golang.org/genproto/googleapis/api/configchange;configchange\xa2\x02\x04GAPIb\x06proto3',
)
_CHANGETYPE = _descriptor.EnumDescriptor(
name="ChangeType",
full_name="google.api.ChangeType",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="CHANGE_TYPE_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="ADDED", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="REMOVED", index=2, number=2, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="MODIFIED", index=3, number=3, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=231,
serialized_end=310,
)
_sym_db.RegisterEnumDescriptor(_CHANGETYPE)
ChangeType = enum_type_wrapper.EnumTypeWrapper(_CHANGETYPE)
CHANGE_TYPE_UNSPECIFIED = 0
ADDED = 1
REMOVED = 2
MODIFIED = 3
_CONFIGCHANGE = _descriptor.Descriptor(
name="ConfigChange",
full_name="google.api.ConfigChange",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="element",
full_name="google.api.ConfigChange.element",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="old_value",
full_name="google.api.ConfigChange.old_value",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="new_value",
full_name="google.api.ConfigChange.new_value",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="change_type",
full_name="google.api.ConfigChange.change_type",
index=3,
number=4,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="advices",
full_name="google.api.ConfigChange.advices",
index=4,
number=5,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=47,
serialized_end=198,
)
_ADVICE = _descriptor.Descriptor(
name="Advice",
full_name="google.api.Advice",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="description",
full_name="google.api.Advice.description",
index=0,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=200,
serialized_end=229,
)
_CONFIGCHANGE.fields_by_name["change_type"].enum_type = _CHANGETYPE
_CONFIGCHANGE.fields_by_name["advices"].message_type = _ADVICE
DESCRIPTOR.message_types_by_name["ConfigChange"] = _CONFIGCHANGE
DESCRIPTOR.message_types_by_name["Advice"] = _ADVICE
DESCRIPTOR.enum_types_by_name["ChangeType"] = _CHANGETYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ConfigChange = _reflection.GeneratedProtocolMessageType(
"ConfigChange",
(_message.Message,),
{
"DESCRIPTOR": _CONFIGCHANGE,
"__module__": "google.api.config_change_pb2"
# @@protoc_insertion_point(class_scope:google.api.ConfigChange)
},
)
_sym_db.RegisterMessage(ConfigChange)
Advice = _reflection.GeneratedProtocolMessageType(
"Advice",
(_message.Message,),
{
"DESCRIPTOR": _ADVICE,
"__module__": "google.api.config_change_pb2"
# @@protoc_insertion_point(class_scope:google.api.Advice)
},
)
_sym_db.RegisterMessage(Advice)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 32.163424 | 746 | 0.636584 |
1613388897f445fe3c2b8c71ef94b4c6fdede377 | 3,264 | py | Python | glance/db/sqlalchemy/migrate_repo/schema.py | wkoathp/glance | eb0c47047ddc28371f546437118986ed904f41d3 | [
"Apache-2.0"
] | null | null | null | glance/db/sqlalchemy/migrate_repo/schema.py | wkoathp/glance | eb0c47047ddc28371f546437118986ed904f41d3 | [
"Apache-2.0"
] | null | null | null | glance/db/sqlalchemy/migrate_repo/schema.py | wkoathp/glance | eb0c47047ddc28371f546437118986ed904f41d3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Various conveniences used for migration scripts
"""
from oslo_log import log as logging
import sqlalchemy.types
from glance import i18n
LOG = logging.getLogger(__name__)
_LI = i18n._LI
# Thin factories over sqlalchemy.types so every migration script gets column
# types with identical, repo-wide settings. Defined as named functions rather
# than lambda assignments (PEP 8 / E731) so tracebacks carry useful names;
# call signatures and produced types are unchanged.


def String(length):
    """Return a non-unicode sqlalchemy String of the given length."""
    return sqlalchemy.types.String(
        length=length, convert_unicode=False,
        unicode_error=None, _warn_on_bytestring=False)


def Text():
    """Return a non-unicode, unbounded sqlalchemy Text type."""
    return sqlalchemy.types.Text(
        length=None, convert_unicode=False,
        unicode_error=None, _warn_on_bytestring=False)


def Boolean():
    """Return a sqlalchemy Boolean with an unnamed CHECK constraint."""
    return sqlalchemy.types.Boolean(create_constraint=True, name=None)


def DateTime():
    """Return a timezone-naive sqlalchemy DateTime."""
    return sqlalchemy.types.DateTime(timezone=False)


def Integer():
    """Return a sqlalchemy Integer."""
    return sqlalchemy.types.Integer()


def BigInteger():
    """Return a sqlalchemy BigInteger."""
    return sqlalchemy.types.BigInteger()


def PickleType():
    """Return a sqlalchemy PickleType."""
    return sqlalchemy.types.PickleType()


def Numeric():
    """Return a sqlalchemy Numeric."""
    return sqlalchemy.types.Numeric()
def from_migration_import(module_name, fromlist):
    """
    Import a migration file and return the requested attributes

    :param module_name: name of migration module to import from
    (ex: 001_add_images_table)
    :param fromlist: list of items to import (ex: define_images_table)
    :retval: list of the imported attributes

    This bit of ugliness warrants an explanation:

    As you're writing migrations, you'll frequently want to refer to
    tables defined in previous migrations.

    In the interest of not repeating yourself, you need a way of importing
    that table into a 'future' migration.

    However, tables are bound to metadata, so what you need to import is
    really a table factory, which you can late-bind to your current
    metadata object.

    Moreover, migrations begin with a number (001...), which means they
    aren't valid Python identifiers. This means we can't perform a
    'normal' import on them. Instead we import the module dynamically and
    pull the table factories out of it with getattr().

    Example Usage:

        (define_images_table,) = from_migration_import(
            '001_add_images_table', ['define_images_table'])

        images = define_images_table(meta)

        # Refer to images table
    """
    # importlib.import_module replaces the previous
    # __import__(path, globals(), locals(), fromlist, -1) call: the
    # level=-1 argument is Python-2-only and raises ValueError on Python 3,
    # while import_module performs the same absolute import on both.
    import importlib

    module_path = 'glance.db.sqlalchemy.migrate_repo.versions.%s' % module_name
    module = importlib.import_module(module_path)
    return [getattr(module, item) for item in fromlist]
def create_tables(tables):
    """Create every table in ``tables``, logging one line per table."""
    msg = _LI("creating table %(table)s")
    for tbl in tables:
        LOG.info(msg % {'table': tbl})
        tbl.create()
def drop_tables(tables):
    """Drop every table in ``tables``, logging one line per table."""
    msg = _LI("dropping table %(table)s")
    for tbl in tables:
        LOG.info(msg % {'table': tbl})
        tbl.drop()
| 29.944954 | 79 | 0.710172 |
3795017fe3998db49a9495c74dfa523706cb0427 | 2,296 | py | Python | firmware/scripts/verify_signature.py | jreesun/krux-wallet | bdb88dabd28df276b4d9deea8afc23ed7dd2b5b4 | [
"MIT"
] | 21 | 2021-07-28T12:00:58.000Z | 2022-01-27T01:59:12.000Z | firmware/scripts/verify_signature.py | jreesun/krux | bdb88dabd28df276b4d9deea8afc23ed7dd2b5b4 | [
"MIT"
] | 69 | 2021-07-28T09:17:15.000Z | 2022-03-17T04:49:16.000Z | firmware/scripts/verify_signature.py | jreesun/krux-wallet | bdb88dabd28df276b4d9deea8afc23ed7dd2b5b4 | [
"MIT"
] | 11 | 2021-07-28T15:34:58.000Z | 2021-12-08T13:37:33.000Z | # The MIT License (MIT)
# Copyright (c) 2021 Tom J. Sun
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import hashlib
import binascii
from embit.util import secp256k1
# Pulled from firmware.py
def fsize(firmware_filename):
    """Return the size in bytes of the firmware file, counted by streaming.

    Reads unbuffered in 128-byte chunks, mirroring how the device-side
    firmware code walks the file.
    """
    total = 0
    with open(firmware_filename, 'rb', buffering=0) as fw:
        chunk = fw.read(128)
        while chunk:
            total += len(chunk)
            chunk = fw.read(128)
    return total
# Pulled from firmware.py
def sha256(firmware_filename, firmware_size):
    """Return the SHA-256 digest of the firmware image.

    The hash covers a 5-byte header (0x00 followed by the little-endian
    32-bit firmware size) and then the raw file contents, streamed
    unbuffered in 128-byte chunks.
    """
    digest = hashlib.sha256()
    digest.update(b'\x00')
    digest.update(firmware_size.to_bytes(4, 'little'))
    with open(firmware_filename, 'rb', buffering=0) as fw:
        chunk = fw.read(128)
        while chunk:
            digest.update(chunk)
            chunk = fw.read(128)
    return digest.digest()
# CLI usage: verify_signature.py <signature_file> <firmware_file> <pubkey_hex>
if len(sys.argv) != 4:
    sys.exit('All arguments must be provided')
sig = open(sys.argv[1], 'rb').read()
firmware_path = sys.argv[2]
# The public key arrives as a hex-encoded serialized secp256k1 point.
pubkey = secp256k1.ec_pubkey_parse(binascii.unhexlify(sys.argv[3]))
# The signed message is sha256 over (0x00 + LE32 size + firmware bytes),
# matching how the firmware loader hashes the image (see sha256/fsize above).
if not secp256k1.ecdsa_verify(sig, sha256(firmware_path, fsize(firmware_path)), pubkey):
    print('Bad signature')
else:
    print('ok')
| 36.444444 | 88 | 0.707753 |
106659a0baf07c29ae002744b35229be061e2ac1 | 8,835 | py | Python | ffn/training/model.py | drewlinsley/ffn_membrane | 4b4638c00eed847fa6a7958a7fdbeedca4236561 | [
"Apache-2.0"
] | 1 | 2019-07-30T11:28:47.000Z | 2019-07-30T11:28:47.000Z | ffn/training/model.py | drewlinsley/ffn_membrane | 4b4638c00eed847fa6a7958a7fdbeedca4236561 | [
"Apache-2.0"
] | null | null | null | ffn/training/model.py | drewlinsley/ffn_membrane | 4b4638c00eed847fa6a7958a7fdbeedca4236561 | [
"Apache-2.0"
] | 1 | 2019-04-17T07:56:28.000Z | 2019-04-17T07:56:28.000Z | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for FFN model definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.util import deprecation
from . import optimizer_functional
class FFNModel(object):
"""Base class for FFN models."""
# Dimensionality of the model (2 or 3).
dim = None
############################################################################
# (x, y, z) tuples defining various properties of the network.
# Note that 3-tuples should be used even for 2D networks, in which case
# the third (z) value is ignored.
# How far to move the field of view in the respective directions.
deltas = None
# Size of the input image and seed subvolumes to be used during inference.
# This is enough information to execute a single prediction step, without
# moving the field of view.
input_image_size = None
input_seed_size = None
# Size of the predicted patch as returned by the model.
pred_mask_size = None
###########################################################################
# TF op to compute loss optimized during training. This should include all
# loss components in case more than just the pixelwise loss is used.
loss = None
# TF op to call to perform loss optimization on the model.
train_op = None
  def __init__(self, deltas, batch_size=None, with_membrane=True, validation_mode=True, tag=''):
    """Creates placeholders and bookkeeping state shared by FFN models.

    Args:
      deltas: (x, y, z) FOV step sizes used to build the shift collection.
      batch_size: optional fixed batch size used for input shape inference.
      with_membrane: accepted but currently overridden to True below.
      validation_mode: accepted but currently overridden to True below.
      tag: suffix appended to placeholder/variable names so several model
        instances can coexist in one graph.
    """
    assert self.dim is not None
    self.deltas = deltas
    self.batch_size = batch_size
    # NOTE(review): both constructor arguments are ignored and the flags are
    # pinned to True; the trailing comments suggest a deliberate override —
    # confirm before relying on the parameters.
    self.with_membrane=True # with_membrane
    self.validation_mode=True # validation_mode
    # Initialize the shift collection. This is used during training with the
    # fixed step size policy.
    self.shifts = []
    for dx in (-self.deltas[0], 0, self.deltas[0]):
      for dy in (-self.deltas[1], 0, self.deltas[1]):
        for dz in (-self.deltas[2], 0, self.deltas[2]):
          if dx == 0 and dy == 0 and dz == 0:
            continue
          self.shifts.append((dx, dy, dz))
    # Mask identifying valid examples within the batch. Only valid examples
    # contribute to the loss and see object mask updates
    self.offset_label = tf.placeholder(tf.string, name='offset_label%s' % tag) #TODO(jk) removed because causes unknown error
    # Only the untagged (primary) instance owns the global step counter.
    if not len(tag):
        self.global_step = tf.Variable(0, name='global_step%s' % tag, trainable=False)
    # The seed is always a placeholder which is fed externally from the
    # training/inference drivers.
    self.input_seed = tf.placeholder(tf.float32, name='seed%s' % tag)
    self.input_patches = tf.placeholder(tf.float32, name='patches%s' % tag)
    # For training, labels should be defined as a TF object.
    self.labels = None
    # Optional. Provides per-pixel weights with which the loss is multiplied.
    # If specified, should have the same shape as self.labels.
    self.loss_weights = None
    # List of image tensors to save in summaries. The images are concatenated
    # along the X axis.
    self._images = []
def set_uniform_io_size(self, patch_size, optional_output_size=None):
    """Initializes unset input/output sizes to 'patch_size', sets input shapes.

    This assumes that the inputs and outputs are of equal size, and that
    exactly one step is executed in every direction during training.

    Args:
        patch_size: (x, y, z) specifying the input/output patch size
        optional_output_size: unused; kept for interface compatibility.

    Returns:
        None
    """
    # Fill in any size that the subclass did not set explicitly.
    for size_attr in ('pred_mask_size', 'input_seed_size', 'input_image_size'):
        if getattr(self, size_attr) is None:
            setattr(self, size_attr, patch_size)
    self.set_input_shapes()
def set_input_shapes(self):
    """Sets the shape inference for input_seed and input_patches.

    Assumes input_seed_size and input_image_size are already set.
    The (x, y, z) size tuples are reversed into [z, y, x] order and the
    shapes become [batch, z, y, x, channels]; patches get 2 channels when
    with_membrane is set (presumably raw + membrane -- TODO confirm),
    otherwise 1.
    """
    # Seed always has a single channel.
    self.input_seed.set_shape([self.batch_size] +
                              list(self.input_seed_size[::-1]) + [1])
    if self.with_membrane:
        self.input_patches.set_shape([self.batch_size] +
                                     list(self.input_image_size[::-1]) + [2])
    else:
        self.input_patches.set_shape([self.batch_size] +
                                     list(self.input_image_size[::-1]) + [1])
def set_up_sigmoid_pixelwise_loss(self, logits, return_logits=False):
    """Sets up the loss function of the model.

    Args:
        logits: pre-sigmoid prediction tensor, same shape as self.labels.
        return_logits: if True, the (unchanged) logits are returned.

    Requires self.labels and self.loss_weights to be set beforehand.
    """
    assert self.labels is not None
    assert self.loss_weights is not None
    # Per-voxel sigmoid cross-entropy, reweighted before the mean so only
    # the intended voxels/examples contribute to the loss.
    pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,
                                                         labels=self.labels)
    pixel_loss *= self.loss_weights
    self.loss = tf.reduce_mean(pixel_loss)
    tf.summary.scalar('pixel_loss', self.loss)
    # Fail fast on NaN/Inf instead of silently corrupting training.
    self.loss = tf.verify_tensor_all_finite(self.loss, 'Invalid loss detected')
    if return_logits:
        return logits
def set_up_optimizer(self, TA=None, loss=None, max_gradient_entry_mag=0.7):
    """Sets up the training op for the model.

    Args:
        TA: optional config object forwarded to the functional optimizer
            factory; when None, the flag-based factory is used.
        loss: loss tensor to optimize; defaults to self.loss.
        max_gradient_entry_mag: element-wise gradient clipping bound;
            a value <= 0 disables clipping.

    In validation mode no optimizer is built; train_op merely increments
    the global step.
    """
    if loss is None:
        loss = self.loss
    tf.summary.scalar('optimizer_loss', self.loss)
    if not self.validation_mode:
        if TA is None:
            from . import optimizer
            opt = optimizer.optimizer_from_flags()
        else:
            opt = optimizer_functional.optimizer_from_flags(TA)  # TODO(jk)
        grads_and_vars = opt.compute_gradients(loss)
        # Surface variables that received no gradient (disconnected graph).
        for g, v in grads_and_vars:
            if g is None:
                tf.logging.error('Gradient is None: %s', v.op.name)
        if max_gradient_entry_mag > 0.0:
            print('Clip gradient: ON')
            # Clip each gradient entry into [-mag, +mag].
            grads_and_vars = [(tf.clip_by_value(g,
                                                -max_gradient_entry_mag,
                                                +max_gradient_entry_mag), v)
                              for g, v, in grads_and_vars]
        # TODO(b/34707785): Hopefully remove need for these deprecated calls. Let
        # one warning through so that we have some (low) possibility of noticing if
        # the message changes.
        # TODO(jk): removed summary gradients for speed
        # trainables = tf.trainable_variables()
        # if trainables:
        #     var = trainables[0]
        #     tf.contrib.deprecated.histogram_summary(var.op.name, var)
        #     with deprecation.silence():
        #         for var in trainables[1:]:
        #             tf.contrib.deprecated.histogram_summary(var.op.name, var)
        # for grad, var in grads_and_vars:
        #     tf.contrib.deprecated.histogram_summary(
        #         'gradients/' + var.op.name, grad)
        # NOTE(review): self.global_step is only created when tag == '' in
        # __init__ -- confirm tagged models never call this in training mode.
        self.train_op = opt.apply_gradients(grads_and_vars,
                                            global_step=self.global_step,
                                            name='train')
    else:
        # Validation: just advance the step counter.
        self.train_op = tf.assign_add(self.global_step, 1)
def show_center_slice(self, image, sigmoid=True):
    """Queues the central slice (along axis 1) of 'image' for summaries."""
    mid = image.get_shape().dims[1] // 2
    center = image[:, mid, :, :, :]
    if sigmoid:
        center = tf.sigmoid(center)
    self._images.append(center)
def add_summaries(self, max_images=4):
    """Emits an image summary of the queued slices, concatenated along X.

    NOTE(review): tf.contrib.deprecated only exists in TF 1.x; this call
    will fail under TF 2.x -- confirm the pinned TF version.
    """
    # Original per-offset summary kept for reference:
    # tf.contrib.deprecated.image_summary(
    #     'state/' + self.offset_label, tf.concat(self._images, 2),
    #     max_images=max_images)
    tf.contrib.deprecated.image_summary(
        'state', tf.concat(self._images, 2),
        max_images=max_images)
def update_seed(self, seed, update):
    """Updates the initial 'seed' with 'update'.

    When the predicted patch is smaller than the seed patch, the update is
    zero-padded (centered) up to the seed size before being added.
    """
    size_diffs = [self.input_seed_size[i] - self.pred_mask_size[i]
                  for i in range(3)]
    dx, dy, dz = size_diffs
    if not any(size_diffs):
        seed += update
    else:
        # Pad order is [batch, z, y, x, channel]; split odd diffs so the
        # larger half goes after the patch.
        paddings = [[0, 0],
                    [dz // 2, dz - dz // 2],
                    [dy // 2, dy - dy // 2],
                    [dx // 2, dx - dx // 2],
                    [0, 0]]
        seed += tf.pad(update, paddings)
    return seed
def define_tf_graph(self):
    """Creates the TensorFlow graph representing the model.

    Subclasses must override this; when self.labels is not None, the
    implementation should also build loss and optimization ops.
    """
    raise NotImplementedError(
        'DefineTFGraph needs to be defined by a subclass.')
| 38.246753 | 125 | 0.634748 |
73f55c5c81b4c5ed39e8ab79fec3141da72a0d8d | 5,279 | py | Python | venv/Lib/site-packages/pusher/pusher_client.py | wailord90/Capstone | 5d82339e31905f5c7ef3131f15526d9bc29f0a98 | [
"Apache-2.0"
] | 1 | 2019-03-14T19:29:55.000Z | 2019-03-14T19:29:55.000Z | venv/Lib/site-packages/pusher/pusher_client.py | wailord90/Capstone | 5d82339e31905f5c7ef3131f15526d9bc29f0a98 | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/pusher/pusher_client.py | wailord90/Capstone | 5d82339e31905f5c7ef3131f15526d9bc29f0a98 | [
"Apache-2.0"
] | 2 | 2019-02-09T21:13:50.000Z | 2019-03-14T21:03:52.000Z | # -*- coding: utf-8 -*-
from __future__ import (
print_function,
unicode_literals,
absolute_import,
division)
import collections
import collections.abc
import hashlib
import json
import os
import random
import re
import string
import time
from datetime import datetime

import six

from pusher.client import Client
from pusher.crypto import *
from pusher.http import GET, POST, Request, request_method
from pusher.util import (
    ensure_text,
    validate_channel,
    validate_socket_id,
    join_attributes,
    data_to_string)
class PusherClient(Client):
    """Pusher HTTP API client: triggers events and queries channel/user state."""

    def __init__(
            self, app_id, key, secret, ssl=True,
            host=None, port=None,
            timeout=5, cluster=None,
            encryption_master_key=None,
            json_encoder=None, json_decoder=None,
            backend=None, **backend_options):
        super(PusherClient, self).__init__(
            app_id, key, secret, ssl, host, port, timeout, cluster,
            encryption_master_key, json_encoder, json_decoder,
            backend, **backend_options)

        # Host precedence: explicit host > cluster endpoint > default.
        if host:
            self._host = ensure_text(host, "host")
        elif cluster:
            self._host = (
                six.text_type("api-%s.pusher.com") %
                ensure_text(cluster, "cluster"))
        else:
            self._host = six.text_type("api.pusherapp.com")

    @request_method
    def trigger(self, channels, event_name, data, socket_id=None):
        """Trigger an event on one or more channels, see:
        http://pusher.com/docs/rest_api#method-post-event

        Raises:
            TypeError: if channels is not a string or a sized iterable.
            ValueError: on API limits (channel count, name/data length) or
                when triggering multiple encrypted channels at once.
        """
        if isinstance(channels, six.string_types):
            channels = [channels]

        # FIX: collections.Sized / collections.Iterable aliases were removed
        # in Python 3.10; the ABCs live in collections.abc.
        if isinstance(channels, dict) or not isinstance(
                channels, (collections.abc.Sized, collections.abc.Iterable)):
            raise TypeError("Expected a single or a list of channels")

        if len(channels) > 100:
            raise ValueError("Too many channels")

        event_name = ensure_text(event_name, "event_name")
        if len(event_name) > 200:
            raise ValueError("event_name too long")

        data = data_to_string(data, self._json_encoder)
        if len(data) > 10240:
            raise ValueError("Too much data")

        channels = list(map(validate_channel, channels))

        if len(channels) > 1:
            for chan in channels:
                if is_encrypted_channel(chan):
                    raise ValueError("You cannot trigger to multiple channels when using encrypted channels")

        if is_encrypted_channel(channels[0]):
            data = json.dumps(encrypt(channels[0], data, self._encryption_master_key), ensure_ascii=False)

        params = {
            'name': event_name,
            'channels': channels,
            'data': data}

        if socket_id:
            params['socket_id'] = validate_socket_id(socket_id)

        return Request(self, POST, "/apps/%s/events" % self.app_id, params)

    @request_method
    def trigger_batch(self, batch=None, already_encoded=False):
        """Trigger multiple events with a single HTTP call.
        http://pusher.com/docs/rest_api#method-post-batch-events
        """
        # FIX: avoid the mutable default argument (batch=[]); None sentinel
        # preserves behavior for all existing callers.
        if batch is None:
            batch = []

        if not already_encoded:
            for event in batch:
                validate_channel(event['channel'])

                event_name = ensure_text(event['name'], "event_name")
                if len(event['name']) > 200:
                    raise ValueError("event_name too long")

                event['data'] = data_to_string(event['data'], self._json_encoder)
                if len(event['data']) > 10240:
                    raise ValueError("Too much data")

                if is_encrypted_channel(event['channel']):
                    event['data'] = json.dumps(encrypt(event['channel'], event['data'], self._encryption_master_key), ensure_ascii=False)

        params = {
            'batch': batch}

        return Request(
            self, POST, "/apps/%s/batch_events" % self.app_id, params)

    @request_method
    def channels_info(self, prefix_filter=None, attributes=[]):
        """Get information on multiple channels, see:
        http://pusher.com/docs/rest_api#method-get-channels
        """
        params = {}
        if attributes:
            params['info'] = join_attributes(attributes)

        if prefix_filter:
            params['filter_by_prefix'] = ensure_text(
                prefix_filter, "prefix_filter")

        return Request(
            self, GET, six.text_type("/apps/%s/channels") % self.app_id, params)

    @request_method
    def channel_info(self, channel, attributes=[]):
        """Get information on a specific channel, see:
        http://pusher.com/docs/rest_api#method-get-channel
        """
        validate_channel(channel)

        params = {}
        if attributes:
            params['info'] = join_attributes(attributes)

        return Request(
            self, GET, "/apps/%s/channels/%s" % (self.app_id, channel), params)

    @request_method
    def users_info(self, channel):
        """Fetch user ids currently subscribed to a presence channel
        http://pusher.com/docs/rest_api#method-get-users
        """
        validate_channel(channel)

        return Request(
            self, GET, "/apps/%s/channels/%s/users" % (self.app_id, channel))
| 29.994318 | 137 | 0.604092 |
88d05f9a42838c78ad8ffbe33fa253a2419c7d51 | 1,997 | py | Python | meilisearch/tests/index/test_index_wait_for_pending_update.py | alallema/meilisearch-python | 866e9e2089aa08abd9a03dced03134f21bd8abda | [
"MIT"
] | null | null | null | meilisearch/tests/index/test_index_wait_for_pending_update.py | alallema/meilisearch-python | 866e9e2089aa08abd9a03dced03134f21bd8abda | [
"MIT"
] | null | null | null | meilisearch/tests/index/test_index_wait_for_pending_update.py | alallema/meilisearch-python | 866e9e2089aa08abd9a03dced03134f21bd8abda | [
"MIT"
] | null | null | null | # pylint: disable=invalid-name
from datetime import datetime
import pytest
from meilisearch.errors import MeiliSearchTimeoutError
def test_wait_for_pending_update_default(index_with_documents):
    """Tests waiting for an update with default parameters."""
    index = index_with_documents()
    enqueued = index.add_documents([{'id': 1, 'title': 'Le Petit Prince'}])
    assert 'updateId' in enqueued
    result = index.wait_for_pending_update(enqueued['updateId'])
    assert isinstance(result, dict)
    assert 'status' in result
    assert result['status'] != 'enqueued'
def test_wait_for_pending_update_timeout(index_with_documents):
    """Tests timeout risen by waiting for an update."""
    index = index_with_documents()
    with pytest.raises(MeiliSearchTimeoutError):
        index.wait_for_pending_update(2, timeout_in_ms=0)
def test_wait_for_pending_update_interval_custom(index_with_documents, small_movies):
    """Tests call to wait for an update with custom interval."""
    index = index_with_documents()
    enqueued = index.add_documents(small_movies)
    assert 'updateId' in enqueued
    started_at = datetime.now()
    result = index.wait_for_pending_update(
        enqueued['updateId'],
        interval_in_ms=1000,
        timeout_in_ms=6000
    )
    elapsed = datetime.now() - started_at
    assert isinstance(result, dict)
    assert 'status' in result
    assert result['status'] != 'enqueued'
    # A 1s polling interval means at least one full second must elapse.
    assert elapsed.seconds >= 1
def test_wait_for_pending_update_interval_zero(index_with_documents, small_movies):
    """Tests call to wait for an update with custom interval."""
    index = index_with_documents()
    enqueued = index.add_documents(small_movies)
    assert 'updateId' in enqueued
    result = index.wait_for_pending_update(
        enqueued['updateId'],
        interval_in_ms=0,
        timeout_in_ms=6000
    )
    assert isinstance(result, dict)
    assert 'status' in result
    assert result['status'] != 'enqueued'
| 38.403846 | 85 | 0.736605 |
c135af59bbaa4c6a66cf58230862a7a3726071d0 | 8,752 | py | Python | parsl/tests/conftest.py | benclifford/parsl | 21f8681882779050d2e074591e95ada43789748f | [
"Apache-2.0"
] | 2 | 2019-02-25T16:43:30.000Z | 2019-03-04T17:25:00.000Z | parsl/tests/conftest.py | benclifford/parsl | 21f8681882779050d2e074591e95ada43789748f | [
"Apache-2.0"
] | null | null | null | parsl/tests/conftest.py | benclifford/parsl | 21f8681882779050d2e074591e95ada43789748f | [
"Apache-2.0"
] | 2 | 2019-04-30T13:46:23.000Z | 2019-06-04T16:14:46.000Z | import importlib.util
import logging
import os
import shutil
import subprocess
from glob import glob
from itertools import chain
import pytest
import _pytest.runner as runner
from pytest_forked import forked_run_report
import parsl
from parsl.dataflow.dflow import DataFlowKernelLoader
from parsl.tests.utils import get_rundir
logger = logging.getLogger('parsl')
def pytest_addoption(parser):
    """Add parsl-specific command-line options to pytest.
    """
    parser.addoption(
        '--configs', action='store', metavar='CONFIG', nargs='*',
        help="only run parsl CONFIG; use 'local' to run locally-defined config")
    parser.addoption(
        '--basic', action='store_true', default=False,
        help='only run basic configs (local, local_ipp and local_threads)')
def pytest_configure(config):
    """Configure help for parsl-specific pytest decorators.

    This help is returned by `pytest --markers`.
    """
    marker_help = (
        'whitelist(config1, config2, ..., reason=None): mark test to run only on named configs. '
        'Wildcards (*) are accepted. If `reason` is supplied, it will be included in the report.',
        'blacklist(config1, config2, ..., reason=None): mark test to skip named configs. '
        'Wildcards (*) are accepted. If `reason` is supplied, it will be included in the report.',
        'local: mark test to only run locally-defined config.',
        'local(reason): mark test to only run locally-defined config; report will include supplied reason.',
        'forked: mark test to only run in a subprocess',
        'cleannet: Enable tests that require a clean network connection (such as for testing FTP)',
    )
    for text in marker_help:
        config.addinivalue_line('markers', text)
def pytest_generate_tests(metafunc):
"""Assemble the list of configs to test.
"""
config_dir = os.path.join(os.path.dirname(__file__), 'configs')
configs = metafunc.config.getoption('configs')
basic = metafunc.config.getoption('basic')
if basic:
configs = ['local'] + [os.path.join(config_dir, x) for x in ['local_threads.py', 'local_ipp.py']]
elif configs is None:
configs = ['local']
for dirpath, _, filenames in os.walk(config_dir):
for fn in filenames:
path = os.path.join(dirpath, fn)
if ('pycache' not in path) and path.endswith('.py'):
configs += [path]
metafunc.parametrize('config', configs, scope='session')
@pytest.fixture(scope='session')
def setup_docker():
    """Set up containers for docker tests.

    Rather than installing Parsl from PyPI, the current state of the source is
    copied into the container. In this way we ensure that what we are testing
    stays synced with the current state of the code.

    No-op when the docker CLI is not on PATH.
    """
    if shutil.which('docker') is not None:
        subprocess.call(['docker', 'pull', 'python'])
        # Repository root (two levels above the parsl package).
        pdir = os.path.join(os.path.dirname(os.path.dirname(parsl.__file__)))

        # Dockerfile template; {add} lets per-app images copy in an extra script.
        template = """
FROM python:3.6
WORKDIR {home}
COPY ./parsl .
COPY ./requirements.txt .
COPY ./setup.py .
RUN python3 setup.py install
{add}
"""
        with open(os.path.join(pdir, 'docker', 'Dockerfile'), 'w') as f:
            print(template.format(home=os.environ['HOME'], add=''), file=f)
        # Base image with parsl installed from the current source tree.
        cmd = ['docker', 'build', '-t', 'parslbase_v0.1', '-f', 'docker/Dockerfile', '.']
        subprocess.call(cmd, cwd=pdir)

        # Per-app images add the app's entry script into $HOME.
        for app in ['app1', 'app2']:
            with open(os.path.join(pdir, 'docker', app, 'Dockerfile'), 'w') as f:
                add = 'ADD ./docker/{}/{}.py {}'.format(app, app, os.environ['HOME'])
                print(template.format(home=os.environ['HOME'], add=add), file=f)
            cmd = ['docker', 'build', '-t', '{}_v0.1'.format(app), '-f', 'docker/{}/Dockerfile'.format(app), '.']
            subprocess.call(cmd, cwd=pdir)
@pytest.fixture(autouse=True)
def load_dfk(config):
    """Load the dfk before running a test.

    The special path `local` indicates that whatever configuration is loaded
    locally in the test should not be replaced. Otherwise, it is expected that
    the supplied file contains a dictionary called `config`, which will be
    loaded before the test runs.

    Args:
        config (str) : path to config to load (this is a parameterized pytest fixture)
    """
    if config != 'local':
        # Import the config module directly from its file path.
        spec = importlib.util.spec_from_file_location('', config)
        try:
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            module.config.run_dir = get_rundir()  # Give unique rundir; needed running with -n=X where X > 1.

            # Guard against DFK leakage from a previous test.
            if DataFlowKernelLoader._dfk is not None:
                raise ValueError("DFK didn't start as None - there was a DFK from somewhere already")

            parsl.clear()
            dfk = parsl.load(module.config)

            yield

            # Teardown: the test must not have swapped the DFK behind our back.
            if(parsl.dfk() != dfk):
                raise ValueError("DFK changed unexpectedly during test")
            dfk.cleanup()
            parsl.clear()
        except KeyError:
            # Raised by config modules that read unset user_opts.py entries.
            pytest.skip('options in user_opts.py not configured for {}'.format(config))
    else:
        yield
@pytest.fixture(autouse=True)
def apply_masks(request):
    """Apply whitelist, blacklist, and local markers.

    These ensure that if a whitelist decorator is applied to a test, configs
    which are not in the whitelist are skipped. Similarly, configs in a
    blacklist are skipped, and configs which are not `local` are skipped if
    the `local` decorator is applied.
    """
    config = request.getfixturevalue('config')
    # FIX: Node.get_marker was deprecated and then removed in pytest 4;
    # use get_closest_marker consistently (the 'local' branch already did).
    m = request.node.get_closest_marker('whitelist')
    if m is not None:
        if os.path.abspath(config) not in chain.from_iterable([glob(x) for x in m.args]):
            if 'reason' not in m.kwargs:
                pytest.skip("config '{}' not in whitelist".format(config))
            else:
                pytest.skip(m.kwargs['reason'])
    m = request.node.get_closest_marker('blacklist')
    if m is not None:
        if os.path.abspath(config) in chain.from_iterable([glob(x) for x in m.args]):
            if 'reason' not in m.kwargs:
                pytest.skip("config '{}' is in blacklist".format(config))
            else:
                pytest.skip(m.kwargs['reason'])
    m = request.node.get_closest_marker('local')
    if m is not None:  # is marked as local
        if config != 'local':
            if len(m.args) == 0:
                pytest.skip('skipping non-local config')
            else:
                pytest.skip(m.args[0])
    else:  # is not marked as local
        if config == 'local':
            pytest.skip('skipping local config')
@pytest.fixture()
def setup_data():
    """Create the small data files some tests read.

    Writes data/test1.txt and data/test2.txt relative to the working
    directory. No teardown: the files are left in place.
    """
    # FIX: removed the redundant local `import os` -- os is already
    # imported at module level.
    if not os.path.isdir('data'):
        os.mkdir('data')
    with open("data/test1.txt", 'w') as f:
        f.write("1\n")
    with open("data/test2.txt", 'w') as f:
        f.write("2\n")
@pytest.mark.tryfirst
def pytest_runtest_protocol(item):
    """Run tests marked 'forked' in a forked subprocess.

    Returning True tells pytest the run protocol was handled here; for
    unmarked tests the implicit None falls through to the default runner.
    """
    if 'forked' in item.keywords:
        reports = forked_run_report(item)
        for rep in reports:
            item.ihook.pytest_runtest_logreport(report=rep)
        return True
def pytest_make_collect_report(collector):
    """Build the collection report, downgrading config KeyErrors to skips.

    A KeyError during collection is assumed to come from an unconfigured
    entry in user_opts.py and is reported as a skip with a helpful message;
    skip exceptions and other errors follow stock pytest behavior.
    """
    call = runner.CallInfo(lambda: list(collector.collect()), 'collect')
    longrepr = None
    if not call.excinfo:
        outcome = "passed"
    else:
        from _pytest import nose
        from _pytest.outcomes import Skipped
        skip_exceptions = (Skipped,) + nose.get_skip_exceptions()
        if call.excinfo.errisinstance(KeyError):
            # Missing user_opts.py entry: skip instead of failing collection.
            outcome = "skipped"
            r = collector._repr_failure_py(call.excinfo, "line").reprcrash
            # The missing key is the last token of the crash message.
            message = "{} not configured in user_opts.py".format(r.message.split()[-1])
            longrepr = (str(r.path), r.lineno, message)
        elif call.excinfo.errisinstance(skip_exceptions):
            outcome = "skipped"
            r = collector._repr_failure_py(call.excinfo, "line").reprcrash
            longrepr = (str(r.path), r.lineno, r.message)
        else:
            outcome = "failed"
            errorinfo = collector.repr_failure(call.excinfo)
            if not hasattr(errorinfo, "toterminal"):
                errorinfo = runner.CollectErrorRepr(errorinfo)
            longrepr = errorinfo
    rep = runner.CollectReport(collector.nodeid, outcome, longrepr, getattr(call, 'result', None))
    rep.call = call  # see collect_one_node
    return rep
| 36.466667 | 121 | 0.621458 |
78adfd233ce27f3183b59b0f01d9d05073b86e48 | 376 | py | Python | tests/misc/helper_tests.py | jack3343/xrd-core | 48a6d890d62485c627060b017eadf85602268caf | [
"MIT"
] | null | null | null | tests/misc/helper_tests.py | jack3343/xrd-core | 48a6d890d62485c627060b017eadf85602268caf | [
"MIT"
] | null | null | null | tests/misc/helper_tests.py | jack3343/xrd-core | 48a6d890d62485c627060b017eadf85602268caf | [
"MIT"
] | null | null | null | # coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from unittest import TestCase
from xrd.core.misc import logger
logger.initialize_default()
class TestHelpers(TestCase):
    """Empty helper test-case shell; construction is forwarded to TestCase."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
f7f0aa85b69dac8cab67a45a04e8000589e5e735 | 30,004 | py | Python | backend/apps/datamodel/models.py | gabrielmbmb/intry4.0-backend | 1ac28ad333c93ed6069dc2db4d9fe3a12b8a1b24 | [
"MIT"
] | null | null | null | backend/apps/datamodel/models.py | gabrielmbmb/intry4.0-backend | 1ac28ad333c93ed6069dc2db4d9fe3a12b8a1b24 | [
"MIT"
] | 5 | 2021-03-30T15:05:05.000Z | 2021-09-22T19:41:51.000Z | backend/apps/datamodel/models.py | gabrielmbmb/intry4.0-backend | 1ac28ad333c93ed6069dc2db4d9fe3a12b8a1b24 | [
"MIT"
] | null | null | null | import io
import uuid
import pytz
import json
import logging
import pandas as pd
from constance import config
from django.db import models
from django.contrib.postgres.fields import ArrayField, JSONField
from django.core.validators import (
int_list_validator,
MinValueValidator,
)
from django.db.models.signals import pre_delete
from datetime import datetime
from backend.apps.core import clients
logger = logging.getLogger(__name__)
NOT_ATTRIBUTES_KEYS_SUBSCRIPTION = ["id", "type", "TimeInstant"]
class DataModel(models.Model):
"""Class which holds everything related to a Blackbox Anomaly Detection model."""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=128, help_text="Model name")
is_training = models.BooleanField(
help_text="Wether the model is being trained or not", default=False,
)
trained = models.BooleanField(
help_text="Wether the model is trained or not", default=False
)
deployed = models.BooleanField(
help_text="Wether the model is deployed or not", default=False
)
date_trained = models.DateTimeField(
help_text="Date the model was trained", default=None, blank=True, null=True
)
date_deployed = models.DateTimeField(
help_text="Date the model was deployed", default=None, blank=True, null=True
)
num_predictions = models.IntegerField(
help_text="Number of predictions made by this model", default=0
)
task_status = models.CharField(
help_text="URL to get the progress of training process",
null=True,
blank=True,
max_length=512,
)
# sensors
plcs = JSONField()
contamination = models.FloatField(
help_text="Contamination fraction in the training dataset",
default=0.1,
validators=[MinValueValidator(0.0)],
null=True,
blank=True,
)
scaler = models.CharField(
help_text="The scaler used to scale the data before training and predicting",
default="minmax",
max_length=48,
null=True,
blank=True,
)
# PCA Mahalanobis
pca_mahalanobis = models.BooleanField(null=True, blank=True, default=False)
n_components = models.IntegerField(
help_text="Numbers of components for the PCA algorithm",
default=2,
validators=[MinValueValidator(1)],
null=True,
blank=True,
)
# Autoencoder
autoencoder = models.BooleanField(null=True, blank=True, default=False)
hidden_neurons = models.CharField(
help_text="Neural Network layers and the number of neurons in each layer",
validators=[
int_list_validator(
sep=",",
message="It should be a string with a list of integers separeted by a comma",
allow_negative=False,
)
],
default="32,16,16,32",
max_length=128,
null=True,
blank=True,
)
dropout_rate = models.FloatField(
help_text="Dropout rate across all the layers of the Neural Network",
default=0.2,
null=True,
blank=True,
)
activation = models.CharField(
help_text="Layers activation function of Neural Network",
choices=[
("elu", "elu"),
("softmax", "softmax"),
("selu", "selu"),
("softplus", "softplus"),
("softsign", "softsign"),
("relu", "relu"),
("tanh", "tanh"),
("sigmoid", "sigmoid"),
("hard_sigmoid", "hard_sigmoid"),
("exponential", "exponential"),
],
default="elu",
max_length=24,
null=True,
blank=True,
)
kernel_initializer = models.CharField(
help_text="Layers kernel initializer of Neural Network",
choices=[
("Zeros", "Zeros"),
("Ones", "Ones"),
("Constant", "Constant"),
("RandomNormal", "RandomNormal"),
("RandomUniform", "RandomUniform"),
("TruncatedNormal", "TruncatedNormal"),
("VarianceScaling", "VarianceScaling"),
("Orthogonal", "Orthogonal"),
("Identity", "Identity"),
("lecun_uniform", "lecun_uniform"),
("glorot_normal", "glorot_normal"),
("glorot_uniform", "glorot_uniform"),
("he_normal", "he_normal"),
("lecun_normal", "lecun_normal"),
("he_uniform", "he_uniform"),
],
default="glorot_uniform",
max_length=24,
null=True,
blank=True,
)
loss_function = models.CharField(
help_text="Loss function of the Neural Network",
default="mse",
max_length=24,
null=True,
blank=True,
)
optimizer = models.CharField(
help_text="Optimizer of Neural Network",
choices=[
("sgd", "sgd"),
("rmsprop", "rmsprop"),
("adagrad", "adagrad"),
("adadelta", "adadelta"),
("adam", "adam"),
("adamax", "adamax"),
("nadam", "nadam"),
],
default="adam",
max_length=24,
null=True,
blank=True,
)
epochs = models.IntegerField(
help_text="Number of times that all the batches will be processed in the "
" Neural Network",
default=100,
null=True,
blank=True,
)
batch_size = models.IntegerField(
help_text="Batch size", default=32, null=True, blank=True
)
validation_split = models.FloatField(
help_text="Percentage of the training data that will be used for purpouses in"
" the Neural Network",
default=0.05,
null=True,
blank=True,
)
early_stopping = models.BooleanField(
help_text="Stops the training process in the Neural Network when it's not"
" getting any improvement",
default=False,
null=True,
blank=True,
)
# K-Means
kmeans = models.BooleanField(null=True, blank=True, default=False)
n_clusters = models.IntegerField(
help_text="Number of clusters for the K-Means algorithm",
default=None,
null=True,
blank=True,
)
max_cluster_elbow = models.IntegerField(
help_text="Maximun number of cluster to test in the Elbow Method",
default=100,
null=True,
blank=True,
)
# One Class SVM
ocsvm = models.BooleanField(null=True, blank=True, default=False)
kernel = models.CharField(
help_text="Kernel type for One Class SVM",
choices=[
("linear", "linear"),
("poly", "poly"),
("rbf", "rbf"),
("sigmoid", "sigmoid"),
("precomputed", "precomputed"),
],
default="rbf",
max_length=24,
null=True,
blank=True,
)
degree = models.IntegerField(
help_text="Degree of the polynomal kernel function for One Class SVM",
default=3,
null=True,
blank=True,
)
gamma = models.CharField(
help_text="Kernel coefficient for 'rbf', 'poly' and 'sigmoid' in One Class SVM."
" It can 'scale', 'auto' or float",
default="scale",
max_length=24,
null=True,
blank=True,
)
coef0 = models.FloatField(
help_text="Independent term in kernel function for One Class SVM. Only "
"significant in 'poly'",
default=0.0,
null=True,
blank=True,
)
tol = models.FloatField(
help_text="Tolerance for stopping criterion for One Class SVM",
default=0.001,
null=True,
blank=True,
)
shrinking = models.BooleanField(
help_text="Whether to use the shrinking heuristic for One Class SVM",
default=True,
null=True,
blank=True,
)
cache_size = models.IntegerField(
help_text="Specify the size of the kernel cache in MB for One Class SVM",
default=200,
null=True,
blank=True,
)
# Gaussian Distribution
gaussian_distribution = models.BooleanField(null=True, blank=True, default=False)
epsilon_candidates = models.IntegerField(
help_text="Number of epsilon values that will be tested to find the best one",
default=100000000,
null=True,
blank=True,
)
# Isolation Forest
isolation_forest = models.BooleanField(null=True, blank=True, default=False)
n_estimators = models.IntegerField(
help_text="The number of base estimators in the ensemble for Isolation "
"Forest",
default=100,
null=True,
blank=True,
)
max_features = models.FloatField(
help_text="Number of features to draw from X to train each base estimator"
" for Isolation Forest",
default=1.0,
null=True,
blank=True,
)
bootstrap = models.BooleanField(
help_text="Indicates if the Bootstrap technique is going to be applied "
"for Isolation FOrest",
default=False,
null=True,
blank=True,
)
# Local Outlier Factor
lof = models.BooleanField(null=True, blank=True, default=False)
n_neighbors_lof = models.IntegerField(
help_text="Number of neighbors to use in LOF", default=20, null=True, blank=True
)
algorithm_lof = models.CharField(
help_text="Algorithm used to compute the nearest neighbors in LOF",
choices=[
("ball_tree", "ball_tree"),
("kd_tree", "kd_tree"),
("brute", "brute"),
("auto", "auto"),
],
default="auto",
max_length=24,
null=True,
blank=True,
)
leaf_size_lof = models.IntegerField(
help_text="Leaf size passed to BallTree or KDTree in LOF",
default=30,
null=True,
blank=True,
)
metric_lof = models.CharField(
help_text="The distance metric to use for the tree in LOF",
default="minkowski",
max_length=24,
null=True,
blank=True,
)
p_lof = models.IntegerField(
help_text="Paremeter of the Minkowski metric in LOF",
default=2,
null=True,
blank=True,
)
# K-Nearest Neighbors
knn = models.BooleanField(null=True, blank=True, default=False)
n_neighbors_knn = models.IntegerField(
help_text="Number of neighbors to use in KNN", default=5, null=True, blank=True
)
radius = models.FloatField(
help_text="The range of parameter space to use by default for "
"radius_neighbors",
default=1.0,
null=True,
blank=True,
)
algorithm_knn = models.CharField(
help_text="Algorithm used to compute the nearest neighbors in KNN",
choices=[
("ball_tree", "ball_tree"),
("kd_tree", "kd_tree"),
("brute", "brute"),
("auto", "auto"),
],
default="auto",
max_length=24,
null=True,
blank=True,
)
leaf_size_knn = models.IntegerField(
help_text="Leaf size passed to BallTree or KDTree in KNN",
default=30,
null=True,
blank=True,
)
metric_knn = models.CharField(
help_text="The distance metric to use for the tree in KNN",
default="minkowski",
max_length=24,
null=True,
blank=True,
)
p_knn = models.IntegerField(
help_text="Paremeter of the Minkowski metric in knn",
default=2,
null=True,
blank=True,
)
score_func = models.CharField(
help_text="The function used to score anomalies in KNN",
choices=[
("max_distance", "max_distance"),
("average", "average"),
("median", "median"),
],
default="max_distance",
max_length=24,
null=True,
blank=True,
)
# orion subscriptions
subscriptions = ArrayField(models.CharField(max_length=128), default=list)
# data from subscripitons
data_from_subscriptions = JSONField(default=dict)
dates = JSONField(default=dict)
# clients
blackbox_client = clients.BlackboxClient()
crate_client = clients.CrateClient()
orion_client = clients.OrionClient()
def create_blackbox(self):
    """Creates a Blackbox model in the Anomaly Detection API.

    Delegates to the shared BlackboxClient, passing this datamodel so the
    remote service can register it.
    """
    self.blackbox_client.create_blackbox(self)
def get_models_columns(self):
    """Return the enabled anomaly-detection models and the sensor columns.

    Returns:
        dict or None: {"models": [...], "columns": [...]} when at least one
        model is enabled and at least one sensor column exists; otherwise
        None.
    """
    # (flag, API model name) pairs, in the order the payload expects.
    model_flags = (
        (self.pca_mahalanobis, "pca_mahalanobis"),
        (self.autoencoder, "autoencoder"),
        (self.kmeans, "kmeans"),
        (self.ocsvm, "one_class_svm"),
        (self.gaussian_distribution, "gaussian_distribution"),
        (self.isolation_forest, "isolation_forest"),
        (self.lof, "local_outlier_factor"),
        (self.knn, "knearest_neighbors"),
    )
    models = [name for enabled, name in model_flags if enabled]

    columns = []
    for sensors in self.plcs.values():
        columns.extend(sensors)

    if models and columns:
        return {"models": models, "columns": columns}
    return None
def train(
    self,
    with_source: str,
    n: int = None,
    from_date: str = None,
    to_date: str = None,
    train_df=None,
) -> bool:
    """Trains the datamodel either with data from Crate or from a CSV

    Args:
        with_source (:obj:`str`): source of the training data. Valid choices are
            'db' or 'csv'.
        n (:obj:`int`): the number of rows to take from the database. Defaults to
            None.
        from_date (:obj:`str`): date from which the rows has to be taken. Defaults
            to None.
        to_date (:obj:`str`): date until which the rows has to be taken. Defaults to
            None.
        train_df (:obj:`pandas.core.frame.DataFrame`): the dataframe to perform the
            training of the model. Defaults to None.

    Returns:
        bool: wether the process of training has been initiated or not.
    """
    if not self.is_training:
        if with_source == "db":
            # Pull the training window for this model's sensors from CrateDB.
            df = self.crate_client.get_data_from_plc(
                self.plcs, n=n, from_date=from_date, to_date=to_date
            )
        # train with data from CSV
        else:
            df = train_df

        if df is None:
            return False

        # 'split' orient yields {"columns": [...], "data": [...]} which maps
        # directly onto the Blackbox training payload.
        train_data_json = json.loads(df.to_json(orient="split"))
        payload = self.to_json()
        payload["columns"] = train_data_json["columns"]
        payload["data"] = train_data_json["data"]
        # Kick off asynchronous training; the returned URL is polled later
        # for task progress.
        self.task_status = self.blackbox_client.train(self.id, payload)
        self.is_training = True
        self.trained = False
        # NOTE(review): set_deployed() appears to toggle deployment state;
        # confirm it undeploys a deployed model before retraining.
        if self.deployed:
            self.set_deployed()
        self.save()
        return True

    return False
def to_json(self):
    """Gets the model as json format.

    Builds a plain dict with the shared settings plus one sub-dict of
    hyper-parameters for every enabled anomaly-detection model.
    """
    payload = {
        "contamination": self.contamination,
        "scaler": self.scaler,
        "n_jobs": -1,
    }
    if self.pca_mahalanobis:
        payload["pca_mahalanobis"] = {"n_components": self.n_components}
    if self.autoencoder:
        payload["autoencoder"] = {
            # stored as a comma-separated string, e.g. "32,16,32"
            "hidden_neurons": [
                int(neurons) for neurons in self.hidden_neurons.split(",")
            ],
            "dropout_rate": self.dropout_rate,
            "activation": self.activation,
            "kernel_initializer": self.kernel_initializer,
            "loss_function": self.loss_function,
            "optimizer": self.optimizer,
            "epochs": self.epochs,
            "batch_size": self.batch_size,
            "validation_split": self.validation_split,
            "early_stopping": self.early_stopping,
        }
    if self.kmeans:
        kmeans_params = {"max_cluster_elbow": self.max_cluster_elbow}
        # n_clusters is optional; a falsy value means "pick via elbow method"
        if self.n_clusters:
            kmeans_params["n_clusters"] = self.n_clusters
        payload["kmeans"] = kmeans_params
    if self.ocsvm:
        payload["one_class_svm"] = {
            "kernel": self.kernel,
            "degree": self.degree,
            "gamma": self.gamma,
            "coef0": self.coef0,
            "tol": self.tol,
            "shrinking": self.shrinking,
            "cache_size": self.cache_size,
        }
    if self.gaussian_distribution:
        payload["gaussian_distribution"] = {
            "epsilon_candidates": self.epsilon_candidates
        }
    if self.isolation_forest:
        payload["isolation_forest"] = {
            "n_estimators": self.n_estimators,
            "max_features": self.max_features,
            "bootstrap": self.bootstrap,
        }
    if self.knn:
        payload["knearest_neighbors"] = {
            "n_neighbors": self.n_neighbors_knn,
            "radius": self.radius,
            "algorithm": self.algorithm_knn,
            "leaf_size": self.leaf_size_knn,
            "metric": self.metric_knn,
            "p": self.p_knn,
            "score_func": self.score_func,
        }
    if self.lof:
        # LOF shares the leaf_size/metric/p fields with kNN by design of the
        # model's field layout.
        payload["local_outlier_factor"] = {
            "n_neighbors": self.n_neighbors_lof,
            "algorithm": self.algorithm_lof,
            "leaf_size": self.leaf_size_knn,
            "metric": self.metric_knn,
            "p": self.p_knn,
        }
    return payload
def set_trained(self):
    """Flip the datamodel into its trained state and persist it."""
    logger.info(f"Setting datamodel with id {self.id} to trained!")
    finished_at = datetime.now(tz=pytz.UTC)
    self.trained = True
    self.is_training = False
    self.date_trained = finished_at
    self.save()
def set_deployed(self):
    """Toggles the datamodel's deployed state.

    When turning deployment on, one OCB subscription is created per PLC so
    that sensor updates are POSTed back to this datamodel's predict endpoint;
    when turning it off, those subscriptions are removed again.
    """
    # NOTE: this toggles rather than sets — calling it on a deployed model
    # un-deploys it (train() relies on exactly that).
    self.deployed = not self.deployed
    if self.deployed:
        self.date_deployed = datetime.now(tz=pytz.UTC)
        # create subscriptions in OCB
        notification_url = (
            f"http://{config.SERVER_IP}/api/v1/datamodels/{self.id}/predict/"
        )
        subscriptions = []
        data_from_subscriptions = {}
        for (plc, sensors) in self.plcs.items():
            subscription = self.orion_client.create_subscription(
                url=notification_url, pattern=plc, conditions=sensors, throttling=5
            )
            subscriptions.append(subscription)
            # One empty buffer per PLC; filled as notifications arrive.
            data_from_subscriptions[plc] = {}
        self.subscriptions = subscriptions
        self.data_from_subscriptions = data_from_subscriptions
    else:
        self.date_deployed = None
        # remove subscriptions in OCB
        self.orion_client.delete_subscriptions(self.subscriptions)
        self.subscriptions = []
    self.save()
def check_csv_columns(self, file, index_column: str = None) -> tuple:
    """Checks if a CSV has all the columns necessary to train this datamodel.

    The CSV is valid when its column set is exactly the union of all sensor
    columns over every PLC of this datamodel (no missing and no extra
    columns; the optional index column is excluded by pandas).

    Args:
        file (django.core.files.uploadedfile.TemporaryUploadedFile): training
            file; any file-like object whose ``read()`` returns bytes works.
        index_column (:obj:`str`): the name of the index column if there is one.
            Defaults to None.

    Returns:
        tuple: ``(True, dataframe)`` when the CSV was valid,
        ``(False, None)`` otherwise.
    """
    # NOTE: the original annotation claimed ``-> bool`` but a tuple has
    # always been returned; the annotation is now correct.
    raw = io.StringIO(file.read().decode("UTF-8"))
    if index_column:
        df = pd.read_csv(raw, index_col=index_column)
    else:
        df = pd.read_csv(raw)
    # The columns that must be present: every sensor of every PLC.
    expected_columns = {
        column for sensors in self.plcs.values() for column in sensors
    }
    # Exact match in both directions == set equality (duplicates are
    # irrelevant, pandas de-duplicates column labels anyway).
    if set(df.columns) == expected_columns:
        return True, df
    return False, None
def _all_data_from_subscriptions_received(self) -> bool:
    """Report whether every subscription has delivered its payload.

    Returns:
        bool: True once no per-PLC buffer is still an empty dict.
    """
    pending = [buffer == {} for buffer in self.data_from_subscriptions.values()]
    return not any(pending)
def _create_prediction_df(self):
    """Assemble one dataframe out of the buffered subscription payloads.

    Side effect: resets every per-PLC buffer to an empty dict so the next
    round of notifications starts from scratch.

    Returns:
        pandas.core.frame.DataFrame: the columns of every PLC concatenated
        side by side (axis=1).
    """
    frames = []
    cleared_buffers = {}
    for plc_name, payload in self.data_from_subscriptions.items():
        frames.append(
            pd.DataFrame(data=payload["rows"], columns=payload["columns"])
        )
        cleared_buffers[plc_name] = {}
    self.data_from_subscriptions = cleared_buffers
    return pd.concat(frames, axis=1)
def set_subscription_data_and_predict(self, data: dict):
    """Sets subscription data and once it has received the data from all the
    subscriptions, it sends them to the Anomaly Detection API to generate a new
    prediction.

    Args:
        data (:obj:`str`): data from a subscription in OCB entity form.
    """
    entity_id = data["id"]
    # Get the attributes data of the subscription: every key that is not OCB
    # metadata is treated as a sensor reading (single row of values).
    sub_data = {"rows": [[]], "columns": []}
    for key in data.keys():
        if key not in NOT_ATTRIBUTES_KEYS_SUBSCRIPTION:
            sub_data["rows"][0].append(data[key]["value"])
            sub_data["columns"].append(key)
    # save the data from this subscription — only the first notification per
    # PLC is kept until a full round has been collected and consumed.
    if self.data_from_subscriptions[entity_id] == {}:
        logger.info(
            f"Received data from {entity_id} for datamodel {self.id}. Columns: {sub_data['columns']}"
        )
        # Save the time instant when the value of the sensors were updated
        for column in sub_data["columns"]:
            self.dates[column] = data["TimeInstant"]["value"]
        self.data_from_subscriptions[entity_id] = sub_data
    if self._all_data_from_subscriptions_received():
        logger.info(
            f"All data received for datamodel {self.id}. Sending to Anomaly Backend..."
        )
        df = self._create_prediction_df()
        payload = json.loads(df.to_json(orient="split"))
        prediction = DataModelPrediction(
            datamodel=self, data=payload.copy(), dates=self.dates
        )
        # The prediction id travels with the payload so the asynchronous
        # result can be matched back to this DataModelPrediction later.
        payload["id"] = str(prediction.id)
        prediction.task_status = self.blackbox_client.predict(self.id, payload)
        prediction.save()
        prediction.send_notification()
    self.save()
def send_prediction_to_orion(self, predictions: dict):
    """Sends the predictions received from the Anomaly Detection API to the Orion
    Context Broker.

    Args:
        predictions (:obj:`dict`): predictions made by the Anomaly Detection API.
    """
    prediction = DataModelPrediction.objects.get(
        datamodel=self, id=predictions["id"]
    )
    logger.debug(f"Prediction is: {prediction}")
    entity_id = f"urn:ngsi-ld:AnomalyPrediction:{self.id}"
    entity_type = "AnomalyPrediction"
    # Each value is presumably a one-element list per model, of which only
    # the first element is forwarded. NOTE(review): the "id" key is iterated
    # too — value[0] would then be the first character of the id string;
    # confirm the Blackbox payload shape against set_prediction_results,
    # which explicitly skips "id".
    predictions_to_orion = {}
    for (key, value) in predictions.items():
        predictions_to_orion[key] = value[0]
    attrs = {
        "name": {"type": "String", "value": self.name},
        "entities": {"type": "Object", "value": self.plcs},
        # NOTE(review): naive local time here, while other timestamps in this
        # model use tz-aware UTC — confirm this inconsistency is intended.
        "date": {"type": "DateTime", "value": datetime.now().isoformat()},
        "predictions": {"type": "Object", "value": predictions_to_orion},
    }
    self.orion_client.create_entity(entity_id, entity_type, attrs)
    self.num_predictions += 1
    self.save()
def set_prediction_results(self, data: dict):
    """Set the results of the prediction received by the Anomaly Detection API.

    Args:
        data (:obj:`dict`): a dictionary containing the predictions and the ID of
            the prediction.
    """
    prediction = DataModelPrediction.objects.get(pk=data["id"])
    # Keep only the first element of every per-model result list; the "id"
    # entry is bookkeeping, not a prediction.
    prediction.predictions = {
        key: value[0] for (key, value) in data.items() if key != "id"
    }
    prediction.predictions_received_on = datetime.now(tz=pytz.UTC)
    prediction.save()
    self.num_predictions += 1
    self.save()
    # Forward the finished prediction to Orion and the notification backend.
    prediction.send_to_orion()
    prediction.send_notification()
def get_task_status(self):
    """Gets the status of a task in the Anomaly Detection API."""
    client = self.blackbox_client
    return client.get_task_status(self.task_status)
def pre_delete_datamodel_handler(sender, instance, **kwargs):
    """Handles the pre_delete signal of `DataModel` by requesting the Anomaly
    Detection API to delete the corresponding Blackbox model before the row
    disappears from the database.

    Args:
        sender: the `DataModel` model class that sent the signal.
        instance (backend.apps.models.DataModel): the datamodel about to be
            deleted.
    """
    instance.blackbox_client.delete_blackbox(instance)
# Clean up the remote Blackbox model whenever a DataModel row is deleted.
pre_delete.connect(pre_delete_datamodel_handler, sender=DataModel)
class DataModelPrediction(models.Model):
    """Class which holds data of a prediction made by a `DataModel`."""

    # The UUID primary key also travels in the payload sent to (and echoed
    # back by) the Anomaly Detection API, which is how async results are
    # matched to this row.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    datamodel = models.ForeignKey(DataModel, on_delete=models.CASCADE)
    data = JSONField(help_text="The data to be predicted")
    dates = JSONField(help_text="When the date to be predicted was created")
    predictions = JSONField(help_text="The predictions", default=dict)
    task_status = models.CharField(
        help_text="URL to get the progress of predicting process",
        null=True,
        blank=True,
        max_length=512,
    )
    ack = models.BooleanField(
        help_text="Wether the prediction has been acknowledged", default=False
    )
    user_ack = models.CharField(
        help_text="The name of the user who acknowledged the prediction",
        max_length=128,
        blank=True,
        null=True,
    )
    created_on = models.DateTimeField(
        help_text="When the prediction was created", auto_now_add=True
    )
    predictions_received_on = models.DateTimeField(
        help_text="When the predictions where received",
        default=None,
        null=True,
        blank=True,
    )

    # Shared client instances (class attributes, not per-instance state).
    orion_client = clients.OrionClient()
    notification_client = clients.NotificationClient()

    def send_to_orion(self):
        """Sends the prediction to the Orion Context Broker."""
        entity_id = f"urn:ngsi-ld:AnomalyPrediction:{self.id}"
        entity_type = "AnomalyPrediction"
        attrs = {
            "datamodel_id": {"type": "String", "value": str(self.datamodel.id)},
            "datamodel_name": {"type": "String", "value": self.datamodel.name},
            "data": {
                "type": "Object",
                # Re-pair the split-orient payload: column name -> value of
                # the single data row.
                "value": {
                    column: value
                    for (column, value) in zip(
                        self.data["columns"], self.data["data"][0]
                    )
                },
            },
            "dates": {"type": "Object", "value": self.dates},
            "predictions": {"type": "Object", "value": self.predictions},
        }
        self.orion_client.create_entity(entity_id, entity_type, attrs)

    def send_notification(self):
        """Sends the prediction to the Notification Backend."""
        # Django's internal "_state" attribute is not JSON-serializable and
        # is therefore excluded from the serialized payload.
        self.notification_client.send_prediction(self.to_dict(["_state"]))

    def to_dict(self, exclude: list = None):
        """Serialize the class into a dict.

        Args:
            exclude(:obj:`list`): a list of str containing the keys to exclude.

        Returns:
            dict: the DataModelPrediction data.
        """
        to_exclude = exclude
        if to_exclude is None:
            to_exclude = []
        data = {}
        for (key, value) in self.__dict__.items():
            if key not in to_exclude:
                # NOTE(review): exact-type checks — subclasses of UUID or
                # datetime would fall through and be emitted unchanged;
                # confirm no such values ever appear in __dict__.
                if type(value) is uuid.UUID:
                    data[key] = str(value)
                elif type(value) is datetime:
                    data[key] = value.isoformat()
                else:
                    data[key] = value
        return data

    def set_ack(self, user: str):
        """Sets the ACK for the prediction.

        Args:
            user (:obj:`str`): the user who sent the ACK.
        """
        self.ack = True
        self.user_ack = user
        self.save()
        logger.info(f"DataModel Prediction with {self.id} ACKed by {user}.")
class TrainFile(models.Model):
    """Uploaded CSV file used to train a `DataModel`."""

    datamodel = models.ForeignKey(DataModel, on_delete=models.CASCADE)
    file = models.FileField(
        blank=False,
        null=False,
        help_text="A CSV training file containing the columns of the DataModel",
    )
    # Optional name of the CSV column to use as the dataframe index.
    index_column = models.CharField(max_length=128, blank=True, null=True)
    uploaded_at = models.DateTimeField(auto_now_add=True)

    class Meta:
        # TrainFile.objects.latest() returns the most recently uploaded file.
        get_latest_by = "uploaded_at"
| 33.116998 | 105 | 0.582822 |
7b75ed9d16814974207b44c49d7baa0e3ea6e4e9 | 1,038 | py | Python | measured-vs-model.py | afiskon/radiation-pattern-measurement | 40feb2bb1cbb845c107945ae49582966e4325ce6 | [
"MIT"
] | 2 | 2022-02-21T23:02:45.000Z | 2022-03-23T08:56:21.000Z | measured-vs-model.py | afiskon/radiation-pattern-measurement | 40feb2bb1cbb845c107945ae49582966e4325ce6 | [
"MIT"
] | null | null | null | measured-vs-model.py | afiskon/radiation-pattern-measurement | 40feb2bb1cbb845c107945ae49582966e4325ce6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3 -u
# vim: set ai et ts=4 sw=4:
from math import pi
import matplotlib.pyplot as plt
import csv
def load_data(fname):
    """Read (angle_deg, level) pairs from a CSV and normalize them.

    Angles are converted from degrees to radians; levels are shifted so the
    maximum sits at 0 (i.e. values become dB relative to the peak).
    """
    angles = []
    levels = []
    with open(fname, newline='') as handle:
        for record in csv.reader(handle, delimiter=',', quotechar='"'):
            angles.append(2 * pi * float(record[0]) / 360)
            levels.append(float(record[1]))
    peak = max(levels)
    return angles, [level - peak for level in levels]
# Plot the measured radiation pattern against the model prediction on a
# polar chart and save it as a PNG. The matplotlib calls below are
# order-sensitive (axis setup must follow subplot creation).
xs, ys = load_data('data/raw-data.csv')
model_xs, model_ys = load_data('data/model-data.csv')
dpi = 80
fig = plt.figure(dpi = dpi, figsize = (512 / dpi, 384 / dpi) )
ax = plt.subplot(111, projection='polar')
# Rotate so that 0 degrees points straight up.
ax.set_theta_offset(2*pi*90/360)
ax.plot(xs, ys, linestyle = 'solid', linewidth=3)
ax.plot(model_xs, model_ys, linestyle='dashed', color='red')
# Radial axis in dB relative to the peak: 0 at the rim, down to -36 inside,
# with the 0 label blanked out.
ax.set_rmax(0)
ax.set_rticks([-6*i for i in range(0,7)])
ax.set_yticklabels([''] + [str(-6*i) for i in range(1,7)])
ax.set_rlabel_position(0)
ax.set_thetagrids(range(0, 360, 15))
# Clockwise angles, as customary for antenna pattern plots.
ax.set_theta_direction(-1)
ax.grid(True)
fig.savefig('measured-vs-model.png')
| 28.054054 | 67 | 0.631985 |
85ddfe4c73b30524e5d418acab4e9e4f05a10b86 | 6,122 | py | Python | src/common/sequence.py | janka2012/digIS | 0386c36e49880af25390d208a37129318516e502 | [
"MIT"
] | 2 | 2021-05-23T02:49:23.000Z | 2021-05-24T19:10:32.000Z | src/common/sequence.py | janka2012/digIS | 0386c36e49880af25390d208a37129318516e502 | [
"MIT"
] | 2 | 2021-06-10T06:44:12.000Z | 2021-12-15T21:24:37.000Z | src/common/sequence.py | janka2012/digIS | 0386c36e49880af25390d208a37129318516e502 | [
"MIT"
] | null | null | null | import os
import re
from Bio import SeqIO
from Bio.Alphabet import IUPAC
from Bio.SeqRecord import SeqRecord
def transform_range(start, end, frame, seqlen):
    """Map a 1-based amino-acid range onto 1-based DNA coordinates.

    ``frame`` runs 1-6: frames 1-3 are forward-strand shifts 0/1/2; frames
    4-6 are the reverse strand, so the resulting range is mirrored against
    ``seqlen``.
    """
    frame_shifts = (0, 1, 2, 2, 1, 0)
    shift = frame_shifts[frame - 1]
    first = (start - 1) * 3 + shift + 1
    last = end * 3 + shift
    if frame > 3:
        first, last = seqlen - last + 1, seqlen - first + 1
    return first, last
def translate_dna_seq_biopython(seqrec, outseq):
    """Write all six-frame translations of *seqrec* to FASTA file *outseq*.

    Frames 1-3 come from the forward strand, frames 4-6 from the reverse
    complement; each frame window is trimmed so its length is a multiple of
    3 before translating with the bacterial table (11). Output records are
    named ``<id>_<frame>``.
    """
    with open(outseq, 'w') as aa_fa:
        translations = []
        for strand in (seqrec.seq, seqrec.seq.reverse_complement()):
            for shift in range(3):
                # trim the tail so the translated window is a multiple of 3
                usable_end = len(strand) - ((len(strand) - shift) % 3)
                translations.append(strand[shift:usable_end].translate(table=11))
        description_suffix = seqrec.description.replace(seqrec.id, "")
        for frame_number, aa_seq in enumerate(translations, start=1):
            seq_id = seqrec.id + "_" + str(frame_number)
            aa_record = SeqRecord(
                aa_seq,
                id=seq_id,
                name=seq_id,
                description=seq_id + description_suffix,
            )
            SeqIO.write(aa_record, aa_fa, 'fasta')
def __is_non_zero_file(filepath):
    """Return True when *filepath* exists as a regular file and is non-empty."""
    if not os.path.isfile(filepath):
        return False
    return os.path.getsize(filepath) > 0
def get_seqlen(filename):
    """Length of the single sequence stored in FASTA file *filename*."""
    return len(SeqIO.read(filename, "fasta").seq)
def get_ids_from_fasta(filename):
    """Collect the bare accession ids from a FASTA file.

    Anything after the first '|' and the first '.' is stripped; duplicates
    collapse because a set is returned.
    """
    return {
        record.id.split("|")[0].split(".")[0]
        for record in SeqIO.parse(filename, "fasta")
    }
def get_full_ids_from_fasta(filename):
    """List every record id of a FASTA file, unmodified and in file order."""
    return [record.id for record in SeqIO.parse(filename, "fasta")]
def filter_fasta_re(in_file, out_file, regexp):
    """Copy to *out_file* only the FASTA records whose id matches *regexp*
    (case-insensitive search, not a full match)."""
    kept = [
        record
        for record in SeqIO.parse(in_file, "fasta")
        if re.search(regexp, record.id, re.IGNORECASE)
    ]
    SeqIO.write(kept, out_file, "fasta")
def get_sequence_record(filename, start, end, strand, protein=True):
    """Extract [start, end] (1-based, inclusive) from a single-record FASTA.

    The slice is reverse-complemented when *strand* is '-' and translated
    with the bacterial table (11) unless *protein* is False. The returned
    record keeps the source id and has an empty description.
    """
    source = SeqIO.read(filename, "fasta")
    fragment = source.seq[start - 1:end]
    if strand == '-':
        fragment = fragment.reverse_complement()
    if protein:
        fragment = fragment.translate(table=11)
    return SeqRecord(fragment, id=source.id, description='')
def get_sequence_record_ids(filename, ids):
    """Return the FASTA records whose id is exactly one of *ids*."""
    wanted = set(ids)
    return [
        record
        for record in SeqIO.parse(filename, "fasta")
        if record.id in wanted
    ]
def get_sequence_record_id(filename, rec_id):
    """Return the FASTA records whose id contains *rec_id* as a substring."""
    return [
        record
        for record in SeqIO.parse(filename, "fasta")
        if rec_id in record.id
    ]
def get_sequence_ids(filename):
    """All record ids of *filename*, in order of appearance.

    (Functionally the same as get_full_ids_from_fasta; both are kept for
    compatibility with existing callers.)
    """
    return [record.id for record in SeqIO.parse(filename, "fasta")]
def get_sixframe_record(filename, start, end):
    """Translate [start, end] of a single-record FASTA in all six frames.

    Returns a list of six SeqRecords: for each shift i in 0..2 the forward
    translation ('Frame: i') followed by the reverse-complement translation
    ('Frame: i+3'), all using the bacterial table (11).
    """
    # correction of end position such that the length is multiple of 3
    record = SeqIO.read(filename, "fasta")
    s = start
    # NOTE(review): when (end - start + 1) is already a multiple of 3 this
    # still extends the window by 3 extra bases — presumably headroom for
    # the +1/+2 frame shifts below; confirm against callers.
    e = end + 3 - ((end - start + 1) % 3)
    all_recs = []
    for i in [0, 1, 2]:
        seq = record.seq[s - 1 + i:e + i]
        all_recs.append(SeqRecord(seq.translate(table=11), id=record.id, description='Frame: ' + str(i)))
        rc_seq = seq.reverse_complement()
        all_recs.append(SeqRecord(rc_seq.translate(table=11), id=record.id, description='Frame: ' + str(i + 3)))
    return all_recs
def merge_all_fasta_files(dir_path, out_file):
    """Concatenate every FASTA record found anywhere under *dir_path*
    (recursively) into the single file *out_file*."""
    merged = []
    for dirpath, _dirnames, filenames in os.walk(dir_path):
        for name in filenames:
            merged.extend(SeqIO.parse(os.path.join(dirpath, name), "fasta"))
    SeqIO.write(merged, out_file, "fasta")
def merge_fasta_files(filenames, out_file):
    """Concatenate the records of several FASTA files into *out_file*,
    preserving the order of *filenames*."""
    merged = [
        record
        for name in filenames
        for record in SeqIO.parse(name, "fasta")
    ]
    SeqIO.write(merged, out_file, "fasta")
def get_max_seq_len(filename):
    """Length of the longest sequence in *filename* (0 for an empty file)."""
    return max(
        (len(record.seq) for record in SeqIO.parse(filename, "fasta")),
        default=0,
    )
def get_maxlen_seq(filename):
    """Return the record with the longest (non-empty) sequence.

    Ties keep the earliest record; returns None when the file is empty or
    every sequence has length 0 (matching the historical behaviour).
    """
    best_len = 0
    longest = None
    for record in SeqIO.parse(filename, "fasta"):
        if len(record.seq) > best_len:
            best_len = len(record.seq)
            longest = record
    return longest
def get_seq_lens(filename, seq_type):
    """Lengths of all records after trimming at the first out-of-alphabet
    character.

    *seq_type* 'prot' selects the IUPAC protein alphabet; anything else
    selects unambiguous DNA. Note: the records' .seq is mutated in memory
    (the file itself is untouched).
    """
    lengths = []
    for record in SeqIO.parse(filename, "fasta"):
        letters = (
            IUPAC.protein.letters
            if seq_type == 'prot'
            else IUPAC.unambiguous_dna.letters
        )
        record.seq = trim_sequence(record.seq, letters)
        lengths.append(len(record.seq))
    return lengths
def trim_sequence(seq, alphabet):
    """Cut *seq* just before the first character that is not in *alphabet*.

    If every character is valid, *seq* is returned whole.
    """
    cut = len(seq)
    for position, symbol in enumerate(seq):
        if symbol not in alphabet:
            cut = position
            break
    return seq[0:cut]
def save_to_fasta_file(records, output_file, mode="w+"):
    """Write *records* to the path *output_file* in FASTA format.

    *mode* is passed straight to open(), so "a" appends instead of
    truncating.
    """
    with open(output_file, mode) as handle:
        SeqIO.write(records, handle, "fasta")
def prepare_flank_sequences(seq_records, flank, ids=None):
    """Expand each record by *flank* positions on both sides and track ranges.

    Args:
        seq_records: iterable of project record objects exposing
            ``get_sequence``, ``get_flank_range`` and ``get_flank_lengths``.
        flank: number of positions requested on each side of the record.
        ids: optional list of suffixes, one per record, appended to the
            flanked record ids as ``<id>_<suffix>``.

    Returns:
        tuple: (flanked SeqRecords,
                1-based inclusive position of the core sequence inside each
                flanked sequence,
                original coordinate range of each flanked region).
    """
    seq_recs = []
    seq_ranges = []
    seq_original_ranges = []
    for i, rec in enumerate(seq_records):
        seq_len = len(rec)
        seq_rec = rec.get_sequence(flank=flank)
        seq_original_range = rec.get_flank_range(flank=flank)
        # Actual flank lengths may differ from *flank* (presumably truncated
        # near sequence ends), hence they are queried back from the record —
        # TODO confirm against the record implementation.
        flank_lens = rec.get_flank_lengths(flank)
        if ids:
            seq_rec.id = seq_rec.id + "_" + ids[i]
        # Where the original (un-flanked) sequence sits within the new one.
        seq_range = (flank_lens[0] + 1, flank_lens[0] + seq_len)
        seq_recs.append(seq_rec)
        seq_ranges.append(seq_range)
        seq_original_ranges.append(seq_original_range)
    return seq_recs, seq_ranges, seq_original_ranges
| 26.733624 | 112 | 0.618425 |
5d28ba15273caeab003eb8ca0f704633f24cf09e | 1,560 | py | Python | evap/development/management/commands/reload_testdata.py | felixrindt/EvaP | fe65fcc511cc942695ce1edbaab170894f0d37b1 | [
"MIT"
] | 29 | 2020-02-28T23:03:41.000Z | 2022-02-19T09:29:36.000Z | evap/development/management/commands/reload_testdata.py | felixrindt/EvaP | fe65fcc511cc942695ce1edbaab170894f0d37b1 | [
"MIT"
] | 247 | 2020-02-19T17:18:15.000Z | 2022-03-31T20:59:47.000Z | evap/development/management/commands/reload_testdata.py | felixrindt/EvaP | fe65fcc511cc942695ce1edbaab170894f0d37b1 | [
"MIT"
] | 35 | 2020-04-10T21:50:02.000Z | 2022-03-10T16:42:11.000Z | from django.core.management import call_command
from django.core.management.base import BaseCommand
from evap.evaluation.management.commands.tools import confirm_harmful_operation
class Command(BaseCommand):
    args = ""
    help = "Drops the database, recreates it and then loads the testdata."

    def handle(self, *args, **options):
        """Rebuild the development database from the test_data fixture.

        The steps are strictly ordered and destructive:
        reset_db -> migrate -> flush -> loaddata -> cache refresh.
        """
        self.stdout.write("")
        self.stdout.write("WARNING! This will drop the database and cause IRREPARABLE DATA LOSS.")
        if not confirm_harmful_operation(self.stdout):
            return

        self.stdout.write('Executing "python manage.py reset_db"')
        call_command("reset_db", interactive=False)

        self.stdout.write('Executing "python manage.py migrate"')
        call_command("migrate")

        # clear any data the migrations created.
        # their pks might differ from the ones in the dump, which results in errors on loaddata
        self.stdout.write('Executing "python manage.py flush"')
        call_command("flush", interactive=False)

        self.stdout.write('Executing "python manage.py loaddata test_data"')
        call_command("loaddata", "test_data")

        self.stdout.write('Executing "python manage.py clear_cache"')
        call_command("clear_cache")

        self.stdout.write('Executing "python manage.py refresh_results_cache"')
        call_command("refresh_results_cache")

        self.stdout.write('Executing "python manage.py clear_cache --cache=sessions"')
        call_command("clear_cache", "--cache=sessions")

        self.stdout.write("Done.")
| 38.04878 | 98 | 0.695513 |
95ae99dbf33c6237e2b5b7a7fee7cd30decb0d97 | 587 | py | Python | article/migrations/0063_auto_20181130_1837.py | higab85/drugsandme | 7db66d9687ac9a04132de94edda364f191d497d7 | [
"MIT"
] | 3 | 2016-10-10T10:07:39.000Z | 2018-10-29T19:57:52.000Z | article/migrations/0063_auto_20181130_1837.py | higab85/drugsandme | 7db66d9687ac9a04132de94edda364f191d497d7 | [
"MIT"
] | 12 | 2016-11-04T18:59:17.000Z | 2022-03-11T23:32:52.000Z | article/migrations/0063_auto_20181130_1837.py | higab85/drugsandme | 7db66d9687ac9a04132de94edda364f191d497d7 | [
"MIT"
] | 2 | 2016-09-29T22:48:26.000Z | 2019-10-01T19:55:14.000Z | # Generated by Django 2.1 on 2018-11-30 18:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('article', '0062_auto_20181130_1144'),
]
operations = [
migrations.AlterField(
model_name='articleconstants',
name='cookie_banner_message_en',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='articleconstants',
name='cookie_banner_message_es',
field=models.TextField(blank=True),
),
]
| 24.458333 | 47 | 0.609881 |
0e49cebee2b5d4602ff8025126e9cd506647030f | 8,921 | py | Python | tensorflow/python/distribute/cluster_resolver/slurm_cluster_resolver.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 36 | 2016-12-17T15:25:25.000Z | 2022-01-29T21:50:53.000Z | tensorflow/python/distribute/cluster_resolver/slurm_cluster_resolver.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 59 | 2019-06-17T09:37:49.000Z | 2022-01-19T01:21:34.000Z | tensorflow/python/distribute/cluster_resolver/slurm_cluster_resolver.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 36 | 2017-07-27T21:12:40.000Z | 2022-02-03T16:45:56.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Cluster Resolvers for Slurm workload manager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import subprocess
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import ClusterResolver
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import format_master_url
from tensorflow.python.training.server_lib import ClusterSpec
from tensorflow.python.util.tf_export import tf_export
@tf_export('distribute.cluster_resolver.SlurmClusterResolver')
class SlurmClusterResolver(ClusterResolver):
  """Cluster Resolver for system with Slurm workload manager.

  This is an implementation of cluster resolvers for Slurm clusters. This allows
  the specification of jobs and task counts, number of tasks per node, number of
  GPUs on each node and number of GPUs for each task, It retrieves system
  attributes by Slurm environment variables, resolves allocated computing node
  names, construct a cluster and return a Cluster Resolver object which an be
  use for distributed TensorFlow.
  """

  def _resolve_hostnames(self):
    """Resolve host names of nodes allocated in current jobs.

    Returns:
      A list of node names as strings.
    """
    # 'scontrol show hostname' expands Slurm's compact node-list notation
    # (e.g. node[01-04]) into one hostname per line.
    hostlist = (subprocess.check_output(['scontrol', 'show', 'hostname']).
                decode('utf-8').strip().split('\n'))
    return hostlist

  def __init__(self,
               jobs,
               port_base=8888,
               gpus_per_node=1,
               gpus_per_task=1,
               tasks_per_node=None,
               auto_set_gpu=True,
               rpc_layer='grpc'):
    """Creates a new SlurmClusterResolver object.

    This takes in parameters and creates a SlurmClusterResolver object. It uses
    those parameters to check which nodes will processes reside and resolves
    their hostnames. With the number of the GPUs on each node and number of GPUs
    for each task it offsets the port number for each processes and allocate
    GPUs to tasks by setting environment variables. The resolver currently
    supports homogeneous tasks and default Slurm process allocation.

    Args:
      jobs: Dictionary with job names as key and number of tasks in the job as
        value
      port_base: The first port number to start with for processes on a node.
      gpus_per_node: Number of GPUs available on each node.
      gpus_per_task: Number of GPUs to be used for each task.
      tasks_per_node: Number of tasks to run on each node, if not set defaults
        to Slurm's output environment variable SLURM_NTASKS_PER_NODE.
      auto_set_gpu: Set the visible CUDA devices automatically while resolving
        the cluster by setting CUDA_VISIBLE_DEVICES environment variable.
        Defaults to True.
      rpc_layer: (Optional) The protocol TensorFlow uses to communicate between
        nodes. Defaults to 'grpc'.

    Returns:
      A ClusterResolver object which can be used with distributed TensorFlow.

    Raises:
      RuntimeError: If requested more GPUs per node then available or requested
        more tasks then assigned tasks.
    """
    # check if launched by mpirun — then MPI's rank/size take precedence
    # over Slurm's own process variables.
    if 'OMPI_COMM_WORLD_RANK' in os.environ:
      self._rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
      num_tasks = int(os.environ['OMPI_COMM_WORLD_SIZE'])
    else:
      self._rank = int(os.environ['SLURM_PROCID'])
      num_tasks = int(os.environ['SLURM_NTASKS'])

    # Sort job names so every process derives the same job -> rank mapping.
    self._jobs = collections.OrderedDict(sorted(jobs.items()))
    self._port_base = port_base

    # user specification overrides SLURM specification
    if tasks_per_node is not None:
      self._tasks_per_node = tasks_per_node
    elif tasks_per_node is None and 'SLURM_NTASKS_PER_NODE' in os.environ:
      self._tasks_per_node = int(os.environ['SLURM_NTASKS_PER_NODE'])
    else:
      raise RuntimeError('Neither `tasks_per_node` or '
                         'SLURM_NTASKS_PER_NODE is set.')

    self._gpus_per_node = gpus_per_node
    self._gpus_per_task = gpus_per_task

    self._auto_set_gpu = auto_set_gpu
    # task_type/task_id stay None until cluster_spec() resolves them.
    self.task_type = None
    self.task_id = None
    self.rpc_layer = rpc_layer

    self._gpu_allocation = []
    self._cluster_allocation = {}

    if self._tasks_per_node * self._gpus_per_task > self._gpus_per_node:
      raise RuntimeError('Requested more GPUs per node then available.')

    if sum(self._jobs.values()) != num_tasks:
      raise RuntimeError('Requested more tasks then assigned tasks.')

  def cluster_spec(self):
    """Returns a ClusterSpec object based on the latest instance group info.

    This returns a ClusterSpec object for use based on information from the
    specified initialization parameters and Slurm environment variables. The
    cluster specification is resolved each time this function is called. The
    resolver extract hostnames of nodes by scontrol and pack tasks in that
    order until a node a has number of tasks that is equal to specification.
    GPUs on nodes are allocated to tasks by specification through setting
    CUDA_VISIBLE_DEVICES environment variable.

    Returns:
      A ClusterSpec containing host information retrieved from Slurm's
        environment variables.
    """
    hostlist = self._resolve_hostnames()

    task_list = []
    self._gpu_allocation = []
    self._cluster_allocation = {}

    # Pack tasks onto nodes in hostlist order: each task on a node gets its
    # own port (port_base + offset) and a disjoint slice of the node's GPUs.
    for host in hostlist:
      for port_offset, gpu_offset in zip(
          range(self._tasks_per_node),
          range(0, self._gpus_per_node, self._gpus_per_task)):

        host_addr = '%s:%d' % (host, self._port_base + port_offset)
        task_list.append(host_addr)
        gpu_id_list = []

        for gpu_id in range(gpu_offset, gpu_offset + self._gpus_per_task):
          gpu_id_list.append(str(gpu_id))

        self._gpu_allocation.append(','.join(gpu_id_list))

    # Walk the (sorted) jobs and carve consecutive rank ranges out of
    # task_list; the range containing our own rank fixes task_type/task_id.
    cluster_rank_offset_start = 0
    cluster_rank_offset_end = 0

    for task_type, num_tasks in self._jobs.items():
      cluster_rank_offset_end = cluster_rank_offset_start + num_tasks

      self._cluster_allocation[task_type] = (
          task_list[cluster_rank_offset_start:cluster_rank_offset_end])

      if cluster_rank_offset_start <= self._rank < cluster_rank_offset_end:
        self.task_type = task_type
        self.task_id = self._rank - cluster_rank_offset_start

      cluster_rank_offset_start = cluster_rank_offset_end

    # Side effect: pin this process to its GPU slice via the environment.
    if self._auto_set_gpu is True:
      os.environ['CUDA_VISIBLE_DEVICES'] = self._gpu_allocation[self._rank]

    return ClusterSpec(self._cluster_allocation)

  def get_task_info(self):
    """Returns job name and task_id for the process which calls this.

    This returns the job name and task index for the process which calls this
    function according to its rank and cluster specification. The job name and
    task index are set after a cluster is constructed by cluster_spec otherwise
    defaults to None.

    Returns:
      A string specifying job name the process belongs to and an integner
        specifying the task index the process belongs to in that job.
    """
    return self.task_type, self.task_id

  def master(self, task_type=None, task_id=None, rpc_layer=None):
    """Returns the master string for connecting to a TensorFlow master.

    Args:
      task_type: (Optional) Overrides the default auto-selected task type.
      task_id: (Optional) Overrides the default auto-slected task index.
      rpc_layer: (Optional) Overrides the default RPC protocol TensorFlow uses
        to communicate across nodes.

    Returns:
      A connection string for connecting to a TensorFlow master.
    """
    task_type = task_type if task_type is not None else self.task_type
    task_id = task_id if task_id is not None else self.task_id

    if task_type is not None and task_id is not None:
      return format_master_url(
          self.cluster_spec().task_address(task_type, task_id),
          rpc_layer or self.rpc_layer)

    # Unresolved (cluster_spec() not called yet and no overrides given).
    return ''

  def num_accelerators(self,
                       task_type=None,
                       task_id=None,
                       config_proto=None):
    # Unused, since this is set in __init__ manually.
    del task_type, task_id, config_proto
    return {'GPU': self._gpus_per_node}
| 39.299559 | 92 | 0.71696 |
759e52eb8d5c2acb7e4bde15a8c1ba47faa1d5f3 | 3,345 | py | Python | ckanext/metadata/model/workflow_state.py | SAEONData/ckanext-metadata | af1a137e5d924f05ea1835b81f36f808700d3aa7 | [
"MIT"
] | null | null | null | ckanext/metadata/model/workflow_state.py | SAEONData/ckanext-metadata | af1a137e5d924f05ea1835b81f36f808700d3aa7 | [
"MIT"
] | 76 | 2018-04-10T12:51:56.000Z | 2021-02-22T11:41:03.000Z | ckanext/metadata/model/workflow_state.py | SAEONData/ckanext-metadata | af1a137e5d924f05ea1835b81f36f808700d3aa7 | [
"MIT"
] | null | null | null | # encoding: utf-8
from sqlalchemy import types, Table, Column, ForeignKey
import vdm.sqlalchemy
from ckan.model import meta, core, types as _types, domain_object
# Table storing metadata workflow states. 'revert_state_id' names the state
# a record falls back to when this state is deleted.
workflow_state_table = Table(
    'workflow_state', meta.metadata,
    Column('id', types.UnicodeText, primary_key=True, default=_types.make_uuid),
    Column('name', types.UnicodeText, nullable=False, unique=True),
    Column('title', types.UnicodeText),
    Column('description', types.UnicodeText),
    Column('workflow_rules_json', types.UnicodeText),
    Column('metadata_records_private', types.Boolean, nullable=False),
    # we implement the self-relation "softly", otherwise revision table
    # auto-generation gets confused about how to join to this table
    Column('revert_state_id', types.UnicodeText),  # ForeignKey('workflow_state.id')),
)

# Add the vdm 'state' column and generate the parallel revision table used
# for versioning.
vdm.sqlalchemy.make_table_stateful(workflow_state_table)
workflow_state_revision_table = core.make_revisioned_table(workflow_state_table)
class WorkflowState(vdm.sqlalchemy.RevisionedObjectMixin,
                    vdm.sqlalchemy.StatefulObjectMixin,
                    domain_object.DomainObject):
    """Domain object for a metadata workflow state (revisioned + stateful)."""

    @classmethod
    def get(cls, reference):
        """
        Returns a workflow_state object referenced by its id or name,
        or None when *reference* is falsy or unknown.
        """
        if not reference:
            return None

        workflow_state = meta.Session.query(cls).get(reference)
        if workflow_state is None:
            # fall back to lookup by unique name
            workflow_state = cls.by_name(reference)
        return workflow_state

    @classmethod
    def get_revert_state(cls, reference):
        """
        Returns the workflow_state object referenced by the revert_state_id
        of the object with the given id or name (None if either lookup fails).
        """
        workflow_state = cls.get(reference)
        if not workflow_state:
            return None
        return cls.get(workflow_state.revert_state_id)

    @classmethod
    def revert_path_exists(cls, from_state_id, to_state_id, _visited=None):
        """
        Determines whether it is possible to change from from_state_id to
        to_state_id by a series of successive reverts.

        Note 1: all states in the path must be active.
        Note 2: this only considers explicit reverts, not the implicit revert
        to null when revert_state_id is empty.

        Args:
            _visited: internal accumulator of state ids already examined;
                guards against infinite recursion when the revert chain in
                the database contains a cycle. Callers should not pass it.
        """
        if not from_state_id or not to_state_id:
            return False
        if from_state_id == to_state_id:
            return False

        # Cycle guard: a revert chain that loops back on itself without
        # reaching the target can never reach it.
        if _visited is None:
            _visited = set()
        if from_state_id in _visited:
            return False
        _visited.add(from_state_id)

        from_state = meta.Session.query(cls) \
            .filter(cls.id == from_state_id) \
            .filter(cls.state == 'active') \
            .first()
        to_state = meta.Session.query(cls) \
            .filter(cls.id == to_state_id) \
            .filter(cls.state == 'active') \
            .first()
        if not from_state or not to_state:
            return False

        if from_state.revert_state_id == to_state_id:
            return True
        # Follow the chain one step further.
        return cls.revert_path_exists(from_state.revert_state_id, to_state_id,
                                      _visited)
# Map WorkflowState onto its table, recording each change in the revision
# table, then wire the vdm state/revision machinery and expose the generated
# per-revision class as WorkflowStateRevision.
meta.mapper(WorkflowState, workflow_state_table,
            extension=[vdm.sqlalchemy.Revisioner(workflow_state_revision_table)])
vdm.sqlalchemy.modify_base_object_mapper(WorkflowState, core.Revision, core.State)
WorkflowStateRevision = vdm.sqlalchemy.create_object_version(
    meta.mapper, WorkflowState, workflow_state_revision_table)
| 35.585106 | 87 | 0.677429 |
2f8d7e873bc13a4ac0a1127578e447e6916863de | 770 | py | Python | jp.atcoder/abc177/abc177_e/27749342.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc177/abc177_e/27749342.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc177/abc177_e/27749342.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | import collections
import typing
def main() -> typing.NoReturn:
    """Read n and a_1..a_n from stdin and print the coprimality class.

    Counts how many of the a_i each prime divides:
    - every count <= 1  -> pairwise coprime
    - max count < n     -> setwise coprime
    - some prime divides all n values -> not coprime
    """
    n = int(input())
    a = list(map(int, input().split()))
    appeared_cnt = collections.defaultdict(int)
    for x in a:
        for p in prime_factorize(x):
            appeared_cnt[p] += 1
    # default=0 guards the all-ones input: no prime appears at all, and a
    # plain max() over the empty view would raise ValueError. Zero shared
    # primes means the values are pairwise coprime.
    max_cnt = max(appeared_cnt.values(), default=0)
    if max_cnt <= 1:
        print('pairwise coprime')
    elif max_cnt < n:
        print('setwise coprime')
    else:
        print('not coprime')
def prime_factorize(n: int) -> typing.DefaultDict[int, int]:
    """Return the prime factorization of n as a mapping {prime: exponent}.

    For n <= 1 the result is empty. Runs trial division up to sqrt of the
    remaining cofactor, so the cost is O(sqrt(n)) in the worst case.
    """
    import collections

    factors = collections.defaultdict(int)
    remainder = n
    divisor = 2
    while divisor * divisor <= remainder:
        while remainder % divisor == 0:
            remainder //= divisor
            factors[divisor] += 1
        divisor += 1
    # Whatever is left after dividing out all small factors is prime.
    if remainder > 1:
        factors[remainder] += 1
    return factors
main()
| 21.388889 | 61 | 0.516883 |
acde3d0e0065ac7181e3afcb9d61888ef9e42eb2 | 1,120 | py | Python | var/spack/repos/builtin/packages/r-biobase/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11 | 2015-10-04T02:17:46.000Z | 2018-02-07T18:23:00.000Z | var/spack/repos/builtin/packages/r-biobase/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22 | 2017-08-01T22:45:10.000Z | 2022-03-10T07:46:31.000Z | var/spack/repos/builtin/packages/r-biobase/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2016-06-10T17:57:39.000Z | 2018-09-11T04:59:38.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RBiobase(RPackage):
    """Biobase: Base functions for Bioconductor.

    Functions that are needed by many other packages or which replace R
    functions."""

    # Bioconductor package name; Spack derives the source location from it.
    bioc = "Biobase"

    # Known releases, pinned to upstream git commits.
    version('2.54.0', commit='8215d76ce44899e6d10fe8a2f503821a94ef6b40')
    version('2.50.0', commit='9927f90d0676382f2f99e099d8d2c8e2e6f1b4de')
    version('2.44.0', commit='bde2077f66047986297ec35a688751cdce150dd3')
    version('2.42.0', commit='3e5bd466b99e3cc4af1b0c3b32687fa56d6f8e4d')
    version('2.40.0', commit='6555edbbcb8a04185ef402bfdea7ed8ac72513a5')
    version('2.38.0', commit='83f89829e0278ac014b0bc6664e621ac147ba424')
    version('2.36.2', commit='15f50912f3fa08ccb15c33b7baebe6b8a59ce075')

    # Build/run dependencies; Biobase 2.42.0+ needs a newer BiocGenerics.
    depends_on('r@2.10:', type=('build', 'run'))
    depends_on('r-biocgenerics@0.3.2:', type=('build', 'run'))
    depends_on('r-biocgenerics@0.27.1:', type=('build', 'run'), when='@2.42.0:')
| 40 | 80 | 0.724107 |
48f88e2f86a8b5ae8b249190e581680dd99fb696 | 11,002 | py | Python | PyDSS/reports.py | dvaidhyn/PyDSS | 0d220d00900da4945e2ab6e7774de5edb58b36a9 | [
"BSD-3-Clause"
] | null | null | null | PyDSS/reports.py | dvaidhyn/PyDSS | 0d220d00900da4945e2ab6e7774de5edb58b36a9 | [
"BSD-3-Clause"
] | null | null | null | PyDSS/reports.py | dvaidhyn/PyDSS | 0d220d00900da4945e2ab6e7774de5edb58b36a9 | [
"BSD-3-Clause"
] | null | null | null | """Creates reports on data exported by PyDSS"""
import abc
import logging
import os
from PyDSS.exceptions import InvalidConfiguration, InvalidParameter
from PyDSS.utils.dataframe_utils import write_dataframe
from PyDSS.utils.utils import dump_data
# Subdirectory of the PyDSS project directory where report files are written.
REPORTS_DIR = "Reports"

logger = logging.getLogger(__name__)
class Reports:
    """Generate reports from a PyDSS project."""

    def __init__(self, results):
        """Collect enabled report names from the simulation config and create
        the output directory under the project path."""
        self._results = results
        self._report_options = results.simulation_config["Reports"]
        self._report_names = [
            cfg["name"] for cfg in self._report_options["Types"] if cfg["enabled"]
        ]
        self._output_dir = os.path.join(results.project_path, REPORTS_DIR)
        os.makedirs(self._output_dir, exist_ok=True)

    @staticmethod
    def append_required_exports(exports, options):
        """Append export properties required by the configured reports.

        Parameters
        ----------
        exports : ExportListReader
        options : dict
            Simulation options

        """
        report_options = options.get("Reports")
        if report_options is None:
            return
        for cfg in report_options["Types"]:
            if not cfg["enabled"]:
                continue
            name = cfg["name"]
            if name not in REPORTS:
                raise InvalidConfiguration(f"{name} is not a valid report")
            required = REPORTS[name].get_required_reports()
            for elem_class, required_properties in required.items():
                for req_prop in required_properties:
                    store_type = req_prop["store_values_type"]
                    # Re-list on each iteration so a property appended below
                    # is visible to later (possibly duplicate) requirements.
                    already_present = any(
                        prop.name == req_prop["property"]
                        and prop.store_values_type.value == store_type
                        for prop in exports.list_element_properties(elem_class)
                    )
                    if not already_present:
                        exports.append_property(elem_class, req_prop)
                        logger.debug("Add required property: %s %s", elem_class, req_prop)

    @classmethod
    def generate_reports(cls, results):
        """Generate all reports specified in the configuration.

        Parameters
        ----------
        results : PyDssResults

        Returns
        -------
        list
            list of report filenames

        """
        return Reports(results).generate()

    def generate(self):
        """Generate all reports specified in the configuration.

        Returns
        -------
        list
            list of report filenames

        """
        return [
            REPORTS[name](self._results, self._report_options).generate(self._output_dir)
            for name in self._report_names
        ]
class ReportBase(abc.ABC):
    """Base class for reports.

    Subclasses receive the PyDssResults object and the "Reports" section of
    the simulation config, and must implement generate() plus the static
    get_required_reports() hook used for export planning.
    """
    def __init__(self, results, report_options):
        # results: PyDssResults instance; report_options: "Reports" config dict.
        self._results = results
        self._report_options = report_options
    @abc.abstractmethod
    def generate(self, output_dir):
        """Generate a report in output_dir.

        Returns
        -------
        str
            path to report

        """
    @staticmethod
    @abc.abstractmethod
    def get_required_reports():
        """Return the properties required for the report for export.

        Returns
        -------
        dict
            maps element class name to a list of required property dicts

        """
class PvClippingReport(ReportBase):
    """Reports PV Clipping for the simulation."""

    FILENAME = "pv_clipping.json"

    def __init__(self, results, report_options):
        """Cache both scenarios and their PV profiles; requires exactly two
        scenarios, ordered pf1 then control_mode."""
        super().__init__(results, report_options)
        assert len(results.scenarios) == 2
        self._pf1_scenario, self._control_mode_scenario = results.scenarios
        self._pv_system_names = self._control_mode_scenario.list_element_names("PVSystems")
        self._pf1_pv_systems = {
            profile["name"]: profile
            for profile in self._pf1_scenario.read_pv_profiles()["pv_systems"]
        }
        self._control_mode_pv_systems = {
            profile["name"]: profile
            for profile in self._control_mode_scenario.read_pv_profiles()["pv_systems"]
        }

    def _get_pv_system_info(self, pv_system, scenario):
        # Look up the stored profile dict for pv_system in the given scenario.
        table = self._pf1_pv_systems if scenario == "pf1" else self._control_mode_pv_systems
        return table[pv_system]

    def calculate_pv_clipping(self, pv_system):
        """Calculate PV clipping for one PV system.

        Returns
        -------
        int

        """
        info = self._get_pv_system_info(pv_system, "control_mode")
        # DC energy from the profile: pmpp * irradiance * sum of the load shape.
        annual_dc_power = info["pmpp"] * info["irradiance"] * info["load_shape_pmult_sum"]
        pf1_real_power = self._pf1_scenario.get_dataframe(
            "PVSystems", "Powers", pv_system, real_only=True
        )
        # Sum the absolute per-column totals of real power in the pf1 scenario.
        annual_pf1_real_power = sum(abs(total) for total in pf1_real_power.sum())
        clipping = annual_dc_power - annual_pf1_real_power
        logger.debug("PV clipping for %s = %s", pv_system, clipping)
        return clipping

    def generate(self, output_dir):
        """Write per-PV-system clipping values as JSON; return the filename."""
        data = {
            "pv_systems": [
                {"name": name, "pv_clipping": self.calculate_pv_clipping(name)}
                for name in self._pv_system_names
            ]
        }
        filename = os.path.join(output_dir, self.FILENAME)
        dump_data(data, filename, indent=2)
        logger.info("Generated PV Clipping report %s", filename)
        return filename

    @staticmethod
    def get_required_reports():
        return {
            "PVSystems": [
                {
                    "property": "Powers",
                    "store_values_type": "all",
                }
            ]
        }
class PvCurtailmentReport(ReportBase):
    """Reports PV Curtailment at every time point in the simulation."""

    FILENAME = "pv_curtailment"

    def __init__(self, results, report_options):
        """Cache both scenarios and their PV profiles; requires exactly two
        scenarios, ordered pf1 then control_mode."""
        super().__init__(results, report_options)
        assert len(results.scenarios) == 2
        self._pf1_scenario = results.scenarios[0]
        self._control_mode_scenario = results.scenarios[1]
        self._pv_system_names = self._control_mode_scenario.list_element_names("PVSystems")
        self._pf1_pv_systems = {
            x["name"]: x for x in self._pf1_scenario.read_pv_profiles()["pv_systems"]
        }
        self._control_mode_pv_systems = {
            x["name"]: x for x in self._control_mode_scenario.read_pv_profiles()["pv_systems"]
        }

    def _get_pv_system_info(self, pv_system, scenario):
        # Return the stored profile dict for pv_system in the given scenario.
        if scenario == "pf1":
            pv_systems = self._pf1_pv_systems
        else:
            pv_systems = self._control_mode_pv_systems
        return pv_systems[pv_system]

    def generate(self, output_dir):
        """Write the per-time-point curtailment table and return its filename.

        The file format/extension comes from the "Format" report option.
        """
        df = self.calculate_pv_curtailment()
        filename = os.path.join(
            output_dir,
            self.FILENAME
        ) + "." + self._report_options["Format"]
        write_dataframe(df, filename, compress=True)
        # Fixed copy/paste defect: this previously logged "PV Clipping".
        logger.info("Generated PV Curtailment report %s", filename)
        return filename

    @staticmethod
    def get_required_reports():
        return {
            "PVSystems": [
                {
                    "property": "Powers",
                    "store_values_type": "all",
                }
            ]
        }

    def calculate_pv_curtailment(self):
        """Calculate PV curtailment for all PV systems.

        Returns
        -------
        pd.DataFrame

        """
        pf1_power = self._pf1_scenario.get_full_dataframe(
            "PVSystems", "Powers", real_only=True
        )
        control_mode_power = self._control_mode_scenario.get_full_dataframe(
            "PVSystems", "Powers", real_only=True
        )
        # TODO: needs work
        return (pf1_power - control_mode_power) / pf1_power * 100
class CapacitorStateChangeReport(ReportBase):
    """Reports the state changes per Capacitor."""

    FILENAME = "capacitor_state_changes.json"

    @staticmethod
    def _change_count(scenario, capacitor):
        # InvalidParameter is mapped to zero changes — presumably raised when
        # no state-change data was recorded for this capacitor (TODO confirm).
        try:
            return int(scenario.get_element_property_number(
                "Capacitors", "TrackStateChanges", capacitor
            ))
        except InvalidParameter:
            return 0

    def generate(self, output_dir):
        """Write per-capacitor state-change counts as JSON; return the filename."""
        data = {"scenarios": []}
        for scenario in self._results.scenarios:
            capacitors = [
                {"name": name, "change_count": self._change_count(scenario, name)}
                for name in scenario.list_element_names("Capacitors")
            ]
            data["scenarios"].append({"name": scenario.name, "capacitors": capacitors})
        filename = os.path.join(output_dir, self.FILENAME)
        dump_data(data, filename, indent=2)
        logger.info("Generated %s", filename)
        return filename

    @staticmethod
    def get_required_reports():
        return {
            "Capacitors": [
                {
                    "property": "TrackStateChanges",
                    "store_values_type": "change_count",
                }
            ]
        }
class RegControlTapNumberChangeReport(ReportBase):
    """Reports the tap number changes per RegControl."""

    FILENAME = "reg_control_tap_number_changes.json"

    def generate(self, output_dir):
        """Write per-RegControl tap-change counts as JSON; return the filename."""
        data = {"scenarios": []}
        for scenario in self._results.scenarios:
            scenario_data = {"name": scenario.name, "reg_controls": []}
            for reg_control in scenario.list_element_names("RegControls"):
                try:
                    change_count = int(scenario.get_element_property_number(
                        "RegControls", "TrackTapNumberChanges", reg_control
                    ))
                except InvalidParameter:
                    # Consistent with CapacitorStateChangeReport: treat a
                    # missing property as zero changes instead of letting one
                    # element fail the whole report.
                    change_count = 0
                changes = {"name": reg_control, "change_count": change_count}
                scenario_data["reg_controls"].append(changes)
            data["scenarios"].append(scenario_data)
        filename = os.path.join(output_dir, self.FILENAME)
        dump_data(data, filename, indent=2)
        logger.info("Generated %s", filename)
        return filename

    @staticmethod
    def get_required_reports():
        return {
            "RegControls": [
                {
                    "property": "TrackTapNumberChanges",
                    "store_values_type": "change_count",
                }
            ]
        }
# Registry mapping report names (as they appear in the simulation config
# under Reports -> Types -> name) to their implementing classes.
REPORTS = {
    "PV Clipping": PvClippingReport,
    "PV Curtailment": PvCurtailmentReport,
    "Capacitor State Change Counts": CapacitorStateChangeReport,
    "RegControl Tap Number Change Counts": RegControlTapNumberChangeReport,
}
| 31.889855 | 94 | 0.59162 |
ffc00ad0b6c81e1d27835ffe4b5ecdba7884455d | 2,043 | py | Python | ecommerce/accounts/views.py | mishelshaji/py-2-ecommerce | e30940609ed191b98585b24baccf4925f404ec32 | [
"MIT"
] | null | null | null | ecommerce/accounts/views.py | mishelshaji/py-2-ecommerce | e30940609ed191b98585b24baccf4925f404ec32 | [
"MIT"
] | null | null | null | ecommerce/accounts/views.py | mishelshaji/py-2-ecommerce | e30940609ed191b98585b24baccf4925f404ec32 | [
"MIT"
] | null | null | null | from django.shortcuts import redirect, render
from django.views.decorators.http import require_POST
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.forms import AuthenticationForm
from .forms import *
# Create your views here.
def user_login(request):
    """Render the combined login/registration page and process login POSTs.

    On a successful login, redirects customers and admins to their home
    pages; otherwise re-renders the page with the bound login form.
    """
    if request.method == "GET":
        blank_context = {
            'login_form': AuthenticationForm(),
            'reg_form': UserForm(),
            'details_form': CustomerDetailsForm(),
        }
        return render(request, 'accounts/login.html', blank_context)
    elif request.method == "POST":
        login_form = AuthenticationForm(data=request.POST)
        if login_form.is_valid():
            user = authenticate(
                request,
                username=login_form.cleaned_data.get('username'),
                password=login_form.cleaned_data.get('password'),
            )
            if user is not None:
                login(request, user)
                if user.is_customer:
                    return redirect('customer_home')
                elif user.is_admin:
                    return redirect('admin_home')
        # Invalid credentials (or a role with no home page): show the page
        # again with the bound login form and fresh registration forms.
        context = {
            'login_form': login_form,
            'reg_form': UserForm(),
            'details_form': CustomerDetailsForm(),
        }
        return render(request, 'accounts/login.html', context)
@require_POST
def register(request):
    """Create a user plus its customer-details record from the POSTed forms.

    On success, hashes the password, links the details to the new user, and
    redirects to the login page; on validation failure, re-renders the login
    page with the bound registration forms.
    """
    user_form = UserForm(request.POST)
    details_form = CustomerDetailsForm(request.POST)
    if user_form.is_valid() and details_form.is_valid():
        user = user_form.save(commit=False)
        # Hash the raw password instead of storing it verbatim.
        user.set_password(user_form.cleaned_data.get('password'))
        user.save()
        details = details_form.save(commit=False)
        details.user = user
        details.save()
        messages.success(request, "Please login now")
        return redirect('accounts_login')
    context = {
        'login_form': AuthenticationForm(),
        'reg_form': user_form,
        'details_form': details_form,
    }
    return render(request, 'accounts/login.html', context)
| 34.627119 | 78 | 0.633872 |
c378b117939d33c1b8c342896d8d83e4f79e49ea | 12,538 | py | Python | tiled/client/constructors.py | zthatch/tiled | 156811c1cc180bb2744bcfcd9e09fc91389f3c93 | [
"BSD-3-Clause"
] | null | null | null | tiled/client/constructors.py | zthatch/tiled | 156811c1cc180bb2744bcfcd9e09fc91389f3c93 | [
"BSD-3-Clause"
] | null | null | null | tiled/client/constructors.py | zthatch/tiled | 156811c1cc180bb2744bcfcd9e09fc91389f3c93 | [
"BSD-3-Clause"
] | null | null | null | import collections
import collections.abc
import urllib.parse
import httpx
from .context import context_from_tree, Context, DEFAULT_TOKEN_CACHE
from .node import Node
from .utils import DEFAULT_ACCEPTED_ENCODINGS, EVENT_HOOKS
from ..utils import import_object
def from_uri(
    uri,
    structure_clients="numpy",
    *,
    cache=None,
    offline=False,
    username=None,
    token_cache=DEFAULT_TOKEN_CACHE,
    special_clients=None,
    verify=True,
    authentication_uri=None,
    headers=None,
):
    """
    Connect to a Node on a local or remote server.

    Parameters
    ----------
    uri : str
        e.g. "http://localhost:8000"
    structure_clients : str or dict, optional
        Use "dask" for delayed data loading and "numpy" for immediate
        in-memory structures (e.g. normal numpy arrays, pandas
        DataFrames). For advanced use, provide dict mapping
        structure_family names ("array", "dataframe", "variable",
        "data_array", "dataset") to client objects. See
        ``Node.DEFAULT_STRUCTURE_CLIENT_DISPATCH``.
    cache : Cache, optional
    offline : bool, optional
        False by default. If True, rely on cache only.
    username : str, optional
        Username for authenticated access.
    token_cache : str, optional
        Path to directory for storing refresh tokens.
    special_clients : dict, optional
        Advanced: Map spec from the server to special client
        tree objects. See also
        ``Node.discover_special_clients()`` and
        ``Node.DEFAULT_SPECIAL_CLIENT_DISPATCH``.
    verify : bool, optional
        Verify SSL certifications. True by default. False is insecure,
        intended for development and testing only.
    authentication_uri : str, optional
        URL of authentication server
    headers : dict, optional
        Extra HTTP headers.
    """
    # The uri is expected to reach the root or /metadata/[...] route.
    url = httpx.URL(uri)
    headers = headers or {}
    headers.setdefault("accept-encoding", ",".join(DEFAULT_ACCEPTED_ENCODINGS))
    params = {}
    # If ?api_key=... is present, move it from the query into a header.
    # The server would accept it in the query parameter, but using
    # a header is a little more secure (e.g. not logged) and makes
    # it is simpler to manage the client.base_url.
    parsed_query = urllib.parse.parse_qs(url.query.decode())
    api_key_list = parsed_query.pop("api_key", None)
    if api_key_list is not None:
        if len(api_key_list) != 1:
            raise ValueError("Cannot handle two api_key query parameters")
        (api_key,) = api_key_list
        headers["X-TILED-API-KEY"] = api_key
    # Bug fix: merge the remaining query parameters as a dict. The previous
    # code passed the *urlencoded string* to dict.update(), which raises
    # ValueError for any non-empty leftover query string. httpx accepts
    # the {name: [values]} mapping produced by parse_qs directly.
    params.update(parsed_query)
    # Construct the URL *without* the params, which we will pass in separately.
    # (The query component is an empty string here, not a dict, so urlunsplit
    # receives the string type it documents.)
    base_uri = urllib.parse.urlunsplit(
        (url.scheme, url.netloc.decode(), url.path, "", url.fragment)
    )
    client = httpx.Client(
        base_url=base_uri,
        verify=verify,
        event_hooks=EVENT_HOOKS,
        timeout=httpx.Timeout(5.0, read=20.0),
        headers=headers,
        params=params,
    )
    context = Context(
        client,
        username=username,
        authentication_uri=authentication_uri,
        cache=cache,
        offline=offline,
        token_cache=token_cache,
    )
    return from_context(
        context,
        structure_clients=structure_clients,
        special_clients=special_clients,
    )
def from_tree(
    tree,
    authentication=None,
    server_settings=None,
    query_registry=None,
    serialization_registry=None,
    compression_registry=None,
    structure_clients="numpy",
    *,
    cache=None,
    offline=False,
    username=None,
    special_clients=None,
    token_cache=DEFAULT_TOKEN_CACHE,
    headers=None,
):
    """
    Connect to a Node directly, running the app in this same process.

    NOTE: This is experimental. It may need to be re-designed or even removed.

    In this configuration, we are using the server, but we are communicating
    with it directly within this process, not over a local network. It is
    generally faster. Specifically, we are using HTTP over ASGI rather than
    HTTP over TCP; there are no sockets or network-related syscalls.

    Parameters
    ----------
    tree : Node
    authentication : dict, optional
        Dict of authentication configuration.
    username : str, optional
        Username for authenticated access.
    structure_clients : str or dict, optional
        Use "dask" for delayed data loading and "numpy" for immediate
        in-memory structures. For advanced use, provide a dict mapping
        structure_family names to client objects. See
        ``Node.DEFAULT_STRUCTURE_CLIENT_DISPATCH``.
    cache : Cache, optional
    offline : bool, optional
        False by default. If True, rely on cache only.
    special_clients : dict, optional
        Advanced: Map spec from the server to special client tree objects.
        See ``Node.discover_special_clients()`` and
        ``Node.DEFAULT_SPECIAL_CLIENT_DISPATCH``.
    token_cache : str, optional
        Path to directory for storing refresh tokens.
    """
    # The cache and "offline" mode do not make much sense when we have an
    # in-process connection, but we support them for the sake of testing and
    # making direct access a drop-in replacement for the normal service.
    context_kwargs = dict(
        tree=tree,
        authentication=authentication,
        server_settings=server_settings,
        query_registry=query_registry,
        serialization_registry=serialization_registry,
        compression_registry=compression_registry,
        cache=cache,
        offline=offline,
        token_cache=token_cache,
        username=username,
        headers=headers,
    )
    context = context_from_tree(**context_kwargs)
    return from_context(
        context,
        structure_clients=structure_clients,
        special_clients=special_clients,
    )
def from_context(
    context,
    structure_clients="numpy",
    *,
    path=None,
    special_clients=None,
):
    """
    Advanced: Connect to a Node using a custom instance of httpx.Client or httpx.AsyncClient.

    Parameters
    ----------
    context : tiled.client.context.Context
    structure_clients : str or dict, optional
        Use "dask" for delayed data loading and "numpy" for immediate
        in-memory structures (e.g. normal numpy arrays, pandas
        DataFrames). For advanced use, provide dict mapping
        structure_family names ("array", "dataframe", "variable",
        "data_array", "dataset") to client objects. See
        ``Node.DEFAULT_STRUCTURE_CLIENT_DISPATCH``.
    path : list, optional
        Path parts locating the node of interest within the tree.
    special_clients : dict, optional
        Advanced: Map spec from the server to special client
        tree objects. See also
        ``Node.discover_special_clients()`` and
        ``Node.DEFAULT_SPECIAL_CLIENT_DISPATCH``.
    """
    # Interpret structure_clients="numpy" and structure_clients="dask" shortcuts.
    if isinstance(structure_clients, str):
        structure_clients = Node.DEFAULT_STRUCTURE_CLIENT_DISPATCH[structure_clients]
    path = path or []
    # Do entrypoint discovery if it hasn't yet been done.
    if Node.DEFAULT_SPECIAL_CLIENT_DISPATCH is None:
        Node.discover_special_clients()
    # User-supplied special clients take precedence over the discovered ones.
    special_clients = collections.ChainMap(
        special_clients or {},
        Node.DEFAULT_SPECIAL_CLIENT_DISPATCH,
    )
    content = context.get_json(f"/metadata/{'/'.join(context.path_parts)}")
    item = content["data"]
    instance = Node(
        context,
        item=item,
        path=path,
        structure_clients=structure_clients,
        special_clients=special_clients,
    )
    # Dispatch to the appropriate client class for this item's spec/family.
    return instance.client_for_item(item, path=path)
def from_profile(name, structure_clients=None, **kwargs):
    """
    Build a Node based a 'profile' (a named configuration).

    List available profiles and the source filepaths from Python like:

    >>> from tiled.client.profiles import list_profiles
    >>> list_profiles()

    or from a CLI like:

    $ tiled profile list

    Or show the file contents like:

    >>> from tiled.client.profiles import load_profiles
    >>> load_profiles()

    or from a CLI like:

    $ tiled profile show PROFILE_NAME

    Any additional parameters override profile content. See from_uri for details.
    """
    from ..profiles import load_profiles, paths, ProfileNotFound

    profiles = load_profiles()
    try:
        filepath, profile_content = profiles[name]
    except KeyError as err:
        raise ProfileNotFound(
            f"Profile {name!r} not found. Found profiles {list(profiles)} "
            f"from directories {paths}."
        ) from err
    # Explicit keyword arguments override the profile's content.
    merged = {**profile_content, **kwargs}
    if structure_clients is not None:
        merged["structure_clients"] = structure_clients
    # Convert a declarative cache spec ({"memory": {...}} or {"disk": {...}})
    # into a Cache object; a non-mapping value is assumed to already be one.
    cache_config = merged.pop("cache", None)
    if cache_config is not None:
        from tiled.client.cache import Cache

        if isinstance(cache_config, collections.abc.Mapping):
            # All necessary validation has already been performed
            # in load_profiles().
            ((key, value),) = cache_config.items()
            if key == "memory":
                cache = Cache.in_memory(**value)
            elif key == "disk":
                cache = Cache.on_disk(**value)
        else:
            # Interpret this as a Cache object passed in directly.
            cache = cache_config
        merged["cache"] = cache
    # structure_clients may be a shortcut string, or a mapping whose values
    # are client classes or importable "module:attr" strings to resolve now.
    structure_clients_ = merged.pop("structure_clients", None)
    if structure_clients_ is not None:
        if isinstance(structure_clients_, str):
            # Nothing to do.
            merged["structure_clients"] = structure_clients_
        else:
            # This is a dict mapping structure families like "array" and "dataframe"
            # to values. The values may be client objects or importable strings.
            result = {}
            for key, value in structure_clients_.items():
                if isinstance(value, str):
                    class_ = import_object(value)
                else:
                    class_ = value
                result[key] = class_
            merged["structure_clients"] = result
    if "direct" in merged:
        # The profiles specifies that there is no server. We should create
        # an app ourselves and use it directly via ASGI.
        from ..config import construct_serve_tree_kwargs

        serve_tree_kwargs = construct_serve_tree_kwargs(
            merged.pop("direct", None), source_filepath=filepath
        )
        return from_tree(**serve_tree_kwargs, **merged)
    else:
        return from_uri(**merged)
def from_config(
    config,
    authentication_uri=None,
    username=None,
    cache=None,
    offline=False,
    token_cache=DEFAULT_TOKEN_CACHE,
    **kwargs,
):
    """
    Build Nodes directly, running the app in this same process.

    NOTE: This is experimental. It may need to be re-designed or even removed.

    Parameters
    ----------
    config : str or dict
        May be:

        * Path to config file
        * Path to directory of config files
        * Dict of config

    Examples
    --------

    From config file:

    >>> from_config("path/to/file.yml")

    From directory of config files:

    >>> from_config("path/to/directory")

    From configuration given directly, as dict:

    >>> from_config(
            {
                "trees":
                    [
                        {
                            "path": "/",
                            "tree": "tiled.files.Node.from_files",
                            "args": {"directory": "path/to/files"}
                        }
                    ]
            }
        )
    """

    from ..config import construct_serve_tree_kwargs

    serve_tree_kwargs = construct_serve_tree_kwargs(config)
    context = context_from_tree(
        # NOTE(review): authentication_uri is accepted but currently unused —
        # the pass-through below is deliberately commented out; confirm intent.
        # authentication_uri=authentication_uri,
        username=username,
        cache=cache,
        offline=offline,
        token_cache=token_cache,
        **serve_tree_kwargs,
    )
    return from_context(context, **kwargs)
| 32.908136 | 93 | 0.648828 |
0b2edbb644269a5a19208c9090aa14af45638a69 | 4,124 | py | Python | bentoml/adapters/tensorflow_tensor_input.py | narennadig/BentoML | c4c4e1a0f0273804be7bcc36b7a5dab9e61dda0e | [
"Apache-2.0"
] | null | null | null | bentoml/adapters/tensorflow_tensor_input.py | narennadig/BentoML | c4c4e1a0f0273804be7bcc36b7a5dab9e61dda0e | [
"Apache-2.0"
] | null | null | null | bentoml/adapters/tensorflow_tensor_input.py | narennadig/BentoML | c4c4e1a0f0273804be7bcc36b7a5dab9e61dda0e | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import traceback
from typing import Iterable, Sequence, Tuple
from bentoml.adapters.string_input import StringInput
from bentoml.adapters.utils import TF_B64_KEY
from bentoml.types import InferenceTask, JsonSerializable
def b64_hook(o):
    """JSON ``object_hook`` that unwraps TF-serving style base64 payloads.

    A dict of the form ``{TF_B64_KEY: "<base64>"}`` is replaced by the decoded
    bytes; every other object is passed through unchanged.
    """
    if not (isinstance(o, dict) and TF_B64_KEY in o):
        return o
    return base64.b64decode(o[TF_B64_KEY])
# Positional-argument tuple handed to the user's API function:
# a single sequence of parsed, JSON-compatible instances.
ApiFuncArgs = Tuple[
    Sequence[JsonSerializable],
]
class TfTensorInput(StringInput):
    """
    Tensor input adapter for Tensorflow models.
    Transform incoming tf tensor data from http request, cli or lambda event into
    tf tensor.
    The behaviour should be compatible with tensorflow serving REST API:
    * https://www.tensorflow.org/tfx/serving/api_rest#classify_and_regress_api
    * https://www.tensorflow.org/tfx/serving/api_rest#predict_api

    Args:
        * method: equivalence of serving API methods: (predict, classify, regress)

    Raises:
        BentoMLException: BentoML currently doesn't support Content-Type
    """

    BATCH_MODE_SUPPORTED = True
    # Chained assignment: METHODS is the tuple AND each name is bound
    # individually (PREDICT == "predict", etc.).
    METHODS = (PREDICT, CLASSIFY, REGRESS) = ("predict", "classify", "regress")

    def __init__(self, method=PREDICT, **base_kwargs):
        """Store the serving API method; other kwargs go to StringInput."""
        super().__init__(**base_kwargs)
        self.method = method

    @property
    def config(self):
        # Extend the parent adapter config with the configured method.
        base_config = super().config
        return dict(base_config, method=self.method,)

    @property
    def request_schema(self):
        """OpenAPI request schema; only the "predict" method is supported."""
        if self.method == self.PREDICT:
            return {
                "application/json": {
                    "schema": {
                        "type": "object",
                        "properties": {
                            "signature_name": {"type": "string", "default": None},
                            "instances": {
                                "type": "array",
                                "items": {"type": "object"},
                                "default": None,
                            },
                            "inputs": {"type": "object", "default": None},
                        },
                    }
                }
            }
        else:
            raise NotImplementedError(f"method {self.method} is not implemented")

    def extract_user_func_args(
        self, tasks: Iterable[InferenceTask[str]]
    ) -> ApiFuncArgs:
        """Parse each task's JSON body and collect "instances" into one tf tensor.

        Tasks without an "instances" field are discarded with HTTP 400;
        malformed JSON is 400; any other failure is 500. The surviving
        instances from all tasks are concatenated into a single tf.constant.
        """
        import tensorflow as tf

        instances_list = []
        for task in tasks:
            try:
                # b64_hook decodes TF-serving style {TF_B64_KEY: ...} wrappers.
                parsed_json = json.loads(task.data, object_hook=b64_hook)
                if parsed_json.get("instances") is None:
                    # Only the "instances" request form is supported; the
                    # "inputs" form from the schema is rejected here.
                    task.discard(
                        http_status=400, err_msg="input format is not implemented",
                    )
                else:
                    instances = parsed_json.get("instances")
                    if (
                        task.http_headers.is_batch_input
                        or task.http_headers.is_batch_input is None
                    ):
                        # Batch request (or unspecified, treated as batch):
                        # record the batch size and splice instances in flat.
                        task.batch = len(instances)
                        instances_list.extend(instances)
                    else:
                        # Explicit single-instance request: keep as one item.
                        instances_list.append(instances)
            except json.JSONDecodeError:
                task.discard(http_status=400, err_msg="Not a valid JSON format")
            except Exception:  # pylint: disable=broad-except
                err = traceback.format_exc()
                task.discard(http_status=500, err_msg=f"Internal Server Error: {err}")
        parsed_tensor = tf.constant(instances_list)
        return (parsed_tensor,)
c76196680b6678b4d4c8ba6148f1a6e31d8733d7 | 8,475 | py | Python | 3_Inference/Detector.py | imsanika03/TrainYourOwnYOLO | c03d78f1d9594b44f39096bfb8aa018dfcfff908 | [
"MIT"
] | null | null | null | 3_Inference/Detector.py | imsanika03/TrainYourOwnYOLO | c03d78f1d9594b44f39096bfb8aa018dfcfff908 | [
"MIT"
] | null | null | null | 3_Inference/Detector.py | imsanika03/TrainYourOwnYOLO | c03d78f1d9594b44f39096bfb8aa018dfcfff908 | [
"MIT"
] | null | null | null | import os
import sys
def get_parent_dir(n=1):
    """Return the n-th parent directory of the directory containing this file.

    ``n=0`` yields the file's own directory, ``n=1`` its parent, and so on.
    """
    path = os.path.dirname(os.path.abspath(__file__))
    for _ in range(n):
        path = os.path.dirname(path)
    return path
# Make the training sources and shared utilities importable by adding the
# sibling "2_Training/src" and "Utils" directories to sys.path.
src_path = os.path.join(get_parent_dir(1), "2_Training", "src")
utils_path = os.path.join(get_parent_dir(1), "Utils")
sys.path.append(src_path)
sys.path.append(utils_path)
import argparse
from keras_yolo3.yolo import YOLO, detect_video
from PIL import Image
from timeit import default_timer as timer
from utils import load_extractor_model, load_features, parse_input, detect_object
import test
import utils
import pandas as pd
import numpy as np
from Get_File_Paths import GetFileList
import random
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# Set up folder names for default values
data_folder = os.path.join(get_parent_dir(n=1), "Data")
image_folder = os.path.join(data_folder, "Source_Images")
image_test_folder = os.path.join(image_folder, "Test_Images")
detection_results_folder = os.path.join(image_folder, "Test_Image_Detection_Results")
detection_results_file = os.path.join(detection_results_folder, "Detection_Results.csv")
model_folder = os.path.join(data_folder, "Model_Weights")
model_weights = os.path.join(model_folder, "trained_weights_final.h5")
model_classes = os.path.join(model_folder, "data_classes.txt")
anchors_path = os.path.join(src_path, "keras_yolo3", "model_data", "yolo_anchors.txt")
FLAGS = None
if __name__ == "__main__":
    # Delete all default flags; SUPPRESS makes the explicit defaults below the
    # only source of default values.
    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
    # Command line options
    parser.add_argument(
        "--input_path",
        type=str,
        default=image_test_folder,
        help="Path to image/video directory. All subdirectories will be included. Default is "
        + image_test_folder,
    )
    parser.add_argument(
        "--output",
        type=str,
        default=detection_results_folder,
        help="Output path for detection results. Default is "
        + detection_results_folder,
    )
    parser.add_argument(
        "--no_save_img",
        default=False,
        action="store_true",
        help="Only save bounding box coordinates but do not save output images with annotated boxes. Default is False.",
    )
    parser.add_argument(
        "--file_types",
        "--names-list",
        nargs="*",
        default=[],
        help="Specify list of file types to include. Default is --file_types .jpg .jpeg .png .mp4",
    )
    parser.add_argument(
        "--yolo_model",
        type=str,
        dest="model_path",
        default=model_weights,
        help="Path to pre-trained weight files. Default is " + model_weights,
    )
    parser.add_argument(
        "--anchors",
        type=str,
        dest="anchors_path",
        default=anchors_path,
        help="Path to YOLO anchors. Default is " + anchors_path,
    )
    parser.add_argument(
        "--classes",
        type=str,
        dest="classes_path",
        default=model_classes,
        help="Path to YOLO class specifications. Default is " + model_classes,
    )
    parser.add_argument(
        "--gpu_num", type=int, default=1, help="Number of GPU to use. Default is 1"
    )
    parser.add_argument(
        "--confidence",
        type=float,
        dest="score",
        default=0.25,
        help="Threshold for YOLO object confidence score to show predictions. Default is 0.25.",
    )
    parser.add_argument(
        "--box_file",
        type=str,
        dest="box",
        default=detection_results_file,
        help="File to save bounding box results to. Default is "
        + detection_results_file,
    )
    parser.add_argument(
        "--postfix",
        type=str,
        dest="postfix",
        default="_catface",
        help='Specify the postfix for images with bounding boxes. Default is "_catface"',
    )
    FLAGS = parser.parse_args()
    save_img = not FLAGS.no_save_img
    file_types = FLAGS.file_types
    print("Found input path")
    if file_types:
        input_paths = GetFileList(FLAGS.input_path, endings=file_types)
    else:
        input_paths = GetFileList(FLAGS.input_path)
    # Split images and videos.
    # BUGFIX: the original tuple listed ".jpg" twice and omitted ".jpeg",
    # although the --file_types help text advertises .jpeg support.
    img_endings = (".jpg", ".jpeg", ".png", ".JPG")
    vid_endings = (".mp4", ".mpeg", ".mpg", ".avi")
    input_image_paths = []
    input_video_paths = []
    print("Looking for images")
    for item in input_paths:
        if item.endswith(img_endings):
            print("Found images")
            input_image_paths.append(item)
        elif item.endswith(vid_endings):
            input_video_paths.append(item)
    output_path = FLAGS.output
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    # define YOLO detector
    yolo = YOLO(
        **{
            "model_path": FLAGS.model_path,
            "anchors_path": FLAGS.anchors_path,
            "classes_path": FLAGS.classes_path,
            "score": FLAGS.score,
            "gpu_num": FLAGS.gpu_num,
            "model_image_size": (416, 416),
        }
    )
    # Column schema shared by the in-memory results and the output CSV.
    result_columns = [
        "image",
        "image_path",
        "xmin",
        "ymin",
        "xmax",
        "ymax",
        "label",
        "confidence",
        "x_size",
        "y_size",
    ]
    # labels to draw on images; the context manager guarantees the file handle
    # is closed (the original leaked the open file object).
    with open(FLAGS.classes_path, "r") as class_file:
        input_labels = [line.rstrip("\n") for line in class_file.readlines()]
    print("Found {} input labels: {} ...".format(len(input_labels), input_labels))
    if input_image_paths:
        print(
            "Found {} input images: {} ...".format(
                len(input_image_paths),
                [os.path.basename(f) for f in input_image_paths[:5]],
            )
        )
        start = timer()
        detection_rows = []
        # This is for images
        for img_path in input_image_paths:
            print(img_path)
            prediction, image = detect_object(
                yolo,
                img_path,
                save_img=save_img,
                save_img_path=FLAGS.output,
                postfix=FLAGS.postfix,
            )
            y_size, x_size, _ = np.array(image).shape
            for single_prediction in prediction:
                # One row per detected box: image id, box coords, label,
                # confidence, and the source image dimensions.
                detection_rows.append(
                    [
                        os.path.basename(img_path.rstrip("\n")),
                        img_path.rstrip("\n"),
                    ]
                    + single_prediction
                    + [x_size, y_size]
                )
        end = timer()
        print(
            "Processed {} images in {:.1f}sec - {:.1f}FPS".format(
                len(input_image_paths),
                end - start,
                len(input_image_paths) / (end - start),
            )
        )
        # Build the DataFrame once; the original per-row DataFrame.append was
        # quadratic and is removed in pandas >= 2.0.
        out_df = pd.DataFrame(detection_rows, columns=result_columns)
        out_df.to_csv(FLAGS.box, index=False)
    # This is for videos
    if input_video_paths:
        print(
            "Found {} input videos: {} ...".format(
                len(input_video_paths),
                [os.path.basename(f) for f in input_video_paths[:5]],
            )
        )
        start = timer()
        for vid_path in input_video_paths:
            output_path = os.path.join(
                FLAGS.output,
                os.path.basename(vid_path).replace(".", FLAGS.postfix + "."),
            )
            detect_video(yolo, vid_path, output_path=output_path)
        end = timer()
        print(
            "Processed {} videos in {:.1f}sec".format(
                len(input_video_paths), end - start
            )
        )
    # Close the current yolo session
    yolo.close_session()
| 29.224138 | 120 | 0.554454 |
1c295f4a3e105467abaf2c4d1e2dd38fb48202a7 | 7,563 | py | Python | migratemonitors.py | psi09/nr-account-migration | 31ab8a2d998b2ee66305fe454346f3c874982f42 | [
"Apache-2.0"
] | null | null | null | migratemonitors.py | psi09/nr-account-migration | 31ab8a2d998b2ee66305fe454346f3c874982f42 | [
"Apache-2.0"
] | null | null | null | migratemonitors.py | psi09/nr-account-migration | 31ab8a2d998b2ee66305fe454346f3c874982f42 | [
"Apache-2.0"
] | null | null | null | import argparse
import os
import sys
import time
import library.localstore as store
import library.migrationlogger as migrationlogger
from library.clients.monitorsclient import get_monitor, post_monitor_definition, populate_script, \
put_script, apply_tags
import library.monitortypes as monitortypes
import library.securecredentials as securecredentials
import library.status.monitorstatus as mskeys
import library.utils as utils
# migratemonitors must be used after doing a fetchmonitors
# specify the source account, timestamp that you want to migrate
# Also specify the targetAccount and targetApiKey to which you want to migrate the stored monitors
logger = migrationlogger.get_logger(os.path.basename(__file__))
headers = {}
args = None
fetch_latest = True
parser = argparse.ArgumentParser(description='Migrate Synthetic Monitors from one account to another')
# The following two constants are used to create the alert policy to which the monitor check alerts are migrated
SYNTHETICS_ALERT_POLICY_NAME = 'Synthetics Check Failures'
INCIDENT_PREFERENCE_OPTIONS = {'PER_POLICY': 'PER_POLICY', 'PER_CONDITION': 'PER_CONDITION',
'PER_CONDITION_TARGET': 'PER_CONDITION PER_CONDITION_AND_TARGET'}
def setup_params():
    """Register all command-line arguments on the module-level ``parser``.

    Mutates the global parser; call once before ``parser.parse_args()``.
    Every value arrives as a one-element list because ``nargs=1`` is used,
    hence the ``args.x[0]`` indexing elsewhere in this module.
    """
    parser.add_argument('--fromFile', nargs=1, type=str, required=True, help='Path to file with monitor names')
    parser.add_argument('--sourceAccount', nargs=1, type=str, required=True, help='Source accountId local Store \
    like db/<sourceAccount>/monitors .')
    parser.add_argument('--sourceApiKey', nargs=1, type=str, required=True, help='Source account API Key, \
    ignored if useLocal is passed')
    # NOTE(review): help text mentions ENV_SOURCE_API_KEY for --targetAccount;
    # looks like a copy/paste slip — verify the intended variable name.
    parser.add_argument('--targetAccount', nargs=1, type=str, required=True, help='Target accountId or \
    set environment variable ENV_SOURCE_API_KEY')
    parser.add_argument('--targetApiKey', nargs=1, type=str, required=True, help='Target API Key, \
    or set environment variable ENV_TARGET_API_KEY')
    parser.add_argument('--personalApiKey', nargs=1, type=str, required=True, help='Personal API Key for GraphQL client \
    alternately environment variable ENV_PERSONAL_API_KEY')
    parser.add_argument('--timeStamp', nargs=1, type=str, required=True, help='timeStamp to migrate')
    # Boolean flag: when present, monitors are taken from the local store
    # instead of being re-fetched from the source account.
    parser.add_argument('--useLocal', dest='useLocal', required=False, action='store_true',
                        help='By default latest monitors are fetched. Pass this argument to useLocal')
# prints args and also sets the fetch_latest flag
def print_args(target_api_key, per_api_key):
    """Log the effective runtime configuration.

    API keys are masked to their final four characters before logging.
    Side effect: switches the module-level ``fetch_latest`` flag to False
    when ``--useLocal`` was passed.

    :param target_api_key: resolved target account API key (logged masked)
    :param per_api_key: resolved personal API key (logged masked)
    """
    global fetch_latest
    logger.info("Using fromFile : " + args.fromFile[0])
    logger.info("Using sourceAccount : " + str(args.sourceAccount[0]))
    if args.sourceApiKey:
        # Mask all but the last 4 characters of the key.
        logger.info("Using sourceApiKey(ignored if --useLocal is passed) : " +
                    len(args.sourceApiKey[0][:-4])*"*"+args.sourceApiKey[0][-4:])
    if args.useLocal:
        fetch_latest = False
        logger.info("Using useLocal : " + str(args.useLocal))
        logger.info("Switched fetch_latest to :" + str(fetch_latest))
    else:
        logger.info("Default fetch_latest :" + str(fetch_latest))
    logger.info("Using targetAccount : " + str(args.targetAccount[0]))
    logger.info("Using targetApiKey : " + len(target_api_key[:-4])*"*"+target_api_key[-4:])
    logger.info("Using personalApiKey : " + len(per_api_key[:-4]) * "*" + per_api_key[-4:])
    logger.info("Using timeStamp : " + args.timeStamp[0])
def ensure_target_api_key():
    """Resolve the target account API key.

    Prefers the --targetApiKey CLI argument, falling back to the
    ENV_TARGET_API_KEY environment variable. Exits the process with an
    error message when neither source yields a non-empty key.

    :return: the resolved API key string
    """
    key = args.targetApiKey[0] if args.targetApiKey else os.environ.get('ENV_TARGET_API_KEY')
    if not key:
        logger.error('Error: Missing param targetApiKey or env variable ENV_TARGET_API_KEY .\n \
        e.g. export ENV_TARGET_API_KEY="NRAA7893asdfhkh" or pass as param')
        sys.exit()
    return key
def get_labels(source_monitor_id, all_monitor_labels):
    """Return the label list recorded for *source_monitor_id*.

    Falls back to an empty list when no labels were loaded at all, or when
    the monitor has no entry in the label mapping.
    """
    if not all_monitor_labels:
        return []
    return all_monitor_labels.get(source_monitor_id, [])
def migrate(all_monitors_json, src_account_id, src_api_key, tgt_acct_id, tgt_api_key, per_api_key):
    """Copy each monitor definition into the target account.

    For every monitor: optionally refresh the stored definition from the
    source account, post the definition to the target, upload the script
    body for scripted monitors, then re-apply the source account's labels
    as tags. Finally, secure-credential placeholders are created for all
    scripted monitors in one batch.

    :param all_monitors_json: monitor records loaded from the local store
    :param src_account_id: source account id (used to load stored labels)
    :param src_api_key: source account API key (used when ``fetch_latest``)
    :param tgt_acct_id: target account id (used for tag application)
    :param tgt_api_key: target account API key
    :param per_api_key: personal API key for the tagging client
    :return: dict keyed by monitor name with per-monitor migration status
    """
    monitor_status = {}
    all_monitor_labels = store.load_monitor_labels(src_account_id)
    logger.info("Loaded labels " + str(len(all_monitor_labels)))
    scripted_monitors = []
    for monitor_json in all_monitors_json:
        logger.debug(monitor_json)
        monitor_name = monitor_json['definition']['name']
        source_monitor_id = monitor_json['definition']['id']
        if fetch_latest:
            # Refresh with the live definition from the source account;
            # skip monitors that no longer exist there.
            result = get_monitor(src_api_key, source_monitor_id)
            if result['status'] != 200:
                logger.error('Did not find monitor ' + source_monitor_id)
                logger.error(result)
                continue
            monitor_json['definition'] = result['monitor']
        post_monitor_definition(tgt_api_key, monitor_name, monitor_json, monitor_status)
        if monitortypes.is_scripted(monitor_json['definition']):
            # Scripted monitors additionally carry a script body and
            # secure-credential / check-count bookkeeping.
            scripted_monitors.append(monitor_json)
            if fetch_latest:
                populate_script(src_api_key, monitor_json, source_monitor_id)
            put_script(tgt_api_key, monitor_json, monitor_name, monitor_status)
            logger.info(monitor_status)
            monitor_status[monitor_name][mskeys.SEC_CREDENTIALS] = monitor_json[mskeys.SEC_CREDENTIALS]
            monitor_status[monitor_name][mskeys.CHECK_COUNT] = monitor_json[mskeys.CHECK_COUNT]
        # need a delay to wait for the guid to be indexed
        time.sleep(0.5)
        monitor_labels = get_labels(source_monitor_id, all_monitor_labels)
        apply_tags(tgt_acct_id, per_api_key, monitor_labels, monitor_name, monitor_status)
        logger.debug(monitor_status[monitor_name])
    securecredentials.create(tgt_api_key, scripted_monitors)
    return monitor_status
def migrate_monitors(from_file, src_acct, src_api_key, time_stamp, tgt_acct_id, target_api_key, per_api_key):
    """Load the named monitors from the local store and migrate them.

    Writes a per-monitor status CSV named
    ``<sourceAccount>_<fromFileName>_<targetAccount>.csv``.
    """
    names_to_move = store.load_names(from_file)
    logger.debug(names_to_move)
    stored_monitors = store.load_monitors(src_acct, time_stamp, names_to_move)
    status_by_monitor = migrate(stored_monitors, src_acct, src_api_key, tgt_acct_id, target_api_key, per_api_key)
    logger.debug(status_by_monitor)
    base_name = utils.file_name_from(from_file)
    status_csv = "_".join([src_acct, base_name, tgt_acct_id]) + ".csv"
    store.save_status_csv(status_csv, status_by_monitor, mskeys)
def main():
    """CLI entry point: parse arguments, validate keys, run the migration."""
    setup_params()
    global args
    args = parser.parse_args()
    # ensure_target_api_key() already exits on a missing key; the second
    # guard below mirrors the original defensive check.
    tgt_key = ensure_target_api_key()
    if not tgt_key:
        utils.error_and_exit('target_api_key', 'ENV_TARGET_API_KEY')
    per_key = utils.ensure_personal_api_key(args)
    if not per_key:
        utils.error_and_exit('personal_api_key', 'ENV_PERSONAL_API_KEY')
    print_args(tgt_key, per_key)
    migrate_monitors(args.fromFile[0], args.sourceAccount[0], args.sourceApiKey[0],
                     args.timeStamp[0], args.targetAccount[0], tgt_key, per_key)
if __name__ == '__main__':
main()
| 50.42 | 127 | 0.693772 |
b0aaa8d4804628e399e3e960bdf90657addfe094 | 224 | py | Python | setupIBM.py | hpssjellis/qiskit-community-tutorials | 94ccdd7ef7197c452e57839142c054ff8b7171a9 | [
"Apache-2.0"
] | 1 | 2021-05-11T22:13:28.000Z | 2021-05-11T22:13:28.000Z | setupIBM.py | hpssjellis/qiskit-community-tutorials | 94ccdd7ef7197c452e57839142c054ff8b7171a9 | [
"Apache-2.0"
] | null | null | null | setupIBM.py | hpssjellis/qiskit-community-tutorials | 94ccdd7ef7197c452e57839142c054ff8b7171a9 | [
"Apache-2.0"
] | 1 | 2021-05-11T22:13:34.000Z | 2021-05-11T22:13:34.000Z | #from qiskit import IBMQ
#IBMQ.save_account('MY_API_TOKEN')
#
# Log in and copy your IBM Quantum account API token from:
# https://quantum-computing.ibm.com/account
# Replace 'MY_API_TOKEN' below with that token before running this script.
# (save_account presumably persists the token for later sessions — see the
# qiskit IBMQ provider documentation.)
from qiskit import IBMQ
IBMQ.save_account('MY_API_TOKEN')
| 20.363636 | 56 | 0.790179 |
0517863a19c0278c1e39f2cb12b1f70fbf7e98d2 | 816 | py | Python | migrations/versions/40f6977ec0bf_make_session_id_on_form_response_not_.py | drewbent/duo-backend | 5a947ff2bf90aa92104d8fd15e63c9b31226852a | [
"MIT"
] | null | null | null | migrations/versions/40f6977ec0bf_make_session_id_on_form_response_not_.py | drewbent/duo-backend | 5a947ff2bf90aa92104d8fd15e63c9b31226852a | [
"MIT"
] | null | null | null | migrations/versions/40f6977ec0bf_make_session_id_on_form_response_not_.py | drewbent/duo-backend | 5a947ff2bf90aa92104d8fd15e63c9b31226852a | [
"MIT"
] | null | null | null | """Make session id on form_response not nullable
Revision ID: 40f6977ec0bf
Revises: d99bd085cddc
Create Date: 2020-02-17 16:35:14.531274
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '40f6977ec0bf'
down_revision = 'd99bd085cddc'
branch_labels = None
depends_on = None
def upgrade():
    """Apply: make form_response.session_id NOT NULL."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column(
        'form_response',
        'session_id',
        existing_type=sa.INTEGER(),
        nullable=False,
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert: allow NULL again in form_response.session_id."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column(
        'form_response',
        'session_id',
        existing_type=sa.INTEGER(),
        nullable=True,
    )
    # ### end Alembic commands ###
| 24.727273 | 65 | 0.664216 |
6c709c529cabdff6fb41c3f6e58b2bc52ed29624 | 659 | py | Python | fireworks/examples/tutorial_examples/introduction.py | jmmshn/fireworks | 5c2f0586e76ab08cadf8b9f4f85638d838f15448 | [
"BSD-3-Clause-LBNL"
] | 251 | 2015-01-05T17:44:47.000Z | 2022-03-28T07:25:42.000Z | fireworks/examples/tutorial_examples/introduction.py | jmmshn/fireworks | 5c2f0586e76ab08cadf8b9f4f85638d838f15448 | [
"BSD-3-Clause-LBNL"
] | 332 | 2015-01-06T18:40:53.000Z | 2022-03-18T04:44:33.000Z | fireworks/examples/tutorial_examples/introduction.py | jmmshn/fireworks | 5c2f0586e76ab08cadf8b9f4f85638d838f15448 | [
"BSD-3-Clause-LBNL"
] | 176 | 2015-01-16T14:06:53.000Z | 2022-02-15T00:45:57.000Z | """
This code is described in the Introductory tutorial,
https://materialsproject.github.io/fireworks/introduction.html
"""
from fireworks import Firework, LaunchPad, ScriptTask
from fireworks.core.rocket_launcher import launch_rocket
if __name__ == "__main__":
    # set up the LaunchPad with default connection settings
    launchpad = LaunchPad()
    # launchpad.reset('', require_password=False)
    # create the Firework consisting of a single shell-command task
    firetask = ScriptTask.from_str('echo "howdy, your job launched successfully!"')
    firework = Firework(firetask)
    # store the workflow in the LaunchPad, then execute it on this machine
    launchpad.add_wf(firework)
    launch_rocket(launchpad)
| 31.380952 | 83 | 0.751138 |
75471d41fe25ead143a94b83944eb71f60b7d114 | 2,946 | py | Python | tabular/src/autogluon/tabular/models/lgb/hyperparameters/parameters.py | zhiqiangdon/autogluon | 71ee7ef0f05d8f0aad112d8c1719174aa33194d9 | [
"Apache-2.0"
] | 4,462 | 2019-12-09T17:41:07.000Z | 2022-03-31T22:00:41.000Z | tabular/src/autogluon/tabular/models/lgb/hyperparameters/parameters.py | zhiqiangdon/autogluon | 71ee7ef0f05d8f0aad112d8c1719174aa33194d9 | [
"Apache-2.0"
] | 1,408 | 2019-12-09T17:48:59.000Z | 2022-03-31T20:24:12.000Z | tabular/src/autogluon/tabular/models/lgb/hyperparameters/parameters.py | zhiqiangdon/autogluon | 71ee7ef0f05d8f0aad112d8c1719174aa33194d9 | [
"Apache-2.0"
] | 623 | 2019-12-10T02:04:18.000Z | 2022-03-20T17:11:01.000Z | """ Default (fixed) hyperparameter values used in Gradient Boosting model. """
from autogluon.core.constants import BINARY, MULTICLASS, REGRESSION, SOFTCLASS
DEFAULT_NUM_BOOST_ROUND = 10000 # default for single training run
def get_param_baseline_custom(problem_type):
    """Return the custom (tuned) parameter preset for *problem_type*.

    Unknown problem types fall back to the binary preset, matching the
    original if/elif chain's final else branch.
    """
    dispatch = {
        BINARY: get_param_binary_baseline_custom,
        MULTICLASS: get_param_multiclass_baseline_custom,
        REGRESSION: get_param_regression_baseline_custom,
        SOFTCLASS: get_param_softclass_baseline_custom,
    }
    return dispatch.get(problem_type, get_param_binary_baseline_custom)()
def get_param_baseline(problem_type):
    """Return the default parameter preset for *problem_type*.

    Unknown problem types fall back to the binary preset, matching the
    original if/elif chain's final else branch.
    """
    dispatch = {
        BINARY: get_param_binary_baseline,
        MULTICLASS: get_param_multiclass_baseline,
        REGRESSION: get_param_regression_baseline,
        SOFTCLASS: get_param_softclass_baseline,
    }
    return dispatch.get(problem_type, get_param_binary_baseline)()
def get_param_multiclass_baseline_custom():
    """Custom multiclass preset (lower learning rate than the defaults)."""
    # TODO: Bin size max increase
    return {
        'learning_rate': 0.03,
        'num_leaves': 128,
        'feature_fraction': 0.9,
        'min_data_in_leaf': 3,
    }
def get_param_binary_baseline():
    """Default training parameters for binary classification.

    Differs from the multiclass/regression presets only in 'objective'.
    """
    return {
        'num_boost_round': DEFAULT_NUM_BOOST_ROUND,
        'num_threads': -1,
        'learning_rate': 0.05,
        'objective': 'binary',
        'verbose': -1,
        'boosting_type': 'gbdt',
        'two_round': True,
    }
def get_param_multiclass_baseline():
    """Default training parameters for multiclass classification.

    Differs from the binary/regression presets only in 'objective'.
    """
    return {
        'num_boost_round': DEFAULT_NUM_BOOST_ROUND,
        'num_threads': -1,
        'learning_rate': 0.05,
        'objective': 'multiclass',
        'verbose': -1,
        'boosting_type': 'gbdt',
        'two_round': True,
    }
def get_param_regression_baseline():
    """Default training parameters for regression.

    Differs from the binary/multiclass presets only in 'objective'.
    """
    return {
        'num_boost_round': DEFAULT_NUM_BOOST_ROUND,
        'num_threads': -1,
        'learning_rate': 0.05,
        'objective': 'regression',
        'verbose': -1,
        'boosting_type': 'gbdt',
        'two_round': True,
    }
def get_param_binary_baseline_custom():
    """Custom binary preset (lower learning rate than the defaults)."""
    return {
        'learning_rate': 0.03,
        'num_leaves': 128,
        'feature_fraction': 0.9,
        'min_data_in_leaf': 5,
    }
def get_param_regression_baseline_custom():
    """Custom regression preset; identical values to the binary custom preset."""
    return {
        'learning_rate': 0.03,
        'num_leaves': 128,
        'feature_fraction': 0.9,
        'min_data_in_leaf': 5,
    }
def get_param_softclass_baseline():
    """Soft-class defaults: the multiclass baseline without a 'metric' entry."""
    base = get_param_multiclass_baseline()
    return {key: val for key, val in base.items() if key != 'metric'}
def get_param_softclass_baseline_custom():
    """Soft-class custom preset: the multiclass custom preset without 'metric'."""
    base = get_param_multiclass_baseline_custom()
    return {key: val for key, val in base.items() if key != 'metric'}
| 25.842105 | 78 | 0.652749 |
2ade7fffbc61a1d77b2e7de82abd64bd338f215b | 21,172 | py | Python | oscar/lib/python2.7/site-packages/PIL/ImageFont.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/PIL/ImageFont.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/PIL/ImageFont.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | #
# The Python Imaging Library.
# $Id$
#
# PIL raster font management
#
# History:
# 1996-08-07 fl created (experimental)
# 1997-08-25 fl minor adjustments to handle fonts from pilfont 0.3
# 1999-02-06 fl rewrote most font management stuff in C
# 1999-03-17 fl take pth files into account in load_path (from Richard Jones)
# 2001-02-17 fl added freetype support
# 2001-05-09 fl added TransposedFont wrapper class
# 2002-03-04 fl make sure we have a "L" or "1" font
# 2002-12-04 fl skip non-directory entries in the system path
# 2003-04-29 fl add embedded default font
# 2003-09-27 fl added support for truetype charmap encodings
#
# Todo:
# Adapt to PILFONT2 format (16-bit fonts, compressed, single file)
#
# Copyright (c) 1997-2003 by Secret Labs AB
# Copyright (c) 1996-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from . import Image
from ._util import isDirectory, isPath
import os
import sys
class _imagingft_not_installed(object):
# module placeholder
def __getattr__(self, id):
raise ImportError("The _imagingft C module is not installed")
try:
from . import _imagingft as core
except ImportError:
core = _imagingft_not_installed()
LAYOUT_BASIC = 0
LAYOUT_RAQM = 1
# FIXME: add support for pilfont2 format (see FontFile.py)
# --------------------------------------------------------------------
# Font metrics format:
# "PILfont" LF
# fontdescriptor LF
# (optional) key=value... LF
# "DATA" LF
# binary data: 256*10*2 bytes (dx, dy, dstbox, srcbox)
#
# To place a character, cut out srcbox and paste at dstbox,
# relative to the character position. Then move the character
# position according to dx, dy.
# --------------------------------------------------------------------
class ImageFont(object):
    "PIL font wrapper"
    def _load_pilfont(self, filename):
        # Locate the glyph bitmap that accompanies the .pil metrics file:
        # same basename with a .png/.gif/.pbm extension and mode "1" or "L".
        with open(filename, "rb") as fp:
            for ext in (".png", ".gif", ".pbm"):
                # NOTE(review): bare except also swallows KeyboardInterrupt
                # and SystemExit; a narrower IOError catch would be safer.
                try:
                    fullname = os.path.splitext(filename)[0] + ext
                    image = Image.open(fullname)
                except:
                    pass
                else:
                    if image and image.mode in ("1", "L"):
                        break
            else:
                # for/else: no candidate bitmap opened with a usable mode
                raise IOError("cannot find glyph data file")
        self.file = fullname
        return self._load_pilfont_data(fp, image)
    def _load_pilfont_data(self, file, image):
        """Parse PILfont metrics from *file* and bind the glyph *image*."""
        # read PILfont header
        if file.readline() != b"PILfont\n":
            raise SyntaxError("Not a PILfont file")
        # font descriptor line is read and discarded
        file.readline().split(b";")
        self.info = []  # FIXME: should be a dictionary
        # optional key=value lines, terminated by the "DATA" marker or EOF
        while True:
            s = file.readline()
            if not s or s == b"DATA\n":
                break
            self.info.append(s)
        # read PILfont metrics: 256 glyphs * 10 int16 values (dx, dy,
        # dstbox, srcbox) = 256*20 bytes, per the format comment above
        data = file.read(256*20)
        # check image
        if image.mode not in ("1", "L"):
            raise TypeError("invalid font image mode")
        image.load()
        self.font = Image.core.font(image.im, data)
    def getsize(self, text, *args, **kwargs):
        """Return the (width, height) of *text* rendered in this font."""
        return self.font.getsize(text)
    def getmask(self, text, mode="", *args, **kwargs):
        """Return a rendering mask (core image) for *text*."""
        return self.font.getmask(text, mode)
##
# Wrapper for FreeType fonts. Application code should use the
# <b>truetype</b> factory function to create font objects.
class FreeTypeFont(object):
    "FreeType font wrapper (requires _imagingft service)"
    def __init__(self, font=None, size=10, index=0, encoding="",
                 layout_engine=None):
        # FIXME: use service provider instead
        self.path = font
        self.size = size
        self.index = index
        self.encoding = encoding
        # Normalize the layout engine: invalid/None values become BASIC,
        # upgraded to RAQM when the core reports RAQM support; an explicit
        # RAQM request is downgraded when RAQM is unavailable.
        if layout_engine not in (LAYOUT_BASIC, LAYOUT_RAQM):
            layout_engine = LAYOUT_BASIC
            if core.HAVE_RAQM:
                layout_engine = LAYOUT_RAQM
        if layout_engine == LAYOUT_RAQM and not core.HAVE_RAQM:
            layout_engine = LAYOUT_BASIC
        self.layout_engine = layout_engine
        # *font* may be a filesystem path or a readable file-like object.
        if isPath(font):
            self.font = core.getfont(font, size, index, encoding, layout_engine=layout_engine)
        else:
            self.font_bytes = font.read()
            self.font = core.getfont(
                "", size, index, encoding, self.font_bytes, layout_engine)
    def getname(self):
        """Return the (family, style) names reported by the font."""
        return self.font.family, self.font.style
    def getmetrics(self):
        """Return the (ascent, descent) metrics of the font."""
        return self.font.ascent, self.font.descent
    def getsize(self, text, direction=None, features=None):
        """Return (width, height) of *text*, including the render offset."""
        size, offset = self.font.getsize(text, direction, features)
        return (size[0] + offset[0], size[1] + offset[1])
    def getoffset(self, text):
        """Return the (x, y) render offset for *text*."""
        return self.font.getsize(text)[1]
    def getmask(self, text, mode="", direction=None, features=None):
        """Return only the mask from :py:meth:`getmask2`."""
        return self.getmask2(text, mode, direction=direction, features=features)[0]
    # NOTE: the *fill* default binds Image.core.fill at class-definition time.
    def getmask2(self, text, mode="", fill=Image.core.fill, direction=None, features=None):
        """Render *text* into a fresh "L" mask; return (mask, offset)."""
        size, offset = self.font.getsize(text, direction, features)
        im = fill("L", size, 0)
        self.font.render(text, im.id, mode == "1", direction, features)
        return im, offset
    def font_variant(self, font=None, size=None, index=None, encoding=None,
                     layout_engine=None):
        """
        Create a copy of this FreeTypeFont object,
        using any specified arguments to override the settings.
        Parameters are identical to the parameters used to initialize this
        object.
        :return: A FreeTypeFont object.
        """
        return FreeTypeFont(font=self.path if font is None else font,
                            size=self.size if size is None else size,
                            index=self.index if index is None else index,
                            encoding=self.encoding if encoding is None else encoding,
                            layout_engine=self.layout_engine if layout_engine is None else layout_engine
                            )
class TransposedFont(object):
    "Wrapper for writing rotated or mirrored text"
    def __init__(self, font, orientation=None):
        """
        Wrapper that creates a transposed font from any existing font
        object.
        :param font: A font object.
        :param orientation: An optional orientation. If given, this should
           be one of Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM,
           Image.ROTATE_90, Image.ROTATE_180, or Image.ROTATE_270.
        """
        self.font = font
        self.orientation = orientation  # any 'transpose' argument, or None
    def getsize(self, text, *args, **kwargs):
        """Return (width, height), swapped for 90/270-degree rotations."""
        w, h = self.font.getsize(text)
        if self.orientation in (Image.ROTATE_90, Image.ROTATE_270):
            return h, w
        return w, h
    def getmask(self, text, mode="", *args, **kwargs):
        """Render via the wrapped font, then apply the transpose (if any)."""
        im = self.font.getmask(text, mode, *args, **kwargs)
        if self.orientation is not None:
            return im.transpose(self.orientation)
        return im
def load(filename):
    """
    Load a font file. This function loads a font object from the given
    bitmap font file, and returns the corresponding font object.

    :param filename: Name of font file.
    :return: A font object.
    :exception IOError: If the file could not be read.
    """
    font = ImageFont()
    font._load_pilfont(filename)
    return font
def truetype(font=None, size=10, index=0, encoding="",
             layout_engine=None):
    """
    Load a TrueType or OpenType font file, and create a font object.
    This function loads a font object from the given file, and creates
    a font object for a font of the given size.
    This function requires the _imagingft service.
    :param font: A truetype font file. Under Windows, if the file
                     is not found in this filename, the loader also looks in
                     Windows :file:`fonts/` directory.
    :param size: The requested size, in points.
    :param index: Which font face to load (default is first available face).
    :param encoding: Which font encoding to use (default is Unicode). Common
                     encodings are "unic" (Unicode), "symb" (Microsoft
                     Symbol), "ADOB" (Adobe Standard), "ADBE" (Adobe Expert),
                     and "armn" (Apple Roman). See the FreeType documentation
                     for more information.
    :param layout_engine: Which layout engine to use, if available:
                     `ImageFont.LAYOUT_BASIC` or `ImageFont.LAYOUT_RAQM`.
    :return: A font object.
    :exception IOError: If the file could not be read.
    """
    try:
        return FreeTypeFont(font, size, index, encoding, layout_engine)
    except IOError:
        # Fall back to searching platform font directories for the basename.
        ttf_filename = os.path.basename(font)
        dirs = []
        if sys.platform == "win32":
            # check the windows font repository
            # NOTE: must use uppercase WINDIR, to work around bugs in
            # 1.5.2's os.environ.get()
            windir = os.environ.get("WINDIR")
            if windir:
                dirs.append(os.path.join(windir, "fonts"))
        elif sys.platform in ('linux', 'linux2'):
            lindirs = os.environ.get("XDG_DATA_DIRS", "")
            if not lindirs:
                # According to the freedesktop spec, XDG_DATA_DIRS should
                # default to /usr/share
                lindirs = '/usr/share'
            dirs += [os.path.join(lindir, "fonts")
                     for lindir in lindirs.split(":")]
        elif sys.platform == 'darwin':
            dirs += ['/Library/Fonts', '/System/Library/Fonts',
                     os.path.expanduser('~/Library/Fonts')]
        # With an extension, match the exact filename; without one, prefer a
        # matching .ttf but remember the first match with any other extension.
        ext = os.path.splitext(ttf_filename)[1]
        first_font_with_a_different_extension = None
        for directory in dirs:
            for walkroot, walkdir, walkfilenames in os.walk(directory):
                for walkfilename in walkfilenames:
                    if ext and walkfilename == ttf_filename:
                        fontpath = os.path.join(walkroot, walkfilename)
                        return FreeTypeFont(fontpath, size, index, encoding, layout_engine)
                    elif not ext and os.path.splitext(walkfilename)[0] == ttf_filename:
                        fontpath = os.path.join(walkroot, walkfilename)
                        if os.path.splitext(fontpath)[1] == '.ttf':
                            return FreeTypeFont(fontpath, size, index, encoding, layout_engine)
                        if not ext and first_font_with_a_different_extension is None:
                            first_font_with_a_different_extension = fontpath
        if first_font_with_a_different_extension:
            return FreeTypeFont(first_font_with_a_different_extension, size,
                                index, encoding, layout_engine)
        # nothing found anywhere: re-raise the original IOError
        raise
def load_path(filename):
    """
    Load font file. Same as :py:func:`~PIL.ImageFont.load`, but searches for a
    bitmap font along the Python path.
    :param filename: Name of font file.
    :return: A font object.
    :exception IOError: If the file could not be read.
    """
    for directory in sys.path:
        if isDirectory(directory):
            if not isinstance(filename, str):
                # "bytes is str" only holds on Python 2: there, encode the
                # unicode name; on Python 3, decode a bytes name to str.
                if bytes is str:
                    filename = filename.encode("utf-8")
                else:
                    filename = filename.decode("utf-8")
            try:
                return load(os.path.join(directory, filename))
            except IOError:
                # try the next sys.path entry
                pass
    raise IOError("cannot find font file")
def load_default():
"""Load a "better than nothing" default font.
.. versionadded:: 1.1.4
:return: A font object.
"""
from io import BytesIO
import base64
f = ImageFont()
f._load_pilfont_data(
# courB08
BytesIO(base64.b64decode(b'''
UElMZm9udAo7Ozs7OzsxMDsKREFUQQoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAA//8AAQAAAAAAAAABAAEA
BgAAAAH/+gADAAAAAQAAAAMABgAGAAAAAf/6AAT//QADAAAABgADAAYAAAAA//kABQABAAYAAAAL
AAgABgAAAAD/+AAFAAEACwAAABAACQAGAAAAAP/5AAUAAAAQAAAAFQAHAAYAAP////oABQAAABUA
AAAbAAYABgAAAAH/+QAE//wAGwAAAB4AAwAGAAAAAf/5AAQAAQAeAAAAIQAIAAYAAAAB//kABAAB
ACEAAAAkAAgABgAAAAD/+QAE//0AJAAAACgABAAGAAAAAP/6AAX//wAoAAAALQAFAAYAAAAB//8A
BAACAC0AAAAwAAMABgAAAAD//AAF//0AMAAAADUAAQAGAAAAAf//AAMAAAA1AAAANwABAAYAAAAB
//kABQABADcAAAA7AAgABgAAAAD/+QAFAAAAOwAAAEAABwAGAAAAAP/5AAYAAABAAAAARgAHAAYA
AAAA//kABQAAAEYAAABLAAcABgAAAAD/+QAFAAAASwAAAFAABwAGAAAAAP/5AAYAAABQAAAAVgAH
AAYAAAAA//kABQAAAFYAAABbAAcABgAAAAD/+QAFAAAAWwAAAGAABwAGAAAAAP/5AAUAAABgAAAA
ZQAHAAYAAAAA//kABQAAAGUAAABqAAcABgAAAAD/+QAFAAAAagAAAG8ABwAGAAAAAf/8AAMAAABv
AAAAcQAEAAYAAAAA//wAAwACAHEAAAB0AAYABgAAAAD/+gAE//8AdAAAAHgABQAGAAAAAP/7AAT/
/gB4AAAAfAADAAYAAAAB//oABf//AHwAAACAAAUABgAAAAD/+gAFAAAAgAAAAIUABgAGAAAAAP/5
AAYAAQCFAAAAiwAIAAYAAP////oABgAAAIsAAACSAAYABgAA////+gAFAAAAkgAAAJgABgAGAAAA
AP/6AAUAAACYAAAAnQAGAAYAAP////oABQAAAJ0AAACjAAYABgAA////+gAFAAAAowAAAKkABgAG
AAD////6AAUAAACpAAAArwAGAAYAAAAA//oABQAAAK8AAAC0AAYABgAA////+gAGAAAAtAAAALsA
BgAGAAAAAP/6AAQAAAC7AAAAvwAGAAYAAP////oABQAAAL8AAADFAAYABgAA////+gAGAAAAxQAA
AMwABgAGAAD////6AAUAAADMAAAA0gAGAAYAAP////oABQAAANIAAADYAAYABgAA////+gAGAAAA
2AAAAN8ABgAGAAAAAP/6AAUAAADfAAAA5AAGAAYAAP////oABQAAAOQAAADqAAYABgAAAAD/+gAF
AAEA6gAAAO8ABwAGAAD////6AAYAAADvAAAA9gAGAAYAAAAA//oABQAAAPYAAAD7AAYABgAA////
+gAFAAAA+wAAAQEABgAGAAD////6AAYAAAEBAAABCAAGAAYAAP////oABgAAAQgAAAEPAAYABgAA
////+gAGAAABDwAAARYABgAGAAAAAP/6AAYAAAEWAAABHAAGAAYAAP////oABgAAARwAAAEjAAYA
BgAAAAD/+gAFAAABIwAAASgABgAGAAAAAf/5AAQAAQEoAAABKwAIAAYAAAAA//kABAABASsAAAEv
AAgABgAAAAH/+QAEAAEBLwAAATIACAAGAAAAAP/5AAX//AEyAAABNwADAAYAAAAAAAEABgACATcA
AAE9AAEABgAAAAH/+QAE//wBPQAAAUAAAwAGAAAAAP/7AAYAAAFAAAABRgAFAAYAAP////kABQAA
AUYAAAFMAAcABgAAAAD/+wAFAAABTAAAAVEABQAGAAAAAP/5AAYAAAFRAAABVwAHAAYAAAAA//sA
BQAAAVcAAAFcAAUABgAAAAD/+QAFAAABXAAAAWEABwAGAAAAAP/7AAYAAgFhAAABZwAHAAYAAP//
//kABQAAAWcAAAFtAAcABgAAAAD/+QAGAAABbQAAAXMABwAGAAAAAP/5AAQAAgFzAAABdwAJAAYA
AP////kABgAAAXcAAAF+AAcABgAAAAD/+QAGAAABfgAAAYQABwAGAAD////7AAUAAAGEAAABigAF
AAYAAP////sABQAAAYoAAAGQAAUABgAAAAD/+wAFAAABkAAAAZUABQAGAAD////7AAUAAgGVAAAB
mwAHAAYAAAAA//sABgACAZsAAAGhAAcABgAAAAD/+wAGAAABoQAAAacABQAGAAAAAP/7AAYAAAGn
AAABrQAFAAYAAAAA//kABgAAAa0AAAGzAAcABgAA////+wAGAAABswAAAboABQAGAAD////7AAUA
AAG6AAABwAAFAAYAAP////sABgAAAcAAAAHHAAUABgAAAAD/+wAGAAABxwAAAc0ABQAGAAD////7
AAYAAgHNAAAB1AAHAAYAAAAA//sABQAAAdQAAAHZAAUABgAAAAH/+QAFAAEB2QAAAd0ACAAGAAAA
Av/6AAMAAQHdAAAB3gAHAAYAAAAA//kABAABAd4AAAHiAAgABgAAAAD/+wAF//0B4gAAAecAAgAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAB
//sAAwACAecAAAHpAAcABgAAAAD/+QAFAAEB6QAAAe4ACAAGAAAAAP/5AAYAAAHuAAAB9AAHAAYA
AAAA//oABf//AfQAAAH5AAUABgAAAAD/+QAGAAAB+QAAAf8ABwAGAAAAAv/5AAMAAgH/AAACAAAJ
AAYAAAAA//kABQABAgAAAAIFAAgABgAAAAH/+gAE//sCBQAAAggAAQAGAAAAAP/5AAYAAAIIAAAC
DgAHAAYAAAAB//kABf/+Ag4AAAISAAUABgAA////+wAGAAACEgAAAhkABQAGAAAAAP/7AAX//gIZ
AAACHgADAAYAAAAA//wABf/9Ah4AAAIjAAEABgAAAAD/+QAHAAACIwAAAioABwAGAAAAAP/6AAT/
+wIqAAACLgABAAYAAAAA//kABP/8Ai4AAAIyAAMABgAAAAD/+gAFAAACMgAAAjcABgAGAAAAAf/5
AAT//QI3AAACOgAEAAYAAAAB//kABP/9AjoAAAI9AAQABgAAAAL/+QAE//sCPQAAAj8AAgAGAAD/
///7AAYAAgI/AAACRgAHAAYAAAAA//kABgABAkYAAAJMAAgABgAAAAH//AAD//0CTAAAAk4AAQAG
AAAAAf//AAQAAgJOAAACUQADAAYAAAAB//kABP/9AlEAAAJUAAQABgAAAAH/+QAF//4CVAAAAlgA
BQAGAAD////7AAYAAAJYAAACXwAFAAYAAP////kABgAAAl8AAAJmAAcABgAA////+QAGAAACZgAA
Am0ABwAGAAD////5AAYAAAJtAAACdAAHAAYAAAAA//sABQACAnQAAAJ5AAcABgAA////9wAGAAAC
eQAAAoAACQAGAAD////3AAYAAAKAAAAChwAJAAYAAP////cABgAAAocAAAKOAAkABgAA////9wAG
AAACjgAAApUACQAGAAD////4AAYAAAKVAAACnAAIAAYAAP////cABgAAApwAAAKjAAkABgAA////
+gAGAAACowAAAqoABgAGAAAAAP/6AAUAAgKqAAACrwAIAAYAAP////cABQAAAq8AAAK1AAkABgAA
////9wAFAAACtQAAArsACQAGAAD////3AAUAAAK7AAACwQAJAAYAAP////gABQAAAsEAAALHAAgA
BgAAAAD/9wAEAAACxwAAAssACQAGAAAAAP/3AAQAAALLAAACzwAJAAYAAAAA//cABAAAAs8AAALT
AAkABgAAAAD/+AAEAAAC0wAAAtcACAAGAAD////6AAUAAALXAAAC3QAGAAYAAP////cABgAAAt0A
AALkAAkABgAAAAD/9wAFAAAC5AAAAukACQAGAAAAAP/3AAUAAALpAAAC7gAJAAYAAAAA//cABQAA
Au4AAALzAAkABgAAAAD/9wAFAAAC8wAAAvgACQAGAAAAAP/4AAUAAAL4AAAC/QAIAAYAAAAA//oA
Bf//Av0AAAMCAAUABgAA////+gAGAAADAgAAAwkABgAGAAD////3AAYAAAMJAAADEAAJAAYAAP//
//cABgAAAxAAAAMXAAkABgAA////9wAGAAADFwAAAx4ACQAGAAD////4AAYAAAAAAAoABwASAAYA
AP////cABgAAAAcACgAOABMABgAA////+gAFAAAADgAKABQAEAAGAAD////6AAYAAAAUAAoAGwAQ
AAYAAAAA//gABgAAABsACgAhABIABgAAAAD/+AAGAAAAIQAKACcAEgAGAAAAAP/4AAYAAAAnAAoA
LQASAAYAAAAA//gABgAAAC0ACgAzABIABgAAAAD/+QAGAAAAMwAKADkAEQAGAAAAAP/3AAYAAAA5
AAoAPwATAAYAAP////sABQAAAD8ACgBFAA8ABgAAAAD/+wAFAAIARQAKAEoAEQAGAAAAAP/4AAUA
AABKAAoATwASAAYAAAAA//gABQAAAE8ACgBUABIABgAAAAD/+AAFAAAAVAAKAFkAEgAGAAAAAP/5
AAUAAABZAAoAXgARAAYAAAAA//gABgAAAF4ACgBkABIABgAAAAD/+AAGAAAAZAAKAGoAEgAGAAAA
AP/4AAYAAABqAAoAcAASAAYAAAAA//kABgAAAHAACgB2ABEABgAAAAD/+AAFAAAAdgAKAHsAEgAG
AAD////4AAYAAAB7AAoAggASAAYAAAAA//gABQAAAIIACgCHABIABgAAAAD/+AAFAAAAhwAKAIwA
EgAGAAAAAP/4AAUAAACMAAoAkQASAAYAAAAA//gABQAAAJEACgCWABIABgAAAAD/+QAFAAAAlgAK
AJsAEQAGAAAAAP/6AAX//wCbAAoAoAAPAAYAAAAA//oABQABAKAACgClABEABgAA////+AAGAAAA
pQAKAKwAEgAGAAD////4AAYAAACsAAoAswASAAYAAP////gABgAAALMACgC6ABIABgAA////+QAG
AAAAugAKAMEAEQAGAAD////4AAYAAgDBAAoAyAAUAAYAAP////kABQACAMgACgDOABMABgAA////
+QAGAAIAzgAKANUAEw==
''')), Image.open(BytesIO(base64.b64decode(b'''
iVBORw0KGgoAAAANSUhEUgAAAx4AAAAUAQAAAAArMtZoAAAEwElEQVR4nABlAJr/AHVE4czCI/4u
Mc4b7vuds/xzjz5/3/7u/n9vMe7vnfH/9++vPn/xyf5zhxzjt8GHw8+2d83u8x27199/nxuQ6Od9
M43/5z2I+9n9ZtmDBwMQECDRQw/eQIQohJXxpBCNVE6QCCAAAAD//wBlAJr/AgALyj1t/wINwq0g
LeNZUworuN1cjTPIzrTX6ofHWeo3v336qPzfEwRmBnHTtf95/fglZK5N0PDgfRTslpGBvz7LFc4F
IUXBWQGjQ5MGCx34EDFPwXiY4YbYxavpnhHFrk14CDAAAAD//wBlAJr/AgKqRooH2gAgPeggvUAA
Bu2WfgPoAwzRAABAAAAAAACQgLz/3Uv4Gv+gX7BJgDeeGP6AAAD1NMDzKHD7ANWr3loYbxsAD791
NAADfcoIDyP44K/jv4Y63/Z+t98Ovt+ub4T48LAAAAD//wBlAJr/AuplMlADJAAAAGuAphWpqhMx
in0A/fRvAYBABPgBwBUgABBQ/sYAyv9g0bCHgOLoGAAAAAAAREAAwI7nr0ArYpow7aX8//9LaP/9
SjdavWA8ePHeBIKB//81/83ndznOaXx379wAAAD//wBlAJr/AqDxW+D3AABAAbUh/QMnbQag/gAY
AYDAAACgtgD/gOqAAAB5IA/8AAAk+n9w0AAA8AAAmFRJuPo27ciC0cD5oeW4E7KA/wD3ECMAn2tt
y8PgwH8AfAxFzC0JzeAMtratAsC/ffwAAAD//wBlAJr/BGKAyCAA4AAAAvgeYTAwHd1kmQF5chkG
ABoMIHcL5xVpTfQbUqzlAAAErwAQBgAAEOClA5D9il08AEh/tUzdCBsXkbgACED+woQg8Si9VeqY
lODCn7lmF6NhnAEYgAAA/NMIAAAAAAD//2JgjLZgVGBg5Pv/Tvpc8hwGBjYGJADjHDrAwPzAjv/H
/Wf3PzCwtzcwHmBgYGcwbZz8wHaCAQMDOwMDQ8MCBgYOC3W7mp+f0w+wHOYxO3OG+e376hsMZjk3
AAAAAP//YmCMY2A4wMAIN5e5gQETPD6AZisDAwMDgzSDAAPjByiHcQMDAwMDg1nOze1lByRu5/47
c4859311AYNZzg0AAAAA//9iYGDBYihOIIMuwIjGL39/fwffA8b//xv/P2BPtzzHwCBjUQAAAAD/
/yLFBrIBAAAA//9i1HhcwdhizX7u8NZNzyLbvT97bfrMf/QHI8evOwcSqGUJAAAA//9iYBB81iSw
pEE170Qrg5MIYydHqwdDQRMrAwcVrQAAAAD//2J4x7j9AAMDn8Q/BgYLBoaiAwwMjPdvMDBYM1Tv
oJodAAAAAP//Yqo/83+dxePWlxl3npsel9lvLfPcqlE9725C+acfVLMEAAAA//9i+s9gwCoaaGMR
evta/58PTEWzr21hufPjA8N+qlnBwAAAAAD//2JiWLci5v1+HmFXDqcnULE/MxgYGBj+f6CaJQAA
AAD//2Ji2FrkY3iYpYC5qDeGgeEMAwPDvwQBBoYvcTwOVLMEAAAA//9isDBgkP///0EOg9z35v//
Gc/eeW7BwPj5+QGZhANUswMAAAD//2JgqGBgYGBgqEMXlvhMPUsAAAAA//8iYDd1AAAAAP//AwDR
w7IkEbzhVQAAAABJRU5ErkJggg==
'''))))
return f
| 46.429825 | 105 | 0.712828 |
a73d14e31bff7b9102b04a05a7ee6cd3bc535e7b | 1,791 | py | Python | models/mean_predictor/container/mean_predictor.py | AlexandreRozier/BigDataAnalytics | 50ac5fe0d87d29a1938b9f19c3785baa59e0b83e | [
"Apache-2.0"
] | null | null | null | models/mean_predictor/container/mean_predictor.py | AlexandreRozier/BigDataAnalytics | 50ac5fe0d87d29a1938b9f19c3785baa59e0b83e | [
"Apache-2.0"
] | null | null | null | models/mean_predictor/container/mean_predictor.py | AlexandreRozier/BigDataAnalytics | 50ac5fe0d87d29a1938b9f19c3785baa59e0b83e | [
"Apache-2.0"
] | null | null | null | import numpy as np
import pandas as pd
class MeanPredictor():
    """Baseline forecaster that predicts the historical mean per time-of-day bucket.

    Every timestamp is hashed into a bucket ``(is_weekday, hour, minute)``;
    ``fit`` stores the mean and a spread measure per bucket, and indexing
    (``model[ts]`` / ``model[start:stop]``) returns predictions.
    """

    def __init__(self, freq='5min'):
        # freq: pandas offset alias describing the prediction grid resolution.
        self.freq = freq

    def fit(self, x, square_deviation=False):
        """Fit the model on a single-column DataFrame ``x`` with a DatetimeIndex.

        :param x: observations, one value column, indexed by timestamps.
        :param square_deviation: if True use the per-bucket standard deviation,
            otherwise the mean absolute deviation from the bucket mean.
        """
        keys = np.array(list(map(self.__time_hash, x.index))).T.tolist()
        x_group = x.groupby(keys)
        x_mean = x_group.mean()
        self.mean_dict = dict(zip(list(x_mean.index), x_mean.values.flatten()))
        if square_deviation:
            x_std_dist = x_group.std().values.squeeze()
        else:
            # Mean absolute deviation of each bucket from its own mean.
            x_std_dist = np.array([np.abs(df.values - self.mean_dict[idx]).mean()
                                   for idx, df in list(x_group)])
        self.std_dict = dict(zip(list(x_mean.index), x_std_dist))

    def __time_hash(self, t):
        # Bucket key used to sort data points: (weekday?, hour, minute).
        return (t.weekday() < 5, t.hour, t.minute)

    def __predict(self, t):
        # Look up (mean, spread) for the bucket containing timestamp ``t``.
        t_hash = self.__time_hash(t)
        return self.mean_dict[t_hash], self.std_dict[t_hash]

    def __getitem__(self, t):
        """Predict for a single ``pd.Timestamp`` or a slice of timestamps.

        A slice returns a DataFrame on the ``freq`` grid with columns
        ``Value`` (bucket mean) and ``Std`` (bucket spread); a scalar
        timestamp returns the ``(mean, std)`` tuple of its bucket.
        """
        if type(t) is slice:
            assert type(t.start) is pd.Timestamp
            assert type(t.stop) is pd.Timestamp
            assert t.step is None or type(t.step) is int
            step = t.step if type(t.step) is int else 1
            start, stop = t.start.ceil(self.freq), t.stop.floor(self.freq)
            # Bug fix: use the normalized ``step`` (it was computed but the
            # original sliced with the raw ``t.step``, leaving it dead).
            time_range = pd.date_range(start, stop, freq=self.freq)[::step]
            predictions = list(map(self.__predict, time_range))
            df = pd.DataFrame(index=time_range, data=predictions,
                              columns=['Value', 'Std'])
            df.index.name = 'Timestamp'
            return df
        # Bug fix: the original asserted ``t is pd.Timestamp`` (identity test
        # against the class itself, always False for an instance) and then
        # fell off the end returning None; accept an instance and predict.
        assert isinstance(t, pd.Timestamp)
        return self.__predict(t)
| 39.8 | 111 | 0.61474 |
5915aaee17d5fb55bcef6f1c7a74d6647271fbb0 | 1,555 | py | Python | onnx_tf/handlers/backend/cast.py | malisit/onnx-tensorflow | 3eb41dc923f350ca533f1024f602a842dd55de45 | [
"Apache-2.0"
] | 1,110 | 2017-11-13T19:34:24.000Z | 2022-03-29T09:14:56.000Z | onnx_tf/handlers/backend/cast.py | malisit/onnx-tensorflow | 3eb41dc923f350ca533f1024f602a842dd55de45 | [
"Apache-2.0"
] | 768 | 2017-11-17T00:06:27.000Z | 2022-03-31T20:20:09.000Z | onnx_tf/handlers/backend/cast.py | malisit/onnx-tensorflow | 3eb41dc923f350ca533f1024f602a842dd55de45 | [
"Apache-2.0"
] | 281 | 2017-11-16T19:56:17.000Z | 2022-03-28T06:25:33.000Z | import tensorflow as tf
from onnx_tf.handlers.backend_handler import BackendHandler
from onnx_tf.handlers.handler import onnx_op
from onnx_tf.handlers.handler import tf_func
from onnx_tf.handlers.handler import partial_support
from onnx_tf.handlers.handler import ps_description
@onnx_op("Cast")
@tf_func(tf.cast)
@partial_support(True)
@ps_description("Cast string to data types other than " +
"float32/float64/int32/int64 is not supported in Tensorflow")
class Cast(BackendHandler):
@classmethod
def get_attrs_processor_param(cls):
return {"rename": {"to": "dtype"}}
@classmethod
def _common(cls, node, **kwargs):
inp = kwargs["tensor_dict"][node.inputs[0]]
to_type = node.attrs.get("to")
if to_type == tf.string:
return [tf.as_string(inp)]
if inp.dtype == tf.string:
if to_type not in [tf.float32, tf.float64, tf.int32, tf.int64]:
raise RuntimeError(
"Cast string to type {} is not supported in Tensorflow.".format(
to_type))
return [tf.strings.to_number(inp, to_type)]
return [cls.make_tensor_from_onnx_node(node, **kwargs)]
@classmethod
def version_1(cls, node, **kwargs):
return [cls.make_tensor_from_onnx_node(node, **kwargs)]
@classmethod
def version_6(cls, node, **kwargs):
return [cls.make_tensor_from_onnx_node(node, **kwargs)]
@classmethod
def version_9(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_13(cls, node, **kwargs):
return cls._common(node, **kwargs)
| 28.796296 | 77 | 0.701608 |
324172e51a9efe3e0d626880912102eea1fad27e | 4,771 | py | Python | library/oneview_storage_pool_facts.py | nabhajit-ray/oneview-ansible | b31af8a696013bac7a1900748a2fa5ba491fe8e2 | [
"Apache-2.0"
] | 108 | 2016-06-28T18:14:08.000Z | 2022-02-21T09:16:06.000Z | library/oneview_storage_pool_facts.py | HPE-Japan-Presales/oneview-ansible | 26eb13354333d862d9e80f07e3fe9bbe2eb59af3 | [
"Apache-2.0"
] | 248 | 2016-07-14T12:50:17.000Z | 2022-02-06T18:57:16.000Z | library/oneview_storage_pool_facts.py | HPE-Japan-Presales/oneview-ansible | 26eb13354333d862d9e80f07e3fe9bbe2eb59af3 | [
"Apache-2.0"
] | 88 | 2016-06-29T15:52:44.000Z | 2022-03-10T12:34:41.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2020) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
# Ansible module metadata consumed by ansible-doc and the module index.
ANSIBLE_METADATA = {'status': ['stableinterface'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: oneview_storage_pool_facts
short_description: Retrieve facts about one or more Storage Pools.
description:
- Retrieve facts about one or more of the Storage Pools from OneView.
version_added: "2.3"
requirements:
- "python >= 2.7.9"
- "hpeOneView >= 5.4.0"
author: "Gustavo Hennig (@GustavoHennig)"
options:
name:
description:
- Storage Pool name.
required: false
options:
description:
- "List with options to gather additional facts about Storage Pools.
Options allowed:
C(reachableStoragePools) gets the list of reachable Storage pools based on the network param.
If the network param is not specified it gets all of them."
required: false
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all Storage Pools
oneview_storage_pool_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
delegate_to: localhost
- debug: var=storage_pools
- name: Gather paginated, filtered and sorted facts about Storage Pools
oneview_storage_pool_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
params:
start: 0
count: 3
sort: 'name:descending'
filter: status='OK'
- debug: var=storage_pools
- name: Gather facts about a Storage Pool by name
oneview_storage_pool_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
name: "CPG_FC-AO"
delegate_to: localhost
- debug: var=storage_pools
- name: Gather facts about the reachable Storage Pools
oneview_storage_pool_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
options:
- reachableStoragePools
params:
sort: 'name:ascending'
filter: status='OK'
networks:
- /rest/network/123456A
- /rest/network/123456B
scope_exclusions:
- /rest/storage-pools/5F9CA89B-C632-4F09-BC55-A8AA00DA5C4A
scope_uris: '/rest/scopes/754e0dce-3cbd-4188-8923-edf86f068bf7'
delegate_to: localhost
- debug: var=storage_pools_reachable_storage_pools
'''
RETURN = '''
storage_pools:
description: Has all the OneView facts about the Storage Pools.
returned: Always, but can be null.
type: dict
storage_pools_reachable_storage_pools:
description: Has all the OneView facts about the Reachable Storage Pools.
returned: Always, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModule
class StoragePoolFactsModule(OneViewModule):
    """Ansible facts module gathering HPE OneView Storage Pool information."""

    def __init__(self):
        # Arguments accepted by this facts module (see DOCUMENTATION above).
        argument_spec = dict(
            name=dict(required=False, type='str'),
            params=dict(required=False, type='dict'),
            options=dict(required=False, type='list')
        )
        super(StoragePoolFactsModule, self).__init__(additional_arg_spec=argument_spec)
        self.set_resource_object(self.oneview_client.storage_pools)

    def execute_module(self):
        """Collect the storage pool facts and return the Ansible result dict.

        Returns ``dict(changed=False, ansible_facts=...)`` where the facts
        always contain ``storage_pools`` and optionally
        ``storage_pools_reachable_storage_pools``.
        """
        facts = {}
        # Note: dead ``pools = []`` initialization removed — both branches
        # below always assign ``pools``.
        if self.module.params['name']:
            pools = self.resource_client.get_by('name', self.module.params['name'])
        else:
            pools = self.resource_client.get_all(**self.facts_params)
        facts['storage_pools'] = pools
        self.__get_options(facts)
        return dict(changed=False, ansible_facts=facts)

    def __get_options(self, facts):
        # Augment ``facts`` in place with the reachable-pools listing when the
        # ``reachableStoragePools`` option was requested.
        if self.options and self.options.get('reachableStoragePools'):
            query_params = self.options['reachableStoragePools']
            facts['storage_pools_reachable_storage_pools'] = \
                self.resource_client.get_reachable_storage_pools(**query_params)
def main():
    """Module entry point: instantiate the facts module and run it."""
    facts_module = StoragePoolFactsModule()
    facts_module.run()


if __name__ == '__main__':
    main()
| 31.183007 | 103 | 0.685181 |
725240532860f9e3f85418690c7a1c2c417aa937 | 967 | py | Python | setup.py | karjanme/wx-twitter-bot | 6e026b76dd581fcd1322611f43414a6de0fdac6c | [
"MIT"
] | null | null | null | setup.py | karjanme/wx-twitter-bot | 6e026b76dd581fcd1322611f43414a6de0fdac6c | [
"MIT"
] | 2 | 2022-01-27T19:04:03.000Z | 2022-01-30T03:14:09.000Z | setup.py | karjanme/wx-twitter-bot | 6e026b76dd581fcd1322611f43414a6de0fdac6c | [
"MIT"
] | null | null | null | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="wxtwitterbot",
version="0.2.1",
author="Karl Jansen",
author_email="jnsnkrl@live.com",
license="MIT",
description="Wx Twitter Bot",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/karjanme/wx-twitter-bot",
package_dir={'wxtwitterbot': 'src'},
packages=['wxtwitterbot', 'wxtwitterbot.tasks'],
python_requires='>=3.8',
install_requires=[
"astral",
"pylunar",
"python-dotenv",
"pytz",
"tweepy",
"requests"
],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
]
)
| 27.628571 | 54 | 0.584281 |
7cf75330a5a303ac57eca72fa4859c38b3125e84 | 374 | py | Python | game/game_objects/windows/SplashScreen.py | mikaelparsekyan/SpaceWars | 97fba68a6f95a6ea55af8e81d36e904a3e6269a3 | [
"MIT"
] | null | null | null | game/game_objects/windows/SplashScreen.py | mikaelparsekyan/SpaceWars | 97fba68a6f95a6ea55af8e81d36e904a3e6269a3 | [
"MIT"
] | null | null | null | game/game_objects/windows/SplashScreen.py | mikaelparsekyan/SpaceWars | 97fba68a6f95a6ea55af8e81d36e904a3e6269a3 | [
"MIT"
] | null | null | null | import time
import pygame
pygame.init()
class SplashScreen():
    """Render a temporary splash message onto the supplied screen wrapper."""

    def __init__(self, screen):
        # Render the splash text once, in red, using a large Arial font.
        splash_font = pygame.font.SysFont('arial', 200)
        rendered = splash_font.render("splashhhh", True, (255, 0, 0))
        try:
            # ``screen`` is assumed to wrap the real pygame surface in its
            # ``screen`` attribute — TODO confirm against the caller.
            screen.screen.blit(rendered, (12, 12))
            time.sleep(1.5)
        except InterruptedError:
            print("asd")
| 22 | 49 | 0.561497 |
7c287c3ddc4169e9d15aca1951c4d2ed6df2ba20 | 8,393 | py | Python | GArDen/transform/minimal_cycle_annotation.py | zaidurrehman/EDeN | 1f29d4c9d458edb2bd62a98e57254d78a1f2093f | [
"MIT"
] | null | null | null | GArDen/transform/minimal_cycle_annotation.py | zaidurrehman/EDeN | 1f29d4c9d458edb2bd62a98e57254d78a1f2093f | [
"MIT"
] | null | null | null | GArDen/transform/minimal_cycle_annotation.py | zaidurrehman/EDeN | 1f29d4c9d458edb2bd62a98e57254d78a1f2093f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Provides annotation of minimal cycles."""
import eden
from collections import defaultdict
from sklearn.base import BaseEstimator, ClassifierMixin
import networkx as nx
import logging
logger = logging.getLogger(__name__)
class AnnotateMinimalCycles(BaseEstimator, ClassifierMixin):
    """Annotate minimal cycles."""

    def __init__(self,
                 attribute='label',
                 part_id='part_id',
                 part_name='part_name'):
        """Construct.

        Parameters
        ----------
        attribute : node attribute used to derive the canonical cycle names
        part_id : attribute name under which cycle ids are written
        part_name : attribute name under which cycle names are written
        """
        self.attribute = attribute
        self.part_id = part_id
        self.part_name = part_name

    def transform(self, graphs):
        """Transform."""
        try:
            for graph in graphs:
                yield self._transform_single(graph)
        except Exception as exc:
            logger.debug('Failed iteration. Reason: %s' % exc)
            logger.debug('Exception', exc_info=True)

    def _transform_single(self, graph):
        """Annotate every node of ``graph`` with its minimal-cycle membership."""
        # Pass 1: compute the (sorted) minimal cycle of every node and reset
        # the per-node id accumulator.
        for node, data in graph.nodes(data=True):
            data['__cycle'] = sorted(node_to_cycle(
                graph, node, attribute_name=self.attribute))
            graph.node[node][self.part_id] = set()
        # Pass 2: hash each cycle to an id, derive its canonical name once per
        # id, and register the id on every node belonging to the cycle.
        names_by_id = {}
        for node, data in graph.nodes(data=True):
            cycle_id = _fhash(data['__cycle'])
            if names_by_id.get(cycle_id, None) is None:
                names_by_id[cycle_id] = _getname(graph, node, self.attribute)
            for member in data['__cycle']:
                graph.node[member][self.part_id].add(cycle_id)
        # Pass 3: freeze the accumulated sets into sorted lists.
        for node, data in graph.nodes(data=True):
            data[self.part_id] = sorted(data[self.part_id])
            data[self.part_name] = sorted(
                names_by_id[cid] for cid in data[self.part_id])
        return graph
def _fhash(stuff):
    # Deterministic integer fingerprint of a (sorted) node-id list, used as a
    # cycle identifier; 2 ** 20 - 1 bounds the hash range (see eden.fast_hash).
    return eden.fast_hash(stuff, 2 ** 20 - 1)
def node_to_cycle(graph, n, attribute_name='label', min_cycle_size=3):
    """Return a minimal cycle that node ``n`` belongs to.

    :param graph: graph exposing ``neighbors(node)`` and per-node label access
    :param n: start node
    :param attribute_name: node attribute holding the label (used for
        tie-breaking when walking back to the root)
    :param min_cycle_size: cycles of this size or smaller are ignored
    :return: set of nodes forming a cycle through ``n``, or ``set([n])``
        when no (large enough) cycle exists

    We start in node n and expand the frontier one step per iteration
    (breadth-first). Meeting an already-seen node means we found a cycle.
    Three cases exist:
        - frontier hits frontier -> cycle of even length
        - frontier hits visited nodes -> cycle of uneven length
        - the newly found cycle might not contain the start node, so
          ``close_cycle`` checks for that.
    """
    def close_cycle(collisions, parent, root, graph):
        """We found a cycle.

        But that does not say that the root node is part of that cycle:
        return the cycle's node set if it is, ``False`` otherwise.
        """

        def extend_path_to_root(work_list, parent_dict, root, graph):
            """Extend ``work_list`` with parents until ``root`` is reached.

            :param work_list: list with start node
            :param parent_dict: tree-like dictionary with each node's parent(s)
            :param root: the BFS root node
            :return: the path nodes, excluding root
            """
            current = work_list[-1]
            while current != root:
                # NOTE(review): the comment in the original said "if we have 1
                # parent, we use it", but the condition picks parent[0] when
                # there is MORE than one parent and does the lexicographic
                # selection otherwise. Behavior kept as-is — confirm intent.
                if len(parent_dict[current]) > 1:
                    work_list.append(parent_dict[current][0])
                # otherwise look at all parents and choose the
                # lexicographically smallest one.
                else:
                    bestparent = parent_dict[current][0]
                    bestlabel = graph.node[bestparent][attribute_name]
                    # renamed from ``parent`` (shadowed the enclosing dict)
                    for candidate in parent_dict[current]:
                        if graph.node[candidate][attribute_name] < bestlabel:
                            bestlabel = graph.node[candidate][attribute_name]
                            bestparent = candidate
                    work_list.append(bestparent)
                current = work_list[-1]
            return work_list[:-1]

        # any element is fine: e closes a cycle. collisions might contain
        # more than one hit but we only need one.
        e = collisions.pop()
        # we closed a cycle on e, so e has (at least) 2 parents.
        li = parent[e]
        a = [li[0]]
        b = [li[1]]
        # walk both parent chains back to the root
        a = extend_path_to_root(a, parent, root, graph)
        b = extend_path_to_root(b, parent, root, graph)
        # if the two paths to the root do not overlap, the root itself must
        # lie on the cycle.
        a = set(a)
        b = set(b)
        intersect = a & b
        if len(intersect) == 0:
            paths = a | b
            paths.add(e)
            paths.add(root)
            return paths
        return False

    # START OF ACTUAL FUNCTION
    no_cycle_default = set([n])
    frontier = set([n])
    step = 0
    visited = set()
    parent = defaultdict(list)
    while frontier:
        step += 1
        # expand: collect the unvisited neighbors of every frontier node.
        # (renamed from ``next``, which shadowed the builtin)
        next_frontier = []
        for front_node in frontier:
            new = set(graph.neighbors(front_node)) - visited
            next_frontier.append(new)
            for e in new:
                parent[e].append(front_node)
        # merge the new node sets; if two sets collide we found a cycle of
        # even length
        while len(next_frontier) > 1:
            s1 = next_frontier[1]
            s2 = next_frontier[0]
            merge = s1 | s2
            # collision <=> s1 and s2 overlap
            if len(merge) < len(s1) + len(s2):
                col = s1 & s2
                cycle = close_cycle(col, parent, n, graph)
                if cycle:
                    if step * 2 > min_cycle_size:
                        return cycle
                    return no_cycle_default
            next_frontier[0] = merge
            del next_frontier[1]
        next_frontier = next_frontier[0]
        # check for cycles of uneven length => the new nodes hit the old
        # frontier
        if len(next_frontier & frontier) > 0:
            col = next_frontier & frontier
            cycle = close_cycle(col, parent, n, graph)
            if cycle:
                if step * 2 - 1 > min_cycle_size:
                    return cycle
                return no_cycle_default
        # the current frontier closed no cycle, never look at it again
        visited = visited | frontier
        frontier = next_frontier
    return no_cycle_default
def _getname(graph, n, attribute_name='label'):
    """Return a canonical name for the minimal cycle stored on node ``n``.

    The cycle's node labels are read in walking order and joined; the
    lexicographically smallest rotation over both walking directions is
    used so the name is independent of start node and orientation.
    """
    # more complicated naming scheme looks at cycle and uses
    # lexicographicaly smallest name.
    # trivial case with cycle length 1: the node is its own "cycle".
    if len(graph.node[n]['__cycle']) == 1:
        return graph.node[n][attribute_name]
    # first we need the nodelabels in order: work on a copy of the cycle
    # subgraph and consume it edge by edge.
    g = nx.Graph(graph.subgraph(graph.node[n]['__cycle']))
    startnode = graph.node[n]['__cycle'][0]
    # NOTE(review): ``g.neighbors(...)[0]`` assumes the networkx 1.x API where
    # neighbors() returns an indexable list — confirm the pinned version.
    neighbor = g.neighbors(startnode)[0]
    # break the cycle once so the walk below terminates.
    g.remove_edge(startnode, neighbor)
    result = []
    while len(g) > 1:
        neighbor = g.neighbors(startnode)[0]
        result.append(g.node[startnode][attribute_name])
        g.remove_node(startnode)
        startnode = neighbor
    result.append(g.node[startnode][attribute_name])
    # we have the labels in order now.
    # we want to cycle until we find the lex lowest configuration

    def min_lex(li):
        # smallest rotation of ``li`` considering both orientations.
        def all_lex(li):
            # yield every rotation of the list.
            n = len(li)
            for i in range(n):
                yield li
                li = li[1:] + [li[0]]
        il = list(li)
        il.reverse()
        return ''.join(min(min(all_lex(li)), min(all_lex(il))))

    return min_lex(result)
| 32.657588 | 79 | 0.553437 |
9c5831689fee97fab76df799ad7676723ed8649e | 2,926 | py | Python | app/core/tests/test_models.py | PFrangeskos/recipe-app-api | 79654094c8c0a160ebc82c90eda3f504ed7b6a64 | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | PFrangeskos/recipe-app-api | 79654094c8c0a160ebc82c90eda3f504ed7b6a64 | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | PFrangeskos/recipe-app-api | 79654094c8c0a160ebc82c90eda3f504ed7b6a64 | [
"MIT"
] | null | null | null | from unittest.mock import patch
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='test@londonappdev.com', password='testpass'):
# create a sample user.
return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
# Test creating a new user with an email is successful
# print('core.m.test_create_user_with_email_successful')
email = 'p@test.com'
password = 'testpass123'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_normalized(self):
# test the email for a new user is normalized.
# print('core.m.test_new_user_normalized')
email = 'test@LONDON.COM'
user = get_user_model().objects.create_user(email, 'testpass123')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
# test creating user with no email raises error
# print('core.m.test_new_user_invalid_email')
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
def test_create_new_superuser(self):
# print('core.m.test_create_new_superuser')
# test creating a new superuser
user = get_user_model().objects.create_superuser(
'p@gmail.com',
'test12'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
def test_tag_str(self):
# test the tag rerpresentation.
# print('core.m.test_tag_str')
tag = models.Tag.objects.create(
user=sample_user(),
name='Vegan'
)
self.assertEqual(str(tag), tag.name)
def test_ingredient_str(self):
# Test the ingredient string representation.
ingredient = models.Ingredient.objects.create(
user=sample_user(),
name='Cucumber'
)
self.assertEqual(str(ingredient), ingredient.name)
def test_recipe_str(self):
# test the recipe str representation.
recipe = models.Recipe.objects.create(
user=sample_user(),
title='Steak and mushroom sauce',
time_minutes=5,
price=5.00
)
self.assertEqual(str(recipe), recipe.title)
@patch('uuid.uuid4')
def test_recipe_file_name_uuid(self, mock_uuid):
# test that the image is saved in the correct location.
uuid = 'test-uuid'
mock_uuid.return_value = uuid
file_path = models.recipe_image_file_path(None, 'myimage.jpg')
exp_path = f'uploads/recipe/{uuid}.jpg'
self.assertEqual(file_path, exp_path)
| 31.804348 | 73 | 0.646617 |
acbd1f6e08ad2586cfd9c2e86fdef3447f5c9f3b | 16,469 | py | Python | src/python/DTNRMLibs/pycurl_manager.py | cmscaltech/siterm | 6af2b0cc61d52391255e0b125d221e8cdebb1997 | [
"Apache-2.0"
] | null | null | null | src/python/DTNRMLibs/pycurl_manager.py | cmscaltech/siterm | 6af2b0cc61d52391255e0b125d221e8cdebb1997 | [
"Apache-2.0"
] | null | null | null | src/python/DTNRMLibs/pycurl_manager.py | cmscaltech/siterm | 6af2b0cc61d52391255e0b125d221e8cdebb1997 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: ISO-8859-1 -*-
# pylint: disable=R0913,W0702,R0914,R0912,R0201
"""
File: pycurl_manager.py
Author: Valentin Kuznetsov <vkuznet@gmail.com>
Description: a basic wrapper around pycurl library.
The RequestHandler class provides basic APIs to get data
from a single resource or submit mutliple requests to
underlying data-services.
Examples:
# CERN SSO: http://linux.web.cern.ch/linux/docs/cernssocookie.shtml
# use RequestHandler with CERN SSO enabled site
mgr = RequestHandler()
url = "https://cms-gwmsmon.cern.ch/prodview/json/site_summary"
params = {}
tfile = tempfile.NamedTemporaryFile()
cern_sso_cookie(url, tfile.name, cert, ckey)
cookie = {url: tfile.name}
header, data = mgr.request(url3, params, cookie=cookie)
if header.status != 200:
print "ERROR"
# fetch multiple urls at onces from various urls
tfile = tempfile.NamedTemporaryFile()
ckey = os.path.join(os.environ['HOME'], '.globus/userkey.pem')
cert = os.path.join(os.environ['HOME'], '.globus/usercert.pem')
url1 = "https://cmsweb.cern.ch/dbs/prod/global/DBSReader/help"
url2 = "https://cmsweb.cern.ch/dbs/prod/global/DBSReader/datatiers"
url3 = "https://cms-gwmsmon.cern.ch/prodview/json/site_summary"
cern_sso_cookie(url3, tfile.name, cert, ckey)
cookie = {url3: tfile.name}
urls = [url1, url2, url3]
data = getdata(urls, ckey, cert, cookie=cookie)
for row in data:
print(row)
"""
from __future__ import print_function
# system modules
import os
import re
import sys
import cStringIO as StringIO
import httplib
import json
import logging
import urllib
import subprocess
# 3d-party libraries
import pycurl
# python3
if sys.version.startswith('3.'):
import io
class ResponseHeader(object):
    """ResponseHeader parses a raw HTTP response header string.

    After construction: ``status`` (int or None when no status line was
    found), ``reason`` (status reason phrase) and ``header`` (dict of
    header field -> value) are available.
    """
    def __init__(self, response):
        super(ResponseHeader, self).__init__()
        self.header = {}
        self.reason = ''
        self.fromcache = False
        # Bug fix: initialize ``status`` so attribute access is safe even when
        # the response contains no parsable HTTP status line (previously an
        # AttributeError).
        self.status = None
        self.parse(response)

    def parse(self, response):
        """Parse response header and assign class member data"""
        for row in response.split('\r'):
            row = row.replace('\n', '')
            if not row:
                continue
            # Skip interim "HTTP/1.x 100 ..." lines: the real header follows.
            if row.find('HTTP') != -1 and row.find('100') == -1:
                res = row.replace('HTTP/1.1', '')
                res = res.replace('HTTP/1.0', '')
                res = res.strip()
                status, reason = res.split(' ', 1)
                self.status = int(status)
                self.reason = reason
                continue
            try:
                key, val = row.split(':', 1)
                self.header[key.strip()] = val.strip()
            except ValueError:
                # Row without a "key: value" shape (e.g. a skipped interim
                # status line); ignore it. Narrowed from a bare ``except``.
                pass
class RequestHandler(object):
"""
RequestHandler provides APIs to fetch single/multiple
URL requests based on pycurl library
"""
def __init__(self, config=None, logger=None):
super(RequestHandler, self).__init__()
if not config:
config = {}
self.nosignal = config.get('nosignal', 1)
self.timeout = config.get('timeout', 30)
self.connecttimeout = config.get('connecttimeout', 30)
self.followlocation = config.get('followlocation', 1)
self.maxredirs = config.get('maxredirs', 5)
self.logger = logger if logger else logging.getLogger()
def encode_params(self, params, verb, doseq):
""" Encode request parameters for usage with the 4 verbs.
Assume params is alrady encoded if it is a string and
uses a different encoding depending on the HTTP verb
(either json.dumps or urllib.urlencode)
"""
# data is already encoded, just return it
if isinstance(params, basestring):
return params
# data is not encoded, we need to do that
if verb in ['GET', 'HEAD']:
if params:
encoded_data = urllib.urlencode(params, doseq=doseq)
else:
return ''
else:
if params:
encoded_data = json.dumps(params)
else:
return {}
return encoded_data
    def set_opts(self, curl, url, params, headers,
                 ckey=None, cert=None, capath=None, verbose=None,
                 verb='GET', doseq=True, cainfo=None, cookie=None):
        """Set options for given curl object, params should be a dictionary.

        Configures ``curl`` for one request (timeouts, cookies, verb-specific
        body/URL handling, SSL material) and returns ``(bbuf, hbuf)``, the
        StringIO buffers that will receive the response body and header.
        """
        if not (isinstance(params, (dict, basestring)) or params is None):
            raise TypeError("pycurl parameters should be passed as dictionary or an (encoded) string")
        # Generic transfer options taken from the handler configuration.
        curl.setopt(pycurl.NOSIGNAL, self.nosignal)
        curl.setopt(pycurl.TIMEOUT, self.timeout)
        curl.setopt(pycurl.CONNECTTIMEOUT, self.connecttimeout)
        curl.setopt(pycurl.FOLLOWLOCATION, self.followlocation)
        curl.setopt(pycurl.MAXREDIRS, self.maxredirs)
        # ``cookie`` maps url -> cookie file (e.g. for CERN SSO, see module doc).
        if cookie and url in cookie:
            curl.setopt(pycurl.COOKIEFILE, cookie[url])
            curl.setopt(pycurl.COOKIEJAR, cookie[url])
        encoded_data = self.encode_params(params, verb, doseq)
        if verb == 'GET':
            # GET carries the parameters in the query string.
            if encoded_data:
                url = url + '?' + encoded_data
        elif verb == 'HEAD':
            # HEAD: query string like GET, but only the header is fetched.
            if encoded_data:
                url = url + '?' + encoded_data
            curl.setopt(pycurl.CUSTOMREQUEST, verb)
            curl.setopt(pycurl.HEADER, 1)
            curl.setopt(pycurl.NOBODY, True)
        elif verb == 'POST':
            curl.setopt(pycurl.POST, 1)
            if encoded_data:
                curl.setopt(pycurl.POSTFIELDS, encoded_data)
        elif verb == 'DELETE' or verb == 'PUT':
            # PUT/DELETE send the body chunked.
            curl.setopt(pycurl.CUSTOMREQUEST, verb)
            curl.setopt(pycurl.HTTPHEADER, ['Transfer-Encoding: chunked'])
            if encoded_data:
                curl.setopt(pycurl.POSTFIELDS, encoded_data)
        else:
            raise Exception('Unsupported HTTP method "%s"' % verb)
        curl.setopt(pycurl.URL, str(url))
        # NOTE: for PUT/DELETE this overrides the chunked-encoding header list
        # set above — presumably callers pass complete header dicts; confirm.
        if headers:
            curl.setopt(pycurl.HTTPHEADER,
                        ["%s: %s" % (k, v) for k, v in headers.items()])
        # Response body/header are collected into in-memory buffers.
        bbuf = StringIO.StringIO()
        hbuf = StringIO.StringIO()
        curl.setopt(pycurl.WRITEFUNCTION, bbuf.write)
        curl.setopt(pycurl.HEADERFUNCTION, hbuf.write)
        if capath:
            # With a CA path we can verify the peer certificate.
            curl.setopt(pycurl.CAPATH, capath)
            curl.setopt(pycurl.SSL_VERIFYPEER, True)
            if cainfo:
                curl.setopt(pycurl.CAINFO, cainfo)
        else:
            curl.setopt(pycurl.SSL_VERIFYPEER, False)
        if ckey:
            curl.setopt(pycurl.SSLKEY, ckey)
        if cert:
            curl.setopt(pycurl.SSLCERT, cert)
        if verbose:
            curl.setopt(pycurl.VERBOSE, 1)
            curl.setopt(pycurl.DEBUGFUNCTION, self.debug)
        return bbuf, hbuf
def debug(self, debug_type, debug_msg):
"""Debug callback implementation"""
print("debug(%d): %s" % (debug_type, debug_msg))
def parse_body(self, data, decode=False):
"""
Parse body part of URL request (by default use json).
This method can be overwritten.
"""
if decode:
try:
res = json.loads(data)
except ValueError as exc:
msg = 'Unable to load JSON data, %s, data type=%s, pass as is' % (str(exc), type(data))
logging.warning(msg)
return data
return res
return data
def parse_header(self, header):
"""
Parse response header.
This method can be overwritten.
"""
return ResponseHeader(header)
def request(self, url, params, headers=None, verb='GET',
verbose=0, ckey=None, cert=None, capath=None,
doseq=True, decode=False, cainfo=None, cookie=None):
"""Fetch data for given set of parameters"""
curl = pycurl.Curl()
bbuf, hbuf = self.set_opts(curl, url, params, headers, ckey, cert,
capath, verbose, verb, doseq, cainfo, cookie)
curl.perform()
if verbose:
print(verb, url, params, headers)
header = self.parse_header(hbuf.getvalue())
if header.status < 300:
if verb == 'HEAD':
data = ''
else:
data = self.parse_body(bbuf.getvalue(), decode)
else:
data = bbuf.getvalue()
msg = 'url=%s, code=%s, reason=%s, headers=%s' % (url, header.status, header.reason, header.header)
exc = httplib.HTTPException(msg)
setattr(exc, 'req_data', params)
setattr(exc, 'req_headers', headers)
setattr(exc, 'url', url)
setattr(exc, 'result', data)
setattr(exc, 'status', header.status)
setattr(exc, 'reason', header.reason)
setattr(exc, 'headers', header.header)
bbuf.flush()
hbuf.flush()
raise exc
bbuf.flush()
hbuf.flush()
return header, data
def getdata(self, url, params, headers=None, verb='GET',
verbose=0, ckey=None, cert=None, doseq=True, cookie=None):
"""Fetch data for given set of parameters"""
_, data = self.request(url=url, params=params, headers=headers, verb=verb,
verbose=verbose, ckey=ckey, cert=cert, doseq=doseq, cookie=cookie)
return data
def getheader(self, url, params, headers=None, verb='GET',
verbose=0, ckey=None, cert=None, doseq=True):
"""Fetch HTTP header"""
header, _ = self.request(url, params, headers, verb,
verbose, ckey, cert, doseq)
return header
def multirequest(self, url, parray, headers=None, ckey=None,
cert=None, verbose=None, cookie=None):
"""Fetch data for given set of parameters"""
multi = pycurl.CurlMulti()
for params in parray:
curl = pycurl.Curl()
bbuf, hbuf = \
self.set_opts(curl, url, params, headers, ckey, cert, verbose, cookie=cookie)
multi.add_handle(curl)
while True:
ret, num_handles = multi.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
while num_handles:
ret = multi.select(1.0)
if ret == -1:
continue
while True:
ret, num_handles = multi.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
dummyNumq, response, dummyErr = multi.info_read()
for dummyCobj in response:
data = json.loads(bbuf.getvalue())
if isinstance(data, dict):
data.update(params)
yield data
if isinstance(data, list):
for item in data:
if isinstance(item, dict):
item.update(params)
yield item
else:
err = 'Unsupported data format: data=%s, type=%s'\
% (item, type(item))
raise Exception(err)
bbuf.flush()
hbuf.flush()
# Pattern matched against candidate URLs; anchored at the start via re.match.
HTTP_PAT = re.compile("(https|http)://[-A-Za-z0-9_+&@#/%?=~_|!:,.;]*[-A-Za-z0-9+&@#/%=~_|]")
def validate_url(url):
    """Return True when *url* looks like an http(s) URL, else False."""
    return bool(HTTP_PAT.match(url))
def pycurl_options():
    """Return the default pycurl option set as {OPTION_NAME: value}."""
    return {
        'FOLLOWLOCATION': 1,
        'CONNECTTIMEOUT': 270,
        'MAXREDIRS': 5,
        'NOSIGNAL': 1,
        'TIMEOUT': 270,
        'SSL_VERIFYPEER': False,
        'VERBOSE': 0,
    }
def cern_sso_cookie(url, fname, cert, ckey):
    """Obtain a CERN SSO cookie and store it in file *fname*.

    Fixes: the command is passed as an argument list with ``shell=False``
    (the old interpolated shell string was vulnerable to injection/quoting
    problems via *url*/*fname*), and ``communicate()`` drains both pipes —
    ``wait()`` alone can deadlock once a PIPE buffer fills up.
    """
    args = ['cern-get-sso-cookie', '-cert', cert, '-key', ckey,
            '-r', '-u', url, '-o', fname]
    proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, env=os.environ)
    proc.communicate()
def getdata(urls, ckey, cert, headers=None, options=None, num_conn=100, cookie=None):
    """
    Get data for given list of urls, using provided number of connections
    and user credentials.

    Generator: yields one ``{'url', 'data', 'headers'}`` dict per successful
    transfer, or ``{'url', 'data': None, 'headers', 'error', 'code'}`` on
    failure. Invalid URLs (see validate_url) are silently dropped. Handles
    are pooled: at most *num_conn* concurrent transfers.
    """
    if not options:
        options = pycurl_options()
    # Make a queue with urls
    queue = [u for u in urls if validate_url(u)]
    # Check args
    num_urls = len(queue)
    num_conn = min(num_conn, num_urls)
    # Pre-allocate a list of curl objects
    mcurl = pycurl.CurlMulti()
    mcurl.handles = []
    for _ in range(num_conn):
        curl = pycurl.Curl()
        # NOTE(review): `fp` is never used afterwards; looks like a leftover
        # placeholder from the classic pycurl retriever example -- confirm.
        curl.fp = None
        for key, val in options.items():
            curl.setopt(getattr(pycurl, key), val)
        curl.setopt(pycurl.SSLKEY, ckey)
        curl.setopt(pycurl.SSLCERT, cert)
        mcurl.handles.append(curl)
        if headers:
            curl.setopt(pycurl.HTTPHEADER,
                        ["%s: %s" % (k, v) for k, v in headers.items()])
    # Main loop
    freelist = mcurl.handles[:]
    num_processed = 0
    while num_processed < num_urls:
        # If there is an url to process and a free curl object,
        # add to multi-stack
        while queue and freelist:
            url = queue.pop(0)
            curl = freelist.pop()
            curl.setopt(pycurl.URL, url.encode('ascii', 'ignore'))
            if cookie and url in cookie:
                curl.setopt(pycurl.COOKIEFILE, cookie[url])
                curl.setopt(pycurl.COOKIEJAR, cookie[url])
            # Buffer type depends on interpreter: bytes buffers on py3,
            # StringIO on py2 (dual-compat module).
            if sys.version.startswith('3.'):
                bbuf = io.BytesIO()
                hbuf = io.BytesIO()
            else:
                bbuf = StringIO.StringIO()
                hbuf = StringIO.StringIO()
            curl.setopt(pycurl.WRITEFUNCTION, bbuf.write)
            curl.setopt(pycurl.HEADERFUNCTION, hbuf.write)
            mcurl.add_handle(curl)
            # store some info
            curl.hbuf = hbuf
            curl.bbuf = bbuf
            curl.url = url
        # Run the internal curl state machine for the multi stack
        while True:
            ret, _ = mcurl.perform()
            if ret != pycurl.E_CALL_MULTI_PERFORM:
                break
        # Check for curl objects which have terminated, and add them to the
        # freelist
        while True:
            num_q, ok_list, err_list = mcurl.info_read()
            for curl in ok_list:
                if sys.version.startswith('3.'):
                    hdrs = curl.hbuf.getvalue().decode('utf-8')
                    data = curl.bbuf.getvalue().decode('utf-8')
                else:
                    hdrs = curl.hbuf.getvalue()
                    data = curl.bbuf.getvalue()
                url = curl.url
                curl.bbuf.flush()
                curl.bbuf.close()
                curl.hbuf.close()
                curl.hbuf = None
                curl.bbuf = None
                mcurl.remove_handle(curl)
                freelist.append(curl)
                yield {'url': url, 'data': data, 'headers': hdrs}
            for curl, errno, errmsg in err_list:
                # NOTE(review): unlike the success path, headers are not
                # decoded here, so on py3 `hdrs` is bytes -- confirm intent.
                hdrs = curl.hbuf.getvalue()
                data = curl.bbuf.getvalue()
                url = curl.url
                curl.bbuf.flush()
                curl.bbuf.close()
                curl.hbuf.close()
                curl.hbuf = None
                curl.bbuf = None
                mcurl.remove_handle(curl)
                freelist.append(curl)
                yield {'url': url, 'data': None, 'headers': hdrs,
                       'error': errmsg, 'code': errno}
            num_processed = num_processed + len(ok_list) + len(err_list)
            if num_q == 0:
                break
        # Currently no more I/O is pending, could do something in the meantime
        # (display a progress bar, etc.).
        # We just call select() to sleep until some more data is available.
        mcurl.select(1.0)
    cleanup(mcurl)
def cleanup(mcurl):
    """Close all per-handle buffers and curl handles, then the multi handle."""
    for handle in mcurl.handles:
        for attr in ('hbuf', 'bbuf'):
            buf = getattr(handle, attr)
            if buf is not None:
                buf.close()
                setattr(handle, attr, None)
        handle.close()
    mcurl.close()
| 36.27533 | 111 | 0.556864 |
38a664573b1d3442683e507afca893580e6ef49e | 1,389 | py | Python | savecode/pythonpackages/commonbaby/proxy/proxydbconfig.py | Octoberr/swm0920 | 8f05a6b91fc205960edd57f9076facec04f49a1a | [
"Apache-2.0"
] | 2 | 2019-05-19T11:54:26.000Z | 2019-05-19T12:03:49.000Z | savecode/pythonpackages/commonbaby/proxy/proxydbconfig.py | Octoberr/swm0920 | 8f05a6b91fc205960edd57f9076facec04f49a1a | [
"Apache-2.0"
] | 1 | 2020-11-27T07:55:15.000Z | 2020-11-27T07:55:15.000Z | savecode/pythonpackages/commonbaby/proxy/proxydbconfig.py | Octoberr/swm0920 | 8f05a6b91fc205960edd57f9076facec04f49a1a | [
"Apache-2.0"
] | 2 | 2021-09-06T18:06:12.000Z | 2021-12-31T07:44:43.000Z | """db sqlite config"""
# -*- coding:utf-8 -*-
import os
class ProxyDbConfig:
    """Configuration for the sqlite proxy database.

    dbname: database file name, default 'data.db'
    pagesize: sqlite page size, default 512
    max_page_count: maximum page count, default 204800
    errordbdelete: whether a broken database file is deleted; default False
        (keep it under a renamed file instead)
    connecttimeoutsec: connection timeout in seconds (float); values <= 0
        mean "no timeout", default 60
    delete_on_error: whether to delete the database file on error, default True
    """

    def __init__(
            self,
            dbname: str = 'data.db',
            pagesize: int = 512,
            max_page_count: int = 204800,
            errordbdelete: bool = False,
            connecttimeoutsec: float = 60,
            delete_on_error: bool = True,
    ):
        self._dbname = 'data.db'
        # Bug fix: the original condition was inverted
        # ("not isinstance(dbname, str) and not dbname == ''"), so a valid
        # custom name could never take effect while non-string values were
        # accepted. Accept only non-empty strings.
        if isinstance(dbname, str) and dbname != "":
            self._dbname = dbname
        self._pagesize: int = pagesize
        self._maxpagecount: int = max_page_count
        self._errordbdelete: bool = False
        if isinstance(errordbdelete, bool):
            self._errordbdelete = errordbdelete
        self._connecttimeoutsec: float = 60
        # type() check (not isinstance) deliberately rejects bool here,
        # mirroring the original behaviour.
        if type(connecttimeoutsec) in [int, float]:
            if connecttimeoutsec <= 0:
                # Non-positive timeout means "wait forever".
                self._connecttimeoutsec = None
            else:
                self._connecttimeoutsec = connecttimeoutsec
        self._delete_on_error: bool = True
        if isinstance(delete_on_error, bool):
            self._delete_on_error = delete_on_error
| 29.553191 | 60 | 0.607631 |
72639bba261018a41514143ec5217124b25f898b | 672 | py | Python | test.py | pgupta119/LeetCode | 3e7418402d736cba19362fe7525fdc9067cfcaef | [
"MIT"
] | null | null | null | test.py | pgupta119/LeetCode | 3e7418402d736cba19362fe7525fdc9067cfcaef | [
"MIT"
] | null | null | null | test.py | pgupta119/LeetCode | 3e7418402d736cba19362fe7525fdc9067cfcaef | [
"MIT"
] | null | null | null | # You have a set of integers s, which originally contains all the numbers from 1 to n. Unfortunately, due to some error, one of the numbers in s got duplicated to another number in the set, which results in repetition of one number and loss of another number.
# You are given an integer array nums representing the data status of this set after the error.
# Find the number that occurs twice and the number that is missing and return them in the form of an array.
#Input: nums = [1,2,2,4]
#Output: [2,3]
class Solution:
    def findErrorNums(self, nums: "List[int]") -> "List[int]":
        """Return [duplicated_number, missing_number] for a 1..n set with
        one value duplicated and one lost.

        duplicate = sum(nums) - sum(set(nums));
        missing   = n*(n+1)/2 - sum(set(nums)).

        Fix: the annotations are quoted — ``typing.List`` was never imported,
        so the original raised NameError when the module was loaded.
        """
        distinct = sum(set(nums))
        n = len(nums)
        return [sum(nums) - distinct, n * (n + 1) // 2 - distinct]
6c0f81d67436f0b8d4fddfd44fb06994c15ede6e | 2,908 | py | Python | pygui/widget/editor/tabbed_textpad.py | clark3493/pygui | 7de660341dfd1486de269edd116f642805e9ecb0 | [
"MIT"
] | null | null | null | pygui/widget/editor/tabbed_textpad.py | clark3493/pygui | 7de660341dfd1486de269edd116f642805e9ecb0 | [
"MIT"
] | null | null | null | pygui/widget/editor/tabbed_textpad.py | clark3493/pygui | 7de660341dfd1486de269edd116f642805e9ecb0 | [
"MIT"
] | null | null | null | import os
import tkinter as tk
from .textpad import TextPad
from ..tab_view import AbstractTabView
class TabbedTextpad(AbstractTabView):
    """Tab view whose default tab content is a TextPad editor.

    NOTE(review): behaviour depends on the AbstractTabView base class
    (add_tab, get_widget, get_container, tab/tabs), defined elsewhere.
    """
    # printf-style template for naming fresh tabs (e.g. "new1"); presumably
    # consumed by the AbstractTabView base class -- confirm.
    NEW_TAB_BASENAME = "new%d"
    def __init__(self, parent, *args, **kwargs):
        """Create the notebook and open one initial empty tab."""
        super().__init__(parent, *args, **kwargs)
        self.set_options()
        self.add_tab()
    def add_tab(self, event=None, widget=None, text=None, **kwargs):
        """Add a tab; without an explicit widget a default TextPad tab is built.

        NOTE(review): on the default path the incoming ``text`` is discarded
        (``text=None`` is passed) -- confirm this is intentional.
        """
        if widget is None:
            return self._add_default_tab(text=None, **kwargs)
        else:
            return super().add_tab(widget=widget, text=text)
    def bind_keys(self):
        """Bind Ctrl-n / Ctrl-N on the notebook itself to open a new tab."""
        super().bind_keys()
        for key in ['<Control-n>', '<Control-N>']:
            self.bind(key, self.add_tab)
    def bind_child_keys(self, child):
        """Bind Ctrl-n / Ctrl-N on a child widget to open a new tab."""
        for key in ['<Control-n>', '<Control-N>']:
            child.bind(key, self.add_tab)
    def get_widget(self, index, widget='!textpad'):
        """Return the named child widget (default: the TextPad) of tab *index*."""
        return super().get_widget(index, widget=widget)
    def on_right_click(self, event=None):
        """Show the per-tab popup menu when a tab label is right-clicked."""
        if event.widget.identify(event.x, event.y) == 'label':
            index = event.widget.index('@%d,%d' % (event.x, event.y))
            frame = self.get_container(index)
            if '!textpad' in frame.children:
                popup = _TextpadTabPopup(self, index)
                popup.tk_popup(event.x_root, event.y_root)
    def save_tab(self, index):
        """Save tab *index* and retitle the tab with the saved file's basename."""
        pad = self.get_widget(index)
        path = pad.functions['save_file']()
        self.tab(self.tabs()[index], text=os.path.split(path)[-1])
    def save_tab_as(self, index):
        """'Save as' for tab *index*; retitle the tab with the new basename."""
        pad = self.get_widget(index)
        path = pad.functions['save_file_as']()
        self.tab(self.tabs()[index], text=os.path.split(path)[-1])
    def set_options(self):
        """Disable Tk menu tear-off for this widget tree."""
        self.option_add('*tearOff', False)
    def _add_default_tab(self, text=None, frame_kwargs={}, textpad_kwargs={}, tab_kwargs={}):
        """Create a Frame + TextPad tab; return (tab, frame, pad).

        NOTE(review): mutable default arguments; harmless only while the
        dicts are never mutated here (they are only **-unpacked).
        """
        child = tk.Frame(self, **frame_kwargs)
        new_tab = super().add_tab(widget=child, text=text, tab_kwargs=tab_kwargs)
        pad = TextPad(child, **textpad_kwargs)
        pad.pack(expand=True, fill=tk.BOTH)
        self.bind_child_keys(pad)
        return new_tab, child, pad
class _TextpadTabPopup(tk.Menu):
    """Right-click context menu for a single notebook tab (Save / Save As)."""
    def __init__(self, parent, tab_index):
        super().__init__(parent)
        self.parent = parent
        self.tab_index = tab_index
        for label, handler in (("Save", self.save_tab),
                               ("Save As", self.save_tab_as)):
            self.add_command(label=label, command=handler)
    def save_tab(self, event=None):
        """Delegate saving of this tab to the owning notebook."""
        self.parent.save_tab(self.tab_index)
    def save_tab_as(self, event=None):
        """Delegate 'save as' of this tab to the owning notebook."""
        self.parent.save_tab_as(self.tab_index)
# Manual smoke test: open a window with a tabbed editor and two tabs.
if __name__ == "__main__":
    root = tk.Tk()
    nb = TabbedTextpad(root)
    nb.pack(expand=1, fill='both')
    nb.add_tab()
    root.mainloop()
| 30.93617 | 93 | 0.594911 |
f2a800a9dae55fa650c4e2617b5b23f7180177ba | 6,325 | py | Python | selfdrive/car/hyundai/values.py | raykholo/openpilot | 12a79cdd7ea2c38b3938b6c37663cf08e65009a1 | [
"MIT"
] | 85 | 2019-06-14T17:51:31.000Z | 2022-02-09T22:18:20.000Z | selfdrive/car/hyundai/values.py | raykholo/openpilot | 12a79cdd7ea2c38b3938b6c37663cf08e65009a1 | [
"MIT"
] | 2 | 2018-05-25T04:12:56.000Z | 2018-10-24T19:09:08.000Z | selfdrive/car/hyundai/values.py | raykholo/openpilot | 12a79cdd7ea2c38b3938b6c37663cf08e65009a1 | [
"MIT"
] | 25 | 2019-08-13T09:52:05.000Z | 2021-11-03T02:04:58.000Z | from cereal import car
from selfdrive.car import dbc_dict
VisualAlert = car.CarControl.HUDControl.VisualAlert
def get_hud_alerts(visual_alert):
  """Map a cereal VisualAlert value to the HUD alert code (5 for
  steerRequired, 0 otherwise)."""
  return 5 if visual_alert == VisualAlert.steerRequired else 0
# Steer torque limits
class SteerLimitParams:
  """Steer-torque limiter parameters (values are raw command counts;
  presumably EPS units -- confirm against the DBC)."""
  STEER_MAX = 255 # 409 is the max, 255 is stock
  STEER_DELTA_UP = 3  # max increase per control frame
  STEER_DELTA_DOWN = 7  # max decrease per control frame
  STEER_DRIVER_ALLOWANCE = 50  # driver torque below this is ignored
  STEER_DRIVER_MULTIPLIER = 2
  STEER_DRIVER_FACTOR = 1
class CAR:
  """Supported car identifier strings (keys of FINGERPRINTS/CHECKSUM/DBC)."""
  ELANTRA = "HYUNDAI ELANTRA LIMITED ULTIMATE 2017"
  GENESIS = "HYUNDAI GENESIS 2018"
  KIA_OPTIMA = "KIA OPTIMA SX 2019"
  KIA_SORENTO = "KIA SORENTO GT LINE 2018"
  KIA_STINGER = "KIA STINGER GT2 2018"
  SANTA_FE = "HYUNDAI SANTA FE LIMITED 2019"
class Buttons:
  """Cruise-control button codes (presumably as reported on the CAN bus --
  confirm against the DBC)."""
  NONE = 0
  RES_ACCEL = 1
  SET_DECEL = 2
  CANCEL = 4
# Per-car CAN fingerprints used to identify the vehicle. Each entry is a
# dict (presumably {CAN message id: payload length} -- confirm); a car may
# list several dicts to cover trim/market variants.
FINGERPRINTS = {
  CAR.ELANTRA: [{
    66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 897: 8, 832: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1170: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1314: 8, 1322: 8, 1345: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1532: 5, 2001: 8, 2003: 8, 2004: 8, 2009: 8, 2012: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8
  }],
  CAR.GENESIS: [{
    67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 916: 8, 1024: 2, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1342: 6, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 5, 1407: 8, 1419: 8, 1427: 6, 1434: 2, 1456: 4
  },
  {
    67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 916: 8, 1024: 2, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 3, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1378: 4, 1379: 8, 1384: 5, 1407: 8, 1419: 8, 1427: 6, 1434: 2, 1456: 4
  }],
  CAR.KIA_OPTIMA: [{
    64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1186: 2, 1191: 2, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1532: 5, 1952: 8, 1960: 8, 1988: 8, 1996: 8, 2001: 8, 2004: 8, 2008: 8, 2009: 8, 2012: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8
  }],
  CAR.KIA_SORENTO: [{
    67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1384: 8, 1407: 8, 1411: 8, 1419: 8, 1425: 2, 1427: 6, 1444: 8, 1456: 4, 1470: 8, 1489: 1
  }],
  CAR.KIA_STINGER: [{
    67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1378: 4, 1379: 8, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8
  }],
  CAR.SANTA_FE: [{
    67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8
  },
  {
    67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 764: 8, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1183: 8, 1186: 2, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1988: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8
  }
  ],
}
class ECU:
  """Indices of ECUs referenced by ECU_FINGERPRINT (camera only here)."""
  CAM = 0
# CAN message ids used to detect the presence of each ECU (camera).
ECU_FINGERPRINT = {
  ECU.CAM: [832, 1156, 1191, 1342]
}
# Checksum algorithm per car; presumably selects how CAN message checksums
# are computed/validated for these platforms -- confirm in the carcontroller.
CHECKSUM = {
  "crc8": [CAR.SANTA_FE],
  "6B": [CAR.KIA_SORENTO, CAR.GENESIS],
  "7B": [CAR.KIA_STINGER, CAR.ELANTRA, CAR.KIA_OPTIMA],
}
# Per-car feature flags consumed by the car interface.
FEATURES = {
  "use_cluster_gears": [CAR.ELANTRA], # Use Cluster for Gear Selection, rather than Transmission
  "use_tcu_gears": [CAR.KIA_OPTIMA], # Use TCU Message for Gear Selection
}
# DBC (CAN database) file per car; all platforms share the generic file.
DBC = {
  CAR.ELANTRA: dbc_dict('hyundai_kia_generic', None),
  CAR.GENESIS: dbc_dict('hyundai_kia_generic', None),
  CAR.KIA_OPTIMA: dbc_dict('hyundai_kia_generic', None),
  CAR.KIA_SORENTO: dbc_dict('hyundai_kia_generic', None),
  CAR.KIA_STINGER: dbc_dict('hyundai_kia_generic', None),
  CAR.SANTA_FE: dbc_dict('hyundai_kia_generic', None),
}
# Driver torque above this counts as an override -- presumably in the same
# raw units as SteerLimitParams; confirm.
STEER_THRESHOLD = 100
90b8d878967e9913da03f1c316be31584a72199f | 269 | py | Python | core/migrations/0008_merge_20200424_1243.py | methods/phe-cv19-site | fd5f877b8a0725a9893fa3c24a54e9495e749744 | [
"MIT"
] | 1 | 2020-09-21T16:35:16.000Z | 2020-09-21T16:35:16.000Z | core/migrations/0008_merge_20200424_1243.py | methods/phe-cv19-site | fd5f877b8a0725a9893fa3c24a54e9495e749744 | [
"MIT"
] | 14 | 2020-03-25T10:29:26.000Z | 2022-01-13T03:45:38.000Z | core/migrations/0008_merge_20200424_1243.py | methods/phe-cv19-site | fd5f877b8a0725a9893fa3c24a54e9495e749744 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.11 on 2020-04-24 12:43
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated merge migration reconciling two parallel branches."""
    # The two leaf migrations this merge joins.
    dependencies = [
        ('core', '0006_auto_20200414_0959'),
        ('core', '0007_merge_20200416_1038'),
    ]
    # Intentionally empty: a merge migration only fixes the dependency graph.
    operations = [
    ]
| 17.933333 | 48 | 0.643123 |
9cb14027003c468788977684bf1f7f72a3bc6e91 | 2,216 | py | Python | example/testspider.py | bopopescu/pspider | b83430f00d168deb1ab3f15e7a7c93735bd307f7 | [
"MIT"
] | 168 | 2017-10-13T08:20:49.000Z | 2018-08-17T08:42:27.000Z | example/testspider.py | bopopescu/pspider | b83430f00d168deb1ab3f15e7a7c93735bd307f7 | [
"MIT"
] | 2 | 2021-03-31T19:10:18.000Z | 2021-12-13T19:56:02.000Z | example/testspider.py | bopopescu/pspider | b83430f00d168deb1ab3f15e7a7c93735bd307f7 | [
"MIT"
] | 7 | 2019-05-09T05:00:12.000Z | 2021-12-06T03:39:32.000Z | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Created on : 2019-03-07 12:28
# @Author : zpy
# @Software: PyCharm
from spider.pspider import Pspider, req
from spider.model import BaseModel
class LagouSpider(Pspider):
    """Example spider scraping lagou.com job listings.

    NOTE(review): relies on the project-level Pspider / @req / BaseModel
    API, which is not visible here.
    """
    def task(self):
        # self.tasks is expected to be assigned by the caller before start()
        # (see the commented-out example in __main__).
        return self.tasks
    def req_resp(self):
        """Yield request-builder callables; presumably consumed by
        Pspider.start -- confirm."""
        self.jobmodel = BaseModel(
            [('company', str), ('companyid', str), ('positionname', str), ('salary', str), ('require', str)])
        @req()
        def pages():
            url = self.task()
            return {"request": {
                'url': url,
            },
                "response": {
                    "handler": self.parse_data,
                    "result_tag": 'job'
                }}
        yield pages
    def parse_data(self, resp):
        """Extract one job record per listing row into jobmodel and save it."""
        for d in resp.html.xpath('//*[@id="s_position_list"]/ul/li'):
            self.jobmodel.res.require = ''.join(d.xpath('//div[1]/div[1]/div[2]/div/text()'))
            self.jobmodel.res.company = d.attrs.get('data-company')
            self.jobmodel.res.companyid = d.attrs.get('data-companyid')
            self.jobmodel.res.salary = d.attrs.get('data-salary')
            self.jobmodel.res.positionname = d.attrs.get('data-positionname')
            self.jobmodel.save()
        return self.jobmodel
class SiteSpider(Pspider):
    """Minimal example spider fetching a single fixed page."""
    def task(self):
        return "https://www.zhihu.com/people/pyy-69-54/following"
    def req_resp(self):
        """Yield a single request descriptor with a 2-second timeout."""
        @req(timeout=2)
        def first_page():
            url = self.task()
            return {"request":{
                'url': url,
            },
                "response":{
                    "handler": self.parse_data,
                    'result_tag': 'test'
                }}
        yield first_page
    def parse_data(self, resp):
        # NOTE(review): `html` is assigned but never used; kept as-is.
        html = resp.content
        return 'skr'
# Manual smoke test: run the minimal SiteSpider. The commented-out lines
# below show how LagouSpider would be run and its results exported.
if __name__ == '__main__':
    t = SiteSpider()
    t.start()
    # sp = SiteSpider()
    # sp.start()
    # sp = LagouSpider()
    # sp.tasks = ['https://www.lagou.com/zhaopin/Python/1']
    # sp.start()
    #
    # for s in sp.result['job'].export_sql('test.test'):
    #     print(s)
    # sp.result['job'].export_csvfile('/Users/mioji/Desktop/newpy/pspider/example/lagoutest.csv')
8dbc64d621c06184f0da8675ed32647555452720 | 2,771 | py | Python | src/ndn/bin/nfdc/cmd_get_status.py | tianyuan129/python-ndn | f390b3122d2a233a9a22a1ee9468b1241c46ef86 | [
"Apache-2.0"
] | 5 | 2019-10-03T01:26:43.000Z | 2020-07-07T15:21:52.000Z | src/ndn/bin/nfdc/cmd_get_status.py | tianyuan129/python-ndn | f390b3122d2a233a9a22a1ee9468b1241c46ef86 | [
"Apache-2.0"
] | 12 | 2019-10-28T03:17:16.000Z | 2020-08-26T22:10:52.000Z | src/ndn/bin/nfdc/cmd_get_status.py | tianyuan129/python-ndn | f390b3122d2a233a9a22a1ee9468b1241c46ef86 | [
"Apache-2.0"
] | 10 | 2019-10-18T21:16:43.000Z | 2021-06-24T07:26:22.000Z | # -----------------------------------------------------------------------------
# Copyright (C) 2019-2021 The python-ndn authors
#
# This file is part of python-ndn.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
import argparse
import datetime
from ...app import NDNApp
from ...app_support.nfd_mgmt import GeneralStatus
from .utils import express_interest
def add_parser(subparsers):
    """Register the 'Get-Status' subcommand (alias 'status') and wire it
    to the execute() handler via the parsed args."""
    cmd = subparsers.add_parser('Get-Status', aliases=['status'])
    cmd.set_defaults(executor=execute)
def execute(_args: argparse.Namespace):
    """Run the 'status' command: query NFD's general status and print it."""
    app = NDNApp()
    async def after_start():
        # Runs once the NDN face is up; the app is always shut down afterwards.
        try:
            data = await express_interest(app, "/localhost/nfd/status/general")
            msg = GeneralStatus.parse(data)
            print('General status:')
            print(f'{"version":>25}\t{msg.nfd_version}')
            # Timestamps are reported in milliseconds since the epoch.
            st_time = datetime.datetime.fromtimestamp(msg.start_timestamp / 1000)
            print(f'{"startTime":>25}\t{st_time.strftime("%Y-%m-%d %H:%M:%S.%f")}')
            cur_time = datetime.datetime.fromtimestamp(msg.current_timestamp / 1000)
            print(f'{"currentTime":>25}\t{cur_time.strftime("%Y-%m-%d %H:%M:%S.%f")}')
            up_time = cur_time - st_time
            print(f'{"upTime":>25}\t{up_time}')
            print(f'{"nNameTreeEntries":>25}\t{msg.n_name_tree_entries}')
            print(f'{"nFibEntries":>25}\t{msg.n_fib_entries}')
            print(f'{"nPitEntries":>25}\t{msg.n_pit_entries}')
            print(f'{"nMeasurementsEntries":>25}\t{msg.n_measurement_entries}')
            print(f'{"nCsEntries":>25}\t{msg.n_cs_entries}')
            print(f'{"nInInterests":>25}\t{msg.n_in_interests}')
            print(f'{"nOutInterests":>25}\t{msg.n_out_interests}')
            print(f'{"nInData":>25}\t{msg.n_in_data}')
            print(f'{"nOutData":>25}\t{msg.n_out_data}')
            print(f'{"nInNacks":>25}\t{msg.n_in_nacks}')
            print(f'{"nOutNacks":>25}\t{msg.n_out_nacks}')
            print(f'{"nSatisfiedInterests":>25}\t{msg.n_satisfied_interests}')
            print(f'{"nUnsatisfiedInterests":>25}\t{msg.n_unsatisfied_interests}')
        finally:
            app.shutdown()
    app.run_forever(after_start())
7a031d870e82ceb26b6f058d2445ef4a4b49e90c | 1,488 | py | Python | pyopengl/triforce/triforce.py | kenjisatoh/pygame | cf5ba2331dc6b4a930f9c3dacbaa7954f51498db | [
"MIT"
] | 22 | 2016-06-02T06:05:59.000Z | 2021-05-17T03:38:28.000Z | pyopengl/triforce/triforce.py | kenjisatoh/pygame | cf5ba2331dc6b4a930f9c3dacbaa7954f51498db | [
"MIT"
] | null | null | null | pyopengl/triforce/triforce.py | kenjisatoh/pygame | cf5ba2331dc6b4a930f9c3dacbaa7954f51498db | [
"MIT"
] | 17 | 2016-03-04T12:29:15.000Z | 2020-11-29T13:11:28.000Z | #!/usr/bin/env python
#coding:utf-8
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import sys
# Current rotation of the model around the Y axis, in degrees; advanced by
# idle() and read by display().
angle = 0.0
def main():
    """Initialise GLUT, create the window and enter the main loop.

    Call order matters: glutInit must precede window creation, and the
    callbacks are registered before glutMainLoop (which never returns).
    """
    glutInit(sys.argv)
    # Double-buffered RGB framebuffer with a depth buffer.
    glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH)
    glutInitWindowSize(300, 300)
    glutInitWindowPosition(100, 100)
    glutCreateWindow("神々のトライフォース")
    init()
    glutDisplayFunc(display)
    glutReshapeFunc(reshape)
    glutIdleFunc(idle)
    glutMainLoop()
def init():
    """One-time GL state: black clear colour, depth testing enabled."""
    glClearColor(0.0, 0.0, 0.0, 0.0)
    glEnable(GL_DEPTH_TEST)
def display():
    """GLUT display callback: clear, set the camera, rotate and draw."""
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glColor3f(1.0, 1.0, 1.0)
    glLoadIdentity()
    # Camera at (0, 0, 3) looking at the origin, +Y up.
    gluLookAt(0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0)
    glRotatef(angle, 0.0, 1.0, 0.0)
    draw_triforce()
    glutSwapBuffers()
def draw_triforce():
    """Draw the Triforce: three yellow triangles in the z=0 plane."""
    glColor3f(1.0, 1.0, 0.0)
    glBegin(GL_TRIANGLES)
    # Top triangle
    glVertex2f(0, 0.8)
    glVertex2f(-0.4, 0.0)
    glVertex2f(0.4, 0.0)
    # Bottom-left triangle
    glVertex2f(-0.4, 0.0)
    glVertex2f(-0.8, -0.8)
    glVertex2f(0.0, -0.8)
    # Bottom-right triangle
    glVertex2f(0.4, 0.0)
    glVertex2f(0.0, -0.8)
    glVertex2f(0.8, -0.8)
    glEnd()
def reshape(width, height):
    """GLUT reshape callback: update the viewport and projection matrix.

    Fix: guard against a zero-height window (e.g. minimised), which would
    raise ZeroDivisionError in the aspect-ratio computation.
    """
    glViewport(0, 0, width, height)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    aspect = width / max(height, 1)
    # 60-degree vertical FOV, near/far planes at 1 and 100.
    gluPerspective(60.0, aspect, 1.0, 100.0)
    glMatrixMode(GL_MODELVIEW)
global angle
angle += 0.05
glutPostRedisplay()
if __name__ == "__main__":
main()
| 21.257143 | 60 | 0.633065 |
d9d3469186bb06173219a7d0d2fce6daaa7dbb8a | 11,261 | py | Python | maskrcnn_benchmark/config/defaults.py | qixuxiang/maskrcnn_tianchi_stage2 | 52023b64268dc91f0b5b9f085203ab00a542458a | [
"MIT"
] | null | null | null | maskrcnn_benchmark/config/defaults.py | qixuxiang/maskrcnn_tianchi_stage2 | 52023b64268dc91f0b5b9f085203ab00a542458a | [
"MIT"
] | null | null | null | maskrcnn_benchmark/config/defaults.py | qixuxiang/maskrcnn_tianchi_stage2 | 52023b64268dc91f0b5b9f085203ab00a542458a | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
from yacs.config import CfgNode as CN
# -----------------------------------------------------------------------------
# Convention about Training / Test specific parameters
# -----------------------------------------------------------------------------
# Whenever an argument can be either used for training or for testing, the
# corresponding name will be post-fixed by a _TRAIN for a training parameter,
# or _TEST for a test-specific parameter.
# For example, the number of images during training will be
# IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be
# IMAGES_PER_BATCH_TEST
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
# Root config node. Every option below is a default that may be overridden
# by a YAML config file merged at runtime.
_C = CN()

_C.MODEL = CN()
_C.MODEL.RPN_ONLY = False
_C.MODEL.MASK_ON = False
_C.MODEL.MASKIOU_ON = False
_C.MODEL.DEVICE = "cuda"
_C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN"
_C.MODEL.MASKIOU_LOSS_WEIGHT = 1.0
# If the WEIGHT starts with a catalog://, like :R-50, the code will look for
# the path in paths_catalog. Else, it will use it as the specified absolute
# path
_C.MODEL.WEIGHT = ""
_C.MODEL.PRETRAINED_MODELS = "pretrained_models"
# -----------------------------------------------------------------------------
# INPUT
# -----------------------------------------------------------------------------
_C.INPUT = CN()
# Size of the smallest side of the image during training.
# NOTE: must be a tuple (multi-scale training picks one entry per image).
# A dead duplicate scalar assignment (= 800) that was immediately
# overwritten by the tuple below has been removed.
_C.INPUT.MIN_SIZE_TRAIN = (800, )
# Maximum size of the side of the image during training
_C.INPUT.MAX_SIZE_TRAIN = 1200
# Size of the smallest side of the image during testing
_C.INPUT.MIN_SIZE_TEST = 800
# Maximum size of the side of the image during testing
_C.INPUT.MAX_SIZE_TEST = 1200
# Values to be used for image normalization
_C.INPUT.PIXEL_MEAN = [102.9801, 115.9465, 122.7717]
# Values to be used for image normalization
_C.INPUT.PIXEL_STD = [1., 1., 1.]
# Convert image to BGR format (for Caffe2 models), in range 0-255
_C.INPUT.TO_BGR255 = True
# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASETS = CN()
# List of the dataset names for training, as present in paths_catalog.py
_C.DATASETS.TRAIN = ()
# List of the dataset names for testing, as present in paths_catalog.py
_C.DATASETS.TEST = ()
# -----------------------------------------------------------------------------
# DataLoader
# -----------------------------------------------------------------------------
_C.DATALOADER = CN()
# Number of data loading threads
_C.DATALOADER.NUM_WORKERS = 4
# If > 0, this enforces that each collated batch should have a size divisible
# by SIZE_DIVISIBILITY
_C.DATALOADER.SIZE_DIVISIBILITY = 0
# If True, each batch should contain only images for which the aspect ratio
# is compatible. This groups portrait images together, and landscape images
# are not batched with portrait images.
_C.DATALOADER.ASPECT_RATIO_GROUPING = True
# ---------------------------------------------------------------------------- #
# Backbone options
# ---------------------------------------------------------------------------- #
_C.MODEL.BACKBONE = CN()
# The backbone conv body to use
# The string must match a function that is imported in modeling.model_builder
# (e.g., 'FPN.add_fpn_ResNet101_conv5_body' to specify a ResNet-101-FPN
# backbone)
_C.MODEL.BACKBONE.CONV_BODY = "R-50-C4"
# Add StopGrad at a specified stage so the bottom layers are frozen
_C.MODEL.BACKBONE.FREEZE_CONV_BODY_AT = 2
_C.MODEL.BACKBONE.OUT_CHANNELS = 256 * 4
# ---------------------------------------------------------------------------- #
# RPN options
# ---------------------------------------------------------------------------- #
_C.MODEL.RPN = CN()
_C.MODEL.RPN.USE_FPN = False
# Base RPN anchor sizes given in absolute pixels w.r.t. the scaled network input
_C.MODEL.RPN.ANCHOR_SIZES = (32, 64, 128, 256, 512)
# Stride of the feature map that RPN is attached.
# For FPN, number of strides should match number of scales
_C.MODEL.RPN.ANCHOR_STRIDE = (16,)
# RPN anchor aspect ratios
_C.MODEL.RPN.ASPECT_RATIOS = (0.5, 1.0, 2.0)
# Remove RPN anchors that go outside the image by RPN_STRADDLE_THRESH pixels
# Set to -1 or a large value, e.g. 100000, to disable pruning anchors
_C.MODEL.RPN.STRADDLE_THRESH = 0
# Minimum overlap required between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD
# ==> positive RPN example)
_C.MODEL.RPN.FG_IOU_THRESHOLD = 0.7
# Maximum overlap allowed between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a negative examples (IoU < BG_IOU_THRESHOLD
# ==> negative RPN example)
_C.MODEL.RPN.BG_IOU_THRESHOLD = 0.3
# Total number of RPN examples per image
_C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256
# Target fraction of foreground (positive) examples per RPN minibatch
_C.MODEL.RPN.POSITIVE_FRACTION = 0.5
# Number of top scoring RPN proposals to keep before applying NMS
# When FPN is used, this is *per FPN level* (not total)
_C.MODEL.RPN.PRE_NMS_TOP_N_TRAIN = 12000
_C.MODEL.RPN.PRE_NMS_TOP_N_TEST = 6000
# Number of top scoring RPN proposals to keep after applying NMS
_C.MODEL.RPN.POST_NMS_TOP_N_TRAIN = 2000
_C.MODEL.RPN.POST_NMS_TOP_N_TEST = 1000
# NMS threshold used on RPN proposals
_C.MODEL.RPN.NMS_THRESH = 0.7
# Proposal height and width both need to be greater than RPN_MIN_SIZE
# (at the scale used during training or inference)
_C.MODEL.RPN.MIN_SIZE = 0
# Number of top scoring RPN proposals to keep after combining proposals from
# all FPN levels
_C.MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN = 2000
_C.MODEL.RPN.FPN_POST_NMS_TOP_N_TEST = 2000
# Custom rpn head, empty to use default conv or separable conv
_C.MODEL.RPN.RPN_HEAD = "SingleConvRPNHead"
# ---------------------------------------------------------------------------- #
# ROI HEADS options
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_HEADS = CN()
_C.MODEL.ROI_HEADS.USE_FPN = False
# Overlap threshold for an RoI to be considered foreground (if >= FG_IOU_THRESHOLD)
_C.MODEL.ROI_HEADS.FG_IOU_THRESHOLD = 0.5
# Overlap threshold for an RoI to be considered background
# (class = 0 if overlap in [0, BG_IOU_THRESHOLD))
_C.MODEL.ROI_HEADS.BG_IOU_THRESHOLD = 0.5
# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets
# These are empirically chosen to approximately lead to unit variance targets
_C.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS = (10., 10., 5., 5.)
# RoI minibatch size *per image* (number of regions of interest [ROIs])
# Total number of RoIs per training minibatch =
#   TRAIN.BATCH_SIZE_PER_IM * TRAIN.IMS_PER_BATCH * NUM_GPUS
# E.g., a common configuration is: 512 * 2 * 8 = 8192
_C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
# Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0)
_C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25
# Only used on test mode
# Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to
# balance obtaining high recall with not having too many low precision
# detections that will slow down inference post processing steps (like NMS)
_C.MODEL.ROI_HEADS.SCORE_THRESH = 0.05
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
_C.MODEL.ROI_HEADS.NMS = 0.5
# Maximum number of detections to return per image (100 is based on the limit
# established for the COCO dataset)
_C.MODEL.ROI_HEADS.DETECTIONS_PER_IMG = 100
_C.MODEL.ROI_BOX_HEAD = CN()
_C.MODEL.ROI_BOX_HEAD.FEATURE_EXTRACTOR = "ResNet50Conv5ROIFeatureExtractor"
_C.MODEL.ROI_BOX_HEAD.PREDICTOR = "FastRCNNPredictor"
_C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0
_C.MODEL.ROI_BOX_HEAD.POOLER_SCALES = (1.0 / 16,)
_C.MODEL.ROI_BOX_HEAD.NUM_CLASSES = 81
# Hidden layer dimension when using an MLP for the RoI box head
_C.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM = 1024
_C.MODEL.ROI_MASK_HEAD = CN()
_C.MODEL.ROI_MASK_HEAD.FEATURE_EXTRACTOR = "ResNet50Conv5ROIFeatureExtractor"
_C.MODEL.ROI_MASK_HEAD.PREDICTOR = "MaskRCNNC4Predictor"
_C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0
_C.MODEL.ROI_MASK_HEAD.POOLER_SCALES = (1.0 / 16,)
_C.MODEL.ROI_MASK_HEAD.MLP_HEAD_DIM = 1024
_C.MODEL.ROI_MASK_HEAD.CONV_LAYERS = (256, 256, 256, 256)
_C.MODEL.ROI_MASK_HEAD.RESOLUTION = 14
_C.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR = True
# Whether or not resize and translate masks to the input image.
_C.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS = False
_C.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS_THRESHOLD = 0.5
# ---------------------------------------------------------------------------- #
# ResNe[X]t options (ResNets = {ResNet, ResNeXt}
# Note that parts of a resnet may be used for both the backbone and the head
# These options apply to both
# ---------------------------------------------------------------------------- #
_C.MODEL.RESNETS = CN()
# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
_C.MODEL.RESNETS.NUM_GROUPS = 1
# Baseline width of each group
_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64
# Place the stride 2 conv on the 1x1 filter
# Use True only for the original MSRA ResNet; use False for C2 and Torch models
_C.MODEL.RESNETS.STRIDE_IN_1X1 = True
# Residual transformation function
_C.MODEL.RESNETS.TRANS_FUNC = "BottleneckWithFixedBatchNorm"
# ResNet's stem function (conv1 and pool1)
_C.MODEL.RESNETS.STEM_FUNC = "StemWithFixedBatchNorm"
# Apply dilation in stage "res5"
_C.MODEL.RESNETS.RES5_DILATION = 1
_C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256
_C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64
# ---------------------------------------------------------------------------- #
# Solver
# ---------------------------------------------------------------------------- #
_C.SOLVER = CN()
_C.SOLVER.MAX_ITER = 40000
_C.SOLVER.BASE_LR = 0.001
_C.SOLVER.BIAS_LR_FACTOR = 2
_C.SOLVER.MOMENTUM = 0.9
_C.SOLVER.WEIGHT_DECAY = 0.0005
_C.SOLVER.WEIGHT_DECAY_BIAS = 0
_C.SOLVER.GAMMA = 0.1
_C.SOLVER.STEPS = (30000,)
_C.SOLVER.WARMUP_FACTOR = 1.0 / 3
_C.SOLVER.WARMUP_ITERS = 500
_C.SOLVER.WARMUP_METHOD = "linear"
_C.SOLVER.CHECKPOINT_PERIOD = 6000
# Number of images per batch
# This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will
# see 2 images per batch
_C.SOLVER.IMS_PER_BATCH = 2
# ---------------------------------------------------------------------------- #
# Specific test options
# ---------------------------------------------------------------------------- #
_C.TEST = CN()
_C.TEST.EXPECTED_RESULTS = []
_C.TEST.EXPECTED_RESULTS_SIGMA_TOL = 4
# Number of images per batch
# This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will
# see 2 images per batch
_C.TEST.IMS_PER_BATCH = 2
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #
_C.OUTPUT_DIR = "models/"
_C.PATHS_CATALOG = os.path.join(os.path.dirname(__file__), "paths_catalog.py")
| 40.65343 | 83 | 0.638487 |
a2e627d0b18a32696752c06ece81e1f73a69758f | 7,355 | py | Python | store/apps/models/models.py | Boring-Mind/sbc-store | d16cce07bcb05ff2ea901411a5129ab1f0540161 | [
"MIT"
] | null | null | null | store/apps/models/models.py | Boring-Mind/sbc-store | d16cce07bcb05ff2ea901411a5129ab1f0540161 | [
"MIT"
] | null | null | null | store/apps/models/models.py | Boring-Mind/sbc-store | d16cce07bcb05ff2ea901411a5129ab1f0540161 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils import timezone
from datetime import date as pydate
# Site-related tables
# -------------------------
class Customers(models.Model):
    """A registered customer of the store."""
    first_name = models.CharField(max_length=90)
    second_name = models.CharField(max_length=120)
    surname = models.CharField(max_length=130)
    email = models.EmailField(max_length=100)
    phone_number = models.CharField(max_length=20)
    address = models.CharField(max_length=150)
    # Date the account was created; defaults to "today" at insert time
    # (callable default, evaluated per row).
    account_create_date = models.DateField(default=pydate.today)
    timezone = models.ForeignKey(
        'Timezones',
        on_delete=models.CASCADE
    )
# ToDo: add timezones from pytz.common_timezones
class Timezones(models.Model):
    """A timezone that a customer account can be associated with."""
    tz_name = models.CharField(max_length=40)
    # NOTE(review): max_length=3 looks short for signed UTC offsets such as
    # "+10:30" — confirm the intended format of this field.
    tz_utc_name = models.CharField(max_length=3)
# Product-related tables
# -------------------------
class Manufacturer(models.Model):
    """A hardware manufacturer (board/SoC vendor)."""
    man_name = models.CharField(max_length=80)
    description = models.TextField()
    contacts = models.CharField(max_length=100)
class Product(models.Model):
    """Is abstract class. Added for flexibility.
    With this class can easily be added new categories:
    accessories, modules, etc.
    There are fields, common for every category
    of products.

    NOTE(review): despite the docstring, Meta does NOT declare
    ``abstract = True``, so Django creates a concrete ``Product`` table.
    This cannot simply be flipped: ``Prices.product_id`` and
    ``Sbc.product_id`` below hold ForeignKeys to 'Product', which requires
    the concrete table to exist. Confirm which design is intended.
    """
    full_name = models.CharField(max_length=100)
    short_name = models.CharField(max_length=30)
    manufacturer_id = models.ForeignKey(
        'Manufacturer',
        on_delete=models.CASCADE
    )
    # True once the product is no longer produced/sold.
    discontinued = models.BooleanField(default=False)
    description = models.TextField()
    class Meta:
        ordering = ["short_name"]
class Prices(models.Model):
    """Price history for a product: one row per (product, timestamp)."""
    product_id = models.ForeignKey(
        'Product',
        on_delete=models.CASCADE
    )
    price = models.DecimalField(max_digits=10, decimal_places=2)
    # Moment the price became effective; defaults to "now".
    price_date = models.DateTimeField(default=timezone.now)
    class Meta:
        ordering = ["price_date"]
# SBC specifications-related tables
# -------------------------
class Cpu_platforms(models.Model):
    """Corresponds to cpu_platform.
    arm, x86, other
    """
    cpu_platform_name = models.CharField(max_length=10)
class Cpu(models.Model):
    """A CPU model that a board can carry."""
    # Full name of cpu to display in filters
    # Example:
    # Broadcom BCM2711B0 quad-core A72(ARMv8-A) 64-bit
    name = models.CharField(max_length=100)
    platform = models.ForeignKey(
        'Cpu_platforms',
        on_delete=models.CASCADE
    )
    # Cannot be negative or zero
    # NOTE(review): that constraint is only documented, not enforced —
    # consider PositiveSmallIntegerField or a CheckConstraint.
    core_amount = models.SmallIntegerField()
    is_64_bit = models.BooleanField(default=False)
    # Represents frequency in mhz
    frequency = models.IntegerField()
class Sbc_types(models.Model):
    """Board form factor category."""
    # SOM/COM/module
    # SBC
    # Carrier board
    type_name = models.CharField(max_length=15)
class Ram_types(models.Model):
    """Represents standard of ram"""
    # DDR4, LPDDR3, etc.
    ram_type_name = models.CharField(max_length=25)
class Sbc(Product):
    """A single-board computer product with its hardware specification.

    NOTE(review): this class both *inherits* ``Product`` (multi-table
    inheritance — Django adds an implicit ``product_ptr`` link) AND
    declares an explicit ``product_id`` ForeignKey to 'Product'. One of
    the two is almost certainly redundant; confirm the intended schema.
    """
    sbc_name = models.CharField(max_length=70)
    product_id = models.ForeignKey(
        'Product',
        related_name='product',
        on_delete=models.CASCADE
    )
    sbc_type_id = models.ForeignKey(
        'Sbc_types',
        on_delete=models.CASCADE
    )
    cpu_id = models.ForeignKey(
        'Cpu',
        on_delete=models.CASCADE
    )
    gpu_id = models.ForeignKey(
        'Gpu',
        on_delete=models.CASCADE
    )
    mcu_id = models.ForeignKey(
        'Mcu',
        on_delete=models.CASCADE
    )
    npu_id = models.ForeignKey(
        'Npu',
        on_delete=models.CASCADE
    )
    fpga_id = models.ForeignKey(
        'Fpga',
        on_delete=models.CASCADE
    )
    ram_type_id = models.ForeignKey(
        'Ram_types',
        on_delete=models.CASCADE
    )
    storage_type_id = models.ForeignKey(
        'Sbc_storage_types',
        on_delete=models.CASCADE
    )
    # On-board storage size; unit is not declared here — presumably GB,
    # matching Storage_expansions.max_capacity. TODO confirm.
    storage_amount = models.SmallIntegerField()
    wifi_standard_id = models.ForeignKey(
        'Wifi_standards',
        on_delete=models.CASCADE
    )
    bl_standard_id = models.ForeignKey(
        'Bluetooth',
        on_delete=models.CASCADE
    )
    # Whether Bluetooth Low Energy is supported.
    bl_low_energy = models.BooleanField(default=False)
    ethernet_speed_id = models.ForeignKey(
        'Ethernet_speeds',
        on_delete=models.CASCADE
    )
    ethernet_port_amount = models.SmallIntegerField()
    # Number of GPIO pins exposed by the board.
    gpio_pins = models.SmallIntegerField()
class Wifi_standards(models.Model):
    """A Wi-Fi standard, e.g. 802.11ac."""
    wifi_standard_name = models.CharField(max_length=40)
class Bluetooth(models.Model):
    """Stores version of Bluetooth"""
    # Examples: 5.0, 4.2, 2.1
    bluetooth_version = models.CharField(max_length=3)
class Sbc_storage_types(models.Model):
    """Kind of on-board storage (e.g. eMMC, flash)."""
    storage_type_name = models.CharField(max_length=15)
class Storage_expansions(models.Model):
    """A storage expansion option a board may offer."""
    # Examples: 'm.2 nvme', 'sata', 'micro sd'
    storage_expansion_name = models.CharField(max_length=25)
    # Represents capacity in GBytes
    # NOTE(review): max_digits=5 / decimal_places=2 caps this at 999.99 GB —
    # confirm that 1 TB+ expansions are out of scope.
    max_capacity = models.DecimalField(max_digits=5, decimal_places=2)
class Sbc_storage_expansions(models.Model):
    """Resolves many-to-many relationship
    between Storage_expansions and Sbc"""
    sbc_id = models.ForeignKey(
        'Sbc',
        on_delete=models.CASCADE
    )
    st_exp_id = models.ForeignKey(
        'Storage_expansions',
        on_delete=models.CASCADE
    )
class Os_families(models.Model):
    """An OS family, e.g. Linux, BSD."""
    family_name = models.CharField(max_length=25)
class Os(models.Model):
    """A concrete operating system belonging to a family."""
    os_name = models.CharField(max_length=50)
    os_family_id = models.ForeignKey(
        'Os_families',
        on_delete=models.CASCADE
    )
class Supported_os(models.Model):
    """Resolves many-to-many relationship
    between Sbc and Os"""
    sbc_id = models.ForeignKey(
        'Sbc',
        on_delete=models.CASCADE
    )
    os_id = models.ForeignKey(
        'Os',
        on_delete=models.CASCADE
    )
class Ethernet_speeds(models.Model):
    """Ethernet link speed in Mbps."""
    # Has only three values:
    # 10 = 10 Mbps;
    # 100 = 100 Mbps;
    # 1000 = 1 Gbps
    ethernet_speed = models.SmallIntegerField(default=100)
class Npu(models.Model):
    """A neural processing unit model."""
    npu_name = models.CharField(max_length=45)
class Mcu(models.Model):
    """A microcontroller unit model."""
    mcu_name = models.CharField(max_length=45)
class Fpga(models.Model):
    """An FPGA model."""
    fpga_name = models.CharField(max_length=45)
class Port_names(models.Model):
    """A physical port type, e.g. USB-C, HDMI."""
    port_name = models.CharField(max_length=40)
class Available_ports(models.Model):
    """Resolves many-to-many relationship
    between Port_names and Sbc"""
    sbc_id = models.ForeignKey(
        'Sbc',
        on_delete=models.CASCADE
    )
    port_name_id = models.ForeignKey(
        'Port_names',
        on_delete=models.CASCADE
    )
    # Is present, if amount of ports > 1
    amount = models.SmallIntegerField(default=1)
class Gpu_family(models.Model):
    """A GPU family, e.g. Mali, VideoCore."""
    gpu_family_name = models.CharField(max_length=50)
class Gpu(models.Model):
    """A GPU model; frequency unit is presumably MHz (matches Cpu) — TODO confirm."""
    name = models.CharField(max_length=100)
    gpu_family = models.ForeignKey(
        'Gpu_family',
        on_delete=models.CASCADE
    )
    frequency = models.IntegerField()
class Graphics_api(models.Model):
    """A graphics API, e.g. OpenGL ES, Vulkan."""
    graph_api_name = models.CharField(max_length=50)
class Supported_graph_api(models.Model):
    """Resolves many-to-many relationship between Gpu and Graphics_api,
    recording the supported API version per GPU."""
    gpu_id = models.ForeignKey(
        'Gpu',
        on_delete=models.CASCADE
    )
    graph_api_id = models.ForeignKey(
        'Graphics_api',
        on_delete=models.CASCADE
    )
    graph_api_version = models.CharField(max_length=40)
| 25.538194 | 70 | 0.674371 |
5dc83b3e1ce4ff6cd2432e8944d8c15026444368 | 2,414 | py | Python | main.py | emersonrafaels/deep_check_orientation | 6c98b29f88b29fe2e707b700cbd6a08e474f9933 | [
"MIT"
] | null | null | null | main.py | emersonrafaels/deep_check_orientation | 6c98b29f88b29fe2e707b700cbd6a08e474f9933 | [
"MIT"
] | null | null | null | main.py | emersonrafaels/deep_check_orientation | 6c98b29f88b29fe2e707b700cbd6a08e474f9933 | [
"MIT"
] | null | null | null | """
UTILIZAÇÃO DE APRENDIZADO PROFUNDO (DEEP LEARNING) PARA VERIFICAÇÃO DA ORIENTAÇÃO DE UMA IMAGEM
E ROTAÇÃO ADEQUADA DA MESMA. AO SER ENVIADA UMA IMAGEM (EM QUALQUER FORMATO),
RETORNA-SE O NÚMERO DE ROTAÇÕES NECESÁRIAS E A IMAGEM ROTACIONADA CORRETAMENTE.
OS PASSOS REALIZADOS SÃO:
1) LEITURA DA IMAGEM EM RGB
2) PIPELINE DE AUMENTO DE IMAGEM USANDO ALBUMENTATIONS (CLASSE: COMPOSE)
3) REALIZAÇÃO DA PREDIÇÃO USANDO UMA REDE NEURAL DO TIPO RESNET
4) OBTENÇÃO DAS PREDIÇÕES DE ORIENTAÇÃO DA IMAGEM
5) CALCULO DO NÚMERO DE ROTAÇÕES NECESSÁRIAS PARA ORIENTAÇÃO CORRETA DA IMAGEM.
# Arguments
caminho_imagem - Required : Imagem para verificação da orientação (String)
# Returns
predictions - Required : Predições do modelo para 0º, 90º. 180º, 270º (List)
number_rotate - Required : Número de rotações necessárias (Integer)
image_correct_rotate - Required : Imagem após aplicação do número de rotações necessárias (PIL)
"""
__version__ = "1.0"
__author__ = """Emerson V. Rafael (EMERVIN)"""
__data_atualizacao__ = "16/08/2021"
from inspect import stack
import sys
from deep_check_orientation import check_orientation
from utils.image_view import view_image
if __name__ == '__main__':
    try:
        # Get the path of the image supplied by the user as the first CLI argument.
        IMAGE_FILE_LOCATION = sys.argv[1]
        orquestrador = check_orientation()
        # Run the model: returns the per-orientation predictions, the number of
        # 90-degree rotations required, and the correctly rotated image.
        predictions_check_orientation, number_rotations, image_correct_orientation = orquestrador.orchesta_model(IMAGE_FILE_LOCATION)
        print("AS PREDIÇÕES DO MODELO SÃO: {}"
              "\nPARA 0º: {}"
              "\nPARA 90º: {}"
              "\nPARA 180º: {}"
              "\nPARA 270º: {}".format(predictions_check_orientation,
                                       predictions_check_orientation[0],
                                       predictions_check_orientation[1],
                                       predictions_check_orientation[2],
                                       predictions_check_orientation[3]))
        print("NÚMERO DE ROTAÇÕES NECESSÁRIAS: {} ROTAÇÕES".format(number_rotations))
        # Display the correctly rotated image in a window.
        view_image(image_correct_orientation, window_name="IMAGEM ROTACIONADA")
    except Exception as ex:
        # Best-effort error report (function name from the call stack + exception).
        print("ERRO NA FUNÇÃO: {} - {}".format(stack()[0][3], ex))
89bd2d3056b29a261d3d4b1b1d6309c820bd45ac | 3,914 | py | Python | python/bifrost/blocks/reverse.py | Radio-Camera-Initiative/bifrost | d4891bcc97335fb6e7955314c40631ef9136f556 | [
"BSD-3-Clause"
] | null | null | null | python/bifrost/blocks/reverse.py | Radio-Camera-Initiative/bifrost | d4891bcc97335fb6e7955314c40631ef9136f556 | [
"BSD-3-Clause"
] | 1 | 2022-02-09T00:25:09.000Z | 2022-02-09T00:25:09.000Z | python/bifrost/blocks/reverse.py | Radio-Camera-Initiative/bifrost | d4891bcc97335fb6e7955314c40631ef9136f556 | [
"BSD-3-Clause"
] | 1 | 2021-12-14T21:59:46.000Z | 2021-12-14T21:59:46.000Z |
# Copyright (c) 2016, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import bifrost as bf
from bifrost.pipeline import TransformBlock
from bifrost.DataType import DataType
from copy import deepcopy
class ReverseBlock(TransformBlock):
    """Reverses the data along the given axes; see the ``reverse`` factory below."""
    def __init__(self, iring, axes, *args, **kwargs):
        super(ReverseBlock, self).__init__(iring, *args, **kwargs)
        # Accept a single axis (label or index) as well as a list/tuple of axes.
        # BUGFIX: the previous check,
        #     `if not isinstance(axes, list) or isinstance(axes, tuple):`
        # parsed as `(not isinstance(axes, list)) or isinstance(axes, tuple)`,
        # so a *tuple* of axes was wrongly wrapped into a one-element list.
        if not isinstance(axes, (list, tuple)):
            axes = [axes]
        self.specified_axes = axes
    def define_valid_input_spaces(self):
        # This block operates on CUDA-space data only (bf.map below runs on GPU).
        return ('cuda',)
    def on_sequence(self, iseq):
        """Resolve axis labels to indices and build the output header."""
        ihdr = iseq.header
        itensor = ihdr['_tensor']
        # Axes may be given by label (string) or by integer index.
        # (`basestring`/`xrange` below: this file still targets Python 2.)
        self.axes = [itensor['labels'].index(axis)
                     if isinstance(axis, basestring)
                     else axis
                     for axis in self.specified_axes]
        # The frame (time) axis is the streaming dimension and cannot be reversed.
        frame_axis = itensor['shape'].index(-1)
        if frame_axis in self.axes:
            raise KeyError("Cannot reverse frame axis")
        ohdr = deepcopy(ihdr)
        otensor = ohdr['_tensor']
        oshape = otensor['shape']
        if 'scales' in itensor:
            # Reversing an axis flips the sign of its scale step and shifts
            # the scale origin to the other end of the axis.
            for ax in self.axes:
                scale_step = otensor['scales'][ax][1]
                scale_shift = oshape[ax] * scale_step
                otensor['scales'][ax][0] += scale_shift
                otensor['scales'][ax][1] = -scale_step
        return ohdr
    def on_data(self, ispan, ospan):
        """Copy the input span to the output span with the chosen axes negated."""
        idata = ispan.data
        odata = ospan.data
        shape = idata.shape
        ind_names = ['i%i' % i for i in xrange(idata.ndim)]
        inds = list(ind_names)
        # Negating an index in bf.map reads the axis in reverse order.
        for ax in self.axes:
            inds[ax] = '-' + inds[ax]
        inds = ','.join(inds)
        bf.map("b = a(%s)" % inds, shape=shape, axis_names=ind_names,
               data={'a': idata, 'b': odata})
def reverse(iring, axes, *args, **kwargs):
    """Reverse data along an axis or set of axes.

    Args:
        iring (Ring or Block): Input data source.
        axes: (List of) strings or integers specifying axes to reverse.
        *args: Arguments to ``bifrost.pipeline.TransformBlock``.
        **kwargs: Keyword Arguments to ``bifrost.pipeline.TransformBlock``.

    **Tensor semantics**::

        Input:  [...], dtype = any, space = CUDA
        Output: [...], dtype = any, space = CUDA

    Returns:
        ReverseBlock: A new block instance.
    """
    # Thin factory wrapper around ReverseBlock.
    return ReverseBlock(iring, axes, *args, **kwargs)
d4c18a78c242bb70a8f383c5bf65a65401d006d0 | 652 | py | Python | fbpcs/pid/service/pid_service/pid_stage_input.py | bliud/fbpcs | 35a479fb7f3673a11ccfb012bf882b1de00b0b28 | [
"MIT"
] | 63 | 2021-08-18T01:50:22.000Z | 2022-03-25T06:44:36.000Z | fbpcs/pid/service/pid_service/pid_stage_input.py | bliud/fbpcs | 35a479fb7f3673a11ccfb012bf882b1de00b0b28 | [
"MIT"
] | 672 | 2021-08-18T05:20:32.000Z | 2022-03-31T23:30:13.000Z | fbpcs/pid/service/pid_service/pid_stage_input.py | bliud/fbpcs | 35a479fb7f3673a11ccfb012bf882b1de00b0b28 | [
"MIT"
] | 61 | 2021-08-18T20:02:30.000Z | 2022-03-31T22:44:17.000Z | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import List, Optional
@dataclass
class PIDStageInput:
    """Bundle of inputs handed to a single PID (private-ID) pipeline stage.

    Attributes mirror the stage invocation: one input/output path per shard,
    plus optional validation and HMAC settings.
    """
    input_paths: List[str]
    output_paths: List[str]
    num_shards: int
    instance_id: str
    fail_fast: bool = False
    is_validating: Optional[bool] = False
    synthetic_shard_path: Optional[str] = None
    hmac_key: Optional[str] = None

    def add_to_inputs(self, input_path: str) -> None:
        """Append one more input path to this stage's input list (in place)."""
        paths = self.input_paths
        paths.append(input_path)
2887bd3df62ccbd017f3aaf9e671e9e698b28751 | 20,516 | py | Python | Game5/modules/sprites/tanks.py | ttkaixin1998/pikachupythongames | 609a3a5a2be3f5a187c332c7980bb5bb14548f02 | [
"MIT"
] | 4,013 | 2018-06-16T08:00:02.000Z | 2022-03-30T11:48:14.000Z | Game5/modules/sprites/tanks.py | pigbearcat/Games | b8c47ef1bcce9a9db3f3730c162e6e8e08b508a2 | [
"MIT"
] | 22 | 2018-10-18T00:15:50.000Z | 2022-01-13T08:16:15.000Z | Game5/modules/sprites/tanks.py | pigbearcat/Games | b8c47ef1bcce9a9db3f3730c162e6e8e08b508a2 | [
"MIT"
] | 2,172 | 2018-07-20T04:03:14.000Z | 2022-03-31T14:18:29.000Z | '''
Function:
坦克类
Author:
Charles
微信公众号:
Charles的皮卡丘
'''
import pygame
import random
from .foods import Foods
from .bullet import Bullet
'''玩家坦克类'''
class PlayerTank(pygame.sprite.Sprite):
def __init__(self, name, player_tank_image_paths, position, border_len, screensize, direction='up', bullet_image_paths=None, protected_mask_path=None, boom_image_path=None, **kwargs):
pygame.sprite.Sprite.__init__(self)
# 玩家1/玩家2
self.name = name
# 坦克图片路径
self.player_tank_image_paths = player_tank_image_paths.get(name)
# 地图边缘宽度
self.border_len = border_len
# 屏幕大小
self.screensize = screensize
# 初始坦克方向
self.init_direction = direction
# 初始位置
self.init_position = position
# 子弹图片
self.bullet_image_paths = bullet_image_paths
# 保护罩图片路径
self.protected_mask = pygame.image.load(protected_mask_path)
self.protected_mask_flash_time = 25
self.protected_mask_flash_count = 0
self.protected_mask_pointer = False
# 坦克爆炸图
self.boom_image = pygame.image.load(boom_image_path)
self.boom_last_time = 5
self.booming_flag = False
self.boom_count = 0
# 坦克生命数量
self.num_lifes = 3
# 重置
self.reset()
'''移动'''
def move(self, direction, scene_elems, player_tanks_group, enemy_tanks_group, home):
# 爆炸时无法移动
if self.booming_flag:
return
# 方向不一致先改变方向
if self.direction != direction:
self.setDirection(direction)
self.switch_count = self.switch_time
self.move_cache_count = self.move_cache_time
# 移动(使用缓冲)
self.move_cache_count += 1
if self.move_cache_count < self.move_cache_time:
return
self.move_cache_count = 0
if self.direction == 'up':
speed = (0, -self.speed)
elif self.direction == 'down':
speed = (0, self.speed)
elif self.direction == 'left':
speed = (-self.speed, 0)
elif self.direction == 'right':
speed = (self.speed, 0)
rect_ori = self.rect
self.rect = self.rect.move(speed)
# --碰到场景元素
for key, value in scene_elems.items():
if key in ['brick_group', 'iron_group', 'river_group']:
if pygame.sprite.spritecollide(self, value, False, None):
self.rect = rect_ori
elif key in ['ice_group']:
if pygame.sprite.spritecollide(self, value, False, None):
self.rect = self.rect.move(speed)
# --碰到其他玩家坦克
if pygame.sprite.spritecollide(self, player_tanks_group, False, None):
self.rect = rect_ori
# --碰到敌方坦克
if pygame.sprite.spritecollide(self, enemy_tanks_group, False, None):
self.rect = rect_ori
# --碰到玩家大本营
if pygame.sprite.collide_rect(self, home):
self.rect = rect_ori
# --碰到边界
if self.rect.left < self.border_len:
self.rect.left = self.border_len
elif self.rect.right > self.screensize[0]-self.border_len:
self.rect.right = self.screensize[0] - self.border_len
elif self.rect.top < self.border_len:
self.rect.top = self.border_len
elif self.rect.bottom > self.screensize[1]-self.border_len:
self.rect.bottom = self.screensize[1] - self.border_len
# 为了坦克轮动特效切换图片
self.switch_count += 1
if self.switch_count > self.switch_time:
self.switch_count = 0
self.switch_pointer = not self.switch_pointer
self.image = self.tank_direction_image.subsurface((48*int(self.switch_pointer), 0), (48, 48))
'''更新'''
def update(self):
# 坦克子弹冷却更新
if self.is_bullet_cooling:
self.bullet_cooling_count += 1
if self.bullet_cooling_count >= self.bullet_cooling_time:
self.bullet_cooling_count = 0
self.is_bullet_cooling = False
# 无敌状态更新
if self.is_protected:
self.protected_count += 1
if self.protected_count > self.protected_time:
self.is_protected = False
self.protected_count = 0
# 爆炸状态更新
if self.booming_flag:
self.image = self.boom_image
self.boom_count += 1
if self.boom_count > self.boom_last_time:
self.boom_count = 0
self.booming_flag = False
self.reset()
'''设置坦克方向'''
def setDirection(self, direction):
self.direction = direction
if self.direction == 'up':
self.tank_direction_image = self.tank_image.subsurface((0, 0), (96, 48))
elif self.direction == 'down':
self.tank_direction_image = self.tank_image.subsurface((0, 48), (96, 48))
elif self.direction == 'left':
self.tank_direction_image = self.tank_image.subsurface((0, 96), (96, 48))
elif self.direction == 'right':
self.tank_direction_image = self.tank_image.subsurface((0, 144), (96, 48))
'''射击'''
def shoot(self):
# 爆炸时无法射击
if self.booming_flag:
return False
# 子弹不在冷却状态时
if not self.is_bullet_cooling:
self.is_bullet_cooling = True
if self.tanklevel == 0:
is_stronger = False
speed = 8
elif self.tanklevel == 1:
is_stronger = False
speed = 10
elif self.tanklevel >= 2:
is_stronger = True
speed = 10
if self.direction == 'up':
position = (self.rect.centerx, self.rect.top-1)
elif self.direction == 'down':
position = (self.rect.centerx, self.rect.bottom+1)
elif self.direction == 'left':
position = (self.rect.left-1, self.rect.centery)
elif self.direction == 'right':
position = (self.rect.right+1, self.rect.centery)
return Bullet(bullet_image_paths=self.bullet_image_paths, screensize=self.screensize, direction=self.direction, position=position, border_len=self.border_len, is_stronger=is_stronger, speed=speed)
return False
'''提高坦克等级'''
def improveTankLevel(self):
if self.booming_flag:
return False
self.tanklevel = min(self.tanklevel+1, len(self.player_tank_image_paths)-1)
self.tank_image = pygame.image.load(self.player_tank_image_paths[self.tanklevel]).convert_alpha()
self.setDirection(self.direction)
self.image = self.tank_direction_image.subsurface((48*int(self.switch_pointer), 0), (48, 48))
return True
'''降低坦克等级'''
def decreaseTankLevel(self):
if self.booming_flag:
return False
self.tanklevel -= 1
if self.tanklevel < 0:
self.num_lifes -= 1
self.booming_flag = True
else:
self.tank_image = pygame.image.load(self.player_tank_image_paths[self.tanklevel]).convert_alpha()
self.setDirection(self.direction)
self.image = self.tank_direction_image.subsurface((48*int(self.switch_pointer), 0), (48, 48))
return True if self.tanklevel < 0 else False
'''增加生命值'''
    def addLife(self):
        """Grant the player one extra life."""
        self.num_lifes += 1
'''设置为无敌状态'''
    def setProtected(self):
        """Turn on the invincibility shield (duration is tracked elsewhere)."""
        self.is_protected = True
'''画我方坦克'''
def draw(self, screen):
screen.blit(self.image, self.rect)
if self.is_protected:
self.protected_mask_flash_count += 1
if self.protected_mask_flash_count > self.protected_mask_flash_time:
self.protected_mask_pointer = not self.protected_mask_pointer
self.protected_mask_flash_count = 0
screen.blit(self.protected_mask.subsurface((48*self.protected_mask_pointer, 0), (48, 48)), self.rect)
'''重置坦克, 重生的时候用'''
    def reset(self):
        """Restore the tank to its spawn state (used when the player respawns)."""
        # Facing direction
        self.direction = self.init_direction
        # Movement buffering, so a continuously moving tank is easier to steer precisely
        self.move_cache_time = 4
        self.move_cache_count = 0
        # Invincibility state
        self.is_protected = False
        self.protected_time = 1500
        self.protected_count = 0
        # Movement speed
        self.speed = 8
        # Bullet cooldown
        self.bullet_cooling_time = 30
        self.bullet_cooling_count = 0
        self.is_bullet_cooling = False
        # Tank level
        self.tanklevel = 0
        # Wheel-animation bookkeeping
        self.switch_count = 0
        self.switch_time = 1
        self.switch_pointer = False
        # Tank sprite: reload the level-0 sheet and re-derive the current frame
        self.tank_image = pygame.image.load(self.player_tank_image_paths[self.tanklevel]).convert_alpha()
        self.setDirection(self.direction)
        self.image = self.tank_direction_image.subsurface((48*int(self.switch_pointer), 0), (48, 48))
        self.rect = self.image.get_rect()
        self.rect.left, self.rect.top = self.init_position
'''敌方坦克类'''
class EnemyTank(pygame.sprite.Sprite):
    """Enemy tank sprite.

    Spawns with a random type, level and facing, plays an appearance
    animation, then wanders the map randomly — turning on collisions with
    scenery, tanks, the home base or the screen border — and shoots
    whenever its bullet cooldown allows.
    """
    def __init__(self, enemy_tank_image_paths, appear_image_path, position, border_len, screensize, bullet_image_paths=None, food_image_paths=None, boom_image_path=None, **kwargs):
        pygame.sprite.Sprite.__init__(self)
        self.bullet_image_paths = bullet_image_paths
        self.border_len = border_len
        self.screensize = screensize
        # Spawn-in animation: three 48x48 frames cut from one sheet.
        appear_image = pygame.image.load(appear_image_path).convert_alpha()
        self.appear_images = [appear_image.subsurface((0, 0), (48, 48)), appear_image.subsurface((48, 0), (48, 48)), appear_image.subsurface((96, 0), (48, 48))]
        # Tank type, picked at random among the configured enemy types.
        self.tanktype = random.choice(list(enemy_tank_image_paths.keys()))
        self.enemy_tank_image_paths = enemy_tank_image_paths.get(self.tanktype)
        # Tank level: when the draw lands on the second-highest level, a 40%
        # roll upgrades it to the top level and attaches a food drop.
        self.tanklevel = random.randint(0, len(self.enemy_tank_image_paths)-2)
        self.food = None
        if (random.random() >= 0.6) and (self.tanklevel == len(self.enemy_tank_image_paths)-2):
            self.tanklevel += 1
            self.food = Foods(food_image_paths=food_image_paths, screensize=self.screensize)
        # Wheel-animation bookkeeping.
        self.switch_count = 0
        self.switch_time = 1
        self.switch_pointer = False
        # Movement buffering: the tank only moves every move_cache_time calls.
        self.move_cache_time = 4
        self.move_cache_count = 0
        # Sprite sheet for the current level, plus a random initial facing.
        self.tank_image = pygame.image.load(self.enemy_tank_image_paths[self.tanklevel]).convert_alpha()
        self.direction = random.choice(['up', 'down', 'left', 'right'])
        self.setDirection(self.direction)
        self.image = self.tank_direction_image.subsurface((48*int(self.switch_pointer), 0), (48, 48))
        self.rect = self.image.get_rect()
        self.rect.left, self.rect.top = position
        self.image = self.appear_images[0]
        # Explosion image shown when the tank is destroyed.
        self.boom_image = pygame.image.load(boom_image_path)
        self.boom_last_time = 5
        self.boom_count = 0
        self.booming_flag = False
        # Bullet cooldown: higher levels fire more often.
        self.bullet_cooling_time = 120 - self.tanklevel * 10
        self.bullet_cooling_count = 0
        self.is_bullet_cooling = False
        # Drives the birth animation for a freshly spawned tank.
        self.is_borning = True
        self.borning_left_time = 90
        # Whether the tank is frozen (set via setStill when the player
        # picks up the "clock" food).
        self.is_keep_still = False
        self.keep_still_time = 500
        self.keep_still_count = 0
        # Movement speed; assumes tanktype is numeric-as-string with lower
        # types being faster -- TODO confirm against the type keys used.
        self.speed = 10 - int(self.tanktype) * 2
    '''射击'''
    def shoot(self):
        """Fire a bullet if the cooldown allows; returns the Bullet or False."""
        if not self.is_bullet_cooling:
            self.is_bullet_cooling = True
            if self.tanklevel == 0:
                is_stronger = False
                speed = 8
            elif self.tanklevel == 1:
                is_stronger = False
                speed = 10
            elif self.tanklevel >= 2:
                # NOTE(review): unlike the player tank, is_stronger stays
                # False even at high levels -- looks intentional; confirm.
                is_stronger = False
                speed = 10
            # Spawn the bullet one pixel outside the tank, in its facing direction.
            if self.direction == 'up':
                position = (self.rect.centerx, self.rect.top-1)
            elif self.direction == 'down':
                position = (self.rect.centerx, self.rect.bottom+1)
            elif self.direction == 'left':
                position = (self.rect.left-1, self.rect.centery)
            elif self.direction == 'right':
                position = (self.rect.right+1, self.rect.centery)
            return Bullet(bullet_image_paths=self.bullet_image_paths, screensize=self.screensize, direction=self.direction, position=position, border_len=self.border_len, is_stronger=is_stronger, speed=speed)
        return False
    '''实时更新坦克'''
    def update(self, scene_elems, player_tanks_group, enemy_tanks_group, home):
        """Advance the tank by one frame.

        Returns a dict which may contain:
          - 'boomed': True once the death explosion has finished playing
          - 'bullet': a Bullet instance (or False) fired this frame
        """
        data_return = dict()
        # Death explosion: show the boom image until the timer expires.
        if self.booming_flag:
            self.image = self.boom_image
            self.boom_count += 1
            data_return['boomed'] = False
            if self.boom_count > self.boom_last_time:
                self.boom_count = 0
                self.booming_flag = False
                data_return['boomed'] = True
            return data_return
        # Frozen: skip the rest of the update until the timer runs out.
        if self.is_keep_still:
            self.keep_still_count += 1
            if self.keep_still_count > self.keep_still_time:
                self.is_keep_still = False
                self.keep_still_count = 0
            return data_return
        # Birth animation: cycle the three appear frames three times
        # (one frame per 10 ticks of borning_left_time, 90 -> 0).
        if self.is_borning:
            self.borning_left_time -= 1
            if self.borning_left_time < 0:
                self.is_borning = False
            elif self.borning_left_time <= 10:
                self.image = self.appear_images[2]
            elif self.borning_left_time <= 20:
                self.image = self.appear_images[1]
            elif self.borning_left_time <= 30:
                self.image = self.appear_images[0]
            elif self.borning_left_time <= 40:
                self.image = self.appear_images[2]
            elif self.borning_left_time <= 50:
                self.image = self.appear_images[1]
            elif self.borning_left_time <= 60:
                self.image = self.appear_images[0]
            elif self.borning_left_time <= 70:
                self.image = self.appear_images[2]
            elif self.borning_left_time <= 80:
                self.image = self.appear_images[1]
            elif self.borning_left_time <= 90:
                self.image = self.appear_images[0]
        # Normal behaviour once born.
        else:
            # --random movement
            self.move(scene_elems, player_tanks_group, enemy_tanks_group, home)
            # --bullet cooldown bookkeeping
            if self.is_bullet_cooling:
                self.bullet_cooling_count += 1
                if self.bullet_cooling_count >= self.bullet_cooling_time:
                    self.bullet_cooling_count = 0
                    self.is_bullet_cooling = False
            # --shoot whenever the cooldown permits
            data_return['bullet'] = self.shoot()
        return data_return
    '''随机移动坦克'''
    def move(self, scene_elems, player_tanks_group, enemy_tanks_group, home):
        """Move one buffered step in the current facing, turning randomly on collisions."""
        # Movement buffering: only actually move every move_cache_time calls.
        self.move_cache_count += 1
        if self.move_cache_count < self.move_cache_time:
            return
        self.move_cache_count = 0
        if self.direction == 'up':
            speed = (0, -self.speed)
        elif self.direction == 'down':
            speed = (0, self.speed)
        elif self.direction == 'left':
            speed = (-self.speed, 0)
        elif self.direction == 'right':
            speed = (self.speed, 0)
        rect_ori = self.rect
        self.rect = self.rect.move(speed)
        # --scene elements
        for key, value in scene_elems.items():
            if key in ['brick_group', 'iron_group', 'river_group']:
                # Impassable tiles: undo the move and pick a different direction.
                if pygame.sprite.spritecollide(self, value, False, None):
                    self.rect = rect_ori
                    directions = ['up', 'down', 'left', 'right']
                    directions.remove(self.direction)
                    self.direction = random.choice(directions)
                    self.setDirection(self.direction)
                    self.switch_count = self.switch_time
                    self.move_cache_count = self.move_cache_time
            elif key in ['ice_group']:
                # Ice: slide, i.e. move a second step in the same direction.
                if pygame.sprite.spritecollide(self, value, False, None):
                    self.rect = self.rect.move(speed)
        # --player tanks
        if pygame.sprite.spritecollide(self, player_tanks_group, False, None):
            self.rect = rect_ori
            self.direction = random.choice(['up', 'down', 'left', 'right'])
            self.setDirection(self.direction)
            self.switch_count = self.switch_time
            self.move_cache_count = self.move_cache_time
        # --other enemy tanks
        # NOTE(review): if this tank is itself a member of enemy_tanks_group,
        # spritecollide always reports a hit -- confirm the caller excludes it.
        if pygame.sprite.spritecollide(self, enemy_tanks_group, False, None):
            self.rect = rect_ori
            self.direction = random.choice(['up', 'down', 'left', 'right'])
            self.setDirection(self.direction)
            self.switch_count = self.switch_time
            self.move_cache_count = self.move_cache_time
        # --the player's home base
        if pygame.sprite.collide_rect(self, home):
            self.rect = rect_ori
            self.direction = random.choice(['up', 'down', 'left', 'right'])
            self.setDirection(self.direction)
            self.switch_count = self.switch_time
            self.move_cache_count = self.move_cache_time
        # --screen border: clamp back inside and turn away
        if self.rect.left < self.border_len:
            directions = ['up', 'down', 'left', 'right']
            directions.remove(self.direction)
            self.direction = random.choice(directions)
            self.setDirection(self.direction)
            self.switch_count = self.switch_time
            self.move_cache_count = self.move_cache_time
            self.rect.left = self.border_len
        elif self.rect.right > self.screensize[0]-self.border_len:
            directions = ['up', 'down', 'left', 'right']
            directions.remove(self.direction)
            self.direction = random.choice(directions)
            self.setDirection(self.direction)
            self.switch_count = self.switch_time
            self.move_cache_count = self.move_cache_time
            self.rect.right = self.screensize[0] - self.border_len
        elif self.rect.top < self.border_len:
            directions = ['up', 'down', 'left', 'right']
            directions.remove(self.direction)
            self.direction = random.choice(directions)
            self.setDirection(self.direction)
            self.switch_count = self.switch_time
            self.move_cache_count = self.move_cache_time
            self.rect.top = self.border_len
        elif self.rect.bottom > self.screensize[1]-self.border_len:
            directions = ['up', 'down', 'left', 'right']
            directions.remove(self.direction)
            self.direction = random.choice(directions)
            self.setDirection(self.direction)
            self.switch_count = self.switch_time
            self.move_cache_count = self.move_cache_time
            self.rect.bottom = self.screensize[1] - self.border_len
        # Switch frames for the wheel-rolling effect.
        self.switch_count += 1
        if self.switch_count > self.switch_time:
            self.switch_count = 0
            self.switch_pointer = not self.switch_pointer
            self.image = self.tank_direction_image.subsurface((48*int(self.switch_pointer), 0), (48, 48))
    '''设置坦克方向'''
    def setDirection(self, direction):
        """Set the facing and select the matching 96x48 row from the sprite sheet."""
        self.direction = direction
        # Each facing occupies one 96x48 row (two 48x48 animation frames).
        if self.direction == 'up':
            self.tank_direction_image = self.tank_image.subsurface((0, 0), (96, 48))
        elif self.direction == 'down':
            self.tank_direction_image = self.tank_image.subsurface((0, 48), (96, 48))
        elif self.direction == 'left':
            self.tank_direction_image = self.tank_image.subsurface((0, 96), (96, 48))
        elif self.direction == 'right':
            self.tank_direction_image = self.tank_image.subsurface((0, 144), (96, 48))
    '''降低坦克等级'''
    def decreaseTankLevel(self):
        """Lower the level by one; returns True when the tank is destroyed."""
        if self.booming_flag:
            return False
        self.tanklevel -= 1
        # NOTE(review): when tanklevel reaches -1 this load uses Python's
        # negative indexing (last sprite in the list) just before the tank
        # is flagged as destroyed -- harmless-looking, but worth confirming.
        self.tank_image = pygame.image.load(self.enemy_tank_image_paths[self.tanklevel]).convert_alpha()
        self.setDirection(self.direction)
        self.image = self.tank_direction_image.subsurface((48*int(self.switch_pointer), 0), (48, 48))
        if self.tanklevel < 0:
            self.booming_flag = True
        return True if self.tanklevel < 0 else False
    '''设置静止'''
    def setStill(self):
        """Freeze the tank (triggered by the player's 'clock' power-up)."""
        self.is_keep_still = True
332017817e281c8d4761ee99399c7c579124d674 | 2,128 | py | Python | valohai_cli/commands/project/status.py | JohnCHarrington/valohai-cli | 8703538d43331b97f5dd8339047a6b84b97aed79 | [
"MIT"
] | null | null | null | valohai_cli/commands/project/status.py | JohnCHarrington/valohai-cli | 8703538d43331b97f5dd8339047a6b84b97aed79 | [
"MIT"
] | null | null | null | valohai_cli/commands/project/status.py | JohnCHarrington/valohai-cli | 8703538d43331b97f5dd8339047a6b84b97aed79 | [
"MIT"
] | null | null | null | import click
from valohai_cli.api import request
from valohai_cli.ctx import get_project
from valohai_cli.table import print_table
from valohai_cli.models.project import Project
@click.command()
@click.option('--summary/--no-summary', default=True, help='Show execution summary')
@click.option('--incomplete/--no-incomplete', default=True, help='Show details of incomplete executions')
def status(summary: bool, incomplete: bool) -> None:
    """
    Get the general status of the linked project
    """
    # Resolve the linked project (errors out when none is linked).
    project = get_project(require=True)
    data = request('get', f'/api/v0/projects/{project.id}/').json()
    # Header: underlined project name, plus its display URL when known.
    header = click.style(project.name, underline=True)
    click.secho(f'# Project {header}', bold=True)
    if 'urls' in data:
        click.secho(f" {data['urls']['display']}")
    click.secho('')
    if summary:
        print_execution_summary(data)
    if incomplete:
        print_incomplete_executions(project)
def print_incomplete_executions(project: Project) -> None:
    """List the project's incomplete executions in a table (no-op when none exist)."""
    params = {
        'project': project.id,
        'status': 'incomplete',
        'ordering': 'counter',
    }
    executions = request('get', '/api/v0/executions/', params=params).json().get('results', ())
    if not executions:
        return
    click.secho(f'## {len(executions)} Incomplete Executions\n', bold=True)
    print_table(executions, ['counter', 'status', 'step'], headers=['#', 'Status', 'Step'])
def print_execution_summary(project_data: dict) -> None:
    """Print a status -> count table of the project's executions."""
    # Work on a copy so popping 'count' does not mutate the caller's data.
    summary = project_data.get('execution_summary', {}).copy()
    if not summary:
        return
    total = summary.pop('count')
    if not total:
        click.secho('No executions yet.', fg='cyan')
        return
    click.secho(f'## Summary of {total} executions\n', bold=True)
    rows = []
    for key, value in sorted(summary.items()):
        if value:
            rows.append({'status': key.replace('_count', ''), 'count': value})
    print_table(rows, columns=['status', 'count'], headers=['Status', 'Count'])
    click.secho('\n')
| 33.25 | 105 | 0.651316 |
e9ea7416d5781e834e9fabbf3b99c1e356ab4332 | 1,997 | py | Python | retro/i3info/track_and_cascade_photon_parameterizations.py | eat5210/retro | 4b426422bcb102d8031bc2f5715fd766630162fc | [
"Apache-2.0"
] | null | null | null | retro/i3info/track_and_cascade_photon_parameterizations.py | eat5210/retro | 4b426422bcb102d8031bc2f5715fd766630162fc | [
"Apache-2.0"
] | null | null | null | retro/i3info/track_and_cascade_photon_parameterizations.py | eat5210/retro | 4b426422bcb102d8031bc2f5715fd766630162fc | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
"""
Display photon production info for tracks and cascades, as parameterized in
IceCube software
"""
from __future__ import absolute_import, division, print_function
from os.path import expandvars
from icecube.clsim.traysegments.common import parseIceModel
from icecube.clsim import NumberOfPhotonsPerMeter
from icecube import clsim
from icecube.icetray import I3Units
# Ice model: SPICE-Mie from the clsim resources, with tilt disabled.
mediumProperties = parseIceModel(
    expandvars("$I3_SRC/clsim/resources/ice/spice_mie"),
    disableTilt=True
)
domAcceptance = clsim.GetIceCubeDOMAcceptance()
# Photons emitted per metre of muon track, weighted by DOM acceptance over
# the ice model's wavelength range.
photons_per_m_trck = NumberOfPhotonsPerMeter(
    mediumProperties.GetPhaseRefractiveIndex(0),
    domAcceptance,
    mediumProperties.GetMinWavelength(),
    mediumProperties.GetMaxWavelength()
)
density = mediumProperties.GetMediumDensity() * (I3Units.cm3 / I3Units.g)
"""Density in units of g/cm^3"""
# PPC parameterization
photons_per_gev_em_cscd = 5.21 * 0.924 / density
# TODO: what factor to use here?
#photons_per_gev_had_cscd = photons_per_gev_em_cscd * ???
# Track length per GeV; assumes the usual ~4.5 m/GeV muon scaling split as
# 15 m / 3.33 GeV -- TODO confirm source of these constants.
m_per_gev_trck = 15 / 3.33
photons_per_gev_trck = photons_per_m_trck * m_per_gev_trck
if __name__ == '__main__':
    print('Medium density is reported to be %.5f g/cm^3' % density)
    print('')
    print('Muon track:')
    print(' %10.3f photons per m' % photons_per_m_trck)
    print(' %10.3f photons per GeV' % photons_per_gev_trck)
    print(' %10.3f m per GeV' % m_per_gev_trck)
    print('')
    print('Electromagnetic cascade:')
    print(' %10.3f photons per GeV' % photons_per_gev_em_cscd)
    print('')
    #print('Hadronic cascade:')
    #print(' %10.3f photons per GeV' % photons_per_gev_had_cscd)
    #print('')
    print('10 GeV EM cascade : %10.3f photons'
          % (10*photons_per_gev_em_cscd))
    #print('10 GeV hadronic cascade: %10.3f photons'
    # % (10*photons_per_gev_had_cscd))
    print('10 GeV muon track : %10.3f photons'
          % (10*photons_per_m_trck))
e9b8c5e52eb8b3d5d12182b24c0fe555e51a3a7a | 3,771 | py | Python | server/app.py | jsdelivrbot/allocate-me | 88ccf78d664d06b2885efeb83f4e340de60c28ec | [
"MIT"
] | null | null | null | server/app.py | jsdelivrbot/allocate-me | 88ccf78d664d06b2885efeb83f4e340de60c28ec | [
"MIT"
] | null | null | null | server/app.py | jsdelivrbot/allocate-me | 88ccf78d664d06b2885efeb83f4e340de60c28ec | [
"MIT"
] | 1 | 2018-12-08T11:33:37.000Z | 2018-12-08T11:33:37.000Z | #!/usr/bin/env python3
import os
import re
import uuid
from datetime import timedelta
import pyexcel
import pytz
import vobject
from dateutil.parser import parse
from dateutil.rrule import rrule, rruleset, WEEKLY
from flask import Flask, render_template, request, send_from_directory
from xlrd.biffh import XLRDError
from .utils import dates_between_dates, filter_row, get_pretty_location, parse_dates
app = Flask(__name__)
# Matches durations such as "1 hr" / "2.5 hrs"; group 1 is the hour count.
DURATION_REGEX = re.compile(r'([\d.]+) hrs?')
# All timetable times are localized to Melbourne time.
DEFAULT_TIMEZONE = pytz.timezone('Australia/Melbourne')
# Front-end build output served by the static routes below.
BUILD_DIR = os.path.join(app.root_path, '../', 'dist')
def build_event(record):
    """Translate one timetable row into an event dict used for iCal generation.

    *record* is a row from the uploaded XLS; the keys read here are 'Dates',
    'Time', 'Duration', 'Subject Code', 'Day', 'Location', 'Group',
    'Description' and 'Staff'. Returns timezone-aware start/end datetimes,
    the weekday index, the series end date ('until'), dates to exclude
    between non-contiguous ranges, and display strings.
    """
    # parse_dates yields (start, end) datetime pairs, one per date range --
    # presumably in chronological order; confirm against utils.parse_dates.
    date_ranges = parse_dates(record['Dates'], record['Time'], record['Duration'])
    excluded_dates = []
    # 'Duration' looks like "1.5 hrs"; group 1 is the number of hours.
    duration = timedelta(hours=float(DURATION_REGEX.match(record['Duration']).group(1)))
    event_start = date_ranges[0][0]
    event_end = event_start + duration
    subject = record['Subject Code'].split('_')[0]
    # Collect gap dates between consecutive ranges so the weekly recurrence
    # can skip them (e.g. mid-semester breaks).
    for i, date_range in enumerate(date_ranges):
        if (i + 1) < len(date_ranges):
            previous_date = date_range[1]
            next_date = date_ranges[i + 1][0]
            excluded_dates.extend(dates_between_dates(previous_date, next_date))
    return {
        'event_start': DEFAULT_TIMEZONE.localize(event_start),
        'event_end': DEFAULT_TIMEZONE.localize(event_end),
        'day_index': parse(record['Day']).weekday(),
        'until': DEFAULT_TIMEZONE.localize(date_ranges[-1][-1]),
        # NOTE: a one-shot map iterator -- consumed once by the caller.
        'excludes': map(DEFAULT_TIMEZONE.localize, excluded_dates),
        'location': get_pretty_location(record['Location']),
        'title': '{subject} {group}'.format(subject=subject, group=record['Group']),
        'description': '{description}\n\nStaff: {staff}'.format(
            description=record['Description'],
            staff=record['Staff']
        )
    }
@app.route('/upload', methods=['POST', ])
def upload():
    """Accept an XLS timetable upload and return it serialized as iCalendar.

    Validation failures return a plain-text message (with HTTP 200).
    """
    if request.method == 'POST':
        # Check if the post request has the file part
        if 'file' not in request.files:
            return 'No file received.'
        input_file = request.files['file']
        # If the user doesn't select a file, the browser also
        # submit an empty part without a filename.
        if input_file.filename == '':
            return 'No file selected.'
        if input_file.filename.lower().endswith('.xls'):
            return 'File must be XLS format.'
        try:
            # Row 1 holds the column names; filter_row drops empty rows.
            sheet = pyexcel.get_sheet(file_content=input_file.read(), file_type='xls', name_columns_by_row=1)
            del sheet.row[filter_row] # Delete empty rows
            records = sheet.to_records()
        except XLRDError as e:
            # NOTE(review): `e` is unused; the parse error is swallowed.
            return 'File is corrupt.'
        # Build one VEVENT per timetable row, with a weekly recurrence rule
        # bounded by the row's last date and the collected exclusion dates.
        cal = vobject.iCalendar()
        for record in records:
            event = build_event(record)
            vevent = cal.add('vevent')
            vevent.add('uid').value = str(uuid.uuid4()).upper()
            vevent.add('summary').value = event['title']
            vevent.add('description').value = event['description']
            vevent.add('location').value = event['location']
            vevent.add('dtstart').value = event['event_start']
            vevent.add('dtend').value = event['event_end']
            ruleset = rruleset()
            ruleset.rrule(
                rrule(WEEKLY, byweekday=event['day_index'], until=event['until'])
            )
            for exdate in event['excludes']:
                ruleset.exdate(exdate)
            vevent.rruleset = ruleset
        return cal.serialize()
    return ''
@app.route('/')
def home():
    """Serve the front-end entry point from the build directory."""
    return send_from_directory(BUILD_DIR, 'index.html')
@app.route('/<path:path>')
def send_static(path):
    """Serve a static asset from the front-end build directory.

    `send_from_directory` resolves *path* inside BUILD_DIR, so traversal
    attempts yield a 404 rather than escaping the directory.
    """
    # Removed a leftover `print(path)` debug statement.
    return send_from_directory(BUILD_DIR, path)
if __name__ == '__main__':
    # Development entry point; production would use a WSGI server instead.
    app.run()
| 32.508621 | 109 | 0.632458 |
f63db5828ef2da8e787ab600e19a956026e9f0d2 | 3,892 | py | Python | poloniex/api.py | luisan00/poloniex-wrapper | 8b676f25103f6a0b47e22fe55a6a44e1fa3bd14b | [
"MIT"
] | null | null | null | poloniex/api.py | luisan00/poloniex-wrapper | 8b676f25103f6a0b47e22fe55a6a44e1fa3bd14b | [
"MIT"
] | null | null | null | poloniex/api.py | luisan00/poloniex-wrapper | 8b676f25103f6a0b47e22fe55a6a44e1fa3bd14b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import certifi
import json
import time
import urllib3
class public():
    '''
    Wrapper for the public (unauthenticated) Poloniex HTTP API.

    According to the documentation available at
    https://poloniex.com/support/api/ there are six public API methods,
    plus returnLoanOrders.
    '''
    def __init__(self):
        # TLS-verifying connection pool reused by every request.
        self.pool = urllib3.PoolManager(
            cert_reqs='CERT_REQUIRED',
            ca_certs=certifi.where())

    def request(self, data):
        '''
        @description: Base GET request used by all public methods.
        @param: data - dict of query fields; must contain 'command'
        @return: dict with the decoded JSON under 'result' and the local
                 request 'timestamp' (whole seconds, as a string)
        '''
        response = self.pool.request(
            'GET', 'https://poloniex.com/public', fields=data)
        result = {
            'result': json.loads(response.data.decode('utf-8')),
            'timestamp': '%.0f' % time.time()
        }
        return result

    def returnTicker(self):
        '''
        @description: Ticker for all markets.
        @return: Object (JSON)
        '''
        data = {
            'command': 'returnTicker'
        }
        return self.request(data)

    def return24hVolume(self):
        '''
        @description: 24-hour volume for all markets.
        @return: Object (JSON)
        '''
        data = {
            'command': 'return24hVolume'
        }
        return self.request(data)

    def returnOrderBook(self, currencyPair, depth=10):
        '''
        @description: Order book for a given market or -
                      for all if is equal to: "all".
        @param: currencyPair
        @param: depth, default=10
        @return: Object (JSON)
        '''
        data = {
            'command': 'returnOrderBook',
            'currencyPair': currencyPair,
            'depth': depth
        }
        return self.request(data)

    def returnTradeHistory(self, currencyPair, start=None, end=None):
        '''
        @description: the last 200 trades for a given market or -
                      up to 50,000 trades between start and end params.
        @param: currencyPair
        @param: start - (UTC-timestamp), None by default
        @param: end - (UTC-timestamp), None by default
        @return: Object (JSON)
        '''
        # Bug fix: the original tested `if (start | end)`, a bitwise OR
        # that raises TypeError whenever either argument is None.
        data = {
            'command': 'returnTradeHistory',
            'currencyPair': currencyPair
        }
        if start is not None or end is not None:
            data['start'] = start
            data['end'] = end
        return self.request(data)

    def returnChartData(self, currencyPair, period, start, end):
        '''
        @description Candlestick chart data for a given [currencyPair]
                     and given [period] in seconds,
                     between [start] and [end] timestamps.
        @currencyPair String - The required pair.
        @period Integer - candlestick period.
        @start Integer - UTC timestamp.
        @end Integer - UTC timestamp.
        @return Object (JSON)
        '''
        data = {
            'command': 'returnChartData',
            'currencyPair': currencyPair,
            'period': period,
            'start': start,
            'end': end
        }
        return self.request(data)

    def returnCurrencies(self):
        '''
        @description: Information about currencies.
        @return: Object (JSON)
        '''
        data = {
            'command': 'returnCurrencies'
        }
        return self.request(data)

    def returnLoanOrders(self, currency):
        '''
        @description: List of loan offers and demands for a given currency.
        @param: currency - String
        @return: Object (JSON)
        '''
        # Bug fix: the currency parameter was previously ignored, so the
        # API call could never target the requested currency.
        data = {
            'command': 'returnLoanOrders',
            'currency': currency
        }
        return self.request(data)
| 29.938462 | 77 | 0.501542 |
398e73be6f6a466c4db3a11250b01b5ff23cac5a | 24,924 | py | Python | samples/deploy_and_test.py | dirkrichtsteiger/cloud-security-xsuaa-integration | 712941a94fa1eb4d74b714194628f5d318bb9cde | [
"Apache-2.0"
] | null | null | null | samples/deploy_and_test.py | dirkrichtsteiger/cloud-security-xsuaa-integration | 712941a94fa1eb4d74b714194628f5d318bb9cde | [
"Apache-2.0"
] | null | null | null | samples/deploy_and_test.py | dirkrichtsteiger/cloud-security-xsuaa-integration | 712941a94fa1eb4d74b714194628f5d318bb9cde | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import abc
import subprocess
import urllib.request
from urllib.parse import urlencode
from urllib.error import HTTPError
from base64 import b64encode
import json
import unittest
import logging
import os
import time
import re
from getpass import getpass
# Usage information
# To run this script you must be logged into CF via 'cf login' Also make sure
# to change settings in vars.yml to your needs. This script deploys sample
# apps and fires post request against their endpoints. For some samples it
# needs to create a password token for which you need to provide your password
# (same as you would use for 'cf login'). You can do this by either supplying
# it via the system environment variable 'CFPASSWORD' or by typing when the
# script prompts for the password. The same goes for the username with the
# variable 'CFUSER'.
# Dependencies
# The script depends on python3 and the cloud foundry command line tool 'cf'.
# Running the script
# If the script is made executable, it can be started with cd
# It can also be started like so: python3 ./deploy_and_test.py
# By default it will run all unit tests.
# It is also possible to run specific test classes:
# python3 -m unittest deploy_and_test.TestJavaSecurity.test_hello_java_security
# This would only the run the test called 'test_hello_java_security'
# inside the test class 'TestJavaSecurity' inside the deploy_and_test.py file.
# Log at INFO so deployment progress is visible while the tests run.
logging.basicConfig(level=logging.INFO)
class Credentials:
    """Cloud Foundry credentials, taken from CFUSER/CFPASSWORD or prompted for."""

    def __init__(self):
        self.username = self.__resolve('CFUSER', lambda: input("Username: "))
        self.password = self.__resolve('CFPASSWORD', lambda: getpass())

    def __resolve(self, name, prompt):
        # Prefer the environment variable; fall back to an interactive prompt.
        env_value = os.getenv(name)
        return env_value if env_value is not None else prompt()
# Single shared credentials instance, resolved once at import time.
credentials = Credentials()
class SampleTest(abc.ABC, unittest.TestCase):
    """Base class for the sample-app tests.

    setUp deploys the app returned by get_app() to Cloud Foundry and
    tearDown deletes it again. Helpers perform GET requests against the
    deployed app -- anonymously, with HTTP Basic auth, or with a
    password-grant access token -- and assign XSUAA role collections
    to the configured test user.
    """
    @abc.abstractmethod
    def get_app(self):
        """Should return the sample app that should be tested """
        self.skipTest('Dont run abstract base class')
        return CFApp(None, None)
    def setUp(self):
        # Read deployment variables (user id, landscape domain, ...).
        vars_file = open('./vars.yml')
        self.vars_parser = VarsParser(vars_file.read())
        vars_file.close()
        self.cf_apps = CFApps()
        self.__deployed_app = None
        self.__api_access = None
        self.credentials = credentials
        self.get_app().deploy()
        time.sleep(2) # waiting for deployed apps to be available
    def tearDown(self):
        self.get_app().delete()
        if self.__api_access is not None:
            self.__api_access.delete()
    def add_user_to_role(self, role):
        """Assign the XSUAA role collection *role* to the test user; exits on failure."""
        logging.info('Assigning role collection {} for user {}'.format(role, self.credentials.username))
        user = self.__get_api_access().get_user_by_username(self.credentials.username)
        user_id = user.get('id')
        resp = self.__get_api_access().add_user_to_group(user_id, role)
        if (not resp.is_ok):
            logging.error("Could not set role " + role)
            exit()
    def perform_get_request(self, path, username=None, password=None):
        """GET *path* on the deployed app, with HTTP Basic auth when both credentials are given."""
        if (username is not None and password is not None):
            authorization_value = b64encode(bytes(username + ':' + password, 'utf-8')).decode("ascii")
            return self.__perform_get_request(path=path, additional_headers={'Authorization': 'Basic ' + authorization_value})
        return self.__perform_get_request(path=path)
    def perform_get_request_with_token(self, path, additional_headers={}):
        """GET *path* with a password-grant bearer token; exits when no token is obtainable."""
        # NOTE(review): mutable default argument, and the token is fetched a
        # second time in the call below after this availability check.
        access_token = self.__get_user_access_token()
        if (access_token is None):
            logging.error("Cannot continue without access token")
            exit()
        return self.__perform_get_request(path=path, access_token=self.__get_user_access_token(), additional_headers=additional_headers)
    def get_deployed_app(self):
        """Return (lazily looking up) the deployed app's metadata; exits when not found."""
        if (self.__deployed_app is None):
            deployed_app = self.cf_apps.app_by_name(self.get_app().name)
            if (deployed_app is None):
                logging.error('Could not find app: ' + self.get_app().name)
                exit()
            self.__deployed_app = deployed_app
        return self.__deployed_app
    def __get_api_access(self):
        # Lazily create the XSUAA API access service used for role assignments.
        if (self.__api_access is None):
            deployed_app = self.get_deployed_app()
            self.__api_access = ApiAccessService(
                xsuaa_service_url=deployed_app.xsuaa_service_url,
                xsuaa_api_url=deployed_app.xsuaa_api_url)
        return self.__api_access
    def __get_user_access_token(self):
        # Fetch a user token via the resource-owner password grant.
        deployed_app = self.get_deployed_app()
        return HttpUtil().get_access_token(
            xsuaa_service_url=deployed_app.xsuaa_service_url,
            clientid=deployed_app.clientid,
            clientsecret=deployed_app.clientsecret,
            grant_type='password',
            username=self.credentials.username,
            password=self.credentials.password)
    def __perform_get_request(self, path, access_token=None, additional_headers={}):
        # Build the app URL from its name, the CF user id and the landscape domain.
        url = 'https://{}-{}.{}{}'.format(
            self.get_app().name,
            self.vars_parser.user_id,
            self.vars_parser.landscape_apps_domain,
            path)
        logging.info('GET request to {} {}'.format(url, 'using access token' if access_token else ''))
        resp = HttpUtil().get_request(url, access_token=access_token, additional_headers=additional_headers)
        logging.info('Response: ' + str(resp))
        return resp
class TestTokenClient(SampleTest):
    """Deploys the java-tokenclient-usage sample and checks its endpoint."""

    def get_app(self):
        return CFApp(name='java-tokenclient-usage', xsuaa_service_name='xsuaa-token-client')

    def test_hello_token_client(self):
        resp = self.perform_get_request('/hello-token-client')
        self.assertEqual(resp.status, 200, 'Expected HTTP status 200')
        self.assertIsNotNone(resp.body)
        # The sample echoes these token details in its response body.
        for fragment in ("Access-Token: ", "Access-Token-Payload: ", "Expired-At: "):
            self.assertRegex(resp.body, fragment)
class TestJavaSecurity(SampleTest):
    """Deploys the java-security-usage sample and verifies its authorization flow."""
    def get_app(self):
        return CFApp(name='java-security-usage', xsuaa_service_name='xsuaa-java-security')
    def test_hello_java_security(self):
        # Unauthenticated -> 401; token without the role -> 403.
        resp = self.perform_get_request('/hello-java-security')
        self.assertEqual(resp.status, 401, 'Expected HTTP status 401')
        resp = self.perform_get_request_with_token('/hello-java-security')
        self.assertEqual(resp.status, 403, 'Expected HTTP status 403')
        # After the role collection is assigned, the request succeeds and
        # echoes the username and the xsappname-scoped Read scope.
        self.add_user_to_role('JAVA_SECURITY_SAMPLE_Viewer')
        resp = self.perform_get_request_with_token('/hello-java-security')
        self.assertEqual(resp.status, 200, 'Expected HTTP status 200')
        xsappname = self.get_deployed_app().get_credentials_property('xsappname')
        expected_scope = xsappname + '.Read'
        self.assertIsNotNone(resp.body)
        self.assertRegex(resp.body, self.credentials.username, "Did not find username '{}' in response body".format(self.credentials.username))
        self.assertRegex(resp.body, expected_scope, "Expected to find scope '{}' in response body: ".format(expected_scope))
class TestSpringSecurity(SampleTest):
    """Deploys the spring-security-xsuaa-usage sample (behind an approuter)."""
    def get_app(self):
        return CFApp(name='spring-security-xsuaa-usage', xsuaa_service_name='xsuaa-authentication',
                     app_router_name='approuter-spring-security-xsuaa-usage')
    def test_sayHello(self):
        # 401 without a token, 403 without the Viewer role, 200 once assigned.
        resp = self.perform_get_request('/v1/sayHello')
        self.assertEqual(resp.status, 401, 'Expected HTTP status 401')
        resp = self.perform_get_request_with_token('/v1/sayHello')
        self.assertEqual(resp.status, 403, 'Expected HTTP status 403')
        self.add_user_to_role('Viewer')
        resp = self.perform_get_request_with_token('/v1/sayHello')
        self.assertEqual(resp.status, 200, 'Expected HTTP status 200')
        xsappname = self.get_deployed_app().get_credentials_property('xsappname')
        self.assertRegex(resp.body, xsappname, 'Expected to find xsappname in response')
        # The body must also be valid JSON (raises on failure).
        json.loads(resp.body)
    def test_tokenFlows(self):
        """Exercises the sample's token-flow endpoints with the Viewer role."""
        self.add_user_to_role('Viewer')
        resp = self.perform_get_request_with_token('/v2/sayHello')
        self.assertEqual(resp.status, 200, 'Expected HTTP status 200')
        resp = self.perform_get_request_with_token('/v3/requestClientCredentialsToken')
        self.assertEqual(resp.status, 200, 'Expected HTTP status 200')
        resp = self.perform_get_request_with_token('/v3/requestUserToken')
        self.assertEqual(resp.status, 200, 'Expected HTTP status 200')
        # TODO fetch refresh token from resp.body
        #pathWithRefreshToken = '/v3/requestRefreshToken/' + resp.body
        #resp = self.perform_get_request_with_token(pathWithRefreshToken)
        #self.assertEqual(resp.status, 200, 'Expected HTTP status 200')
class TestJavaBuildpackApiUsage(SampleTest):
    """Deploys the sap-java-buildpack-api-usage sample behind an approuter."""

    def get_app(self):
        return CFApp(name='sap-java-buildpack-api-usage',
                     xsuaa_service_name='xsuaa-buildpack',
                     app_router_name='approuter-sap-java-buildpack-api-usage')

    def test_hello_token_servlet(self):
        # Unauthenticated -> 401.
        unauthenticated = self.perform_get_request('/hello-token')
        self.assertEqual(unauthenticated.status, 401, 'Expected HTTP status 401')
        # Token without the required role -> 403.
        unauthorized = self.perform_get_request_with_token('/hello-token')
        self.assertEqual(unauthorized.status, 403, 'Expected HTTP status 403')
        # With the role assigned the request succeeds and echoes the user.
        self.add_user_to_role('Buildpack_API_Viewer')
        authorized = self.perform_get_request_with_token('/hello-token')
        self.assertEqual(authorized.status, 200, 'Expected HTTP status 200')
        self.assertRegex(authorized.body, self.credentials.username, 'Expected to find username in response')
class SpringSecurityBasicAuthTest(SampleTest):
    """Deploys the spring-security-basic-auth sample and checks Basic-auth access."""
    def get_app(self):
        return CFApp(name='spring-security-basic-auth', xsuaa_service_name='xsuaa-basic')
    def test_hello_token(self):
        # No credentials -> 401; valid credentials without the role -> 403.
        resp = self.perform_get_request('/hello-token')
        self.assertEqual(resp.status, 401, 'Expected HTTP status 401')
        resp = self.perform_get_request('/hello-token', username=self.credentials.username, password=self.credentials.password)
        self.assertEqual(resp.status, 403, 'Expected HTTP status 403')
    def test_hello_token_status_ok(self):
        # second test needed because tokens are cached in application
        self.add_user_to_role('BASIC_AUTH_API_Viewer')
        resp = self.perform_get_request('/hello-token', username=self.credentials.username, password=self.credentials.password)
        self.assertEqual(resp.status, 200, 'Expected HTTP status 200')
        self.assertRegex(resp.body, self.credentials.username, 'Expected to find username in response')
class SpringWebfluxSecurityXsuaaUsage(SampleTest):
    """Deploys the spring-webflux-security-xsuaa-usage sample behind an approuter."""
    def get_app(self):
        return CFApp(name='spring-webflux-security-xsuaa-usage',
                     xsuaa_service_name='xsuaa-webflux',
                     app_router_name='approuter-spring-webflux-security-xsuaa-usage')
    def test_say_hello(self):
        # 401 without a token, 403 without the role, 200 once assigned.
        resp = self.perform_get_request('/v1/sayHello')
        self.assertEqual(resp.status, 401, 'Expected HTTP status 401')
        resp = self.perform_get_request_with_token('/v1/sayHello')
        self.assertEqual(resp.status, 403, 'Expected HTTP status 403')
        self.add_user_to_role('Webflux_API_Viewer')
        resp = self.perform_get_request_with_token('/v1/sayHello')
        self.assertEqual(resp.status, 200, 'Expected HTTP status 200')
        self.assertRegex(resp.body, self.credentials.username, 'Expected to find username in response')
class HttpUtil:
    """Thin wrapper around ``urllib`` for the GET/POST requests and OAuth
    token retrieval used by these system tests."""
    class HttpResponse:
        """Uniform result wrapping either a urllib response or an ``HTTPError``."""
        def __init__(self, response, error=None):
            if error:
                # Failure path: surface the HTTP error reason/code as the result.
                self.body = error.reason
                self.status = error.code
                self.is_ok = False
            else:
                self.body = response.read().decode()
                self.status = response.status
                self.is_ok = True
            logging.debug(self)
        @classmethod
        def error(cls, error):
            """Alternate constructor for the failure case."""
            return cls(response=None, error=error)
        def __str__(self):
            # Truncate long bodies so debug logs stay readable.
            if len(self.body) > 150:
                body = self.body[:150] + '... (truncated)'
            else:
                body = self.body
            return 'HTTP status: {}, body: {}'.format(self.status, body)
    def get_request(self, url, access_token=None, additional_headers=None):
        """Perform a GET request; optionally attach a bearer token and extra headers.

        ``additional_headers`` defaults to None instead of a shared mutable ``{}``
        (bug fix: mutable default argument); semantics are unchanged.
        """
        logging.debug('Performing GET request to ' + url)
        req = urllib.request.Request(url, method='GET')
        self.__add_headers(req, access_token, additional_headers or {})
        return self.__execute(req)
    def post_request(self, url, data=None, access_token=None, additional_headers=None):
        """Perform a POST request; optionally attach a bearer token and extra headers."""
        logging.debug('Performing POST request to ' + url)
        req = urllib.request.Request(url, data=data, method='POST')
        self.__add_headers(req, access_token, additional_headers or {})
        return self.__execute(req)
    def get_access_token(self, xsuaa_service_url, clientid, clientsecret, grant_type, username=None, password=None):
        """Fetch an OAuth access token from the XSUAA ``/oauth/token`` endpoint.

        Returns the token string, or None when the request failed.
        """
        post_req_body = urlencode({'client_id': clientid,
                                   'client_secret': clientsecret,
                                   'grant_type': grant_type,
                                   'response_type': 'token',
                                   'username': username,
                                   'password': password}).encode()
        url = xsuaa_service_url + '/oauth/token'
        resp = HttpUtil().post_request(url, data=post_req_body)
        if resp.is_ok:
            return json.loads(resp.body).get('access_token')
        else:
            logging.error('Could not retrieve access token')
            return None
    def __add_headers(self, req, access_token, additional_headers):
        if access_token:
            self.__add_header(req, 'Authorization', 'Bearer ' + access_token)
        for header_key in additional_headers:
            self.__add_header(req, header_key, additional_headers[header_key])
    def __add_header(self, req, header_name, header_value):
        logging.debug('adding HTTP header {} -> {}'.format(header_name, header_value))
        req.add_header(header_name, header_value)
    def __execute(self, req):
        try:
            res = urllib.request.urlopen(req)
            return HttpUtil.HttpResponse(response=res)
        except HTTPError as error:
            return HttpUtil.HttpResponse.error(error=error)
class ApiAccessService:
    """Wraps an XSUAA 'apiaccess' service instance used to administer users/groups.

    Construction creates the cf service plus a service key, then parses the
    key's JSON credentials from the ``cf service-key`` output.
    """
    def __init__(self, xsuaa_service_url, xsuaa_api_url, name='api-access-service'):
        self.name = name
        self.service_key_name = self.name + '-sk'
        self.xsuaa_api_url = xsuaa_api_url
        self.xsuaa_service_url = xsuaa_service_url
        self.http_util = HttpUtil()
        subprocess.run(['cf', 'create-service', 'xsuaa', 'apiaccess', name])
        subprocess.run(['cf', 'create-service-key', name,
                        self.service_key_name])
        service_key_output = subprocess.run(
            ['cf', 'service-key', name, self.service_key_name], capture_output=True)
        # lines[1:] drops the header line cf prints before the JSON payload --
        # assumes this output format is stable across cf CLI versions; verify.
        lines = service_key_output.stdout.decode().split('\n')
        self.data = json.loads(''.join(lines[1:]))
        logging.debug('Created ' + str(self))
    def delete(self):
        """Delete the service key and then the service instance."""
        subprocess.run(['cf', 'delete-service-key', '-f',
                        self.name, self.service_key_name])
        subprocess.run(['cf', 'delete-service', '-f', self.name])
    def get_user_by_username(self, username):
        """Look up a user via the SCIM Users endpoint; exits the process if absent."""
        query_parameters = urlencode(
            {'filter': 'userName eq "{}"'.format(username)})
        url = '{}/Users?{}'.format(self.xsuaa_api_url, query_parameters)
        res = self.http_util.get_request(
            url, access_token=self.__get_access_token())
        if (not res.is_ok):
            self.__panic_user_not_found(username)
        users = json.loads(res.body).get('resources')
        if (users is None or len(users) < 1):
            self.__panic_user_not_found(username)
        return users[0]
    def add_user_to_group(self, user_id, group_name):
        """Add the user (by id) to the named group via the SCIM Groups endpoint."""
        post_req_body = json.dumps(
            {'value': user_id, 'origin': 'ldap', 'type': 'USER'}).encode()
        url = '{}/Groups/{}/members'.format(self.xsuaa_api_url, group_name)
        return self.http_util.post_request(url, data=post_req_body,
                                           access_token=self.__get_access_token(),
                                           additional_headers={'Content-Type': 'application/json'})
    def __panic_user_not_found(self, username):
        # Aborts the whole run: user setup is a hard precondition for the tests.
        logging.error('Could not find user {}'.format(username))
        exit()
    def __get_access_token(self):
        # Client-credentials flow using the apiaccess service key's credentials.
        return self.http_util.get_access_token(
            xsuaa_service_url=self.xsuaa_service_url,
            clientid=self.data.get('clientid'),
            clientsecret=self.data.get('clientsecret'),
            grant_type='client_credentials')
    def __str__(self):
        formatted_data = json.dumps(self.data, indent=2)
        return 'Name: {}, Service-Key-Name: {}, Data: {}'.format(
            self.name, self.service_key_name, formatted_data)
class CFApps:
    """Queries the Cloud Foundry v3 API for apps in the currently targeted space.

    Requires an active ``cf login`` session: the bearer token and target space
    are read from the ``cf`` CLI at construction time.
    """
    def __init__(self):
        token = subprocess.run(['cf', 'oauth-token'], capture_output=True)
        target = subprocess.run(['cf', 'target'], capture_output=True)
        self.bearer_token = token.stdout.strip().decode()
        [self.cf_api_endpoint, self.user_id, space_name] = self.__parse_target_output(target.stdout.decode())
        space = subprocess.run(['cf', 'space', space_name, '--guid'], capture_output=True)
        self.space_guid = space.stdout.decode().strip()
    def app_by_name(self, app_name):
        """Return a DeployedApp for the named app; exits the process if not found."""
        url = '{}/apps?space_guids={}&names={}'.format(self.cf_api_endpoint, self.space_guid, app_name)
        paginated_apps = self.__get_with_token(url)
        app = self.__get_first_paginated_resource(paginated_apps)
        if app is None:
            logging.error('App {} not found'.format(app_name))
            exit()
        vcap_services = self.__vcap_services_by_guid(app.get('guid'))
        return DeployedApp(vcap_services)
    def __get_first_paginated_resource(self, paginated_resources):
        # Returns None (implicitly) when the result set is empty.
        pagination = paginated_resources.get('pagination')
        if pagination and pagination.get('total_results') > 0:
            if pagination.get('total_results') > 1:
                # Fixed: logging.warn is a deprecated alias of logging.warning.
                logging.warning(
                    'More than one resource found, taking the first one!')
            return paginated_resources.get('resources')[0]
    def __get_with_token(self, url):
        res = HttpUtil().get_request(url, additional_headers={
            'Authorization': self.bearer_token})
        return json.loads(res.body)
    def __vcap_services_by_guid(self, guid):
        env = self.__get_with_token(
            self.cf_api_endpoint + '/apps/{}/env'.format(guid))
        return env.get('system_env_json').get('VCAP_SERVICES')
    def __parse_target_output(self, target_output):
        """Extract [api endpoint + '/v3', user id, space name] from ``cf target`` output."""
        api_endpoint_match = re.search(r'api endpoint:(.*)', target_output)
        user_id_match = re.search(r'user:(.*)', target_output)
        space_match = re.search(r'space:(.*)', target_output)
        api_endpoint = api_endpoint_match.group(1)
        user_id = user_id_match.group(1)
        space_name = space_match.group(1)
        return [api_endpoint.strip() + '/v3', user_id.strip(), space_name.strip()]
class VarsParser:
"""
This class parses the content of the vars.yml file in the samples directory, e.g:
>>> vars = VarsParser('# change to another value, e.g. your User ID\\nID: X0000000\\n# Choose cfapps.eu10.hana.ondemand.com for the EU10 landscape, cfapps.us10.hana.ondemand.com for US10\\nLANDSCAPE_APPS_DOMAIN: cfapps.sap.hana.ondemand.com\\n#LANDSCAPE_APPS_DOMAIN: api.cf.eu10.hana.ondemand.com\\n')
>>> vars.user_id
'X0000000'
>>> vars.landscape_apps_domain
'cfapps.sap.hana.ondemand.com'
"""
def __init__(self, vars_file_content):
self.vars_file_content = self.__strip_comments(vars_file_content)
@property
def user_id(self):
id_match = re.search(r'ID:(.*)', self.vars_file_content)
return id_match.group(1).strip()
@property
def landscape_apps_domain(self):
landscape_match = re.search(r'LANDSCAPE_APPS_DOMAIN:(.*)',
self.vars_file_content)
return landscape_match.group(1).strip()
def __strip_comments(self, content):
result = ''
for line in content.split('\n'):
commented_line = re.search(r'\w*#', line)
if (commented_line is None):
result += line + '\n'
return result
class DeployedApp:
    """
    This class parses VCAP_SERVICES (as dictionary) and supplies its content, e.g.:
    >>> vcap_services = {'xsuaa': [{'label': 'xsuaa', 'provider': None, 'plan': 'application', 'name': 'xsuaa-java-security', 'tags': ['xsuaa'], 'instance_name': 'xsuaa-java-security', 'binding_name': None, 'credentials': {'tenantmode': 'dedicated', 'sburl': 'https://internal-xsuaa.authentication.sap.hana.ondemand.com', 'clientid': 'sb-java-security-usage!t1785', 'xsappname': 'java-security-usage!t1785', 'clientsecret': 'b1GhPeHArXQCimhsCiwOMzT8wOU=', 'url': 'https://saschatest01.authentication.sap.hana.ondemand.com', 'uaadomain': 'authentication.sap.hana.ondemand.com', 'verificationkey': '-----BEGIN PUBLIC KEY-----MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx/jN5v1mp/TVn9nTQoYVIUfCsUDHa3Upr5tDZC7mzlTrN2PnwruzyS7w1Jd+StqwW4/vn87ua2YlZzU8Ob0jR4lbOPCKaHIi0kyNtJXQvQ7LZPG8epQLbx0IIP/WLVVVtB8bL5OWuHma3pUnibbmATtbOh5LksQ2zLMngEjUF52JQyzTpjoQkahp0BNe/drlAqO253keiY63FL6belKjJGmSqdnotSXxB2ym+HQ0ShaNvTFLEvi2+ObkyjGWgFpQaoCcGq0KX0y0mPzOvdFsNT+rBFdkHiK+Jl638Sbim1z9fItFbH9hiVwY37R9rLtH1YKi3PuATMjf/DJ7mUluDQIDAQAB-----END PUBLIC KEY-----', 'apiurl': 'https://api.authentication.sap.hana.ondemand.com', 'identityzone': 'saschatest01', 'identityzoneid': '54d48a27-0ff4-42b8-b39e-a2b6df64d78a', 'tenantid': '54d48a27-0ff4-42b8-b39e-a2b6df64d78a'}, 'syslog_drain_url': None, 'volume_mounts': []}]}
    >>> app = DeployedApp(vcap_services)
    >>> app.get_credentials_property('clientsecret')
    'b1GhPeHArXQCimhsCiwOMzT8wOU='
    >>> app.clientsecret
    'b1GhPeHArXQCimhsCiwOMzT8wOU='
    """
    def __init__(self, vcap_services):
        self.vcap_services = vcap_services
        # Only the first bound xsuaa service instance is relevant here.
        self.xsuaa_properties = self.vcap_services.get('xsuaa')[0]
    def get_credentials_property(self, property_name):
        """Return the named entry from the xsuaa binding's credentials block."""
        credentials = self.xsuaa_properties.get('credentials')
        return credentials.get(property_name)
    @property
    def clientid(self):
        """OAuth client id of the xsuaa binding."""
        return self.get_credentials_property('clientid')
    @property
    def clientsecret(self):
        """OAuth client secret of the xsuaa binding."""
        return self.get_credentials_property('clientsecret')
    @property
    def xsuaa_api_url(self):
        """Base URL of the XSUAA API."""
        return self.get_credentials_property('apiurl')
    @property
    def xsuaa_service_url(self):
        """Base URL of the XSUAA authentication service."""
        return self.get_credentials_property('url')
    def __str__(self):
        return json.dumps(self.vcap_services, indent=2)
class CFApp:
    """Describes a sample app: its xsuaa service instance and optional approuter."""
    def __init__(self, name, xsuaa_service_name, app_router_name=None):
        if name is None or xsuaa_service_name is None:
            # Fixed message typo ('xsua' -> 'xsuaa') and redundant raise parens.
            raise Exception('Name and xsuaa service name must be provided')
        self.name = name
        self.xsuaa_service_name = xsuaa_service_name
        self.app_router_name = app_router_name
    @property
    def working_dir(self):
        """Directory (relative to the samples root) containing the app sources."""
        return './' + self.name
    def deploy(self):
        """Create the xsuaa service, build with maven and push the app."""
        subprocess.run(['cf', 'create-service', 'xsuaa', 'application', self.xsuaa_service_name, '-c', 'xs-security.json'], cwd=self.working_dir)
        subprocess.run(['mvn', 'clean', 'verify'], cwd=self.working_dir)
        subprocess.run(['cf', 'push', '--vars-file', '../vars.yml'], cwd=self.working_dir)
    def delete(self):
        """Delete the app, its approuter (if any) and the xsuaa service instance."""
        subprocess.run(['cf', 'delete', '-f', self.name])
        if self.app_router_name is not None:
            subprocess.run(['cf', 'delete', '-f', self.app_router_name])
        subprocess.run(
            ['cf', 'delete-service', '-f', self.xsuaa_service_name])
    def __str__(self):
        return 'Name: {}, Xsuaa-Service-Name: {}, App-Router-Name: {}'.format(
            self.name, self.xsuaa_service_name, self.app_router_name)
def is_logged_off():
    """Return True when the cf CLI reports no active login session."""
    result = subprocess.run(['cf', 'target'], capture_output=True)
    if not result:
        return True
    return result.stdout.decode().startswith('FAILED')
if __name__ == '__main__':
    # Require an active 'cf login' session before running the system tests.
    if (is_logged_off()):
        print('To run this script you must be logged into CF via "cf login"')
        print('Also make sure to change settings in vars.yml')
    else:
        import doctest
        doctest.testmod()
        unittest.main()
        # NOTE(review): unittest.main() normally calls sys.exit(), so this line
        # looks unreachable; also 'delete-orphaned-routes' is a cf CLI command,
        # not a maven goal -- confirm whether 'mvn' here should be 'cf'.
        subprocess.run(['mvn', 'delete-orphaned-routes', '-f'])
| 43.649737 | 1,295 | 0.667549 |
dedb67ba08576881950196e8c081818e57a24cea | 10,534 | py | Python | features/steps/project_steps.py | Reveal-Energy-Services/orchid-python-api | 21ed6058009f6b8793050a934238d2858a7fa0c9 | [
"Apache-2.0"
] | null | null | null | features/steps/project_steps.py | Reveal-Energy-Services/orchid-python-api | 21ed6058009f6b8793050a934238d2858a7fa0c9 | [
"Apache-2.0"
] | 28 | 2020-08-14T14:08:43.000Z | 2022-02-07T14:11:38.000Z | features/steps/project_steps.py | Reveal-Energy-Services/orchid-python-api | 21ed6058009f6b8793050a934238d2858a7fa0c9 | [
"Apache-2.0"
] | 1 | 2021-12-01T21:20:07.000Z | 2021-12-01T21:20:07.000Z | # Copyright 2017-2021 Reveal Energy Services, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is part of Orchid and related technologies.
#
# noinspection PyPackageRequirements
from behave import *
use_step_matcher("parse")
from hamcrest import assert_that, equal_to, is_, not_none
import pendulum
import toolz.curried as toolz
import orchid
import common_functions as cf
# Maps the short field label used in the feature files to the full path of the
# corresponding Orchid training-data project (.ifrac) file.
FIELD_NAME_PATHNAME_MAP = {
    'Bakken': str(orchid.training_data_path().joinpath('frankNstein_Bakken_UTM13_FEET.ifrac')),
    'Permian': str(orchid.training_data_path().joinpath('Project_frankNstein_Permian_UTM13_FEET.ifrac')),
    'Montney': str(orchid.training_data_path().joinpath('Project-frankNstein_Montney_UTM13_METERS.ifrac')),
    'Permian-u': str(orchid.training_data_path().joinpath(
        'Project-frankNstein_Permian_UTM13FT_DF_PR2298_vs263.ifrac')),
    'Permian-c': str(orchid.training_data_path().joinpath(
        'Project-frankNstein_Permian_UTM13FT_0412_PjtDataFrame.ifrac')),
    'Permian-n': str(orchid.training_data_path().joinpath(
        'ProjectPermian_LocalTime.ifrac')),
    'GnG': str(orchid.training_data_path().joinpath('GnG_DemoProject_wDataFrames.ifrac')),
}
# Fixed: the step pattern contained the literal '(unknown)' (a data artifact),
# which can never match and does not bind the 'filename' parameter; the behave
# 'parse' matcher requires the '{filename}' placeholder.
@given("I have loaded a project from the file, '{filename}'")
def step_impl(context, filename):
    """
    :type context: behave.runner.Context
    :param filename: The name of the .ifrac file to be loaded.
    """
    project_pathname = str(orchid.training_data_path().joinpath(filename))
    # Cache loaded projects on the context: loading an .ifrac file is expensive.
    if project_pathname not in context.loaded_projects:
        context.loaded_projects[project_pathname] = orchid.core.load_project(project_pathname)
    context.project = context.loaded_projects[project_pathname]
@given("I have loaded the project for the field, '{field}'")
def step_impl(context, field):
    """
    :type context: behave.runner.Context
    :param field: The name of the field of the project.
    """
    context.field = field
    project_pathname = FIELD_NAME_PATHNAME_MAP[field]
    # Reuse a previously loaded project when available (loading is expensive).
    if project_pathname not in context.loaded_projects:
        context.loaded_projects[project_pathname] = orchid.core.load_project(project_pathname)
    context.project = context.loaded_projects[project_pathname]
@when("I query the project name")
def step_impl(context):
    """
    Store the loaded project's name on the context for later assertions.

    :type context: behave.runner.Context
    """
    context.actual_project_name = context.project.name
@then('I see the text "{expected_project_name}"')
def step_impl(context, expected_project_name):
    """
    Assert that the previously queried project name matches the expectation.

    :param expected_project_name: The expected name of the project.
    :type context: behave.runner.Context
    """
    assert_that(context.actual_project_name, equal_to(expected_project_name))
@when("I query the project wells")
def step_impl(context):
    """
    Store the project's wells on the context for later assertions.

    :type context: behave.runner.Context
    """
    context.actual_wells = context.project.wells()
# noinspection PyBDDParameters
@then("I see that the project, {project}, has {well_count:d} wells")
def step_impl(context, project, well_count):
    """
    Args:
        context (behave.runner.Context): The test context.
        project (str): The name identifying the project of interest.
        well_count (int): The number of wells in the project of interest.
    """
    # Reuse the name steps to confirm we are checking the expected project.
    context.execute_steps(f'When I query the project name')
    context.execute_steps(f'Then I see the text "{project}"')
    assert_that(len(context.actual_wells), equal_to(well_count))
@then("I see the well details {well}, {display_name}, and {uwi} for {object_id}")
def step_impl(context, well, display_name, uwi, object_id):
    """
    Assert that exactly one well named `well` exists and that its display name,
    UWI and object ID match the expected values.

    Args:
        context (behave.runner.Context): The test context.
        well (str): The expected well name.
        display_name (str): The expected well display name.
        uwi (str): The expected UWI.
        object_id (str): The expected well object ID.
    """
    def actual_details_to_check(well_adapter):
        return well_adapter.name, well_adapter.display_name, well_adapter.uwi, str(well_adapter.object_id)
    def expected_details_to_check():
        return well, display_name, uwi, object_id
    candidate_wells = list(context.actual_wells.find_by_name(well))
    assert_that(len(candidate_wells), equal_to(1),
                f'Expected 1 well for project in field, {context.field}'
                f' but found {len(candidate_wells)}.')
    well_to_test = candidate_wells[0]
    tmp_to_test = actual_details_to_check(well_to_test)
    actual_to_test = tmp_to_test
    # An empty UWI is normalized to None before comparing against expectations.
    if tmp_to_test[2] == '':
        actual_to_test = (tmp_to_test[0], tmp_to_test[1], None, str(tmp_to_test[3]))
    assert_that(actual_to_test, equal_to(expected_details_to_check()))
@when("I query the project default well colors")
def step_impl(context):
    """
    Store the project's default well colors on the context.

    :type context: behave.runner.Context
    """
    context.actual_default_well_colors = context.project.default_well_colors()
@then("I see the colors")
def step_impl(context):
    """
    Compare the queried default well colors against the scenario's table; each
    table row supplies the expected 'red', 'green' and 'blue' components.

    :type context: behave.runner.Context
    """
    assert_that(len(context.actual_default_well_colors), equal_to(len(context.table.rows)))
    for (actual, expected) in zip(context.actual_default_well_colors, context.table):
        # Colors are (r, g, b) triples; compare component-wise as floats.
        for component_index, component_name in zip(range(3), ['red', 'green', 'blue']):
            assert_that(actual[component_index], equal_to(float(expected[component_name])))
@when("I query the project bounds")
def step_impl(context):
    """
    Args:
        context (behave.runner.Context): The test context.
    """
    min_x, max_x, min_y, max_y, min_depth, max_depth = context.project.project_bounds()
    # Store the bounds by name so the 'Then' step can address each coordinate.
    context.project_bounds = {
        'min_x': min_x,
        'max_x': max_x,
        'min_y': min_y,
        'max_y': max_y,
        'min_depth': min_depth,
        'max_depth': max_depth,
    }
@then("I see project bounds {min_x}, {max_x}, {min_y}, {max_y}, {min_depth}, and {max_depth},")
def step_impl(context, min_x, max_x, min_y, max_y, min_depth, max_depth):
    """
    Args:
        context (behave.runner.Context): The test context.
        min_x (str): The project's minimum x-coordinate (in project units and relative to the absolute state plane).
        max_x (str): The project's maximum x-coordinate.
        min_y (str): The project's minimum y-coordinate.
        max_y (str): The project's maximum y-coordinate.
        min_depth (str): The project's minimum (total vertical) depth coordinate.
        max_depth (str): The project's maximum depth coordinate.
    """
    cf.assert_that_actual_measurement_close_to_expected(context.project_bounds['min_x'], min_x)
    cf.assert_that_actual_measurement_close_to_expected(context.project_bounds['max_x'], max_x)
    cf.assert_that_actual_measurement_close_to_expected(context.project_bounds['min_y'], min_y)
    cf.assert_that_actual_measurement_close_to_expected(context.project_bounds['max_y'], max_y)
    cf.assert_that_actual_measurement_close_to_expected(context.project_bounds['min_depth'], min_depth)
    cf.assert_that_actual_measurement_close_to_expected(context.project_bounds['max_depth'], max_depth)
@when("I query the project measurements")
def step_impl(context):
    """
    Args:
        context (behave.runner.Context): The test context.
    """
    # Collect the scalar project measurements under a single context attribute.
    context.project_measurements = {
        'fluid_density': context.project.fluid_density,
        'azimuth': context.project.azimuth,
        'center_x': context.project.project_center().x,
        'center_y': context.project.project_center().y,
    }
@then("I see project measurements {fluid_density}, {azimuth}, {center_x}, and {center_y}")
def step_impl(context, fluid_density, azimuth, center_x, center_y):
    """
    Args:
        context (behave.runner.Context): The test context.
        fluid_density (str): The fluid density measurement in project units.
        azimuth (str): The azimuth in project units.
        center_x (str): The x-coordinate of the project center in project units.
        center_y (str): The y-coordinate of the project center in project units.
    """
    # Each expected value is parsed as a measurement and compared approximately.
    cf.assert_that_actual_measurement_close_to_expected(context.project_measurements['fluid_density'], fluid_density)
    cf.assert_that_actual_measurement_close_to_expected(context.project_measurements['azimuth'], azimuth)
    cf.assert_that_actual_measurement_close_to_expected(context.project_measurements['center_x'], center_x)
    cf.assert_that_actual_measurement_close_to_expected(context.project_measurements['center_y'], center_y)
@when("I query the project well time series")
def step_impl(context):
    """
    Args:
        context (behave.runner.Context): The test context
    """
    context.time_series = context.project.time_series()
    # Fail fast here rather than in the subsequent 'Then' steps.
    assert_that(context.time_series, is_(not_none()))
# noinspection PyBDDParameters
@then("I see the samples {index:d}, {qty_name}, {time}, and {value} for {name}")
def step_impl(context, index, qty_name, time, value, name):
    """
    Args:
        context (behave.runner.Context): The test context
        index (int): The index of the well time series sample of interest.
        qty_name (str): The phenomenon type of sample of interest.
        time (str): The time of the sample of interest
        value (str): The measured value of the sample of interest.
        name (str): The name of the sampled time series curve.
    """
    def is_candidate(curve_to_test):
        # Match on both curve name and sampled quantity to disambiguate curves.
        return curve_to_test.name == name and curve_to_test.sampled_quantity_name == qty_name
    candidate_curves = list(toolz.filter(is_candidate, context.time_series.all_objects()))
    assert_that(len(candidate_curves), equal_to(1),
                f'Expected 1 curve with name, {name}, and sampled quantity_name, {qty_name}.' +
                f' Found {len(candidate_curves)}')
    curve = candidate_curves[0]
    actual_quantity_name = curve.sampled_quantity_name
    assert_that(actual_quantity_name, equal_to(qty_name))
    samples = curve.data_points()
    actual_sample_time = samples.index[index]
    expected_sample_time = pendulum.parse(time)
    assert_that(actual_sample_time, equal_to(expected_sample_time))
    actual_sample_magnitude = samples[actual_sample_time]
    # Attach the curve's unit so the comparison is unit-aware.
    actual_sample_measurement = orchid.make_measurement(curve.sampled_quantity_unit(), actual_sample_magnitude)
    cf.assert_that_actual_measurement_close_to_expected(actual_sample_measurement, value)
| 40.053232 | 117 | 0.724701 |
2cfcb1b9f33bf6860ca90c3f3c4d82a2442eabc0 | 3,057 | py | Python | magnebot/object_static.py | neuroailab/magnebot | 3f537fcd95685efeadf7200208a310a4c6a2f10c | [
"MIT"
] | null | null | null | magnebot/object_static.py | neuroailab/magnebot | 3f537fcd95685efeadf7200208a310a4c6a2f10c | [
"MIT"
] | null | null | null | magnebot/object_static.py | neuroailab/magnebot | 3f537fcd95685efeadf7200208a310a4c6a2f10c | [
"MIT"
] | null | null | null | import numpy as np
from json import loads
from tdw.object_init_data import TransformInitData
from magnebot.paths import OBJECT_CATEGORIES_PATH
class ObjectStatic:
    """
    Info for an object that doesn't change between frames, such as its ID and mass.
    ```python
    from magnebot import Magnebot
    m = Magnebot()
    m.init_scene(scene="2a", layout=1)
    # Print each object ID and segmentation color.
    for object_id in m.objects_static:
        o = m.objects_static[object_id]
        print(object_id, o.segmentation_color)
    ```
    """
    # Objects that we can assume are kinematic.
    __KINEMATIC = ['24_in_wall_cabinet_white_wood', '24_in_wall_cabinet_wood_beach_honey', 'aquostv',
                   'cabinet_24_two_drawer_white_wood', 'cabinet_24_two_drawer_wood_beach_honey',
                   'cabinet_24_white_wood', 'cabinet_24_wood_beach_honey', 'cabinet_36_white_wood',
                   'cabinet_36_wood_beach_honey', 'cabinet_full_height_white_wood',
                   'cabinet_full_height_wood_beach_honey', 'elf_painting', 'framed_painting', 'fruit_basket',
                   'its_about_time_painting', 'silver_frame_painting', 'sink_base_white_wood',
                   'sink_base_wood_beach_honey']
    # A dictionary of object categories. Key = object name. Value = category.
    __CATEGORIES = loads(OBJECT_CATEGORIES_PATH.read_text(encoding="utf-8"))
    def __init__(self, name: str, object_id: int, mass: float, segmentation_color: np.ndarray, size: np.ndarray):
        """
        :param name: The name of the object.
        :param object_id: The unique ID of the object.
        :param mass: The mass of the object.
        :param segmentation_color: The segmentation color of the object.
        :param size: The size of the object.
        """
        """:field
        The unique ID of the object.
        """
        self.object_id = object_id
        """:field
        [The name of the model.](https://github.com/threedworld-mit/tdw/blob/master/Documentation/python/librarian/model_librarian.md)
        """
        # Lower-cased so lookups in __CATEGORIES and __KINEMATIC are uniform.
        self.name = name.lower()
        if self.name in ObjectStatic.__CATEGORIES:
            """:field
            The semantic category of the object.
            """
            self.category = ObjectStatic.__CATEGORIES[self.name]
        else:
            # Fall back to the model library's category when the local
            # category file does not list this model.
            self.category = TransformInitData.LIBRARIES["models_core.json"].get_record(self.name).wcategory
        """:field
        If True, this object is kinematic, and won't respond to physics.
        Examples: a painting hung on a wall or built-in furniture like a countertop.
        """
        self.kinematic = self.name in ObjectStatic.__KINEMATIC
        """:field
        The RGB segmentation color for the object as a numpy array: `[r, g, b]`
        """
        self.segmentation_color = segmentation_color
        """:field
        The mass of the object.
        """
        self.mass = mass
        """:field
        The size of the object as a numpy array: `[width, height, length]`
        """
        self.size = size
| 39.701299 | 134 | 0.643441 |
30d160db52cdff97ef4865dad0b96e27b41fd45e | 4,639 | py | Python | torchkge/utils/operations.py | sucpark/kge_visualization | 547e501a38b129a030a66295c03a98aefc899c68 | [
"MIT"
] | null | null | null | torchkge/utils/operations.py | sucpark/kge_visualization | 547e501a38b129a030a66295c03a98aefc899c68 | [
"MIT"
] | null | null | null | torchkge/utils/operations.py | sucpark/kge_visualization | 547e501a38b129a030a66295c03a98aefc899c68 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Copyright TorchKGE developers
@author: Armand Boschin <aboschin@enst.fr>
"""
from pandas import DataFrame
from torch import zeros, cat
def get_mask(length, start, end):
    """Create a mask of length `length` filled with 0s except between indices
    `start` (included) and `end` (excluded).

    Parameters
    ----------
    length: int
        Length of the mask to be created.
    start: int
        First index (included) where the mask will be filled with 0s.
    end: int
        Last index (excluded) where the mask will be filled with 0s.

    Returns
    -------
    mask: `torch.Tensor`, shape: (length), dtype: `torch.bool`
        Mask of length `length` filled with 0s except between indices `start`
        (included) and `end` (excluded).
    """
    mask = zeros(length)
    # Slice assignment is clearer and faster than indexing with a built list.
    mask[start:end] = 1
    return mask.bool()
def get_rank(data, true, low_values=False):
    """Computes the rank of entity at index true[i]. If the rank is k then
    there are k-1 entities with better (higher or lower) value in data.

    Parameters
    ----------
    data: `torch.Tensor`, dtype: `torch.float`, shape: (n_facts, dimensions)
        Scores for each entity.
    true: `torch.Tensor`, dtype: `torch.int`, shape: (n_facts)
        true[i] is the index of the true entity for test i of the batch.
    low_values: bool, optional (default=False)
        if True, best rank is the lowest score else it is the highest.

    Returns
    -------
    ranks: `torch.Tensor`, dtype: `torch.int`, shape: (n_facts)
        ranks[i] - 1 is the number of entities which have better (or same)
        scores in data than the one and index true[i]
    """
    # Score of the true entity in each row, kept as a column for broadcasting.
    true_scores = data.gather(1, true.long().view(-1, 1))
    comparison = (data <= true_scores) if low_values else (data >= true_scores)
    return comparison.sum(dim=1)
def get_dictionaries(df, ent=True):
    """Build entities or relations dictionaries.

    Parameters
    ----------
    df: `pandas.DataFrame`
        Data frame containing three columns [from, to, rel].
    ent: bool
        if True then ent2ix is returned, if False then rel2ix is returned.

    Returns
    -------
    dict: dictionary
        Either ent2ix or rel2ix.
    """
    if ent:
        # Entities appear in both the 'from' and 'to' columns.
        labels = set(df['from'].unique()) | set(df['to'].unique())
    else:
        labels = df['rel'].unique()
    return {label: index for index, label in enumerate(sorted(labels))}
def get_tph(t):
    """Get the average number of tails per head for each relation.

    Parameters
    ----------
    t: `torch.Tensor`, dtype: `torch.long`, shape: (b_size, 3)
        First column contains head indices, second tails and third relations.

    Returns
    -------
    d: dict
        keys: relation indices, values: average number of tails per head.
    """
    df = DataFrame(t.numpy(), columns=['from', 'to', 'rel'])
    # Count facts per (head, relation), then average those counts per relation.
    df = df.groupby(['from', 'rel']).count().groupby('rel').mean()
    df.reset_index(inplace=True)
    return {i: v for i, v in df.values}
def get_hpt(t):
    """Get the average number of heads per tail for each relation.

    Parameters
    ----------
    t: `torch.Tensor`, dtype: `torch.long`, shape: (b_size, 3)
        First column contains head indices, second tails and third relations.

    Returns
    -------
    d: dict
        keys: relation indices, values: average number of heads per tail.
    """
    df = DataFrame(t.numpy(), columns=['from', 'to', 'rel'])
    # Count facts per (relation, tail), then average those counts per relation.
    df = df.groupby(['rel', 'to']).count().groupby('rel').mean()
    df.reset_index(inplace=True)
    return {i: v for i, v in df.values}
def get_bernoulli_probs(kg):
    """Evaluate the Bernoulli probabilities for negative sampling as in the
    TransH original paper by Wang et al. (2014).

    Parameters
    ----------
    kg: `torchkge.data_structures.KnowledgeGraph`

    Returns
    -------
    bern_prob: list
        For each relation index r, tph(r) / (tph(r) + hpt(r)). Relations that
        never occur in the graph keep the default probability of 0.5.
    """
    t = cat((kg.head_idx.view(-1, 1),
             kg.tail_idx.view(-1, 1),
             kg.relations.view(-1, 1)), dim=1)
    hpt = get_hpt(t)
    tph = get_tph(t)
    assert hpt.keys() == tph.keys()
    bern_prob = [0.5] * kg.n_rel
    for r in tph.keys():
        bern_prob[int(r)] = tph[r] / (tph[r] + hpt[r])
    return bern_prob
| 29.547771 | 77 | 0.602285 |
98d92e515fa4da6531cc091c7883a3c043c3ff74 | 22,356 | py | Python | tests/test_lambda.py | prubesh/cloud-custodian | 2d8d135256d34ed7edeee104eab9b1956f457076 | [
"Apache-2.0"
] | 1 | 2020-12-31T05:09:30.000Z | 2020-12-31T05:09:30.000Z | tests/test_lambda.py | prubesh/cloud-custodian | 2d8d135256d34ed7edeee104eab9b1956f457076 | [
"Apache-2.0"
] | 11 | 2019-12-15T17:52:14.000Z | 2020-12-09T05:06:24.000Z | tests/test_lambda.py | lfranchini31/cloud-custodian | 1830fe4b9a59ff6afb675985c9ea531571616a76 | [
"Apache-2.0"
] | 9 | 2019-11-18T07:46:44.000Z | 2020-04-15T11:20:20.000Z | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import json
from mock import patch
from botocore.exceptions import ClientError
from .common import BaseTest, functional
from c7n.executor import MainThreadExecutor
from c7n.resources.aws import shape_validate
from c7n.resources.awslambda import AWSLambda, ReservedConcurrency
from c7n.mu import PythonPackageArchive
SAMPLE_FUNC = """\
def handler(event, context):
print("hello world")
"""
class LambdaPermissionTest(BaseTest):
    """Tests for the lambda 'remove-statements' action on resource policies."""
    def create_function(self, client, name):
        """Create a throwaway lambda function (cleaned up after the test)."""
        archive = PythonPackageArchive()
        self.addCleanup(archive.remove)
        archive.add_contents("index.py", SAMPLE_FUNC)
        archive.close()
        lfunc = client.create_function(
            FunctionName=name,
            Runtime="python2.7",
            MemorySize=128,
            Handler="index.handler",
            Publish=True,
            Role="arn:aws:iam::644160558196:role/lambda_basic_execution",
            Code={"ZipFile": archive.get_bytes()},
        )
        self.addCleanup(client.delete_function, FunctionName=name)
        return lfunc
    @functional
    def test_lambda_permission_matched(self):
        """statement_ids='matched' removes only non-whitelisted cross-account
        statements, leaving the whitelisted grant in place."""
        factory = self.replay_flight_data("test_lambda_permission_matched")
        client = factory().client("lambda")
        name = "func-b"
        self.create_function(client, name)
        client.add_permission(
            FunctionName=name,
            StatementId="PublicInvoke",
            Principal="*",
            Action="lambda:InvokeFunction",
        )
        client.add_permission(
            FunctionName=name,
            StatementId="SharedInvoke",
            Principal="arn:aws:iam::185106417252:root",
            Action="lambda:InvokeFunction",
        )
        p = self.load_policy(
            {
                "name": "lambda-perms",
                "resource": "lambda",
                "filters": [
                    {"FunctionName": name},
                    {"type": "cross-account", "whitelist": ["185106417252"]},
                ],
                "actions": [{"type": "remove-statements", "statement_ids": "matched"}],
            },
            session_factory=factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # Only the whitelisted statement should survive.
        policy = json.loads(client.get_policy(FunctionName=name).get("Policy"))
        self.assertEqual(
            [s["Sid"] for s in policy.get("Statement", ())], ["SharedInvoke"]
        )
    @functional
    def test_lambda_permission_named(self):
        """Removing the only statement by explicit id deletes the whole policy."""
        factory = self.replay_flight_data("test_lambda_permission_named")
        client = factory().client("lambda")
        name = "func-d"
        self.create_function(client, name)
        client.add_permission(
            FunctionName=name,
            StatementId="PublicInvoke",
            Principal="*",
            Action="lambda:InvokeFunction",
        )
        p = self.load_policy(
            {
                "name": "lambda-perms",
                "resource": "lambda",
                "filters": [{"FunctionName": name}],
                "actions": [
                    {"type": "remove-statements", "statement_ids": ["PublicInvoke"]}
                ],
            },
            session_factory=factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # With no statements left, get_policy raises ResourceNotFoundException.
        self.assertRaises(ClientError, client.get_policy, FunctionName=name)
class LambdaLayerTest(BaseTest):
    """Tests for the lambda-layer resource's remediation actions."""

    def test_lambda_layer_cross_account(self):
        """Matched cross-account grants are removed from the layer policy."""
        session_factory = self.replay_flight_data('test_lambda_layer_cross_account')
        policy = self.load_policy(
            {
                'name': 'lambda-layer-cross',
                'resource': 'lambda-layer',
                'filters': [{'type': 'cross-account'}],
                'actions': [
                    {'type': 'remove-statements', 'statement_ids': 'matched'},
                ],
            },
            session_factory=session_factory)
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        self.assertIn('CrossAccountViolations', resources[0])
        client = session_factory().client('lambda')
        # Every statement was removed, so the layer policy no longer exists.
        with self.assertRaises(client.exceptions.ResourceNotFoundException):
            client.get_layer_version_policy(
                LayerName=resources[0]['LayerName'],
                VersionNumber=resources[0]['Version']).get('Policy')

    def test_delete_layer(self):
        """The delete action removes the matched layer version."""
        session_factory = self.replay_flight_data('test_lambda_layer_delete')
        policy = self.load_policy(
            {
                'name': 'lambda-layer-delete',
                'resource': 'lambda-layer',
                'filters': [{'LayerName': 'test'}],
                'actions': [{'type': 'delete'}],
            },
            session_factory=session_factory)
        resources = policy.run()
        client = session_factory().client('lambda')
        with self.assertRaises(client.exceptions.ResourceNotFoundException):
            client.get_layer_version(
                LayerName='test',
                VersionNumber=resources[0]['Version'])
class LambdaTest(BaseTest):
    """Core lambda resource tests: filters, actions, config source,
    and Security Hub finding formatting (all driven by recorded flight data)."""

    def test_lambda_check_permission(self):
        """check-permissions with match=allowed yields nothing when the
        role's permission boundary denies the probed action."""
        # lots of pre-conditions, iam role with iam read only policy attached
        # and a permission boundary with deny on iam read access.
        factory = self.replay_flight_data('test_lambda_check_permission')
        p = self.load_policy(
            {
                'name': 'lambda-check',
                'resource': 'lambda',
                'filters': [
                    {'FunctionName': 'custodian-log-age'},
                    {'type': 'check-permissions',
                     'match': 'allowed',
                     'actions': ['iam:ListUsers']}]
            },
            session_factory=factory)
        resources = p.run()
        assert not resources

    def test_lambda_config_source(self):
        """The config source describes functions, including tags and the
        annotated resource policy (c7n:Policy)."""
        factory = self.replay_flight_data("test_aws_lambda_config_source")
        p = self.load_policy(
            {
                "name": "lambda-config",
                "resource": "lambda",
                "source": "config",
                'query': [
                    {'clause': "resourceId = 'omnissm-handle-registrations'"},
                ],
            },
            session_factory=factory, config={'region': 'us-east-2'})
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]['FunctionName'], 'omnissm-handle-registrations')
        self.assertEqual(
            resources[0]["Tags"], [{"Key": "lambda:createdBy", "Value": "SAM"}]
        )
        self.assertTrue("c7n:Policy" in resources[0])

    def test_post_finding(self):
        """format_resource emits an AwsLambdaFunction finding that matches
        the recorded function and validates against the securityhub shape."""
        factory = self.replay_flight_data('test_lambda_post_finding')
        p = self.load_policy({
            'name': 'lambda',
            'resource': 'aws.lambda',
            'actions': [
                {'type': 'post-finding',
                 'types': [
                     'Software and Configuration Checks/OrgStandard/abc-123']}]},
            session_factory=factory, config={'region': 'us-west-2'})
        functions = p.resource_manager.get_resources([
            'custodian-ec2-ssm-query'])
        rfinding = p.resource_manager.actions[0].format_resource(functions[0])
        self.maxDiff = None
        # Expected literal mirrors the recorded flight data exactly.
        self.assertEqual(
            rfinding,
            {'Details': {'AwsLambdaFunction': {
                'CodeSha256': 'Pq32lM46RbVovW/Abh14XfrFHIeUM/cAEC51fwkf+tk=',
                'Code': {
                    'S3Bucket': 'awslambda-us-west-2-tasks',
                    'S3Key': 'snapshots/644160558196/custodian-ec2-ssm-query-c3bed681-aa99-4bb2-a155-2f5897de20d2', # noqa
                    'S3ObjectVersion': 'Nupr9wOmyG9eZbta8NGFUV9lslQ5NI7m'},
                'Handler': 'custodian_policy.run',
                'LastModified': '2019-07-29T22:37:20.844+0000',
                'MemorySize': 512,
                'RevisionId': '8bbaf510-0ae1-40a5-8980-084bebd3f9c6',
                'Role': 'arn:aws:iam::644160558196:role/CloudCustodianRole',
                'Runtime': 'python3.7',
                'Timeout': 900,
                'TracingConfig': {'Mode': 'PassThrough'},
                'Version': '$LATEST',
                'VpcConfig': {'SecurityGroupIds': [],
                              'SubnetIds': []}}},
             'Id': 'arn:aws:lambda:us-west-2:644160558196:function:custodian-ec2-ssm-query',
             'Partition': 'aws',
             'Region': 'us-west-2',
             'Tags': {'custodian-info': 'mode=config-rule:version=0.8.44.2'},
             'Type': 'AwsLambdaFunction'})
        shape_validate(
            rfinding['Details']['AwsLambdaFunction'],
            'AwsLambdaFunctionDetails', 'securityhub')

    def test_delete(self):
        """The delete action removes the matched function."""
        factory = self.replay_flight_data("test_aws_lambda_delete")
        p = self.load_policy(
            {
                "name": "lambda-events",
                "resource": "lambda",
                "filters": [{"FunctionName": "superduper"}],
                "actions": [{"type": "delete"}],
            },
            session_factory=factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["FunctionName"], "superduper")
        client = factory().client("lambda")
        self.assertEqual(client.list_functions()["Functions"], [])

    def test_delete_reserved_concurrency(self):
        """set-concurrency with value=None deletes the reservation."""
        self.patch(ReservedConcurrency, "executor_factory", MainThreadExecutor)
        factory = self.replay_flight_data("test_aws_lambda_delete_concurrency")
        p = self.load_policy(
            {
                "name": "lambda-concurrency",
                "resource": "lambda",
                "filters": [
                    {"FunctionName": "envcheck"},
                    {"type": "reserved-concurrency", "value": "present"},
                ],
                "actions": [{"type": "set-concurrency", "value": None}],
            },
            session_factory=factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["FunctionName"], "envcheck")
        client = factory().client("lambda")
        info = client.get_function(FunctionName=resources[0]["FunctionName"])
        self.assertFalse("Concurrency" in info)

    def test_set_expr_concurrency(self):
        """set-concurrency with expr=True resolves the value from a
        jmespath expression over the resource (here a metrics datapoint)."""
        self.patch(ReservedConcurrency, "executor_factory", MainThreadExecutor)
        factory = self.replay_flight_data("test_aws_lambda_set_concurrency_expr")
        p = self.load_policy(
            {
                "name": "lambda-concurrency",
                "resource": "lambda",
                "filters": [
                    {
                        "type": "metrics",
                        "name": "Invocations",
                        "statistics": "Sum",
                        "op": "greater-than",
                        "value": 0,
                    }
                ],
                "actions": [
                    {
                        "type": "set-concurrency",
                        "expr": True,
                        "value": '"c7n.metrics"."AWS/Lambda.Invocations.Sum"[0].Sum',
                    }
                ],
            },
            session_factory=factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["FunctionName"], "envcheck")
        client = factory().client("lambda")
        info = client.get_function(FunctionName=resources[0]["FunctionName"])
        self.assertEqual(info["Concurrency"]["ReservedConcurrentExecutions"], 5)

    def test_set_filter_concurrency(self):
        """A static set-concurrency value is applied to matched functions."""
        self.patch(ReservedConcurrency, "executor_factory", MainThreadExecutor)
        factory = self.replay_flight_data("test_aws_lambda_set_concurrency")
        p = self.load_policy(
            {
                "name": "lambda-concurrency",
                "resource": "lambda",
                "filters": [{"type": "reserved-concurrency", "value": "absent"}],
                "actions": [{"type": "set-concurrency", "value": 10}],
            },
            session_factory=factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["FunctionName"], "envcheck")
        client = factory().client("lambda")
        info = client.get_function(FunctionName=resources[0]["FunctionName"])
        self.assertEqual(info["Concurrency"]["ReservedConcurrentExecutions"], 10)

    def test_event_source(self):
        """event-source annotates each function with c7n:EventSources."""
        factory = self.replay_flight_data("test_aws_lambda_source")
        p = self.load_policy(
            {
                "name": "lambda-events",
                "resource": "lambda",
                "filters": [{"type": "event-source", "key": "", "value": "not-null"}],
            },
            session_factory=factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 2)
        self.assertEqual(
            {r["c7n:EventSources"][0] for r in resources}, {"iot.amazonaws.com"}
        )

    def test_sg_filter(self):
        """security-group filter matches on SG attributes and annotates
        the matched group ids."""
        factory = self.replay_flight_data("test_aws_lambda_sg")
        p = self.load_policy(
            {
                "name": "sg-lambda",
                "resource": "lambda",
                "filters": [
                    {"FunctionName": "mys3"},
                    {"type": "security-group", "key": "GroupName", "value": "default"},
                ],
            },
            session_factory=factory,
        )
        resources = p.run()
        self.assertEqual(resources[0]["FunctionName"], "mys3")
        self.assertEqual(resources[0]["c7n:matched-security-groups"], ["sg-f9cc4d9f"])
class LambdaTagTest(BaseTest):
    """Tag, remove-tag and mark-for-op behavior on lambda functions."""

    def test_lambda_tag_and_remove(self):
        """Tagging adds the new key while remove-tag drops the old one."""
        self.patch(AWSLambda, "executor_factory", MainThreadExecutor)
        session_factory = self.replay_flight_data("test_lambda_tag_and_remove")
        client = session_factory().client("lambda")
        policy = self.load_policy(
            {
                "name": "lambda-tag",
                "resource": "lambda",
                "filters": [
                    {"FunctionName": "CloudCustodian"},
                    {"tag:Env": "Dev"},
                ],
                "actions": [
                    {"type": "tag", "key": "xyz", "value": "abcdef"},
                    {"type": "remove-tag", "tags": ["Env"]},
                ],
            },
            session_factory=session_factory,
            config={"account_id": "644160558196", "region": "us-west-2"},
        )
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        func_arn = resources[0]["FunctionArn"]
        tags_before = {t["Key"]: t["Value"] for t in resources[0]["Tags"]}
        tags_after = client.list_tags(Resource=func_arn)["Tags"]
        self.assertEqual(tags_before, {"Env": "Dev"})
        self.assertEqual(tags_after, {"xyz": "abcdef"})

    def test_mark_and_match(self):
        """mark-for-op stamps the configured tag with the op and due date."""
        session_factory = self.replay_flight_data("test_lambda_mark_and_match")
        client = session_factory().client("lambda")
        policy = self.load_policy(
            {
                "name": "lambda-mark",
                "resource": "lambda",
                "filters": [{"FunctionName": "CloudCustodian"}],
                "actions": [
                    {
                        "type": "mark-for-op",
                        "op": "delete",
                        "tag": "custodian_next",
                        "days": 1,
                    }
                ],
            },
            config={"region": "us-west-2", "account_id": "644160558196"},
            session_factory=session_factory,
        )
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        func_arn = resources[0]["FunctionArn"]
        tags_before = {t["Key"]: t["Value"] for t in resources[0]["Tags"]}
        tags_after = client.list_tags(Resource=func_arn)["Tags"]
        self.assertEqual(tags_before, {"xyz": "abcdef"})
        self.assertEqual(
            tags_after,
            {"custodian_next": "Resource does not meet policy: delete@2019/02/09",
             "xyz": "abcdef"})
class TestModifyVpcSecurityGroupsAction(BaseTest):
    """Tests for modify-security-groups on lambda, plus the kms-key filter."""

    def test_lambda_remove_matched_security_groups(self):
        """Removing matched SGs detaches only the matched group.

        Test conditions:
          - list with two functions, matching only one "resource-fixer"
          - this function is in a VPC and has 3 SGs attached
          - removing a third SG, "sg_controllers" (sg-c573e6b3)
          - start with 3 SGs, end with 2, match function by regex
        """
        session_factory = self.replay_flight_data(
            "test_lambda_remove_matched_security_groups"
        )
        p = self.load_policy(
            {
                "name": "lambda-remove-matched-security-groups",
                "resource": "lambda",
                "filters": [
                    {
                        "type": "value",
                        "key": "FunctionName",
                        "value": "resource-fixer",
                        "op": "eq",
                    },
                    {
                        "type": "security-group",
                        "key": "GroupName",
                        "value": ".*controllers",
                        "op": "regex",
                    }
                ],
                "actions": [
                    {
                        "type": "modify-security-groups",
                        "remove": "matched",
                        "isolation-group": "sg-01a19f602ecaf25f4",
                    }
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        client = session_factory().client('lambda')
        response = client.list_functions()
        clean_resources = response['Functions']
        self.assertEqual(len(resources), 1)
        self.assertIn("fixer", resources[0]["FunctionName"])
        self.assertEqual(len(resources[0]["VpcConfig"]["SecurityGroupIds"]), 3)
        # check result is expected
        self.assertEqual(len(clean_resources[0]["VpcConfig"]["SecurityGroupIds"]), 2)
        # verify by name that the removed SG is not there
        self.assertNotIn("sg-c573e6b3", clean_resources[0]["VpcConfig"]["SecurityGroupIds"])

    def test_lambda_add_security_group(self):
        """Adding an SG attaches it alongside the existing groups.

        Test conditions:
          - list with two functions, matching only one "resource-fixer"
          - this function is in a VPC and has 2 SGs attached
          - adding a third SG, "sg_controllers" (sg-c573e6b3)
          - start with 2 SGs, end with 3, match function by exact name
        """
        session_factory = self.replay_flight_data("test_lambda_add_security_group")
        p = self.load_policy(
            {
                "name": "add-sg-to-lambda",
                "resource": "lambda",
                "filters": [
                    {
                        "type": "value",
                        "key": "FunctionName",
                        "value": ".*",
                        "op": "regex",
                    },
                ],
                "actions": [{"type": "modify-security-groups", "add": "sg-c573e6b3"}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        client = session_factory().client('lambda')
        response = client.list_functions()
        clean_resources = response['Functions']
        self.assertEqual(len(resources), 2)
        self.assertEqual("resource-fixer", resources[0]["FunctionName"])
        self.assertEqual(len(resources[0]["VpcConfig"]["SecurityGroupIds"]), 2)
        self.assertNotIn("sg-c573e6b3", resources[0]["VpcConfig"]["SecurityGroupIds"])
        # check SG was added
        self.assertEqual(len(clean_resources[0]["VpcConfig"]["SecurityGroupIds"]), 3)
        self.assertIn("sg-c573e6b3", clean_resources[0]["VpcConfig"]["SecurityGroupIds"])

    def test_nonvpc_function(self):
        """Functions outside a VPC still match and run without error."""
        session_factory = self.replay_flight_data("test_lambda_add_security_group")
        p = self.load_policy(
            {
                "name": "test-with-nonvpc-lambda",
                "resource": "lambda",
                "filters": [
                    {
                        "type": "value",
                        "key": "FunctionName",
                        "value": "test-func.*",
                        "op": "regex",
                    },
                ],
                "actions": [{"type": "modify-security-groups", "add": "sg-c573e6b3"}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual("test-func-2", resources[0]["FunctionName"])

    def test_lambda_notfound_exception(self):
        """A ResourceNotFoundException from UpdateFunctionConfiguration
        propagates to the caller (simulated with a mocked client)."""
        error_response = {'Error': {'Code': 'ResourceNotFoundException'}}
        operation_name = 'UpdateFunctionConfiguration'
        with patch("c7n.resources.awslambda.local_session") as mock_local_session:
            updatefunc = mock_local_session.client.update_function_configuration
            updatefunc.side_effect = ClientError(error_response, operation_name)
            with self.assertRaises(ClientError):
                groups = ['sg-12121212', 'sg-34343434']
                updatefunc(FunctionName='badname', VpcConfig={'SecurityGroupIds': groups})
            updatefunc.assert_called_once()

    def test_lambda_kms_alias(self):
        """kms-key filter resolves the function's KMS key to its alias."""
        session_factory = self.replay_flight_data("test_lambda_kms_key_filter")
        kms = session_factory().client('kms')
        p = self.load_policy(
            {
                "name": "lambda-kms-alias",
                "resource": "lambda",
                "filters": [
                    {
                        'FunctionName': "test"
                    },
                    {
                        "type": "kms-key",
                        "key": "c7n:AliasName",
                        "value": "alias/skunk/trails",
                    }
                ]
            },
            session_factory=session_factory,
        )
        resources = p.run()
        # BUG FIX: was assertTrue(len(resources), 1), which passes for any
        # non-empty result set (the 1 was treated as the failure message).
        self.assertEqual(len(resources), 1)
        aliases = kms.list_aliases(KeyId=resources[0]['KMSKeyArn'])
        self.assertEqual(aliases['Aliases'][0]['AliasName'], 'alias/skunk/trails')
| 38.150171 | 123 | 0.529388 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.