hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a03e018d6f0824e05a9d8e6a88a97f990d466cd
| 11,812
|
py
|
Python
|
oscarapi/serializers/fields.py
|
samitnuk/django-oscar-api
|
71f9ba268e17c6ab64b18e848f26b73cf45cf444
|
[
"BSD-3-Clause"
] | null | null | null |
oscarapi/serializers/fields.py
|
samitnuk/django-oscar-api
|
71f9ba268e17c6ab64b18e848f26b73cf45cf444
|
[
"BSD-3-Clause"
] | null | null | null |
oscarapi/serializers/fields.py
|
samitnuk/django-oscar-api
|
71f9ba268e17c6ab64b18e848f26b73cf45cf444
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import operator
from os.path import basename, join
from io import BytesIO
from urllib.parse import urlsplit
from urllib.request import urlopen
from django.conf import settings
from django.db import IntegrityError
from django.utils.translation import ugettext as _
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.files import File
from rest_framework import serializers, relations
from rest_framework.fields import get_attribute
from oscar.core.loading import get_model, get_class
from oscarapi.utils.loading import get_api_class
from oscarapi.utils.exists import bound_unique_together_get_or_create
from .exceptions import FieldError
logger = logging.getLogger(__name__)
ProductAttribute = get_model("catalogue", "ProductAttribute")
Category = get_model("catalogue", "Category")
create_from_breadcrumbs = get_class("catalogue.categories", "create_from_breadcrumbs")
entity_internal_value = get_api_class("serializers.hooks", "entity_internal_value")
attribute_details = operator.itemgetter("code", "value")
class TaxIncludedDecimalField(serializers.DecimalField):
    """Decimal field that degrades gracefully when tax is not yet known.

    When ``instance.is_tax_known`` is false, the value is taken from
    ``excl_tax_field`` on the instance (if given), otherwise from the
    static ``excl_tax_value`` fallback.
    """

    def __init__(self, excl_tax_field=None, excl_tax_value=None, **kwargs):
        self.excl_tax_field = excl_tax_field
        self.excl_tax_value = excl_tax_value
        super().__init__(**kwargs)

    def get_attribute(self, instance):
        # Prefer the tax-inclusive amount whenever the instance can provide it.
        if instance.is_tax_known:
            return super().get_attribute(instance)
        if self.excl_tax_field:
            return get_attribute(instance, (self.excl_tax_field,))
        return self.excl_tax_value
class DrillDownHyperlinkedMixin:
    """Mixin that appends extra url kwargs, read off the object, to reverse().

    The ``extra_url_kwargs`` constructor argument maps url-kwarg names to
    attribute paths on the hyperlinked object (dotted paths are allowed).
    """

    def __init__(self, *args, **kwargs):
        try:
            self.extra_url_kwargs = kwargs.pop("extra_url_kwargs")
        except KeyError:
            raise ValueError(
                "DrillDownHyperlink Fields require an 'extra_url_kwargs' argument"
            )
        super().__init__(*args, **kwargs)

    def get_extra_url_kwargs(self, obj):
        """Resolve each configured attribute path against ``obj``."""
        resolved = {}
        for name, attribute_path in self.extra_url_kwargs.items():
            resolved[name] = operator.attrgetter(attribute_path)(obj)
        return resolved

    def get_url(
        self, obj, view_name, request, format
    ):  # pylint: disable=redefined-builtin
        """
        Given an object, return the URL that hyperlinks to the object.

        May raise a `NoReverseMatch` if the `view_name` and `lookup_field`
        attributes are not configured to correctly match the URL conf.
        """
        # Unsaved objects can not be hyperlinked.
        if hasattr(obj, "pk") and obj.pk in (None, ""):
            return None

        url_kwargs = {self.lookup_url_kwarg: getattr(obj, self.lookup_field)}
        url_kwargs.update(self.get_extra_url_kwargs(obj))
        return self.reverse(view_name, kwargs=url_kwargs, request=request, format=format)
class DrillDownHyperlinkedIdentityField(
    DrillDownHyperlinkedMixin, relations.HyperlinkedIdentityField
):
    # Identity-field variant; all drill-down behavior comes from the mixin.
    pass
class DrillDownHyperlinkedRelatedField(
    DrillDownHyperlinkedMixin, relations.HyperlinkedRelatedField
):
    def use_pk_only_optimization(self):
        """Never use DRF's pk-only shortcut: the mixin's get_extra_url_kwargs
        must read real attributes off the related object."""
        return False
class AttributeValueField(serializers.Field):
    """
    This field is used to handle the value of the ProductAttributeValue model

    Because the value is dependant on the type of the corresponding attribute,
    it is not fixed. This field solves the problem of handling the different
    types.
    """

    def __init__(self, **kwargs):
        # this field always needs the full object
        kwargs["source"] = "*"
        kwargs["error_messages"] = {
            "no_such_option": _("{code}: Option {value} does not exist."),
            "invalid": _("Wrong type, {error}."),
            "attribute_validation_error": _(
                "Error assigning `{value}` to {code}, {error}."
            ),
            "attribute_required": _("Attribute {code} is required."),
            "attribute_missing": _(
                "No attribute exist with code={code}, "
                "please define it in the product_class first."
            ),
            "child_without_parent": _(
                "Can not find attribute if product_class is empty and "
                "parent is empty as well, child without parent?"
            ),
        }
        super(AttributeValueField, self).__init__(**kwargs)

    def get_value(self, dictionary):
        # return all the data because this field uses everything
        return dictionary

    def to_internal_value(self, data):
        """Look up the attribute named by ``data['code']`` and coerce
        ``data['value']`` into the attribute's native type.

        Returns ``{"value": internal_value, "attribute": attribute}`` on
        success; otherwise calls ``self.fail`` with one of the error
        messages declared in ``__init__``.
        """
        assert "product" in data or "product_class" in data or "parent" in data
        try:
            code, value = attribute_details(data)
            internal_value = value
            # Resolve the attribute through whichever anchor was supplied.
            if "product" in data:
                # we need the attribute to determine the type of the value
                attribute = ProductAttribute.objects.get(
                    code=code, product_class__products__id=data["product"]
                )
            elif "product_class" in data and data["product_class"] is not None:
                attribute = ProductAttribute.objects.get(
                    code=code, product_class__slug=data.get("product_class")
                )
            elif "parent" in data:
                attribute = ProductAttribute.objects.get(
                    code=code, product_class__products__id=data["parent"]
                )
            # NOTE(review): if data contains only product_class=None (no
            # "parent"/"product" keys), none of the branches above bind
            # `attribute` and the next line raises UnboundLocalError —
            # confirm callers can never send that shape.
            if attribute.required and value is None:
                self.fail("attribute_required", code=code)
            # some of these attribute types need special processing, or their
            # validation will fail
            if attribute.type == attribute.OPTION:
                internal_value = attribute.option_group.options.get(option=value)
            elif attribute.type == attribute.MULTI_OPTION:
                if attribute.required and not value:
                    self.fail("attribute_required", code=code)
                internal_value = attribute.option_group.options.filter(option__in=value)
                # Report the options that did not resolve, sorted for a
                # deterministic error message.
                if len(value) != internal_value.count():
                    non_existing = set(value) - set(
                        internal_value.values_list("option", flat=True)
                    )
                    non_existing_as_error = ",".join(sorted(non_existing))
                    self.fail("no_such_option", value=non_existing_as_error, code=code)
            elif attribute.type == attribute.DATE:
                date_field = serializers.DateField()
                internal_value = date_field.to_internal_value(value)
            elif attribute.type == attribute.DATETIME:
                date_field = serializers.DateTimeField()
                internal_value = date_field.to_internal_value(value)
            elif attribute.type == attribute.ENTITY:
                internal_value = entity_internal_value(attribute, value)
            # the rest of the attribute types don't need special processing
            try:
                attribute.validate_value(internal_value)
            except TypeError as e:
                self.fail(
                    "attribute_validation_error",
                    code=code,
                    value=internal_value,
                    error=e,
                )
            except ValidationError as e:
                self.fail(
                    "attribute_validation_error",
                    code=code,
                    value=internal_value,
                    error=",".join(e.messages),
                )
            return {"value": internal_value, "attribute": attribute}
        except ProductAttribute.DoesNotExist:
            if (
                "product_class" in data
                and "parent" in data
                and data["product_class"] is None
                and data["parent"] is None
            ):
                self.fail("child_without_parent")
            else:
                self.fail("attribute_missing", **data)
        except ObjectDoesNotExist:
            # Raised by option_group.options.get() above: single option lookup.
            self.fail("no_such_option", value=value, code=code)
        except KeyError as e:
            # attribute_details raised: "code" or "value" was missing.
            (field_name,) = e.args
            raise FieldError(
                detail={field_name: self.error_messages["required"]}, code="required"
            )

    def to_representation(self, value):
        """Render a ProductAttributeValue according to its attribute's type."""
        obj_type = value.attribute.type
        if obj_type == value.attribute.OPTION:
            return value.value.option
        elif obj_type == value.attribute.MULTI_OPTION:
            return value.value.values_list("option", flat=True)
        elif obj_type == value.attribute.FILE:
            return value.value.url
        elif obj_type == value.attribute.IMAGE:
            return value.value.url
        elif obj_type == value.attribute.ENTITY:
            if hasattr(value.value, "json"):
                return value.value.json()
            else:
                return _(
                    "%(entity)s has no json method, can not convert to json"
                    % {"entity": repr(value.value)}
                )
        # return the value as stored on ProductAttributeValue in the correct type
        return value.value
class CategoryField(serializers.RelatedField):
    """Relate categories by their breadcrumb path, e.g. ``"Books > Fiction"``."""

    def __init__(self, **kwargs):
        kwargs["queryset"] = Category.objects
        super().__init__(**kwargs)

    def to_internal_value(self, data):
        # Creates any missing intermediate categories as a side effect.
        return create_from_breadcrumbs(data)

    def to_representation(self, value):
        return value.full_name
class SingleValueSlugRelatedField(serializers.SlugRelatedField):
    """
    Represents a queryset as a list of slugs, and can be used to create new
    items, as long as only the slug_field is required
    """

    def get_bound_queryset(self):
        """Return the related manager on the parent serializer's instance,
        or None when there is no bound instance to look on."""
        parent = self.parent
        source_name = parent.source
        if hasattr(parent, "child_relation"):
            # We are the child of a ListSerializer; the bound instance lives
            # on the list serializer's parent.
            parent = parent.parent
        return getattr(parent.instance, source_name, None)

    def to_internal_value(self, data):
        qs = self.get_bound_queryset()
        if qs is not None:  # first try to obtain a bound item.
            try:
                return bound_unique_together_get_or_create(qs, {self.slug_field: data})
            except IntegrityError:
                pass
        # If no bound item can be found, describe an unbound, unsaved
        # instance.  (A dead ``qs = self.get_queryset()`` call whose result
        # was never used has been removed here.)
        return {self.slug_field: data}
class ImageUrlField(serializers.ImageField):
    """Image field that accepts a URL and resolves it to an uploaded file.

    Foreign URLs are downloaded; URLs pointing at this host are resolved
    to a file inside MEDIA_ROOT instead of being re-downloaded.
    """

    def __init__(self, **kwargs):
        super(ImageUrlField, self).__init__(**kwargs)
        self.use_url = True

    def to_internal_value(self, data):
        http_prefix = data.startswith(("http:", "https:"))
        if http_prefix:
            request = self.context.get("request", None)
            if request:  # if there is a request, we can get the hostname from that
                parsed_url = urlsplit(data)
                host = request.get_host()
                if (
                    host != parsed_url.netloc
                ):  # we are only downloading files from a foreign server
                    # It is a foreign image, download it.
                    # NOTE(review): fetching arbitrary URLs is SSRF-prone;
                    # confirm callers restrict where `data` may point.
                    # Close the connection promptly instead of leaking it.
                    with urlopen(data) as response:
                        image_file_like = BytesIO(response.read())
                    file_object = File(image_file_like, name=basename(parsed_url.path))
                else:
                    # Local image: map the media url back to a filesystem path.
                    location = parsed_url.path
                    path = join(
                        settings.MEDIA_ROOT, location.replace(settings.MEDIA_URL, "", 1)
                    )
                    # The handle stays open on purpose: the parent ImageField
                    # still has to read and validate it.
                    file_object = File(open(path, "rb"))
                return super(ImageUrlField, self).to_internal_value(file_object)
        return super(ImageUrlField, self).to_internal_value(data)
| 38.855263
| 88
| 0.617846
|
4a03e05f316bb4ff8fd2f0427024d667319645d1
| 4,234
|
py
|
Python
|
handle_openstreetmap_v3_cropped_image.py
|
bogeunnet/Handle_openstreetmap
|
f81150f8011433f2fa5a52983661bca019c52d34
|
[
"MIT"
] | 3
|
2019-03-07T02:21:55.000Z
|
2019-03-07T02:28:59.000Z
|
handle_openstreetmap_v3_cropped_image.py
|
bogeunnet/Handle_openstreetmap
|
f81150f8011433f2fa5a52983661bca019c52d34
|
[
"MIT"
] | null | null | null |
handle_openstreetmap_v3_cropped_image.py
|
bogeunnet/Handle_openstreetmap
|
f81150f8011433f2fa5a52983661bca019c52d34
|
[
"MIT"
] | 1
|
2019-03-07T02:24:45.000Z
|
2019-03-07T02:24:45.000Z
|
import os
import math
from PIL import Image
import urllib.request
from io import BytesIO
def deg2num(lat_deg, lon_deg, zoom):
    """Convert WGS84 degrees to slippy-map tile numbers at the given zoom."""
    lat_rad = math.radians(lat_deg)
    tiles_per_axis = 2.0 ** zoom
    xtile = int((lon_deg + 180.0) / 360.0 * tiles_per_axis)
    # Web-mercator projection of the latitude, scaled to tile space.
    mercator_y = math.log(math.tan(lat_rad) + 1.0 / math.cos(lat_rad))
    ytile = int((1.0 - mercator_y / math.pi) / 2.0 * tiles_per_axis)
    return (xtile, ytile)
def num2deg(xtile, ytile, zoom):
    """Convert slippy-map tile numbers to the tile's NW corner in degrees."""
    tiles_per_axis = 2.0 ** zoom
    lon_deg = xtile / tiles_per_axis * 360.0 - 180.0
    lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / tiles_per_axis)))
    return (math.degrees(lat_rad), lon_deg)
def get_map(east, west, north, south, zoom):
    """Download the OSM tiles covering the bounding box and stitch them.

    Tiles are cached on disk under ./cachemap and the stitched mosaic is
    saved as imgABC.jpg.  Returns (x_zero_ul, y_zero_ul, x_zero_br,
    y_zero_br, width, height): the lon/lat of the mosaic's upper-left and
    bottom-right corners plus its pixel dimensions.
    """
    # Equations reference:
    # https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames
    # This tile style is the closest match to Naver maps.
    source_colap = "https://maps.wikimedia.org/osm-intl/{0}/{1}/{2}.png"
    tilestore = os.getcwd() + '/cachemap'
    if not os.path.exists(tilestore):
        os.makedirs(tilestore)
    top_left = deg2num(north, west, zoom)
    bottom_right = deg2num(south, east, zoom)
    y_zero_ul = num2deg(top_left[0], top_left[1], zoom)[0]
    x_zero_ul = num2deg(top_left[0], top_left[1], zoom)[1]
    # The true bottom-right corner is the NW corner of the tile one step
    # down-right of the last tile, not the last tile's own NW corner.
    y_zero_br = num2deg(bottom_right[0] + 1, bottom_right[1] + 1, zoom)[0]
    x_zero_br = num2deg(bottom_right[0] + 1, bottom_right[1] + 1, zoom)[1]
    # range() excludes its end point, so add 1 to include the last tile.
    tiles = [(zoom, x, y)
             for x in range(top_left[0], bottom_right[0] + 1)
             for y in range(top_left[1], bottom_right[1] + 1)]
    # Download tiles (with the on-disk cache) and paste them into one image.
    height = abs(bottom_right[1] - top_left[1] + 1) * 256
    width = abs(bottom_right[0] - top_left[0] + 1) * 256
    img = Image.new("RGB", (width, height))
    for idx, tile in enumerate(tiles):
        zoom, x, y = tile
        fName = '_'.join(str(part) for part in tile) + '.png'
        fName = os.path.join(tilestore, fName)
        # This progress message was a dangling Python-2 print statement that
        # printed nothing; restored to a real print() call.
        print('[%i/%i] %s' % (idx + 1, len(tiles), fName), end='')
        if not os.path.exists(fName):
            url = source_colap.format(*tile)
            urllib.request.urlretrieve(url, fName)
            print(' ok')
        else:
            print(' cached')
        print(fName)
        # paste
        tmp = Image.open(fName)
        img.paste(tmp, (256 * (x - top_left[0]), 256 * (y - top_left[1])))
    try:
        img.save("img" + "ABC" + ".jpg")
    except Exception:
        print("Cached. So, No operations happened")
    return x_zero_ul, y_zero_ul, x_zero_br, y_zero_br, width, height
# Bounding box (WGS84 degrees) of the area to render.
east = 127.059400
west = 126.86648
north = 37.481263
south = 37.426031
# ############## zoom is 11
# zoom=11
# margin_width = 0.05
# margin_height = 0.05
# ############## zoom is 12
# zoom=12
# margin_width = 0.01
# margin_height = 0.01
############ zoom is 13
zoom = 13
margin_width = 0.005
margin_height = 0.005
# ############ zoom is 14
# zoom = 14
# margin_width = 0.005
# margin_height = 0.005
# Build the tile mosaic (writes imgABC.jpg as a side effect) and get the
# geographic corners of the mosaic plus its pixel size.
xyzero = get_map(east,west,north,south, zoom)
x_zero_ul = xyzero[0]
y_zero_ul = xyzero[1]
x_zero_br = xyzero[2]
y_zero_br = xyzero[3]
width = xyzero[4]
height = xyzero[5]
# Geographic extent of the stitched mosaic (degrees).
wholesize_x = abs(x_zero_ul - x_zero_br)
wholesize_y = abs(y_zero_ul - y_zero_br)
# Margin between each mosaic edge and the requested bounding box (degrees).
dx_west = abs(x_zero_ul - west)
dy_north = abs(y_zero_ul - north)
dx_east = abs(x_zero_br - east)
dy_south = abs(y_zero_br - south)
print(dy_south, dx_east)
# Convert the degree margins to pixel margins by linear scaling.
cut_west = (width * (dx_west)) / wholesize_x
cut_north = (height * (dy_north)) / wholesize_y
cut_east = (width * (dx_east)) / wholesize_x
cut_south = (height * (dy_south)) / wholesize_y
print(cut_south, cut_east)
# Crop the mosaic down to exactly the requested bounding box.
image = Image.open('imgABC.jpg')
box = (cut_west, cut_north, width - cut_east , height - cut_south)
cropped_image = image.crop(box)
cropped_image.save('v3_img_'+str(zoom)+'_.jpg')
print(width,height)
print(cut_west, cut_north, width - cut_east , height - cut_south)
| 27.855263
| 107
| 0.627539
|
4a03e091cd16b8042efd643d1e89990b2d4f9ea2
| 1,697
|
py
|
Python
|
face_recognizer.py
|
siddhartboss/-Real-Time-Face-Mask-Detection-Email-Authentication-with-Age-Prediction
|
d88f7bb0bb5ba89e69c3b645a3f93175c3839cdf
|
[
"MIT"
] | null | null | null |
face_recognizer.py
|
siddhartboss/-Real-Time-Face-Mask-Detection-Email-Authentication-with-Age-Prediction
|
d88f7bb0bb5ba89e69c3b645a3f93175c3839cdf
|
[
"MIT"
] | null | null | null |
face_recognizer.py
|
siddhartboss/-Real-Time-Face-Mask-Detection-Email-Authentication-with-Age-Prediction
|
d88f7bb0bb5ba89e69c3b645a3f93175c3839cdf
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import os

# Load the LBPH face recognizer trained by the companion training script.
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer.yml')
# Haar cascade used to locate faces before recognition.
face_cascade_Path = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(face_cascade_Path)
font = cv2.FONT_HERSHEY_SIMPLEX
id = 0  # NOTE(review): shadows the builtin `id`; holds the predicted label.
# names related to ids: The names associated to the ids: 1 for Mohamed, 2 for Jack, etc...
names = ['None','Barun','Nikhil','Aditya','Tayde','Sawant'] # add a name into this list
#Video Capture
cam = cv2.VideoCapture(0)
cam.set(3, 640)  # capture width
cam.set(4, 480)  # capture height
# Min Height and Width for the window size to be recognized as a face
minW = 0.1 * cam.get(3)
minH = 0.1 * cam.get(4)
while True:
    ret, img = cam.read()
    # Cascade detection works on grayscale frames.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(int(minW), int(minH)),
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # Predict on the face region; a LOWER confidence value is a BETTER match.
        id, confidence = recognizer.predict(gray[y:y + h, x:x + w])
        if (confidence < 100):
            # NOTE(review): indexes `names` by the predicted label; raises
            # IndexError if the model knows more ids than `names` lists.
            id = names[id]
            confidence = " {0}%".format(round(100 - confidence))
        else:
            # Unknown Face
            id = "Who are you ?"
            confidence = " {0}%".format(round(100 - confidence))
        cv2.putText(img, str(id), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
        cv2.putText(img, str(confidence), (x + 5, y + h - 5), font, 1, (255, 255, 0), 1)
    cv2.imshow('camera', img)
    # Escape to exit the webcam / program
    k = cv2.waitKey(10) & 0xff
    if k == 27:
        break
print("\n [INFO] Exiting Program.")
cam.release()
cv2.destroyAllWindows()
| 29.258621
| 90
| 0.605775
|
4a03e09983d0058578d51ce27329ef11e938e61c
| 19,255
|
py
|
Python
|
central_computer/TrackingCode/track.py
|
ACSLab/pheeno_robot_code
|
d9b64ea1a4e3f5b0ddf26a0cbf00e7d5b08df6e5
|
[
"BSD-3-Clause"
] | 2
|
2017-05-08T20:43:43.000Z
|
2018-04-08T02:50:12.000Z
|
central_computer/TrackingCode/track.py
|
ACSLab/pheeno_robot_code
|
d9b64ea1a4e3f5b0ddf26a0cbf00e7d5b08df6e5
|
[
"BSD-3-Clause"
] | null | null | null |
central_computer/TrackingCode/track.py
|
ACSLab/pheeno_robot_code
|
d9b64ea1a4e3f5b0ddf26a0cbf00e7d5b08df6e5
|
[
"BSD-3-Clause"
] | 1
|
2017-07-07T05:20:07.000Z
|
2017-07-07T05:20:07.000Z
|
# Import NumPy Libraries
import numpy as np
from numpy import average as avg
from numpy import subtract as sub
# Import System Libraries
import math
import time
import csv
import sys
from optparse import OptionParser
import socket
# Import OpenCV Libraries
import cv2
# Debug?
DEBUG = False
DISPLAY_TAGS = True
# Colors (BGR codes) for different useful colors
RED = [0, 0, 255]
GREEN = [0, 255, 0]
BLUE = [255, 0, 0]
DARK_BLUE = [255, 51, 51]
YELLOW = [0, 255, 255]
DARK_RED = [0, 0, 170]
MAGENTA = [163, 85, 255]
CUSTOM_COLOR = [163, 85, 255]
# Scaling Pixel Values to CM
CMperPIXEL = 0.1913
FRAMES_PER_SEC = 30
TAG_CENTER_DIST = 8 / CMperPIXEL  # CM adjustment from tag to robot center.
# use the following to run the code profiler
# python -m cProfile camera.py
# Command-line options: where tracking data goes, video capture, tag windows.
parser = OptionParser()
parser.add_option('-o',
                  action='store_true',
                  default=False,
                  dest='stdout',
                  help='Sends collected data to stdout.')
parser.add_option('-t',
                  action='store_true',
                  default=False,
                  dest='track',
                  help='Sends collected data to csvFile.')
parser.add_option('-v', '--vid',
                  action='store_true',
                  default=False,
                  dest='vid',
                  help='Save results in a video file.')
parser.add_option('--tracking',
                  action='store',
                  dest='csvfile',
                  default='experiment1.csv',
                  help='give custom name to tracking file (must use .csv)')
parser.add_option('--vidfile',
                  action='store',
                  dest='vidfile',
                  default='experiment1.avi',
                  help='Give custom name to video output file (must use .avi)')
parser.add_option('--notrack',
                  action='store_false',
                  default=False,
                  dest='track',
                  help='Suppress tracking output file.')
parser.add_option('--notags',
                  action='store_false',
                  default=True,
                  dest='displayTags',
                  help='Prevents individual tag windows from spawning.')
options, args = parser.parse_args()
# Create the capture from the webcam.
camera = cv2.VideoCapture(0)
# set the width and height
camera.set(3, 1920)  # Set the width of the capture
camera.set(4, 1080)  # Set the height of the capture
camera.set(5, 30)  # Set the framerate of the capture
# make a call to time.clock to start the clock (future calls to
# time.clock() will report time since this call)
# NOTE(review): time.clock() was removed in Python 3.8 — this script needs
# time.perf_counter() on modern interpreters.
time.clock()
t0 = time.clock()
if options.track:
    # NOTE(review): 'wb' with csv.writer is Python-2 style; Python 3 expects
    # open(..., 'w', newline='') — confirm which interpreter this targets.
    f = open(options.csvfile, 'wb')
    writer = csv.writer(f)
    writer.writerow(['TagID', 'center_x', 'center_y',
                     'angle (radians from x axis)', 'time', 'Frame Number'])
if options.vid:
    # NOTE(review): `global` at module level is a no-op; OPENSUCCESS is
    # never defined or read elsewhere in this file.
    global OPENSUCCESS
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    out = cv2.VideoWriter(options.vidfile, fourcc,
                          FRAMES_PER_SEC, (1920, 1080))
def drawPointPosition(x0, y0, img, color):
    """Mark the position (x0, y0) on ``img`` with a small filled circle.

    Parameters
    ----------
    x0 : float
        X location for drawn circle.
    y0 : float
        Y location for drawn circle.
    img : obj
        Object frame on which the points will be drawn.
    color : set of BGR values
        Color of the point.
    """
    point = (int(x0), int(y0))
    cv2.circle(img, point, 1, color, 2)
##########################################################
# TRACKING ROBOTS SET UP AND FUNCTIONS
##########################################################
# Initialization Constants
VAL = 60  # binarization threshold used throughout the pipeline
MIN_SIZE = 1000  # contour-area bounds for a candidate tag (pixels^2)
MAX_SIZE = 2200
MIN_VAL = 100  # mean-intensity bounds for a candidate tag
MAX_VAL = 2000
Robots = {}  # begin with an empty dictionary of robots
MAPPING = {'11': 'l', '30': 29, '40': 22, '03': 27, '22': 28,
           '77': 26}  # Map from the tag number : IP address
# NOTE(review): MAPPING is keyed by tag strings while `mapping` is keyed by
# ints — confirm both variants are really needed.
mapping = {11: 'l', 30: 29, 40: 22, 3: 27, 22: 28, 77: 26}
sockets = {}  # cache of open TCP connections, keyed by robot number
frameNum = 0
# All vectors are assumed to be two dimensional
# The three white squares on the binary tag must be on the robots' left side.
HCELLS = 3  # number of horizontal cells on a tag
VCELLS = 3  # number of vertical cells on a tag
class RobotData(object):
    """Tracked pose of a single robot.

    Attributes
    ----------
    center : tuple
        (x, y) pixel position of the robot's tag center.
    orientation : float
        Angle in radians measured from the positive x axis.
    updated : bool
        True when the robot was seen this frame; cleared by ``reset`` so
        stale entries can be pruned on the next pass.
    """

    def __init__(self, center, orientation):
        self.center = center
        self.orientation = orientation
        self.updated = True

    def __repr__(self):
        # Report the center converted from pixels to centimeters.
        cm_center = integerize(
            (self.center[0] * CMperPIXEL, self.center[1] * CMperPIXEL))
        return ("Robot at " + str(cm_center) + " with orientation " +
                str(self.orientation) + ".")

    def update(self, updated_center, updated_angle):
        """Record a fresh observation of this robot."""
        self.center = updated_center
        self.orientation = updated_angle
        self.updated = True

    def reset(self):
        """Clear the per-frame updated flag."""
        self.updated = False
def threshold(src, value=100):
    # Binarize a grayscale image against the global VAL.
    # NOTE(review): the ``value`` parameter is ignored — the global VAL (60)
    # is used instead, so the explicit 132 passed by identify() has no
    # effect. Confirm which threshold is intended before changing this.
    ret, thresh = cv2.threshold(src, VAL, 255, cv2.THRESH_BINARY)
    return thresh
def findAprilTags(threshed, img):
    """Return the contours in the current frame that look like tags.

    Returns a real list (not a lazy ``filter`` object): the caller iterates
    the result more than once (updateDict and then drawTags), and a lazy
    filter would be exhausted after the first pass on Python 3.
    """
    # NOTE(review): this contours the module-global ``thresh`` rather than
    # the ``threshed`` argument; the caller passes the blurred grayscale
    # here while ``thresh`` holds the binarized frame — confirm which input
    # is intended before changing it.
    contourImage, contours, hierarchy = cv2.findContours(
        thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    return list(filter(lambda c: isTag(c, img), contours))
def isTag(c, img):
    """Heuristic tag check: area, mean intensity and aspect ratio must fit."""
    # Guards preserve the original short-circuit order: cheap area test
    # first, then intensity, then the rectangle fit.
    if not (MIN_SIZE < cv2.contourArea(c) < MAX_SIZE):
        return False
    if not (MIN_VAL < averageValue(c) < MAX_VAL):
        return False
    return goodAspectRatio(c)
def goodAspectRatio(c):
    """True when the contour's min-area rect is between 1:1 and 2:1."""
    _, (width, height), _ = cv2.minAreaRect(c)
    ratio = width / height if width > height else height / width
    return 1 < ratio < 2
def averageValue(img):
    """Average of the per-channel pixel totals, normalized by pixel count."""
    height, width = img.shape[:2]
    channel_totals = img.sum(axis=0).sum(axis=0)
    return avg(channel_totals) / (height * width)
def drawTags(tagList, img):
    """ Draw all contours in red, with thickness 2. """
    # -1 tells OpenCV to draw every contour in the list.
    cv2.drawContours(img, tagList, -1, DARK_RED, 2)
def drawCorners(bottom, left, top, right, img):
    """Mark the four corners of a tag's bounding box in blue, thickness 2."""
    # Same draw order as before: bottom, top, left, right.
    for corner in (bottom, top, left, right):
        cv2.circle(img, tuple(corner), 1, BLUE, 2)
# marks each robot with its index and angle on the given image
def drawRobots(img):
    """Overlay each mapped robot's center dot, name and heading arrow."""
    arrow_length = 22
    for index in Robots:
        robot = Robots[index]
        center = integerize(robot.center)
        angle = robot.orientation
        # Skip robots without a usable mapping entry.
        if index not in MAPPING or MAPPING[index] == 'l':
            continue
        cv2.circle(img, integerize(center), 2, RED, 4)
        # Robot's name.
        cv2.putText(img, str(index), (center[0] + 28, center[1]),
                    cv2.FONT_HERSHEY_SIMPLEX, .7, CUSTOM_COLOR, 2)
        # Arrow tip; screen y grows downward, hence the minus sign.
        tip = integerize((center[0] + arrow_length * math.cos(angle),
                          center[1] - arrow_length * math.sin(angle)))
        cv2.line(img, center, tip, (255, 255, 0), 2, 2)
def updateDict(tagList, img, thresh):
    """Decode every candidate tag and refresh the global Robots dict.

    Returns the list of extracted tag images (for optional display).
    Robots not seen since the previous frame are pruned.
    """
    global Robots
    tagViews = []
    for tag in tagList:
        rect = cv2.minAreaRect(tag)
        tagImg = getTagImg(tag, rect, img)
        id_matrix = identify(tagImg)
        # We could not calculate the intensity of the cells, so the image was
        # not a valid tag.
        if id_matrix is None:
            continue
        index = matrixToIndex(id_matrix)
        # The tag did not have three cells on one side which were all light, so
        # the image was not a valid tag.
        if index is None:
            continue
        tagViews.append(tagImg)
        angle = calculateAngle(tag, rect, id_matrix)
        # Constructing RobotData marks this robot as updated for the frame.
        Robots[index] = RobotData(rect[0], angle)
    # Remove any robots from our list that were not updated
    Robots = {key: rob for key, rob in Robots.items() if rob.updated}
    # Reset all robots to their 'non-updated' status for the next iteration.
    for r in Robots.values():
        r.reset()
    return tagViews
def getTagImg(tag, rect, img):
    # extracts the image of the tag from the main image, and rotates it
    # appropriately
    bottom, left, top, right = cv2.boxPoints(rect)
    # drawCorners(bottom, left, top, right, imageTrack)
    try:
        # Measure the tag's rotation from whichever edge is the long one.
        if dist(left, top) < dist(left, bottom):
            pos_slope = False
            theta = math.atan((left[1] - bottom[1]) / (left[0] - bottom[0]))
        else:
            pos_slope = True
            theta = math.atan((right[1] - bottom[1]) / (right[0] - bottom[0]))
    except ZeroDivisionError:
        # pos_slope is always bound here: each branch assigns it before the
        # division that can raise.
        theta = math.atan(float('inf'))  # Slope is pi/2.
    height = dist(right, bottom)
    width = dist(right, top)
    if pos_slope:
        width, height = height, width
    fcenter = rect[0][0], rect[0][1]
    # NOTE(review): width/height are floats here, while cv2.warpAffine in
    # subimage expects an integer output size — confirm this works with the
    # OpenCV build in use.
    return subimage(img, fcenter, theta, width, height)
# Developed from code by user xaedes of Stack Overflow:
# http://stackoverflow.com/questions/11627362/how-to-straighten-a-rotated-rectangle-area-of-an-image-using-opencv-in-python
def subimage(image, center, theta, width, height):
    """Extract a width x height patch centered at ``center``, rotated by theta.

    Uses an inverse affine warp so the returned patch comes out axis-aligned.
    """
    # Rotated basis vectors of the patch.
    v_x = (np.cos(theta), np.sin(theta))
    v_y = (-np.sin(theta), np.cos(theta))
    # Top-left corner of the patch in source-image coordinates.
    s_x = center[0] - v_x[0] * (width / 2) - v_y[0] * (height / 2)
    s_y = center[1] - v_x[1] * (width / 2) - v_y[1] * (height / 2)
    new_mapping = np.array([[v_x[0], v_y[0], s_x],
                            [v_x[1], v_y[1], s_y]])
    return cv2.warpAffine(image, new_mapping, (width, height),
                          flags=cv2.WARP_INVERSE_MAP,
                          borderMode=cv2.BORDER_REPLICATE)
def identify(img):
    """Sample the 3x3 cell grid of a tag image into a boolean matrix.

    True means the cell is light. Intended to return None for degenerate
    cells — but see the NOTE below.
    """
    x_buff = 6  # pixels of buffer zone in x
    y_buff = 3  # pixels of buffer zone in y
    matrix = np.zeros((VCELLS, HCELLS), dtype=bool)
    # NOTE(review): the 132 passed here is ignored; threshold() uses the
    # global VAL instead.
    threshed = threshold(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), 132)
    h, w, _ = np.shape(img)
    x, y = 1, 1
    # Cell pitch inside the buffered region.
    dx = int((w - 2 * x_buff) / float(HCELLS))
    dy = int((h - 2 * y_buff) / float(VCELLS))
    for i in range(HCELLS):
        for j in range(VCELLS):
            # Because we're interested in the white squares now
            white = not isBlack(threshed,
                                (x + x_buff + i * dx, y + y_buff + j * dy),
                                (x + x_buff + (i + 1) * dx,
                                 y + y_buff + (j + 1) * dy), dx * dy, img)
            # NOTE(review): `not` is applied before the None check, so
            # `white` is never None (isBlack returning None becomes True)
            # and the `return None` branch below is unreachable — the check
            # was probably meant to run on isBlack's raw result.
            if white is not None:
                matrix[j, i] = white
            else:
                return None
    return matrix
def largestContour(contour_list):
    """Return the contour with the greatest area, or None when every
    contour has zero area (or the list is empty)."""
    best = None
    best_area = 0
    for candidate in contour_list:
        area = cv2.contourArea(candidate)
        if area > best_area:
            best = candidate
            best_area = area
    return best
def isBlack(img, p1, p2, area, defacing):
    """Return True when the cell spanning p1..p2 of binary image ``img`` is
    mostly dark; also outlines the cell in red on ``defacing`` for debugging.

    Returns None when ``area`` is zero (degenerate cell / edge motion).
    """
    # Dark squares will have an intensity below this percentage.
    DARKNESS_THRESHOLD = 0.3
    intensity = 0
    p1, p2 = integerize(p1), integerize(p2)
    for x in range(p1[0], p2[0]):
        for y in range(p1[1], p2[1]):
            # img is binary, so each lit pixel contributes exactly 1.
            intensity += bool(img[y, x])
            # Paint the cell border into the debug image.
            if x in (p1[0], p2[0] - 1) or y in (p1[1], p2[1] - 1):
                defacing[y, x] = RED
    # This means that we are picking up some edge motion.
    if area == 0:
        return None
    filled = (intensity / float((p2[1] - p1[1]) *
                                (p2[0] - p1[0]))) < DARKNESS_THRESHOLD
    return filled
# calculates the cartesian distance between two points
def dist(p1, p2):
    """Euclidean distance between two points."""
    delta = sub(p1, p2)
    return np.linalg.norm(delta)
def calculateAngle(tag, rect, id_matrix):
    """Compute the robot heading (radians) from the tag's box and id matrix.

    Returns None implicitly when neither the top nor the bottom row of the
    tag is fully light (such matrices are filtered by matrixToIndex first).
    """
    bottom, left, top, right = cv2.boxPoints(rect)
    if dist(left, top) < dist(left, bottom):
        if left[0] == bottom[0]:
            theta = math.atan(-float('inf'))  # Avoid division by zero.
        else:
            theta = math.atan2((bottom[1] - left[1]), (left[0] - bottom[0]))
        theta -= math.pi / 2
    else:
        if right[0] == bottom[0]:
            theta = math.atan(-float('inf'))  # Avoid division by zero.
        else:
            # NOTE(review): this branch tests `right` above but still
            # measures the angle from `left`/`bottom` — confirm whether it
            # should use `right` instead.
            theta = math.atan2((bottom[1] - left[1]), (left[0] - bottom[0]))
    # Top is light.
    if id_matrix[0, 0] and id_matrix[1, 0] and id_matrix[2, 0]:
        return theta
    # Bottom is light.
    elif id_matrix[0, 2] and id_matrix[1, 2] and id_matrix[2, 2]:
        return theta + math.pi
    # No else case because any such matricies would already be filtered out by
    # matrixToIndex (returns None)
def binaryDigitsToDecimalString(L):
    """Read a sequence of truthy/falsy digits as binary; return the decimal
    value as a string."""
    bits = ''.join(str(int(digit)) for digit in L)
    return str(int(bits, 2))
def matrixToIndex(matrix):
    """Decode a 3x3 tag matrix into its two-digit string index.

    A tag is valid only when one full side column (left or right) is all
    light; that column fixes the orientation used to read the digits.
    Returns None for invalid matrices.
    """
    if np.all(matrix[:, 0]):
        # Left column lit: read the right column then the middle column.
        return (binaryDigitsToDecimalString(matrix[:, 2]) +
                binaryDigitsToDecimalString(matrix[:, 1]))
    if np.all(matrix[:, 2]):
        # Right column lit: the tag is upside down, so read both reversed.
        return (binaryDigitsToDecimalString(matrix[:, 0][::-1]) +
                binaryDigitsToDecimalString(matrix[:, 1][::-1]))
    return None
def fixRobotData(robotID):
    """Translate a tag center to the robot center for a mapped robot."""
    global Robots
    if robotID in MAPPING and MAPPING[robotID] != 'l':
        center = Robots[robotID].center
        # NOTE(review): `angle` is never used and TAG_CENTER_DIST is never
        # applied — the center is copied unchanged, so this function is
        # currently a no-op. The intended offset along `angle` appears to
        # be unimplemented; confirm before completing it.
        angle = Robots[robotID].orientation
        trueCenter = (center[0], center[1])
        Robots[robotID].center = trueCenter
def integerize(tup):
    """Return *tup* with every element truncated to int, as a tuple."""
    return tuple(int(element) for element in tup)
def writeData(frameNum):
    """Emit one row per tracked robot (position in cm, angle, time, frame).

    Writes to stdout and/or the csv writer depending on the parsed options.
    """
    for robot_index in Robots:
        robot = Robots[robot_index]
        centerxCentimeters = robot.center[0] * CMperPIXEL
        centeryCentimeters = robot.center[1] * CMperPIXEL
        if options.stdout:
            # NOTE(review): sys.excepthook is normally always set, so this
            # guard is effectively dead — presumably it was meant to detect
            # a closed output pipe.
            if not sys.excepthook:
                exit(0)
            try:
                print(robot_index, centerxCentimeters, centeryCentimeters,
                      robot.orientation, time.clock(), frameNum)
            except IOError:
                # The downstream consumer of our pipe went away.
                exit(0)
        if options.track:
            writer.writerow([robot_index, centerxCentimeters,
                             centeryCentimeters, robot.orientation,
                             time.clock(), frameNum])
def tagToRobot(ID):
    """Map a tag id to its robot number, or None when the tag is unknown."""
    return mapping.get(int(ID))
def _dispatch(robots, command):
    """Send *command* to every known robot in *robots*, caching connections.

    ``robots`` holds rows of [tagID, x_cm, y_cm, angle_deg, time, frame];
    rows whose tag is not in ``mapping`` are skipped.  Open sockets are
    cached in the module-level ``sockets`` dict and reused across calls.
    Exits the program when a robot cannot be reached.
    """
    global sockets
    bots = [list(entry) for entry in robots]
    bots = [b for b in bots if b[0] in mapping]
    # Process in timestamp order, as before.
    bots = sorted(bots, key=lambda bot: bot[4])
    for bot in bots:
        robotNumber = tagToRobot(int(bot[0]))
        if robotNumber is None:
            continue
        if robotNumber in sockets:
            safesend(command, sockets[robotNumber])
        else:
            dest = '192.168.119.{0}'.format(robotNumber)
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                s.settimeout(4)
                s.connect((dest, 12345))
                s.settimeout(None)
            except socket.error:
                print("Could not connect to", dest)
                exit(0)
            safesend(command, s)
            sockets[robotNumber] = s
    return


def sendCommand(robots):
    """Broadcast the move command ('M!') to every robot in *robots*.

    NOTE(review): the original built ``'M!'.format(x, y, angle)`` — a format
    string with no placeholders — so the pose was silently dropped and the
    payload is always 'M!'.  Behavior is preserved here; the format was
    probably meant to embed the pose.
    """
    _dispatch(robots, 'M!')


def sendStopCommand(robots):
    """Broadcast the stop command ('S!') to every robot in *robots*."""
    _dispatch(robots, 'S!')
def safesend(msg, sock):
    """Send *msg* over *sock* in full, looping until every byte is accepted.

    ``send()`` may accept only part of the payload, so the remainder is
    re-sent until done.  Raises RuntimeError when send() returns 0, which
    signals a broken connection.

    The second parameter was renamed from ``socket`` to ``sock`` so it no
    longer shadows the ``socket`` module; all call sites in this file pass
    it positionally, so the rename is backward-compatible.
    """
    totalSent = 0
    while totalSent < len(msg):
        sent = sock.send(msg[totalSent:])
        if sent == 0:
            raise RuntimeError("socket connection broken")
        totalSent = totalSent + sent
    print("Sent", msg)
def closeAll():
    """Close every cached robot connection in the module-level ``sockets``."""
    for connection in sockets.values():
        connection.close()
#########################################################
# MAIN
#########################################################
# Main capture/track/command loop: grab a frame, detect AprilTags, convert
# tag poses to robot poses, draw/display, then stream commands and log data.
while True:
    # Determine Time Step
    deltaT = time.clock() - t0
    t0 += deltaT
    # Capture Frame
    ret, image = camera.read()
    # Operations on the frame
    thresh = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(thresh, (3, 3), 0)
    cv2.threshold(gray, VAL, 255, cv2.THRESH_BINARY, thresh)
    # Find Tags
    tagList = findAprilTags(gray, image)
    tagViews = updateDict(tagList, image, thresh)
    # Translate Tag Centers to Robot Centers.
    for robot_index in Robots:
        fixRobotData(robot_index)
    # Pose rows: [tag_id, x_cm, y_cm, orientation_deg, timestamp, frame_num].
    robotList = []
    for robot_index in Robots:
        if robot_index not in MAPPING:
            continue
        robot = Robots[robot_index]
        robotList.append([int(robot_index), robot.center[0] * CMperPIXEL,
                          robot.center[1] * CMperPIXEL,
                          robot.orientation * 180 / math.pi + 180,
                          time.clock(), frameNum])
    # Draw Tags.
    drawTags(tagList, image)
    drawRobots(image)
    # Resize the image to fit screen.
    resizedImage = cv2.resize(image, (1280, 720))
    # Display the images.
    cv2.imshow('Tracking!', resizedImage)
    # Let the detector settle for ~50 frames, then command every 10th frame.
    if frameNum > 50:
        if frameNum % 10 == 0:
            # NOTE(review): sendCommand(robotList) is called once per robot
            # but always with the full robotList, so the same commands are
            # re-sent len(Robots) times; confirm whether one call suffices.
            for robot_index in Robots:
                robot = Robots[robot_index]
                sendCommand(robotList)
    if options.track or options.stdout:
        writeData(frameNum)
    if options.vid:
        # Stamp the wall-clock time on the recorded frame.
        cv2.putText(image, '{:.2f}'.format(time.clock()),
                    (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 255, 0))
        out.write(image)
    # Quit on 'q' or after 30 s of wall-clock time.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    if time.clock() >= 30:
        break
    frameNum += 1
# Clean Up
if options.track:
    f.close()
# NOTE(review): one stop command per robot, each with the full robotList —
# same redundancy as the movement commands above.
for robot_index in Robots:
    sendStopCommand(robotList)
closeAll()
camera.release()
cv2.destroyAllWindows()
| 29.806502
| 123
| 0.580317
|
4a03e107cd01bd8ce3ff2d4fbd2be87ab74212a4
| 1,487
|
py
|
Python
|
setup.py
|
masora1030/eigoyurusan
|
fa82044a2dc2f0f1f7454f5394e6d68fa923c289
|
[
"MIT"
] | 9
|
2020-07-16T08:14:24.000Z
|
2021-02-12T04:16:36.000Z
|
setup.py
|
masora1030/eigoyurusan
|
fa82044a2dc2f0f1f7454f5394e6d68fa923c289
|
[
"MIT"
] | 3
|
2020-07-22T15:21:04.000Z
|
2021-11-23T23:21:30.000Z
|
setup.py
|
masora1030/eigoyurusan
|
fa82044a2dc2f0f1f7454f5394e6d68fa923c289
|
[
"MIT"
] | 2
|
2020-07-22T21:51:26.000Z
|
2021-05-09T20:36:56.000Z
|
"""The setup script."""
from __future__ import absolute_import
from __future__ import unicode_literals
import setuptools
# Load the long description shown on PyPI from the project README.
with open('README.md') as readme_source:
    long_description = readme_source.read()

# Runtime dependencies.
install_requires = [
    'Click>=7.0',
    'setuptools',
    'pdfminer',
    'selenium',
    'arxiv',
    'markdown',
]

setuptools.setup(
    name="eigoyurusan",
    version="0.1.8",
    author="Sora Takashima",
    author_email="soraemonpockt@gmail.com",
    description="English is too difficult for me.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/masora1030/eigoyurusan",
    packages=setuptools.find_packages(),
    include_package_data=True,
    # Trove classifiers describing audience, maturity, and platform.
    classifiers=[
        "Intended Audience :: Education",
        "Topic :: Text Processing :: Linguistic",
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: MacOS",
    ],
    install_requires=install_requires,
    license="MIT license",
    keywords="eigoyurusan, translate, translator, paper, PDF",
    project_urls={
        "Projects": "https://github.com/users/masora1030/projects/1",
        "Source": "https://github.com/masora1030/eigoyurusan",
    },
    # Console entry point: `eigoyurusan` runs eigoyurusan.eigoyurusan:main.
    entry_points="""
    # -*- Entry points: -*-
    [console_scripts]
    eigoyurusan = eigoyurusan.eigoyurusan:main
    """,
)
| 30.346939
| 69
| 0.619368
|
4a03e35bbbb9413105b97da2ba115b7490468334
| 414
|
py
|
Python
|
config.py
|
ndonyemark/TheNewsAPI
|
3a22877a4fe027daf17198b413a111829bf1908f
|
[
"Unlicense"
] | null | null | null |
config.py
|
ndonyemark/TheNewsAPI
|
3a22877a4fe027daf17198b413a111829bf1908f
|
[
"Unlicense"
] | null | null | null |
config.py
|
ndonyemark/TheNewsAPI
|
3a22877a4fe027daf17198b413a111829bf1908f
|
[
"Unlicense"
] | null | null | null |
import os
class Config:
    """Shared application configuration, sourced from the environment."""

    # Secrets come from the environment so they never live in source control.
    SECRET_KEY = os.environ.get("SECRET_KEY")
    API_KEY = os.environ.get("API_KEY")

    # News API endpoint templates, filled in with str.format at request time.
    NEWS_BASE_URL = "https://newsapi.org/v2/sources?apiKey={}"
    NEWS_ARTICLES_URL = "https://newsapi.org/v2/everything?q={}&apiKey={}"
class ProdConfig(Config):
    """Production configuration; no overrides beyond the shared defaults."""
    pass
class DevConfig(Config):
    """Development configuration: runs the app with debug mode enabled."""
    DEBUG = True
# Map configuration names (selected at app start-up) to config classes.
config_options = dict(
    development=DevConfig,
    production=ProdConfig,
)
| 18.818182
| 74
| 0.673913
|
4a03e3afb90f6f942c4d4ec9cf954958de50aaf5
| 6,659
|
py
|
Python
|
django_smartstaticfiles/settings.py
|
niall-byrne/django-smartstaticfiles
|
0573eb758d044610c062fb0b3c3822202621e9f4
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2018-06-21T19:40:12.000Z
|
2018-06-21T19:40:12.000Z
|
django_smartstaticfiles/settings.py
|
niall-byrne/django-smartstaticfiles
|
0573eb758d044610c062fb0b3c3822202621e9f4
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2019-01-18T21:13:03.000Z
|
2019-01-19T22:19:38.000Z
|
django_smartstaticfiles/settings.py
|
niall-byrne/django-smartstaticfiles
|
0573eb758d044610c062fb0b3c3822202621e9f4
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2019-11-27T10:40:05.000Z
|
2019-11-27T10:40:05.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import setting_changed
from django.utils.module_loading import import_string
from django.utils.six import iteritems, iterkeys
# Name of the Django settings entry that configures this app.
settings_attr = 'SMARTSTATICFILES_CONFIG'
# Defaults merged underneath the user-supplied SMARTSTATICFILES_CONFIG dict.
settings_defaults = {
    # Whether to enable JavaScript minification.
    'JS_MIN_ENABLED': False,
    # Whether to enable CSS minification.
    'CSS_MIN_ENABLED': False,
    # File patterns for matching JavaScript assets (in relative URL without
    # STATIC_URL prefix)
    'JS_FILE_PATTERNS': ['*.js'],
    # File patterns for matching CSS assets (in relative URL without
    # STATIC_URL prefix)
    'CSS_FILE_PATTERNS': ['*.css'],
    # Dotted string of the module path and the callable for JavaScript
    # minification. The callable should accept a single argument of a string
    # of the content of original JavaScript, and return a string of minified
    # content. (Notice that loud comments such as /*! ... */ must be preserved
    # in the result so as to make JavaScript asset URLs replacement work.)
    # The result will be cached and reused when possible.
    'JS_MIN_FUNC': 'rjsmin.jsmin',
    # Extra keyword arguments which are sent to the callable for JavaScript
    # minification. They are sent after the argument of a string of the
    # content of original JavaScript. If no keyword arguments are sent, set it
    # to an empty dict ({}) or None.
    'JS_MIN_FUNC_KWARGS': {
        'keep_bang_comments': True,
    },
    # Dotted string of the module path and the callable for CSS
    # minification. The callable should accept a single argument of
    # string which contains the content of original CSS, and return a
    # string of minified content. The result will be cached and
    # reused when possible.
    'CSS_MIN_FUNC': 'rcssmin.cssmin',
    # Extra keyword arguments which are sent to the callable for CSS
    # minification. They are sent after the argument of a string of the
    # content of original CSS. If no keyword arguments are sent, set it
    # to an empty dict ({}) or None.
    'CSS_MIN_FUNC_KWARGS': {
        'keep_bang_comments': True,
    },
    # A regular expression (case-sensitive by default) which is used to
    # search against assets (in relative URL without STATIC_URL prefix). The
    # mathced assets won't be minified. Set it to None to ignore no assets.
    # (Assets with .min.js or .min.css extensions are always ignored.)
    'RE_IGNORE_MIN': None,
    # Whether to enable deletion of unhashed files.
    'DELETE_UNHASHED_ENABLED': True,
    # Whether to enable deletion of intermediate hashed files.
    'DELETE_INTERMEDIATE_ENABLED': True,
    # A regular expression (case-sensitive by default) which is used to
    # search against assets (in relative URL without STATIC_URL prefix). The
    # matched assets won't be hashed. Set it to None to ignore no assets.
    'RE_IGNORE_HASHING': None,
    # Whether to enable JavaScript asset URLs replacement.
    'JS_ASSETS_REPL_ENABLED': False,
    # Tag name of loud comments used in JavaScript asset URLs replacement.
    'JS_ASSETS_REPL_TAG': 'rev',
    # Whether to remove one trailing newline (if presents) after each
    # replaced URL in JavaScript. This is effective only if "JS_MIN_ENABLED"
    # is set to True. This fixes the problems and annoyances caused by a
    # deliberately added newline at the end of each loud comment by certain
    # minification libraries (e.g. jsmin).
    'JS_ASSETS_REPL_TRAILING_FIX': False,
}
# Lazily-built effective settings; None means "not built yet" (populated by
# setup_settings_cache(), reset by clear_settings_cache()).
settings_cache = None
def setup_settings_cache():
    """Build the module-level settings cache on first use.

    Merges SMARTSTATICFILES_CONFIG over the defaults, imports the dotted
    minifier callables, and pre-compiles the ignore regexes.  No-op when
    the cache has already been built.

    Raises ImproperlyConfigured when the setting is not a dict or a regex
    value does not compile.
    """
    global settings_cache
    if settings_cache is None:
        try:
            _settings = getattr(settings, settings_attr)
        except AttributeError:
            # Setting absent: run entirely on defaults.
            settings_cache = {}
        else:
            try:
                settings_cache = dict(_settings)
            except (TypeError, ValueError):
                raise ImproperlyConfigured(
                    'setting "%s" must be a dict' % settings_attr
                )
        # Set default values
        for key, value in iteritems(settings_defaults):
            settings_cache.setdefault(key, value)
        # Import modules from dotted strings
        if settings_cache['JS_MIN_ENABLED']:
            settings_cache['JS_MIN_FUNC'] = \
                import_string(settings_cache['JS_MIN_FUNC'])
        if settings_cache['CSS_MIN_ENABLED']:
            settings_cache['CSS_MIN_FUNC'] = \
                import_string(settings_cache['CSS_MIN_FUNC'])
        # Compile possible regular expressions
        # RE_IGNORE_MIN only matters when some minification is enabled.
        regex_keys_to_cache = ['RE_IGNORE_HASHING']
        if settings_cache['JS_MIN_ENABLED'] or settings_cache['CSS_MIN_ENABLED']:
            regex_keys_to_cache.append('RE_IGNORE_MIN')
        for key in regex_keys_to_cache:
            regex = settings_cache.get(key, None)
            if regex:
                try:
                    settings_cache[key] = re.compile(regex)
                except Exception as err:
                    raise ImproperlyConfigured(
                        'key "%s" in setting "%s" is not a valid regular '
                        'expression: %s' % (key, settings_attr, err)
                    )
            elif regex is not None:
                # Falsy but not None (e.g. ''): normalise to None (no ignores).
                settings_cache[key] = None
def clear_settings_cache():
    """Drop the cached settings; the next access rebuilds them."""
    global settings_cache
    settings_cache = None
def get_cached_setting_key(key):
    """Return the cached value for *key*, building the cache if needed."""
    setup_settings_cache()
    return settings_cache[key]
def settings_changed_handler(setting, **kwargs):
    """Invalidate the cache whenever SMARTSTATICFILES_CONFIG changes."""
    if setting == settings_attr:
        clear_settings_cache()
# Keep the cache consistent under override_settings / test overrides.
setting_changed.connect(settings_changed_handler)
class CachedSettingsMixin(object):
    """Storage mixin that injects the JavaScript URL-replacement pattern.

    Must run before the parent storage class consumes ``self.patterns``,
    hence the append in __init__ prior to super().__init__.
    """
    def __init__(self, *args, **kwargs):
        self.update_patterns()
        super(CachedSettingsMixin, self).__init__(*args, **kwargs)
    def update_patterns(self):
        # js_assets_repl_enabled / js_assets_repl_tag are properties added
        # dynamically below (see the SettingProxy loop at module bottom).
        if not self.js_assets_repl_enabled:
            return
        esc_tag = re.escape(self.js_assets_repl_tag)
        # Matches  /*! tag(args) */ 'url' /*! endtag */  plus an optional
        # trailing newline; the replacement re-quotes the processed URL.
        self.patterns += (
            ("*.js", (
                (r"""(/\*!\s*%s(?:\((.*?)\))?\s*\*/\s*['"](.*?)['"]\s*/\*!\s*end%s\s*\*/(\n)?)"""
                 % (esc_tag, esc_tag),
                 """'%s'"""),
            )),
        )
class SettingProxy(object):
    """Callable bound to one settings key; used below as a property getter."""

    def __init__(self, key):
        self.key = key

    def __call__(self, instance):
        # *instance* is ignored: the value comes from the settings cache.
        return get_cached_setting_key(self.key)
# Dynamically create properties, whose names are lower-cased keys of
# settings_defaults
# e.g. 'JS_MIN_ENABLED' becomes CachedSettingsMixin.js_min_enabled.
for key in iterkeys(settings_defaults):
    setattr(CachedSettingsMixin, key.lower(), property(SettingProxy(key)))
| 35.994595
| 97
| 0.66091
|
4a03e40f196f46b9f400217ef023628f758ece43
| 376
|
py
|
Python
|
foobar_end.py
|
alexroat/foobar_challange
|
8e5ac6dd88fad710b06419a12186ba29be1735bd
|
[
"MIT"
] | null | null | null |
foobar_end.py
|
alexroat/foobar_challange
|
8e5ac6dd88fad710b06419a12186ba29be1735bd
|
[
"MIT"
] | null | null | null |
foobar_end.py
|
alexroat/foobar_challange
|
8e5ac6dd88fad710b06419a12186ba29be1735bd
|
[
"MIT"
] | null | null | null |
import base64
# XOR-obfuscated, base64-encoded payload and the repeating key that hides it.
MESSAGE = '''
GksWDREMBAcSS0VCUkgGBgQNEV9eT0YXDgAJHRMIFBFGTF9YVQoSAAQJCB0WSE1URgkDHh0dFQdG TF9YVQYPFxMJAREQAwRTTUxCGREHCBEXCQgdHBtGVFtMQg0cAw4XCgkBX15PRgYADgcRBhxGVFtM QgsTCQRTTUxCHh0ARlRbTEIPGwFAUxw=
'''
KEY = 'alexroat'
# Decode the payload, then XOR every byte against the repeating key to
# recover one character per byte.
result = [chr(byte ^ ord(KEY[index % len(KEY)]))
          for index, byte in enumerate(base64.b64decode(MESSAGE))]
print(''.join(result))
| 26.857143
| 186
| 0.800532
|
4a03e4cb9eca36ed4ffe7b488ed9f4555e5b0597
| 1,690
|
py
|
Python
|
pettingzoo/gamma/prison/manual_control.py
|
AnanthHari/PettingZoo
|
c147c2992a067fd529570db0bea6a0324f01ee6e
|
[
"MIT"
] | null | null | null |
pettingzoo/gamma/prison/manual_control.py
|
AnanthHari/PettingZoo
|
c147c2992a067fd529570db0bea6a0324f01ee6e
|
[
"MIT"
] | null | null | null |
pettingzoo/gamma/prison/manual_control.py
|
AnanthHari/PettingZoo
|
c147c2992a067fd529570db0bea6a0324f01ee6e
|
[
"MIT"
] | null | null | null |
import pygame
import numpy as np
def manual_control(**kwargs):
    """Drive the prison environment interactively from the keyboard.

    Controls: a/d select the column, w/s select the row of the active
    prisoner; j/k move that prisoner left/right; Esc quits.  Extra keyword
    arguments are forwarded to the environment constructor.
    """
    from .prison import env as _env
    env = _env(**kwargs)
    env.reset()
    x = 0
    y = 0
    while True:
        # Action 1 is the per-frame default for all 8 prisoners; j/k below
        # overwrite it with 0 (left) or 2 (right) for the selected one.
        agent_actions = np.array([1 for _ in range(8)])
        # NOTE(review): num_actions is tallied but never read.
        num_actions = 0
        test_done = False
        for event in pygame.event.get():
            # wasd to switch prisoner, jk to move left and right
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_a:
                    x = 0
                elif event.key == pygame.K_d:
                    x = 1
                elif event.key == pygame.K_w:
                    y = max(0, y - 1)
                elif event.key == pygame.K_s:
                    y = min(3, y + 1)
                elif event.key == pygame.K_j:
                    num_actions += 1
                    agent_actions[env.convert_coord_to_prisoner_id(
                        (x, y))] = 0
                elif event.key == pygame.K_k:
                    num_actions += 1
                    agent_actions[env.convert_coord_to_prisoner_id(
                        (x, y))] = 2
                elif event.key == pygame.K_ESCAPE:
                    test_done = True
        actions = dict(zip(env.agents, agent_actions))
        for i in env.agents:
            # NOTE(review): assumes env.last() returns a 3-tuple — newer
            # PettingZoo versions return 4 values; confirm the pinned version.
            reward, done, info = env.last()
            if reward != 0:
                print("Agent {} was reward {}".format(i, reward))
            if done:
                test_done = True
            action = actions[i]
            env.step(action, observe=False)
        env.render()
        if test_done:
            break
    env.close()
# Allow running this module directly as an interactive demo.
if __name__ == "__main__":
    manual_control()
| 30.727273
| 67
| 0.468047
|
4a03e52c12f79002669b1f8333aa0fa131bd481e
| 657
|
py
|
Python
|
Dataset/Leetcode/train/2/115.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/train/2/115.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/train/2/115.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
class Solution:
    def XXX(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Add two non-negative integers stored as linked lists of digits in
        reverse order (least-significant digit first); return the sum in the
        same reversed-list form.

        Fixes: the original raised ValueError (``int('')``) whenever either
        input list was empty, and carried an unreachable ``else: return l1``
        branch (the rebuilt digit list is never empty once computed).
        """
        l1_digits = []
        l2_digits = []
        # Collect each number's digits (stored least-significant first).
        while l1 or l2:
            if l1:
                l1_digits.append(l1.val)
                l1 = l1.next
            if l2:
                l2_digits.append(l2.val)
                l2 = l2.next
        if not l1_digits and not l2_digits:
            # Both inputs empty: nothing to add.
            return None
        # Reverse to most-significant-first and convert; an absent operand
        # contributes 0.
        num1 = int(''.join(str(d) for d in reversed(l1_digits)) or '0')
        num2 = int(''.join(str(d) for d in reversed(l2_digits)) or '0')
        # Rebuild the reversed-digit list: walking the decimal string from
        # most- to least-significant while prepending leaves the head at the
        # least-significant digit, as required.
        head = None
        for digit in str(num1 + num2):
            head = ListNode(int(digit), head)
        return head
| 29.863636
| 107
| 0.430746
|
4a03e5d5c1b486a2361dc7fc146f5549b21fa7d5
| 12,491
|
py
|
Python
|
tests/webuploader/test_import_study.py
|
EBI-Metagenomics/ebi-metagenomics-api
|
1f028902fe493583c5c8191dd5dae92cca9e15a9
|
[
"Apache-2.0"
] | null | null | null |
tests/webuploader/test_import_study.py
|
EBI-Metagenomics/ebi-metagenomics-api
|
1f028902fe493583c5c8191dd5dae92cca9e15a9
|
[
"Apache-2.0"
] | null | null | null |
tests/webuploader/test_import_study.py
|
EBI-Metagenomics/ebi-metagenomics-api
|
1f028902fe493583c5c8191dd5dae92cca9e15a9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020 EMBL - European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import patch
import pytest
from django.core.exceptions import ObjectDoesNotExist
from django.utils import timezone
from emgapi import models as emg_models
from emgapianns.management.commands.import_study import Command
from emgena import models as ena_models
def mock_ena_run_study(*args, **kwargs):
    """Return a fake ENA run study plus an empty publication list.

    Mimics the (study, publications) tuple produced by
    StudyImporter._fetch_study_metadata for study ERP117125.

    Fix: the title and description were previously split across adjacent
    string literals *without* enclosing parentheses, so everything after the
    first literal was a no-op expression statement and silently dropped.
    The continuations are now joined via implicit concatenation inside
    parentheses.
    """
    study = ena_models.RunStudy()
    study.study_id = "ERP117125"
    study.project_id = "PRJEB34249"
    study.study_status = "public"
    study.center_name = "UNIVERSITY OF CAMBRIDGE"
    study.hold_date = None
    study.first_created = "2019-09-04 11:23:26"
    study.last_updated = "2019-09-04 11:23:26"
    study.study_title = (
        "Dysbiosis associated with acute helminth infections in herbivorous youngstock - "
        "observations and implications"
    )
    study.study_description = (
        "This study investigates, for the first time, the associations between acute "
        "infections by GI helminths and the faecal microbial and metabolic profiles of "
        "a cohort of equine youngstock, prior to and following treatment with "
        "parasiticides (ivermectin)."
    )
    study.submission_account_id = "Webin-50804"
    study.pubmed_id = ""
    return study, []
def mock_ncbi_run_study_SRP000125():
    """Build a fake NCBI-harvested run study (SRP000125).

    Useful tests for this case: a comma-separated list of pubmed ids.
    Returns the (study, publications) tuple shape the importer expects.
    """
    study = ena_models.RunStudy()
    field_values = {
        "study_id": "SRP000125",
        "project_id": "PRJNA17765",
        "study_status": "public",
        "center_name": "SDSU Center for Universal Microbial Sequencing",
        "hold_date": None,
        "first_created": "2010-02-26",
        "last_updated": "2017-09-25",
        "study_title": "Marine phages from the Gulf of Mexico",
        "study_description": (
            "A combined set of 41 samples was isolated by the Suttle Laboratory from 13 "
            "different sites in the Gulf of Mexico between June 1, 1994 and July 31, 2001. "
            "The phage fraction was purified and sequenced using pyrophosphate sequencing "
            "(454 Life Sciences). This is part of a global ocean survey of phage and "
            "virus sequences. 454 sequence data is available from the Short Read Archive "
            '(SRA): <a href="ftp://ftp.ncbi.nih.gov/pub/TraceDB/ShortRead/SRA000408 '
            '">SRA000408</a>. Metagenomics SEED ID: 4440304.3 Nature paper ID: 32 '
            "The WGS project can be found using the Project data link."
        ),
        "submission_account_id": "Webin-842",
        "pubmed_id": "17090214,18337718",
    }
    for field, value in field_values.items():
        setattr(study, field, value)
    return study, []
def get_ena_project_mock(center_name):
    """Return a fake ENA Project (PRJEB34249) owned by *center_name*."""
    mock_project = ena_models.Project()
    mock_project.project_id = "PRJEB34249"
    mock_project.center_name = center_name
    return mock_project
def mock_ncbi_run_study_SRP034734():
    """Build a fake NCBI-harvested run study (SRP034734).

    Useful tests for this case: pubmed_id of None, and a BioProject-style
    center name.  Returns the (study, publications) tuple shape the
    importer expects.
    """
    study = ena_models.RunStudy()
    field_values = {
        "study_id": "SRP034734",
        "project_id": "PRJNA218849",
        "study_status": "public",
        "center_name": "Veer Narmad South Gujarat University",
        "hold_date": None,
        "first_created": "2014-01-09",
        "last_updated": "2017-07-10",
        "study_title": "Lonar Lake sediment Metagenome",
        "study_description": (
            "Understanding the relevance of bacterial and archaeal diversity in the "
            "soda lake sediments by the culture independent approach using bTEFAP. "
            "Lonar Lake is a saline soda lake located at Lonar in Buldana district, "
            "Maharashtra State, India, which was created by a meteor impact."
        ),
        "submission_account_id": "Webin-842",
        "pubmed_id": None,
    }
    for field, value in field_values.items():
        setattr(study, field, value)
    return study, []
def insert_biome(biome_id, biome_name, left, right, depth, lineage):
    """Create or update the Biome row the import command will attach to."""
    nested_set_fields = {
        "lft": left,
        "rgt": right,
        "depth": depth,
        "lineage": lineage,
    }
    emg_models.Biome.objects.update_or_create(
        biome_id=biome_id,
        biome_name=biome_name,
        defaults=nested_set_fields,
    )
@pytest.mark.django_db(reset_sequences=True, transaction=True)
class TestImportStudyTransactions:
    """End-to-end tests for the ``import_study`` management command.

    Each test mocks the study-metadata fetch, the result-directory lookup
    and the ENA project lookup, runs the command, and verifies the persisted
    ``emg_models.Study`` row.

    Refactor: the three tests previously repeated ~20 identical assertions
    each; the shared setup and checks now live in the private helpers
    ``_import_study`` and ``_assert_imported_study``.  Test names, decorators
    and observable behaviour are unchanged.
    """

    # Biome used by every scenario.
    LINEAGE = "root:Host-associated:Mammals:Digestive system:Fecal"
    BIOME_NAME = "Fecal"

    def _import_study(self, accession, mock_db, mock_study_dir,
                      mock_ena_project):
        """Register the target biome, run import_study, return the DB row."""
        insert_biome(422, self.BIOME_NAME, 841, 844, 5, self.LINEAGE)
        with mock_db, mock_study_dir, mock_ena_project:
            cmd = Command()
            cmd.run_from_argv(
                argv=["manage.py", "import_study", accession, self.LINEAGE])
            return emg_models.Study.objects.get(
                secondary_accession=accession)

    def _assert_imported_study(self, expected, actual_study, result_directory,
                               data_origination, first_created_fmt,
                               publication_count):
        """Field-by-field checks shared by all import scenarios.

        *expected* is the (study, publications) tuple the metadata mock
        returned; the scenario-specific values arrive as keyword arguments.
        """
        ena_study = expected[0]
        assert ena_study.study_id == actual_study.secondary_accession
        assert ena_study.project_id == actual_study.project_id
        assert ena_study.center_name == actual_study.centre_name
        assert None is actual_study.experimental_factor
        assert True is actual_study.is_public
        assert None is actual_study.public_release_date
        assert ena_study.study_description in actual_study.study_abstract
        assert ena_study.study_title in actual_study.study_name
        assert "FINISHED" == actual_study.study_status
        assert data_origination == actual_study.data_origination
        assert None is actual_study.author_email
        assert None is actual_study.author_name
        # last_update is set by the command at import time.
        assert timezone.now().strftime(
            "%m/%d/%Y"
        ) == actual_study.last_update.strftime("%m/%d/%Y")
        assert (
            ena_study.submission_account_id == actual_study.submission_account_id
        )
        assert self.BIOME_NAME == actual_study.biome.biome_name
        assert result_directory == actual_study.result_directory
        # ENA studies carry a datetime string; NCBI ones a date string.
        assert ena_study.first_created == actual_study.first_created.strftime(
            first_created_fmt
        )
        assert publication_count == len(actual_study.publications.all())
        assert 0 == len(actual_study.samples.all())

    @patch(
        "emgapianns.management.lib.create_or_update_study.StudyImporter._get_ena_project"
    )
    @patch("emgapianns.management.commands.import_study.Command.get_study_dir")
    @patch(
        "emgapianns.management.lib.create_or_update_study.StudyImporter._fetch_study_metadata"
    )
    def test_import_ena_study_should_succeed(
        self, mock_db, mock_study_dir, mock_ena_project
    ):
        """ENA-submitted study imports with data_origination SUBMITTED."""
        accession = "ERP117125"
        mock_db.return_value = expected = mock_ena_run_study()
        mock_study_dir.return_value = "2019/09/ERP117125"
        mock_ena_project.return_value = get_ena_project_mock(
            "UNIVERSITY OF CAMBRIDGE"
        )
        actual_study = self._import_study(
            accession, mock_db, mock_study_dir, mock_ena_project)
        self._assert_imported_study(
            expected, actual_study,
            result_directory="2019/09/ERP117125",
            data_origination="SUBMITTED",
            first_created_fmt="%Y-%m-%d %H:%M:%S",
            publication_count=0,
        )

    @patch(
        "emgapianns.management.lib.create_or_update_study.StudyImporter._get_ena_project"
    )
    @patch("emgapianns.management.commands.import_study.Command.get_study_dir")
    @patch(
        "emgapianns.management.lib.create_or_update_study.StudyImporter._fetch_study_metadata"
    )
    def test_import_ncbi_study_SRP000125_should_succeed(
        self, mock_db, mock_study_dir, mock_ena_project
    ):
        """NCBI study SRP000125: HARVESTED origin, two linked publications."""
        accession = "SRP000125"
        mock_study_dir.return_value = "2019/09/{}".format(accession)
        mock_db.return_value = expected = mock_ncbi_run_study_SRP000125()
        mock_ena_project.return_value = get_ena_project_mock(
            "SDSU Center for Universal Microbial Sequencing"
        )
        actual_study = self._import_study(
            accession, mock_db, mock_study_dir, mock_ena_project)
        self._assert_imported_study(
            expected, actual_study,
            result_directory="2019/09/{}".format(accession),
            data_origination="HARVESTED",
            first_created_fmt="%Y-%m-%d",
            publication_count=2,
        )

    @patch(
        "emgapianns.management.lib.create_or_update_study.StudyImporter._get_ena_project"
    )
    @patch("emgapianns.management.commands.import_study.Command.get_study_dir")
    @patch(
        "emgapianns.management.lib.create_or_update_study.StudyImporter._fetch_study_metadata"
    )
    def test_import_ncbi_study_SRP034734_should_succeed(
        self, mock_db, mock_study_dir, mock_ena_project
    ):
        """NCBI study SRP034734: HARVESTED origin, no pubmed ids."""
        accession = "SRP034734"
        mock_study_dir.return_value = "2019/09/{}".format(accession)
        mock_db.return_value = expected = mock_ncbi_run_study_SRP034734()
        mock_ena_project.return_value = get_ena_project_mock(
            "Veer Narmad South Gujarat University"
        )
        actual_study = self._import_study(
            accession, mock_db, mock_study_dir, mock_ena_project)
        self._assert_imported_study(
            expected, actual_study,
            result_directory="2019/09/{}".format(accession),
            data_origination="HARVESTED",
            first_created_fmt="%Y-%m-%d",
            publication_count=0,
        )
| 43.674825
| 106
| 0.677848
|
4a03e5f60862953c87d4fa20a7e4b461da282c08
| 811
|
py
|
Python
|
microproxy/layer/proxy/replay.py
|
mike820324/microProxy
|
64c7c5add4759c6e105b9438cd18c0f8c930c7a3
|
[
"MIT"
] | 20
|
2016-04-17T08:43:26.000Z
|
2021-05-31T04:01:27.000Z
|
microproxy/layer/proxy/replay.py
|
mike820324/microProxy
|
64c7c5add4759c6e105b9438cd18c0f8c930c7a3
|
[
"MIT"
] | 237
|
2016-04-17T07:07:08.000Z
|
2017-01-26T09:15:52.000Z
|
microproxy/layer/proxy/replay.py
|
mike820324/microProxy
|
64c7c5add4759c6e105b9438cd18c0f8c930c7a3
|
[
"MIT"
] | 5
|
2016-04-16T14:22:45.000Z
|
2019-11-27T04:41:55.000Z
|
from tornado import gen
from microproxy.protocol import tls
from microproxy.layer.base import ProxyLayer
class ReplayLayer(ProxyLayer):
    """Proxy layer that replays traffic straight to the destination host."""

    def __init__(self, context, **kwargs):
        super(ReplayLayer, self).__init__(context, **kwargs)

    @gen.coroutine
    def process_and_return_context(self):
        """Open (and, for https/h2, TLS-wrap) the destination stream."""
        stream = yield self.create_dest_stream(
            (self.context.host, self.context.port))
        if self.context.scheme in ("https", "h2"):
            # Advertise h2 via ALPN only when the context requires HTTP/2.
            alpn = ["h2"] if self.context.scheme == "h2" else None
            stream = yield stream.start_tls(
                server_side=False,
                ssl_options=tls.create_dest_sslcontext(alpn=alpn))
        self.context.dest_stream = stream
        raise gen.Return(self.context)
| 30.037037
| 85
| 0.639951
|
4a03e7676e838e15b7bb60b1750821ed269066d4
| 213
|
py
|
Python
|
upwind/cython/setup.py
|
pletzer/fidibench
|
d6465445d6fb3ffd20b53419dc7f833650071e93
|
[
"MIT"
] | 7
|
2018-02-02T21:12:56.000Z
|
2020-09-10T01:07:18.000Z
|
upwind/cython/setup.py
|
pletzer/fidibench
|
d6465445d6fb3ffd20b53419dc7f833650071e93
|
[
"MIT"
] | 3
|
2020-09-09T23:17:00.000Z
|
2020-09-15T02:11:33.000Z
|
upwind/cython/setup.py
|
pletzer/fidibench
|
d6465445d6fb3ffd20b53419dc7f833650071e93
|
[
"MIT"
] | 2
|
2019-01-31T22:15:40.000Z
|
2022-03-30T02:08:30.000Z
|
from distutils.core import setup
from Cython.Build import cythonize
import numpy
# python setup.py build_ext --inplace
# Compiles upwind5.pyx in place; NumPy's include path is needed because the
# extension uses the NumPy C API.
setup(
    ext_modules = cythonize("upwind5.pyx"),
    include_dirs=[numpy.get_include()],
)
| 21.3
| 43
| 0.755869
|
4a03e7d9cd4f11e3d04793ba351d5d429c65174d
| 1,594
|
py
|
Python
|
cac/optimizer.py
|
WadhwaniAI/cac-test-release
|
066c554a354f0043f8ac83c9d6421a8397b17ba5
|
[
"Apache-2.0"
] | 24
|
2021-05-13T13:34:27.000Z
|
2022-03-02T13:52:29.000Z
|
cac/optimizer.py
|
WadhwaniAI/cac-test-release
|
066c554a354f0043f8ac83c9d6421a8397b17ba5
|
[
"Apache-2.0"
] | 2
|
2021-07-09T08:21:31.000Z
|
2022-02-11T15:30:55.000Z
|
cac/optimizer.py
|
WadhwaniAI/cac-test-release
|
066c554a354f0043f8ac83c9d6421a8397b17ba5
|
[
"Apache-2.0"
] | 6
|
2021-06-10T07:47:48.000Z
|
2022-01-27T07:36:09.000Z
|
"""Defines Factory object to register various optimizers"""
from typing import Any
import math
from abc import ABC, abstractmethod
from cac.factory import Factory
from torch.optim import Adam, SGD, AdamW
from torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR, CyclicLR, \
OneCycleLR, MultiStepLR
# Registry of supported torch optimizers, keyed by config name.
optimizer_factory = Factory()
optimizer_factory.register_builder('SGD', SGD)
optimizer_factory.register_builder('Adam', Adam)
optimizer_factory.register_builder('AdamW', AdamW)
class Scheduler(ABC):
    """Abstract base for step-driven custom schedulers.

    Subclasses implement :meth:`get_value`; :meth:`step` advances the
    internal counter and returns the freshly computed value.
    """

    def __init__(self):
        # Number of times step() has been called so far.
        self.step_count = 0

    def step(self):
        """Advance one step and return the value for the new step count."""
        self.step_count = self.step_count + 1
        return self.get_value()

    @abstractmethod
    def get_value(self):
        """Compute the value for the current ``step_count``."""
        raise NotImplementedError
class Polynomial(Scheduler):
    """Scheduler whose value is the step count raised to a fixed power.

    :param power: exponent to be applied to the time steps
    :type power: float
    """

    def __init__(self, power: float):
        super(Polynomial, self).__init__()
        self.power = power

    def get_value(self):
        # math.pow always yields a float, preserving the original contract.
        return math.pow(self.step_count, self.power)
# Registry of learning-rate schedulers (torch built-ins plus the custom
# Polynomial above), keyed by config name.
scheduler_factory = Factory()
scheduler_factory.register_builder('ReduceLROnPlateau', ReduceLROnPlateau)
scheduler_factory.register_builder('StepLR', StepLR)
scheduler_factory.register_builder('MultiStepLR', MultiStepLR)
scheduler_factory.register_builder('CyclicLR', CyclicLR)
scheduler_factory.register_builder('1cycle', OneCycleLR)
scheduler_factory.register_builder('Polynomial', Polynomial)
| 30.653846
| 75
| 0.749059
|
4a03e98aa4e1146a4e3fe97385fc18f1a42ae731
| 10,693
|
py
|
Python
|
mesonbuild/ast/introspection.py
|
mpoquet/meson
|
cc8af259c813e239292cccb60bacfbc7641e12a9
|
[
"Apache-2.0"
] | null | null | null |
mesonbuild/ast/introspection.py
|
mpoquet/meson
|
cc8af259c813e239292cccb60bacfbc7641e12a9
|
[
"Apache-2.0"
] | null | null | null |
mesonbuild/ast/introspection.py
|
mpoquet/meson
|
cc8af259c813e239292cccb60bacfbc7641e12a9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This class contains the basic functionality needed to run any interpreter
# or an interpreter-based tool
from . import AstInterpreter
from .. import compilers, environment, mesonlib, mparser, optinterpreter
from .. import coredata as cdata
from ..interpreterbase import InvalidArguments
from ..build import Executable, Jar, SharedLibrary, SharedModule, StaticLibrary
import os
# Interpreter functions that create build targets; used to decide which AST
# function calls should be introspected as targets.
build_target_functions = ['executable', 'jar', 'library', 'shared_library', 'shared_module', 'static_library', 'both_libraries']
class IntrospectionHelper:
    """Minimal stand-in for the argparse namespace Environment expects."""

    def __init__(self, cross_file):
        self.cross_file = cross_file   # path to the cross file, or None
        self.native_file = None        # never supplied during introspection
        self.cmd_line_options = {}     # no command-line option overrides
class IntrospectionInterpreter(AstInterpreter):
    # Interpreter to detect the options without a build directory
    # Most of the code is stolen from interperter.Interpreter
    def __init__(self, source_root, subdir, backend, cross_file=None, subproject='', subproject_dir='subprojects', env=None):
        super().__init__(source_root, subdir)
        options = IntrospectionHelper(cross_file)
        self.cross_file = cross_file
        if env is None:
            self.environment = environment.Environment(source_root, None, options)
        else:
            self.environment = env
        self.subproject = subproject
        self.subproject_dir = subproject_dir
        self.coredata = self.environment.get_coredata()
        self.option_file = os.path.join(self.source_root, self.subdir, 'meson_options.txt')
        self.backend = backend
        self.default_options = {'backend': self.backend}
        self.project_data = {}
        # Flat list of dicts describing every build target found while walking
        # the AST; filled in by build_target().
        self.targets = []
        # Only these meson functions matter for introspection; everything else
        # keeps the AstInterpreter's no-op handling.
        self.funcs.update({
            'add_languages': self.func_add_languages,
            'executable': self.func_executable,
            'jar': self.func_jar,
            'library': self.func_library,
            'project': self.func_project,
            'shared_library': self.func_shared_lib,
            'shared_module': self.func_shared_module,
            'static_library': self.func_static_lib,
            'both_libraries': self.func_both_lib,
        })
    def func_project(self, node, args, kwargs):
        # Handler for project(): records name/version, loads meson_options.txt,
        # applies default_options and recurses into subprojects.
        if len(args) < 1:
            raise InvalidArguments('Not enough arguments to project(). Needs at least the project name.')
        proj_name = args[0]
        proj_vers = kwargs.get('version', 'undefined')
        proj_langs = self.flatten_args(args[1:])
        if isinstance(proj_vers, mparser.ElementaryNode):
            proj_vers = proj_vers.value
        if not isinstance(proj_vers, str):
            # e.g. a files() node or list — cannot be evaluated without a build dir.
            proj_vers = 'undefined'
        self.project_data = {'descriptive_name': proj_name, 'version': proj_vers}
        if os.path.exists(self.option_file):
            oi = optinterpreter.OptionInterpreter(self.subproject)
            oi.process(self.option_file)
            self.coredata.merge_user_options(oi.options)
        def_opts = self.flatten_args(kwargs.get('default_options', []))
        self.project_default_options = mesonlib.stringlistify(def_opts)
        self.project_default_options = cdata.create_options_dict(self.project_default_options)
        self.default_options.update(self.project_default_options)
        self.coredata.set_default_options(self.default_options, self.subproject, self.environment)
        if not self.is_subproject() and 'subproject_dir' in kwargs:
            spdirname = kwargs['subproject_dir']
            if isinstance(spdirname, str):
                self.subproject_dir = spdirname
        if not self.is_subproject():
            # Scan every directory under the subproject dir, not only the ones
            # actually referenced, since we cannot evaluate subproject() calls.
            self.project_data['subprojects'] = []
            subprojects_dir = os.path.join(self.source_root, self.subproject_dir)
            if os.path.isdir(subprojects_dir):
                for i in os.listdir(subprojects_dir):
                    if os.path.isdir(os.path.join(subprojects_dir, i)):
                        self.do_subproject(i)
        self.coredata.init_backend_options(self.backend)
        options = {k: v for k, v in self.environment.cmd_line_options.items() if k.startswith('backend_')}
        self.coredata.set_options(options)
        self.func_add_languages(None, proj_langs, None)
    def do_subproject(self, dirname):
        # Run a nested introspection over one subproject directory; failures
        # are deliberately ignored (the subproject may not be downloaded).
        subproject_dir_abs = os.path.join(self.environment.get_source_dir(), self.subproject_dir)
        subpr = os.path.join(subproject_dir_abs, dirname)
        try:
            subi = IntrospectionInterpreter(subpr, '', self.backend, cross_file=self.cross_file, subproject=dirname, subproject_dir=self.subproject_dir, env=self.environment)
            subi.analyze()
            subi.project_data['name'] = dirname
            self.project_data['subprojects'] += [subi.project_data]
        except:
            return
    def func_add_languages(self, node, args, kwargs):
        # Handler for add_languages(): detect compilers for each language so
        # compiler options become visible in coredata.
        args = self.flatten_args(args)
        need_cross_compiler = self.environment.is_cross_build()
        for lang in sorted(args, key=compilers.sort_clink):
            lang = lang.lower()
            if lang not in self.coredata.compilers:
                self.environment.detect_compilers(lang, need_cross_compiler)
    def build_target(self, node, args, kwargs, targetclass):
        # Shared handler for all build-target functions. Walks the AST nodes
        # reachable from the call to collect the nodes that contribute sources,
        # then instantiates the target class with an empty source list (the
        # real sources cannot be resolved without a configured build dir).
        if not args:
            return
        kwargs = self.flatten_kwargs(kwargs, True)
        name = self.flatten_args(args)[0]
        srcqueue = [node]
        if 'sources' in kwargs:
            srcqueue += kwargs['sources']
        source_nodes = []
        while srcqueue:
            curr = srcqueue.pop(0)
            arg_node = None
            if isinstance(curr, mparser.FunctionNode):
                arg_node = curr.args
            elif isinstance(curr, mparser.ArrayNode):
                arg_node = curr.args
            elif isinstance(curr, mparser.IdNode):
                # Try to resolve the ID and append the node to the queue
                id = curr.value
                if id in self.assignments and self.assignments[id]:
                    node = self.assignments[id][0]
                    if isinstance(node, (mparser.ArrayNode, mparser.IdNode, mparser.FunctionNode)):
                        srcqueue += [node]
            if arg_node is None:
                continue
            elemetary_nodes = list(filter(lambda x: isinstance(x, (str, mparser.StringNode)), arg_node.arguments))
            srcqueue += list(filter(lambda x: isinstance(x, (mparser.FunctionNode, mparser.ArrayNode, mparser.IdNode)), arg_node.arguments))
            # Pop the first element if the function is a build target function
            if isinstance(curr, mparser.FunctionNode) and curr.func_name in build_target_functions:
                elemetary_nodes.pop(0)
            if elemetary_nodes:
                source_nodes += [curr]
        # Filter out kwargs from other target types. For example 'soversion'
        # passed to library() when default_library == 'static'.
        kwargs = {k: v for k, v in kwargs.items() if k in targetclass.known_kwargs}
        is_cross = False
        objects = []
        empty_sources = [] # Passing the unresolved sources list causes errors
        target = targetclass(name, self.subdir, self.subproject, is_cross, empty_sources, objects, self.environment, kwargs)
        self.targets += [{
            'name': target.get_basename(),
            'id': target.get_id(),
            'type': target.get_typename(),
            'defined_in': os.path.normpath(os.path.join(self.source_root, self.subdir, environment.build_filename)),
            'subdir': self.subdir,
            'build_by_default': target.build_by_default,
            'sources': source_nodes,
            'kwargs': kwargs,
            'node': node,
        }]
        return
    def build_library(self, node, args, kwargs):
        # library() resolves to shared/static based on the default_library
        # option; 'both' is introspected as a shared library.
        default_library = self.coredata.get_builtin_option('default_library')
        if default_library == 'shared':
            return self.build_target(node, args, kwargs, SharedLibrary)
        elif default_library == 'static':
            return self.build_target(node, args, kwargs, StaticLibrary)
        elif default_library == 'both':
            return self.build_target(node, args, kwargs, SharedLibrary)
    def func_executable(self, node, args, kwargs):
        return self.build_target(node, args, kwargs, Executable)
    def func_static_lib(self, node, args, kwargs):
        return self.build_target(node, args, kwargs, StaticLibrary)
    def func_shared_lib(self, node, args, kwargs):
        return self.build_target(node, args, kwargs, SharedLibrary)
    def func_both_lib(self, node, args, kwargs):
        return self.build_target(node, args, kwargs, SharedLibrary)
    def func_shared_module(self, node, args, kwargs):
        return self.build_target(node, args, kwargs, SharedModule)
    def func_library(self, node, args, kwargs):
        return self.build_library(node, args, kwargs)
    def func_jar(self, node, args, kwargs):
        return self.build_target(node, args, kwargs, Jar)
    def func_build_target(self, node, args, kwargs):
        # Handler for build_target(..., target_type: ...): dispatch on the
        # target_type keyword to the matching concrete handler.
        if 'target_type' not in kwargs:
            return
        target_type = kwargs.pop('target_type')
        if isinstance(target_type, mparser.ElementaryNode):
            target_type = target_type.value
        if target_type == 'executable':
            return self.build_target(node, args, kwargs, Executable)
        elif target_type == 'shared_library':
            return self.build_target(node, args, kwargs, SharedLibrary)
        elif target_type == 'static_library':
            return self.build_target(node, args, kwargs, StaticLibrary)
        elif target_type == 'both_libraries':
            return self.build_target(node, args, kwargs, SharedLibrary)
        elif target_type == 'library':
            return self.build_library(node, args, kwargs)
        elif target_type == 'jar':
            return self.build_target(node, args, kwargs, Jar)
    def is_subproject(self):
        return self.subproject != ''
    def analyze(self):
        # Entry point: parse and walk the meson.build tree, populating
        # project_data and targets as a side effect.
        self.load_root_meson_file()
        self.sanity_check_ast()
        self.parse_project()
        self.run()
| 44.18595
| 174
| 0.653325
|
4a03ea1d61b8ed4a5c81f96cc83f0acb28c3cd82
| 6,417
|
py
|
Python
|
test/test.py
|
WiseLabCMU/slam3d
|
9d7d17b3b1f172b906da8f0244fe2022c4b9d02c
|
[
"BSD-3-Clause"
] | 4
|
2019-12-05T13:00:30.000Z
|
2021-12-27T20:02:24.000Z
|
test/test.py
|
DaAwesomeP/slam3d
|
9140a9c992b09bc8a2c64d600004107638981fb9
|
[
"BSD-3-Clause"
] | 1
|
2022-02-21T13:33:44.000Z
|
2022-02-21T13:33:44.000Z
|
test/test.py
|
DaAwesomeP/slam3d
|
9140a9c992b09bc8a2c64d600004107638981fb9
|
[
"BSD-3-Clause"
] | 4
|
2021-06-18T17:54:14.000Z
|
2021-06-18T21:15:51.000Z
|
#!/usr/bin/env python
"""
test.py
Created by Perry Naseck on 6/25/21.
Copyright (c) 2021, Wireless Sensing and Embedded Systems Lab, Carnegie
Mellon University
All rights reserved.
This source code is licensed under the BSD-3-Clause license found in the
LICENSE file in the root directory of this source tree.
"""
import csv
from filecmp import cmp
import numpy as np
import sys
import time
from particlefilter import ParticleFilterLoc, setSeed
# Only one test for now, so keep it simple
NUM_BCNS = 4                # number of UWB beacons in the deployment
UWB_STD = 0.1               # std deviation passed with each UWB range deposit
UWB_BIAS = 0.2              # constant bias subtracted from every UWB range
SKIP_TO_WAYPOINT = 0
VIO_FILE = "/test1_ParticleFilterLoc_vio.csv"
UWB_FILE = "/test1_ParticleFilterLoc_uwb.csv"
DEPLOY_FILE = "/test1_ParticleFilterLoc_deploy.csv"
LINE_LEN = 1024             # max bytes read per CSV line
SEED = 123456789            # fixed RNG seed for reproducible runs
ALLOW_VARIANCE = 0.000005   # max per-value deviation tolerated vs expected file
printedHeaders = False
def _getVio(vioFile, skipToWaypoint: int) -> tuple:
    """Read one VIO sample from the CSV file.

    Returns (True, t, x, y, z) on success, or (False, None, None, None, None)
    at end of file.
    """
    raw = vioFile.readline(LINE_LEN)
    if raw == "":
        return (False, None, None, None, None)
    fields = raw[:-1].split(",")
    timestamp = np.float64(fields[0])
    # VIO on iOS is reported in a different order (y, z, x)
    y_val = np.float32(fields[1])
    z_val = np.float32(fields[2])
    x_val = np.float32(fields[3])
    return (True, timestamp, x_val, y_val, z_val)
def _getUwb(uwbFile, skipToWaypoint: int) -> tuple:
    """Read one UWB range sample from the CSV file.

    Returns (True, t, beacon_id, range) on success, or
    (False, None, None, None) at end of file.
    """
    raw = uwbFile.readline(LINE_LEN)
    if raw == "":
        return (False, None, None, None)
    fields = raw[:-1].split(",")
    timestamp = np.float64(fields[0])
    beacon = np.int32(fields[1])
    dist = np.float32(fields[2])
    # Beacon ids must index into the deployment table.
    assert beacon < NUM_BCNS
    return (True, timestamp, beacon, dist)
def _getDeployment(deployFile, deployment):
    """Fill `deployment` (NUM_BCNS rows of [x, y, z]) from the deploy CSV.

    Stops early if the file runs out of lines.
    """
    for _ in range(NUM_BCNS):
        raw = deployFile.readline(LINE_LEN)
        if raw == "":
            return
        fields = raw[:-1].split(",")  # remove last char (newline)
        beacon = int(fields[0])
        assert beacon < NUM_BCNS
        # Columns after the id fill slots 1, 2 and then 0 — presumably the
        # file stores (y, z, x) like the VIO data; verify against the writer.
        deployment[beacon][1] = np.float32(fields[1])
        deployment[beacon][2] = np.float32(fields[2])
        deployment[beacon][0] = np.float32(fields[3])
def _writeTagLoc(outFile, t: np.float64, x: np.float32, y: np.float32, z: np.float32, theta: np.float32):
outFile.write(("{:.6f},{:.6f},{:.6f},{:.6f},{:.6f}\n").format(t, y, z, x, theta))
def main() -> int:
    """Run the particle-filter localization test.

    Usage: test.py [--nofail] <test folder> <output file> [expected file]
    Replays VIO and UWB logs through ParticleFilterLoc, writes the estimated
    poses, then compares them against the expected file (exact match first,
    then within ALLOW_VARIANCE). Returns 0 on pass, 1 on failure.
    """
    if len(sys.argv) < 4:
        print("Test folder, out file, expected file, and/or --nofail not specified!")
        print("test.py [--nofail] <test folder> <output file> [expected file (required without --nofail)]")
        return 1
    noFail = 0
    if sys.argv[1] == "--nofail":
        noFail = 1
    testFolder = sys.argv[1 + noFail]
    tagOutFilePath = sys.argv[2 + noFail]
    print("Starting test")
    setSeed(SEED)  # fixed seed so the particle filter run is reproducible
    print("Starting localization")
    vioFile = open(testFolder + VIO_FILE, 'r')
    uwbFile = open(testFolder + UWB_FILE, 'r')
    tagOutFile = open(tagOutFilePath, 'w')
    tagOutFile.write("t,x,y,z,theta\n")
    _particleFilter = ParticleFilterLoc()
    deployFile = open(testFolder + DEPLOY_FILE, 'r')
    deployment = [[np.float32(0)]*3 for i in range(NUM_BCNS)]
    _getDeployment(deployFile, deployment)
    deployFile.close()
    print("Initialized")
    t_measure = time.perf_counter()
    haveVio, vioT, vioX, vioY, vioZ = _getVio(vioFile, SKIP_TO_WAYPOINT)
    haveUwb, uwbT, uwbB, uwbR = _getUwb(uwbFile, SKIP_TO_WAYPOINT)
    # Merge the two time-ordered streams, always feeding the earlier sample.
    while haveVio or haveUwb:
        if haveVio and (not haveUwb or vioT < uwbT):
            _particleFilter.depositVio(vioT, vioX, vioY, vioZ, 0.0)
            status, outT, outX, outY, outZ, outTheta = _particleFilter.getTagLoc()
            if status:
                _writeTagLoc(tagOutFile, outT, outX, outY, outZ, outTheta)
            haveVio, vioT, vioX, vioY, vioZ = _getVio(vioFile, 0)
        elif haveUwb:
            uwbR -= UWB_BIAS
            # Discard implausible ranges (non-positive or beyond 30 m).
            if uwbR > 0.0 and uwbR < 30.0:
                _particleFilter.depositRange(deployment[uwbB][0], deployment[uwbB][1], deployment[uwbB][2], uwbR, UWB_STD)
            haveUwb, uwbT, uwbB, uwbR = _getUwb(uwbFile, 0)
    t_measure = time.perf_counter() - t_measure
    print("Finished localization")
    print(f"Loop took {t_measure:.6f} seconds to execute")
    vioFile.close()
    uwbFile.close()
    tagOutFile.close()
    if len(sys.argv) == 2 and sys.argv[1] == "--nofail":
        print("Expected compare file not provided and called with --nofail, exiting")
        return 0
    expectedFilePath = sys.argv[3 + noFail]
    # First try a cheap byte-for-byte comparison.
    res = cmp(expectedFilePath, tagOutFilePath, shallow=False)
    if res:
        print("Test passed, exact match")
        return 0
    else:
        # Fall back to a per-value comparison with a small tolerance.
        print(f"Test not exact match, checking all numbers within {ALLOW_VARIANCE}")
        res = 1
        num_off = 0
        num_exact = 0
        num_allowable = 0
        max_allowable_off_found = 0.0
        max_off_found = 0.0
        file_length = "same number as in expected"
        csvExpectedFile = open(expectedFilePath, 'r', newline='')
        csvTagOutFile = open(tagOutFilePath, 'r', newline='')
        expectedFileReader = list(csv.reader(csvExpectedFile, delimiter=','))
        tagOutFileReader = list(csv.reader(csvTagOutFile, delimiter=','))
        # flatten lists from rows into one big list
        expectedParams = np.array(expectedFileReader).flatten()
        tagOutParams = np.array(tagOutFileReader).flatten()
        # Iterate over the shorter list when the lengths differ (length
        # mismatch already fails the test via res = 0).
        paramsIter = expectedParams
        if len(expectedParams) > len(tagOutParams):
            file_length = "fewer parameters than expected"
            paramsIter = tagOutParams
            res = 0
        elif len(expectedParams) < len(tagOutParams):
            file_length = "more parameters than expected"
            res = 0
        for i in range(5, len(paramsIter)):  # skip first row (header)
            diff = abs(float(expectedParams[i]) - float(tagOutParams[i]))
            if expectedParams[i] == tagOutParams[i]:
                num_exact += 1
            elif diff <= ALLOW_VARIANCE:
                num_allowable += 1
                if diff > max_allowable_off_found:
                    max_allowable_off_found = diff
            else:
                num_off += 1
                if diff > max_off_found:
                    max_off_found = diff
        if num_off > 0:
            res = 0
        print(f"Number of parameters in output file: {file_length}")
        print(f"Number of parameters out of range: {num_off}")
        print(f"Number of parameters exact match: {num_exact}")
        print(f"Number of parameters within allowable range (not exact match): {num_allowable}")
        print(f"Maximum difference found in out of range: {max_off_found:.8f}")
        print(f"Maximum difference found in allowable: {max_allowable_off_found:.8f}")
        if res:
            print("Test passed, within allowed deviation")
            return 0
        elif not res and len(sys.argv) > 1 and sys.argv[1] == "--nofail":
            print("Test failed, but called with --nofail")
            return 0
        else:
            print("Test failed")
            return 1
# Script entry point: propagate main()'s status code to the shell.
if __name__=="__main__":
    res = main()
    exit(res)
| 31
| 114
| 0.678043
|
4a03eaa55d4b748f6367d3eb7a96ead2ba989b4e
| 899
|
py
|
Python
|
servers/python/tcp-server.py
|
opdev1004/boxrpg-unity
|
c0c4779ba9e15d7c79cd5e8518b5211467cab4e9
|
[
"MIT"
] | null | null | null |
servers/python/tcp-server.py
|
opdev1004/boxrpg-unity
|
c0c4779ba9e15d7c79cd5e8518b5211467cab4e9
|
[
"MIT"
] | null | null | null |
servers/python/tcp-server.py
|
opdev1004/boxrpg-unity
|
c0c4779ba9e15d7c79cd5e8518b5211467cab4e9
|
[
"MIT"
] | null | null | null |
import socket
from _thread import *
import sys
# Address and port the echo server listens on.
server = "192.168.1.14"
port = 5555
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    s.bind((server, port))
except socket.error as e:
    # Report the bind failure; the old code called str(e) and discarded the
    # result, silently swallowing the error.
    print(str(e))
# Backlog of 2 pending connections.
s.listen(2)
print("Waiting for a connection, Server started")
def threaded_client(conn):
    """Serve one client: send a greeting, then echo every message back.

    Runs until the peer disconnects (empty recv) or a socket error occurs,
    then closes the connection.
    """
    conn.send(str.encode("Connected"))
    reply = ""
    while True:
        try:
            data = conn.recv(2048)
            reply = data.decode("utf-8")
            if not data:
                # Empty read: the peer performed an orderly shutdown.
                print("disconnected")
                break
            else:
                print("Received: ", reply)
                print("Sending: ", reply)
                conn.sendall(str.encode(reply))
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; any socket/decode error ends the session.
            break
    print("Lost connection")
    conn.close()
# Main accept loop: spawn one thread per client connection.
while True:
    conn, addr = s.accept()
    print("Connected to: ", addr)
    start_new_thread(threaded_client, (conn,))
| 20.906977
| 53
| 0.562848
|
4a03eb1e75962746f91e81d1c64afe00412aac39
| 3,310
|
py
|
Python
|
AnimalGalary/settings.py
|
myyrakle/AnimalGalary
|
f0886e4837e26fbb59d31c361c7fbe751c2dd762
|
[
"MIT"
] | null | null | null |
AnimalGalary/settings.py
|
myyrakle/AnimalGalary
|
f0886e4837e26fbb59d31c361c7fbe751c2dd762
|
[
"MIT"
] | null | null | null |
AnimalGalary/settings.py
|
myyrakle/AnimalGalary
|
f0886e4837e26fbb59d31c361c7fbe751c2dd762
|
[
"MIT"
] | null | null | null |
"""
Django settings for AnimalGalary project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'sti)b=#wfq8_cf@8q)e6*2&1^8x=tsgrb%%wr2y)%hwvae3b1j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'app.apps.AppConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'AnimalGalary.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'AnimalGalary.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
from .password import PASSWORD
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'animal',
'USER': 'root',
'PASSWORD': PASSWORD,
'HOST': 'localhost',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
| 25.859375
| 91
| 0.68852
|
4a03ecbe70524b77dd1c11db0c9a781262a8844e
| 3,627
|
py
|
Python
|
fraud_poc/data/training_data.py
|
leosmerling-hopeit/fraud-poc
|
cd4448d0a957e5fd5ebfadda46b8100db30aca90
|
[
"Apache-2.0"
] | 2
|
2020-08-03T13:14:07.000Z
|
2021-05-20T19:43:09.000Z
|
fraud_poc/data/training_data.py
|
leosmerling-hopeit/fraud-poc
|
cd4448d0a957e5fd5ebfadda46b8100db30aca90
|
[
"Apache-2.0"
] | 1
|
2022-02-26T09:12:35.000Z
|
2022-02-26T09:12:35.000Z
|
fraud_poc/data/training_data.py
|
leosmerling-hopeit/fraud-poc
|
cd4448d0a957e5fd5ebfadda46b8100db30aca90
|
[
"Apache-2.0"
] | 3
|
2020-12-18T13:24:22.000Z
|
2021-05-20T19:43:10.000Z
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 04_training-data.ipynb (unless otherwise specified).
__all__ = ['__steps__', 'logger', 'run']
# Cell
from typing import Dict
from datetime import datetime, timezone, timedelta
import random
import math
import dask.dataframe as dd
import numpy as np
from hopeit.app.context import EventContext
from hopeit.app.events import Spawn, SHUFFLE
from hopeit.app.api import event_api
from hopeit.app.logger import app_logger
from ..jobs import get_client, FeatureCalcJob, TrainingDataJob
# Cell
__steps__ = ['run']
logger = app_logger()
# Cell
def _merge_feature_datasets(datasets: Dict[str, str]):
    """Join all per-feature parquet datasets on order_id into one dataframe.

    `datasets` maps a feature name to its parquet path; overlapping columns
    from later datasets are dropped in favour of the first occurrence.
    """
    merged = None
    for feature_name, path in datasets.items():
        part = dd.read_parquet(path, engine='fastparquet')
        if merged is None:
            merged = part
            continue
        merged = merged.merge(part, left_on='order_id', right_on='order_id', suffixes=('', '_DROP'))
        # Right-hand duplicates were suffixed with _DROP; keep only originals.
        merged = merged[[c for c in merged.columns if c[-5:] != '_DROP']]
    return merged
def _add_labels(df):
    """Derive a synthetic `is_fraud` label from the engineered features.

    Non-deterministic: uses `random` both in the amount threshold and to drop
    roughly 10% of positive labels as noise.
    """
    # Fraud heuristics: an unknown IP or email reused many times by the same
    # customer, or an order amount well above the customer's historical mean.
    df['is_fraud'] = (df['known_ip_addr_by_customer_id'] == 0) & (df['num_ip_addr_by_customer_id'] > 3)
    df['is_fraud'] = df['is_fraud'] | ((df['known_email_by_customer_id'] == 0) & (df['num_email_by_customer_id'] > 3))
    df['is_fraud'] = df['is_fraud'] | (df['order_amount'] > (1. + 0.5 * random.random() * df['order_amount_mean_by_customer_id']))
    # Convert to int and randomly clear ~10% of positives (label noise).
    df['is_fraud'] = df['is_fraud'].apply(lambda x: int(x & (random.random() > 0.1)), meta=('is_fraud', int))
    return df
def _add_sample_flag(df, subsample_not_fraud: float):
    """Mark rows for training: all fraud rows, plus a random
    `subsample_not_fraud` fraction of the non-fraud rows."""
    df['sample'] = df['is_fraud'].apply(lambda x: int((x > 0) | (random.random() > (1.-subsample_not_fraud))), meta=('sample', int))
    return df
def _add_validation_flag(df):
    """Randomly flag rows for validation, biased towards recent orders."""
    now = datetime.now(tz=timezone.utc)
    now_epoch = now.timestamp()
    df['now'] = now
    # elapsed_wgt is ~1 for very recent orders and decreases (log-scaled)
    # with order age; clamped via max(0.001, ...) to keep log() defined.
    df['elapsed_wgt'] = df['order_date'].apply(lambda x: math.log(max(0.001, 1. - (now_epoch - x.timestamp())/now_epoch)) + 1., meta=('elapsed_wgt', float))
    # Higher weight -> higher chance of being selected for validation.
    df['validation'] = df['elapsed_wgt'].apply(lambda x: int((max(0, x) * random.random()) > 0.8), meta=('validation', int))
    return df
def _add_fold_number(df, num_folds):
    """Assign each row a random cross-validation fold number."""
    # NOTE(review): randint(0, num_folds) is inclusive on both ends, so this
    # yields num_folds + 1 distinct fold values — confirm that is intended.
    df['fold'] = df['is_fraud'].apply(lambda x: random.randint(0, num_folds), meta=('fold', int))
    return df
# Cell
def run(job: FeatureCalcJob, context: EventContext) -> TrainingDataJob:
    """hopeit event step: build and persist training/validation datasets.

    Merges the per-feature parquet datasets from `job`, derives labels and
    sampling/validation/fold flags, and writes two parquet datasets under the
    configured training path. Returns a TrainingDataJob describing the
    outputs, or None if any stage fails (the error is only logged).
    """
    base_path = context.env['data']['training']
    num_folds = context.env['training_data']['num_folds']
    subsample_not_fraud = context.env['training_data']['subsample_not_fraud']
    client = get_client(context)
    try:
        df = _merge_feature_datasets(job.features)
        df = _add_labels(df)
        df = _add_sample_flag(df, subsample_not_fraud)
        df = _add_validation_flag(df)
        df = _add_fold_number(df, num_folds)
        sampled_save_path = f"{base_path}/sampled/"
        logger.info(context, f"Saving sampled training dataset to {sampled_save_path}...")
        df_sample = df[df['sample'] > 0]
        # Index by fold so readers can load one cross-validation fold at a time.
        df_sample = df_sample.set_index('fold')
        df_sample.to_parquet(sampled_save_path)
        valid_save_path = f"{base_path}/validation/"
        logger.info(context, f"Saving weighted validation dataset to {valid_save_path}...")
        df_validation = df[df['validation'] > 0]
        df_validation.to_parquet(valid_save_path)
        return TrainingDataJob(
            sources=job.features,
            sampled=sampled_save_path,
            validation=valid_save_path
        )
    except Exception as e:
        # Best-effort: failures are logged and signalled with a None result.
        logger.error(context, e)
        return None
    finally:
        # Always release the dask client, even on failure.
        client.close()
| 37.78125
| 156
| 0.661428
|
4a03ed036a5b0052eb9d662f72cc98b524fba829
| 1,511
|
py
|
Python
|
aashe_bulletin/urls.py
|
AASHE/aashe-bulletin
|
e93a62d4bb08addf5edfa8875b099b3b5a8072df
|
[
"MIT"
] | null | null | null |
aashe_bulletin/urls.py
|
AASHE/aashe-bulletin
|
e93a62d4bb08addf5edfa8875b099b3b5a8072df
|
[
"MIT"
] | 9
|
2017-03-11T18:29:43.000Z
|
2020-02-24T18:49:30.000Z
|
aashe_bulletin/urls.py
|
AASHE/aashe-bulletin
|
e93a62d4bb08addf5edfa8875b099b3b5a8072df
|
[
"MIT"
] | 1
|
2020-02-26T22:12:49.000Z
|
2020-02-26T22:12:49.000Z
|
import django.contrib.auth.views
from django.conf.urls import patterns, include, url
from django.contrib import admin
from bulletin.tools.plugins.views.story import StoryListView
from views import (AllItemsView,
FAQView,
LatestNewsFeedView,
NoSearchForYouView,
SubmissionGuidelinesView)
admin.autodiscover()
# URL table: front page, item listings, auth, info pages, admin, flat pages,
# RSS feeds, and the bulletin app mounted at the site root.
urlpatterns = patterns(
    '',
    url(r'^$',
        StoryListView.as_view(),
        name='front-page'),
    url(r'^all-items/$',
        AllItemsView.as_view(),
        name='all-items'),
    url(r'^accounts/login/$',
        django.contrib.auth.views.login,
        name="login"),
    url(r'^accounts/logout/$',
        django.contrib.auth.views.logout,
        name="logout"),
    url(r'^help/',
        FAQView.as_view(),
        name='faq'),
    url(r'^submission-guidelines/',
        SubmissionGuidelinesView.as_view(),
        name='submission-guidelines'),
    url(r'^search-permission-denied/',
        NoSearchForYouView.as_view(),
        name='no-search-for-you'),
    url(r'^admin/',
        include(admin.site.urls)),
    url(r'^flat-pages/',
        include('django.contrib.flatpages.urls')),
    url(r'^rss/news/$',
        LatestNewsFeedView(),
        name='latest-news-feed'),
    url(r'^rss/news/category/(?P<category>.+)/$',
        LatestNewsFeedView(),
        name='latest-news-category-feed'),
    # Catch-all: everything else is routed to the bulletin app.
    url(r'^',
        include('bulletin.urls',
                namespace='bulletin')),
)
| 23.984127
| 60
| 0.583058
|
4a03edf8d1e31b58b6d55cbeb11f01faf9baee75
| 4,836
|
py
|
Python
|
lib/hmmlearn/tests/test_multinomial_hmm.py
|
Freakwill/hmmlearn
|
74b87fad84b2d7215a14bd60b14a1fd9aa1d4605
|
[
"BSD-3-Clause"
] | null | null | null |
lib/hmmlearn/tests/test_multinomial_hmm.py
|
Freakwill/hmmlearn
|
74b87fad84b2d7215a14bd60b14a1fd9aa1d4605
|
[
"BSD-3-Clause"
] | null | null | null |
lib/hmmlearn/tests/test_multinomial_hmm.py
|
Freakwill/hmmlearn
|
74b87fad84b2d7215a14bd60b14a1fd9aa1d4605
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import pytest
from hmmlearn import hmm
from . import assert_log_likelihood_increasing, normalized
class TestMultinomialAgainstWikipedia:
    """
    Examples from Wikipedia:
    - http://en.wikipedia.org/wiki/Hidden_Markov_model
    - http://en.wikipedia.org/wiki/Viterbi_algorithm
    """
    def setup_method(self, method):
        # Build the weather/activity HMM used by the Wikipedia examples.
        n_components = 2  # ['Rainy', 'Sunny']
        n_features = 3    # ['walk', 'shop', 'clean']
        self.h = hmm.MultinomialHMM(n_components)
        self.h.n_features = n_features
        self.h.startprob_ = np.array([0.6, 0.4])
        self.h.transmat_ = np.array([[0.7, 0.3], [0.4, 0.6]])
        self.h.emissionprob_ = np.array([[0.1, 0.4, 0.5],
                                         [0.6, 0.3, 0.1]])
    def test_decode_viterbi(self):
        # From http://en.wikipedia.org/wiki/Viterbi_algorithm:
        # "This reveals that the observations ['walk', 'shop', 'clean']
        # were most likely generated by states ['Sunny', 'Rainy', 'Rainy'],
        # with probability 0.01344."
        X = [[0], [1], [2]]
        logprob, state_sequence = self.h.decode(X, algorithm="viterbi")
        assert round(np.exp(logprob), 5) == 0.01344
        assert np.allclose(state_sequence, [1, 0, 0])
    def test_decode_map(self):
        # MAP decoding should agree with Viterbi on this example.
        X = [[0], [1], [2]]
        _logprob, state_sequence = self.h.decode(X, algorithm="map")
        assert np.allclose(state_sequence, [1, 0, 0])
    def test_predict(self):
        # predict/predict_proba must be consistent with the decoded states.
        X = [[0], [1], [2]]
        state_sequence = self.h.predict(X)
        posteriors = self.h.predict_proba(X)
        assert np.allclose(state_sequence, [1, 0, 0])
        assert np.allclose(posteriors, [
            [0.23170303, 0.76829697],
            [0.62406281, 0.37593719],
            [0.86397706, 0.13602294],
        ])
# NOTE(review): class name looks like a typo of "TestMultinomialHMM"; kept
# unchanged since pytest discovers it by its current name.
class TestMultinomailHMM:
    def setup_method(self, method):
        # Small 2-state, 3-symbol model shared by all tests below.
        self.n_components = 2
        self.n_features = 3
        self.h = hmm.MultinomialHMM(self.n_components)
        self.h.startprob_ = np.array([0.6, 0.4])
        self.h.transmat_ = np.array([[0.7, 0.3], [0.4, 0.6]])
        self.h.emissionprob_ = np.array([[0.1, 0.4, 0.5], [0.6, 0.3, 0.1]])
    def test_attributes(self):
        # Invalid emission matrices must be rejected by _check().
        with pytest.raises(ValueError):
            self.h.emissionprob_ = []
            self.h._check()
        with pytest.raises(ValueError):
            self.h.emissionprob_ = np.zeros((self.n_components - 2,
                                             self.n_features))
            self.h._check()
    def test_score_samples(self):
        # Posteriors must have one row per sample and sum to 1 per row.
        idx = np.repeat(np.arange(self.n_components), 10)
        n_samples = len(idx)
        X = np.random.randint(self.n_features, size=(n_samples, 1))
        ll, posteriors = self.h.score_samples(X)
        assert posteriors.shape == (n_samples, self.n_components)
        assert np.allclose(posteriors.sum(axis=1), np.ones(n_samples))
    def test_sample(self, n_samples=1000):
        X, state_sequence = self.h.sample(n_samples)
        assert X.ndim == 2
        assert len(X) == len(state_sequence) == n_samples
        # With 1000 draws every symbol should appear at least once.
        assert len(np.unique(X)) == self.n_features
    def test_fit(self, params='ste', n_iter=5):
        h = self.h
        h.params = params
        lengths = np.array([10] * 10)
        X, _state_sequence = h.sample(lengths.sum())
        # Mess up the parameters and see if we can re-learn them.
        h.startprob_ = normalized(np.random.random(self.n_components))
        h.transmat_ = normalized(
            np.random.random((self.n_components, self.n_components)),
            axis=1)
        h.emissionprob_ = normalized(
            np.random.random((self.n_components, self.n_features)),
            axis=1)
        assert_log_likelihood_increasing(h, X, lengths, n_iter)
    def test_fit_emissionprob(self):
        # Same as test_fit but only the emission probabilities are learned.
        self.test_fit('e')
    def test_fit_with_init(self, params='ste', n_iter=5):
        lengths = [10] * 10
        X, _state_sequence = self.h.sample(sum(lengths))
        # use init_function to initialize paramerters
        h = hmm.MultinomialHMM(self.n_components, params=params,
                               init_params=params)
        h._init(X, lengths=lengths)
        assert_log_likelihood_increasing(h, X, lengths, n_iter)
    def test__check_and_set_multinomial_n_features(self):
        # Valid: non-negative integer observations of any integer dtype.
        self.h._check_and_set_multinomial_n_features(
            np.array([[0, 0, 2, 1, 3, 1, 1]]))
        self.h._check_and_set_multinomial_n_features(
            np.array([[0, 0, 1, 3, 1]], np.uint8))
        with pytest.raises(ValueError):  # non-integral
            self.h._check_and_set_multinomial_n_features(
                np.array([[0., 2., 1., 3.]]))
        with pytest.raises(ValueError):  # negative integers
            self.h._check_and_set_multinomial_n_features(
                np.array([[0, -2, 1, 3, 1, 1]]))
| 37.2
| 76
| 0.592225
|
4a03edfcfff5fcee319a5948599731f487161456
| 46,479
|
py
|
Python
|
app/main/views/service_settings.py
|
GouvQC/notification-admin
|
5707d8526668e0800ede256db925bdec6f58455d
|
[
"MIT"
] | null | null | null |
app/main/views/service_settings.py
|
GouvQC/notification-admin
|
5707d8526668e0800ede256db925bdec6f58455d
|
[
"MIT"
] | null | null | null |
app/main/views/service_settings.py
|
GouvQC/notification-admin
|
5707d8526668e0800ede256db925bdec6f58455d
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
from datetime import datetime
from flask import (
abort,
current_app,
flash,
jsonify,
redirect,
render_template,
request,
session,
url_for,
)
from flask_babel import _
from flask_login import current_user
from notifications_python_client.errors import HTTPError
from app import (
billing_api_client,
current_service,
email_branding_client,
format_thousands,
inbound_number_client,
letter_branding_client,
notification_api_client,
organisations_client,
service_api_client,
user_api_client,
)
from app.extensions import zendesk_client
from app.main import main
from app.main.forms import (
ConfirmPasswordForm,
EstimateUsageForm,
FieldWithLanguageOptions,
FreeSMSAllowance,
InternationalSMSForm,
LinkOrganisationsForm,
MessageLimit,
PreviewBranding,
RenameServiceForm,
SearchByNameForm,
SelectLogoForm,
SendingDomainForm,
ServiceContactDetailsForm,
ServiceDataRetentionEditForm,
ServiceDataRetentionForm,
ServiceEditInboundNumberForm,
ServiceInboundNumberForm,
ServiceLetterContactBlockForm,
ServiceOnOffSettingForm,
ServiceReplyToEmailForm,
ServiceSmsSenderForm,
ServiceSwitchChannelForm,
SetEmailBranding,
SetLetterBranding,
SMSPrefixForm,
)
from app.s3_client.s3_logo_client import upload_email_logo
from app.utils import (
DELIVERED_STATUSES,
FAILURE_STATUSES,
SENDING_STATUSES,
email_safe,
get_logo_cdn_domain,
user_has_permissions,
user_is_gov_user,
user_is_platform_admin,
)
# Service permissions only platform admins may toggle. `requires` gates the
# toggle on a message channel being enabled; `endpoint` overrides the default
# toggle route for that permission.
PLATFORM_ADMIN_SERVICE_PERMISSIONS = OrderedDict([
    ('inbound_sms', {'title': 'Receive inbound SMS', 'requires': 'sms', 'endpoint': '.service_set_inbound_number'}),
    ('email_auth', {'title': 'Email authentication'}),
    ('upload_document', {'title': 'Send files by email', 'endpoint': '.service_switch_can_upload_document'}),
    ('upload_letters', {'title': 'Uploading letters', 'requires': 'letter'}),
])
@main.route("/services/<service_id>/service-settings")
@user_has_permissions('manage_service', 'manage_api_keys')
def service_settings(service_id):
return render_template(
'views/service-settings.html',
service_permissions=PLATFORM_ADMIN_SERVICE_PERMISSIONS, sending_domain=current_app.config["SENDING_DOMAIN"]
)
@main.route("/services/<service_id>/service-settings/name", methods=['GET', 'POST'])
@user_has_permissions('manage_service')
def service_name_change(service_id):
form = RenameServiceForm()
if request.method == 'GET':
form.name.data = current_service.name
if form.validate_on_submit():
if form.name.data == current_service.name:
return redirect(url_for('.service_settings', service_id=service_id))
unique_name = service_api_client.is_service_name_unique(service_id, form.name.data, email_safe(form.name.data))
if not unique_name:
form.name.errors.append(_("This service name is already in use"))
return render_template('views/service-settings/name.html', form=form)
session['service_name_change'] = form.name.data
return redirect(url_for('.service_name_change_confirm', service_id=service_id))
return render_template(
'views/service-settings/name.html',
form=form,
)
@main.route("/services/<service_id>/service-settings/name/confirm", methods=['GET', 'POST'])
@user_has_permissions('manage_service')
def service_name_change_confirm(service_id):
    """Confirm the pending rename (held in the session) with the user's password."""
    # Validate password for form
    def _check_password(pwd):
        return user_api_client.verify_password(current_user.id, pwd)
    form = ConfirmPasswordForm(_check_password)

    if form.validate_on_submit():
        try:
            current_service.update(
                name=session['service_name_change'],
                email_from=email_safe(session['service_name_change'])
            )
        except HTTPError as e:
            # The API rejects duplicate names; anything else is unexpected.
            error_msg = "Duplicate service name '{}'".format(session['service_name_change'])
            if e.status_code == 400 and error_msg in e.message['name']:
                # Redirect the user back to the change service name screen
                flash(_('This service name is already in use'), 'error')
                return redirect(url_for('main.service_name_change', service_id=service_id))
            else:
                raise e
        else:
            # Only clear the pending rename once the update succeeded.
            session.pop('service_name_change')
            return redirect(url_for('.service_settings', service_id=service_id))
    return render_template(
        'views/service-settings/confirm.html',
        heading=_('Change your service name'),
        form=form)
@main.route("/services/<service_id>/service-settings/request-to-go-live/estimate-usage", methods=['GET', 'POST'])
@user_has_permissions('manage_service')
def estimate_usage(service_id):
    """Collect estimated yearly volumes and research consent before go-live."""
    consent_choice = {True: 'yes', False: 'no'}.get(current_service.consent_to_research)
    form = EstimateUsageForm(
        volume_email=current_service.volume_email,
        volume_sms=current_service.volume_sms,
        volume_letter=current_service.volume_letter,
        consent_to_research=consent_choice,
    )

    if form.validate_on_submit():
        current_service.update(
            volume_email=form.volume_email.data,
            volume_sms=form.volume_sms.data,
            volume_letter=form.volume_letter.data,
            consent_to_research=(form.consent_to_research.data == 'yes'),
        )
        return redirect(url_for('main.request_to_go_live', service_id=service_id))

    return render_template('views/service-settings/estimate-usage.html', form=form)
@main.route("/services/<service_id>/service-settings/request-to-go-live", methods=['GET'])
@user_has_permissions('manage_service')
def request_to_go_live(service_id):
    """Show the go-live request page, including the organisation's agreement status."""
    signed = current_service.organisation.agreement_signed
    context = {
        'show_agreement': signed is not None,
        'agreement_signed': signed,
    }
    return render_template('views/service-settings/request-to-go-live.html', **context)
@main.route("/services/<service_id>/service-settings/request-to-go-live", methods=['POST'])
@user_has_permissions('manage_service')
@user_is_gov_user
def submit_request_to_go_live(service_id):
    """Raise a Zendesk ticket asking for this service to be made live.

    Records who asked (go_live_user) and flashes a confirmation message.
    """
    zendesk_client.create_ticket(
        subject='Request to go live - {}'.format(current_service.name),
        message=(
            'Service: {service_name}\n'
            '{service_dashboard}\n'
            '\n---'
            '\nOrganisation type: {organisation_type}'
            '\nAgreement signed: {agreement}'
            '\nEmails in next year: {volume_email_formatted}'
            '\nText messages in next year: {volume_sms_formatted}'
            '\nLetters in next year: {volume_letter_formatted}'
            '\nConsent to research: {research_consent}'
            '\nOther live services: {existing_live}'
            '\n'
            '\n---'
            '\nRequest sent by {email_address}'
            '\n'
        ).format(
            service_name=current_service.name,
            service_dashboard=url_for('main.service_dashboard', service_id=current_service.id, _external=True),
            organisation_type=str(current_service.organisation_type).title(),
            agreement=current_service.organisation.as_agreement_statement_for_go_live_request(
                current_user.email_domain
            ),
            volume_email_formatted=format_thousands(current_service.volume_email),
            volume_sms_formatted=format_thousands(current_service.volume_sms),
            volume_letter_formatted=format_thousands(current_service.volume_letter),
            research_consent='Yes' if current_service.consent_to_research else 'No',
            existing_live='Yes' if current_user.live_services else 'No',
            email_address=current_user.email_address,
        ),
        ticket_type=zendesk_client.TYPE_QUESTION,
        user_email=current_user.email_address,
        user_name=current_user.name,
        tags=current_service.request_to_go_live_tags,
    )
    current_service.update(go_live_user=current_user.id)
    flash(_('Thank you for your request to go live. We’ll get back to you within one working day.'), 'default')
    return redirect(url_for('.service_settings', service_id=service_id))
@main.route("/services/<service_id>/service-settings/switch-live", methods=["GET", "POST"])
@user_is_platform_admin
def service_switch_live(service_id):
    """Platform-admin toggle for taking a service out of trial mode."""
    setting_name = "Make service live"
    form = ServiceOnOffSettingForm(name=setting_name, enabled=not current_service.trial_mode)

    if form.validate_on_submit():
        current_service.update_status(live=form.enabled.data)
        return redirect(url_for('.service_settings', service_id=service_id))

    return render_template(
        'views/service-settings/set-service-setting.html',
        title=setting_name,
        form=form,
    )
@main.route("/services/<service_id>/service-settings/switch-count-as-live", methods=["GET", "POST"])
@user_is_platform_admin
def service_switch_count_as_live(service_id):
    """Platform-admin toggle for whether this service counts in live-service stats."""
    setting_name = "Count in list of live services"
    form = ServiceOnOffSettingForm(
        name=setting_name,
        enabled=current_service.count_as_live,
        truthy='Yes',
        falsey='No',
    )

    if form.validate_on_submit():
        current_service.update_count_as_live(form.enabled.data)
        return redirect(url_for('.service_settings', service_id=service_id))

    return render_template(
        'views/service-settings/set-service-setting.html',
        title=setting_name,
        form=form,
    )
@main.route("/services/<service_id>/service-settings/permissions/<permission>", methods=["GET", "POST"])
@user_is_platform_admin
def service_set_permission(service_id, permission):
    """Platform-admin on/off switch for a single service permission."""
    try:
        title = PLATFORM_ADMIN_SERVICE_PERMISSIONS[permission]['title']
    except KeyError:
        # Unknown permission slug in the URL.
        abort(404)

    form = ServiceOnOffSettingForm(name=title, enabled=current_service.has_permission(permission))

    if form.validate_on_submit():
        current_service.force_permission(permission, on=form.enabled.data)
        return redirect(url_for(".service_settings", service_id=service_id))

    return render_template(
        'views/service-settings/set-service-setting.html',
        title=title,
        form=form,
    )
@main.route("/services/<service_id>/service-settings/can-upload-document", methods=['GET', 'POST'])
@user_is_platform_admin
def service_switch_can_upload_document(service_id):
    """Collect contact details (if missing) before toggling the upload_document permission."""
    next_url = url_for('.service_set_permission', service_id=service_id, permission='upload_document')

    # A contact link is a precondition for sending files by email.
    if current_service.contact_link:
        return redirect(next_url)

    form = ServiceContactDetailsForm()
    if form.validate_on_submit():
        current_service.update(contact_link=form.data[form.contact_details_type.data])
        return redirect(next_url)

    return render_template('views/service-settings/contact_link.html', form=form)
@main.route("/services/<service_id>/service-settings/archive", methods=['GET', 'POST'])
@user_has_permissions('manage_service')
def archive_service(service_id):
    """Confirm (GET) then perform (POST) deletion of the current service."""
    # NOTE(review): as written this 403s only when the service is already
    # inactive AND (it is in trial mode or the user is a platform admin), so an
    # active live service can be deleted by any manage_service user. That looks
    # inverted relative to the usual "only trial services or platform admins"
    # guard — confirm this condition is intentional.
    if not current_service.active and (
        current_service.trial_mode or current_user.platform_admin
    ):
        abort(403)
    if request.method == 'POST':
        service_api_client.archive_service(service_id)
        # Drop the archived service from the session so the user isn't left
        # "inside" a deleted service.
        session.pop('service_id', None)
        flash(
            _("‘%(service_name)s’ was deleted", service_name=current_service.name),
            'default_with_tick',
        )
        return redirect(url_for('.choose_account'))
    else:
        # GET: show a confirmation banner on top of the settings page.
        flash(
            '{} ‘{}’? {}'.format(_("Are you sure you want to delete"), current_service.name, _("There’s no way to undo this.")),
            'delete',
        )
        return service_settings(service_id)
@main.route("/services/<service_id>/service-settings/suspend", methods=["GET", "POST"])
@user_has_permissions('manage_service')
def suspend_service(service_id):
    """Confirm (GET) then perform (POST) suspension of the current service."""
    if request.method != 'POST':
        # GET: show a confirmation banner on top of the settings page.
        flash(
            _("This will suspend the service and revoke all API keys. Are you sure you want to suspend this service?"),
            'suspend',
        )
        return service_settings(service_id)
    service_api_client.suspend_service(service_id)
    return redirect(url_for('.service_settings', service_id=service_id))
@main.route("/services/<service_id>/service-settings/resume", methods=["GET", "POST"])
@user_has_permissions('manage_service')
def resume_service(service_id):
    """Confirm (GET) then perform (POST) resumption of a suspended service."""
    if request.method != 'POST':
        # GET: show a confirmation banner on top of the settings page.
        flash(_("This will resume the service. New API keys are required for this service to use the API"), 'resume')
        return service_settings(service_id)
    service_api_client.resume_service(service_id)
    return redirect(url_for('.service_settings', service_id=service_id))
@main.route("/services/<service_id>/service-settings/contact-link", methods=['GET', 'POST'])
@user_has_permissions('manage_service')
def service_set_contact_link(service_id):
    """Edit the contact link (url / email / phone) published for this service."""
    form = ServiceContactDetailsForm()

    if request.method == 'GET':
        # Pre-populate the field matching the type of the stored contact details.
        stored = current_service.contact_link
        stored_type = check_contact_details_type(stored)
        form.contact_details_type.data = stored_type
        getattr(form, stored_type).data = stored

    if form.validate_on_submit():
        chosen_type = form.contact_details_type.data
        current_service.update(contact_link=form.data[chosen_type])
        return redirect(url_for('.service_settings', service_id=current_service.id))

    return render_template('views/service-settings/contact_link.html', form=form)
@main.route("/services/<service_id>/service-settings/set-reply-to-email", methods=['GET'])
@user_has_permissions('manage_service')
def service_set_reply_to_email(service_id):
    """Legacy URL: redirect to the reply-to management page."""
    target = url_for('.service_email_reply_to', service_id=service_id)
    return redirect(target)
@main.route("/services/<service_id>/service-settings/sending-domain", methods=['GET', 'POST'])
@user_is_platform_admin
def service_sending_domain(service_id):
    """Platform-admin page for overriding the service's email sending domain."""
    form = SendingDomainForm()
    if request.method == 'GET':
        form.sending_domain.data = current_service.sending_domain

    if form.validate_on_submit():
        current_service.update(sending_domain=form.sending_domain.data)
        flash(_('Sending domain updated'), 'default')
        return redirect(url_for('.service_settings', service_id=service_id))

    return render_template(
        'views/service-settings/sending_domain.html',
        service_id=service_id,
        sending_domain=current_app.config["SENDING_DOMAIN"],
        form=form,
    )
@main.route("/services/<service_id>/service-settings/email-reply-to", methods=['GET'])
@user_has_permissions('manage_service', 'manage_api_keys')
def service_email_reply_to(service_id):
    """List the reply-to email addresses configured for this service."""
    template = 'views/service-settings/email_reply_to.html'
    return render_template(template)
@main.route("/services/<service_id>/service-settings/email-reply-to/add", methods=['GET', 'POST'])
@user_has_permissions('manage_service')
def service_add_email_reply_to(service_id):
    """Start verification of a new reply-to email address for this service."""
    form = ServiceReplyToEmailForm()
    # The very first reply-to address is forced to be the default.
    first_email_address = current_service.count_email_reply_to_addresses == 0
    is_default = first_email_address if first_email_address else form.is_default.data
    if form.validate_on_submit():
        try:
            # Sends a verification email; the address is only saved once it is
            # delivered (see get_service_verify_reply_to_address_partials).
            notification_id = service_api_client.verify_reply_to_email_address(
                service_id, form.email_address.data
            )["data"]["id"]
        except HTTPError as e:
            # NOTE(review): duplicate-address detection relies on an exact
            # string match with the API's message — confirm it stays in sync.
            error_msg = "Your service already uses '{}' as an email reply-to address.".format(form.email_address.data)
            if e.status_code == 400 and error_msg == e.message:
                flash(error_msg, 'error')
                return redirect(url_for('.service_email_reply_to', service_id=service_id))
            else:
                raise e
        return redirect(url_for(
            '.service_verify_reply_to_address',
            service_id=service_id,
            notification_id=notification_id,
            is_default=is_default
        ))
    return render_template(
        'views/service-settings/email-reply-to/add.html',
        form=form,
        first_email_address=first_email_address)
@main.route("/services/<service_id>/service-settings/email-reply-to/<notification_id>/verify", methods=['GET', 'POST'])
@user_has_permissions('manage_service')
def service_verify_reply_to_address(service_id, notification_id):
    """Poll page shown while a verification email to a reply-to address is in flight."""
    replace = request.args.get('replace', False)
    is_default = request.args.get('is_default', False)
    verb = _("Change") if replace else _("Add")
    return render_template(
        'views/service-settings/email-reply-to/verify.html',
        service_id=service_id,
        notification_id=notification_id,
        partials=get_service_verify_reply_to_address_partials(service_id, notification_id),
        verb=verb,
        replace=replace,
        is_default=is_default
    )
@main.route("/services/<service_id>/service-settings/email-reply-to/<notification_id>/verify.json")
@user_has_permissions('manage_service')
def service_verify_reply_to_address_updates(service_id, notification_id):
    """AJAX endpoint polled by the verify page for status updates."""
    partials = get_service_verify_reply_to_address_partials(service_id, notification_id)
    return jsonify(**partials)
def get_service_verify_reply_to_address_partials(service_id, notification_id):
    """Build the AJAX partials for the reply-to verification poll page.

    Looks up the verification notification and, once it is delivered, saves
    the reply-to address (adding it, or updating the one being replaced).
    Marks the verification as failed if the notification failed or has been
    stuck in a sending state for more than 45 seconds.

    Returns a dict with the rendered 'status' partial and a 'stop' flag
    (1 tells the client to stop polling).
    """
    form = ServiceReplyToEmailForm()
    first_email_address = current_service.count_email_reply_to_addresses == 0
    notification = notification_api_client.get_notification(current_app.config["NOTIFY_SERVICE_ID"], notification_id)
    replace = request.args.get('replace', False)
    # The query string may carry the literal string "False"; normalise it.
    replace = False if replace == "False" else replace
    existing_is_default = False
    if replace:
        existing = current_service.get_email_reply_to_address(replace)
        existing_is_default = existing['is_default']
    verification_status = "pending"
    is_default = request.args.get('is_default', False) == "True"
    if notification["status"] in DELIVERED_STATUSES:
        verification_status = "success"
        # Only save the address once; polling hits this code repeatedly.
        if notification["to"] not in [i["email_address"] for i in current_service.email_reply_to_addresses]:
            if replace:
                service_api_client.update_reply_to_email_address(
                    current_service.id, replace, email_address=notification["to"], is_default=is_default
                )
            else:
                service_api_client.add_reply_to_email_address(
                    current_service.id,
                    email_address=notification["to"],
                    is_default=is_default
                )
    created_at_no_tz = notification["created_at"][:-6]  # strip the UTC offset suffix
    # Use total_seconds(): timedelta.seconds wraps at one day, which would make
    # a notification stuck sending for >24h look recent again.
    seconds_since_sending = (
        datetime.utcnow() - datetime.strptime(created_at_no_tz, '%Y-%m-%dT%H:%M:%S.%f')
    ).total_seconds()
    if notification["status"] in FAILURE_STATUSES or (
        notification["status"] in SENDING_STATUSES and seconds_since_sending > 45
    ):
        verification_status = "failure"
        # Re-populate the form so the user can retry with the same values.
        form.email_address.data = notification['to']
        form.is_default.data = is_default
    return {
        'status': render_template(
            'views/service-settings/email-reply-to/_verify-updates.html',
            reply_to_email_address=notification["to"],
            service_id=current_service.id,
            notification_id=notification_id,
            verification_status=verification_status,
            is_default=is_default,
            existing_is_default=existing_is_default,
            form=form,
            first_email_address=first_email_address,
            replace=replace
        ),
        'stop': 0 if verification_status == "pending" else 1
    }
@main.route(
    "/services/<service_id>/service-settings/email-reply-to/<reply_to_email_id>/edit",
    methods=['GET', 'POST'],
    endpoint="service_edit_email_reply_to"
)
@main.route(
    "/services/<service_id>/service-settings/email-reply-to/<reply_to_email_id>/delete",
    methods=['GET'],
    endpoint="service_confirm_delete_email_reply_to"
)
@user_has_permissions('manage_service')
def service_edit_email_reply_to(service_id, reply_to_email_id):
    """Edit a reply-to address, or (via the second route) confirm its deletion.

    Changing the address itself re-triggers email verification; changing only
    the default flag updates the record directly.
    """
    form = ServiceReplyToEmailForm()
    reply_to_email_address = current_service.get_email_reply_to_address(reply_to_email_id)
    if request.method == 'GET':
        form.email_address.data = reply_to_email_address['email_address']
        form.is_default.data = reply_to_email_address['is_default']
    if form.validate_on_submit():
        if form.email_address.data == reply_to_email_address["email_address"]:
            # Address unchanged: only the default flag may change, and an
            # existing default can never be demoted from this screen.
            service_api_client.update_reply_to_email_address(
                current_service.id,
                reply_to_email_id=reply_to_email_id,
                email_address=form.email_address.data,
                is_default=True if reply_to_email_address['is_default'] else form.is_default.data
            )
            return redirect(url_for('.service_email_reply_to', service_id=service_id))
        try:
            # New address: send a verification email before saving it.
            notification_id = service_api_client.verify_reply_to_email_address(
                service_id, form.email_address.data
            )["data"]["id"]
        except HTTPError as e:
            # NOTE(review): exact-string match against the API's duplicate
            # message (curly quotes here, straight quotes in the add view) —
            # confirm which form the API actually returns.
            error_msg = "Your service already uses ‘{}’ as a reply-to email address.".format(form.email_address.data)
            if e.status_code == 400 and error_msg == e.message:
                flash(error_msg, 'error')
                return redirect(url_for('.service_email_reply_to', service_id=service_id))
            else:
                raise e
        return redirect(url_for(
            '.service_verify_reply_to_address',
            service_id=service_id,
            notification_id=notification_id,
            is_default=True if reply_to_email_address['is_default'] else form.is_default.data,
            replace=reply_to_email_id
        ))
    if (request.endpoint == "main.service_confirm_delete_email_reply_to"):
        flash(_('Are you sure you want to delete this reply-to email address?'), 'delete')
    return render_template(
        'views/service-settings/email-reply-to/edit.html',
        form=form,
        reply_to_email_address_id=reply_to_email_id,
    )
@main.route("/services/<service_id>/service-settings/email-reply-to/<reply_to_email_id>/delete", methods=['POST'])
@user_has_permissions('manage_service')
def service_delete_email_reply_to(service_id, reply_to_email_id):
    """Delete a reply-to email address and return to the list page."""
    service_api_client.delete_reply_to_email_address(service_id=current_service.id, reply_to_email_id=reply_to_email_id)
    return redirect(url_for('.service_email_reply_to', service_id=service_id))
@main.route("/services/<service_id>/service-settings/set-inbound-number", methods=['GET', 'POST'])
@user_has_permissions('manage_service')
def service_set_inbound_number(service_id):
    """Pick an available inbound number and enable inbound SMS for the service."""
    available = inbound_number_client.get_available_inbound_sms_numbers()
    choices = [(number['id'], number['number']) for number in available['data']]
    form = ServiceInboundNumberForm(inbound_number_choices=choices)

    if form.validate_on_submit():
        # The chosen inbound number becomes the service's default SMS sender.
        service_api_client.add_sms_sender(
            current_service.id,
            sms_sender=form.inbound_number.data,
            is_default=True,
            inbound_number_id=form.inbound_number.data
        )
        current_service.force_permission('inbound_sms', on=True)
        return redirect(url_for('.service_settings', service_id=service_id))

    return render_template(
        'views/service-settings/set-inbound-number.html',
        form=form,
        no_available_numbers=(available['data'] == []),
    )
@main.route("/services/<service_id>/service-settings/sms-prefix", methods=['GET', 'POST'])
@user_has_permissions('manage_service')
def service_set_sms_prefix(service_id):
    """Toggle whether outgoing texts are prefixed with the service name."""
    form = SMSPrefixForm(enabled='on' if current_service.prefix_sms else 'off')
    form.enabled.label.text = '{} ‘{}:’'.format(_("Start all text messages with"), current_service.name)

    if form.validate_on_submit():
        current_service.update(prefix_sms=(form.enabled.data == 'on'))
        return redirect(url_for('.service_settings', service_id=service_id))

    return render_template('views/service-settings/sms-prefix.html', form=form)
@main.route("/services/<service_id>/service-settings/set-international-sms", methods=['GET', 'POST'])
@user_has_permissions('manage_service')
def service_set_international_sms(service_id):
    """Toggle the ability to send text messages to international numbers."""
    currently_enabled = current_service.has_permission('international_sms')
    form = InternationalSMSForm(enabled='on' if currently_enabled else 'off')

    if form.validate_on_submit():
        current_service.force_permission('international_sms', on=(form.enabled.data == 'on'))
        return redirect(url_for(".service_settings", service_id=service_id))

    return render_template('views/service-settings/set-international-sms.html', form=form)
@main.route("/services/<service_id>/service-settings/set-inbound-sms", methods=['GET'])
@user_has_permissions('manage_service')
def service_set_inbound_sms(service_id):
    """Informational page about receiving inbound text messages."""
    template = 'views/service-settings/set-inbound-sms.html'
    return render_template(template)
@main.route("/services/<service_id>/service-settings/set-letters", methods=['GET'])
@user_has_permissions('manage_service')
def service_set_letters(service_id):
    """Permanent redirect to the generic channel-toggle page for letters."""
    target = url_for('.service_set_channel', service_id=current_service.id, channel='letter')
    return redirect(target, code=301)
@main.route("/services/<service_id>/service-settings/set-<channel>", methods=['GET', 'POST'])
@user_has_permissions('manage_service')
def service_set_channel(service_id, channel):
    """Toggle one of the three sending channels (email / sms / letter) on or off."""
    if channel not in {'email', 'sms', 'letter'}:
        abort(404)

    form = ServiceSwitchChannelForm(channel=channel, enabled=current_service.has_permission(channel))

    if form.validate_on_submit():
        current_service.force_permission(channel, on=form.enabled.data)
        return redirect(url_for(".service_settings", service_id=service_id))

    return render_template('views/service-settings/set-{}.html'.format(channel), form=form)
@main.route("/services/<service_id>/service-settings/set-auth-type", methods=['GET'])
@user_has_permissions('manage_service')
def service_set_auth_type(service_id):
    """Informational page about the service's sign-in / authentication type."""
    template = 'views/service-settings/set-auth-type.html'
    return render_template(template)
@main.route("/services/<service_id>/service-settings/letter-contacts", methods=['GET'])
@user_has_permissions('manage_service', 'manage_api_keys')
def service_letter_contact_details(service_id):
    """List the letter contact blocks configured for this service."""
    details = service_api_client.get_letter_contacts(service_id)
    return render_template(
        'views/service-settings/letter-contact-details.html',
        letter_contact_details=details,
    )
@main.route("/services/<service_id>/service-settings/letter-contact/add", methods=['GET', 'POST'])
@user_has_permissions('manage_service')
def service_add_letter_contact(service_id):
    """Add a letter contact block; optionally attach it to the template we came from."""
    form = ServiceLetterContactBlockForm()
    # The very first contact block is forced to be the default.
    first_contact_block = current_service.count_letter_contact_details == 0
    from_template = request.args.get('from_template')
    if form.validate_on_submit():
        new_letter_contact = service_api_client.add_letter_contact(
            current_service.id,
            contact_block=form.letter_contact_block.data.replace('\r', '') or None,
            is_default=first_contact_block if first_contact_block else form.is_default.data
        )
        if from_template:
            # Set the new contact block as the sender on the originating template.
            service_api_client.update_service_template_sender(
                service_id,
                from_template,
                new_letter_contact['data']['id'],
            )
            return redirect(
                url_for('.view_template', service_id=service_id, template_id=from_template)
            )
        return redirect(url_for('.service_letter_contact_details', service_id=service_id))
    return render_template(
        'views/service-settings/letter-contact/add.html',
        form=form,
        first_contact_block=first_contact_block,
        back_link=(
            url_for('main.view_template', template_id=from_template, service_id=current_service.id)
            if from_template
            else url_for('.service_letter_contact_details', service_id=current_service.id)
        ),
    )
@main.route(
    "/services/<service_id>/service-settings/letter-contact/<letter_contact_id>/edit",
    methods=['GET', 'POST'],
    endpoint="service_edit_letter_contact",
)
@main.route(
    "/services/<service_id>/service-settings/letter-contact/<letter_contact_id>/delete",
    methods=['GET'],
    endpoint="service_confirm_delete_letter_contact",
)
@user_has_permissions('manage_service')
def service_edit_letter_contact(service_id, letter_contact_id):
    """Edit a letter contact block, or (second route) confirm its deletion."""
    letter_contact_block = current_service.get_letter_contact_block(letter_contact_id)
    form = ServiceLetterContactBlockForm(
        letter_contact_block=letter_contact_block['contact_block']
    )
    if request.method == 'GET':
        form.is_default.data = letter_contact_block['is_default']
    if form.validate_on_submit():
        current_service.edit_letter_contact_block(
            id=letter_contact_id,
            contact_block=form.letter_contact_block.data.replace('\r', '') or None,
            # An existing default can never be demoted from this screen.
            is_default=letter_contact_block['is_default'] or form.is_default.data
        )
        return redirect(url_for('.service_letter_contact_details', service_id=service_id))
    if (request.endpoint == "main.service_confirm_delete_letter_contact"):
        flash(_("Are you sure you want to delete this contact block?"), 'delete')
    return render_template(
        'views/service-settings/letter-contact/edit.html',
        form=form,
        letter_contact_id=letter_contact_block['id'])
@main.route("/services/<service_id>/service-settings/letter-contact/make-blank-default")
@user_has_permissions('manage_service')
def service_make_blank_default_letter_contact(service_id):
    """Make 'no contact block' the default for letters."""
    current_service.remove_default_letter_contact_block()
    target = url_for('.service_letter_contact_details', service_id=service_id)
    return redirect(target)
@main.route(
    "/services/<service_id>/service-settings/letter-contact/<letter_contact_id>/delete",
    methods=['POST'],
)
@user_has_permissions('manage_service')
def service_delete_letter_contact(service_id, letter_contact_id):
    """Delete a letter contact block and return to the list page."""
    service_api_client.delete_letter_contact(service_id=current_service.id, letter_contact_id=letter_contact_id)
    return redirect(url_for('.service_letter_contact_details', service_id=current_service.id))
@main.route("/services/<service_id>/service-settings/sms-sender", methods=['GET'])
@user_has_permissions('manage_service', 'manage_api_keys')
def service_sms_senders(service_id):
    """List the SMS senders configured for this service."""
    template = 'views/service-settings/sms-senders.html'
    return render_template(template)
@main.route("/services/<service_id>/service-settings/sms-sender/add", methods=['GET', 'POST'])
@user_has_permissions('manage_service')
def service_add_sms_sender(service_id):
    """Add a new SMS sender; the first one added is forced to be the default."""
    form = ServiceSmsSenderForm()
    is_first = current_service.count_sms_senders == 0

    if form.validate_on_submit():
        service_api_client.add_sms_sender(
            current_service.id,
            sms_sender=form.sms_sender.data.replace('\r', '') or None,
            is_default=is_first if is_first else form.is_default.data
        )
        return redirect(url_for('.service_sms_senders', service_id=service_id))

    return render_template(
        'views/service-settings/sms-sender/add.html',
        form=form,
        first_sms_sender=is_first,
    )
@main.route(
    "/services/<service_id>/service-settings/sms-sender/<sms_sender_id>/edit",
    methods=['GET', 'POST'],
    endpoint="service_edit_sms_sender"
)
@main.route(
    "/services/<service_id>/service-settings/sms-sender/<sms_sender_id>/delete",
    methods=['GET'],
    endpoint="service_confirm_delete_sms_sender"
)
@user_has_permissions('manage_service')
def service_edit_sms_sender(service_id, sms_sender_id):
    """Edit an SMS sender, or (second route) confirm its deletion.

    Inbound numbers can only have their default flag changed, never the
    number itself.
    """
    sms_sender = current_service.get_sms_sender(sms_sender_id)
    is_inbound_number = sms_sender['inbound_number_id']
    if is_inbound_number:
        form = ServiceEditInboundNumberForm(is_default=sms_sender['is_default'])
    else:
        form = ServiceSmsSenderForm(**sms_sender)
    if form.validate_on_submit():
        service_api_client.update_sms_sender(
            current_service.id,
            sms_sender_id=sms_sender_id,
            # Keep the stored number for inbound numbers; strip CRs otherwise.
            sms_sender=sms_sender['sms_sender'] if is_inbound_number else form.sms_sender.data.replace('\r', ''),
            # An existing default can never be demoted from this screen.
            is_default=True if sms_sender['is_default'] else form.is_default.data
        )
        return redirect(url_for('.service_sms_senders', service_id=service_id))
    form.is_default.data = sms_sender['is_default']
    if (request.endpoint == "main.service_confirm_delete_sms_sender"):
        flash(_("Are you sure you want to delete this text message sender?"), 'delete')
    return render_template(
        'views/service-settings/sms-sender/edit.html',
        form=form,
        sms_sender=sms_sender,
        inbound_number=is_inbound_number,
        sms_sender_id=sms_sender_id
    )
@main.route(
    "/services/<service_id>/service-settings/sms-sender/<sms_sender_id>/delete",
    methods=['POST'],
)
@user_has_permissions('manage_service')
def service_delete_sms_sender(service_id, sms_sender_id):
    """Delete an SMS sender and return to the list page."""
    service_api_client.delete_sms_sender(service_id=current_service.id, sms_sender_id=sms_sender_id)
    return redirect(url_for('.service_sms_senders', service_id=service_id))
@main.route("/services/<service_id>/service-settings/set-letter-contact-block", methods=['GET', 'POST'])
@user_has_permissions('manage_service')
def service_set_letter_contact_block(service_id):
    """Edit the default letter contact block (letters permission required)."""
    if not current_service.has_permission('letter'):
        abort(403)

    form = ServiceLetterContactBlockForm(letter_contact_block=current_service.letter_contact_block)

    if form.validate_on_submit():
        current_service.update(
            letter_contact_block=form.letter_contact_block.data.replace('\r', '') or None
        )
        from_template = request.args.get('from_template')
        if from_template:
            # Came from a template editor: go back to that template.
            return redirect(url_for('.view_template', service_id=service_id, template_id=from_template))
        return redirect(url_for('.service_settings', service_id=service_id))

    return render_template('views/service-settings/set-letter-contact-block.html', form=form)
@main.route("/services/<service_id>/service-settings/set-message-limit", methods=['GET', 'POST'])
@user_is_platform_admin
def set_message_limit(service_id):
    """Platform-admin page for changing a service's daily message limit."""
    form = MessageLimit(message_limit=current_service.message_limit)
    if not form.validate_on_submit():
        return render_template('views/service-settings/set-message-limit.html', form=form)
    service_api_client.update_message_limit(service_id, form.message_limit.data)
    return redirect(url_for('.service_settings', service_id=service_id))
@main.route("/services/<service_id>/service-settings/set-free-sms-allowance", methods=['GET', 'POST'])
@user_is_platform_admin
def set_free_sms_allowance(service_id):
    """Platform-admin page for changing a service's free SMS fragment allowance."""
    form = FreeSMSAllowance(free_sms_allowance=current_service.free_sms_fragment_limit)
    if not form.validate_on_submit():
        return render_template('views/service-settings/set-free-sms-allowance.html', form=form)
    billing_api_client.create_or_update_free_sms_fragment_limit(service_id, form.free_sms_allowance.data)
    return redirect(url_for('.service_settings', service_id=service_id))
@main.route("/services/<service_id>/service-settings/set-email-branding", methods=['GET', 'POST'])
@user_is_platform_admin
def service_set_email_branding(service_id):
    """Platform-admin page for choosing the service's email branding."""
    email_branding = email_branding_client.get_all_email_branding()

    current_branding = current_service.email_branding_id
    if current_branding is None:
        # No explicit branding: fall back to the default branding option for
        # the service's default language.
        if current_service.default_branding_is_french is True:
            current_branding = FieldWithLanguageOptions.FRENCH_OPTION_VALUE
        else:
            current_branding = FieldWithLanguageOptions.ENGLISH_OPTION_VALUE

    form = SetEmailBranding(
        all_branding_options=get_branding_as_value_and_label(email_branding),
        current_branding=current_branding,
    )

    if form.validate_on_submit():
        return redirect(url_for(
            '.service_preview_email_branding',
            service_id=service_id,
            branding_style=form.branding_style.data,
        ))

    return render_template(
        'views/service-settings/set-email-branding.html',
        form=form,
        search_form=SearchByNameForm()
    )
@main.route("/services/<service_id>/service-settings/preview-email-branding", methods=['GET', 'POST'])
@user_is_platform_admin
def service_preview_email_branding(service_id):
    """Preview and confirm an email-branding choice."""
    form = PreviewBranding(branding_style=request.args.get('branding_style', None))

    # The two language options map onto "no custom branding" plus a flag.
    chosen = form.branding_style.data
    default_branding_is_french = None
    if chosen == FieldWithLanguageOptions.ENGLISH_OPTION_VALUE:
        default_branding_is_french = False
    elif chosen == FieldWithLanguageOptions.FRENCH_OPTION_VALUE:
        default_branding_is_french = True

    if form.validate_on_submit():
        if default_branding_is_french is not None:
            current_service.update(
                email_branding=None,
                default_branding_is_french=default_branding_is_french
            )
        else:
            current_service.update(email_branding=form.branding_style.data)
        return redirect(url_for('.service_settings', service_id=service_id))

    return render_template(
        'views/service-settings/preview-email-branding.html',
        form=form,
        service_id=service_id,
        action=url_for('main.service_preview_email_branding', service_id=service_id),
    )
@main.route("/services/<service_id>/service-settings/set-letter-branding", methods=['GET', 'POST'])
@user_is_platform_admin
def service_set_letter_branding(service_id):
    """Platform-admin page for choosing the service's letter branding."""
    all_branding = letter_branding_client.get_all_letter_branding()
    form = SetLetterBranding(
        all_branding_options=get_branding_as_value_and_label(all_branding),
        current_branding=current_service.letter_branding_id,
    )

    if form.validate_on_submit():
        return redirect(url_for(
            '.service_preview_letter_branding',
            service_id=service_id,
            branding_style=form.branding_style.data,
        ))

    return render_template(
        'views/service-settings/set-letter-branding.html',
        form=form,
        search_form=SearchByNameForm()
    )
@main.route("/services/<service_id>/service-settings/preview-letter-branding", methods=['GET', 'POST'])
@user_is_platform_admin
def service_preview_letter_branding(service_id):
    """Preview and confirm a letter-branding choice."""
    form = PreviewBranding(branding_style=request.args.get('branding_style'))

    if form.validate_on_submit():
        current_service.update(letter_branding=form.branding_style.data)
        return redirect(url_for('.service_settings', service_id=service_id))

    return render_template(
        'views/service-settings/preview-letter-branding.html',
        form=form,
        service_id=service_id,
        action=url_for('main.service_preview_letter_branding', service_id=service_id),
    )
@main.route("/services/<service_id>/service-settings/request-letter-branding", methods=['GET', 'POST'])
@user_has_permissions('manage_service', 'manage_templates')
def request_letter_branding(service_id):
    """Page for asking the support team for custom letter branding."""
    from_template = request.args.get('from_template')
    return render_template(
        'views/service-settings/request-letter-branding.html',
        from_template=from_template,
    )
@main.route("/services/<service_id>/service-settings/link-service-to-organisation", methods=['GET', 'POST'])
@user_is_platform_admin
def link_service_to_organisation(service_id):
    """Platform-admin page linking a service to an organisation."""
    all_organisations = organisations_client.get_organisations()
    current_org = organisations_client.get_service_organisation(service_id).get('id', None)

    form = LinkOrganisationsForm(
        choices=convert_dictionary_to_wtforms_choices_format(all_organisations, 'id', 'name'),
        organisations=current_org,
    )

    if form.validate_on_submit():
        # Only hit the API when the selection actually changed.
        if form.organisations.data != current_org:
            organisations_client.update_service_organisation(service_id, form.organisations.data)
        return redirect(url_for('.service_settings', service_id=service_id))

    return render_template(
        'views/service-settings/link-service-to-organisation.html',
        has_organisations=all_organisations,
        form=form,
        search_form=SearchByNameForm(),
    )
@main.route("/services/<service_id>/branding-request/email", methods=['GET', 'POST'])
@user_has_permissions('manage_service')
def branding_request(service_id):
    """Let a service choose its email branding or request a custom logo.

    Offers the default English/French GC logos, plus the service's existing
    custom logo when one is set. On submit: uploads any new logo file, emails
    a branding request to the platform team, and — when a default logo was
    picked — switches the service back to default branding.
    """
    current_branding = current_service.email_branding_id
    cdn_url = get_logo_cdn_domain()
    default_en_filename = "https://{}/gov-canada-en.svg".format(cdn_url)
    default_fr_filename = "https://{}/gov-canada-fr.svg".format(cdn_url)
    # Choice labels carry the preview image URL after a '||' separator
    # (presumably parsed apart by the template — confirm).
    choices = [
        ('__FIP-EN__', _('English GC logo') + '||' + default_en_filename),
        ('__FIP-FR__', _('French GC logo') + '||' + default_fr_filename),
    ]
    if current_branding is None:
        # No custom branding: pre-select the default logo matching the
        # service's default language.
        current_branding = (FieldWithLanguageOptions.FRENCH_OPTION_VALUE if
                            current_service.default_branding_is_french is True else
                            FieldWithLanguageOptions.ENGLISH_OPTION_VALUE)
        branding_style = current_branding
    else:
        # Custom branding exists: add it as a third choice and pre-select it.
        current_branding_filename = "https://{}/{}".format(cdn_url, current_service.email_branding['logo'])
        branding_style = 'custom'
        choices.append(('custom', _('Custom {} logo').format(current_service.name) + '||' + current_branding_filename))
    form = SelectLogoForm(
        label=_('Type of logo'),
        choices=choices,
        branding_style=branding_style,
    )
    upload_filename = None
    if form.validate_on_submit():
        file_submitted = form.file.data
        if file_submitted:
            # Store the submitted logo so the request email can link to it.
            upload_filename = upload_email_logo(
                file_submitted.filename,
                file_submitted,
                current_app.config['AWS_REGION'],
                user_id=session["user_id"]
            )
        current_user.send_branding_request(current_service.id, current_service.name, upload_filename)
        default_branding_is_french = None
        branding_choice = form.branding_style.data
        if branding_choice == 'custom' or file_submitted:
            # Custom logo requested: branding change is applied later by an
            # admin, so nothing to update here.
            default_branding_is_french = None
        else:
            default_branding_is_french = (branding_choice == FieldWithLanguageOptions.FRENCH_OPTION_VALUE)
        if default_branding_is_french is not None:
            # A default logo was picked: drop any custom branding now.
            current_service.update(
                email_branding=None,
                default_branding_is_french=default_branding_is_french
            )
        return redirect(url_for('.service_settings', service_id=service_id))
    return render_template(
        'views/service-settings/branding/manage-email-branding.html',
        form=form,
        using_custom_branding=current_service.email_branding_id is not None,
        cdn_url=cdn_url,
        upload_filename=upload_filename,
    )
@main.route("/services/<service_id>/data-retention", methods=['GET'])
@user_is_platform_admin
def data_retention(service_id):
    """Render the service's data-retention settings page."""
    return render_template('views/service-settings/data-retention.html')
@main.route("/services/<service_id>/data-retention/add", methods=['GET', 'POST'])
@user_is_platform_admin
def add_data_retention(service_id):
    """Create a data-retention rule for one notification type."""
    form = ServiceDataRetentionForm()
    if not form.validate_on_submit():
        return render_template(
            'views/service-settings/data-retention/add.html',
            form=form
        )
    service_api_client.create_service_data_retention(
        service_id,
        form.notification_type.data,
        form.days_of_retention.data,
    )
    return redirect(url_for('.data_retention', service_id=service_id))
@main.route("/services/<service_id>/data-retention/<data_retention_id>/edit", methods=['GET', 'POST'])
@user_is_platform_admin
def edit_data_retention(service_id, data_retention_id):
    """Change the number of retention days on an existing rule."""
    retention_item = current_service.get_data_retention_item(data_retention_id)
    form = ServiceDataRetentionEditForm(days_of_retention=retention_item['days_of_retention'])
    if form.validate_on_submit():
        service_api_client.update_service_data_retention(
            service_id,
            data_retention_id,
            form.days_of_retention.data,
        )
        return redirect(url_for('.data_retention', service_id=service_id))
    return render_template(
        'views/service-settings/data-retention/edit.html',
        form=form,
        data_retention_id=data_retention_id,
        notification_type=retention_item['notification_type']
    )
def get_branding_as_value_and_label(email_branding):
    """Turn a list of branding dicts into (id, name) pairs for form choices."""
    pairs = []
    for option in email_branding:
        pairs.append((option['id'], option['name']))
    return pairs
def convert_dictionary_to_wtforms_choices_format(dictionary, value, label):
    """Build WTForms-style (value, label) choice tuples from a list of dicts."""
    choices = []
    for item in dictionary:
        choices.append((item[value], item[label]))
    return choices
def check_contact_details_type(contact_details):
    """Classify free-text contact details as a url, email address or phone number.

    Anything starting with 'http' is a URL; otherwise the presence of '@'
    marks an email address, and everything else is treated as a phone number.
    """
    if contact_details.startswith('http'):
        return 'url'
    return 'email_address' if '@' in contact_details else 'phone_number'
| 38.097541
| 128
| 0.702382
|
4a03ee265496a5d9beb1ef281c794d582893ae95
| 5,518
|
py
|
Python
|
tests/notebooks.py
|
colour-science/trimesh
|
ee5db2ac81b2357886d854dfa1436b5e4ec5e8d8
|
[
"MIT"
] | 2
|
2019-12-10T22:40:58.000Z
|
2022-01-28T03:49:11.000Z
|
tests/notebooks.py
|
colour-science/trimesh
|
ee5db2ac81b2357886d854dfa1436b5e4ec5e8d8
|
[
"MIT"
] | 1
|
2020-03-27T14:21:14.000Z
|
2020-03-27T15:25:49.000Z
|
tests/notebooks.py
|
colour-science/trimesh
|
ee5db2ac81b2357886d854dfa1436b5e4ec5e8d8
|
[
"MIT"
] | 1
|
2021-05-01T04:05:02.000Z
|
2021-05-01T04:05:02.000Z
|
import os
import sys
import json
import inspect
import subprocess
import numpy as np
# directory containing this file, resolved from the current stack frame so it
# is correct no matter where the script is launched from
cwd = os.path.dirname(os.path.abspath(
    inspect.getfile(inspect.currentframe())))
def load_notebook(file_obj):
    """
    Load an ipynb file into a cleaned and stripped string that can
    be ran with `exec`

    The motivation for this is to check ipynb examples with CI so
    they don't get silently broken and confusing.

    Arguments
    ----------
    file_obj : open file object

    Returns
    ----------
    script : str
      Cleaned script which can be passed to exec
    """
    raw = json.load(file_obj)
    # flatten the source lines of every cell; a plain comprehension (rather
    # than np.hstack) also handles a notebook with no code cells, where
    # np.hstack([]) would raise
    lines = [line
             for cell in raw['cells'] if 'source' in cell
             for line in cell['source']]
    script = exclude_calls(lines)
    return script
def exclude_calls(
        lines,
        exclude=('%matplotlib',
                 '%pylab',
                 'show',
                 'plt',
                 'save_image',
                 '?')):
    """
    Exclude certain calls based on substrings, replacing
    them with pass statements.

    Parameters
    -------------
    lines : (n, ) str
      Lines making up a Python script
    exclude : (m, ) str
      Substrings to exclude lines based off of
      (a tuple default rather than a mutable list)

    Returns
    -------------
    joined : str
      Lines combined with newline; always ends with a newline
    """
    result = []
    for line in lines:
        # skip lines that only have whitespace or comments
        strip = line.strip()
        if len(strip) == 0 or strip.startswith('#'):
            continue
        # if the line has a blacklisted phrase switch it with a pass
        # statement; function definitions are kept even when they match
        if not strip.startswith('def ') and any(i in line for i in exclude):
            # replace the statement with `pass`, keeping the indentation
            indent = len(line) - len(line.lstrip(' '))
            line_modified = (' ' * indent) + 'pass'
        else:
            # remove trailing whitespace
            line_modified = line.rstrip()
        # skip consecutive duplicate lines
        if result and line_modified == result[-1]:
            continue
        result.append(line_modified)
    # recombine into string and add trailing newline
    return '\n'.join(result) + '\n'
def to_pass(line):
    """
    Replace a line of code with a pass statement, with
    the correct number of leading spaces

    Arguments
    ----------
    line : str, line of code

    Returns
    ----------
    passed : str, line of code with same leading spaces
             but code replaced with pass statement
    """
    indent = len(line) - len(line.lstrip(' '))
    return '{}pass'.format(' ' * indent)
def render_notebook(file_name, out_name, nbconvert='jupyter'):
    """
    Render an IPython notebook to an HTML file.
    """
    # resolve both paths before handing them to the subprocess
    command = [
        nbconvert,
        'nbconvert',
        '--to',
        'html',
        os.path.abspath(file_name),
        '--output',
        os.path.abspath(out_name)]
    subprocess.check_call(command)
def render_examples(out_dir, in_dir=None, ext='ipynb'):
    """
    Render all IPython notebooks in a directory to HTML.
    """
    if in_dir is None:
        # default to the ../examples directory relative to this file
        in_dir = os.path.abspath(os.path.join(cwd, '../examples'))
    for file_name in os.listdir(in_dir):
        # only process files with the requested extension
        parts = file_name.split('.')
        if parts[-1] != ext:
            continue
        source_path = os.path.join(in_dir, file_name)
        target_path = os.path.join(
            out_dir, '.'.join(parts[:-1]) + '.html')
        render_notebook(source_path, target_path)
def main():
    """Dispatch on sys.argv.

    `examples <out_dir>` renders every example notebook to HTML;
    `exec <file>` loads a .py or .ipynb file, strips display calls, and
    runs it with exec; with `ci` also in argv, blacklisted examples are
    skipped.
    """
    # examples which we're not going to run in CI
    # widget.py opens a window and does a bunch of openGL stuff
    ci_blacklist = ['widget.py',
                    'voxel.py',
                    'voxel_fillers.py',
                    'voxel_silhouette.py']
    if "examples" in sys.argv:
        # the argument after "examples" is the output directory
        out_path = sys.argv[sys.argv.index("examples") + 1]
        render_examples(out_path)
    elif "exec" in sys.argv:
        # exec the script passed
        file_name = sys.argv[sys.argv.index("exec") + 1].strip()
        # we want to skip some of these examples in CI
        if 'ci' in sys.argv and os.path.basename(file_name) in ci_blacklist:
            print('{} in CI blacklist: skipping!'.format(file_name))
            return
        # skip files that don't exist
        if not os.path.exists(file_name):
            return
        if file_name.lower().endswith('.ipynb'):
            # ipython notebooks
            with open(file_name, 'r') as file_obj:
                script = load_notebook(file_obj)
        elif file_name.lower().endswith('.py'):
            # regular python files
            with open(file_name, 'r') as file_obj:
                script = exclude_calls(file_obj.read().split('\n'))
        else:
            # skip other types of files
            return
        print('running {}'.format(file_name))
        try:
            exec(script, globals())
        except BaseException as E:
            # print the cleaned script before re-raising to aid debugging
            print('failed {}!\n\nscript was:\n{}\n\n'.format(file_name, script))
            raise E
if __name__ == '__main__':
    """
    Load and run a notebook if a file name is passed.
    """
    # all argument handling lives in main()
    main()
| 28.297436
| 80
| 0.569047
|
4a03ee9eb5b9f1623408d903facb0c0ce55d9557
| 2,904
|
py
|
Python
|
src/knarrow/cli/__main__.py
|
InCogNiTo124/knarrow
|
b0a19273a27e68899d982bcc0bf0938c60d3ec26
|
[
"Apache-2.0"
] | 2
|
2021-10-10T11:12:53.000Z
|
2021-12-14T13:55:30.000Z
|
src/knarrow/cli/__main__.py
|
InCogNiTo124/knarrow
|
b0a19273a27e68899d982bcc0bf0938c60d3ec26
|
[
"Apache-2.0"
] | 17
|
2021-09-30T21:51:28.000Z
|
2022-03-27T23:33:17.000Z
|
src/knarrow/cli/__main__.py
|
InCogNiTo124/knarrow
|
b0a19273a27e68899d982bcc0bf0938c60d3ec26
|
[
"Apache-2.0"
] | null | null | null |
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
from collections import Counter
from functools import partial
from pathlib import Path
from knarrow import find_knee
def gte_0(value):
    """argparse ``type`` callback: parse *value* as a non-negative float.

    Raises ValueError for negative input — argparse converts that into a
    clean usage error, whereas the previous ``assert`` raised an uncaught
    AssertionError and is silently stripped under ``python -O``.
    """
    x = float(value)
    if x < 0.0:
        raise ValueError("expected a value >= 0, got {!r}".format(value))
    return x
# All knee-detection methods accepted by `knarrow.find_knee`; the CLI offers
# these choices plus the "all" ensemble option.
METHODS = [
    "angle",
    "c_method",
    "distance",
    "distance_adjacent",
    "kneedle",
    "menger_anchored",
    "menger_successive",
    "ols_swiping",
]
def get_parser():
    """Build the ``knarrow`` command-line argument parser.

    Defaults: method "all" (majority vote over METHODS), unsorted input,
    no smoothing, space-delimited values, index output, and STDIN ("-")
    as the input file.
    """
    parser = ArgumentParser(prog="knarrow", formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "-m", "--method", choices=(["all"] + METHODS), default="all", help="select the knee searching method"
    )
    parser.add_argument(
        "--sort",
        action="store_true",
        # typo fixes in help text: "is assumes" -> "it assumes"
        help="sort the values before the knee search. By default it assumes the input is already sorted",
    )
    # typo fix in help text: "cublic" -> "cubic"
    parser.add_argument("--smoothing", default=0.0, type=gte_0, help="cubic spline smoothing parameter")
    parser.add_argument(
        "-d", "--delimiter", default=None, help="split the values with DELIMITER. If None, split by space"
    )
    parser.add_argument(
        "-o",
        "--output",
        choices=["index", "value"],
        default="index",
        help=(
            "if output is `value`, this will return the row of the input file where the knee was detected. "
            "if output is `index`, the index of that row will be returned"
        ),
    )
    parser.add_argument("files", nargs="*", default=["-"], help="a list of files. STDIN is denoted with `-`.")
    return parser
def cli(method="all", files=None, sort=False, delimiter=None, output=None, smoothing=None):
    """Run knee detection on each input file and print one result line per file.

    Args mirror the argparse options built in `get_parser`. With
    method="all", every method in METHODS votes and the most common knee
    index wins. Prints "<file name> <result>" where result is either the
    knee's row index or the original row text, depending on `output`.

    NOTE(review): `files` defaults to None but is iterated directly, so
    calling cli() without arguments raises TypeError; `main` always passes
    the argparse default ["-"] — confirm before calling this directly.
    """
    for filename in files:
        # "-" means read from standard input
        path = Path("/dev/stdin" if filename == "-" else filename)
        with path.open("r") as file:
            rows = list(map(str.strip, file))
        # parse every row into a tuple of floats (one value per column)
        split = partial(str.split, sep=delimiter)
        values = map(split, rows)
        numbers = list(tuple(float(value) for value in row) for row in values)
        # indices map sorted positions back to original row numbers
        indices = list(range(len(numbers)))
        if sort:
            indices.sort(key=lambda i: numbers[i])
            # NOTE(review): indices are sorted by the whole tuple but numbers
            # by only the first element for multi-column rows — confirm the
            # two orderings are meant to agree.
            key_function = (lambda x: x) if len(numbers[0]) == 1 else (lambda x: x[0])
            numbers.sort(key=key_function)
        if method == "all":
            # majority vote over all individual methods
            counter = Counter([find_knee(numbers, method=m, sort=False, smoothing=smoothing) for m in METHODS])
            most_common = counter.most_common(1).pop(0)
            knee = most_common[0]
        else:
            knee = find_knee(numbers, method=method, sort=False, smoothing=smoothing)
        result = indices[knee] if output == "index" else rows[indices[knee]]
        print(path.name, result)
    return
def main():
    """Entry point: parse command-line arguments and dispatch to `cli`."""
    options = vars(get_parser().parse_args())
    exit(cli(**options))
if __name__ == "__main__":
    # standard script entry point
    main()
| 32.629213
| 115
| 0.608471
|
4a03eee3d357a7cbaff2f4dbd00169ec329e7f8f
| 28,339
|
py
|
Python
|
model.py
|
fzohra/despurold
|
bf526d608c38e29c025309f1e4925598f161286e
|
[
"Apache-2.0"
] | null | null | null |
model.py
|
fzohra/despurold
|
bf526d608c38e29c025309f1e4925598f161286e
|
[
"Apache-2.0"
] | null | null | null |
model.py
|
fzohra/despurold
|
bf526d608c38e29c025309f1e4925598f161286e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Code defining LEO inner loop.
See "Meta-Learning with Latent Embedding Optimization" by Rusu et al.
(https://arxiv.org/pdf/1807.05960.pdf).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
from six.moves import zip
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
import data as data_module
import wandb
def get_orthogonality_regularizer(orthogonality_penalty_weight):
  """Returns the orthogonality regularizer."""
  def orthogonality(weight):
    """Calculates the layer-wise penalty encouraging orthogonality."""
    with tf.name_scope(None, "orthogonality", [weight]) as name:
      # cosine similarity between every pair of rows of `weight`
      w2 = tf.matmul(weight, weight, transpose_b=True)
      wn = tf.norm(weight, ord=2, axis=1, keepdims=True) + 1e-32
      correlation_matrix = w2 / tf.matmul(wn, wn, transpose_b=True)
      matrix_size = correlation_matrix.get_shape().as_list()[0]
      base_dtype = weight.dtype.base_dtype
      identity = tf.eye(matrix_size, dtype=base_dtype)
      # mean squared deviation from the identity: zero iff every pair of
      # distinct rows is orthogonal
      weight_corr = tf.reduce_mean(
          tf.squared_difference(correlation_matrix, identity))
      return tf.multiply(
          tf.cast(orthogonality_penalty_weight, base_dtype),
          weight_corr,
          name=name)
  return orthogonality
class LEO(snt.AbstractModule):
"""Sonnet module implementing the inner loop of LEO."""
  def __init__(self, config=None, use_64bits_dtype=True, name="leo"):
    """Constructs a LEO module.

    Args:
      config: dict of hyperparameters; the keys read here are the unroll
        lengths, learning-rate initializers, num_latents, dropout_rate, and
        the four loss-weight coefficients annotated below.
      use_64bits_dtype: if True build the graph with float64/int64, else
        float32/int32.
      name: sonnet module name.
    """
    super(LEO, self).__init__(name=name)
    # number of feature splits used by the get_split_features* helpers
    self.n_splits = 8
    self._float_dtype = tf.float64 if use_64bits_dtype else tf.float32
    self._int_dtype = tf.int64 if use_64bits_dtype else tf.int32
    self._inner_unroll_length = config["inner_unroll_length"]
    self._finetuning_unroll_length = config["finetuning_unroll_length"]
    self._inner_lr_init = config["inner_lr_init"]
    self._finetuning_lr_init = config["finetuning_lr_init"]
    self._num_latents = config["num_latents"]
    self._dropout_rate = config["dropout_rate"]
    self._kl_weight = config["kl_weight"]  # beta
    self._encoder_penalty_weight = config["encoder_penalty_weight"]  # gamma
    self._l2_penalty_weight = config["l2_penalty_weight"]  # lambda_1
    # lambda_2
    self._orthogonality_penalty_weight = config["orthogonality_penalty_weight"]
    assert self._inner_unroll_length > 0, ("Positive unroll length is necessary"
                                           " to create the graph")
  def _build(self, data, is_meta_training=True):
    """Connects the LEO module to the graph, creating the variables.

    Args:
      data: A data_module.ProblemInstance containing Tensors with the
          following shapes:
          - tr_input: (N, K, dim)
          - tr_output: (N, K, 1)
          - tr_info: (N, K)
          - val_input: (N, K_valid, dim)
          - val_output: (N, K_valid, 1)
          - val_info: (N, K_valid)
          where N is the number of classes (as in N-way) and K and the and
          K_valid are numbers of training and validation examples within a
          problem instance correspondingly (as in K-shot), and dim is the
          dimensionality of the embedding.
      is_meta_training: A boolean describing whether we run in the training
          mode.

    Returns:
      A tuple of (regularized batch validation loss, batch validation
      accuracy, per-class generalization loss, and nine diagnostic tensors:
      the pre/post-adaptation KL components, per-z_n KLs, mean KLs, latents,
      and the `spurious` anchor — each concatenated with the training labels
      when meta-training, raw otherwise).
    """
    if isinstance(data, list):
      data = data_module.ProblemInstance(*data)
    self.is_meta_training = is_meta_training
    self.save_problem_instance_stats(data.tr_input)
    latents, kl, kl_components, kl_zn, distribution_params = self.forward_encoder(data)
    tr_loss, adapted_classifier_weights, encoder_penalty, corr_penalty, adapted_latents, adapted_kl, adapted_kl_components, adapted_kl_zn, spurious = self.leo_inner_loop(
        data, latents, distribution_params)
    val_loss, val_accuracy = self.finetuning_inner_loop(
        data, tr_loss, adapted_classifier_weights)
    # Research notes (original author):
    # tr_loss / val_loss: can we observe these for each latent component?
    # compute generalization_loss = val_loss - tr_loss; if it is high for a
    # latent component, simply threshold and drop that component. Graph the
    # per-component generalization loss during training. Also: remove
    # correlations between latent space gradient dimensions.
    val_loss += self._kl_weight * kl
    val_loss += self._encoder_penalty_weight * encoder_penalty
    # The l2 regularization is already added to the graph when constructing
    # the snt.Linear modules. We pass the orthogonality regularizer separately,
    # because it is not used in self.grads_and_vars.
    regularization_penalty = (
        self._l2_regularization + self._decoder_orthogonality_reg)
    batch_val_loss = tf.reduce_mean(val_loss)
    batch_val_accuracy = tf.reduce_mean(val_accuracy)
    # NOTE(review): the [5, 1] reshape (and the tilings below) hard-code
    # 5-way / 64-latent configurations — confirm before changing config.
    batch_generalization_loss = tf.reshape(tf.reduce_mean(val_loss, 1), [5,1]) - tr_loss
    if self.is_meta_training:
      # pair every diagnostic tensor with its training label so it can be
      # analyzed per class downstream
      tr_out = tf.cast(data.tr_output, dtype=tf.float32)
      tr_out_tiled = tf.tile(tr_out, multiples=[1, 1, 64])
      tr_out_tiled_expanded = tf.expand_dims(tr_out_tiled, -1)
      kl_components_y = tf.concat([tr_out_tiled_expanded, tf.expand_dims(kl_components, -1)], axis=-1)
      adapted_kl_components_y = tf.concat([tr_out_tiled_expanded, tf.expand_dims(adapted_kl_components, -1)], axis=-1)
      kl_zn_y = tf.concat([tf.squeeze(tr_out, -1), kl_zn], axis=-1)
      adapted_kl_zn_y = tf.concat([tf.squeeze(tr_out, -1), adapted_kl_zn], axis=-1)
      latents_y = tf.concat([tr_out_tiled_expanded, tf.expand_dims(latents, -1)], axis=-1)
      adapted_latents_y = tf.concat([tr_out_tiled_expanded, tf.expand_dims(adapted_latents, -1)], axis=-1)
      spurious_y = tf.concat([tr_out_tiled_expanded, tf.expand_dims(spurious, -1)], axis=-1)
    else:
      # at meta-test time the raw tensors are returned without labels
      kl_components_y = kl_components
      adapted_kl_components_y = adapted_kl_components
      kl_zn_y = kl_zn
      adapted_kl_zn_y = adapted_kl_zn
      latents_y = latents
      adapted_latents_y = adapted_latents
      spurious_y = spurious
    return batch_val_loss + regularization_penalty, batch_val_accuracy, batch_generalization_loss, \
        kl_components_y, adapted_kl_components_y, kl_zn_y, adapted_kl_zn_y, kl, adapted_kl, latents_y, adapted_latents_y, spurious_y
# def l2_regularizer(self, phi, theta):
# reg = tf.reduce_sum(tf.square(phi - theta))
# return 0*5 * 1e-8 * reg
  @snt.reuse_variables
  def leo_inner_loop(self, data, latents, distribution_params):
    """Adapts the latents with gradient steps on the training loss.

    Starting from the encoder's latents, repeatedly decodes classifier
    weights, computes the training loss plus an l2 pull towards the
    normalized starting latents (`spurious`), and steps the latents along
    the negative gradient with a learned per-dimension learning rate.

    Args:
      data: a data_module.ProblemInstance.
      latents: latent codes produced by `forward_encoder`.
      distribution_params: the means/stddevs the latents were sampled from,
        used to measure the KL of the adapted latents afterwards.

    Returns:
      Tuple of (loss, classifier_weights, encoder_penalty, corr_penalty,
      adapted_latents, adapted_kl, adapted_kl_components, adapted_kl_zn,
      spurious).
    """
    with tf.variable_scope("leo_inner"):
      # learned per-latent-dimension inner-loop learning rate
      inner_lr = tf.get_variable(
          "lr", [1, 1, self._num_latents],
          dtype=self._float_dtype,
          initializer=tf.constant_initializer(self._inner_lr_init))
    starting_latents = latents
    # l2-normalized starting latents used as the anchor for corr_penalty.
    # NOTE(review): normalization is over axis 0 (across classes), not the
    # feature axis — confirm this is intended. An alternative
    # (get_split_features_full(starting_latents, "l2n")) was commented out.
    starting_latents_split = tf.nn.l2_normalize(starting_latents, axis=0)
    # (A large block of commented-out experiments lived here: per-split
    # decoding with max-loss split selection, MSE/KL penalties between the
    # latents and the decoder output, and per-class spurious-split
    # selection. Removed for readability; see version control history.)
    spurious = starting_latents_split
    loss, theta = self.forward_decoder(data, latents, tf.zeros([5, 1, 640], dtype=tf.float32))
    # NOTE(original): 5 fixed adaptation steps to "simulate convergence",
    # intentionally not self._inner_unroll_length.
    for i in range(5):
      # pull the latents towards the normalized starting latents
      corr_penalty = tf.nn.l2_loss((latents-spurious))
      loss += 0.00001*corr_penalty
      loss_grad = tf.gradients(loss, latents)  # dL_train/dz
      latents -= inner_lr * loss_grad[0]
      loss, classifier_weights = self.forward_decoder(data, latents, theta)
    # after adapting the latents, measure how large the divergence is
    # (on average and for each component)
    adapted_latents, adapted_kl, adapted_kl_components, adapted_kl_zn = self.kl_divergence_given_latents(distribution_params, latents)
    if self.is_meta_training:
      # penalize the encoder for producing latents far from the adapted
      # ones; stop_gradient keeps the adaptation path out of this term,
      # reducing the load of the adaptation procedure
      encoder_penalty = tf.losses.mean_squared_error(
          labels=tf.stop_gradient(latents), predictions=starting_latents)
      encoder_penalty = tf.cast(encoder_penalty, self._float_dtype)
    else:
      encoder_penalty = tf.constant(0., self._float_dtype)
    return loss, classifier_weights, encoder_penalty, corr_penalty, adapted_latents, adapted_kl, adapted_kl_components, adapted_kl_zn, spurious
  def get_split_features_full(self, data, method="none"):
    """Returns 8 copies of `data`, each with one feature split l2-normalized.

    Each of the 8 contiguous eighths of the (64-wide) last dimension is
    normalized in turn and spliced back between its unmodified neighbours.
    NOTE(review): `method` is accepted but never used, and the
    normalization axis is 0 rather than the feature axis — confirm both.
    """
    split_dim = int(64 / 8)
    split_data = []
    for i in range(8):
      start_idx = split_dim * i
      end_idx = split_dim * i + split_dim
      data_i = data[:, :, start_idx:end_idx]
      data_t = tf.nn.l2_normalize(data_i, axis=0)
      # re-assemble: everything before the split, the normalized split,
      # everything after it
      start_stack = 0
      end_stack = start_idx
      data_before = data[:, :, start_stack:end_stack]
      start_stack_after = end_idx
      end_stack_after = 64
      data_after = data[:, :, start_stack_after:end_stack_after]
      full = tf.concat([data_before, data_t, data_after], -1)
      split_data.append(full)
    return split_data
  def get_split_features(self, data, center, method="none"):
    """Splits the (64-wide) last dimension into 8 slices, preprocessing each.

    Args:
      data: tensor whose last axis is split into 8 contiguous slices.
      center: optional centering tensor, sliced the same way and passed to
        `preprocess_split` (used only by the "cl2n" method).
      method: preprocessing mode forwarded to `preprocess_split`.

    Returns:
      A list of the 8 preprocessed slices.
    """
    split_dim = int(64 / 8)
    split_data = []
    for i in range(8):
      start_idx = split_dim * i
      end_idx = split_dim * i + split_dim
      data_i = data[:, :, start_idx:end_idx]
      if center is not None:
        center_i = center[:, :, start_idx:end_idx]
      else:
        center_i = None
      data_i = self.preprocess_split(data_i, center_i, method)
      split_data.append(data_i)
    return split_data
def preprocess_split(self, data, center=None, method="none"):
if method == "none":
return data
elif method == "l2n":
return tf.nn.l2_normalize(data, axis=-1)
elif method == "cl2n":
data = tf.nn.l2_normalize(data, axis=-1)
return tf.nn.l2_normalize(data - center, axis=-1)
  @snt.reuse_variables
  def finetuning_inner_loop(self, data, leo_loss, classifier_weights):
    """Fine-tunes the decoded classifier weights directly in weight space.

    Args:
      data: a data_module.ProblemInstance.
      leo_loss: training loss after latent-space adaptation.
      classifier_weights: weights decoded from the adapted latents.

    Returns:
      (val_loss, val_accuracy) of the fine-tuned weights on the
      validation split.
    """
    tr_loss = leo_loss
    with tf.variable_scope("finetuning"):
      # learned per-dimension learning rate for weight fine-tuning
      finetuning_lr = tf.get_variable(
          "lr", [1, 1, self.embedding_dim],
          dtype=self._float_dtype,
          initializer=tf.constant_initializer(self._finetuning_lr_init))
    # directly fine tune the weights to reduce the training loss
    for _ in range(self._finetuning_unroll_length):
      loss_grad = tf.gradients(tr_loss, classifier_weights)
      classifier_weights -= finetuning_lr * loss_grad[0]
      tr_loss, _ = self.calculate_inner_loss(data.tr_input, data.tr_output,
                                             classifier_weights)
    val_loss, val_accuracy = self.calculate_inner_loss(
        data.val_input, data.val_output, classifier_weights)
    return val_loss, val_accuracy
  @snt.reuse_variables
  def forward_encoder(self, data):
    """Encodes the training inputs into per-class latent samples.

    Runs encoder -> relation network -> per-class averaging, then samples
    from the resulting distribution.

    Returns:
      (latents, kl, kl_components, kl_zn, latent_dist_params); the KL terms
      come from `possibly_sample`, and the raw distribution parameters are
      also returned so the caller can re-measure KL after adaptation.
    """
    encoder_outputs = self.encoder(data.tr_input)
    relation_network_outputs = self.relation_network(encoder_outputs)
    latent_dist_params = self.average_codes_per_class(relation_network_outputs)
    latents, kl, kl_components, kl_zn = self.possibly_sample(latent_dist_params)
    return latents, kl, kl_components, kl_zn, latent_dist_params
  @snt.reuse_variables
  def forward_decoder(self, data, latents, theta):
    """Decodes latents into classifier weights and computes the training loss.

    Args:
      data: a data_module.ProblemInstance.
      latents: latent codes to decode.
      theta: reference weights for the l2 term inside
        `calculate_inner_loss_with_l2`.

    Returns:
      (tr_loss, classifier_weights).
    """
    weights_dist_params = self.decoder(latents)
    # Default to glorot_initialization and not stddev=1.
    fan_in = self.embedding_dim.value
    fan_out = self.num_classes.value
    stddev_offset = np.sqrt(2. / (fan_out + fan_in))
    # kl_for_weights is computed but intentionally unused here (a weighted
    # variant was commented out in an earlier revision)
    classifier_weights, kl_for_weights, _, _ = self.possibly_sample(weights_dist_params,
                                                                    stddev_offset=stddev_offset)
    tr_loss, _ = self.calculate_inner_loss_with_l2(data.tr_input, data.tr_output,
                                                   classifier_weights, theta)
    return tr_loss, classifier_weights
  @snt.reuse_variables
  def encoder(self, inputs):
    """Linear (bias-free) embedding of the inputs into the latent space.

    Applies dropout to the inputs first; the weight matrix carries an l2
    regularizer and glorot-uniform initialization.
    """
    with tf.variable_scope("encoder"):
      after_dropout = tf.nn.dropout(inputs, rate=self.dropout_rate)
      regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
      initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
      encoder_module = snt.Linear(
          self._num_latents,
          use_bias=False,
          regularizers={"w": regularizer},
          initializers={"w": initializer},
      )
      outputs = snt.BatchApply(encoder_module)(after_dropout)
      return outputs
  @snt.reuse_variables
  def relation_network(self, inputs):
    """Relation network over all pairs of encoded examples.

    Concatenates every (left, right) pair of encoder outputs, runs a
    3-layer MLP on each pair, and averages over partners, producing
    per-example latent distribution parameters.
    """
    with tf.variable_scope("relation_network"):
      regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
      initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
      relation_network_module = snt.nets.MLP(
          [2 * self._num_latents] * 3,
          use_bias=False,
          regularizers={"w": regularizer},
          initializers={"w": initializer},
      )
      total_num_examples = self.num_examples_per_class*self.num_classes
      inputs = tf.reshape(inputs, [total_num_examples, self._num_latents])
      # build all (i, j) pairs by tiling along complementary axes
      left = tf.tile(tf.expand_dims(inputs, 1), [1, total_num_examples, 1])
      right = tf.tile(tf.expand_dims(inputs, 0), [total_num_examples, 1, 1])
      concat_codes = tf.concat([left, right], axis=-1)
      outputs = snt.BatchApply(relation_network_module)(concat_codes)
      outputs = tf.reduce_mean(outputs, axis=1)
      # 2 * latents, because we are returning means and variances of a Gaussian
      outputs = tf.reshape(outputs, [self.num_classes,
                                     self.num_examples_per_class,
                                     2 * self._num_latents])
      return outputs
  @snt.reuse_variables
  def decoder(self, inputs):
    """Linear map from latents to classifier-weight distribution parameters."""
    with tf.variable_scope("decoder"):
      l2_regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
      orthogonality_reg = get_orthogonality_regularizer(
          self._orthogonality_penalty_weight)
      initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
      # 2 * embedding_dim, because we are returning means and variances
      decoder_module = snt.Linear(
          2 * self.embedding_dim,
          use_bias=False,
          regularizers={"w": l2_regularizer},
          initializers={"w": initializer},
      )
      outputs = snt.BatchApply(decoder_module)(inputs)
      # stored for the regularization term used in _build; _build reads
      # `self._decoder_orthogonality_reg`, presumably a property defined
      # later in the class that returns this attribute — confirm
      self._orthogonality_reg = orthogonality_reg(decoder_module.w)
      return outputs
  def average_codes_per_class(self, codes):
    """Averages codes over the examples (K) axis, then re-tiles to keep shape."""
    codes = tf.reduce_mean(codes, axis=1, keep_dims=True)  # K dimension
    # Keep the shape (N, K, *)
    codes = tf.tile(codes, [1, self.num_examples_per_class, 1])
    return codes
  def possibly_sample(self, distribution_params, stddev_offset=0.):
    """Samples from N(mean, stddev) during meta-training; returns means otherwise.

    `distribution_params` is split in half along the last axis into means
    and unnormalized (log-like) stddevs; the stddev is exponentiated,
    shifted by `stddev_offset` and floored at 1e-10.

    Returns:
      (samples, kl, kl_components, kl_zn). Outside meta-training the KL
      terms are zero constants and `samples` is just the means.
    """
    means, unnormalized_stddev = tf.split(distribution_params, 2, axis=-1)
    stddev = tf.exp(unnormalized_stddev)
    stddev -= (1. - stddev_offset)
    stddev = tf.maximum(stddev, 1e-10)
    distribution = tfp.distributions.Normal(loc=means, scale=stddev)
    if not self.is_meta_training:
      return means, tf.constant(0., dtype=self._float_dtype), tf.constant(0., dtype=self._float_dtype), tf.constant(0., dtype=self._float_dtype)
    # sampled latents for each class
    samples = distribution.sample()
    # interpret each sample as a factor of a joint distribution over the
    # latent variable z_n and measure divergence from a standard normal
    kl_divergence, kl_divergence_components, kl_divergence_zn = self.kl_divergence(samples, distribution)
    return samples, kl_divergence, kl_divergence_components, kl_divergence_zn
  def sample(self, distribution_params, stddev_offset=0.):
    """Draws 8 stacked samples from N(mean, stddev).

    Returns a tensor with a leading axis of 8 (one per draw) during
    meta-training. NOTE(review): outside meta-training it returns the bare
    means without that leading axis — callers must handle both shapes.
    """
    means, unnormalized_stddev = tf.split(distribution_params, 2, axis=-1)
    stddev = tf.exp(unnormalized_stddev)
    stddev -= (1. - stddev_offset)
    stddev = tf.maximum(stddev, 1e-10)
    distribution = tfp.distributions.Normal(loc=means, scale=stddev)
    if not self.is_meta_training:
      return means
    # stack 8 independent draws along a new leading axis
    samples = []
    for i in range(8):
      samples.append(distribution.sample())
    samples = tf.concat([tf.expand_dims(t, 0) for t in samples], 0)
    return samples
  def kl_divergence_given_latents(self, distribution_params, adapted_latents, stddev_offset=0.):
    """KL diagnostics for already-adapted latents under the encoder's distribution.

    Rebuilds the Normal distribution from `distribution_params` (same
    parameterization as `possibly_sample`) and evaluates the KL estimate at
    `adapted_latents` instead of drawing fresh samples.

    Returns:
      (adapted_latents, kl, kl_components, kl_zn); outside meta-training the
      KL terms are zero constants and the first element is the means.
    """
    means, unnormalized_stddev = tf.split(distribution_params, 2, axis=-1)
    stddev = tf.exp(unnormalized_stddev)
    stddev -= (1. - stddev_offset)
    stddev = tf.maximum(stddev, 1e-10)
    distribution = tfp.distributions.Normal(loc=means, scale=stddev)
    if not self.is_meta_training:
      return means, tf.constant(0., dtype=self._float_dtype), tf.constant(0., dtype=self._float_dtype), tf.constant(0., dtype=self._float_dtype)
    kl_divergence, kl_divergence_components, kl_divergence_zn = self.kl_divergence(adapted_latents, distribution)
    return adapted_latents, kl_divergence, kl_divergence_components, kl_divergence_zn
# KL divergence of a multivariate gaussian posterior with a multivariate gaussian standard normal
# identify which factors remain invariant
# learn a distribution over which factors remain invariant
# probablistic thresholding as intervening on the latent space
def kl_divergence(self, samples, normal_distribution):
    """Monte-Carlo estimate of KL(posterior || N(0, I)) evaluated at `samples`.

    Returns:
        kl: scalar — mean of the per-entry log-density differences.
        kl_divergence_components: per-entry log q(z) - log p(z) values.
        kl_divergence_zn: components averaged over the last axis only.
    """
    random_prior = tfp.distributions.Normal(
        loc=tf.zeros_like(samples), scale=tf.ones_like(samples))
    # observation: preadaptation, the components are positive, postadaptation, they are negative. Why is that?
    # because the divergence is computed as the sum over sampled values. If it integrated over the entire support, it would be positive
    # log_prob returns the log of the density evaluated at each sample entry.
    kl_divergence_components = normal_distribution.log_prob(samples) - random_prior.log_prob(samples)
    # NOTE(review): original author flagged this reduction as "incorrect,
    # this should be 5,1,1" — the intended output shape is unresolved here.
    kl = tf.reduce_mean(kl_divergence_components)
    kl_divergence_zn = tf.reduce_mean(kl_divergence_components, axis=-1)
    return kl, kl_divergence_components, kl_divergence_zn
def predict(self, inputs, weights):
    """Compute per-class logits for embedded examples with generated weights.

    Args:
        inputs: [N, K, embedding_dim] embeddings (per the comments below:
            N classes, K examples each) — shapes assumed from author notes.
        weights: classifier weights with matching last (embedding) axis.

    Returns:
        [N, K, N] tensor of class scores, one row of N scores per example.
    """
    after_dropout = tf.nn.dropout(inputs, rate=self.dropout_rate)
    # This is 3-dimensional equivalent of a matrix product, where we sum over
    # the last (embedding_dim) dimension. We get [N, K, N, K] tensor as output.
    per_image_predictions = tf.einsum("ijk,lmk->ijlm", after_dropout, weights)
    # Predictions have shape [N, K, N]: for each image ([N, K] of them), what
    # is the probability of a given class (N)?
    predictions = tf.reduce_mean(per_image_predictions, axis=-1)
    return predictions
#adjust the inner loss to include l2
def calculate_inner_loss(self, inputs, true_outputs, classifier_weights):
    """Return (cross-entropy loss, accuracy) for one inner-loop step."""
    logits = self.predict(inputs, classifier_weights)
    predicted_classes = tf.argmax(
        logits, -1, output_type=self._int_dtype)
    labels = tf.squeeze(true_outputs, axis=-1)
    accuracy = tf.contrib.metrics.accuracy(predicted_classes, labels)
    loss = self.loss_fn(logits, true_outputs)
    return loss, accuracy
def calculate_inner_loss_with_l2(self, inputs, true_outputs, classifier_weights, theta):
    """Return (loss, accuracy) using the L2-variant loss function."""
    logits = self.predict(inputs, classifier_weights)
    predicted_classes = tf.argmax(
        logits, -1, output_type=self._int_dtype)
    labels = tf.squeeze(true_outputs, axis=-1)
    accuracy = tf.contrib.metrics.accuracy(predicted_classes, labels)
    loss = self.loss_fn_withl2(logits, true_outputs, classifier_weights, theta)
    return loss, accuracy
def save_problem_instance_stats(self, instance):
    """Record the task's N/K/embedding sizes, verifying they never change.

    On the first call the three values are stored on the model; every later
    call asserts the new problem instance has identical dimensions.
    """
    num_classes, num_examples_per_class, embedding_dim = instance.get_shape()
    checks = (
        ("num_classes", num_classes,
         "Given different number of classes (N in N-way) in consecutive runs."),
        ("num_examples_per_class", num_examples_per_class,
         "Given different number of examples (K in K-shot) in consecutive"
         "runs."),
        ("embedding_dim", embedding_dim,
         "Given different embedding dimension in consecutive runs."),
    )
    for attr_name, value, message in checks:
        if hasattr(self, attr_name):
            assert getattr(self, attr_name) == value, message
        setattr(self, attr_name, value)
@property
def dropout_rate(self):
    """Dropout rate for `predict`; disabled (0.0) outside meta-training."""
    if self.is_meta_training:
        return self._dropout_rate
    return 0.0
def loss_fn(self, model_outputs, original_classes):
    """Softmax cross-entropy between logits and one-hot class labels.

    Args:
        model_outputs: class logits (last axis = num_classes).
        original_classes: integer labels with a trailing singleton axis,
            squeezed before one-hot encoding.
    """
    original_classes = tf.squeeze(original_classes, axis=-1)
    # Tensorflow doesn't handle second order gradients of a sparse_softmax yet.
    one_hot_outputs = tf.one_hot(original_classes, depth=self.num_classes)
    return tf.nn.softmax_cross_entropy_with_logits_v2(
        labels=one_hot_outputs, logits=model_outputs)
def loss_fn_withl2(self, model_outputs, original_classes, classifier_weights, theta):
    """Cross-entropy loss; currently identical to `loss_fn`.

    The experimental decoder/L2 penalty that used `classifier_weights` and
    `theta` is disabled; the parameters are kept so call sites need not
    change. Delegating to `loss_fn` removes the duplicated implementation
    and the dead commented-out penalty code.
    """
    return self.loss_fn(model_outputs, original_classes)
def grads_and_vars(self, metatrain_loss):
"""Computes gradients of metatrain_loss, avoiding NaN.
Uses a fixed penalty of 1e-4 to enforce only the l2 regularization (and not
minimize the loss) when metatrain_loss or any of its gradients with respect
to trainable_vars are NaN. In practice, this approach pulls the variables
back into a feasible region of the space when the loss or its gradients are
not defined.
Args:
metatrain_loss: A tensor with the LEO meta-training loss.
Returns:
A tuple with:
metatrain_gradients: A list of gradient tensors.
metatrain_variables: A list of variables for this LEO model.
"""
metatrain_variables = self.trainable_variables
metatrain_gradients = tf.gradients(metatrain_loss, metatrain_variables)
nan_loss_or_grad = tf.logical_or(
tf.is_nan(metatrain_loss),
tf.reduce_any([tf.reduce_any(tf.is_nan(g))
for g in metatrain_gradients]))
regularization_penalty = (
1e-4 / self._l2_penalty_weight * self._l2_regularization)
zero_or_regularization_gradients = [
g if g is not None else tf.zeros_like(v)
for v, g in zip(tf.gradients(regularization_penalty,
metatrain_variables), metatrain_variables)]
metatrain_gradients = tf.cond(nan_loss_or_grad,
lambda: zero_or_regularization_gradients,
lambda: metatrain_gradients, strict=True)
return metatrain_gradients, metatrain_variables
@property
def _l2_regularization(self):
    """Sum of all graph-collection REGULARIZATION_LOSSES, cast to model dtype."""
    return tf.cast(
        tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)),
        dtype=self._float_dtype)
@property
def _decoder_orthogonality_reg(self):
    """Expose the decoder orthogonality penalty computed elsewhere on the model."""
    return self._orthogonality_reg
| 46.686985
| 170
| 0.702071
|
4a03effe2dbaad3fca0ae68f0e87afb9c07381bc
| 673
|
py
|
Python
|
git/progress.py
|
phts/ImprovedGit
|
b811b4c7bf6c92441fe53f85abfb7c3c20608704
|
[
"MIT"
] | null | null | null |
git/progress.py
|
phts/ImprovedGit
|
b811b4c7bf6c92441fe53f85abfb7c3c20608704
|
[
"MIT"
] | null | null | null |
git/progress.py
|
phts/ImprovedGit
|
b811b4c7bf6c92441fe53f85abfb7c3c20608704
|
[
"MIT"
] | null | null | null |
import sublime, sublime_plugin
import functools, threading
class Progress():
    """Bouncing status-bar animation shown while a background thread runs.

    Polls the thread every 100 ms via sublime's timeout callback; once the
    thread dies, the done-message replaces the animation.
    """

    def __init__(self, thread, message, message_done):
        self.thread = thread
        self.message = message
        self.message_done = message_done
        self.add = 1
        self.size = 8
        sublime.set_timeout(lambda: self.run(0), 100)

    def run(self, i):
        # Thread finished: show the final message and stop rescheduling.
        if not self.thread.is_alive():
            sublime.status_message(self.message_done)
            return
        pos = i % self.size
        rest = self.size - (pos + 1)
        # Reverse direction at either end of the track.
        if rest == 0:
            self.add = -1
        elif pos == 0:
            self.add = 1
        sublime.status_message('%s [%s=%s]' % (self.message, ' ' * pos, ' ' * rest))
        sublime.set_timeout(lambda: self.run(i + self.add), 100)
| 22.433333
| 82
| 0.679049
|
4a03f06eb7c631ce63720c0924968d7b49290186
| 443
|
py
|
Python
|
mayan/apps/lock_manager/migrations/0002_auto_20150604_2219.py
|
CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons
|
0e4e919fd2e1ded6711354a0330135283e87f8c7
|
[
"Apache-2.0"
] | 2
|
2021-09-12T19:41:19.000Z
|
2021-09-12T19:41:20.000Z
|
mayan/apps/lock_manager/migrations/0002_auto_20150604_2219.py
|
CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons
|
0e4e919fd2e1ded6711354a0330135283e87f8c7
|
[
"Apache-2.0"
] | 37
|
2021-09-13T01:00:12.000Z
|
2021-10-02T03:54:30.000Z
|
mayan/apps/lock_manager/migrations/0002_auto_20150604_2219.py
|
CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons
|
0e4e919fd2e1ded6711354a0330135283e87f8c7
|
[
"Apache-2.0"
] | 1
|
2021-09-22T13:17:30.000Z
|
2021-09-22T13:17:30.000Z
|
from django.db import models, migrations
class Migration(migrations.Migration):
    """Make lock names unique and give the field a verbose name."""

    dependencies = [
        ('lock_manager', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='lock',
            name='name',
            field=models.CharField(
                max_length=64, unique=True, verbose_name='Name'
            ),
            preserve_default=True,
        ),
    ]
| 23.315789
| 64
| 0.525959
|
4a03f3aa4c07db2b06f8e0854a2aa7b0e7cd742e
| 400
|
py
|
Python
|
interactive_terminal_flashcards.py
|
pramttl/adaptive-flashcards-algorithm
|
d94a3c5388dfe51d21cf6c157a7b656f83a9a33f
|
[
"Apache-2.0"
] | 2
|
2017-04-10T12:02:53.000Z
|
2017-06-30T18:04:20.000Z
|
interactive_terminal_flashcards.py
|
pramttl/adaptive-flashcards-algorithm
|
d94a3c5388dfe51d21cf6c157a7b656f83a9a33f
|
[
"Apache-2.0"
] | null | null | null |
interactive_terminal_flashcards.py
|
pramttl/adaptive-flashcards-algorithm
|
d94a3c5388dfe51d21cf6c157a7b656f83a9a33f
|
[
"Apache-2.0"
] | null | null | null |
# Interactive flashcard drill (Python 2): show a cue, read an integer score
# from the user, and feed it back to the adaptive algorithm. Any non-integer
# input (including just pressing Enter or Ctrl+D) exits the loop.
from api import *

algo = FlashcardAlgorithm()

while True:
    cue = algo.draw_card()
    print cue
    try:
        s = int(raw_input())
    except:
        # If user entered non integer, quit
        # (bare except is the deliberate quit mechanism here — it also
        # catches EOFError/KeyboardInterrupt from the terminal).
        print "Quitting.."
        break
    if s == 0:
        # Score 0 means "I don't know": reveal the answer for this cue.
        print "** ",
        print algo.card[cue] ,
        print " **"
        print
    algo.reply(cue, s)
    print "--------------------"
| 16.666667
| 43
| 0.47
|
4a03f3bff25fa0072b32871bc065212058692686
| 14,266
|
py
|
Python
|
tests/msisdn_api.py
|
fossabot/DIRBS-Core-1
|
70bf72e2e6dda6e0d7a20cf744300930d88ee70c
|
[
"PostgreSQL",
"Unlicense"
] | null | null | null |
tests/msisdn_api.py
|
fossabot/DIRBS-Core-1
|
70bf72e2e6dda6e0d7a20cf744300930d88ee70c
|
[
"PostgreSQL",
"Unlicense"
] | null | null | null |
tests/msisdn_api.py
|
fossabot/DIRBS-Core-1
|
70bf72e2e6dda6e0d7a20cf744300930d88ee70c
|
[
"PostgreSQL",
"Unlicense"
] | 3
|
2019-10-24T11:40:06.000Z
|
2022-02-24T07:34:00.000Z
|
"""
MSISDN API unit tests.
Copyright (c) 2018-2019 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the
limitations in the disclaimer below) provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
- Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
- Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
- The origin of this software must not be misrepresented; you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment is required by displaying the trademark/log as per the
details provided here: https://www.qualcomm.com/documents/dirbs-logo-and-brand-guidelines
- Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
- This notice may not be removed or altered from any source distribution.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY
THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import json
from flask import url_for
import pytest
from _fixtures import * # noqa: F403, F401
from _importer_params import GSMADataParams, OperatorDataParams, RegistrationListParams
def test_msisdn_too_long(flask_app, api_version):
    """Test Depot ID not know yet.

    Verify that MSISDN API should validate that
    a supplied MSISDN is less than or equal to 15 chars and
    return an HTTP 400 error code if not.
    """
    accepted = ['1', '123456', '12345678901234', '123456789012345']
    rejected = ['1234567890123456789', '123456789012345678901']
    # v1 and v2 differ only in the endpoint name; pick it once.
    endpoint = 'msisdn_api' if api_version == 'v1' else 'msisdn_get_api'
    for msisdn in accepted:
        rv = flask_app.get(url_for('{0}.{1}'.format(api_version, endpoint), msisdn=msisdn))
        assert rv.status_code == 200
    for msisdn in rejected:
        rv = flask_app.get(url_for('{0}.{1}'.format(api_version, endpoint), msisdn=msisdn))
        assert rv.status_code == 400
        assert b'Bad MSISDN format (too long)' in rv.data
def test_msisdn_chars(flask_app, api_version):
    """Test Depot ID not know yet.

    Verify that MSISDN API should validate that
    a supplied MSISDN contains only numbers.
    """
    # v1 and v2 differ only in the endpoint name; pick it once.
    endpoint = 'msisdn_api' if api_version == 'v1' else 'msisdn_get_api'
    for msisdn in ['A123456AAAAA', '12345678901234A', '*1234567890123A']:
        rv = flask_app.get(url_for('{0}.{1}'.format(api_version, endpoint), msisdn=msisdn))
        assert rv.status_code == 400
        assert b'Bad MSISDN format (can only contain digit characters)' in rv.data
def test_empty_msisdn(flask_app, api_version):
    """Test Depot ID not known yet.

    Verify that MSISDN API should return a 404 status for a zero-length MSISDN.
    """
    # The redundant bare-string statement that duplicated this docstring
    # was removed — it was a no-op expression, not documentation.
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.msisdn_api'.format(api_version), msisdn=''))
        assert rv.status_code == 404
    else:  # msisdn version 2
        rv = flask_app.get(url_for('{0}.msisdn_get_api'.format(api_version), msisdn=''))
        assert rv.status_code == 404
@pytest.mark.parametrize('operator_data_importer',
                         [OperatorDataParams(filename='testData1-operator-operator4-anonymized_20161101_20161130.csv',
                                             extract=False,
                                             perform_unclean_checks=False,
                                             perform_region_checks=False,
                                             perform_home_network_check=False)],
                         indirect=True)
@pytest.mark.parametrize('gsma_tac_db_importer',
                         [GSMADataParams(
                             filename='testData1-gsmatac_operator1_operator4_anonymized.txt')],
                         indirect=True)
@pytest.mark.parametrize('registration_list_importer',
                         [RegistrationListParams(filename='registration_list_msisidn_api_test_data.csv')],
                         indirect=True)
def test_observed_msisdn_with_registration_list(flask_app, operator_data_importer, gsma_tac_db_importer,
                                                registration_list_importer, db_conn, tmpdir, logger):
    """Test Depot ID not known yet.

    Verify MSISDN API (version 2.0) should return IMEI information from GSMA, Network and Device Registration System.
    """
    # operator input file contains imei_norm = 38847733370026 with msisdn=22300049781840
    # gsma input file contains tac 38847733 with manufacturer = 1d4e632daf5249ba6f4165cca4cb4ff5025ddae6
    # registration list contains imei_norm = 38847733370026
    operator_data_importer.import_data()
    gsma_tac_db_importer.import_data()
    registration_list_importer.import_data()
    # Case 1: IMEI present in operator data, GSMA TAC DB *and* the
    # registration list — the response should carry all three sections.
    imei_norm = '38847733370026'
    msisdn = '22300049781840'
    imsi = '11104803062043'
    gsma_manufacturer = '1d4e632daf5249ba6f4165cca4cb4ff5025ddae6'
    gsma_model_name = 'ef12302c27d9b8a5a002918bd643dcd412d2db66'
    registration_brand_name = '1d4e632daf5249ba6f4165cca4cb4ff5025ddae6'
    registration_model_name = '1d4e632daf5249ba6f4165cca4cb4ff5025ddae6'
    registration_make = '1d4e632daf5249ba6f4165cca4cb4ff5025ddae6'
    rv = flask_app.get(url_for('v2.msisdn_get_api', msisdn=msisdn))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))['results'][0]
    assert data['imei_norm'] == imei_norm
    assert data['imsi'] == imsi
    assert data['gsma']['manufacturer'] == gsma_manufacturer
    assert data['gsma']['model_name'] == gsma_model_name
    assert data['gsma']['brand_name'] is not None
    assert data['last_seen'] is not None
    assert data['registration']['brand_name'] == registration_brand_name
    assert data['registration']['make'] == registration_make
    assert data['registration']['model'] == registration_model_name
    # Case 2: IMEI known to operator data and GSMA but *not* registered —
    # the registration fields should come back as nulls.
    # imei norm = 387094332125410, imsi = 111041080094910, msisdn = 223321010800949
    imei_norm = '38709433212541'
    imsi = '111041080094910'
    msisdn = '223321010800949'
    gsma_manufacturer = '4acc7e11603eddb554adc50c8bc0b6185144d4e0'
    gsma_model_name = 'd3bdf1170bf4b026e6e29b15a0d66a5ca83f1944'
    rv = flask_app.get(url_for('v2.msisdn_get_api', msisdn=msisdn))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))['results'][0]
    assert data['imei_norm'] == imei_norm
    assert data['imsi'] == imsi
    assert data['gsma']['manufacturer'] == gsma_manufacturer
    assert data['gsma']['model_name'] == gsma_model_name
    assert data['gsma']['brand_name'] is not None
    assert data['last_seen'] is not None
    assert data['registration']['brand_name'] is None
    assert data['registration']['make'] is None
    assert data['registration']['model'] is None
@pytest.mark.parametrize('operator_data_importer',
                         [OperatorDataParams(filename='testData1-operator-operator4-anonymized_20161101_20161130.csv',
                                             extract=False,
                                             perform_unclean_checks=False,
                                             perform_region_checks=False,
                                             perform_home_network_check=False)],
                         indirect=True)
@pytest.mark.parametrize('gsma_tac_db_importer',
                         [GSMADataParams(
                             filename='testData1-gsmatac_operator1_operator4_anonymized.txt')],
                         indirect=True)
def test_observed_msisdn(flask_app, operator_data_importer, gsma_tac_db_importer,
                         db_conn, tmpdir, logger, api_version):
    """Test Depot ID not known yet.

    Verify MSISDN API should return IMSI, GSMA Manufacturer, GSMA Model Name for the current MSISDN.
    """
    # operator input file contains imei_norm = 38847733370026 with msisdn=22300049781840
    # gsma input file contains tac 38847733 with manufacturer = 1d4e632daf5249ba6f4165cca4cb4ff5025ddae6
    operator_data_importer.import_data()
    gsma_tac_db_importer.import_data()
    imei_norm = '38847733370026'
    msisdn = '22300049781840'
    imsi = '11104803062043'
    gsma_manufacturer = '1d4e632daf5249ba6f4165cca4cb4ff5025ddae6'
    gsma_model_name = 'ef12302c27d9b8a5a002918bd643dcd412d2db66'
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.msisdn_api'.format(api_version), msisdn=msisdn))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['imei_norm'] == imei_norm
        assert data['imsi'] == imsi
        assert data['gsma_manufacturer'] == gsma_manufacturer
        assert data['gsma_model_name'] == gsma_model_name
        # FIX: removed `return data` — pytest ignores (and newer versions
        # warn/error on) non-None returns from test functions.
    else:  # api version 2
        rv = flask_app.get(url_for('{0}.msisdn_get_api'.format(api_version), msisdn=msisdn))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['results'][0]
        assert data['imei_norm'] == imei_norm
        assert data['imsi'] == imsi
        assert data['gsma']['manufacturer'] == gsma_manufacturer
        assert data['gsma']['model_name'] == gsma_model_name
        assert data['gsma']['brand_name'] is not None
        assert data['last_seen'] is not None
        assert data['registration'] is None
def test_put_not_allowed(flask_app, db_conn, tmpdir, logger, api_version):
    """Test Depot ID not known yet.

    Verify the MSISDN API does not support HTTP PUT and returns HTTP 405 METHOD NOT ALLOWED.
    """
    endpoint = 'msisdn_api' if api_version == 'v1' else 'msisdn_get_api'
    for msisdn in ['3884773337002633']:
        rv = flask_app.put(url_for('{0}.{1}'.format(api_version, endpoint), msisdn=msisdn))
        assert rv.status_code == 405
        assert b'The method is not allowed for the requested URL' in rv.data
def test_post_not_allowed(flask_app, db_conn, tmpdir, logger, api_version):
    """Test Depot ID not known yet.

    Verify the MSISDN API does not support HTTP POST and returns HTTP 405 METHOD NOT ALLOWED.
    """
    endpoint = 'msisdn_api' if api_version == 'v1' else 'msisdn_get_api'
    for msisdn in ['3884773337002633']:
        rv = flask_app.post(url_for('{0}.{1}'.format(api_version, endpoint), msisdn=msisdn))
        assert rv.status_code == 405
        assert b'The method is not allowed for the requested URL' in rv.data
def test_delete_not_allowed(flask_app, db_conn, tmpdir, logger, api_version):
    """Test Depot ID not known yet.

    Verify the MSISDN API does not support HTTP DELETE and returns HTTP 405 METHOD NOT ALLOWED.
    """
    endpoint = 'msisdn_api' if api_version == 'v1' else 'msisdn_get_api'
    for msisdn in ['3884773337002633']:
        rv = flask_app.delete(url_for('{0}.{1}'.format(api_version, endpoint), msisdn=msisdn))
        assert rv.status_code == 405
        assert b'The method is not allowed for the requested URL' in rv.data
def test_response_headers(flask_app, api_version):
    """Verify the security headers are set properly on returned response."""
    endpoint = 'msisdn_api' if api_version == 'v1' else 'msisdn_get_api'
    rv = flask_app.get(url_for('{0}.{1}'.format(api_version, endpoint), msisdn='123456789012345'))
    assert rv.status_code == 200
    assert rv.headers.get('X-Frame-Options') == 'DENY'
    assert rv.headers.get('X-Content-Type-Options') == 'nosniff'
| 50.05614
| 120
| 0.680779
|
4a03f43f50c386aa25a91d9182d5b53de3bcc1a2
| 2,516
|
py
|
Python
|
util/plot_utils.py
|
fmassa/detr
|
7613beb10a530ca0ab836f2c8845d0501f5bf063
|
[
"Apache-2.0"
] | 9
|
2021-10-05T14:14:32.000Z
|
2022-02-25T13:10:24.000Z
|
util/plot_utils.py
|
fmassa/detr
|
7613beb10a530ca0ab836f2c8845d0501f5bf063
|
[
"Apache-2.0"
] | 2
|
2021-12-02T05:51:43.000Z
|
2021-12-02T11:35:03.000Z
|
util/plot_utils.py
|
fmassa/detr
|
7613beb10a530ca0ab836f2c8845d0501f5bf063
|
[
"Apache-2.0"
] | 1
|
2020-10-16T13:00:56.000Z
|
2020-10-16T13:00:56.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Plotting utilities to visualize training logs.
"""
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
def plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0):
    """Plot training curves from DETR-style ``log.txt`` files.

    Args:
        logs: iterable of experiment directories, each containing a
            ``log.txt`` with one JSON record per line.
        fields: metric names to plot; ``'mAP'`` is read from the
            ``test_coco_eval`` column, any other field from the matching
            ``train_<field>`` / ``test_<field>`` columns.
        ewm_col: center-of-mass for exponential smoothing (0 = minimal).
    """
    dfs = [pd.read_json(Path(p) / 'log.txt', lines=True) for p in logs]

    fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5))

    for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))):
        for j, field in enumerate(fields):
            if field == 'mAP':
                # FIX: `pd.np` was deprecated in pandas 0.25 and removed in
                # pandas 2.0 — use numpy directly.
                coco_eval = pd.DataFrame(np.stack(df.test_coco_eval.dropna().values)[:, 1]).ewm(com=ewm_col).mean()
                axs[j].plot(coco_eval, c=color)
            else:
                df.interpolate().ewm(com=ewm_col).mean().plot(
                    y=[f'train_{field}', f'test_{field}'],
                    ax=axs[j],
                    color=[color] * 2,
                    style=['-', '--']
                )
    for ax, field in zip(axs, fields):
        ax.legend([Path(p).name for p in logs])
        ax.set_title(field)
def plot_precision_recall(files, naming_scheme='iter'):
    """Plot precision/recall and score/recall curves from saved COCO evals.

    Args:
        files: sequence of pathlib.Path objects pointing at torch-saved
            COCO evaluation dumps exposing 'precision', 'scores', 'recall'
            and 'params' keys.
        naming_scheme: 'exp_id' takes the 3rd-from-last path component as
            the legend label; 'iter' uses the file stem.

    Returns:
        (fig, axs): the matplotlib figure and its two axes.

    Raises:
        ValueError: for an unrecognized `naming_scheme`.
    """
    if naming_scheme == 'exp_id':
        # name becomes exp_id
        names = [f.parts[-3] for f in files]
    elif naming_scheme == 'iter':
        names = [f.stem for f in files]
    else:
        raise ValueError(f'not supported {naming_scheme}')
    fig, axs = plt.subplots(ncols=2, figsize=(16, 5))
    for f, color, name in zip(files, sns.color_palette("Blues", n_colors=len(files)), names):
        data = torch.load(f)
        # precision is n_iou, n_points, n_cat, n_area, max_det
        precision = data['precision']
        recall = data['params'].recThrs
        scores = data['scores']
        # take precision for all classes, all areas and 100 detections
        # (index [0] selects the first IoU threshold, presumably 0.5 —
        # matching the mAP@50 label printed below; confirm against the
        # eval params if this assumption matters.)
        precision = precision[0, :, :, 0, -1].mean(1)
        scores = scores[0, :, :, 0, -1].mean(1)
        prec = precision.mean()
        rec = data['recall'][0, :, 0, -1].mean()
        print(f'{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, ' +
              f'score={scores.mean():0.3f}, ' +
              f'f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}'
              )
        axs[0].plot(recall, precision, c=color)
        axs[1].plot(recall, scores, c=color)

    axs[0].set_title('Precision / Recall')
    axs[0].legend(names)
    axs[1].set_title('Scores / Recall')
    axs[1].legend(names)
    return fig, axs
| 38.121212
| 118
| 0.573132
|
4a03f4bdd92eb206a12751658da1a8b3b806e6e7
| 1,628
|
py
|
Python
|
owlbot.py
|
DFrenkel/google-api-python-client
|
b3e1fd4df954547eb79195424bccc25adb3030bb
|
[
"Apache-2.0"
] | 3,469
|
2018-09-12T19:54:02.000Z
|
2022-03-31T17:53:12.000Z
|
owlbot.py
|
DFrenkel/google-api-python-client
|
b3e1fd4df954547eb79195424bccc25adb3030bb
|
[
"Apache-2.0"
] | 835
|
2018-09-12T10:40:13.000Z
|
2022-03-31T15:19:06.000Z
|
owlbot.py
|
DFrenkel/google-api-python-client
|
b3e1fd4df954547eb79195424bccc25adb3030bb
|
[
"Apache-2.0"
] | 1,329
|
2018-09-11T15:06:09.000Z
|
2022-03-31T17:53:04.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Owlbot post-processor: copies shared repository templates (CI configs,
# issue templates, contributing guide, sample-CI scripts) into this repo.
# Runs as a module-level script; every s.move() writes files in place.
import synthtool as s
from synthtool import gcp
from synthtool.languages import python

common = gcp.CommonTemplates()

# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
templated_files = common.py_library()

# Copy kokoro configs.
# Docs are excluded as repo docs cannot currently be generated using sphinx.
s.move(templated_files / '.kokoro', excludes=['**/docs/*', 'publish-docs.sh'])
s.move(templated_files / '.trampolinerc')  # config file for trampoline_v2

# Also move issue templates
s.move(templated_files / '.github', excludes=['CODEOWNERS'])

# Move scripts folder needed for samples CI
s.move(templated_files / 'scripts')

# Copy CONTRIBUTING.rst
s.move(templated_files / 'CONTRIBUTING.rst')

# ----------------------------------------------------------------------------
# Samples templates
# ----------------------------------------------------------------------------
python.py_samples(skip_readmes=True)
| 35.391304
| 78
| 0.615479
|
4a03f513c6535fac0be26a5bbd01b55c89fa4eed
| 534
|
py
|
Python
|
flint/nn/modules/flatten.py
|
Renovamen/tinyark
|
da536e8f8132ef531c5bef3feebd3178c1877fce
|
[
"MIT"
] | 15
|
2021-02-08T16:01:52.000Z
|
2021-02-10T07:49:26.000Z
|
flint/nn/modules/flatten.py
|
Renovamen/tinyark
|
da536e8f8132ef531c5bef3feebd3178c1877fce
|
[
"MIT"
] | null | null | null |
flint/nn/modules/flatten.py
|
Renovamen/tinyark
|
da536e8f8132ef531c5bef3feebd3178c1877fce
|
[
"MIT"
] | 2
|
2021-05-10T06:40:45.000Z
|
2021-05-10T14:47:03.000Z
|
from flint import Tensor
from .module import Module
from .. import functional as F
class Flatten(Module):
    """Layer that flattens its input while preserving the batch dimension.

    Note:
        If inputs are shaped ``(batch,)`` with no feature axis, flattening
        adds a channel dimension and the output shape becomes ``(batch, 1)``.
    """

    def __init__(self) -> None:
        super(Flatten, self).__init__()

    def forward(self, input: Tensor) -> Tensor:
        # Delegate to the functional implementation and keep the result
        # around on the module, matching the file's convention.
        self.output = F.flatten(input)
        return self.output
| 28.105263
| 81
| 0.646067
|
4a03f914c07ef70f2c4da5204a6d2862b2ab3857
| 1,370
|
py
|
Python
|
notes/algo-ds-practice/problems/bst/compute_height_after_insert.py
|
Anmol-Singh-Jaggi/interview-notes
|
65af75e2b5725894fa5e13bb5cd9ecf152a0d652
|
[
"MIT"
] | 6
|
2020-07-05T05:15:19.000Z
|
2021-01-24T20:17:14.000Z
|
notes/algo-ds-practice/problems/bst/compute_height_after_insert.py
|
Anmol-Singh-Jaggi/interview-notes
|
65af75e2b5725894fa5e13bb5cd9ecf152a0d652
|
[
"MIT"
] | null | null | null |
notes/algo-ds-practice/problems/bst/compute_height_after_insert.py
|
Anmol-Singh-Jaggi/interview-notes
|
65af75e2b5725894fa5e13bb5cd9ecf152a0d652
|
[
"MIT"
] | 2
|
2020-09-14T06:46:37.000Z
|
2021-06-15T09:17:21.000Z
|
'''
You have a list of unique integers `arr`.
You have to insert these integers into a BST in that order.
You have to return another list `height` such that `height[i]` is the height of arr[i] in the BST.
Example:
arr = [10, 5, 2, 7, 15]
heights = [1, 2, 3, 3, 2]
SOLUTION:
We could simply insert each integer into a BST and read off its height at insertion time.
But that takes O(n*n) total time in the case of a degenerate (skewed) BST.
We can instead do the following:
For every arr[i]:
1. If `arr[i] > max(arr[0...i-1])`, then `height[arr[i]] = height[max] + 1`; `arr[i]` will be the child of `max`.
2. If `arr[i] < min(arr[0...i-1])`, then `height[arr[i]] = height[min] + 1`; `arr[i]` will be the child of `min`.
3. Compute `ub = upper_bound(arr[0..i-1], arr[i])`.
This will return the index of the smallest element which is greater than `arr[i]`.
4. Assign `greater = arr[ub]` and `smaller = arr[ub-1]`, denoting the elements that are closest to `arr[i]`.
5. If `idx[greater] > idx[smaller]`, then `height[arr[i]] = height[greater] + 1`. `arr[i]` is child of `greater`.
Else, `height[arr[i]] = height[smaller] + 1`; `arr[i]` is child of `smaller`.
Where `idx[elem]` means index of `elem` in `arr`.
Complexity -> O(nlogn).
Draw some examples to see why case 3 works.
Note: We'll need to have a 'sorted-set' like data structure to do the `upper_bound()`.
Which means that we'll have to use C++ or Java :(
'''
| 45.666667
| 113
| 0.656934
|
4a03f967d3e4c4a48197f53ff8ff8fadefe50546
| 3,866
|
py
|
Python
|
superset/examples/multiformat_time_series.py
|
whelan9453/incubator-superset
|
4e3cea45a5136a28442eea50fddc6cf423a9ddd5
|
[
"Apache-2.0"
] | 1
|
2019-10-28T18:26:39.000Z
|
2019-10-28T18:26:39.000Z
|
superset/examples/multiformat_time_series.py
|
whelan9453/incubator-superset
|
4e3cea45a5136a28442eea50fddc6cf423a9ddd5
|
[
"Apache-2.0"
] | 8
|
2020-03-24T17:59:51.000Z
|
2022-03-29T22:27:47.000Z
|
superset/examples/multiformat_time_series.py
|
whelan9453/incubator-superset
|
4e3cea45a5136a28442eea50fddc6cf423a9ddd5
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pandas as pd
from sqlalchemy import BigInteger, Date, DateTime, String
from superset import db
from superset.utils.core import get_example_database
from .helpers import (
config,
get_example_data,
get_slice_json,
merge_slice,
misc_dash_slices,
Slice,
TBL,
)
def load_multiformat_time_series(only_metadata=False, force=False):
    """Loading time series data from a zip file in the repo.

    Loads the example ``multiformat_time_series`` table (the same series
    encoded as several datetime formats), registers it as a Superset table
    with per-column datetime parsing info, and creates one calendar-heatmap
    chart per column.

    :param only_metadata: when True, skip (re)loading the table data and only
        (re)create the table reference and charts.
    :param force: when True, reload the data even if the table already exists.
    """
    tbl_name = "multiformat_time_series"
    database = get_example_database()
    table_exists = database.has_table_by_name(tbl_name)
    if not only_metadata and (not table_exists or force):
        # The example payload ships as gzipped JSON with the repo examples.
        data = get_example_data("multiformat_time_series.json.gz")
        pdf = pd.read_json(data)
        # 'ds'/'ds2' arrive as epoch seconds; convert to real datetimes.
        pdf.ds = pd.to_datetime(pdf.ds, unit="s")
        pdf.ds2 = pd.to_datetime(pdf.ds2, unit="s")
        pdf.to_sql(
            tbl_name,
            database.get_sqla_engine(),
            if_exists="replace",
            chunksize=500,
            dtype={
                "ds": Date,
                "ds2": DateTime,
                "epoch_s": BigInteger,
                "epoch_ms": BigInteger,
                "string0": String(100),
                "string1": String(100),
                "string2": String(100),
                "string3": String(100),
            },
            index=False,
        )
        print("Done loading table!")
        print("-" * 80)
    print(f"Creating table [{tbl_name}] reference")
    obj = db.session.query(TBL).filter_by(table_name=tbl_name).first()
    if not obj:
        obj = TBL(table_name=tbl_name)
    obj.main_dttm_col = "ds"
    obj.database = database
    # Per-column [python_date_format, database_expression] pairs describing
    # how each column encodes the timestamp.
    dttm_and_expr_dict = {
        "ds": [None, None],
        "ds2": [None, None],
        "epoch_s": ["epoch_s", None],
        "epoch_ms": ["epoch_ms", None],
        "string2": ["%Y%m%d-%H%M%S", None],
        "string1": ["%Y-%m-%d^%H:%M:%S", None],
        "string0": ["%Y-%m-%d %H:%M:%S.%f", None],
        "string3": ["%Y/%m/%d%H:%M:%S.%f", None],
    }
    for col in obj.columns:
        dttm_and_expr = dttm_and_expr_dict[col.column_name]
        col.python_date_format = dttm_and_expr[0]
        # NOTE(review): 'dbatabase_expr' looks like a typo of 'database_expr';
        # the value written is always None here, so it is likely never read —
        # confirm against the TableColumn model before renaming.
        col.dbatabase_expr = dttm_and_expr[1]
        col.is_dttm = True
    db.session.merge(obj)
    db.session.commit()
    obj.fetch_metadata()
    tbl = obj
    print("Creating Heatmap charts")
    # One calendar-heatmap slice per datetime column of the table.
    for i, col in enumerate(tbl.columns):
        slice_data = {
            "metrics": ["count"],
            "granularity_sqla": col.column_name,
            "row_limit": config.get("ROW_LIMIT"),
            "since": "2015",
            "until": "2016",
            "where": "",
            "viz_type": "cal_heatmap",
            "domain_granularity": "month",
            "subdomain_granularity": "day",
        }
        slc = Slice(
            slice_name=f"Calendar Heatmap multiformat {i}",
            viz_type="cal_heatmap",
            datasource_type="table",
            datasource_id=tbl.id,
            params=get_slice_json(slice_data),
        )
        merge_slice(slc)
    # Only the first heatmap is added to the misc dashboard.
    misc_dash_slices.add("Calendar Heatmap multiformat 0")
| 33.327586
| 70
| 0.606053
|
4a03fa7d563ebcda1f4e06140f718630853d0ddb
| 4,264
|
py
|
Python
|
LastFMExample/lastfm.py
|
AndreasTraut/-Visualization-of-Data-with-Python
|
c78a9bc8b51cc2c8e9d6b07a75a225d5db05053f
|
[
"MIT"
] | 2
|
2020-02-13T14:20:43.000Z
|
2020-12-23T07:22:47.000Z
|
LastFMExample/lastfm.py
|
AndreasTraut/-Visualization-of-Data-with-Python
|
c78a9bc8b51cc2c8e9d6b07a75a225d5db05053f
|
[
"MIT"
] | 3
|
2020-01-18T17:00:42.000Z
|
2020-04-07T06:28:58.000Z
|
LastFMExample/lastfm.py
|
AndreasTraut/-Visualization-of-Data-with-Python
|
c78a9bc8b51cc2c8e9d6b07a75a225d5db05053f
|
[
"MIT"
] | 1
|
2022-02-19T17:13:48.000Z
|
2022-02-19T17:13:48.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 6 18:46:07 2020
In this example I downloaded my complete history of played songs since 2016
from www.last.fm (66'955 songs in total) and re-built some of these nice
statistics and figues, which last.fm provides. This are for example a bar-plot
with monthly aggregates of total played songs. Or top 10 songs of the week and
so on. Having the same plots at the end as last.fm has prooves, that my
results are correct. :-)
@author: Andreas Traut
"""
import pandas as pd
import numpy as np
from matplotlib import pyplot
#%%
# Load the raw scrobble export: one row per play (artist, album, song, time).
df = pd.read_csv('lastfm_data.csv',
                 names=['artist', 'album','song', 'timestamp'],
                 converters={'timestamp':pd.to_datetime})
#%% Extracting year/month/... from timestamp and adding as new columns
dates = pd.DatetimeIndex(df['timestamp'])
df['year'] = dates.year
df['month'] = dates.month
# NOTE(review): DatetimeIndex.weekofyear is deprecated in newer pandas;
# dates.isocalendar().week is the replacement — confirm the pandas version.
df['weekofyear'] = dates.weekofyear
df['hour']= dates.hour
df['weekday'] = dates.weekday  # Monday=0
#%% Overall statistics
# Next I wanted to have the overall statistics as for example "played songs
# per year" or "scrobbels per day".
print("\nPlayed songs per year:\n{}".format(df['year'].value_counts(sort=False)))
# NOTE(review): divides by 365 even for partial/leap years — rough average only.
print("\nScrobbels per day:\n{}".format(df['year'].value_counts(sort=False)/365.))
# Now lets examine the "top artist", "top album", "top songs":
print("\nTop artists:\n{}".format(df['artist'].value_counts().head()))
print("\nTop album:\n{}".format(df['album'].value_counts().head()))
print("\nTop songs:\n{}".format(df['song'].value_counts().head(10)))
#%% Defining a year/month/weekofyear for examination
myYear = 2017
myMonth = 5
myWeekofYear = 21
#%% Examine selected year
print("\nAll songs in year %s:\n"%(myYear), df.loc[df['year'] == myYear, ['artist', 'album', 'song']])
selection = df.loc[df['year'] == myYear, ['artist', 'album', 'song', 'month']]
selectionPrev = df.loc[df['year'] == myYear-1, ['artist', 'album', 'song', 'month']]
print("\nTop artists:\n{}".format(selection['artist'].value_counts().head()))
print("\nTop songs:\n{}".format(selection['song'].value_counts().head(10)))
# Scrobbles per month, selected year vs the year before.
perMonth = selection['month'].value_counts().sort_index()
perMonthPrev = selectionPrev['month'].value_counts().sort_index()
print("\nScrobbels per month in {}:\n{}".format(myYear, perMonth))
print("\nScrobbels per month in {}:\n{}".format(myYear-1, perMonthPrev))
# Side-by-side bar chart: current year (red) next to previous year.
index = np.arange(12)
pltperMonth = pyplot.bar(index, perMonth, width=0.3, label=myYear, color='red')
pltperMonthPrev = pyplot.bar(index - 0.3, perMonthPrev, width=0.3, label=myYear-1, color='peachpuff')
pyplot.title('Year {}. Scrobbels per month.'.format(myYear))
pyplot.xticks(index, ('Jan', 'Feb', 'Mär', 'Apr', 'Mai', 'Jun', 'Jul', 'Aug', 'Sep', 'Okt', 'Nov', 'Dez'))
pyplot.legend()
pyplot.show()
# Drill down into a single month of the selected year.
isyear = (df['year'] == myYear)
ismonth = (df['month'] == myMonth)
selection = df.loc[isyear & ismonth, ['artist', 'album', 'song', 'timestamp']]
print ("\nOnly month {} in {}:\n{}".format(myMonth, myYear, selection))
print("\nTop Artists in {}/{}:\n{}".format(myMonth, myYear, selection['artist'].value_counts().head()))
print("\nTop Songs in {}/{}:\n{}".format(myMonth, myYear, selection['song'].value_counts().head(10)))
# isweekofyear= (df['weekofyear'] == myWeekofYear)
# selection = df.loc[isyear & isweekofyear, ['hour']]
#%% All songs afterDateX and beforeDateY
X = '2018-12-20'
Y = '2018-12-31'
print("\nSongs played between %s and %s:" %(X, Y))
afterDateX = df['timestamp'] >= X
beforeDateY = df['timestamp'] <= Y
print(df.loc[afterDateX & beforeDateY, ['artist', 'album', 'song']])
#%% Listening clock
# Histogram of scrobbles per hour of day for the selected week
# (myYear / myWeekofYear defined above; `isyear` computed earlier).
isweekofyear = (df['weekofyear'] == myWeekofYear)
selection = df.loc[isyear & isweekofyear, ['hour']]
index = np.arange(24)
# Bug fix: the original referenced an undefined name `myselection`, which
# raised NameError at this point; the frame built above is `selection`.
perHour = selection['hour'].value_counts().sort_index()
pltperHour = pyplot.subplot(111)  # pass projection='polar' for a clock-style plot
# pltperHour.set_theta_zero_location("N")  # only relevant with polar projection
pltperHour.bar(perHour.index, perHour, width=0.3, color='blue', alpha=0.5)
pltperHour.set_xticks(index)
pyplot.title('Year {}, Week-of-Year {}. Scrobbels per hour.'.format(myYear, myWeekofYear))
pyplot.show()
| 43.070707
| 106
| 0.677533
|
4a03fb1d8d1eb3d3f42100b260010a77dd8c829e
| 2,937
|
py
|
Python
|
pyalp/sequence/checkerboard.py
|
BaptisteLefebvre/pyalp
|
05cb8ff9e66f95ed9c70a8ab8a91c78794f7350a
|
[
"MIT"
] | 1
|
2020-11-09T09:23:11.000Z
|
2020-11-09T09:23:11.000Z
|
pyalp/sequence/checkerboard.py
|
BaptisteLefebvre/pyalp
|
05cb8ff9e66f95ed9c70a8ab8a91c78794f7350a
|
[
"MIT"
] | null | null | null |
pyalp/sequence/checkerboard.py
|
BaptisteLefebvre/pyalp
|
05cb8ff9e66f95ed9c70a8ab8a91c78794f7350a
|
[
"MIT"
] | 1
|
2020-11-09T09:23:19.000Z
|
2020-11-09T09:23:19.000Z
|
import numpy
from .base import Sequence
class Checkerboard(Sequence):
    """Random binary checkerboard stimulus sequence for a DMD-type device.

    Generates ``pic_num`` frames, each an ``n_checks`` x ``n_checks`` grid of
    black/white checks of ``check_size`` pixels, centered in the device
    resolution.

    TODO complete...
    """
    def __init__(self, check_size=30, n_checks=10, rate=50.0, n_repetitions=None, seed=None):
        # Save input parameters
        self.check_size = check_size  # edge length of one check [px]
        self.n_checks = n_checks  # checks per side of the board
        self.rate = rate  # frame rate [Hz]
        self.n_repetitions = n_repetitions  # how often the sequence repeats
        self.seed = seed  # RNG seed, for reproducible patterns
        # Save computed parameters
        self.id = None  # identifier set by device during sequence allocation
        self.checkerboard_size = self.n_checks * self.check_size  # px
        # TODO assert(isinstance(n_repetitions, int))
        # TODO assert(0 <= sequence.n_repetitions and sequence.n_repetitions <= 1048576)
        bit_planes = 8  # bit depth of the pictures
        pic_num = 50  # number of pictures
        Sequence.__init__(self, bit_planes, pic_num)
        # TODO remove following lines...
        # self.picture_time = int(1.0e6 / self.rate) # ns
        # self.synch_delay = ALP_DEFAULT
        # self.synch_pulse_width = ALP_DEFAULT
        # self.trigger_in_delay = ALP_DEFAULT
        # self.infinite_loop = False
        # self.pic_offset = ALP_DEFAULT
        # self.pic_load = ALP_DEFAULT
    def get_user_array(self):
        """Build and return the frame stack as a uint8 array.

        Returns an array transposed to (pic_num, height, width); the random
        board is scaled up via a Kronecker product and centered in the frame.
        """
        # Allocate frame
        width, height = self.device.get_resolution()
        shape = (width, height, self.pic_num)
        dtype = 'uint8'
        frames = numpy.zeros(shape, dtype=dtype)
        # Generate data: one random 0/1 board per picture.
        size = (self.n_checks, self.n_checks, self.pic_num)
        numpy.random.seed(self.seed)
        data = numpy.random.randint(0, high=2, size=size, dtype=dtype)
        # Scale data from {0, 1} to {0, 255}.
        max_ = numpy.iinfo(dtype).max
        data = max_ * data
        # Define frames: blow each check up to check_size pixels and center
        # the board inside the device resolution.
        x_min = (width - self.checkerboard_size) // 2
        x_max = x_min + self.checkerboard_size
        y_min = (height - self.checkerboard_size) // 2
        y_max = y_min + self.checkerboard_size
        frames[x_min:x_max, y_min:y_max, :] = numpy.kron(data, numpy.ones((self.check_size, self.check_size, 1)))
        # Transpose frames
        frames = numpy.transpose(frames)
        # TODO remove the following lines (debug output only).
        # print("frames: {}".format(frames))
        print("frames.shape: {}".format(frames.shape))
        print("numpy.amin(frames): {}".format(numpy.amin(frames)))
        print("numpy.amax(frames): {}".format(numpy.amax(frames)))
        import matplotlib.pyplot as plt
        import os
        plt.imshow(frames[:, :, 0])
        plt.savefig(os.path.expanduser(os.path.join("~", "checkerboard.svg")))
        # Return frames
        return frames
    def display(self):
        """Allocate and time the sequence on the device (unfinished).

        NOTE(review): reads self.picture_time, which is only assigned in the
        commented-out code in __init__ — this would raise AttributeError
        before reaching the NotImplementedError below; confirm intent.
        """
        self.device.invert_projection()
        sequence_id = self.device.allocate(self.bit_planes, self.pic_num)
        self.device.timing(sequence_id, picture_time=self.picture_time)
        raise NotImplementedError()
| 39.689189
| 113
| 0.625468
|
4a03fb2761b4ca3cf4e99d5dd1156188be6186d9
| 4,937
|
py
|
Python
|
dagon/peer2peer/node.py
|
JuanBarron/dagonstar
|
23e24ceb27e868c70461a9c573dadfb86eb96486
|
[
"Apache-2.0"
] | null | null | null |
dagon/peer2peer/node.py
|
JuanBarron/dagonstar
|
23e24ceb27e868c70461a9c573dadfb86eb96486
|
[
"Apache-2.0"
] | null | null | null |
dagon/peer2peer/node.py
|
JuanBarron/dagonstar
|
23e24ceb27e868c70461a9c573dadfb86eb96486
|
[
"Apache-2.0"
] | null | null | null |
import hashlib
from flask import Flask
from flask import request
import json
import sys
import requests
import os
import socket
from time import sleep
app = Flask(__name__)
# nodes
ip = "localhost"  # address used for local health probes
m = 10
ID = int(sys.argv[1]) #workflow identifier of this node (first CLI argument)
PortBase=int(sys.argv[2])  # HTTP port this node listens on (second CLI argument)
SuscribedNodes=[]  # peers subscribed to us: dicts with 'id', 'port', 'ip'
Records = []  # this node's task records: dicts with 'name', 'status', 'working_dir'
Transversal_Records = dict()  # records learned from peers, keyed by workflow
@app.route('/updateTask', methods=['POST'])
def update():
    """Endpoint: record a task status change pushed by a worker.

    Expects JSON with 'task', 'status' and 'working_dir'; returns "OK" on
    success and "BAD" when any field is missing.
    """
    payload = request.get_json()
    required = ('task', 'status', 'working_dir')
    if any(field not in payload for field in required):
        return "BAD"
    update_task(payload['task'], payload['status'], payload['working_dir'])
    return "OK"
@app.route('/getTask', methods=['POST'])
def get_Task():
    """Endpoint: block until the named task of a workflow is FINISHED.

    Expects JSON with 'workflow' and 'task'; returns the matching record as
    JSON. Polls the transversal records once per second.

    Bug fix: the original iterated `for x in ...` with a `while True` inside,
    so it pinned whatever record happened to be first and slept forever even
    when the requested task was already finished; it also raised KeyError for
    an unknown workflow. We now re-scan the (live) record list on every poll.

    NOTE(review): like the original, this never returns if the task never
    reaches FINISHED — callers should use a request timeout.
    """
    params = request.get_json()
    if 'workflow' in params and 'task' in params:
        task_name = params['task']
        task_workflow = params['workflow']
        while True:
            for task in Transversal_Records.get(task_workflow, []):
                if task['name'] == task_name and task['status'] == "FINISHED":
                    return json.dumps(task)
            sleep(1)
@app.route('/subscribe', methods=['POST'])
def subscribe():
    """Endpoint: register a peer node as a subscriber of this workflow.

    Returns "None" when the node is already registered, otherwise the current
    local task records as JSON so the new subscriber can catch up.
    """
    payload = request.get_json()
    if all(field in payload for field in ('id', 'port', 'ip')):
        if in_suscribed_nodes(payload['id']):
            return "None"
        SuscribedNodes.append({
            'id': payload['id'],
            'port': payload['port'],
            'ip': payload['ip'],
        })
    # Hand the subscriber the current task records so it can catch up.
    return json.dumps(Records)
@app.route('/notify/<workflow>', methods=['POST'])
def getNotification(workflow):
    """Endpoint: receive a task-status notification from a peer node.

    Expects JSON with 'name', 'status' and 'working_dir'. Updates the status
    of an existing record for that workflow, or appends a new record.
    Returns "OK" on success, "BAD" when a field is missing.
    """
    payload = request.get_json()
    if not all(field in payload for field in ('name', 'status', 'working_dir')):
        return "BAD"
    record = {
        'name': payload['name'],
        'status': payload['status'],
        'working_dir': payload['working_dir'],
    }
    # Create the per-workflow bucket on first notification.
    bucket = Transversal_Records.setdefault(workflow, [])
    for task in bucket:
        if task['name'] == record['name']:
            # Known task: only the status is refreshed (as in the original).
            task['status'] = record['status']
            return "OK"
    bucket.append(record)
    return "OK"
@app.route('/list_subs', methods=['GET'])
def get_subscribers():
    """Endpoint: dump the current subscriber list as JSON."""
    return json.dumps(SuscribedNodes)
@app.route('/list_tasks', methods=['GET'])
def get_task_list():
    """Endpoint: dump this node's local task records as JSON."""
    return json.dumps(Records)
@app.route('/list_trans', methods=['GET'])
def get_transversal_list():
    """Endpoint: dump the per-workflow records learned from peers as JSON."""
    return json.dumps(Transversal_Records)
def subscribe2workflow(id, host):
    """Subscribe this node to a peer workflow node and import its records.

    :param id: identifier sent to the peer's /subscribe endpoint.
    :param host: peer address as "ip:port".
    :raises Exception: on any non-2xx HTTP response.
    :return: 0 when the peer answers "OK", otherwise None after importing
        the peer's record list into Transversal_Records[host].
    """
    # Bug fix: the original used service = "/subscribe" inside "http://%s/%s",
    # producing "http://host//subscribe", which Flask routes as a 404.
    service = "subscribe"
    url = "http://%s/%s" % (host, service)
    peer_ip, peer_port = host.split(":")  # avoid shadowing the module-level `ip`
    data = {'id': id, 'port': peer_port, 'ip': peer_ip}
    res = requests.post(url, json=data)
    if res.status_code not in (200, 201):  # error
        raise Exception("Something went wrong %d %s" % (res.status_code, res.reason))
    if res.text == "OK":
        return 0
    # NOTE(review): the peer may also answer the literal string "None" (see
    # subscribe()), which json.loads cannot parse — confirm the protocol.
    records = json.loads(res.text)
    # Bug fix: the original appended to Transversal_Records[host] without ever
    # creating the key, raising KeyError on the first subscription.
    bucket = Transversal_Records.setdefault(host, [])
    for item in records:
        bucket.append(item)
def CheckStatus():
    """Placeholder for a periodic status-check hook (not implemented yet)."""
    pass
def in_suscribed_nodes(id_node):
    """Return True when a subscriber with the given id is already registered."""
    return any(node['id'] == id_node for node in SuscribedNodes)
def update_task(task_name, task_status, task_working_dir):
    """Create or update the local record for a task and fan out to subscribers.

    :return: True when an existing record was updated, False when a new
        record was created.
    """
    for record in Records:
        if record['name'] != task_name:
            continue
        record['status'] = task_status
        record['working_dir'] = task_working_dir
        notify_suscribers(record)  # push the change to every subscriber
        return True
    record = {'name': task_name, 'status': task_status, 'working_dir': task_working_dir}
    Records.append(record)
    notify_suscribers(record)  # push the new record to every subscriber
    return False
def if_task_exist(task_name):
    """Return True when a local record with the given task name exists."""
    return any(record['name'] == task_name for record in Records)
def notify_suscribers(data):
    """POST a task record to the /notify endpoint of every subscribed node.

    :raises Exception: when any subscriber answers with a non-2xx status.
    """
    for subscriber in SuscribedNodes:
        endpoint = "notify/%s" % ID
        url = "http://%s:%s/%s" % (subscriber['ip'], subscriber['port'], endpoint)
        print(url)
        response = requests.post(url, json=data)
        if response.status_code not in (200, 201):  # error
            raise Exception("Something went wrong %d %s" % (response.status_code, response.reason))
def Check():
    """Probe every subscribed node over TCP and report the ones that are down.

    Bug fix: SuscribedNodes stores dicts with 'id'/'port'/'ip' keys (see
    subscribe()), so the original node[0]/node[1] indexing raised KeyError;
    use the dict keys instead. The socket is now closed even if the probe
    or the DeleteNode request raises.
    """
    for node in SuscribedNodes:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            # NOTE(review): the original probed the module-level `ip` for every
            # node; the subscriber's own address seems intended — confirm.
            result = sock.connect_ex((node['ip'], int(node['port'])))
            if result != 0:
                requests.get('http://'+ip+':'+str(PortBase)+'/DeleteNode?id='+str(node['id']),verify=False)
                print ("hubo un nodo caido")
        finally:
            sock.close()
if __name__ == '__main__':
    # Listen on all interfaces on the port given as the second CLI argument.
    app.run(host='0.0.0.0', port=(PortBase),debug = True)
| 30.103659
| 91
| 0.627912
|
4a03fba52bd9e715adca6f383e7e494bbc503484
| 2,049
|
py
|
Python
|
raiden/network/utils.py
|
karlb/raiden
|
61ade0559add1a97588ae6bdedd5e0b99ed41de3
|
[
"MIT"
] | 1
|
2018-10-27T11:30:06.000Z
|
2018-10-27T11:30:06.000Z
|
raiden/network/utils.py
|
karlb/raiden
|
61ade0559add1a97588ae6bdedd5e0b99ed41de3
|
[
"MIT"
] | null | null | null |
raiden/network/utils.py
|
karlb/raiden
|
61ade0559add1a97588ae6bdedd5e0b99ed41de3
|
[
"MIT"
] | null | null | null |
from itertools import count
from time import sleep
from typing import Optional
import psutil
import requests
from requests import RequestException
def get_free_port(address: str, initial_port: int):
    """Iterate over TCP ports on `address` that look unused, starting at
    `initial_port`.

    This is inherently racy: another process may grab a port between the
    check and the moment the caller binds it, so do not rely on it in
    mission-critical code.

    Parameters:
        address : an ip address of interface to use
        initial_port : port to start iteration with
    Return:
        Iterator that will return next unused port on a specified
        interface
    """
    try:
        # On OSX inspecting connections requires root privileges; in that
        # case fall back to plain sequential port numbers without checking.
        psutil.net_connections()
    except psutil.AccessDenied:
        return count(initial_port)

    def _unused_ports():
        for candidate in count(initial_port):
            # A port is "in use" if any current connection is bound to it
            # on the requested interface.
            in_use = any(
                hasattr(conn, 'laddr')
                and conn.laddr[0] == address
                and conn.laddr[1] == candidate
                for conn in psutil.net_connections()
            )
            if not in_use:
                yield candidate

    return _unused_ports()
def get_http_rtt(
        url: str,
        samples: int = 3,
        method: str = 'head',
        timeout: int = 1,
) -> Optional[float]:
    """
    Determine the average HTTP RTT to `url` over the number of `samples`.
    Returns `None` if the server is unreachable.
    """
    durations = []
    for _ in range(samples):
        try:
            response = requests.request(method, url, timeout=timeout)
        except (RequestException, OSError):
            return None
        except Exception as ex:  # unexpected failure: report and bail out
            print(ex)
            return None
        durations.append(response.elapsed.total_seconds())
        # Slight delay to avoid overloading
        sleep(.125)
    return sum(durations) / samples
| 28.859155
| 87
| 0.603709
|
4a03fbc5031a4c3a28c2fad64f45cd0ca67e1f85
| 13,101
|
py
|
Python
|
tests/test_BaseTransform.py
|
OrtnerMichael/magPyLib
|
4c7e7f56f6e0b915ec0e024c172c460fa80126e5
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_BaseTransform.py
|
OrtnerMichael/magPyLib
|
4c7e7f56f6e0b915ec0e024c172c460fa80126e5
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_BaseTransform.py
|
OrtnerMichael/magPyLib
|
4c7e7f56f6e0b915ec0e024c172c460fa80126e5
|
[
"BSD-2-Clause"
] | null | null | null |
import numpy as np
import pytest
from scipy.spatial.transform import Rotation as R
import magpylib as magpy
from magpylib._src.obj_classes.class_BaseTransform import apply_move
from magpylib._src.obj_classes.class_BaseTransform import apply_rotation
# Each case: (description, old_position, displacement, expected new_position,
# start index). Scalar displacement moves/extends the path in place; a vector
# displacement is merged into the path, padding before/after as needed.
@pytest.mark.parametrize(
    (
        "description",
        "old_position",
        "displacement",
        "new_position",
        "start",
    ),
    [
        # SCALAR INPUT
        ("01_ with start='auto'", (0, 0, 0), (1, 2, 3), (1, 2, 3), "auto"),
        ("02_ with start=0", (1, 2, 3), (1, 2, 3), (2, 4, 6), 0),
        ("03_ with start=-1", (2, 4, 6), (1, 2, 3), (3, 6, 9), -1),
        ("04_ pad behind", (3, 6, 9), (-1, -2, -3), [(3, 6, 9), (2, 4, 6)], 1),
        (
            "05_ whole path",
            [(3, 6, 9), (2, 4, 6)],
            (-1, -2, -3),
            [(2, 4, 6), (1, 2, 3)],
            "auto",
        ),
        (
            "06_ pad before",
            [(2, 4, 6), (1, 2, 3)],
            (-1, -2, -3),
            [(1, 2, 3), (1, 2, 3), (0, 0, 0)],
            -3,
        ),
        (
            "07_ whole path starting in the middle",
            [(1, 2, 3), (1, 2, 3), (0, 0, 0)],
            (1, 2, 3),
            [(1, 2, 3), (2, 4, 6), (1, 2, 3)],
            1,
        ),
        (
            "08_ whole path starting in the middle with negative start",
            [(1, 2, 3), (2, 4, 6), (1, 2, 3)],
            (1, 2, 3),
            [(1, 2, 3), (3, 6, 9), (2, 4, 6)],
            -2,
        ),
        # VECTOR INPUT
        (
            "17_ vector + start=0: simple append",
            (0, 0, 0),
            [(1, 2, 3)],
            [(0, 0, 0), (1, 2, 3)],
            "auto",
        ),
        (
            "18_ vector + start in middle: merge",
            [(0, 0, 0), (1, 2, 3)],
            [(1, 2, 3)],
            [(0, 0, 0), (2, 4, 6)],
            1,
        ),
        (
            "19_ vector + start in middle: merge + pad behind",
            [(0, 0, 0), (2, 4, 6)],
            [(-1, -2, -3), (-2, -4, -6)],
            [(0, 0, 0), (1, 2, 3), (0, 0, 0)],
            1,
        ),
        (
            "20_ vector + start before: merge + pad before",
            [(0, 0, 0), (1, 2, 3), (0, 0, 0)],
            [(1, 2, 3), (1, 2, 3)],
            [(1, 2, 3), (1, 2, 3), (1, 2, 3), (0, 0, 0)],
            -4,
        ),
    ],
)
def test_apply_move(description, old_position, displacement, new_position, start):
    """v4 path functionality tests: apply_move on a Sensor path."""
    print(description)  # shown by pytest on failure, identifies the case
    s = magpy.Sensor(position=old_position)
    apply_move(s, displacement, start=start)
    assert np.all(s.position == np.array(new_position))
# Each case: (description, old/new position path, old orientation rotvec,
# rotvec to apply, expected orientation rotvec path, start index, anchor).
# With an anchor the rotation also moves the position around that point;
# without one only the orientation changes.
@pytest.mark.parametrize(
    (
        "description",
        "old_position",
        "new_position",
        "old_orientation_rotvec",
        "rotvec_to_apply",
        "new_orientation_rotvec",
        "start",
        "anchor",
    ),
    [
        # SCALAR INPUT
        (
            "01_ with start='auto'",
            (0, 0, 0),
            (0, 0, 0),
            (0, 0, 0),
            (0.1, 0.2, 0.3),
            (0.1, 0.2, 0.3),
            "auto",
            None,
        ),
        (
            "02_ with start=0",
            (0, 0, 0),
            (0, 0, 0),
            (0.1, 0.2, 0.3),
            (0.1, 0.2, 0.3),
            (0.2, 0.4, 0.6),
            0,
            None,
        ),
        (
            "03_ with start=-1",
            (0, 0, 0),
            (0, 0, 0),
            (0.2, 0.4, 0.6),
            (-0.2, -0.4, -0.6),
            (0, 0, 0),
            -1,
            None,
        ),
        (
            "04_ with anchor",
            (0, 0, 0),
            (1, -1, 0),
            (0, 0, 0),
            (0, 0, np.pi / 2),
            (0, 0, np.pi / 2),
            -1,
            (1, 0, 0),
        ),
        (
            "05_ pad behind",
            (1, -1, 0),
            [(1, -1, 0), (2, 0, 0)],
            (0, 0, np.pi / 2),
            (0, 0, np.pi / 2),
            [(0, 0, np.pi / 2), (0, 0, np.pi)],
            1,
            (1, 0, 0),
        ),
        (
            "06_ whole path",
            [(1, -1, 0), (2, 0, 0)],
            [(2, 0, 0), (1, 1, 0)],
            [(0, 0, np.pi / 2), (0, 0, np.pi)],
            (0, 0, np.pi / 2),
            [(0, 0, np.pi), (0, 0, -np.pi / 2)],
            "auto",
            (1, 0, 0),
        ),
        (
            "07_ pad before",
            [(2, 0, 0), (1, 1, 0)],
            [(1, 1, 0), (1, 1, 0), (0, 0, 0)],
            [(0, 0, np.pi), (0, 0, -np.pi / 2)],
            (0, 0, np.pi / 2),
            [(0, 0, -np.pi / 2), (0, 0, -np.pi / 2), (0, 0, 0)],
            -3,
            (1, 0, 0),
        ),
        (
            "08_ whole path starting in the middle",
            [(1, 1, 0), (1, 1, 0), (0, 0, 0)],
            [(1, 1, 0), (0, 0, 0), (1, -1, 0)],
            [(0, 0, -np.pi / 2), (0, 0, -np.pi / 2), (0, 0, 0)],
            (0, 0, np.pi / 2),
            [(0, 0, -np.pi / 2), (0, 0, 0), (0, 0, np.pi / 2)],
            1,
            (1, 0, 0),
        ),
        (
            "09_ whole path starting in the middle without anchor",
            [(1, 1, 0), (0, 0, 0), (1, -1, 0)],
            [(1, 1, 0), (0, 0, 0), (1, -1, 0)],
            [(0, 0, -np.pi / 2), (0, 0, 0), (0, 0, np.pi / 2)],
            ((0, 0, np.pi / 4)),
            [(0, 0, -np.pi / 2), (0, 0, np.pi / 4), (0, 0, 3 * np.pi / 4)],
            1,
            None,
        ),
        # VECTOR INPUT
        (
            "11_ simple append start=auto behavior",
            (0, 0, 0),
            [(0, 0, 0), (1, -1, 0)],
            (0, 0, 0),
            [(0, 0, np.pi / 2)],
            [(0, 0, 0), (0, 0, np.pi / 2)],
            "auto",
            (1, 0, 0),
        ),
        (
            "12_ vector + start=0: simple merge",
            [(0, 0, 0), (1, -1, 0)],
            [(1, -1, 0), (1, -1, 0)],
            [(0, 0, 0), (0, 0, np.pi / 2)],
            [(0, 0, np.pi / 2)],
            [(0, 0, np.pi / 2), (0, 0, np.pi / 2)],
            0,
            (1, 0, 0),
        ),
        (
            "13_ vector + start in middle: merge + pad behind",
            [(1, -1, 0), (1, -1, 0)],
            [(1, -1, 0), (1, 1, 0), (1, 1, 0)],
            [(0, 0, np.pi / 2), (0, 0, np.pi / 2)],
            [(0, 0, np.pi), (0, 0, np.pi)],
            [(0, 0, np.pi / 2), (0, 0, -np.pi / 2), (0, 0, -np.pi / 2)],
            1,
            (1, 0, 0),
        ),
        (
            "14_ vector + start before: merge + pad before",
            [(1, -1, 0), (1, 1, 0), (1, 1, 0)],
            [(1, -1, 0), (1, 1, 0), (1, 1, 0), (1, 1, 0)],
            [(0, 0, np.pi / 2), (0, 0, -np.pi / 2), (0, 0, -np.pi / 2)],
            [(0, 0, 0), (0, 0, np.pi)],
            [
                (0, 0, np.pi / 2),
                (0, 0, -np.pi / 2),
                (0, 0, -np.pi / 2),
                (0, 0, -np.pi / 2),
            ],
            -4,
            (1, 0, 0),
        ),
    ],
)
def test_apply_rotation(
    description,
    old_position,
    new_position,
    old_orientation_rotvec,
    rotvec_to_apply,
    new_orientation_rotvec,
    start,
    anchor,
):
    """v4 path functionality tests: apply_rotation on a Sensor path."""
    print(description)  # shown by pytest on failure, identifies the case
    s = magpy.Sensor(
        position=old_position, orientation=R.from_rotvec(old_orientation_rotvec)
    )
    apply_rotation(s, R.from_rotvec(rotvec_to_apply), start=start, anchor=anchor)
    assert np.allclose(s.position, np.array(new_position))
    # Compare orientations as matrices to avoid rotvec sign ambiguities.
    assert np.allclose(
        s.orientation.as_matrix(), R.from_rotvec(new_orientation_rotvec).as_matrix()
    )
# def test_apply_move_v4_pt1():
# """ v4 path functionality tests """
# # pylint: disable=too-many-statements
# # SCALAR INPUT - ABSOLUTE=FALSE
# s = magpy.Sensor()
# # move object with start='auto'
# apply_move(s, (1,2,3))
# assert np.all(s.position == (1,2,3))
# # move object with start=0
# apply_move(s, (1,2,3), start=0)
# assert np.all(s.position == (2,4,6))
# # move object with start=-1
# apply_move(s, (1,2,3), start=-1)
# assert np.all(s.position == (3,6,9))
# # pad behind
# apply_move(s, (-1,-2,-3), start=1)
# assert np.all(s.position == [(3,6,9), (2,4,6)])
# # move whole path
# apply_move(s, (-1,-2,-3))
# assert np.all(s.position == [(2,4,6), (1,2,3)])
# # pad before
# apply_move(s, (-1,-2,-3), start=-3)
# assert np.all(s.position == [(1,2,3), (1,2,3), (0,0,0)])
# # move whole path starting in the middle
# apply_move(s, (1,2,3), start=1)
# assert np.all(s.position == [(1,2,3), (2,4,6), (1,2,3)])
# # move whole path starting in the middle with negative start
# apply_move(s, (1,2,3), start=-2)
# assert np.all(s.position == [(1,2,3), (3,6,9), (2,4,6)])
# def test_apply_move_v4_pt3():
# """ v4 path functionality tests """
# # VECTOR INPUT - ABSOLUTE=FALSE
# s = magpy.Sensor()
# # vector + start=0: simple append
# apply_move(s, [(1,2,3)])
# assert np.all(s.position == [(0,0,0), (1,2,3)])
# # vector + start in middle: merge
# apply_move(s, [(1,2,3)], start=1)
# assert np.all(s.position == [(0,0,0), (2,4,6)])
# # vector + start in middle: merge + pad behind
# apply_move(s, [(-1,-2,-3), (-2,-4,-6)], start=1)
# assert np.all(s.position == [(0,0,0), (1,2,3), (0,0,0)])
# # vector + start before: merge + pad before
# apply_move(s, [(1,2,3), (1,2,3)], start=-4)
# assert np.all(s.position == [(1,2,3), (1,2,3), (1,2,3), (0,0,0)])
# def test_apply_rotation_v4_pt1():
# """ v4 path functionality tests """
# # SCALAR INPUT
# s = magpy.Sensor()
# # rotate object with start='auto'
# apply_rotation(s, R.from_rotvec((.1,.2,.3)))
# assert np.allclose(s.position, (0,0,0))
# assert np.allclose(s.orientation.as_rotvec(), (.1,.2,.3))
# # rotate object with start=0
# apply_rotation(s, R.from_rotvec((.1,.2,.3)), start=0)
# assert np.allclose(s.position, (0,0,0))
# assert np.allclose(s.orientation.as_rotvec(), (.2,.4,.6))
# # rotate object with start=-1
# apply_rotation(s, R.from_rotvec((-.2,-.4,-.6)), start=-1)
# assert np.allclose(s.position, (0,0,0))
# assert np.allclose(s.orientation.as_rotvec(), (0,0,0))
# # rotate object with anchor
# apply_rotation(s, R.from_rotvec((0,0,np.pi/2)), anchor=(1,0,0))
# assert np.allclose(s.position, (1,-1,0))
# assert np.allclose(s.orientation.as_rotvec(), (0,0,np.pi/2))
# # pad behind
# apply_rotation(s, R.from_rotvec((0,0,np.pi/2)), anchor=(1,0,0), start=1)
# assert np.allclose(s.position, ((1,-1,0), (2,0,0)))
# assert np.allclose(s.orientation.as_rotvec(), ((0,0,np.pi/2), (0,0,np.pi)))
# # rotate whole path
# apply_rotation(s, R.from_rotvec((0,0,np.pi/2)), anchor=(1,0,0))
# assert np.allclose(s.position, ((2,0,0), (1,1,0)))
# assert np.allclose(s.orientation.as_rotvec(), ((0,0,np.pi), (0,0,-np.pi/2)))
# # pad before
# apply_rotation(s, R.from_rotvec((0,0,np.pi/2)), anchor=(1,0,0), start=-3)
# assert np.allclose(s.position, ((1,1,0), (1,1,0), (0,0,0)))
# assert np.allclose(s.orientation.as_rotvec(), ((0,0,-np.pi/2), (0,0,-np.pi/2), (0,0,0)))
# # rotate whole path starting in the middle
# apply_rotation(s, R.from_rotvec((0,0,np.pi/2)), anchor=(1,0,0), start=1)
# assert np.allclose(s.position, ((1,1,0), (0,0,0), (1,-1,0)))
# assert np.allclose(s.orientation.as_rotvec(), ((0,0,-np.pi/2), (0,0,0), (0,0,np.pi/2)))
# # rotate whole path starting in the middle without anchor
# apply_rotation(s, R.from_rotvec((0,0,np.pi/4)), start=1)
# assert np.allclose(s.position, ((1,1,0), (0,0,0), (1,-1,0)))
# assert np.allclose(s.orientation.as_rotvec(), ((0,0,-np.pi/2), (0,0,np.pi/4), (0,0,3*np.pi/4)))
# def test_apply_rotation_v4_pt2():
# """ v4 path functionality tests """
# # VECTOR INPUT - ABSOLUTE=FALSE
# s = magpy.Sensor()
# # simple append start=auto behavior
# apply_rotation(s, R.from_rotvec(((0,0,np.pi/2),)), anchor=(1,0,0))
# assert np.allclose(s.position, ((0,0,0), (1,-1,0)))
# assert np.allclose(s.orientation.as_rotvec(), ((0,0,0), (0,0,np.pi/2)))
# # vector + start=0: simple merge
# apply_rotation(s, R.from_rotvec(((0,0,np.pi/2),)), anchor=(1,0,0), start=0)
# assert np.allclose(s.position, ((1,-1,0), (1,-1,0)))
# assert np.allclose(s.orientation.as_rotvec(), ((0,0,np.pi/2), (0,0,np.pi/2)))
# # vector + start in middle: merge + pad behind
# apply_rotation(s, R.from_rotvec(((0,0,np.pi), (0,0,np.pi))), anchor=(1,0,0), start=1)
# assert np.allclose(s.position, ((1,-1,0), (1,1,0), (1,1,0)))
# assert np.allclose(s.orientation.as_rotvec(), ((0,0,np.pi/2), (0,0,-np.pi/2), (0,0,-np.pi/2)))
# # vector + start before: merge + pad before
# apply_rotation(s, R.from_rotvec(((0,0,0), (0,0,np.pi))), anchor=(1,0,0), start=-4)
# assert np.allclose(s.position, ((1,-1,0), (1,1,0), (1,1,0), (1,1,0)))
# assert np.allclose(
# s.orientation.as_rotvec(), ((0,0,np.pi/2), (0,0,-np.pi/2), (0,0,-np.pi/2), (0,0,-np.pi/2)))
| 32.428218
| 101
| 0.432791
|
4a03fbcec81a1833ab604906837c84b5d2d1f09c
| 1,348
|
py
|
Python
|
mozart/services/api_v02/service.py
|
hysds/mozart
|
44ed1b70114d65d8fe10c56ce4ef22460764f882
|
[
"Apache-2.0"
] | 1
|
2019-10-18T21:30:28.000Z
|
2019-10-18T21:30:28.000Z
|
mozart/services/api_v02/service.py
|
hysds/mozart
|
44ed1b70114d65d8fe10c56ce4ef22460764f882
|
[
"Apache-2.0"
] | 6
|
2019-10-22T05:17:32.000Z
|
2021-03-08T22:51:05.000Z
|
mozart/services/api_v02/service.py
|
hysds/mozart
|
44ed1b70114d65d8fe10c56ce4ef22460764f882
|
[
"Apache-2.0"
] | 3
|
2018-04-08T19:13:42.000Z
|
2019-02-07T18:58:04.000Z
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import int
from builtins import str
from future import standard_library
standard_library.install_aliases()
from flask import Blueprint
from flask_restx import Api, apidoc
from mozart.services.api_v02.specs import job_spec_ns, container_ns, hysds_io_ns
from mozart.services.api_v02.events import event_ns
from mozart.services.api_v02.jobs import job_ns, queue_ns, on_demand_ns
from mozart.services.api_v02.tags import user_tags_ns, user_rules_tags_ns
from mozart.services.api_v02.user_rules import user_rule_ns
# Version 0.2 of the Mozart REST API: a Flask blueprint carrying a flask-restx
# Api mounted at /api/v0.2, aggregating the per-resource namespaces below.
services = Blueprint('api_v0-2', __name__, url_prefix='/api/v0.2')
api = Api(services, ui=False, version="0.2", title="Mozart API",
          description="Rest API for HySDS job submission and query.")
# specs.py
api.add_namespace(job_spec_ns)
api.add_namespace(container_ns)
api.add_namespace(hysds_io_ns)
# events.py
api.add_namespace(event_ns)
# jobs.py
api.add_namespace(job_ns)
api.add_namespace(queue_ns)
api.add_namespace(on_demand_ns)
# tags.py
api.add_namespace(user_tags_ns)
api.add_namespace(user_rules_tags_ns)
# user_rules.py
api.add_namespace(user_rule_ns)
@services.route('/doc/', endpoint='api_doc')
def swagger_ui():
    """Serve the Swagger UI documentation page for this API version."""
    return apidoc.ui_for(api)
| 28.083333
| 80
| 0.815282
|
4a03fbecb70b4255bfd7f26089ee30d4d61424cf
| 4,822
|
py
|
Python
|
src/TimeSeries/TimeSeriesForecast.py
|
rudyn2/wine_market_temporal_prediction
|
ab51dbdaa75dfd532eaef3f712ca21f3117c6d74
|
[
"MIT"
] | null | null | null |
src/TimeSeries/TimeSeriesForecast.py
|
rudyn2/wine_market_temporal_prediction
|
ab51dbdaa75dfd532eaef3f712ca21f3117c6d74
|
[
"MIT"
] | null | null | null |
src/TimeSeries/TimeSeriesForecast.py
|
rudyn2/wine_market_temporal_prediction
|
ab51dbdaa75dfd532eaef3f712ca21f3117c6d74
|
[
"MIT"
] | null | null | null |
import abc
from typing import Tuple
import pandas as pd
from src.TimeSeries.TimeSeries import TimeSeries
class ResultsNotFound(Exception):
    """Raised when a prediction is requested for a series with no fitted model."""

    def __init__(self, name: str):
        # Bug fix: the original implicit string concatenation joined
        # "...fit before" + "predict." into "fit beforepredict." — add the
        # missing space between the two literals.
        super(ResultsNotFound, self).__init__(
            f"There are not SARIMAX models fitted for: {name}. Please, fit "
            f"before predict."
        )
class TimeSeriesForecast(TimeSeries):
    """
    Class that inherits from TimeSeries Class. It handles forecasting of future values by using the known values of
    the series and models, like SARIMA. It also has some methods for visualization of forecasting results.

    Subclasses must implement :meth:`fit` (populating ``self._results``) and
    :meth:`_proxy_predict` (delegating to the concrete model's predictor).
    """
    def __init__(self):
        super().__init__()
        self._models = {}   # fitted model objects, keyed by series name
        self._results = {}  # fit-result wrappers, keyed by series name
    def get_mse(self, name):
        """
        Computes de Mean Square Error of the original time series indexed by {name} and the prediction
        of the fitted model for the same series. It's a measure of how well the model fits to the original data.

        Note: the MSE is printed, not returned (this method returns None).

        :param name: Name of temporal serie.
        """
        pred_mean, pred_ci = self.predict_in_sample(name)
        y_truth = self[name]
        mse = ((pred_mean - y_truth) ** 2).mean()
        print(f"Reported MSE for last SARIMAX model trained in {name}: {mse}")
    def predict_in_sample(self, name: str):
        """
        Does in-sample prediction of specified series (over the full index
        range of the stored data).

        :param name: Name of the series.
        :return: A PredictionResultsWrapper instance
                 (see more references on statsmodels docs).
        """
        start = self._data.index[0]
        end = self._data.index[-1]
        return self._predict(name, start, end)
    def predict_out_of_sample(self, name: str, start: str, end: str):
        """
        Does a out of sample prediction using last model fitted for {name} series. Forecasting.

        :param name: Name of the serie.
        :param start: String representation of a date from which predict.
        :param end: String representation of a date until which predict.
        """
        return self._predict(name=name, start=start, end=end)
    def _predict(self, name: str, start: str, end: str):
        """
        It either do in-sample prediction or out of sample forecasting, depending if the {start} and {end} range falls
        in or out of the time range of the data used for fitting the model.

        :param name: Name of temporal serie.
        :param start: start of forecast or in-sample
        :param end: end of forecast or in-sample
        :raises ResultsNotFound: when no model has been fitted for {name}.
        :return: tuple of two arrays
                 1) mean of the prediction for each point in range
                 2) 95% confidence interval for each point in range
        """
        if name not in self._results.keys():
            raise ResultsNotFound(name)
        return self._proxy_predict(name, start, end)
    def plot_insample_pred(self, name: str):
        """
        Plot the fitted model prediction for the data's original time range, data indexed by {name}.

        :param name: Name of temporal serie.
        """
        self.plot_forecast(name, start=self[name].index[0], end=self[name].index[-1],
                           forecast_label='In-sample forecast')
    def plot_forecast(self, name: str, start: str, end: str, forecast_label: str):
        """
        PLot the predicted data indexed by {name} in the time range specified by {start} and {end}, it plots
        the mean value predicted alongside the confidence interval.

        :param name: Name of temporal serie.
        :param start: Start of temporal axis
        :param end: End of temporal axis
        :param forecast_label: label for the time series prediction
        :return: the (figure, axes) pair used for the plot.
        """
        # _get_customized_figure is inherited — presumably from TimeSeries.
        fig, ax = self._get_customized_figure()
        pred_mean, pred_ci = self._predict(name, start=start, end=end)
        pred_mean.plot(ax=ax, label=forecast_label, alpha=.7)
        # Shade the confidence band between the lower and upper CI columns.
        ax.fill_between(pred_ci.index,
                        pred_ci.iloc[:, 0],
                        pred_ci.iloc[:, 1], color='k', alpha=.2)
        ax.set_xlabel('Date')
        ax.set_ylabel(name)
        ax.legend()
        return fig, ax
    @abc.abstractmethod
    def _proxy_predict(self, result, start: str, end: str) -> Tuple[pd.Series, pd.DataFrame]:
        # NOTE(review): _predict passes the series *name* as the first
        # argument, but the parameter here is called `result` — confirm the
        # intended contract in subclasses.
        raise NotImplementedError
    @abc.abstractmethod
    def fit(self, name, **kwargs):
        # Subclasses fit a model for series {name} and store the result in
        # self._results[name].
        raise NotImplementedError
| 42.672566
| 118
| 0.570095
|
4a03fe972d4b4c4c898667cb107897aaea889540
| 3,181
|
py
|
Python
|
feincms/contrib/fields.py
|
youPickItUp/feincms
|
eb7b09b745da0773c1cd8bea6ebb3f7c28edc8b3
|
[
"BSD-3-Clause"
] | 325
|
2015-01-07T08:20:09.000Z
|
2022-03-22T02:55:13.000Z
|
feincms/contrib/fields.py
|
youPickItUp/feincms
|
eb7b09b745da0773c1cd8bea6ebb3f7c28edc8b3
|
[
"BSD-3-Clause"
] | 148
|
2015-01-09T06:19:35.000Z
|
2022-01-07T15:36:35.000Z
|
feincms/contrib/fields.py
|
youPickItUp/feincms
|
eb7b09b745da0773c1cd8bea6ebb3f7c28edc8b3
|
[
"BSD-3-Clause"
] | 109
|
2015-01-13T09:30:20.000Z
|
2022-01-03T09:24:07.000Z
|
from __future__ import absolute_import, unicode_literals
import json
import logging
from distutils.version import LooseVersion
import six
from django import forms, get_version
from django.core.serializers.json import DjangoJSONEncoder
from django.db import models
class JSONFormField(forms.fields.CharField):
def clean(self, value, *args, **kwargs):
# It seems that sometimes we receive dict objects here, not only
# strings. Partial form validation maybe?
if value:
if isinstance(value, six.string_types):
try:
value = json.loads(value)
except ValueError:
raise forms.ValidationError("Invalid JSON data!")
try:
# Run the value through JSON so we can normalize formatting
# and at least learn about malformed data:
value = json.dumps(value, cls=DjangoJSONEncoder)
except ValueError:
raise forms.ValidationError("Invalid JSON data!")
return super(JSONFormField, self).clean(value, *args, **kwargs)
if LooseVersion(get_version()) > LooseVersion("1.8"):
workaround_class = models.TextField
else:
workaround_class = six.with_metaclass(models.SubfieldBase, models.TextField)
class JSONField(workaround_class):
"""
TextField which transparently serializes/unserializes JSON objects
See:
http://www.djangosnippets.org/snippets/1478/
"""
formfield = JSONFormField
def to_python(self, value):
"""Convert our string value to JSON after we load it from the DB"""
if isinstance(value, dict):
return value
elif isinstance(value, six.string_types) or isinstance(value, six.binary_type):
# Avoid asking the JSON decoder to handle empty values:
if not value:
return {}
try:
return json.loads(value)
except ValueError:
logging.getLogger("feincms.contrib.fields").exception(
"Unable to deserialize store JSONField data: %s", value
)
return {}
else:
assert value is None
return {}
def from_db_value(self, value, expression, connection, context=None):
return self.to_python(value)
def get_prep_value(self, value):
"""Convert our JSON object to a string before we save"""
return self._flatten_value(value)
def value_to_string(self, obj):
"""Extract our value from the passed object and return it in string
form"""
if hasattr(obj, self.attname):
value = getattr(obj, self.attname)
else:
assert isinstance(obj, dict)
value = obj.get(self.attname, "")
return self._flatten_value(value)
def _flatten_value(self, value):
"""Return either a string, JSON-encoding dict()s as necessary"""
if not value:
return ""
if isinstance(value, dict):
value = json.dumps(value, cls=DjangoJSONEncoder)
assert isinstance(value, six.string_types)
return value
| 31.49505
| 87
| 0.62276
|
4a03fed8ed17aea0c44d18208757fbd359a58b5c
| 255
|
py
|
Python
|
src/api-engine/api/lib/agent/network_base.py
|
hyperledger-gerrit-archive/cello
|
52ce6439a391ee65cec76934c1d7b0475543a1e4
|
[
"Apache-2.0"
] | 865
|
2017-01-12T21:51:37.000Z
|
2022-03-26T16:39:16.000Z
|
src/api-engine/api/lib/agent/network_base.py
|
hyperledger-gerrit-archive/cello
|
52ce6439a391ee65cec76934c1d7b0475543a1e4
|
[
"Apache-2.0"
] | 226
|
2017-02-06T08:36:24.000Z
|
2022-03-30T06:13:46.000Z
|
src/api-engine/api/lib/agent/network_base.py
|
hyperledger-gerrit-archive/cello
|
52ce6439a391ee65cec76934c1d7b0475543a1e4
|
[
"Apache-2.0"
] | 506
|
2017-02-08T06:11:18.000Z
|
2022-03-10T04:25:25.000Z
|
#
# SPDX-License-Identifier: Apache-2.0
#
import abc
class NetworkBase(object):
__metaclass__ = abc.ABCMeta
def __init__(self, *args, **kwargs):
pass
@abc.abstractmethod
def generate_config(self, *args, **kwargs):
pass
| 15.9375
| 47
| 0.647059
|
4a03ffc7fb0c4f7167238b77a6f49a16c7ebbf4f
| 7,467
|
py
|
Python
|
tools/python/google/httpd_utils.py
|
kjthegod/chromium
|
cf940f7f418436b77e15b1ea23e6fa100ca1c91a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
tools/python/google/httpd_utils.py
|
1065672644894730302/Chromium
|
239dd49e906be4909e293d8991e998c9816eaa35
|
[
"BSD-3-Clause"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
tools/python/google/httpd_utils.py
|
1065672644894730302/Chromium
|
239dd49e906be4909e293d8991e998c9816eaa35
|
[
"BSD-3-Clause"
] | 338
|
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A class to help start/stop a local apache http server."""
import logging
import optparse
import os
import subprocess
import sys
import time
import urllib
import google.path_utils
import google.platform_utils
class HttpdNotStarted(Exception): pass
def UrlIsAlive(url):
"""Checks to see if we get an http response from |url|.
We poll the url 5 times with a 1 second delay. If we don't
get a reply in that time, we give up and assume the httpd
didn't start properly.
Args:
url: The URL to check.
Return:
True if the url is alive.
"""
wait_time = 5
while wait_time > 0:
try:
response = urllib.urlopen(url)
# Server is up and responding.
return True
except IOError:
pass
wait_time -= 1
# Wait a second and try again.
time.sleep(1)
return False
def ApacheConfigDir(start_dir):
"""Returns a path to the directory holding the Apache config files."""
return google.path_utils.FindUpward(start_dir, 'tools', 'python',
'google', 'httpd_config')
def GetCygserverPath(start_dir, apache2=False):
"""Returns the path to the directory holding cygserver.exe file."""
cygserver_path = None
if apache2:
cygserver_path = google.path_utils.FindUpward(start_dir, 'third_party',
'cygwin', 'usr', 'sbin')
return cygserver_path
def StartServer(document_root=None, output_dir=None, apache2=False):
"""Starts a local server on port 8000 using the basic configuration files.
Args:
document_root: If present, specifies the document root for the server;
otherwise, the filesystem's root (e.g., C:/ or /) will be used.
output_dir: If present, specifies where to put server logs; otherwise,
they'll be placed in the system's temp dir (e.g., $TEMP or /tmp).
apache2: boolean if true will cause this function to configure
for Apache 2.x as opposed to Apache 1.3.x
Returns: the ApacheHttpd object that was created
"""
script_dir = google.path_utils.ScriptDir()
platform_util = google.platform_utils.PlatformUtility(script_dir)
if not output_dir:
output_dir = platform_util.GetTempDirectory()
if not document_root:
document_root = platform_util.GetFilesystemRoot()
apache_config_dir = ApacheConfigDir(script_dir)
if apache2:
httpd_conf_path = os.path.join(apache_config_dir, 'httpd2.conf')
else:
httpd_conf_path = os.path.join(apache_config_dir, 'httpd.conf')
mime_types_path = os.path.join(apache_config_dir, 'mime.types')
start_cmd = platform_util.GetStartHttpdCommand(output_dir,
httpd_conf_path,
mime_types_path,
document_root,
apache2=apache2)
stop_cmd = platform_util.GetStopHttpdCommand()
httpd = ApacheHttpd(start_cmd, stop_cmd, [8000],
cygserver_path=GetCygserverPath(script_dir, apache2))
httpd.StartServer()
return httpd
def StopServers(apache2=False):
"""Calls the platform's stop command on a newly created server, forcing it
to stop.
The details depend on the behavior of the platform stop command. For example,
it's often implemented to kill all running httpd processes, as implied by
the name of this function.
Args:
apache2: boolean if true will cause this function to configure
for Apache 2.x as opposed to Apache 1.3.x
"""
script_dir = google.path_utils.ScriptDir()
platform_util = google.platform_utils.PlatformUtility(script_dir)
httpd = ApacheHttpd('', platform_util.GetStopHttpdCommand(), [],
cygserver_path=GetCygserverPath(script_dir, apache2))
httpd.StopServer(force=True)
class ApacheHttpd(object):
def __init__(self, start_command, stop_command, port_list,
cygserver_path=None):
"""Args:
start_command: command list to call to start the httpd
stop_command: command list to call to stop the httpd if one has been
started. May kill all httpd processes running on the machine.
port_list: list of ports expected to respond on the local machine when
the server has been successfully started.
cygserver_path: Path to cygserver.exe. If specified, exe will be started
with server as well as stopped when server is stopped.
"""
self._http_server_proc = None
self._start_command = start_command
self._stop_command = stop_command
self._port_list = port_list
self._cygserver_path = cygserver_path
def StartServer(self):
if self._http_server_proc:
return
if self._cygserver_path:
cygserver_exe = os.path.join(self._cygserver_path, "cygserver.exe")
cygbin = google.path_utils.FindUpward(cygserver_exe, 'third_party',
'cygwin', 'bin')
env = os.environ
env['PATH'] += ";" + cygbin
subprocess.Popen(cygserver_exe, env=env)
logging.info('Starting http server')
self._http_server_proc = subprocess.Popen(self._start_command)
# Ensure that the server is running on all the desired ports.
for port in self._port_list:
if not UrlIsAlive('http://127.0.0.1:%s/' % str(port)):
raise HttpdNotStarted('Failed to start httpd on port %s' % str(port))
def StopServer(self, force=False):
"""If we started an httpd.exe process, or if force is True, call
self._stop_command (passed in on init so it can be platform-dependent).
This will presumably kill it, and may also kill any other httpd.exe
processes that are running.
"""
if force or self._http_server_proc:
logging.info('Stopping http server')
kill_proc = subprocess.Popen(self._stop_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
logging.info('%s\n%s' % (kill_proc.stdout.read(),
kill_proc.stderr.read()))
self._http_server_proc = None
if self._cygserver_path:
subprocess.Popen(["taskkill.exe", "/f", "/im", "cygserver.exe"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def main():
# Provide some command line params for starting/stopping the http server
# manually.
option_parser = optparse.OptionParser()
option_parser.add_option('-k', '--server', help='Server action (start|stop)')
option_parser.add_option('-r', '--root', help='Document root (optional)')
option_parser.add_option('-a', '--apache2', action='store_true',
default=False, help='Starts Apache 2 instead of Apache 1.3 (default). '
'Ignored on Mac (apache2 is used always)')
options, args = option_parser.parse_args()
if not options.server:
print ("Usage: %s -k {start|stop} [-r document_root] [--apache2]" %
sys.argv[0])
return 1
document_root = None
if options.root:
document_root = options.root
if 'start' == options.server:
StartServer(document_root, apache2=options.apache2)
else:
StopServers(apache2=options.apache2)
if '__main__' == __name__:
sys.exit(main())
| 37.149254
| 80
| 0.663453
|
4a04003c579272f9536bd6b44ab8e58a767c4d4d
| 2,080
|
py
|
Python
|
testing_project/study_stock.py
|
giwon-bae/giwon-bae.github.io
|
28474af641e4aadf0fcd1c421a1226495551c5d6
|
[
"MIT"
] | null | null | null |
testing_project/study_stock.py
|
giwon-bae/giwon-bae.github.io
|
28474af641e4aadf0fcd1c421a1226495551c5d6
|
[
"MIT"
] | null | null | null |
testing_project/study_stock.py
|
giwon-bae/giwon-bae.github.io
|
28474af641e4aadf0fcd1c421a1226495551c5d6
|
[
"MIT"
] | null | null | null |
import win32com.client
# 연결 여부 체크
objCpCybos = win32com.client.Dispatch("CpUtil.CpCybos")
bConnect = objCpCybos.IsConnect
if (bConnect == 0):
print("PLUS가 정상적으로 연결되지 않음. ")
exit()
# 현재가 객체 구하기
objStockMst = win32com.client.Dispatch("DsCbo1.StockMst")
objStockMst.SetInputValue(0, 'A005930') #종목 코드 - 삼성전자
objStockMst.BlockRequest()
# 현재가 통신 및 통신 에러 처리
rqStatus = objStockMst.GetDibStatus()
rqRet = objStockMst.GetDibMsg1()
print("통신상태", rqStatus, rqRet)
if rqStatus != 0:
exit()
# 현재가 정보 조회
code = objStockMst.GetHeaderValue(0) #종목코드
name= objStockMst.GetHeaderValue(1) # 종목명
time= objStockMst.GetHeaderValue(4) # 시간
cprice= objStockMst.GetHeaderValue(11) # 종가
diff= objStockMst.GetHeaderValue(12) # 대비
open= objStockMst.GetHeaderValue(13) # 시가
high= objStockMst.GetHeaderValue(14) # 고가
low= objStockMst.GetHeaderValue(15) # 저가
offer = objStockMst.GetHeaderValue(16) #매도호가
bid = objStockMst.GetHeaderValue(17) #매수호가
vol= objStockMst.GetHeaderValue(18) #거래량
vol_value= objStockMst.GetHeaderValue(19) #거래대금
# 예상 체결관련 정보
exFlag = objStockMst.GetHeaderValue(58) #예상체결가 구분 플래그
exPrice = objStockMst.GetHeaderValue(55) #예상체결가
exDiff = objStockMst.GetHeaderValue(56) #예상체결가 전일대비
exVol = objStockMst.GetHeaderValue(57) #예상체결수량
print("코드", code)
print("이름", name)
print("시간", time)
print("종가", cprice)
print("대비", diff)
print("시가", open)
print("고가", high)
print("저가", low)
print("매도호가", offer)
print("매수호가", bid)
print("거래량", vol)
print("거래대금", vol_value)
if (exFlag == ord('0')):
print("장 구분값: 동시호가와 장중 이외의 시간")
elif (exFlag == ord('1')) :
print("장 구분값: 동시호가 시간")
elif (exFlag == ord('2')):
print("장 구분값: 장중 또는 장종료")
print("예상체결가 대비 수량")
print("예상체결가", exPrice)
print("예상체결가 대비", exDiff)
print("예상체결수량", exVol)
from slacker import Slacker
slack = Slacker('xoxb-1763551092886-1794174939376-JtvgrdtbWHOvWWdi3vK0r8oq')
# Send a message to #general channel
slack.chat.post_message('#stock', name + '(' + str(code) + ')')
slack.chat.post_message('#stock', '현재가:' + str(offer))
slack.chat.post_message('#stock', '거래량:' + str(vol))
| 27.368421
| 76
| 0.707212
|
4a04015a025134b708a21418535f4993c58f8ed6
| 714
|
py
|
Python
|
pyfermod/stats/stats_expon_rvs.py
|
josborne-noaa/PyFerret
|
8496508e9902c0184898522e9f89f6caea6d4539
|
[
"Unlicense"
] | 44
|
2016-03-18T22:05:31.000Z
|
2021-12-23T01:50:09.000Z
|
pyfermod/stats/stats_expon_rvs.py
|
josborne-noaa/PyFerret
|
8496508e9902c0184898522e9f89f6caea6d4539
|
[
"Unlicense"
] | 88
|
2016-08-19T08:05:37.000Z
|
2022-03-28T23:29:21.000Z
|
pyfermod/stats/stats_expon_rvs.py
|
josborne-noaa/PyFerret
|
8496508e9902c0184898522e9f89f6caea6d4539
|
[
"Unlicense"
] | 24
|
2016-02-07T18:12:06.000Z
|
2022-02-19T09:06:17.000Z
|
"""
Returns the array of random variates
for the Exponential probability distribution
using the given arrays for the abscissa or template
values and each of the parameters values.
"""
import numpy
import pyferret
import pyferret.stats
DISTRIB_NAME = "Exponential"
FUNC_NAME = "rvs"
def ferret_init(id):
"""
Initialization for the stats_expon_rvs Ferret PyEF
"""
return pyferret.stats.getinitdict(DISTRIB_NAME, FUNC_NAME)
def ferret_compute(id, result, resbdf, inputs, inpbdfs):
"""
Result array assignment for the stats_expon_rvs Ferret PyEF
"""
pyferret.stats.assignresultsarray(DISTRIB_NAME, FUNC_NAME,
result, resbdf, inputs, inpbdfs)
| 24.62069
| 70
| 0.717087
|
4a04022f9d30cb844584e6d050ac96bfc5815897
| 7,565
|
py
|
Python
|
src/python_op3/soccer/ball_follower.py
|
culdo/python-op3
|
59a068ae4c8694778126aebc2ab553963b82493b
|
[
"MIT"
] | 5
|
2019-08-06T07:28:10.000Z
|
2022-01-30T17:00:41.000Z
|
src/python_op3/soccer/ball_follower.py
|
culdo/python-op3
|
59a068ae4c8694778126aebc2ab553963b82493b
|
[
"MIT"
] | 2
|
2019-08-06T15:54:42.000Z
|
2021-04-21T02:40:36.000Z
|
src/python_op3/soccer/ball_follower.py
|
culdo/python-op3
|
59a068ae4c8694778126aebc2ab553963b82493b
|
[
"MIT"
] | 5
|
2020-09-25T10:03:51.000Z
|
2021-10-18T06:19:43.000Z
|
import rospy
from math import pi, tan
from geometry_msgs.msg import Point
from op3_walking_module_msgs.msg import WalkingParam
from op3_walking_module_msgs.srv import GetWalkingParam
class BallFollower(object):
def __init__(self):
self.ball_position = Point()
self.current_walking_param_ = WalkingParam()
self.FOV_WIDTH = 35.2 * pi / 180
self.FOV_HEIGHT = 21.6 * pi / 180
self.count_not_found_ = 0
self.count_to_kick_ = 0
self.on_tracking_ = False
self.approach_ball_position_ = "NotFound"
self.kick_motion_index_ = 83
self.CAMERA_HEIGHT = 0.46
self.NOT_FOUND_THRESHOLD = 50
self.MAX_FB_STEP = 40.0 * 0.001
self.MAX_RL_TURN = 15.0 * pi / 180
self.IN_PLACE_FB_STEP = -3.0 * 0.001
self.MIN_FB_STEP = 5.0 * 0.001
self.MIN_RL_TURN = 5.0 * pi / 180
self.UNIT_FB_STEP = 1.0 * 0.001
self.UNIT_RL_TURN = 0.5 * pi / 180
self.SPOT_FB_OFFSET = 0.0 * 0.001
self.SPOT_RL_OFFSET = 0.0 * 0.001
self.SPOT_ANGLE_OFFSET = 0.0
self.hip_pitch_offset_ = 7.0
self.current_pan_ = -10
self.current_tilt_ = -10
self.current_x_move_ = 0.005
self.current_r_angle_ = 0
self.curr_period_time_ = 0.6
self.accum_period_time_ = 0.0
self.DEBUG_PRINT = False
self.prev_time_ = rospy.get_rostime()
def start_following(self):
self.on_tracking_ = True
rospy.loginfo("Start Ball following")
self.set_walking_command("start")
result = self.get_walking_param()
if result:
self.hip_pitch_offset_ = self.current_walking_param_.hip_pitch_offset
self.curr_period_time_ = self.current_walking_param_.period_time
else:
self.hip_pitch_offset_ = 7.0 * pi / 180
self.curr_period_time_ = 0.6
def stop_following(self):
self.on_tracking_ = False
self.count_to_kick_ = 0
rospy.loginfo("Stop Ball following")
self.set_walking_command("stop")
def calc_footstep(self, target_distance, target_angle, delta_time):
# calc fb
rl_angle = 0.0
next_movement = self.current_x_move_
if target_distance < 0:
target_distance = 0.0
fb_goal = min(target_distance * 0.1, self.MAX_FB_STEP)
self.accum_period_time_ += delta_time
if self.accum_period_time_ > (self.curr_period_time_ / 4):
self.accum_period_time_ = 0.0
if (target_distance * 0.1 / 2) < self.current_x_move_:
next_movement -= self.UNIT_FB_STEP
else:
next_movement += self.UNIT_FB_STEP
fb_goal = min(next_movement, fb_goal)
fb_move = max(fb_goal, self.MIN_FB_STEP)
rospy.loginfo("distance to ball : %6.4f, fb : %6.4f, delta : %6.6f" % (target_distance, fb_move, delta_time))
rospy.loginfo("============================================")
# calc rl angle
if abs(target_angle) * 180 / pi > 5.0:
rl_offset = abs(target_angle) * 0.2
rl_goal = min(rl_offset, self.MAX_RL_TURN)
rl_goal = max(rl_goal, self.MIN_RL_TURN)
rl_angle = min(abs(self.current_r_angle_) + self.UNIT_RL_TURN, rl_goal)
if target_angle < 0:
rl_angle *= -1
return fb_move, rl_angle
def process_following(self, x_angle, y_angle, ball_size):
curr_time = rospy.get_rostime()
dur = curr_time - self.prev_time_
delta_time = dur.nsecs * 0.000000001 + dur.secs
self.prev_time_ = curr_time
self.count_not_found_ = 0
if self.current_tilt_ == -1 and self.current_pan_ == -10:
rospy.logerr("Failed to get current angle of head joints.")
self.set_walking_command("stop")
self.on_tracking_ = False
self.approach_ball_position_ = "NotFound"
return False
rospy.loginfo(" ============== Head | Ball ============== ")
rospy.loginfo("== Head Pan : " + (self.current_pan_ * 180 / pi) + " | Ball X : " + (x_angle * 180 / pi))
rospy.loginfo("== Head Tilt : " + (self.current_pan_ * 180 / pi) + " | Ball Y : " + (y_angle * 180 / pi))
self.approach_ball_position_ = "OutOfRange"
distance_to_ball = self.CAMERA_HEIGHT * tan(pi * 0.5 + self.current_tilt_ - self.hip_pitch_offset_ - ball_size)
ball_y_angle = (self.current_tilt_ + y_angle) * 180 / pi
ball_x_angle = (self.current_pan_ + x_angle) * 180 / pi
if distance_to_ball < 0:
distance_to_ball *= -1
distance_to_kick = 0.22
# Kick Ball
# if distance_to_ball<distance_to_kick and abs(ball_x_angle) <25.0:
# self.count_to_kick_ += 1
# rospy.loginfo("== Head Pan : " + (self.current_pan_ * 180 / pi) + " | Ball X : " + (ball_x_angle * 180 / pi))
# rospy.loginfo("== Head Tilt : " + (self.current_pan_ * 180 / pi) + " | Ball Y : " + (ball_y_angle * 180 / pi))
# rospy.loginfo("foot to kick : "+ball_x_angle)
#
# rospy.loginfo("In range [%d | %d]" % (self.count_to_kick_, ball_x_angle))
# if self.count_to_kick_>20:
# self.set_walking_cmd("stop")
# self.on_tracking_ = False
# if ball_x_angle>0:
# rospy.loginfo("Read")
distance_to_walk = distance_to_ball - distance_to_kick
fb_move, rl_angle = self.calc_footstep(distance_to_walk, self.current_pan_, delta_time)
self.set_walking_param(fb_move, 0, rl_angle)
return False
def decide_ball_pos(self, x_angle, y_angle):
if self.current_tilt_ == -10 and self.current_pan_ == -10:
self.approach_ball_position_ = "NotFound"
return
ball_x_angle = self.current_pan_ + x_angle
if ball_x_angle > 0:
self.approach_ball_position_ = "OnLeft"
else:
self.approach_ball_position_ = "OnRight"
def wait_following(self):
self.count_not_found_ += 1
if self.count_not_found_ > self.NOT_FOUND_THRESHOLD / 2.0:
self.set_walking_param(0.0, 0.0, 0.0)
def set_walking_command(self, command):
if command == "start":
# get initial param
self.get_walking_param()
self.set_walking_param(self.IN_PLACE_FB_STEP, 0, 0, True)
self._pub_walking_command.publish(command)
rospy.loginfo("Send Walking command : " + command)
def set_walking_param(self, x_move, y_move, rotation_angle, balance=True):
self.current_walking_param_.balance_enable = balance
self.current_walking_param_.x_move_amplitude = x_move + self.SPOT_FB_OFFSET
self.current_walking_param_.y_move_amplitude = y_move + self.SPOT_RL_OFFSET
self.current_walking_param_.angle_move_amplitude = rotation_angle + self.SPOT_ANGLE_OFFSET
self._pub_walking_param.publish(self.current_walking_param_)
# update variable silently
self.current_x_move_ = x_move
self.current_r_angle_ = rotation_angle
def get_walking_param(self):
walking_param_msg = GetWalkingParam()
if self.get_walking_param_srv_(walking_param_msg):
self.current_walking_param_ = walking_param_msg.response.parameters
# update ui
rospy.loginfo("Get walking parameters")
return True
else:
rospy.logerr("Fail to get walking parameters.")
return False
| 38.994845
| 124
| 0.618771
|
4a040271f61fd13585c7468151f2eedf6480c220
| 1,070
|
py
|
Python
|
models/metrics/factory.py
|
valeoai/BEEF
|
f1c5f3708ba91f6402dd05814b76dca1d9012942
|
[
"Apache-2.0"
] | 4
|
2021-05-31T16:53:35.000Z
|
2021-11-30T03:03:34.000Z
|
models/metrics/factory.py
|
valeoai/BEEF
|
f1c5f3708ba91f6402dd05814b76dca1d9012942
|
[
"Apache-2.0"
] | 3
|
2022-02-02T20:41:56.000Z
|
2022-02-24T11:47:44.000Z
|
models/metrics/factory.py
|
valeoai/BEEF
|
f1c5f3708ba91f6402dd05814b76dca1d9012942
|
[
"Apache-2.0"
] | null | null | null |
from bootstrap.lib.options import Options
from .bdd import BDDDrive
from .map import mAP
from .futuretraj import FutureTraj
from .bdd_caption import CaptionMetrics
from .multimetrics import MultiMetrics
def factory(engine, mode):
opt = Options()['model.metric']
if opt['name'] == 'map':
metric = mAP(engine,
mode=mode)
elif opt['name'] == 'future_traj':
metric = FutureTraj(engine,
mode=mode)
elif opt['name'] == 'multi_metrics':
metric = MultiMetrics(engine, mode=mode, metrics=opt['metrics'])
elif opt['name'] == "bdd-drive":
metric = BDDDrive(engine, mode=mode)
elif opt['name'] == 'bdd_caption':
decode_fn = engine.dataset[mode].to_sentence
bleu_smoothing = opt["bleu_smoothing"]
output_sentence = Options().get("model.network.output_sentence", "caption")
metric = CaptionMetrics(decode_fn, bleu_smoothing, engine, mode=mode, output_sentence=output_sentence)
else:
raise ValueError(opt['name'])
return metric
| 34.516129
| 110
| 0.648598
|
4a0402bbdbb16e5434ffe2d3cebb896ac587e99d
| 14,122
|
py
|
Python
|
control/excalibur/parameter.py
|
dls-controls/excalibur-detector
|
11811d6e4b2e6ce7413037e9489999799551ff8d
|
[
"Apache-2.0"
] | null | null | null |
control/excalibur/parameter.py
|
dls-controls/excalibur-detector
|
11811d6e4b2e6ce7413037e9489999799551ff8d
|
[
"Apache-2.0"
] | 22
|
2017-05-19T09:29:02.000Z
|
2021-07-22T12:35:33.000Z
|
control/excalibur/parameter.py
|
dls-controls/excalibur-detector
|
11811d6e4b2e6ce7413037e9489999799551ff8d
|
[
"Apache-2.0"
] | 5
|
2018-04-23T14:57:38.000Z
|
2020-10-20T12:50:34.000Z
|
'''
Created on Mar 21, 2017
@author: tcn45
'''
from collections import OrderedDict
from .fem_api_parameters import *
FEM_RTN_INTERNALERROR = -1
FEM_RTN_CONNECTION_CLOSED = 10000
FEM_RTN_TIMEOUT = 10001
CHIPS_PER_FEM = FEM_CHIPS_PER_STRIPE_X
FEM_PIXELS_PER_CHIP = FEM_PIXELS_PER_CHIP_X * FEM_PIXELS_PER_CHIP_Y
ParamPerFem = 1
ParamPerFemRandomAccess = 2
ParamPerChip = 3
ParamReadOnly = 1
ParamReadWrite = 2
class ParameterSpec(object):
def __init__(self, param_id, param_type, param_len, param_mode, param_access=ParamReadOnly):
self.param_id = param_id
self.param_type = param_type
self.param_len = param_len
self.param_mode = param_mode
self.param_access = param_access
def get(self):
return (self.param_id, self.param_type, self.param_len, self.param_mode, self.param_access)
class ParameterMap(OrderedDict):
def __init__(self, *args, **kwargs):
super(ParameterMap, self).__init__()
self.update(*args, **kwargs)
def __getitem__(self, key):
param_container = OrderedDict.__getitem__(self, key)
return param_container.get()
def __setitem__(self, key, arg):
if isinstance(arg, ParameterSpec):
param_container = arg
elif isinstance(arg, tuple):
param_container = ParameterSpec(*arg)
else:
raise TypeError("Unable to set parameter map item with {}".format(type(arg)))
OrderedDict.__setitem__(self, key, param_container)
class ExcaliburFrontEndParameterMap(ParameterMap):
def __init__(self):
super(ExcaliburFrontEndParameterMap, self).__init__()
self['mpx3_colourmode'] = ParameterSpec(FEM_OP_MPXIII_COLOURMODE, 'int', 1, ParamPerFem)
self['mpx3_counterdepth'] = ParameterSpec(FEM_OP_MPXIII_COUNTERDEPTH, 'int', 1, ParamPerFem)
self['mpx3_externaltrigger'] = ParameterSpec(FEM_OP_MPXIII_EXTERNALTRIGGER, 'int', 1, ParamPerFem)
self['mpx3_operationmode'] = ParameterSpec(FEM_OP_MPXIII_OPERATIONMODE, 'int', 1, ParamPerFem)
self['mpx3_counterselect'] = ParameterSpec(FEM_OP_MPXIII_COUNTERSELECT, 'int', 1, ParamPerFem)
self['mpx3_numtestpulses'] = ParameterSpec(FEM_OP_MPXIII_NUMTESTPULSES, 'int', 1, ParamPerFem)
self['mpx3_readwritemode'] = ParameterSpec(FEM_OP_MPXIII_READWRITEMODE, 'int', 1, ParamPerFem)
self['mpx3_disccsmspm'] = ParameterSpec(FEM_OP_MPXIII_DISCCSMSPM, 'int', 1, ParamPerFem)
self['mpx3_equalizationmode'] = ParameterSpec(FEM_OP_MPXIII_EQUALIZATIONMODE, 'int', 1, ParamPerFem)
self['mpx3_csmspmmode'] = ParameterSpec(FEM_OP_MPXIII_CSMSPMMODE, 'int', 1, ParamPerFem)
self['mpx3_gainmode'] = ParameterSpec(FEM_OP_MPXIII_GAINMODE, 'int', 1, ParamPerFem)
self['mpx3_triggerpolarity'] = ParameterSpec(FEM_OP_MPXIII_TRIGGERPOLARITY, 'int', 1, ParamPerFem)
self['mpx3_lfsrbypass'] = ParameterSpec(FEM_OP_MPXIII_LFSRBYPASS, 'int', 1, ParamPerFem)
self['mpx3_dacsense'] = ParameterSpec(FEM_OP_MPXIII_DACSENSE, 'int', 1, ParamPerChip)
self['mpx3_dacexternal'] = ParameterSpec(FEM_OP_MPXIII_DACEXTERNAL, 'int', 1, ParamPerChip)
self['mpx3_threshold0dac'] = ParameterSpec(FEM_OP_MPXIII_THRESHOLD0DAC, 'int', 1, ParamPerChip)
self['mpx3_threshold1dac'] = ParameterSpec(FEM_OP_MPXIII_THRESHOLD1DAC, 'int', 1, ParamPerChip)
self['mpx3_threshold2dac'] = ParameterSpec(FEM_OP_MPXIII_THRESHOLD2DAC, 'int', 1, ParamPerChip)
self['mpx3_threshold3dac'] = ParameterSpec(FEM_OP_MPXIII_THRESHOLD3DAC, 'int', 1, ParamPerChip)
self['mpx3_threshold4dac'] = ParameterSpec(FEM_OP_MPXIII_THRESHOLD4DAC, 'int', 1, ParamPerChip)
self['mpx3_threshold5dac'] = ParameterSpec(FEM_OP_MPXIII_THRESHOLD5DAC, 'int', 1, ParamPerChip)
self['mpx3_threshold6dac'] = ParameterSpec(FEM_OP_MPXIII_THRESHOLD6DAC, 'int', 1, ParamPerChip)
self['mpx3_threshold7dac'] = ParameterSpec(FEM_OP_MPXIII_THRESHOLD7DAC, 'int', 1, ParamPerChip)
self['mpx3_preampdac'] = ParameterSpec(FEM_OP_MPXIII_PREAMPDAC, 'int', 1, ParamPerChip)
self['mpx3_ikrumdac'] = ParameterSpec(FEM_OP_MPXIII_IKRUMDAC, 'int', 1, ParamPerChip)
self['mpx3_shaperdac'] = ParameterSpec(FEM_OP_MPXIII_SHAPERDAC, 'int', 1, ParamPerChip)
self['mpx3_discdac'] = ParameterSpec(FEM_OP_MPXIII_DISCDAC, 'int', 1, ParamPerChip)
self['mpx3_disclsdac'] = ParameterSpec(FEM_OP_MPXIII_DISCLSDAC, 'int', 1, ParamPerChip)
self['mpx3_shapertestdac'] = ParameterSpec(FEM_OP_MPXIII_SHAPERTESTDAC, 'int', 1, ParamPerChip)
self['mpx3_discldac'] = ParameterSpec(FEM_OP_MPXIII_DISCLDAC, 'int', 1, ParamPerChip)
self['mpx3_delaydac'] = ParameterSpec(FEM_OP_MPXIII_DELAYDAC, 'int', 1, ParamPerChip)
self['mpx3_tpbufferindac'] = ParameterSpec(FEM_OP_MPXIII_TPBUFFERINDAC, 'int', 1, ParamPerChip)
self['mpx3_tpbufferoutdac'] = ParameterSpec(FEM_OP_MPXIII_TPBUFFEROUTDAC, 'int', 1, ParamPerChip)
self['mpx3_rpzdac'] = ParameterSpec(FEM_OP_MPXIII_RPZDAC, 'int', 1, ParamPerChip)
self['mpx3_gnddac'] = ParameterSpec(FEM_OP_MPXIII_GNDDAC, 'int', 1, ParamPerChip)
self['mpx3_tprefdac'] = ParameterSpec(FEM_OP_MPXIII_TPREFDAC, 'int', 1, ParamPerChip)
self['mpx3_fbkdac'] = ParameterSpec(FEM_OP_MPXIII_FBKDAC, 'int', 1, ParamPerChip)
self['mpx3_casdac'] = ParameterSpec(FEM_OP_MPXIII_CASDAC, 'int', 1, ParamPerChip)
self['mpx3_tprefadac'] = ParameterSpec(FEM_OP_MPXIII_TPREFADAC, 'int', 1, ParamPerChip)
self['mpx3_tprefbdac'] = ParameterSpec(FEM_OP_MPXIII_TPREFBDAC, 'int', 1, ParamPerChip)
self['mpx3_testdac'] = ParameterSpec(FEM_OP_MPXIII_TESTDAC, 'int', 1, ParamPerChip)
self['mpx3_dischdac'] = ParameterSpec(FEM_OP_MPXIII_DISCHDAC, 'int', 1, ParamPerChip)
self['efuseid'] = ParameterSpec(FEM_OP_MPXIII_EFUSEID, 'int', 1, ParamPerChip)
self['testpulse_enable'] = ParameterSpec(FEM_OP_MPXIII_TESTPULSE_ENABLE, 'int', 1, ParamPerChip)
self['mpx3_pixel_mask'] = ParameterSpec(FEM_OP_MPXIII_PIXELMASK, 'short', FEM_PIXELS_PER_CHIP, ParamPerChip)
self['mpx3_pixel_discl'] = ParameterSpec(FEM_OP_MPXIII_PIXELDISCL, 'short', FEM_PIXELS_PER_CHIP, ParamPerChip)
self['mpx3_pixel_disch'] = ParameterSpec(FEM_OP_MPXIII_PIXELDISCH, 'short', FEM_PIXELS_PER_CHIP, ParamPerChip)
self['mpx3_pixel_test'] = ParameterSpec(FEM_OP_MPXIII_PIXELTEST, 'short', FEM_PIXELS_PER_CHIP, ParamPerChip)
self['num_frames_to_acquire'] = ParameterSpec(FEM_OP_NUMFRAMESTOACQUIRE, 'int', 1, ParamPerFem)
self['acquisition_time'] = ParameterSpec(FEM_OP_ACQUISITIONTIME, 'int', 1, ParamPerFem)
self['pwr_p5va_vmon'] = ParameterSpec(FEM_OP_P5V_A_VMON, 'float', 1, ParamPerFem)
self['pwr_p5vb_vmon'] = ParameterSpec(FEM_OP_P5V_B_VMON, 'float', 1, ParamPerFem)
self['pwr_p5v_fem00_imon'] = ParameterSpec(FEM_OP_P5V_FEMO0_IMON, 'float', 1, ParamPerFem)
self['pwr_p5v_fem01_imon'] = ParameterSpec(FEM_OP_P5V_FEMO1_IMON, 'float', 1, ParamPerFem)
self['pwr_p5v_fem02_imon'] = ParameterSpec(FEM_OP_P5V_FEMO2_IMON, 'float', 1, ParamPerFem)
self['pwr_p5v_fem03_imon'] = ParameterSpec(FEM_OP_P5V_FEMO3_IMON, 'float', 1, ParamPerFem)
self['pwr_p5v_fem04_imon'] = ParameterSpec(FEM_OP_P5V_FEMO4_IMON, 'float', 1, ParamPerFem)
self['pwr_p5v_fem05_imon'] = ParameterSpec(FEM_OP_P5V_FEMO5_IMON, 'float', 1, ParamPerFem)
self['pwr_p48v_vmon'] = ParameterSpec(FEM_OP_P48V_VMON, 'float', 1, ParamPerFem)
self['pwr_p48v_imon'] = ParameterSpec(FEM_OP_P48V_IMON, 'float', 1, ParamPerFem)
self['pwr_p5vsup_vmon'] = ParameterSpec(FEM_OP_P5VSUP_VMON, 'float', 1, ParamPerFem)
self['pwr_p5vsup_imon'] = ParameterSpec(FEM_OP_P5VSUP_IMON, 'float', 1, ParamPerFem)
self['pwr_humidity_mon'] = ParameterSpec(FEM_OP_HUMIDITY_MON, 'float', 1, ParamPerFem)
self['pwr_air_temp_mon'] = ParameterSpec(FEM_OP_AIR_TEMP_MON, 'float', 1, ParamPerFem)
self['pwr_coolant_temp_mon'] = ParameterSpec(FEM_OP_COOLANT_TEMP_MON, 'float', 1, ParamPerFem)
self['pwr_coolant_flow_mon'] = ParameterSpec(FEM_OP_COOLANT_FLOW_MON, 'float', 1, ParamPerFem)
self['pwr_p3v3_imon'] = ParameterSpec(FEM_OP_P3V3_IMON, 'float', 1, ParamPerFem)
self['pwr_p1v8_imonA'] = ParameterSpec(FEM_OP_P1V8_IMON_A, 'float', 1, ParamPerFem)
self['pwr_bias_imon'] = ParameterSpec(FEM_OP_BIAS_IMON, 'float', 1, ParamPerFem)
self['pwr_p3v3_vmon'] = ParameterSpec(FEM_OP_P3V3_VMON, 'float', 1, ParamPerFem)
self['pwr_p1v8_vmon'] = ParameterSpec(FEM_OP_P1V8_VMON_A, 'float', 1, ParamPerFem)
self['pwr_bias_vmon'] = ParameterSpec(FEM_OP_BIAS_VMON, 'float', 1, ParamPerFem)
self['pwr_p1v8_imonB'] = ParameterSpec(FEM_OP_P1V8_IMON_B, 'float', 1, ParamPerFem)
self['pwr_p1v8_vmonB'] = ParameterSpec(FEM_OP_P1V8_VMON_B, 'float', 1, ParamPerFem)
self['pwr_coolant_temp_status'] = ParameterSpec(FEM_OP_COOLANT_TEMP_STATUS, 'int', 1, ParamPerFem)
self['pwr_humidity_status'] = ParameterSpec(FEM_OP_HUMIDITY_STATUS, 'int', 1, ParamPerFem)
self['pwr_coolant_flow_status'] = ParameterSpec(FEM_OP_COOLANT_FLOW_STATUS, 'int', 1, ParamPerFem)
self['pwr_air_temp_status'] = ParameterSpec(FEM_OP_AIR_TEMP_STATUS, 'int', 1, ParamPerFem)
self['pwr_fan_fault'] = ParameterSpec(FEM_OP_FAN_FAULT, 'int', 1, ParamPerFem)
self['supply_p1v5_avdd1'] = ParameterSpec(FEM_OP_P1V5_AVDD_1_POK, 'int', 1, ParamPerFem)
self['supply_p1v5_avdd2'] = ParameterSpec(FEM_OP_P1V5_AVDD_2_POK, 'int', 1, ParamPerFem)
self['supply_p1v5_avdd3'] = ParameterSpec(FEM_OP_P1V5_AVDD_3_POK, 'int', 1, ParamPerFem)
self['supply_p1v5_avdd4'] = ParameterSpec(FEM_OP_P1V5_AVDD_4_POK, 'int', 1, ParamPerFem)
self['supply_p1v5_vdd1'] = ParameterSpec(FEM_OP_P1V5_VDD_1_POK, 'int', 1, ParamPerFem)
self['supply_p2v5_dvdd1'] = ParameterSpec(FEM_OP_P2V5_DVDD_1_POK, 'int', 1, ParamPerFem)
self['mpx3_dac_out'] = ParameterSpec(FEM_OP_DAC_OUT_FROM_MEDIPIX, 'float', 1, ParamPerChip)
self['moly_temp'] = ParameterSpec(FEM_OP_MOLY_TEMPERATURE, 'float', 1, ParamPerFem)
self['fem_local_temp'] = ParameterSpec(FEM_OP_LOCAL_TEMP, 'float', 1, ParamPerFem)
self['fem_remote_temp'] = ParameterSpec(FEM_OP_REMOTE_DIODE_TEMP, 'float', 1, ParamPerFem)
self['moly_humidity'] = ParameterSpec(FEM_OP_MOLY_HUMIDITY, 'float', 1, ParamPerFem)
self['medipix_chip_disable'] = ParameterSpec(FEM_OP_MEDIPIX_CHIP_DISABLE, 'int', 1, ParamPerChip)
self['dac_scan_dac'] = ParameterSpec(FEM_OP_SCAN_DAC, 'int', 1, ParamPerFem)
self['dac_scan_start'] = ParameterSpec(FEM_OP_SCAN_START, 'int', 1, ParamPerFem)
self['dac_scan_stop'] = ParameterSpec(FEM_OP_SCAN_STOP, 'int', 1, ParamPerFem)
self['dac_scan_step'] = ParameterSpec(FEM_OP_SCAN_STEP, 'int', 1, ParamPerFem)
self['dac_scan_steps_complete'] = ParameterSpec(FEM_OP_DAC_SCAN_STEPS_COMPLETE, 'int', 1, ParamPerFem)
self['dac_scan_state'] = ParameterSpec(FEM_OP_DAC_SCAN_STATE, 'int', 1, ParamPerFem)
self['fe_lv_enable'] = ParameterSpec(FEM_OP_LV_ON_OFF, 'int', 1, ParamPerFem)
self['fe_hv_enable'] = ParameterSpec(FEM_OP_BIAS_ON_OFF, 'int', 1, ParamPerFem)
self['fe_hv_bias'] = ParameterSpec(FEM_OP_BIAS_LEVEL, 'float', 1, ParamPerFem)
self['fe_vdd_enable'] = ParameterSpec(FEM_OP_VDD_ON_OFF, 'int', 1, ParamPerFem)
self['datareceiver_enable'] = ParameterSpec(FEM_OP_DATA_RECEIVER_ENABLE, 'int', 1 , ParamPerFem)
self['frames_acquired'] = ParameterSpec(FEM_OP_FRAMES_ACQUIRED, 'int', 1 , ParamPerFem)
self['control_state'] = ParameterSpec(FEM_OP_CONTROL_STATE, 'int', 1 , ParamPerFem)
self['source_data_addr'] = ParameterSpec(FEM_OP_SOURCE_DATA_ADDR, 'string', FEM_FARM_MODE_LUT_SIZE, ParamPerFemRandomAccess)
self['source_data_mac'] = ParameterSpec(FEM_OP_SOURCE_DATA_MAC, 'string', FEM_FARM_MODE_LUT_SIZE, ParamPerFemRandomAccess)
self['source_data_port'] = ParameterSpec(FEM_OP_SOURCE_DATA_PORT, 'int', FEM_FARM_MODE_LUT_SIZE, ParamPerFemRandomAccess)
self['dest_data_addr'] = ParameterSpec(FEM_OP_DEST_DATA_ADDR, 'string', FEM_FARM_MODE_LUT_SIZE, ParamPerFemRandomAccess)
self['dest_data_mac'] = ParameterSpec(FEM_OP_DEST_DATA_MAC, 'string', FEM_FARM_MODE_LUT_SIZE, ParamPerFemRandomAccess)
self['dest_data_port'] = ParameterSpec(FEM_OP_DEST_DATA_PORT, 'int', FEM_FARM_MODE_LUT_SIZE, ParamPerFemRandomAccess)
self['dest_data_port_offset'] = ParameterSpec(FEM_OP_DEST_DATA_PORT_OFFSET, 'int', 1, ParamPerFem)
self['farm_mode_num_dests'] = ParameterSpec(FEM_OP_FARM_MODE_NUM_DESTS, 'int', 1, ParamPerFem)
self['farm_mode_enable'] = ParameterSpec(FEM_OP_FARM_MODE_ENABLE, 'int', 1, ParamPerFem)
self['firmware_version'] = ParameterSpec(FEM_OP_FIRMWARE_VERSION, 'int', 4, ParamPerFem)
class ExcaliburFrontEndCommandMap(OrderedDict):
def __init__(self):
super(ExcaliburFrontEndCommandMap, self).__init__()
self['fe_init'] = (FEM_OP_FEINIT, 'frontend initialisation', FEM_RTN_INITFAILED)
self['start_acquisition'] = (FEM_OP_STARTACQUISITION, 'start acquisition', FEM_RTN_INTERNALERROR)
self['stop_acquisition'] = (FEM_OP_STOPACQUISITION, 'stop acquisition', FEM_RTN_INTERNALERROR)
self['load_pixelconfig'] = (FEM_OP_LOADPIXELCONFIG, 'pixel config load', FEM_RTN_INTERNALERROR)
self['load_dacconfig'] = (FEM_OP_LOADDACCONFIG, 'DAC config load', FEM_RTN_INTERNALERROR)
self['fem_reboot'] = (FEM_OP_REBOOT, 'FEM reboot', FEM_RTN_INTERNALERROR)
self['reset_udp_counter'] = (FEM_OP_RESET_UDP_COUNTER, 'reset udp counter', FEM_RTN_INTERNALERROR)
self['ping'] = (FEM_OP_PING, 'ping', FEM_RTN_INTERNALERROR)
| 67.894231
| 132
| 0.719586
|
4a04041574de53092e04324cecd61638a9313d5b
| 6,120
|
py
|
Python
|
contrib/devtools/test-security-check.py
|
VaderCoinProject/vadercoin
|
b513c794b014d40e5aad281dd1f54845c46d216c
|
[
"MIT"
] | null | null | null |
contrib/devtools/test-security-check.py
|
VaderCoinProject/vadercoin
|
b513c794b014d40e5aad281dd1f54845c46d216c
|
[
"MIT"
] | null | null | null |
contrib/devtools/test-security-check.py
|
VaderCoinProject/vadercoin
|
b513c794b014d40e5aad281dd1f54845c46d216c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2020 The Vadercoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Test script for security-check.py
'''
import os
import subprocess
import unittest
from utils import determine_wellknown_cmd
def write_testcode(filename):
with open(filename, 'w', encoding="utf8") as f:
f.write('''
#include <stdio.h>
int main()
{
printf("the quick brown fox jumps over the lazy god\\n");
return 0;
}
''')
def clean_files(source, executable):
os.remove(source)
os.remove(executable)
def call_security_check(cc, source, executable, options):
subprocess.run([*cc,source,'-o',executable] + options, check=True)
p = subprocess.run(['./contrib/devtools/security-check.py',executable], stdout=subprocess.PIPE, universal_newlines=True)
return (p.returncode, p.stdout.rstrip())
class TestSecurityChecks(unittest.TestCase):
def test_ELF(self):
source = 'test1.c'
executable = 'test1'
cc = determine_wellknown_cmd('CC', 'gcc')
write_testcode(source)
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
(1, executable+': failed PIE NX RELRO Canary'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
(1, executable+': failed PIE RELRO Canary'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
(1, executable+': failed PIE RELRO'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']),
(1, executable+': failed RELRO'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']),
(1, executable+': failed separate_code'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']),
(0, ''))
clean_files(source, executable)
def test_PE(self):
source = 'test1.c'
executable = 'test1.exe'
cc = determine_wellknown_cmd('CC', 'x86_64-w64-mingw32-gcc')
write_testcode(source)
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--no-nxcompat','-Wl,--disable-reloc-section','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va','-no-pie','-fno-PIE']),
(1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA NX RELOC_SECTION'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--disable-reloc-section','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va','-no-pie','-fno-PIE']),
(1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA RELOC_SECTION'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va','-no-pie','-fno-PIE']),
(1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va','-pie','-fPIE']),
(1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA')) # -pie -fPIE does nothing unless --dynamicbase is also supplied
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--dynamicbase','-Wl,--no-high-entropy-va','-pie','-fPIE']),
(1, executable+': failed HIGH_ENTROPY_VA'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--dynamicbase','-Wl,--high-entropy-va','-pie','-fPIE']),
(0, ''))
clean_files(source, executable)
def test_MACHO(self):
source = 'test1.c'
executable = 'test1'
cc = determine_wellknown_cmd('CC', 'clang')
write_testcode(source)
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fno-stack-protector']),
(1, executable+': failed PIE NOUNDEFS NX LAZY_BINDINGS Canary CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fstack-protector-all']),
(1, executable+': failed PIE NOUNDEFS NX LAZY_BINDINGS CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-fstack-protector-all']),
(1, executable+': failed PIE NOUNDEFS LAZY_BINDINGS CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-fstack-protector-all']),
(1, executable+': failed PIE LAZY_BINDINGS CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all']),
(1, executable+': failed PIE CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all', '-fcf-protection=full']),
(1, executable+': failed PIE'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-pie','-Wl,-bind_at_load','-fstack-protector-all', '-fcf-protection=full']),
(0, ''))
clean_files(source, executable)
if __name__ == '__main__':
unittest.main()
| 60
| 193
| 0.653758
|
4a04046841492db246505026d3bd40c5cd8b0a49
| 87,073
|
py
|
Python
|
hata/discord/events/handling_helpers.py
|
m0nk3ybraindead/hata
|
f87ed3d7009eeae31d6ea158772efd33775c7b1c
|
[
"0BSD"
] | 1
|
2022-03-02T03:59:57.000Z
|
2022-03-02T03:59:57.000Z
|
hata/discord/events/handling_helpers.py
|
m0nk3ybraindead/hata
|
f87ed3d7009eeae31d6ea158772efd33775c7b1c
|
[
"0BSD"
] | null | null | null |
hata/discord/events/handling_helpers.py
|
m0nk3ybraindead/hata
|
f87ed3d7009eeae31d6ea158772efd33775c7b1c
|
[
"0BSD"
] | null | null | null |
__all__ = ('EventHandlerBase', 'EventWaitforBase', 'eventlist', )
from functools import partial as partial_func
from types import FunctionType
from scarletio import (
CallableAnalyzer, MethodLike, RemovedDescriptor, RichAttributeErrorBaseType, Task, WaitTillAll, WeakKeyDictionary,
is_coroutine_function
)
from scarletio.utils.compact import NEEDS_DUMMY_INIT
from ..core import KOKORO
from ..message import Message
from .core import DEFAULT_EVENT_HANDLER, EVENT_HANDLER_NAME_TO_PARSER_NAMES
def _check_name_should_break(name):
"""
Checks whether the passed `name` is type `str`.
Used inside of ``check_name`` to check whether the given variable is usable, so we should stop checking
other alternative cases.
Parameters
----------
name : `Any`
Returns
-------
should_break : `bool`
If non empty `str` is received returns `True`, meanwhile if `None` or empty `str` is received `False`.
Raises
------
TypeError
If `name` was not passed as `None` or type `str`.
"""
if (name is None):
return False
if type(name) is not str:
raise TypeError(
f'`name` can be `None`, `str`, got {name.__class__.__name__}; {name!r}.'
)
if name:
return True
return False
def check_name(func, name):
"""
Tries to find the given `func`'s preferred name. The check order is the following:
- Passed `name` parameter.
- `func.__event_name__`.
- `func.__name__`.
- `func.__class__.__name__`.
If any of these is set (or passed at the case of `name`) as `None` or as an empty string, then those are ignored.
Parameters
----------
func : `None`, `callable`
The function, what preferred name we are looking for.
name : `None`, `str`
A directly given name value by the user. Defaults to `None` by caller (or at least sit should).
Returns
-------
name : `str`
The preferred name of `func` with lower case characters only.
Raises
------
TypeError
- If a checked name is not `None`, `str`.
- If a metaclass was given.
- If both `name` and `func` are given as `None`.
"""
if None is func is name:
raise TypeError(
f'Both `func` and `name` parameters are `None`'
)
while True:
if _check_name_should_break(name):
break
if hasattr(func, '__event_name__'):
name = func.__event_name__
if _check_name_should_break(name):
break
#func or method
if hasattr(func, '__name__'):
name = func.__name__
if _check_name_should_break(name):
break
func = type(func)
if not issubclass(func, type) and hasattr(func, '__name__'):
name = func.__name__
if _check_name_should_break(name):
break
raise TypeError(
f'Meta-classes are not allowed, got {func!r}.'
)
if not name.islower():
name = name.lower()
return name
def check_parameter_count_and_convert(func, expected, *, name='event', can_be_async_generator=False,
error_message=None):
"""
If needed converts the given `func` to an async callable and then checks whether it expects the specified
amount of non reserved positional parameters.
`func` can be either:
- An async `callable`.
- A class with non async `__new__` (neither `__init__` of course) accepting no non reserved parameters,
meanwhile it's `__call__` is async. This is the convert (or instance) case and it causes the final parameter
count check to be applied on the type's `__call__`.
- A class with async `__new__`.
After the callable was chosen, then the amount of positional parameters are checked what it expects. Reserved
parameters, like `self` are ignored and if the callable accepts keyword only parameter, then it is a no-go.
If every check passed, then at the convert case instances the type and returns that, meanwhile at the other cases
it returns the received `func`.
Parameters
----------
func : `callable`
The callable, what's type and parameter count will checked.
expected : `int`
The amount of parameters, what would be passed to the given `func` when called at the future.
name : `str` = `'event'`, Optional (Keyword only)
The event's name, what is checked and converted. Defaults to `'event'`.
can_be_async_generator : `bool` = `False`, Optional (Keyword only)
Whether async generators are accepted as well.
error_message : `None`, `str` = `None`, Optional (Keyword only)
A specified error message with what a `TypeError` will be raised at the end, if the given `func` is not async
and neither cannot be converted to an async callable.
Returns
-------
func : `callable`
Raises
------
TypeError
- If `func` was not given as callable.
- If `func` is not as async and neither cannot be converted to an async one.
- If `func` expects less or more non reserved positional parameters as `expected` is.
"""
analyzer = CallableAnalyzer(func)
if analyzer.is_async() or (analyzer.is_async_generator() if can_be_async_generator else False):
min_, max_ = analyzer.get_non_reserved_positional_parameter_range()
if min_ > expected:
raise TypeError(
f'`{name}` should accept `{expected!r}` parameters, meanwhile the given callable expects '
f'at least `{min_!r}`, got {func!r}.'
)
if min_ == expected:
return func
# min < expected
if max_ >= expected:
return func
if analyzer.accepts_args():
return func
raise TypeError(
f'`{name}` should accept `{expected}` parameters, meanwhile the given callable expects up to '
f'`{max_!r}`, got {func!r}.'
)
if (
analyzer.can_instance_to_async_callable() or
(analyzer.can_instance_to_async_generator() if can_be_async_generator else False)
):
sub_analyzer = CallableAnalyzer(func.__call__, as_method=True)
if sub_analyzer.is_async():
min_, max_ = sub_analyzer.get_non_reserved_positional_parameter_range()
if min_ > expected:
raise TypeError(
f'`{name}` should accept `{expected!r}` parameters, meanwhile the given callable '
f'after instancing expects at least `{min_!r}`, got {func!r}.'
)
if min_ == expected:
func = analyzer.instance()
return func
# min < expected
if max_ >= expected:
func = analyzer.instance()
return func
if sub_analyzer.accepts_args():
func = analyzer.instance()
return func
raise TypeError(
f'`{name}` should accept `{expected}` parameters, meanwhile the given callable after '
f'instancing expects up to {max_!r}, got `{func!r}`.'
)
func = analyzer.instance()
return func
if error_message is None:
error_message = f'Not async callable type, or cannot be instance to async: `{func!r}`.'
raise TypeError(error_message)
def compare_converted(converted, non_converted):
"""
Compares a maybe instance-able type to an instanced object.
Parameters
----------
converted : `Any`
The already converted object.
non_converted : `Any`
The not yet converted instance to match `converted` on.
Returns
-------
matches : `bool`
Whether `converted` is matched by `non_converted.
"""
# function, both should be functions
if isinstance(non_converted, FunctionType):
return (converted is non_converted)
# method, both should be methods
if isinstance(non_converted, MethodLike):
return (converted is non_converted)
# callable object, both should be the same
if not isinstance(non_converted, type) and hasattr(type(non_converted), '__call__'):
return (converted is non_converted)
# type, but not metaclass
if not issubclass(non_converted, type) and isinstance(non_converted, type):
# async initializer, both is type
if is_coroutine_function(non_converted.__new__):
return (converted is non_converted)
# async call -> should be initialized already, compare the converted's type
if hasattr(non_converted, '__call__'):
return (type(converted) is non_converted)
# meow?
raise TypeError(
f'Expected function, method or a callable object, got {non_converted!r}'
)
def _convert_unsafe_event_iterable(iterable, type_=None):
"""
Converts an iterable to a list of ``EventListElement``-s. This function is called to generate a ``eventlist``
compatible `list` to avoid handling the same cases everywhere.
`iterable`'s element's can be:
- ``EventListElement``.
- `type_` if given.
- `tuple` of `1`-`3` elements (`func`, `args`, `kwargs`).
- `func` itself.
Parameters
----------
iterable : `iterable`
The iterable, what's elements will be checked.
type_ : `None`, `type` = `None`, Optional
If `type_` was passed, then each element is pre-validated with the given type. Some extension classes might
support behaviour.
The given `type_` should implement a `from_args_kwargs` constructor.
Returns
-------
result : `list` of (``EventListElement``, ``type_``)
Raises
------
ValueError
If an element of the received iterable does not matches any of the expected formats.
"""
result = []
for element in iterable:
if type(element) is EventListElement:
if (type_ is not None):
element = type_.from_args_kwargs(element.func, element.args, element.kwargs)
if isinstance(element, type_):
pass
else:
if isinstance(element, tuple):
element_length = len(element)
if element_length > 3 or element_length == 0:
raise ValueError(
f'Expected `tuple` with length 1 or 2, got {element_length!r}; {element!r}.'
)
func = element[0]
if element_length == 1:
args = None
kwargs = None
else:
args = element[1]
if (args is not None) and not isinstance(args, tuple):
raise ValueError(
f'Expected `None`, `tuple` at index 1 of an element, got {element!r}.'
)
if element_length == 2:
kwargs = None
else:
kwargs = element[2]
if (kwargs is not None):
if (type(kwargs) is not dict):
raise ValueError(
f'Expected `None`, `dict` at index 2 of an element: got {element!r}.'
)
if not kwargs:
kwargs = None
else:
func = element
args = None
kwargs = None
if type_ is None:
element = EventListElement(func, args, kwargs)
else:
element = type_.from_args_kwargs(func, args, kwargs)
result.append(element)
continue
return result
def create_event_from_class(constructor, klass, parameter_names, name_name, event_name):
"""
Creates an event passing trough a constructor.
Parameters
----------
klass : `type`
The type to work with.
parameter_names : `tuple` of `str`
The parameters names to pass to the constructor.
name_name : `None`, `str`
The event's name's name.
event_name : `str`
The event's name. If event is nto found, then defaults to `name_name`'s found value if any.
Returns
-------
instance : `Any`
The created instance.
Raises
------
BasesException
Any occurred exception.
"""
if not isinstance(klass, type):
raise TypeError(
f'`klass` can be `type`, got {klass.__class__.__name__}; {klass!r}.'
)
parameters_by_name = {}
for parameter_name in parameter_names:
try:
parameter = getattr(klass, parameter_name)
except AttributeError:
found = False
parameter = None
else:
found = True
parameters_by_name[parameter_name] = (parameter, found)
name = klass.__name__
if (name_name is not None) and (not parameters_by_name[name_name][1]):
parameters_by_name[name_name] = (name, True)
if not parameters_by_name[event_name][1]:
try:
parameter = getattr(klass, name)
except AttributeError:
pass
else:
parameters_by_name[event_name] = (parameter, True)
return constructor(*(parameters_by_name[parameter_name][0] for parameter_name in parameter_names))
class _EventHandlerManager(RichAttributeErrorBaseType):
"""
Gives a decorator functionality to an event handler, because 'rich' event handlers still can not be used a
decorator, their `__call__` is already allocated for handling their respective event.
This class is familiar to ``eventlist``, but it directly works with the respective event handler giving an
easy API to do operations with it.
Attributes
----------
parent : `Any`
The ``_EventHandlerManager``'s parent event handler.
"""
__slots__ = ('parent',)
def __init__(self, parent):
"""
Creates an ``_EventHandlerManager`` from the given event handler.
The `parent` event handler should implement the following methods:
- `.create_event(func, *args, **kwargs)`
- `.delete_event(func)`
And optionally:
- `.create_event_from_class(klass)`
Parameters
----------
parent : `Any`
The respective event handler.
"""
self.parent = parent
def __repr__(self):
"""Returns the representation of the event handler manager."""
return f'<{self.__class__.__name__} of {self.parent!r}>'
def __call__(self, func=..., *args, **kwargs):
"""
Adds the given `func` to the event handler manager's parent. If `func` is not passed, then returns a
``._wrapper` to allow using the manager as a decorator with still passing keyword parameters.
Parameters
----------
func : `callable`, Optional
The event to be added to the respective event handler.
*args : Positional parameters
Additionally passed positional parameters to be passed with the given `func` to the event handler.
**kwargs : Keyword parameters
Additionally passed keyword parameters to be passed with the given `func` to the event handler.
Returns
-------
func : `callable`
- The created instance by the respective event handler.
- If `func` was not passed, then returns a ``._wrapper``.
"""
if func is ...:
return partial_func(self, *args, **kwargs)
func = self.parent.create_event(func, *args, **kwargs)
return func
def from_class(self, klass):
"""
Allows the event handler manager to be able to capture a class and create add it to the parent event handler
from it's attributes.
Parameters
----------
klass : `type`
The class to capture.
Returns
-------
func : `callable`
The created instance by the respective event handler.
Raises
------
TypeError
If the parent of the event handler manager has no support for `.from_class`.
"""
from_class_constructor = getattr(type(self.parent), 'create_event_from_class', None)
if (from_class_constructor is None):
raise TypeError(
f'`.from_class` is not supported by {self.parent!r}.'
)
return from_class_constructor(self.parent, klass)
def remove(self, func, *args, **kwargs):
"""
Removes the given `func` from the event handler manager's parent.
Parameters
----------
func : `callable`
The event to be removed to the respective event handler.
*args : Positional parameters
Additional positional parameters.
**kwargs : Keyword parameters
Additional keyword parameters.
"""
self.parent.delete_event(func, *args, **kwargs)
def __getattr__(self, name):
"""Returns the attribute of the event handler manager's parent."""
try:
return getattr(self.parent, name)
except AttributeError:
pass
# pass at exception handling to remove cause
RichAttributeErrorBaseType.__getattr__(self, name)
def __dir__(self):
"""Returns the attribute names of the object."""
return sorted(set(object.__dir__(self)) | set(dir(self.parent)))
def extend(self, iterable):
    """
    Extends the respective event handler with the given iterable of events.
    
    Parameters
    ----------
    iterable : `iterable`
    
    Raises
    ------
    TypeError
        - If `iterable` was passed as ``eventlist`` and it's `.type` attribute is not accepted by the parent
            event handler.
        - If `iterable` was not passed as type ``eventlist`` and any of it's element's format is incorrect.
    """
    parent = self.parent
    
    if type(iterable) is eventlist:
        type_ = iterable.type
        if (type_ is not None):
            supported_types = getattr(parent, 'SUPPORTED_TYPES', None)
            if (supported_types is None) or (type_ not in supported_types):
                raise TypeError(
                    f'`{parent!r}` does not support elements of type {type_!r}; got {iterable!r}.'
                )
            
            # Typed eventlist elements are pre-validated; add them as they are.
            for element in iterable:
                parent.create_event(element)
            return
    else:
        iterable = _convert_unsafe_event_iterable(iterable)
    
    for element in iterable:
        positional_parameters = element.args
        keyword_parameters = element.kwargs
        parent.create_event(
            element.func,
            *(() if positional_parameters is None else positional_parameters),
            **({} if keyword_parameters is None else keyword_parameters),
        )
def unextend(self, iterable):
    """
    Unextends the respective event handler with the given `iterable`.
    
    Parameters
    ----------
    iterable : `iterable`
    
    Raises
    ------
    ValueError
        - If `iterable` was passed as ``eventlist`` and it's `.type` attribute not accepted by the parent
            event handler.
        - If `iterable` was not passed as type ``eventlist`` and any of it's element's format is incorrect.
        - If any of the passed element is not stored by the parent event handler. At this case error is
            raised only at the end.
    """
    parent = self.parent
    # Removal failures are collected and raised together, so one miss does not abort the rest.
    error_messages = []
    
    if type(iterable) is eventlist:
        type_ = iterable.type
        if (type_ is not None):
            supported_types = getattr(parent, 'SUPPORTED_TYPES', None)
            if (supported_types is None) or (type_ not in supported_types):
                raise TypeError(
                    f'`{parent!r}` does not support elements of type {type_!r}; got {iterable!r}.'
                )
            
            for element in iterable:
                try:
                    parent.delete_event(element, None)
                except ValueError as err:
                    error_messages.append(err.args[0])
            
            if error_messages:
                raise ValueError('\n'.join(error_messages)) from None
            return
    else:
        iterable = _convert_unsafe_event_iterable(iterable)
    
    for element in iterable:
        positional_parameters = element.args
        keyword_parameters = element.kwargs
        try:
            parent.delete_event(
                element.func,
                *(() if positional_parameters is None else positional_parameters),
                **({} if keyword_parameters is None else keyword_parameters),
            )
        except ValueError as err:
            error_messages.append(err.args[0])
    
    if error_messages:
        raise ValueError('\n'.join(error_messages)) from None
class _EventHandlerManagerRouter(_EventHandlerManager):
    """
    Wraps multiple `Client`'s ``_EventHandlerManager`` functionality together, so each operation is routed to
    every applicable client of a ``ClientWrapper``.
    
    Attributes
    ----------
    _getter : `callable`
        Called with the router itself; should return the ``_EventHandlerManager``-s (one per applicable
        client) the operations are routed to.
    _from_class_constructor : `callable`, `None`
        Called with the captured class; defines how `.from_class` builds events. If set as `None`, the
        feature is not supported.
    parent : ``ClientWrapper``
        The parent ``ClientWrapper``.
    """
    __slots__ = ('_getter', '_from_class_constructor', 'parent')
    
    def __init__(self, parent, getter, from_class_constructor):
        """
        Creates an ``_EventHandlerManagerRouter`` routing to all the clients of a ``ClientWrapper``.
        
        Parameters
        ----------
        parent : ``ClientWrapper``
            The respective routed client wrapper.
        getter : `callable`
            Called with the router; returns the handlers to route to.
        from_class_constructor : `None`, `callable`
            Called with the captured class; `None` if `.from_class` is unsupported.
        """
        self.parent = parent
        self._getter = getter
        self._from_class_constructor = from_class_constructor
    
    def __call__(self, func=..., *args, **kwargs):
        """
        Adds the given `func` to all of the represented client's respective event handler managers.
        
        Parameters
        ----------
        func : `callable`, Optional
            The event to be added to the respective event handlers. If omitted, a partial decorator is
            returned instead.
        *args : Positional parameters
            Additionally passed positional parameters to be passed with the given `func` to the event handler.
        **kwargs : Keyword parameters
            Additionally passed keyword parameters to be passed with the given `func` to the event handler.
        
        Returns
        -------
        func : ``Router``
            The added functions, one per routed handler.
        """
        if func is ...:
            return partial_func(self, *args, **kwargs)
        
        handlers = self._getter(self)
        if not handlers:
            return
        
        count = len(handlers)
        
        # Each parameter may be a per-client tuple; expand everything to `count` copies first.
        routed_args = route_args(args, count)
        routed_kwargs = route_kwargs(kwargs, count)
        routed_func = maybe_route_func(func, count)
        
        routed = []
        for handler, func_, args, kwargs in zip(handlers, routed_func, routed_args, routed_kwargs):
            func = handler.create_event(func_, *args, **kwargs)
            routed.append(func)
        
        return Router(routed)
    
    def from_class(self, klass):
        """
        Allows the event handler manager router to be able to capture a class and create and add it to the
        represented event handlers from it's attributes.
        
        Parameters
        ----------
        klass : `type`
            The class to capture.
        
        Returns
        -------
        routed : ``Router``
            The routed created instances.
        
        Raises
        ------
        TypeError
            If the parent of the event handler manager has no support for `.from_class`.
        BaseException
            Any exception raised by any of the event handler.
        """
        from_class_constructor = self._from_class_constructor
        if from_class_constructor is None:
            raise TypeError(
                f'`.from_class` is not supported by {self.parent!r}.'
            )
        
        handlers = self._getter(self)
        count = len(handlers)
        if not count:
            return
        
        routed_maybe = from_class_constructor(klass)
        if isinstance(routed_maybe, Router):
            # Already routed; only the amount has to match the handler count.
            if len(routed_maybe) != count:
                raise ValueError(
                    f'The given class is routed to `{len(routed_maybe)}`, meanwhile expected to be routed '
                    f'to `{count}` times, got {klass!r}.'
                )
            routed = routed_maybe
        else:
            # A single instance; duplicate it per handler, preferring its own `copy` method when available.
            copy_method = getattr(type(routed_maybe), 'copy', None)
            if copy_method is None:
                routed = [routed_maybe for _ in range(count)]
            else:
                routed = [copy_method(routed_maybe) for _ in range(count)]
        
        for handler, event in zip(handlers, routed):
            handler.create_event(event)
        
        return routed
    
    def remove(self, func, *args, **kwargs):
        """
        Removes the given `func` from the represented event handler managers.
        
        Parameters
        ----------
        func : ``Router``, `callable`
            The event to be removed from the respective event handlers.
        *args : `None`, `str`
            Additional positional parameters.
        **kwargs : Keyword parameters
            Additional keyword parameters.
        """
        handlers = self._getter(self)
        
        count = len(handlers)
        if not count:
            return
        
        if isinstance(func, Router):
            if len(func) != count:
                raise ValueError(
                    f'The given `func` is routed `{len(func)}` times, meanwhile expected to be routed '
                    f'to `{count}` times, got {func!r}.'
                )
            
            # Routed input: pair each routed element with its own handler.
            for func, handler in zip(func, handlers):
                handler.delete_event(func, *args, **kwargs)
        else:
            for handler in handlers:
                handler.delete_event(func, *args, **kwargs)
    
    def extend(self, iterable):
        """
        Extends the event handler manager router's respective managers with the given iterable of events.
        
        Parameters
        ----------
        iterable : `iterable`
        
        Raises
        ------
        TypeError
            - If `iterable` was passed as ``eventlist`` and it's `.type` attribute is not accepted by the
                parent event handler.
            - If `iterable` was not passed as type ``eventlist`` and any of it's element's format is
                incorrect.
        """
        handlers = self._getter(self)
        
        count = len(handlers)
        if not count:
            return
        
        if type(iterable) is eventlist:
            type_ = iterable.type
            if (type_ is not None):
                parent = self.parent
                # NOTE(review): type support is checked against the first handler only — presumably all
                # routed handlers share the same `SUPPORTED_TYPES`; confirm against the callers.
                supported_types = getattr(handlers[0], 'SUPPORTED_TYPES', None)
                if (supported_types is None) or (type_ not in supported_types):
                    raise TypeError(
                        f'`{parent!r}` does not support elements of type {type_!r}; got {iterable!r}.'
                    )
                
                for element in iterable:
                    if isinstance(element, Router):
                        if len(element) != count:
                            raise ValueError(
                                f'The given `func` is routed `{len(element)}` times, meanwhile expected to be routed '
                                f'to `{count}` times, got {element!r}.'
                            )
                        
                        for func, handler in zip(element, handlers):
                            handler.create_event(func, None)
                    else:
                        for handler in handlers:
                            handler.create_event(element, None)
                return
        else:
            iterable = _convert_unsafe_event_iterable(iterable)
        
        for element in iterable:
            func = element.func
            args = element.args
            kwargs = element.kwargs
            
            # Expand per-client tuples of each element before dispatching to the handlers.
            routed_args = route_args(args, count)
            routed_func = maybe_route_func(func, count)
            routed_kwargs = route_kwargs(kwargs, count)
            
            for handler, func_, args, kwargs in zip(handlers, routed_func, routed_args, routed_kwargs):
                handler.create_event(func_, *args, **kwargs)
    
    def unextend(self, iterable):
        """
        Unextends the event handler router's represented event handlers with the given `iterable`.
        
        Parameters
        ----------
        iterable : `iterable`
        
        Raises
        ------
        ValueError
            - If `iterable` was passed as ``eventlist`` and it's `.type` attribute not accepted by the parent
                event handler.
            - If `iterable` was not passed as type ``eventlist`` and any of it's element's format is
                incorrect.
            - If any of the passed element is not stored by the parent event handler. At this case error is
                raised only at the end.
        """
        handlers = self._getter(self)
        
        count = len(handlers)
        if not count:
            return
        
        if type(iterable) is eventlist:
            type_ = iterable.type
            if (type_ is not None):
                parent = self.parent
                # NOTE(review): see `.extend` — only the first handler's `SUPPORTED_TYPES` is consulted.
                supported_types = getattr(handlers[0], 'SUPPORTED_TYPES', None)
                if (supported_types is None) or (type_ not in supported_types):
                    raise TypeError(
                        f'`{parent!r}` does not support elements of type {type_!r}; got {iterable!r}.'
                    )
                
                # Errors are collected and raised together at the end.
                collected = []
                for element in iterable:
                    if isinstance(element, Router):
                        if len(element) != count:
                            collected.append(
                                f'The given `func` is routed `{len(element)}` times, meanwhile expected '
                                f'to be routed to `{count}` times, got {element!r}.'
                            )
                            continue
                        
                        for func, handler in zip(element, handlers):
                            try:
                                handler.delete_event(func, None)
                            except ValueError as err:
                                collected.append(err.args[0])
                    else:
                        for handler in handlers:
                            try:
                                handler.delete_event(element, None)
                            except ValueError as err:
                                collected.append(err.args[0])
                
                if collected:
                    raise ValueError('\n'.join(collected)) from None
                return
        else:
            iterable = _convert_unsafe_event_iterable(iterable)
        
        collected = []
        for element in iterable:
            func = element.func
            args = element.args
            kwargs = element.kwargs
            
            routed_func = maybe_route_func(func, count)
            
            if kwargs is None:
                for handler, func_ in zip(handlers, routed_func):
                    try:
                        handler.delete_event(func_)
                    except ValueError as err:
                        collected.append(err.args[0])
            else:
                routed_kwargs = route_kwargs(kwargs, count)
                routed_args = route_args(args, count)
                
                for handler, func_, args, kwargs in zip(handlers, routed_func, routed_args, routed_kwargs):
                    try:
                        handler.delete_event(func_, *args, **kwargs)
                    except ValueError as err:
                        collected.append(err.args[0])
        
        if collected:
            raise ValueError('\n'.join(collected)) from None
    
    def __repr__(self):
        """Returns the router's representation."""
        return (
            f'<{self.__class__.__name__} '
            f'parent={self.parent!r}, '
            f'getter={self._getter!r}, '
            f'from_class_constructor={self._from_class_constructor!r}'
            f'>'
        )
class EventListElement:
    """
    A single entry of an ``eventlist``: the event callable paired with the extra parameters it should be
    registered with.
    
    Attributes
    ----------
    func : `callable`
        The stored event.
    args : `None`, `tuple` of `Any`
        Extra positional parameters for `func`.
    kwargs : `None`, `dict` of (`str`, `Any`) items
        Extra keyword parameters for `func`.
    """
    __slots__ = ('func', 'args', 'kwargs', )
    
    def __init__(self, func, args, kwargs):
        """
        Creates a new ``EventListElement`` from the given parameters.
        
        Parameters
        ----------
        func : `callable`
            The event of the eventlist element.
        args : `None`, `str`
            Additional positional parameters for `func`.
        kwargs : `None`, `dict` of (`str`, `Any`) items
            Additional key word parameters for `func`.
        """
        self.func = func
        self.args = args
        self.kwargs = kwargs
    
    def __repr__(self):
        """Returns the representation of the eventlist element."""
        return f'{self.__class__.__name__}({self.func!r}, args={self.args!r}, kwargs={self.kwargs!r})'
    
    def __len__(self):
        """Length information for unpacking; always `3`."""
        return 3
    
    def __iter__(self):
        """Unpacks the eventlist element as `func`, `args`, `kwargs`."""
        return iter((self.func, self.args, self.kwargs))
class Router(tuple):
    """
    Describes multiple command-like objects created at once — one per routed client.
    """
    def __repr__(self):
        """Returns the router's representation."""
        return ''.join([
            self.__class__.__name__,
            '(',
            ', '.join(repr(element) for element in self),
            ')',
        ])
def route_value(to_route_value, count, default=None):
    """
    Routes only a single `name` - `value` pair.
    
    A `tuple` input is routed element by element: `None` becomes `default` and `...` repeats the previous
    routed value. Any other input is repeated `count` times (with `None` / `...` replaced by `default`).
    
    Parameters
    ----------
    to_route_value : `Any`
        The respective value to route.
    count : `int`
        The expected amount of copies to generate.
    default : `None`, `Any` = `None`, Optional
        Optional default variable to use. Defaults to `None`.
    
    Returns
    -------
    result : `list` of `Any`
        A list of the routed values.
    
    Raises
    ------
    ValueError
        If `to_route_value` is a `tuple` of a different length than `count`.
    """
    if not isinstance(to_route_value, tuple):
        if (to_route_value is None) or (to_route_value is ...):
            to_route_value = default
        return [to_route_value] * count
    
    if len(to_route_value) != count:
        raise ValueError(f'The represented router has `{count}` applicable clients, meanwhile received only '
            f'`{len(to_route_value)}` routed values, got: {to_route_value!r}.')
    
    routed = []
    previous = ...
    for element in to_route_value:
        if element is None:
            element = default
            previous = default
        elif element is ...:
            if previous is ...:
                previous = default
            element = previous
        else:
            previous = element
        
        routed.append(element)
    
    return routed
def route_parameter(parameter, count):
    """
    Routes a parameter to `count` amount of copies.
    
    A `tuple` is routed element by element (`...` repeats the last non-`None` value, `None` resets it);
    anything else is yielded `count` times as-is.
    
    This function is an iterable generator.
    
    Parameters
    ----------
    parameter : `Any`
        The parameter to route.
    count : `int`
        The expected amount of copies to generate.
    
    Yields
    ------
    result : `Any`
    
    Raises
    ------
    ValueError
        A value is a `tuple`, but it's length is different from `count`.
    """
    if not isinstance(parameter, tuple):
        for _ in range(count):
            yield parameter
        return
    
    if len(parameter) != count:
        raise ValueError(
            f'The represented router has `{count}` applicable clients, meanwhile received only '
            f'`{len(parameter)}` routed values, got: {parameter!r}.'
        )
    
    previous = None
    for element in parameter:
        if element is None:
            previous = None
        elif element is ...:
            element = previous
        else:
            previous = element
        
        yield element
def route_kwargs(kwargs, count):
    """
    Routes the given `kwargs` to the given `count` amount of copies.
    
    If a value of a keyword is given as a `tuple`, then it is routed element by element for each applicable
    client.
    
    Parameters
    ----------
    kwargs : `dict` of (`str`, `Any`) items
        Keyword parameter to route.
    count : `int`
        The expected amount of copies to generate.
    
    Returns
    -------
    result : `tuple` of `dict` of (`str`, `Any`) items
    
    Raises
    ------
    ValueError
        A value is a `tuple`, but it's length is different from `count`.
    """
    routed = tuple({} for _ in range(count))
    if (kwargs is not None):
        for parameter_name, parameter_value in kwargs.items():
            for target, routed_value in zip(routed, route_parameter(parameter_value, count)):
                target[parameter_name] = routed_value
    
    return routed
def route_args(args, count):
    """
    Routes the given `args` to the given `count` amount of copies.
    
    Parameters
    ----------
    args : `tuple` of `Any`
        Positional parameter to route.
    count : `int`
        The expected amount of copies to generate.
    
    Returns
    -------
    result : `tuple` of `tuple` of `Any`
    
    Raises
    ------
    ValueError
        A value is a `tuple`, but it's length is different from `count`.
    """
    if (args is None):
        return tuple(() for _ in range(count))
    
    # Collect into lists first; freeze to tuples at the end.
    routed = tuple([] for _ in range(count))
    for parameter_value in args:
        for target, routed_value in zip(routed, route_parameter(parameter_value, count)):
            target.append(routed_value)
    
    return tuple(tuple(target) for target in routed)
def route_name(name, count):
    """
    Routes the given `name` to the given `count` amount of copies.
    
    If `name` is given as `tuple`, then each element of it is returned for each applicable client: `None`
    elements stay `None` and `...` repeats the last routed value.
    
    Parameters
    ----------
    name : `None`, `Ellipsis`, `str`, `tuple` of (`None`, `Ellipsis`, `str`)
        The name to use instead of `func`'s real one.
    count : `int`
        The expected amount of names.
    
    Returns
    -------
    result : `list` of (`None`, `str`)
    
    Raises
    ------
    TypeError
        - If `name` was not given as `None`, `str`, neither as `tuple` of (`None`, `Ellipsis`, `str`).
    ValueError
        If `name` was given as `tuple` but it's length is different from the expected one.
    """
    result = []
    
    if isinstance(name, tuple):
        for index, name_value in enumerate(name):
            if (name_value is not None) and (name_value is not ...) and (not isinstance(name_value, str)):
                raise TypeError(
                    f'`name` was given as a `tuple`, but it\'s {index}th element is not `None`, '
                    f'`Ellipsis`, `str`, got, {name_value.__class__.__name__}: {name_value!r}.'
                )
        
        if len(name) != count:
            raise ValueError(
                f'`name` was given as `tuple`, but it\'s length ({len(name)!r}) not matches the expected '
                f'(`{count}`) one, got {name!r}.'
            )
        
        last = None
        for name_value in name:
            # Fix: this branch previously tested the whole `name` tuple (`name is None`, always false
            # here) instead of the element. The outcome coincided via the `else` branch, but the
            # condition itself was wrong; test the element explicitly.
            if name_value is None:
                last = None
            elif name_value is ...:
                name_value = last
            else:
                last = name_value
            
            result.append(name_value)
    else:
        if name is None:
            name_value = None
        elif isinstance(name, str):
            name_value = str(name)
        else:
            raise TypeError(
                '`name` can be given `None`, `tuple` of (`None, `Ellipsis`, `str`), got '
                f'{name.__class__.__name__}; {name!r}.'
            )
        
        for _ in range(count):
            result.append(name_value)
    
    return result
def maybe_route_func(func, count):
    """
    Routes the given `func` `count` times if applicable.
    
    If `func`'s type defines a `copy` method, each returned element is a fresh copy; otherwise the same
    object is repeated.
    
    Parameters
    ----------
    func : `callable`
        The respective callable to route.
    count : `int`
        The expected amount of functions to return.
    
    Returns
    -------
    result : `list` of `func`
    """
    copier = getattr(type(func), 'copy', None)
    if copier is None:
        return [func] * count
    
    return [copier(func) for _ in range(count)]
class eventlist(list):
    """
    Represents a container to store events before adding them to a client. Some extension classes might
    support this class as well.
    
    Attributes
    ----------
    kwargs : `None`, `dict` of (`str`, `Any`) items
        Keyword parameters used for each element when extending the client's events with the event-list.
    type : `None`, `type`
        If `type_` was passed when creating the eventlist, then each added element is pre-validated with the
        given type before adding them. Some extension classes might support this behaviour.
    
    Notes
    -----
    Hata's `commands` extension class supports collecting commands in ``eventlist`` and pre-validating as
    well with passing `type_` as `Command`.
    """
    # Disable the inherited `list` API which would bypass the eventlist's own validation.
    insert = RemovedDescriptor()
    sort = RemovedDescriptor()
    pop = RemovedDescriptor()
    reverse = RemovedDescriptor()
    remove = RemovedDescriptor()
    index = RemovedDescriptor()
    count = RemovedDescriptor()
    __mul__ = RemovedDescriptor()
    __rmul__ = RemovedDescriptor()
    __imul__ = RemovedDescriptor()
    __add__ = RemovedDescriptor()
    __radd__ = RemovedDescriptor()
    __iadd__ = RemovedDescriptor()
    __setitem__ = RemovedDescriptor()
    __contains__ = RemovedDescriptor()
    
    __slots__ = ('kwargs', 'type')
    
    def __new__(cls, iterable=None, type_=None, **kwargs):
        """
        Creates a new eventlist from the given parameters.
        
        Parameters
        ----------
        iterable : `None`, `iterable` = `None`, Optional
            An iterable of events to extend the eventlist with.
        type_ : `None`, `type` = `None`, Optional
            A type to validate each added element to the eventlist.
        **kwargs : Keyword parameters
            Additional keyword parameters to be used when adding each element.
        
        Raises
        ------
        TypeError
            If `type_` was passed as not as `type`.
        ValueError
            - If `iterable` was passed as ``eventlist`` and it's `.type` attribute is different.
            - If `iterable` was not passed as type ``eventlist`` and any of it's element's format is
                incorrect.
        """
        if (type_ is not None) and (not isinstance(type_, type)):
            raise TypeError(
                f'`type_` can be `None`, `type`, got {type_!r}.'
            )
        
        if not kwargs:
            kwargs = None
        
        self = list.__new__(cls)
        self.type = type_
        self.kwargs = kwargs
        
        if (iterable is not None):
            self.extend(iterable)
        
        return self
    
    if NEEDS_DUMMY_INIT:
        def __init__(self, *args, **kwargs):
            # `list.__init__` would re-consume `__new__`'s parameters; neutralize it where required.
            pass
    
    def from_class(self, klass):
        """
        Allows the ``eventlist`` to be able to capture a class and create an element from it's attributes.
        
        Parameters
        ----------
        klass : `type`
            The class to capture.
        
        Returns
        -------
        element : `callable`
            The created instance from the eventlist's `.type`.
        
        Raises
        ------
        TypeError
            If the eventlist has no `.type` set, or if it's `.type` is not supporting this method.
        """
        type_ = self.type
        if type_ is None:
            raise TypeError(
                '`.from_class` method cannot be used on `eventlist` without type.'
            )
        
        from_class = getattr(type_, 'from_class', None)
        if from_class is None:
            raise TypeError(
                f'`.from_class`. is not supported by the `eventlist`\'s type: {type_!r}.'
            )
        
        element = from_class(klass)
        list.append(self, element)
        return element
    
    def extend(self, iterable):
        """
        Extends the ``eventlist`` with the given `iterable`.
        
        Parameters
        ----------
        iterable : `iterable`
            An iterable of events to extend the eventlist with.
        
        Raises
        ------
        ValueError
            - If `iterable` was passed as ``eventlist`` and it's `.type` attribute is different.
            - If `iterable` was not passed as type ``eventlist`` and any of it's element's format is
                incorrect.
        """
        if type(iterable) is type(self):
            if self.type is not iterable.type:
                raise ValueError(
                    f'Extending {self.__class__.__name__} with an other object of the same type, is not allowed if '
                    f'their `.type` is different. Own: {self.type!r}; other: {iterable.type!r}.'
                )
        else:
            iterable = _convert_unsafe_event_iterable(iterable, self.type)
        
        list.extend(self, iterable)
    
    def unextend(self, iterable):
        """
        Unextends the eventlist with the given `iterable`.
        
        Parameters
        ----------
        iterable : `iterable`
            An iterable of events to unextend the eventlist with.
        
        Raises
        ------
        ValueError
            - If `iterable` was passed as ``eventlist`` and it's `.type` attribute is different.
            - If `iterable` was not passed as type ``eventlist`` and any of it's element's format is
                incorrect.
            - If any of the passed elements is not at the ``eventlist``. At this case error is raised only
                at the end.
        """
        if type(iterable) is not type(self):
            iterable = _convert_unsafe_event_iterable(iterable, self.type)
        else:
            if self.type is not iterable.type:
                raise ValueError(
                    f'Extending {self.__class__.__name__} with an other object of the same type, is not allowed if '
                    f'their `.type` is different. Own: {self.type!r}; other: {iterable.type!r}.'
                )
        
        # Collect the failures so every removable element is removed before raising.
        collected = []
        for element in iterable:
            try:
                self.remove(*element)
            except ValueError as err:
                collected.append(err.args[0])
        
        if collected:
            raise ValueError('\n'.join(collected))
    
    def __call__(self, func=..., *args, **kwargs):
        """
        Adds the given `func` to the ``eventlist`` with the other given keyword parameters. If `func` is not
        passed, then returns a partial decorator, allowing the ``eventlist`` to be used as a decorator with
        still passing keyword parameters.
        
        Parameters
        ----------
        func : `callable`, Optional
            The event to be added to the eventlist.
        *args : Positional parameter
            Additionally passed positional parameters to be used when the passed `func` is used up.
        **kwargs : Keyword parameters
            Additionally passed keyword parameters to be used when the passed `func` is used up.
        
        Returns
        -------
        func : `callable`
            - If `func` was passed and the eventlist has no `.type` then returns the passed `func`.
            - If `func` was passed and the eventlist has `.type` set, then returns an instance of that.
            - If `func` was not passed, then returns a partial decorator.
        """
        own_kwargs = self.kwargs
        if (own_kwargs is not None) and own_kwargs:
            for name_, value_ in own_kwargs.items():
                kwargs.setdefault(name_, value_)
        
        if func is ...:
            return partial_func(self, *args, **kwargs)
        
        type_ = self.type
        if type_ is None:
            # Fix: `EventListElement` takes `(func, args, kwargs)` as three positional parameters;
            # the previous `EventListElement(func, *args, **kwargs)` unpacked them into the signature
            # and raised `TypeError` for almost any call.
            element = EventListElement(func, args, kwargs)
        else:
            element = func = type_(func, *args, **kwargs)
        
        list.append(self, element)
        return func
    
    def remove(self, func, *args):
        """
        Removes the first element of the eventlist matching the given `func`.
        
        Parameters
        ----------
        func : `callable`
            The function to remove.
        *args : Positional parameters
            The stored element's additional parameters (as yielded when unpacking it). Accepted so
            ``unextend`` can call ``self.remove(*element)``; ignored for matching.
        
        Raises
        ------
        ValueError
            If the passed `func` was not found.
        """
        # We might overwrite `__iter__` later; index based iteration is also required to delete the match.
        for index in range(list.__len__(self)):
            element = list.__getitem__(self, index)
            if compare_converted(element.func, func):
                # Fix: previously the matched element was left in place (bare `return`), so `remove`
                # never actually removed anything.
                list.__delitem__(self, index)
                return
        
        raise ValueError(
            f'Could not match any element by func={func!r}.'
        )
    
    def __repr__(self):
        """Returns the representation of the eventlist."""
        repr_parts = [
            self.__class__.__name__,
            '([',
        ]
        
        repr_parts.append(', '.join(repr(element) for element in list.__iter__(self)))
        repr_parts.append(']')
        
        type_ = self.type
        if (type_ is not None):
            repr_parts.append(', type=')
            repr_parts.append(repr(type_))
        
        kwargs = self.kwargs
        if (kwargs is not None):
            repr_parts.append(', kwargs=')
            repr_parts.append(repr(kwargs))
        
        repr_parts.append(')')
        return ''.join(repr_parts)
    
    def add_kwargs(self, **kwargs):
        """
        Adds keyword parameters to the ``eventlist``'s.
        
        Parameters
        ----------
        **kwargs : Keyword parameters
            Keyword parameters to extend the ``eventlist``'s with.
        """
        if not kwargs:
            return
        
        own_kwargs = self.kwargs
        if own_kwargs is None:
            self.kwargs = kwargs
        else:
            own_kwargs.update(kwargs)
    
    def remove_kwargs(self, *names):
        """
        Removes keyword parameters of the ``eventlist`` by their name.
        
        Parameters
        ----------
        *names : Positional parameters
            Keyword parameter's name added to the ``eventlist``.
        """
        if not names:
            return
        
        own_kwargs = self.kwargs
        if own_kwargs is None:
            return
        
        for name in names:
            try:
                del own_kwargs[name]
            except KeyError:
                pass
        
        # Normalize an emptied dict back to `None`.
        if not own_kwargs:
            self.kwargs = None
    
    def clear_kwargs(self):
        """
        Clears the kwargs of the eventlist.
        """
        self.kwargs = None
# This class is a placeholder for the `with` statement support also for the `shortcut` property as well.
class EventHandlerBase:
    """
    Base class for event handlers.
    """
    __slots__ = ()
    
    # subclasses should overwrite it
    async def __call__(self, *args):
        """
        Called by the respective parser. The first received parameter is always a ``Client``, the rest
        depends on the dispatch event.
        
        This method is a coroutine.
        
        Parameters
        ----------
        *args : Additional positional parameters
        """
        return None
    
    # subclasses should overwrite it
    def create_event(self, func, *args, **kwargs):
        """
        Adds the specified event to the event handler. Subclasses might add additional keyword parameters
        as well.
        
        Parameters
        ----------
        func : `callable`
            The callable to be added.
        *args : Positional parameters
            Positional parameters to pass to the created event.
        **kwargs : Keyword parameters
            Keyword parameters to pass to the created event.
        
        Returns
        -------
        func : `callable`
            The created event.
        """
        return None
    
    # subclasses should overwrite it
    def delete_event(self, func):
        """
        Removes the specified event from the event handler. Subclasses might add additional keyword
        parameters as well.
        
        Parameters
        ----------
        func : `callable`
            The callable to be removed.
        """
        return None
    
    @property
    def shortcut(self):
        """
        Shortcuts the event handler's event adding and removing functionality to make those operations
        easier.
        
        Returns
        -------
        event_handler_manager : ``_EventHandlerManager``
        """
        return _EventHandlerManager(self)
class EventWaitforMeta(type):
"""
Metaclass for `waitfor` event handlers
The supported events by default are the following:
- `message_create`
- `message_edit`
- `message_delete`
- `channel_create`
- `channel_edit`
- `channel_delete`
- `role_create`
- `role_edit`
- `role_delete`
- `guild_delete`
- `guild_edit`
- `emoji_edit`
- `emoji_delete`
- `reaction_add`
- `reaction_delete`
See Also
--------
``EventWaitforBase`` : Base class to inherit instead of meta-classing ``EventWaitforMeta``.
"""
    def __call__(cls, *args, **kwargs):
        """
        Instances the type.
        
        Auto-adds a `.waitfors` attribute to the created instance, set as a `WeakKeyDictionary`, so you
        would not need to bother with that.
        
        Parameters
        ----------
        *args : Additional positional parameters
        **kwargs : Additional keyword parameters
        
        Returns
        -------
        object_ : `Any`
        """
        object_ = cls.__new__(cls, *args, **kwargs)
        # `__new__` may return a foreign (for example cached) instance; return those untouched without
        # attaching `.waitfors` or calling `__init__` again.
        if type(object_) is not cls:
            return object_
        
        object_.waitfors = WeakKeyDictionary()
        cls.__init__(object_, *args, **kwargs)
        return object_
    # `event name` -> `waitfor runner` relations; ``EventWaitforMeta`` subclasses dispatch through this.
    _call_waitfors = {}
    
    async def _call_message_create(self, client, message):
        # Notify the waiters of the message's channel, then of its guild when the channel has one.
        args = (client, message)
        channel = message.channel
        self._run_waitfors_for(channel, args)
        
        guild = channel.guild
        if guild is None:
            return
        
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['message_create'] = _call_message_create
    # `del` keeps the helper out of the class namespace; it stays reachable through `_call_waitfors`.
    del _call_message_create
    
    async def _call_message_edit(self, client, message, old_attributes):
        # Channel waiters first, guild waiters after (when applicable).
        args = (client, message, old_attributes)
        channel = message.channel
        self._run_waitfors_for(channel, args)
        
        guild = channel.guild
        if guild is None:
            return
        
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['message_edit'] = _call_message_edit
    del _call_message_edit
    
    async def _call_message_delete(self, client, message,):
        args = (client, message)
        channel = message.channel
        self._run_waitfors_for(channel, args)
        
        guild = channel.guild
        if guild is None:
            return
        
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['message_delete'] = _call_message_delete
    del _call_message_delete
    
    async def _call_typing(self, client, channel, user, timestamp):
        args = (client, channel, user, timestamp)
        self._run_waitfors_for(channel, args)
        
        guild = channel.guild
        if guild is None:
            return
        
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['typing'] = _call_typing
    del _call_typing
    
    async def _call_channel_create(self, client, channel):
        # Channel creation only has guild scoped waiters; bail out for guild-less channels.
        guild = channel.guild
        if guild is None:
            return
        
        args = (client, channel)
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['channel_create'] = _call_channel_create
    del _call_channel_create
    
    async def _call_channel_edit(self, client, channel, old_attributes):
        args = (client, channel, old_attributes)
        self._run_waitfors_for(channel, args)
        
        guild = channel.guild
        if guild is None:
            return
        
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['channel_edit'] = _call_channel_edit
    del _call_channel_edit
    
    async def _call_channel_delete(self, client, channel):
        args = (client, channel)
        self._run_waitfors_for(channel, args)
        
        guild = channel.guild
        if (guild is not None):
            self._run_waitfors_for(guild, args)
    
    _call_waitfors['channel_delete'] = _call_channel_delete
    del _call_channel_delete
    
    async def _call_role_create(self, client, role):
        args = (client, role)
        guild = role.guild
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['role_create'] = _call_role_create
    del _call_role_create
    
    async def _call_role_edit(self, client, role, old_attributes):
        # Role waiters first, then the role's guild's waiters.
        args = (client, role, old_attributes)
        self._run_waitfors_for(role, args)
        
        guild = role.guild
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['role_edit'] = _call_role_edit
    del _call_role_edit
    
    async def _call_role_delete(self, client, role, guild):
        args = (client, role, guild)
        self._run_waitfors_for(role, args)
        
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['role_delete'] = _call_role_delete
    del _call_role_delete
    
    async def _call_guild_delete(self, client, guild, profile):
        args = (client, guild, profile)
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['guild_delete'] = _call_guild_delete
    del _call_guild_delete
    
    async def _call_guild_edit(self, client, guild, old_attributes):
        args = (client, guild, old_attributes)
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['guild_edit'] = _call_guild_edit
    del _call_guild_edit
async def _call_emoji_create(self, client, emoji):
args = (client, emoji)
guild = emoji.guild
self._run_waitfors_for(guild, args)
_call_waitfors['emoji_create'] = _call_emoji_create
del _call_emoji_create
async def _call_emoji_edit(self, client, emoji, old_attributes):
args = (client, emoji, old_attributes)
self._run_waitfors_for(emoji, args)
guild = emoji.guild
self._run_waitfors_for(guild, args)
_call_waitfors['emoji_edit'] = _call_emoji_edit
del _call_emoji_edit
async def _call_emoji_delete(self, client, emoji):
args = (client, emoji)
self._run_waitfors_for(emoji, args)
guild = emoji.guild
if (guild is not None):
self._run_waitfors_for(guild, args)
_call_waitfors['emoji_delete'] = _call_emoji_delete
del _call_emoji_delete
async def _call_reaction_add(self, client, event):
message = event.message
if not isinstance(message, Message):
return
args = (client, event)
self._run_waitfors_for(message, args)
_call_waitfors['reaction_add'] = _call_reaction_add
del _call_reaction_add
async def _call_reaction_delete(self, client, event):
message = event.message
if not isinstance(message, Message):
return
args = (client, event)
self._run_waitfors_for(message, args)
_call_waitfors['reaction_delete'] = _call_reaction_delete
del _call_reaction_delete
class EventWaitforBase(EventHandlerBase, metaclass=EventWaitforMeta):
"""
Base class for event handlers, which implement waiting for a specified action to occur.
Attributes
----------
waitfors : `WeakValueDictionary` of (``DiscordEntity``, `async-callable`) items
An auto-added container to store `entity` - `async-callable` pairs.
Class Attributes
----------------
__event_name__ : `None`, `str` = `None`
Predefined name to what the event handler will be added.
call_waitfors : `None`, `async callable` = `None`
An added method to subclasses to ensure the waitfors if overwrite `__call__` is overwritten. Subclasses can
also overwrite `call_waitfors` method as well.
"""
# NOTE(review): the docstring above says `WeakValueDictionary`, but the metaclass's
# `__call__` assigns a `WeakKeyDictionary` to `.waitfors` — confirm which is intended.
__slots__ = ('waitfors', )
__event_name__ = None
call_waitfors = None
def append(self, target, waiter):
"""
Adds a new relation to `.waitfors`.
When the respective event is received with the specified `target` entity, then `waiter` will be ensured.
Parameters
----------
target : ``DiscordEntity``
The target entity, to what relative waiters will be called.
waiter : `async callable`
Waiter to call every time a respective event to `target` is received.
"""
# A lone waiter is stored directly; on the second registration both waiters are
# migrated into an `asynclist` container. `list.append` is called unbound because
# `asynclist` removes its own `append`.
try:
actual = self.waitfors[target]
if type(actual) is asynclist:
list.append(actual, waiter)
else:
self.waitfors[target] = container = asynclist()
list.append(container, actual)
list.append(container, waiter)
except KeyError:
self.waitfors[target] = waiter
def remove(self, target, waiter):
"""
Removes the specified relation from `.waitfors`.
Parameters
----------
target : ``DiscordEntity``
The entity on what the given waiter waits for the respective event.
waiter : `async callable`
The waiter, what is called with the respective parameters if the respective event occurs related to the
given `target`.
"""
# NOTE(review): when a single (non-`asynclist`) waiter is stored, it is popped and
# dropped without being compared against `waiter` — verify this is intentional.
try:
container = self.waitfors.pop(target)
except KeyError:
return
if type(container) is not asynclist:
return
try:
list.remove(container, waiter)
except ValueError:
pass
else:
# Collapse a 1-element container back to a bare waiter.
if len(container) == 1:
self.waitfors[target] = container[0]
return
self.waitfors[target] = container
def get_waiter(self, target, waiter, by_type=False, is_method=False):
"""
Looks up whether any of the given `target` - `waiter` relation is stored inside of `.waiters` and if there is
any, then returns the first find. If non, then returns `None`.
Parameters
----------
target : ``DiscordEntity``
The target entity.
waiter : `Any`
The waiter. `by_type` and `is_method` overwrite the behaviour of checking it.
by_type : `bool` = `False`, Optional
Whether `waiter` was given as the type of the real waiter. Defaults to `False`.
is_method : `bool` = `False`, Optional
Whether the real waiter is a method-like, and you want to check it's "self". Applied before `by_type` and
defaults to `False`.
Returns
-------
waiter : `Any`
"""
try:
element = self.waitfors[target]
except KeyError:
return None
# Two symmetric branches: one scans the `asynclist` container, the other checks a
# single stored waiter.
if type(element) is asynclist:
for element in element:
if is_method:
if not isinstance(element, MethodLike):
continue
element = element.__self__
if by_type:
if type(element) is waiter:
return element
else:
continue
else:
if element == waiter:
return element
else:
continue
return None
else:
if is_method:
if not isinstance(element, MethodLike):
return None
element = element.__self__
if by_type:
if type(element) is waiter:
return element
else:
return None
else:
if element == waiter:
return element
else:
return None
def get_waiters(self, target, waiter, by_type=False, is_method=False):
"""
Looks up the waiters of `target` - `waiter` relation stored inside of `.waiters` and returns all the matched
one.
Parameters
----------
target : ``DiscordEntity``
The target entity.
waiter : `Any`
The waiter. `by_type` and `is_method` overwrite the behaviour of checking it.
by_type : `bool` = `False`, Optional
Whether `waiter` was given as the type of the real waiter. Defaults to `False`.
is_method : `bool` = `False`, Optional
Whether the real waiter is a method-like, and you want to check it's "self". Applied before `by_type` and
defaults to `False`.
Returns
-------
waiters : `list` of `Any`
"""
result = []
try:
element = self.waitfors[target]
except KeyError:
return result
if type(element) is asynclist:
for element in element:
if is_method:
if not isinstance(element, MethodLike):
continue
element = element.__self__
if by_type:
if type(element) is not waiter:
continue
else:
if element != waiter:
continue
result.append(element)
continue
else:
if is_method:
if not isinstance(element, MethodLike):
return result
element = element.__self__
if by_type:
if type(element) is waiter:
result.append(element)
else:
if element == waiter:
result.append(element)
return result
def _run_waitfors_for(self, target, args):
"""
Runs the waitfors of the given target.
Parameters
----------
target : ``DiscordEntity``
The target entity.
args : `tuple` of `Any`
Parameters to ensure the waitfors with.
"""
# Each waiter coroutine is scheduled as a `Task` on `KOKORO`; exceptions inside the
# waiters therefore never propagate back here.
try:
event = self.waitfors[target]
except KeyError:
pass
else:
if type(event) is asynclist:
for event in event:
Task(event(*args), KOKORO)
else:
Task(event(*args), KOKORO)
def EventWaitforMeta__new__(cls, class_name, class_parents, class_attributes):
    """
    Subclasses ``EventWaitforBase``.
    Parameters
    ----------
    class_name : `str`
        The created class's name.
    class_parents : `tuple` of `type`
        The superclasses of the created type.
    class_attributes : `dict` of (`str`, `Any`) items
        The class attributes of the created type.
    Returns
    -------
    type : ``EventWaitforMeta``
        The created type.
    Raises
    ------
    TypeError
        - If the class does not inherit ``EventWaitforBase``.
        - If `.__event_name__` was not set or was not set correctly. (Note that if it was not set, then the class's
            name is used instead.)
        - If there is no predefined `call_waitfors` for the class and it does not define one either.
    """
    # This metaclass is only meaningful for `EventWaitforBase` subclasses.
    for base in class_parents:
        if issubclass(base, EventWaitforBase):
            break
    else:
        # Fixed: the original message had an unbalanced backtick after the format field.
        raise TypeError(
            f'`{cls.__name__}` should be only the metaclass of `{EventWaitforBase.__name__}`.'
        )
    
    # Fall back to the class's own name when `__event_name__` is not given.
    event_name = class_attributes.get('__event_name__', None)
    if event_name is None:
        event_name = class_name
    
    if event_name not in EVENT_HANDLER_NAME_TO_PARSER_NAMES:
        raise TypeError(
            f'`{class_name}.__event_name__` is not set, or is not set correctly.'
        )
    
    if class_attributes.get('call_waitfors', None) is None:
        try:
            call_waitfors = cls._call_waitfors[event_name]
        except KeyError:
            raise TypeError(
                f'Event: `{event_name!r}` has no auto `call_waitfor` added. Please define one.'
            )
        
        class_attributes['call_waitfors'] = call_waitfors
        
        # `dict.get` never raises `KeyError`, so the old `try`/`except KeyError`
        # around this lookup was dead code and has been removed.
        call = class_attributes.get('__call__', None)
        if (call is None) or (call is EventHandlerBase.__call__):
            class_attributes['__call__'] = call_waitfors
    
    return type.__new__(cls, class_name, class_parents, class_attributes)

EventWaitforMeta.__new__ = EventWaitforMeta__new__
del EventWaitforMeta__new__
class ChunkWaiter(EventHandlerBase):
    """
    Event handler routing ``guild_user_chunk`` events to the waiter registered
    under each event's nonce.
    
    Attributes
    ----------
    waiters : `dict`
        Nonce - waiter callback relations.
    """
    __slots__ = ('waiters',)
    
    __event_name__ = 'guild_user_chunk'
    
    def __init__(self):
        """Creates the chunk waiter with an empty waiter registry."""
        self.waiters = {}
    
    # Registration goes straight through `self.waiters`; the generic event API is disabled.
    def create_event(self, waiter, nonce):
        """
        Raises
        ------
        RuntimeError
            Interact with self.waiters instead.
        """
        raise RuntimeError('Interact with `self.waiters` instead.')
    
    def delete_event(self, waiter, nonce):
        """
        Raises
        ------
        RuntimeError
            Interact with self.waiters instead.
        """
        raise RuntimeError('Interact with `self.waiters` instead.')
    
    async def __call__(self, client, event):
        """
        Calls the chunk waiter registered for the event's nonce, removing it from
        the registry once it signals completion by returning `True`.
        This method is a coroutine.
        
        Parameters
        ----------
        client : ``Client``
            The client, who received the respective dispatch event.
        event : ``GuildUserChunkEvent``
            The received guild user chunk event.
        """
        nonce = event.nonce
        if nonce is None:
            return
        
        registry = self.waiters
        try:
            callback = registry[nonce]
        except KeyError:
            return
        
        if callback(event):
            del registry[nonce]
class WaitForHandler:
"""
O(n) event waiter. Added as an event handler by ``Client.wait_for``.
Attributes
----------
waiters : `dict` of (``Future``, `callable`) items
A dictionary which contains the waiter futures and the respective checks.
"""
__slots__ = ('waiters', )
def __init__(self):
"""
Creates a new ``WaitForHandler``.
"""
self.waiters = {}
async def __call__(self, client, *args):
"""
Runs the checks of the respective event.
This method is a coroutine.
Parameters
----------
client : ``Client``
The client who received the respective events.
args : `tuple` of `Any`
Other received parameters by the event.
"""
# Each registered check is evaluated against the event's parameters; a check raising
# forwards the exception to its future, `GeneratorExit` additionally propagates.
for future, check in self.waiters.items():
try:
result = check(*args)
except GeneratorExit as err:
future.set_exception_if_pending(err)
raise
except BaseException as err:
future.set_exception_if_pending(err)
else:
# A boolean result gates the wake-up; any other result is appended to the
# delivered parameters.
# NOTE(review): `args` is rebound here and thus affects later loop iterations —
# confirm that this is intentional (indentation was lost in this dump, so the
# exact nesting of these branches could not be verified).
if isinstance(result, bool):
if result:
if len(args) == 1:
args = args[0]
else:
return
else:
args = (*args, result)
future.set_result_if_pending(args)
class asynclist(list):
"""
Container used by events to call more events and by waitfor events to call more waiters.
"""
__slots__ = ()
def __init__(self, iterable=None):
"""
Creates a new asynclist from the given iterable.
Parameters
----------
iterable : `None`, `iterable` = `None`, Optional
"""
if (iterable is not None):
list.extend(self, iterable)
async def __call__(self, *args):
"""
Ensures the contained async callables on the client's loop.
This method is a coroutine.
Parameters
----------
*args : Additional position parameters
Parameters to call with the contained async callables.
"""
# `list.__iter__` is invoked unbound to bypass the attribute proxying implemented in
# `__getattribute__` below.
for coroutine_function in list.__iter__(self):
Task(coroutine_function(*args), KOKORO)
def __repr__(self):
"""Returns the asynclist's representation."""
repr_parts = [
self.__class__.__name__,
'(['
]
limit = list.__len__(self)
if limit:
index = 0
while True:
element = list.__getitem__(self, index)
repr_parts.append(repr(element))
index += 1
if index == limit:
break
repr_parts.append(', ')
continue
repr_parts.append('])')
return ''.join(repr_parts)
def __getattribute__(self, name):
"""Gets the given attribute from the elements of the asynclist."""
if not isinstance(name, str):
raise TypeError(f'Attribute name must be string, not `{name.__class__.__name__}`.')
# Own attributes win; `...` (Ellipsis) acts as the missing-value sentinel so even a
# stored `None` attribute is honoured.
try:
attribute = object.__getattribute__(self, name)
except AttributeError:
pass
else:
if attribute is not ...:
return attribute
# Fall back to the first contained callable exposing the attribute.
for coroutine_function in list.__iter__(self):
attribute = getattr(coroutine_function, name, ...)
if attribute is ...:
continue
return attribute
raise AttributeError(f'`{self.__class__.__name__}` object has no attribute `{name}`.')
# The mutating / copying list API is disabled; use the unbound `list` methods instead.
append = RemovedDescriptor()
clear = RemovedDescriptor()
copy = RemovedDescriptor()
count = RemovedDescriptor()
extend = RemovedDescriptor()
index = RemovedDescriptor()
insert = RemovedDescriptor()
pop = RemovedDescriptor()
remove = RemovedDescriptor()
reverse = RemovedDescriptor()
sort = RemovedDescriptor()
async def _with_error(client, task):
"""
Runs the given awaitable and if it raises, calls `client.events.error` with the exception.
This function is a coroutine.
Parameters
----------
client : ``Client``
The client, who's `client.events.error` will be called.
task : `awaitable`
The awaitable to run.
"""
try:
await task
except GeneratorExit:
raise
except BaseException as err:
await client.events.error(client, repr(task), err)
finally:
task = None # clear references
async def ensure_shutdown_event_handlers(client):
    """
    Ensures the client's shutdown event handlers.
    This function is a coroutine.
    
    Parameters
    ----------
    client : ``Client``
        The respective client.
    """
    # Delegates to the shared runner with whatever is registered under `shutdown`.
    handlers = client.events.shutdown
    return await ensure_event_handlers(client, handlers)
async def ensure_voice_client_shutdown_event_handlers(client):
    """
    Ensures the client's voice client shutdown event handlers.
    This function is a coroutine.
    
    Parameters
    ----------
    client : ``Client``
        The respective client.
    """
    handlers = client.events.voice_client_shutdown
    return await ensure_event_handlers(client, handlers)
async def ensure_event_handlers(client, event_handlers):
    """
    Ensures the given event handlers. Used by ``ensure_shutdown_event_handlers`` and
    ``ensure_voice_client_shutdown_event_handlers``.
    This function is a coroutine.
    
    Parameters
    ----------
    client : ``Client``
        The respective client.
    event_handlers : `async-callable`
        The event handlers to ensure.
    """
    if event_handlers is DEFAULT_EVENT_HANDLER:
        return
    
    # Normalise to an iterable of handlers; an `asynclist` holds many, anything else is one.
    if type(event_handlers) is asynclist:
        handlers = tuple(list.__iter__(event_handlers))
    else:
        handlers = (event_handlers,)
    
    scheduled = [Task(_with_error(client, handler(client)), KOKORO) for handler in handlers]
    event_handlers = None # clear references
    handlers = None
    
    # `WaitTillAll` is used even for a single task, since raised exceptions must not be
    # forwarded out of here.
    waiter = WaitTillAll(scheduled, KOKORO)
    scheduled = None # clear references
    await waiter
def call_unknown_dispatch_event_event_handler(client, name, data):
    """
    Calls `client.events.unknown_dispatch_event`.
    
    Parameters
    ----------
    client : ``Client``
        The respective client.
    name : `str`
        The name of an event.
    data : `object`
        The received data.
    """
    handler = client.events.unknown_dispatch_event
    if handler is DEFAULT_EVENT_HANDLER:
        # Nothing registered; drop the event silently.
        return
    Task(handler(client, name, data), KOKORO)
# Event handler types that are implementation details of the library itself;
# `should_ignore_event_handler` uses this set to exclude them from snapshotting.
IGNORED_EVENT_HANDLER_TYPES = frozenset((
WaitForHandler,
ChunkWaiter,
))
def should_ignore_event_handler(event_handler):
    """
    Returns whether the given `event_handler` should be ignored from snapshotting.
    
    Parameters
    ----------
    event_handler : `async-callable`
        The respective event handler.
    
    Returns
    -------
    should_ignore : `bool`
    """
    # Default placeholders and internal bookkeeping handler types carry no user state.
    return (
        (event_handler is DEFAULT_EVENT_HANDLER)
        or (type(event_handler) in IGNORED_EVENT_HANDLER_TYPES)
    )
def _iterate_event_handler(event_handler):
    """
    Iterates over the given event handler, yielding each valuable handler.
    This method is an iterable generator.
    
    Parameters
    ----------
    event_handler : `Any`
        Event handler to iterate trough.
    
    Yields
    ------
    event_handler : `async-callable`
        Valuable event handlers.
    """
    # An `asynclist` wraps several handlers; anything else is a single candidate.
    if isinstance(event_handler, asynclist):
        candidates = list.__iter__(event_handler)
    else:
        candidates = (event_handler,)
    
    for candidate in candidates:
        if not should_ignore_event_handler(candidate):
            yield candidate
| 32.87014
| 120
| 0.530555
|
4a04046b79ffc27d56e2bdb76759bfa9acef8b02
| 738
|
py
|
Python
|
src/adafruit_blinka/board/hifive_unleashed.py
|
Jcc99/Adafruit_Blinka
|
41f8155bab83039ed9d45276addd3d501e83f3e6
|
[
"MIT"
] | 294
|
2018-06-30T19:08:27.000Z
|
2022-03-26T21:08:47.000Z
|
src/adafruit_blinka/board/hifive_unleashed.py
|
Jcc99/Adafruit_Blinka
|
41f8155bab83039ed9d45276addd3d501e83f3e6
|
[
"MIT"
] | 421
|
2018-06-30T20:54:46.000Z
|
2022-03-31T15:08:37.000Z
|
src/adafruit_blinka/board/hifive_unleashed.py
|
Jcc99/Adafruit_Blinka
|
41f8155bab83039ed9d45276addd3d501e83f3e6
|
[
"MIT"
] | 234
|
2018-07-23T18:49:16.000Z
|
2022-03-28T16:59:48.000Z
|
"""Pin definitions for the Hifive Unleashed."""
from adafruit_blinka.microcontroller.hfu540 import pin
GPIO_A = pin.GPIO0
GPIO_B = pin.GPIO1
GPIO_C = pin.GPIO2
GPIO_D = pin.GPIO3
GPIO_E = pin.GPIO4
GPIO_F = pin.GPIO5
GPIO_G = pin.GPIO6
GPIO_H = pin.GPIO7
GPIO_I = pin.GPIO8
GPIO_J = pin.GPIO9
GPIO_K = pin.GPIO15
UART0_TXD = pin.UART0_TXD
UART0_RXD = pin.UART0_RXD
SPI0_SCLK = pin.SPI0_SCLK
SPI0_DIN = pin.SPI0_DIN
UART1_TXD = pin.UART1_TXD
SPI0_CS = pin.SPI0_CS
UART1_RXD = pin.UART1_RXD
SPI0_DOUT = pin.SPI0_DOUT
I2C0_SCL = pin.I2C0_SCL
I2C0_SDA = pin.I2C0_SDA
SDA = pin.I2C0_SDA
SCL = pin.I2C0_SCL
I2C0_SDA = pin.I2C0_SDA
I2C0_SCL = pin.I2C0_SCL
SCLK = pin.SPI0_SCLK
MOSI = pin.SPI0_DOUT
MISO = pin.SPI0_DIN
SPI_CS = pin.SPI0_CS
| 19.421053
| 54
| 0.769648
|
4a0405988e45b0c9c30d38323fc0614d9284d24b
| 7,302
|
py
|
Python
|
tanjun/errors.py
|
ashwinvin/Tanjun
|
e16e28a3be7b809762e2cdc583ae9fe9edf8a0ab
|
[
"BSD-3-Clause"
] | null | null | null |
tanjun/errors.py
|
ashwinvin/Tanjun
|
e16e28a3be7b809762e2cdc583ae9fe9edf8a0ab
|
[
"BSD-3-Clause"
] | null | null | null |
tanjun/errors.py
|
ashwinvin/Tanjun
|
e16e28a3be7b809762e2cdc583ae9fe9edf8a0ab
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# cython: language_level=3
# BSD 3-Clause License
#
# Copyright (c) 2020-2021, Faster Speeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""The errors and warnings raised within and by Tanjun."""
from __future__ import annotations
__all__: list[str] = [
"CommandError",
"ConversionError",
"HaltExecution",
"FailedCheck",
"MissingDependencyError",
"NotEnoughArgumentsError",
"TooManyArgumentsError",
"ParserError",
"TanjunError",
"TanjunWarning",
"StateWarning",
]
import typing
if typing.TYPE_CHECKING:
from collections import abc as collections
class TanjunError(Exception):
    """Base type of every error raised by Tanjun."""

    __slots__ = ()
class TanjunWarning(RuntimeWarning):
    """Base type of every warning raised by Tanjun."""

    __slots__ = ()
class HaltExecution(TanjunError):
    """Error raised while looking for a command in-order to end-execution early.

    For the most part this is raised during checks, in order to prevent other
    commands from being tried.
    """

    __slots__ = ()
class MissingDependencyError(TanjunError):
    """Error raised when a dependency couldn't be found."""

    __slots__ = ("message",)

    def __init__(self, message: str) -> None:
        # NOTE(review): `super().__init__` is never called here, so `err.args`
        # stays `()` and `str(err)` is empty — confirm this is intentional.
        self.message = message

    def __repr__(self) -> str:
        return f"{type(self).__name__} <{self.message}>"
class CommandError(TanjunError):
    """Error raised to end command execution.

    Parameters
    ----------
    message : str
        String message which will be sent as a response to the message that
        triggered the current command.

    Raises
    ------
    ValueError
        Raised when the message is over 2000 characters long or empty.
    """

    __slots__ = ("message",)

    # None or empty string == no response
    message: str
    """The response error message.

    If this is an empty string or `None` then this will silently end
    command execution otherwise Tanjun will try to send the string message in
    response.
    """

    def __init__(self, message: str, /) -> None:
        # Length is validated before emptiness, matching the documented contract.
        if len(message) > 2000:
            raise ValueError("Error message cannot be over 2_000 characters long.")
        if not message:
            raise ValueError("Response message must have at least 1 character.")
        self.message = message

    def __repr__(self) -> str:
        return f"{type(self).__name__} <{self.message}>"

    def __str__(self) -> str:
        return self.message or ""
# TODO: use this
class InvalidCheck(TanjunError, RuntimeError):  # TODO: or/and warning? # TODO: InvalidCheckError
    """Error raised as an assertion that a check will never pass in the current environment."""

    __slots__ = ()
class FailedCheck(TanjunError, RuntimeError):  # TODO: FailedCheckError
    """Error raised as an alternative to returning `False` in a check."""

    __slots__ = ()
class ParserError(TanjunError, ValueError):
    """Base error raised by a parser or parameter during parsing.

    .. note::
        Expected errors raised by the parser will subclass this error.

    Parameters
    ----------
    message : str
        String message for this error.
    parameter : typing.Optional[str]
        Name of the parameter which caused this error, should be `None` if not
        applicable.
    """

    __slots__ = ("message", "parameter")

    message: str
    """String message for this error.

    .. note::
        This may be used as a command response message.
    """

    parameter: typing.Optional[str]
    """Name of the parameter this was raised for.

    .. note::
        This will be `None` if it was raised while parsing the provided
        message content.
    """

    def __init__(self, message: str, parameter: typing.Optional[str], /) -> None:
        self.message = message
        self.parameter = parameter

    def __str__(self) -> str:
        return self.message
class ConversionError(ParserError):
    """Error raised by a parser parameter when it failed to convert a value.

    Parameters
    ----------
    parameter : str
        Name of the parameter this error was raised for.
    message : str
        String message for this error.
    errors : collections.abc.Iterable[ValueError]
        An iterable of the source value errors which were raised during conversion.
    """

    __slots__ = ("errors",)

    errors: collections.Sequence[ValueError]
    """Sequence of the errors that were caught during conversion for this parameter."""

    parameter: str
    """Name of the parameter this error was raised for."""

    def __init__(self, parameter: str, message: str, /, errors: collections.Iterable[ValueError] = ()) -> None:
        super().__init__(message, parameter)
        # Snapshot into a tuple so the stored errors are immutable.
        self.errors = tuple(errors)
class NotEnoughArgumentsError(ParserError):
    """Error raised by the parser when not enough arguments are found for a parameter.

    Parameters
    ----------
    message : str
        String message for this error.
    parameter : str
        Name of the parameter this error was raised for.
    """

    __slots__ = ()

    parameter: str
    """Name of the parameter this error was raised for."""

    def __init__(self, message: str, parameter: str, /) -> None:
        super().__init__(message, parameter)
class TooManyArgumentsError(ParserError):
    """Error raised by the parser when too many arguments are found for a parameter.

    Parameters
    ----------
    message : str
        String message for this error.
    parameter : str
        Name of the parameter this error was raised for.
    """

    __slots__ = ()

    parameter: str
    """Name of the parameter this error was raised for."""

    def __init__(self, message: str, parameter: str, /) -> None:
        super().__init__(message, parameter)
class StateWarning(RuntimeWarning):
    """Warning raised when a utility is loaded without access to state stores it depends on."""

    # NOTE(review): unlike the module's other warning type this derives from
    # `RuntimeWarning` directly rather than `TanjunWarning` — confirm whether
    # that inconsistency is intentional.
    __slots__ = ()
| 29.443548
| 111
| 0.684196
|
4a0407d6c5f7e43e5ae23f9a4e5cb97b58c613aa
| 1,187
|
py
|
Python
|
setup.py
|
adamltyson/movement
|
0ced58511091b935ef6974c9ee258de81fb15da8
|
[
"MIT"
] | 1
|
2020-04-06T10:13:48.000Z
|
2020-04-06T10:13:48.000Z
|
setup.py
|
adamltyson/movement
|
0ced58511091b935ef6974c9ee258de81fb15da8
|
[
"MIT"
] | null | null | null |
setup.py
|
adamltyson/movement
|
0ced58511091b935ef6974c9ee258de81fb15da8
|
[
"MIT"
] | null | null | null |
# Packaging configuration for the `movement` package (setuptools based).
from setuptools import setup, find_namespace_packages
# Runtime dependencies; development-only tools live under `extras_require["dev"]`.
requirements = ["imlib", "numpy"]
setup(
name="movement",
version="0.0.4",
description="Movement analysis",
install_requires=requirements,
extras_require={
"dev": [
"sphinx",
"recommonmark",
"sphinx_rtd_theme",
"pydoc-markdown",
"black",
"pytest-cov",
"pytest",
"gitpython",
"coveralls",
"coverage<=4.5.4",
]
},
python_requires=">=3.6",
packages=find_namespace_packages(exclude=("docs", "tests*")),
include_package_data=True,
url="https://github.com/adamltyson/movement",
author="Adam Tyson",
author_email="adam.tyson@ucl.ac.uk",
classifiers=[
"Development Status :: 3 - Alpha",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
],
zip_safe=False,
)
| 28.261905
| 65
| 0.561078
|
4a040847485cd63a4c2f1efcd50554a308d79b25
| 217
|
py
|
Python
|
104_manejoCadenas/metodoIsspace.py
|
josuerojasq/netacad_python
|
510c3a026f83e499144d91c00edc5045a8304a08
|
[
"MIT"
] | null | null | null |
104_manejoCadenas/metodoIsspace.py
|
josuerojasq/netacad_python
|
510c3a026f83e499144d91c00edc5045a8304a08
|
[
"MIT"
] | null | null | null |
104_manejoCadenas/metodoIsspace.py
|
josuerojasq/netacad_python
|
510c3a026f83e499144d91c00edc5045a8304a08
|
[
"MIT"
] | null | null | null |
# The "isspace()" method identifies whitespace-only strings; if the string
# contains any other character (or is empty) the result is False.
print(' \n '.isspace())
print(" ".isspace())
print("mooo mooo mooo".isspace())
| 43.4
| 75
| 0.728111
|
4a0408d8a3bf38d6d0f548e50ddd5044eab47a04
| 260
|
py
|
Python
|
rentcar/rentcar/doctype/dafatar_pesananan/dafatar_pesananan.py
|
Anelka-Lazuardi/rentcar
|
4859ae046edc377ba5094102aab426e03c54f754
|
[
"MIT"
] | null | null | null |
rentcar/rentcar/doctype/dafatar_pesananan/dafatar_pesananan.py
|
Anelka-Lazuardi/rentcar
|
4859ae046edc377ba5094102aab426e03c54f754
|
[
"MIT"
] | null | null | null |
rentcar/rentcar/doctype/dafatar_pesananan/dafatar_pesananan.py
|
Anelka-Lazuardi/rentcar
|
4859ae046edc377ba5094102aab426e03c54f754
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, MIDB and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
# Frappe DocType controller for "Dafatar Pesananan" (order list);
# no behaviour beyond the framework defaults yet.
class DafatarPesananan(Document):
pass
| 23.636364
| 49
| 0.773077
|
4a0409b0c778315a481851abf8f74d98a770857c
| 5,630
|
py
|
Python
|
AutomatedTesting/Gem/PythonTests/Atom/TestSuite_Main.py
|
TheKeaver/o3de
|
3791149c6bb18d007ee375f592bdd031871f793d
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-07-15T14:22:36.000Z
|
2021-07-15T14:22:36.000Z
|
AutomatedTesting/Gem/PythonTests/Atom/TestSuite_Main.py
|
TheKeaver/o3de
|
3791149c6bb18d007ee375f592bdd031871f793d
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
AutomatedTesting/Gem/PythonTests/Atom/TestSuite_Main.py
|
TheKeaver/o3de
|
3791149c6bb18d007ee375f592bdd031871f793d
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import pytest
from ly_test_tools.o3de.editor_test import EditorSharedTest, EditorTestSuite
@pytest.mark.parametrize("project", ["AutomatedTesting"])
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
class TestAutomation(EditorTestSuite):
@pytest.mark.test_case_id("C36525657")
class AtomEditorComponents_BloomAdded(EditorSharedTest):
from Atom.tests import hydra_AtomEditorComponents_BloomAdded as test_module
@pytest.mark.test_case_id("C32078118")
class AtomEditorComponents_DecalAdded(EditorSharedTest):
from Atom.tests import hydra_AtomEditorComponents_DecalAdded as test_module
# NOTE(review): each class below registers one shared-editor test: the pytest
# marker records the TestRail case id, and the class-level import binds the
# hydra test module that the EditorSharedTest base class (defined elsewhere in
# this file) presumably discovers and runs — confirm against the framework.
@pytest.mark.test_case_id("C36525658")
class AtomEditorComponents_DeferredFogAdded(EditorSharedTest):
    from Atom.tests import hydra_AtomEditorComponents_DeferredFogAdded as test_module

@pytest.mark.test_case_id("C32078119")
class AtomEditorComponents_DepthOfFieldAdded(EditorSharedTest):
    from Atom.tests import hydra_AtomEditorComponents_DepthOfFieldAdded as test_module

@pytest.mark.test_case_id("C36525659")
class AtomEditorComponents_DiffuseProbeGridAdded(EditorSharedTest):
    from Atom.tests import hydra_AtomEditorComponents_DiffuseProbeGridAdded as test_module

@pytest.mark.test_case_id("C32078120")
class AtomEditorComponents_DirectionalLightAdded(EditorSharedTest):
    from Atom.tests import hydra_AtomEditorComponents_DirectionalLightAdded as test_module

@pytest.mark.test_case_id("C36525660")
class AtomEditorComponents_DisplayMapperAdded(EditorSharedTest):
    from Atom.tests import hydra_AtomEditorComponents_DisplayMapperAdded as test_module

@pytest.mark.test_case_id("C36525661")
class AtomEditorComponents_EntityReferenceAdded(EditorSharedTest):
    from Atom.tests import hydra_AtomEditorComponents_EntityReferenceAdded as test_module

@pytest.mark.test_case_id("C32078121")
class AtomEditorComponents_ExposureControlAdded(EditorSharedTest):
    from Atom.tests import hydra_AtomEditorComponents_ExposureControlAdded as test_module

@pytest.mark.test_case_id("C32078115")
class AtomEditorComponents_GlobalSkylightIBLAdded(EditorSharedTest):
    from Atom.tests import hydra_AtomEditorComponents_GlobalSkylightIBLAdded as test_module

@pytest.mark.test_case_id("C32078122")
class AtomEditorComponents_GridAdded(EditorSharedTest):
    from Atom.tests import hydra_AtomEditorComponents_GridAdded as test_module

@pytest.mark.test_case_id("C36525671")
class AtomEditorComponents_HDRColorGradingAdded(EditorSharedTest):
    from Atom.tests import hydra_AtomEditorComponents_HDRColorGradingAdded as test_module

@pytest.mark.test_case_id("C32078117")
class AtomEditorComponents_LightAdded(EditorSharedTest):
    from Atom.tests import hydra_AtomEditorComponents_LightAdded as test_module

@pytest.mark.test_case_id("C36525662")
class AtomEditorComponents_LookModificationAdded(EditorSharedTest):
    from Atom.tests import hydra_AtomEditorComponents_LookModificationAdded as test_module

@pytest.mark.test_case_id("C32078123")
class AtomEditorComponents_MaterialAdded(EditorSharedTest):
    from Atom.tests import hydra_AtomEditorComponents_MaterialAdded as test_module

@pytest.mark.test_case_id("C32078124")
class AtomEditorComponents_MeshAdded(EditorSharedTest):
    from Atom.tests import hydra_AtomEditorComponents_MeshAdded as test_module

@pytest.mark.test_case_id("C36525663")
class AtomEditorComponents_OcclusionCullingPlaneAdded(EditorSharedTest):
    from Atom.tests import hydra_AtomEditorComponents_OcclusionCullingPlaneAdded as test_module

@pytest.mark.test_case_id("C32078125")
class AtomEditorComponents_PhysicalSkyAdded(EditorSharedTest):
    from Atom.tests import hydra_AtomEditorComponents_PhysicalSkyAdded as test_module

@pytest.mark.test_case_id("C36525664")
class AtomEditorComponents_PostFXGradientWeightModifierAdded(EditorSharedTest):
    from Atom.tests import hydra_AtomEditorComponents_PostFXGradientWeightModifierAdded as test_module

@pytest.mark.test_case_id("C32078127")
class AtomEditorComponents_PostFXLayerAdded(EditorSharedTest):
    from Atom.tests import hydra_AtomEditorComponents_PostFXLayerAdded as test_module

@pytest.mark.test_case_id("C32078131")
class AtomEditorComponents_PostFXRadiusWeightModifierAdded(EditorSharedTest):
    from Atom.tests import (
        hydra_AtomEditorComponents_PostFXRadiusWeightModifierAdded as test_module)

@pytest.mark.test_case_id("C36525665")
class AtomEditorComponents_PostFXShapeWeightModifierAdded(EditorSharedTest):
    # NOTE(review): module name uses "PostFx" (lower-case x) unlike the class —
    # kept as-is since it must match the module on disk.
    from Atom.tests import hydra_AtomEditorComponents_PostFxShapeWeightModifierAdded as test_module

@pytest.mark.test_case_id("C32078128")
class AtomEditorComponents_ReflectionProbeAdded(EditorSharedTest):
    from Atom.tests import hydra_AtomEditorComponents_ReflectionProbeAdded as test_module

@pytest.mark.test_case_id("C36525666")
class AtomEditorComponents_SSAOAdded(EditorSharedTest):
    from Atom.tests import hydra_AtomEditorComponents_SSAOAdded as test_module

class ShaderAssetBuilder_RecompilesShaderAsChainOfDependenciesChanges(EditorSharedTest):
    from Atom.tests import hydra_ShaderAssetBuilder_RecompilesShaderAsChainOfDependenciesChanges as test_module
| 48.956522
| 115
| 0.817229
|
4a040a660fcfffc519b91fba781077cdeec73c35
| 13,579
|
py
|
Python
|
python+numpy.py
|
dearli123/awesome-DeepLearning
|
ed7423074582acfb22617d32507e167fb2193cd8
|
[
"Apache-2.0"
] | null | null | null |
python+numpy.py
|
dearli123/awesome-DeepLearning
|
ed7423074582acfb22617d32507e167fb2193cd8
|
[
"Apache-2.0"
] | null | null | null |
python+numpy.py
|
dearli123/awesome-DeepLearning
|
ed7423074582acfb22617d32507e167fb2193cd8
|
[
"Apache-2.0"
] | null | null | null |
Python 3.9.2 (tags/v3.9.2:1a79785, Feb 19 2021, 13:44:55) [MSC v.1928 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> # 导入需要用到的package
import numpy as np
import json
# Load the training data (whitespace-separated flat text file).
datafile = './work/housing.data'
data = np.fromfile(datafile, sep=' ')
data
# After loading, the data is a flat 1-D array: items 0-13 form the first
# record, items 14-27 the second, and so on.
# Reshape the raw array into N x 14.
feature_names = [ 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE','DIS',
                 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV' ]
feature_num = len(feature_names)
data = data.reshape([data.shape[0] // feature_num, feature_num])
# Inspect one record.
x = data[0]
print(x.shape)
print(x)
ratio = 0.8
offset = int(data.shape[0] * ratio)
training_data = data[:offset]
training_data.shape
# Per-feature max, min and mean over the training split.
maximums, minimums, avgs = \
    training_data.max(axis=0), \
    training_data.min(axis=0), \
    training_data.sum(axis=0) / training_data.shape[0]
# Min-max normalize every column in place (statistics from the training split).
for i in range(feature_num):
    #print(maximums[i], minimums[i], avgs[i])
    data[:, i] = (data[:, i] - minimums[i]) / (maximums[i] - minimums[i])
def load_data():
    """Load the Boston housing data, min-max normalize every column using
    training-split statistics, and return (training_data, test_data), each an
    N x 14 array whose last column is the MEDV target."""
    # Read the whitespace-separated flat file into a 1-D array.
    raw = np.fromfile('./work/housing.data', sep=' ')

    # Each record holds 13 feature columns plus the MEDV target.
    column_names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE',
                    'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
    n_columns = len(column_names)
    samples = raw.reshape([raw.shape[0] // n_columns, n_columns])

    # 80/20 train/test split; the two sets must be disjoint, and the
    # normalization statistics come from the training part only.
    split_at = int(samples.shape[0] * 0.8)
    train_part = samples[:split_at]
    col_max = train_part.max(axis=0)
    col_min = train_part.min(axis=0)
    col_avg = train_part.sum(axis=0) / train_part.shape[0]

    # Min-max normalize every column in place.
    for col in range(n_columns):
        samples[:, col] = (samples[:, col] - col_min[col]) / (col_max[col] - col_min[col])

    return samples[:split_at], samples[split_at:]
# Fetch the data and split features (first 13 columns) from the MEDV target.
training_data, test_data = load_data()
x = training_data[:, :-1]
y = training_data[:, -1:]
# Hand-picked weight vector for a first forward-pass sanity check.
w = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, -0.1, -0.2, -0.3, -0.4, 0.0]
w = np.array(w).reshape([13, 1])
class Network(object):
    """Minimal linear regression model: z = x @ w + b with MSE loss."""

    def __init__(self, num_of_weights):
        # Fixed seed so every run starts from the same random weights.
        np.random.seed(0)
        self.w = np.random.randn(num_of_weights, 1)
        self.b = 0.

    def forward(self, x):
        """Return the linear prediction x @ w + b."""
        return np.dot(x, self.w) + self.b

    def loss(self, z, y):
        """Mean squared error between predictions z and targets y."""
        residual = z - y
        return np.mean(residual * residual)
net = Network(13)
# A whole batch of samples can be pushed through forward/loss at once.
x1 = x[0:3]
y1 = y[0:3]
z = net.forward(x1)
print('predict: ', z)
loss = net.loss(z, y1)
print('loss:', loss)
net = Network(13)
losses = []
# Only plot the slice of the loss surface where w5 and w9 lie in
# [-160, 160], which contains the loss extremum.
w5 = np.arange(-160.0, 160.0, 1.0)
w9 = np.arange(-160.0, 160.0, 1.0)
losses = np.zeros([len(w5), len(w9)])
# Evaluate the loss at every (w5, w9) grid point.
for i in range(len(w5)):
    for j in range(len(w9)):
        net.w[5] = w5[i]
        net.w[9] = w9[j]
        z = net.forward(x)
        loss = net.loss(z, y)
        losses[i, j] = loss
# Plot the two parameters against the loss as a 3-D surface with matplotlib.
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = Axes3D(fig)
w5, w9 = np.meshgrid(w5, w9)
ax.plot_surface(w5, w9, losses, rstride=1, cstride=1, cmap='rainbow')
plt.show()
# Per-sample gradient of the squared error w.r.t. individual weights.
x1 = x[0]
y1 = y[0]
z1 = net.forward(x1)
print('x1 {}, shape {}'.format(x1, x1.shape))
print('y1 {}, shape {}'.format(y1, y1.shape))
print('z1 {}, shape {}'.format(z1, z1.shape))
gradient_w0 = (z1 - y1) * x1[0]
print('gradient_w0 {}'.format(gradient_w0))
gradient_w1 = (z1 - y1) * x1[1]
print('gradient_w1 {}'.format(gradient_w1))
gradient_w2= (z1 - y1) * x1[2]
# NOTE(review): the print below labels gradient_w2 as 'gradient_w1' — looks
# like a copy-paste slip in the original tutorial transcript.
print('gradient_w1 {}'.format(gradient_w2))
# Vectorized: the gradient w.r.t. all 13 weights for one sample at a time.
gradient_w = (z1 - y1) * x1
print('gradient_w_by_sample1 {}, gradient.shape {}'.format(gradient_w, gradient_w.shape))
x2 = x[1]
y2 = y[1]
z2 = net.forward(x2)
gradient_w = (z2 - y2) * x2
print('gradient_w_by_sample2 {}, gradient.shape {}'.format(gradient_w, gradient_w.shape))
x3 = x[2]
y3 = y[2]
z3 = net.forward(x3)
gradient_w = (z3 - y3) * x3
print('gradient_w_by_sample3 {}, gradient.shape {}'.format(gradient_w, gradient_w.shape))
# Note: this takes the first three samples at once, not the third sample.
x3samples = x[0:3]
y3samples = y[0:3]
z3samples = net.forward(x3samples)
print('x {}, shape {}'.format(x3samples, x3samples.shape))
print('y {}, shape {}'.format(y3samples, y3samples.shape))
print('z {}, shape {}'.format(z3samples, z3samples.shape))
gradient_w = (z3samples - y3samples) * x3samples
print('gradient_w {}, gradient.shape {}'.format(gradient_w, gradient_w.shape))
z = net.forward(x)
gradient_w = (z - y) * x
print('gradient_w shape {}'.format(gradient_w.shape))
print(gradient_w)
# axis=0 sums down the rows then divides by the row count (mean over samples).
gradient_w = np.mean(gradient_w, axis=0)
print('gradient_w ', gradient_w.shape)
print('w ', net.w.shape)
print(gradient_w)
print(net.w)
gradient_w = gradient_w[:, np.newaxis]
print('gradient_w shape', gradient_w.shape)
z = net.forward(x)
gradient_w = (z - y) * x
gradient_w = np.mean(gradient_w, axis=0)
gradient_w = gradient_w[:, np.newaxis]
gradient_w
gradient_b = (z - y)
gradient_b = np.mean(gradient_b)
# b is a scalar, so np.mean directly yields the scalar gradient.
gradient_b
class Network(object):
    """Linear regressor with analytic MSE gradients for w and b."""

    def __init__(self, num_of_weights):
        # Fixed seed keeps the random initial weights reproducible.
        np.random.seed(0)
        self.w = np.random.randn(num_of_weights, 1)
        self.b = 0.

    def forward(self, x):
        """Linear prediction x @ w + b."""
        return np.dot(x, self.w) + self.b

    def loss(self, z, y):
        """Mean squared error of predictions z against targets y."""
        diff = z - y
        return np.sum(diff * diff) / diff.shape[0]

    def gradient(self, x, y):
        """Return (dL/dw, dL/db) of the MSE loss at the current parameters."""
        residual = self.forward(x) - y
        grad_w = np.mean(residual * x, axis=0)[:, np.newaxis]
        grad_b = np.mean(residual)
        return grad_w, grad_b
# Use the gradient() method defined above to take explicit descent steps.
# Initialize the network.
net = Network(13)
# Set [w5, w9] = [-100., -100.]
net.w[5] = -100.0
net.w[9] = -100.0
z = net.forward(x)
loss = net.loss(z, y)
gradient_w, gradient_b = net.gradient(x, y)
gradient_w5 = gradient_w[5][0]
gradient_w9 = gradient_w[9][0]
print('point {}, loss {}'.format([net.w[5][0], net.w[9][0]], loss))
print('gradient {}'.format([gradient_w5, gradient_w9]))
# Move to the next point P1 in the (w5, w9) plane, opposite to the gradient.
# Step size eta.
eta = 0.1
# Update parameters w5 and w9.
net.w[5] = net.w[5] - eta * gradient_w5
net.w[9] = net.w[9] - eta * gradient_w9
# Recompute z and the loss at the new point.
z = net.forward(x)
loss = net.loss(z, y)
gradient_w, gradient_b = net.gradient(x, y)
gradient_w5 = gradient_w[5][0]
gradient_w9 = gradient_w[9][0]
print('point {}, loss {}'.format([net.w[5][0], net.w[9][0]], loss))
print('gradient {}'.format([gradient_w5, gradient_w9]))
class Network(object):
    """Linear regressor whose train() descends only on (w5, w9), so the
    descent path can be plotted over the 2-D loss surface."""

    def __init__(self, num_of_weights):
        # Fixed seed for reproducible runs; w5/w9 start at -100 so the
        # descent path is long enough to visualize.
        np.random.seed(0)
        self.w = np.random.randn(num_of_weights, 1)
        self.w[5] = -100.
        self.w[9] = -100.
        self.b = 0.

    def forward(self, x):
        """Linear prediction x @ w + b."""
        z = np.dot(x, self.w) + self.b
        return z

    def loss(self, z, y):
        """Mean squared error of predictions z against targets y."""
        error = z - y
        num_samples = error.shape[0]
        cost = error * error
        cost = np.sum(cost) / num_samples
        return cost

    def gradient(self, x, y):
        """Analytic MSE gradients (dL/dw, dL/db)."""
        z = self.forward(x)
        gradient_w = (z - y) * x
        gradient_w = np.mean(gradient_w, axis=0)
        gradient_w = gradient_w[:, np.newaxis]
        gradient_b = np.mean(z - y)
        return gradient_w, gradient_b

    def update(self, gradient_w5, gradient_w9, eta=0.01):
        """One descent step of size eta on w5 and w9 only.

        BUG FIX: the original mutated the module-level global ``net`` instead
        of ``self``, so update()/train() only ever worked on that one global
        instance (and crashed if it did not exist).
        """
        self.w[5] = self.w[5] - eta * gradient_w5
        self.w[9] = self.w[9] - eta * gradient_w9

    def train(self, x, y, iterations=100, eta=0.01):
        """Gradient-descend w5/w9; return (visited points, per-step losses)."""
        points = []
        losses = []
        for i in range(iterations):
            # Record the point *before* this iteration's update.
            points.append([self.w[5][0], self.w[9][0]])
            z = self.forward(x)
            L = self.loss(z, y)
            gradient_w, gradient_b = self.gradient(x, y)
            gradient_w5 = gradient_w[5][0]
            gradient_w9 = gradient_w[9][0]
            self.update(gradient_w5, gradient_w9, eta)
            losses.append(L)
            if i % 50 == 0:
                print('iter {}, point {}, loss {}'.format(i, [self.w[5][0], self.w[9][0]], L))
        return points, losses
# Fetch the data.
train_data, test_data = load_data()
x = train_data[:, :-1]
y = train_data[:, -1:]
# Build the network.
net = Network(13)
num_iterations=2000
# Kick off training (w5/w9-only descent).
points, losses = net.train(x, y, iterations=num_iterations, eta=0.01)
# Plot how the loss evolves over the iterations.
plot_x = np.arange(num_iterations)
plot_y = np.array(losses)
plt.plot(plot_x, plot_y)
plt.show()
class Network(object):
    """Linear regressor trained by full-batch gradient descent on all weights."""

    def __init__(self, num_of_weights):
        # Fixed seed so the random initial weights are reproducible.
        np.random.seed(0)
        self.w = np.random.randn(num_of_weights, 1)
        self.b = 0.

    def forward(self, x):
        """Linear prediction x @ w + b."""
        return np.dot(x, self.w) + self.b

    def loss(self, z, y):
        """Mean squared error of predictions z against targets y."""
        diff = z - y
        return np.sum(diff * diff) / diff.shape[0]

    def gradient(self, x, y):
        """Analytic MSE gradients (dL/dw, dL/db) at the current parameters."""
        residual = self.forward(x) - y
        grad_w = np.mean(residual * x, axis=0)[:, np.newaxis]
        grad_b = np.mean(residual)
        return grad_w, grad_b

    def update(self, gradient_w, gradient_b, eta = 0.01):
        """Take one gradient-descent step of size eta on every parameter."""
        self.w = self.w - eta * gradient_w
        self.b = self.b - eta * gradient_b

    def train(self, x, y, iterations=100, eta=0.01):
        """Run full-batch gradient descent; return the per-iteration losses."""
        history = []
        for step in range(iterations):
            preds = self.forward(x)
            current = self.loss(preds, y)
            grad_w, grad_b = self.gradient(x, y)
            self.update(grad_w, grad_b, eta)
            history.append(current)
            # Progress report every 10 iterations.
            if (step + 1) % 10 == 0:
                print('iter {}, loss {}'.format(step, current))
        return history
# Fetch the data.
train_data, test_data = load_data()
x = train_data[:, :-1]
y = train_data[:, -1:]
# Build the network.
net = Network(13)
num_iterations=1000
# Kick off training (full-batch gradient descent).
losses = net.train(x,y, iterations=num_iterations, eta=0.01)
# Plot how the loss evolves over the iterations.
plot_x = np.arange(num_iterations)
plot_y = np.array(losses)
plt.plot(plot_x, plot_y)
plt.show()
import numpy as np
class Network(object):
    """Linear regressor trained with mini-batch SGD (re-shuffled each epoch)."""

    def __init__(self, num_of_weights):
        # Seed intentionally left unset here so each run draws fresh weights.
        #np.random.seed(0)
        self.w = np.random.randn(num_of_weights, 1)
        self.b = 0.

    def forward(self, x):
        """Linear prediction x @ w + b."""
        return np.dot(x, self.w) + self.b

    def loss(self, z, y):
        """Mean squared error of predictions z against targets y."""
        diff = z - y
        return np.sum(diff * diff) / diff.shape[0]

    def gradient(self, x, y):
        """Analytic MSE gradients (dL/dw, dL/db), averaged over the batch."""
        residual = self.forward(x) - y
        batch = x.shape[0]
        grad_w = (1. / batch * np.sum(residual * x, axis=0))[:, np.newaxis]
        grad_b = 1. / batch * np.sum(residual)
        return grad_w, grad_b

    def update(self, gradient_w, gradient_b, eta = 0.01):
        """One gradient-descent step of size eta on every parameter."""
        self.w = self.w - eta * gradient_w
        self.b = self.b - eta * gradient_b

    def train(self, training_data, num_epochs, batch_size=10, eta=0.01):
        """Mini-batch SGD over training_data (last column is the target).

        Shuffles the data in place at the start of each epoch, then sweeps it
        in batches of batch_size; returns the loss recorded after each batch.
        """
        n = len(training_data)
        history = []
        for epoch_id in range(num_epochs):
            # Re-shuffle so batches differ between epochs (mutates the array).
            np.random.shuffle(training_data)
            mini_batches = [training_data[start:start + batch_size]
                            for start in range(0, n, batch_size)]
            for iter_id, mini_batch in enumerate(mini_batches):
                features = mini_batch[:, :-1]
                targets = mini_batch[:, -1:]
                preds = self.forward(features)
                batch_loss = self.loss(preds, targets)
                grad_w, grad_b = self.gradient(features, targets)
                self.update(grad_w, grad_b, eta)
                history.append(batch_loss)
                print('Epoch {:3d} / iter {:3d}, loss = {:.4f}'.
                      format(epoch_id, iter_id, batch_loss))
        return history
# Fetch the data.
train_data, test_data = load_data()
# Build the network.
net = Network(13)
# Train with mini-batch SGD.
losses = net.train(train_data, num_epochs=50, batch_size=100, eta=0.1)
# Plot how the loss evolves over the mini-batch updates.
plot_x = np.arange(len(losses))
plot_y = np.array(losses)
plt.plot(plot_x, plot_y)
plt.show()
| 29.519565
| 95
| 0.571397
|
4a040ad5c60b39da414822113985c76574a1a69e
| 738
|
py
|
Python
|
tools/chrome_proxy/common/chrome_proxy_shared_page_state.py
|
google-ar/chromium
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
tools/chrome_proxy/common/chrome_proxy_shared_page_state.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
tools/chrome_proxy/common/chrome_proxy_shared_page_state.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 338
|
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page.shared_page_state import SharedPageState
class ChromeProxySharedPageState(SharedPageState):
  """Overrides SharedPageState to disable the replay service and forwarder."""

  def __init__(self, test, finder_options, story_set):
    super(ChromeProxySharedPageState, self).__init__(
        test, finder_options, story_set)
    network_controller = self.platform.network_controller
    # Stop WPR replay so page loads are not served from a recorded archive.
    network_controller.StopReplay()
    #TODO(bustamante): Implement/use a non-private way to stop the forwarder.
    # NOTE(review): reaches into a private backend attribute; may break when
    # telemetry's network-controller internals change.
    network_controller._network_controller_backend._StopForwarder()
| 38.842105
| 77
| 0.791328
|
4a040c3720ce94bc0e56e8f69ef907d7f642b7ab
| 711
|
py
|
Python
|
tests/test_urls.py
|
vetalpaprotsky/python-project-lvl3
|
b456b50181911de4a3e5bf6c17332c14b5e47565
|
[
"MIT"
] | null | null | null |
tests/test_urls.py
|
vetalpaprotsky/python-project-lvl3
|
b456b50181911de4a3e5bf6c17332c14b5e47565
|
[
"MIT"
] | null | null | null |
tests/test_urls.py
|
vetalpaprotsky/python-project-lvl3
|
b456b50181911de4a3e5bf6c17332c14b5e47565
|
[
"MIT"
] | 1
|
2021-02-01T18:01:10.000Z
|
2021-02-01T18:01:10.000Z
|
import pytest
from page_loader.urls import url_to_file_name
# Table-driven cases: the cases below show that the scheme and trailing slash
# are dropped, non-alphanumeric runs become '-', pages get an '.html' suffix,
# and existing asset extensions (.png, .js) are preserved.
@pytest.mark.parametrize(
    'url,expected',
    [
        ('http://test.com', 'test-com.html'),
        ('https://test.com', 'test-com.html'),
        ('//test.com', 'test-com.html'),
        ('http://test.com/', 'test-com.html'),
        ('http://test.com/page', 'test-com-page.html'),
        ('http://test.com/page.html', 'test-com-page.html'),
        ('http://test.com/image.png', 'test-com-image.png'),
        ('http://testing.1.2.3.com', 'testing-1-2-3-com.html'),
        ('http://test.com/scripts/main-js.js', 'test-com-scripts-main-js.js'),
    ]
)
def test_url_to_file_name(url, expected):
    """url_to_file_name turns a URL into a flat, filesystem-safe file name."""
    assert url_to_file_name(url) == expected
| 33.857143
| 78
| 0.587904
|
4a040c752b6855d9eb1f7ae0b18c784cac353717
| 1,933
|
py
|
Python
|
commands/findduplicatetracks.py
|
syphar/rekordboxcleanup
|
d8a7ec654517afc5bc9f20ef74bbf81e4f5bfad9
|
[
"MIT"
] | null | null | null |
commands/findduplicatetracks.py
|
syphar/rekordboxcleanup
|
d8a7ec654517afc5bc9f20ef74bbf81e4f5bfad9
|
[
"MIT"
] | 2
|
2021-03-31T19:27:42.000Z
|
2021-12-13T20:23:08.000Z
|
commands/findduplicatetracks.py
|
syphar/rekordboxcleanup
|
d8a7ec654517afc5bc9f20ef74bbf81e4f5bfad9
|
[
"MIT"
] | null | null | null |
import collections
import re
import click
from lxml import etree
from commands.common import create_playlist, write_xml
REMIX_PATTERNS = (
re.compile(r'^(.*)\((.*)remix\)$', re.IGNORECASE),
re.compile(r'^(.*)\((.*)mix\)$', re.IGNORECASE),
re.compile(r'^(.*)\-(.*)remix$', re.IGNORECASE),
re.compile(r'^(.*)\-(.*)mix$', re.IGNORECASE),
)
def _normalize_key(string):
return string.strip().lower()
def _split_song_title(title):
for p in REMIX_PATTERNS:
match = p.match(title)
if not match:
continue
return (
match.group(1),
match.group(2),
)
return (
title,
'original',
)
def _sort_artist(artist):
if ',' not in artist:
return artist
artists = artist.split(',')
return ','.join(
sorted([
a.strip()
for a in artists
])
)
@click.command()
@click.argument('rekordbox_xml')
@click.argument('destination_xml')
def findduplicatetracks(rekordbox_xml, destination_xml):
    """Collect tracks whose normalized (artist, title, mix) key repeats and
    write them into a "duplicate tracks" playlist in a copy of the XML."""
    et = etree.parse(rekordbox_xml)
    root = et.getroot()
    tracks = root.find('COLLECTION')
    # Count occurrences of each normalized key and remember the track ids
    # that share it.
    counter = collections.Counter()
    tracks_by_key = collections.defaultdict(list)
    for track in tracks.findall('TRACK'):
        name, mix = _split_song_title(track.attrib['Name'])
        artist = _sort_artist(track.attrib['Artist'])
        key = (
            _normalize_key(artist),
            _normalize_key(name),
            _normalize_key(mix),
        )
        counter[key] = counter[key] + 1
        tracks_by_key[key].append(int(track.attrib['TrackID']))
    # Keys seen more than once are duplicates; gather every id in the group.
    duplicate_track_ids = []
    for key, count in counter.items():
        if count <= 1:
            continue
        duplicate_track_ids.extend(tracks_by_key[key])
    create_playlist(
        et,
        "duplicate tracks",
        duplicate_track_ids,
    )
    write_xml(et, destination_xml)
| 21.241758
| 63
| 0.58717
|
4a040d18b8dff85068de7dae00fd337e0ab76d5e
| 2,834
|
py
|
Python
|
mlfinlab/__init__.py
|
nautiism/mlfinlab
|
b9beb1500c1938591eff7cfae2cdc4c3aa783f54
|
[
"BSD-3-Clause"
] | null | null | null |
mlfinlab/__init__.py
|
nautiism/mlfinlab
|
b9beb1500c1938591eff7cfae2cdc4c3aa783f54
|
[
"BSD-3-Clause"
] | null | null | null |
mlfinlab/__init__.py
|
nautiism/mlfinlab
|
b9beb1500c1938591eff7cfae2cdc4c3aa783f54
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Package based on the text book: Advances in Financial Machine Learning, by Marcos Lopez de Prado
"""
import webbrowser
import textwrap
import mlfinlab.cross_validation as cross_validation
import mlfinlab.data_structures as data_structures
import mlfinlab.multi_product as multi_product
import mlfinlab.filters.filters as filters
import mlfinlab.labeling.labeling as labeling
import mlfinlab.features.fracdiff as fracdiff
import mlfinlab.sample_weights as sample_weights
import mlfinlab.sampling as sampling
import mlfinlab.bet_sizing as bet_sizing
import mlfinlab.util as util
import mlfinlab.structural_breaks as structural_breaks
import mlfinlab.feature_importance as feature_importance
import mlfinlab.ensemble as ensemble
import mlfinlab.portfolio_optimization as portfolio_optimization
import mlfinlab.clustering as clustering
import mlfinlab.backtest_statistics.statistics as backtest_statistics
# Sponsorship notification
# try:
# webbrowser.get('google-chrome').open('https://www.patreon.com/HudsonThames', new=2)
# except webbrowser.Error as error:
# try:
# webbrowser.get('firefox').open('https://www.patreon.com/HudsonThames', new=2)
# except webbrowser.Error as error:
# try:
# webbrowser.get('windows-default').open('https://www.patreon.com/HudsonThames', new=2)
# except webbrowser.Error as error:
# pass
# Sponsorship banner printed at import time (the browser pop-up variant above
# is intentionally commented out).
print()
print()
print(textwrap.dedent("""\
Support us on Patreon: https://www.patreon.com/HudsonThames
MlFinLab needs you! We need your help for us to keep on maintaining and implementing academic research based on
financial machine learning (for open-source). In order for us to continue we need to raise $4000 of monthly donations
via Patreon - by December 2020. If we can't reach our goal, we will need to adopt more of a paid for service. We thought
that the best and least impactful course of action (should we not reach our goal) is to leave the package as open-source
but to make the documentation (ReadTheDocs) a paid for service. This is the ultimate litmus test, if the package is a
value add, then we need the community to help us keep it going.
Our road map for 2020 is to implement the text book: Machine Learning for Asset Managers by Marcos Lopez de Prado,
as well as a few papers from the Journal of Financial Data Science. We are hiring a full time developer for 3 months
to help us reach our goals. The money that you, our sponsors, contribute will go directly to paying salaries and other
expenses such as journal subscriptions and data.
We need your help to continue maintaining and developing this community. Thank you for using our package and we
invite you to join our slack channel using the following link:
https://join.slack.com/t/mlfinlab/shared_invite/zt-c62u9gpz-VFc13j6da~UVg3DkV7~RjQ
"""))
print()
print()
| 47.233333
| 120
| 0.787932
|
4a040d25650285c3a6eb8ab3902a403f887bf38d
| 2,893
|
py
|
Python
|
scripts/startx.py
|
mengdi-li/robotic-occlusion-reasoning
|
741072a7f454c308ae034716cd58ab45038e65af
|
[
"MIT"
] | 5
|
2021-11-25T13:18:14.000Z
|
2021-12-21T13:10:45.000Z
|
scripts/startx.py
|
mengdi-li/robotic-occlusion-reasoning
|
741072a7f454c308ae034716cd58ab45038e65af
|
[
"MIT"
] | null | null | null |
scripts/startx.py
|
mengdi-li/robotic-occlusion-reasoning
|
741072a7f454c308ae034716cd58ab45038e65af
|
[
"MIT"
] | null | null | null |
# from https://github.com/allenai/allenact/blob/main/scripts/startx.py
import atexit
import os
import platform
import re
import shlex
import subprocess
import tempfile
# Turning off automatic black formatting for this script as it breaks quotes.
# fmt: off
def pci_records():
    """Parse ``lspci -vmm`` output into one {field: value} dict per device."""
    output = subprocess.check_output(shlex.split("lspci -vmm")).decode()
    parsed = []
    # Devices are separated by blank lines; each field row is "Key:\tValue".
    for device_block in output.strip().split("\n\n"):
        record = {}
        for row in device_block.split("\n"):
            key, value = row.split("\t")
            record[key.split(":")[0]] = value
        parsed.append(record)
    return parsed
def generate_xorg_conf(devices):
    """Render an xorg.conf covering every NVIDIA bus id in *devices*: one
    Device and one Screen section each, plus a ServerLayout tying the
    screens together."""
    device_tpl = """
Section "Device"
Identifier "Device{device_id}"
Driver "nvidia"
VendorName "NVIDIA Corporation"
BusID "{bus_id}"
EndSection
"""
    layout_tpl = """
Section "ServerLayout"
Identifier "Layout0"
{screen_records}
EndSection
"""
    screen_tpl = """
Section "Screen"
Identifier "Screen{screen_id}"
Device "Device{device_id}"
DefaultDepth 24
Option "AllowEmptyInitialConfiguration" "True"
SubSection "Display"
Depth 24
Virtual 1024 768
EndSubSection
EndSection
"""
    sections = []
    layout_refs = []
    for idx, bus_id in enumerate(devices):
        sections.append(device_tpl.format(device_id=idx, bus_id=bus_id))
        sections.append(screen_tpl.format(device_id=idx, screen_id=idx))
        layout_refs.append('Screen {screen_id} "Screen{screen_id}" 0 0'.format(screen_id=idx))
    sections.append(layout_tpl.format(screen_records="\n ".join(layout_refs)))
    return "\n".join(sections)
def startx(display=0):
    """Start an X server on *display* spanning every NVIDIA GPU found via lspci.

    Blocks until the Xorg process exits; the generated xorg.conf lives in a
    temp file that is removed on the way out.

    Raises:
        Exception: when not running on Linux or when no NVIDIA cards are found.
    """
    if platform.system() != "Linux":
        raise Exception("Can only run startx on linux")
    # Collect the PCI bus ids of all NVIDIA display-class devices, converted
    # from hex "Slot" notation to the decimal "PCI:bus:device:function" form
    # that Xorg expects.
    devices = []
    for r in pci_records():
        if r.get("Vendor", "") == "NVIDIA Corporation"\
                and r["Class"] in ["VGA compatible controller", "3D controller"]:
            bus_id = "PCI:" + ":".join(map(lambda x: str(int(x, 16)), re.split(r"[:\.]", r["Slot"])))
            devices.append(bus_id)

    if not devices:
        raise Exception("no nvidia cards found")

    # BUG FIX: create the temp file *before* entering the try block. The
    # original called mkstemp() inside the try, so a failure there would have
    # raised NameError on fd/path in the finally clause.
    fd, path = tempfile.mkstemp()
    try:
        with open(path, "w") as f:
            f.write(generate_xorg_conf(devices))
        command = shlex.split("Xorg -noreset +extension GLX +extension RANDR +extension RENDER -config %s :%s" % (path, display))
        proc = subprocess.Popen(command)
        # Kill the X server if the interpreter exits while it is still running.
        atexit.register(lambda: proc.poll() is None and proc.kill())
        proc.wait()
    finally:
        os.close(fd)
        os.unlink(path)
# fmt: on
if __name__ == "__main__":
display = 1 # X11 display number. Display sockets are stored at "/tmp/.X11-unix/"
startx(display)
| 28.362745
| 129
| 0.625994
|
4a040e6e994952854e60307ff2148cb2002baa4b
| 1,092
|
py
|
Python
|
TBC/types/Table.py
|
rackerlabs/trial-by-combat
|
36f81e4a13c9497d91d62f47405151f00a6c75b7
|
[
"Apache-2.0"
] | 2
|
2016-08-08T19:43:40.000Z
|
2016-12-26T23:00:35.000Z
|
TBC/types/Table.py
|
rackerlabs/trial-by-combat
|
36f81e4a13c9497d91d62f47405151f00a6c75b7
|
[
"Apache-2.0"
] | null | null | null |
TBC/types/Table.py
|
rackerlabs/trial-by-combat
|
36f81e4a13c9497d91d62f47405151f00a6c75b7
|
[
"Apache-2.0"
] | null | null | null |
class Table(object):
    """
    Represents a SQL table
    """

    def __init__(self, name, columns=()):
        """
        :param name: the name of the table
        :param columns: columns to add to the table. Columns may
            optionally be added later using the add_column() function
        """
        self.name = name
        self.columns = []
        for col in columns:
            self.add_column(col)

    def add_column(self, column):
        """Attach *column* to this table (sets column.table) and record it."""
        column.table = self
        self.columns.append(column)

    def __getitem__(self, column_name):
        """
        Get a column by name

        :param column_name: the name of the column to get
        :return: a Column
        :raises KeyError: when no column with that name exists
        """
        for col in self.columns:
            if col.name == column_name:
                return col
        raise KeyError('Column ' + column_name + ' is not in table ' + self.name)

    def __str__(self):
        # One header line, then one tab-indented line per column.
        parts = ['<table ' + self.name + '>\n']
        for col in self.columns:
            parts.append('\t' + str(col))
            parts.append('\n')
        return ''.join(parts)
| 27.3
| 81
| 0.545788
|
4a040eb1f43a1c23de28be6363857e73c35468ce
| 176
|
py
|
Python
|
ExercíciosDoModulo1dePythonColoridos/ex014.py
|
BossNX/ExerciciosDePython
|
27c79d284794f65f94d3a07de11429d665ec92da
|
[
"MIT"
] | null | null | null |
ExercíciosDoModulo1dePythonColoridos/ex014.py
|
BossNX/ExerciciosDePython
|
27c79d284794f65f94d3a07de11429d665ec92da
|
[
"MIT"
] | null | null | null |
ExercíciosDoModulo1dePythonColoridos/ex014.py
|
BossNX/ExerciciosDePython
|
27c79d284794f65f94d3a07de11429d665ec92da
|
[
"MIT"
] | null | null | null |
# Read the current temperature in Celsius and print the Fahrenheit
# equivalent; the ANSI escape codes colorize and underline both values.
c = float(input('Qual é a temperatura atual em °C? '))
f = c * 9 / 5 + 32
print('A temperatura \033[34;4m{:.1f}°C\033[m é equivalente à \033[31;4m{:.1f}°F\033[m'.format(c, f))
| 44
| 101
| 0.613636
|
4a040f462ded206feb330b9aa56e3ee054b5f122
| 1,203
|
py
|
Python
|
examples/hopv/hopv_graph_conv.py
|
hl2500/deepchem
|
09ed9c04110eb822c2d6c9be61c27da4939896f6
|
[
"MIT"
] | 2
|
2021-04-01T01:17:53.000Z
|
2021-10-04T16:46:13.000Z
|
examples/hopv/hopv_graph_conv.py
|
evenhe/deepchem
|
9d0fc5554b286117ae08b21b3f15877b06a1009e
|
[
"MIT"
] | null | null | null |
examples/hopv/hopv_graph_conv.py
|
evenhe/deepchem
|
9d0fc5554b286117ae08b21b3f15877b06a1009e
|
[
"MIT"
] | 1
|
2021-04-06T20:32:02.000Z
|
2021-04-06T20:32:02.000Z
|
"""
Script that trains graph-conv models on HOPV dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
from deepchem.models import GraphConvModel
np.random.seed(123)
import tensorflow as tf
tf.set_random_seed(123)
import deepchem as dc
from deepchem.molnet import load_hopv
# Load HOPV dataset
hopv_tasks, hopv_datasets, transformers = load_hopv(featurizer='GraphConv')
train_dataset, valid_dataset, test_dataset = hopv_datasets
# Fit models
metric = [
dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean, mode="regression"),
dc.metrics.Metric(
dc.metrics.mean_absolute_error, np.mean, mode="regression")
]
# Number of features on conv-mols
n_feat = 75
# Batch size of models
batch_size = 50
model = GraphConvModel(
len(hopv_tasks), batch_size=batch_size, mode='regression')
# Fit trained model
model.fit(train_dataset, nb_epoch=25)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, metric, transformers)
valid_scores = model.evaluate(valid_dataset, metric, transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
| 25.0625
| 79
| 0.786367
|
4a040f82b96beb721b6ac9818e2d9e2aeda70d9e
| 326
|
py
|
Python
|
instance/config.py
|
OpenUpSA/odac-victim-empowerment
|
7eb6d533e5445da1af3a90741cf3adbd4210e09d
|
[
"Apache-2.0"
] | null | null | null |
instance/config.py
|
OpenUpSA/odac-victim-empowerment
|
7eb6d533e5445da1af3a90741cf3adbd4210e09d
|
[
"Apache-2.0"
] | null | null | null |
instance/config.py
|
OpenUpSA/odac-victim-empowerment
|
7eb6d533e5445da1af3a90741cf3adbd4210e09d
|
[
"Apache-2.0"
] | null | null | null |
import os.path as op
import logging
# Verbosity threshold for the application logger.
LOG_LEVEL = logging.DEBUG
LOGGER_NAME = "msg_handler_logger" # make sure this is not the same as the name of the package to avoid conflicts with Flask's own logger
DEBUG = True # If this is true, then replies get logged to file, rather than hitting the vumi API.
# NOTE(review): presumably a window in minutes for counting a user as
# "online" — confirm against the code that reads this setting.
ONLINE_LAST_MINUTES = 5
| 40.75
| 138
| 0.776074
|
4a0410e6b7a97be5555db09da08a170170e11e33
| 4,557
|
py
|
Python
|
camelot/admin/table.py
|
FrDeGraux/camelot
|
56aa93f774edbb0c31a21109e187cf81f49a68d8
|
[
"BSD-3-Clause"
] | 12
|
2020-08-02T17:00:37.000Z
|
2022-01-22T17:49:23.000Z
|
camelot/admin/table.py
|
FrDeGraux/camelot
|
56aa93f774edbb0c31a21109e187cf81f49a68d8
|
[
"BSD-3-Clause"
] | 124
|
2020-08-19T15:03:54.000Z
|
2022-03-31T07:12:16.000Z
|
camelot/admin/table.py
|
FrDeGraux/camelot
|
56aa93f774edbb0c31a21109e187cf81f49a68d8
|
[
"BSD-3-Clause"
] | 17
|
2020-04-25T19:22:20.000Z
|
2022-01-25T08:46:19.000Z
|
# ============================================================================
#
# Copyright (C) 2007-2016 Conceptive Engineering bvba.
# www.conceptive.be / info@conceptive.be
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Conceptive Engineering nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ============================================================================
"""
A :class:`Table` and a :class:`ColumnGroup` class to define table views that
are more complex.
"""
import six
class ColumnGroup(object):
    """A group of columns to be displayed in a table view.

    By building a Table with multiple column groups, lots of data can be
    displayed in a limited space.

    :param verbose_name: the text to be displayed in the tab widget of the
        column group
    :param columns: a list of fields to display within this column group
    :param icon: a :class:`camelot.view.art.Icon` object

    .. literalinclude:: ../../test/test_view.py
       :start-after: begin column group
       :end-before: end column group

    .. image:: /_static/controls/column_group.png
    """

    def __init__(self, verbose_name, columns, icon=None):
        self.verbose_name = verbose_name
        self.icon = icon
        self.columns = columns

    def get_fields(self):
        """:return: an ordered list of field names displayed in the column group"""
        return self.columns
class Table(object):
    """Represents the columns that should be displayed in a table view.

    :param columns: a list of strings with the fields to be displayed, or a
        list of :class:`ColumnGroup` objects
    """

    def __init__(self, columns):
        self.columns = columns

    def get_fields(self, column_group=None):
        """
        :param column_group: if given, only return the fields in this column
            group, where column_group is the index of the group
        :return: an ordered list of field names displayed in the table
        """
        fields = []
        for index, column in enumerate(self.columns):
            if isinstance(column, six.string_types):
                # plain field name: always included
                fields.append(column)
            elif (column_group is None) or (column_group == index):
                fields.extend(column.get_fields())
        return fields

    def render(self, item_view, parent=None):
        """Create a tab widget that allows the user to switch between column
        groups.

        :param item_view: a :class:`QtWidgets.QAbstractItemView` object.
        :param parent: a :class:`QtWidgets.QWidget` object
        """
        pass
def structure_to_table(structure):
    """Convert a python data structure to a table, using the following rules:

    * if structure is an instance of Table, return structure
    * if structure is a list, create a Table from this list
    """
    return structure if isinstance(structure, Table) else Table(structure)
| 38.294118
| 83
| 0.630898
|
4a0410e6de5a249ede2751e1148dff8106fabd41
| 3,961
|
py
|
Python
|
auto_smart_commit/auto_smart_commit.py
|
deferred/auto-smart-commit
|
9214447d65894260e6bddd01af47ec45670e1b81
|
[
"MIT"
] | null | null | null |
auto_smart_commit/auto_smart_commit.py
|
deferred/auto-smart-commit
|
9214447d65894260e6bddd01af47ec45670e1b81
|
[
"MIT"
] | null | null | null |
auto_smart_commit/auto_smart_commit.py
|
deferred/auto-smart-commit
|
9214447d65894260e6bddd01af47ec45670e1b81
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import re
import sys
from datetime import datetime
from math import floor
from subprocess import check_output
from typing import NoReturn, Optional
def run_command(command: str) -> str:
    """Run *command* (split on whitespace) and return its stripped stdout.

    Any failure — missing executable, non-zero exit status, decoding error —
    yields the empty string instead of raising.
    """
    try:
        return check_output(command.split()).decode("utf-8").strip()
    except Exception:
        return ""
def current_git_branch_name() -> str:
    """Return the current git branch name, or '' when the command fails
    (e.g. outside a git repository or on a detached HEAD)."""
    return run_command("git symbolic-ref --short HEAD")
def extract_jira_issue_key(message: str) -> Optional[str]:
    """Return the first Jira issue key (e.g. ``ABC-123``) found in *message*,
    or ``None`` when there is none.

    A key is an uppercase alphanumeric project key of at least two
    characters, a dash, and an issue number.
    """
    match = re.search(r"[A-Z0-9]{2,}-[0-9]+", message)
    return match.group(0) if match else None
def last_commit_datetime() -> datetime:
    """Return the author datetime of the most recent commit (preferring the
    configured user's own commits), as a naive datetime in the author's
    local wall-clock time. Falls back to ``datetime.now()`` when no commit
    date can be parsed.
    """
    # https://git-scm.com/docs/git-log#_pretty_formats
    # %aI is strict ISO-8601 with a numeric UTC offset, e.g.
    # 2020-01-01T10:00:00+02:00 or ...-05:00.
    git_log = "git log -1 --branches --format=%aI"
    author = run_command("git config user.email")
    last_author_datetime = run_command(f"{git_log} --author={author}") or run_command(git_log)
    try:
        # BUG FIX: the previous implementation split on '+' to drop the UTC
        # offset, which silently fell back to now() for negative offsets
        # (e.g. -05:00).  fromisoformat() parses both signs; dropping tzinfo
        # keeps the author's wall-clock time, matching the old behavior for
        # positive offsets.
        return datetime.fromisoformat(last_author_datetime).replace(tzinfo=None)
    except ValueError:
        return datetime.now()
def num_lunches(start: datetime, end: datetime) -> int:
    """Count the lunch breaks (12:00-12:45) contained in [start, end].

    Every full day in between contributes one break; the first day counts
    only if *start* is before 12:00 and the last day only if *end* is after
    12:45.
    """
    breaks = (end.date() - start.date()).days - 1
    breaks += start < start.replace(hour=12, minute=0, second=0)
    breaks += end > end.replace(hour=12, minute=45, second=0)
    return max(breaks, 0)
def num_nights(start: datetime, end: datetime) -> int:
    """Count the nights (01:00-05:00 windows) contained in [start, end].

    Every full day in between contributes one night; the first day counts
    only if *start* is before 01:00 and the last day only if *end* is after
    05:00.
    """
    nights = (end.date() - start.date()).days - 1
    nights += start < start.replace(hour=1, minute=0, second=0)
    nights += end > end.replace(hour=5, minute=0, second=0)
    return max(nights, 0)
def time_worked_on_commit() -> Optional[str]:
    """Return the time worked on this commit as a Jira-style duration string
    ("<weeks>w <days>d <hours>h <minutes>m"), or None when no time has
    elapsed since the last commit."""
    now = datetime.now()
    last = last_commit_datetime()
    # Determine the number of minutes worked on this commit as the number of
    # minutes since the last commit minus the lunch breaks and nights.
    working_hours_per_day = 8
    working_days_per_week = 5
    minutes = max(
        round((now - last).total_seconds() / 60)
        - num_nights(last, now) * (24 - working_hours_per_day) * 60
        - num_lunches(last, now) * 45,
        0,
    )
    # Convert the number of minutes worked to working weeks, days, hours,
    # minutes (a day is 8 working hours, a week 5 working days).
    if minutes > 0:
        hours = floor(minutes / 60)
        minutes -= hours * 60
        days = floor(hours / working_hours_per_day)
        hours -= days * working_hours_per_day
        weeks = floor(days / working_days_per_week)
        days -= weeks * working_days_per_week
        return f"{weeks}w {days}d {hours}h {minutes}m"
    return None
def main() -> NoReturn:
    """prepare-commit-msg hook entry point: prefix the commit subject with
    the Jira issue key extracted from the current branch name."""
    # https://confluence.atlassian.com/fisheye/using-smart-commits-960155400.html
    # Exit if the branch name does not contain a Jira issue key.
    git_branch_name = current_git_branch_name()
    jira_issue_key = extract_jira_issue_key(git_branch_name)
    if not jira_issue_key:
        sys.exit(0)
    # Read the commit message (git passes the message file path as argv[1]).
    commit_msg_filepath = sys.argv[1]
    with open(commit_msg_filepath, "r") as f:
        commit_msg = f.read()
    # Split the commit into a subject and body and apply some light formatting:
    # capitalise the first letter and strip trailing periods from the subject.
    commit_elements = commit_msg.split("\n", maxsplit=1)
    commit_subject = commit_elements[0].strip()
    commit_subject = f"{commit_subject[:1].upper()}{commit_subject[1:]}"
    commit_subject = re.sub(r"\.+$", "", commit_subject)
    commit_body = None if len(commit_elements) == 1 else commit_elements[1].strip()
    # Build the new commit message:
    # Make sure the subject starts with a Jira issue key.
    if not extract_jira_issue_key(commit_subject):
        commit_subject = f"{jira_issue_key} {commit_subject}"
    # Override commit message.
    commit_msg = f"{commit_subject}\n\n{commit_body}" if commit_body else commit_subject
    with open(commit_msg_filepath, "w") as f:
        f.write(commit_msg)
    sys.exit(0)
if __name__ == "__main__":
exit(main())
| 34.443478
| 94
| 0.661449
|
4a0410ffc523ce1e5d6dd016f71ecb30f244b946
| 2,274
|
py
|
Python
|
src/lib/crawler/finder.py
|
SohailAlvi/vault_scanner
|
72358cbc9f1bc5ac8b9b7373dca2cb2fc1dce3de
|
[
"MIT"
] | null | null | null |
src/lib/crawler/finder.py
|
SohailAlvi/vault_scanner
|
72358cbc9f1bc5ac8b9b7373dca2cb2fc1dce3de
|
[
"MIT"
] | null | null | null |
src/lib/crawler/finder.py
|
SohailAlvi/vault_scanner
|
72358cbc9f1bc5ac8b9b7373dca2cb2fc1dce3de
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python
from html.parser import HTMLParser
import urllib.request
import urllib.response
import urllib.parse
import urllib.error
import colors
class Linkfinder(HTMLParser):
    """HTML parser that collects the absolute targets of every ``<a href>``
    tag fed to it, resolved against *base_url*."""

    def __init__(self, base_url):
        super().__init__()
        self.base_url = base_url
        self.links = set()

    def handle_starttag(self, tag, attrs):
        # Only anchor tags carry hyperlinks we care about.
        if tag != 'a':
            return
        for name, value in attrs:
            if name == 'href':
                self.links.add(urllib.parse.urljoin(self.base_url, value))

    def links_obtained(self):
        """Return the set of absolute URLs collected so far."""
        return self.links

    def error(self, message):
        # Silently ignore malformed markup.
        pass
class Imagefinder(HTMLParser):
    """HTML parser that collects absolute image URLs (``src``/``alt``/
    ``srcset`` attributes of ``<img>`` tags), resolved against *url*."""

    def __init__(self, url):
        super().__init__()
        self.url = url
        self.img_links = set()

    def handle_starttag(self, tag, attrs):
        if tag == 'img':
            for attribute, value in attrs:
                if attribute in ('src', 'alt', 'srcset'):
                    self.img_links.add(urllib.parse.urljoin(self.url, value))

    def img_links_obtained(self):
        """Return the set of absolute image URLs collected so far."""
        return self.img_links

    def error(self, message):
        # Silently ignore malformed markup.
        pass

    def crawl(self):
        """Fetch ``self.url`` over HTTP(S) and feed the HTML to the parser."""
        try:
            # BUG FIX: the original condition was
            #   not startswith('ftp:/') or not startswith('file:/')
            # which is always True (a URL cannot start with both), so
            # ftp:/file: URLs were never actually skipped.
            url = self.url.lower()
            if not (url.startswith('ftp:/') or url.startswith('file:/')):
                req = urllib.request.Request(self.url, headers={'User-Agent': 'Mozilla/5.0'})
                con = urllib.request.urlopen(req)
                html_string = con.read().decode("utf-8")
                self.feed(html_string)
        except urllib.error.URLError:
            colors.error("Was not able to open the URL.")
        except Exception as e:
            colors.error(str(e))
            return ''
def initiate(list_url, path):
    """Crawl every URL in *list_url* for images and download each image
    found, numbering the output files sequentially under *path*."""
    try:
        # Imported lazily so a missing imutil module is reported via colors
        # instead of crashing at import time.
        from . import imutil as imu
        c = 0  # running counter used to name the downloaded files
        for x in list_url:
            I = Imagefinder(x)
            I.crawl()
            for i in I.img_links_obtained():
                imu.image_download(i, path + '/'+str(c))
                c = c+1
        colors.success('Returning..')
    except ImportError as e:
        colors.error('Could not import the required module')
    except Exception as e:
        colors.error(str(e))
| 27.071429
| 101
| 0.559807
|
4a0411ce8b1cf41df7c6e0d45a6f208faef91a05
| 2,302
|
py
|
Python
|
test/functional/sample_ta/Splunk_TA_mysolarwinds/bin/splunk_ta_mysolarwinds_rh_settings.py
|
isabella232/addonfactory-cloudconnect-library
|
ba01a5492e0d6f185954f6906b4f506325838fa4
|
[
"Apache-2.0"
] | null | null | null |
test/functional/sample_ta/Splunk_TA_mysolarwinds/bin/splunk_ta_mysolarwinds_rh_settings.py
|
isabella232/addonfactory-cloudconnect-library
|
ba01a5492e0d6f185954f6906b4f506325838fa4
|
[
"Apache-2.0"
] | null | null | null |
test/functional/sample_ta/Splunk_TA_mysolarwinds/bin/splunk_ta_mysolarwinds_rh_settings.py
|
isabella232/addonfactory-cloudconnect-library
|
ba01a5492e0d6f185954f6906b4f506325838fa4
|
[
"Apache-2.0"
] | null | null | null |
# REST-handler declaration for the add-on's /settings endpoint: a "logging"
# stanza (log level) and a "proxy" stanza (proxy connection details), both
# persisted in splunk_ta_mysolarwinds_settings.conf.
import splunk_ta_mysolarwinds_import_declare

from splunktaucclib.rest_handler.endpoint import (
    field,
    validator,
    RestModel,
    MultipleModel,
)
from splunktaucclib.rest_handler import admin_external, util
from splunktaucclib.rest_handler.admin_external import AdminExternalHandler

# Remove any HTTP proxy environment variables so they do not affect the
# handler's own requests.
util.remove_http_proxy_env_vars()


# --- "logging" stanza: a single optional log level, defaulting to INFO. ---
fields_logging = [
    field.RestField(
        'loglevel',
        required=False,
        encrypted=False,
        default='INFO',
        validator=None
    )
]
model_logging = RestModel(fields_logging, name='logging')


# --- "proxy" stanza: connection details for an outbound proxy.  Only the
# password is stored encrypted; the URL is validated against a broad
# IP-or-hostname pattern with optional scheme, credentials, port and path. ---
fields_proxy = [
    field.RestField(
        'proxy_enabled',
        required=False,
        encrypted=False,
        default=None,
        validator=None
    ),
    field.RestField(
        'proxy_rdns',
        required=False,
        encrypted=False,
        default=None,
        validator=None
    ),
    field.RestField(
        'proxy_type',
        required=False,
        encrypted=False,
        default='http',
        validator=None
    ),
    field.RestField(
        'proxy_url',
        required=False,
        encrypted=False,
        default=None,
        validator=validator.Pattern(
            regex=r"""^(?:(?:https?|ftp|opc\.tcp):\/\/)?(?:\S+(?::\S*)?@)?(?:(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(?:(?:[a-z\u00a1-\uffff0-9]+-?_?)*[a-z\u00a1-\uffff0-9]+)(?:\.(?:[a-z\u00a1-\uffff0-9]+-?)*[a-z\u00a1-\uffff0-9]+)*(?:\.(?:[a-z\u00a1-\uffff]{2,}))?)(?::\d{2,5})?(?:\/[^\s]*)?$""",
        )
    ),
    field.RestField(
        'proxy_port',
        required=False,
        encrypted=False,
        default=None,
        validator=None
    ),
    field.RestField(
        'proxy_username',
        required=False,
        encrypted=False,
        default=None,
        validator=None
    ),
    field.RestField(
        'proxy_password',
        required=False,
        encrypted=True,
        default=None,
        validator=None
    )
]
model_proxy = RestModel(fields_proxy, name='proxy')


# Both stanzas live in the same .conf file, handled by one endpoint.
endpoint = MultipleModel(
    'splunk_ta_mysolarwinds_settings',
    models=[
        model_logging,
        model_proxy
    ],
)


if __name__ == '__main__':
    admin_external.handle(
        endpoint,
        handler=AdminExternalHandler,
    )
| 23.489796
| 373
| 0.565595
|
4a0411d63ad58f53cd52fb5a61ad75877dd04b65
| 897
|
py
|
Python
|
share/qt/clean_mac_info_plist.py
|
Blaksmith/FuMPCoin
|
641ddb00cf232de591c7715d0f0001765ea02ef9
|
[
"MIT"
] | null | null | null |
share/qt/clean_mac_info_plist.py
|
Blaksmith/FuMPCoin
|
641ddb00cf232de591c7715d0f0001765ea02ef9
|
[
"MIT"
] | null | null | null |
share/qt/clean_mac_info_plist.py
|
Blaksmith/FuMPCoin
|
641ddb00cf232de591c7715d0f0001765ea02ef9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the Bitcoin-Qt.app contains the right plist (including the right version)
# fix made because of several bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date

bitcoinDir = "./"

inFile = bitcoinDir + "/share/qt/Info.plist"
outFile = "FuMPcoin-Qt.app/Contents/Info.plist"

# Grab the VERSION assignment out of the qmake project file.
version = "unknown"
fileForGrabbingVersion = bitcoinDir + "fumpcoin-qt.pro"
for line in open(fileForGrabbingVersion):
    lineArr = line.replace(" ", "").split("=")
    if lineArr[0].startswith("VERSION"):
        version = lineArr[1].replace("\n", "")

# Substitute $VERSION and $YEAR into the plist template.
# Fixes over the original script: files are closed via `with` (they were
# leaked before) and print() is a function, so this runs on Python 3
# (the old `print "..."` statement was a SyntaxError there).
with open(inFile, "r") as fIn:
    fileContent = fIn.read()

newFileContent = Template(fileContent).substitute(VERSION=version, YEAR=date.today().year)

with open(outFile, "w") as fOut:
    fOut.write(newFileContent)

print("Info.plist fresh created")
| 29.9
| 109
| 0.725753
|
4a04128fca6990ce3fb96026a5fb0bf2d08f3a78
| 1,358
|
py
|
Python
|
fizzbuzz/fizzbuzz.py
|
sotetsuk/python-dev-tutorial
|
aec676b78720b6de6eae29709c48eeac65fdb41b
|
[
"MIT"
] | 6
|
2020-12-17T09:31:23.000Z
|
2022-03-30T07:57:23.000Z
|
fizzbuzz/fizzbuzz.py
|
sotetsuk/python-dev-tutorial
|
aec676b78720b6de6eae29709c48eeac65fdb41b
|
[
"MIT"
] | 12
|
2020-11-08T01:50:30.000Z
|
2020-12-17T12:27:05.000Z
|
fizzbuzz/fizzbuzz.py
|
sotetsuk/python-dev-tutorial
|
aec676b78720b6de6eae29709c48eeac65fdb41b
|
[
"MIT"
] | 4
|
2020-12-22T09:51:16.000Z
|
2021-01-21T11:49:37.000Z
|
import sys
import click
def fizzbuzz(n: int, fizz: int = 3, buzz: int = 5) -> str:
    """Fizz Buzz function.

    Builds the answer by concatenation: "Fizz" when *n* is divisible by
    *fizz*, "Buzz" when divisible by *buzz*, both when divisible by both,
    and the decimal representation of *n* otherwise.

    >>> fizzbuzz(3)
    'Fizz'
    >>> fizzbuzz(5)
    'Buzz'
    >>> fizzbuzz(15)
    'FizzBuzz'
    >>> fizzbuzz(2)
    '2'
    """
    word = ""
    if n % fizz == 0:
        word += "Fizz"
    if n % buzz == 0:
        word += "Buzz"
    return word or str(n)
@click.command()
@click.argument("nums", nargs=-1, type=int)
@click.option("--fizz", type=int, default=3, help="Number corresponds to Fizz.")
@click.option("--buzz", type=int, default=5, help="Number corresponds to Buzz.")
def main(nums, fizz, buzz):
    """Fizz Buzz program. If no arguments are passed, it reads numbers from stdin."""
    if nums:  # numbers were passed as arguments: apply FizzBuzz to each of them
        sys.stderr.write("Reading numbers from arguments ...\n")
        for n in nums:
            sys.stdout.write(f"{fizzbuzz(n, fizz=fizz, buzz=buzz)}\n")
    else:  # no arguments: read numbers from stdin, one per line, until EOF
        sys.stderr.write("Reading numbers from stdin ...\n")
        try:
            line = sys.stdin.readline()
            while line:
                sys.stdout.write(f"{fizzbuzz(int(line), fizz=fizz, buzz=buzz)}\n")
                line = sys.stdin.readline()
        except KeyboardInterrupt:
            # Ctrl-C during interactive input ends the program quietly.
            return
| 28.291667
| 85
| 0.576583
|
4a0415f0e34fbdf9217a61d1d38d0a8a91f549a2
| 8,163
|
py
|
Python
|
site-packages/cheroot/workers/threadpool.py
|
lego-cloud/MDMPy
|
dc676a5d2245a14b9b98a2ac2dba64ff0bf61800
|
[
"Python-2.0",
"OLDAP-2.7"
] | 2
|
2017-09-30T13:32:53.000Z
|
2017-10-02T10:41:46.000Z
|
site-packages/cheroot/workers/threadpool.py
|
lego-cloud/MDMPy
|
dc676a5d2245a14b9b98a2ac2dba64ff0bf61800
|
[
"Python-2.0",
"OLDAP-2.7"
] | 1
|
2017-04-22T22:24:36.000Z
|
2017-04-22T22:24:36.000Z
|
site-packages/cheroot/workers/threadpool.py
|
lego-cloud/MDMPy
|
dc676a5d2245a14b9b98a2ac2dba64ff0bf61800
|
[
"Python-2.0",
"OLDAP-2.7"
] | 2
|
2017-04-22T22:23:09.000Z
|
2021-06-09T03:03:52.000Z
|
"""A thread-based worker pool."""
import threading
import time
import sys
import socket
from six.moves import queue
__all__ = ['WorkerThread', 'ThreadPool']
class TrueyZero(object):
    """An object which equals and does math like the integer 0 but evals True.

    Needed by the ``WorkerThread.stats`` lambdas below, which rely on the
    ``cond and trueyzero or expr`` short-circuit idiom: a literal ``0``
    would be falsy and always fall through to ``expr``, while trueyzero
    is truthy yet adds like zero.
    """
    def __add__(self, other):
        return other

    def __radd__(self, other):
        return other


# Shared singleton used by the stats lambdas.
trueyzero = TrueyZero()

# Sentinel placed on the request queue to tell a worker thread to exit.
_SHUTDOWNREQUEST = None
class WorkerThread(threading.Thread):
    """Thread which continuously polls a Queue for Connection objects.

    Due to the timing issues of polling a Queue, a WorkerThread does not
    check its own 'ready' flag after it has started. To stop the thread,
    it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
    (one for each running WorkerThread).
    """

    conn = None
    """The current connection pulled off the Queue, or None."""

    server = None
    """The HTTP Server which spawned this thread, and which owns the
    Queue and is placing active connections into it."""

    ready = False
    """A simple flag for the calling server to know when this thread
    has begun polling the Queue."""

    def __init__(self, server):
        self.ready = False
        self.server = server
        # Totals accumulated from connections that have already finished.
        self.requests_seen = 0
        self.bytes_read = 0
        self.bytes_written = 0
        # Wall-clock time at which the current connection started being
        # served, or None while idle / stats disabled.
        self.start_time = None
        self.work_time = 0
        # Live statistics.  Each entry is a callable so it can be sampled at
        # any moment: when a connection is in flight (start_time is not
        # None), its in-progress counters are added on top of the totals.
        # `trueyzero` is a truthy zero that keeps the `and ... or ...`
        # short-circuit selecting the correct branch.
        self.stats = {
            'Requests': lambda s: self.requests_seen + (
                (self.start_time is None) and
                trueyzero or
                self.conn.requests_seen
            ),
            'Bytes Read': lambda s: self.bytes_read + (
                (self.start_time is None) and
                trueyzero or
                self.conn.rfile.bytes_read
            ),
            'Bytes Written': lambda s: self.bytes_written + (
                (self.start_time is None) and
                trueyzero or
                self.conn.wfile.bytes_written
            ),
            'Work Time': lambda s: self.work_time + (
                (self.start_time is None) and
                trueyzero or
                time.time() - self.start_time
            ),
            'Read Throughput': lambda s: s['Bytes Read'](s) / (
                s['Work Time'](s) or 1e-6),
            'Write Throughput': lambda s: s['Bytes Written'](s) / (
                s['Work Time'](s) or 1e-6),
        }
        threading.Thread.__init__(self)

    def run(self):
        # Publish this thread's stats dict, then serve connections until the
        # _SHUTDOWNREQUEST sentinel is pulled off the queue.
        self.server.stats['Worker Threads'][self.getName()] = self.stats
        try:
            self.ready = True
            while True:
                conn = self.server.requests.get()
                if conn is _SHUTDOWNREQUEST:
                    return

                self.conn = conn
                if self.server.stats['Enabled']:
                    self.start_time = time.time()
                try:
                    conn.communicate()
                finally:
                    conn.close()
                    if self.server.stats['Enabled']:
                        # Fold the finished connection's counters into the
                        # totals and mark the thread idle again.
                        self.requests_seen += self.conn.requests_seen
                        self.bytes_read += self.conn.rfile.bytes_read
                        self.bytes_written += self.conn.wfile.bytes_written
                        self.work_time += time.time() - self.start_time
                        self.start_time = None
                self.conn = None
        except (KeyboardInterrupt, SystemExit):
            # Hand the interrupt to the server rather than dying silently.
            exc = sys.exc_info()[1]
            self.server.interrupt = exc
class ThreadPool(object):
    """A Request Queue for an HTTPServer which pools threads.

    ThreadPool objects must provide min, get(), put(obj), start()
    and stop(timeout) attributes.
    """

    def __init__(self, server, min=10, max=-1,
                 accepted_queue_size=-1, accepted_queue_timeout=10):
        self.server = server
        self.min = min  # minimum number of worker threads
        self.max = max  # maximum number of worker threads; <= 0 means unbounded
        self._threads = []
        self._queue = queue.Queue(maxsize=accepted_queue_size)
        self._queue_put_timeout = accepted_queue_timeout
        self.get = self._queue.get  # expose the queue's get() as pool.get()

    def start(self):
        """Start the pool of threads."""
        for i in range(self.min):
            self._threads.append(WorkerThread(self.server))
        for worker in self._threads:
            worker.setName('CP Server ' + worker.getName())
            worker.start()
        # Block until every worker has begun polling the queue.
        for worker in self._threads:
            while not worker.ready:
                time.sleep(.1)

    def _get_idle(self):
        """Number of worker threads which are idle. Read-only."""
        return len([t for t in self._threads if t.conn is None])
    idle = property(_get_idle, doc=_get_idle.__doc__)

    def put(self, obj):
        # Hand a connection (or the shutdown sentinel) to a worker thread.
        self._queue.put(obj, block=True, timeout=self._queue_put_timeout)
        if obj is _SHUTDOWNREQUEST:
            return

    def grow(self, amount):
        """Spawn new worker threads (not above self.max)."""
        if self.max > 0:
            budget = max(self.max - len(self._threads), 0)
        else:
            # self.max <= 0 indicates no maximum
            budget = float('inf')
        n_new = min(amount, budget)

        workers = [self._spawn_worker() for i in range(n_new)]
        # Wait for the new workers to start polling before registering them.
        while not all(worker.ready for worker in workers):
            time.sleep(.1)
        self._threads.extend(workers)

    def _spawn_worker(self):
        # Create, name and start a single worker thread.
        worker = WorkerThread(self.server)
        worker.setName('CP Server ' + worker.getName())
        worker.start()
        return worker

    def shrink(self, amount):
        """Kill off worker threads (not below self.min)."""
        # Grow/shrink the pool if necessary.
        # Remove any dead threads from our list
        for t in self._threads:
            if not t.isAlive():
                self._threads.remove(t)
                amount -= 1

        # calculate the number of threads above the minimum
        n_extra = max(len(self._threads) - self.min, 0)

        # don't remove more than amount
        n_to_remove = min(amount, n_extra)

        # put shutdown requests on the queue equal to the number of threads
        # to remove. As each request is processed by a worker, that worker
        # will terminate and be culled from the list.
        for n in range(n_to_remove):
            self._queue.put(_SHUTDOWNREQUEST)

    def stop(self, timeout=5):
        # Must shut down threads here so the code that calls
        # this method can know when all threads are stopped.
        for worker in self._threads:
            self._queue.put(_SHUTDOWNREQUEST)

        # Don't join currentThread (when stop is called inside a request).
        current = threading.currentThread()
        if timeout is not None and timeout >= 0:
            endtime = time.time() + timeout
        while self._threads:
            worker = self._threads.pop()
            if worker is not current and worker.isAlive():
                try:
                    if timeout is None or timeout < 0:
                        worker.join()
                    else:
                        remaining_time = endtime - time.time()
                        if remaining_time > 0:
                            worker.join(remaining_time)
                        if worker.isAlive():
                            # We exhausted the timeout.
                            # Forcibly shut down the socket.
                            c = worker.conn
                            if c and not c.rfile.closed:
                                try:
                                    c.socket.shutdown(socket.SHUT_RD)
                                except TypeError:
                                    # pyOpenSSL sockets don't take an arg
                                    c.socket.shutdown()
                            worker.join()
                except (AssertionError,
                        # Ignore repeated Ctrl-C.
                        # See
                        # https://github.com/cherrypy/cherrypy/issues/691.
                        KeyboardInterrupt):
                    pass

    def _get_qsize(self):
        return self._queue.qsize()
    qsize = property(_get_qsize)
| 34.154812
| 78
| 0.543918
|
4a04169a9f7ab92450d599b041019de0382e103f
| 7,509
|
py
|
Python
|
depccg/tools/ja/data.py
|
justheuristic/depccg
|
9a60ead5804dc3f03edaf25418a7aa1378d54c17
|
[
"MIT"
] | null | null | null |
depccg/tools/ja/data.py
|
justheuristic/depccg
|
9a60ead5804dc3f03edaf25418a7aa1378d54c17
|
[
"MIT"
] | null | null | null |
depccg/tools/ja/data.py
|
justheuristic/depccg
|
9a60ead5804dc3f03edaf25418a7aa1378d54c17
|
[
"MIT"
] | null | null | null |
import logging
import json
from collections import defaultdict
from pathlib import Path
from depccg.tools.ja.reader import read_ccgbank
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
UNK = "*UNKNOWN*"
START = "*START*"
END = "*END*"
IGNORE = -1
class TrainingDataCreator(object):
    """Collect vocabulary, category and rule statistics from a CCGbank-style
    file and write the artifacts a supertagger trainer expects:
    frequency-filtered vocabularies, seen binary/unary rules, and JSON
    samples pairing each sentence with its supertags and dependencies."""

    def __init__(self, filepath, word_freq_cut, char_freq_cut, cat_freq_cut):
        self.filepath = filepath
        # those categories whose frequency < freq_cut are discarded.
        self.word_freq_cut = word_freq_cut
        self.char_freq_cut = char_freq_cut
        self.cat_freq_cut = cat_freq_cut
        self.seen_rules = defaultdict(int)  # seen binary rules
        self.unary_rules = defaultdict(int)  # seen unary rules
        # Special tokens are pre-seeded at exactly the cut-off frequency so
        # they always survive the frequency filter below.
        self.cats = defaultdict(int, {
            START: cat_freq_cut,
            END: cat_freq_cut
        })  # all cats
        self.words = defaultdict(int, {
            UNK: word_freq_cut,
            START: word_freq_cut,
            END: word_freq_cut
        })
        self.chars = defaultdict(int, {
            UNK: char_freq_cut,
            START: char_freq_cut,
            END: char_freq_cut
        })
        self.samples = []  # (sentence, [supertags, dependencies]) pairs
        self.sents = []  # space-joined sentences

    def _traverse(self, tree):
        # Recursively count leaf categories/words/characters and the
        # unary/binary rule applications at internal nodes.
        if tree.is_leaf:
            self.cats[str(tree.cat)] += 1
            word = tree.word
            self.words[word] += 1
            for char in word:
                self.chars[char] += 1
        else:
            children = tree.children
            if len(children) == 1:
                rule = str(tree.cat), str(children[0].cat)
                self.unary_rules[rule] += 1
                self._traverse(children[0])
            else:
                rule = str(children[0].cat), str(children[1].cat)
                self.seen_rules[rule] += 1
                self._traverse(children[0])
                self._traverse(children[1])

    @staticmethod
    def _write(dct, filename):
        # Dump a mapping as "key # count" lines, one entry per line.
        with open(filename, 'w') as f:
            logger.info(f'writing to {f.name}')
            for key, value in dct.items():
                print(f'{key} # {str(value)}', file=f)

    def _get_dependencies(self, tree, sent_len):
        # Flatten the head-annotated tree into a dependency list where
        # entry i is the 1-based index of token i's head (0 for the root).
        def rec(subtree):
            if not subtree.is_leaf:
                children = subtree.children
                if len(children) == 2:
                    head = rec(children[0 if subtree.head_is_left else 1])
                    dep = rec(children[1 if subtree.head_is_left else 0])
                    res[dep] = head
                else:
                    head = rec(children[0])
                return head
            else:
                return subtree.head_id

        res = [-1 for _ in range(sent_len)]
        rec(tree)
        res = [i + 1 for i in res]  # shift to 1-based; the root keeps 0
        # exactly one token must be the root
        assert len(list(filter(lambda i:i == 0, res))) == 1
        return res

    def _to_conll(self, out):
        # Emit samples in a simple 4-column (index, word, category,
        # head-index) CoNLL-like format, sentences separated by blank lines.
        for sent, (cats, deps) in self.samples:
            words = sent.split(' ')
            for i, (word, cat, dep) in enumerate(zip(words, cats, deps), 1):
                print(f'{i}\t{word}\t{cat}\t{dep}', file=out)
            print('', file=out)

    def _create_samples(self, trees):
        # Build the (sentence, [supertags, dependencies]) pairs.
        for tree in trees:
            tokens = tree.leaves
            words = [token.word for token in tokens]
            cats = [str(token.cat) for token in tokens]
            deps = self._get_dependencies(tree, len(tokens))
            sent = ' '.join(words)
            self.sents.append(sent)
            self.samples.append((sent, [cats, deps]))

    @staticmethod
    def create_traindata(args):
        # Full training pipeline: count statistics over all trees, apply the
        # frequency cut-offs, then write vocabularies, rules and samples.
        self = TrainingDataCreator(args.PATH,
                                   args.word_freq_cut,
                                   args.char_freq_cut,
                                   args.cat_freq_cut)
        trees = [tree for _, _, tree in read_ccgbank(self.filepath)]
        for tree in trees:
            self._traverse(tree)
        self._create_samples(trees)

        cats = {k: v for k, v in self.cats.items() if v >= self.cat_freq_cut}
        self._write(cats, args.OUT / 'target.txt')

        words = {k: v for k, v in self.words.items() if v >= self.word_freq_cut}
        self._write(words, args.OUT / 'words.txt')

        chars = {k: v for k, v in self.chars.items() if v >= self.char_freq_cut}
        self._write(chars, args.OUT / 'chars.txt')

        # Only keep binary rules whose categories survived the cut-off.
        seen_rules = {f'{c1} {c2}': v for (c1, c2), v in self.seen_rules.items()
                      if c1 in cats and c2 in cats}
        self._write(seen_rules, args.OUT / 'seen_rules.txt')

        unary_rules = {f'{c1} {c2}': v for (c1, c2), v in self.unary_rules.items()}
        self._write(unary_rules, args.OUT / 'unary_rules.txt')

        with open(args.OUT / 'traindata.json', 'w') as f:
            logger.info(f'writing to {f.name}')
            json.dump(self.samples, f)

        with open(args.OUT / 'trainsents.txt', 'w') as f:
            logger.info(f'writing to {f.name}')
            for sent in self.sents:
                print(sent, file=f)

        with open(args.OUT / 'trainsents.conll', 'w') as f:
            logger.info(f'writing to {f.name}')
            self._to_conll(f)

    @staticmethod
    def create_testdata(args):
        # Test data needs no frequency statistics, only the samples.
        # NOTE(review): the 2nd/3rd constructor arguments are passed in
        # (word, cat, char) order here, while __init__ expects
        # (word, char, cat) — harmless for test data since the cut-offs are
        # never used on this path, but worth confirming/cleaning up.
        self = TrainingDataCreator(args.PATH,
                                   args.word_freq_cut,
                                   args.cat_freq_cut,
                                   args.char_freq_cut)
        trees = [tree for _, _, tree in read_ccgbank(self.filepath)]
        self._create_samples(trees)

        with open(args.OUT / 'testdata.json', 'w') as f:
            logger.info(f'writing to {f.name}')
            json.dump(self.samples, f)

        with open(args.OUT / 'testsents.txt', 'w') as f:
            logger.info(f'writing to {f.name}')
            for sent in self.sents:
                print(sent, file=f)

        with open(args.OUT / 'testsents.conll', 'w') as f:
            logger.info(f'writing to {f.name}')
            self._to_conll(f)

    @staticmethod
    def convert_json(autopath):
        # Parse a CCGbank file and return the samples without writing files.
        self = TrainingDataCreator(autopath, None, None, None)
        trees = [tree for _, _, tree in read_ccgbank(self.filepath)]
        logger.info(f'loaded {len(trees)} trees')
        self._create_samples(trees)
        return self.samples
def convert_ccgbank_to_json(ccgbankpath):
    """Module-level convenience wrapper: parse the CCGbank file at
    *ccgbankpath* and return its (sentence, [supertags, deps]) samples."""
    return TrainingDataCreator.convert_json(ccgbankpath)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    # Command-line interface for creating training or test data.
    parser.add_argument('PATH',
                        type=Path,
                        help='path to conll data file')
    parser.add_argument('OUT',
                        type=Path,
                        help='output directory path')
    parser.add_argument('--cat-freq-cut',
                        type=int,
                        default=10,
                        help='only allow categories which appear >= freq-cut')
    parser.add_argument('--word-freq-cut',
                        type=int,
                        default=5,
                        help='only allow words which appear >= freq-cut')
    parser.add_argument('--char-freq-cut',
                        type=int,
                        default=5,
                        help='only allow characters which appear >= freq-cut')
    parser.add_argument('--mode',
                        choices=['train', 'test'],
                        default='train')

    args = parser.parse_args()
    # 'train' additionally computes vocabularies and rule statistics;
    # 'test' only emits the samples.
    if args.mode == 'train':
        TrainingDataCreator.create_traindata(args)
    else:
        TrainingDataCreator.create_testdata(args)
| 34.444954
| 83
| 0.548808
|
4a0416aba2b8ec694c3b5a3fb2a96ed29372b3d4
| 13,335
|
py
|
Python
|
qiskit/transpiler/passmanager.py
|
dmquinones/qiskit-terra
|
f8fdfc514b051b4a37f7ac738b9716aecba8fc37
|
[
"Apache-2.0"
] | 1
|
2021-10-13T14:37:54.000Z
|
2021-10-13T14:37:54.000Z
|
qiskit/transpiler/passmanager.py
|
errvnd/qiskit-terra
|
c9c6d46cae3e48f06f4513be9dc0de3a49128424
|
[
"Apache-2.0"
] | null | null | null |
qiskit/transpiler/passmanager.py
|
errvnd/qiskit-terra
|
c9c6d46cae3e48f06f4513be9dc0de3a49128424
|
[
"Apache-2.0"
] | 2
|
2020-02-10T16:34:18.000Z
|
2020-05-22T08:37:07.000Z
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""PassManager class for the transpiler."""
from functools import partial
from collections import OrderedDict
from qiskit.dagcircuit import DAGCircuit
from qiskit.converters import circuit_to_dag, dag_to_circuit
from .propertyset import PropertySet
from .basepasses import BasePass
from .fencedobjs import FencedPropertySet, FencedDAGCircuit
from .exceptions import TranspilerError
class PassManager():
"""A PassManager schedules the passes"""
def __init__(self, passes=None,
ignore_requires=None,
ignore_preserves=None,
max_iteration=None):
"""
Initialize an empty PassManager object (with no passes scheduled).
Args:
passes (list[BasePass] or BasePass): pass(es) to be added to schedule. The default is
None.
ignore_requires (bool): The schedule ignores the requires field in the passes. The
default setting in the pass is False.
ignore_preserves (bool): The schedule ignores the preserves field in the passes. The
default setting in the pass is False.
max_iteration (int): The schedule looping iterates until the condition is met or until
max_iteration is reached.
"""
# the pass manager's schedule of passes, including any control-flow.
# Populated via PassManager.append().
self.working_list = []
# global property set is the context of the circuit held by the pass manager
# as it runs through its scheduled passes. Analysis passes may update the property_set,
# but transformation passes have read-only access (via the fenced_property_set).
self.property_set = PropertySet()
self.fenced_property_set = FencedPropertySet(self.property_set)
# passes already run that have not been invalidated
self.valid_passes = set()
# pass manager's overriding options for the passes it runs (for debugging)
self.passmanager_options = {'ignore_requires': ignore_requires,
'ignore_preserves': ignore_preserves,
'max_iteration': max_iteration}
if passes is not None:
self.append(passes)
def _join_options(self, passset_options):
"""Set the options of each passset, based on precedence rules:
passset options (set via ``PassManager.append()``) override
passmanager options (set via ``PassManager.__init__()``), which override Default.
.
"""
default = {'ignore_preserves': False, # Ignore preserves for this pass
'ignore_requires': False, # Ignore requires for this pass
'max_iteration': 1000} # Maximum allowed iteration on this pass
passmanager_level = {k: v for k, v in self.passmanager_options.items() if v is not None}
passset_level = {k: v for k, v in passset_options.items() if v is not None}
return {**default, **passmanager_level, **passset_level}
    def append(self, passes, ignore_requires=None, ignore_preserves=None, max_iteration=None,
               **flow_controller_conditions):
        """
        Args:
            passes (list[BasePass] or BasePass): pass(es) to be added to schedule
            ignore_preserves (bool): ignore the preserves claim of passes. Default: False
            ignore_requires (bool): ignore the requires need of passes. Default: False
            max_iteration (int): max number of iterations of passes. Default: 1000
            flow_controller_conditions (kwargs): See add_flow_controller(): Dictionary of
                control flow plugins. Default:

                * do_while (callable property_set -> boolean): The passes repeat until the
                  callable returns False.
                  Default: `lambda x: False # i.e. passes run once`

                * condition (callable property_set -> boolean): The passes run only if the
                  callable returns True.
                  Default: `lambda x: True # i.e. passes run`
        Raises:
            TranspilerError: if a pass in passes is not a proper pass.
        """
        passset_options = {'ignore_requires': ignore_requires,
                           'ignore_preserves': ignore_preserves,
                           'max_iteration': max_iteration}

        options = self._join_options(passset_options)

        if isinstance(passes, BasePass):
            passes = [passes]

        for pass_ in passes:
            if not isinstance(pass_, BasePass):
                raise TranspilerError('%s is not a pass instance' % pass_.__class__)

        # Bind each flow-control callable to the read-only property set so it
        # can be evaluated later without access to the mutable one.
        for name, param in flow_controller_conditions.items():
            if callable(param):
                flow_controller_conditions[name] = partial(param, self.fenced_property_set)
            else:
                raise TranspilerError('The flow controller parameter %s is not callable' % name)

        self.working_list.append(
            FlowController.controller_factory(passes, options, **flow_controller_conditions))
def reset(self):
""" "Resets the pass manager instance """
self.valid_passes = set()
self.property_set.clear()
def run(self, circuit):
"""Run all the passes on a QuantumCircuit
Args:
circuit (QuantumCircuit): circuit to transform via all the registered passes
Returns:
QuantumCircuit: Transformed circuit.
"""
name = circuit.name
dag = circuit_to_dag(circuit)
del circuit
self.reset() # Reset passmanager instance before starting
for passset in self.working_list:
for pass_ in passset:
dag = self._do_pass(pass_, dag, passset.options)
circuit = dag_to_circuit(dag)
circuit.name = name
return circuit
def _do_pass(self, pass_, dag, options):
"""Do a pass and its "requires".
Args:
pass_ (BasePass): Pass to do.
dag (DAGCircuit): The dag on which the pass is ran.
options (dict): PassManager options.
Returns:
DAGCircuit: The transformed dag in case of a transformation pass.
The same input dag in case of an analysis pass.
Raises:
TranspilerError: If the pass is not a proper pass instance.
"""
# First, do the requires of pass_
if not options["ignore_requires"]:
for required_pass in pass_.requires:
dag = self._do_pass(required_pass, dag, options)
# Run the pass itself, if not already run
if pass_ not in self.valid_passes:
if pass_.is_transformation_pass:
pass_.property_set = self.fenced_property_set
new_dag = pass_.run(dag)
if not isinstance(new_dag, DAGCircuit):
raise TranspilerError("Transformation passes should return a transformed dag."
"The pass %s is returning a %s" % (type(pass_).__name__,
type(new_dag)))
dag = new_dag
elif pass_.is_analysis_pass:
pass_.property_set = self.property_set
pass_.run(FencedDAGCircuit(dag))
else:
raise TranspilerError("I dont know how to handle this type of pass")
# update the valid_passes property
self._update_valid_passes(pass_, options['ignore_preserves'])
return dag
def _update_valid_passes(self, pass_, ignore_preserves):
self.valid_passes.add(pass_)
if not pass_.is_analysis_pass: # Analysis passes preserve all
if ignore_preserves:
self.valid_passes.clear()
else:
self.valid_passes.intersection_update(set(pass_.preserves))
def passes(self):
"""
Returns a list structure of the appended passes and its options.
Returns (list): The appended passes.
"""
ret = []
for pass_ in self.working_list:
ret.append(pass_.dump_passes())
return ret
class FlowController():
    """Base class for the pass working-list structures. Iterating over a
    FlowController yields the next pass to run."""

    # name -> controller class; insertion order defines dispatch precedence
    # in controller_factory().
    registered_controllers = OrderedDict()

    def __init__(self, passes, options, **partial_controller):
        self._passes = passes
        self.passes = FlowController.controller_factory(passes, options, **partial_controller)
        self.options = options

    def __iter__(self):
        yield from self.passes

    def dump_passes(self):
        """Fetches the passes added to this flow controller.

        Returns (dict): {'options': self.options, 'passes': [passes], 'type': type(self)}
        """
        dumped = [item.dump_passes() if isinstance(item, FlowController) else item
                  for item in self._passes]
        return {'options': self.options, 'passes': dumped, 'type': type(self)}

    @classmethod
    def add_flow_controller(cls, name, controller):
        """Adds a flow controller.

        Args:
            name (string): Name of the controller to add.
            controller (type(FlowController)): The class implementing a flow controller.
        """
        cls.registered_controllers[name] = controller

    @classmethod
    def remove_flow_controller(cls, name):
        """Removes a flow controller.

        Args:
            name (string): Name of the controller to remove.
        Raises:
            KeyError: If the controller to remove was not registered.
        """
        if name not in cls.registered_controllers:
            raise KeyError("Flow controller not found: %s" % name)
        del cls.registered_controllers[name]

    @classmethod
    def controller_factory(cls, passes, options, **partial_controller):
        """Constructs a flow controller based on the partially evaluated controller arguments.

        Args:
            passes (list[BasePass]): passes to add to the flow controller.
            options (dict): PassManager options.
            **partial_controller (dict): Partially evaluated controller arguments in the form
                `{name:partial}`

        Raises:
            TranspilerError: When partial_controller is not well-formed.

        Returns:
            FlowController: A FlowController instance.
        """
        if None in partial_controller.values():
            raise TranspilerError('The controller needs a condition.')

        # No conditions supplied: plain linear execution.
        if not partial_controller:
            return FlowControllerLinear(passes, options)

        # Dispatch on the first registered name present among the conditions.
        for name, controller_cls in cls.registered_controllers.items():
            if name in partial_controller:
                return controller_cls(passes, options, **partial_controller)
        raise TranspilerError("The controllers for %s are not registered" % partial_controller)
class FlowControllerLinear(FlowController):
    """The basic controller: runs the passes one after the other, exactly once."""

    def __init__(self, passes, options):  # pylint: disable=super-init-not-called
        self._passes = passes
        self.passes = passes
        self.options = options
class DoWhileController(FlowController):
    """Implements a set of passes in a do-while loop."""

    def __init__(self, passes, options, do_while=None,
                 **partial_controller):
        self.do_while = do_while
        self.max_iteration = options['max_iteration']
        super().__init__(passes, options, **partial_controller)

    def __iter__(self):
        iteration = 0
        # Repeat the passset until the condition goes False or the cap is hit.
        while iteration < self.max_iteration:
            yield from self.passes
            if not self.do_while():
                return
            iteration += 1
        raise TranspilerError("Maximum iteration reached. max_iteration=%i" % self.max_iteration)
class ConditionalController(FlowController):
    """Implements a set of passes under a certain condition."""

    def __init__(self, passes, options, condition=None,
                 **partial_controller):
        self.condition = condition
        super().__init__(passes, options, **partial_controller)

    def __iter__(self):
        # Yield nothing at all when the condition does not hold.
        if not self.condition():
            return
        yield from self.passes
# Default controllers
# NOTE: registration order matters. controller_factory() walks
# registered_controllers in insertion order and dispatches on the first
# registered name found among the supplied conditions, so 'condition'
# takes precedence over 'do_while' when both are given.
FlowController.add_flow_controller('condition', ConditionalController)
FlowController.add_flow_controller('do_while', DoWhileController)
| 39.336283
| 99
| 0.624747
|
4a0416cd69fd06474a155dd2763588c0f705a0d2
| 29,824
|
py
|
Python
|
Jumpscale/clients/blockchain/tfchain/tests/7_balance_drain.py
|
threefoldtech/JumpscaleX
|
5fb073a82aeb0e66fc7d9660c45a1e31bc094bfa
|
[
"Apache-2.0"
] | 2
|
2019-05-09T07:21:25.000Z
|
2019-08-05T06:37:53.000Z
|
Jumpscale/clients/blockchain/tfchain/tests/7_balance_drain.py
|
threefoldtech/JumpscaleX
|
5fb073a82aeb0e66fc7d9660c45a1e31bc094bfa
|
[
"Apache-2.0"
] | 664
|
2018-12-19T12:43:44.000Z
|
2019-08-23T04:24:42.000Z
|
Jumpscale/clients/blockchain/tfchain/tests/7_balance_drain.py
|
threefoldtech/jumpscale10
|
5fb073a82aeb0e66fc7d9660c45a1e31bc094bfa
|
[
"Apache-2.0"
] | 7
|
2019-05-03T07:14:37.000Z
|
2019-08-05T12:36:52.000Z
|
from Jumpscale import j
from Jumpscale.clients.blockchain.tfchain.stub.ExplorerClientStub import TFChainExplorerGetClientStub
def main(self):
"""
to run:
kosmos 'j.clients.tfchain.test(name="balance_drain")'
"""
# create a tfchain client for devnet
c = j.clients.tfchain.new("mydevclient", network_type="DEV")
# or simply `c = j.tfchain.clients.mydevclient`, should the client already exist
# (we replace internal client logic with custom logic as to ensure we can test without requiring an active network)
explorer_client = TFChainExplorerGetClientStub()
explorer_client.hash_add(
"000000000000000000000000000000000000000000000000000000000000000000000000000000",
'{"hashtype":"unlockhash","block":{"minerpayoutids":null,"transactions":null,"rawblock":{"parentid":"0000000000000000000000000000000000000000000000000000000000000000","timestamp":0,"pobsindexes":{"BlockHeight":0,"TransactionIndex":0,"OutputIndex":0},"minerpayouts":null,"transactions":null},"blockid":"0000000000000000000000000000000000000000000000000000000000000000","difficulty":"0","estimatedactivebs":"0","height":0,"maturitytimestamp":0,"target":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"totalcoins":"0","arbitrarydatatotalsize":0,"minerpayoutcount":0,"transactioncount":0,"coininputcount":0,"coinoutputcount":0,"blockstakeinputcount":0,"blockstakeoutputcount":0,"minerfeecount":0,"arbitrarydatacount":0},"blocks":null,"transaction":{"id":"0000000000000000000000000000000000000000000000000000000000000000","height":0,"parent":"0000000000000000000000000000000000000000000000000000000000000000","rawtransaction":{"version":0,"data":{"coininputs":[],"minerfees":null}},"coininputoutputs":null,"coinoutputids":null,"coinoutputunlockhashes":null,"blockstakeinputoutputs":null,"blockstakeoutputids":null,"blockstakeunlockhashes":null,"unconfirmed":false},"transactions":[{"id":"00d1eb537582e31f86a818b32e3a8e10110c1e4348b2d1d0c6746b4b75f3ddc8","height":8887,"parent":"09548093238b2592cc88e0e834a641bf2bcc6fe85275e50d0e179f720a5157c7","rawtransaction":{"version":1,"data":{"coininputs":[{"parentid":"fb0ce589309af98870f7aa1b620948f8fbca2900d0729bfe1e4f501b45ae87c3","fulfillment":{"type":1,"data":{"publickey":"ed25519:89ba466d80af1b453a435175dbba6da7718e9cb19c64c0ed41fca3e6982e3636","signature":"d0ff7079fcf804a011c7bfc226a2d9fc3ab07fbe135abbe44ebb81d64a59cb90bd647c48747e305d8af2c13ac04b8e4eb992156beeef3f31a74566684ad0c009"}}}],"coinoutputs":[{"value":"1","condition":{}},{"value":"2999999999","condition":{"type":1,"data":{"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}}}],"minerfees":["1000000000"]}},"coininputoutputs":[{"
value":"4000000000","condition":{"type":1,"data":{"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}},"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}],"coinoutputids":["4be59838a2baaf69afbc558e961aae584f69b74ab72321799f380d02d5adea01","1744c053787a5662fad3651c0ed6c68b8bb5584370a81c95aa15cfcf220bfe13"],"coinoutputunlockhashes":["","0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"],"blockstakeinputoutputs":null,"blockstakeoutputids":null,"blockstakeunlockhashes":null,"unconfirmed":false},{"id":"040e33a58c70e3f912b0851650ba6708c6e167b05bbe1f15fc5e870e46de435a","height":8830,"parent":"a06da42631ed8bc80ba9b383474172900fa6b804758b2f6d866d95542b4b4a28","rawtransaction":{"version":1,"data":{"coininputs":[{"parentid":"0846ce4e40bd153f4b24c1131908ba87e2c99d78615c16eaac846cb3ca033562","fulfillment":{"type":1,"data":{"publickey":"ed25519:89ba466d80af1b453a435175dbba6da7718e9cb19c64c0ed41fca3e6982e3636","signature":"90e6d2c8bb8d5ba7d5edcf38cf87d7ce1ec9d936b6939c05571f5317fd83ba495f895adc8695eedd35087c08f780eff05a74240bf1245eac920971c604892802"}}}],"coinoutputs":[{"value":"1000000000","condition":{}}],"minerfees":["1000000000"]}},"coininputoutputs":[{"value":"2000000000","condition":{"type":1,"data":{"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}},"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}],"coinoutputids":["6eb896edab9539b41077a7fb540a4987d5e8b434ec77e150c1e571d2883652f2"],"coinoutputunlockhashes":[""],"blockstakeinputoutputs":null,"blockstakeoutputids":null,"blockstakeunlockhashes":null,"unconfirmed":false},{"id":"3d3ef900a0c54b45430c6248cf7bfbbbf92da663c84b73b10794eb2a9b0a74f5","height":7023,"parent":"e12271a40b3959d64cdf5d3845546c568a7db9313ca7b117a03454469cd6b348","rawtransaction":{"version":129,"data":{"nonce":"kyA3WfkRL6M=","mintfulfillment":{"type":3,"da
ta":{"pairs":[{"publickey":"ed25519:89ba466d80af1b453a435175dbba6da7718e9cb19c64c0ed41fca3e6982e3636","signature":"fa5560db80a0dcf7676885cf3517685c18bc82d787337299179d7c06271cbf724ced43d45f7bed7364c89f5adf5231264f322dee8d851606db8fd0494b45960e"}]}},"coinoutputs":[{"value":"1000000000","condition":{}}],"minerfees":["1000000000"],"arbitrarydata":"bW9yZSBmb3IgYWxs"}},"coininputoutputs":null,"coinoutputids":["0ffc3aceee0f3f695d1173056998e49e964c72c6b9d9ce08258f51cce6d9cd18"],"coinoutputunlockhashes":[""],"blockstakeinputoutputs":null,"blockstakeoutputids":null,"blockstakeunlockhashes":null,"unconfirmed":false},{"id":"422ff9ec3d34e263a9eb910c41b4c031e5ff0b8ba9dc5377518e7ed2cfda72ec","height":8872,"parent":"2b465d727d65da5c2986ed8b9b1a3211cd27f7b99f4bc887c801f7bbfb05f884","rawtransaction":{"version":1,"data":{"coininputs":[{"parentid":"b4c0b5d51891608fc2bf0c93c001325ee5048ffdaf6239bca99b9cf46cbe6932","fulfillment":{"type":3,"data":{"pairs":[{"publickey":"ed25519:89ba466d80af1b453a435175dbba6da7718e9cb19c64c0ed41fca3e6982e3636","signature":"7303e9e048ee66def55305fb069d9a75cc15e96168bd85ca102dfdea2c26a4e3776adcae341610e85217508cf24a3a82f711f12286d1de37fc248b375556fd08"},{"publickey":"ed25519:9e310aa31e236f4f1da9c5384138674dc68323da2d8d6cf6e8ee5055b88b61e3","signature":"acd50c4825e639e6423b07022b5b69125ee04014adc6b870e358222dc4a7b2733200ad68aa4f978fba2e306acb06912ae625cfd697f42d9d20bdac3e7faf5f0f"}]}}}],"coinoutputs":[{"value":"42000000000","condition":{}},{"value":"390999999534","condition":{"type":4,"data":{"unlockhashes":["0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab","01822fd5fefd2748972ea828a5c56044dec9a2b2275229ce5b212f926cd52fba015846451e4e46"],"minimumsignaturecount":2}}}],"minerfees":["1000000000"]}},"coininputoutputs":[{"value":"433999999534","condition":{"type":4,"data":{"unlockhashes":["0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab","01822fd5fefd2748972ea828a5c56044dec9a2b2275229ce5b212f926cd52fba
015846451e4e46"],"minimumsignaturecount":2}},"unlockhash":"03e9dbb15388d815ecb1d898bf94cc60e37053d12c7fe97bba2578c8b6a7dbdfb0b3caff77c6c6"}],"coinoutputids":["170815e3fd93f34e5b40644dd116efcfa27fd3e4f6992a68759978336a16fe5e","8ffb6836d68e12a9eb99b8b312399832cdfcbe461d75c3ceca6256b5afabe29e"],"coinoutputunlockhashes":["","03e9dbb15388d815ecb1d898bf94cc60e37053d12c7fe97bba2578c8b6a7dbdfb0b3caff77c6c6"],"blockstakeinputoutputs":null,"blockstakeoutputids":null,"blockstakeunlockhashes":null,"unconfirmed":false},{"id":"617c2fd23b25a55c688e0cdb97c456b51bda09fdac24bc08e20196c25a3f4f95","height":6825,"parent":"7798fefb7ff9d07672c10028b1e26c1fe44865bbf47c150cb7dabb539dce8cb3","rawtransaction":{"version":1,"data":{"coininputs":[{"parentid":"d5b4eddf4472bb5014b132ec331fd5e09421917d183ce31a58fa7a272b01b25d","fulfillment":{"type":1,"data":{"publickey":"ed25519:89ba466d80af1b453a435175dbba6da7718e9cb19c64c0ed41fca3e6982e3636","signature":"f15072fedb61b6526c65f8dc0bee871bf51fe413de8bddf6a74edc0985aec41784cb54c4d756f0bc36e871a981e46ce23361345d8d092c9212ea92a995897c06"}}}],"coinoutputs":[{"value":"10000000000","condition":{"type":1,"data":{"unlockhash":"01f7e0686b2d38b3dee9295416857b06037a632ffe1d769153abcd522ab03d6a11b2a7d9383214"}}},{"value":"89000000000","condition":{}}],"minerfees":["1000000000"],"arbitrarydata":"ZnJvbSBtZSAoMik="}},"coininputoutputs":[{"value":"100000000000","condition":{"type":1,"data":{"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}},"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}],"coinoutputids":["b910bb831df2454bc739f90a9854649f0f6a6be215497e14205a8e25d159d551","445579891a0c84b3b362f5266204c7b34cebe50b9d55ea6c9a05048baf7b5bf2"],"coinoutputunlockhashes":["01f7e0686b2d38b3dee9295416857b06037a632ffe1d769153abcd522ab03d6a11b2a7d9383214",""],"blockstakeinputoutputs":null,"blockstakeoutputids":null,"blockstakeunlockhashes":null,"unconfirmed":false},{"id":"7273cb1475fe04e
a9f0ba1fd3e201fc39b416a580a5d0b3b8444e2e49b75bd95","height":8766,"parent":"c7db16c052e7899b03ff2e31d1dbd176420f816199c884a5ca080a328b028965","rawtransaction":{"version":1,"data":{"coininputs":[{"parentid":"d3446e886a480405f74a78bc8347f5b822abc58622fa0ef102f4f4272dede799","fulfillment":{"type":1,"data":{"publickey":"ed25519:89ba466d80af1b453a435175dbba6da7718e9cb19c64c0ed41fca3e6982e3636","signature":"5206f05fe42b4ba41658284dab74ab7f70ef47513d4ef4e0e9a9f361df8eee16d524ffdb311593d1373fa838447c7a36434568c833ed5820224b64334fd6ab0c"}}}],"coinoutputs":[{"value":"1000000000","condition":{}},{"value":"2000000000","condition":{"type":1,"data":{"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}}}],"minerfees":["1000000000"]}},"coininputoutputs":[{"value":"4000000000","condition":{"type":1,"data":{"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}},"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}],"coinoutputids":["4f524b591aea65c5b36c8ef18102f2a69d6a7e07c54e3cedbd6fc85ac8ed2611","0846ce4e40bd153f4b24c1131908ba87e2c99d78615c16eaac846cb3ca033562"],"coinoutputunlockhashes":["","0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"],"blockstakeinputoutputs":null,"blockstakeoutputids":null,"blockstakeunlockhashes":null,"unconfirmed":false},{"id":"8603ede968d434a5c61cf6cc8bf474aa6225823e72d500ad20b90293408a2694","height":11264,"parent":"ff6d7607ae89047482eb647ba915a1e305fcc5d0115686ab69d6cbf35f6b82b6","rawtransaction":{"version":1,"data":{"coininputs":[{"parentid":"e2a567179c6e9fda9d30d5476a74cbc297829852afc23c644b566555a71e2a3b","fulfillment":{"type":1,"data":{"publickey":"ed25519:89ba466d80af1b453a435175dbba6da7718e9cb19c64c0ed41fca3e6982e3636","signature":"77b026caf2cb18ccd628043e9f8728a064036d1ee845eb638b9dc44a4c7a7171e2513d2161172b574341268d7c142d78e20af978576265c1c0bd37a2327b250e"}}},{"parentid":"996d4956f5de5354a63ff6
44bcac26c30f3fe1f2a18ae17a457cd999bd8e9447","fulfillment":{"type":1,"data":{"publickey":"ed25519:89ba466d80af1b453a435175dbba6da7718e9cb19c64c0ed41fca3e6982e3636","signature":"658dc60c13bd677679a904b0e7dc847b471841648679de40586aa2542a2b235c3dd3ece8e6b7b74678107d782c7608db4c6695f18ab1192a6048d1a9c423da09"}}}],"coinoutputs":[{"value":"100000000000","condition":{}},{"value":"15000000000","condition":{"type":1,"data":{"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}}}],"minerfees":["1000000000"]}},"coininputoutputs":[{"value":"31000000000","condition":{"type":1,"data":{"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}},"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"},{"value":"85000000000","condition":{"type":1,"data":{"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}},"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}],"coinoutputids":["ea75d01b64a05e1652bbc334a9fabf8ec0fa11f02c7d3657b4be3f3270a927d5","cf83f6453aaa7a49db8c448a86bf5e26715075a5b608e15d1628df5d2e1ed4ae"],"coinoutputunlockhashes":["","0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"],"blockstakeinputoutputs":null,"blockstakeoutputids":null,"blockstakeunlockhashes":null,"unconfirmed":false},{"id":"972482a9407b16272b6f79e7df27d1ebe29587ab76081c8569f098fc33998768","height":11264,"parent":"ff6d7607ae89047482eb647ba915a1e305fcc5d0115686ab69d6cbf35f6b82b6","rawtransaction":{"version":1,"data":{"coininputs":[{"parentid":"1744c053787a5662fad3651c0ed6c68b8bb5584370a81c95aa15cfcf220bfe13","fulfillment":{"type":1,"data":{"publickey":"ed25519:89ba466d80af1b453a435175dbba6da7718e9cb19c64c0ed41fca3e6982e3636","signature":"9085545b4884a0724925ad88e311199961a51e018f985ba262d7516a0293c416e774ead5f1de44012e784522b12cc43bc3ba1680b4df1bf25785ab2e7094c200"}}}],"coinoutputs":
[{"value":"1000000000","condition":{}},{"value":"999999999","condition":{"type":1,"data":{"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}}}],"minerfees":["1000000000"]}},"coininputoutputs":[{"value":"2999999999","condition":{"type":1,"data":{"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}},"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}],"coinoutputids":["4834fadc322aa9cfdef0294e98624424c1f972523d335f8ae45175b1035f8958","de7ce2184e2a5c6f2f0d643a1ea08902459c2ee983127051f240a6819ae4bc35"],"coinoutputunlockhashes":["","0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"],"blockstakeinputoutputs":null,"blockstakeoutputids":null,"blockstakeunlockhashes":null,"unconfirmed":false},{"id":"9e537af0eb87f6820341a7b435195ed89f0b3e003bb4c3715f30d6a713645bb7","height":7055,"parent":"fd5cfca2da14430c6e42ae65d15af9268addc469258cdb62d14553c8f05757a3","rawtransaction":{"version":129,"data":{"nonce":"IBKoTH6nDgM=","mintfulfillment":{"type":1,"data":{"publickey":"ed25519:32b9bdde2a079a4e0b75ab38b281ed67baabeff5dc27b1792190a0be900f6d90","signature":"e679cbddecb145d3d79fc4604c1ddd1443f59340e0e2f968623713b1d540615cd253a305bd456cb0f0cc125d3a2a2aa60408518927233884c53b3dba045a1e02"}},"coinoutputs":[{"value":"1000000000","condition":{}}],"minerfees":["1000000000"],"arbitrarydata":"bW9yZSBmb3IgYWxs"}},"coininputoutputs":null,"coinoutputids":["ebd166582e892cdbbc01b71c29b9a0e64f0f69eb91e7a1d99ffde99307ecaa2e"],"coinoutputunlockhashes":[""],"blockstakeinputoutputs":null,"blockstakeoutputids":null,"blockstakeunlockhashes":null,"unconfirmed":false},{"id":"9fcf663997809994acf89899f029e9e1eccb4ee64390af3797f771a61ea14575","height":6805,"parent":"ef827bb13a75e61126fe75d9a6fd1956a10a5feb824259c03fe04e1dc9a57968","rawtransaction":{"version":1,"data":{"coininputs":[{"parentid":"47e816e82e699e4e15c2d03d1a69d24a8c49933a4fc09f2d28d23c3fbf3b8
c90","fulfillment":{"type":1,"data":{"publickey":"ed25519:89ba466d80af1b453a435175dbba6da7718e9cb19c64c0ed41fca3e6982e3636","signature":"99904e9745db92a8ac155ac9b4a59de8fde2757269ce76ed2640f010cc9b6235774115193fc55c96cdced3997f6688ffad7a4db56a2809c41cc591b46d746b02"}}}],"coinoutputs":[{"value":"10000000000","condition":{"type":1,"data":{"unlockhash":"01f7e0686b2d38b3dee9295416857b06037a632ffe1d769153abcd522ab03d6a11b2a7d9383214"}}},{"value":"475999999500","condition":{}}],"minerfees":["1000000000"],"arbitrarydata":"ZnJvbSBtZSgxKQ=="}},"coininputoutputs":[{"value":"486999999500","condition":{"type":1,"data":{"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}},"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}],"coinoutputids":["d4d8124abbecf29f965dc4186bbaa4c42758202775310522e9168609e85952ad","28bcc4bdef67f304b64b4525443c5257f775ea89122a6b9f2f4526af701aaeff"],"coinoutputunlockhashes":["01f7e0686b2d38b3dee9295416857b06037a632ffe1d769153abcd522ab03d6a11b2a7d9383214",""],"blockstakeinputoutputs":null,"blockstakeoutputids":null,"blockstakeunlockhashes":null,"unconfirmed":false},{"id":"c15ccc0e6120d156104b9b2fa64adf6688a4d6b9bfaa56b87215b5d1b92c076a","height":12165,"parent":"231f22fab108316d06bd3c33a8189560ea5e7322dfb62dcaf6f21ea437ceedfc","rawtransaction":{"version":1,"data":{"coininputs":[{"parentid":"8908d1b7af0ed9d9a16c00dcbccd583c027c737df4233cd70c5c94abf6f177ff","fulfillment":{"type":1,"data":{"publickey":"ed25519:89ba466d80af1b453a435175dbba6da7718e9cb19c64c0ed41fca3e6982e3636","signature":"bb0cef5bef645b4a8cd77986bf5923dbb54415157bb113615da71fdded9109f8e30e04264066381c3d2782f1733063076974596d9c9faf76e420fc9dcd763704"}}}],"coinoutputs":[{"value":"1000000000","condition":{"type":3,"data":{"locktime":1609412400,"condition":{}}}},{"value":"11000000000","condition":{"type":1,"data":{"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}}}],"mi
nerfees":["1000000000"],"arbitrarydata":"aGFwcHkgZWFybHkgbmV3IHllYXI="}},"coininputoutputs":[{"value":"13000000000","condition":{"type":1,"data":{"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}},"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}],"coinoutputids":["ed1a8a793203d59c9a43483a78fc0143f5cab3881e94c928ecc46443127ae14f","efc235db4100bc88a01bf48cb15df0ac5f3efc2aa634c563dc7b61b9c837e083"],"coinoutputunlockhashes":["","0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"],"blockstakeinputoutputs":null,"blockstakeoutputids":null,"blockstakeunlockhashes":null,"unconfirmed":false},{"id":"c1faca5dbf14484df8ea5ee9555946d34853f1af154fb2ecf45802af3fd58390","height":7011,"parent":"97e2632ca8941a04ceaf688aec591926726a5786adba5887d3372bf98c5c3bd0","rawtransaction":{"version":129,"data":{"nonce":"Q9QjsC/7ixI=","mintfulfillment":{"type":1,"data":{"publickey":"ed25519:89ba466d80af1b453a435175dbba6da7718e9cb19c64c0ed41fca3e6982e3636","signature":"8e0fd7a3f710b6bece93e9fcdf0da742512bd3010f068348f242cda97f914dad3fac922f7225023d501c42e3a944910d5baa4df5b0e23f777332f86b329b3408"}},"coinoutputs":[{"value":"1000000000000000","condition":{"type":3,"data":{"locktime":42,"condition":{}}}}],"minerfees":["1000000000"],"arbitrarydata":"Y29pbnMgZm9yIGFsbCB0aGUgcGVvcGxl"}},"coininputoutputs":null,"coinoutputids":["5cc1cdcd0962403ee112509033c87eca5e07468f3c996f1f1e3240a6e806a920"],"coinoutputunlockhashes":[""],"blockstakeinputoutputs":null,"blockstakeoutputids":null,"blockstakeunlockhashes":null,"unconfirmed":false},{"id":"c4c17978c29e4fbc1c202c9b46b641efad059250f0fa4009d19ff252425f7e40","height":12181,"parent":"1cae514a987b8d5d5f13ee9d22998146c75bc633078f43df3540ab8f8ad1f7a4","rawtransaction":{"version":1,"data":{"coininputs":[{"parentid":"e4691377980ad92560769a1ae7161f7e4cceec065542ae72000a66fbb3154232","fulfillment":{"type":1,"data":{"publickey":"ed25519:89ba466d80af1b45
3a435175dbba6da7718e9cb19c64c0ed41fca3e6982e3636","signature":"aa489200decf5b2c7b9623f0c5819d9ece8cbd8115e71b65e5965ddd10521ab28a5a12d829010d88524a46783bd04ab60de21e5d7f85f057b2290c32b06aa009"}}}],"coinoutputs":[{"value":"1000000000","condition":{}},{"value":"2000000000","condition":{"type":1,"data":{"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}}}],"minerfees":["1000000000"],"arbitrarydata":"bW9yZSBmcmVlIG1vbmV5"}},"coininputoutputs":[{"value":"4000000000","condition":{"type":3,"data":{"locktime":1262300400,"condition":{"type":1,"data":{"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}}}},"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}],"coinoutputids":["126e7a270a3548a67e7756497506a4a53e578d5b818d7dec28117f92f8b74a6d","e04651829ae0e4636a958057c1426c4fc63569ccb04406b54bce472c3322af86"],"coinoutputunlockhashes":["","0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"],"blockstakeinputoutputs":null,"blockstakeoutputids":null,"blockstakeunlockhashes":null,"unconfirmed":false},{"id":"ce1b49ec09d52e18b8e9038f6fc4a51627f1a417cfc044d995cce416f06b7802","height":6827,"parent":"dbd1bc09dd3c501a8674a699c297cd1dd50bf3d0d87a2c79a80c313f85375361","rawtransaction":{"version":1,"data":{"coininputs":[{"parentid":"fe236bf66b542be84d50e0445449f6eddb11ffd93aa044040a73e35a3b33d8d9","fulfillment":{"type":1,"data":{"publickey":"ed25519:89ba466d80af1b453a435175dbba6da7718e9cb19c64c0ed41fca3e6982e3636","signature":"116602636fa757b92b1ac06837cbcfecb7e0ceb553cc07ef819b557b872cc671ec27971171ba180595fa4d35bc748d6be9e899d0802179a355c07f2d9739100e"}}}],"coinoutputs":[{"value":"10000000000","condition":{"type":1,"data":{"unlockhash":"01f7e0686b2d38b3dee9295416857b06037a632ffe1d769153abcd522ab03d6a11b2a7d9383214"}}},{"value":"89000000000","condition":{}}],"minerfees":["1000000000"],"arbitrarydata":"ZnJvbSBtZSAoMyk="}},"coininputoutp
uts":[{"value":"100000000000","condition":{"type":1,"data":{"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}},"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}],"coinoutputids":["de63dc7d73748cb41909688a29f52e7eb32aef37c30738d1e5c993bab26c7066","5ff190d50ea0d63ecfdd1500aa146344864c82512e1492947405552e1689d31a"],"coinoutputunlockhashes":["01f7e0686b2d38b3dee9295416857b06037a632ffe1d769153abcd522ab03d6a11b2a7d9383214",""],"blockstakeinputoutputs":null,"blockstakeoutputids":null,"blockstakeunlockhashes":null,"unconfirmed":false},{"id":"ce8f3ee3835afb7f587ab2473bf9eaacda7dbd27f8f36761ca62f9f12af68ecc","height":7026,"parent":"b38334513e11a5b93988af07b6ea8f39c808be6a3b0478f9f8a6bd7728a5f235","rawtransaction":{"version":129,"data":{"nonce":"Ewj99diiO6U=","mintfulfillment":{"type":3,"data":{"pairs":[{"publickey":"ed25519:9e310aa31e236f4f1da9c5384138674dc68323da2d8d6cf6e8ee5055b88b61e3","signature":"d39465e4522a08de394001160d0da5fd3dc4c2ca53ee6c571bf368786a66d137ace2b2ac927d09195ea32bd9e4cb6fc979e7994b8ea99b638f29408f41388207"},{"publickey":"ed25519:89ba466d80af1b453a435175dbba6da7718e9cb19c64c0ed41fca3e6982e3636","signature":"d5bf7d9948d9125fdb15a17cf03c30405ebd4d4dbbbb3d86884edd5062d6ceddd7b0a54c1c2f5d80f4e6ca352a9a66c2642a89d1b9ae185face9af6c740bfe00"}]}},"coinoutputs":[{"value":"42000000000","condition":{}}],"minerfees":["1000000000"]}},"coininputoutputs":null,"coinoutputids":["63891de50dcb285689e1cd02618917ab9df4c6001f14e63532a5a75716ec0b6c"],"coinoutputunlockhashes":[""],"blockstakeinputoutputs":null,"blockstakeoutputids":null,"blockstakeunlockhashes":null,"unconfirmed":false},{"id":"ffd4ebd18d71c58674b28b116fc1cf67385a71b01b43104d817488dfd7e1c4c1","height":12159,"parent":"b93a31ee0eb16200101b07a4fd962bb8a8d928682b8d252a818fd7b56ff459e3","rawtransaction":{"version":1,"data":{"coininputs":[{"parentid":"cf83f6453aaa7a49db8c448a86bf5e26715075a5b608e15d1628df5d2e1ed4ae","fulfillment"
:{"type":1,"data":{"publickey":"ed25519:89ba466d80af1b453a435175dbba6da7718e9cb19c64c0ed41fca3e6982e3636","signature":"59cc566ac52b46eccf63480530dede3f773f289137ae3b1b333bdb09ae2d065983828af1751b28be76c31d61e445ca0e24abda60d1403b08b9edaaeab9e91402"}}}],"coinoutputs":[{"value":"1000000000","condition":{"type":3,"data":{"locktime":1549571431,"condition":{}}}},{"value":"13000000000","condition":{"type":1,"data":{"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}}}],"minerfees":["1000000000"]}},"coininputoutputs":[{"value":"15000000000","condition":{"type":1,"data":{"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}},"unlockhash":"0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"}],"coinoutputids":["9a8f6b4524e07d7f0d2ece2a0d00c31fad9082ee21d4966af0ba9130c6f4eb6f","8908d1b7af0ed9d9a16c00dcbccd583c027c737df4233cd70c5c94abf6f177ff"],"coinoutputunlockhashes":["","0107e83d2bd8a7aad7ab0af0c0a0f1f116fb42335f64eeeb5ed1b76bd63e62ce59a3872a7279ab"],"blockstakeinputoutputs":null,"blockstakeoutputids":null,"blockstakeunlockhashes":null,"unconfirmed":false}],"multisigaddresses":null,"unconfirmed":false}',
)
explorer_client.chain_info = '{"blockid":"552e410481cce1358ffcd4687f4199dd2181c799d55da26178e55643355bbd2e","difficulty":"27801","estimatedactivebs":"59","height":3644,"maturitytimestamp":1549012510,"target":[0,2,91,116,78,165,130,72,116,162,127,4,125,67,108,16,140,247,132,198,107,159,114,177,44,25,18,162,38,157,169,245],"totalcoins":"0","arbitrarydatatotalsize":6,"minerpayoutcount":3650,"transactioncount":3652,"coininputcount":12,"coinoutputcount":15,"blockstakeinputcount":3644,"blockstakeoutputcount":3645,"minerfeecount":7,"arbitrarydatacount":1}'
explorer_client.hash_add(
"552e410481cce1358ffcd4687f4199dd2181c799d55da26178e55643355bbd2e",
'{"hashtype":"blockid","block":{"minerpayoutids":["468db689f752414702ef3a5aa06238f03a4539434a61624b3b8a0fb5dc38a211"],"transactions":[{"id":"2396f8e57bbb9b22bd1d749d5de3fd532ea6886e9660a556a13571d701d83e27","height":3644,"parent":"552e410481cce1358ffcd4687f4199dd2181c799d55da26178e55643355bbd2e","rawtransaction":{"version":1,"data":{"coininputs":null,"blockstakeinputs":[{"parentid":"ff5a002ec356b7cb24fbee9f076f239fb8c72d5a8a448cee92ee6d29a87aef52","fulfillment":{"type":1,"data":{"publickey":"ed25519:d285f92d6d449d9abb27f4c6cf82713cec0696d62b8c123f1627e054dc6d7780","signature":"7bec94dfb87640726c6a14de2110599db0f81cf9fa456249e7bf79b0c74b79517edde25c4ee87f181880af44fe6ee054ff20b74eda2144fe07fa5bfb9d884208"}}}],"blockstakeoutputs":[{"value":"3000","condition":{"type":1,"data":{"unlockhash":"015a080a9259b9d4aaa550e2156f49b1a79a64c7ea463d810d4493e8242e6791584fbdac553e6f"}}}],"minerfees":null}},"coininputoutputs":null,"coinoutputids":null,"coinoutputunlockhashes":null,"blockstakeinputoutputs":[{"value":"3000","condition":{"type":1,"data":{"unlockhash":"015a080a9259b9d4aaa550e2156f49b1a79a64c7ea463d810d4493e8242e6791584fbdac553e6f"}},"unlockhash":"015a080a9259b9d4aaa550e2156f49b1a79a64c7ea463d810d4493e8242e6791584fbdac553e6f"}],"blockstakeoutputids":["f683e7319659c61f54e93546bc41b57c5bffe79de26c06ec7371034465804c81"],"blockstakeunlockhashes":["015a080a9259b9d4aaa550e2156f49b1a79a64c7ea463d810d4493e8242e6791584fbdac553e6f"],"unconfirmed":false}],"rawblock":{"parentid":"47db4274551b0372564f8d1ab89c596428f00e460c0b416327e53983c8765198","timestamp":1549012665,"pobsindexes":{"BlockHeight":3643,"TransactionIndex":0,"OutputIndex":0},"minerpayouts":[{"value":"10000000000","unlockhash":"015a080a9259b9d4aaa550e2156f49b1a79a64c7ea463d810d4493e8242e6791584fbdac553e6f"}],"transactions":[{"version":1,"data":{"coininputs":null,"blockstakeinputs":[{"parentid":"ff5a002ec356b7cb24fbee9f076f239fb8c72d5a8a448cee92ee6d29a87aef52","fulfillment":{"type":1,"data":{"publickey":"ed25519:d285f92d6d4
49d9abb27f4c6cf82713cec0696d62b8c123f1627e054dc6d7780","signature":"7bec94dfb87640726c6a14de2110599db0f81cf9fa456249e7bf79b0c74b79517edde25c4ee87f181880af44fe6ee054ff20b74eda2144fe07fa5bfb9d884208"}}}],"blockstakeoutputs":[{"value":"3000","condition":{"type":1,"data":{"unlockhash":"015a080a9259b9d4aaa550e2156f49b1a79a64c7ea463d810d4493e8242e6791584fbdac553e6f"}}}],"minerfees":null}}]},"blockid":"552e410481cce1358ffcd4687f4199dd2181c799d55da26178e55643355bbd2e","difficulty":"27801","estimatedactivebs":"59","height":3644,"maturitytimestamp":1549012510,"target":[0,2,91,116,78,165,130,72,116,162,127,4,125,67,108,16,140,247,132,198,107,159,114,177,44,25,18,162,38,157,169,245],"totalcoins":"0","arbitrarydatatotalsize":6,"minerpayoutcount":3650,"transactioncount":3652,"coininputcount":12,"coinoutputcount":15,"blockstakeinputcount":3644,"blockstakeoutputcount":3645,"minerfeecount":7,"arbitrarydatacount":1},"blocks":null,"transaction":{"id":"0000000000000000000000000000000000000000000000000000000000000000","height":0,"parent":"0000000000000000000000000000000000000000000000000000000000000000","rawtransaction":{"version":0,"data":{"coininputs":[],"minerfees":null}},"coininputoutputs":null,"coinoutputids":null,"coinoutputunlockhashes":null,"blockstakeinputoutputs":null,"blockstakeoutputids":null,"blockstakeunlockhashes":null,"unconfirmed":false},"transactions":null,"multisigaddresses":null,"unconfirmed":false}',
)
c._explorer_get = explorer_client.explorer_get
# you can also get the balance from an unlock hash results
balance = c.unlockhash_get(
"000000000000000000000000000000000000000000000000000000000000000000000000000000"
).balance()
assert len(balance.outputs_spent) == 0
assert str(balance.available) == "1000843.999999501"
assert str(balance.locked) == "2"
# a balance can be drained,
# meaning all confirmed outputs are spent
txns = balance.drain(
recipient="01ffd7c884aa869056bfb832d957bb71a0005fee13c19046cebec84b3a5047ee8829eab070374b",
miner_fee=c.minimum_miner_fee,
data="drain the swamp",
lock="06/06/2018 06:58",
)
assert len(txns) == 1
txn = txns[0]
# the given miner fee is used as the only miner fee
assert len(txn.miner_fees) == 1
assert txn.miner_fees[0] == c.minimum_miner_fee
# the data should be asigned
assert txn.data.value == b"drain the swamp"
# all inputs should be orinating from the balance's available outputs
assert [ci.parentid for ci in txn.coin_inputs] == [co.id for co in balance.outputs_available]
assert len(txn.coin_outputs) == 1
# the only output should be the drain output
co = txn.coin_outputs[0]
assert co.condition.unlockhash == "01ffd7c884aa869056bfb832d957bb71a0005fee13c19046cebec84b3a5047ee8829eab070374b"
assert co.value == (balance.available - c.minimum_miner_fee)
# no block stake inputs or outputs are obviously defined
assert len(txn.blockstake_inputs) == 0
assert len(txn.blockstake_outputs) == 0
# NOTE: balance.drain also takes an optional parameter 'unconfirmed` which is False by default,
# if True unconfirmed outputs will also be used when available.
| 451.878788
| 23,232
| 0.837882
|
4a0416d6fe1903f944a1540a6910feb9e525d8af
| 5,496
|
py
|
Python
|
pyiwfm/reader.py
|
dwr-psandhu/pyiwfm
|
aa06c014f9e6365946b035d39ac0464c2bee6045
|
[
"MIT"
] | 1
|
2022-03-25T16:05:58.000Z
|
2022-03-25T16:05:58.000Z
|
pyiwfm/reader.py
|
dwr-psandhu/pyiwfm
|
aa06c014f9e6365946b035d39ac0464c2bee6045
|
[
"MIT"
] | null | null | null |
pyiwfm/reader.py
|
dwr-psandhu/pyiwfm
|
aa06c014f9e6365946b035d39ac0464c2bee6045
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
import pandas as pd
import os
import re
def read_elements(file):
    """Read an IWFM element-definition file into a DataFrame.

    The header is skipped up to (and including) the line containing the
    'IE' or 'IDE(1)' column marker plus one more line; the remaining
    whitespace-separated table is then parsed, treating lines that start
    with 'C' as comments.

    Parameters
    ----------
    file : str
        Path to the element file.

    Returns
    -------
    pandas.DataFrame
        Element table indexed by the first column (element id).  When the
        parse yields four data columns they are renamed '1'..'4'.
    """
    with open(file, 'r') as fh:
        line = fh.readline()
        # Scan forward to the column-marker line.
        while line.find('IE') < 0 and line.find('IDE(1)') < 0:
            line = fh.readline()
        fh.readline()  # one more line precedes the data
        # r'\s+' fixes the invalid '\s' escape in the former plain string.
        dfe = pd.read_csv(fh, sep=r'\s+',
                          header=None, names=['1', '2', '3', '4', '5'],
                          index_col=0, comment='C')
    # With index_col=0 consuming name '1', a 4-column frame carries labels
    # '2'..'5'; normalize those back to '1'..'4'.
    if len(dfe.columns) == 4:
        dfe.columns = ['1', '2', '3', '4']
    return dfe
def read_nodes(file):
    """Read an IWFM node file into a DataFrame of x/y coordinates.

    Skips the header up to the line containing the '/ND' marker plus one
    more line, then parses the whitespace-separated node table.

    NOTE(review): this function is defined twice in this module with an
    identical body; the later definition shadows this one — consider
    removing one copy.

    Parameters
    ----------
    file : str
        Path to the node file.

    Returns
    -------
    pandas.DataFrame
        Columns 'x' and 'y', indexed by node id; lines starting with 'C'
        are treated as comments.
    """
    with open(file, 'r') as fh:
        line = fh.readline()
        while line.find('/ND') < 0:
            line = fh.readline()
        fh.readline()  # one more line precedes the data
        # r'\s+' fixes the invalid '\s' escape in the former plain string.
        return pd.read_csv(fh, sep=r'\s+',
                           header=None, names=['x', 'y'],
                           index_col=0, comment='C')
def read_nodes(file):
    """Return node x/y coordinates from an IWFM node file, indexed by id.

    The header is consumed up to the '/ND' marker line plus one extra
    line; the rest is a whitespace-separated table with 'C'-prefixed
    comment lines.
    """
    with open(file, 'r') as fh:
        # Advance past the header until the marker line is seen.
        while True:
            line = fh.readline()
            if line.find('/ND') >= 0:
                break
        fh.readline()  # one more line precedes the data
        return pd.read_csv(fh, sep='\s+',
                           header=None, names=['x', 'y'],
                           index_col=0, comment='C')
def read_hydrograph(file):
    """Read hydrograph-output location specs from an IWFM file.

    Scans to the '/ NOUTH' line, takes the first integer on that line as
    the number of hydrograph rows, skips one line, skips any
    'C'-prefixed comment lines, then parses exactly that many
    whitespace-separated rows.

    Parameters
    ----------
    file : str
        Path to the file containing the hydrograph specification block.

    Returns
    -------
    pandas.DataFrame
        One row per hydrograph with columns
        ['iouthl', 'x', 'y', 'iouth', 'sep', 'Calibration_ID'].
    """
    with open(file, 'r') as fh:
        line = fh.readline()
        while line.find('/ NOUTH') < 0:
            line = fh.readline()
        # First integer on the marker line is the row count.
        # r'\d+' fixes the invalid '\d' escape in the former plain string.
        nrows = int(re.findall(r'\d+', line)[0])
        line = fh.readline()  # skip next line before continuing
        line = fh.readline()
        # Skip comment lines, remembering where the data starts so the
        # one over-read line can be rewound below.
        # NOTE(review): assumes at least one 'C' comment line precedes the
        # data; otherwise `pos` is unbound — confirm against the format.
        while line.startswith('C'):
            pos = fh.tell()
            line = fh.readline()
        fh.seek(pos)
        return pd.read_csv(fh, sep=r'\s+',
                           header=None,
                           names=['iouthl', 'x', 'y', 'iouth', 'sep', 'Calibration_ID'],
                           nrows=nrows)
def read_stratigraphy(file):
    """Read an IWFM stratigraphy file into a DataFrame.

    The layer count is taken from the first field of the '/NL' line; each
    layer contributes two data columns, named 'A<i>' and 'L<i>'.  The
    scan then advances to the column-header line and skips one more line
    before parsing the whitespace-separated table.

    Parameters
    ----------
    file : str
        Path to the stratigraphy file.

    Returns
    -------
    pandas.DataFrame
        Indexed by 'NodeID' with column 'GSE' followed by the per-layer
        columns; lines starting with 'C' are treated as comments.
    """
    with open(file, 'r') as fh:
        line = fh.readline()
        # Scan to the '/NL' line, whose first field is the layer count.
        while (line.find('/NL') < 0):
            line = fh.readline()
        fields = line.split()
        nlayers = int(fields[0].strip())  # number of layers in the file
        # Two columns per layer: 'A<i>' and 'L<i>'.
        layer_cols = []
        for i in range(1, nlayers + 1):
            layer_cols.append('A%d' % i)
            layer_cols.append('L%d' % i)
        # Scan to the column-header line.
        # NOTE(review): this condition stops at ANY line containing 'ID',
        # or at a line containing both 'GSE' and 'ELV' — confirm that this
        # matches the intended header test.
        while (line.find('ID') < 0 and line.find('GSE') < 0) or \
                (line.find('ID') < 0 and line.find('ELV') < 0):
            line = fh.readline()
        fh.readline()  # one more line precedes the data
        cols = ['NodeID', 'GSE'] + layer_cols
        # r'\s+' fixes the invalid '\s' escape in the former plain string.
        return pd.read_csv(fh, sep=r'\s+', comment='C', index_col=0,
                           header=None, names=cols, usecols=cols)
def get_index(df0):
# split the first column into date and time
date_col = df0.iloc[:, 0].str.split('_', expand=True).iloc[:, 0]
# set index to date part
idx = pd.to_datetime(date_col)
idx.index = idx.index
idx.name = 'Time'
idx.freq = pd.infer_freq(idx)
return idx
def rearrange(df0, drop_first=False):
if drop_first:
# drop the first columnn as it is index now
df0 = df0.drop(0, axis=1)
df0.columns = df0.columns.astype('str')
df0 = df0.dropna(axis=1)
return df0.astype('float')
def read_gwhead(gwheadfile, nlayers):
dfh = pd.read_fwf(gwheadfile, skiprows=5, sep='\s+', nrows=1)
colspecs = [(i * 12 + 22, i * 12 + 34) for i in range(0, len(dfh.columns))]
colspecs = [(0, 22)] + colspecs
df = pd.read_fwf(gwheadfile, skiprows=6, header=None, sep='\s+', colspecs=colspecs)
# 4 layers --> 4 dataframes, one for each layer
layer_df = {i: df.iloc[i::nlayers] for i in range(nlayers)}
# get index from first layer as a time index
idx = get_index(layer_df[0])
# rearrange and drop columns
layer_df[0] = rearrange(layer_df[0], drop_first=True) # ,cols_shift_left=1)
for i in range(1, nlayers):
layer_df[i] = rearrange(layer_df[i], drop_first=True) # ,cols_shift_left=2)
# set index of each dataframe to time index
for i in range(nlayers):
layer_df[i].index = idx[0:len(layer_df[i])]
return layer_df
# caching for gwhead
def gwh_feather_filename(file, layer=0):
return f'{file}.{layer}.ftr'
def cache_gwh_feather(file, dfgh):
for k in dfgh.keys():
ffile = f'{file}.{k}.ftr'
dfx = dfgh[k].reset_index().to_feather(ffile)
def load_gwh_feather(file, nlayers):
dfgh = {}
for k in range(nlayers):
ffile = f'{file}.{k}.ftr'
df = pd.read_feather(ffile)
dfgh[k] = df.set_index(df.columns[0])
return dfgh
def read_and_cache(gwh_file, nlayers, recache=False):
if recache or not os.path.exists(gwh_feather_filename(gwh_file)):
dfgwh = read_gwhead(gwh_file, nlayers)
cache_gwh_feather(gwh_file, dfgwh)
else:
dfgwh = load_gwh_feather(gwh_file, nlayers)
return dfgwh
#
GridData = namedtuple('GridData', ['elements', 'nodes', 'stratigraphy', 'nlayers'])
def load_data(elements_file, nodes_file, stratigraphy_file):
el = read_elements(elements_file)
nodes = read_nodes(nodes_file)
strat = read_stratigraphy(stratigraphy_file)
# FIXME: not a great way to get layers but works. need a cleaner implementation
nlayers = len(strat.columns) // 2
return GridData(el, nodes, strat, nlayers)
def load_gwh(gwh_file, nlayers, recache=False):
gwh = read_and_cache(gwh_file, nlayers, recache)
return gwh
def diff_heads(dfgwh, dfgwh_base):
for k in dfgwh.keys():
dfgwh[k] = dfgwh[k] - dfgwh_base[k]
return dfgwh
| 31.768786
| 88
| 0.571143
|
4a0417080906315f6da60feff89413e2f17e2bf6
| 1,521
|
py
|
Python
|
third_party/mox3/mox3/tests/test_stubout.py
|
tingshao/catapult
|
a8fe19e0c492472a8ed5710be9077e24cc517c5c
|
[
"BSD-3-Clause"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
third_party/mox3/mox3/tests/test_stubout.py
|
tingshao/catapult
|
a8fe19e0c492472a8ed5710be9077e24cc517c5c
|
[
"BSD-3-Clause"
] | 4,640
|
2015-07-08T16:19:08.000Z
|
2019-12-02T15:01:27.000Z
|
third_party/mox3/mox3/tests/test_stubout.py
|
tingshao/catapult
|
a8fe19e0c492472a8ed5710be9077e24cc517c5c
|
[
"BSD-3-Clause"
] | 698
|
2015-06-02T19:18:35.000Z
|
2022-03-29T16:57:15.000Z
|
# Unit tests for stubout.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a fork of the pymox library intended to work with Python 3.
# The file was modified by quermit@gmail.com and dawid.fatyga@gmail.com
import fixtures
import testtools
from mox3 import mox
from mox3 import stubout
from mox3.tests import stubout_helper
class StubOutForTestingTest(testtools.TestCase):
def setUp(self):
super(StubOutForTestingTest, self).setUp()
self.mox = mox.Mox()
self.useFixture(fixtures.MonkeyPatch(
'mox3.tests.stubout_helper.SampleFunction',
stubout_helper.SampleFunction))
def testSmartSetOnModule(self):
mock_function = self.mox.CreateMockAnything()
mock_function()
stubber = stubout.StubOutForTesting()
stubber.SmartSet(stubout_helper, 'SampleFunction', mock_function)
self.mox.ReplayAll()
stubout_helper.SampleFunction()
self.mox.VerifyAll()
if __name__ == '__main__':
testtools.main()
| 30.42
| 74
| 0.724523
|
4a041726be2588b408a6a52efd2cfde1529cdba5
| 1,040
|
py
|
Python
|
test/python/crash.py
|
webosce/umediaserver
|
988605735c1e35937fbfb7ac28422dee678d70d6
|
[
"Apache-2.0"
] | 8
|
2018-03-17T22:28:05.000Z
|
2021-11-16T15:29:06.000Z
|
test/python/crash.py
|
webosce/umediaserver
|
988605735c1e35937fbfb7ac28422dee678d70d6
|
[
"Apache-2.0"
] | 1
|
2021-05-21T22:51:00.000Z
|
2021-05-21T22:51:00.000Z
|
test/python/crash.py
|
webosce/umediaserver
|
988605735c1e35937fbfb7ac28422dee678d70d6
|
[
"Apache-2.0"
] | 4
|
2018-03-22T18:48:22.000Z
|
2021-11-16T15:29:08.000Z
|
import Queue
import argparse
import sys
from uMediaServer.uMediaClient import MediaPlayer
def wait_reply(recv, tags, timeout = 5):
while tags:
try:
(ev, data) = recv.get(timeout = timeout)
except Queue.Empty: raise Exception('operation timeout')
try:
tags.remove(ev)
except: pass
arg_parser = argparse.ArgumentParser(description='Performs load/unload stress test')
arg_parser.add_argument('files', nargs='+', action ='store', help ='media file to play')
arg_parser.add_argument('-t','--type',type=str,default='sim',help='pipeline type')
arg_parser.add_argument('--tv', type=bool, default=False, help='enable tv-mode')
args = vars(arg_parser.parse_args())
try:
recv = Queue.Queue()
umc = MediaPlayer()
umc.setQueue(recv)
for file in args['files']:
umc.load('file://' + file, args['type'], '{}')
wait_reply(recv, ['load'])
umc.unload()
except Exception as e:
sys.stderr.write('Failed to run test: ' + e.args[0] + '\n')
exit(-1)
| 29.714286
| 88
| 0.646154
|
4a0418bfc0e3107a51051aa12ed432ce65e71782
| 2,014
|
py
|
Python
|
fibonacci/app/views.py
|
shiminsh/callhub-fibonacci
|
869374878b2d5793e19c0f53879d2f89ffca7044
|
[
"MIT"
] | null | null | null |
fibonacci/app/views.py
|
shiminsh/callhub-fibonacci
|
869374878b2d5793e19c0f53879d2f89ffca7044
|
[
"MIT"
] | null | null | null |
fibonacci/app/views.py
|
shiminsh/callhub-fibonacci
|
869374878b2d5793e19c0f53879d2f89ffca7044
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from app.models import Fibonacci
from django.core.cache import cache
# Create your views here.
fib_matrix = [[1,1],
[1,0]]
def matrix_square(A, mod):
return mat_mult(A,A,mod)
def mat_mult(A,B, mod):
if mod is not None:
return [[(A[0][0]*B[0][0] + A[0][1]*B[1][0])%mod, (A[0][0]*B[0][1] + A[0][1]*B[1][1])%mod],
[(A[1][0]*B[0][0] + A[1][1]*B[1][0])%mod, (A[1][0]*B[0][1] + A[1][1]*B[1][1])%mod]]
def matrix_pow(M, power, mod):
#Special definition for power=0:
if power <= 0:
return M
powers = list(reversed([True if i=="1" else False for i in bin(power)[2:]])) #Order is 1,2,4,8,16,...
matrices = [None for _ in powers]
matrices[0] = M
for i in range(1,len(powers)):
matrices[i] = matrix_square(matrices[i-1], mod)
result = None
for matrix, power in zip(matrices, powers):
if power:
if result is None:
result = matrix
else:
result = mat_mult(result, matrix, mod)
return result
def home(request):
context = {}
n = request.GET.get('fibonacci', None)
key = str(n)
if n:
if cache.get(key):
data = cache.get(key)
context['fibonacci'] = data
elif Fibonacci.objects.filter(number=n).exists():
obj = Fibonacci.objects.filter(number=n)
value = obj[0].value
context['fibonacci'] = value
elif int(n) < 1000000:
a,b = 1,1
for i in range(int(n)-1):
a,b = b,a+b
context['fibonacci'] = a
data = Fibonacci(number=n, value=a)
data.save()
cache.set(key, str(a))
else:
ans = matrix_pow(fib_matrix, int(n), 1000000007)[0][1]
context['fibonacci'] = ans
data = Fibonacci(number=n, value=ans)
data.save()
cache.set(key, str(ans))
return render(request, 'index.html', context)
| 27.972222
| 106
| 0.527309
|
4a0419e4ffc1ce852c16b3819ca069d797ca5230
| 448
|
py
|
Python
|
backend/models/validators.py
|
gogaz/coach_royale
|
fc6b9f9021c2470a8bcd8aa27dffef8ec079364c
|
[
"MIT"
] | 10
|
2018-11-08T08:29:17.000Z
|
2021-11-02T11:55:49.000Z
|
backend/models/validators.py
|
gogaz/coach_royale
|
fc6b9f9021c2470a8bcd8aa27dffef8ec079364c
|
[
"MIT"
] | 138
|
2018-09-14T03:47:51.000Z
|
2022-02-01T18:33:09.000Z
|
backend/models/validators.py
|
gogaz/coach_royale
|
fc6b9f9021c2470a8bcd8aa27dffef8ec079364c
|
[
"MIT"
] | null | null | null |
import re
from django.core.validators import RegexValidator
def comma_separated_token_list_validator(sep=',', message=None, code='invalid'):
regexp = re.compile(r'^[\w_-]+(?:%(sep)s[\w_-]+)*\Z' % {
'sep': re.escape(sep),
})
return RegexValidator(regexp, message=message, code=code)
validate_comma_separated_token_list = comma_separated_token_list_validator(
message='Enter only tokens ([\\w_-]+) separated by commas.'
)
| 29.866667
| 80
| 0.703125
|
4a0419f993ca0c0090e802b58041c53460e350b8
| 2,038
|
py
|
Python
|
var/spack/repos/builtin/packages/intel-oneapi-dpl/package.py
|
QianJianhua1/spack
|
363536fd929d2aee280e07780ff6c98498d7be46
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11
|
2015-10-04T02:17:46.000Z
|
2018-02-07T18:23:00.000Z
|
var/spack/repos/builtin/packages/intel-oneapi-dpl/package.py
|
QianJianhua1/spack
|
363536fd929d2aee280e07780ff6c98498d7be46
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22
|
2017-08-01T22:45:10.000Z
|
2022-03-10T07:46:31.000Z
|
var/spack/repos/builtin/packages/intel-oneapi-dpl/package.py
|
QianJianhua1/spack
|
363536fd929d2aee280e07780ff6c98498d7be46
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4
|
2016-06-10T17:57:39.000Z
|
2018-09-11T04:59:38.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import platform
from spack import *
@IntelOneApiPackage.update_description
class IntelOneapiDpl(IntelOneApiLibraryPackage):
"""Intel oneAPI DPL."""
maintainers = ['rscohn2']
homepage = 'https://github.com/oneapi-src/oneDPL'
if platform.system() == 'Linux':
version('2021.7.0',
url='https://registrationcenter-download.intel.com/akdlm/irc_nas/18752/l_oneDPL_p_2021.7.0.631_offline.sh',
sha256='1e2d735d5eccfe8058e18f96d733eda8de5b7a07d613447b7d483fd3f9cec600',
expand=False)
version('2021.6.0',
url='https://registrationcenter-download.intel.com/akdlm/irc_nas/18372/l_oneDPL_p_2021.6.0.501_offline.sh',
sha256='0225f133a6c38b36d08635986870284a958e5286c55ca4b56a4058bd736f8f4f',
expand=False)
version('2021.5.0',
url='https://registrationcenter-download.intel.com/akdlm/irc_nas/18189/l_oneDPL_p_2021.5.0.445_offline.sh',
sha256='7d4adf300a18f779c3ab517070c61dba10e3952287d5aef37c38f739e9041a68',
expand=False)
version('2021.4.0',
url='https://registrationcenter-download.intel.com/akdlm/irc_nas/17889/l_oneDPL_p_2021.4.0.337_offline.sh',
sha256='540ef0d308c4b0f13ea10168a90edd42a56dc0883024f6f1a678b94c10b5c170',
expand=False)
@property
def component_dir(self):
return 'dpl'
@property
def headers(self):
include_path = join_path(self.component_path, 'linux', 'include')
headers = find_headers('*', include_path, recursive=True)
# Force this directory to be added to include path, even
# though no files are here because all includes are relative
# to this path
headers.directories = [include_path]
return headers
| 39.960784
| 123
| 0.679588
|
4a041a5671fa279f33e47b5ee80d783c112fe87b
| 6,970
|
py
|
Python
|
Question_semaseg/answers/transposeconv_pytorch.py
|
skn047/DeepLearningMugenKnock
|
73d2b903816b380d56020c8336041883bc0d131c
|
[
"MIT"
] | 1
|
2021-11-07T13:14:41.000Z
|
2021-11-07T13:14:41.000Z
|
Question_semaseg/answers/transposeconv_pytorch.py
|
skn047/DeepLearningMugenKnock
|
73d2b903816b380d56020c8336041883bc0d131c
|
[
"MIT"
] | null | null | null |
Question_semaseg/answers/transposeconv_pytorch.py
|
skn047/DeepLearningMugenKnock
|
73d2b903816b380d56020c8336041883bc0d131c
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn.functional as F
import argparse
import cv2
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
num_classes = 2
img_height, img_width = 64, 64#572, 572
out_height, out_width = 64, 64#388, 388
GPU = False
torch.manual_seed(0)
class Mynet(torch.nn.Module):
def __init__(self):
super(Mynet, self).__init__()
self.enc1 = torch.nn.Sequential()
for i in range(2):
f = 3 if i == 0 else 32
self.enc1.add_module("conv1_{}".format(i+1), torch.nn.Conv2d(f, 32, kernel_size=3, padding=1, stride=1))
self.enc1.add_module("conv1_{}_relu".format(i+1), torch.nn.ReLU())
self.enc1.add_module("bn1_{}".format(i+1), torch.nn.BatchNorm2d(32))
self.enc2 = torch.nn.Sequential()
for i in range(2):
self.enc2.add_module("conv2_{}".format(i+1), torch.nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1))
self.enc2.add_module("conv2_{}_relu".format(i+1), torch.nn.ReLU())
self.enc2.add_module("bn2_{}".format(i+1), torch.nn.BatchNorm2d(32))
self.upsample = torch.nn.Sequential()
self.upsample.add_module("tconv", torch.nn.ConvTranspose2d(32, 32, kernel_size=2, stride=2, padding=0))
self.upsample.add_module("tconv_relu", torch.nn.ReLU())
self.upsample.add_module("tconv_bn", torch.nn.BatchNorm2d(32))
self.dec1 = torch.nn.Sequential()
for i in range(2):
self.dec1.add_module("dec1_conv1_{}".format(i+1), torch.nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1))
self.dec1.add_module("dec1_conv1_{}_relu".format(i+1), torch.nn.ReLU())
self.dec1.add_module("dec1_bn1_{}".format(i+1), torch.nn.BatchNorm2d(32))
self.out = torch.nn.Conv2d(32, num_classes+1, kernel_size=1, padding=0, stride=1)
def forward(self, x):
# block conv1
x = self.enc1(x)
x = F.max_pool2d(x, 2)
x = self.enc2(x)
x = self.upsample(x)
x = self.dec1(x)
x = self.out(x)
return x
CLS = {'akahara': [0,0,128],
'madara': [0,128,0]}
# get train data
def data_load(path, hf=False, vf=False):
xs = []
ts = []
paths = []
for dir_path in glob(path + '/*'):
for path in glob(dir_path + '/*'):
x = cv2.imread(path)
x = cv2.resize(x, (img_width, img_height)).astype(np.float32)
x /= 255.
x = x[..., ::-1]
xs.append(x)
gt_path = path.replace("images", "seg_images").replace(".jpg", ".png")
gt = cv2.imread(gt_path)
gt = cv2.resize(gt, (out_width, out_height), interpolation=cv2.INTER_NEAREST)
t = np.zeros((out_height, out_width), dtype=np.int)
for i, (_, vs) in enumerate(CLS.items()):
ind = (gt[...,0] == vs[0]) * (gt[...,1] == vs[1]) * (gt[...,2] == vs[2])
t[ind] = i+1
#print(gt_path)
#import matplotlib.pyplot as plt
#plt.subplot(1,2,1)
#plt.imshow(x)
#plt.subplot(1,2,2)
#plt.imshow(t, vmin=0, vmax=2)
#plt.show()
ts.append(t)
paths.append(path)
if hf:
xs.append(x[:, ::-1])
ts.append(t[:, ::-1])
paths.append(path)
if vf:
xs.append(x[::-1])
ts.append(t[::-1])
paths.append(path)
if hf and vf:
xs.append(x[::-1, ::-1])
ts.append(t[::-1, ::-1])
paths.append(path)
xs = np.array(xs)
ts = np.array(ts)
xs = xs.transpose(0,3,1,2)
return xs, ts, paths
# train
def train():
# GPU
device = torch.device("cuda" if GPU else "cpu")
# model
model = Mynet().to(device)
opt = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
model.train()
xs, ts, paths = data_load('../Dataset/train/images/', hf=True, vf=True)
# training
mb = 4
mbi = 0
train_ind = np.arange(len(xs))
np.random.seed(0)
np.random.shuffle(train_ind)
for i in range(500):
if mbi + mb > len(xs):
mb_ind = train_ind[mbi:]
np.random.shuffle(train_ind)
mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
mbi = mb - (len(xs) - mbi)
else:
mb_ind = train_ind[mbi: mbi+mb]
mbi += mb
x = torch.tensor(xs[mb_ind], dtype=torch.float).to(device)
t = torch.tensor(ts[mb_ind], dtype=torch.long).to(device)
opt.zero_grad()
y = model(x)
y = y.permute(0,2,3,1).contiguous()
y = y.view(-1, num_classes+1)
t = t.view(-1)
y = F.log_softmax(y, dim=1)
loss = torch.nn.CrossEntropyLoss()(y, t)
loss.backward()
opt.step()
pred = y.argmax(dim=1, keepdim=True)
acc = pred.eq(t.view_as(pred)).sum().item() / mb
print("iter >>", i+1, ',loss >>', loss.item(), ',accuracy >>', acc)
torch.save(model.state_dict(), 'cnn.pt')
# test
def test():
device = torch.device("cuda" if GPU else "cpu")
model = Mynet().to(device)
model.eval()
model.load_state_dict(torch.load('cnn.pt'))
xs, ts, paths = data_load('../Dataset/test/images/')
for i in range(len(paths)):
x = xs[i]
t = ts[i]
path = paths[i]
x = np.expand_dims(x, axis=0)
x = torch.tensor(x, dtype=torch.float).to(device)
pred = model(x)
pred = pred.permute(0,2,3,1).reshape(-1, num_classes+1)
pred = F.softmax(pred, dim=1)
pred = pred.reshape(-1, out_height, out_width, num_classes+1)
pred = pred.detach().cpu().numpy()[0]
pred = pred.argmax(axis=-1)
# visualize
out = np.zeros((out_height, out_width, 3), dtype=np.uint8)
for i, (_, vs) in enumerate(CLS.items()):
out[pred == (i+1)] = vs
print("in {}".format(path))
plt.subplot(1,2,1)
plt.imshow(x.detach().cpu().numpy()[0].transpose(1,2,0))
plt.subplot(1,2,2)
plt.imshow(out[..., ::-1])
plt.show()
def arg_parse():
parser = argparse.ArgumentParser(description='CNN implemented with Keras')
parser.add_argument('--train', dest='train', action='store_true')
parser.add_argument('--test', dest='test', action='store_true')
args = parser.parse_args()
return args
# main
if __name__ == '__main__':
args = arg_parse()
if args.train:
train()
if args.test:
test()
if not (args.train or args.test):
print("please select train or test flag")
print("train: python main.py --train")
print("test: python main.py --test")
print("both: python main.py --train --test")
| 29.914163
| 122
| 0.535151
|
4a041a92120d14085969928b59066e7bc6ce70f5
| 4,680
|
py
|
Python
|
networkx/algorithms/components/weakly_connected.py
|
rakschahsa/networkx
|
6cac55b1064c3c346665f9281680fa3b66442ad0
|
[
"BSD-3-Clause"
] | 8
|
2019-05-29T09:38:30.000Z
|
2021-01-20T03:36:59.000Z
|
SLpackage/private/thirdparty/pythonpkgs/networkx/networkx_2.2/lib/python2.7/site-packages/networkx/algorithms/components/weakly_connected.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | 12
|
2021-03-09T03:01:16.000Z
|
2022-03-11T23:59:36.000Z
|
SLpackage/private/thirdparty/pythonpkgs/networkx/networkx_2.2/lib/python2.7/site-packages/networkx/algorithms/components/weakly_connected.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | 1
|
2020-04-21T11:12:19.000Z
|
2020-04-21T11:12:19.000Z
|
# -*- coding: utf-8 -*-
# Copyright (C) 2004-2018 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Authors: Aric Hagberg (hagberg@lanl.gov)
# Christopher Ellison
"""Weakly connected components."""
import warnings as _warnings
import networkx as nx
from networkx.utils.decorators import not_implemented_for
__all__ = [
'number_weakly_connected_components',
'weakly_connected_components',
'weakly_connected_component_subgraphs',
'is_weakly_connected',
]
@not_implemented_for('undirected')
def weakly_connected_components(G):
"""Generate weakly connected components of G.
Parameters
----------
G : NetworkX graph
A directed graph
Returns
-------
comp : generator of sets
A generator of sets of nodes, one for each weakly connected
component of G.
Raises
------
NetworkXNotImplemented:
If G is undirected.
Examples
--------
Generate a sorted list of weakly connected components, largest first.
>>> G = nx.path_graph(4, create_using=nx.DiGraph())
>>> nx.add_path(G, [10, 11, 12])
>>> [len(c) for c in sorted(nx.weakly_connected_components(G),
... key=len, reverse=True)]
[4, 3]
If you only want the largest component, it's more efficient to
use max instead of sort:
>>> largest_cc = max(nx.weakly_connected_components(G), key=len)
See Also
--------
connected_components
strongly_connected_components
Notes
-----
For directed graphs only.
"""
seen = set()
for v in G:
if v not in seen:
c = set(_plain_bfs(G, v))
yield c
seen.update(c)
@not_implemented_for('undirected')
def number_weakly_connected_components(G):
"""Return the number of weakly connected components in G.
Parameters
----------
G : NetworkX graph
A directed graph.
Returns
-------
n : integer
Number of weakly connected components
Raises
------
NetworkXNotImplemented:
If G is undirected.
See Also
--------
weakly_connected_components
number_connected_components
number_strongly_connected_components
Notes
-----
For directed graphs only.
"""
return sum(1 for wcc in weakly_connected_components(G))
@not_implemented_for('undirected')
def weakly_connected_component_subgraphs(G, copy=True):
"""DEPRECATED: Use ``(G.subgraph(c) for c in weakly_connected_components(G))``
Or ``(G.subgraph(c).copy() for c in weakly_connected_components(G))``
"""
msg = "weakly_connected_component_subgraphs is deprecated and will be removed in 2.2" \
"use (G.subgraph(c).copy() for c in weakly_connected_components(G))"
_warnings.warn(msg, DeprecationWarning)
for c in weakly_connected_components(G):
if copy:
yield G.subgraph(c).copy()
else:
yield G.subgraph(c)
@not_implemented_for('undirected')
def is_weakly_connected(G):
"""Test directed graph for weak connectivity.
A directed graph is weakly connected if and only if the graph
is connected when the direction of the edge between nodes is ignored.
Note that if a graph is strongly connected (i.e. the graph is connected
even when we account for directionality), it is by definition weakly
connected as well.
Parameters
----------
G : NetworkX Graph
A directed graph.
Returns
-------
connected : bool
True if the graph is weakly connected, False otherwise.
Raises
------
NetworkXNotImplemented:
If G is undirected.
See Also
--------
is_strongly_connected
is_semiconnected
is_connected
is_biconnected
weakly_connected_components
Notes
-----
For directed graphs only.
"""
if len(G) == 0:
raise nx.NetworkXPointlessConcept(
"""Connectivity is undefined for the null graph.""")
return len(list(weakly_connected_components(G))[0]) == len(G)
def _plain_bfs(G, source):
"""A fast BFS node generator
The direction of the edge between nodes is ignored.
For directed graphs only.
"""
Gsucc = G.succ
Gpred = G.pred
seen = set()
nextlevel = {source}
while nextlevel:
thislevel = nextlevel
nextlevel = set()
for v in thislevel:
if v not in seen:
yield v
seen.add(v)
nextlevel.update(Gsucc[v])
nextlevel.update(Gpred[v])
| 24.123711
| 91
| 0.628846
|
4a041b0efa34b195358f4de9ef6e22276262ff99
| 2,917
|
py
|
Python
|
grim/spells/time/time_line_chart.py
|
banjtheman/grimoire
|
949fec396afeb73b74c2667bebd64453b179cc4e
|
[
"Apache-2.0"
] | null | null | null |
grim/spells/time/time_line_chart.py
|
banjtheman/grimoire
|
949fec396afeb73b74c2667bebd64453b179cc4e
|
[
"Apache-2.0"
] | 5
|
2021-03-10T00:55:21.000Z
|
2022-02-26T20:53:49.000Z
|
grim/spells/time/time_line_chart.py
|
banjtheman/grimoire
|
949fec396afeb73b74c2667bebd64453b179cc4e
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import numpy as np
import altair as alt
import streamlit as st
import sys, argparse, logging
import json
import time
def spell(spell_inputs):
mana = spell_inputs
st.markdown("## Create the graph you would like to animate")
time_col = st.selectbox("Select time column", mana.columns)
curr_time = st.selectbox("Select time", mana[time_col].unique())
x_col = st.selectbox("Select x axis for line chart", mana.columns)
xcol_string=x_col+":O"
if st.checkbox("Show as continuous?",key="time_line_x_is_cont"):
xcol_string=x_col+":Q"
y_col = st.selectbox("Select y axis for line chart", mana.columns)
ycol_string=alt.Y(y_col)
if st.checkbox("Show as sorted?",key="time_line_sort_y"):
ycol_string=alt.Y(y_col, sort="-x")
z_col = st.selectbox("Select z axis for line chart", mana.columns)
time_mana = mana.loc[mana[time_col] == curr_time]
chart = (
alt.Chart(time_mana)
.mark_line(point=True)
.encode(
y=ycol_string,
x=xcol_string,
color=z_col,
tooltip=list(time_mana.columns),
)
.properties(
title="Line graph of " + x_col + "," + y_col + " at " + str(curr_time)
)
.configure_title(fontSize=20,)
.configure_axis(labelFontSize=20, titleFontSize=20)
.configure_legend(labelFontSize=20, titleFontSize=20)
).properties(height=700)
st.altair_chart(chart, use_container_width=True)
# basicaly the animate button should make n graphs and show them and have a time.sleep in between
st.markdown("## Animate the graph above using "+time_col+" as the time.")
time_interval = st.number_input("Time interval", 0.0, None, value=1.0)
if st.button("Animate Graph"):
# declare an empty obj here then update it in loop
time_chart = st.empty()
sorted_vals = mana[time_col].unique()
sorted_vals.sort()
for times in sorted_vals:
curr_time_mana = mana.loc[mana[time_col] <= times]
st.write(times)
curr_chart = (
alt.Chart(curr_time_mana)
.mark_line(point=True)
.encode(
y=ycol_string,
x=xcol_string,
color=z_col,
tooltip=list(time_mana.columns),
)
.properties(
title="Line graph of " + x_col + "," + y_col + " at " + str(times)
)
.configure_title(fontSize=20,)
.configure_axis(labelFontSize=20, titleFontSize=20)
.configure_legend(labelFontSize=20, titleFontSize=20)
).properties(height=700)
time_chart.altair_chart(curr_chart, use_container_width=True)
# sleep
time.sleep(time_interval)
return None,mana
| 33.918605
| 101
| 0.59856
|
4a041b3c27880e30daff1ca9f5c2793502adf9fe
| 548
|
py
|
Python
|
survey/urls.py
|
HiroshiFuu/cs-balloting
|
565eb3ee88769d88b27705828c10c7b5be964ef5
|
[
"MIT"
] | null | null | null |
survey/urls.py
|
HiroshiFuu/cs-balloting
|
565eb3ee88769d88b27705828c10c7b5be964ef5
|
[
"MIT"
] | null | null | null |
survey/urls.py
|
HiroshiFuu/cs-balloting
|
565eb3ee88769d88b27705828c10c7b5be964ef5
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
from django.urls import path, re_path
from survey import views
app_name = 'survey'
urlpatterns = [
path('voting_result_json/<int:survey_id>/', views.voting_result_json, name='voting_result_json'),
path('surveys/', views.surveys, name='surveys'),
path('survey/<int:survey_id>/', views.survey, name='survey'),
path('<int:survey_id>/vote/', views.survey_vote, name='survey_vote'),
path('survery_vote_done/<int:survey_id>/<int:survey_option_id>/', views.survery_vote_done, name='survery_vote_done'),
]
| 34.25
| 121
| 0.713504
|
4a041b4c82b8f214eae520eb8c09f863a6e53e15
| 27,478
|
py
|
Python
|
Thai_DNN_inverse_model_SW_1700_revised_1.py
|
rimalim2009/DNN_inverse_2004IOT_Thailand
|
23c7e5deee9b349d5427277811500f8756110484
|
[
"MIT"
] | null | null | null |
Thai_DNN_inverse_model_SW_1700_revised_1.py
|
rimalim2009/DNN_inverse_2004IOT_Thailand
|
23c7e5deee9b349d5427277811500f8756110484
|
[
"MIT"
] | null | null | null |
Thai_DNN_inverse_model_SW_1700_revised_1.py
|
rimalim2009/DNN_inverse_2004IOT_Thailand
|
23c7e5deee9b349d5427277811500f8756110484
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# # SW 1700
# In[1]:
import numpy as np
import os
#import ipdb
def connect_dataset(file_list, icond_file_list, outputdir,
topodx=15, roi=2500, offset=5000,gclass_num=5,test_data_num=500):
"""
複数のデータセットを連結する
"""
#ipdb.set_trace()
#Reading and combining files Decide start and end points of the learning area and convert them to grid numbers
H = np.loadtxt(file_list[0], delimiter = ',')
icond = np.loadtxt(icond_file_list[0], delimiter = ',')
#Reading and combining files
if len(file_list) > 1:
for i in range(1, len(file_list)):
H_temp = np.loadtxt(file_list[i], delimiter = ',')
icond_temp = np.loadtxt(icond_file_list[i], delimiter = ',')
H = np.concatenate((H,H_temp),axis=0)
icond = np.concatenate((icond,icond_temp),axis = 0)
roi_grids = int(roi / topodx)
num_grids = int(H.shape[1] / gclass_num)
H_subset = np.zeros([H.shape[0], roi_grids * gclass_num])
for i in range(gclass_num):
H_subset[:, i*roi_grids:(i+1)*roi_grids] = H[:, i*num_grids:(i*num_grids+roi_grids)]
#Obtain the maximum and minimum values of data
max_x = np.max(H_subset)
min_x = np.min(H_subset)
icond_max = np.max(icond, axis=0)
icond_min = np.min(icond, axis=0)
#Split the data into tests and training
H_train = H_subset[0:-test_data_num,:]
H_test = H_subset[H_subset.shape[0] - test_data_num:,:]
icond_train = icond[0:-test_data_num,:]
icond_test = icond[H.shape[0] - test_data_num:,:]
#Save the data
if not os.path.exists(outputdir):
os.mkdir(outputdir)
np.savetxt(outputdir + '/x_train.txt',H_train,delimiter = ',')
np.savetxt(outputdir + '/x_test.txt',H_test,delimiter = ',')
np.savetxt(outputdir + '/icond_train.txt',icond_train,delimiter = ',')
np.savetxt(outputdir + '/icond_test.txt',icond_test,delimiter = ',')
np.savetxt(outputdir + '/icond_min.txt',icond_min,delimiter = ',')
np.savetxt(outputdir + '/icond_max.txt',icond_max,delimiter = ',')
np.savetxt(outputdir + '/x_minmax.txt',[min_x, max_x],delimiter = ',')
if __name__=="__main__":
original_data_dir = "/home/rimali2009/Journal_2"
parent_dir = "/home/rimali2009/Journal_2"
if not os.path.exists(parent_dir):
os.mkdir(parent_dir)
outputdir = parent_dir + "/data_g6_j2_roi1700_thai_revised_1"
file_list = ['/home/rimali2009/Journal_2/eta_5000_g6_300grid_thai_g5_revised_1.csv']
initial_conditions = ['/home/rimali2009/Journal_2/start_param_random_5000_thai_g5_revised_1.csv']
connect_dataset(file_list, initial_conditions, outputdir, test_data_num=500, gclass_num=5, topodx=15., roi=1700)
# In[2]:
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 7 15:43:18 2017
@author: hanar
"""
import time
import numpy as np
import os
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import SGD
from keras.optimizers import RMSprop
from keras.optimizers import Adagrad
from keras.optimizers import Adadelta
from keras.optimizers import Adam
from keras.optimizers import Adamax
from keras.optimizers import Nadam
from keras.callbacks import ModelCheckpoint
from keras.callbacks import EarlyStopping
from keras.callbacks import TensorBoard
from keras.models import load_model
#from keras.utils.visualize_util import plot
import matplotlib.pyplot as plt
import keras.callbacks
import keras.backend.tensorflow_backend as KTF
import tensorflow as tf
#Global variables for normalizing parameters
max_x = 1.0
min_x = 0.0
max_y = 1.0
min_y = 0.0
def deep_learning_tsunami(resdir, X_train_raw, y_train_raw, X_test_raw, y_test_raw,
                          _lr=0.02, _decay=0,
                          _validation_split=0.2, _batch_size=32,
                          _momentum=0.9, _nesterov=True,
                          num_layers=4, dropout=0.5,
                          node_num = 2500,
                          _epochs=2000):
    """
    Creating the inversion model of turbidity currents by deep learning.

    Builds a fully-connected network (ReLU, dropout), trains it with SGD on
    the normalized training data, and evaluates it on the normalized test
    data. Relies on the module-level normalization globals
    (min_x/max_x/min_y/max_y), so load_data() or set_minmax_data() must have
    been called first.

    Returns:
        (model, history): the trained Keras model and its fit History.
    """
    # Normalizing dataset to [0, 1] using the module-level bounds.
    X_train = get_normalized_data(X_train_raw, min_x, max_x)
    X_test = get_normalized_data(X_test_raw, min_x, max_x)
    y_train = get_normalized_data(y_train_raw, min_y, max_y)
    y_test = get_normalized_data(y_test_raw, min_y, max_y)
    # Generation of neural network model: input layer + (num_layers - 2)
    # hidden layers + output layer, each followed by dropout except the last.
    model = Sequential()
    model.add(Dense(node_num, input_dim=X_train.shape[1], activation='relu', kernel_initializer ='glorot_uniform'))#1st layer
    model.add(Dropout(dropout))
    for i in range(num_layers - 2):
        model.add(Dense(node_num, activation='relu', kernel_initializer ='glorot_uniform'))#2nd layer
        model.add(Dropout(dropout))
    model.add(Dense(y_train.shape[1], activation = 'relu', kernel_initializer ='glorot_uniform')) #last layer
    # Compiling the model with plain SGD (Nesterov momentum by default).
    model.compile(loss="mean_squared_error",
                  optimizer=SGD(lr=_lr, decay=_decay, momentum=_momentum, nesterov=_nesterov),
                  #optimizer=Adadelta(),
                  metrics=["mean_squared_error"])
    # Perform learning; a checkpoint callback snapshots the model each epoch.
    t = time.time()
    check = ModelCheckpoint("model3.hdf5")
    #es_cb = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')
    #tb_cb = TensorBoard(log_dir=resdir, histogram_freq=2, write_graph=True, write_images=True)
    history = model.fit(X_train, y_train, epochs=_epochs,
                        validation_split=_validation_split, batch_size=_batch_size,
                        callbacks=[check])
    # Evaluate learning result on the held-out (normalized) test set.
    loss_and_metrics = model.evaluate(X_test,y_test)
    print("\nloss:{} mse:{}".format(loss_and_metrics[0],loss_and_metrics[1]))
    print("Elapsed time: {:.1f} sec.".format(time.time()-t))
    # Plot how the model's error evolved during training.
    plot_history(history)
    return model, history
def apply_model(model, X, min_x, max_x, min_y, max_y):
    """Predict with *model* on raw (unnormalized) input X.

    The min/max bounds are required to map X into the model's normalized
    input space and to rescale the normalized prediction back to
    physical units.
    """
    x_scaled = (X - min_x) / (max_x - min_x)
    y_scaled = model.predict(x_scaled)
    return y_scaled * (max_y - min_y) + min_y
def plot_history(history):
    """Plot the training/validation MSE curves stored in a Keras History."""
    curves = [("mean_squared_error", "mse"),
              ("val_mean_squared_error", "val mse")]
    for key, label in curves:
        plt.plot(history.history[key], "o-", label=label)
    plt.title('model mse')
    plt.xlabel('epoch')
    plt.ylabel('mse')
    plt.legend(loc="upper right")
    plt.show()
def test_model(model, x_test):
    """Run *model* on a raw test set and return predictions in raw units.

    Normalizes x_test with the module-level min_x/max_x, predicts, and maps
    the normalized output back through min_y/max_y.
    """
    normalized_input = get_normalized_data(x_test, min_x, max_x)
    normalized_output = model.predict(normalized_input)
    return get_raw_data(normalized_output, min_y, max_y)
def save_result(savedir, model, history, test_result):
    """Persist predictions, loss histories, and the trained model under savedir."""
    outputs = (('test_result.txt', test_result),
               ('loss.txt', history.history.get('loss')),
               ('val_loss.txt', history.history.get('val_loss')))
    for fname, data in outputs:
        np.savetxt(savedir + fname, data, delimiter=',')
    # Serialize model and save
    print('save the model')
    model.save(savedir + 'model3.hdf5')
def load_data(datadir):
    """Load training/test datasets and the normalization bounds from datadir.

    Side effect: overwrites the module-level normalization globals
    (min_x, max_x, min_y, max_y) from the files in datadir.
    """
    global min_x, max_x, min_y, max_y

    def _load(name):
        # All dataset files are plain comma-separated text.
        return np.loadtxt(datadir + name, delimiter=',')

    x_train = _load('x_train.txt')
    x_test = _load('x_test.txt')
    y_train = _load('icond_train.txt')
    y_test = _load('icond_test.txt')
    min_y = _load('icond_min.txt')
    max_y = _load('icond_max.txt')
    min_x, max_x = _load('x_minmax.txt')
    return x_train, y_train, x_test, y_test
def set_minmax_data(_min_x, _max_x, _min_y, _max_y):
    """Explicitly set the module-level normalization bounds."""
    global min_x, max_x, min_y, max_y
    min_x = _min_x
    max_x = _max_x
    min_y = _min_y
    max_y = _max_y
    return
def get_normalized_data(x, min_val, max_val):
    """Linearly rescale x from [min_val, max_val] onto [0, 1]."""
    span = max_val - min_val
    return (x - min_val) / span
def get_raw_data(x_norm, min_val, max_val):
    """Invert get_normalized_data: map x_norm from [0, 1] back to [min_val, max_val]."""
    span = max_val - min_val
    return x_norm * span + min_val
if __name__ == "__main__":
    # Reading data (also sets the module-level normalization globals).
    datadir = '/home/rimali2009/Journal_2/data_g6_j2_roi1700_thai_revised_1/'
    resdir = '/home/rimali2009/Journal_2/result_g6_j2_roi1700_thai_revised_1/'
    if not os.path.exists(resdir):
        os.mkdir(resdir)
    x_train, y_train, x_test, y_test = load_data(datadir)
    # Execution of learning for each training-set size in `testcases`.
    testcases = [5000]
    for i in range(len(testcases)):
        resdir_case = resdir + '{}/'.format(testcases[i])
        if not os.path.exists(resdir_case):
            os.mkdir(resdir_case)
        # Train on only the first testcases[i] samples.
        x_train_sub = x_train[0:testcases[i],:]
        y_train_sub = y_train[0:testcases[i],:]
        model, history = deep_learning_tsunami(resdir_case, x_train_sub, y_train_sub, x_test, y_test, num_layers=5)
        # Verify on the test set and save predictions, losses and the model.
        result = test_model(model, x_test)
        save_result(resdir_case,model,history,result)
# In[3]:
# Notebook cell: load saved test predictions and loss curves for the
# 5000-sample run, plot MSE vs epoch, true-vs-estimated scatter plots for
# each target variable, and residual histograms.
import numpy as np
import matplotlib.pyplot as plt
import ipdb
get_ipython().run_line_magic('matplotlib', 'inline')
datadir = '/home/rimali2009/Journal_2/data_g6_j2_roi1700_thai_revised_1/'
resdir = '/home/rimali2009/Journal_2/result_g6_j2_roi1700_thai_revised_1/5000/'
test_result = np.loadtxt(resdir + 'test_result.txt',delimiter=',')
icond = np.loadtxt(datadir + 'icond_test.txt',delimiter=',')
print(icond.shape)
loss = np.loadtxt(resdir+'loss.txt',delimiter=',')
epoch = range(0,2000)
vloss = np.loadtxt(resdir+'val_loss.txt',delimiter=',')
resi = test_result - icond  # residuals (estimated - true)
fig = plt.figure(num=None,dpi=250, facecolor='w', edgecolor='k')
plt.plot(epoch, loss, 'bo',label='Loss')
plt.plot(epoch, vloss, 'yo',label='Validation')
plt.xlabel('Epoch')
plt.ylabel('Mean Squared Error')
plt.legend(loc="upper right")
plt.savefig(resdir+ 'mse.pdf')
plt.show()
fig2 = plt.figure()
hfont = {'fontname':'Century Gothic'}
textcol = 'k'
titlelabel = ['Max Inundation Length','Flow Velocity', 'Max. Flow Depth', '$C_1$', '$C_2$', '$C_3$', '$C_4$','$C_5$']
# Per-variable axis/tick settings (only the step arrays are used below).
xymin=[1700,2.0,1.5,0.0001,0.0001,0.0001,0.0001,0.0001]
xymax=[4500,10.0,12.0,0.02,0.02,0.02,0.02,0.02]
xstep=[500,1.5,1.5,0.005,0.005,0.005,0.005,0.005]
stepmin=[1700,1.0,2.0,0.000,0.0000,0.0000,0.0000,0.0000]
stepmax=[4550,10.5,13.0,0.025,0.025,0.025,0.025,0.025]
for i in range(len(titlelabel)):
    plt.figure(num=None,dpi=250, facecolor='w', edgecolor='k')
    plt.plot(icond[:,i],test_result[:,i],"o",markersize = 2.5)
    x=icond[:,i]
    y=test_result[:,i]
    max_value = np.max([x, y])
    min_value = np.min([x, y])
    y_lim = plt.ylim([min_value * 0.8, max_value * 1.1])
    x_lim = plt.xlim([min_value * 0.8, max_value * 1.1])
    plt.plot(x_lim, y_lim, 'k-', color = 'k')  # 1:1 reference line
    plt.axes().set_aspect('equal')
    plt.xticks(np.arange(stepmin[i],stepmax[i], step=xstep[i]))
    plt.yticks(np.arange(stepmin[i],stepmax[i], step=xstep[i]))
    plt.xlabel('Original Value',color=textcol,size=14,**hfont)
    plt.ylabel('Estimated Value',color=textcol,size=14,**hfont)
    plt.title(titlelabel[i],color=textcol,size=14,**hfont)
    plt.tick_params(labelsize=14,colors=textcol)
    plt.savefig(resdir+titlelabel[i] + '.eps')
    plt.savefig(resdir+titlelabel[i] + '.pdf')
    #plt.show()
# Residual histogram per target variable.
for i in range(len(titlelabel)):
    plt.figure(num=None,dpi=250, facecolor='w', edgecolor='k')
    plt.hist(resi[:,i],bins=20)
    plt.title(titlelabel[i],color=textcol,size=14,**hfont)
    plt.xlabel('Deviation from true value',color=textcol,size=14,**hfont)
    plt.ylabel('Frequency',color=textcol,size=14,**hfont)
    plt.tick_params(labelsize=14,colors=textcol)
    plt.savefig(resdir+titlelabel[i] + 'hist' + '.eps')
    plt.savefig(resdir+titlelabel[i] + 'hist' + '.pdf')
plt.show()
# In[2]:
# Notebook cell: residual statistics (std, mean, CV) and histograms per
# target variable.
# NOTE(review): `textcol` and `hfont` are used below but are defined only in
# the previous notebook cell — this cell depends on notebook execution order.
from scipy.stats import variation
import numpy as np
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
datadir = '/home/rimali2009/Journal_2/data_g6_j2_roi1700_thai_revised_1/'
resdir = '/home/rimali2009/Journal_2/result_g6_j2_roi1700_thai_revised_1/5000/'
test_result = np.loadtxt(resdir + 'test_result.txt',delimiter=',')
icond = np.loadtxt(datadir + 'icond_test.txt',delimiter=',')
print(icond.shape)
resi = test_result - icond  # residuals (estimated - true)
titlelabel = ['Max Inundation Length','Flow Velocity', 'Max. Flow Depth', 'C_1', 'C_2', 'C_3', 'C_4','C_5']
for i in range(len(titlelabel)):
    plt.figure(num=None,dpi=250, facecolor='w', edgecolor='k')
    plt.hist(resi[:,i],bins=20)
    # Population and sample standard deviation, mean, and the coefficient
    # of variation of the residuals for this variable.
    print('Standard Deviation:', np.std(resi[:,i]))
    print('Standard Deviation sample:', np.std(resi[:,i],ddof=1))
    print('Mean:', np.mean(resi[:,i]))
    print('CV:', np.std(resi[:,i],ddof=1)/np.mean(resi[:,i]))
    plt.title(titlelabel[i],color=textcol,size=14,**hfont)
    plt.xlabel('Deviation from true value',color=textcol,size=14,**hfont)
    plt.ylabel('Frequency',color=textcol,size=14,**hfont)
    plt.tick_params(labelsize=14,colors=textcol)
    plt.savefig(resdir+titlelabel[i] + 'hist' + '.eps')
    plt.savefig(resdir+titlelabel[i] + 'hist' + '.pdf')
plt.show()
# In[1]:
# Notebook cell: evaluate the trained model when the test deposits are only
# "observed" at the outcrop sampling locations. Each test profile is
# interpolated onto the outcrop locations, then re-interpolated back onto
# the forward-model grid, before running the inversion.
import numpy as np
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model
from scipy import stats
from scipy.interpolate import interp1d
import pandas as pd
from pykrige import OrdinaryKriging as OK
import ipdb
from scipy import stats
#import ipdb
#ipdb.set_trace()
datadir = '/home/rimali2009/Journal_2/data_g6_j2_roi1700_thai_revised_1/'
resdir = '/home/rimali2009/Journal_2/result_g6_j2_roi1700_thai_revised_1/5000/'
# Initial setting: load the trained model only once per notebook session.
if not "model" in locals():
    model = load_model(resdir+'model3.hdf5')
# Load test datasets
X_test = np.loadtxt(datadir + 'x_test.txt',delimiter=',')
y_test = np.loadtxt(datadir + 'icond_test.txt',delimiter=',')
# Normalize the test datasets
min_x, max_x = np.loadtxt(datadir + 'x_minmax.txt',delimiter=',')
X_test_norm = (X_test - min_x) / (max_x - min_x)
gclass = 5  # number of grain-size classes
gclass_label = ["726 ${\mu}m$","364 ${\mu}m$","182 ${\mu}m$","91 ${\mu}m$","46 ${\mu}m$"]
topodx = 15.0  # grid spacing of the forward model (m)
coord_num = int(model.layers[0].input_shape[1]/gclass)
# Acquires a value for normalizing input data to [0, 1]
y_min = np.loadtxt(datadir + 'icond_min.txt',delimiter=',')
y_max = np.loadtxt(datadir + 'icond_max.txt',delimiter=',')
# Load outcrop data (sampling locations and thicknesses per grain size).
outcrop = pd.read_csv('../Journal_2/Thai_gs5_revised_1.csv')
outcrop = outcrop.sort_values('distance')
outcrop['distance'] = outcrop['distance'] - 0
outcrop_num = len(outcrop['distance'])
print(outcrop)
# Preparation for interpolation.
thick_interp_at_outcrop = np.zeros([X_test.shape[0],outcrop_num*gclass])
thick_interp = np.zeros([X_test.shape[0],coord_num*gclass])  # interpolated sample thickness data
outcrop_x_id = np.round(outcrop['distance']/topodx).astype(np.int32)  # grid index of each sampling point
x = np.arange(0,coord_num*topodx,topodx)
# Interpolation of test datasets at the outcrop locations
for i in range(X_test.shape[0]):
    for j in range(gclass):
        f= interp1d(x,X_test_norm[i,j * coord_num : (j+1) * coord_num], kind="cubic",bounds_error=False,fill_value='extrapolate')
        thick_interp_at_outcrop[i,outcrop_num*j:outcrop_num*(j+1)] = f(outcrop['distance']) #Supplemented data
    # Interpolation of test datasets at the grids of the forward model
    for j in range(gclass):
        f = interp1d(outcrop['distance'],thick_interp_at_outcrop[i,j * outcrop_num : (j+1) * outcrop_num], kind="cubic",bounds_error=False,fill_value='extrapolate')
        thick_interp[i,coord_num*j:coord_num*(j+1)] = f(x) #Supplemented data
# Clip negative (non-physical) thicknesses produced by the cubic interpolation.
thick_interp[thick_interp < 0] = 0
# Perform inverse analysis and de-normalize the predicted initial conditions.
test_result_outcrop = model.predict(thick_interp)
test_result_outcrop = test_result_outcrop * (y_max - y_min) + y_min
print(test_result_outcrop)
np.savetxt('outcrop_location_interp.txt',test_result_outcrop, delimiter=',')
test_result=np.loadtxt('outcrop_location_interp.txt', delimiter=',')
test_result_normal = np.loadtxt(resdir + 'test_result.txt',delimiter=',')
resi=test_result-y_test  # residuals of the outcrop-location inversion
titlelabel = ['Max Inundation Length','Flow Velocity', 'Max. Flow Depth', 'C_1', 'C_2', 'C_3', 'C_4','C_5']
hfont = {'fontname':'Century Gothic'}
textcol = 'k'
xymin=[1700,2.0,1.5,0.0001,0.0001,0.0001,0.0001,0.0001]
xymax=[4500,10.0,12.0,0.02,0.02,0.02,0.02,0.02]
xstep=[500,1.5,1.5,0.005,0.005,0.005,0.005,0.005]
stepmin=[1700,1.0,2.0,0.000,0.0000,0.0000,0.0000,0.0000]
stepmax=[4550,10.5,13.0,0.025,0.025,0.025,0.025,0.025]
# Plot curve fitting: interpolated profiles against the original test data.
for i in range(len(gclass_label)):
    plt.plot(x,thick_interp[0,coord_num * i:coord_num * (i+1)], label='estimated')
for j in range(gclass):
    plt.plot(x,X_test_norm[0,j * coord_num : (j+1) * coord_num],'o',label='test')
    #plt.plot(outcrop['distance'], thick_interp_at_outcrop[0,outcrop_num*j:outcrop_num*(j+1)],'o',label='test')
plt.plot()
plt.legend()
# Scatter plots comparing outcrop-location inversion vs full-grid inversion.
# NOTE(review): `x` is reused here as the true-value column, clobbering the
# grid coordinate array defined above — harmless in this cell's flow.
for i in range(len(titlelabel)):
    plt.figure(num=None,dpi=250, facecolor='w', edgecolor='k')
    plt.plot(y_test[:,i],test_result[:,i],"o", markersize=4.5)
    plt.plot(y_test[:,i],test_result_normal[:,i],"*",label='estimate',markersize=3.5)
    x=y_test[:,i]
    y=test_result[:,i]
    y2= test_result_normal[:, i]
    max_value = np.max([x, y, y2])
    min_value = np.min([x, y, y2])
    y_lim = plt.ylim([min_value * 0.8, max_value * 1.1])
    x_lim = plt.xlim([min_value * 0.8, max_value * 1.1])
    plt.plot(x_lim, y_lim, 'k-', color = 'k')  # 1:1 reference line
    plt.title(titlelabel[i],color=textcol,size=14,**hfont)
    plt.xlabel('True values',color=textcol,size=14,**hfont)
    plt.ylabel('Estimated values',color=textcol,size=14,**hfont)
    plt.legend()
    plt.axes().set_aspect('equal')
    plt.xticks(np.arange(stepmin[i],stepmax[i], step=xstep[i]))
    plt.yticks(np.arange(stepmin[i],stepmax[i], step=xstep[i]))
    #plt.plot(x_lim, y_lim, color = 'k')
    plt.tick_params(labelsize=14,colors='k')
    plt.savefig(resdir+titlelabel[i] + 'outcrop_location' + '.pdf')
plt.show()
# Residual statistics and histograms for the outcrop-location inversion.
for i in range(len(titlelabel)):
    plt.figure(num=None,dpi=250, facecolor='w', edgecolor='k')
    plt.hist(resi[:,i],bins=20)
    print('Standard Deviation sample:', np.std(resi[:,i],ddof=1))
    print('Mean:', np.mean(resi[:,i]))
    print('mode',stats.mode(resi[:,i]))
    print('m',np.median(resi[:,i]))
    plt.title(titlelabel[i],color=textcol,size=14,**hfont)
    plt.xlabel('Deviation from true value',color=textcol,size=14,**hfont)
    plt.ylabel('Frequency',color=textcol,size=14,**hfont)
    plt.tick_params(labelsize=14,colors=textcol)
    plt.savefig(resdir+titlelabel[i] + 'hist_outcrop_location' + '.eps')
    plt.savefig(resdir+titlelabel[i] + 'hist_outcrop_location' + '.pdf')
plt.show()
# In[2]:
# Notebook cell: invert the real outcrop measurements. The field thicknesses
# are interpolated onto the forward-model grid, normalized, fed to the
# trained model, and the estimated initial conditions are de-normalized and
# saved.
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model
from scipy import stats
from scipy.interpolate import interp1d
import pandas as pd
from pykrige import OrdinaryKriging as OK
import ipdb
datadir = '/home/rimali2009/Journal_2/data_g6_j2_roi1700_thai_revised_1/'
resdir = '/home/rimali2009/Journal_2/result_g6_j2_roi1700_thai_revised_1/5000/'
# Initial setting: load the trained model only once per notebook session.
if not "model" in locals():
    model = load_model(resdir+'model3.hdf5')
gclass = 5  # number of grain-size classes
gclass_label = ["726 ${\mu}m$","364 ${\mu}m$","182 ${\mu}m$","91 ${\mu}m$","46 ${\mu}m$"]
gclass_name=['726','364','182','91','46']
topodx = 15.0  # grid spacing of the forward model (m)
coord_num = int(model.layers[0].input_shape[1]/gclass)
# Acquires values for normalizing input data to [0, 1]
min_x, max_x = np.loadtxt(datadir + 'x_minmax.txt',delimiter=',')
y_min = np.loadtxt(datadir + 'icond_min.txt',delimiter=',')
y_max = np.loadtxt(datadir + 'icond_max.txt',delimiter=',')
# Read outcrop data
outcrop = pd.read_csv('../Journal_2/Thai_gs5_revised_1.csv')
outcrop = outcrop.sort_values('distance')
outcrop['distance'] = outcrop['distance'] - 0
print(outcrop)
# Preparation for interpolation
thick_interp = np.zeros([1,coord_num*gclass])  # interpolated sample thickness data
outcrop_x_id = np.round(outcrop['distance']/topodx).astype(np.int32)  # grid index of each sampling point
x = np.arange(0,coord_num*topodx,topodx)
# Interpolate the measured thicknesses onto the model grid.
for j in range(gclass):
    f = interp1d(outcrop['distance'],outcrop.iloc[:,j+1], kind="cubic",bounds_error=False,fill_value='extrapolate')
    # Interpolation function of jth granularity level
    thick_interp[0,coord_num*j:coord_num*(j+1)] = f(x) #Supplemented data
# Clip negative thicknesses and normalize the data.
thick_interp[thick_interp < 0] = 0
thick_interp_norm = (thick_interp - min_x) / (max_x - min_x)
# Perform inverse analysis and de-normalize the result.
test_result_outcrop = model.predict(thick_interp_norm)
test_result_outcrop = test_result_outcrop * (y_max - y_min) + y_min
print(test_result_outcrop)
np.savetxt('outcrop_result_g6_g300_j2_roi1700_thai_cubic_revised_1.txt',test_result_outcrop, delimiter=',')
# Plot the interpolated profiles against the raw measurements.
for i in range(len(gclass_label)):
    plt.plot(x,thick_interp[0,coord_num * i:coord_num * (i+1)],label=gclass_label[i])
for i in range(gclass):
    plt.plot(outcrop['distance'], outcrop[gclass_name[i]],'o', label='Measured')
plt.legend()
plt.show()
# In[7]:
# Notebook cell: run the forward model with the initial conditions estimated
# from the outcrop inversion, save the resulting deposit, and compare the
# modelled thickness-distance curves with the measurements.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import Forward_model_for_DNN_thai_revised_1 as fmodel
import time
import ipdb
get_ipython().run_line_magic('matplotlib', 'inline')
# Basic setting
#dist_max = 3000.
gclass = 5  # number of grain-size classes
topodx=15.0  # grid spacing of the forward model (m)
gclass_name = ['726','364','182','91','46']
gname_tex = ["726 ${\mu}m$","364 ${\mu}m$","182 ${\mu}m$","91 ${\mu}m$","46 ${\mu}m$"]
estimated_icond=np.loadtxt('outcrop_result_g6_g300_j2_roi1700_thai_cubic_revised_1.txt', delimiter=',')
start = time.time()
# Run the forward model with the estimated initial conditions.
fmodel.read_setfile("config_g5_300grid_thai_revised_1.ini")
(x,C,x_dep,deposit) = fmodel.forward(estimated_icond)
np.savetxt('eta_estimated_thickness_5K_g6_j2_roi1700_thai.csv', deposit, delimiter=',')
np.savetxt('eta_estimated_Distance_5K_g6_j2_roi1700_thai.csv', x_dep,delimiter=',')
# Reload the saved deposit and assemble a distance/thickness DataFrame.
estimated_dep_thickness_5000=np.transpose(np.loadtxt('/home/rimali2009/Journal_2/'+'eta_estimated_thickness_5K_g6_j2_roi1700_thai.csv', delimiter=','))
estimated_dep_thickness_5000=pd.DataFrame(estimated_dep_thickness_5000,columns=['726','364','182','91','46'])
estimated_dep_distance_5000=np.transpose(np.loadtxt('/home/rimali2009/Journal_2/'+'eta_estimated_Distance_5K_g6_j2_roi1700_thai.csv', delimiter=','))
estimated_dep_distance_5000=pd.DataFrame(estimated_dep_distance_5000,columns=['distance'])
estimated_dep_5000=pd.concat([estimated_dep_distance_5000,estimated_dep_thickness_5000],axis=1)
np.savetxt('estimated_dep_5K_g6_j2_roi1700_thai.csv',estimated_dep_5000,delimiter=',')
# Formatting the loaded data: keep only positive distances.
estimated_dep_5000= estimated_dep_5000.query('distance > 0')
# Read original data
outcrop = pd.read_csv('../Journal_2/Thai_gs5_revised_1.csv')
outcrop= pd.DataFrame(outcrop,columns=['distance','726','364','182','91','46'])
print(outcrop)
# Plot estimated vs measured thickness-distance curves, one subplot per
# grain-size class (log-scaled thickness axis).
plt.figure(num=None, figsize=(17, 4), dpi=250, facecolor='w', edgecolor='g')
hfont = {'fontname':'Sans'}
plt.subplots_adjust(bottom=0.15, wspace=0.8)
for i in range(gclass):
    plt.subplot(1,gclass,i+1)
    plt.plot(estimated_dep_5000['distance'],estimated_dep_5000[gclass_name[i]],'-', label='Estimated')
    plt.plot(outcrop['distance'], outcrop[gclass_name[i]],'o', label='Measured')
    plt.yscale('log')
    plt.ylim([0.00001,0.1])
    plt.title(gname_tex[i], size=21,**hfont)
    plt.xlabel('Distance (m)', size = 14, **hfont)
    plt.ylabel('Volume per unit area (m)', size = 14, **hfont)
    plt.legend(fontsize=10)
plt.savefig("thickness_distance_curve_5000_g6_j2_roi1700_thai_revised_1.png")
plt.savefig("thickness_distance_curve_5000_g6_j2_roi1700_thai_revised_1.eps")
plt.savefig("thickness_distance_curve_5000_g6_j2_roi1700_thai_revised_1.pdf")
plt.show()
# In[9]:
# Jackknife Method
# Notebook cell: leave-one-out jackknife over the outcrop sampling points.
# Each iteration drops one measurement row, re-interpolates, re-runs the
# inversion, and appends the estimate to a CSV; afterwards the jackknife
# standard error and 95% CI half-width are computed per target variable.
import csv
import numpy as np
import pandas as pd
import math
from keras.models import load_model
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
datadir = '/home/rimali2009/Journal_2/data_g6_j2_roi1700_thai_revised_1/'
resdir = '/home/rimali2009/Journal_2/result_g6_j2_roi1700_thai_revised_1/5000/'
# Initial setting: load the trained model only once per notebook session.
if not "model" in locals():
    model = load_model(resdir+'model3.hdf5')
gclass = 5  # number of grain-size classes
topodx = 15.0  # grid spacing of the forward model (m)
coord_num = int(model.layers[0].input_shape[1]/gclass)
min_x, max_x = np.loadtxt(datadir + 'x_minmax.txt', delimiter=',')
y_min = np.loadtxt(datadir + 'icond_min.txt', delimiter=',')
y_max = np.loadtxt(datadir + 'icond_max.txt', delimiter=',')
a = pd.read_csv(
    '../Journal_2/Thai_gs5_revised_1.csv', delimiter=',')
print(a)
y = pd.DataFrame()
output = []
with open('output_final_j2_roi1700_thai_revised_1.csv', 'w') as outfile:
    # x=[]
    for index in range(len(a)):
        # Re-read the CSV with row `index` dropped (leave-one-out sample).
        df = y.append(pd.read_csv(
            '../Journal_2/Thai_gs5_revised_1.csv', skiprows=[index+1]))
        print(df)
        df = df.sort_values('distance')
        thick_interp = np.zeros([1, coord_num*gclass])
        x = np.arange(0, coord_num*topodx, topodx)
        for j in range(gclass):
            # Interpolation function of jth granularity level
            f = interp1d(df['distance'], df.iloc[:, j+1],
                         kind="cubic", bounds_error=False,fill_value='extrapolate')
            thick_interp[0, coord_num*j:coord_num*(j+1)] = f(x)
        # Clip negative thicknesses, normalize, invert, de-normalize.
        thick_interp[thick_interp < 0] = 0
        thick_interp_norm = (thick_interp - min_x) / (max_x - min_x)
        test_result_outcrop = model.predict(thick_interp_norm)
        test_result_outcrop = test_result_outcrop * (y_max - y_min) + y_min
        print(test_result_outcrop.shape)
        print(test_result_outcrop)
        np.savetxt(outfile,test_result_outcrop, delimiter=',')
hfont = {'fontname': 'Century Gothic'}
textcol = 'k'
resi = np.loadtxt('output_final_j2_roi1700_thai_revised_1.csv', delimiter=',')
titlelabel = ['Max. Inundation Lengthjk','Flow Velocityjk', 'Max Flow depthjk',
              '$C_1$jk', '$C_2$jk', '$C_3$jk', '$C_4$jk', '$C_5$jk']
jk_er=[]
with open('jk_e_1700_j2_thai_cubic_revised_1.txt','wb') as ftext:
    for i in range(len(titlelabel)):
        plt.figure(num=None, dpi=250, facecolor='w', edgecolor='k')
        plt.hist(resi[:, i], bins=35)
        mean = sum(resi[:,i]) / len(resi[:,i])
        print("mean:",mean)
        # Jackknife variance: (n-1)/n * sum of squared deviations.
        var_jk = sum(pow(x-mean,2) for x in resi[:,i]) / ((len(resi[:,i])-1)*(len(resi[:,i])))
        jk_e= math.sqrt(var_jk)
        #ci_u=mean+(1.96*jk_e)
        #ci_l=mean-(1.96*jk_e)
        CI=(1.96*jk_e)  # 95% confidence-interval half-width
        print("jk_e:",jk_e)
        #print("CI_u", ci_u)
        #print("CI_l",ci_l)
        print("CI",CI)
        # NOTE(review): `jk_er` stays empty, so `e` is always the single
        # current jk_e — presumably `jk_er.append(jk_e)` was intended; verify.
        e=np.append(jk_e,jk_er)
        np.savetxt(ftext,e,delimiter=',')
        plt.title(titlelabel[i], color=textcol, size=14, **hfont)
        plt.xlabel('Data from jackknife', color=textcol, size=14, **hfont)
        plt.ylabel('Frequency', color=textcol, size=14, **hfont)
        plt.tick_params(labelsize=14, colors=textcol)
        plt.savefig(resdir+titlelabel[i] + 'jkhist' + '.eps')
        plt.savefig(resdir+titlelabel[i] + 'jkhist' + '.pdf')
plt.show()
# In[ ]:
| 36.784471
| 164
| 0.688478
|
4a041bf479b369a42b1cbf73c6ac8e50646b8599
| 7,660
|
py
|
Python
|
uncertainty_metrics/tensorflow/information_criteria.py
|
krishnajalan/uncertainty-metrics
|
b6df9f7a817bf9b2207e4b020dde4a5de6d1b16e
|
[
"Apache-2.0"
] | null | null | null |
uncertainty_metrics/tensorflow/information_criteria.py
|
krishnajalan/uncertainty-metrics
|
b6df9f7a817bf9b2207e4b020dde4a5de6d1b16e
|
[
"Apache-2.0"
] | null | null | null |
uncertainty_metrics/tensorflow/information_criteria.py
|
krishnajalan/uncertainty-metrics
|
b6df9f7a817bf9b2207e4b020dde4a5de6d1b16e
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The Uncertainty Metrics Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Information criteria.
The posterior predictive distribution of a model is the average of model
predictions weighted by the parameter posterior. We implement information
criteria for general predictive distributions and which can be reliably
estimated from Monte Carlo approximations to the posterior.
"""
import math
import tensorflow.compat.v1 as tf
def model_uncertainty(logits):
  """Mutual information between the categorical label and the model parameters.
  A way to evaluate uncertainty in ensemble models is to measure its spread or
  `disagreement`. One way is to measure the mutual information between the
  categorical label and the parameters of the categorical output. This assesses
  uncertainty in predictions due to `model uncertainty`. Model
  uncertainty can be expressed as the difference of the total uncertainty and
  the expected data uncertainty:
  `Model uncertainty = Total uncertainty - Expected data uncertainty`, where
  * `Total uncertainty`: Entropy of expected predictive distribution.
  * `Expected data uncertainty`: Expected entropy of individual predictive
    distribution.
  This formulation was given by [1, 2] and allows the decomposition of total
  uncertainty into model uncertainty and expected data uncertainty. The
  total uncertainty will be high whenever the model is uncertain. However, the
  model uncertainty, the difference between total and expected data
  uncertainty, will be non-zero iff the ensemble disagrees.
  ## References:
  [1] Depeweg, S., Hernandez-Lobato, J. M., Doshi-Velez, F, and Udluft, S.
      Decomposition of uncertainty for active learning and reliable
      reinforcement learning in stochastic systems.
      stat 1050, p.11, 2017.
  [2] Malinin, A., Mlodozeniec, B., and Gales, M.
      Ensemble Distribution Distillation.
      arXiv:1905.00076, 2019.
  Args:
    logits: Tensor, shape (N, k, nc). Logits for N instances, k ensembles and
      nc classes.
  Raises:
    TypeError: Raised if both logits and probabilities are not set or both are
      set.
    ValueError: Raised if logits or probabilities do not conform to expected
      shape.
  Returns:
    model_uncertainty: Tensor, shape (N,).
    total_uncertainty: Tensor, shape (N,).
    expected_data_uncertainty: Tensor, shape (N,).
  """
  if logits is None:
    raise TypeError(
        "model_uncertainty expected logits to be set.")
  # NOTE(review): .numpy() requires eager execution — this check fails inside
  # a tf.function / graph context; confirm intended usage is eager-only.
  if tf.rank(logits).numpy() != 3:
    raise ValueError(
        "model_uncertainty expected logits to be of shape (N, k, nc),"
        "instead got {}".format(logits.shape))
  # Expected data uncertainty: mean over ensemble members of each member's
  # predictive entropy (computed from log-probabilities for stability).
  log_prob = tf.math.log_softmax(logits, -1)
  prob = tf.exp(log_prob)
  expected_data_uncertainty = tf.reduce_mean(
      tf.reduce_sum(- prob * log_prob, -1), -1)
  # Ensemble-averaged predictive distribution via logsumexp over members.
  n_ens = tf.cast(log_prob.shape[1], tf.float32)
  log_expected_probabilities = tf.reduce_logsumexp(
      log_prob, 1) - tf.math.log(n_ens)
  expected_probabilities = tf.exp(log_expected_probabilities)
  # Total uncertainty: entropy of the ensemble-averaged distribution.
  total_uncertainty = tf.reduce_sum(
      - expected_probabilities * log_expected_probabilities, -1)
  # Mutual information = total - expected data uncertainty.
  model_uncertainty_ = total_uncertainty - expected_data_uncertainty
  return model_uncertainty_, total_uncertainty, expected_data_uncertainty
def negative_waic(logp, waic_type="waic1"):
  """Compute the negative Widely Applicable Information Criterion (WAIC).
  The negative WAIC estimates the holdout log-likelihood from just the training
  data and an approximation to the posterior predictive.
  WAIC is a criterion that is evaluated on the _training set_ using the
  posterior predictive distribution derived from the _same_ training set, see
  [(Watanabe, 2018)][1].
  Because the posterior predictive distribution is typically not available in
  closed form, this implementation uses a Monte Carlo approximate,
  theta_j ~ p(theta | D), where D is the training data.
  Note that WAIC evaluated on the true parameter posterior is an accurate
  estimate to O(B^{-2}), however, in this implementation we have two additional
  sources of error: 1. the finite sample approximation to the posterior
  predictive, and 2. approximation error in the posterior due to approximate
  inference.
  For the rationale of why one would want to use WAIC, see [2].
  ### References:
  [1]: Sumio Watanabe. Mathematical Theory of Bayesian Statistics.
    CRC Press. 2018
    https://www.crcpress.com/Mathematical-Theory-of-Bayesian-Statistics/Watanabe/p/book/9781482238068
  [2]: Sebastian Nowozin. Do Bayesians overfit?
    http://www.nowozin.net/sebastian/blog/do-bayesians-overfit.html
  Args:
    logp: Tensor, shape (B,M,...), containing log p(y_i | x_i, theta_j)
      for i=1,..,B instances and j=1,...,M models.
    waic_type: 'waic1' or 'waic2'. The WAIC1 criterion uses the variance of the
      log-probabilities, the WAIC2 criterion uses the difference between the
      Bayes posterior and Gibbs posterior.
  Returns:
    neg_waic: Tensor, (...), the negative WAIC.
    neg_waic_sem: Tensor, (...), the standard error of the mean of `neg_waic`.
  Raises:
    ValueError: if `waic_type` is neither 'waic1' nor 'waic2'.
  """
  # Monte Carlo posterior predictive: log (1/M) sum_j exp(logp_j).
  logp_mean = tf.reduce_logsumexp(logp, 1) - math.log(int(logp.shape[1]))
  if waic_type == "waic1":
    # WAIC1 penalty: per-instance variance of log-probabilities over models.
    _, logp_var = tf.nn.moments(logp, 1)
    neg_waic, neg_waic_var = tf.nn.moments(logp_mean - logp_var, 0)
  elif waic_type == "waic2":
    # WAIC2 penalty: 2 * (Gibbs log-prob - Bayes log-prob).
    gibbs_logp = tf.reduce_mean(logp, 1)
    neg_waic, neg_waic_var = tf.nn.moments(2.0*gibbs_logp - logp_mean, 0)
  else:
    # Previously an unknown waic_type fell through and raised a confusing
    # NameError on the unbound `neg_waic`; fail fast with a clear message.
    raise ValueError(
        "waic_type must be 'waic1' or 'waic2', got {!r}".format(waic_type))
  neg_waic_sem = tf.sqrt(neg_waic_var / float(int(logp.shape[1])))
  return neg_waic, neg_waic_sem
def importance_sampling_cross_validation(logp):
  """Compute the importance-sampling cross validation (ISCV) estimate.
  The ISCV estimates the holdout log-likelihood from just an approximation to
  the posterior predictive log-likelihoods on the training data.
  ### References:
  [1]: Alan E. Gelfand, Dipak K. Dey, Hong Chang.
    Model determination using predictive distributions with implementation via
    sampling-based methods.
    Technical report No. 462, Department of Statistics,
    Stanford university, 1992.
    http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.860.3702&rep=rep1&type=pdf
  [2]: Aki Vehtari, Andrew Gelman, Jonah Gabry.
    Practical Bayesian model evaluation using leave-one-out cross-validation and
    WAIC.
    arXiv:1507.04544
    https://arxiv.org/pdf/1507.04544.pdf
  [3]: Sumio Watanabe. Mathematical Theory of Bayesian Statistics.
    CRC Press. 2018
    https://www.crcpress.com/Mathematical-Theory-of-Bayesian-Statistics/Watanabe/p/book/9781482238068
  Args:
    logp: Tensor, shape (B,M,...), containing log p(y_i | x_i, theta_j)
      for i=1,..,B instances and j=1,...,M models.
  Returns:
    iscv_logp: Tensor, (...), the ISCV estimate of the holdout log-likelihood.
    iscv_logp_sem: Tensor, (...), the standard error of the mean of `iscv_logp`.
  """
  num_models = int(logp.shape[1])
  # Per-instance log of the harmonic-mean importance weights: log sum_j exp(-logp_j).
  neg_lse = tf.reduce_logsumexp(-logp, 1)
  mean_neg_lse, var_neg_lse = tf.nn.moments(neg_lse, 0)
  # ISCV estimate: negate after subtracting log(M) to average the M models.
  iscv_logp = -(mean_neg_lse - math.log(num_models))
  iscv_logp_sem = tf.sqrt(var_neg_lse / float(num_models))
  return iscv_logp, iscv_logp_sem
| 39.895833
| 101
| 0.738642
|
4a041c12d7c9b1593e3539e02af0004ec31540f5
| 6,325
|
py
|
Python
|
python/miind/miind_api/LifMeshGenerator.py
|
dekamps/miind
|
4b321c62c2bd27eb0d5d8336a16a9e840ba63856
|
[
"MIT"
] | 13
|
2015-09-15T17:28:25.000Z
|
2022-03-22T20:26:47.000Z
|
python/miind/miind_api/LifMeshGenerator.py
|
dekamps/miind
|
4b321c62c2bd27eb0d5d8336a16a9e840ba63856
|
[
"MIT"
] | 41
|
2015-08-25T07:50:55.000Z
|
2022-03-21T16:20:37.000Z
|
python/miind/miind_api/LifMeshGenerator.py
|
dekamps/miind
|
4b321c62c2bd27eb0d5d8336a16a9e840ba63856
|
[
"MIT"
] | 9
|
2015-09-14T20:52:07.000Z
|
2022-03-08T12:18:18.000Z
|
import miind.mesh3 as mesh
import miind.writemesh as writemesh
import numpy as np
class LifMeshGenerator:
'''This class helps to generate a leaky-integrate-and-fire mesh. The neural parameters are:
tau_m: :membrane time constant
V_rest: :resting potential
V_threshold: :threshold potential
Here there is :no need to set a refractive period, this can be done elsewhere.
The following :parameters need to be set to define the grind and require user input.
dt :the step size of the grid, which is the main determinant of the bin size
self.N_grid : determines how close the grid hugs V_rest. A large value results in an exponential pile up of the grid close to V_rest. A small value leaves a big gap.
The following parameters will be set to define the grid, and often don't require user setting
V_max :must be larger than V_threshold, but not by much. MIIND needs a threshold cell, rather than a boundary and V_max create a cell boundary
:above threshold for this purpose. The default value is V_max + epsilon
epsilon: :just a small value
self.strip_w : an arbitrary value for the strip width.
self.lambda : an arbitrary small value for the reset bin. Should not touch other grid cellss.'''
def __init__(self, basename, tau = 10e-3, V_threshold = -50., V_rest = -65.0, V_min = -80., dt = 0.0001, N_grid=300):
self.tau = tau # membrane time constant in s
self.V_threshold = V_threshold # threshold in V
self.epsilon = 0.001 # padding as fraction of the threshold potential
self.labda = 0.0001 # fiducial bin size
self.V_rest = V_rest # reversal/rest potential (also the reset potential)
self.V_min = V_min # guaranteed minimum value of the grid
self.V_max = self.V_threshold + self.epsilon # guaranteed maximum value of the grid
self.N_grid = N_grid # number of points in the interval (V_rest, self.V_threshold);
# note that this parameter controls how close to V_rest the grid extends, it does NOT control the bindwidth,
# which is determined by self.dt. Also note that the grid may extend to negative values
# e.g if V_min = 2*V.rest - V_threshold, the grid holds double this number of bins
self.dt = dt # timestep for each bin
self.strip_w = 0.005 # arbitrary value for strip width
self.basename = basename
self.pos_vs = []
self.neg_vs = []
def generateLifMesh(self):
if self.V_min > self.V_rest:
raise ValueError ("self.V_min must be less than V_rev.")
if self.V_max < self.V_threshold+self.epsilon:
raise ValueError ("self.V_max must be greater than or equal to self.V_threshold.")
with open(self.basename + '.mesh','w') as meshfile:
meshfile.write('ignore\n')
meshfile.write('{}\n'.format(self.dt))
ts = self.dt * np.arange(self.N_grid)
self.pos_vs = self.V_rest + (self.V_threshold-self.V_rest)*np.exp(-ts/self.tau)
self.pos_vs = np.insert(self.pos_vs, 0, self.V_max)
self.neg_vs = self.V_rest + (self.V_min-self.V_rest)*np.exp(-ts/self.tau)
if len(self.neg_vs) > 0:
for v in self.neg_vs:
meshfile.write(str(v) + '\t')
meshfile.write('\n')
for v in self.neg_vs:
meshfile.write(str(0.0) + '\t')
meshfile.write('\n')
for v in self.neg_vs:
meshfile.write(str(v) + '\t')
meshfile.write('\n')
for v in self.neg_vs:
meshfile.write(str(self.strip_w) + '\t')
meshfile.write('\n')
meshfile.write('closed\n')
for v in self.pos_vs:
meshfile.write(str(v) + '\t')
meshfile.write('\n')
for v in self.pos_vs:
meshfile.write(str(self.strip_w) + '\t')
meshfile.write('\n')
for v in self.pos_vs:
meshfile.write(str(v) + '\t')
meshfile.write('\n')
for v in self.pos_vs:
meshfile.write(str(0.0) + '\t')
meshfile.write('\n')
meshfile.write('closed\n')
meshfile.write('end')
return self.basename + '.mesh'
def generateLifStationary(self):
    """Write the stationary-point file '<basename>.stat'.

    The stationary region is a single quadrilateral spanning from the last
    point of the negative strip to the last point of the positive strip
    (both closest to V_rest), across the full strip width.

    Must be called after generateLifMesh() has populated self.pos_vs and
    self.neg_vs.
    """
    statname = self.basename + '.stat'
    v_plus = self.pos_vs[-1]   # innermost point of the positive strip
    v_min = self.neg_vs[-1]    # innermost point of the negative strip
    with open(statname,'w') as statfile:
        statfile.write('<Stationary>\n')
        # NOTE: removed unused local `format = "%.9f"` (shadowed the builtin
        # and was never applied; values are written via str()).
        statfile.write('<Quadrilateral>')
        statfile.write('<vline>' + str(v_min) + ' ' + str(v_min) + ' ' + str(v_plus) + ' ' + str(v_plus) + '</vline>')
        statfile.write('<wline>' + str(0) + ' ' + str(self.strip_w) + ' ' + str(self.strip_w) + ' ' + str(0) + '</wline>')
        statfile.write('</Quadrilateral>\n')
        statfile.write('</Stationary>')
def generateLifReversal(self):
    """Write the reversal mapping file '<basename>.rev'.

    Every cell (i, 0) for i >= 1 is mapped onto cell (0, 0) with weight 1.0,
    returning probability mass to the reversal bin.
    """
    grid = mesh.Mesh(self.basename + '.mesh')
    output_path = self.basename + '.rev'
    with open(output_path, 'w') as handle:
        handle.write('<Mapping type="Reversal">\n')
        for cell_index in range(1, len(grid.cells)):
            handle.write('{},0\t0,0\t1.0\n'.format(cell_index))
        handle.write('</Mapping>')
# Script entry point: generate the mesh, stationary and reversal files for a
# LIF model under the base name 'lif'.
if __name__ == "__main__":
    g=LifMeshGenerator('lif')
    g.generateLifMesh()
    g.generateLifStationary()
    g.generateLifReversal()
| 51.008065
| 173
| 0.531067
|
4a041c2f68f5b8d16ff4a40b828e550a8e8c550f
| 1,013
|
py
|
Python
|
src/saml2/entity_category/at_egov_pvp2.py
|
cnelson/pysaml2
|
a30e51c271e27e4411a0243b65adbf5d7a3abb07
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
desktop/core/ext-py/pysaml2-4.4.0/src/saml2/entity_category/at_egov_pvp2.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
desktop/core/ext-py/pysaml2-4.4.0/src/saml2/entity_category/at_egov_pvp2.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
__author__ = 'rhoerbe' #2013-09-05

# Entity Categories specifying the PVP eGov Token as of "PVP2-Allgemein V2.1.0", http://www.ref.gv.at/

# Attribute names carried by the basic eGov token: principal identity,
# organisation and role information.
EGOVTOKEN = ["PVP-VERSION",
             "PVP-PRINCIPAL-NAME",
             "PVP-GIVENNAME",
             "PVP-BIRTHDATE",
             "PVP-USERID",
             "PVP-GID",
             "PVP-BPK",
             "PVP-MAIL",
             "PVP-TEL",
             "PVP-PARTICIPANT-ID",
             "PVP-PARTICIPANT-OKZ",
             "PVP-OU-OKZ",
             "PVP-OU",
             "PVP-OU-GV-OU-ID",
             "PVP-FUNCTION",
             "PVP-ROLES",
             ]

# Attributes used only for transaction charging.
CHARGEATTR = ["PVP-INVOICE-RECPT-ID",
              "PVP-COST-CENTER-ID",
              "PVP-CHARGE-CODE",
              ]

# all eGov Token attributes except (1) transaction charging and (2) chaining
PVP2 = "http://www.ref.gv.at/ns/names/agiz/pvp/egovtoken"

# transaction charging extension
PVP2CHARGE = "http://www.ref.gv.at/ns/names/agiz/pvp/egovtoken-charge"

# Mapping from entity-category URI to the attribute names released for it.
RELEASE = {
    PVP2: EGOVTOKEN,
    PVP2CHARGE: CHARGEATTR,
}
| 26.657895
| 102
| 0.53998
|
4a041c48983bc1138f859f2436e145d8c373f439
| 6,727
|
py
|
Python
|
framework/helpers/argument_parser.py
|
lukovnikov/transformer_generalization
|
a538bfbba6877cd7a21e710f2535df2e9236ba52
|
[
"MIT"
] | 47
|
2021-08-30T00:41:15.000Z
|
2022-01-24T02:49:17.000Z
|
framework/helpers/argument_parser.py
|
xdever/modules
|
efdb8790b074862581e035c9ab5bf889440a8023
|
[
"BSD-3-Clause"
] | 6
|
2020-10-19T23:57:23.000Z
|
2022-03-12T00:51:58.000Z
|
framework/helpers/argument_parser.py
|
xdever/modules
|
efdb8790b074862581e035c9ab5bf889440a8023
|
[
"BSD-3-Clause"
] | 5
|
2021-09-04T23:51:51.000Z
|
2022-03-10T14:03:24.000Z
|
import os
import json
import argparse
import re
from ..data_structures.dotdict import create_recursive_dot_dict
def none_parser(other_parser):
    """Wrap *other_parser* so the literal string "none" (case-insensitive)
    parses to None; every other value is delegated to the wrapped parser."""
    def fn(x):
        if x.lower() == "none":
            return None
        return other_parser(x)
    return fn


class ArgumentParser:
    """Typed argparse wrapper with named profiles and JSON persistence.

    Arguments are registered with add_argument(); Profile objects bundle
    predefined values (optionally composed from other profiles via
    ``include``), and the parsed arguments can be saved to / restored from a
    JSON file (see save/load/sync) so a training directory stays
    self-describing.
    """

    # Keep a handle on the builtin type(); the name `type` is shadowed by the
    # add_argument() parameter below.
    _type = type

    @staticmethod
    @none_parser
    def int_list_parser(x):
        """Parse e.g. '1,2 3;4' into [1, 2, 3, 4]."""
        return [int(a) for a in re.split("[,_ ;]", x) if a]

    @staticmethod
    @none_parser
    def str_list_parser(x):
        """Parse a comma-separated string into a list of strings."""
        return x.split(",")

    @staticmethod
    @none_parser
    def int_or_none_parser(x):
        return int(x)

    @staticmethod
    @none_parser
    def float_or_none_parser(x):
        return float(x)

    @staticmethod
    @none_parser
    def float_list_parser(x):
        """Parse e.g. '0.1,0.2 0.3' into [0.1, 0.2, 0.3]."""
        return [float(a) for a in re.split("[,_ ;]", x) if a]

    @staticmethod
    def _merge_args(args, new_args, arg_schemas):
        # Overlay new_args onto args; when a key is already present the two
        # values are combined with that argument's registered "updater"
        # (default: the new value wins).
        for name, val in new_args.items():
            old = args.get(name)
            if old is None:
                args[name] = val
            else:
                args[name] = arg_schemas[name]["updater"](old, val)

    class Profile:
        """A named bundle of argument values, optionally composed from other
        profiles via ``include`` (included profiles are applied first)."""

        def __init__(self, name, args=None, include=None):
            # BUGFIX: `include` previously defaulted to a shared mutable [].
            include = [] if include is None else include
            assert not (args is None and not include), "One of args or include must be defined"
            self.name = name
            self.args = args
            if not isinstance(include, list):
                include = [include]
            self.include = include

        def get_args(self, arg_schemas, profile_by_name):
            """Resolve to a flat {name: value} dict, merging all included
            profiles (recursively, in order) before this profile's own args."""
            res = {}
            for n in self.include:
                p = profile_by_name.get(n)
                assert p is not None, "Included profile %s doesn't exists" % n
                ArgumentParser._merge_args(res, p.get_args(arg_schemas, profile_by_name), arg_schemas)
            # BUGFIX: a pure-include profile has args=None; merging None
            # unconditionally raised AttributeError.
            if self.args:
                ArgumentParser._merge_args(res, self.args, arg_schemas)
            return res

    def __init__(self, description=None, get_train_dir=lambda x: os.path.join("save", x.name)):
        """Create the parser.

        :param description: passed through to argparse.
        :param get_train_dir: callable mapping parsed options to the training
            directory used by sync()/parse_and_sync(); may return None to
            disable persistence.
        """
        self.parser = argparse.ArgumentParser(description=description)
        self.profiles = {}
        self.args = {}
        self.raw = None
        self.parsed = None
        self.get_train_dir = get_train_dir
        self.parser.add_argument("-profile", "--profile", type=str, help="Pre-defined profiles.")

    def add_argument(self, name, type=None, default=None, help="", save=True, parser=lambda x: x,
                     updater=lambda old, new: new, choice=()):
        """Register a typed argument.

        :param name: flag name, e.g. "-lr" (also registered as "--lr").
        :param type: value type; inferred from `default` when omitted. bool
            arguments are exposed to argparse as int (0/1).
        :param save: whether the value participates in JSON save/load.
        :param parser: post-processor applied to the typed value.
        :param updater: combiner used when profiles overlay the same key.
        :param choice: optional allowed values (empty = unrestricted).
            BUGFIX: default changed from a shared mutable [] to ().
        """
        assert name not in ["profile"], "Argument name %s is reserved" % name
        assert not (type is None and default is None), "Either type or default must be given"
        if type is None:
            type = ArgumentParser._type(default)

        self.parser.add_argument(name, "-" + name, type=int if type == bool else type, default=None, help=help)
        if name[0] == '-':
            name = name[1:]

        self.args[name] = {
            "type": type,
            "default": int(default) if type == bool else default,
            "save": save,
            "parser": parser,
            "updater": updater,
            "choice": choice
        }

    def add_profile(self, prof):
        """Register a Profile (or a list of Profiles)."""
        if isinstance(prof, list):
            for p in prof:
                self.add_profile(p)
        else:
            self.profiles[prof.name] = prof

    def do_parse_args(self, loaded=None):
        """Parse sys.argv, filling unset options from `loaded` (a previously
        saved dict) and/or the selected profile, then validate choices and
        apply each argument's parser. Returns the parsed dot-dict."""
        # BUGFIX: `loaded` previously defaulted to a shared mutable {}.
        loaded = loaded or {}
        self.raw = self.parser.parse_args()

        profile = {}
        if self.raw.profile:
            if loaded:
                # A saved run pins its profile; refuse a conflicting override.
                if self.raw.profile != loaded.get("profile"):
                    assert False, "Loading arguments from file, but a different profile is given."
            else:
                for pr in self.raw.profile.split(","):
                    p = self.profiles.get(pr)
                    assert p is not None, "Invalid profile: %s. Valid profiles: %s" % (pr, self.profiles.keys())
                    p = p.get_args(self.args, self.profiles)
                    self._merge_args(profile, p, self.args)

        # Fill unset command-line options: saved value first, then profile,
        # then the registered default.
        for k, v in self.raw.__dict__.items():
            if k in ["profile"]:
                continue

            if v is None:
                if k in loaded and self.args[k]["save"]:
                    self.raw.__dict__[k] = loaded[k]
                else:
                    self.raw.__dict__[k] = profile.get(k, self.args[k]["default"])

        # Validate restricted-choice arguments.
        for k, v in self.raw.__dict__.items():
            if k not in self.args:
                continue
            c = self.args[k]["choice"]
            if c and not v in c:
                assert False, f"Invalid value {v}. Allowed: {c}"

        self.parsed = create_recursive_dot_dict({k: self.args[k]["parser"](self.args[k]["type"](v)) if v is not None
                                                 else None for k, v in self.raw.__dict__.items() if k in self.args})
        return self.parsed

    def parse_or_cache(self):
        """Parse sys.argv once; later calls reuse the cached result."""
        if self.parsed is None:
            self.do_parse_args()

    def parse(self):
        """Return the parsed arguments, parsing on first use."""
        self.parse_or_cache()
        return self.parsed

    def to_dict(self):
        """Return the raw (pre-parser) argument values as a plain dict."""
        self.parse_or_cache()
        return self.raw.__dict__

    def clone(self):
        """Return a fresh parser with the same schema, parsed from sys.argv."""
        parser = ArgumentParser()
        parser.profiles = self.profiles
        parser.args = self.args
        for name, a in self.args.items():
            parser.parser.add_argument("-" + name, type=int if a["type"] == bool else a["type"], default=None)
        parser.parse()
        return parser

    def from_dict(self, dict):
        """Re-parse using `dict` as the saved-values source (see do_parse_args)."""
        return self.do_parse_args(dict)

    def save(self, fname):
        """Dump the raw argument dict to `fname` as JSON."""
        with open(fname, 'w') as outfile:
            json.dump(self.to_dict(), outfile, indent=4)
            return True

    def load(self, fname):
        """Load saved arguments from `fname` if it exists; returns parsed args."""
        if os.path.isfile(fname):
            with open(fname, "r") as data_file:
                map = json.load(data_file)
            self.from_dict(map)
        return self.parsed

    def sync(self, fname=None):
        """Load saved args (if present) and write the merged result back."""
        if fname is None:
            fname = self._get_save_filename()

        if fname is not None:
            if os.path.isfile(fname):
                self.load(fname)

            dir = os.path.dirname(fname)
            os.makedirs(dir, exist_ok=True)

            self.save(fname)
        return self.parsed

    def _get_save_filename(self, opt=None):
        # Resolve "<train_dir>/args.json", or None when persistence is off.
        opt = self.parse() if opt is None else opt
        dir = self.get_train_dir(opt)
        return None if dir is None else os.path.join(dir, "args.json")

    def parse_and_sync(self):
        """Parse sys.argv and synchronize with the training directory."""
        opt = self.parse()
        return self.sync(self._get_save_filename(opt))

    def parse_and_try_load(self):
        """Parse sys.argv and load saved args if the save file exists."""
        fname = self._get_save_filename()
        if fname and os.path.isfile(fname):
            self.load(fname)

        return self.parsed
| 31.288372
| 116
| 0.558198
|
4a041c95f7ffac596869f6c7efd7d6f3fa0dc87b
| 170
|
py
|
Python
|
telegram_bot/intergration/weather/const.py
|
daya0576/telegran_bot_weather
|
96df65cd607704fb5dbf89e13b2a130ca8cb0018
|
[
"MIT"
] | null | null | null |
telegram_bot/intergration/weather/const.py
|
daya0576/telegran_bot_weather
|
96df65cd607704fb5dbf89e13b2a130ca8cb0018
|
[
"MIT"
] | null | null | null |
telegram_bot/intergration/weather/const.py
|
daya0576/telegran_bot_weather
|
96df65cd607704fb5dbf89e13b2a130ca8cb0018
|
[
"MIT"
] | null | null | null |
# Telegram message template for a two-day forecast. Placeholders: location,
# d1/d1_pretty (today's summary / daytime detail), d2/d2_pretty (tomorrow's),
# and extra for trailing notes.
WEATHER_2D_MESSAGE_TEMPLATE = """\
📍{location}
今天{d1},白天{d1_pretty}
明天{d2},白天{d2_pretty}
{extra}
"""

# Telegram message template for an hourly forecast; `hours` is a pre-rendered
# multi-line block.
WEATHER_6H_MESSAGE_TEMPLATE = """\
📍{location}
{hours}
"""
| 11.333333
| 34
| 0.647059
|
4a041de30004a347d0b828ec1ca89108ab2eed17
| 5,052
|
py
|
Python
|
game/Cards.py
|
tcikezu/durakula
|
cd8afb80c871d9dbe054050e0a4d426947d854b3
|
[
"MIT"
] | 1
|
2020-01-28T06:41:23.000Z
|
2020-01-28T06:41:23.000Z
|
game/Cards.py
|
tcikezu/durakula
|
cd8afb80c871d9dbe054050e0a4d426947d854b3
|
[
"MIT"
] | null | null | null |
game/Cards.py
|
tcikezu/durakula
|
cd8afb80c871d9dbe054050e0a4d426947d854b3
|
[
"MIT"
] | null | null | null |
import numpy as np
from collections import deque
import random
import copy
# 4x13 lookup table: row = suit index (D, C, H, S), column = value index;
# entries are the short card codes used by Card.__repr__.
_CARD_MAP_FULL = np.array([['D0', 'D1', 'D2', 'D3', 'D4', 'D5', 'D6', 'D7', 'D8', 'D9', 'D10', 'D11', 'D12'],
                           ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11' , 'C12'],
                           ['H0', 'H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'H7', 'H8', 'H9', 'H10', 'H11' , 'H12'],
                           ['S0', 'S1', 'S2', 'S3', 'S4', 'S5', 'S6', 'S7', 'S8', 'S9', 'S10', 'S11' , 'S12']])

# Human-readable suit and value names, indexed by suit_idx / value_idx.
_SUITS = ['Diamonds', 'Clubs', 'Hearts','Spades']
_VALUES = ['2','3','4','5','6','7','8','9','10','Jack','Queen','King','Ace']
class Card():
    """Single playing card identified by (suit index, value index).

    repr() is the short code from _CARD_MAP_FULL (e.g. 'S12'); str() renders
    the card as '<value> of <suit>', e.g. 'Ace' of 'Spades'.
    """
    def __init__(self, suit_idx: int, value_idx: int):
        self.card = _CARD_MAP_FULL[suit_idx,value_idx]
        self.suit_idx = suit_idx
        self.value_idx = value_idx
        self.suit = _SUITS[suit_idx]
        self.value = _VALUES[value_idx]

    def __repr__(self):
        return self.card

    def __str__(self):
        # BUGFIX: previously returned 'suit of value' (e.g. 'Spades of Ace'),
        # contradicting the documented "'value' of 'suit'" format.
        return self.value + ' of ' + self.suit
class CardCollection():
"""First In Last Out collection of Card objects, implemented with collection.deque storing deck order, and np.ndarray representing cards inside the deck."""
def __init__(self, n_suits: int, n_vals: int, fill=True):
assert(0 < n_suits <=4 and 0 < n_vals <= 13), 'Deck dimensions out of bounds'
if fill == True:
self.deck = deque([Card(s,v) for s in range(n_suits) for v in range(n_vals)])
self.cards = np.ones((n_suits, n_vals)).astype(int)
else:
self.deck = deque()
self.cards = np.zeros((n_suits, n_vals)).astype(int)
self.n_suits = n_suits
self.n_vals = n_vals
def __repr__(self):
return repr(self.deck)
def __len__(self):
return len(self.deck)
def __str__(self):
head = '--- Card Collection ---\n'
card_str = 'Cards: ' + ','.join([repr(x) for x in self.deck]) + '.\n'
size_str = 'Size: ' + str(self.__len__()) + '\n'
tail = '-----------------------\n'
return head + card_str + size_str + tail
def __getitem__(self, idx: int):
"""Get the idx'th card of the deck."""
return self.deck[idx]
def __setitem__(self, idx: int, value: Card):
"""Set the idx'th card of the deck."""
self.deck[idx] = value
def __add__(self, other):
self.cards += other.cards
self.deck = other.order + self.deck
other.empty()
return self
def __eq__(self, other):
return self.cards.all() == other.cards.all() and self.deck == other.order
def empty(self):
"""Empty this deck."""
self.cards *= 0
self.deck = deque()
def reorder(self):
self.deck = deque([Card(idx[0], idx[1]) for idx,v in np.ndenumerate(self.cards) if v == 1])
def draw(self, n=1):
drawn_cards = copy.deepcopy(self)
drawn_cards.empty()
if self.__len__() == 0:
return drawn_cards
drawn_cards.empty()
for i in range(min(n,len(self.deck))):
card = self.deck.popleft()
self.cards[card.suit_idx, card.value_idx] = 0
drawn_cards.cards[card.suit_idx, card.value_idx] = 1
drawn_cards.deck.appendleft(card)
return drawn_cards
def cut(self, idx=None):
if idx == None:
idx = random.randint(0,len(self.deck)-1)
self.deck.rotate(idx)
class DurakDeck(CardCollection):
    """Card collection for Durak: 'small' mode is the 36-card deck
    (4 suits x 9 values), 'full' mode the complete 52-card deck."""
    def __init__(self, cards=None, mode='small',fill=True):
        if (mode == 'small'):
            super().__init__(n_suits = 4, n_vals = 9, fill=fill)
        if (mode == 'full'):
            super().__init__(n_suits = 4, n_vals = 13, fill=fill)
        if cards is not None:
            # Adopt an externally supplied 0/1 card matrix and rebuild the
            # deque ordering from it.
            self.cards = cards
            self.reorder()

    def draw_hand_from_deck(self, trump_idx, n_cards=6):
        """Draw n_cards from the top and return them as a hand matrix with
        the trump suit swapped into row 0."""
        drawn_cards = self.draw(n_cards)
        return DurakDeck.convert_deck_to_hand(drawn_cards, trump_idx)

    @staticmethod
    def convert_deck_to_hand(deck, trump_idx):
        """Return deck.cards with the trump suit's row swapped to index 0."""
        if len(deck) > 0:
            indices = list(range(deck.n_suits))
            indices[0], indices[trump_idx] = indices[trump_idx], indices[0]
            return deck.cards[indices]
        else:
            assert(np.sum(deck.cards)==0),"Empty deck had non-empty cards."
            return deck.cards

    @classmethod
    def convert_hand_to_deck(cls, hand: np.ndarray, trump_idx: int):
        """Inverse of convert_deck_to_hand: swap row 0 back to the trump
        suit's position and wrap the matrix in a DurakDeck (deck size is
        inferred from the matrix shape)."""
        indices = list(range(hand.shape[0]))
        indices[0], indices[trump_idx] = indices[trump_idx], indices[0]
        if hand.shape == (4,9):
            deck = cls(cards = hand[indices], mode = 'small')
        else:
            deck = cls(cards = hand[indices], mode = 'full')
        return deck
class SpadesDeck(CardCollection):
    """Full 52-card collection (4 suits x 13 values) for Spades."""
    def __init__(self, fill=True):
        super().__init__(n_suits=4, n_vals=13, fill=fill)
| 36.875912
| 161
| 0.565914
|
4a041ee37f8a2d16f66089b942f08f0d57833b46
| 2,267
|
py
|
Python
|
caldavclientlibrary/protocol/webdav/tests/test_options.py
|
LaudateCorpus1/ccs-caldavclientlibrary
|
5b1db7f3b49f03ba715f7286f71ddb9f54ddddac
|
[
"Apache-2.0"
] | 49
|
2016-08-22T17:34:34.000Z
|
2021-11-08T09:47:45.000Z
|
caldavclientlibrary/protocol/webdav/tests/test_options.py
|
DalavanCloud/ccs-caldavclientlibrary
|
ce8d554b8a0bcb13468f2dc87eef77da2302d6b3
|
[
"Apache-2.0"
] | null | null | null |
caldavclientlibrary/protocol/webdav/tests/test_options.py
|
DalavanCloud/ccs-caldavclientlibrary
|
ce8d554b8a0bcb13468f2dc87eef77da2302d6b3
|
[
"Apache-2.0"
] | 18
|
2017-01-21T22:28:04.000Z
|
2022-03-26T11:57:30.000Z
|
##
# Copyright (c) 2007-2016 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from caldavclientlibrary.protocol.http.session import Session
from caldavclientlibrary.protocol.webdav.options import Options
import unittest
class TestRequest(unittest.TestCase):
    """Basic sanity checks for the Options request object."""

    def test_Method(self):
        # An Options request must use the HTTP OPTIONS method.
        server = Session("www.example.com")
        request = Options(server, "/")
        self.assertEqual(request.getMethod(), "OPTIONS")
class TestRequestHeaders(unittest.TestCase):
    # Placeholder: OPTIONS sends no request headers worth testing yet.
    pass


class TestRequestBody(unittest.TestCase):
    # Placeholder: OPTIONS requests carry no body.
    pass


class TestResponse(unittest.TestCase):
    # Placeholder: no response-object-level behavior tested yet.
    pass
class TestResponseHeaders(unittest.TestCase):
    """Checks parsing of the Allow response header by Options requests."""

    def test_OneHeader(self):
        # A single comma-separated Allow header is split into methods.
        server = Session("www.example.com")
        request = Options(server, "/")
        request.getResponseHeaders().update({
            "allow": ("GET, PUT, OPTIONS, HEAD",),
        })
        self.assertEqual(set(request.getAllowed()), set(("GET", "PUT", "OPTIONS", "HEAD")))
        self.assertTrue(request.isAllowed("GET"))
        self.assertTrue(request.isAllowed("PUT"))
        self.assertTrue(request.isAllowed("OPTIONS"))
        self.assertTrue(request.isAllowed("HEAD"))

    def test_MultipleHeader(self):
        # Multiple Allow header instances are merged into one method set.
        server = Session("www.example.com")
        request = Options(server, "/")
        request.getResponseHeaders().update({
            "allow": ("GET, PUT", "OPTIONS, HEAD",),
        })
        self.assertEqual(set(request.getAllowed()), set(("GET", "PUT", "OPTIONS", "HEAD")))
        self.assertTrue(request.isAllowed("GET"))
        self.assertTrue(request.isAllowed("PUT"))
        self.assertTrue(request.isAllowed("OPTIONS"))
        self.assertTrue(request.isAllowed("HEAD"))
class TestResponseBody(unittest.TestCase):
    # Placeholder: OPTIONS responses carry no body to verify.
    pass
| 30.226667
| 91
| 0.683282
|
4a041f297aa0f00ac20853dc1356da00954725e3
| 2,634
|
py
|
Python
|
azure-quantum/setup.py
|
eionblanc/qdk-python
|
086901e46da147253e0aee5238844b08d20c3fd2
|
[
"MIT"
] | 3
|
2021-09-08T08:51:45.000Z
|
2021-09-08T08:52:27.000Z
|
azure-quantum/setup.py
|
eionblanc/qdk-python
|
086901e46da147253e0aee5238844b08d20c3fd2
|
[
"MIT"
] | null | null | null |
azure-quantum/setup.py
|
eionblanc/qdk-python
|
086901e46da147253e0aee5238844b08d20c3fd2
|
[
"MIT"
] | 1
|
2021-07-13T21:45:12.000Z
|
2021-07-13T21:45:12.000Z
|
#!/bin/env python
# -*- coding: utf-8 -*-
##
# setup.py: Installs Python host functionality for azure-quantum.
##
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
##

# IMPORTS #

import setuptools
import os
import distutils  # NOTE(review): appears unused in this script — confirm before removing.

# VERSION INFORMATION #

# Our build process sets the PYTHON_VERSION environment variable to a version
# string that is compatible with PEP 440, and so we inherit that version number
# here and propagate that to qsharp/version.py.
#
# To make sure that local builds still work without the same environment
# variables, we'll default to 0.0.0.1 as a development version.

version = os.environ.get("PYTHON_VERSION", "0.0.0.1")

# Generate azure/quantum/version.py so the installed package can report its
# own version at runtime.
with open("./azure/quantum/version.py", "w") as f:
    f.write(
        f"""# Auto-generated file, do not edit.
##
# version.py: Specifies the version of the azure.quantum package.
##
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
##
__version__ = "{version}"
"""
    )

# Keep the AutoRest-generated client's version file in sync as well.
with open("./azure/quantum/_client/_version.py", "w") as f:
    f.write(
        f"""# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
VERSION = "{version}"
"""
    )

# DESCRIPTION #
# The long description metadata passed to setuptools is used to populate the
# PyPI page for this package. Thus, we'll generate the description by using the
# same README.md file that we use in the GitHub repo.

with open("./README.md", "r") as fh:
    long_description = fh.read()

# LIST OF REQUIREMENTS #
# Get list of requirements from requirements.txt

with open("./requirements.txt", "r") as fh:
    requirements = fh.readlines()

# SETUPTOOLS INVOCATION #

setuptools.setup(
    name="azure-quantum",
    version=version,
    author="Microsoft",
    description="Python client for Azure Quantum",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/microsoft/qdk-python",
    packages=setuptools.find_namespace_packages(include=["azure.*"]),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    install_requires=requirements,
)
| 31.73494
| 94
| 0.670084
|
4a042237f64eeacd4caa644c6af43c8f399e368e
| 11,595
|
py
|
Python
|
utttpy/game/ultimate_tic_tac_toe.py
|
bdotgradb/uttt
|
ab812918b75fdb0efac24fb007989d01e539c484
|
[
"Apache-2.0"
] | 28
|
2021-12-10T20:06:32.000Z
|
2022-02-01T00:20:52.000Z
|
utttpy/game/ultimate_tic_tac_toe.py
|
bdotgradb/uttt
|
ab812918b75fdb0efac24fb007989d01e539c484
|
[
"Apache-2.0"
] | null | null | null |
utttpy/game/ultimate_tic_tac_toe.py
|
bdotgradb/uttt
|
ab812918b75fdb0efac24fb007989d01e539c484
|
[
"Apache-2.0"
] | 2
|
2021-12-10T20:34:31.000Z
|
2021-12-22T02:53:51.000Z
|
from __future__ import annotations
from typing import List, Optional
from utttpy.game.action import Action
from utttpy.game.constants import (
STATE_SIZE,
NEXT_SYMBOL_STATE_INDEX,
CONSTRAINT_STATE_INDEX,
UTTT_RESULT_STATE_INDEX,
X_STATE_VALUE,
O_STATE_VALUE,
DRAW_STATE_VALUE,
UNCONSTRAINED_STATE_VALUE,
)
class UltimateTicTacToe:
    """Ultimate tic-tac-toe game state packed into a flat bytearray.

    State layout (indexes via utttpy.game.constants): bytes 0..80 hold the
    81 cells of the nine 3x3 subgames (subgame s occupies [s*9, s*9+9));
    bytes 81..89 hold each subgame's result (X / O / draw, 0 = undecided);
    additional slots store the next symbol to move, the subgame constraint,
    and the overall result.
    """
    def __init__(self, state: Optional[bytearray] = None):
        # Start from an explicit state, or an empty board where X moves
        # first and is not constrained to any subgame.
        if state:
            self.state = state
        else:
            self.state = bytearray(STATE_SIZE)
            self.state[NEXT_SYMBOL_STATE_INDEX] = X_STATE_VALUE
            self.state[CONSTRAINT_STATE_INDEX] = UNCONSTRAINED_STATE_VALUE

    def clone(self) -> UltimateTicTacToe:
        """Return an independent copy of this game."""
        return UltimateTicTacToe(state=self.state.copy())

    def is_equal_to(self, uttt: UltimateTicTacToe) -> bool:
        """True when both games have byte-identical state."""
        return self.state == uttt.state

    def execute(self, action: Action, verify: bool = True) -> None:
        """Apply `action`, updating results, next symbol and constraint.

        With verify=True the state is consistency-checked and the action
        validated before (and re-checked after) applying; raises
        UltimateTicTacToeError on any violation.
        """
        if verify:
            if self.is_terminated():
                raise UltimateTicTacToeError("supergame is terminated")
            self._verify_state()
            self._verify_action(action=action)

        self.state[action.index] = action.symbol
        self._update_supergame_result(symbol=action.symbol, index=action.index)
        self._toggle_next_symbol()
        self._set_next_constraint(index=action.index)

        if verify:
            self._verify_state()

    def get_legal_actions(self) -> List[Action]:
        """All legal Actions for the player to move."""
        return [
            Action(symbol=self.next_symbol, index=legal_index)
            for legal_index in self.get_legal_indexes()
        ]

    def get_legal_indexes(self) -> List[int]:
        """Flat cell indexes (0..80) where the next symbol may be placed."""
        if self.is_terminated():
            return []
        if self.is_unconstrained():
            # Any empty cell in any still-undecided subgame.
            indexes = []
            for i, s in enumerate(self.state[81:90]):
                if not s:
                    indexes.extend(self._get_empty_indexes(subgame=i))
        else:
            indexes = self._get_empty_indexes(subgame=self.constraint)
        return indexes

    @property
    def next_symbol(self) -> int:
        # Symbol (X/O state value) of the player to move.
        return self.state[NEXT_SYMBOL_STATE_INDEX]

    @property
    def constraint(self) -> int:
        # Subgame index the next move must play in, or the unconstrained value.
        return self.state[CONSTRAINT_STATE_INDEX]

    @property
    def result(self) -> int:
        # Overall result: 0 while in progress, else X / O / draw state value.
        return self.state[UTTT_RESULT_STATE_INDEX]

    def is_next_symbol_X(self) -> bool:
        return self.next_symbol == X_STATE_VALUE

    def is_next_symbol_O(self) -> bool:
        return self.next_symbol == O_STATE_VALUE

    def is_constrained(self) -> bool:
        return 0 <= self.constraint < 9

    def is_unconstrained(self) -> bool:
        return self.constraint == UNCONSTRAINED_STATE_VALUE

    def is_terminated(self) -> bool:
        return bool(self.result)

    def is_result_X(self) -> bool:
        return self.result == X_STATE_VALUE

    def is_result_O(self) -> bool:
        return self.result == O_STATE_VALUE

    def is_result_draw(self) -> bool:
        return self.result == DRAW_STATE_VALUE

    def _get_empty_indexes(self, subgame: int) -> List[int]:
        # Flat indexes of empty cells within one subgame.
        offset = subgame * 9
        return [
            i + offset for i, s in enumerate(self.state[offset : offset + 9]) if not s
        ]

    def _is_winning_position(self, symbol: int, subgame: int) -> bool:
        # Check all 8 lines of a 3x3 board; subgame=9 means the supergame
        # (the per-subgame results at bytes 81..89). Lines are grouped by a
        # shared cell (center, then two corners) so each is tested once.
        state = self.state
        offset = subgame * 9
        return (
            (
                symbol == state[offset + 4] and
                (
                    symbol == state[offset + 0] == state[offset + 8] or
                    symbol == state[offset + 2] == state[offset + 6] or
                    symbol == state[offset + 1] == state[offset + 7] or
                    symbol == state[offset + 3] == state[offset + 5]
                )
            )
            or
            (
                symbol == state[offset + 0] and
                (
                    symbol == state[offset + 1] == state[offset + 2] or
                    symbol == state[offset + 3] == state[offset + 6]
                )
            )
            or
            (
                symbol == state[offset + 8] and
                (
                    symbol == state[offset + 2] == state[offset + 5] or
                    symbol == state[offset + 6] == state[offset + 7]
                )
            )
        )

    def _is_full(self, subgame: int) -> bool:
        # True when every cell of the subgame (or supergame for 9) is set.
        offset = subgame * 9
        return all(self.state[offset : offset + 9])

    def _update_supergame_result(self, symbol: int, index: int) -> None:
        # After a move at `index`, update that subgame's result byte, and if
        # it changed, re-evaluate the overall result on the supergame board.
        supergame_updated = False
        subgame = index // 9
        if self._is_winning_position(symbol=symbol, subgame=subgame):
            self.state[81 + subgame] = symbol
            supergame_updated = True
        elif self._is_full(subgame=subgame):
            self.state[81 + subgame] = DRAW_STATE_VALUE
            supergame_updated = True
        if supergame_updated:
            if self._is_winning_position(symbol=symbol, subgame=9):
                self.state[UTTT_RESULT_STATE_INDEX] = symbol
            elif self._is_full(subgame=9):
                self.state[UTTT_RESULT_STATE_INDEX] = DRAW_STATE_VALUE

    def _toggle_next_symbol(self) -> None:
        # Alternate the player to move.
        if self.is_next_symbol_X():
            self.state[NEXT_SYMBOL_STATE_INDEX] = O_STATE_VALUE
        elif self.is_next_symbol_O():
            self.state[NEXT_SYMBOL_STATE_INDEX] = X_STATE_VALUE

    def _set_next_constraint(self, index: int) -> None:
        # The cell position within its subgame dictates the next subgame;
        # if that subgame is already decided, the next move is unconstrained.
        next_subgame = index % 9
        if self.state[81 + next_subgame]:
            self.state[CONSTRAINT_STATE_INDEX] = UNCONSTRAINED_STATE_VALUE
        else:
            self.state[CONSTRAINT_STATE_INDEX] = next_subgame

    def _verify_state(self) -> None:
        # Full consistency check; raises UltimateTicTacToeError on violation.
        self._verify_supergame()
        self._verify_subgames()
        self._verify_constraint()

    def _verify_supergame(self) -> None:
        # The overall result byte must agree with the supergame board.
        x_w = self._is_winning_position(symbol=X_STATE_VALUE, subgame=9)
        o_w = self._is_winning_position(symbol=O_STATE_VALUE, subgame=9)
        full = self._is_full(subgame=9)
        if x_w and o_w:
            raise UltimateTicTacToeError("X and O have winning positions on supergame")
        if x_w and not self.is_result_X():
            raise UltimateTicTacToeError("X won supergame, but result is not updated")
        if o_w and not self.is_result_O():
            raise UltimateTicTacToeError("O won supergame, but result is not updated")
        if full and not self.is_result_draw() and not (x_w or o_w):
            raise UltimateTicTacToeError("DRAW on supergame, but result is not updated")

    def _verify_subgames(self) -> None:
        # Each subgame's result byte must agree with its 3x3 board.
        for subgame in range(0, 9):
            x_w = self._is_winning_position(symbol=X_STATE_VALUE, subgame=subgame)
            o_w = self._is_winning_position(symbol=O_STATE_VALUE, subgame=subgame)
            full = self._is_full(subgame=subgame)
            if x_w and o_w:
                raise UltimateTicTacToeError(f"X and O have winning positions on subgame={subgame}")
            if x_w and self.state[81 + subgame] != X_STATE_VALUE:
                raise UltimateTicTacToeError(f"X won subgame={subgame}, but supergame is not updated")
            if o_w and self.state[81 + subgame] != O_STATE_VALUE:
                raise UltimateTicTacToeError(f"O won subgame={subgame}, but supergame is not updated")
            if full and self.state[81 + subgame] != DRAW_STATE_VALUE and not (x_w or o_w):
                raise UltimateTicTacToeError(f"DRAW on subgame={subgame}, but supergame is not updated")

    def _verify_constraint(self) -> None:
        # The constraint must be a valid subgame index (or unconstrained)
        # and must not point at an already-decided subgame.
        if not (self.is_constrained() or self.is_unconstrained()):
            raise UltimateTicTacToeError(f"invalid constraint={self.constraint}")
        if self.is_constrained() and self.state[81 + self.constraint]:
            raise UltimateTicTacToeError(f"constraint={self.constraint} points to terminated subgame")

    def _verify_action(self, action: Action) -> None:
        # Validate symbol, index range, constraint and cell availability.
        illegal_action = f"Illegal {action} - "
        if self.is_next_symbol_X() and not action.is_symbol_X():
            raise UltimateTicTacToeError(illegal_action + "next move belongs to X")
        if self.is_next_symbol_O() and not action.is_symbol_O():
            raise UltimateTicTacToeError(illegal_action + "next move belongs to O")
        if not (0 <= action.index < 81):
            raise UltimateTicTacToeError(illegal_action + "index outside the valid range")
        if self.is_constrained() and self.constraint != action.index // 9:
            raise UltimateTicTacToeError(illegal_action + f"violated constraint={self.constraint}")
        if self.state[81 + action.index // 9]:
            raise UltimateTicTacToeError(illegal_action + "index from terminated subgame")
        if self.state[action.index]:
            raise UltimateTicTacToeError(illegal_action + "index is already taken")

    def __str__(self):
        """ASCII rendering of the board; legal cells are marked with '•'."""
        state_values_map = {
            X_STATE_VALUE: 'X',
            O_STATE_VALUE: 'O',
            DRAW_STATE_VALUE: '=',
            0: '-',
        }
        subgames = [state_values_map[s] for s in self.state[0:81]]
        supergame = [state_values_map[s] for s in self.state[81:90]]
        if not self.is_terminated():
            # Highlight where the next player may move.
            for legal_index in self.get_legal_indexes():
                subgames[legal_index] = '•'
            if self.is_constrained():
                supergame[self.constraint] = '•'
            elif self.is_unconstrained():
                supergame = ['•' if s == '-' else s for s in supergame]
        # Join a slice of cells into one printable row segment.
        sb = lambda l, r: ' '.join(subgames[l : r + 1])
        sp = lambda l, r: ' '.join(supergame[l : r + 1])
        subgames = [
            ' 0 1 2 3 4 5 6 7 8',
            ' 0 ' + sb(0, 2) + ' │ ' + sb(9, 11) + ' │ ' + sb(18, 20),
            ' 1 ' + sb(3, 5) + ' │ ' + sb(12, 14) + ' │ ' + sb(21, 23),
            ' 2 ' + sb(6, 8) + ' │ ' + sb(15, 17) + ' │ ' + sb(24, 26),
            ' ' + '—' * 21,
            ' 3 ' + sb(27, 29) + ' │ ' + sb(36, 38) + ' │ ' + sb(45, 47),
            ' 4 ' + sb(30, 32) + ' │ ' + sb(39, 41) + ' │ ' + sb(48, 50),
            ' 5 ' + sb(33, 35) + ' │ ' + sb(42, 44) + ' │ ' + sb(51, 53),
            ' ' + '—' * 21,
            ' 6 ' + sb(54, 56) + ' │ ' + sb(63, 65) + ' │ ' + sb(72, 74),
            ' 7 ' + sb(57, 59) + ' │ ' + sb(66, 68) + ' │ ' + sb(75, 77),
            ' 8 ' + sb(60, 62) + ' │ ' + sb(69, 71) + ' │ ' + sb(78, 80),
        ]
        supergame = [
            ' ' + sp(0, 2),
            ' ' + sp(3, 5),
            ' ' + sp(6, 8),
        ]
        subgames = '\n'.join(subgames)
        supergame = '\n'.join(supergame)
        next_symbol = state_values_map[self.next_symbol]
        constraint = 'None' if self.is_unconstrained() else str(self.constraint)
        result = 'None'
        if self.is_result_X():
            result = 'X_WON'
        elif self.is_result_O():
            result = 'O_WON'
        elif self.is_result_draw():
            result = 'DRAW'
        output = '{cls}(\n'
        output += '  subgames:\n{subgames}\n'
        if not self.is_terminated():
            output += '  next_symbol: {next_symbol}\n'
            output += '  constraint: {constraint}\n'
        output += '  supergame:\n{supergame}\n'
        output += '  result: {result}\n)'
        output = output.format(
            cls=self.__class__.__name__,
            subgames=subgames,
            supergame=supergame,
            next_symbol=next_symbol,
            constraint=constraint,
            result=result,
        )
        return output
class UltimateTicTacToeError(Exception):
    """Raised for illegal actions or inconsistent game state."""
    pass
| 39.845361
| 104
| 0.573954
|
4a04232704ce529e3e4da8bdfb3fa9dce87a9bbc
| 1,529
|
py
|
Python
|
common/directory.py
|
Suryavf/SelfDrivingCar
|
362ac830516366b1c31ef01ea0456eb99f0d9722
|
[
"MIT"
] | 11
|
2019-08-14T18:55:13.000Z
|
2021-09-10T05:54:49.000Z
|
common/directory.py
|
Suryavf/TVAnet
|
97170e81d7c1a0f683a0fdaae4c42989350823fc
|
[
"MIT"
] | 3
|
2020-05-05T15:20:20.000Z
|
2021-06-22T07:47:26.000Z
|
common/directory.py
|
Suryavf/TVAnet
|
97170e81d7c1a0f683a0fdaae4c42989350823fc
|
[
"MIT"
] | 1
|
2020-12-18T15:46:09.000Z
|
2020-12-18T15:46:09.000Z
|
# Registries of valid component names used to select model pieces by string.
ImitationModel   = ['Basic','Multimodal','Codevilla18','Codevilla19','Kim2017','Experimental','ExpBranch','Approach']
Encoder          = ['CNN5', 'CNN5Max', 'ResNet50', 'WideResNet50', 'VGG19', 'EfficientNetB0', 'EfficientNetB1', 'EfficientNetB2', 'EfficientNetB3']
Decoder          = ['BasicDecoder', 'DualDecoder', 'TVADecoder']
Attention        = ['Atten1','Atten2','Atten3','Atten4','Atten5','Atten6','Atten7','Atten8','Atten9','Atten10','Atten11','Atten12','Atten13','Atten14']
SpatialAttention = ['MHSA','MHRSA']
CategoryAttention = ['ECAnet']
Control          = ['SumHiddenFeature','BranchesModule','SeqModule']

# Fixed episode subset used for the 100-episode study.
FilesForStudy100 = ['episode_00044','episode_00219','episode_00350','episode_00406','episode_00592','episode_00751',
                    'episode_01231','episode_01768','episode_01798','episode_02042','episode_04371','episode_05123',
                    'episode_00085','episode_00304','episode_00591','episode_00708','episode_01001',
                    'episode_01294','episode_01654','episode_01928','episode_03260','episode_04311','episode_05073',
                    'episode_00145','episode_00235','episode_00674','episode_00729','episode_00816','episode_00937',
                    'episode_01234','episode_01860','episode_02797','episode_03368','episode_04314','episode_05118',
                    'episode_00189','episode_00190','episode_00325','episode_01813','episode_02658','episode_03083',
                    'episode_03103','episode_03535','episode_04370','episode_04577','episode_04578','episode_05011']
| 101.933333
| 149
| 0.683453
|
4a0423a77d2b8395a54db48e4eeb4a63716e0ff1
| 1,428
|
py
|
Python
|
currencies.py
|
EnidVyshka/BeyondPricingCodingChallenge
|
002b0acf5599e37eb849900b0c09640ce1cee668
|
[
"MIT"
] | null | null | null |
currencies.py
|
EnidVyshka/BeyondPricingCodingChallenge
|
002b0acf5599e37eb849900b0c09640ce1cee668
|
[
"MIT"
] | null | null | null |
currencies.py
|
EnidVyshka/BeyondPricingCodingChallenge
|
002b0acf5599e37eb849900b0c09640ce1cee668
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, asdict

import requests
import ast

# openexchangerates.org application id.
# NOTE(review): this API key is hard-coded in source — consider moving it to
# an environment variable or config file.
YOUR_APP_ID = "c445eedf3afd4694bce32d580a9186f8"

# NOTE(review): this performs a network request at *import time*, so the
# module cannot be imported without connectivity. The JSON body is parsed
# with ast.literal_eval rather than json.loads — verify this is intentional.
decoded_request = requests.get(
    "https://openexchangerates.org/api/latest.json?app_id={}".format(YOUR_APP_ID)
).content.decode("utf-8")

# Latest rates payload; exchange_dict["rates"] maps currency code -> quote.
exchange_dict = ast.literal_eval(decoded_request)
@dataclass
class Currency:
    """A currency described by its code (e.g. 'USD'), display name and symbol."""
    code: str
    name: str
    symbol: str

    def to_dict(self):
        """Return the dataclass fields as a plain dict."""
        return asdict(self)
class CURRENCIES:
    """Registry of supported currencies plus exchange-rate helpers.

    Exchange rates come from the module-level ``exchange_dict`` fetched at
    import time.
    """

    # Supported currency codes.
    USD = "USD"
    EUR = "EUR"
    JPY = "JPY"
    ILS = "ILS"
    AUD = "AUD"

    # Define all currencies, one ``Currency`` record per code.
    __ALL__ = [
        Currency(USD, "United States Dollar", "$"),
        Currency(EUR, "Euro", "€"),
        Currency(JPY, "Japanese Yen", "¥"),
        Currency(ILS, "Israeli shekel", "₪"),
        Currency(AUD, "Australian Dollar", "A$"),
    ]

    # Organize per code for O(1) lookup.
    __PER_CODE__ = {currency.code: currency for currency in __ALL__}

    @classmethod
    def get_all(cls):
        """Return the list of all supported ``Currency`` objects."""
        return cls.__ALL__

    @classmethod
    def get_by_code(cls, code):
        """Return the ``Currency`` registered under *code*.

        Raises:
            KeyError: if *code* is not a supported currency.  (Previously a
                bare ``Exception``; ``KeyError`` is a subclass of ``Exception``
                so existing ``except Exception`` callers still catch it.)
        """
        try:
            # EAFP: a single dict lookup instead of a membership test plus lookup.
            return cls.__PER_CODE__[code]
        except KeyError:
            raise KeyError(f"Currency with code={code} does not exist") from None

    @classmethod
    def exchange_coefficient(cls, base_currency, target_currency) -> float:
        """Return the multiplier converting *base_currency* into *target_currency*.

        Both arguments are currency code strings; raises ``KeyError`` if a code
        is missing from the fetched rates table.
        """
        rates = exchange_dict["rates"]
        return rates[target_currency] / rates[base_currency]
| 25.052632
| 81
| 0.636555
|
4a04241ddaa761c62b06fab0d7f20e977c70f011
| 7,067
|
py
|
Python
|
my_library/dataset_readers/full_classifier_reader.py
|
OnlpLab/Event-Based-Modality
|
6e6932bf8d8e8f790f3baff0b89c38eb376854b8
|
[
"Apache-2.0"
] | null | null | null |
my_library/dataset_readers/full_classifier_reader.py
|
OnlpLab/Event-Based-Modality
|
6e6932bf8d8e8f790f3baff0b89c38eb376854b8
|
[
"Apache-2.0"
] | 1
|
2022-03-27T13:38:35.000Z
|
2022-03-28T16:20:45.000Z
|
my_library/dataset_readers/full_classifier_reader.py
|
OnlpLab/Modality
|
6e6932bf8d8e8f790f3baff0b89c38eb376854b8
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict, List, Union
import logging
import json
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import LabelField, TextField, Field, ListField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Tokenizer, SpacyTokenizer
from allennlp.data.tokenizers.sentence_splitter import SpacySentenceSplitter
import sys
logger = logging.getLogger(__name__)
DEFAULT_WORD_TAG_DELIMITER = "###"
@DatasetReader.register("full_classifier_reader")
class TextClassificationJsonReader(DatasetReader):
    """
    Reads labeled-span classification instances from a column-formatted file.

    Despite the class name, `_read` does NOT parse JSON: each input line is a
    whitespace-separated sequence of ``token###tag###head`` triples (``###``
    being ``word_tag_delimiter``).  A tag is either ``O`` or
    ``<prefix>-<sense>`` where prefix ``S`` or ``B`` opens a new labeled span
    and any other prefix extends the most recent one.  One `Instance` is
    yielded per labeled span, with the span wrapped in
    ``<target_start>`` / ``<target_end>`` markers and any token whose head
    column equals ``TARGET`` wrapped in ``<head> ... </head>``.

    The output of `read` is a list of `Instance` s with the fields:
        tokens : `TextField`
        label : `LabelField`
        metadata : `MetadataField` (sentence id, span indices, masked words, gold label)

    Registered as a `DatasetReader` with name "full_classifier_reader".

    [0]: https://www.cs.cmu.edu/~hovy/papers/16HLT-hierarchical-attention-networks.pdf

    # Parameters

    word_tag_delimiter : `str`, optional (default = `"###"`)
        Delimiter separating token, tag, and head inside each column triple.
    token_indexers : `Dict[str, TokenIndexer]`, optional
        optional (default=`{"tokens": SingleIdTokenIndexer()}`)
        We use this to define the input representation for the text.
        See :class:`TokenIndexer`.
    tokenizer : `Tokenizer`, optional (default = `SpacyTokenizer()`)
        Tokenizer to use to split the input text into words or other kinds of tokens.
    segment_sentences : `bool`, optional (default = `False`)
        If True, we will first segment the text into sentences using SpaCy and then tokenize words.
        Necessary for some models that require pre-segmentation of sentences, like [the Hierarchical
        Attention Network][0].
    max_sequence_length : `int`, optional (default = `510`)
        If specified, will truncate tokens to specified maximum length.
    skip_label_indexing : `bool`, optional (default = `False`)
        Whether or not to skip label indexing. You might want to skip label indexing if your
        labels are numbers, so the dataset reader doesn't re-number them starting from 0.
    """

    def __init__(
        self,
        word_tag_delimiter: str = DEFAULT_WORD_TAG_DELIMITER,
        token_indexers: Dict[str, TokenIndexer] = None,
        tokenizer: Tokenizer = None,
        segment_sentences: bool = False,
        max_sequence_length: int = 510,
        skip_label_indexing: bool = False,
        **kwargs,
    ) -> None:
        super().__init__( **kwargs)
        self._word_tag_delimiter = word_tag_delimiter
        # Fall back to spaCy tokenization when no tokenizer is supplied.
        self._tokenizer = tokenizer or SpacyTokenizer()
        self._segment_sentences = segment_sentences
        self._max_sequence_length = max_sequence_length
        self._skip_label_indexing = skip_label_indexing
        self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
        if self._segment_sentences:
            # Only build the sentence splitter when sentence segmentation is requested.
            self._sentence_segmenter = SpacySentenceSplitter()

    @overrides
    def _read(self, file_path):
        # Yield one Instance per labeled span found in the column file.
        with open(cached_path(file_path), "r") as data_file:
            for line_numb, line in enumerate(data_file.readlines()):
                if not line:
                    continue
                # Split the line into token###tag###head columns.
                line = line.split()
                text = []
                labels = []
                heads = []
                for token in line:
                    tok, tag, head = token.split(self._word_tag_delimiter)
                    text.append(tok)
                    labels.append(tag)
                    heads.append(head)
                # Collect labeled spans as [token_indices, sense] pairs.
                # An 'S' or 'B' prefix opens a new span; any other non-'O'
                # prefix (e.g. a continuation tag) extends the latest span.
                out = []
                for ind, label in enumerate(labels):
                    if label != 'O':
                        tag, sense = label.split('-')
                        if tag == 'S':
                            out.append([[ind], sense])
                        elif tag == 'B':
                            out.append([[ind], sense])
                        else:
                            # NOTE(review): assumes a continuation tag never
                            # precedes its 'B'/'S' opener; otherwise this
                            # raises IndexError on the empty list — confirm
                            # the data guarantees well-formed tag sequences.
                            out[-1][0].append(ind)
                for out_instance in out:
                    indices = out_instance[0]
                    # Re-split to get an independent copy of the token list.
                    newtext = ' '.join(text)
                    newtext = newtext.split()
                    for word_ind, word in enumerate(heads):
                        if word == 'TARGET':
                            # Mark tokens flagged in the head column.
                            newtext[word_ind] = '<head> '+newtext[word_ind]+' </head>'
                    masked_words = ' '.join(newtext[indices[0]:indices[-1]+1])
                    # Surround the labeled span with explicit boundary markers.
                    newtext = newtext[:indices[0]]+['<target_start>']+newtext[indices[0]:indices[-1]+1]+['<target_end>']+newtext[indices[-1]+1:]
                    out_text = ' '.join(newtext)
                    label = out_instance[1]
                    indices = [str(ind) for ind in indices]
                    # Only keep spans with a non-empty sense on sentences longer
                    # than 7 tokens.
                    if len(label) > 0 and len(text)>7:
                        yield self.text_to_instance(text=out_text, label=label, sent_id=line_numb, indices=' '.join(indices), masked_words = masked_words)

    def _truncate(self, tokens):
        """
        truncate a set of tokens using the provided sequence length
        """
        if len(tokens) > self._max_sequence_length:
            tokens = tokens[: self._max_sequence_length]
        return tokens

    @overrides
    def text_to_instance(
        self, text: str, sent_id, indices, masked_words, label: Union[str, int] = None
    ) -> Instance: # type: ignore
        """
        # Parameters

        text : `str`, required.
            The text to classify
        sent_id : line number of the source sentence (stored in metadata).
        indices : space-joined token indices of the target span (metadata).
        masked_words : surface form of the target span (metadata).
        label : `str`, optional, (default = `None`).
            The label for this text.

        # Returns

        An `Instance` containing the following fields:
            - tokens (`TextField`) :
              The tokens in the sentence or phrase.
            - label (`LabelField`) :
              The label label of the sentence or phrase.
            - metadata (`MetadataField`) :
              sent_id, indices, masked_words and the gold label.
        """
        fields: Dict[str, Field] = {}
        if self._segment_sentences:
            # Hierarchical mode: one TextField per sentence, wrapped in a ListField.
            sentences: List[Field] = []
            sentence_splits = self._sentence_segmenter.split_sentences(text)
            for sentence in sentence_splits:
                word_tokens = self._tokenizer.tokenize(sentence)
                if self._max_sequence_length is not None:
                    word_tokens = self._truncate(word_tokens)
                # NOTE(review): unlike the flat branch below, no token_indexers
                # are passed here — confirm the indexers are applied elsewhere.
                sentences.append(TextField(word_tokens))
            fields["tokens"] = ListField(sentences)
        else:
            tokens = self._tokenizer.tokenize(text)
            if self._max_sequence_length is not None:
                tokens = self._truncate(tokens)
            fields["tokens"] = TextField(tokens, self._token_indexers)
        if label is not None:
            fields["label"] = LabelField(label, skip_indexing=self._skip_label_indexing)
        fields["metadata"] = MetadataField({"sent_id": sent_id, "indices": indices, "masked_words": masked_words, "gold_label": label})
        return Instance(fields)
| 46.493421
| 154
| 0.606339
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.