hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f717d534a0b8a9554184e8b7505e68a755c8fbed | 2,086 | py | Python | app/models/Config.py | samousli/ikiru | 4a4a002db398dd7ba1b112ea406c92b0a8cb6c37 | [
"MIT"
] | null | null | null | app/models/Config.py | samousli/ikiru | 4a4a002db398dd7ba1b112ea406c92b0a8cb6c37 | [
"MIT"
] | null | null | null | app/models/Config.py | samousli/ikiru | 4a4a002db398dd7ba1b112ea406c92b0a8cb6c37 | [
"MIT"
] | null | null | null | from ast import literal_eval
import enum
import logging
from . import db
from .Base import Base
LOG = logging.getLogger(__name__)
class ValueType(enum.Enum):
    """Supported config value types.

    Each member's value is a 3-tuple:
    (db ordinal, Python type, converter that parses the stored string
    back into that Python type).
    """
    Int = (1, int, int)
    # bool('False') is truthy, so compare against the literal string instead.
    Bool = (2, bool, lambda b: b == 'True')
    Float = (3, float, float)
    Text = (4, str, lambda s: s)
    # Container types round-trip through str()/literal_eval.
    Tuple = (5, tuple, literal_eval)
    List = (6, list, literal_eval)
    Set = (7, set, literal_eval)
    Dict = (8, dict, literal_eval)
# All Python types a Config value may have (used for isinstance checks).
ACCEPTED_TYPES = tuple(k.value[1] for k in ValueType)
# Reverse lookup: Python type -> ValueType member.
PYTHON_TYPE_TO_ENUM_TYPE = {k.value[1]: k for k in ValueType}
# ValueType member -> parser rebuilding the value from its stored string.
VALUETYPE_TO_CONVERTER_FUNC = {k: k.value[2] for k in ValueType}
# Secrets that must never be copied into the Config table.
IGNORE_LIST = {'SQLALCHEMY_DATABASE_URI', 'JWT_SECRET_KEY', 'SECRET_KEY'}
class Config(Base):
    """Key/value application setting persisted in the database.

    Values are stored as strings alongside a ValueType tag; the `value`
    property transparently converts to and from the original Python type.
    """
    # ToDo: Check for SQLAlchemy record size optimizations
    # Allowing for large key sizes to allow tiered keys e.g. <parent>.<child>.<key_str>
    key = db.Column(db.String(192), unique=True)
    # Type tag recording the Python type of the stored value.
    _type = db.Column(db.Enum(ValueType))
    # String serialisation of the value (str(val) on write).
    _value = db.Column(db.String(192))
    @property
    def value(self):
        """Return the stored string converted back to its Python type."""
        if self._type in VALUETYPE_TO_CONVERTER_FUNC:
            return VALUETYPE_TO_CONVERTER_FUNC[self._type](self._value)
        raise TypeError('Invalid config value type.')
    @value.setter
    def value(self, val):
        """Record `val`'s type tag and string form; reject unsupported types."""
        if not isinstance(val, ACCEPTED_TYPES):
            raise ValueError(f'Invalid config value type ([{type(val)}] {val}).')
        self._type = PYTHON_TYPE_TO_ENUM_TYPE[type(val)]
        self._value = str(val)
    @staticmethod
    def populate_from_conf_object(conf, name=None):
        """Copy a config object's entries (minus IGNORE_LIST secrets) into the table."""
        val = name or conf.__name__
        db.session.add(Config(key='IKIRU_ENV', value=val))
        for k, v in conf.as_dict().items():
            if k in IGNORE_LIST:
                continue
            db.session.add(Config(key=k, value=v))
        db.session.commit()
    @staticmethod
    def load_from_db(app):
        """Load every stored Config row into `app.config`."""
        with app.app_context():
            for conf in Config.query:
                app.config[conf.key] = conf.value
    def __repr__(self):
        return f'{self.__class__.__name__}(key={self.key}, value={self.value})'
| 31.606061 | 87 | 0.645733 | from ast import literal_eval
import enum
import logging
from . import db
from .Base import Base
LOG = logging.getLogger(__name__)
class ValueType(enum.Enum):
Int = (1, int, int)
Bool = (2, bool, lambda b: b == 'True')
Float = (3, float, float)
Text = (4, str, lambda s: s)
Tuple = (5, tuple, literal_eval)
List = (6, list, literal_eval)
Set = (7, set, literal_eval)
Dict = (8, dict, literal_eval)
ACCEPTED_TYPES = tuple(k.value[1] for k in ValueType)
PYTHON_TYPE_TO_ENUM_TYPE = {k.value[1]: k for k in ValueType}
VALUETYPE_TO_CONVERTER_FUNC = {k: k.value[2] for k in ValueType}
IGNORE_LIST = {'SQLALCHEMY_DATABASE_URI', 'JWT_SECRET_KEY', 'SECRET_KEY'}
class Config(Base):
key = db.Column(db.String(192), unique=True)
_type = db.Column(db.Enum(ValueType))
_value = db.Column(db.String(192))
@property
def value(self):
if self._type in VALUETYPE_TO_CONVERTER_FUNC:
return VALUETYPE_TO_CONVERTER_FUNC[self._type](self._value)
raise TypeError('Invalid config value type.')
@value.setter
def value(self, val):
if not isinstance(val, ACCEPTED_TYPES):
raise ValueError(f'Invalid config value type ([{type(val)}] {val}).')
self._type = PYTHON_TYPE_TO_ENUM_TYPE[type(val)]
self._value = str(val)
@staticmethod
def populate_from_conf_object(conf, name=None):
val = name or conf.__name__
db.session.add(Config(key='IKIRU_ENV', value=val))
for k, v in conf.as_dict().items():
if k in IGNORE_LIST:
continue
db.session.add(Config(key=k, value=v))
db.session.commit()
@staticmethod
def load_from_db(app):
with app.app_context():
for conf in Config.query:
app.config[conf.key] = conf.value
def __repr__(self):
return f'{self.__class__.__name__}(key={self.key}, value={self.value})'
| true | true |
f717d61c108d4a173265538f31a31ca0754a90ee | 2,281 | py | Python | Lectures/lec_05/genSymbolImg.py | diable201/ComputerVision | 5ee153363fa6757d3cd8b1add3e5d48b01a499e2 | [
"MIT"
] | 1 | 2021-02-23T08:44:02.000Z | 2021-02-23T08:44:02.000Z | Lectures/lec_05/genSymbolImg.py | diable201/ComputerVision | 5ee153363fa6757d3cd8b1add3e5d48b01a499e2 | [
"MIT"
] | 1 | 2021-02-23T09:12:44.000Z | 2021-02-27T17:05:58.000Z | Lectures/lec_05/genSymbolImg.py | diable201/ComputerVision | 5ee153363fa6757d3cd8b1add3e5d48b01a499e2 | [
"MIT"
] | 1 | 2021-02-28T14:15:57.000Z | 2021-02-28T14:15:57.000Z | import cv2
import numpy as np
from random import randint, uniform
import string, random
def addNoise(image):
    """Sprinkle salt-and-pepper noise over a copy of *image*.

    image: uint8 ndarray (grayscale canvas in this project). Returns a
    new array; the input is not modified. Roughly ``amount`` of the
    pixels are corrupted, split between salt (white) and pepper (black)
    by ``s_vs_p``.

    Fixes vs. the original:
    - removed unused ``row, col`` unpacking;
    - ``np.random.randint(0, dim - 1, ...)`` draws from [0, dim-1), so the
      last row/column could never be hit and size-1 dims raised
      ValueError -- now draws from the full [0, dim) range;
    - salt pixels were written as 1 (nearly black on uint8); salt means
      maximum intensity, so 255 is written instead.
    """
    s_vs_p = 0.4   # fraction of noisy pixels that are salt (vs pepper)
    amount = 0.01  # fraction of all pixels to corrupt
    out = np.copy(image)
    # Salt mode: random pixels forced to white.
    num_salt = int(np.ceil(amount * image.size * s_vs_p))
    coords = tuple(np.random.randint(0, dim, num_salt) for dim in image.shape)
    out[coords] = 255
    # Pepper mode: random pixels forced to black.
    num_pepper = int(np.ceil(amount * image.size * (1. - s_vs_p)))
    coords = tuple(np.random.randint(0, dim, num_pepper) for dim in image.shape)
    out[coords] = 0
    return out
# def addLines(img):
# for i in range(randint(0,2)):
# y1 = randint(0, img.shape[0])
# y2 = randint(0, img.shape[0])
# cv2.line(img, (0, y1), (img.shape[1], y2), 0, 1)
def addBlur(img, kw, kh):
    """Return *img* smoothed with a kw x kh box (mean) filter via cv2.blur."""
    return cv2.blur(img, (kw, kh))
def text_generator(chars, size=8):
    """Build a random string of ``size`` characters drawn (with
    replacement) from ``chars``."""
    picked = [random.choice(chars) for _ in range(size)]
    return "".join(picked)
def addText(img, chars, font, size, line_size):
    """Draw one random character from *chars* onto *img* in place and return it.

    NOTE(review): only a single character is drawn (text_generator(chars, 1));
    `size` is the cv2 font scale, not the text length. The (0, 0, 255)
    colour collapses to a single channel on the grayscale canvas used here.
    """
    text = text_generator(chars, 1)
    cv2.putText(img, text, (0, img.shape[0]-4), font, size, (0, 0, 255), line_size, cv2.LINE_AA)
    return text
# Canvas (height, width) per cv2 font id 0-5; indexed by `font` below.
sizes = [(70,58),(40,35),(75,70),(70,70),(70,70),(50,50)]
def genSymbolImg(chars = string.ascii_uppercase + string.digits,
                font = None,
                line_size = None,
                blur = None,
                kw = None,
                kh = None):
    """Generate one synthetic character image plus its label.

    Any parameter left as None is randomised: font id (0-5), stroke
    width (1-3), whether to blur, and the blur kernel size (3-9 per
    axis). Salt-and-pepper noise is applied with probability 1/2.
    Returns (uint8 image, character string).
    """
    if font is None:
        font = randint(0, 5)
    # if size is None:
    #     size = uniform(2.5, 3.5)
    if line_size is None:
        line_size = randint(1, 3)
    if blur is None:
        blur = randint(0, 1)
    if kw is None:
        kw = randint(3, 9)
    if kh is None:
        kh = randint(3, 9)
    # White canvas sized for the chosen font; addText draws in place.
    genImg = np.full(sizes[font], 255, dtype= np.uint8)
    text = addText(genImg, chars, font, 3, line_size)
    if randint(0, 1):
        genImg = addNoise(genImg)
    # if lines:
    #     addLines(genImg)
    if blur:
        genImg = addBlur(genImg, kw, kh)
    return genImg, text
if __name__ == '__main__':
    # Demo loop: show generated symbol images until Esc (key code 27) is
    # pressed or 10000 images have been displayed.
    # Fix: `xrange` is Python 2 only and raises NameError on Python 3
    # (the file already uses Python 3 style `print(...)`); `range` works
    # on both and is lazy on Python 3.
    for _ in range(10000):
        img, text = genSymbolImg(kw = 5, kh = 5, blur = 1)
        print(text)
        cv2.imshow("W", img)
        k = cv2.waitKey(0)
        if k == 27:
            break
import numpy as np
from random import randint, uniform
import string, random
def addNoise(image):
row,col = image.shape
s_vs_p = 0.4
amount = 0.01
out = np.copy(image)
num_salt = np.ceil(amount * image.size * s_vs_p)
coords = [np.random.randint(0, i - 1, int(num_salt))
for i in image.shape]
out[tuple(coords)] = 1
num_pepper = np.ceil(amount* image.size * (1. - s_vs_p))
coords = [np.random.randint(0, i - 1, int(num_pepper)) for i in image.shape]
out[tuple(coords)] = 0
return out
def addBlur(img, kw, kh):
return cv2.blur(img, (kw, kh))
def text_generator(chars, size = 8):
return ''.join(random.choice(chars) for _ in range(size))
def addText(img, chars, font, size, line_size):
text = text_generator(chars, 1)
cv2.putText(img, text, (0, img.shape[0]-4), font, size, (0, 0, 255), line_size, cv2.LINE_AA)
return text
sizes = [(70,58),(40,35),(75,70),(70,70),(70,70),(50,50)]
def genSymbolImg(chars = string.ascii_uppercase + string.digits,
font = None,
line_size = None,
blur = None,
kw = None,
kh = None):
if font is None:
font = randint(0, 5)
if line_size is None:
line_size = randint(1, 3)
if blur is None:
blur = randint(0, 1)
if kw is None:
kw = randint(3, 9)
if kh is None:
kh = randint(3, 9)
genImg = np.full(sizes[font], 255, dtype= np.uint8)
text = addText(genImg, chars, font, 3, line_size)
if randint(0, 1):
genImg = addNoise(genImg)
if blur:
genImg = addBlur(genImg, kw, kh)
return genImg, text
if __name__ == '__main__':
for i in xrange(10000):
img, text = genSymbolImg(kw = 5, kh = 5, blur = 1)
print(text)
cv2.imshow("W", img)
k = cv2.waitKey(0)
if k == 27:
break | true | true |
f717d65f584bd26a05f750dc31f00fd352a9f051 | 7,242 | py | Python | filer/admin/clipboardadmin.py | haricot/django-filer | f3b90fbbb90a3c99ade104b1c3190621773fa7e1 | [
"BSD-3-Clause"
] | null | null | null | filer/admin/clipboardadmin.py | haricot/django-filer | f3b90fbbb90a3c99ade104b1c3190621773fa7e1 | [
"BSD-3-Clause"
] | 11 | 2019-11-02T20:57:52.000Z | 2020-09-27T09:08:33.000Z | filer/admin/clipboardadmin.py | haricot/django-filer | f3b90fbbb90a3c99ade104b1c3190621773fa7e1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.conf.urls import url
from django.contrib import admin
from django.forms.models import modelform_factory
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from .. import settings as filer_settings
from ..models import Clipboard, ClipboardItem, Folder
from ..utils.files import (
UploadException, handle_request_files_upload, handle_upload,
)
from ..utils.loader import load_model
from . import views
# User-facing error strings returned as JSON by ajax_upload.
NO_FOLDER_ERROR = "Can't find folder to upload. Please refresh and try again"
NO_PERMISSIONS_FOR_FOLDER = (
    "Can't use this folder, Permission Denied. Please select another folder."
)
# Swappable image model class (settings may point at a custom subclass).
Image = load_model(filer_settings.FILER_IMAGE_MODEL)
# ModelAdmins
class ClipboardItemInline(admin.TabularInline):
    """Tabular inline listing a clipboard's items on the debug Clipboard admin."""
    model = ClipboardItem
class ClipboardAdmin(admin.ModelAdmin):
    """Debug-only admin for Clipboard; also hosts the AJAX upload endpoints."""
    model = Clipboard
    inlines = [ClipboardItemInline]
    filter_horizontal = ('files',)
    raw_id_fields = ('user',)
    verbose_name = "DEBUG Clipboard"
    verbose_name_plural = "DEBUG Clipboards"
    def get_urls(self):
        """Prepend clipboard-operation and upload URLs to the stock admin URLs."""
        return [
            url(r'^operations/paste_clipboard_to_folder/$',
                self.admin_site.admin_view(views.paste_clipboard_to_folder),
                name='filer-paste_clipboard_to_folder'),
            url(r'^operations/discard_clipboard/$',
                self.admin_site.admin_view(views.discard_clipboard),
                name='filer-discard_clipboard'),
            url(r'^operations/delete_clipboard/$',
                self.admin_site.admin_view(views.delete_clipboard),
                name='filer-delete_clipboard'),
            # The upload endpoints are NOT wrapped in admin_view: ajax_upload
            # is csrf_exempt and performs its own folder permission check.
            url(r'^operations/upload/(?P<folder_id>[0-9]+)/$',
                ajax_upload,
                name='filer-ajax_upload'),
            url(r'^operations/upload/no_folder/$',
                ajax_upload,
                name='filer-ajax_upload'),
        ] + super(ClipboardAdmin, self).get_urls()
    def get_model_perms(self, *args, **kwargs):
        """
        It seems this is only used for the list view. NICE :-)

        Returning all-False hides the model from the admin index while the
        extra URLs above stay reachable.
        """
        return {
            'add': False,
            'change': False,
            'delete': False,
        }
@csrf_exempt
def ajax_upload(request, folder_id=None):
    """
    Receives an upload from the uploader. Receives only one file at a time.

    Returns a JsonResponse: file metadata (plus thumbnail URLs for
    images) on success, or an ``error`` key (HTTP 500 for upload and
    validation failures) otherwise.
    """
    folder = None
    if folder_id:
        try:
            # Get folder
            folder = Folder.objects.get(pk=folder_id)
        except Folder.DoesNotExist:
            return JsonResponse({'error': NO_FOLDER_ERROR})
    # check permissions
    if folder and not folder.has_add_children_permission(request):
        return JsonResponse({'error': NO_PERMISSIONS_FOR_FOLDER})
    try:
        if len(request.FILES) == 1:
            # dont check if request is ajax or not, just grab the file
            upload, filename, is_raw = handle_request_files_upload(request)
        else:
            # else process the request as usual
            upload, filename, is_raw = handle_upload(request)
        # TODO: Deprecated/refactor
        # Get clipboad
        # clipboard = Clipboard.objects.get_or_create(user=request.user)[0]
        # find the file type: first matching model class wins.
        for filer_class in filer_settings.FILER_FILE_MODELS:
            FileSubClass = load_model(filer_class)
            # TODO: What if there are more than one that qualify?
            if FileSubClass.matches_file_type(filename, upload, request):
                FileForm = modelform_factory(
                    model=FileSubClass,
                    fields=('original_filename', 'owner', 'file')
                )
                break
        # NOTE(review): if no FILER_FILE_MODELS class matches, FileForm is
        # never bound and the next line raises NameError (surfacing as an
        # unhandled error rather than an UploadException).
        uploadform = FileForm({'original_filename': filename,
                               'owner': request.user.pk},
                              {'file': upload})
        if uploadform.is_valid():
            file_obj = uploadform.save(commit=False)
            # Enforce the FILER_IS_PUBLIC_DEFAULT
            file_obj.is_public = filer_settings.FILER_IS_PUBLIC_DEFAULT
            file_obj.folder = folder
            file_with_thumbs = None
            data = {}
            file_obj.save()
            # TODO: Deprecated/refactor
            # clipboard_item = ClipboardItem(
            #     clipboard=clipboard, file=file_obj)
            # clipboard_item.save()
            # Try to generate thumbnails.
            # NOTE(review): when the file already has icons, file_with_thumbs
            # stays None and the whole thumbnail/metadata block below is
            # skipped, returning an empty JSON object -- confirm intended.
            if not file_obj.icons:
                if file_obj.extension not in filer_settings.FILER_FILE_EXTENSION_NOTHUMBS:
                    # There is no point to continue, as we can't generate
                    # thumbnails for this file. Usual reasons: bad format or
                    # filename.
                    file_obj.delete()
                    # This would be logged in BaseImage._generate_thumbnails()
                    # if FILER_ENABLE_LOGGING is on.
                    file_with_thumbs = True
                    return JsonResponse(
                        {'error': 'failed to generate icons for file'},
                        status=500,
                    )
                else:
                    file_with_thumbs = True
            if file_with_thumbs:
                # Backwards compatibility: try to get specific icon size (32px)
                # first. Then try medium icon size (they are already sorted),
                # fallback to the first (smallest) configured icon.
                thumbnail = None
                for size in (['32']
                             + filer_settings.FILER_ADMIN_ICON_SIZES[1::-1]):
                    try:
                        thumbnail = file_obj.icons[size]
                        break
                    except KeyError:
                        continue
                # prepare preview thumbnail
                # NOTE(review): exact-type check -- subclasses of Image would
                # skip this preview block; presumably intentional, verify.
                if type(file_obj) == Image:
                    thumbnail_180_options = {
                        'size': (180, 180),
                        'crop': True,
                        'upscale': True,
                    }
                    thumbnail_180 = file_obj.file.get_thumbnail(
                        thumbnail_180_options)
                    data_thumbs = {
                        'thumbnail': thumbnail,
                        'thumbnail_180': thumbnail_180.url
                    }
                    data.update(data_thumbs)
                data_common = {
                    'alt_text': '',
                    'label': str(file_obj),
                    'file_id': file_obj.pk,
                    'original_image': file_obj.url
                }
                data.update(data_common)
            return JsonResponse(data)
        else:
            # Collapse the form's field errors into a single message.
            form_errors = '; '.join(['%s: %s' % (
                field,
                ', '.join(errors)) for field, errors in list(
                    uploadform.errors.items())
            ])
            raise UploadException(
                "AJAX request not valid: form invalid '%s'" % (
                    form_errors,))
    except UploadException as e:
        return JsonResponse({'error': str(e)}, status=500)
| 38.521277 | 90 | 0.543496 |
from __future__ import absolute_import
from django.conf.urls import url
from django.contrib import admin
from django.forms.models import modelform_factory
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from .. import settings as filer_settings
from ..models import Clipboard, ClipboardItem, Folder
from ..utils.files import (
UploadException, handle_request_files_upload, handle_upload,
)
from ..utils.loader import load_model
from . import views
NO_FOLDER_ERROR = "Can't find folder to upload. Please refresh and try again"
NO_PERMISSIONS_FOR_FOLDER = (
"Can't use this folder, Permission Denied. Please select another folder."
)
Image = load_model(filer_settings.FILER_IMAGE_MODEL)
class ClipboardItemInline(admin.TabularInline):
model = ClipboardItem
class ClipboardAdmin(admin.ModelAdmin):
model = Clipboard
inlines = [ClipboardItemInline]
filter_horizontal = ('files',)
raw_id_fields = ('user',)
verbose_name = "DEBUG Clipboard"
verbose_name_plural = "DEBUG Clipboards"
def get_urls(self):
return [
url(r'^operations/paste_clipboard_to_folder/$',
self.admin_site.admin_view(views.paste_clipboard_to_folder),
name='filer-paste_clipboard_to_folder'),
url(r'^operations/discard_clipboard/$',
self.admin_site.admin_view(views.discard_clipboard),
name='filer-discard_clipboard'),
url(r'^operations/delete_clipboard/$',
self.admin_site.admin_view(views.delete_clipboard),
name='filer-delete_clipboard'),
url(r'^operations/upload/(?P<folder_id>[0-9]+)/$',
ajax_upload,
name='filer-ajax_upload'),
url(r'^operations/upload/no_folder/$',
ajax_upload,
name='filer-ajax_upload'),
] + super(ClipboardAdmin, self).get_urls()
def get_model_perms(self, *args, **kwargs):
return {
'add': False,
'change': False,
'delete': False,
}
@csrf_exempt
def ajax_upload(request, folder_id=None):
folder = None
if folder_id:
try:
folder = Folder.objects.get(pk=folder_id)
except Folder.DoesNotExist:
return JsonResponse({'error': NO_FOLDER_ERROR})
if folder and not folder.has_add_children_permission(request):
return JsonResponse({'error': NO_PERMISSIONS_FOR_FOLDER})
try:
if len(request.FILES) == 1:
upload, filename, is_raw = handle_request_files_upload(request)
else:
upload, filename, is_raw = handle_upload(request)
for filer_class in filer_settings.FILER_FILE_MODELS:
FileSubClass = load_model(filer_class)
if FileSubClass.matches_file_type(filename, upload, request):
FileForm = modelform_factory(
model=FileSubClass,
fields=('original_filename', 'owner', 'file')
)
break
uploadform = FileForm({'original_filename': filename,
'owner': request.user.pk},
{'file': upload})
if uploadform.is_valid():
file_obj = uploadform.save(commit=False)
file_obj.is_public = filer_settings.FILER_IS_PUBLIC_DEFAULT
file_obj.folder = folder
file_with_thumbs = None
data = {}
file_obj.save()
if not file_obj.icons:
if file_obj.extension not in filer_settings.FILER_FILE_EXTENSION_NOTHUMBS:
# thumbnails for this file. Usual reasons: bad format or
# filename.
file_obj.delete()
# This would be logged in BaseImage._generate_thumbnails()
# if FILER_ENABLE_LOGGING is on.
file_with_thumbs = True
return JsonResponse(
{'error': 'failed to generate icons for file'},
status=500,
)
else:
file_with_thumbs = True
if file_with_thumbs:
# Backwards compatibility: try to get specific icon size (32px)
# first. Then try medium icon size (they are already sorted),
# fallback to the first (smallest) configured icon.
thumbnail = None
for size in (['32']
+ filer_settings.FILER_ADMIN_ICON_SIZES[1::-1]):
try:
thumbnail = file_obj.icons[size]
break
except KeyError:
continue
# prepare preview thumbnail
if type(file_obj) == Image:
thumbnail_180_options = {
'size': (180, 180),
'crop': True,
'upscale': True,
}
thumbnail_180 = file_obj.file.get_thumbnail(
thumbnail_180_options)
data_thumbs = {
'thumbnail': thumbnail,
'thumbnail_180': thumbnail_180.url
}
data.update(data_thumbs)
data_common = {
'alt_text': '',
'label': str(file_obj),
'file_id': file_obj.pk,
'original_image': file_obj.url
}
data.update(data_common)
return JsonResponse(data)
else:
form_errors = '; '.join(['%s: %s' % (
field,
', '.join(errors)) for field, errors in list(
uploadform.errors.items())
])
raise UploadException(
"AJAX request not valid: form invalid '%s'" % (
form_errors,))
except UploadException as e:
return JsonResponse({'error': str(e)}, status=500)
| true | true |
f717d6a1554caa5ee66a91c8ac8b847a9b74aadc | 1,083 | py | Python | indico/modules/events/reminders/blueprint.py | uxmaster/indico | ecd19f17ef6fdc9f5584f59c87ec647319ce5d31 | [
"MIT"
] | 1 | 2019-11-03T11:34:16.000Z | 2019-11-03T11:34:16.000Z | indico/modules/events/reminders/blueprint.py | NP-compete/indico | 80db7ca0ef9d1f3240a16b9ff2d84bf0bf26c549 | [
"MIT"
] | null | null | null | indico/modules/events/reminders/blueprint.py | NP-compete/indico | 80db7ca0ef9d1f3240a16b9ff2d84bf0bf26c549 | [
"MIT"
] | null | null | null | # This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from indico.modules.events.reminders.controllers import (RHAddReminder, RHDeleteReminder, RHEditReminder,
RHListReminders, RHPreviewReminder)
from indico.web.flask.wrappers import IndicoBlueprint
# All reminder-management routes hang off one per-event URL prefix.
_bp = IndicoBlueprint('event_reminders', __name__, template_folder='templates',
                      virtual_template_folder='events/reminders', url_prefix='/event/<confId>/manage/reminders')
# Reminder list overview.
_bp.add_url_rule('/', 'list', RHListReminders)
# Create a new reminder (form + submit).
_bp.add_url_rule('/add', 'add', RHAddReminder, methods=('GET', 'POST'))
# Render a preview for the posted reminder data.
_bp.add_url_rule('/preview', 'preview', RHPreviewReminder, methods=('POST',))
# Edit an existing reminder.
_bp.add_url_rule('/<int:reminder_id>/', 'edit', RHEditReminder, methods=('GET', 'POST'))
# Delete an existing reminder.
_bp.add_url_rule('/<int:reminder_id>/delete', 'delete', RHDeleteReminder, methods=('POST',))
| 47.086957 | 112 | 0.710988 |
from __future__ import unicode_literals
from indico.modules.events.reminders.controllers import (RHAddReminder, RHDeleteReminder, RHEditReminder,
RHListReminders, RHPreviewReminder)
from indico.web.flask.wrappers import IndicoBlueprint
_bp = IndicoBlueprint('event_reminders', __name__, template_folder='templates',
virtual_template_folder='events/reminders', url_prefix='/event/<confId>/manage/reminders')
_bp.add_url_rule('/', 'list', RHListReminders)
_bp.add_url_rule('/add', 'add', RHAddReminder, methods=('GET', 'POST'))
_bp.add_url_rule('/preview', 'preview', RHPreviewReminder, methods=('POST',))
_bp.add_url_rule('/<int:reminder_id>/', 'edit', RHEditReminder, methods=('GET', 'POST'))
_bp.add_url_rule('/<int:reminder_id>/delete', 'delete', RHDeleteReminder, methods=('POST',))
| true | true |
f717d74e6dbd251d84df1d67ed85b3b52ba68270 | 2,105 | py | Python | labellab-flask/api/models/User.py | darkshredder/LabelLab | fc762e6eea52b9023e38ba5f32bbcaa7cbc17dbe | [
"Apache-2.0"
] | 70 | 2019-01-25T19:16:00.000Z | 2022-03-23T14:37:28.000Z | labellab-flask/api/models/User.py | darkshredder/LabelLab | fc762e6eea52b9023e38ba5f32bbcaa7cbc17dbe | [
"Apache-2.0"
] | 350 | 2019-01-30T10:50:34.000Z | 2022-03-31T19:58:44.000Z | labellab-flask/api/models/User.py | darkshredder/LabelLab | fc762e6eea52b9023e38ba5f32bbcaa7cbc17dbe | [
"Apache-2.0"
] | 140 | 2019-01-30T08:53:35.000Z | 2022-03-25T15:37:12.000Z | from datetime import datetime
from flask import current_app, jsonify
from flask_bcrypt import Bcrypt
import json
from api.extensions import db, Base, ma
class User(db.Model):
    """
    ORM model for a registered user.

    Holds credentials (bcrypt-hashed password), profile data, and the
    relationships to the projects the user owns or participates in.
    """
    __tablename__ = "user"
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), nullable=False)
    username = db.Column(db.String(80), unique=True, nullable=False,)
    password = db.Column(db.String(128))
    email = db.Column(db.String(100), nullable=False, unique=True)
    # Fix: pass the callable, not its result. `default=datetime.now()`
    # evaluated once at import time, so every row shared the process
    # start timestamp; with the bare callable it is evaluated per insert.
    date = db.Column(db.DateTime,
                    default=datetime.now)
    thumbnail = db.Column(db.String(1500),
                    default='https://react.semantic-ui.com/images/avatar/large/elliot.jpg')
    # Projects owned by this user; deleting the user cascades to them.
    projects = db.relationship('Project',
                            backref='user',
                            lazy=True,
                            cascade="all, save-update, delete",
                            passive_deletes=True)
    # Memberships in projects (possibly owned by other users).
    project_members = db.relationship('ProjectMember',
                            backref='user',
                            lazy=True,
                            cascade="all, save-update, delete",
                            passive_deletes=True)
    def __init__(self, name, username, email, password=None):
        """Initialize the user; hashes `password` when one is given."""
        self.name = name
        self.username = username
        self.email = email
        if password:
            self.password = User.generate_password_hash(password)
    def __repr__(self):
        """Return the debug representation of the user."""
        return "<User %r>" % self.name
    @staticmethod
    def generate_password_hash(password):
        """Return the bcrypt hash (10 rounds) of `password` as text."""
        return Bcrypt().generate_password_hash(password, 10).decode()
    def verify_password(self, password):
        """Return True iff `password` matches the stored bcrypt hash."""
        return Bcrypt().check_password_hash(self.password, password)
| 33.951613 | 94 | 0.560095 | from datetime import datetime
from flask import current_app, jsonify
from flask_bcrypt import Bcrypt
import json
from api.extensions import db, Base, ma
class User(db.Model):
__tablename__ = "user"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), nullable=False)
username = db.Column(db.String(80), unique=True, nullable=False,)
password = db.Column(db.String(128))
email = db.Column(db.String(100), nullable=False, unique=True)
date = db.Column(db.DateTime,
default=datetime.now())
thumbnail = db.Column(db.String(1500),
default='https://react.semantic-ui.com/images/avatar/large/elliot.jpg')
projects = db.relationship('Project',
backref='user',
lazy=True,
cascade="all, save-update, delete",
passive_deletes=True)
project_members = db.relationship('ProjectMember',
backref='user',
lazy=True,
cascade="all, save-update, delete",
passive_deletes=True)
def __init__(self, name, username, email, password=None):
self.name = name
self.username = username
self.email = email
if password:
self.password = User.generate_password_hash(password)
def __repr__(self):
return "<User %r>" % self.name
@staticmethod
def generate_password_hash(password):
return Bcrypt().generate_password_hash(password,10).decode()
def verify_password(self, password):
return Bcrypt().check_password_hash(self.password, password)
| true | true |
f717d76a526f8cc95d2243ac714a7311f53a0737 | 4,588 | py | Python | Project2/program1/clean.py | Sandeep-AnilKumar/Data-Science-Projects | b7d890f855ffc6edd0544ff3bd115fa85d19fd4f | [
"MIT"
] | null | null | null | Project2/program1/clean.py | Sandeep-AnilKumar/Data-Science-Projects | b7d890f855ffc6edd0544ff3bd115fa85d19fd4f | [
"MIT"
] | null | null | null | Project2/program1/clean.py | Sandeep-AnilKumar/Data-Science-Projects | b7d890f855ffc6edd0544ff3bd115fa85d19fd4f | [
"MIT"
] | null | null | null | import sys
import re
import string
# dictionary to store clean data.
cleaned_data = {}
# list of professors
professors = []
# list of courses
courses = []
def createdict(profname, course_list):
new_course_list = []
is_course_match = False
profname = profname.title()
prof_courses = course_list.split('|')
prof_courses = [course.strip() for course in prof_courses]
if profname not in cleaned_data:
cleaned_data.setdefault(profname, [])
for c in prof_courses:
# replace & with and
if '&' in c:
c = c.replace('&', 'and ')
# replace intro. or intro with introduction
matcher = re.match("intro\.?", c)
if matcher:
c = re.sub("intro\.?", "introduction ", c)
# replace or make all roman numerals capitals.
matcher = re.match(r"\bi+\b", c.lower())
if matcher:
c = c.lower()
c = re.sub(r"\bi\b", "I", c)
c = re.sub(r"\bii\b", "II", c)
c = re.sub(r"\biii\b", "III", c)
# remove all punctuation marks.
punctuation_regex = re.compile('[%s]' % re.escape(string.punctuation))
c = punctuation_regex.sub('', c)
c_split = c.split()
c_word_array = []
# make only non roman numeral words as "title", roman numerals as "uppercase".
for c_split_constituent in c_split:
matcher = re.match(r"\bi+\b", c_split_constituent.lower())
if matcher:
c_split_constituent = c_split_constituent.upper()
else:
c_split_constituent = c_split_constituent.title()
c_word_array.append(c_split_constituent)
c = (" ".join(c_word for c_word in c_word_array))
if not courses:
courses.append(c)
is_course_match = False
if c in courses:
is_course_match = True
else:
# calculate courses similarity using edit distance using DP.
for c2 in courses:
c_length = len(c)
c2_length = len(c2)
table = [[0 for x in range(c2_length + 1)] for x in range(c_length + 1)]
for i in range(c_length + 1):
table[i][0] = i
for j in range(c2_length + 1):
table[0][j] = j
for i in range(1, c_length + 1):
for j in range(1, c2_length + 1):
if c[i - 1] == c2[j - 1]:
table[i][j] = table[i - 1][j - 1]
else:
table[i][j] = 1 + min(table[i][j - 1], table[i - 1][j], table[i - 1][j - 1])
distance = table[i][j]
if distance <= 2:
is_course_match = True
c = c2
break
if not is_course_match:
courses.append(c)
new_course_list.append(c)
cleaned_data[profname] = cleaned_data[profname] + new_course_list
return
# output file where the cleaned data is stored.
# NOTE(review): neither this handle nor the input file below is ever
# closed; `with` blocks would be safer.
out = open("cleaned.txt", "w")
# input file to read the data (first CLI argument).
inFile = sys.argv[1]
file_buffer = open(inFile, "r").read().splitlines()
for line in file_buffer:
    if not line.strip():
        continue
    # separate the prof names and course lists (split on the first '-' only).
    separator = line.split('-', 1)
    # if the professor name has comma, since we only need last name, we take only that.
    prof = separator[0].strip()
    if ',' in prof:
        prof = (prof.split(',')[0]).strip()
    # if professor name has a space in the last name, take only the last part from it.
    if ' ' in prof:
        prof = (prof.split()[-1]).strip()
    # if professor name has a '.' in the last name, take only the last part form it.
    elif '.' in prof:
        prof = (prof.split('.')[-1]).strip()
    # NOTE(review): the two elif branches below repeat the conditions
    # already tested above and are therefore unreachable dead code.
    # if professor name is in firstName.lastName format, take only lastName.
    elif '.' in prof:
        prof = ((prof.split('.')[-1]).split()[-1]).strip()
    # if professor name is in firstName lastName format, take only lastName.
    elif ' ' in prof:
        prof = (prof.split()[-1]).strip()
    else:
        prof = prof.strip()
    # create a dictionary of professor to their courses.
    createdict(prof, separator[1].strip())
# Deduplicate and sort each professor's course list, then emit
# "Lastname - course1|course2|..." lines in alphabetical order.
for key, value in cleaned_data.items():
    professors.append(key)
    # sort the courses list
    value = list(set(value))
    value.sort()
    cleaned_data[key] = value
professors.sort()
for name in professors:
    out.write(name + " - " + ("|".join(cleaned_data[name]))+"\n")
| 35.022901 | 104 | 0.549259 | import sys
import re
import string
cleaned_data = {}
professors = []
courses = []
def createdict(profname, course_list):
new_course_list = []
is_course_match = False
profname = profname.title()
prof_courses = course_list.split('|')
prof_courses = [course.strip() for course in prof_courses]
if profname not in cleaned_data:
cleaned_data.setdefault(profname, [])
for c in prof_courses:
if '&' in c:
c = c.replace('&', 'and ')
matcher = re.match("intro\.?", c)
if matcher:
c = re.sub("intro\.?", "introduction ", c)
matcher = re.match(r"\bi+\b", c.lower())
if matcher:
c = c.lower()
c = re.sub(r"\bi\b", "I", c)
c = re.sub(r"\bii\b", "II", c)
c = re.sub(r"\biii\b", "III", c)
punctuation_regex = re.compile('[%s]' % re.escape(string.punctuation))
c = punctuation_regex.sub('', c)
c_split = c.split()
c_word_array = []
for c_split_constituent in c_split:
matcher = re.match(r"\bi+\b", c_split_constituent.lower())
if matcher:
c_split_constituent = c_split_constituent.upper()
else:
c_split_constituent = c_split_constituent.title()
c_word_array.append(c_split_constituent)
c = (" ".join(c_word for c_word in c_word_array))
if not courses:
courses.append(c)
is_course_match = False
if c in courses:
is_course_match = True
else:
for c2 in courses:
c_length = len(c)
c2_length = len(c2)
table = [[0 for x in range(c2_length + 1)] for x in range(c_length + 1)]
for i in range(c_length + 1):
table[i][0] = i
for j in range(c2_length + 1):
table[0][j] = j
for i in range(1, c_length + 1):
for j in range(1, c2_length + 1):
if c[i - 1] == c2[j - 1]:
table[i][j] = table[i - 1][j - 1]
else:
table[i][j] = 1 + min(table[i][j - 1], table[i - 1][j], table[i - 1][j - 1])
distance = table[i][j]
if distance <= 2:
is_course_match = True
c = c2
break
if not is_course_match:
courses.append(c)
new_course_list.append(c)
cleaned_data[profname] = cleaned_data[profname] + new_course_list
return
out = open("cleaned.txt", "w")
inFile = sys.argv[1]
file_buffer = open(inFile, "r").read().splitlines()
for line in file_buffer:
if not line.strip():
continue
separator = line.split('-', 1)
prof = separator[0].strip()
if ',' in prof:
prof = (prof.split(',')[0]).strip()
if ' ' in prof:
prof = (prof.split()[-1]).strip()
elif '.' in prof:
prof = (prof.split('.')[-1]).strip()
elif '.' in prof:
prof = ((prof.split('.')[-1]).split()[-1]).strip()
elif ' ' in prof:
prof = (prof.split()[-1]).strip()
else:
prof = prof.strip()
createdict(prof, separator[1].strip())
for key, value in cleaned_data.items():
professors.append(key)
value = list(set(value))
value.sort()
cleaned_data[key] = value
professors.sort()
for name in professors:
out.write(name + " - " + ("|".join(cleaned_data[name]))+"\n")
| true | true |
f717d7ca70131a15c0bee7dca10a57ff4d0cb3db | 9,967 | py | Python | spacy_pytorch_transformers/pipeline/tok2vec.py | tamuhey/spacy-pytorch-transformers | 1b4a58505ee3618a6288a47d4b5716981e39e581 | [
"MIT"
] | 1 | 2021-01-11T19:35:46.000Z | 2021-01-11T19:35:46.000Z | spacy_pytorch_transformers/pipeline/tok2vec.py | tamuhey/spacy-pytorch-transformers | 1b4a58505ee3618a6288a47d4b5716981e39e581 | [
"MIT"
] | null | null | null | spacy_pytorch_transformers/pipeline/tok2vec.py | tamuhey/spacy-pytorch-transformers | 1b4a58505ee3618a6288a47d4b5716981e39e581 | [
"MIT"
] | null | null | null | from typing import Any, List
from thinc.neural.ops import get_array_module
from spacy.pipeline import Pipe
from spacy.tokens import Doc
from spacy.vocab import Vocab
from spacy.util import minibatch
from ..wrapper import PyTT_Wrapper
from ..model_registry import get_model_function
from ..activations import Activations, RaggedArray
from ..util import get_pytt_config, get_pytt_model, get_sents
class PyTT_TokenVectorEncoder(Pipe):
    """spaCy pipeline component to use PyTorch-Transformers models.

    The component assigns the output of the transformer to the `doc._.pytt_outputs`
    extension attribute. We also calculate an alignment between the word-piece
    tokens and the spaCy tokenization, so that we can use the last hidden states
    to set the doc.tensor attribute. When multiple word-piece tokens align to
    the same spaCy token, the spaCy token receives the sum of their values.
    """

    # Name under which the component is registered in the pipeline.
    name = "pytt_tok2vec"

    @classmethod
    def from_nlp(cls, nlp, **cfg):
        """Factory to add to Language.factories via entry point."""
        return cls(nlp.vocab, **cfg)

    @classmethod
    def from_pretrained(cls, vocab: Vocab, name: str, **cfg):
        """Create a PyTT_TokenVectorEncoder instance using pre-trained weights
        from a PyTorch Transformer model, even if it's not installed as a
        spaCy package.

        vocab (spacy.vocab.Vocab): The spaCy vocab to use.
        name (unicode): Name of pre-trained model, e.g. 'bert-base-uncased'.
        RETURNS (PyTT_TokenVectorEncoder): The token vector encoder.
        """
        cfg["pytt_name"] = name
        model = cls.Model(from_pretrained=True, **cfg)
        # Record the transformer config in cfg so the component can later be
        # reconstructed from disk without re-downloading pretrained weights.
        cfg["pytt_config"] = dict(model._model.pytt_model.config.to_dict())
        self = cls(vocab, model=model, **cfg)
        return self

    @classmethod
    def Model(cls, **cfg) -> Any:
        """Create an instance of `PyTT_Wrapper`, which holds the
        PyTorch-Transformers model.

        **cfg: Optional config parameters. Must contain "pytt_name"; may
            contain "from_pretrained", "pytt_config" and "architecture".
        RETURNS (thinc.neural.Model): The wrapped model.
        """
        name = cfg.get("pytt_name")
        if not name:
            raise ValueError("Need pytt_name argument, e.g. 'bert-base-uncased'")
        if cfg.get("from_pretrained"):
            pytt_wrap = PyTT_Wrapper.from_pretrained(name)
        else:
            pytt_config = cfg["pytt_config"]
            # Work around floating point limitation in ujson:
            # If we have the setting cfg["pytt_config"]["layer_norm_eps"] as 0,
            # that's because of misprecision in serializing. Fix that.
            pytt_config["layer_norm_eps"] = 1e-12
            config_cls = get_pytt_config(name)
            model_cls = get_pytt_model(name)
            # Need to match the name their constructor expects.
            if "vocab_size" in cfg["pytt_config"]:
                vocab_size = cfg["pytt_config"]["vocab_size"]
                cfg["pytt_config"]["vocab_size_or_config_json_file"] = vocab_size
            pytt_wrap = PyTT_Wrapper(
                name, pytt_config, model_cls(config_cls(**pytt_config))
            )
        # Wrap the raw transformer in the requested architecture (defaults to
        # per-sentence processing) and expose nO/_model for downstream code.
        make_model = get_model_function(cfg.get("architecture", "tok2vec_per_sentence"))
        model = make_model(pytt_wrap, cfg)
        setattr(model, "nO", pytt_wrap.nO)
        setattr(model, "_model", pytt_wrap)
        return model

    def __init__(self, vocab, model=True, **cfg):
        """Initialize the component.

        vocab (spacy.vocab.Vocab): The shared vocab.
        model (thinc.neural.Model / True): The component's model or `True` if
            not initialized yet.
        **cfg: Optional config parameters.
        """
        self.vocab = vocab
        self.model = model
        self.cfg = cfg

    @property
    def token_vector_width(self):
        # Output width of the wrapped transformer (hidden size).
        return self.model._model.nO

    @property
    def pytt_model(self):
        # The underlying PyTorch-Transformers model object.
        return self.model._model.pytt_model

    def __call__(self, doc):
        """Process a Doc and assign the extracted features.

        doc (spacy.tokens.Doc): The Doc to process.
        RETURNS (spacy.tokens.Doc): The processed Doc.
        """
        self.require_model()
        outputs = self.predict([doc])
        self.set_annotations([doc], outputs)
        return doc

    def pipe(self, stream, batch_size=128):
        """Process Doc objects as a stream and assign the extracted features.

        stream (iterable): A stream of Doc objects.
        batch_size (int): The number of texts to buffer.
        YIELDS (spacy.tokens.Doc): Processed Docs in order.
        """
        for docs in minibatch(stream, size=batch_size):
            docs = list(docs)
            outputs = self.predict(docs)
            self.set_annotations(docs, outputs)
            for doc in docs:
                yield doc

    def begin_update(self, docs, drop=None, **cfg):
        """Get the predictions and a callback to complete the gradient update.
        This is only used internally within PyTT_Language.update.
        """
        outputs, backprop = self.model.begin_update(docs, drop=drop)

        def finish_update(docs, sgd=None):
            # Gather the gradients that downstream components accumulated on
            # the Doc extension attributes, and backprop them in one batch.
            assert len(docs)
            d_lh = []
            d_po = []
            lh_lengths = []
            po_lengths = []
            for doc in docs:
                d_lh.append(doc._.pytt_d_last_hidden_state)
                d_po.append(doc._.pytt_d_pooler_output)
                lh_lengths.append(doc._.pytt_d_last_hidden_state.shape[0])
                po_lengths.append(doc._.pytt_d_pooler_output.shape[0])
            xp = self.model.ops.xp
            gradients = Activations(
                RaggedArray(xp.vstack(d_lh), lh_lengths),
                RaggedArray(xp.vstack(d_po), po_lengths),
            )
            backprop(gradients, sgd=sgd)
            # Zero the per-doc gradients so the next update starts clean.
            for doc in docs:
                doc._.pytt_d_last_hidden_state.fill(0)
                doc._.pytt_d_pooler_output.fill(0)
            return None

        return outputs, finish_update

    def predict(self, docs):
        """Run the transformer model on a batch of docs and return the
        extracted features.

        docs (iterable): A batch of Docs to process.
        RETURNS (Activations): The batched activations.
        """
        return self.model.predict(docs)

    def set_annotations(self, docs: List[Doc], activations: Activations):
        """Assign the extracted features to the Doc objects and overwrite the
        vector and similarity hooks.

        docs (iterable): A batch of `Doc` objects.
        activations (Activations): A batch of activations.
        """
        xp = activations.xp
        for i, doc in enumerate(docs):
            # Make it 2d -- acts are always 3d, to represent batch size.
            wp_tensor = activations.lh.get(i)
            doc.tensor = self.model.ops.allocate((len(doc), self.model.nO))
            doc._.pytt_last_hidden_state = wp_tensor
            if activations.has_po:
                pooler_output = activations.po.get(i)
                doc._.pytt_pooler_output = pooler_output
            # Pre-create empty gradient buffers for the backward pass.
            doc._.pytt_d_last_hidden_state = xp.zeros((0, 0), dtype=wp_tensor.dtype)
            doc._.pytt_d_pooler_output = xp.zeros((0, 0), dtype=wp_tensor.dtype)
            doc._.pytt_d_all_hidden_states = []
            doc._.pytt_d_all_attentions = []
            if wp_tensor.shape != (len(doc._.pytt_word_pieces), self.model.nO):
                raise ValueError(
                    "Mismatch between tensor shape and word pieces. This usually "
                    "means we did something wrong in the sentence reshaping, "
                    "or possibly finding the separator tokens."
                )
            # Count how often each word-piece token is represented. This allows
            # a weighted sum, so that we can make sure doc.tensor.sum()
            # equals wp_tensor.sum(). Do this with sensitivity to boundary tokens
            wp_rows, align_sizes = _get_boundary_sensitive_alignment(doc)
            wp_weighted = wp_tensor / xp.array(align_sizes, dtype="f").reshape((-1, 1))
            # TODO: Obviously incrementing the rows individually is bad. How
            # to do in one shot without blowing up the memory?
            # NOTE(review): this inner `i` shadows the outer doc index; harmless
            # because the outer `i` is not used afterwards, but worth renaming.
            for i, word_piece_slice in enumerate(wp_rows):
                for j in word_piece_slice:
                    doc.tensor[i] += wp_weighted[j]
            # Route vector/similarity lookups through doc.tensor.
            doc.user_hooks["vector"] = get_doc_vector_via_tensor
            doc.user_span_hooks["vector"] = get_span_vector_via_tensor
            doc.user_token_hooks["vector"] = get_token_vector_via_tensor
            doc.user_hooks["similarity"] = get_similarity_via_tensor
            doc.user_span_hooks["similarity"] = get_similarity_via_tensor
            doc.user_token_hooks["similarity"] = get_similarity_via_tensor
def _get_boundary_sensitive_alignment(doc):
    """Map each spaCy token to its word-piece rows, counting overlaps.

    Returns (wp_rows, align_sizes): ``wp_rows[token_i]`` is the list of
    word-piece indices aligned to that token, and ``align_sizes[wp_i]`` is
    how many tokens claim that word-piece (used for weighted averaging).
    """
    n_wp = len(doc._.pytt_word_pieces)
    align_sizes = [0] * n_wp
    wp_rows = []
    for aligned in doc._.pytt_alignment:
        row = list(aligned)
        wp_rows.append(row)
        for wp_idx in row:
            align_sizes[wp_idx] += 1
    # To make this weighting work, we "align" the boundary tokens against
    # every token in their sentence. The boundary tokens are otherwise
    # unaligned, which is how we identify them.
    for sent in get_sents(doc):
        offset = sent._.pytt_start
        for k in range(len(sent._.pytt_word_pieces)):
            if align_sizes[offset + k] != 0:
                continue
            align_sizes[offset + k] = len(sent)
            for tok in sent:
                wp_rows[tok.i].append(offset + k)
    return wp_rows, align_sizes
def get_doc_vector_via_tensor(doc):
    """Doc ``vector`` hook: sum all per-token rows of ``doc.tensor``."""
    tensor = doc.tensor
    return tensor.sum(axis=0)
def get_span_vector_via_tensor(span):
    """Span ``vector`` hook: sum the tensor rows covered by the span."""
    rows = span.doc.tensor[span.start : span.end]
    return rows.sum(axis=0)
def get_token_vector_via_tensor(token):
    """Token ``vector`` hook: look up the token's row in the Doc's tensor."""
    tensor = token.doc.tensor
    return tensor[token.i]
def get_similarity_via_tensor(doc1, doc2):
    """Similarity hook: cosine similarity of the two objects' vectors.

    Works for any pair of Doc/Span/Token, since all expose ``vector`` and
    ``vector_norm`` (routed through the tensor hooks above).
    """
    u = doc1.vector
    w = doc2.vector
    xp = get_array_module(u)
    norm_product = doc1.vector_norm * doc2.vector_norm
    return xp.dot(u, w) / norm_product
| 40.681633 | 88 | 0.631685 | from typing import Any, List
from thinc.neural.ops import get_array_module
from spacy.pipeline import Pipe
from spacy.tokens import Doc
from spacy.vocab import Vocab
from spacy.util import minibatch
from ..wrapper import PyTT_Wrapper
from ..model_registry import get_model_function
from ..activations import Activations, RaggedArray
from ..util import get_pytt_config, get_pytt_model, get_sents
class PyTT_TokenVectorEncoder(Pipe):
name = "pytt_tok2vec"
@classmethod
def from_nlp(cls, nlp, **cfg):
return cls(nlp.vocab, **cfg)
@classmethod
def from_pretrained(cls, vocab: Vocab, name: str, **cfg):
cfg["pytt_name"] = name
model = cls.Model(from_pretrained=True, **cfg)
cfg["pytt_config"] = dict(model._model.pytt_model.config.to_dict())
self = cls(vocab, model=model, **cfg)
return self
@classmethod
def Model(cls, **cfg) -> Any:
name = cfg.get("pytt_name")
if not name:
raise ValueError("Need pytt_name argument, e.g. 'bert-base-uncased'")
if cfg.get("from_pretrained"):
pytt_wrap = PyTT_Wrapper.from_pretrained(name)
else:
pytt_config = cfg["pytt_config"]
pytt_config["layer_norm_eps"] = 1e-12
config_cls = get_pytt_config(name)
model_cls = get_pytt_model(name)
# Need to match the name their constructor expects.
if "vocab_size" in cfg["pytt_config"]:
vocab_size = cfg["pytt_config"]["vocab_size"]
cfg["pytt_config"]["vocab_size_or_config_json_file"] = vocab_size
pytt_wrap = PyTT_Wrapper(
name, pytt_config, model_cls(config_cls(**pytt_config))
)
make_model = get_model_function(cfg.get("architecture", "tok2vec_per_sentence"))
model = make_model(pytt_wrap, cfg)
setattr(model, "nO", pytt_wrap.nO)
setattr(model, "_model", pytt_wrap)
return model
def __init__(self, vocab, model=True, **cfg):
self.vocab = vocab
self.model = model
self.cfg = cfg
@property
def token_vector_width(self):
return self.model._model.nO
@property
def pytt_model(self):
return self.model._model.pytt_model
def __call__(self, doc):
self.require_model()
outputs = self.predict([doc])
self.set_annotations([doc], outputs)
return doc
def pipe(self, stream, batch_size=128):
for docs in minibatch(stream, size=batch_size):
docs = list(docs)
outputs = self.predict(docs)
self.set_annotations(docs, outputs)
for doc in docs:
yield doc
def begin_update(self, docs, drop=None, **cfg):
outputs, backprop = self.model.begin_update(docs, drop=drop)
def finish_update(docs, sgd=None):
assert len(docs)
d_lh = []
d_po = []
lh_lengths = []
po_lengths = []
for doc in docs:
d_lh.append(doc._.pytt_d_last_hidden_state)
d_po.append(doc._.pytt_d_pooler_output)
lh_lengths.append(doc._.pytt_d_last_hidden_state.shape[0])
po_lengths.append(doc._.pytt_d_pooler_output.shape[0])
xp = self.model.ops.xp
gradients = Activations(
RaggedArray(xp.vstack(d_lh), lh_lengths),
RaggedArray(xp.vstack(d_po), po_lengths),
)
backprop(gradients, sgd=sgd)
for doc in docs:
doc._.pytt_d_last_hidden_state.fill(0)
doc._.pytt_d_pooler_output.fill(0)
return None
return outputs, finish_update
def predict(self, docs):
return self.model.predict(docs)
def set_annotations(self, docs: List[Doc], activations: Activations):
xp = activations.xp
for i, doc in enumerate(docs):
# Make it 2d -- acts are always 3d, to represent batch size.
wp_tensor = activations.lh.get(i)
doc.tensor = self.model.ops.allocate((len(doc), self.model.nO))
doc._.pytt_last_hidden_state = wp_tensor
if activations.has_po:
pooler_output = activations.po.get(i)
doc._.pytt_pooler_output = pooler_output
doc._.pytt_d_last_hidden_state = xp.zeros((0, 0), dtype=wp_tensor.dtype)
doc._.pytt_d_pooler_output = xp.zeros((0, 0), dtype=wp_tensor.dtype)
doc._.pytt_d_all_hidden_states = []
doc._.pytt_d_all_attentions = []
if wp_tensor.shape != (len(doc._.pytt_word_pieces), self.model.nO):
raise ValueError(
"Mismatch between tensor shape and word pieces. This usually "
"means we did something wrong in the sentence reshaping, "
"or possibly finding the separator tokens."
)
# Count how often each word-piece token is represented. This allows
# a weighted sum, so that we can make sure doc.tensor.sum()
# equals wp_tensor.sum(). Do this with sensitivity to boundary tokens
wp_rows, align_sizes = _get_boundary_sensitive_alignment(doc)
wp_weighted = wp_tensor / xp.array(align_sizes, dtype="f").reshape((-1, 1))
# TODO: Obviously incrementing the rows individually is bad. How
# to do in one shot without blowing up the memory?
for i, word_piece_slice in enumerate(wp_rows):
for j in word_piece_slice:
doc.tensor[i] += wp_weighted[j]
doc.user_hooks["vector"] = get_doc_vector_via_tensor
doc.user_span_hooks["vector"] = get_span_vector_via_tensor
doc.user_token_hooks["vector"] = get_token_vector_via_tensor
doc.user_hooks["similarity"] = get_similarity_via_tensor
doc.user_span_hooks["similarity"] = get_similarity_via_tensor
doc.user_token_hooks["similarity"] = get_similarity_via_tensor
def _get_boundary_sensitive_alignment(doc):
align_sizes = [0 for _ in range(len(doc._.pytt_word_pieces))]
wp_rows = []
for word_piece_slice in doc._.pytt_alignment:
wp_rows.append(list(word_piece_slice))
for i in word_piece_slice:
align_sizes[i] += 1
# To make this weighting work, we "align" the boundary tokens against
# every token in their sentence. The boundary tokens are otherwise
# unaligned, which is how we identify them.
for sent in get_sents(doc):
offset = sent._.pytt_start
for i in range(len(sent._.pytt_word_pieces)):
if align_sizes[offset + i] == 0:
align_sizes[offset + i] = len(sent)
for tok in sent:
wp_rows[tok.i].append(offset + i)
return wp_rows, align_sizes
def get_doc_vector_via_tensor(doc):
return doc.tensor.sum(axis=0)
def get_span_vector_via_tensor(span):
return span.doc.tensor[span.start : span.end].sum(axis=0)
def get_token_vector_via_tensor(token):
return token.doc.tensor[token.i]
def get_similarity_via_tensor(doc1, doc2):
v1 = doc1.vector
v2 = doc2.vector
xp = get_array_module(v1)
return xp.dot(v1, v2) / (doc1.vector_norm * doc2.vector_norm)
| true | true |
f717d7dbab9408394ce847244ea76b8cf45150f2 | 10,895 | py | Python | tau.py | cradesto/pystella | f6f44ed12d9648585a52a09e15d494daa4c70c59 | [
"MIT"
] | 1 | 2019-08-08T13:11:57.000Z | 2019-08-08T13:11:57.000Z | tau.py | cradesto/pystella | f6f44ed12d9648585a52a09e15d494daa4c70c59 | [
"MIT"
] | null | null | null | tau.py | cradesto/pystella | f6f44ed12d9648585a52a09e15d494daa4c70c59 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import logging
import numpy as np
import pystella as ps
from pystella.model.sn_tau import StellaTauDetail
# Silence matplotlib's own DEBUG/INFO chatter in this CLI tool.
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)

__author__ = 'bakl'

# todo Show filters
# todo show values for filters
# todo compute SED = 4 pi R^2 sig T^4
def plot_tau_moments(tau, moments=None, xlim=None):
    """Plot velocity (top) and temperature (bottom) vs radius for a set of
    time moments of a STELLA tau model.

    :param tau: the loaded tau model (provides Name and block_nearest(time))
    :param moments: iterable of times [days]; if None, 40 log-spaced moments
        in [0.5, 400] days are used
    :param xlim: optional (min, max) radius limits [cm]
    :return: the matplotlib figure
    """
    import matplotlib.pyplot as plt

    # Fixed: the old `moments = moments or <default>` raised ValueError for a
    # numpy array argument (ambiguous truthiness) and silently replaced an
    # explicitly-passed empty list; only substitute the default for None.
    if moments is None:
        moments = np.exp(np.linspace(np.log(0.5), np.log(400.), 40))

    fig, (axV, axT) = plt.subplots(2, figsize=(12, 12), sharex=True, gridspec_kw={'hspace': 0})
    axV.set_title(tau.Name)
    axV.set_xlabel('')
    axV.set_ylabel('Velocity [1000 km/s]')
    axT.set_xlabel('Radius [cm]')
    axT.set_ylabel('Temperature [K]')
    for i, time in enumerate(moments):
        b = tau.block_nearest(time)
        # Choose label precision so small times keep significant digits.
        n = int(2 - np.log10(max(1e-03, abs(b.Time))))
        p = axV.semilogx(b.R, b.V8, label="t= {:.{}f}".format(b.Time, n))
        color = p[0].get_color()
        # Reuse the velocity curve's color so the two panels match per moment.
        axT.loglog(b.R, b.T, label="t={:.2f}".format(time), color=color)
    axV.legend(frameon=False)
    if xlim is not None:
        axT.set_xlim(xlim)
        axV.set_xlim(xlim)
    fig.tight_layout()
    return fig
def plot_bands(ax, bnames, amp=30, alpha=0.5):
    """Overlay the filter response curves on *ax*.

    Each band's response is scaled by *amp*; a dashed vertical line marks the
    band's effective wavelength. Returns a dict mapping band name to
    (effective wavelength [A], plot color).
    """
    colors = ps.band.colors()
    marks = {}
    for bname in bnames:
        band = ps.band.band_by_name(bname)
        col = colors[bname]
        wavelengths = band.wl * ps.phys.cm_to_angs
        ax.plot(wavelengths, band.resp_wl * amp, col, alpha=alpha)
        wl_eff = band.wl_eff_angs
        ax.axvline(x=wl_eff, ymin=0., ymax=0.99, linestyle='--', color=col, alpha=alpha)
        # Label the band and annotate its effective wavelength.
        ax.text(wl_eff, 10, bname, fontsize=12)
        ax.text(wl_eff * .95, 3, "{:.0f}".format(wl_eff), fontsize=6)
        marks[bname] = (wl_eff, col)
    return marks
def plot_tau_phot(tau_data, pars, tau_ph, xlim=None, title='', bnames=None):
    """
    Plot photospheric quantities as functions of wavelength. Maybe: R, V, V8, T.

    :param tau_data: dict of per-parameter sequences of (time, freq, values)
        tuples, taken at the optical depth tau_ph
    :param pars: the parameters of the photosphere to plot; a 'log' prefix
        (e.g. 'logT') selects a log-scaled y axis for that panel
    :param tau_ph: the photosphere location (optical depth)
    :param xlim: wave length interval [A]
    :param title: the plot title
    :param bnames: array of filter names to show the filter responses
    :return: figure
    """
    import matplotlib.pyplot as plt

    def fr2wv(nu):
        # Convert frequency [Hz] to wavelength [Angstrom].
        return ps.phys.c / nu * ps.phys.cm_to_angs

    # One panel for the photospheric zone index plus one per parameter,
    # stacked with a shared wavelength axis.
    fig, axs = plt.subplots(len(pars)+1, figsize=(12, 12), sharex=True, gridspec_kw={'hspace': 0})
    # Setup: top panel shows the zone index at the photosphere.
    ax = axs[0]
    ax.set_ylabel(r'Zone ($\tau_{{ph}}= {:.2f}$)'.format(tau_ph))
    ax.set_title(title)
    ax.xaxis.set_ticks_position('top')
    for i, p in enumerate(pars, 1):
        ax = axs[i]
        ax.set_ylabel(r'{}$_{{ph}}$'.format(p))
        if i < len(axs)-1:
            # Interior panels: hide x labels/ticks; only the bottom shows them.
            ax.set_xlabel('')
            ax.tick_params(which='both', top=False, bottom=False)
        else:
            ax.set_xlabel('Wavelength [A]')
    # Plot Zone_ph, remembering each moment's line color for the other panels.
    colors = []
    for j, (t, freq, y) in enumerate(tau_data[StellaTauDetail.col_zon]):
        axzon = axs[0]
        n = int(3 - np.log10(max(1e-03, abs(t))))  # label precision
        lbl = "t= {:.{}f} d".format(t, n)
        ll = axzon.semilogx(fr2wv(freq), y, label=lbl)
        color = ll[0].get_color()
        colors.append(color)
    bnames_waves = None
    if bnames is not None:
        # NOTE(review): `axzon` is only bound inside the loop above, so this
        # assumes tau_data has at least one time moment — confirm upstream.
        ylim = axzon.get_ylim()
        bnames_waves = plot_bands(axzon, bnames, amp=ylim[1]*0.25, alpha=0.5)
    # Plot the other parameters, one panel each, colored per time moment.
    for i, p in enumerate(pars, 1):
        is_log = p.startswith('log')
        p_data = p.replace('log', '') if is_log else p
        ax = axs[i]
        for j, (t, freq, y) in enumerate(tau_data[p_data]):
            x = fr2wv(freq)
            if is_log:
                ax.loglog(x, y, color=colors[j])
            else:
                ax.semilogx(x, y, color=colors[j])
        if bnames_waves is not None:
            # Repeat the band effective-wavelength markers on every panel.
            for bn, (wl, col) in bnames_waves.items():
                ax.axvline(x=wl, ymin=0., ymax=0.99, linestyle='--', color=col, alpha=0.5)
    # Post-plotting: unify tick style and apply the wavelength limits.
    for i, ax in enumerate(axs):
        ax.tick_params(which='both', left=True, right=True, direction="in")
        if xlim is not None:
            ax.set_xlim(xlim)
    axs[0].legend(frameon=False)
    fig.tight_layout()
    return fig
def get_parser(times='0.1:1:10:25:65', bnames='U:B:V:R'):
    """Build the command-line parser for the tau-diagram plotter.

    :param times: default colon-separated time moments [days]
    :param bnames: default colon-separated filter names (used when -b is
        given without a value)
    :return: the configured argparse.ArgumentParser
    """
    # Fixed: the description used to read 'Standard Candle Method.', a
    # copy-paste from another pystella script; this tool plots tau diagrams
    # (as the banner printed below already stated).
    parser = argparse.ArgumentParser(
        description='Plot the tau-wave diagram for STELLA models.')
    print(" Plot the tau-wave diagram for STELLA models")
    parser.add_argument('-b', '--band',
                        nargs='?',
                        required=False,
                        const=bnames,
                        type=str,
                        dest="bnames",
                        help="-b <bands>: string. If set only -b BNAMES is {}".format(bnames))
    parser.add_argument('-i', '--input',
                        required=True,
                        dest="input",
                        help="Model name, example: cat_R450_M15_Ni007")
    parser.add_argument('-p', '--path',
                        required=False,
                        type=str,
                        default=False,
                        dest="path",
                        help="Model directory")
    parser.add_argument('-ph', '--phot',
                        required=False,
                        type=str,
                        default=False,
                        dest="phot",
                        help='Plot photosphere parameter. Maybe: R, V, V8, T. Example: -ph R:V8:T '
                             'You may use prefix log, e.g. logT or logV8')
    parser.add_argument('-s', '--save',
                        action='store_const',
                        const=True,
                        dest="is_save",
                        help="To save the result plot to pdf-file. Format: tau_[name]_t[times].pdf.")
    parser.add_argument('-t', '--time',
                        required=False,
                        type=str,
                        default=times,
                        dest="times",
                        help="Plot tau snap for selected time moments. Default: {0}".format(times))
    parser.add_argument('--tau_ph',
                        required=False,
                        type=float,
                        default=2./3.,
                        dest="tau_ph",
                        help="The optical depth at the photosphere. Default: 2/3")
    parser.add_argument('-x', '--xlim',
                        required=False,
                        type=str,
                        default=None,
                        dest="xlim",
                        help="wave length interval [A]. Example: 1.:25e3. Default: all waves in the tau-file")
    parser.add_argument('-w', '--write',
                        required=False,
                        type=str,
                        default=None,
                        dest="write_prefix",
                        help="The prefix of file + -ParamName.dat")
    return parser
def str2float(s):
    """Parse a colon-separated string of numbers into a list of floats."""
    return [float(token) for token in s.split(':')]
def main():
    """CLI entry point: load a STELLA tau model and plot either the
    velocity/temperature profiles or the photospheric parameters, optionally
    saving the figure or writing the data to text files."""
    import os
    import sys
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        plt = None  # NOTE(review): plt.show() below will fail if matplotlib is absent

    ps.Band.load_settings()
    model_ext = '.tau'
    parser = get_parser()
    args, unknownargs = parser.parse_known_args()

    path = os.getcwd()
    if args.path:
        path = os.path.expanduser(args.path)

    # Set model name: strip a trailing '.tau' so both forms are accepted.
    fname = None
    if args.input:
        fname = args.input.strip()
        fname = fname.replace(model_ext, '')
    if fname is None:
        parser.print_help()
        sys.exit(2)

    model = ps.Stella(fname, path=path)
    if not model.is_tau:
        print("No tau-data for: " + str(model))
        return None

    fig = None
    xlim = None
    fplot = None
    print('\n Arguments')
    times = str2float(args.times)
    print(' The time moments: ', args.times)
    print(' The optical depth ', args.tau_ph)
    if args.phot:
        print(' The photospheric parameters ', args.phot)
    if args.xlim is not None:
        xlim = str2float(args.xlim)
        print(" xlim: ", xlim)

    # Set band names, validating each against the known filters.
    bnames = ('B',)
    # NOTE(review): load_settings() was already called above; this second
    # call looks redundant — confirm it is idempotent.
    ps.Band.load_settings()
    if args.bnames:
        bnames = []
        for bname in args.bnames.split('-'):
            if not ps.band.is_exist(bname):
                print('No such band: ' + bname)
                parser.print_help()
                sys.exit(2)
            bnames.append(bname)

    tau = model.get_tau().load(is_info=False)
    print('\n Loaded data from {}'.format(tau.FName))
    print('Model has Nzone= {} Ntimes= {}'.format(tau.Nzon, tau.Ntimes))
    # NOTE(review): '{:3e}' (no dot) sets a min width of 3, not precision;
    # probably '{:.3e}' was intended for the upper bound.
    print("The model time interval: {:.3e} - {:3e} days".format(min(tau.Times), max(tau.Times)))
    print("The bnames are {}".format(', '.join(bnames)))
    ###
    # Plot: photospheric-parameter mode (-ph) or profile mode (default).
    if args.phot:
        pars = args.phot.split(':')
        if isinstance(pars, str):
            pars = [pars]
        # Strip the 'log' prefix to get the raw data column names.
        pars_data = [p.replace('log', '') for p in pars]
        tau_data = tau.params_ph(pars=pars_data, moments=times, tau_ph=args.tau_ph)
        if args.write_prefix:
            # Write data files instead of plotting.
            fwrite = os.path.expanduser(args.write_prefix)
            tau.data_save(fwrite, tau_data, pars_data)
        else:
            # Print the photospheric value at each band's effective frequency.
            print('\nPhotospheric parameters:')
            for ii, p in enumerate(pars_data):
                print('{:9s} {}'.format('t_real', ' '.join([f'{p}_{b:10s}' for b in bnames])))
                for i, (t, freq, y) in enumerate(tau_data[p]):
                    s = '{:9.4f} '.format(t)
                    for bname in bnames:
                        b = ps.band.band_by_name(bname)
                        fr_eff = b.freq_eff
                        # Pick the tabulated frequency closest to the band's.
                        idx = (np.abs(freq - fr_eff)).argmin()
                        s += ' {:10e}'.format( y[idx])
                    print(s)
            # Plot
            fig = plot_tau_phot(tau_data, pars, tau_ph=args.tau_ph, xlim=xlim, title=tau.Name, bnames=bnames)
            fplot = os.path.expanduser("~/tau_{}_{}.pdf".format(fname, str.replace(args.phot, ':', '-')))
    else:
        fig = plot_tau_moments(tau, moments=times, xlim=xlim)

    if args.is_save:
        if fplot is None:
            fplot = os.path.expanduser("~/tau_{0}_t{1}.pdf".format(fname, str.replace(args.times, ':', '-')))
        print("Save plot to {0}".format(fplot))
        fig.savefig(fplot, bbox_inches='tight')
    else:
        plt.show()
# Script entry point: run the CLI only when executed directly.
if __name__ == '__main__':
    main()
| 33.626543 | 110 | 0.534832 |
import argparse
import logging
import numpy as np
import pystella as ps
from pystella.model.sn_tau import StellaTauDetail
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
__author__ = 'bakl'
def plot_tau_moments(tau, moments=None, xlim=None):
import matplotlib.pyplot as plt
moments = moments or np.exp(np.linspace(np.log(0.5), np.log(400.), 40))
fig, (axV, axT) = plt.subplots(2, figsize=(12, 12), sharex=True, gridspec_kw={'hspace': 0})
axV.set_title(tau.Name)
axV.set_xlabel('')
axV.set_ylabel('Velocity [1000 km/s]')
axT.set_xlabel('Radius [cm]')
axT.set_ylabel('Temperature [K]')
for i, time in enumerate(moments):
b = tau.block_nearest(time)
n = int(2 - np.log10(max(1e-03, abs(b.Time)))) xV.semilogx(b.R, b.V8, label="t= {:.{}f}".format(b.Time, n))
color = p[0].get_color()
axT.loglog(b.R, b.T, label="t={:.2f}".format(time), color=color)
axV.legend(frameon=False)
if xlim is not None:
axT.set_xlim(xlim)
axV.set_xlim(xlim)
fig.tight_layout()
return fig
def plot_bands(ax, bnames, amp=30, alpha=0.5):
color_dic = ps.band.colors()
res = {}
for bname in bnames:
b = ps.band.band_by_name(bname)
wl = b.wl * ps.phys.cm_to_angs
ax.plot(wl, b.resp_wl*amp, color_dic[bname], alpha=alpha)
wl_eff = b.wl_eff_angs
ax.axvline(x=wl_eff, ymin=0., ymax=0.99, linestyle='--', color=color_dic[bname], alpha=alpha)
ax.text(wl_eff, 10, bname, fontsize=12)
ax.text(wl_eff*.95, 3, "{:.0f}".format(wl_eff), fontsize=6)
res[bname] = (wl_eff, color_dic[bname])
return res
def plot_tau_phot(tau_data, pars, tau_ph, xlim=None, title='', bnames=None):
import matplotlib.pyplot as plt
def fr2wv(nu):
return ps.phys.c / nu * ps.phys.cm_to_angs
fig, axs = plt.subplots(len(pars)+1, figsize=(12, 12), sharex=True, gridspec_kw={'hspace': 0})
ax = axs[0]
ax.set_ylabel(r'Zone ($\tau_{{ph}}= {:.2f}$)'.format(tau_ph))
ax.set_title(title)
ax.xaxis.set_ticks_position('top')
for i, p in enumerate(pars, 1):
ax = axs[i]
ax.set_ylabel(r'{}$_{{ph}}$'.format(p))
if i < len(axs)-1:
ax.set_xlabel('')
ax.tick_params(which='both', top=False, bottom=False)
else:
ax.set_xlabel('Wavelength [A]')
colors = []
for j, (t, freq, y) in enumerate(tau_data[StellaTauDetail.col_zon]):
axzon = axs[0]
n = int(3 - np.log10(max(1e-03, abs(t))))
lbl = "t= {:.{}f} d".format(t, n)
ll = axzon.semilogx(fr2wv(freq), y, label=lbl)
color = ll[0].get_color()
colors.append(color)
bnames_waves = None
if bnames is not None:
ylim = axzon.get_ylim()
bnames_waves = plot_bands(axzon, bnames, amp=ylim[1]*0.25, alpha=0.5)
for i, p in enumerate(pars, 1):
is_log = p.startswith('log')
p_data = p.replace('log', '') if is_log else p
ax = axs[i]
for j, (t, freq, y) in enumerate(tau_data[p_data]):
x = fr2wv(freq)
if is_log:
ax.loglog(x, y, color=colors[j])
else:
ax.semilogx(x, y, color=colors[j])
if bnames_waves is not None:
for bn, (wl, col) in bnames_waves.items():
ax.axvline(x=wl, ymin=0., ymax=0.99, linestyle='--', color=col, alpha=0.5)
for i, ax in enumerate(axs):
ax.tick_params(which='both', left=True, right=True, direction="in")
if xlim is not None:
ax.set_xlim(xlim)
axs[0].legend(frameon=False)
fig.tight_layout()
return fig
def get_parser(times='0.1:1:10:25:65', bnames='U:B:V:R'):
parser = argparse.ArgumentParser(description='Standard Candle Method.')
print(" Plot the tau-wave diagram for STELLA models")
parser.add_argument('-b', '--band',
nargs='?',
required=False,
const=bnames,
type=str,
dest="bnames",
help="-b <bands>: string. If set only -b BNAMES is {}".format(bnames))
parser.add_argument('-i', '--input',
required=True,
dest="input",
help="Model name, example: cat_R450_M15_Ni007")
parser.add_argument('-p', '--path',
required=False,
type=str,
default=False,
dest="path",
help="Model directory")
parser.add_argument('-ph', '--phot',
required=False,
type=str,
default=False,
dest="phot",
help='Plot photosphere parameter. Maybe: R, V, V8, T. Example: -ph R:V8:T '
'You may use prefix log, e.g. logT or logV8')
parser.add_argument('-s', '--save',
action='store_const',
const=True,
dest="is_save",
help="To save the result plot to pdf-file. Format: tau_[name]_t[times].pdf.")
parser.add_argument('-t', '--time',
required=False,
type=str,
default=times,
dest="times",
help="Plot tau snap for selected time moments. Default: {0}".format(times))
parser.add_argument('--tau_ph',
required=False,
type=float,
default=2./3.,
dest="tau_ph",
help="The optical depth at the photosphere. Default: 2/3")
parser.add_argument('-x', '--xlim',
required=False,
type=str,
default=None,
dest="xlim",
help="wave length interval [A]. Example: 1.:25e3. Default: all waves in the tau-file")
parser.add_argument('-w', '--write',
required=False,
type=str,
default=None,
dest="write_prefix",
help="The prefix of file + -ParamName.dat")
return parser
def str2float(s):
return list(map(float, s.split(':')))
def main():
import os
import sys
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
ps.Band.load_settings()
model_ext = '.tau'
parser = get_parser()
args, unknownargs = parser.parse_known_args()
path = os.getcwd()
if args.path:
path = os.path.expanduser(args.path)
fname = None
if args.input:
fname = args.input.strip()
fname = fname.replace(model_ext, '')
if fname is None:
parser.print_help()
sys.exit(2)
model = ps.Stella(fname, path=path)
if not model.is_tau:
print("No tau-data for: " + str(model))
return None
fig = None
xlim = None
fplot = None
print('\n Arguments')
times = str2float(args.times)
print(' The time moments: ', args.times)
print(' The optical depth ', args.tau_ph)
if args.phot:
print(' The photospheric parameters ', args.phot)
if args.xlim is not None:
xlim = str2float(args.xlim)
print(" xlim: ", xlim)
bnames = ('B',)
ps.Band.load_settings()
if args.bnames:
bnames = []
for bname in args.bnames.split('-'):
if not ps.band.is_exist(bname):
print('No such band: ' + bname)
parser.print_help()
sys.exit(2)
bnames.append(bname)
tau = model.get_tau().load(is_info=False)
print('\n Loaded data from {}'.format(tau.FName))
print('Model has Nzone= {} Ntimes= {}'.format(tau.Nzon, tau.Ntimes))
print("The model time interval: {:.3e} - {:3e} days".format(min(tau.Times), max(tau.Times)))
print("The bnames are {}".format(', '.join(bnames)))
if args.phot:
pars = args.phot.split(':')
if isinstance(pars, str):
pars = [pars]
pars_data = [p.replace('log', '') for p in pars]
tau_data = tau.params_ph(pars=pars_data, moments=times, tau_ph=args.tau_ph)
if args.write_prefix:
fwrite = os.path.expanduser(args.write_prefix)
tau.data_save(fwrite, tau_data, pars_data)
else:
print('\nPhotospheric parameters:')
for ii, p in enumerate(pars_data):
print('{:9s} {}'.format('t_real', ' '.join([f'{p}_{b:10s}' for b in bnames])))
for i, (t, freq, y) in enumerate(tau_data[p]):
s = '{:9.4f} '.format(t)
for bname in bnames:
b = ps.band.band_by_name(bname)
fr_eff = b.freq_eff
idx = (np.abs(freq - fr_eff)).argmin()
s += ' {:10e}'.format( y[idx])
print(s)
fig = plot_tau_phot(tau_data, pars, tau_ph=args.tau_ph, xlim=xlim, title=tau.Name, bnames=bnames)
fplot = os.path.expanduser("~/tau_{}_{}.pdf".format(fname, str.replace(args.phot, ':', '-')))
else:
fig = plot_tau_moments(tau, moments=times, xlim=xlim)
if args.is_save:
if fplot is None:
fplot = os.path.expanduser("~/tau_{0}_t{1}.pdf".format(fname, str.replace(args.times, ':', '-')))
print("Save plot to {0}".format(fplot))
fig.savefig(fplot, bbox_inches='tight')
else:
plt.show()
if __name__ == '__main__':
main()
| true | true |
f717d82ecf5183dae516e756dfbcb6f492d9702a | 1,019 | py | Python | tests/__init__.py | dolfinus/pexpect | 3453ea9b8b326179cf720351001e64c7ea6b07bc | [
"0BSD"
] | 2,132 | 2015-01-02T12:48:45.000Z | 2022-03-28T05:32:54.000Z | tests/__init__.py | dolfinus/pexpect | 3453ea9b8b326179cf720351001e64c7ea6b07bc | [
"0BSD"
] | 1,274 | 2015-09-22T20:06:16.000Z | 2018-08-31T22:14:00.000Z | tests/__init__.py | dolfinus/pexpect | 3453ea9b8b326179cf720351001e64c7ea6b07bc | [
"0BSD"
] | 517 | 2015-01-07T02:09:44.000Z | 2022-03-26T14:18:23.000Z |
'''
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
# __init__.py
# The mere presence of this file makes the dir a package.
pass
| 39.192308 | 76 | 0.750736 |
pass
| true | true |
f717da674668d80ac70707def8bcf6d62e7209d0 | 2,410 | py | Python | git_nit/tests/test_parse_review_id.py | dhellmann/git-nit | 9fce9eb8806d6997182107eb5d755a1220fa5e88 | [
"Apache-2.0"
] | 8 | 2018-04-27T07:03:50.000Z | 2018-10-02T08:05:40.000Z | git_nit/tests/test_parse_review_id.py | dhellmann/git-nit | 9fce9eb8806d6997182107eb5d755a1220fa5e88 | [
"Apache-2.0"
] | null | null | null | git_nit/tests/test_parse_review_id.py | dhellmann/git-nit | 9fce9eb8806d6997182107eb5d755a1220fa5e88 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from git_nit import cmd
import testscenarios.testcase
import testtools
class ParseReviewIDTest(testscenarios.testcase.WithScenarios,
testtools.TestCase):
scenarios = [
('fragment with patchset', {
'url': 'https://review.openstack.org/#/c/564559/5/',
'review': '564559',
'patchset': '5',
}),
('fragment with patchset, no trailing slash', {
'url': 'https://review.openstack.org/#/c/564559/5',
'review': '564559',
'patchset': '5',
}),
('fragment without patchset', {
'url': 'https://review.openstack.org/#/c/564559/',
'review': '564559',
'patchset': None,
}),
('fragment without patchset, no trailing slash', {
'url': 'https://review.openstack.org/#/c/564559',
'review': '564559',
'patchset': None,
}),
('path with patchset', {
'url': 'https://review.openstack.org/564559/5/',
'review': '564559',
'patchset': '5',
}),
('path with patchset, no trailing slash', {
'url': 'https://review.openstack.org/564559/5',
'review': '564559',
'patchset': '5',
}),
('path without patchset', {
'url': 'https://review.openstack.org/564559/',
'review': '564559',
'patchset': None,
}),
('path without patchset, no trailing slash', {
'url': 'https://review.openstack.org/564559',
'review': '564559',
'patchset': None,
}),
]
def test(self):
review, patchset = cmd.parse_review_id(self.url)
self.assertEqual(
(self.review, self.patchset),
(review, patchset),
)
| 33.013699 | 74 | 0.554357 |
from git_nit import cmd
import testscenarios.testcase
import testtools
class ParseReviewIDTest(testscenarios.testcase.WithScenarios,
testtools.TestCase):
scenarios = [
('fragment with patchset', {
'url': 'https://review.openstack.org/#/c/564559/5/',
'review': '564559',
'patchset': '5',
}),
('fragment with patchset, no trailing slash', {
'url': 'https://review.openstack.org/#/c/564559/5',
'review': '564559',
'patchset': '5',
}),
('fragment without patchset', {
'url': 'https://review.openstack.org/#/c/564559/',
'review': '564559',
'patchset': None,
}),
('fragment without patchset, no trailing slash', {
'url': 'https://review.openstack.org/#/c/564559',
'review': '564559',
'patchset': None,
}),
('path with patchset', {
'url': 'https://review.openstack.org/564559/5/',
'review': '564559',
'patchset': '5',
}),
('path with patchset, no trailing slash', {
'url': 'https://review.openstack.org/564559/5',
'review': '564559',
'patchset': '5',
}),
('path without patchset', {
'url': 'https://review.openstack.org/564559/',
'review': '564559',
'patchset': None,
}),
('path without patchset, no trailing slash', {
'url': 'https://review.openstack.org/564559',
'review': '564559',
'patchset': None,
}),
]
def test(self):
review, patchset = cmd.parse_review_id(self.url)
self.assertEqual(
(self.review, self.patchset),
(review, patchset),
)
| true | true |
f717da9d442484cb8be34d4f36e19f573b0849be | 10,841 | py | Python | happybase_mock/table.py | evgenia-ch/happybase-mock | 6fbf4a4f9685829b32ad8dc3de3e01b2a9fba964 | [
"MIT"
] | null | null | null | happybase_mock/table.py | evgenia-ch/happybase-mock | 6fbf4a4f9685829b32ad8dc3de3e01b2a9fba964 | [
"MIT"
] | null | null | null | happybase_mock/table.py | evgenia-ch/happybase-mock | 6fbf4a4f9685829b32ad8dc3de3e01b2a9fba964 | [
"MIT"
] | 3 | 2018-05-15T14:10:23.000Z | 2020-08-12T13:45:28.000Z | import struct
import time
import six
from six import iteritems
from .batch import Batch
def _check_table_existence(method):
def wrap(table, *args, **kwargs):
if not table._exists():
raise IOError('TableNotFoundException: %s' % table.name)
return method(table, *args, **kwargs)
return wrap
# Copied from happybase.util
def bytes_increment(b):
"""Increment and truncate a byte string (for sorting purposes)
This functions returns the shortest string that sorts after the given
string when compared using regular string comparison semantics.
This function increments the last byte that is smaller than ``0xFF``, and
drops everything after it. If the string only contains ``0xFF`` bytes,
`None` is returned.
"""
assert isinstance(b, six.binary_type)
b = bytearray(b) # Used subset of its API is the same on Python 2 and 3.
for i in range(len(b) - 1, -1, -1):
if b[i] != 0xff:
b[i] += 1
return bytes(b[:i+1])
return None
class Table(object):
def __init__(self, name, connection):
self.name = name
self.connection = connection
self._enabled = True
# A multi-dimentional map, _data[rowkey][colname][timestamp] = value
self._data = {}
def __repr__(self):
return '<%s.%s name=%r>' % (
__name__,
self.__class__.__name__,
self.name,
)
@_check_table_existence
def families(self):
return self._families
def regions(self):
if not self._exists():
return []
# Table.regions() is meaningless for in-memory mocking, so just return
# some fake data
return [{
'end_key': '',
'id': 1,
'name': '%s,,1.1234' % self.name,
'port': 60000,
'server_name': 'localhost',
'start_key': '',
'version': 1
}]
@_check_table_existence
def row(self, row, columns=None, timestamp=None, include_timestamp=False):
if not isinstance(row, bytes):
row = row.encode('utf-8')
data = self._data.get(row, {})
result = {}
if not columns:
columns = data.keys()
for colname in columns:
if colname in data:
cell = data[colname]
timestamps = sorted(cell.keys(), reverse=True)
if timestamp is None:
# Use latest version if timestamp isn't specified
ts = timestamps[0]
if include_timestamp:
result[colname] = cell[ts], ts
else:
result[colname] = cell[ts]
else:
# Find the first ts < timestamp
for ts in timestamps:
if ts < timestamp:
if include_timestamp:
result[colname] = cell[ts], ts
else:
result[colname] = cell[ts]
break
return result
@_check_table_existence
def rows(self, rows, columns=None, timestamp=None,
include_timestamp=False):
result = []
for row in rows:
data = self.row(row, columns, timestamp, include_timestamp)
result.append((row, data))
return result
@_check_table_existence
def cells(self, row, column, versions=None, timestamp=None,
include_timestamp=False):
if not isinstance(row, bytes):
row = row.encode('utf-8')
if not isinstance(column, bytes):
column = column.encode('utf-8')
result = []
timestamps = sorted(self._data.get(row, {}).get(column, {}).keys(),
reverse=True)
for ts in timestamps:
value = self._data[row][column][ts]
if timestamp is None or ts < timestamp:
if include_timestamp:
result.append((value, ts))
else:
result.append(value)
return result
@_check_table_existence
def scan(self, row_start=None, row_stop=None, row_prefix=None,
columns=None, timestamp=None, include_timestamp=False,
batch_size=1000, scan_batching=None, limit=None,
reverse=False, sorted_columns=False, **kwargs):
# encode columns key and data (for python3 compatibility)
if reverse:
old_row_start = row_start
row_start = row_stop
row_stop = old_row_start
if columns:
for i, col in enumerate(columns):
if not isinstance(col, bytes):
columns[i] = col.encode('utf-8')
if row_prefix is not None:
if not isinstance(row_prefix, bytes):
row_prefix = row_prefix.encode('utf-8')
if row_start is not None or row_stop is not None:
raise TypeError(
"'row_prefix' cannot be combined with 'row_start' "
"or 'row_stop'")
row_start = row_prefix
row_stop = bytes_increment(row_prefix)
if row_start is None:
row_start = b''
else:
if not isinstance(row_start, bytes):
row_start = row_start.encode('utf-8')
if columns:
rows = (k for k, v in self._data.items() if set(columns).intersection(v))
else:
rows = self._data.keys()
if not reverse:
rows = filter(lambda k: k >= row_start, rows)
else:
rows = filter(lambda k: k > row_start, rows)
if row_stop is not None:
if not isinstance(row_stop, bytes):
row_stop = row_stop.encode('utf-8')
if not reverse:
rows = filter(lambda k: k < row_stop, rows)
else:
rows = filter(lambda k: k <= row_stop, rows)
result = sorted([
(row, self.row(row, columns, timestamp, include_timestamp))
for row in rows
], reverse=reverse)
if limit:
if len(result) > limit:
result = result[:limit]
return iter(result)
@_check_table_existence
def put(self, row, data, timestamp=None, wal=True):
# encode row key and data before put (for python3 compatibility)
if not isinstance(row, bytes):
row = row.encode('utf-8')
data = {
(k if isinstance(k, bytes) else k.encode('utf-8')):
(v if isinstance(v, bytes) else v.encode('utf-8'))
for k, v in iteritems(data)
}
# Check data against column families
for colname in data:
cf = colname.decode('utf-8').split(':')[0]
if cf not in self._families:
raise IOError('NoSuchColumnFamilyException: %s' % cf)
if timestamp is None:
timestamp = int(time.time() * 1000)
columns = self._data.get(row)
if columns is None:
columns = {}
self._data[row] = columns
for colname, value in iteritems(data):
column = columns.get(colname)
if column is None:
column = {}
columns[colname] = column
column[timestamp] = value
# Check if it exceeds max_versions
cf = colname.decode('utf-8').split(':')[0]
max_versions = self._max_versions(cf)
if len(column) > max_versions:
# Delete cell with minimum timestamp
del column[min(column.keys())]
@_check_table_existence
def delete(self, row, columns=None, timestamp=None, wal=True):
if not isinstance(row, bytes):
row = row.encode('utf-8')
if columns:
columns = [
column if isinstance(column, bytes)
else column.encode('utf-8')
for column in columns
]
if not columns and timestamp is None:
# Delete whole row
self._data.pop(row, None)
elif row in self._data:
data = self._data[row]
if not columns:
# Delete all columns if not specified
columns = data.keys()
else:
columns = list(set(columns) & data.keys())
if timestamp is None:
timestamp = int(time.time() * 1000)
to_be_deleted = []
for colname in columns:
for ts in data[colname]:
if ts <= timestamp:
to_be_deleted.append((colname, ts))
for colname, ts in to_be_deleted:
del data[colname][ts]
if not data[colname]:
# Delete a column if it doesn't have any timestamps
del data[colname]
def batch(self, timestamp=None, batch_size=None, transaction=False,
wal=True):
return Batch(self, timestamp, batch_size, transaction, wal)
def counter_get(self, row, column):
# Decode as long integer, big endian
value = self.row(row, (column,)).get(column)
if not value:
return 0
return struct.unpack('>q', value)[0]
@_check_table_existence
def counter_set(self, row, column, value=0):
# Encode as long integer, big endian
value = struct.pack('>q', value)
self.delete(row, (column,))
self.put(row, {column: value})
@_check_table_existence
def counter_inc(self, row, column, value=1):
orig_value = self.counter_get(row, column)
self.counter_set(row, column, orig_value + value)
@_check_table_existence
def counter_dec(self, row, column, value=1):
orig_value = self.counter_get(row, column)
self.counter_set(row, column, orig_value - value)
def _exists(self):
return self.name in self.connection._tables
def _max_versions(self, cf):
return self._families[cf]['max_versions']
def _set_families(self, families):
# Default family options
defaults = {
'block_cache_enabled': False,
'bloom_filter_nb_hashes': 0,
'bloom_filter_type': 'NONE',
'bloom_filter_vector_size': 0,
'compression': 'NONE',
'in_memory': False,
'max_versions': 3,
'time_to_live': -1
}
self._families = {}
for name, opts in iteritems(families):
family_options = defaults.copy()
family_options['name'] = name
family_options.update(opts)
self._families[name] = family_options
| 33.984326 | 85 | 0.54091 | import struct
import time
import six
from six import iteritems
from .batch import Batch
def _check_table_existence(method):
def wrap(table, *args, **kwargs):
if not table._exists():
raise IOError('TableNotFoundException: %s' % table.name)
return method(table, *args, **kwargs)
return wrap
def bytes_increment(b):
assert isinstance(b, six.binary_type)
b = bytearray(b)
for i in range(len(b) - 1, -1, -1):
if b[i] != 0xff:
b[i] += 1
return bytes(b[:i+1])
return None
class Table(object):
def __init__(self, name, connection):
self.name = name
self.connection = connection
self._enabled = True
self._data = {}
def __repr__(self):
return '<%s.%s name=%r>' % (
__name__,
self.__class__.__name__,
self.name,
)
@_check_table_existence
def families(self):
return self._families
def regions(self):
if not self._exists():
return []
return [{
'end_key': '',
'id': 1,
'name': '%s,,1.1234' % self.name,
'port': 60000,
'server_name': 'localhost',
'start_key': '',
'version': 1
}]
@_check_table_existence
def row(self, row, columns=None, timestamp=None, include_timestamp=False):
if not isinstance(row, bytes):
row = row.encode('utf-8')
data = self._data.get(row, {})
result = {}
if not columns:
columns = data.keys()
for colname in columns:
if colname in data:
cell = data[colname]
timestamps = sorted(cell.keys(), reverse=True)
if timestamp is None:
ts = timestamps[0]
if include_timestamp:
result[colname] = cell[ts], ts
else:
result[colname] = cell[ts]
else:
# Find the first ts < timestamp
for ts in timestamps:
if ts < timestamp:
if include_timestamp:
result[colname] = cell[ts], ts
else:
result[colname] = cell[ts]
break
return result
@_check_table_existence
def rows(self, rows, columns=None, timestamp=None,
include_timestamp=False):
result = []
for row in rows:
data = self.row(row, columns, timestamp, include_timestamp)
result.append((row, data))
return result
@_check_table_existence
def cells(self, row, column, versions=None, timestamp=None,
include_timestamp=False):
if not isinstance(row, bytes):
row = row.encode('utf-8')
if not isinstance(column, bytes):
column = column.encode('utf-8')
result = []
timestamps = sorted(self._data.get(row, {}).get(column, {}).keys(),
reverse=True)
for ts in timestamps:
value = self._data[row][column][ts]
if timestamp is None or ts < timestamp:
if include_timestamp:
result.append((value, ts))
else:
result.append(value)
return result
@_check_table_existence
def scan(self, row_start=None, row_stop=None, row_prefix=None,
columns=None, timestamp=None, include_timestamp=False,
batch_size=1000, scan_batching=None, limit=None,
reverse=False, sorted_columns=False, **kwargs):
# encode columns key and data (for python3 compatibility)
if reverse:
old_row_start = row_start
row_start = row_stop
row_stop = old_row_start
if columns:
for i, col in enumerate(columns):
if not isinstance(col, bytes):
columns[i] = col.encode('utf-8')
if row_prefix is not None:
if not isinstance(row_prefix, bytes):
row_prefix = row_prefix.encode('utf-8')
if row_start is not None or row_stop is not None:
raise TypeError(
"'row_prefix' cannot be combined with 'row_start' "
"or 'row_stop'")
row_start = row_prefix
row_stop = bytes_increment(row_prefix)
if row_start is None:
row_start = b''
else:
if not isinstance(row_start, bytes):
row_start = row_start.encode('utf-8')
if columns:
rows = (k for k, v in self._data.items() if set(columns).intersection(v))
else:
rows = self._data.keys()
if not reverse:
rows = filter(lambda k: k >= row_start, rows)
else:
rows = filter(lambda k: k > row_start, rows)
if row_stop is not None:
if not isinstance(row_stop, bytes):
row_stop = row_stop.encode('utf-8')
if not reverse:
rows = filter(lambda k: k < row_stop, rows)
else:
rows = filter(lambda k: k <= row_stop, rows)
result = sorted([
(row, self.row(row, columns, timestamp, include_timestamp))
for row in rows
], reverse=reverse)
if limit:
if len(result) > limit:
result = result[:limit]
return iter(result)
@_check_table_existence
def put(self, row, data, timestamp=None, wal=True):
# encode row key and data before put (for python3 compatibility)
if not isinstance(row, bytes):
row = row.encode('utf-8')
data = {
(k if isinstance(k, bytes) else k.encode('utf-8')):
(v if isinstance(v, bytes) else v.encode('utf-8'))
for k, v in iteritems(data)
}
# Check data against column families
for colname in data:
cf = colname.decode('utf-8').split(':')[0]
if cf not in self._families:
raise IOError('NoSuchColumnFamilyException: %s' % cf)
if timestamp is None:
timestamp = int(time.time() * 1000)
columns = self._data.get(row)
if columns is None:
columns = {}
self._data[row] = columns
for colname, value in iteritems(data):
column = columns.get(colname)
if column is None:
column = {}
columns[colname] = column
column[timestamp] = value
# Check if it exceeds max_versions
cf = colname.decode('utf-8').split(':')[0]
max_versions = self._max_versions(cf)
if len(column) > max_versions:
# Delete cell with minimum timestamp
del column[min(column.keys())]
@_check_table_existence
def delete(self, row, columns=None, timestamp=None, wal=True):
if not isinstance(row, bytes):
row = row.encode('utf-8')
if columns:
columns = [
column if isinstance(column, bytes)
else column.encode('utf-8')
for column in columns
]
if not columns and timestamp is None:
# Delete whole row
self._data.pop(row, None)
elif row in self._data:
data = self._data[row]
if not columns:
# Delete all columns if not specified
columns = data.keys()
else:
columns = list(set(columns) & data.keys())
if timestamp is None:
timestamp = int(time.time() * 1000)
to_be_deleted = []
for colname in columns:
for ts in data[colname]:
if ts <= timestamp:
to_be_deleted.append((colname, ts))
for colname, ts in to_be_deleted:
del data[colname][ts]
if not data[colname]:
# Delete a column if it doesn't have any timestamps
del data[colname]
def batch(self, timestamp=None, batch_size=None, transaction=False,
wal=True):
return Batch(self, timestamp, batch_size, transaction, wal)
def counter_get(self, row, column):
value = self.row(row, (column,)).get(column)
if not value:
return 0
return struct.unpack('>q', value)[0]
@_check_table_existence
def counter_set(self, row, column, value=0):
value = struct.pack('>q', value)
self.delete(row, (column,))
self.put(row, {column: value})
@_check_table_existence
def counter_inc(self, row, column, value=1):
orig_value = self.counter_get(row, column)
self.counter_set(row, column, orig_value + value)
@_check_table_existence
def counter_dec(self, row, column, value=1):
orig_value = self.counter_get(row, column)
self.counter_set(row, column, orig_value - value)
def _exists(self):
return self.name in self.connection._tables
def _max_versions(self, cf):
return self._families[cf]['max_versions']
def _set_families(self, families):
defaults = {
'block_cache_enabled': False,
'bloom_filter_nb_hashes': 0,
'bloom_filter_type': 'NONE',
'bloom_filter_vector_size': 0,
'compression': 'NONE',
'in_memory': False,
'max_versions': 3,
'time_to_live': -1
}
self._families = {}
for name, opts in iteritems(families):
family_options = defaults.copy()
family_options['name'] = name
family_options.update(opts)
self._families[name] = family_options
| true | true |
f717dab203ae844f0e1a238ee942846c22823c19 | 38,297 | py | Python | test/terra/backends/qasm_simulator/qasm_snapshot.py | ares201005/qiskit-aer | fb3bab00ab810e73ad333b0f538fa6c3c53f054e | [
"Apache-2.0"
] | null | null | null | test/terra/backends/qasm_simulator/qasm_snapshot.py | ares201005/qiskit-aer | fb3bab00ab810e73ad333b0f538fa6c3c53f054e | [
"Apache-2.0"
] | null | null | null | test/terra/backends/qasm_simulator/qasm_snapshot.py | ares201005/qiskit-aer | fb3bab00ab810e73ad333b0f538fa6c3c53f054e | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
QasmSimulator Integration Tests for Snapshot instructions
"""
import logging
import itertools as it
import numpy as np
from qiskit import QuantumCircuit
from qiskit.compiler import assemble
from qiskit.quantum_info import DensityMatrix, Pauli, Operator
from qiskit.providers.aer import QasmSimulator
from qiskit.providers.aer import AerError
from test.terra.reference.ref_snapshot_state import (
snapshot_state_circuits_deterministic, snapshot_state_counts_deterministic,
snapshot_state_pre_measure_statevector_deterministic,
snapshot_state_post_measure_statevector_deterministic,
snapshot_state_circuits_nondeterministic,
snapshot_state_counts_nondeterministic,
snapshot_state_pre_measure_statevector_nondeterministic,
snapshot_state_post_measure_statevector_nondeterministic)
from test.terra.reference.ref_snapshot_probabilities import (
snapshot_probabilities_circuits, snapshot_probabilities_counts,
snapshot_probabilities_labels_qubits,
snapshot_probabilities_post_meas_probs,
snapshot_probabilities_pre_meas_probs)
from test.terra.reference.ref_snapshot_expval import (
snapshot_expval_circuits, snapshot_expval_counts, snapshot_expval_labels,
snapshot_expval_post_meas_values, snapshot_expval_pre_meas_values)
class QasmSnapshotStatevectorTests:
"""QasmSimulator snapshot statevector tests."""
SIMULATOR = QasmSimulator()
SUPPORTED_QASM_METHODS = [
'automatic', 'statevector', 'statevector_gpu', 'statevector_thrust',
'matrix_product_state'
]
BACKEND_OPTS = {}
def statevector_snapshots(self, data, label):
"""Format snapshots as list of Numpy arrays"""
snaps = data.get("snapshots", {}).get("statevector", {}).get(label, [])
statevecs = []
for snap in snaps:
self.assertIsInstance(snap, np.ndarray)
statevecs.append(snap)
return statevecs
def test_snapshot_statevector_pre_measure_det(self):
"""Test snapshot statevector before deterministic final measurement"""
shots = 10
label = "snap"
counts_targets = snapshot_state_counts_deterministic(shots)
statevec_targets = snapshot_state_pre_measure_statevector_deterministic(
)
circuits = snapshot_state_circuits_deterministic(label,
'statevector',
post_measure=False)
qobj = assemble(circuits, self.SIMULATOR, shots=shots)
job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
result = job.result()
success = getattr(result, 'success', False)
method = self.BACKEND_OPTS.get('method', 'automatic')
if method not in QasmSnapshotStatevectorTests.SUPPORTED_QASM_METHODS:
self.assertFalse(success)
else:
self.assertTrue(success)
self.compare_counts(result, circuits, counts_targets, delta=0)
# Check snapshots
for j, circuit in enumerate(circuits):
data = result.data(circuit)
snaps = self.statevector_snapshots(data, label)
self.assertTrue(len(snaps), 1)
target = statevec_targets[j]
value = snaps[0]
self.assertTrue(np.allclose(value, target))
def test_snapshot_statevector_pre_measure_nondet(self):
"""Test snapshot statevector before non-deterministic final measurement"""
shots = 100
label = "snap"
counts_targets = snapshot_state_counts_nondeterministic(shots)
statevec_targets = snapshot_state_pre_measure_statevector_nondeterministic(
)
circuits = snapshot_state_circuits_nondeterministic(label,
'statevector',
post_measure=False)
qobj = assemble(circuits, self.SIMULATOR, shots=shots)
job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
result = job.result()
success = getattr(result, 'success', False)
method = self.BACKEND_OPTS.get('method', 'automatic')
if method not in QasmSnapshotStatevectorTests.SUPPORTED_QASM_METHODS:
self.assertFalse(success)
else:
self.assertTrue(success)
self.compare_counts(result,
circuits,
counts_targets,
delta=0.2 * shots)
# Check snapshots
for j, circuit in enumerate(circuits):
data = result.data(circuit)
snaps = self.statevector_snapshots(data, label)
self.assertTrue(len(snaps), 1)
target = statevec_targets[j]
value = snaps[0]
self.assertTrue(np.allclose(value, target))
def test_snapshot_statevector_post_measure_det(self):
"""Test snapshot statevector after deterministic final measurement"""
shots = 10
label = "snap"
counts_targets = snapshot_state_counts_deterministic(shots)
statevec_targets = snapshot_state_post_measure_statevector_deterministic(
)
circuits = snapshot_state_circuits_deterministic(label,
'statevector',
post_measure=True)
qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)
job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
result = job.result()
success = getattr(result, 'success', False)
method = self.BACKEND_OPTS.get('method', 'automatic')
if method not in QasmSnapshotStatevectorTests.SUPPORTED_QASM_METHODS:
logging.getLogger().setLevel(logging.CRITICAL)
self.assertFalse(success)
else:
self.assertTrue(success)
self.compare_counts(result, circuits, counts_targets, delta=0)
# Check snapshots
for i, circuit in enumerate(circuits):
data = result.data(circuit)
snaps = self.statevector_snapshots(data, label)
for j, mem in enumerate(data['memory']):
target = statevec_targets[i].get(mem)
self.assertTrue(np.allclose(snaps[j], target))
def test_snapshot_statevector_post_measure_nondet(self):
"""Test snapshot statevector after non-deterministic final measurement"""
shots = 100
label = "snap"
counts_targets = snapshot_state_counts_nondeterministic(shots)
statevec_targets = snapshot_state_post_measure_statevector_nondeterministic(
)
circuits = snapshot_state_circuits_nondeterministic(label,
'statevector',
post_measure=True)
qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)
job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
result = job.result()
success = getattr(result, 'success', False)
method = self.BACKEND_OPTS.get('method', 'automatic')
if method not in QasmSnapshotStatevectorTests.SUPPORTED_QASM_METHODS:
self.assertFalse(success)
else:
self.assertTrue(success)
self.compare_counts(result,
circuits,
counts_targets,
delta=0.2 * shots)
# Check snapshots
for i, circuit in enumerate(circuits):
data = result.data(circuit)
snaps = self.statevector_snapshots(data, label)
for j, mem in enumerate(data['memory']):
target = statevec_targets[i].get(mem)
self.assertTrue(np.allclose(snaps[j], target))
class QasmSnapshotStabilizerTests:
"""QasmSimulator method snapshot stabilizer tests."""
SIMULATOR = QasmSimulator()
SUPPORTED_QASM_METHODS = ['automatic', 'stabilizer']
BACKEND_OPTS = {}
@staticmethod
def stabilizer_snapshots(data, label):
"""Get stabilizer snapshots"""
return data.get("snapshots", {}).get("stabilizer", {}).get(label, [])
@staticmethod
def stabilizes_statevector(stabilizer, statevector):
"""Return True if two stabilizer states are equal."""
# Get stabilizer and destabilizers and convert to sets
for stab in stabilizer:
if stab[0] == '-':
pauli_mat = -1 * Pauli.from_label(stab[1:]).to_matrix()
else:
pauli_mat = Pauli.from_label(stab).to_matrix()
val = statevector.conj().dot(pauli_mat.dot(statevector))
if not np.isclose(val, 1):
return False
return True
def test_snapshot_stabilizer_pre_measure_det(self):
"""Test snapshot stabilizer before deterministic final measurement"""
shots = 10
label = "snap"
counts_targets = snapshot_state_counts_deterministic(shots)
statevec_targets = snapshot_state_pre_measure_statevector_deterministic(
)
circuits = snapshot_state_circuits_deterministic(label,
'stabilizer',
post_measure=False)
qobj = assemble(circuits, self.SIMULATOR, shots=shots)
job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
result = job.result()
success = getattr(result, 'success', False)
method = self.BACKEND_OPTS.get('method', 'automatic')
if method not in QasmSnapshotStabilizerTests.SUPPORTED_QASM_METHODS:
self.assertFalse(success)
else:
self.assertTrue(success)
self.compare_counts(result, circuits, counts_targets, delta=0)
# Check snapshots
for j, circuit in enumerate(circuits):
data = result.data(circuit)
snaps = self.stabilizer_snapshots(data, label)
self.assertEqual(len(snaps), 1)
statevec = statevec_targets[j]
stabilizer = snaps[0]
self.assertTrue(
self.stabilizes_statevector(stabilizer, statevec))
def test_snapshot_stabilizer_pre_measure_nondet(self):
"""Test snapshot stabilizer before non-deterministic final measurement"""
shots = 100
label = "snap"
counts_targets = snapshot_state_counts_nondeterministic(shots)
statevec_targets = snapshot_state_pre_measure_statevector_nondeterministic(
)
circuits = snapshot_state_circuits_nondeterministic(label,
'stabilizer',
post_measure=False)
qobj = assemble(circuits, self.SIMULATOR, shots=shots)
job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
result = job.result()
success = getattr(result, 'success', False)
method = self.BACKEND_OPTS.get('method', 'automatic')
if method not in QasmSnapshotStabilizerTests.SUPPORTED_QASM_METHODS:
self.assertFalse(success)
else:
self.assertTrue(success)
self.compare_counts(result,
circuits,
counts_targets,
delta=0.2 * shots)
# Check snapshots
for j, circuit in enumerate(circuits):
data = result.data(circuit)
snaps = self.stabilizer_snapshots(data, label)
self.assertEqual(len(snaps), 1)
statevec = statevec_targets[j]
stabilizer = snaps[0]
self.assertTrue(
self.stabilizes_statevector(stabilizer, statevec))
def test_snapshot_stabilizer_post_measure_det(self):
"""Test snapshot stabilizer after deterministic final measurement"""
shots = 10
label = "snap"
counts_targets = snapshot_state_counts_deterministic(shots)
statevec_targets = snapshot_state_post_measure_statevector_deterministic(
)
circuits = snapshot_state_circuits_deterministic(label,
'stabilizer',
post_measure=True)
qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)
job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
result = job.result()
success = getattr(result, 'success', False)
method = self.BACKEND_OPTS.get('method', 'automatic')
if method not in QasmSnapshotStabilizerTests.SUPPORTED_QASM_METHODS:
self.assertFalse(success)
else:
self.assertTrue(success)
self.compare_counts(result, circuits, counts_targets, delta=0)
# Check snapshots
for i, circuit in enumerate(circuits):
data = result.data(circuit)
snaps = self.stabilizer_snapshots(data, label)
for j, mem in enumerate(data['memory']):
statevec = statevec_targets[i].get(mem)
stabilizer = snaps[j]
self.assertTrue(
self.stabilizes_statevector(stabilizer, statevec))
def test_snapshot_stabilizer_post_measure_nondet(self):
"""Test snapshot stabilizer after non-deterministic final measurement"""
shots = 100
label = "snap"
counts_targets = snapshot_state_counts_nondeterministic(shots)
statevec_targets = snapshot_state_post_measure_statevector_nondeterministic(
)
circuits = snapshot_state_circuits_nondeterministic(label,
'stabilizer',
post_measure=True)
qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)
job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
result = job.result()
success = getattr(result, 'success', False)
method = self.BACKEND_OPTS.get('method', 'automatic')
if method not in QasmSnapshotStabilizerTests.SUPPORTED_QASM_METHODS:
self.assertFalse(success)
else:
self.assertTrue(success)
self.compare_counts(result,
circuits,
counts_targets,
delta=0.2 * shots)
# Check snapshots
for i, circuit in enumerate(circuits):
data = result.data(circuit)
snaps = self.stabilizer_snapshots(data, label)
for j, mem in enumerate(data['memory']):
statevec = statevec_targets[i].get(mem)
stabilizer = snaps[j]
self.assertTrue(
self.stabilizes_statevector(stabilizer, statevec))
class QasmSnapshotDensityMatrixTests:
    """QasmSimulator snapshot density matrix tests."""

    SIMULATOR = QasmSimulator()
    # Density-matrix snapshots are only implemented by these methods.
    SUPPORTED_QASM_METHODS = [
        'automatic', 'density_matrix', 'density_matrix_gpu',
        'density_matrix_thrust'
    ]
    BACKEND_OPTS = {}

    def density_snapshots(self, data, label):
        """Format snapshots as a dict of Numpy arrays keyed by memory value.

        Args:
            data (dict): result data for a single experiment.
            label (str): the snapshot label to extract.

        Returns:
            dict: mapping of classical-memory hex string to the density
            matrix recorded for that memory value.
        """
        # Missing levels default to empty containers so an absent snapshot
        # yields an empty dict instead of raising KeyError.
        snaps = data.get("snapshots", {}).get("density_matrix",
                                              {}).get(label, [])
        output = {}
        for snap_dict in snaps:
            memory = snap_dict['memory']
            # Values should already be deserialized to ndarrays by Result.
            self.assertIsInstance(snap_dict['value'], np.ndarray)
            output[memory] = snap_dict['value']
        return output

    def test_snapshot_density_matrix_pre_measure_det(self):
        """Test snapshot density matrix before deterministic final measurement"""
        shots = 10
        label = "snap"
        counts_targets = snapshot_state_counts_deterministic(shots)
        statevec_targets = snapshot_state_pre_measure_statevector_deterministic(
        )
        circuits = snapshot_state_circuits_deterministic(label,
                                                         'density_matrix',
                                                         post_measure=False)
        qobj = assemble(circuits, self.SIMULATOR, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotDensityMatrixTests.SUPPORTED_QASM_METHODS:
            # Unsupported methods must report failure, not bad data.
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result, circuits, counts_targets, delta=0)
            # Check snapshots
            for j, circuit in enumerate(circuits):
                data = result.data(circuit)
                snaps = self.density_snapshots(data, label)
                # BUG FIX: the original `assertTrue(len(snaps), 1)` passed
                # `1` as the assertion *message*, so only truthiness of the
                # length was checked.  Make that non-emptiness explicit.
                self.assertTrue(len(snaps) >= 1)
                target = np.outer(statevec_targets[j],
                                  statevec_targets[j].conj())
                # Pre-measurement all memory bits should be 0
                value = snaps.get('0x0')
                self.assertTrue(np.allclose(value, target))

    def test_snapshot_density_matrix_pre_measure_nondet(self):
        """Test snapshot density matrix before non-deterministic final measurement"""
        shots = 100
        label = "snap"
        counts_targets = snapshot_state_counts_nondeterministic(shots)
        statevec_targets = snapshot_state_pre_measure_statevector_nondeterministic(
        )
        circuits = snapshot_state_circuits_nondeterministic(label,
                                                            'density_matrix',
                                                            post_measure=False)
        qobj = assemble(circuits, self.SIMULATOR, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotDensityMatrixTests.SUPPORTED_QASM_METHODS:
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result,
                                circuits,
                                counts_targets,
                                delta=0.2 * shots)
            # Check snapshots
            for j, circuit in enumerate(circuits):
                data = result.data(circuit)
                snaps = self.density_snapshots(data, label)
                # BUG FIX: see pre_measure_det above — original assertTrue
                # call swallowed the `1` as a message argument.
                self.assertTrue(len(snaps) >= 1)
                target = np.outer(statevec_targets[j],
                                  statevec_targets[j].conj())
                value = snaps.get('0x0')
                self.assertTrue(np.allclose(value, target))

    def test_snapshot_density_matrix_post_measure_det(self):
        """Test snapshot density matrix after deterministic final measurement"""
        shots = 10
        label = "snap"
        counts_targets = snapshot_state_counts_deterministic(shots)
        statevec_targets = snapshot_state_post_measure_statevector_deterministic(
        )
        circuits = snapshot_state_circuits_deterministic(label,
                                                         'density_matrix',
                                                         post_measure=True)
        qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotDensityMatrixTests.SUPPORTED_QASM_METHODS:
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result, circuits, counts_targets, delta=0)
            # Check snapshots: each shot's memory value selects the matching
            # post-measurement density matrix (the unused loop index was
            # removed from the original `enumerate` here).
            for i, circuit in enumerate(circuits):
                data = result.data(circuit)
                snaps = self.density_snapshots(data, label)
                for mem in data['memory']:
                    target = statevec_targets[i].get(mem)
                    target = np.outer(target, target.conj())
                    value = snaps.get(mem)
                    self.assertTrue(np.allclose(value, target))

    def test_snapshot_density_matrix_post_measure_nondet(self):
        """Test snapshot density matrix after non-deterministic final measurement"""
        shots = 100
        label = "snap"
        counts_targets = snapshot_state_counts_nondeterministic(shots)
        statevec_targets = snapshot_state_post_measure_statevector_nondeterministic(
        )
        circuits = snapshot_state_circuits_nondeterministic(label,
                                                            'density_matrix',
                                                            post_measure=True)
        qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotDensityMatrixTests.SUPPORTED_QASM_METHODS:
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result,
                                circuits,
                                counts_targets,
                                delta=0.2 * shots)
            # Check snapshots
            for i, circuit in enumerate(circuits):
                data = result.data(circuit)
                snaps = self.density_snapshots(data, label)
                for mem in data['memory']:
                    target = statevec_targets[i].get(mem)
                    target = np.outer(target, target.conj())
                    value = snaps.get(mem)
                    self.assertTrue(np.allclose(value, target))
class QasmSnapshotProbabilitiesTests:
    """QasmSimulator snapshot probabilities tests."""

    SIMULATOR = QasmSimulator()
    # Probability snapshots are supported by every statevector-, stabilizer-,
    # density-matrix- and MPS-based simulation method.
    SUPPORTED_QASM_METHODS = [
        'automatic',
        'statevector',
        'statevector_gpu',
        'statevector_thrust',
        'stabilizer',
        'density_matrix',
        'density_matrix_gpu',
        'density_matrix_thrust',
        'matrix_product_state',
    ]
    BACKEND_OPTS = {}

    @staticmethod
    def probability_snapshots(data, labels):
        """Format snapshots as nested dicts.

        Args:
            data (dict): result data for a single experiment.
            labels (list): snapshot labels to extract.

        Returns:
            dict: ``{label: {memory_hex: probabilities_dict}}``.
        """
        output = {}
        for label in labels:
            # Missing levels default to empty containers so an absent
            # snapshot produces an empty dict rather than a KeyError.
            snaps = data.get("snapshots", {}).get("probabilities",
                                                  {}).get(label, [])
            output[label] = {
                snap_dict['memory']: snap_dict['value']
                for snap_dict in snaps
            }
        return output

    def test_snapshot_probabilities_pre_measure(self):
        """Test snapshot probabilities before final measurement"""
        shots = 1000
        labels = list(snapshot_probabilities_labels_qubits().keys())
        counts_targets = snapshot_probabilities_counts(shots)
        prob_targets = snapshot_probabilities_pre_meas_probs()
        circuits = snapshot_probabilities_circuits(post_measure=False)
        qobj = assemble(circuits, self.SIMULATOR, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotProbabilitiesTests.SUPPORTED_QASM_METHODS:
            # Unsupported methods must report failure, not bad data.
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result,
                                circuits,
                                counts_targets,
                                delta=0.1 * shots)
            # Check snapshots
            for j, circuit in enumerate(circuits):
                data = result.data(circuit)
                all_snapshots = self.probability_snapshots(data, labels)
                for label in labels:
                    snaps = all_snapshots.get(label, {})
                    # BUG FIX: the original `assertTrue(len(snaps), 1)`
                    # passed `1` as the assertion *message*, so only the
                    # truthiness of the length was checked.  Assert
                    # non-emptiness explicitly instead.
                    self.assertTrue(len(snaps) >= 1)
                    for memory, value in snaps.items():
                        target = prob_targets[j].get(label, {}).get(memory, {})
                        self.assertDictAlmostEqual(value, target, delta=1e-7)

    def test_snapshot_probabilities_post_measure(self):
        """Test snapshot probabilities after final measurement"""
        shots = 1000
        labels = list(snapshot_probabilities_labels_qubits().keys())
        counts_targets = snapshot_probabilities_counts(shots)
        prob_targets = snapshot_probabilities_post_meas_probs()
        circuits = snapshot_probabilities_circuits(post_measure=True)
        qobj = assemble(circuits, self.SIMULATOR, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotProbabilitiesTests.SUPPORTED_QASM_METHODS:
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result,
                                circuits,
                                counts_targets,
                                delta=0.1 * shots)
            # Check snapshots: post-measure may record several memory
            # outcomes per label, so no length assertion is made here.
            for j, circuit in enumerate(circuits):
                data = result.data(circuit)
                all_snapshots = self.probability_snapshots(data, labels)
                for label in labels:
                    snaps = all_snapshots.get(label, {})
                    for memory, value in snaps.items():
                        target = prob_targets[j].get(label, {}).get(memory, {})
                        self.assertDictAlmostEqual(value, target, delta=1e-7)
class QasmSnapshotExpValPauliTests:
    """QasmSimulator snapshot pauli expectation value tests."""

    SIMULATOR = QasmSimulator()
    SUPPORTED_QASM_METHODS = [
        'automatic', 'statevector', 'statevector_gpu', 'statevector_thrust',
        'density_matrix', 'density_matrix_gpu', 'density_matrix_thrust',
        'matrix_product_state', 'stabilizer'
    ]
    BACKEND_OPTS = {}

    @staticmethod
    def expval_snapshots(data, labels):
        """Format snapshots as nested dicts.

        Args:
            data (dict): result data for a single experiment.
            labels (list): snapshot labels to extract.

        Returns:
            dict: ``{label: {memory_hex: expectation_value}}``.
        """
        output = {}
        for label in labels:
            # Missing levels default to empty containers.
            snaps = data.get("snapshots", {}).get("expectation_value",
                                                  {}).get(label, [])
            # Convert list of snapshot records into a memory-keyed dict.
            inner = {}
            for snap_dict in snaps:
                val = snap_dict['value']
                inner[snap_dict['memory']] = val
            output[label] = inner
        return output

    def test_snapshot_expval_pauli_pre_measure(self):
        """Test snapshot expectation value (pauli) before final measurement"""
        shots = 1000
        labels = snapshot_expval_labels()
        counts_targets = snapshot_expval_counts(shots)
        value_targets = snapshot_expval_pre_meas_values()
        circuits = snapshot_expval_circuits(pauli=True, post_measure=False)
        qobj = assemble(circuits, self.SIMULATOR, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotExpValPauliTests.SUPPORTED_QASM_METHODS:
            # Unsupported methods must report failure, not bad data.
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result,
                                circuits,
                                counts_targets,
                                delta=0.1 * shots)
            # Check snapshots
            for j, circuit in enumerate(circuits):
                data = result.data(circuit)
                all_snapshots = self.expval_snapshots(data, labels)
                for label in labels:
                    snaps = all_snapshots.get(label, {})
                    # BUG FIX: the original `assertTrue(len(snaps), 1)`
                    # passed `1` as the assertion *message*; only
                    # non-emptiness was really checked.  Make it explicit.
                    self.assertTrue(len(snaps) >= 1)
                    for memory, value in snaps.items():
                        target = value_targets[j].get(label,
                                                      {}).get(memory, {})
                        self.assertAlmostEqual(value, target, delta=1e-7)

    def test_snapshot_expval_pauli_post_measure(self):
        """Test snapshot expectation value (pauli) after final measurement"""
        shots = 1000
        labels = snapshot_expval_labels()
        counts_targets = snapshot_expval_counts(shots)
        value_targets = snapshot_expval_post_meas_values()
        circuits = snapshot_expval_circuits(pauli=True, post_measure=True)
        qobj = assemble(circuits, self.SIMULATOR, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotExpValPauliTests.SUPPORTED_QASM_METHODS:
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result,
                                circuits,
                                counts_targets,
                                delta=0.1 * shots)
            # Check snapshots
            for j, circuit in enumerate(circuits):
                data = result.data(circuit)
                all_snapshots = self.expval_snapshots(data, labels)
                for label in labels:
                    snaps = all_snapshots.get(label, {})
                    # BUG FIX: same misuse of assertTrue's msg argument as
                    # in the pre-measure test; post-measure may hold more
                    # than one memory outcome, so only assert non-empty.
                    self.assertTrue(len(snaps) >= 1)
                    for memory, value in snaps.items():
                        target = value_targets[j].get(label,
                                                      {}).get(memory, {})
                        self.assertAlmostEqual(value, target, delta=1e-7)
class QasmSnapshotExpvalPauliNCTests:
    """QasmSimulator snapshot pauli expectation value tests on random states."""
    SIMULATOR = QasmSimulator()
    # Stabilizer is absent here: random u3 product states are non-Clifford.
    SUPPORTED_QASM_METHODS = [
        'automatic', 'statevector', 'statevector_gpu', 'statevector_thrust',
        'density_matrix', 'density_matrix_gpu', 'density_matrix_thrust',
        'matrix_product_state',
    ]
    BACKEND_OPTS = {}
    def general_test(self, pauli, num_qubits=None, seed=None):
        """Compare a simulated Pauli expval snapshot against quantum_info.

        Args:
            pauli (str): Pauli label acting on qubits ``0..len(pauli)-1``.
            num_qubits (int): total circuit width; defaults to ``len(pauli)``.
            seed (int): seed for the random product-state preparation.
        """
        pauli_qubits = list(range(len(pauli)))
        if num_qubits is None:
            num_qubits = len(pauli_qubits)
        # Prepare random N-qubit product input state
        # from seed
        rng = np.random.default_rng(seed)
        params = rng.uniform(-1, 1, size=(num_qubits, 3))
        init_circ = QuantumCircuit(num_qubits)
        for i, par in enumerate(params):
            init_circ.u3(*par, i)
        # Compute the target expectation value Tr[rho * P] analytically
        # using quantum_info, composing the Pauli only on pauli_qubits.
        rho = DensityMatrix.from_instruction(init_circ)
        op = Operator.from_label(pauli)
        target = np.trace(Operator(rho).compose(op, pauli_qubits).data)
        # Simulate expectation value via a snapshot with coefficient 1.
        qc = init_circ.copy()
        qc.snapshot_expectation_value('final', [(1, pauli)], pauli_qubits)
        qobj = assemble(qc)
        result = self.SIMULATOR.run(
            qobj, backend_options=self.BACKEND_OPTS).result()
        self.assertTrue(getattr(result, 'success', False))
        snapshots = result.data(0).get('snapshots', {})
        self.assertIn('expectation_value', snapshots)
        self.assertIn('final', snapshots['expectation_value'])
        # Single snapshot instruction -> first (only) record holds the value.
        expval = snapshots.get('expectation_value', {})['final'][0]['value']
        self.assertAlmostEqual(expval, target)
    def test_pauli1(self):
        """Test all 1-qubit Pauli snapshots."""
        seed = 100
        for tup in ['I', 'X', 'Y', 'Z']:
            # reversed() converts qiskit little-endian qubit order to a label
            # (a no-op for the single-character case, kept for symmetry).
            pauli = ''.join(reversed(tup))
            with self.subTest(msg='Pauli {}'.format(pauli)):
                self.general_test(pauli, num_qubits=3, seed=seed)
    def test_pauli2(self):
        """Test all 2-qubit Pauli snapshots."""
        seed = 100
        for tup in it.product(['I', 'X', 'Y', 'Z'], repeat=2):
            pauli = ''.join(reversed(tup))
            with self.subTest(msg='Pauli {}'.format(pauli)):
                self.general_test(pauli, num_qubits=3, seed=seed)
    def test_pauli3(self):
        """Test all 3-qubit Pauli snapshots."""
        seed = 100
        for tup in it.product(['I', 'X', 'Y', 'Z'], repeat=3):
            pauli = ''.join(reversed(tup))
            with self.subTest(msg='Pauli {}'.format(pauli)):
                self.general_test(pauli, num_qubits=3, seed=seed)
class QasmSnapshotExpValMatrixTests:
    """QasmSimulator snapshot matrix expectation value tests."""

    SIMULATOR = QasmSimulator()
    SUPPORTED_QASM_METHODS = [
        'automatic', 'statevector', 'statevector_gpu', 'statevector_thrust',
        'matrix_product_state'
    ]
    BACKEND_OPTS = {}

    @staticmethod
    def expval_snapshots(data, labels):
        """Format snapshots as nested dicts.

        Args:
            data (dict): result data for a single experiment.
            labels (list): snapshot labels to extract.

        Returns:
            dict: ``{label: {memory_hex: expectation_value}}``.
        """
        output = {}
        for label in labels:
            # Missing levels default to empty containers.
            snaps = data.get("snapshots", {}).get("expectation_value",
                                                  {}).get(label, [])
            # Convert list of snapshot records into a memory-keyed dict.
            inner = {}
            for snap_dict in snaps:
                inner[snap_dict['memory']] = snap_dict['value']
            output[label] = inner
        return output

    def test_snapshot_expval_matrix_pre_measure(self):
        """Test snapshot expectation value (matrix) before final measurement"""
        shots = 1000
        labels = snapshot_expval_labels()
        counts_targets = snapshot_expval_counts(shots)
        value_targets = snapshot_expval_pre_meas_values()
        circuits = snapshot_expval_circuits(pauli=False, post_measure=False)
        qobj = assemble(circuits, self.SIMULATOR, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotExpValMatrixTests.SUPPORTED_QASM_METHODS:
            # Unsupported methods must report failure, not bad data.
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result,
                                circuits,
                                counts_targets,
                                delta=0.1 * shots)
            # Check snapshots
            for j, circuit in enumerate(circuits):
                data = result.data(circuit)
                all_snapshots = self.expval_snapshots(data, labels)
                for label in labels:
                    snaps = all_snapshots.get(label, {})
                    # BUG FIX: the original `assertTrue(len(snaps), 1)`
                    # passed `1` as the assertion *message*; only
                    # non-emptiness was really checked.  Make it explicit.
                    self.assertTrue(len(snaps) >= 1)
                    for memory, value in snaps.items():
                        target = value_targets[j].get(label,
                                                      {}).get(memory, {})
                        self.assertAlmostEqual(value, target, delta=1e-7)

    def test_snapshot_expval_matrix_post_measure(self):
        """Test snapshot expectation value (matrix) after final measurement"""
        shots = 1000
        labels = snapshot_expval_labels()
        counts_targets = snapshot_expval_counts(shots)
        value_targets = snapshot_expval_post_meas_values()
        circuits = snapshot_expval_circuits(pauli=False, post_measure=True)
        qobj = assemble(circuits, self.SIMULATOR, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotExpValMatrixTests.SUPPORTED_QASM_METHODS:
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result,
                                circuits,
                                counts_targets,
                                delta=0.1 * shots)
            # Check snapshots
            for j, circuit in enumerate(circuits):
                data = result.data(circuit)
                all_snapshots = self.expval_snapshots(data, labels)
                for label in labels:
                    snaps = all_snapshots.get(label, {})
                    # BUG FIX: same assertTrue-msg misuse; post-measure may
                    # record several memory outcomes, so assert non-empty.
                    self.assertTrue(len(snaps) >= 1)
                    for memory, value in snaps.items():
                        target = value_targets[j].get(label,
                                                      {}).get(memory, {})
                        self.assertAlmostEqual(value, target, delta=1e-7)
| 44.844262 | 85 | 0.589733 |
import logging
import itertools as it
import numpy as np
from qiskit import QuantumCircuit
from qiskit.compiler import assemble
from qiskit.quantum_info import DensityMatrix, Pauli, Operator
from qiskit.providers.aer import QasmSimulator
from qiskit.providers.aer import AerError
from test.terra.reference.ref_snapshot_state import (
snapshot_state_circuits_deterministic, snapshot_state_counts_deterministic,
snapshot_state_pre_measure_statevector_deterministic,
snapshot_state_post_measure_statevector_deterministic,
snapshot_state_circuits_nondeterministic,
snapshot_state_counts_nondeterministic,
snapshot_state_pre_measure_statevector_nondeterministic,
snapshot_state_post_measure_statevector_nondeterministic)
from test.terra.reference.ref_snapshot_probabilities import (
snapshot_probabilities_circuits, snapshot_probabilities_counts,
snapshot_probabilities_labels_qubits,
snapshot_probabilities_post_meas_probs,
snapshot_probabilities_pre_meas_probs)
from test.terra.reference.ref_snapshot_expval import (
snapshot_expval_circuits, snapshot_expval_counts, snapshot_expval_labels,
snapshot_expval_post_meas_values, snapshot_expval_pre_meas_values)
class QasmSnapshotStatevectorTests:
    """QasmSimulator snapshot statevector tests."""

    SIMULATOR = QasmSimulator()
    SUPPORTED_QASM_METHODS = [
        'automatic', 'statevector', 'statevector_gpu', 'statevector_thrust',
        'matrix_product_state'
    ]
    BACKEND_OPTS = {}

    def statevector_snapshots(self, data, label):
        """Return the list of statevector snapshots stored under ``label``.

        Args:
            data (dict): result data for a single experiment.
            label (str): the snapshot label to extract.

        Returns:
            list: statevectors as np.ndarray, in shot order.
        """
        # Missing levels default to empty containers.
        snaps = data.get("snapshots", {}).get("statevector", {}).get(label, [])
        statevecs = []
        for snap in snaps:
            # Values should already be deserialized to ndarrays by Result.
            self.assertIsInstance(snap, np.ndarray)
            statevecs.append(snap)
        return statevecs

    def test_snapshot_statevector_pre_measure_det(self):
        """Test statevector snapshot before deterministic final measurement."""
        shots = 10
        label = "snap"
        counts_targets = snapshot_state_counts_deterministic(shots)
        statevec_targets = snapshot_state_pre_measure_statevector_deterministic(
        )
        circuits = snapshot_state_circuits_deterministic(label,
                                                         'statevector',
                                                         post_measure=False)
        qobj = assemble(circuits, self.SIMULATOR, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotStatevectorTests.SUPPORTED_QASM_METHODS:
            # Unsupported methods must report failure, not bad data.
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result, circuits, counts_targets, delta=0)
            for j, circuit in enumerate(circuits):
                data = result.data(circuit)
                snaps = self.statevector_snapshots(data, label)
                # BUG FIX: the original `assertTrue(len(snaps), 1)` passed
                # `1` as the assertion *message*, so only non-emptiness was
                # really checked.  Assert that explicitly.
                self.assertTrue(len(snaps) >= 1)
                target = statevec_targets[j]
                value = snaps[0]
                self.assertTrue(np.allclose(value, target))

    def test_snapshot_statevector_pre_measure_nondet(self):
        """Test statevector snapshot before non-deterministic measurement."""
        shots = 100
        label = "snap"
        counts_targets = snapshot_state_counts_nondeterministic(shots)
        statevec_targets = snapshot_state_pre_measure_statevector_nondeterministic(
        )
        circuits = snapshot_state_circuits_nondeterministic(label,
                                                            'statevector',
                                                            post_measure=False)
        qobj = assemble(circuits, self.SIMULATOR, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotStatevectorTests.SUPPORTED_QASM_METHODS:
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result,
                                circuits,
                                counts_targets,
                                delta=0.2 * shots)
            for j, circuit in enumerate(circuits):
                data = result.data(circuit)
                snaps = self.statevector_snapshots(data, label)
                # BUG FIX: see pre_measure_det above.
                self.assertTrue(len(snaps) >= 1)
                target = statevec_targets[j]
                value = snaps[0]
                self.assertTrue(np.allclose(value, target))

    def test_snapshot_statevector_post_measure_det(self):
        """Test statevector snapshot after deterministic final measurement."""
        shots = 10
        label = "snap"
        counts_targets = snapshot_state_counts_deterministic(shots)
        statevec_targets = snapshot_state_post_measure_statevector_deterministic(
        )
        circuits = snapshot_state_circuits_deterministic(label,
                                                         'statevector',
                                                         post_measure=True)
        qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotStatevectorTests.SUPPORTED_QASM_METHODS:
            # NOTE(review): this mutes the *root* logger for all subsequent
            # tests in the process — a global side effect; confirm intended.
            logging.getLogger().setLevel(logging.CRITICAL)
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result, circuits, counts_targets, delta=0)
            # Each shot's memory value selects the matching post-measure
            # statevector target; snapshots are in shot order.
            for i, circuit in enumerate(circuits):
                data = result.data(circuit)
                snaps = self.statevector_snapshots(data, label)
                for j, mem in enumerate(data['memory']):
                    target = statevec_targets[i].get(mem)
                    self.assertTrue(np.allclose(snaps[j], target))

    def test_snapshot_statevector_post_measure_nondet(self):
        """Test statevector snapshot after non-deterministic measurement."""
        shots = 100
        label = "snap"
        counts_targets = snapshot_state_counts_nondeterministic(shots)
        statevec_targets = snapshot_state_post_measure_statevector_nondeterministic(
        )
        circuits = snapshot_state_circuits_nondeterministic(label,
                                                            'statevector',
                                                            post_measure=True)
        qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotStatevectorTests.SUPPORTED_QASM_METHODS:
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result,
                                circuits,
                                counts_targets,
                                delta=0.2 * shots)
            for i, circuit in enumerate(circuits):
                data = result.data(circuit)
                snaps = self.statevector_snapshots(data, label)
                for j, mem in enumerate(data['memory']):
                    target = statevec_targets[i].get(mem)
                    self.assertTrue(np.allclose(snaps[j], target))
class QasmSnapshotStabilizerTests:
    """QasmSimulator snapshot stabilizer tests."""
    SIMULATOR = QasmSimulator()
    # Stabilizer snapshots only exist for the Clifford-based method.
    SUPPORTED_QASM_METHODS = ['automatic', 'stabilizer']
    BACKEND_OPTS = {}
    @staticmethod
    def stabilizer_snapshots(data, label):
        """Return the list of stabilizer snapshots stored under ``label``.

        Missing levels default to empty containers so an absent snapshot
        yields an empty list rather than raising KeyError.
        """
        return data.get("snapshots", {}).get("stabilizer", {}).get(label, [])
    @staticmethod
    def stabilizes_statevector(stabilizer, statevector):
        """Return True iff ``statevector`` is a +1 eigenstate of every
        Pauli label in ``stabilizer`` (labels may carry a leading '-')."""
        # For each stabilizer check <psi|P|psi> == 1
        for stab in stabilizer:
            if stab[0] == '-':
                # Leading '-' means the negated Pauli operator.
                pauli_mat = -1 * Pauli.from_label(stab[1:]).to_matrix()
            else:
                pauli_mat = Pauli.from_label(stab).to_matrix()
            # Expectation value <psi|P|psi>; must be (close to) +1.
            val = statevector.conj().dot(pauli_mat.dot(statevector))
            if not np.isclose(val, 1):
                return False
        return True
    def test_snapshot_stabilizer_pre_measure_det(self):
        """Test stabilizer snapshot before deterministic final measurement."""
        shots = 10
        label = "snap"
        counts_targets = snapshot_state_counts_deterministic(shots)
        statevec_targets = snapshot_state_pre_measure_statevector_deterministic(
        )
        circuits = snapshot_state_circuits_deterministic(label,
                                                         'stabilizer',
                                                         post_measure=False)
        qobj = assemble(circuits, self.SIMULATOR, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotStabilizerTests.SUPPORTED_QASM_METHODS:
            # Unsupported methods must report failure, not bad data.
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result, circuits, counts_targets, delta=0)
            for j, circuit in enumerate(circuits):
                data = result.data(circuit)
                snaps = self.stabilizer_snapshots(data, label)
                self.assertEqual(len(snaps), 1)
                # Verify the snapshot group stabilizes the target state.
                statevec = statevec_targets[j]
                stabilizer = snaps[0]
                self.assertTrue(
                    self.stabilizes_statevector(stabilizer, statevec))
    def test_snapshot_stabilizer_pre_measure_nondet(self):
        """Test stabilizer snapshot before non-deterministic measurement."""
        shots = 100
        label = "snap"
        counts_targets = snapshot_state_counts_nondeterministic(shots)
        statevec_targets = snapshot_state_pre_measure_statevector_nondeterministic(
        )
        circuits = snapshot_state_circuits_nondeterministic(label,
                                                            'stabilizer',
                                                            post_measure=False)
        qobj = assemble(circuits, self.SIMULATOR, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotStabilizerTests.SUPPORTED_QASM_METHODS:
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result,
                                circuits,
                                counts_targets,
                                delta=0.2 * shots)
            for j, circuit in enumerate(circuits):
                data = result.data(circuit)
                snaps = self.stabilizer_snapshots(data, label)
                self.assertEqual(len(snaps), 1)
                statevec = statevec_targets[j]
                stabilizer = snaps[0]
                self.assertTrue(
                    self.stabilizes_statevector(stabilizer, statevec))
    def test_snapshot_stabilizer_post_measure_det(self):
        """Test stabilizer snapshot after deterministic final measurement."""
        shots = 10
        label = "snap"
        counts_targets = snapshot_state_counts_deterministic(shots)
        statevec_targets = snapshot_state_post_measure_statevector_deterministic(
        )
        circuits = snapshot_state_circuits_deterministic(label,
                                                         'stabilizer',
                                                         post_measure=True)
        qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotStabilizerTests.SUPPORTED_QASM_METHODS:
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result, circuits, counts_targets, delta=0)
            # Each shot's memory value selects the matching post-measure
            # target state; snapshots are in shot order.
            for i, circuit in enumerate(circuits):
                data = result.data(circuit)
                snaps = self.stabilizer_snapshots(data, label)
                for j, mem in enumerate(data['memory']):
                    statevec = statevec_targets[i].get(mem)
                    stabilizer = snaps[j]
                    self.assertTrue(
                        self.stabilizes_statevector(stabilizer, statevec))
    def test_snapshot_stabilizer_post_measure_nondet(self):
        """Test stabilizer snapshot after non-deterministic measurement."""
        shots = 100
        label = "snap"
        counts_targets = snapshot_state_counts_nondeterministic(shots)
        statevec_targets = snapshot_state_post_measure_statevector_nondeterministic(
        )
        circuits = snapshot_state_circuits_nondeterministic(label,
                                                            'stabilizer',
                                                            post_measure=True)
        qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotStabilizerTests.SUPPORTED_QASM_METHODS:
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result,
                                circuits,
                                counts_targets,
                                delta=0.2 * shots)
            for i, circuit in enumerate(circuits):
                data = result.data(circuit)
                snaps = self.stabilizer_snapshots(data, label)
                for j, mem in enumerate(data['memory']):
                    statevec = statevec_targets[i].get(mem)
                    stabilizer = snaps[j]
                    self.assertTrue(
                        self.stabilizes_statevector(stabilizer, statevec))
class QasmSnapshotDensityMatrixTests:
    """QasmSimulator snapshot density matrix tests."""

    SIMULATOR = QasmSimulator()
    # Density-matrix snapshots are only implemented by these methods.
    SUPPORTED_QASM_METHODS = [
        'automatic', 'density_matrix', 'density_matrix_gpu',
        'density_matrix_thrust'
    ]
    BACKEND_OPTS = {}

    def density_snapshots(self, data, label):
        """Format snapshots as a dict of Numpy arrays keyed by memory value.

        Args:
            data (dict): result data for a single experiment.
            label (str): the snapshot label to extract.

        Returns:
            dict: mapping of classical-memory hex string to density matrix.
        """
        # Missing levels default to empty containers.
        snaps = data.get("snapshots", {}).get("density_matrix",
                                              {}).get(label, [])
        output = {}
        for snap_dict in snaps:
            memory = snap_dict['memory']
            # Values should already be deserialized to ndarrays by Result.
            self.assertIsInstance(snap_dict['value'], np.ndarray)
            output[memory] = snap_dict['value']
        return output

    def test_snapshot_density_matrix_pre_measure_det(self):
        """Test density matrix snapshot before deterministic measurement."""
        shots = 10
        label = "snap"
        counts_targets = snapshot_state_counts_deterministic(shots)
        statevec_targets = snapshot_state_pre_measure_statevector_deterministic(
        )
        circuits = snapshot_state_circuits_deterministic(label,
                                                         'density_matrix',
                                                         post_measure=False)
        qobj = assemble(circuits, self.SIMULATOR, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotDensityMatrixTests.SUPPORTED_QASM_METHODS:
            # Unsupported methods must report failure, not bad data.
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result, circuits, counts_targets, delta=0)
            for j, circuit in enumerate(circuits):
                data = result.data(circuit)
                snaps = self.density_snapshots(data, label)
                # BUG FIX: the original `assertTrue(len(snaps), 1)` passed
                # `1` as the assertion *message*; assert non-empty instead.
                self.assertTrue(len(snaps) >= 1)
                # Target is the projector onto the pre-measure statevector.
                target = np.outer(statevec_targets[j],
                                  statevec_targets[j].conj())
                # Pre-measurement all memory bits are 0.
                value = snaps.get('0x0')
                self.assertTrue(np.allclose(value, target))

    def test_snapshot_density_matrix_pre_measure_nondet(self):
        """Test density matrix snapshot before non-deterministic measurement."""
        shots = 100
        label = "snap"
        counts_targets = snapshot_state_counts_nondeterministic(shots)
        statevec_targets = snapshot_state_pre_measure_statevector_nondeterministic(
        )
        circuits = snapshot_state_circuits_nondeterministic(label,
                                                            'density_matrix',
                                                            post_measure=False)
        qobj = assemble(circuits, self.SIMULATOR, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotDensityMatrixTests.SUPPORTED_QASM_METHODS:
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result,
                                circuits,
                                counts_targets,
                                delta=0.2 * shots)
            for j, circuit in enumerate(circuits):
                data = result.data(circuit)
                snaps = self.density_snapshots(data, label)
                # BUG FIX: see pre_measure_det above.
                self.assertTrue(len(snaps) >= 1)
                target = np.outer(statevec_targets[j],
                                  statevec_targets[j].conj())
                value = snaps.get('0x0')
                self.assertTrue(np.allclose(value, target))

    def test_snapshot_density_matrix_post_measure_det(self):
        """Test density matrix snapshot after deterministic measurement."""
        shots = 10
        label = "snap"
        counts_targets = snapshot_state_counts_deterministic(shots)
        statevec_targets = snapshot_state_post_measure_statevector_deterministic(
        )
        circuits = snapshot_state_circuits_deterministic(label,
                                                         'density_matrix',
                                                         post_measure=True)
        qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotDensityMatrixTests.SUPPORTED_QASM_METHODS:
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result, circuits, counts_targets, delta=0)
            # Each shot's memory value selects the matching post-measure
            # target (unused enumerate index removed from the original).
            for i, circuit in enumerate(circuits):
                data = result.data(circuit)
                snaps = self.density_snapshots(data, label)
                for mem in data['memory']:
                    target = statevec_targets[i].get(mem)
                    target = np.outer(target, target.conj())
                    value = snaps.get(mem)
                    self.assertTrue(np.allclose(value, target))

    def test_snapshot_density_matrix_post_measure_nondet(self):
        """Test density matrix snapshot after non-deterministic measurement."""
        shots = 100
        label = "snap"
        counts_targets = snapshot_state_counts_nondeterministic(shots)
        statevec_targets = snapshot_state_post_measure_statevector_nondeterministic(
        )
        circuits = snapshot_state_circuits_nondeterministic(label,
                                                            'density_matrix',
                                                            post_measure=True)
        qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotDensityMatrixTests.SUPPORTED_QASM_METHODS:
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result,
                                circuits,
                                counts_targets,
                                delta=0.2 * shots)
            for i, circuit in enumerate(circuits):
                data = result.data(circuit)
                snaps = self.density_snapshots(data, label)
                for mem in data['memory']:
                    target = statevec_targets[i].get(mem)
                    target = np.outer(target, target.conj())
                    value = snaps.get(mem)
                    self.assertTrue(np.allclose(value, target))
class QasmSnapshotProbabilitiesTests:
    """QasmSimulator snapshot probabilities tests."""

    SIMULATOR = QasmSimulator()
    SUPPORTED_QASM_METHODS = [
        'automatic',
        'statevector',
        'statevector_gpu',
        'statevector_thrust',
        'stabilizer',
        'density_matrix',
        'density_matrix_gpu',
        'density_matrix_thrust',
        'matrix_product_state',
    ]
    BACKEND_OPTS = {}

    @staticmethod
    def probability_snapshots(data, labels):
        """Format snapshots as nested dicts.

        Args:
            data (dict): result data for a single experiment.
            labels (list): snapshot labels to extract.

        Returns:
            dict: ``{label: {memory_hex: probabilities_dict}}``.
        """
        output = {}
        for label in labels:
            # Missing levels default to empty containers.
            snaps = data.get("snapshots", {}).get("probabilities",
                                                  {}).get(label, [])
            output[label] = {
                snap_dict['memory']: snap_dict['value']
                for snap_dict in snaps
            }
        return output

    def test_snapshot_probabilities_pre_measure(self):
        """Test snapshot probabilities before final measurement."""
        shots = 1000
        labels = list(snapshot_probabilities_labels_qubits().keys())
        counts_targets = snapshot_probabilities_counts(shots)
        prob_targets = snapshot_probabilities_pre_meas_probs()
        circuits = snapshot_probabilities_circuits(post_measure=False)
        qobj = assemble(circuits, self.SIMULATOR, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotProbabilitiesTests.SUPPORTED_QASM_METHODS:
            # Unsupported methods must report failure, not bad data.
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result,
                                circuits,
                                counts_targets,
                                delta=0.1 * shots)
            for j, circuit in enumerate(circuits):
                data = result.data(circuit)
                all_snapshots = self.probability_snapshots(data, labels)
                for label in labels:
                    snaps = all_snapshots.get(label, {})
                    # BUG FIX: the original `assertTrue(len(snaps), 1)`
                    # passed `1` as the assertion *message*; assert
                    # non-emptiness explicitly instead.
                    self.assertTrue(len(snaps) >= 1)
                    for memory, value in snaps.items():
                        target = prob_targets[j].get(label, {}).get(memory, {})
                        self.assertDictAlmostEqual(value, target, delta=1e-7)

    def test_snapshot_probabilities_post_measure(self):
        """Test snapshot probabilities after final measurement."""
        shots = 1000
        labels = list(snapshot_probabilities_labels_qubits().keys())
        counts_targets = snapshot_probabilities_counts(shots)
        prob_targets = snapshot_probabilities_post_meas_probs()
        circuits = snapshot_probabilities_circuits(post_measure=True)
        qobj = assemble(circuits, self.SIMULATOR, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotProbabilitiesTests.SUPPORTED_QASM_METHODS:
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result,
                                circuits,
                                counts_targets,
                                delta=0.1 * shots)
            # Post-measure may record several memory outcomes per label, so
            # no length assertion is made here.
            for j, circuit in enumerate(circuits):
                data = result.data(circuit)
                all_snapshots = self.probability_snapshots(data, labels)
                for label in labels:
                    snaps = all_snapshots.get(label, {})
                    for memory, value in snaps.items():
                        target = prob_targets[j].get(label, {}).get(memory, {})
                        self.assertDictAlmostEqual(value, target, delta=1e-7)
class QasmSnapshotExpValPauliTests:
    """QasmSimulator tests for Pauli-operator expectation-value snapshots.

    Mixin expected to be combined with a unittest.TestCase subclass that
    provides ``compare_counts`` and the standard assert methods.
    """
    SIMULATOR = QasmSimulator()
    SUPPORTED_QASM_METHODS = [
        'automatic', 'statevector', 'statevector_gpu', 'statevector_thrust',
        'density_matrix', 'density_matrix_gpu', 'density_matrix_thrust',
        'matrix_product_state', 'stabilizer'
    ]
    BACKEND_OPTS = {}

    @staticmethod
    def expval_snapshots(data, labels):
        """Return {label: {memory_hex: expectation_value}} from result data."""
        output = {}
        for label in labels:
            snaps = data.get("snapshots", {}).get("expectation_value",
                                                  {}).get(label, [])
            inner = {}
            for snap_dict in snaps:
                val = snap_dict['value']
                inner[snap_dict['memory']] = val
            output[label] = inner
        return output

    def test_snapshot_expval_pauli_pre_measure(self):
        """Test Pauli expectation-value snapshots before final measurement."""
        shots = 1000
        labels = snapshot_expval_labels()
        counts_targets = snapshot_expval_counts(shots)
        value_targets = snapshot_expval_pre_meas_values()
        circuits = snapshot_expval_circuits(pauli=True, post_measure=False)
        qobj = assemble(circuits, self.SIMULATOR, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotExpValPauliTests.SUPPORTED_QASM_METHODS:
            # Unsupported simulation methods must flag the job as failed.
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result,
                                circuits,
                                counts_targets,
                                delta=0.1 * shots)
            for j, circuit in enumerate(circuits):
                data = result.data(circuit)
                all_snapshots = self.expval_snapshots(data, labels)
                for label in labels:
                    snaps = all_snapshots.get(label, {})
                    # BUG FIX: the original ``assertTrue(len(snaps), 1)``
                    # passed ``1`` as the failure *message*, so it only
                    # checked truthiness; make the non-empty check explicit.
                    self.assertGreaterEqual(len(snaps), 1)
                    for memory, value in snaps.items():
                        target = value_targets[j].get(label,
                                                      {}).get(memory, {})
                        self.assertAlmostEqual(value, target, delta=1e-7)

    def test_snapshot_expval_pauli_post_measure(self):
        """Test Pauli expectation-value snapshots after final measurement."""
        shots = 1000
        labels = snapshot_expval_labels()
        counts_targets = snapshot_expval_counts(shots)
        value_targets = snapshot_expval_post_meas_values()
        circuits = snapshot_expval_circuits(pauli=True, post_measure=True)
        qobj = assemble(circuits, self.SIMULATOR, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotExpValPauliTests.SUPPORTED_QASM_METHODS:
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result,
                                circuits,
                                counts_targets,
                                delta=0.1 * shots)
            for j, circuit in enumerate(circuits):
                data = result.data(circuit)
                all_snapshots = self.expval_snapshots(data, labels)
                for label in labels:
                    snaps = all_snapshots.get(label, {})
                    # BUG FIX: see pre-measure test — ``1`` was a message,
                    # not an expected value.
                    self.assertGreaterEqual(len(snaps), 1)
                    for memory, value in snaps.items():
                        target = value_targets[j].get(label,
                                                      {}).get(memory, {})
                        self.assertAlmostEqual(value, target, delta=1e-7)
class QasmSnapshotExpvalPauliNCTests:
    """QasmSimulator tests for Pauli expval snapshots on non-contiguous qubits.

    Mixin expected to be combined with a unittest.TestCase subclass.
    NOTE(review): unlike the sibling classes, ``SUPPORTED_QASM_METHODS`` is
    never consulted here — ``general_test`` asserts success unconditionally.
    Confirm whether unsupported methods should be skipped instead.
    """
    SIMULATOR = QasmSimulator()
    SUPPORTED_QASM_METHODS = [
        'automatic', 'statevector', 'statevector_gpu', 'statevector_thrust',
        'density_matrix', 'density_matrix_gpu', 'density_matrix_thrust',
        'matrix_product_state',
    ]
    BACKEND_OPTS = {}
    def general_test(self, pauli, num_qubits=None, seed=None):
        """Prepare a random product state, snapshot <pauli>, and compare the
        simulator's expectation value with the exact density-matrix result.
        """
        pauli_qubits = list(range(len(pauli)))
        if num_qubits is None:
            num_qubits = len(pauli_qubits)
        # Random single-qubit rotations define the initial product state.
        rng = np.random.default_rng(seed)
        params = rng.uniform(-1, 1, size=(num_qubits, 3))
        init_circ = QuantumCircuit(num_qubits)
        for i, par in enumerate(params):
            # NOTE(review): QuantumCircuit.u3 is deprecated in recent
            # Terra releases in favour of ``u`` — confirm target version.
            init_circ.u3(*par, i)
        # Exact reference value: Tr[rho * P] on the selected qubits.
        rho = DensityMatrix.from_instruction(init_circ)
        op = Operator.from_label(pauli)
        target = np.trace(Operator(rho).compose(op, pauli_qubits).data)
        qc = init_circ.copy()
        qc.snapshot_expectation_value('final', [(1, pauli)], pauli_qubits)
        qobj = assemble(qc)
        result = self.SIMULATOR.run(
            qobj, backend_options=self.BACKEND_OPTS).result()
        self.assertTrue(getattr(result, 'success', False))
        snapshots = result.data(0).get('snapshots', {})
        self.assertIn('expectation_value', snapshots)
        self.assertIn('final', snapshots['expectation_value'])
        expval = snapshots.get('expectation_value', {})['final'][0]['value']
        self.assertAlmostEqual(expval, target)
    def test_pauli1(self):
        """Exhaustive check of all 1-qubit Pauli labels."""
        seed = 100
        for tup in ['I', 'X', 'Y', 'Z']:
            pauli = ''.join(reversed(tup))
            with self.subTest(msg='Pauli {}'.format(pauli)):
                self.general_test(pauli, num_qubits=3, seed=seed)
    def test_pauli2(self):
        """Exhaustive check of all 2-qubit Pauli labels."""
        seed = 100
        for tup in it.product(['I', 'X', 'Y', 'Z'], repeat=2):
            pauli = ''.join(reversed(tup))
            with self.subTest(msg='Pauli {}'.format(pauli)):
                self.general_test(pauli, num_qubits=3, seed=seed)
    def test_pauli3(self):
        """Exhaustive check of all 3-qubit Pauli labels."""
        seed = 100
        for tup in it.product(['I', 'X', 'Y', 'Z'], repeat=3):
            pauli = ''.join(reversed(tup))
            with self.subTest(msg='Pauli {}'.format(pauli)):
                self.general_test(pauli, num_qubits=3, seed=seed)
class QasmSnapshotExpValMatrixTests:
    """QasmSimulator tests for matrix-operator expectation-value snapshots.

    Mixin expected to be combined with a unittest.TestCase subclass that
    provides ``compare_counts`` and the standard assert methods.
    """
    SIMULATOR = QasmSimulator()
    SUPPORTED_QASM_METHODS = [
        'automatic', 'statevector', 'statevector_gpu', 'statevector_thrust',
        'matrix_product_state'
    ]
    BACKEND_OPTS = {}

    @staticmethod
    def expval_snapshots(data, labels):
        """Return {label: {memory_hex: expectation_value}} from result data."""
        output = {}
        for label in labels:
            snaps = data.get("snapshots", {}).get("expectation_value",
                                                  {}).get(label, [])
            inner = {}
            for snap_dict in snaps:
                inner[snap_dict['memory']] = snap_dict['value']
            output[label] = inner
        return output

    def test_snapshot_expval_matrix_pre_measure(self):
        """Test matrix expectation-value snapshots before final measurement."""
        shots = 1000
        labels = snapshot_expval_labels()
        counts_targets = snapshot_expval_counts(shots)
        value_targets = snapshot_expval_pre_meas_values()
        circuits = snapshot_expval_circuits(pauli=False, post_measure=False)
        qobj = assemble(circuits, self.SIMULATOR, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotExpValMatrixTests.SUPPORTED_QASM_METHODS:
            # Unsupported simulation methods must flag the job as failed.
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result,
                                circuits,
                                counts_targets,
                                delta=0.1 * shots)
            for j, circuit in enumerate(circuits):
                data = result.data(circuit)
                all_snapshots = self.expval_snapshots(data, labels)
                for label in labels:
                    snaps = all_snapshots.get(label, {})
                    # BUG FIX: the original ``assertTrue(len(snaps), 1)``
                    # passed ``1`` as the failure *message*, so it only
                    # checked truthiness; make the non-empty check explicit.
                    self.assertGreaterEqual(len(snaps), 1)
                    for memory, value in snaps.items():
                        target = value_targets[j].get(label,
                                                      {}).get(memory, {})
                        self.assertAlmostEqual(value, target, delta=1e-7)

    def test_snapshot_expval_matrix_post_measure(self):
        """Test matrix expectation-value snapshots after final measurement."""
        shots = 1000
        labels = snapshot_expval_labels()
        counts_targets = snapshot_expval_counts(shots)
        value_targets = snapshot_expval_post_meas_values()
        circuits = snapshot_expval_circuits(pauli=False, post_measure=True)
        qobj = assemble(circuits, self.SIMULATOR, shots=shots)
        job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)
        result = job.result()
        success = getattr(result, 'success', False)
        method = self.BACKEND_OPTS.get('method', 'automatic')
        if method not in QasmSnapshotExpValMatrixTests.SUPPORTED_QASM_METHODS:
            self.assertFalse(success)
        else:
            self.assertTrue(success)
            self.compare_counts(result,
                                circuits,
                                counts_targets,
                                delta=0.1 * shots)
            for j, circuit in enumerate(circuits):
                data = result.data(circuit)
                all_snapshots = self.expval_snapshots(data, labels)
                for label in labels:
                    snaps = all_snapshots.get(label, {})
                    # BUG FIX: see pre-measure test — ``1`` was a message,
                    # not an expected value.
                    self.assertGreaterEqual(len(snaps), 1)
                    for memory, value in snaps.items():
                        target = value_targets[j].get(label,
                                                      {}).get(memory, {})
                        self.assertAlmostEqual(value, target, delta=1e-7)
| true | true |
f717dab74980995a0147cf18b683ffeabedb256b | 9,351 | py | Python | samoyed_ts/nmt.py | oshiooshi/cirneco | f71f1cd583bf6e290d7b8e74f148f06cadd39d63 | [
"MIT"
] | null | null | null | samoyed_ts/nmt.py | oshiooshi/cirneco | f71f1cd583bf6e290d7b8e74f148f06cadd39d63 | [
"MIT"
] | null | null | null | samoyed_ts/nmt.py | oshiooshi/cirneco | f71f1cd583bf6e290d7b8e74f148f06cadd39d63 | [
"MIT"
] | 13 | 2021-07-01T07:58:30.000Z | 2021-09-09T16:52:22.000Z | import torch
# import torchtext
import torch.nn as nn
# from torchtext.vocab import Vocab, build_vocab_from_iterator
# from torchtext.utils import unicode_csv_reader
# from torchtext.data.datasets_utils import _RawTextIterableDataset
from torch import Tensor
from typing import Iterable, List
# import sentencepiece as spm
# import io
import math
import vocab
# Fix RNG seeds for reproducible training/decoding runs.
SEED = 1234
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
# Special-token vocabulary indices.
UNK_IDX, PAD_IDX, SOS_IDX, EOS_IDX = 0, 1, 2, 3
special_symbols = ['<unk>', '<pad>', '<sos>', '<eos>', '<blk>', '</blk>', '<sep>']
# Maximum number of tokens kept per sentence.
MAX_LEN=80
# sp = spm.SentencePieceProcessor(model_file='corpus_Python-JPN/p3/p3.model')
# def jpn_tokenizer(text):
# ss = [tok.replace('▁', '') for tok in sp.encode(text, out_type=str)][:MAX_LEN]
# return [s for s in ss if len(s) != 0]
# def py_tokenizer(text):
# return [tok for tok in text.split()][:MAX_LEN]
from torch.nn.utils.rnn import pad_sequence
# Helper that chains several transforms into a single callable.
def sequential_transforms(*transforms):
    """Compose *transforms* into one callable applied left to right."""
    def composed(value):
        for step in transforms:
            value = step(value)
        return value
    return composed
# Add SOS/EOS tokens and build an index tensor from the input sequence.
def tensor_transform(token_ids: List[int]):
    """Wrap a token-id sequence with <sos>/<eos> markers as a 1-D tensor."""
    pieces = (
        torch.tensor([SOS_IDX]),
        torch.tensor(token_ids),
        torch.tensor([EOS_IDX]),
    )
    return torch.cat(pieces)
## Transformer の定義
from torch.nn import (TransformerEncoder, TransformerDecoder,
TransformerEncoderLayer, TransformerDecoderLayer)
class PositionalEncoding(nn.Module):
    """Add sinusoidal position information to token embeddings (Vaswani et al.)."""

    def __init__(self,
                 emb_size: int,
                 dropout: float,
                 maxlen: int = 5000):
        super(PositionalEncoding, self).__init__()
        # One frequency per even embedding dimension.
        freqs = torch.exp(-math.log(10000) * torch.arange(0, emb_size, 2) / emb_size)
        positions = torch.arange(0, maxlen).reshape(maxlen, 1)
        table = torch.zeros((maxlen, emb_size))
        table[:, 0::2] = torch.sin(positions * freqs)
        table[:, 1::2] = torch.cos(positions * freqs)
        # Insert a broadcastable batch axis: (maxlen, 1, emb_size).
        table = table.unsqueeze(-2)
        self.dropout = nn.Dropout(dropout)
        self.register_buffer('pos_embedding', table)

    def forward(self, token_embedding: Tensor):
        """Return dropout(embedding + positional encoding) for the sequence."""
        seq_len = token_embedding.size(0)
        return self.dropout(token_embedding + self.pos_embedding[:seq_len, :])
class TokenEmbedding(nn.Module):
    """Token-id lookup scaled by sqrt(emb_size), as in the Transformer paper."""

    def __init__(self, vocab_size: int, emb_size):
        super(TokenEmbedding, self).__init__()
        self.embedding = nn.Embedding(vocab_size, emb_size)
        self.emb_size = emb_size

    def forward(self, tokens: Tensor):
        """Embed *tokens* (any integer dtype) and apply the sqrt scaling."""
        scale = math.sqrt(self.emb_size)
        return self.embedding(tokens.long()) * scale
class Seq2SeqTransformer(nn.Module):
    """Encoder-decoder Transformer for sequence-to-sequence translation.

    Wires together token embeddings, positional encodings, a Transformer
    encoder/decoder stack, and a linear generator over the target vocab.
    """
    def __init__(self,
                 num_encoder_layers: int,
                 num_decoder_layers: int,
                 emb_size: int,
                 nhead: int,
                 src_vocab_size: int,
                 tgt_vocab_size: int,
                 dim_feedforward: int = 512,
                 dropout: float = 0.1):
        super(Seq2SeqTransformer, self).__init__()
        encoder_layer = TransformerEncoderLayer(d_model=emb_size, nhead=nhead,
                                                dim_feedforward=dim_feedforward)
        self.transformer_encoder = TransformerEncoder(encoder_layer, num_layers=num_encoder_layers)
        decoder_layer = TransformerDecoderLayer(d_model=emb_size, nhead=nhead,
                                                dim_feedforward=dim_feedforward)
        self.transformer_decoder = TransformerDecoder(decoder_layer, num_layers=num_decoder_layers)
        # Projects decoder output onto target-vocabulary logits.
        self.generator = nn.Linear(emb_size, tgt_vocab_size)
        self.src_tok_emb = TokenEmbedding(src_vocab_size, emb_size)
        self.tgt_tok_emb = TokenEmbedding(tgt_vocab_size, emb_size)
        self.positional_encoding = PositionalEncoding(emb_size, dropout=dropout)
    def forward(self,
                src: Tensor,
                tgt: Tensor,
                src_mask: Tensor,
                tgt_mask: Tensor,
                src_padding_mask: Tensor,
                tgt_padding_mask: Tensor,
                memory_key_padding_mask: Tensor):
        """Full encode-decode pass; returns target-vocabulary logits."""
        src_emb = self.positional_encoding(self.src_tok_emb(src))
        tgt_emb = self.positional_encoding(self.tgt_tok_emb(tgt))
        memory = self.transformer_encoder(src_emb, src_mask, src_padding_mask)
        outs = self.transformer_decoder(tgt_emb, memory, tgt_mask, None,
                                        tgt_padding_mask, memory_key_padding_mask)
        return self.generator(outs)
    def encode(self, src: Tensor, src_mask: Tensor):
        """Encoder-only pass (used for incremental greedy decoding)."""
        return self.transformer_encoder(self.positional_encoding(
            self.src_tok_emb(src)), src_mask)
    def decode(self, tgt: Tensor, memory: Tensor, tgt_mask: Tensor):
        """Decoder-only pass over precomputed encoder *memory*."""
        return self.transformer_decoder(self.positional_encoding(
            self.tgt_tok_emb(tgt)), memory,
            tgt_mask)
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
### Masking
## Two helper functions build the different masks used by the model.
# Mask that stops the model from attending to future tokens when predicting.
def generate_square_subsequent_mask(sz):
    """Return a (sz, sz) causal mask: 0.0 on/below the diagonal, -inf above."""
    allowed = torch.tril(torch.ones((sz, sz), device=DEVICE)) == 1
    out = torch.zeros((sz, sz), device=DEVICE)
    return out.masked_fill(~allowed, float('-inf'))
# Masks that hide padding tokens in the source and target sequences.
def create_mask(src, tgt):
    """Return (src_mask, tgt_mask, src_padding_mask, tgt_padding_mask)."""
    n_src, n_tgt = src.shape[0], tgt.shape[0]
    # Causal mask for the target; the source may attend everywhere.
    tgt_mask = generate_square_subsequent_mask(n_tgt)
    src_mask = torch.zeros((n_src, n_src), device=DEVICE).type(torch.bool)
    # Padding masks are (batch, seq) — hence the transpose.
    src_pad_mask = (src == PAD_IDX).transpose(0, 1)
    tgt_pad_mask = (tgt == PAD_IDX).transpose(0, 1)
    return src_mask, tgt_mask, src_pad_mask, tgt_pad_mask
def greedy_decode(model, src, src_mask, max_len, beamsize, start_symbol):
    """Greedy autoregressive decoding up to *max_len* tokens.

    NOTE(review): ``beamsize`` only widens the topk call — the first
    (highest-probability) candidate is always taken, so this is pure
    greedy search, not beam search.
    """
    src = src.to(DEVICE)
    src_mask = src_mask.to(DEVICE)
    memory = model.encode(src, src_mask)
    ys = torch.ones(1, 1).fill_(start_symbol).type(torch.long).to(DEVICE)
    for i in range(max_len-1):
        memory = memory.to(DEVICE)
        tgt_mask = (generate_square_subsequent_mask(ys.size(0))
                    .type(torch.bool)).to(DEVICE)
        out = model.decode(ys, memory, tgt_mask)
        out = out.transpose(0, 1)
        prob = model.generator(out[:, -1])  # logits, shape (1, tgt_vocab_size)
        next_prob, next_word = prob.topk(k=beamsize, dim=1)
        next_word = next_word[:, 0]  # greedy: pick the most probable token
        next_word = next_word.item()  # 0-d tensor -> Python int
        ys = torch.cat([ys,
                        torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=0)
        if next_word == EOS_IDX:
            break
    return ys
class NMT(object):
    """Neural machine translation wrapper: vocabularies + Seq2SeqTransformer.

    Translates from *src_vocab* (default: 'kujira') into *tgt_vocab*
    (default: 'python') using greedy decoding.
    """
    src_vocab: object
    tgt_vocab: object
    def __init__(self, src_vocab='kujira', tgt_vocab='python'):
        self.src_vocab = vocab.load_vocab(src_vocab)
        self.tgt_vocab = vocab.load_vocab(tgt_vocab)
        tokenizer = vocab.tokenizer_from_vocab(self.src_vocab)
        self.src_transform = sequential_transforms(tokenizer,  # Tokenization
                                                   self.src_vocab,  # Numericalization
                                                   tensor_transform)  # Add SOS/EOS and create tensor
        # Model hyper-parameters.
        self.SRC_VOCAB_SIZE = len(self.src_vocab)
        self.TGT_VOCAB_SIZE = len(self.tgt_vocab)
        self.EMB_SIZE = 512  # matches BERT's hidden size
        self.NHEAD = 8
        self.FFN_HID_DIM = 512
        self.BATCH_SIZE = 128
        self.NUM_ENCODER_LAYERS = 3
        self.NUM_DECODER_LAYERS = 3
        # Build the seq2seq transformer.
        self.transformer = Seq2SeqTransformer(self.NUM_ENCODER_LAYERS, self.NUM_DECODER_LAYERS,
                                              self.EMB_SIZE, self.NHEAD, self.SRC_VOCAB_SIZE, self.TGT_VOCAB_SIZE,
                                              self.FFN_HID_DIM)
        # Xavier-initialise every weight matrix (dim > 1 skips biases).
        for p in self.transformer.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
        # Move the model to the selected device.
        self.transformer = self.transformer.to(DEVICE)
        # Cross-entropy loss, ignoring padding positions.
        self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=PAD_IDX)
        # Adam optimizer with the original Transformer-paper betas/eps.
        self.optimizer = torch.optim.Adam(self.transformer.parameters(), lr=0.0001, betas=(0.9, 0.98), eps=1e-9)
    def load(self, filename='all-model.pt'):
        """Load trained weights from *filename* onto the current device."""
        self.transformer.load_state_dict(torch.load(filename, map_location=DEVICE))
    def translate(self, src_sentence: str):
        """Translate one source sentence via greedy decoding; returns a string."""
        self.transformer.eval()
        src = self.src_transform(src_sentence).view(-1, 1)
        num_tokens = src.shape[0]
        src_mask = (torch.zeros(num_tokens, num_tokens)).type(torch.bool)
        tgt_tokens = greedy_decode(
            self.transformer, src, src_mask, max_len=num_tokens + 5, beamsize=5, start_symbol=SOS_IDX).flatten()
        return " ".join(self.tgt_vocab.lookup_tokens(list(tgt_tokens.cpu().numpy()))).replace("<sos>", "").replace("<eos>", "")
if __name__ == '__main__':
    # Smoke test: load trained weights and translate one sample sentence.
    nmt = NMT()
    nmt.load('./all-model.pt')
    pred = nmt.translate('もし<A>が偶数のとき')
    print('pred:', pred)
import torch.nn as nn
from torch import Tensor
from typing import Iterable, List
import math
import vocab
SEED = 1234
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
UNK_IDX, PAD_IDX, SOS_IDX, EOS_IDX = 0, 1, 2, 3
special_symbols = ['<unk>', '<pad>', '<sos>', '<eos>', '<blk>', '</blk>', '<sep>']
MAX_LEN=80
from torch.nn.utils.rnn import pad_sequence
def sequential_transforms(*transforms):
def func(txt_input):
for transform in transforms:
txt_input = transform(txt_input)
return txt_input
return func
def tensor_transform(token_ids: List[int]):
return torch.cat((torch.tensor([SOS_IDX]),
torch.tensor(token_ids),
torch.tensor([EOS_IDX])))
mport (TransformerEncoder, TransformerDecoder,
TransformerEncoderLayer, TransformerDecoderLayer)
class PositionalEncoding(nn.Module):
def __init__(self,
emb_size: int,
dropout: float,
maxlen: int = 5000):
super(PositionalEncoding, self).__init__()
den = torch.exp(- torch.arange(0, emb_size, 2) * math.log(10000) / emb_size)
pos = torch.arange(0, maxlen).reshape(maxlen, 1)
pos_embedding = torch.zeros((maxlen, emb_size))
pos_embedding[:, 0::2] = torch.sin(pos * den)
pos_embedding[:, 1::2] = torch.cos(pos * den)
pos_embedding = pos_embedding.unsqueeze(-2)
self.dropout = nn.Dropout(dropout)
self.register_buffer('pos_embedding', pos_embedding)
def forward(self, token_embedding: Tensor):
return self.dropout(token_embedding +
self.pos_embedding[:token_embedding.size(0),:])
class TokenEmbedding(nn.Module):
def __init__(self, vocab_size: int, emb_size):
super(TokenEmbedding, self).__init__()
self.embedding = nn.Embedding(vocab_size, emb_size)
self.emb_size = emb_size
def forward(self, tokens: Tensor):
return self.embedding(tokens.long()) * math.sqrt(self.emb_size)
class Seq2SeqTransformer(nn.Module):
def __init__(self,
num_encoder_layers: int,
num_decoder_layers: int,
emb_size: int,
nhead: int,
src_vocab_size: int,
tgt_vocab_size: int,
dim_feedforward: int = 512,
dropout: float = 0.1):
super(Seq2SeqTransformer, self).__init__()
encoder_layer = TransformerEncoderLayer(d_model=emb_size, nhead=nhead,
dim_feedforward=dim_feedforward)
self.transformer_encoder = TransformerEncoder(encoder_layer, num_layers=num_encoder_layers)
decoder_layer = TransformerDecoderLayer(d_model=emb_size, nhead=nhead,
dim_feedforward=dim_feedforward)
self.transformer_decoder = TransformerDecoder(decoder_layer, num_layers=num_decoder_layers)
self.generator = nn.Linear(emb_size, tgt_vocab_size)
self.src_tok_emb = TokenEmbedding(src_vocab_size, emb_size)
self.tgt_tok_emb = TokenEmbedding(tgt_vocab_size, emb_size)
self.positional_encoding = PositionalEncoding(emb_size, dropout=dropout)
def forward(self,
src: Tensor,
tgt: Tensor,
src_mask: Tensor,
tgt_mask: Tensor,
src_padding_mask: Tensor,
tgt_padding_mask: Tensor,
memory_key_padding_mask: Tensor):
src_emb = self.positional_encoding(self.src_tok_emb(src))
tgt_emb = self.positional_encoding(self.tgt_tok_emb(tgt))
memory = self.transformer_encoder(src_emb, src_mask, src_padding_mask)
outs = self.transformer_decoder(tgt_emb, memory, tgt_mask, None,
tgt_padding_mask, memory_key_padding_mask)
return self.generator(outs)
def encode(self, src: Tensor, src_mask: Tensor):
return self.transformer_encoder(self.positional_encoding(
self.src_tok_emb(src)), src_mask)
def decode(self, tgt: Tensor, memory: Tensor, tgt_mask: Tensor):
return self.transformer_decoder(self.positional_encoding(
self.tgt_tok_emb(tgt)), memory,
tgt_mask)
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
sz):
mask = (torch.triu(torch.ones((sz, sz), device=DEVICE)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def create_mask(src, tgt):
src_seq_len = src.shape[0]
tgt_seq_len = tgt.shape[0]
tgt_mask = generate_square_subsequent_mask(tgt_seq_len)
src_mask = torch.zeros((src_seq_len, src_seq_len), device=DEVICE).type(torch.bool)
src_padding_mask = (src == PAD_IDX).transpose(0, 1)
tgt_padding_mask = (tgt == PAD_IDX).transpose(0, 1)
return src_mask, tgt_mask, src_padding_mask, tgt_padding_mask
def greedy_decode(model, src, src_mask, max_len, beamsize, start_symbol):
src = src.to(DEVICE)
src_mask = src_mask.to(DEVICE)
memory = model.encode(src, src_mask)
ys = torch.ones(1, 1).fill_(start_symbol).type(torch.long).to(DEVICE)
for i in range(max_len-1):
memory = memory.to(DEVICE)
tgt_mask = (generate_square_subsequent_mask(ys.size(0))
.type(torch.bool)).to(DEVICE)
out = model.decode(ys, memory, tgt_mask)
out = out.transpose(0, 1)
prob = model.generator(out[:, -1])
next_prob, next_word = prob.topk(k=beamsize, dim=1)
next_word = next_word[:, 0]
next_word = next_word.item()
ys = torch.cat([ys,
torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=0)
if next_word == EOS_IDX:
break
return ys
class NMT(object):
src_vocab: object
tgt_vocab: object
def __init__(self, src_vocab='kujira', tgt_vocab='python'):
self.src_vocab = vocab.load_vocab(src_vocab)
self.tgt_vocab = vocab.load_vocab(tgt_vocab)
tokenizer = vocab.tokenizer_from_vocab(self.src_vocab)
self.src_transform = sequential_transforms(tokenizer,
self.src_vocab,
tensor_transform)
self.SRC_VOCAB_SIZE = len(self.src_vocab)
self.TGT_VOCAB_SIZE = len(self.tgt_vocab)
self.EMB_SIZE = 512
self.NHEAD = 8
self.FFN_HID_DIM = 512
self.BATCH_SIZE = 128
self.NUM_ENCODER_LAYERS = 3
self.NUM_DECODER_LAYERS = 3
self.transformer = Seq2SeqTransformer(self.NUM_ENCODER_LAYERS, self.NUM_DECODER_LAYERS,
self.EMB_SIZE, self.NHEAD, self.SRC_VOCAB_SIZE, self.TGT_VOCAB_SIZE,
self.FFN_HID_DIM)
for p in self.transformer.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
self.transformer = self.transformer.to(DEVICE)
self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=PAD_IDX)
self.optimizer = torch.optim.Adam(self.transformer.parameters(), lr=0.0001, betas=(0.9, 0.98), eps=1e-9)
def load(self, filename='all-model.pt'):
self.transformer.load_state_dict(torch.load(filename, map_location=DEVICE))
def translate(self, src_sentence: str):
self.transformer.eval()
src = self.src_transform(src_sentence).view(-1, 1)
num_tokens = src.shape[0]
src_mask = (torch.zeros(num_tokens, num_tokens)).type(torch.bool)
tgt_tokens = greedy_decode(
self.transformer, src, src_mask, max_len=num_tokens + 5, beamsize=5, start_symbol=SOS_IDX).flatten()
return " ".join(self.tgt_vocab.lookup_tokens(list(tgt_tokens.cpu().numpy()))).replace("<sos>", "").replace("<eos>", "")
if __name__ == '__main__':
nmt = NMT()
nmt.load('./all-model.pt')
pred = nmt.translate('もし<A>が偶数のとき')
print('pred:', pred) | true | true |
f717dbb4e70ad1e7af8abf4d384448d348389a87 | 9,237 | py | Python | neo/io/pynnio.py | lkoelman/python-neo | 6b0454519b4ead6605d3ce4100a07c33f57df830 | [
"BSD-3-Clause"
] | null | null | null | neo/io/pynnio.py | lkoelman/python-neo | 6b0454519b4ead6605d3ce4100a07c33f57df830 | [
"BSD-3-Clause"
] | 8 | 2018-06-02T11:46:10.000Z | 2018-09-04T15:51:45.000Z | src/neo/neo/io/pynnio.py | grg2rsr/SeqPeelSort | 58a207976fb33a50ea8e42b70d7da73b03474f42 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Module for reading/writing data from/to legacy PyNN formats.
PyNN is available at http://neuralensemble.org/PyNN
Classes:
PyNNNumpyIO
PyNNTextIO
Supported: Read/Write
Authors: Andrew Davison, Pierre Yger
"""
from itertools import chain
import numpy
import quantities as pq
import warnings
from neo.io.baseio import BaseIO
from neo.core import Segment, AnalogSignal, SpikeTrain
try:
unicode
PY2 = True
except NameError:
PY2 = False
UNITS_MAP = {
'spikes': pq.ms,
'v': pq.mV,
'gsyn': pq.UnitQuantity('microsiemens', 1e-6 * pq.S, 'uS', 'µS'), # checked
}
class BasePyNNIO(BaseIO):
    """
    Base class for PyNN IO classes.

    The legacy PyNN formats store a 2-column array (column 0: time or
    sample value, column 1: channel index) plus a metadata dictionary.
    Subclasses implement ``_read_file_contents``/``_write_file_contents``.
    """
    is_readable = True
    is_writable = True
    has_header = True
    is_streameable = False  # TODO - correct spelling to "is_streamable"
    supported_objects = [Segment, AnalogSignal, SpikeTrain]
    readable_objects = supported_objects
    writeable_objects = supported_objects
    mode = 'file'

    def __init__(self, filename=None, **kargs):
        # BUG FIX: the original forwarded ``*kargs``, which unpacks only the
        # *keys* of the keyword dict as positional arguments; forward the
        # keyword arguments properly.
        BaseIO.__init__(self, filename, **kargs)
        warnings.warn("PyNNTextIO and PyNNNumpyIO will be removed in Neo 0.7.0. " +
                      "Please contact the Neo developers if this will cause you problems.",
                      DeprecationWarning)

    def _read_file_contents(self):
        """Return ``(data, metadata)``; implemented by subclasses."""
        raise NotImplementedError

    def _extract_array(self, data, channel_index):
        """Return the column-0 values belonging to *channel_index*."""
        idx = numpy.where(data[:, 1] == channel_index)[0]
        return data[idx, 0]

    def _determine_units(self, metadata):
        """Units from metadata, falling back to the standard PyNN units.

        Raises IOError if neither 'units' nor a known 'variable' is present.
        """
        if 'units' in metadata:
            return metadata['units']
        elif 'variable' in metadata and metadata['variable'] in UNITS_MAP:
            return UNITS_MAP[metadata['variable']]
        else:
            raise IOError("Cannot determine units")

    def _extract_signals(self, data, metadata):
        """Build a multi-channel AnalogSignal, or None if there is no data."""
        signal = None  # BUG FIX: previously unbound when arr was empty
        # BUG FIX: numpy.vstack requires a sequence; passing a generator is
        # deprecated and raises a TypeError in recent numpy versions.
        arr = numpy.vstack([self._extract_array(data, channel_index)
                            for channel_index in
                            range(metadata['first_index'], metadata['last_index'] + 1)])
        if len(arr) > 0:
            signal = AnalogSignal(arr.T,
                                  units=self._determine_units(metadata),
                                  sampling_period=metadata['dt'] * pq.ms)
            signal.annotate(label=metadata["label"],
                            variable=metadata["variable"])
        return signal

    def _extract_spikes(self, data, metadata, channel_index):
        """Build a SpikeTrain for one channel, or None if it has no spikes."""
        spiketrain = None
        spike_times = self._extract_array(data, channel_index)
        if len(spike_times) > 0:
            spiketrain = SpikeTrain(spike_times, units=pq.ms, t_stop=spike_times.max())
            spiketrain.annotate(label=metadata["label"],
                                channel_index=channel_index,
                                dt=metadata["dt"])
        return spiketrain

    def _write_file_contents(self, data, metadata):
        """Persist ``(data, metadata)``; implemented by subclasses."""
        raise NotImplementedError

    def read_segment(self, lazy=False):
        """Read the whole file as one Segment (spike trains or one signal)."""
        assert not lazy, 'Do not support lazy'
        data, metadata = self._read_file_contents()
        annotations = dict((k, metadata.get(k, 'unknown'))
                           for k in ("label", "variable", "first_id", "last_id"))
        seg = Segment(**annotations)
        if metadata['variable'] == 'spikes':
            for i in range(metadata['first_index'], metadata['last_index'] + 1):
                spiketrain = self._extract_spikes(data, metadata, i)
                if spiketrain is not None:
                    seg.spiketrains.append(spiketrain)
            # store dt for SpikeTrains only, as can be retrieved from sampling_period for AnalogSignal
            seg.annotate(dt=metadata['dt'])
        else:
            signal = self._extract_signals(data, metadata)
            if signal is not None:
                seg.analogsignals.append(signal)
        seg.create_many_to_one_relationship()
        return seg

    def write_segment(self, segment):
        """Write a Segment's analog signal (first only) or spike trains."""
        source = segment.analogsignals or segment.spiketrains
        assert len(source) > 0, "Segment contains neither analog signals nor spike trains."
        metadata = segment.annotations.copy()
        s0 = source[0]
        if isinstance(s0, AnalogSignal):
            if len(source) > 1:
                warnings.warn("Cannot handle multiple analog signals. Writing only the first.")
            source = s0.T
            metadata['size'] = s0.shape[1]
            n = source.size
        else:
            metadata['size'] = len(source)
            n = sum(s.size for s in source)
        metadata['first_index'] = 0
        metadata['last_index'] = metadata['size'] - 1
        if 'label' not in metadata:
            metadata['label'] = 'unknown'
        if 'dt' not in metadata:  # dt not included in annotations if Segment contains only AnalogSignals
            metadata['dt'] = s0.sampling_period.rescale(pq.ms).magnitude
        metadata['n'] = n
        data = numpy.empty((n, 2))
        # if the 'variable' annotation is a standard one from PyNN, we rescale
        # to use standard PyNN units
        # we take the units from the first element of source and scale all
        # the signals to have the same units
        if 'variable' in segment.annotations:
            units = UNITS_MAP.get(segment.annotations['variable'], source[0].dimensionality)
        else:
            units = source[0].dimensionality
            metadata['variable'] = 'unknown'
        try:
            metadata['units'] = units.unicode
        except AttributeError:
            metadata['units'] = units.u_symbol
        start = 0
        for i, signal in enumerate(source):  # here signal may be AnalogSignal or SpikeTrain
            end = start + signal.size
            data[start:end, 0] = numpy.array(signal.rescale(units))
            data[start:end, 1] = i * numpy.ones((signal.size,), dtype=float)
            start = end
        self._write_file_contents(data, metadata)

    def read_analogsignal(self, lazy=False):
        """Read the file's single AnalogSignal; raises if it holds spikes."""
        assert not lazy, 'Do not support lazy'
        data, metadata = self._read_file_contents()
        if metadata['variable'] == 'spikes':
            raise TypeError("File contains spike data, not analog signals")
        else:
            signal = self._extract_signals(data, metadata)
            if signal is None:
                raise IndexError("File does not contain a signal")
            else:
                return signal

    def read_spiketrain(self, lazy=False, channel_index=0):
        """Read one channel's SpikeTrain; raises if the file holds signals."""
        assert not lazy, 'Do not support lazy'
        data, metadata = self._read_file_contents()
        if metadata['variable'] != 'spikes':
            raise TypeError("File contains analog signals, not spike data")
        else:
            spiketrain = self._extract_spikes(data, metadata, channel_index)
            if spiketrain is None:
                raise IndexError(
                    "File does not contain any spikes with channel index %d" % channel_index)
            else:
                return spiketrain
class PyNNNumpyIO(BasePyNNIO):
    """
    (DEPRECATED) Reads/writes data from/to PyNN NumpyBinaryFile format
    """
    name = "PyNN NumpyBinaryFile"
    extensions = ['npz']
    def _read_file_contents(self):
        """Load the .npz file and return (data array, metadata dict)."""
        contents = numpy.load(self.filename)
        data = contents["data"]
        metadata = {}
        for name, value in contents['metadata']:
            try:
                # SECURITY NOTE: eval() on file-supplied metadata executes
                # arbitrary expressions — only open files from trusted
                # sources (ast.literal_eval would be safer).
                metadata[name] = eval(value)
            except Exception:
                # Non-evaluable values are kept as raw strings (e.g. units).
                metadata[name] = value
        return data, metadata
    def _write_file_contents(self, data, metadata):
        """Save data plus a string-array of sorted metadata to .npz."""
        # we explicitly set the dtype to ensure roundtrips preserve file contents exactly
        max_metadata_length = max(chain([len(k) for k in metadata.keys()],
                                        [len(str(v)) for v in metadata.values()]))
        if PY2:
            dtype = "S%d" % max_metadata_length
        else:
            dtype = "U%d" % max_metadata_length
        metadata_array = numpy.array(sorted(metadata.items()), dtype)
        numpy.savez(self.filename, data=data, metadata=metadata_array)
class PyNNTextIO(BasePyNNIO):
    """
    (DEPRECATED) Reads/writes data from/to PyNN StandardTextFile format
    """
    name = "PyNN StandardTextFile"
    extensions = ['v', 'ras', 'gsyn']
    def _read_metadata(self):
        """Parse leading '# name = value' header lines into a dict."""
        metadata = {}
        with open(self.filename) as f:
            for line in f:
                if line[0] == "#":
                    name, value = line[1:].strip().split("=")
                    name = name.strip()
                    try:
                        # SECURITY NOTE: eval() on file-supplied metadata
                        # executes arbitrary expressions — only open files
                        # from trusted sources.
                        metadata[name] = eval(value)
                    except Exception:
                        metadata[name] = value.strip()
                else:
                    # Header ends at the first non-comment line.
                    break
        return metadata
    def _read_file_contents(self):
        """Return (data array, metadata dict) from the text file."""
        data = numpy.loadtxt(self.filename)
        metadata = self._read_metadata()
        return data, metadata
    def _write_file_contents(self, data, metadata):
        """Write '# name = value' header lines followed by the data table."""
        with open(self.filename, 'wb') as f:
            for item in sorted(metadata.items()):
                f.write(("# %s = %s\n" % item).encode('utf8'))
            numpy.savetxt(f, data)
| 36.800797 | 105 | 0.590343 |
from itertools import chain
import numpy
import quantities as pq
import warnings
from neo.io.baseio import BaseIO
from neo.core import Segment, AnalogSignal, SpikeTrain
try:
unicode
PY2 = True
except NameError:
PY2 = False
UNITS_MAP = {
'spikes': pq.ms,
'v': pq.mV,
'gsyn': pq.UnitQuantity('microsiemens', 1e-6 * pq.S, 'uS', 'µS'),
}
class BasePyNNIO(BaseIO):
is_readable = True
is_writable = True
has_header = True
is_streameable = False
supported_objects = [Segment, AnalogSignal, SpikeTrain]
readable_objects = supported_objects
writeable_objects = supported_objects
mode = 'file'
def __init__(self, filename=None, **kargs):
BaseIO.__init__(self, filename, *kargs)
warnings.warn("PyNNTextIO and PyNNNumpyIO will be removed in Neo 0.7.0. " +
"Please contact the Neo developers if this will cause you problems.",
DeprecationWarning)
def _read_file_contents(self):
raise NotImplementedError
def _extract_array(self, data, channel_index):
idx = numpy.where(data[:, 1] == channel_index)[0]
return data[idx, 0]
def _determine_units(self, metadata):
if 'units' in metadata:
return metadata['units']
elif 'variable' in metadata and metadata['variable'] in UNITS_MAP:
return UNITS_MAP[metadata['variable']]
else:
raise IOError("Cannot determine units")
def _extract_signals(self, data, metadata):
arr = numpy.vstack(self._extract_array(data, channel_index)
for channel_index in
range(metadata['first_index'], metadata['last_index'] + 1))
if len(arr) > 0:
signal = AnalogSignal(arr.T,
units=self._determine_units(metadata),
sampling_period=metadata['dt'] * pq.ms)
signal.annotate(label=metadata["label"],
variable=metadata["variable"])
return signal
def _extract_spikes(self, data, metadata, channel_index):
spiketrain = None
spike_times = self._extract_array(data, channel_index)
if len(spike_times) > 0:
spiketrain = SpikeTrain(spike_times, units=pq.ms, t_stop=spike_times.max())
spiketrain.annotate(label=metadata["label"],
channel_index=channel_index,
dt=metadata["dt"])
return spiketrain
def _write_file_contents(self, data, metadata):
raise NotImplementedError
def read_segment(self, lazy=False):
assert not lazy, 'Do not support lazy'
data, metadata = self._read_file_contents()
annotations = dict((k, metadata.get(k, 'unknown'))
for k in ("label", "variable", "first_id", "last_id"))
seg = Segment(**annotations)
if metadata['variable'] == 'spikes':
for i in range(metadata['first_index'], metadata['last_index'] + 1):
spiketrain = self._extract_spikes(data, metadata, i)
if spiketrain is not None:
seg.spiketrains.append(spiketrain)
seg.annotate(dt=metadata['dt'])
else:
signal = self._extract_signals(data, metadata)
if signal is not None:
seg.analogsignals.append(signal)
seg.create_many_to_one_relationship()
return seg
    def write_segment(self, segment):
        """Write a Segment to self.filename as an (n, 2) column layout:
        column 0 holds sample values, column 1 the channel index.

        Analog signals take precedence over spike trains; only the first
        analog signal is written (with a warning) because the flat file
        format cannot represent more than one.
        """
        source = segment.analogsignals or segment.spiketrains
        assert len(source) > 0, "Segment contains neither analog signals nor spike trains."
        metadata = segment.annotations.copy()
        s0 = source[0]
        if isinstance(s0, AnalogSignal):
            if len(source) > 1:
                warnings.warn("Cannot handle multiple analog signals. Writing only the first.")
            # Transpose so iteration below walks per-channel 1-D slices.
            source = s0.T
            metadata['size'] = s0.shape[1]
            n = source.size
        else:
            metadata['size'] = len(source)
            n = sum(s.size for s in source)
        metadata['first_index'] = 0
        metadata['last_index'] = metadata['size'] - 1
        if 'label' not in metadata:
            metadata['label'] = 'unknown'
        if 'dt' not in metadata:
            # NOTE(review): s0.sampling_period assumes the AnalogSignal
            # branch; spike trains are expected to carry a 'dt' annotation.
            metadata['dt'] = s0.sampling_period.rescale(pq.ms).magnitude
        metadata['n'] = n
        data = numpy.empty((n, 2))
        if 'variable' in segment.annotations:
            units = UNITS_MAP.get(segment.annotations['variable'], source[0].dimensionality)
        else:
            units = source[0].dimensionality
            metadata['variable'] = 'unknown'
        # quantities Dimensionality exposes .unicode or .u_symbol depending
        # on version; try the richer form first.
        try:
            metadata['units'] = units.unicode
        except AttributeError:
            metadata['units'] = units.u_symbol
        # Flatten every channel into consecutive rows, tagging each row
        # with its channel index in column 1.
        start = 0
        for i, signal in enumerate(source):
            end = start + signal.size
            data[start:end, 0] = numpy.array(signal.rescale(units))
            data[start:end, 1] = i * numpy.ones((signal.size,), dtype=float)
            start = end
        self._write_file_contents(data, metadata)
def read_analogsignal(self, lazy=False):
assert not lazy, 'Do not support lazy'
data, metadata = self._read_file_contents()
if metadata['variable'] == 'spikes':
raise TypeError("File contains spike data, not analog signals")
else:
signal = self._extract_signals(data, metadata)
if signal is None:
raise IndexError("File does not contain a signal")
else:
return signal
def read_spiketrain(self, lazy=False, channel_index=0):
assert not lazy, 'Do not support lazy'
data, metadata = self._read_file_contents()
if metadata['variable'] != 'spikes':
raise TypeError("File contains analog signals, not spike data")
else:
spiketrain = self._extract_spikes(data, metadata, channel_index)
if spiketrain is None:
raise IndexError(
"File does not contain any spikes with channel index %d" % channel_index)
else:
return spiketrain
class PyNNNumpyIO(BasePyNNIO):
    """Read/write PyNN data stored as a NumPy .npz archive with a 'data'
    array and a string 'metadata' key/value table."""
    name = "PyNN NumpyBinaryFile"
    extensions = ['npz']
    def _read_file_contents(self):
        """Load the archive and decode metadata values.

        SECURITY: metadata values come from an on-disk file and are
        attacker-controlled; the original used eval(), which executes
        arbitrary expressions.  ast.literal_eval only accepts Python
        literals; anything else falls through and is kept as a string.
        """
        import ast  # local import keeps the file's import block untouched
        contents = numpy.load(self.filename)
        data = contents["data"]
        metadata = {}
        for name, value in contents['metadata']:
            try:
                metadata[name] = ast.literal_eval(value)
            except Exception:
                metadata[name] = value
        return data, metadata
    def _write_file_contents(self, data, metadata):
        """Save data plus metadata as a fixed-width string table whose
        column width fits the longest key or stringified value."""
        max_metadata_length = max(chain([len(k) for k in metadata.keys()],
                                        [len(str(v)) for v in metadata.values()]))
        # Bytes on Python 2, unicode on Python 3, matching numpy defaults.
        if PY2:
            dtype = "S%d" % max_metadata_length
        else:
            dtype = "U%d" % max_metadata_length
        metadata_array = numpy.array(sorted(metadata.items()), dtype)
        numpy.savez(self.filename, data=data, metadata=metadata_array)
class PyNNTextIO(BasePyNNIO):
    """Read/write PyNN data in the StandardTextFile format: '# key = value'
    header lines followed by whitespace-separated numeric columns."""
    name = "PyNN StandardTextFile"
    extensions = ['v', 'ras', 'gsyn']
    def _read_metadata(self):
        """Parse the leading '# key = value' header lines.

        SECURITY: header values are attacker-controlled file content; the
        original used eval(), which executes arbitrary expressions.
        ast.literal_eval only accepts literals; non-literals are kept as
        stripped strings.
        """
        import ast  # local import keeps the file's import block untouched
        metadata = {}
        with open(self.filename) as f:
            for line in f:
                if line[0] == "#":
                    # BUG FIX: split on the first '=' only, so values that
                    # themselves contain '=' no longer raise ValueError.
                    name, value = line[1:].strip().split("=", 1)
                    name = name.strip()
                    try:
                        metadata[name] = ast.literal_eval(value)
                    except Exception:
                        metadata[name] = value.strip()
                else:
                    # Header ends at the first non-comment line.
                    break
        return metadata
    def _read_file_contents(self):
        """Return (numeric data, parsed header metadata)."""
        data = numpy.loadtxt(self.filename)
        metadata = self._read_metadata()
        return data, metadata
    def _write_file_contents(self, data, metadata):
        """Write sorted '# key = value' header lines, then the data block."""
        with open(self.filename, 'wb') as f:
            for item in sorted(metadata.items()):
                f.write(("# %s = %s\n" % item).encode('utf8'))
            numpy.savetxt(f, data)
| true | true |
f717dc0c491e0e926111b82f4d9a35f3ae57502b | 397 | py | Python | class_book/wsgi.py | 3crabs/class-book | f5de12be816aa9be889d8413007be8eb4abdf45f | [
"WTFPL"
] | 1 | 2020-11-19T14:49:41.000Z | 2020-11-19T14:49:41.000Z | class_book/wsgi.py | 3crabs/class-book | f5de12be816aa9be889d8413007be8eb4abdf45f | [
"WTFPL"
] | null | null | null | class_book/wsgi.py | 3crabs/class-book | f5de12be816aa9be889d8413007be8eb4abdf45f | [
"WTFPL"
] | null | null | null | """
WSGI config for class_book project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment already
# specifies a settings module.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'class_book.settings')
# Module-level WSGI callable that servers (gunicorn, uWSGI, ...) import.
application = get_wsgi_application()
| 23.352941 | 78 | 0.788413 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'class_book.settings')
application = get_wsgi_application()
| true | true |
f717df53a4c80dee569899eb3e7c32f7b58fef74 | 12,004 | py | Python | main.py | lakmalniranga/OpenCV-average-color-detection | 615ca69002d2bc37191c118247ddd8986f04edb1 | [
"MIT"
] | null | null | null | main.py | lakmalniranga/OpenCV-average-color-detection | 615ca69002d2bc37191c118247ddd8986f04edb1 | [
"MIT"
] | null | null | null | main.py | lakmalniranga/OpenCV-average-color-detection | 615ca69002d2bc37191c118247ddd8986f04edb1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
OpenCV Python image average color detection script. You can use this to finding darkest color.
Coded by : Lakmal Niranga. 2016
"""
import os
import cv2
from PyQt4 import QtCore, QtGui
# pyuic4-generated compatibility shims: QString.fromUtf8 and
# QApplication.UnicodeUTF8 exist only in older PyQt4 API-1 builds, so fall
# back to identity / encoding-less translate on newer installations.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
    def setupUi(self, Dialog):
        """Build the dialog's widget tree (generated by pyuic4).

        Layout: two image panels (label_img1/label_img2) each with a
        file-picker and a camera button, a result caption, two colour
        swatches with percentage labels, and a Compare button.
        Regenerate from the .ui file rather than editing by hand.
        """
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.setWindowModality(QtCore.Qt.ApplicationModal)
        Dialog.resize(790, 550)
        Dialog.setSizeGripEnabled(True)
        # --- Left image panel (image 1) ---
        self.frame = QtGui.QFrame(Dialog)
        self.frame.setGeometry(QtCore.QRect(10, 10, 381, 281))
        self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
        self.frame.setFrameShadow(QtGui.QFrame.Raised)
        self.frame.setObjectName(_fromUtf8("frame"))
        self.horizontalLayoutWidget = QtGui.QWidget(self.frame)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(10, 230, 361, 41))
        self.horizontalLayoutWidget.setObjectName(_fromUtf8("horizontalLayoutWidget"))
        self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.btnImg1_pc = QtGui.QPushButton(self.horizontalLayoutWidget)
        self.btnImg1_pc.setObjectName(_fromUtf8("btnImg1_pc"))
        self.horizontalLayout.addWidget(self.btnImg1_pc)
        self.btnImg1_cam = QtGui.QPushButton(self.horizontalLayoutWidget)
        self.btnImg1_cam.setObjectName(_fromUtf8("btnImg1_cam"))
        self.horizontalLayout.addWidget(self.btnImg1_cam)
        self.label_img1 = QtGui.QLabel(self.frame)
        self.label_img1.setGeometry(QtCore.QRect(10, 10, 361, 211))
        self.label_img1.setText(_fromUtf8(""))
        self.label_img1.setAlignment(QtCore.Qt.AlignCenter)
        self.label_img1.setObjectName(_fromUtf8("label_img1"))
        self.horizontalLayoutWidget.raise_()
        self.label_img1.raise_()
        # --- Right image panel (image 2) ---
        self.frame_3 = QtGui.QFrame(Dialog)
        self.frame_3.setGeometry(QtCore.QRect(400, 10, 381, 281))
        self.frame_3.setFrameShape(QtGui.QFrame.StyledPanel)
        self.frame_3.setFrameShadow(QtGui.QFrame.Raised)
        self.frame_3.setObjectName(_fromUtf8("frame_3"))
        self.horizontalLayoutWidget_2 = QtGui.QWidget(self.frame_3)
        self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(10, 230, 361, 41))
        self.horizontalLayoutWidget_2.setObjectName(_fromUtf8("horizontalLayoutWidget_2"))
        self.horizontalLayout_2 = QtGui.QHBoxLayout(self.horizontalLayoutWidget_2)
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        self.btnImg2_pc = QtGui.QPushButton(self.horizontalLayoutWidget_2)
        self.btnImg2_pc.setObjectName(_fromUtf8("btnImg2_pc"))
        self.horizontalLayout_2.addWidget(self.btnImg2_pc)
        self.btnImg2_cam = QtGui.QPushButton(self.horizontalLayoutWidget_2)
        self.btnImg2_cam.setObjectName(_fromUtf8("btnImg2_cam"))
        self.horizontalLayout_2.addWidget(self.btnImg2_cam)
        self.label_img2 = QtGui.QLabel(self.frame_3)
        self.label_img2.setGeometry(QtCore.QRect(10, 10, 361, 211))
        self.label_img2.setText(_fromUtf8(""))
        self.label_img2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_img2.setObjectName(_fromUtf8("label_img2"))
        # --- Result caption ---
        self.verticalLayoutWidget = QtGui.QWidget(Dialog)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 370, 771, 41))
        self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
        self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(False)
        font.setWeight(50)
        self.label.setFont(font)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName(_fromUtf8("label"))
        self.verticalLayout.addWidget(self.label)
        # --- Left colour swatch + percentage label ---
        self.verticalLayoutWidget_2 = QtGui.QWidget(Dialog)
        self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(10, 410, 381, 143))
        self.verticalLayoutWidget_2.setObjectName(_fromUtf8("verticalLayoutWidget_2"))
        self.verticalLayout_2 = QtGui.QVBoxLayout(self.verticalLayoutWidget_2)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.colorbox_1 = QtGui.QLabel(self.verticalLayoutWidget_2)
        self.colorbox_1.setText(_fromUtf8(""))
        self.colorbox_1.setObjectName(_fromUtf8("colorbox_1"))
        self.verticalLayout_2.addWidget(self.colorbox_1)
        self.lable_img1 = QtGui.QLabel(self.verticalLayoutWidget_2)
        font = QtGui.QFont()
        font.setPointSize(20)
        font.setBold(True)
        font.setWeight(75)
        self.lable_img1.setFont(font)
        self.lable_img1.setAlignment(QtCore.Qt.AlignCenter)
        self.lable_img1.setObjectName(_fromUtf8("lable_img1"))
        self.verticalLayout_2.addWidget(self.lable_img1)
        # --- Right colour swatch + percentage label ---
        self.verticalLayoutWidget_3 = QtGui.QWidget(Dialog)
        self.verticalLayoutWidget_3.setGeometry(QtCore.QRect(400, 410, 381, 143))
        self.verticalLayoutWidget_3.setObjectName(_fromUtf8("verticalLayoutWidget_3"))
        self.verticalLayout_3 = QtGui.QVBoxLayout(self.verticalLayoutWidget_3)
        self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
        self.colorbox_2 = QtGui.QLabel(self.verticalLayoutWidget_3)
        self.colorbox_2.setText(_fromUtf8(""))
        self.colorbox_2.setObjectName(_fromUtf8("colorbox_2"))
        self.verticalLayout_3.addWidget(self.colorbox_2)
        self.lable_img2 = QtGui.QLabel(self.verticalLayoutWidget_3)
        font = QtGui.QFont()
        font.setPointSize(20)
        font.setBold(True)
        font.setWeight(75)
        self.lable_img2.setFont(font)
        # NOTE(review): the next two lines look like stray duplicates of
        # earlier statements (label_img1 was already named and raised);
        # harmless but worth removing when regenerating the UI.
        self.label_img1.setObjectName(_fromUtf8("label_img1"))
        self.horizontalLayoutWidget.raise_()
        self.lable_img2.setAlignment(QtCore.Qt.AlignCenter)
        self.lable_img2.setObjectName(_fromUtf8("lable_img2"))
        self.verticalLayout_3.addWidget(self.lable_img2)
        # --- Compare button ---
        self.btnComp = QtGui.QPushButton(Dialog)
        self.btnComp.setGeometry(QtCore.QRect(310, 310, 171, 51))
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.btnComp.setFont(font)
        self.btnComp.setObjectName(_fromUtf8("btnComp"))
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        """Set translatable widget texts and wire button signals to the
        handler methods (generated by pyuic4, then extended by hand with
        the clicked.connect calls below)."""
        Dialog.setWindowTitle(_translate("Dialog", "OpenCV Darkest Cloth Identifier", None))
        self.btnImg1_pc.setText(_translate("Dialog", "Select image from PC", None))
        self.btnImg1_cam.setText(_translate("Dialog", "Take image from camera", None))
        self.btnImg2_pc.setText(_translate("Dialog", "Select image from PC", None))
        self.btnImg2_cam.setText(_translate("Dialog", "Take image from camera", None))
        self.label.setText(_translate("Dialog", "Most Suitable Average Color", None))
        self.lable_img1.setText(_translate("Dialog", "", None))
        self.lable_img2.setText(_translate("Dialog", "", None))
        self.btnComp.setText(_translate("Dialog", "Compare", None))
        # Signal wiring: each button routes to the matching handler method.
        self.btnImg1_pc.clicked.connect(self.openimg1)
        self.btnImg2_pc.clicked.connect(self.openimg2)
        self.btnComp.clicked.connect(self.compare_color)
        self.btnImg1_cam.clicked.connect(self.cameraImg1)
        self.btnImg2_cam.clicked.connect(self.cameraImg2)
    # Class-level placeholders; the handlers actually assign module-level
    # globals avg1/avg2, so these attributes are effectively unused.
    avg1=None
    avg2=None
def get_avg_color(self, img_path):
img = cv2.imread(img_path,cv2.IMREAD_COLOR)
img_width = img.shape[1]
img_height = img.shape[0]
rows_cols = 10
part_of_width = img_width/rows_cols
part_of_height = img_height/rows_cols
avg_B=0
avg_G=0
avg_R=0
for x in range(part_of_width,img_width-part_of_width,part_of_width):
for y in range(part_of_height,img_height-part_of_height,part_of_height):
color = img[y,x] #[y and x] - gives BGR
avg_B+=color[0]
avg_G+=color[1]
avg_R+=color[2]
cv2.circle(img,(x,y), 5, (0,0,0), -1) #[x and y]
return (avg_B/81,avg_G/81,avg_R/81)[::-1] #return tuple in BGR
    def openimg1(self):
        """Let the user pick image 1 from disk, preview it, and store its
        average colour in the module-level global avg1."""
        global avg1
        # `Dialog` is the module-level QDialog created in the __main__ block.
        img1_path = QtGui.QFileDialog.getOpenFileName(Dialog, 'Open file', os.getcwd() ,"Image files (*.jpg *.gif)")
        self.label_img1.setScaledContents(True)
        self.label_img1.setPixmap(QtGui.QPixmap(img1_path))
        avg1 = self.get_avg_color(str(img1_path))
        # avg tuple renders as "(r, g, b)", completing the CSS rgb() syntax.
        self.colorbox_1.setStyleSheet('background-color: rgb'+ str(avg1))
    def openimg2(self):
        """Let the user pick image 2 from disk, preview it, and store its
        average colour in the module-level global avg2."""
        global avg2
        # `Dialog` is the module-level QDialog created in the __main__ block.
        img2_path = QtGui.QFileDialog.getOpenFileName(Dialog, 'Open file', os.getcwd() ,"Image files (*.jpg *.gif)")
        self.label_img2.setScaledContents(True)
        self.label_img2.setPixmap(QtGui.QPixmap(img2_path))
        avg2 = self.get_avg_color(str(img2_path))
        # avg tuple renders as "(r, g, b)", completing the CSS rgb() syntax.
        self.colorbox_2.setStyleSheet('background-color: rgb'+ str(avg2))
def compare_color(self):
global avg1, avg2
msgBox = QtGui.QMessageBox()
msgBox.setIcon(QtGui.QMessageBox.Critical)
try:
img1_avarage = sum(i for i in avg1)
img2_avarage = sum(i for i in avg2)
avg1_per = (float(img1_avarage)/(img1_avarage+img2_avarage))*100
avg2_per = (float(img2_avarage)/(img1_avarage+img2_avarage))*100
self.lable_img1.setText(str(round(100-avg1_per, 2)) + "%")
self.lable_img2.setText(str(round(100-avg2_per, 2)) + "%")
except NameError as e:
msgBox.setText("Please select images first!")
msgBox.setWindowTitle("Error")
msgBox.exec_()
    def cameraImg1(self):
        """Capture image 1 from the default webcam.

        Shows a live preview; 's' saves the current frame to image1.jpg,
        previews it, and stores its average colour in global avg1; 'c'
        cancels.  Blocks the Qt event loop while the preview runs.
        """
        global avg1
        cap = cv2.VideoCapture(0)
        while(True):
            global avg1  # redundant (already declared above); kept as-is
            ret, frame = cap.read()
            cv2.imshow('press S to take image | press C to cancel',frame)
            # Mask to 8 bits so the comparison works across OpenCV builds.
            k = cv2.waitKey(3) & 0xFF
            if k == ord('s'):
                img_path="image1.jpg"
                cv2.imwrite(img_path, frame)
                self.label_img1.setScaledContents(True)
                self.label_img1.setPixmap(QtGui.QPixmap(img_path))
                avg1 = self.get_avg_color(str(img_path))
                self.colorbox_1.setStyleSheet('background-color: rgb'+ str(avg1))
                break
            if k == ord('c'):
                break
        cap.release()
        cv2.destroyAllWindows()
    def cameraImg2(self):
        """Capture image 2 from the default webcam.

        Shows a live preview; 's' saves the current frame to image2.jpg,
        previews it, and stores its average colour in global avg2; 'c'
        cancels.  Blocks the Qt event loop while the preview runs.
        """
        global avg2
        cap = cv2.VideoCapture(0)
        while(True):
            global avg2  # redundant (already declared above); kept as-is
            ret, frame = cap.read()
            cv2.imshow('press S to take image | press C to cancel',frame)
            # Mask to 8 bits so the comparison works across OpenCV builds.
            k = cv2.waitKey(3) & 0xFF
            if k == ord('s'):
                img_path="image2.jpg"
                cv2.imwrite(img_path, frame)
                self.label_img2.setScaledContents(True)
                self.label_img2.setPixmap(QtGui.QPixmap(img_path))
                avg2 = self.get_avg_color(str(img_path))
                self.colorbox_2.setStyleSheet('background-color: rgb'+ str(avg2))
                break
            if k == ord('c'):
                break
        cap.release()
        cv2.destroyAllWindows()
if __name__ == "__main__":
    import sys
    # Build the Qt application and the (module-level) dialog referenced by
    # the openimg* handlers, then enter the event loop.
    app = QtGui.QApplication(sys.argv)
    Dialog = QtGui.QDialog()
    Dialog.setFixedSize(790, 550)
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    sys.exit(app.exec_())
| 43.02509 | 116 | 0.673692 |
import os
import cv2
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.setWindowModality(QtCore.Qt.ApplicationModal)
Dialog.resize(790, 550)
Dialog.setSizeGripEnabled(True)
self.frame = QtGui.QFrame(Dialog)
self.frame.setGeometry(QtCore.QRect(10, 10, 381, 281))
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName(_fromUtf8("frame"))
self.horizontalLayoutWidget = QtGui.QWidget(self.frame)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(10, 230, 361, 41))
self.horizontalLayoutWidget.setObjectName(_fromUtf8("horizontalLayoutWidget"))
self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.btnImg1_pc = QtGui.QPushButton(self.horizontalLayoutWidget)
self.btnImg1_pc.setObjectName(_fromUtf8("btnImg1_pc"))
self.horizontalLayout.addWidget(self.btnImg1_pc)
self.btnImg1_cam = QtGui.QPushButton(self.horizontalLayoutWidget)
self.btnImg1_cam.setObjectName(_fromUtf8("btnImg1_cam"))
self.horizontalLayout.addWidget(self.btnImg1_cam)
self.label_img1 = QtGui.QLabel(self.frame)
self.label_img1.setGeometry(QtCore.QRect(10, 10, 361, 211))
self.label_img1.setText(_fromUtf8(""))
self.label_img1.setAlignment(QtCore.Qt.AlignCenter)
self.label_img1.setObjectName(_fromUtf8("label_img1"))
self.horizontalLayoutWidget.raise_()
self.label_img1.raise_()
self.frame_3 = QtGui.QFrame(Dialog)
self.frame_3.setGeometry(QtCore.QRect(400, 10, 381, 281))
self.frame_3.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame_3.setFrameShadow(QtGui.QFrame.Raised)
self.frame_3.setObjectName(_fromUtf8("frame_3"))
self.horizontalLayoutWidget_2 = QtGui.QWidget(self.frame_3)
self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(10, 230, 361, 41))
self.horizontalLayoutWidget_2.setObjectName(_fromUtf8("horizontalLayoutWidget_2"))
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.horizontalLayoutWidget_2)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.btnImg2_pc = QtGui.QPushButton(self.horizontalLayoutWidget_2)
self.btnImg2_pc.setObjectName(_fromUtf8("btnImg2_pc"))
self.horizontalLayout_2.addWidget(self.btnImg2_pc)
self.btnImg2_cam = QtGui.QPushButton(self.horizontalLayoutWidget_2)
self.btnImg2_cam.setObjectName(_fromUtf8("btnImg2_cam"))
self.horizontalLayout_2.addWidget(self.btnImg2_cam)
self.label_img2 = QtGui.QLabel(self.frame_3)
self.label_img2.setGeometry(QtCore.QRect(10, 10, 361, 211))
self.label_img2.setText(_fromUtf8(""))
self.label_img2.setAlignment(QtCore.Qt.AlignCenter)
self.label_img2.setObjectName(_fromUtf8("label_img2"))
self.verticalLayoutWidget = QtGui.QWidget(Dialog)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 370, 771, 41))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label = QtGui.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(False)
font.setWeight(50)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout.addWidget(self.label)
self.verticalLayoutWidget_2 = QtGui.QWidget(Dialog)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(10, 410, 381, 143))
self.verticalLayoutWidget_2.setObjectName(_fromUtf8("verticalLayoutWidget_2"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.verticalLayoutWidget_2)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.colorbox_1 = QtGui.QLabel(self.verticalLayoutWidget_2)
self.colorbox_1.setText(_fromUtf8(""))
self.colorbox_1.setObjectName(_fromUtf8("colorbox_1"))
self.verticalLayout_2.addWidget(self.colorbox_1)
self.lable_img1 = QtGui.QLabel(self.verticalLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(20)
font.setBold(True)
font.setWeight(75)
self.lable_img1.setFont(font)
self.lable_img1.setAlignment(QtCore.Qt.AlignCenter)
self.lable_img1.setObjectName(_fromUtf8("lable_img1"))
self.verticalLayout_2.addWidget(self.lable_img1)
self.verticalLayoutWidget_3 = QtGui.QWidget(Dialog)
self.verticalLayoutWidget_3.setGeometry(QtCore.QRect(400, 410, 381, 143))
self.verticalLayoutWidget_3.setObjectName(_fromUtf8("verticalLayoutWidget_3"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.verticalLayoutWidget_3)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.colorbox_2 = QtGui.QLabel(self.verticalLayoutWidget_3)
self.colorbox_2.setText(_fromUtf8(""))
self.colorbox_2.setObjectName(_fromUtf8("colorbox_2"))
self.verticalLayout_3.addWidget(self.colorbox_2)
self.lable_img2 = QtGui.QLabel(self.verticalLayoutWidget_3)
font = QtGui.QFont()
font.setPointSize(20)
font.setBold(True)
font.setWeight(75)
self.lable_img2.setFont(font)
self.label_img1.setObjectName(_fromUtf8("label_img1"))
self.horizontalLayoutWidget.raise_()
self.lable_img2.setAlignment(QtCore.Qt.AlignCenter)
self.lable_img2.setObjectName(_fromUtf8("lable_img2"))
self.verticalLayout_3.addWidget(self.lable_img2)
self.btnComp = QtGui.QPushButton(Dialog)
self.btnComp.setGeometry(QtCore.QRect(310, 310, 171, 51))
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.btnComp.setFont(font)
self.btnComp.setObjectName(_fromUtf8("btnComp"))
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "OpenCV Darkest Cloth Identifier", None))
self.btnImg1_pc.setText(_translate("Dialog", "Select image from PC", None))
self.btnImg1_cam.setText(_translate("Dialog", "Take image from camera", None))
self.btnImg2_pc.setText(_translate("Dialog", "Select image from PC", None))
self.btnImg2_cam.setText(_translate("Dialog", "Take image from camera", None))
self.label.setText(_translate("Dialog", "Most Suitable Average Color", None))
self.lable_img1.setText(_translate("Dialog", "", None))
self.lable_img2.setText(_translate("Dialog", "", None))
self.btnComp.setText(_translate("Dialog", "Compare", None))
self.btnImg1_pc.clicked.connect(self.openimg1)
self.btnImg2_pc.clicked.connect(self.openimg2)
self.btnComp.clicked.connect(self.compare_color)
self.btnImg1_cam.clicked.connect(self.cameraImg1)
self.btnImg2_cam.clicked.connect(self.cameraImg2)
avg1=None
avg2=None
def get_avg_color(self, img_path):
img = cv2.imread(img_path,cv2.IMREAD_COLOR)
img_width = img.shape[1]
img_height = img.shape[0]
rows_cols = 10
part_of_width = img_width/rows_cols
part_of_height = img_height/rows_cols
avg_B=0
avg_G=0
avg_R=0
for x in range(part_of_width,img_width-part_of_width,part_of_width):
for y in range(part_of_height,img_height-part_of_height,part_of_height):
color = img[y,x]
avg_B+=color[0]
avg_G+=color[1]
avg_R+=color[2]
cv2.circle(img,(x,y), 5, (0,0,0), -1)
return (avg_B/81,avg_G/81,avg_R/81)[::-1]
def openimg1(self):
global avg1
img1_path = QtGui.QFileDialog.getOpenFileName(Dialog, 'Open file', os.getcwd() ,"Image files (*.jpg *.gif)")
self.label_img1.setScaledContents(True)
self.label_img1.setPixmap(QtGui.QPixmap(img1_path))
avg1 = self.get_avg_color(str(img1_path))
self.colorbox_1.setStyleSheet('background-color: rgb'+ str(avg1))
def openimg2(self):
global avg2
img2_path = QtGui.QFileDialog.getOpenFileName(Dialog, 'Open file', os.getcwd() ,"Image files (*.jpg *.gif)")
self.label_img2.setScaledContents(True)
self.label_img2.setPixmap(QtGui.QPixmap(img2_path))
avg2 = self.get_avg_color(str(img2_path))
self.colorbox_2.setStyleSheet('background-color: rgb'+ str(avg2))
def compare_color(self):
global avg1, avg2
msgBox = QtGui.QMessageBox()
msgBox.setIcon(QtGui.QMessageBox.Critical)
try:
img1_avarage = sum(i for i in avg1)
img2_avarage = sum(i for i in avg2)
avg1_per = (float(img1_avarage)/(img1_avarage+img2_avarage))*100
avg2_per = (float(img2_avarage)/(img1_avarage+img2_avarage))*100
self.lable_img1.setText(str(round(100-avg1_per, 2)) + "%")
self.lable_img2.setText(str(round(100-avg2_per, 2)) + "%")
except NameError as e:
msgBox.setText("Please select images first!")
msgBox.setWindowTitle("Error")
msgBox.exec_()
def cameraImg1(self):
global avg1
cap = cv2.VideoCapture(0)
while(True):
global avg1
ret, frame = cap.read()
cv2.imshow('press S to take image | press C to cancel',frame)
k = cv2.waitKey(3) & 0xFF
if k == ord('s'):
img_path="image1.jpg"
cv2.imwrite(img_path, frame)
self.label_img1.setScaledContents(True)
self.label_img1.setPixmap(QtGui.QPixmap(img_path))
avg1 = self.get_avg_color(str(img_path))
self.colorbox_1.setStyleSheet('background-color: rgb'+ str(avg1))
break
if k == ord('c'):
break
cap.release()
cv2.destroyAllWindows()
def cameraImg2(self):
global avg2
cap = cv2.VideoCapture(0)
while(True):
global avg2
ret, frame = cap.read()
cv2.imshow('press S to take image | press C to cancel',frame)
k = cv2.waitKey(3) & 0xFF
if k == ord('s'):
img_path="image2.jpg"
cv2.imwrite(img_path, frame)
self.label_img2.setScaledContents(True)
self.label_img2.setPixmap(QtGui.QPixmap(img_path))
avg2 = self.get_avg_color(str(img_path))
self.colorbox_2.setStyleSheet('background-color: rgb'+ str(avg2))
break
if k == ord('c'):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Dialog = QtGui.QDialog()
Dialog.setFixedSize(790, 550)
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| true | true |
f717e0abb049f00234b9b3cbf5c4910e36250c30 | 1,028 | py | Python | pyvortex/__init__.py | pankajkarman/pyvortex | ba92d9b7702c33218377ac88f3045e880339f3ad | [
"MIT"
] | 5 | 2021-01-12T16:52:45.000Z | 2021-10-13T23:26:42.000Z | pyvortex/__init__.py | pankajkarman/pyvortex | ba92d9b7702c33218377ac88f3045e880339f3ad | [
"MIT"
] | 2 | 2020-12-18T15:16:37.000Z | 2021-12-02T14:47:07.000Z | pyvortex/__init__.py | pankajkarman/pyvortex | ba92d9b7702c33218377ac88f3045e880339f3ad | [
"MIT"
] | 3 | 2021-01-12T16:52:18.000Z | 2021-10-14T02:18:06.000Z | """
This module consists of functions to calculate the [equivalent latitude](https://journals.ametsoc.org/doi/citedby/10.1175/1520-0469%282003%29060%3C0287%3ATELADT%3E2.0.CO%3B2) and edge of a polar vortex using [Nash criteria](https://agupubs.onlinelibrary.wiley.com/doi/10.1029/96JD00066).
### Installation
```
pip install -U pyvortex
```
install the latest version using
```
pip install git+https://github.com/pankajkarman/pyvortex.git
```
### Usage
`pyvortex` is easy to use. Just import:
```python
import pyvortex as vr
```
#### Northern Hemisphere
Instantiate the `PolarVortex` class using:
```python
pol = PolarVortex(pv, uwind)
```
Get equivalent lqtitude for the provided vorticity data as:
```python
eql = pol.get_eql()
```
If you want to get both equivalent latitude and Vortex edge, just use:
```python
eql = pol.get_edge(min_eql=30)
```
#### Southern Hemisphere
Flip pv and uwind along latitude dimension and multiply pv by -1. All other things will be the same.
"""
from .pyvortex import PolarVortex
| 22.844444 | 287 | 0.737354 |
from .pyvortex import PolarVortex
| true | true |
f717e10070ef6a2f208a3ff8fb842d2f4dcf4f84 | 3,440 | py | Python | simulation/simulation.py | cyberImperial/attack-graphs | 40c5c2bcc3eaf01c484e51d8339d29da5154dd42 | [
"MIT"
] | 18 | 2018-02-21T13:14:11.000Z | 2021-07-25T05:15:56.000Z | simulation/simulation.py | BenDerPan/attack-graphs | 40c5c2bcc3eaf01c484e51d8339d29da5154dd42 | [
"MIT"
] | 70 | 2017-10-16T22:18:26.000Z | 2020-05-11T14:01:06.000Z | simulation/simulation.py | BenDerPan/attack-graphs | 40c5c2bcc3eaf01c484e51d8339d29da5154dd42 | [
"MIT"
] | 14 | 2019-04-24T23:26:39.000Z | 2021-12-03T09:36:13.000Z | from __future__ import absolute_import
import logging
logger = logging.getLogger(__name__)
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from topology.graph.graph import Graph
from topology.graph.graph import Node
from clint.textui import colored
import json
import ast
import time
from random import randrange
class Simulation():
    """
    Class used to mock sniffer connections and ip discovery for running
    simulations.
    General description: The simulation module is lightweight and can easily
    handle overlay topologies of magnitude of thousands. The simulations are
    run on random overlay topologies with fixed number of nodes and edges.
    Random packets get generated whenever the simulation module connection
    gets a call, delayed by ``connection_timeout`` seconds, whereas each scan
    takes ``scan_timeout`` seconds.
    """
    def __init__(self, conf_file, connection_timeout = 0.5, scan_timeout = 10):
        """
        Construct a new simulation object from a given configuration file.
        :param conf_file: The configuration file must be a json that contains
            a graph. For an example see: `confs/simple.json`
        :param connection_timeout: packets get generated each
            connection_timeout seconds
        :param scan_timeout: the time to run a scan
        """
        self.connection_timeout = connection_timeout
        self.scan_timeout = scan_timeout
        # Config files are resolved relative to this module's "confs" dir.
        dir_path = os.path.dirname(os.path.realpath(__file__))
        dir_path = os.path.join(dir_path, "confs")
        with open(os.path.join(dir_path, conf_file), 'r') as f:
            # literal_eval accepts Python-dict-style files; round-tripping
            # through json normalises them into plain JSON objects.
            data = json.dumps(ast.literal_eval(f.read()))
            self.conf = json.loads(data)
            logger.info("Configuration successfully parsed...")
        self.graph = Graph.from_json(self.conf)
        logger.info("Graph successfully loaded...")
    def connection(self):
        """
        Return a Connection class. The internals of the topology module use
        only the next function from the `libpcap` Python wrapper.
        """
        def build_packet(src, dest):
            # Simulate network latency before handing back a fake packet.
            time.sleep(self.connection_timeout)
            return "header", {
                "src" : str(src),
                "dest" : str(dest)
            }
        class Connection():
            def __init__(self, graph):
                self.graph = graph
            def next(self):
                # return a new random packet sent between 2 nodes of the graph
                link_idx = randrange(len(self.graph.edges))
                for (n1, n2) in self.graph.edges:
                    if link_idx == 0:
                        return build_packet(n1.ip, n2.ip)
                    link_idx -= 1
                # Unreachable unless the edge set changed mid-iteration.
                logger.error("Simulated connection crashed.")
                raise Exception("Malformed simulation graph!")
        return Connection(self.graph)
    def discovery_ip(self, ip):
        """
        Function used as a seam instead of the original `discovery_ip` function.
        See sniffer module for more details.
        """
        logger.info(colored.cyan("Started scan."))
        time.sleep(self.scan_timeout)
        for node in self.graph.nodes:
            if Node(ip) == node:
                logger.info(colored.green("Successful scan."))
                return node.running
        # Unknown IP: report an empty service map.
        logger.info("Failed scan.")
        return {}
| 34.4 | 80 | 0.62936 | from __future__ import absolute_import
import logging
logger = logging.getLogger(__name__)
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from topology.graph.graph import Graph
from topology.graph.graph import Node
from clint.textui import colored
import json
import ast
import time
from random import randrange
class Simulation():
def __init__(self, conf_file, connection_timeout = 0.5, scan_timeout = 10):
self.connection_timeout = connection_timeout
self.scan_timeout = scan_timeout
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.path.join(dir_path, "confs")
with open(os.path.join(dir_path, conf_file), 'r') as f:
data = json.dumps(ast.literal_eval(f.read()))
self.conf = json.loads(data)
logger.info("Configuration successfully parsed...")
self.graph = Graph.from_json(self.conf)
logger.info("Graph successfully loaded...")
def connection(self):
def build_packet(src, dest):
time.sleep(self.connection_timeout)
return "header", {
"src" : str(src),
"dest" : str(dest)
}
class Connection():
def __init__(self, graph):
self.graph = graph
def next(self):
link_idx = randrange(len(self.graph.edges))
for (n1, n2) in self.graph.edges:
if link_idx == 0:
return build_packet(n1.ip, n2.ip)
link_idx -= 1
logger.error("Simulated connection crashed.")
raise Exception("Malformed simulation graph!")
return Connection(self.graph)
def discovery_ip(self, ip):
logger.info(colored.cyan("Started scan."))
time.sleep(self.scan_timeout)
for node in self.graph.nodes:
if Node(ip) == node:
logger.info(colored.green("Successful scan."))
return node.running
logger.info("Failed scan.")
return {}
| true | true |
f717e11a8a97f2e8f936ce7233ccad30aa232626 | 7,806 | py | Python | examples/pwr_run/checkpointing/final_trace/top50/job48.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/final_trace/top50/job48.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/final_trace/top50/job48.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.resnet import ResNet50, ResNet101, ResNet152
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
# CLI: test-case label, resume flag, GPU pinning, and the scheduler host that
# receives progress messages over port 10002.
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
# Pin this process to the requested GPU before TF initializes CUDA.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters (hard-coded per generated job script)
batch_size = 32
args_lr = 0.0014
args_model = 'resnet101'
epoch_begin_time = 0
# job name is derived from the script filename, e.g. 'job48'
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_final4/' + job_name + '*'
total_epochs = 36
starting_epoch = 0
# first step is to update the PID so the scheduler can signal this process
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
    # checkpoint files are named '<job>_<epoch>.h5'; recover the epoch index
    save_file = glob.glob(save_files)[0]
#    epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
    starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
    x_train_mean = np.mean(x_train, axis=0)
    x_train -= x_train_mean
    x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
    # b_end / c_end bracket the checkpoint-load phase for the scheduler's timing
    print('resume from checkpoint')
    message = job_name + ' b_end'
    send_signal.send(args.node, 10002, message)
    model = keras.models.load_model(save_file)
    message = job_name + ' c_end'
    send_signal.send(args.node, 10002, message)
else:
    print('train from start')
    model = models.Sequential()
    # select backbone by the depth substring of args_model
    if '50' in args_model:
        base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
    elif '101' in args_model:
        base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
    elif '152' in args_model:
        base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
    #base_model.summary()
    #pdb.set_trace()
    #model.add(layers.UpSampling2D((2,2)))
    #model.add(layers.UpSampling2D((2,2)))
    #model.add(layers.UpSampling2D((2,2)))
    model.add(base_model)
    model.add(layers.Flatten())
    #model.add(layers.BatchNormalization())
    #model.add(layers.Dense(128, activation='relu'))
    #model.add(layers.Dropout(0.5))
    #model.add(layers.BatchNormalization())
    #model.add(layers.Dense(64, activation='relu'))
    #model.add(layers.Dropout(0.5))
    #model.add(layers.BatchNormalization())
    model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=args_lr),
                  metrics=['accuracy'])
    #model.summary()
    print(model_type)
#pdb.set_trace()
# epoch index updated by the PrintEpoch callback; read by terminateProcess
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
    """SIGTERM handler: report wasted epoch time, checkpoint the model, and exit.

    Invoked when the scheduler preempts this job with `kill -15`. Saves the
    model as '<job>_<current_epoch>.h5' so a later --resume run can continue.
    """
    # first record the wasted epoch time (time spent in the unfinished epoch)
    global epoch_begin_time
    if epoch_begin_time == 0:
        epoch_waste_time = 0
    else:
        epoch_waste_time = int(time.time() - epoch_begin_time)
    message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
    if epoch_waste_time > 0:
        send_signal.send(args.node, 10002, message)
    print('checkpointing the model triggered by kill -15 signal')
    # delete whatever checkpoint that already exists (only one is kept)
    for f in glob.glob(save_files):
        os.remove(f)
    model.save('/scratch/li.baol/checkpoint_final4/' + job_name + '_' + str(current_epoch) + '.h5')
    print ('(SIGTERM) terminating the process')
    message = job_name + ' checkpoint'
    send_signal.send(args.node, 10002, message)
    sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
# Per-job TensorBoard log directory, grouped by test-case name.
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
    """Keras callback that reports epoch progress to the scheduler node.

    Messages sent (over port 10002): 'd_end' (resume warm-up done),
    'ckpt_qual' (checkpointing is now meaningful), '1st_epoch <secs>'
    (duration of the first epoch), and 'completion <fraction>'.
    """
    def on_epoch_begin(self, epoch, logs=None):
        global current_epoch, first_epoch_start
        #remaining_epochs = epochs - epoch
        # current_epoch is read by the SIGTERM handler when naming checkpoints
        current_epoch = epoch
        print('current epoch ' + str(current_epoch))
        global epoch_begin_time
        epoch_begin_time = time.time()
        if epoch == starting_epoch and args.resume:
            first_epoch_start = time.time()
            message = job_name + ' d_end'
            send_signal.send(args.node, 10002, message)
        elif epoch == starting_epoch:
            first_epoch_start = time.time()
        if epoch == starting_epoch:
            # send signal to indicate checkpoint is qualified
            message = job_name + ' ckpt_qual'
            send_signal.send(args.node, 10002, message)
    def on_epoch_end(self, epoch, logs=None):
        if epoch == starting_epoch:
            first_epoch_time = int(time.time() - first_epoch_start)
            message = job_name + ' 1st_epoch ' + str(first_epoch_time)
            send_signal.send(args.node, 10002, message)
        # fraction of the (halved) epoch budget completed so far
        progress = round((epoch+1) / round(total_epochs/2), 2)
        message = job_name + ' completion ' + str(progress)
        send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training; note only half of total_epochs is run by this script.
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=round(total_epochs/2),
          validation_data=(x_test, y_test),
          shuffle=True,
          callbacks=callbacks,
          initial_epoch=starting_epoch,
          verbose=1
          )
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| 32.936709 | 118 | 0.691135 |
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.resnet import ResNet50, ResNet101, ResNet152
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
batch_size = 32
args_lr = 0.0014
args_model = 'resnet101'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_final4/' + job_name + '*'
total_epochs = 36
starting_epoch = 0
pid = os.getpid()
message = job_name + ' pid ' + str(pid)
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
subtract_pixel_mean = True
n = 3
model_type = args.tc
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '50' in args_model:
base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '101' in args_model:
base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '152' in args_model:
base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
print(model_type)
current_epoch = 0
| true | true |
f717e184728b5d47b6b0a24c1fd6cd16b391b36a | 35,865 | py | Python | pycromanager/zmq.py | ilyasdc/pycro-manager | 5f0153e8a90104eb8715348c6eb22c4d8fdee477 | [
"BSD-3-Clause"
] | null | null | null | pycromanager/zmq.py | ilyasdc/pycro-manager | 5f0153e8a90104eb8715348c6eb22c4d8fdee477 | [
"BSD-3-Clause"
] | null | null | null | pycromanager/zmq.py | ilyasdc/pycro-manager | 5f0153e8a90104eb8715348c6eb22c4d8fdee477 | [
"BSD-3-Clause"
] | null | null | null | import json
import re
import time
import typing
import warnings
import inspect
import numpy as np
import zmq
from weakref import WeakSet
import threading
import copy
import sys
from threading import Lock
class DataSocket:
    """
    Wrapper for ZMQ socket that sends and receives dictionaries.
    Includes ZMQ client, push, and pull sockets.

    Binary payloads (``bytes`` / numpy arrays) are stripped out of the message
    dictionary before JSON serialization and transmitted as separate multipart
    frames, keyed by a randomly generated 32-bit identifier.
    """

    def __init__(self, context, port, type, debug=False, ip_address="127.0.0.1"):
        # request reply socket
        self._socket = context.socket(type)
        self._debug = debug
        # Java-object shadows register themselves here so their Java-side
        # destructors can be invoked before this socket shuts down.
        # NOTE(review): this is a strong set (WeakSet is imported but unused),
        # so registered objects live until close().
        self._java_objects = set()
        self._port = port
        self._close_lock = Lock()
        self._closed = False
        if type == zmq.PUSH:
            if debug:
                print("binding {}".format(port))
            self._socket.bind("tcp://{}:{}".format(ip_address, port))
        else:
            if debug:
                print("connecting {}".format(port))
            self._socket.connect("tcp://{}:{}".format(ip_address, port))

    def _register_java_object(self, object):
        # Track shadow objects that must be released before the socket closes.
        self._java_objects.add(object)

    def _convert_np_to_python(self, d):
        """
        recursively search dictionary and convert any values from numpy floats/ints to
        python floats/ints so they can be json serialized
        :return:
        """
        if type(d) != dict:
            return
        for k, v in d.items():
            if isinstance(v, dict):
                self._convert_np_to_python(v)
            elif type(v) == list:
                for e in v:
                    self._convert_np_to_python(e)
            elif np.issubdtype(type(v), np.floating):
                d[k] = float(v)
            elif np.issubdtype(type(v), np.integer):
                d[k] = int(v)

    def _make_array_identifier(self, entry):
        """
        make a string to replace bytes data or numpy array in message, which encode data type if numpy
        """
        # make up a random 32 bit int as the identifier
        # TODO: change to simple counting
        identifier = np.random.randint(-(2 ** 31), 2 ** 31 - 1, 1, dtype=np.int32)[0]
        # placeholder format is '@{identifier}_{bytes_per_pixel}'; raw bytes get
        # itemsize 0, numpy arrays carry their dtype's itemsize
        return identifier, "@" + str(int(identifier)) + "_" + str(
            0 if isinstance(entry, bytes) else entry.dtype.itemsize
        )

    def _remove_bytes(self, bytes_data, structure):
        """Recursively swap bytes/ndarray leaves of *structure* for string
        placeholders, appending (identifier, data) pairs to *bytes_data*."""
        if isinstance(structure, list):
            for i, entry in enumerate(structure):
                if isinstance(entry, bytes) or isinstance(entry, np.ndarray):
                    int_id, str_id = self._make_array_identifier(entry)
                    structure[i] = str_id
                    bytes_data.append((int_id, entry))
                elif isinstance(entry, list) or isinstance(entry, dict):
                    self._remove_bytes(bytes_data, entry)
        elif isinstance(structure, dict):
            for key in structure.keys():
                entry = structure[key]
                if isinstance(entry, bytes) or isinstance(entry, np.ndarray):
                    int_id, str_id = self._make_array_identifier(entry)
                    structure[key] = str_id
                    bytes_data.append((int_id, entry))
                elif isinstance(entry, list) or isinstance(entry, dict):
                    self._remove_bytes(bytes_data, structure[key])

    def send(self, message, timeout=0):
        """Send *message* (a dict) as a multipart ZMQ message.

        With timeout == 0 this blocks; otherwise it retries non-blocking sends
        for up to *timeout* ms and returns False on failure, True on success.
        """
        if message is None:
            message = {}
        # make sure any np types convert to python types so they can be json serialized
        self._convert_np_to_python(message)
        # Send binary data in seperate messages so it doesnt need to be json serialized
        bytes_data = []
        self._remove_bytes(bytes_data, message)
        message_string = json.dumps(message)
        if self._debug:
            print("DEBUG, sending: {}".format(message))
        # convert keys to byte array; frames alternate identifier, payload
        key_vals = [(identifier.tobytes(), value) for identifier, value in bytes_data]
        message_parts = [bytes(message_string, "iso-8859-1")] + [
            item for keyval in key_vals for item in keyval
        ]
        if timeout == 0:
            self._socket.send_multipart(message_parts)
        else:
            start = time.time()
            while 1000 * (time.time() - start) < timeout:
                try:
                    self._socket.send_multipart(message_parts, flags=zmq.NOBLOCK)
                    return True
                except zmq.ZMQError:
                    pass  # ignore, keep trying
            return False

    def _replace_bytes(self, dict_or_list, hash, value):
        """
        Replace placeholders for byte arrays in JSON message with their actual values
        """
        if isinstance(dict_or_list, dict):
            for key in dict_or_list:
                if isinstance(dict_or_list[key], str) and "@" in dict_or_list[key]:
                    hash_in_message = int(
                        dict_or_list[key].split("@")[1], 16
                    )  # interpret hex hash string
                    if hash == hash_in_message:
                        dict_or_list[key] = value
                        return
                elif isinstance(dict_or_list[key], list) or isinstance(dict_or_list[key], dict):
                    self._replace_bytes(dict_or_list[key], hash, value)
        elif isinstance(dict_or_list, list):
            for i, entry in enumerate(dict_or_list):
                # BUG FIX: this branch previously indexed the list with the
                # string entry itself ("@" in dict_or_list[entry]), which raised
                # TypeError whenever a placeholder appeared inside a list.
                if isinstance(entry, str) and "@" in entry:
                    hash_in_message = int(entry.split("@")[1], 16)  # interpret hex hash string
                    if hash == hash_in_message:
                        dict_or_list[i] = value
                        return
                elif isinstance(entry, list) or isinstance(entry, dict):
                    self._replace_bytes(entry, hash, value)

    def receive(self, timeout=0):
        """Receive a multipart message and return it as a dict.

        With timeout == 0 this blocks; otherwise it polls for up to *timeout*
        ms and returns None if nothing arrived. Raises if the message encodes
        a Java-side exception.
        """
        if timeout == 0:
            reply = self._socket.recv_multipart()
        else:
            start = time.time()
            reply = None
            while 1000 * (time.time() - start) < timeout:
                try:
                    reply = self._socket.recv_multipart(flags=zmq.NOBLOCK)
                    if reply is not None:
                        break
                except zmq.ZMQError:
                    pass  # ignore, keep trying
            if reply is None:
                return reply
        message = json.loads(reply[0].decode("iso-8859-1"))
        # replace any byte data placeholders with the byte data itself
        for i in np.arange(1, len(reply), 2):
            # messages come in pairs: first is hash, second it byte data
            identity_hash = int.from_bytes(reply[i], byteorder=sys.byteorder)
            value = reply[i + 1]
            self._replace_bytes(message, identity_hash, value)
        if self._debug:
            print("DEBUG, recieved: {}".format(message))
        self._check_exception(message)
        return message

    def _check_exception(self, response):
        # Java-side errors are serialized as {"type": "exception", "value": ...}
        if "type" in response and response["type"] == "exception":
            raise Exception(response["value"])

    def __del__(self):
        self.close()  # make sure it closes properly

    def close(self):
        """Release all registered Java objects, then close the socket (idempotent)."""
        with self._close_lock:
            if not self._closed:
                for java_object in self._java_objects:
                    java_object._close()
                    del java_object  # potentially redundant, trying to fix closing race condition
                self._java_objects = None
                self._socket.close()
                while not self._socket.closed:
                    time.sleep(0.01)
                self._socket = None
                if self._debug:
                    print('closed socket {}'.format(self._port))
                self._closed = True
class Bridge:
    """
    Create an object which acts as a client to a corresponding server (running in a Java process).
    This enables construction and interaction with arbitrary java objects. Each bridge object should
    be run using a context manager (i.e. `with Bridge() as b:`) or bridge.close() should be explicitly
    called when finished
    """

    DEFAULT_PORT = 4827
    DEFAULT_TIMEOUT = 500
    _EXPECTED_ZMQ_SERVER_VERSION = "4.2.0"

    # per-thread cache of {port: Bridge} plus reference counts, so each thread
    # reuses a single bridge per port
    thread_local = threading.local()

    def __new__(cls, *args, **kwargs):
        """
        Only one instance of Bridge per a thread
        """
        port = kwargs.get('port', Bridge.DEFAULT_PORT)
        if hasattr(Bridge.thread_local, "bridge") and Bridge.thread_local.bridge is not None and port in Bridge.thread_local.bridge:
            # reuse the cached bridge for this (thread, port); bump refcount
            Bridge.thread_local.bridge_count[port] += 1
            return Bridge.thread_local.bridge[port]
        else:
            if (not hasattr(Bridge.thread_local, "bridge_count")) or Bridge.thread_local.bridge_count is None:
                Bridge.thread_local.bridge_count = {}
            Bridge.thread_local.bridge_count[port] = 1
            return super(Bridge, cls).__new__(cls)

    def __init__(
        self, port: int=DEFAULT_PORT, convert_camel_case: bool=True,
        debug: bool=False, ip_address: str="127.0.0.1", timeout: int=DEFAULT_TIMEOUT
    ):
        """
        Parameters
        ----------
        port : int
            The port on which the bridge operates
        convert_camel_case : bool
            If True, methods for Java objects that are passed across the bridge
            will have their names converted from camel case to underscores. i.e. class.methodName()
            becomes class.method_name()
        debug : bool
            If True print helpful stuff for debugging
        """
        self._ip_address = ip_address
        self._port = port
        self._closed = False
        if not hasattr(self, "_context"):
            # one ZMQ context shared by all bridges (stored on the class)
            Bridge._context = zmq.Context()
        # if hasattr(self.thread_local, "bridge") and port in self.thread_local.bridge:
        #     return ### What was this supposed to do?
        if not hasattr(Bridge.thread_local, "bridge") or Bridge.thread_local.bridge is None:
            Bridge.thread_local.bridge = {}
        Bridge.thread_local.bridge[port] = self # cache a thread-local version of the bridge
        self._convert_camel_case = convert_camel_case
        self._debug = debug
        self._timeout = timeout
        self._master_socket = DataSocket(
            self._context, port, zmq.REQ, debug=debug, ip_address=self._ip_address
        )
        # handshake: announce ourselves to the Java ZMQ server and verify version
        self._master_socket.send({"command": "connect", "debug": debug})
        self._class_factory = _JavaClassFactory()
        reply_json = self._master_socket.receive(timeout=timeout)
        if reply_json is None:
            raise TimeoutError(
                f"Socket timed out after {timeout} milliseconds. Is Micro-Manager running and is the ZMQ server on {port} option enabled?"
            )
        if reply_json["type"] == "exception":
            raise Exception(reply_json["message"])
        if "version" not in reply_json:
            reply_json["version"] = "2.0.0"  # before version was added
        if reply_json["version"] != self._EXPECTED_ZMQ_SERVER_VERSION:
            warnings.warn(
                "Version mistmatch between Java ZMQ server and Python client. "
                "\nJava ZMQ server version: {}\nPython client expected version: {}"
                "\n To fix, update to BOTH latest pycromanager and latest micro-manager nightly build".format(
                    reply_json["version"], self._EXPECTED_ZMQ_SERVER_VERSION
                )
            )

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def close(self):
        """Decrement this thread's refcount for the port; tear down when it hits 0."""
        Bridge.thread_local.bridge_count[self._port] -= 1
        if Bridge.thread_local.bridge_count[self._port] == 0:
            del Bridge.thread_local.bridge_count[self._port]
            del Bridge.thread_local.bridge[self._port]
            self._master_socket.close()
            self._master_socket = None
            self._closed = True
        if len(Bridge.thread_local.bridge) == 0:
            # no bridges left on this thread; reset the caches entirely
            Bridge.thread_local.bridge = None
            Bridge.thread_local.bridge_count = None

    def get_class(self, serialized_object) -> typing.Type["JavaObjectShadow"]:
        # Delegate to the factory, which caches generated shadow classes.
        return self._class_factory.create(
            serialized_object, convert_camel_case=self._convert_camel_case
        )

    def construct_java_object(self, classpath: str, new_socket: bool=False, args: list=None):
        """
        Create a new instance of a an object on the Java side. Returns a Python "Shadow" of the object, which behaves
        just like the object on the Java side (i.e. same methods, fields). Methods of the object can be inferred at
        runtime using iPython autocomplete

        Parameters
        ----------
        classpath : str
            Full classpath of the java object
        new_socket : bool
            If True, will create new java object on a new port so that blocking calls will not interfere
            with the bridges master port
        args : list
            list of arguments to the constructor, if applicable

        Returns
        -------
        Python "Shadow" to the Java object
        """
        if args is None:
            args = []
        # classpath_minus_class = '.'.join(classpath.split('.')[:-1])
        # query the server for constructors matching this classpath
        message = {"command": "get-constructors", "classpath": classpath}
        self._master_socket.send(message)
        constructors = self._master_socket.receive()["api"]
        methods_with_name = [m for m in constructors if m["name"] == classpath]
        if len(methods_with_name) == 0:
            raise Exception("No valid java constructor found with classpath {}".format(classpath))
        # pick the overload whose signature matches the supplied args
        valid_method_spec, deserialize_types = _check_method_args(methods_with_name, args)
        # Calling a constructor, rather than getting return from method
        message = {
            "command": "constructor",
            "classpath": classpath,
            "argument-types": valid_method_spec["arguments"],
            "argument-deserialization-types": deserialize_types,
            "arguments": _package_arguments(valid_method_spec, args),
        }
        if new_socket:
            message["new-port"] = True
        self._master_socket.send(message)
        serialized_object = self._master_socket.receive()
        if new_socket:
            # the server opened a dedicated port for this object
            socket = DataSocket(
                self._context, serialized_object["port"], zmq.REQ, ip_address=self._ip_address
            )
        else:
            socket = self._master_socket
        return self._class_factory.create(
            serialized_object, convert_camel_case=self._convert_camel_case
        )(socket=socket, serialized_object=serialized_object, bridge=self)

    def get_java_class(self, classpath: str, new_socket: bool=False):
        """
        Get an an object corresponding to a java class, for example to be used
        when calling static methods on the class directly

        Parameters
        ----------
        classpath : str
            Full classpath of the java object
        new_socket : bool
            If True, will create new java object on a new port so that blocking calls will not interfere
            with the bridges master port

        Returns
        -------
        Python "Shadow" to the Java class
        """
        message = {"command": "get-class", "classpath": classpath}
        if new_socket:
            message["new-port"] = True
        self._master_socket.send(message)
        serialized_object = self._master_socket.receive()
        if new_socket:
            socket = DataSocket(
                self._context, serialized_object["port"], zmq.REQ, ip_address=self._ip_address
            )
        else:
            socket = self._master_socket
        return self._class_factory.create(
            serialized_object, convert_camel_case=self._convert_camel_case
        )(socket=socket, serialized_object=serialized_object, bridge=self)

    def _connect_push(self, port):
        """
        Connect a push socket on the given port
        :param port:
        :return:
        """
        return DataSocket(
            self._context, port, zmq.PUSH, debug=self._debug, ip_address=self._ip_address
        )

    def _connect_pull(self, port):
        """
        Connect to a pull socket on the given port
        :param port:
        :return:
        """
        return DataSocket(
            self._context, port, zmq.PULL, debug=self._debug, ip_address=self._ip_address
        )

    def get_magellan(self):
        """
        return an instance of the Micro-Magellan API
        """
        return self.construct_java_object("org.micromanager.magellan.api.MagellanAPI")

    def get_core(self):
        """
        Connect to CMMCore and return object that has its methods
        :return: Python "shadow" object for micromanager core
        """
        # cached on the instance so repeat calls reuse the same shadow
        if hasattr(self, "core"):
            return getattr(self, "core")
        self.core = self.construct_java_object("mmcorej.CMMCore")
        return self.core

    def get_studio(self):
        """
        return an instance of the Studio object that provides access to micro-manager Java APIs
        """
        return self.construct_java_object("org.micromanager.Studio")
class _JavaClassFactory:
    """
    This class is responsible for generating subclasses of JavaObjectShadow. Each generated class is kept in a `dict`.
    If a given class has already been generate once it will be returns from the cache rather than re-generating it.
    """

    def __init__(self):
        # cache: fully qualified java class name -> generated python class
        self.classes = {}

    def create(
        self, serialized_obj: dict, convert_camel_case: bool = True
    ) -> typing.Type["JavaObjectShadow"]:
        """Create a class (or return a class from the cache) based on the contents of `serialized_object` message."""
        if serialized_obj["class"] in self.classes.keys():  # Return a cached class
            return self.classes[serialized_obj["class"]]
        else:  # Generate a new class since it wasn't found in the cache.
            _java_class: str = serialized_obj["class"]
            python_class_name_translation = _java_class.replace(
                ".", "_"
            )  # Having periods in the name would be problematic.
            _interfaces = serialized_obj["interfaces"]
            static_attributes = {"_java_class": _java_class, "_interfaces": _interfaces}
            fields = {}  # Create a dict of field names with getter and setter funcs.
            for field in serialized_obj["fields"]:
                # the Field=field default binds the loop variable at definition
                # time, avoiding the late-binding closure pitfall
                fields[field] = property(
                    fget=lambda instance, Field=field: instance._access_field(Field),
                    fset=lambda instance, val, Field=field: instance._set_field(Field, val),
                )
            methods = {}  # Create a dict of methods for the class by name.
            methodSpecs = serialized_obj["api"]
            method_names = set([m["name"] for m in methodSpecs])
            # parse method descriptions to make python stand ins
            for method_name in method_names:
                params, methods_with_name, method_name_modified = _parse_arg_names(
                    methodSpecs, method_name, convert_camel_case
                )
                return_type = methods_with_name[0]["return-type"]
                # signatures_list default captures all Java overloads for this
                # name; _translate_call picks the matching one at call time
                fn = lambda instance, *args, signatures_list=tuple(
                    methods_with_name
                ): instance._translate_call(signatures_list, args, static = _java_class == 'java.lang.Class')
                fn.__name__ = method_name_modified
                fn.__doc__ = "{}.{}: A dynamically generated Java method.".format(
                    _java_class, method_name_modified
                )
                # build a python signature so iPython autocomplete shows the
                # java parameter names and types
                sig = inspect.signature(fn)
                params = [
                    inspect.Parameter("self", inspect.Parameter.POSITIONAL_ONLY)
                ] + params  # Add `self` as the first argument.
                return_type = (
                    _JAVA_TYPE_NAME_TO_PYTHON_TYPE[return_type]
                    if return_type in _JAVA_TYPE_NAME_TO_PYTHON_TYPE
                    else return_type
                )
                fn.__signature__ = sig.replace(parameters=params, return_annotation=return_type)
                methods[method_name_modified] = fn
            newclass = type(  # Dynamically create a class to shadow a java class.
                python_class_name_translation,  # Name, based on the original java name
                (JavaObjectShadow,),  # Inheritance
                {
                    "__init__": lambda instance, socket, serialized_object, bridge: JavaObjectShadow.__init__(
                        instance, socket, serialized_object, bridge
                    ),
                    **static_attributes,
                    **fields,
                    **methods,
                },
            )
            self.classes[_java_class] = newclass
            return newclass
class JavaObjectShadow:
    """
    Generic class for serving as a python interface for a java class using a zmq server backend
    """

    _interfaces = (
        None # Subclasses should fill these out. This class should never be directly instantiated.
    )
    _java_class = None

    def __init__(self, socket, serialized_object, bridge: Bridge):
        self._socket = socket
        # hash code identifies the live object instance on the Java side
        self._hash_code = serialized_object["hash-code"]
        self._bridge = bridge
        # register objects with bridge so it can tell Java side to release them before socket shuts down
        socket._register_java_object(self)
        self._closed = False
        # atexit.register(self._close)
        self._close_lock = Lock()

    def _close(self):
        """Ask the Java side to release this object (idempotent, thread-safe)."""
        with self._close_lock:
            if self._closed:
                return
            if not hasattr(self, "_hash_code"):
                return  # constructor didnt properly finish, nothing to clean up on java side
            message = {"command": "destructor", "hash-code": self._hash_code}
            if self._bridge._debug:
                # NOTE(review): this string is built but never printed — looks
                # like a missing print() call
                "closing: {}".format(self)
            self._socket.send(message)
            reply_json = self._socket.receive()
            if reply_json["type"] == "exception":
                raise Exception(reply_json["value"])
            self._closed = True

    def __del__(self):
        """
        Tell java side this object is garbage collected so it can do the same if needed
        """
        self._close()

    def _access_field(self, name):
        """
        Return a python version of the field with a given name
        :return:
        """
        message = {"command": "get-field", "hash-code": self._hash_code, "name": name}
        self._socket.send(message)
        return self._deserialize(self._socket.receive())

    def _set_field(self, name, value):
        """
        Return a python version of the field with a given name
        :return:
        """
        message = {
            "command": "set-field",
            "hash-code": self._hash_code,
            "name": name,
            "value": _serialize_arg(value),
        }
        self._socket.send(message)
        reply = self._deserialize(self._socket.receive())

    def _translate_call(self, method_specs, fn_args: tuple, static: bool):
        """
        Translate to appropriate Java method, call it, and return converted python version of its result

        Parameters
        ----------
        args :
             args[0] is list of dictionaries of possible method specifications
        kwargs :
            hold possible polymorphic args, or none
        """
        # args that are none are placeholders to allow for polymorphism and not considered part of the spec
        # fn_args = [a for a in fn_args if a is not None]
        # select the Java overload that matches the python argument types
        valid_method_spec, deserialize_types = _check_method_args(method_specs, fn_args)
        # args are good, make call through socket, casting the correct type if needed (e.g. int to float)
        message = {
            "command": "run-method",
            "static": static,
            "hash-code": self._hash_code,
            "name": valid_method_spec["name"],
            "argument-types": valid_method_spec["arguments"],
            "argument-deserialization-types": deserialize_types,
        }
        message["arguments"] = _package_arguments(valid_method_spec, fn_args)
        if self._bridge._closed:
            raise Exception('The Bridge used to create this has been closed. Are you trying to call it outside of a "with" block?')
        self._socket.send(message)
        recieved = self._socket.receive()
        return self._deserialize(recieved)

    def _deserialize(self, json_return):
        """
        Convert a Java-side reply message into the corresponding python value.

        method_spec :
             info about the method that called it
        reply :
            bytes that represents return

        Returns
        -------
        An appropriate python type of the converted value
        """
        if json_return["type"] == "exception":
            raise Exception(json_return["value"])
        elif json_return["type"] == "null":
            return None
        elif json_return["type"] == "primitive":
            return json_return["value"]
        elif json_return["type"] == "string":
            return json_return["value"]
        elif json_return["type"] == "list":
            return [self._deserialize(obj) for obj in json_return["value"]]
        elif json_return["type"] == "object":
            if json_return["class"] == "JSONObject":
                return json.loads(json_return["value"])
            else:
                raise Exception("Unrecognized return class")
        elif json_return["type"] == "unserialized-object":
            # the Java object stays remote: wrap it in a new shadow class,
            # inheriting the socket from this (parent) object
            return self._bridge.get_class(json_return)(
                socket=self._socket, serialized_object=json_return, bridge=self._bridge
            )
        else:
            return deserialize_array(json_return)
def deserialize_array(json_return):
    """
    Convert a serialized java array to the appropriate numpy type

    Parameters
    ----------
    json_return : dict
        Message with a "type" key (e.g. "int-array") and a "value" key holding
        the raw bytes of the array.

    Returns
    -------
    A numpy array copy of the decoded buffer, or None for unrecognized types
    (matching the original implicit-None behavior).
    """
    # Java array type name -> numpy dtype string (native byte order)
    dtype_by_type = {
        "byte-array": "=u1",
        "double-array": "=f8",
        "int-array": "=u4",
        "short-array": "=u2",
        "float-array": "=f4",
    }
    # BUG FIX: "double-array" was previously missing from the accepted-type
    # list, so double arrays silently deserialized to None even though a
    # (then unreachable) handling branch existed for them.
    if json_return["type"] in dtype_by_type:
        # .copy() detaches the result from the transient message buffer
        return np.frombuffer(
            json_return["value"], dtype=dtype_by_type[json_return["type"]]
        ).copy()
def _package_arguments(valid_method_spec, fn_args):
    """Serialize the call arguments for transmission to the Java side.

    Arguments that are Java-object shadows, declared java.lang.Object, None,
    or numpy arrays are serialized unchanged; everything else is first cast
    to the Python type matching the declared Java parameter type.
    """
    packaged = []
    for java_type, value in zip(valid_method_spec["arguments"], fn_args):
        # These four cases all serialize the value as-is. The check order is
        # preserved so an unknown java_type still raises KeyError at the same
        # point it did before.
        send_unchanged = (
            isinstance(value, JavaObjectShadow)
            or _JAVA_TYPE_NAME_TO_PYTHON_TYPE[java_type] is object
            or value is None
            or isinstance(value, np.ndarray)
        )
        if send_unchanged:
            packaged.append(_serialize_arg(value))
        else:
            # Cast (e.g. int -> float) before serializing.
            packaged.append(_serialize_arg(_JAVA_TYPE_NAME_TO_PYTHON_TYPE[java_type](value)))
    return packaged
def _serialize_arg(arg):
if arg is None:
return None
if type(arg) in [bool, str, int, float]:
return arg # json handles serialization
elif type(arg) == np.ndarray:
return arg.tobytes()
elif isinstance(arg, JavaObjectShadow):
return {"hash-code": arg._hash_code}
else:
raise Exception("Unknown argumetn type")
def _check_single_method_spec(method_spec, fn_args):
"""
Check if a single method specificiation is compatible with the arguments the function recieved
Parameters
----------
method_spec :
fn_args :
"""
if len(method_spec["arguments"]) != len(fn_args):
return False
for arg_java_type, arg_val in zip(method_spec["arguments"], fn_args):
if isinstance(arg_val, JavaObjectShadow):
if arg_java_type not in arg_val._interfaces:
# check that it shadows object of the correct type
return False
elif type(arg_val) == np.ndarray:
# For ND Arrays, need to make sure data types match
if (
arg_java_type != "java.lang.Object"
and arg_val.dtype.type != _JAVA_ARRAY_TYPE_NUMPY_DTYPE[arg_java_type]
):
return False
elif not any(
[
isinstance(arg_val, acceptable_type)
for acceptable_type in _JAVA_TYPE_NAME_TO_CASTABLE_PYTHON_TYPE[arg_java_type]
]
) and not (
arg_val is None and arg_java_type in _JAVA_NON_PRIMITIVES
): # could be null if its an object
# if a type that gets converted
return False
return True
def _check_method_args(method_specs, fn_args):
"""
Compare python arguments to java arguments to find correct function to call
Parameters
----------
method_specs :
fn_args :
Returns
-------
one of the method_specs that is valid
"""
valid_method_spec = None
for method_spec in method_specs:
if _check_single_method_spec(method_spec, fn_args):
valid_method_spec = method_spec
break
if valid_method_spec is None:
raise Exception(
"Incorrect arguments. \nExpected {} \nGot {}".format(
" or ".join([", ".join(method_spec["arguments"]) for method_spec in method_specs]),
", ".join([str(type(a)) for a in fn_args]),
)
)
# subclass NDArrays to the appropriate data type so they dont get incorrectly reconstructed as objects
valid_method_spec = copy.deepcopy(valid_method_spec)
deserialize_types = []
for java_arg_class, python_arg_val in zip(valid_method_spec["arguments"], fn_args):
if isinstance(python_arg_val, np.ndarray):
deserialize_types.append(
[
ja
for ja, npdt in zip(
_JAVA_ARRAY_TYPE_NUMPY_DTYPE.keys(), _JAVA_ARRAY_TYPE_NUMPY_DTYPE.values()
)
if python_arg_val.dtype.type == npdt
][0]
)
else:
deserialize_types.append(java_arg_class)
return valid_method_spec, deserialize_types
def _parse_arg_names(methods, method_name, convert_camel_case):
method_name_modified = (
_camel_case_2_snake_case(method_name) if convert_camel_case else method_name
)
# all methods with this name and different argument lists
methods_with_name = [m for m in methods if m["name"] == method_name]
min_required_args = (
0
if len(methods_with_name) == 1 and len(methods_with_name[0]["arguments"]) == 0
else min([len(m["arguments"]) for m in methods_with_name])
)
# sort with largest number of args last so lambda at end gets max num args
methods_with_name.sort(key=lambda val: len(val["arguments"]))
method = methods_with_name[-1] # We only need to evaluate the overload with the most arguments.
params = []
unique_argument_names = []
for arg_index, typ in enumerate(method["arguments"]):
hint = _CLASS_NAME_MAPPING[typ] if typ in _CLASS_NAME_MAPPING else "object"
python_type = (
_JAVA_TYPE_NAME_TO_PYTHON_TYPE[typ] if typ in _JAVA_TYPE_NAME_TO_PYTHON_TYPE else typ
)
if hint in unique_argument_names: # append numbers to end so arg hints have unique names
i = 1
while hint + str(i) in unique_argument_names:
i += 1
arg_name = hint + str(i)
else:
arg_name = hint
unique_argument_names.append(arg_name)
# this is how overloading is handled for now, by making default arguments as none, but
# it might be better to explicitly compare argument types
if arg_index >= min_required_args:
default_arg_value = None
else:
default_arg_value = inspect.Parameter.empty
params.append(
inspect.Parameter(
name=arg_name,
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
default=default_arg_value,
annotation=python_type,
)
)
return params, methods_with_name, method_name_modified
def _camel_case_2_snake_case(name):
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
# Used for generating type hints in arguments
_CLASS_NAME_MAPPING = {
"byte[]": "uint8array",
"double[]": "float64_array",
"int[]": "uint32_array",
"short[]": "int16_array",
"char[]": "int16_array",
"float[]": "int16_array",
"long[]": "int16_array",
"java.lang.String": "string",
"boolean": "boolean",
"double": "float",
"float": "float",
"int": "int",
"long": "int",
"short": "int",
"void": "void",
}
#Used for deserializing java arrarys into numpy arrays
_JAVA_ARRAY_TYPE_NUMPY_DTYPE = {
"boolean[]": np.bool,
"byte[]": np.uint8,
"short[]": np.int16,
"char[]": np.uint16,
"float[]": np.float32,
"double[]": np.float64,
"int[]": np.int32,
"long[]": np.int64,
}
#used for figuring our which java methods to call and if python args match
_JAVA_TYPE_NAME_TO_PYTHON_TYPE = {
"boolean": bool,
"double": float,
"float": float,
#maybe could make these more specific to array type?
"byte[]": np.ndarray,
"short[]": np.ndarray,
"double[]": np.ndarray,
"int[]": np.ndarray,
"char[]": np.ndarray,
"float[]": np.ndarray,
"long[]": np.ndarray,
"int": int,
"java.lang.String": str,
"long": int,
"short": int,
"char": int,
"byte": int,
"void": None,
"java.lang.Object": object,
}
# type conversions that allow for autocasting
_JAVA_TYPE_NAME_TO_CASTABLE_PYTHON_TYPE = {
"boolean": {bool},
"byte[]": {np.ndarray},
"double": {float, int},
"double[]": {np.ndarray},
"float": {float},
"int": {int},
"int[]": {np.ndarray},
"java.lang.String": {str},
"long": {int},
"short": {int},
"char": {int},
"byte": {int},
"void": {None},
"java.lang.Object": {object},
}
_JAVA_NON_PRIMITIVES = {"byte[]", "double[]", "int[]", "short[]", "char[]", "long[]", "boolean[]",
"java.lang.String", "java.lang.Object"}
if __name__ == "__main__":
    # Smoke test: connect to a running Micro-Manager ZMQ server and try to
    # fetch the studio and core objects, printing failures instead of dying.
    import traceback

    b = Bridge()
    try:
        s = b.get_studio()
    except Exception:
        # BUG FIX: was a bare "except:", which also swallowed
        # KeyboardInterrupt and SystemExit.
        traceback.print_exc()
    try:
        c = b.get_core()
    except Exception:
        traceback.print_exc()
    # presumably a breakpoint anchor for interactive debugging — TODO confirm
    a = 1
| 39.026115 | 138 | 0.597992 | import json
import re
import time
import typing
import warnings
import inspect
import numpy as np
import zmq
from weakref import WeakSet
import threading
import copy
import sys
from threading import Lock
class DataSocket:
def __init__(self, context, port, type, debug=False, ip_address="127.0.0.1"):
self._socket = context.socket(type)
self._debug = debug
self._java_objects = set()
self._port = port
self._close_lock = Lock()
self._closed = False
if type == zmq.PUSH:
if debug:
print("binding {}".format(port))
self._socket.bind("tcp://{}:{}".format(ip_address, port))
else:
if debug:
print("connecting {}".format(port))
self._socket.connect("tcp://{}:{}".format(ip_address, port))
def _register_java_object(self, object):
self._java_objects.add(object)
def _convert_np_to_python(self, d):
if type(d) != dict:
return
for k, v in d.items():
if isinstance(v, dict):
self._convert_np_to_python(v)
elif type(v) == list:
for e in v:
self._convert_np_to_python(e)
elif np.issubdtype(type(v), np.floating):
d[k] = float(v)
elif np.issubdtype(type(v), np.integer):
d[k] = int(v)
def _make_array_identifier(self, entry):
identifier = np.random.randint(-(2 ** 31), 2 ** 31 - 1, 1, dtype=np.int32)[0]
return identifier, "@" + str(int(identifier)) + "_" + str(
0 if isinstance(entry, bytes) else entry.dtype.itemsize
)
def _remove_bytes(self, bytes_data, structure):
if isinstance(structure, list):
for i, entry in enumerate(structure):
if isinstance(entry, bytes) or isinstance(entry, np.ndarray):
int_id, str_id = self._make_array_identifier(entry)
structure[i] = str_id
bytes_data.append((int_id, entry))
elif isinstance(entry, list) or isinstance(entry, dict):
self._remove_bytes(bytes_data, entry)
elif isinstance(structure, dict):
for key in structure.keys():
entry = structure[key]
if isinstance(entry, bytes) or isinstance(entry, np.ndarray):
int_id, str_id = self._make_array_identifier(entry)
structure[key] = str_id
bytes_data.append((int_id, entry))
elif isinstance(entry, list) or isinstance(entry, dict):
self._remove_bytes(bytes_data, structure[key])
def send(self, message, timeout=0):
if message is None:
message = {}
self._convert_np_to_python(message)
bytes_data = []
self._remove_bytes(bytes_data, message)
message_string = json.dumps(message)
if self._debug:
print("DEBUG, sending: {}".format(message))
key_vals = [(identifier.tobytes(), value) for identifier, value in bytes_data]
message_parts = [bytes(message_string, "iso-8859-1")] + [
item for keyval in key_vals for item in keyval
]
if timeout == 0:
self._socket.send_multipart(message_parts)
else:
start = time.time()
while 1000 * (time.time() - start) < timeout:
try:
self._socket.send_multipart(message_parts, flags=zmq.NOBLOCK)
return True
except zmq.ZMQError:
pass
return False
def _replace_bytes(self, dict_or_list, hash, value):
if isinstance(dict_or_list, dict):
for key in dict_or_list:
if isinstance(dict_or_list[key], str) and "@" in dict_or_list[key]:
hash_in_message = int(
dict_or_list[key].split("@")[1], 16
)
if hash == hash_in_message:
dict_or_list[key] = value
return
elif isinstance(dict_or_list[key], list) or isinstance(dict_or_list[key], dict):
self._replace_bytes(dict_or_list[key], hash, value)
elif isinstance(dict_or_list, list):
for i, entry in enumerate(dict_or_list):
if isinstance(entry, str) and "@" in dict_or_list[entry]:
hash_in_message = int(entry.split("@")[1], 16)
if hash == hash_in_message:
dict_or_list[i] = value
return
elif isinstance(entry, list) or isinstance(entry, dict):
self._replace_bytes(entry, hash, value)
def receive(self, timeout=0):
if timeout == 0:
reply = self._socket.recv_multipart()
else:
start = time.time()
reply = None
while 1000 * (time.time() - start) < timeout:
try:
reply = self._socket.recv_multipart(flags=zmq.NOBLOCK)
if reply is not None:
break
except zmq.ZMQError:
pass
if reply is None:
return reply
message = json.loads(reply[0].decode("iso-8859-1"))
for i in np.arange(1, len(reply), 2):
identity_hash = int.from_bytes(reply[i], byteorder=sys.byteorder)
value = reply[i + 1]
self._replace_bytes(message, identity_hash, value)
if self._debug:
print("DEBUG, recieved: {}".format(message))
self._check_exception(message)
return message
def _check_exception(self, response):
if "type" in response and response["type"] == "exception":
raise Exception(response["value"])
def __del__(self):
self.close()
def close(self):
with self._close_lock:
if not self._closed:
for java_object in self._java_objects:
java_object._close()
del java_object
self._java_objects = None
self._socket.close()
while not self._socket.closed:
time.sleep(0.01)
self._socket = None
if self._debug:
print('closed socket {}'.format(self._port))
self._closed = True
class Bridge:
DEFAULT_PORT = 4827
DEFAULT_TIMEOUT = 500
_EXPECTED_ZMQ_SERVER_VERSION = "4.2.0"
thread_local = threading.local()
def __new__(cls, *args, **kwargs):
port = kwargs.get('port', Bridge.DEFAULT_PORT)
if hasattr(Bridge.thread_local, "bridge") and Bridge.thread_local.bridge is not None and port in Bridge.thread_local.bridge:
Bridge.thread_local.bridge_count[port] += 1
return Bridge.thread_local.bridge[port]
else:
if (not hasattr(Bridge.thread_local, "bridge_count")) or Bridge.thread_local.bridge_count is None:
Bridge.thread_local.bridge_count = {}
Bridge.thread_local.bridge_count[port] = 1
return super(Bridge, cls).__new__(cls)
def __init__(
self, port: int=DEFAULT_PORT, convert_camel_case: bool=True,
debug: bool=False, ip_address: str="127.0.0.1", timeout: int=DEFAULT_TIMEOUT
):
self._ip_address = ip_address
self._port = port
self._closed = False
if not hasattr(self, "_context"):
Bridge._context = zmq.Context()
Bridge.thread_local.bridge = {}
Bridge.thread_local.bridge[port] = self
self._convert_camel_case = convert_camel_case
self._debug = debug
self._timeout = timeout
self._master_socket = DataSocket(
self._context, port, zmq.REQ, debug=debug, ip_address=self._ip_address
)
self._master_socket.send({"command": "connect", "debug": debug})
self._class_factory = _JavaClassFactory()
reply_json = self._master_socket.receive(timeout=timeout)
if reply_json is None:
raise TimeoutError(
f"Socket timed out after {timeout} milliseconds. Is Micro-Manager running and is the ZMQ server on {port} option enabled?"
)
if reply_json["type"] == "exception":
raise Exception(reply_json["message"])
if "version" not in reply_json:
reply_json["version"] = "2.0.0"
if reply_json["version"] != self._EXPECTED_ZMQ_SERVER_VERSION:
warnings.warn(
"Version mistmatch between Java ZMQ server and Python client. "
"\nJava ZMQ server version: {}\nPython client expected version: {}"
"\n To fix, update to BOTH latest pycromanager and latest micro-manager nightly build".format(
reply_json["version"], self._EXPECTED_ZMQ_SERVER_VERSION
)
)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
Bridge.thread_local.bridge_count[self._port] -= 1
if Bridge.thread_local.bridge_count[self._port] == 0:
del Bridge.thread_local.bridge_count[self._port]
del Bridge.thread_local.bridge[self._port]
self._master_socket.close()
self._master_socket = None
self._closed = True
if len(Bridge.thread_local.bridge) == 0:
Bridge.thread_local.bridge = None
Bridge.thread_local.bridge_count = None
def get_class(self, serialized_object) -> typing.Type["JavaObjectShadow"]:
return self._class_factory.create(
serialized_object, convert_camel_case=self._convert_camel_case
)
def construct_java_object(self, classpath: str, new_socket: bool=False, args: list=None):
if args is None:
args = []
message = {"command": "get-constructors", "classpath": classpath}
self._master_socket.send(message)
constructors = self._master_socket.receive()["api"]
methods_with_name = [m for m in constructors if m["name"] == classpath]
if len(methods_with_name) == 0:
raise Exception("No valid java constructor found with classpath {}".format(classpath))
valid_method_spec, deserialize_types = _check_method_args(methods_with_name, args)
message = {
"command": "constructor",
"classpath": classpath,
"argument-types": valid_method_spec["arguments"],
"argument-deserialization-types": deserialize_types,
"arguments": _package_arguments(valid_method_spec, args),
}
if new_socket:
message["new-port"] = True
self._master_socket.send(message)
serialized_object = self._master_socket.receive()
if new_socket:
socket = DataSocket(
self._context, serialized_object["port"], zmq.REQ, ip_address=self._ip_address
)
else:
socket = self._master_socket
return self._class_factory.create(
serialized_object, convert_camel_case=self._convert_camel_case
)(socket=socket, serialized_object=serialized_object, bridge=self)
def get_java_class(self, classpath: str, new_socket: bool=False):
message = {"command": "get-class", "classpath": classpath}
if new_socket:
message["new-port"] = True
self._master_socket.send(message)
serialized_object = self._master_socket.receive()
if new_socket:
socket = DataSocket(
self._context, serialized_object["port"], zmq.REQ, ip_address=self._ip_address
)
else:
socket = self._master_socket
return self._class_factory.create(
serialized_object, convert_camel_case=self._convert_camel_case
)(socket=socket, serialized_object=serialized_object, bridge=self)
def _connect_push(self, port):
return DataSocket(
self._context, port, zmq.PUSH, debug=self._debug, ip_address=self._ip_address
)
def _connect_pull(self, port):
return DataSocket(
self._context, port, zmq.PULL, debug=self._debug, ip_address=self._ip_address
)
def get_magellan(self):
return self.construct_java_object("org.micromanager.magellan.api.MagellanAPI")
def get_core(self):
if hasattr(self, "core"):
return getattr(self, "core")
self.core = self.construct_java_object("mmcorej.CMMCore")
return self.core
def get_studio(self):
return self.construct_java_object("org.micromanager.Studio")
class _JavaClassFactory:
def __init__(self):
self.classes = {}
def create(
self, serialized_obj: dict, convert_camel_case: bool = True
) -> typing.Type["JavaObjectShadow"]:
if serialized_obj["class"] in self.classes.keys():
return self.classes[serialized_obj["class"]]
else:
_java_class: str = serialized_obj["class"]
python_class_name_translation = _java_class.replace(
".", "_"
) # Having periods in the name would be problematic.
_interfaces = serialized_obj["interfaces"]
static_attributes = {"_java_class": _java_class, "_interfaces": _interfaces}
fields = {} # Create a dict of field names with getter and setter funcs.
for field in serialized_obj["fields"]:
fields[field] = property(
fget=lambda instance, Field=field: instance._access_field(Field),
fset=lambda instance, val, Field=field: instance._set_field(Field, val),
)
methods = {} # Create a dict of methods for the class by name.
methodSpecs = serialized_obj["api"]
method_names = set([m["name"] for m in methodSpecs])
# parse method descriptions to make python stand ins
for method_name in method_names:
params, methods_with_name, method_name_modified = _parse_arg_names(
methodSpecs, method_name, convert_camel_case
)
return_type = methods_with_name[0]["return-type"]
fn = lambda instance, *args, signatures_list=tuple(
methods_with_name
): instance._translate_call(signatures_list, args, static = _java_class == 'java.lang.Class')
fn.__name__ = method_name_modified
fn.__doc__ = "{}.{}: A dynamically generated Java method.".format(
_java_class, method_name_modified
)
sig = inspect.signature(fn)
params = [
inspect.Parameter("self", inspect.Parameter.POSITIONAL_ONLY)
] + params # Add `self` as the first argument.
return_type = (
_JAVA_TYPE_NAME_TO_PYTHON_TYPE[return_type]
if return_type in _JAVA_TYPE_NAME_TO_PYTHON_TYPE
else return_type
)
fn.__signature__ = sig.replace(parameters=params, return_annotation=return_type)
methods[method_name_modified] = fn
newclass = type( # Dynamically create a class to shadow a java class.
python_class_name_translation, # Name, based on the original java name
(JavaObjectShadow,), # Inheritance
{
"__init__": lambda instance, socket, serialized_object, bridge: JavaObjectShadow.__init__(
instance, socket, serialized_object, bridge
),
**static_attributes,
**fields,
**methods,
},
)
self.classes[_java_class] = newclass
return newclass
class JavaObjectShadow:
_interfaces = (
None # Subclasses should fill these out. This class should never be directly instantiated.
)
_java_class = None
def __init__(self, socket, serialized_object, bridge: Bridge):
self._socket = socket
self._hash_code = serialized_object["hash-code"]
self._bridge = bridge
# register objects with bridge so it can tell Java side to release them before socket shuts down
socket._register_java_object(self)
self._closed = False
# atexit.register(self._close)
self._close_lock = Lock()
def _close(self):
with self._close_lock:
if self._closed:
return
if not hasattr(self, "_hash_code"):
return # constructor didnt properly finish, nothing to clean up on java side
message = {"command": "destructor", "hash-code": self._hash_code}
if self._bridge._debug:
"closing: {}".format(self)
self._socket.send(message)
reply_json = self._socket.receive()
if reply_json["type"] == "exception":
raise Exception(reply_json["value"])
self._closed = True
def __del__(self):
self._close()
def _access_field(self, name):
message = {"command": "get-field", "hash-code": self._hash_code, "name": name}
self._socket.send(message)
return self._deserialize(self._socket.receive())
def _set_field(self, name, value):
message = {
"command": "set-field",
"hash-code": self._hash_code,
"name": name,
"value": _serialize_arg(value),
}
self._socket.send(message)
reply = self._deserialize(self._socket.receive())
def _translate_call(self, method_specs, fn_args: tuple, static: bool):
# args that are none are placeholders to allow for polymorphism and not considered part of the spec
# fn_args = [a for a in fn_args if a is not None]
valid_method_spec, deserialize_types = _check_method_args(method_specs, fn_args)
# args are good, make call through socket, casting the correct type if needed (e.g. int to float)
message = {
"command": "run-method",
"static": static,
"hash-code": self._hash_code,
"name": valid_method_spec["name"],
"argument-types": valid_method_spec["arguments"],
"argument-deserialization-types": deserialize_types,
}
message["arguments"] = _package_arguments(valid_method_spec, fn_args)
if self._bridge._closed:
raise Exception('The Bridge used to create this has been closed. Are you trying to call it outside of a "with" block?')
self._socket.send(message)
recieved = self._socket.receive()
return self._deserialize(recieved)
def _deserialize(self, json_return):
if json_return["type"] == "exception":
raise Exception(json_return["value"])
elif json_return["type"] == "null":
return None
elif json_return["type"] == "primitive":
return json_return["value"]
elif json_return["type"] == "string":
return json_return["value"]
elif json_return["type"] == "list":
return [self._deserialize(obj) for obj in json_return["value"]]
elif json_return["type"] == "object":
if json_return["class"] == "JSONObject":
return json.loads(json_return["value"])
else:
raise Exception("Unrecognized return class")
elif json_return["type"] == "unserialized-object":
# inherit socket from parent object
return self._bridge.get_class(json_return)(
socket=self._socket, serialized_object=json_return, bridge=self._bridge
)
else:
return deserialize_array(json_return)
def deserialize_array(json_return):
if json_return["type"] in ["byte-array", "int-array", "short-array", "float-array"]:
decoded = json_return["value"]
if json_return["type"] == "byte-array":
return np.frombuffer(decoded, dtype="=u1").copy()
elif json_return["type"] == "double-array":
return np.frombuffer(decoded, dtype="=f8").copy()
elif json_return["type"] == "int-array":
return np.frombuffer(decoded, dtype="=u4").copy()
elif json_return["type"] == "short-array":
return np.frombuffer(decoded, dtype="=u2").copy()
elif json_return["type"] == "float-array":
return np.frombuffer(decoded, dtype="=f4").copy()
def _package_arguments(valid_method_spec, fn_args):
arguments = []
for arg_type, arg_val in zip(valid_method_spec["arguments"], fn_args):
if isinstance(arg_val, JavaObjectShadow):
arguments.append(_serialize_arg(arg_val))
elif _JAVA_TYPE_NAME_TO_PYTHON_TYPE[arg_type] is object:
arguments.append(_serialize_arg(arg_val))
elif arg_val is None:
arguments.append(_serialize_arg(arg_val))
elif isinstance(arg_val, np.ndarray):
arguments.append(_serialize_arg(arg_val))
else:
arguments.append(_serialize_arg(_JAVA_TYPE_NAME_TO_PYTHON_TYPE[arg_type](arg_val)))
return arguments
def _serialize_arg(arg):
if arg is None:
return None
if type(arg) in [bool, str, int, float]:
return arg # json handles serialization
elif type(arg) == np.ndarray:
return arg.tobytes()
elif isinstance(arg, JavaObjectShadow):
return {"hash-code": arg._hash_code}
else:
raise Exception("Unknown argumetn type")
def _check_single_method_spec(method_spec, fn_args):
if len(method_spec["arguments"]) != len(fn_args):
return False
for arg_java_type, arg_val in zip(method_spec["arguments"], fn_args):
if isinstance(arg_val, JavaObjectShadow):
if arg_java_type not in arg_val._interfaces:
# check that it shadows object of the correct type
return False
elif type(arg_val) == np.ndarray:
# For ND Arrays, need to make sure data types match
if (
arg_java_type != "java.lang.Object"
and arg_val.dtype.type != _JAVA_ARRAY_TYPE_NUMPY_DTYPE[arg_java_type]
):
return False
elif not any(
[
isinstance(arg_val, acceptable_type)
for acceptable_type in _JAVA_TYPE_NAME_TO_CASTABLE_PYTHON_TYPE[arg_java_type]
]
) and not (
arg_val is None and arg_java_type in _JAVA_NON_PRIMITIVES
): # could be null if its an object
# if a type that gets converted
return False
return True
def _check_method_args(method_specs, fn_args):
valid_method_spec = None
for method_spec in method_specs:
if _check_single_method_spec(method_spec, fn_args):
valid_method_spec = method_spec
break
if valid_method_spec is None:
raise Exception(
"Incorrect arguments. \nExpected {} \nGot {}".format(
" or ".join([", ".join(method_spec["arguments"]) for method_spec in method_specs]),
", ".join([str(type(a)) for a in fn_args]),
)
)
# subclass NDArrays to the appropriate data type so they dont get incorrectly reconstructed as objects
valid_method_spec = copy.deepcopy(valid_method_spec)
deserialize_types = []
for java_arg_class, python_arg_val in zip(valid_method_spec["arguments"], fn_args):
if isinstance(python_arg_val, np.ndarray):
deserialize_types.append(
[
ja
for ja, npdt in zip(
_JAVA_ARRAY_TYPE_NUMPY_DTYPE.keys(), _JAVA_ARRAY_TYPE_NUMPY_DTYPE.values()
)
if python_arg_val.dtype.type == npdt
][0]
)
else:
deserialize_types.append(java_arg_class)
return valid_method_spec, deserialize_types
def _parse_arg_names(methods, method_name, convert_camel_case):
method_name_modified = (
_camel_case_2_snake_case(method_name) if convert_camel_case else method_name
)
# all methods with this name and different argument lists
methods_with_name = [m for m in methods if m["name"] == method_name]
min_required_args = (
0
if len(methods_with_name) == 1 and len(methods_with_name[0]["arguments"]) == 0
else min([len(m["arguments"]) for m in methods_with_name])
)
# sort with largest number of args last so lambda at end gets max num args
methods_with_name.sort(key=lambda val: len(val["arguments"]))
method = methods_with_name[-1] # We only need to evaluate the overload with the most arguments.
params = []
unique_argument_names = []
for arg_index, typ in enumerate(method["arguments"]):
hint = _CLASS_NAME_MAPPING[typ] if typ in _CLASS_NAME_MAPPING else "object"
python_type = (
_JAVA_TYPE_NAME_TO_PYTHON_TYPE[typ] if typ in _JAVA_TYPE_NAME_TO_PYTHON_TYPE else typ
)
if hint in unique_argument_names: # append numbers to end so arg hints have unique names
i = 1
while hint + str(i) in unique_argument_names:
i += 1
arg_name = hint + str(i)
else:
arg_name = hint
unique_argument_names.append(arg_name)
# this is how overloading is handled for now, by making default arguments as none, but
# it might be better to explicitly compare argument types
if arg_index >= min_required_args:
default_arg_value = None
else:
default_arg_value = inspect.Parameter.empty
params.append(
inspect.Parameter(
name=arg_name,
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
default=default_arg_value,
annotation=python_type,
)
)
return params, methods_with_name, method_name_modified
def _camel_case_2_snake_case(name):
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
# Used for generating type hints in arguments
_CLASS_NAME_MAPPING = {
"byte[]": "uint8array",
"double[]": "float64_array",
"int[]": "uint32_array",
"short[]": "int16_array",
"char[]": "int16_array",
"float[]": "int16_array",
"long[]": "int16_array",
"java.lang.String": "string",
"boolean": "boolean",
"double": "float",
"float": "float",
"int": "int",
"long": "int",
"short": "int",
"void": "void",
}
#Used for deserializing java arrarys into numpy arrays
_JAVA_ARRAY_TYPE_NUMPY_DTYPE = {
"boolean[]": np.bool,
"byte[]": np.uint8,
"short[]": np.int16,
"char[]": np.uint16,
"float[]": np.float32,
"double[]": np.float64,
"int[]": np.int32,
"long[]": np.int64,
}
#used for figuring our which java methods to call and if python args match
_JAVA_TYPE_NAME_TO_PYTHON_TYPE = {
"boolean": bool,
"double": float,
"float": float,
#maybe could make these more specific to array type?
"byte[]": np.ndarray,
"short[]": np.ndarray,
"double[]": np.ndarray,
"int[]": np.ndarray,
"char[]": np.ndarray,
"float[]": np.ndarray,
"long[]": np.ndarray,
"int": int,
"java.lang.String": str,
"long": int,
"short": int,
"char": int,
"byte": int,
"void": None,
"java.lang.Object": object,
}
# type conversions that allow for autocasting
_JAVA_TYPE_NAME_TO_CASTABLE_PYTHON_TYPE = {
"boolean": {bool},
"byte[]": {np.ndarray},
"double": {float, int},
"double[]": {np.ndarray},
"float": {float},
"int": {int},
"int[]": {np.ndarray},
"java.lang.String": {str},
"long": {int},
"short": {int},
"char": {int},
"byte": {int},
"void": {None},
"java.lang.Object": {object},
}
_JAVA_NON_PRIMITIVES = {"byte[]", "double[]", "int[]", "short[]", "char[]", "long[]", "boolean[]",
"java.lang.String", "java.lang.Object"}
if __name__ == "__main__":
# Test basic bridge operations
import traceback
b = Bridge()
try:
s = b.get_studio()
except:
traceback.print_exc()
try:
c = b.get_core()
except:
traceback.print_exc()
a = 1
| true | true |
f717e197e7a1baf6c61fc8fc8503678d23f39768 | 2,360 | py | Python | app2.py | GFDRR/mobility_app | 27285a0691fabcc2cede6772a04bb98d29e636da | [
"MIT"
] | null | null | null | app2.py | GFDRR/mobility_app | 27285a0691fabcc2cede6772a04bb98d29e636da | [
"MIT"
] | null | null | null | app2.py | GFDRR/mobility_app | 27285a0691fabcc2cede6772a04bb98d29e636da | [
"MIT"
] | null | null | null | import streamlit as st
import pandas as pd
import seaborn as sns
import pylab as plt
import datetime as dt
#import geopandas as gpd
df = pd.read_csv('/Users/nicholasjones/Desktop/code/wbg-location-data/notebooks/nick/df_india_may9.csv')
df.ds = pd.to_datetime(df.ds)
df = df.set_index('ds')
df['datetime'] = df.index.copy()
## Header
st.title('Mobility trends of states in India')
st.write('This app visualizes mobility trends for states in India, based on the Facebook movement range maps data.')
default_states = ['Gujarat','NCT of Delhi','West Bengal','Rajasthan','Tamil Nadu','Maharashtra','Bihar']
states = st.multiselect('Select a state',df.polygon_name.unique())
# Line plot
colors = 'rgbycmkrgbycmkrgbycmkrgbycmk'
f, ax = plt.subplots(figsize = [9,9])
for background_state in df.polygon_name.unique():
sns.lineplot(x=df.index[df.polygon_name == background_state], y=df["all_day_bing_tiles_visited_relative_change"][df.polygon_name == background_state], color = 'grey', alpha = 0.3, linewidth = 1)
for n, state in enumerate(list(states)):
col = colors[n]
ax = sns.lineplot(x=df.index[df.polygon_name == state], y="all_day_bing_tiles_visited_relative_change", color = col,data=df[df.polygon_name == state], linewidth = 4)
plt.axvline(dt.datetime(2020, 3, 22),linestyle='--', alpha = 0.5)
plt.axvline(dt.datetime(2020, 3, 24),linestyle='--', alpha = 0.5)
plt.title('Percent users remaining in home grid cell all day', fontsize = 16);
st.write(f)
df
## Map
gdf = gpd.read_file('/Users/nicholasjones/Desktop/code/data/FB/India/gadm36_IND_shp/gadm36_IND_1.shp')
gdf = gdf[['NAME_1','geometry']]
income_data = pd.read_csv('/Users/nicholasjones/Desktop/code/data/FB/India/NSDP_per_capita.csv',names=['state','nsdp_USD'])
income_data = income_data.dropna()
income_data.nsdp_USD = [x[4:] for x in income_data.nsdp_USD]
income_data.nsdp_USD = income_data.nsdp_USD.str.replace(',','')
income_data.nsdp_USD = income_data.nsdp_USD.astype(int)
gdf = gpd.GeoDataFrame(df.merge(gdf, left_on='polygon_name', right_on = 'NAME_1'))
gdf = gdf[['NAME_1','all_day_bing_tiles_visited_relative_change','all_day_ratio_single_tile_users','geometry','datetime']]
gdf.head(1)
mydate = st.selectbox('Select a date',['2020-03-05','2020-03-22','2020-04-29'])
f = gdf[gdf.datetime == mydate].plot(column = 'all_day_bing_tiles_visited_relative_change')
st.pyplot()
| 41.403509 | 198 | 0.747034 | import streamlit as st
import pandas as pd
import seaborn as sns
import pylab as plt
import datetime as dt
df = pd.read_csv('/Users/nicholasjones/Desktop/code/wbg-location-data/notebooks/nick/df_india_may9.csv')
df.ds = pd.to_datetime(df.ds)
df = df.set_index('ds')
df['datetime'] = df.index.copy()
le('Mobility trends of states in India')
st.write('This app visualizes mobility trends for states in India, based on the Facebook movement range maps data.')
default_states = ['Gujarat','NCT of Delhi','West Bengal','Rajasthan','Tamil Nadu','Maharashtra','Bihar']
states = st.multiselect('Select a state',df.polygon_name.unique())
colors = 'rgbycmkrgbycmkrgbycmkrgbycmk'
f, ax = plt.subplots(figsize = [9,9])
for background_state in df.polygon_name.unique():
sns.lineplot(x=df.index[df.polygon_name == background_state], y=df["all_day_bing_tiles_visited_relative_change"][df.polygon_name == background_state], color = 'grey', alpha = 0.3, linewidth = 1)
for n, state in enumerate(list(states)):
col = colors[n]
ax = sns.lineplot(x=df.index[df.polygon_name == state], y="all_day_bing_tiles_visited_relative_change", color = col,data=df[df.polygon_name == state], linewidth = 4)
plt.axvline(dt.datetime(2020, 3, 22),linestyle='--', alpha = 0.5)
plt.axvline(dt.datetime(2020, 3, 24),linestyle='--', alpha = 0.5)
plt.title('Percent users remaining in home grid cell all day', fontsize = 16);
st.write(f)
df
= gpd.read_file('/Users/nicholasjones/Desktop/code/data/FB/India/gadm36_IND_shp/gadm36_IND_1.shp')
gdf = gdf[['NAME_1','geometry']]
income_data = pd.read_csv('/Users/nicholasjones/Desktop/code/data/FB/India/NSDP_per_capita.csv',names=['state','nsdp_USD'])
income_data = income_data.dropna()
income_data.nsdp_USD = [x[4:] for x in income_data.nsdp_USD]
income_data.nsdp_USD = income_data.nsdp_USD.str.replace(',','')
income_data.nsdp_USD = income_data.nsdp_USD.astype(int)
gdf = gpd.GeoDataFrame(df.merge(gdf, left_on='polygon_name', right_on = 'NAME_1'))
gdf = gdf[['NAME_1','all_day_bing_tiles_visited_relative_change','all_day_ratio_single_tile_users','geometry','datetime']]
gdf.head(1)
mydate = st.selectbox('Select a date',['2020-03-05','2020-03-22','2020-04-29'])
f = gdf[gdf.datetime == mydate].plot(column = 'all_day_bing_tiles_visited_relative_change')
st.pyplot()
| true | true |
f717e1a9e531045800c5e7a2a00ed7b1dc29c82c | 2,569 | py | Python | sdk/python/pulumi_azure_nextgen/media/v20180601preview/get_asset_encryption_key.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/media/v20180601preview/get_asset_encryption_key.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/media/v20180601preview/get_asset_encryption_key.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetAssetEncryptionKeyResult',
'AwaitableGetAssetEncryptionKeyResult',
'get_asset_encryption_key',
]
@pulumi.output_type
class GetAssetEncryptionKeyResult:
"""
The Asset Storage encryption key.
"""
def __init__(__self__, storage_encryption_key=None):
if storage_encryption_key and not isinstance(storage_encryption_key, str):
raise TypeError("Expected argument 'storage_encryption_key' to be a str")
pulumi.set(__self__, "storage_encryption_key", storage_encryption_key)
@property
@pulumi.getter(name="storageEncryptionKey")
def storage_encryption_key(self) -> Optional[str]:
"""
The Asset storage encryption key.
"""
return pulumi.get(self, "storage_encryption_key")
class AwaitableGetAssetEncryptionKeyResult(GetAssetEncryptionKeyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAssetEncryptionKeyResult(
storage_encryption_key=self.storage_encryption_key)
def get_asset_encryption_key(account_name: Optional[str] = None,
asset_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAssetEncryptionKeyResult:
"""
Use this data source to access information about an existing resource.
:param str account_name: The Media Services account name.
:param str asset_name: The Asset name.
:param str resource_group_name: The name of the resource group within the Azure subscription.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['assetName'] = asset_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:media/v20180601preview:getAssetEncryptionKey', __args__, opts=opts, typ=GetAssetEncryptionKeyResult).value
return AwaitableGetAssetEncryptionKeyResult(
storage_encryption_key=__ret__.storage_encryption_key)
| 37.779412 | 157 | 0.709225 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetAssetEncryptionKeyResult',
'AwaitableGetAssetEncryptionKeyResult',
'get_asset_encryption_key',
]
@pulumi.output_type
class GetAssetEncryptionKeyResult:
def __init__(__self__, storage_encryption_key=None):
if storage_encryption_key and not isinstance(storage_encryption_key, str):
raise TypeError("Expected argument 'storage_encryption_key' to be a str")
pulumi.set(__self__, "storage_encryption_key", storage_encryption_key)
@property
@pulumi.getter(name="storageEncryptionKey")
def storage_encryption_key(self) -> Optional[str]:
return pulumi.get(self, "storage_encryption_key")
class AwaitableGetAssetEncryptionKeyResult(GetAssetEncryptionKeyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAssetEncryptionKeyResult(
storage_encryption_key=self.storage_encryption_key)
def get_asset_encryption_key(account_name: Optional[str] = None,
asset_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAssetEncryptionKeyResult:
__args__ = dict()
__args__['accountName'] = account_name
__args__['assetName'] = asset_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:media/v20180601preview:getAssetEncryptionKey', __args__, opts=opts, typ=GetAssetEncryptionKeyResult).value
return AwaitableGetAssetEncryptionKeyResult(
storage_encryption_key=__ret__.storage_encryption_key)
| true | true |
f717e1e1dd47ba1807b3c7b1e5175c4d151b536b | 447 | py | Python | students/k3340/practical_works/Voronov Alexey/jango1/project_first_app/migrations/0002_person_vehicles.py | voronoff2803/ITMO_ICT_WebProgramming_2020 | c59d8b2cdefe8b821049a2716733070983d08ad2 | [
"MIT"
] | null | null | null | students/k3340/practical_works/Voronov Alexey/jango1/project_first_app/migrations/0002_person_vehicles.py | voronoff2803/ITMO_ICT_WebProgramming_2020 | c59d8b2cdefe8b821049a2716733070983d08ad2 | [
"MIT"
] | null | null | null | students/k3340/practical_works/Voronov Alexey/jango1/project_first_app/migrations/0002_person_vehicles.py | voronoff2803/ITMO_ICT_WebProgramming_2020 | c59d8b2cdefe8b821049a2716733070983d08ad2 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.5 on 2020-04-03 20:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project_first_app', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='person',
name='vehicles',
field=models.ManyToManyField(through='project_first_app.Ownership', to='project_first_app.Vehicle'),
),
]
| 23.526316 | 112 | 0.63311 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project_first_app', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='person',
name='vehicles',
field=models.ManyToManyField(through='project_first_app.Ownership', to='project_first_app.Vehicle'),
),
]
| true | true |
f717e28a218983e53bb05193c90627d19da33fc9 | 110 | py | Python | CodeWars/7 Kyu/Average Array.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | CodeWars/7 Kyu/Average Array.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | CodeWars/7 Kyu/Average Array.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | def avg_array(arrs):
x=[]
for i in zip(*arrs):
x.append(sum(i)/len(arrs))
return x
| 18.333333 | 34 | 0.5 | def avg_array(arrs):
x=[]
for i in zip(*arrs):
x.append(sum(i)/len(arrs))
return x
| true | true |
f717e456d5d1b3e37be07a8321b8d5d0fadafa26 | 10,526 | py | Python | kubernetes/client/models/v1_persistent_volume_claim_spec.py | philipp-sontag-by/python | 51c481692ab0d9c71b9dd96342bfa93b721b029d | [
"Apache-2.0"
] | 1 | 2022-02-22T23:10:55.000Z | 2022-02-22T23:10:55.000Z | kubernetes/client/models/v1_persistent_volume_claim_spec.py | philipp-sontag-by/python | 51c481692ab0d9c71b9dd96342bfa93b721b029d | [
"Apache-2.0"
] | 6 | 2021-09-13T19:03:02.000Z | 2022-03-16T18:56:42.000Z | kubernetes/client/models/v1_persistent_volume_claim_spec.py | philipp-sontag-by/python | 51c481692ab0d9c71b9dd96342bfa93b721b029d | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1PersistentVolumeClaimSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'access_modes': 'list[str]',
'data_source': 'V1TypedLocalObjectReference',
'data_source_ref': 'V1TypedLocalObjectReference',
'resources': 'V1ResourceRequirements',
'selector': 'V1LabelSelector',
'storage_class_name': 'str',
'volume_mode': 'str',
'volume_name': 'str'
}
attribute_map = {
'access_modes': 'accessModes',
'data_source': 'dataSource',
'data_source_ref': 'dataSourceRef',
'resources': 'resources',
'selector': 'selector',
'storage_class_name': 'storageClassName',
'volume_mode': 'volumeMode',
'volume_name': 'volumeName'
}
def __init__(self, access_modes=None, data_source=None, data_source_ref=None, resources=None, selector=None, storage_class_name=None, volume_mode=None, volume_name=None, local_vars_configuration=None): # noqa: E501
"""V1PersistentVolumeClaimSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._access_modes = None
self._data_source = None
self._data_source_ref = None
self._resources = None
self._selector = None
self._storage_class_name = None
self._volume_mode = None
self._volume_name = None
self.discriminator = None
if access_modes is not None:
self.access_modes = access_modes
if data_source is not None:
self.data_source = data_source
if data_source_ref is not None:
self.data_source_ref = data_source_ref
if resources is not None:
self.resources = resources
if selector is not None:
self.selector = selector
if storage_class_name is not None:
self.storage_class_name = storage_class_name
if volume_mode is not None:
self.volume_mode = volume_mode
if volume_name is not None:
self.volume_name = volume_name
@property
def access_modes(self):
"""Gets the access_modes of this V1PersistentVolumeClaimSpec. # noqa: E501
AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 # noqa: E501
:return: The access_modes of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: list[str]
"""
return self._access_modes
@access_modes.setter
def access_modes(self, access_modes):
"""Sets the access_modes of this V1PersistentVolumeClaimSpec.
AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 # noqa: E501
:param access_modes: The access_modes of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: list[str]
"""
self._access_modes = access_modes
@property
def data_source(self):
"""Gets the data_source of this V1PersistentVolumeClaimSpec. # noqa: E501
:return: The data_source of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: V1TypedLocalObjectReference
"""
return self._data_source
@data_source.setter
def data_source(self, data_source):
"""Sets the data_source of this V1PersistentVolumeClaimSpec.
:param data_source: The data_source of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: V1TypedLocalObjectReference
"""
self._data_source = data_source
@property
def data_source_ref(self):
"""Gets the data_source_ref of this V1PersistentVolumeClaimSpec. # noqa: E501
:return: The data_source_ref of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: V1TypedLocalObjectReference
"""
return self._data_source_ref
@data_source_ref.setter
def data_source_ref(self, data_source_ref):
"""Sets the data_source_ref of this V1PersistentVolumeClaimSpec.
:param data_source_ref: The data_source_ref of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: V1TypedLocalObjectReference
"""
self._data_source_ref = data_source_ref
@property
def resources(self):
"""Gets the resources of this V1PersistentVolumeClaimSpec. # noqa: E501
:return: The resources of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: V1ResourceRequirements
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this V1PersistentVolumeClaimSpec.
:param resources: The resources of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: V1ResourceRequirements
"""
self._resources = resources
@property
def selector(self):
"""Gets the selector of this V1PersistentVolumeClaimSpec. # noqa: E501
:return: The selector of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: V1LabelSelector
"""
return self._selector
@selector.setter
def selector(self, selector):
"""Sets the selector of this V1PersistentVolumeClaimSpec.
:param selector: The selector of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: V1LabelSelector
"""
self._selector = selector
@property
def storage_class_name(self):
"""Gets the storage_class_name of this V1PersistentVolumeClaimSpec. # noqa: E501
Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 # noqa: E501
:return: The storage_class_name of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: str
"""
return self._storage_class_name
@storage_class_name.setter
def storage_class_name(self, storage_class_name):
"""Sets the storage_class_name of this V1PersistentVolumeClaimSpec.
Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 # noqa: E501
:param storage_class_name: The storage_class_name of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: str
"""
self._storage_class_name = storage_class_name
@property
def volume_mode(self):
"""Gets the volume_mode of this V1PersistentVolumeClaimSpec. # noqa: E501
volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. # noqa: E501
:return: The volume_mode of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: str
"""
return self._volume_mode
@volume_mode.setter
def volume_mode(self, volume_mode):
"""Sets the volume_mode of this V1PersistentVolumeClaimSpec.
volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. # noqa: E501
:param volume_mode: The volume_mode of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: str
"""
self._volume_mode = volume_mode
@property
def volume_name(self):
"""Gets the volume_name of this V1PersistentVolumeClaimSpec. # noqa: E501
VolumeName is the binding reference to the PersistentVolume backing this claim. # noqa: E501
:return: The volume_name of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: str
"""
return self._volume_name
@volume_name.setter
def volume_name(self, volume_name):
"""Sets the volume_name of this V1PersistentVolumeClaimSpec.
VolumeName is the binding reference to the PersistentVolume backing this claim. # noqa: E501
:param volume_name: The volume_name of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: str
"""
self._volume_name = volume_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PersistentVolumeClaimSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PersistentVolumeClaimSpec):
return True
return self.to_dict() != other.to_dict()
| 33.845659 | 219 | 0.652005 |
import pprint
import re
import six
from kubernetes.client.configuration import Configuration
class V1PersistentVolumeClaimSpec(object):
openapi_types = {
'access_modes': 'list[str]',
'data_source': 'V1TypedLocalObjectReference',
'data_source_ref': 'V1TypedLocalObjectReference',
'resources': 'V1ResourceRequirements',
'selector': 'V1LabelSelector',
'storage_class_name': 'str',
'volume_mode': 'str',
'volume_name': 'str'
}
attribute_map = {
'access_modes': 'accessModes',
'data_source': 'dataSource',
'data_source_ref': 'dataSourceRef',
'resources': 'resources',
'selector': 'selector',
'storage_class_name': 'storageClassName',
'volume_mode': 'volumeMode',
'volume_name': 'volumeName'
}
def __init__(self, access_modes=None, data_source=None, data_source_ref=None, resources=None, selector=None, storage_class_name=None, volume_mode=None, volume_name=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._access_modes = None
self._data_source = None
self._data_source_ref = None
self._resources = None
self._selector = None
self._storage_class_name = None
self._volume_mode = None
self._volume_name = None
self.discriminator = None
if access_modes is not None:
self.access_modes = access_modes
if data_source is not None:
self.data_source = data_source
if data_source_ref is not None:
self.data_source_ref = data_source_ref
if resources is not None:
self.resources = resources
if selector is not None:
self.selector = selector
if storage_class_name is not None:
self.storage_class_name = storage_class_name
if volume_mode is not None:
self.volume_mode = volume_mode
if volume_name is not None:
self.volume_name = volume_name
@property
def access_modes(self):
return self._access_modes
@access_modes.setter
def access_modes(self, access_modes):
self._access_modes = access_modes
@property
def data_source(self):
return self._data_source
@data_source.setter
def data_source(self, data_source):
self._data_source = data_source
@property
def data_source_ref(self):
return self._data_source_ref
@data_source_ref.setter
def data_source_ref(self, data_source_ref):
self._data_source_ref = data_source_ref
@property
def resources(self):
return self._resources
@resources.setter
def resources(self, resources):
self._resources = resources
@property
def selector(self):
return self._selector
@selector.setter
def selector(self, selector):
self._selector = selector
@property
def storage_class_name(self):
return self._storage_class_name
@storage_class_name.setter
def storage_class_name(self, storage_class_name):
self._storage_class_name = storage_class_name
@property
def volume_mode(self):
return self._volume_mode
@volume_mode.setter
def volume_mode(self, volume_mode):
self._volume_mode = volume_mode
@property
def volume_name(self):
return self._volume_name
@volume_name.setter
def volume_name(self, volume_name):
self._volume_name = volume_name
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, V1PersistentVolumeClaimSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, V1PersistentVolumeClaimSpec):
return True
return self.to_dict() != other.to_dict()
| true | true |
f717e46b95455cc849096cc7da73943ce2b7377f | 2,104 | py | Python | tests/test_searchalgo.py | Intelecy/chocolate | 0ba4f6f0130eab851d32d5534241c8cac3f6666e | [
"BSD-3-Clause"
] | 105 | 2017-10-27T02:14:22.000Z | 2022-01-13T12:57:05.000Z | tests/test_searchalgo.py | Intelecy/chocolate | 0ba4f6f0130eab851d32d5534241c8cac3f6666e | [
"BSD-3-Clause"
] | 31 | 2017-10-03T13:41:35.000Z | 2021-08-20T21:01:29.000Z | tests/test_searchalgo.py | areeh/chocolate | 5f946cb9daf42c3ab44508648917d46bc105c2fc | [
"BSD-3-Clause"
] | 38 | 2017-10-05T20:19:42.000Z | 2022-03-28T11:34:04.000Z | import unittest
from unittest.mock import MagicMock
from chocolate.space import *
from chocolate.base import SearchAlgorithm
class TestSearchAlgorithm(unittest.TestCase):
def setUp(self):
self.mock_conn = MagicMock(name="connection")
def test_space_none_none(self):
self.mock_conn.get_space.return_value = None
self.assertRaises(RuntimeError, SearchAlgorithm, self.mock_conn, None)
def test_space_not_equal_nowrite(self):
s1 = Space({"a": uniform(1, 2)})
s2 = Space({"a": uniform(1, 3)})
self.mock_conn.get_space.return_value = s1
self.assertRaises(RuntimeError, SearchAlgorithm, self.mock_conn, s2)
def test_space_not_equal_write(self):
s1 = Space({"a": uniform(1, 2)})
s2 = Space({"a": uniform(1, 3)})
self.mock_conn.get_space.return_value = s1
algo = SearchAlgorithm(self.mock_conn, s2, clear_db=True)
self.mock_conn.clear.assert_called_with()
self.mock_conn.insert_space.assert_called_with(s2)
self.assertEqual(algo.space, s2)
def test_space_none_not_none(self):
s1 = Space({"a": uniform(1, 2)})
self.mock_conn.get_space.return_value = None
algo = SearchAlgorithm(self.mock_conn, s1)
self.mock_conn.insert_space.assert_called_with(s1)
self.assertEqual(algo.space, s1)
def test_space_not_none_none(self):
s1 = Space({"a": uniform(1, 2)})
self.mock_conn.get_space.return_value = s1
algo = SearchAlgorithm(self.mock_conn, None)
self.assertEqual(algo.space, s1)
def test_update_value(self):
token = {"a": 0}
algo = SearchAlgorithm(self.mock_conn, None)
algo.update(token, 9.0)
expected = {"_loss": 9.0}
self.mock_conn.update_result.assert_called_with(token, expected)
def test_update_mapping(self):
token = {"a": 0}
algo = SearchAlgorithm(self.mock_conn, None)
algo.update(token, {"f1": 9.0})
expected = {"_loss_f1": 9.0}
self.mock_conn.update_result.assert_called_with(token, expected)
| 30.941176 | 78 | 0.663023 | import unittest
from unittest.mock import MagicMock
from chocolate.space import *
from chocolate.base import SearchAlgorithm
class TestSearchAlgorithm(unittest.TestCase):
def setUp(self):
self.mock_conn = MagicMock(name="connection")
def test_space_none_none(self):
self.mock_conn.get_space.return_value = None
self.assertRaises(RuntimeError, SearchAlgorithm, self.mock_conn, None)
def test_space_not_equal_nowrite(self):
s1 = Space({"a": uniform(1, 2)})
s2 = Space({"a": uniform(1, 3)})
self.mock_conn.get_space.return_value = s1
self.assertRaises(RuntimeError, SearchAlgorithm, self.mock_conn, s2)
def test_space_not_equal_write(self):
s1 = Space({"a": uniform(1, 2)})
s2 = Space({"a": uniform(1, 3)})
self.mock_conn.get_space.return_value = s1
algo = SearchAlgorithm(self.mock_conn, s2, clear_db=True)
self.mock_conn.clear.assert_called_with()
self.mock_conn.insert_space.assert_called_with(s2)
self.assertEqual(algo.space, s2)
def test_space_none_not_none(self):
s1 = Space({"a": uniform(1, 2)})
self.mock_conn.get_space.return_value = None
algo = SearchAlgorithm(self.mock_conn, s1)
self.mock_conn.insert_space.assert_called_with(s1)
self.assertEqual(algo.space, s1)
def test_space_not_none_none(self):
s1 = Space({"a": uniform(1, 2)})
self.mock_conn.get_space.return_value = s1
algo = SearchAlgorithm(self.mock_conn, None)
self.assertEqual(algo.space, s1)
def test_update_value(self):
token = {"a": 0}
algo = SearchAlgorithm(self.mock_conn, None)
algo.update(token, 9.0)
expected = {"_loss": 9.0}
self.mock_conn.update_result.assert_called_with(token, expected)
def test_update_mapping(self):
token = {"a": 0}
algo = SearchAlgorithm(self.mock_conn, None)
algo.update(token, {"f1": 9.0})
expected = {"_loss_f1": 9.0}
self.mock_conn.update_result.assert_called_with(token, expected)
| true | true |
f717e481bf8d3b27429577d92a41047f92b8a9d4 | 185 | py | Python | exam_retake/grocery_shop/project/deliveries/food.py | PetkoAndreev/Python-OOP | 2cc3094940cdf078f0ee60be938e883f843766e4 | [
"MIT"
] | 1 | 2021-05-27T07:59:17.000Z | 2021-05-27T07:59:17.000Z | exam_retake/grocery_shop/project/deliveries/food.py | PetkoAndreev/Python-OOP | 2cc3094940cdf078f0ee60be938e883f843766e4 | [
"MIT"
] | null | null | null | exam_retake/grocery_shop/project/deliveries/food.py | PetkoAndreev/Python-OOP | 2cc3094940cdf078f0ee60be938e883f843766e4 | [
"MIT"
] | null | null | null | from project.deliveries.product import Product
class Food(Product):
food_quantity: int = 15
def __init__(self, name: str):
super().__init__(name, self.food_quantity)
| 20.555556 | 50 | 0.708108 | from project.deliveries.product import Product
class Food(Product):
food_quantity: int = 15
def __init__(self, name: str):
super().__init__(name, self.food_quantity)
| true | true |
f717e56f1c9229960dfeaed5d108ebdcab4bd8a6 | 3,760 | py | Python | contrib/macdeploy/custom_dsstore.py | sqoin/xdisk | 7f93d461b0168f11512a9dcfd9cf133122157544 | [
"MIT"
] | null | null | null | contrib/macdeploy/custom_dsstore.py | sqoin/xdisk | 7f93d461b0168f11512a9dcfd9cf133122157544 | [
"MIT"
] | null | null | null | contrib/macdeploy/custom_dsstore.py | sqoin/xdisk | 7f93d461b0168f11512a9dcfd9cf133122157544 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00xdiskuser:\x00Documents:\x00xdisk:\x00xdisk:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/xdiskuser/Documents/xdisk/xdisk/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['xdisk-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| 61.639344 | 1,817 | 0.72633 |
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00xdiskuser:\x00Documents:\x00xdisk:\x00xdisk:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/xdiskuser/Documents/xdisk/xdisk/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['xdisk-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| true | true |
f717e5abe192eeacd489fb3abdcfc529c914593b | 8,031 | py | Python | src/tests/test_markdown2man.py | dante-signal31/markdown2man | ce57b905b01a6fb8fe6d3d0989af3a15f42c78cf | [
"BSD-3-Clause"
] | null | null | null | src/tests/test_markdown2man.py | dante-signal31/markdown2man | ce57b905b01a6fb8fe6d3d0989af3a15f42c78cf | [
"BSD-3-Clause"
] | null | null | null | src/tests/test_markdown2man.py | dante-signal31/markdown2man | ce57b905b01a6fb8fe6d3d0989af3a15f42c78cf | [
"BSD-3-Clause"
] | null | null | null | """ Test for markdown2man launcher."""
import gzip
import os
import sys
import tempfile
import test_common.fs.ops as test_ops
from test_common.fs.temp import temp_dir
# TODO: Refactor project layout to leave tests folder out of src.
sys.path.append("src")
import src.markdown2man as markdown2man
def test_launcher_all_options_given(temp_dir):
    """Converting README.md with explicit section and title yields cifra.1.gz."""
    md_path = os.path.join(temp_dir, "README.md")
    test_ops.copy_file("src/tests/resources/README.md", md_path)
    argv = [md_path, "cifra", "-s", "1", "-t", "cifra usage documentation"]
    out_path = os.path.join(temp_dir, "cifra.1.gz")
    with open("src/tests/resources/cifra.1") as reference:
        expected = reference.read()
    # The output must be created by the launcher, not left over from setup.
    assert not os.path.exists(out_path)
    markdown2man.main(argv)
    assert os.path.exists(out_path)
    with gzip.open(out_path) as produced:
        generated = "".join(raw.decode() for raw in produced.readlines())
    assert generated == expected
def test_launcher_all_long_options_given(temp_dir):
    """Long-form flags behave the same as the short -s/-t options."""
    md_path = os.path.join(temp_dir, "README.md")
    test_ops.copy_file("src/tests/resources/README.md", md_path)
    argv = [md_path, "cifra", "--manpage_section", "1", "--manpage_title",
            "cifra usage documentation"]
    out_path = os.path.join(temp_dir, "cifra.1.gz")
    with open("src/tests/resources/cifra.1") as reference:
        expected = reference.read()
    # The output must be created by the launcher, not left over from setup.
    assert not os.path.exists(out_path)
    markdown2man.main(argv)
    assert os.path.exists(out_path)
    with gzip.open(out_path) as produced:
        generated = "".join(raw.decode() for raw in produced.readlines())
    assert generated == expected
def test_launcher_section_changed(temp_dir):
    """Requesting manpage section 2 names the file cifra.2.gz and patches .TH."""
    md_path = os.path.join(temp_dir, "README.md")
    test_ops.copy_file("src/tests/resources/README.md", md_path)
    argv = [md_path, "cifra", "-s", "2", "-t", "cifra usage documentation"]
    out_path = os.path.join(temp_dir, "cifra.2.gz")
    with open("src/tests/resources/cifra.1") as reference:
        # The stored fixture is for section 1; rewrite its .TH header to
        # match the section requested on the command line.
        expected = reference.read().replace(".TH \"cifra\" \"1\"",
                                            ".TH \"cifra\" \"2\"")
    # The output must be created by the launcher, not left over from setup.
    assert not os.path.exists(out_path)
    markdown2man.main(argv)
    assert os.path.exists(out_path)
    with gzip.open(out_path) as produced:
        generated = "".join(raw.decode() for raw in produced.readlines())
    assert generated == expected
def test_launcher_section_omitted(temp_dir):
    """With no -s flag the launcher defaults to manpage section 1."""
    md_path = os.path.join(temp_dir, "README.md")
    test_ops.copy_file("src/tests/resources/README.md", md_path)
    argv = [md_path, "cifra", "-t", "cifra usage documentation"]
    out_path = os.path.join(temp_dir, "cifra.1.gz")
    with open("src/tests/resources/cifra.1") as reference:
        expected = reference.read()
    # The output must be created by the launcher, not left over from setup.
    assert not os.path.exists(out_path)
    markdown2man.main(argv)
    assert os.path.exists(out_path)
    with gzip.open(out_path) as produced:
        generated = "".join(raw.decode() for raw in produced.readlines())
    assert generated == expected
def test_launcher_title_omitted(temp_dir):
    """With no -t flag the generated .TH header falls back to the program name.

    Unlike the other tests, the whole fixture manpage is not compared here
    (its header differs when a title is given), so the previous read of
    ``src/tests/resources/cifra.1`` into an unused variable has been removed.
    Only the expected .TH line is checked against the generated output.
    """
    md_path = os.path.join(temp_dir, "README.md")
    test_ops.copy_file("src/tests/resources/README.md", md_path)
    out_path = os.path.join(temp_dir, "cifra.1.gz")
    expected_line = ".TH \"cifra\" \"1\" \"\" \"\" \"cifra\"\n"
    # The output must be created by the launcher, not left over from setup.
    assert not os.path.exists(out_path)
    markdown2man.main([md_path, "cifra"])
    assert os.path.exists(out_path)
    with gzip.open(out_path) as produced:
        recovered_lines = [raw.decode() for raw in produced.readlines()]
    assert expected_line in recovered_lines
def test_launcher_uncompressed(temp_dir):
    """The -u flag writes a plain (non-gzipped) cifra.1 manpage."""
    md_path = os.path.join(temp_dir, "README.md")
    test_ops.copy_file("src/tests/resources/README.md", md_path)
    argv = [md_path, "cifra", "-s", "1", "-t",
            "cifra usage documentation", "-u"]
    out_path = os.path.join(temp_dir, "cifra.1")
    with open("src/tests/resources/cifra.1") as reference:
        expected = reference.read()
    # The output must be created by the launcher, not left over from setup.
    assert not os.path.exists(out_path)
    markdown2man.main(argv)
    assert os.path.exists(out_path)
    with open(out_path) as produced:
        generated = produced.read()
    assert generated == expected
def test_launcher_different_output_folder(temp_dir):
    """The -f flag places the compressed manpage in the requested folder."""
    with tempfile.TemporaryDirectory() as out_dir:
        md_path = os.path.join(temp_dir, "README.md")
        test_ops.copy_file("src/tests/resources/README.md", md_path)
        argv = [md_path, "cifra", "-s", "1", "-t",
                "cifra usage documentation", "-f", out_dir]
        out_path = os.path.join(out_dir, "cifra.1.gz")
        with open("src/tests/resources/cifra.1") as reference:
            expected = reference.read()
        # The output must be created by the launcher, not left over from setup.
        assert not os.path.exists(out_path)
        markdown2man.main(argv)
        assert os.path.exists(out_path)
        with gzip.open(out_path) as produced:
            generated = "".join(raw.decode() for raw in produced.readlines())
        assert generated == expected
def test_launcher_different_non_existing_output_folder(temp_dir):
    """-f pointing at a folder that does not exist yet is created on demand."""
    with tempfile.TemporaryDirectory() as out_dir:
        md_path = os.path.join(temp_dir, "README.md")
        # Subfolder is deliberately NOT created here: the launcher must do it.
        out_subdir = os.path.join(out_dir, "man/")
        test_ops.copy_file("src/tests/resources/README.md", md_path)
        argv = [md_path, "cifra", "-s", "1", "-t",
                "cifra usage documentation", "-f", out_subdir]
        out_path = os.path.join(out_subdir, "cifra.1.gz")
        with open("src/tests/resources/cifra.1") as reference:
            expected = reference.read()
        assert not os.path.exists(out_path)
        markdown2man.main(argv)
        assert os.path.exists(out_path)
        with gzip.open(out_path) as produced:
            generated = "".join(raw.decode() for raw in produced.readlines())
        assert generated == expected
import os
import sys
import tempfile
import test_common.fs.ops as test_ops
from test_common.fs.temp import temp_dir
sys.path.append("src")
import src.markdown2man as markdown2man
def test_launcher_all_options_given(temp_dir):
temporal_markdown_file = os.path.join(temp_dir, "README.md")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra", "-s", "1", "-t",
"cifra usage documentation"]
expected_output_file = os.path.join(temp_dir, "cifra.1.gz")
recovered_content = ""
expected_content = ""
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with gzip.open(expected_output_file) as output_file:
recovered_content = "".join(line.decode() for line in output_file.readlines())
assert recovered_content == expected_content
def test_launcher_all_long_options_given(temp_dir):
temporal_markdown_file = os.path.join(temp_dir, "README.md")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra", "--manpage_section", "1", "--manpage_title",
"cifra usage documentation"]
expected_output_file = os.path.join(temp_dir, "cifra.1.gz")
recovered_content = ""
expected_content = ""
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with gzip.open(expected_output_file) as output_file:
recovered_content = "".join(line.decode() for line in output_file.readlines())
assert recovered_content == expected_content
def test_launcher_section_changed(temp_dir):
temporal_markdown_file = os.path.join(temp_dir, "README.md")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra", "-s", "2", "-t",
"cifra usage documentation"]
expected_output_file = os.path.join(temp_dir, "cifra.2.gz")
recovered_content = ""
expected_content = ""
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
expected_content = expected_content.replace(".TH \"cifra\" \"1\"",
".TH \"cifra\" \"2\"")
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with gzip.open(expected_output_file) as output_file:
recovered_content = "".join(line.decode() for line in output_file.readlines())
assert recovered_content == expected_content
def test_launcher_section_omitted(temp_dir):
temporal_markdown_file = os.path.join(temp_dir, "README.md")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra", "-t",
"cifra usage documentation"]
expected_output_file = os.path.join(temp_dir, "cifra.1.gz")
recovered_content = ""
expected_content = ""
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with gzip.open(expected_output_file) as output_file:
recovered_content = "".join(line.decode() for line in output_file.readlines())
assert recovered_content == expected_content
def test_launcher_title_omitted(temp_dir):
temporal_markdown_file = os.path.join(temp_dir, "README.md")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra"]
expected_output_file = os.path.join(temp_dir, "cifra.1.gz")
recovered_content = ""
expected_line = ".TH \"cifra\" \"1\" \"\" \"\" \"cifra\"\n"
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with gzip.open(expected_output_file) as output_file:
recovered_content = [line.decode() for line in output_file.readlines()]
assert expected_line in recovered_content
def test_launcher_uncompressed(temp_dir):
temporal_markdown_file = os.path.join(temp_dir, "README.md")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra", "-s", "1", "-t",
"cifra usage documentation", "-u"]
expected_output_file = os.path.join(temp_dir, "cifra.1")
recovered_content = ""
expected_content = ""
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with open(expected_output_file) as output_file:
recovered_content = output_file.read()
assert recovered_content == expected_content
def test_launcher_different_output_folder(temp_dir):
with tempfile.TemporaryDirectory() as temp_output_folder:
temporal_markdown_file = os.path.join(temp_dir, "README.md")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra", "-s", "1", "-t",
"cifra usage documentation", "-f", f"{temp_output_folder}"]
expected_output_file = os.path.join(temp_output_folder, "cifra.1.gz")
recovered_content = ""
expected_content = ""
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with gzip.open(expected_output_file) as output_file:
recovered_content = "".join(line.decode() for line in output_file.readlines())
assert recovered_content == expected_content
def test_launcher_different_non_existing_output_folder(temp_dir):
with tempfile.TemporaryDirectory() as temp_output_folder:
temporal_markdown_file = os.path.join(temp_dir, "README.md")
temp_output_subfolder = os.path.join(temp_output_folder, "man/")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra", "-s", "1", "-t",
"cifra usage documentation", "-f", f"{temp_output_subfolder}"]
expected_output_file = os.path.join(temp_output_subfolder, "cifra.1.gz")
recovered_content = ""
expected_content = ""
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with gzip.open(expected_output_file) as output_file:
recovered_content = "".join(line.decode() for line in output_file.readlines())
assert recovered_content == expected_content | true | true |
f717e6e123e9b4e6acce7b7cd6d35c7024149784 | 104 | py | Python | rules/tabs_spaces.py | Ahuge/Pepper | 2afe398629d0505dfa1b5ad7d13eb68a3df695bf | [
"MIT"
] | null | null | null | rules/tabs_spaces.py | Ahuge/Pepper | 2afe398629d0505dfa1b5ad7d13eb68a3df695bf | [
"MIT"
] | 3 | 2015-10-16T00:58:27.000Z | 2019-06-20T16:57:03.000Z | rules/tabs_spaces.py | Ahuge/Pepper | 2afe398629d0505dfa1b5ad7d13eb68a3df695bf | [
"MIT"
] | null | null | null | __author__ = 'Alex'
import re
def main(line):
    """Return *line* with every hard tab expanded to four spaces."""
    return line.replace("\t", " " * 4)
| 13 | 40 | 0.548077 | __author__ = 'Alex'
import re
def main(line):
sub = re.sub(r"(\t)", r" ", line)
return sub
| true | true |
f717e99028c5d14443a9263ee3de86569fca8475 | 377 | py | Python | ppr-api/src/endpoints/api.py | gh2os/ppr | 9f67321baa5bbb450ac5e06755e2838497a2cf96 | [
"Apache-2.0"
] | null | null | null | ppr-api/src/endpoints/api.py | gh2os/ppr | 9f67321baa5bbb450ac5e06755e2838497a2cf96 | [
"Apache-2.0"
] | 2 | 2020-03-18T23:26:53.000Z | 2020-03-18T23:40:19.000Z | ppr-api/src/endpoints/api.py | gh2os/ppr | 9f67321baa5bbb450ac5e06755e2838497a2cf96 | [
"Apache-2.0"
] | null | null | null | """ Set up all the endpoints for the web service. """
import fastapi
from . import financing_statement, healthcheck, search
# Top-level router aggregating every endpoint group exposed by the service.
router = fastapi.APIRouter()
# Health/liveness probes are namespaced under /operations.
router.include_router(healthcheck.router, prefix='/operations', tags=['Operations'])
router.include_router(financing_statement.router, tags=['Financing Statement'])
router.include_router(search.router, tags=['Search'])
| 29 | 84 | 0.777188 |
import fastapi
from . import financing_statement, healthcheck, search
router = fastapi.APIRouter()
router.include_router(healthcheck.router, prefix='/operations', tags=['Operations'])
router.include_router(financing_statement.router, tags=['Financing Statement'])
router.include_router(search.router, tags=['Search'])
| true | true |
f717e9994215a9e2f730997e5778606b01734396 | 2,349 | py | Python | openspeech_cli/hydra_train.py | tqslj2/openspeech | 10307587f08615224df5a868fb5249c68c70b12d | [
"Apache-2.0",
"MIT"
] | 1 | 2022-03-04T02:52:44.000Z | 2022-03-04T02:52:44.000Z | openspeech_cli/hydra_train.py | tqslj2/openspeech | 10307587f08615224df5a868fb5249c68c70b12d | [
"Apache-2.0",
"MIT"
] | null | null | null | openspeech_cli/hydra_train.py | tqslj2/openspeech | 10307587f08615224df5a868fb5249c68c70b12d | [
"Apache-2.0",
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import hydra
import wandb
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.utilities import rank_zero_info
from openspeech.tokenizers import TOKENIZER_REGISTRY
from openspeech.datasets import DATA_MODULE_REGISTRY
from openspeech.dataclass.initialize import hydra_train_init
from openspeech.models import MODEL_REGISTRY
from openspeech.utils import parse_configs, get_pl_trainer
@hydra.main(config_path=os.path.join("..", "openspeech", "configs"), config_name="train")
def hydra_main(configs: DictConfig) -> None:
    """Hydra entry point: build tokenizer, data module, model and trainer
    from the resolved configuration, then run training and the test loop."""
    # Echo the fully-resolved configuration and fix every RNG seed up front.
    rank_zero_info(OmegaConf.to_yaml(configs))
    pl.seed_everything(configs.trainer.seed)
    pl_logger, device_count = parse_configs(configs)
    # Data pipeline: instantiate, download/prepare, then attach the tokenizer.
    datamodule = DATA_MODULE_REGISTRY[configs.dataset.dataset](configs)
    datamodule.prepare_data()
    text_tokenizer = TOKENIZER_REGISTRY[configs.tokenizer.unit](configs)
    datamodule.setup(tokenizer=text_tokenizer)
    speech_model = MODEL_REGISTRY[configs.model.model_name](configs=configs, tokenizer=text_tokenizer)
    pl_trainer = get_pl_trainer(configs, device_count, pl_logger)
    pl_trainer.fit(speech_model, datamodule)
    pl_trainer.test()
if __name__ == '__main__':
hydra_train_init()
hydra_main()
| 39.15 | 90 | 0.787143 |
import os
import hydra
import wandb
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.utilities import rank_zero_info
from openspeech.tokenizers import TOKENIZER_REGISTRY
from openspeech.datasets import DATA_MODULE_REGISTRY
from openspeech.dataclass.initialize import hydra_train_init
from openspeech.models import MODEL_REGISTRY
from openspeech.utils import parse_configs, get_pl_trainer
@hydra.main(config_path=os.path.join("..", "openspeech", "configs"), config_name="train")
def hydra_main(configs: DictConfig) -> None:
rank_zero_info(OmegaConf.to_yaml(configs))
pl.seed_everything(configs.trainer.seed)
logger, num_devices = parse_configs(configs)
data_module = DATA_MODULE_REGISTRY[configs.dataset.dataset](configs)
data_module.prepare_data()
tokenizer = TOKENIZER_REGISTRY[configs.tokenizer.unit](configs)
data_module.setup(tokenizer=tokenizer)
model = MODEL_REGISTRY[configs.model.model_name](configs=configs, tokenizer=tokenizer)
trainer = get_pl_trainer(configs, num_devices, logger)
trainer.fit(model, data_module)
trainer.test()
if __name__ == '__main__':
hydra_train_init()
hydra_main()
| true | true |
f717eab9315eef9eda1defc31f9c5122f0ff1655 | 1,026 | py | Python | Math/x^2 = y^3.py | vsriv90/mechanical_engineering | c922cdce1a595e9acb6a87cf415fb3685caf51a3 | [
"MIT"
] | 1 | 2021-11-03T06:37:44.000Z | 2021-11-03T06:37:44.000Z | Math/x^2 = y^3.py | vsriv90/mechanical_engineering | c922cdce1a595e9acb6a87cf415fb3685caf51a3 | [
"MIT"
] | null | null | null | Math/x^2 = y^3.py | vsriv90/mechanical_engineering | c922cdce1a595e9acb6a87cf415fb3685caf51a3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# #### Show the common numbers for $x^2=y^3$
#
# [Link](https://www.quora.com/Is-64-the-first-perfect-square-and-a-perfect-cube-Is-it-the-only-one/answer/Alon-Amit?ch=3&share=e27e1c03&srid=iBLa) to Quora (Alon Amit's answer)
#
#
#
# In[1]:
import numpy
import sympy
import pandas
import csv
import matplotlib.pyplot as plt
import seaborn as sn # to draw plots
import plotly.express as px
# In[2]:
import keyword
print(keyword.kwlist) # A list of all 33 keywords in python
# In[68]:
# Collect squares and cubes of 0..n-1, then keep the values present in both
# lists: a number is simultaneously x^2 and y^3 exactly when it is a sixth power.
n = 100                                 # upper bound (exclusive) for the base values
list1 = [i ** 2 for i in range(n)]      # perfect squares
list2 = [i ** 3 for i in range(n)]      # perfect cubes
elem = sorted(set(list1) & set(list2))  # values appearing in both lists
print(elem)                             # print the list
# print(set(list1)) # if you want to see the list as a set
| 20.117647 | 177 | 0.674464 |
ess as px
# In[2]:
import keyword
print(keyword.kwlist) # A list of all 33 keywords in python
# In[68]:
list1 = [] # for all sqaured values
list2 = [] # for all cubed values
n = 100 # till what values of n to check
for i in range(0,n): # if i is in the above given range
j=i**2
k=i**3
list1.append(j) # add the squared values to list1
list2.append(k) # add the cubed values to list2
elem = sorted(set(list1) & set(list2)) # check if an element is on both "list1" and "list2"
print(elem) # print the list
# print(set(list1)) # if you want to see the list as a set
| true | true |
f717eb7deff235aa9cb2449ef700d1d63d624333 | 155 | py | Python | utilities/printing.py | tarsqi/ttk | 085007047ab591426d5c08b123906c070deb6627 | [
"Apache-2.0"
] | 25 | 2016-02-28T16:42:57.000Z | 2022-01-03T13:29:48.000Z | utilities/printing.py | tarsqi/ttk | 085007047ab591426d5c08b123906c070deb6627 | [
"Apache-2.0"
] | 84 | 2016-02-13T01:07:55.000Z | 2021-04-06T18:57:36.000Z | utilities/printing.py | tarsqi/ttk | 085007047ab591426d5c08b123906c070deb6627 | [
"Apache-2.0"
] | 10 | 2016-05-30T14:35:59.000Z | 2022-03-16T12:24:09.000Z | from __future__ import absolute_import
import pprint
def pp(stuff):
    """Pretty-print *stuff* to stdout using a three-space indent."""
    pprint.pprint(stuff, indent=3)
| 19.375 | 51 | 0.787097 | from __future__ import absolute_import
import pprint
def pp(stuff):
pretty_printer = pprint.PrettyPrinter(indent=3)
pretty_printer.pprint(stuff)
| true | true |
f717ed1a92cd4103d8f8d7eecb5ad29aa817477f | 8,388 | py | Python | src/ingest_financials.py | ozacas/asxtrade | a3645ae526bfc7a546fdf2a39520feda99e3390a | [
"Apache-2.0"
] | 8 | 2021-03-20T13:12:25.000Z | 2022-02-07T11:17:40.000Z | src/ingest_financials.py | ozacas/asxtrade | a3645ae526bfc7a546fdf2a39520feda99e3390a | [
"Apache-2.0"
] | 8 | 2021-03-07T03:23:46.000Z | 2021-06-01T10:49:56.000Z | src/ingest_financials.py | ozacas/asxtrade | a3645ae526bfc7a546fdf2a39520feda99e3390a | [
"Apache-2.0"
] | 3 | 2020-12-08T10:22:23.000Z | 2021-08-04T01:59:24.000Z | #!/usr/bin/python3
"""
Responsible for ingesting data related to the business performance over time. Data is placed into the asx_company_financial_metric
collection, ready for the core viewer app to use. Stocks whose financial details have been retrieved in the past month are skipped.
"""
import pymongo
import argparse
import yfinance as yf
import time
from utils import read_config
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from bson.objectid import ObjectId
def melt_dataframes(dfs: tuple) -> pd.DataFrame:
    """
    Combine the supplied wide-format metric dataframes into one long-format frame.

    Each input dataframe is expected to carry one metric per index row and one
    reporting date per column (the shape yfinance returns for cashflow,
    financials, earnings and balance-sheet data).  Every non-empty input is
    melted into (metric, date, value) rows and rows with missing values are
    dropped.

    Fixes over the previous version: uses ``pd.concat`` instead of the
    deprecated ``DataFrame.append`` (removed in pandas 2.0) and works on a
    copy so the caller's dataframes are not mutated.

    :param dfs: iterable of dataframes; entries may be None or empty
    :return: concatenated long-format dataframe with a parsed ``date``
             column, or None when no usable data was found
    """
    melted_frames = []
    for df in filter(lambda df: df is not None and len(df) > 0, dfs):
        long_df = df.copy()  # do not mutate the caller's dataframe
        long_df["metric"] = long_df.index
        long_df = pd.melt(long_df, id_vars=["metric"], var_name="date")
        long_df = long_df.dropna(axis=0, how="any")
        if len(long_df) > 0:
            melted_frames.append(long_df)
    if not melted_frames:
        return None
    result = pd.concat(melted_frames, ignore_index=True)
    if "date" in result.columns:
        # column labels are date strings (or years rendered as str); parse them
        result["date"] = pd.to_datetime(result["date"])
    return result
def desired_stocks():
    """Return the set of ASX codes with no financial metrics ingested in the
    past month, i.e. the stocks this run still needs to process."""
    known = set(db.asx_company_details.distinct("asx_code"))
    print(f"Found {len(known)} available stocks.")
    # ObjectIds embed their creation time, so an _id lower bound selects
    # records inserted during the last 30 days.
    cutoff = ObjectId.from_datetime(datetime.today() - timedelta(days=30))
    fresh = {
        rec["asx_code"]
        for rec in db.asx_company_financial_metrics.find({"_id": {"$gte": cutoff}})
    }
    stale = known.difference(fresh)
    print(f"Found {len(stale)} desired stocks to process.")
    return stale
def update_all_metrics(df: pd.DataFrame, asx_code: str) -> int:
    """
    Add (or update) all financial metrics (ie. rows) for the specified asx_code in the specified dataframe
    :rtype: the number of records updated/created is returned
    """
    print(f"Updating {len(df)} financial metrics for {asx_code}")
    n_processed = 0
    for row in df.itertuples():
        assert row.asx_code == asx_code
        doc = {
            "metric": row.metric,
            "date": row.date,
            "value": row.value,
            "asx_code": row.asx_code,
        }
        # One document per (stock, date, metric); upsert keeps this idempotent.
        outcome = db.asx_company_financial_metrics.update_one(
            {"asx_code": asx_code, "date": row.date, "metric": row.metric},
            {"$set": doc},
            upsert=True,
        )
        assert outcome is not None
        assert isinstance(outcome, pymongo.results.UpdateResult)
        assert outcome.matched_count == 1 or outcome.upserted_id is not None
        n_processed += 1
    return n_processed
def fetch_metrics(asx_code: str) -> pd.DataFrame:
    """
    Using the excellent yfinance, we fetch all possible metrics of business performance for the specified stock code.
    Returns a dataframe (possibly empty or none) representing each metric and its datapoints as separate rows
    """
    assert len(asx_code) >= 3
    ticker = yf.Ticker(asx_code + ".AX")
    earnings = ticker.earnings
    # Earnings come back as one row per year; reshape to the same
    # metric-per-row / date-per-column layout as the other statements.
    if set(earnings.columns) == {"Earnings", "Revenue"}:
        earnings.index = earnings.index.map(str)  # years become str labels
        earnings = earnings.transpose()
    return melt_dataframes(
        (ticker.cashflow, ticker.financials, earnings, ticker.balance_sheet)
    )
def make_asx_prices_dict(new_quote: tuple, asx_code: str) -> dict:
    """
    Map one yfinance daily-history row (an itertuples record whose Index is
    the quote date) onto the asx_prices document schema.

    Fields with no yfinance equivalent (bid price, annual dividend yield,
    annual daily volume) are deliberately omitted so a $set-style upsert
    merges with, rather than clobbers, any existing values.
    """
    return {
        "asx_code": asx_code,
        "fetch_date": new_quote.Index,
        "volume": new_quote.Volume,
        "last_price": new_quote.Close,
        "day_low_price": new_quote.Low,
        "day_high_price": new_quote.High,
        "open_price": new_quote.Open,
        "error_code": "",
        "error_descr": "",
        "change_price": new_quote.change_price,
        "change_in_percent": new_quote.change_in_percent,
    }
def fill_stock_quote_gaps(db, stock_to_fetch: str, force=False) -> int:
    """
    Backfill the asx_prices collection for ``stock_to_fetch`` from the full
    daily price history available on yfinance.

    Only dates with no usable stored quote (date missing, or last_price is
    NaN) are filled unless ``force`` is True, in which case every yfinance
    date is (re-)written. Returns the number of documents upserted.
    """
    assert db is not None
    assert len(stock_to_fetch) >= 3
    # ASX tickers are suffixed with ".AX" on Yahoo Finance.
    ticker = yf.Ticker(stock_to_fetch + ".AX")
    df = ticker.history(period="max")
    # Normalise the datetime index to "YYYY-MM-DD" strings so it can be
    # compared with the fetch_date values stored in MongoDB.
    df.index = [d.strftime("%Y-%m-%d") for d in df.index]
    # print(df)
    available_dates = set(df.index)
    available_quotes = list(db.asx_prices.find({"asx_code": stock_to_fetch}))
    # Dates already covered by a usable (non-NaN) quote.
    quoted_dates = set(
        [q["fetch_date"] for q in available_quotes if not np.isnan(q["last_price"])]
    )
    # Guard against yfinance changing its result schema underneath us.
    assert set(df.columns) == set(
        ["Open", "High", "Low", "Close", "Volume", "Dividends", "Stock Splits"]
    )
    dates_to_fill = (
        available_dates.difference(quoted_dates) if not force else available_dates
    )
    print(
        "Got {} existing daily quotes for {}, found {} yfinance daily quotes, gap filling for {} dates (force={})".format(
            len(available_quotes), stock_to_fetch, len(df), len(dates_to_fill), force
        )
    )
    if len(dates_to_fill) < 1:
        return 0
    # Compute day-on-day deltas before filtering so they are taken over
    # consecutive trading days, not just over the gap dates.
    df["change_price"] = df["Close"].diff()
    df["change_in_percent"] = df["Close"].pct_change() * 100.0
    gap_quotes_df = df.filter(dates_to_fill, axis=0)
    # print(df)
    n = 0
    for new_quote in gap_quotes_df.itertuples():
        d = make_asx_prices_dict(new_quote, stock_to_fetch)
        # Upsert keyed on (fetch_date, asx_code) so re-runs are idempotent.
        result = db.asx_prices.update_one(
            {"fetch_date": d["fetch_date"], "asx_code": d["asx_code"]},
            {"$set": d},
            upsert=True,
        )
        assert result is not None
        # assert result.modified_count == 1 or result.upserted_id is not None
        n += 1
    assert n == len(gap_quotes_df)
    return n
if __name__ == "__main__":
    # Command-line entry point: update financial metrics (and optionally
    # backfill daily quotes) for all desired ASX stocks.
    args = argparse.ArgumentParser(
        description="Update financial performance metrics for ASX stocks using yfinance"
    )
    args.add_argument(
        "--config",
        help="Configuration file to use [config.json]",
        type=str,
        default="config.json",
    )
    args.add_argument(
        "--fill-gaps",
        help="Fill dates with no existing quotes for each stock (use --debug for a particular stock)",
        action="store_true",
    )
    args.add_argument("--fail-fast", help="Stop on first error", action="store_true")
    args.add_argument(
        "--delay", help="Delay between stocks in seconds [30]", type=int, default=30
    )
    args.add_argument("--force", help="Overwrite existing data (if any)", action="store_true")
    args.add_argument(
        "--debug",
        help="Try to fetch specified stock (for debugging)",
        type=str,
        required=False,
        default=None,
    )
    a = args.parse_args()
    # Connect to MongoDB using credentials from the config file; the module
    # functions above rely on this ``db`` global being set.
    config, password = read_config(a.config)
    m = config.get("mongo")
    mongo = pymongo.MongoClient(
        m.get("host"), m.get("port"), username=m.get("user"), password=password
    )
    db = mongo[m.get("db")]
    # --debug restricts the run to a single named stock.
    stock_codes = desired_stocks() if not a.debug else set([a.debug])
    print(f"Updating financial metrics for {len(stock_codes)} stocks")
    for asx_code in sorted(stock_codes):
        print(f"Processing stock {asx_code}")
        try:
            melted_df = fetch_metrics(asx_code)
            if melted_df is None or len(melted_df) < 1:
                raise ValueError(f"No data available for {asx_code}... skipping")
            melted_df["asx_code"] = asx_code
            ret = update_all_metrics(melted_df, asx_code)
            assert ret == len(melted_df)
            if a.fill_gaps:
                fill_stock_quote_gaps(db, asx_code, force=a.force)
            # FALLTHRU...
            # Throttle between stocks to be polite to Yahoo Finance (note:
            # the delay is skipped when an exception is raised above).
            time.sleep(a.delay)
        except Exception as e:
            # Best-effort: log the failure and keep going unless --fail-fast.
            print(f"WARNING: unable to download financials for {asx_code}")
            print(str(e))
            if a.fail_fast:
                raise e
    exit(0)
| 35.542373 | 152 | 0.625298 |
import pymongo
import argparse
import yfinance as yf
import time
from utils import read_config
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from bson.objectid import ObjectId
def melt_dataframes(dfs: tuple) -> pd.DataFrame:
    """
    Merge several wide yfinance dataframes (metric names as the index, dates
    as columns) into one long-format dataframe with columns
    ``metric`` / ``date`` / ``value``.

    None and empty inputs are skipped and rows with missing values are
    dropped. Returns None when no input frame contributed any datapoints.
    """
    melted_frames = []
    for df in dfs:
        if df is None or len(df) == 0:
            continue
        # The metric name lives in the index; expose it as a column so it
        # survives the melt. (This mutates the caller's frame, exactly as
        # the previous implementation did.)
        df["metric"] = df.index
        melted = pd.melt(df, id_vars="metric", var_name="date")
        melted = melted.dropna(axis=0, how="any")
        if len(melted) == 0:
            continue
        melted_frames.append(melted)
    if not melted_frames:
        return None
    # DataFrame.append() was deprecated in pandas 1.4 and removed in 2.0;
    # concatenate all per-frame results in a single pass instead.
    result = pd.concat(melted_frames)
    if "date" in result.columns:
        # Column headers are date strings (or stringified years from the
        # caller); normalise them to datetimes. infer_datetime_format is
        # deprecated and the default behaviour in modern pandas.
        result["date"] = pd.to_datetime(result["date"])
    return result
def desired_stocks():
    """
    Return the set of ASX codes known to the database that have NOT had any
    financial metric written within the past month.

    Relies on the module-level ``db`` MongoDB handle being initialised.
    """
    available_stocks = set(db.asx_company_details.distinct("asx_code"))
    print(f"Found {len(available_stocks)} available stocks.")
    gen_time = datetime.today() - timedelta(days=30)
    # ObjectIds embed their creation timestamp, so an ObjectId synthesised
    # from a datetime can be used to range-query documents by insertion time.
    month_ago = ObjectId.from_datetime(gen_time)
    recently_updated_stocks = set(
        [
            rec["asx_code"]
            for rec in db.asx_company_financial_metrics.find(
                {"_id": {"$gte": month_ago}}
            )
        ]
    )
    ret = available_stocks.difference(recently_updated_stocks)
    print(f"Found {len(ret)} desired stocks to process.")
    return ret
def update_all_metrics(df: pd.DataFrame, asx_code: str) -> int:
    """
    Upsert every (metric, date) datapoint of ``df`` into the
    asx_company_financial_metrics collection for the given stock code.

    Returns the number of rows written. Relies on the module-level ``db``
    MongoDB handle being initialised.
    """
    print(f"Updating {len(df)} financial metrics for {asx_code}")
    n = 0
    for t in df.itertuples():
        d = {
            "metric": t.metric,
            "date": t.date,
            "value": t.value,
            "asx_code": t.asx_code,
        }
        # Every row must belong to the stock we were asked to update.
        assert t.asx_code == asx_code
        # Upsert keyed on (asx_code, date, metric) so re-runs are idempotent.
        result = db.asx_company_financial_metrics.update_one(
            {"asx_code": asx_code, "date": t.date, "metric": t.metric},
            {"$set": d},
            upsert=True,
        )
        assert result is not None
        assert isinstance(result, pymongo.results.UpdateResult)
        assert result.matched_count == 1 or result.upserted_id is not None
        n += 1
    return n
def fetch_metrics(asx_code: str) -> pd.DataFrame:
assert len(asx_code) >= 3
ticker = yf.Ticker(asx_code + ".AX")
cashflow_df = ticker.cashflow
financial_df = ticker.financials
earnings_df = ticker.earnings
if set(earnings_df.columns) == set(["Earnings", "Revenue"]):
earnings_df.index = earnings_df.index.map(
str
)
earnings_df = earnings_df.transpose()
balance_sheet_df = ticker.balance_sheet
melted_df = melt_dataframes(
(cashflow_df, financial_df, earnings_df, balance_sheet_df)
)
return melted_df
def make_asx_prices_dict(new_quote: tuple, asx_code: str) -> dict:
d = {
"asx_code": asx_code,
"fetch_date": new_quote.Index,
"volume": new_quote.Volume,
"last_price": new_quote.Close,
"day_low_price": new_quote.Low,
"day_high_price": new_quote.High,
"open_price": new_quote.Open,
"error_code": "",
"error_descr": "",
ange_in_percent,
}
return d
def fill_stock_quote_gaps(db, stock_to_fetch: str, force=False) -> int:
assert db is not None
assert len(stock_to_fetch) >= 3
ticker = yf.Ticker(stock_to_fetch + ".AX")
df = ticker.history(period="max")
df.index = [d.strftime("%Y-%m-%d") for d in df.index]
available_dates = set(df.index)
available_quotes = list(db.asx_prices.find({"asx_code": stock_to_fetch}))
quoted_dates = set(
[q["fetch_date"] for q in available_quotes if not np.isnan(q["last_price"])]
)
assert set(df.columns) == set(
["Open", "High", "Low", "Close", "Volume", "Dividends", "Stock Splits"]
)
dates_to_fill = (
available_dates.difference(quoted_dates) if not force else available_dates
)
print(
"Got {} existing daily quotes for {}, found {} yfinance daily quotes, gap filling for {} dates (force={})".format(
len(available_quotes), stock_to_fetch, len(df), len(dates_to_fill), force
)
)
if len(dates_to_fill) < 1:
return 0
df["change_price"] = df["Close"].diff()
df["change_in_percent"] = df["Close"].pct_change() * 100.0
gap_quotes_df = df.filter(dates_to_fill, axis=0)
n = 0
for new_quote in gap_quotes_df.itertuples():
d = make_asx_prices_dict(new_quote, stock_to_fetch)
result = db.asx_prices.update_one(
{"fetch_date": d["fetch_date"], "asx_code": d["asx_code"]},
{"$set": d},
upsert=True,
)
assert result is not None
n += 1
assert n == len(gap_quotes_df)
return n
if __name__ == "__main__":
args = argparse.ArgumentParser(
description="Update financial performance metrics for ASX stocks using yfinance"
)
args.add_argument(
"--config",
help="Configuration file to use [config.json]",
type=str,
default="config.json",
)
args.add_argument(
"--fill-gaps",
help="Fill dates with no existing quotes for each stock (use --debug for a particular stock)",
action="store_true",
)
args.add_argument("--fail-fast", help="Stop on first error", action="store_true")
args.add_argument(
"--delay", help="Delay between stocks in seconds [30]", type=int, default=30
)
args.add_argument("--force", help="Overwrite existing data (if any)", action="store_true")
args.add_argument(
"--debug",
help="Try to fetch specified stock (for debugging)",
type=str,
required=False,
default=None,
)
a = args.parse_args()
config, password = read_config(a.config)
m = config.get("mongo")
mongo = pymongo.MongoClient(
m.get("host"), m.get("port"), username=m.get("user"), password=password
)
db = mongo[m.get("db")]
stock_codes = desired_stocks() if not a.debug else set([a.debug])
print(f"Updating financial metrics for {len(stock_codes)} stocks")
for asx_code in sorted(stock_codes):
print(f"Processing stock {asx_code}")
try:
melted_df = fetch_metrics(asx_code)
if melted_df is None or len(melted_df) < 1:
raise ValueError(f"No data available for {asx_code}... skipping")
melted_df["asx_code"] = asx_code
ret = update_all_metrics(melted_df, asx_code)
assert ret == len(melted_df)
if a.fill_gaps:
fill_stock_quote_gaps(db, asx_code, force=a.force)
time.sleep(a.delay)
except Exception as e:
print(f"WARNING: unable to download financials for {asx_code}")
print(str(e))
if a.fail_fast:
raise e
exit(0)
| true | true |
f717eda1c497c2f501c58a071e0a58d22211d3f9 | 6,442 | py | Python | src/waldur_slurm/serializers.py | opennode/nodeconductor-assembly-waldur | cad9966389dc9b52b13d2301940c99cf4b243900 | [
"MIT"
] | 2 | 2017-01-20T15:26:25.000Z | 2017-08-03T04:38:08.000Z | src/waldur_slurm/serializers.py | opennode/nodeconductor-assembly-waldur | cad9966389dc9b52b13d2301940c99cf4b243900 | [
"MIT"
] | null | null | null | src/waldur_slurm/serializers.py | opennode/nodeconductor-assembly-waldur | cad9966389dc9b52b13d2301940c99cf4b243900 | [
"MIT"
] | null | null | null | import re
from django.core.validators import MinValueValidator
from django.utils.translation import gettext_lazy as _
from rest_framework import exceptions as rf_exceptions
from rest_framework import serializers as rf_serializers
from waldur_core.core import serializers as core_serializers
from waldur_core.structure import serializers as structure_serializers
from waldur_core.structure.permissions import _has_admin_access
from waldur_freeipa import models as freeipa_models
from . import models
class SlurmServiceSerializer(structure_serializers.ServiceOptionsSerializer):
    """Validates connection options of SLURM service settings."""
    class Meta:
        # Fields hidden from unprivileged users — presumably a Waldur
        # ServiceOptionsSerializer convention; confirm against waldur_core.
        secret_fields = ('hostname', 'username', 'port', 'gateway')
    username = rf_serializers.CharField(
        max_length=100, help_text=_('Administrative user'), default='root'
    )
    # All options below are persisted under service_settings.options.
    hostname = rf_serializers.CharField(
        source='options.hostname', label=_('Hostname or IP address of master node')
    )
    default_account = rf_serializers.CharField(
        source='options.default_account', label=_('Default SLURM account for user')
    )
    port = rf_serializers.IntegerField(source='options.port', required=False)
    use_sudo = rf_serializers.BooleanField(
        source='options.use_sudo',
        default=False,
        help_text=_('Set to true to activate privilege escalation'),
        required=False,
    )
    gateway = rf_serializers.CharField(
        source='options.gateway',
        label=_('Hostname or IP address of gateway node'),
        required=False,
    )
    firecrest_api_url = rf_serializers.CharField(
        source='options.firecrest_api_url',
        label=_('FirecREST API base URL'),
        required=False,
    )
class AllocationSerializer(
    structure_serializers.BaseResourceSerializer,
    core_serializers.AugmentedSerializerMixin,
):
    """Serializer for SLURM allocations with usage and limit counters."""
    username = rf_serializers.SerializerMethodField()
    gateway = rf_serializers.SerializerMethodField()
    homepage = rf_serializers.ReadOnlyField(source='service_settings.homepage')
    def get_username(self, allocation):
        # The SLURM username comes from the *requesting user's* FreeIPA
        # profile, not from the allocation itself; None when no profile.
        request = self.context['request']
        try:
            profile = freeipa_models.Profile.objects.get(user=request.user)
            return profile.username
        except freeipa_models.Profile.DoesNotExist:
            return None
    def get_gateway(self, allocation):
        # Fall back to the master node hostname when no dedicated gateway
        # is configured for the service settings.
        options = allocation.service_settings.options
        return options.get('gateway') or options.get('hostname')
    class Meta(structure_serializers.BaseResourceSerializer.Meta):
        model = models.Allocation
        fields = structure_serializers.BaseResourceSerializer.Meta.fields + (
            'cpu_limit',
            'cpu_usage',
            'gpu_limit',
            'gpu_usage',
            'ram_limit',
            'ram_usage',
            'username',
            'gateway',
            'is_active',
            'homepage',
        )
        read_only_fields = (
            structure_serializers.BaseResourceSerializer.Meta.read_only_fields
            + (
                'cpu_usage',
                'gpu_usage',
                'ram_usage',
                'cpu_limit',
                'gpu_limit',
                'ram_limit',
                'is_active',
            )
        )
        extra_kwargs = dict(
            url={'lookup_field': 'uuid', 'view_name': 'slurm-allocation-detail'},
            cpu_limit={'validators': [MinValueValidator(0)]},
            gpu_limit={'validators': [MinValueValidator(0)]},
            ram_limit={'validators': [MinValueValidator(0)]},
        )
    def validate(self, attrs):
        attrs = super(AllocationSerializer, self).validate(attrs)
        # Skip validation on update
        if self.instance:
            return attrs
        # Allocation names become SLURM account names, which have a
        # restricted alphabet (SLURM_ALLOCATION_REGEX) and max length of 63.
        correct_name_regex = '^([%s]{1,63})$' % models.SLURM_ALLOCATION_REGEX
        name = attrs.get('name')
        if not re.match(correct_name_regex, name):
            raise rf_serializers.ValidationError(
                _(
                    "Name '%s' must be 1-63 characters long, each of "
                    "which can only be alphanumeric or a hyphen"
                )
                % name
            )
        # Only project administrators may create allocations.
        project = attrs['project']
        user = self.context['request'].user
        if not _has_admin_access(user, project):
            raise rf_exceptions.PermissionDenied(
                _('You do not have permissions to create allocation for given project.')
            )
        return attrs
class AllocationSetLimitsSerializer(rf_serializers.ModelSerializer):
    """Payload for updating allocation limits.

    Limits accept -1 (presumably meaning "unlimited" — confirm with the
    backend), unlike creation where MinValueValidator(0) applies.
    """
    cpu_limit = rf_serializers.IntegerField(min_value=-1)
    gpu_limit = rf_serializers.IntegerField(min_value=-1)
    ram_limit = rf_serializers.IntegerField(min_value=-1)
    class Meta:
        model = models.Allocation
        fields = ('cpu_limit', 'gpu_limit', 'ram_limit')
class AllocationUserUsageCreateSerializer(rf_serializers.HyperlinkedModelSerializer):
    """Input serializer for reporting per-user monthly usage records."""
    class Meta:
        model = models.AllocationUserUsage
        fields = (
            'cpu_usage',
            'ram_usage',
            'gpu_usage',
            'month',
            'year',
            'user',
            'username',
        )
        extra_kwargs = {
            'user': {
                'lookup_field': 'uuid',
                'view_name': 'user-detail',
            },
        }
class AllocationUserUsageSerializer(rf_serializers.HyperlinkedModelSerializer):
    """Read serializer for per-user monthly usage, enriched with full name."""
    full_name = rf_serializers.ReadOnlyField(source='user.full_name')
    class Meta:
        model = models.AllocationUserUsage
        fields = (
            'cpu_usage',
            'ram_usage',
            'gpu_usage',
            'month',
            'year',
            'allocation',
            'user',
            'username',
            'full_name',
        )
        extra_kwargs = {
            'allocation': {
                'lookup_field': 'uuid',
                'view_name': 'slurm-allocation-detail',
            },
            'user': {
                'lookup_field': 'uuid',
                'view_name': 'user-detail',
            },
        }
class AssociationSerializer(rf_serializers.HyperlinkedModelSerializer):
    """Serializer linking a SLURM username to its allocation."""
    allocation = rf_serializers.HyperlinkedRelatedField(
        queryset=models.Allocation.objects.all(),
        view_name='slurm-allocation-detail',
        lookup_field='uuid',
    )
    class Meta:
        model = models.Association
        fields = (
            'uuid',
            'username',
            'allocation',
        )
| 31.42439 | 88 | 0.606023 | import re
from django.core.validators import MinValueValidator
from django.utils.translation import gettext_lazy as _
from rest_framework import exceptions as rf_exceptions
from rest_framework import serializers as rf_serializers
from waldur_core.core import serializers as core_serializers
from waldur_core.structure import serializers as structure_serializers
from waldur_core.structure.permissions import _has_admin_access
from waldur_freeipa import models as freeipa_models
from . import models
class SlurmServiceSerializer(structure_serializers.ServiceOptionsSerializer):
class Meta:
secret_fields = ('hostname', 'username', 'port', 'gateway')
username = rf_serializers.CharField(
max_length=100, help_text=_('Administrative user'), default='root'
)
hostname = rf_serializers.CharField(
source='options.hostname', label=_('Hostname or IP address of master node')
)
default_account = rf_serializers.CharField(
source='options.default_account', label=_('Default SLURM account for user')
)
port = rf_serializers.IntegerField(source='options.port', required=False)
use_sudo = rf_serializers.BooleanField(
source='options.use_sudo',
default=False,
help_text=_('Set to true to activate privilege escalation'),
required=False,
)
gateway = rf_serializers.CharField(
source='options.gateway',
label=_('Hostname or IP address of gateway node'),
required=False,
)
firecrest_api_url = rf_serializers.CharField(
source='options.firecrest_api_url',
label=_('FirecREST API base URL'),
required=False,
)
class AllocationSerializer(
structure_serializers.BaseResourceSerializer,
core_serializers.AugmentedSerializerMixin,
):
username = rf_serializers.SerializerMethodField()
gateway = rf_serializers.SerializerMethodField()
homepage = rf_serializers.ReadOnlyField(source='service_settings.homepage')
def get_username(self, allocation):
request = self.context['request']
try:
profile = freeipa_models.Profile.objects.get(user=request.user)
return profile.username
except freeipa_models.Profile.DoesNotExist:
return None
def get_gateway(self, allocation):
options = allocation.service_settings.options
return options.get('gateway') or options.get('hostname')
class Meta(structure_serializers.BaseResourceSerializer.Meta):
model = models.Allocation
fields = structure_serializers.BaseResourceSerializer.Meta.fields + (
'cpu_limit',
'cpu_usage',
'gpu_limit',
'gpu_usage',
'ram_limit',
'ram_usage',
'username',
'gateway',
'is_active',
'homepage',
)
read_only_fields = (
structure_serializers.BaseResourceSerializer.Meta.read_only_fields
+ (
'cpu_usage',
'gpu_usage',
'ram_usage',
'cpu_limit',
'gpu_limit',
'ram_limit',
'is_active',
)
)
extra_kwargs = dict(
url={'lookup_field': 'uuid', 'view_name': 'slurm-allocation-detail'},
cpu_limit={'validators': [MinValueValidator(0)]},
gpu_limit={'validators': [MinValueValidator(0)]},
ram_limit={'validators': [MinValueValidator(0)]},
)
def validate(self, attrs):
attrs = super(AllocationSerializer, self).validate(attrs)
if self.instance:
return attrs
correct_name_regex = '^([%s]{1,63})$' % models.SLURM_ALLOCATION_REGEX
name = attrs.get('name')
if not re.match(correct_name_regex, name):
raise rf_serializers.ValidationError(
_(
"Name '%s' must be 1-63 characters long, each of "
"which can only be alphanumeric or a hyphen"
)
% name
)
project = attrs['project']
user = self.context['request'].user
if not _has_admin_access(user, project):
raise rf_exceptions.PermissionDenied(
_('You do not have permissions to create allocation for given project.')
)
return attrs
class AllocationSetLimitsSerializer(rf_serializers.ModelSerializer):
cpu_limit = rf_serializers.IntegerField(min_value=-1)
gpu_limit = rf_serializers.IntegerField(min_value=-1)
ram_limit = rf_serializers.IntegerField(min_value=-1)
class Meta:
model = models.Allocation
fields = ('cpu_limit', 'gpu_limit', 'ram_limit')
class AllocationUserUsageCreateSerializer(rf_serializers.HyperlinkedModelSerializer):
class Meta:
model = models.AllocationUserUsage
fields = (
'cpu_usage',
'ram_usage',
'gpu_usage',
'month',
'year',
'user',
'username',
)
extra_kwargs = {
'user': {
'lookup_field': 'uuid',
'view_name': 'user-detail',
},
}
class AllocationUserUsageSerializer(rf_serializers.HyperlinkedModelSerializer):
full_name = rf_serializers.ReadOnlyField(source='user.full_name')
class Meta:
model = models.AllocationUserUsage
fields = (
'cpu_usage',
'ram_usage',
'gpu_usage',
'month',
'year',
'allocation',
'user',
'username',
'full_name',
)
extra_kwargs = {
'allocation': {
'lookup_field': 'uuid',
'view_name': 'slurm-allocation-detail',
},
'user': {
'lookup_field': 'uuid',
'view_name': 'user-detail',
},
}
class AssociationSerializer(rf_serializers.HyperlinkedModelSerializer):
allocation = rf_serializers.HyperlinkedRelatedField(
queryset=models.Allocation.objects.all(),
view_name='slurm-allocation-detail',
lookup_field='uuid',
)
class Meta:
model = models.Association
fields = (
'uuid',
'username',
'allocation',
)
| true | true |
f717efcb5290a464a7824eb3e23e80853f7e2668 | 1,230 | py | Python | ddns_clienter_core/runtimes/address_providers/host_name.py | rexzhang/ddns-clienter | f170cb579d49df2aa4aa1f607bbcf088af9cd4a5 | [
"MIT"
] | null | null | null | ddns_clienter_core/runtimes/address_providers/host_name.py | rexzhang/ddns-clienter | f170cb579d49df2aa4aa1f607bbcf088af9cd4a5 | [
"MIT"
] | null | null | null | ddns_clienter_core/runtimes/address_providers/host_name.py | rexzhang/ddns-clienter | f170cb579d49df2aa4aa1f607bbcf088af9cd4a5 | [
"MIT"
] | null | null | null | import socket
from logging import getLogger
from .abs import AddressProviderAbs, AddressProviderException
logger = getLogger(__name__)
class AddressProviderHostName(AddressProviderAbs):
    """Address provider that resolves a configured hostname via DNS."""

    @property
    def name(self):
        return "hostname"

    def _detect_ip_address(self) -> None:
        hostname = self._address_c.parameter
        try:
            addr_info = socket.getaddrinfo(hostname, 80)
        except socket.gaierror as e:
            message = "Detect IP Address failed, hostname:'{}', message:{}".format(
                hostname, e
            )
            logger.error(message)
            raise AddressProviderException(message)
        # getaddrinfo() yields (family, type, proto, canonname, sockaddr)
        # tuples; keep only TCP entries for the enabled address families.
        for family, sock_type, _proto, _canonname, sockaddr in addr_info:
            if sock_type != socket.SOCK_STREAM:
                continue
            if family == socket.AF_INET and self._address_c.ipv4:
                self.set_ipv4_address(sockaddr[0])
            elif family == socket.AF_INET6 and self._address_c.ipv6:
                self.set_ipv6_address(sockaddr[0])
| 29.285714 | 83 | 0.550407 | import socket
from logging import getLogger
from .abs import AddressProviderAbs, AddressProviderException
logger = getLogger(__name__)
class AddressProviderHostName(AddressProviderAbs):
@property
def name(self):
return "hostname"
def _detect_ip_address(self) -> None:
try:
data = socket.getaddrinfo(self._address_c.parameter, 80)
except socket.gaierror as e:
message = "Detect IP Address failed, hostname:'{}', message:{}".format(
self._address_c.parameter, e
)
logger.error(message)
raise AddressProviderException(message)
for item in data:
if (
item[0] == socket.AF_INET
and item[1] == socket.SOCK_STREAM
and self._address_c.ipv4
):
ip_address = item[4][0]
self.set_ipv4_address(ip_address)
continue
if (
item[0] == socket.AF_INET6
and item[1] == socket.SOCK_STREAM
and self._address_c.ipv6
):
ip_address = item[4][0]
self.set_ipv6_address(ip_address)
continue
| true | true |
f717f02ea026ba8bb72ef7721a359f8e060f9f1e | 1,289 | py | Python | homeassistant/components/homekit/diagnostics.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/homekit/diagnostics.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | homeassistant/components/homekit/diagnostics.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Diagnostics support for HomeKit."""
from __future__ import annotations
from typing import Any
from pyhap.accessory_driver import AccessoryDriver
from pyhap.state import State
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from . import HomeKit
from .const import DOMAIN, HOMEKIT
async def async_get_config_entry_diagnostics(
    hass: HomeAssistant, entry: ConfigEntry
) -> dict[str, Any]:
    """Return diagnostics for a config entry."""
    homekit: HomeKit = hass.data[DOMAIN][entry.entry_id][HOMEKIT]
    diagnostics: dict[str, Any] = {
        "status": homekit.status,
        "config-entry": {
            "title": entry.title,
            "version": entry.version,
            "data": dict(entry.data),
            "options": dict(entry.options),
        },
    }
    # The HomeKit object may not have an accessory driver yet (presumably
    # before the bridge is started); report only the entry details then.
    if not hasattr(homekit, "driver"):
        return diagnostics
    driver: AccessoryDriver = homekit.driver
    diagnostics.update(driver.get_accessories())
    state: State = driver.state
    diagnostics["client_properties"] = {
        str(client): props for client, props in state.client_properties.items()
    }
    diagnostics["config_version"] = state.config_version
    diagnostics["pairing_id"] = state.mac
    return diagnostics
| 28.644444 | 87 | 0.644686 | from __future__ import annotations
from typing import Any
from pyhap.accessory_driver import AccessoryDriver
from pyhap.state import State
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from . import HomeKit
from .const import DOMAIN, HOMEKIT
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, entry: ConfigEntry
) -> dict[str, Any]:
homekit: HomeKit = hass.data[DOMAIN][entry.entry_id][HOMEKIT]
data: dict[str, Any] = {
"status": homekit.status,
"config-entry": {
"title": entry.title,
"version": entry.version,
"data": dict(entry.data),
"options": dict(entry.options),
},
}
if not hasattr(homekit, "driver"):
return data
driver: AccessoryDriver = homekit.driver
data.update(driver.get_accessories())
state: State = driver.state
data.update(
{
"client_properties": {
str(client): props for client, props in state.client_properties.items()
},
"config_version": state.config_version,
"pairing_id": state.mac,
}
)
return data
| true | true |
f717f0755133f546a112ab7edad203a839137d37 | 3,107 | py | Python | tests/test_rmsa.py | ReleaseTheSpice/optical-rl-gym | 1913e19ba59dfd1e426d5783b68c045d2daf354a | [
"MIT"
] | null | null | null | tests/test_rmsa.py | ReleaseTheSpice/optical-rl-gym | 1913e19ba59dfd1e426d5783b68c045d2daf354a | [
"MIT"
] | null | null | null | tests/test_rmsa.py | ReleaseTheSpice/optical-rl-gym | 1913e19ba59dfd1e426d5783b68c045d2daf354a | [
"MIT"
] | null | null | null | import os
import gym
from optical_rl_gym.envs.rmsa_env import shortest_path_first_fit, shortest_available_path_first_fit, \
    least_loaded_path_first_fit, SimpleMatrixObservation
from optical_rl_gym.utils import evaluate_heuristic, random_policy
import pickle
import logging
import numpy as np
import matplotlib.pyplot as plt
# Offered traffic load passed to the RMSA environment.
load = 50
logging.getLogger('rmsaenv').setLevel(logging.INFO)
# NOTE(review): seed, monitor_files and policies are defined but never used
# below; the environments are seeded via env_args instead.
seed = 20
episodes = 10
episode_length = 100
monitor_files = []
policies = []
# topology_name = 'gbn'
# topology_name = 'nobel-us'
# topology_name = 'germany50'
# Load a pre-computed topology pickle (filename suggests 5 pre-computed
# candidate paths per node pair).
with open(os.path.join('..', 'examples', 'topologies', 'nsfnet_chen_eon_5-paths.h5'), 'rb') as f:
    topology = pickle.load(f)
env_args = dict(topology=topology, seed=10, allow_rejection=True, load=load, mean_service_holding_time=25,
                episode_length=episode_length, num_spectrum_resources=64, bit_rate_selection='discrete')
# Header for the per-strategy reward table printed below.
print('STR'.ljust(5), 'REW'.rjust(7), 'STD'.rjust(7))
# Baseline: random policy over the matrix observation wrapper.
init_env = gym.make('RMSA-v0', **env_args)
env_rnd = SimpleMatrixObservation(init_env)
mean_reward_rnd, std_reward_rnd = evaluate_heuristic(env_rnd, random_policy, n_eval_episodes=episodes)
print('Rnd:'.ljust(8), f'{mean_reward_rnd:.4f} {std_reward_rnd:>7.4f}')
print('\tBit rate blocking:', (init_env.episode_bit_rate_requested - init_env.episode_bit_rate_provisioned) / init_env.episode_bit_rate_requested)
print('\tRequest blocking:', (init_env.episode_services_processed - init_env.episode_services_accepted) / init_env.episode_services_processed)
print(init_env.topology.graph['throughput'])
# exit(0)
# Heuristic: shortest path + first-fit spectrum assignment.
env_sp = gym.make('RMSA-v0', **env_args)
mean_reward_sp, std_reward_sp = evaluate_heuristic(env_sp, shortest_path_first_fit, n_eval_episodes=episodes)
print('SP-FF:'.ljust(8), f'{mean_reward_sp:.4f} {std_reward_sp:<7.4f}')
print('\tBit rate blocking:', (env_sp.episode_bit_rate_requested - env_sp.episode_bit_rate_provisioned) / env_sp.episode_bit_rate_requested)
print('\tRequest blocking:', (env_sp.episode_services_processed - env_sp.episode_services_accepted) / env_sp.episode_services_processed)
# Heuristic: shortest available path + first-fit spectrum assignment.
env_sap = gym.make('RMSA-v0', **env_args)
mean_reward_sap, std_reward_sap = evaluate_heuristic(env_sap, shortest_available_path_first_fit, n_eval_episodes=episodes)
print('SAP-FF:'.ljust(8), f'{mean_reward_sap:.4f} {std_reward_sap:.4f}')
print('\tBit rate blocking:', (env_sap.episode_bit_rate_requested - env_sap.episode_bit_rate_provisioned) / env_sap.episode_bit_rate_requested)
print('\tRequest blocking:', (env_sap.episode_services_processed - env_sap.episode_services_accepted) / env_sap.episode_services_processed)
# Heuristic: least-loaded path + first-fit spectrum assignment.
env_llp = gym.make('RMSA-v0', **env_args)
mean_reward_llp, std_reward_llp = evaluate_heuristic(env_llp, least_loaded_path_first_fit, n_eval_episodes=episodes)
print('LLP-FF:'.ljust(8), f'{mean_reward_llp:.4f} {std_reward_llp:.4f}')
print('\tBit rate blocking:', (env_llp.episode_bit_rate_requested - env_llp.episode_bit_rate_provisioned) / env_llp.episode_bit_rate_requested)
print('\tRequest blocking:', (env_llp.episode_services_processed - env_llp.episode_services_accepted) / env_llp.episode_services_processed)
| 51.783333 | 146 | 0.798519 | import os
import gym
from optical_rl_gym.envs.rmsa_env import shortest_path_first_fit, shortest_available_path_first_fit, \
least_loaded_path_first_fit, SimpleMatrixObservation
from optical_rl_gym.utils import evaluate_heuristic, random_policy
import pickle
import logging
import numpy as np
import matplotlib.pyplot as plt
load = 50
logging.getLogger('rmsaenv').setLevel(logging.INFO)
seed = 20
episodes = 10
episode_length = 100
monitor_files = []
policies = []
with open(os.path.join('..', 'examples', 'topologies', 'nsfnet_chen_eon_5-paths.h5'), 'rb') as f:
topology = pickle.load(f)
env_args = dict(topology=topology, seed=10, allow_rejection=True, load=load, mean_service_holding_time=25,
episode_length=episode_length, num_spectrum_resources=64, bit_rate_selection='discrete')
print('STR'.ljust(5), 'REW'.rjust(7), 'STD'.rjust(7))
init_env = gym.make('RMSA-v0', **env_args)
env_rnd = SimpleMatrixObservation(init_env)
mean_reward_rnd, std_reward_rnd = evaluate_heuristic(env_rnd, random_policy, n_eval_episodes=episodes)
print('Rnd:'.ljust(8), f'{mean_reward_rnd:.4f} {std_reward_rnd:>7.4f}')
print('\tBit rate blocking:', (init_env.episode_bit_rate_requested - init_env.episode_bit_rate_provisioned) / init_env.episode_bit_rate_requested)
print('\tRequest blocking:', (init_env.episode_services_processed - init_env.episode_services_accepted) / init_env.episode_services_processed)
print(init_env.topology.graph['throughput'])
env_sp = gym.make('RMSA-v0', **env_args)
mean_reward_sp, std_reward_sp = evaluate_heuristic(env_sp, shortest_path_first_fit, n_eval_episodes=episodes)
print('SP-FF:'.ljust(8), f'{mean_reward_sp:.4f} {std_reward_sp:<7.4f}')
print('\tBit rate blocking:', (env_sp.episode_bit_rate_requested - env_sp.episode_bit_rate_provisioned) / env_sp.episode_bit_rate_requested)
print('\tRequest blocking:', (env_sp.episode_services_processed - env_sp.episode_services_accepted) / env_sp.episode_services_processed)
env_sap = gym.make('RMSA-v0', **env_args)
mean_reward_sap, std_reward_sap = evaluate_heuristic(env_sap, shortest_available_path_first_fit, n_eval_episodes=episodes)
print('SAP-FF:'.ljust(8), f'{mean_reward_sap:.4f} {std_reward_sap:.4f}')
print('\tBit rate blocking:', (env_sap.episode_bit_rate_requested - env_sap.episode_bit_rate_provisioned) / env_sap.episode_bit_rate_requested)
print('\tRequest blocking:', (env_sap.episode_services_processed - env_sap.episode_services_accepted) / env_sap.episode_services_processed)
env_llp = gym.make('RMSA-v0', **env_args)
mean_reward_llp, std_reward_llp = evaluate_heuristic(env_llp, least_loaded_path_first_fit, n_eval_episodes=episodes)
print('LLP-FF:'.ljust(8), f'{mean_reward_llp:.4f} {std_reward_llp:.4f}')
print('\tBit rate blocking:', (env_llp.episode_bit_rate_requested - env_llp.episode_bit_rate_provisioned) / env_llp.episode_bit_rate_requested)
print('\tRequest blocking:', (env_llp.episode_services_processed - env_llp.episode_services_accepted) / env_llp.episode_services_processed)
| true | true |
f717f359062fe9bbd5d9893e4b7b8942420830f7 | 1,037 | py | Python | auctionbot/users/migrations/0002_auto_20171231_1027.py | netvigator/auctions | f88bcce800b60083a5d1a6f272c51bb540b8342a | [
"MIT"
] | null | null | null | auctionbot/users/migrations/0002_auto_20171231_1027.py | netvigator/auctions | f88bcce800b60083a5d1a6f272c51bb540b8342a | [
"MIT"
] | 13 | 2019-12-12T03:07:55.000Z | 2022-03-07T12:59:27.000Z | auctionbot/users/migrations/0002_auto_20171231_1027.py | netvigator/auctions | f88bcce800b60083a5d1a6f272c51bb540b8342a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-12-31 03:27
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add cBio, cLocation and iMarket columns to the custom user model."""
    dependencies = [
        ('users', '0001_initial'),
    ]
    # Presumably a dropped dependency on the markets app, matching the
    # disabled ForeignKey below:
    # ('markets', '0012_auto_20171220_1319'),
    operations = [
        migrations.AddField(
            model_name='user',
            name='cBio',
            field=models.TextField(blank=True, max_length=500),
        ),
        migrations.AddField(
            model_name='user',
            name='cLocation',
            field=models.CharField(blank=True, max_length=30),
        ),
        migrations.AddField(
            model_name='user',
            name='iMarket',
            # Originally modelled as a ForeignKey to markets.Market; kept as
            # a plain integer instead — presumably to avoid the cross-app
            # dependency (TODO confirm intent).
            #field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='markets.Market', verbose_name='ebay market (default)'),
            field=models.PositiveIntegerField(default=1, verbose_name='ebay market (default)'),
        ),
    ]
| 30.5 | 152 | 0.611379 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add bio, location and default ebay-market fields to the user model."""

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='cBio',
            field=models.TextField(blank=True, max_length=500),
        ),
        migrations.AddField(
            model_name='user',
            name='cLocation',
            field=models.CharField(blank=True, max_length=30),
        ),
        migrations.AddField(
            model_name='user',
            name='iMarket',
            # Plain integer id of the default ebay market (not a ForeignKey).
            field=models.PositiveIntegerField(default=1, verbose_name='ebay market (default)'),
        ),
    ]
f717f4028211e6a9f3c853dde20a6d21323b607a | 362 | py | Python | examples/list_all_adb_devices.py | riquedev/WhatsAppManifest | bcbbd48f6f9152024a54172886876d3a725a3a62 | [
"MIT"
] | 15 | 2020-03-11T17:31:12.000Z | 2021-11-19T03:26:09.000Z | examples/list_all_adb_devices.py | riquedev/WhatsAppManifest | bcbbd48f6f9152024a54172886876d3a725a3a62 | [
"MIT"
] | 5 | 2021-03-31T19:43:15.000Z | 2022-03-12T00:18:38.000Z | examples/list_all_adb_devices.py | riquedev/WhatsAppManifest | bcbbd48f6f9152024a54172886876d3a725a3a62 | [
"MIT"
] | 4 | 2020-03-11T01:52:57.000Z | 2021-03-16T04:14:33.000Z | from WhatsAppManifest import ADB, Automator
# Note: We need the AdbServer class (even without using SSH) so that Automator
# can open the internal connection.
# Connect to the local ADB server and dump interactive help() for every
# attached device; state=None means: do not filter by device state.
with ADB(use_ssh=False) as AdbServer:
    automator = Automator(adb_server=AdbServer, adb_host="127.0.0.1", adb_port=5037)
    for device in automator.list_devices(state=None):
        help(device)
| 36.2 | 112 | 0.748619 | from WhatsAppManifest import ADB, Automator
# List every connected ADB device via WhatsAppManifest's Automator and show
# its interactive help.  ADB is used as a context manager so the server
# connection is opened and closed cleanly.
with ADB(use_ssh=False) as AdbServer:
    automator = Automator(adb_server=AdbServer, adb_host="127.0.0.1", adb_port=5037)
    for device in automator.list_devices(state=None):
        help(device)
| true | true |
f717f4717d60ec922e24c1a81798c104320021d4 | 33,686 | py | Python | scipy/interpolate/polyint.py | f0k/scipy | 3145a226339b14bbc22f2e984848e05def7659c5 | [
"BSD-3-Clause"
] | null | null | null | scipy/interpolate/polyint.py | f0k/scipy | 3145a226339b14bbc22f2e984848e05def7659c5 | [
"BSD-3-Clause"
] | null | null | null | scipy/interpolate/polyint.py | f0k/scipy | 3145a226339b14bbc22f2e984848e05def7659c5 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.misc import factorial
from scipy.lib.six.moves import xrange
__all__ = ["KroghInterpolator", "krogh_interpolate", "BarycentricInterpolator", "barycentric_interpolate", "PiecewisePolynomial", "piecewise_polynomial_interpolate","approximate_taylor_polynomial", "pchip"]
class KroghInterpolator(object):
    """
    The interpolating polynomial for a set of points

    Constructs a polynomial that passes through a given set of points,
    optionally with specified derivatives at those points.
    Allows evaluation of the polynomial and all its derivatives.
    For reasons of numerical stability, this function does not compute
    the coefficients of the polynomial, although they can be obtained
    by evaluating all the derivatives.

    Be aware that the algorithms implemented here are not necessarily
    the most numerically stable known. Moreover, even in a world of
    exact computation, unless the x coordinates are chosen very
    carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -
    polynomial interpolation itself is a very ill-conditioned process
    due to the Runge phenomenon. In general, even with well-chosen
    x values, degrees higher than about thirty cause problems with
    numerical instability in this code.

    Based on [1]_.

    Parameters
    ----------
    xi : array_like, length N
        Known x-coordinates
    yi : array_like, N by R
        Known y-coordinates, interpreted as vectors of length R,
        or scalars if R=1. When an xi occurs two or more times in
        a row, the corresponding yi's represent derivative values.

    References
    ----------
    .. [1] Krogh, "Efficient Algorithms for Polynomial Interpolation
        and Numerical Differentiation", 1970.

    """
    def __init__(self, xi, yi):
        """Construct an interpolator passing through the specified points

        The polynomial passes through all the pairs (xi,yi). One may
        additionally specify a number of derivatives at each point xi;
        this is done by repeating the value xi and specifying the
        derivatives as successive yi values.

        Parameters
        ----------
        xi : array-like, length N
            known x-coordinates
        yi : array-like, N by R
            known y-coordinates, interpreted as vectors of length R,
            or scalars if R=1. When an xi occurs two or more times in
            a row, the corresponding yi's represent derivative values.

        Examples
        --------
        To produce a polynomial that is zero at 0 and 1 and has
        derivative 2 at 0, call

        >>> KroghInterpolator([0,0,1],[0,2,0])

        This constructs the quadratic 2*X**2-2*X. The derivative condition
        is indicated by the repeated zero in the xi array; the corresponding
        yi values are 0, the function value, and 2, the derivative value.

        For another example, given xi, yi, and a derivative ypi for each
        point, appropriate arrays can be constructed as:

        >>> xi_k, yi_k = np.repeat(xi, 2), np.ravel(np.dstack((yi,ypi)))
        >>> KroghInterpolator(xi_k, yi_k)

        To produce a vector-valued polynomial, supply a higher-dimensional
        array for yi:

        >>> KroghInterpolator([0,1],[[2,3],[4,5]])

        This constructs a linear polynomial giving (2,3) at 0 and (4,5) at 1.

        """
        self.xi = np.asarray(xi)
        self.yi = np.asarray(yi)
        # Normalize yi to shape (N, R); scalar-valued data gets R == 1.
        if len(self.yi.shape) == 1:
            self.vector_valued = False
            self.yi = self.yi[:, np.newaxis]
        elif len(self.yi.shape) > 2:
            raise ValueError("y coordinates must be either scalars or vectors")
        else:
            self.vector_valued = True

        n = len(xi)
        self.n = n
        nn, r = self.yi.shape
        if nn != n:
            raise ValueError("%d x values provided and %d y values; must be equal" % (n, nn))
        self.r = r

        # Krogh's algorithm: build the Newton-like coefficient table c.
        # s counts how many times xi[k] repeats immediately before index k
        # (i.e. how many derivative values precede it); the data is scaled
        # by s! accordingly.
        c = np.zeros((n+1, r))
        c[0] = yi[0]
        Vk = np.zeros((n, r))
        for k in xrange(1, n):
            s = 0
            while s <= k and xi[k-s] == xi[k]:
                s += 1
            s -= 1
            Vk[0] = yi[k]/float(factorial(s))
            for i in xrange(k-s):
                # Equal xi values are only allowed in consecutive runs
                # (derivative specifications), never separated.
                if xi[i] == xi[k]:
                    raise ValueError("Elements of `xi` can't be equal.")
                if s == 0:
                    Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])
                else:
                    Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])
            c[k] = Vk[k-s]
        self.c = c

    def __call__(self, x):
        """Evaluate the polynomial at the point x

        Parameters
        ----------
        x : scalar or array-like of length N

        Returns
        -------
        y : scalar, array of length R, array of length N, or array of length N by R
            If x is a scalar, returns either a vector or a scalar depending on
            whether the interpolator is vector-valued or scalar-valued.
            If x is a vector, returns a vector of values.
        """
        if _isscalar(x):
            scalar = True
            m = 1
        else:
            scalar = False
            m = len(x)
        x = np.asarray(x)

        n = self.n
        pi = 1
        p = np.zeros((m, self.r))
        p += self.c[0, np.newaxis, :]
        # Horner-like accumulation over the Newton basis pi_k(x).
        for k in xrange(1, n):
            w = x - self.xi[k-1]
            pi = w*pi
            p = p + np.multiply.outer(pi, self.c[k])
        # Strip the artificial dimensions added for uniform handling.
        if not self.vector_valued:
            if scalar:
                return p[0, 0]
            else:
                return p[:, 0]
        else:
            if scalar:
                return p[0]
            else:
                return p

    def derivatives(self, x, der=None):
        """
        Evaluate many derivatives of the polynomial at the point x

        Produce an array of all derivative values at the point x.

        Parameters
        ----------
        x : scalar or array_like of length N
            Point or points at which to evaluate the derivatives
        der : None or integer
            How many derivatives to extract; None for all potentially
            nonzero derivatives (that is a number equal to the number
            of points). This number includes the function value as 0th
            derivative.

        Returns
        -------
        d : ndarray
            If the interpolator's values are R-dimensional then the
            returned array will be der by N by R. If x is a scalar,
            the middle dimension will be dropped; if R is 1 then the
            last dimension will be dropped.

        Examples
        --------
        >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives(0)
        array([1.0,2.0,3.0])
        >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives([0,0])
        array([[1.0,1.0],
               [2.0,2.0],
               [3.0,3.0]])

        """
        if _isscalar(x):
            scalar = True
            m = 1
        else:
            scalar = False
            m = len(x)
        x = np.asarray(x)

        n = self.n
        r = self.r

        if der is None:
            der = self.n
        # (A previously-computed ``dern = min(self.n, der)`` was never used
        # and has been removed.)
        pi = np.zeros((n, m))
        w = np.zeros((n, m))
        pi[0] = 1
        p = np.zeros((m, self.r))
        p += self.c[0, np.newaxis, :]

        for k in xrange(1, n):
            w[k-1] = x - self.xi[k-1]
            pi[k] = w[k-1]*pi[k-1]
            p += np.multiply.outer(pi[k], self.c[k])

        cn = np.zeros((max(der, n+1), m, r))
        cn[:n+1, ...] += self.c[:n+1, np.newaxis, :]
        cn[0] = p
        for k in xrange(1, n):
            for i in xrange(1, n-k+1):
                pi[i] = w[k+i-1]*pi[i-1]+pi[i]
                cn[k] = cn[k]+pi[i, :, np.newaxis]*cn[k+i]
            cn[k] *= factorial(k)

        # Derivatives of order >= n of a degree-(n-1) polynomial are zero.
        cn[n, ...] = 0
        if not self.vector_valued:
            if scalar:
                return cn[:der, 0, 0]
            else:
                return cn[:der, :, 0]
        else:
            if scalar:
                return cn[:der, 0]
            else:
                return cn[:der]

    def derivative(self, x, der):
        """
        Evaluate one derivative of the polynomial at the point x

        Parameters
        ----------
        x : scalar or array_like of length N
            Point or points at which to evaluate the derivatives
        der : None or integer
            Which derivative to extract. This number includes the
            function value as 0th derivative.

        Returns
        -------
        d : ndarray
            If the interpolator's values are R-dimensional then the
            returned array will be N by R. If x is a scalar,
            the middle dimension will be dropped; if R is 1 then the
            last dimension will be dropped.

        Notes
        -----
        This is computed by evaluating all derivatives up to the desired
        one (using self.derivatives()) and then discarding the rest.

        """
        return self.derivatives(x, der=der+1)[der]
def krogh_interpolate(xi, yi, x, der=0):
    """
    Convenience function for polynomial interpolation.

    Builds the Krogh interpolating polynomial through the points
    ``(xi, yi)`` and evaluates it, or some of its derivatives, at ``x``.
    Repeated values in ``xi`` denote derivative data, exactly as in
    `KroghInterpolator`; see that class for the numerical caveats
    (ill-conditioning, degrees above about thirty).

    Parameters
    ----------
    xi : array_like, length N
        known x-coordinates
    yi : array_like, N by R
        known y-coordinates, interpreted as vectors of length R,
        or scalars if R=1
    x : scalar or array_like of length N
        Point or points at which to evaluate the derivatives
    der : integer or list
        How many derivatives to extract; None for all potentially
        nonzero derivatives (that is a number equal to the number
        of points), or a list of derivatives to extract. This number
        includes the function value as 0th derivative.

    Returns
    -------
    d : ndarray
        If the interpolator's values are R-dimensional then the
        returned array will be the number of derivatives by N by R.
        If x is a scalar, the middle dimension will be dropped; if
        the yi are scalars then the last dimension will be dropped.

    Notes
    -----
    Construction of the interpolating polynomial is a relatively expensive
    process. If you want to evaluate it repeatedly consider using the class
    KroghInterpolator (which is what this function uses).

    """
    interpolator = KroghInterpolator(xi, yi)
    if der == 0:
        # Plain function value.
        return interpolator(x)
    if _isscalar(der):
        # A single derivative order.
        return interpolator.derivative(x, der=der)
    # A list of derivative orders: compute up to the highest one requested
    # and select the wanted rows.
    highest = np.amax(der) + 1
    return interpolator.derivatives(x, der=highest)[der]
def approximate_taylor_polynomial(f, x, degree, scale, order=None):
    """
    Estimate the Taylor polynomial of f at x by polynomial fitting.

    Parameters
    ----------
    f : callable
        The function whose Taylor polynomial is sought. Should accept
        a vector of x values.
    x : scalar
        The point at which the polynomial is to be evaluated.
    degree : int
        The degree of the Taylor polynomial
    scale : scalar
        The width of the interval to use to evaluate the Taylor polynomial.
        Function values spread over a range this wide are used to fit the
        polynomial. Must be chosen carefully.
    order : int or None
        The order of the polynomial to be used in the fitting; f will be
        evaluated ``order+1`` times. If None, use `degree`.

    Returns
    -------
    p : poly1d instance
        The Taylor polynomial (translated to the origin, so that
        for example p(0)=f(x)).

    Notes
    -----
    The appropriate choice of "scale" is a trade-off; too large and the
    function differs from its Taylor polynomial too much to get a good
    answer, too small and round-off errors overwhelm the higher-order terms.
    The algorithm used becomes numerically unstable around order 30 even
    under ideal circumstances.

    Choosing order somewhat larger than degree may improve the higher-order
    terms.

    """
    if order is None:
        order = degree
    n = order+1
    # Choose n points that cluster near the endpoints of the interval in
    # a way that avoids the Runge phenomenon. Include linspace's endpoint
    # exactly when n is odd so that one sample always falls at x itself:
    # cos(linspace(0, pi, n, endpoint=False)) contains 0 only for even n,
    # while cos(linspace(0, pi, n, endpoint=True)) contains it only for
    # odd n.  (BUGFIX: the previous code used ``endpoint=n%1``, which is
    # always false, so for odd n no sample point fell exactly at x.)
    xs = scale*np.cos(np.linspace(0, np.pi, n, endpoint=n % 2)) + x

    P = KroghInterpolator(xs, f(xs))
    d = P.derivatives(x, der=degree+1)

    return np.poly1d((d/factorial(np.arange(degree+1)))[::-1])
class BarycentricInterpolator(object):
    """The interpolating polynomial for a set of points

    Constructs a polynomial that passes through a given set of points.
    Allows evaluation of the polynomial, efficient changing of the y
    values to be interpolated, and updating by adding more x values.
    For reasons of numerical stability, this function does not compute
    the coefficients of the polynomial.

    This class uses a "barycentric interpolation" method that treats
    the problem as a special case of rational function interpolation.
    This algorithm is quite stable, numerically, but even in a world of
    exact computation, unless the x coordinates are chosen very
    carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -
    polynomial interpolation itself is a very ill-conditioned process
    due to the Runge phenomenon.

    Based on Berrut and Trefethen 2004, "Barycentric Lagrange Interpolation".

    """
    def __init__(self, xi, yi=None):
        """Construct an object capable of interpolating functions sampled at xi

        The values yi need to be provided before the function is evaluated,
        but none of the preprocessing depends on them, so rapid updates
        are possible.

        Parameters
        ----------
        xi : array-like of length N
            The x coordinates of the points the polynomial should pass through
        yi : array-like N by R or None
            The y coordinates of the points the polynomial should pass through;
            if R>1 the polynomial is vector-valued. If None the y values
            will be supplied later.

        """
        self.n = len(xi)
        self.xi = np.asarray(xi)
        if yi is not None and len(yi) != len(self.xi):
            raise ValueError("yi dimensions do not match xi dimensions")
        self.set_yi(yi)
        # Barycentric weights w_j = 1/prod_{k != j} (x_j - x_k), built up
        # incrementally one point at a time so that add_xi() can extend
        # them cheaply later.  The products are accumulated first and the
        # whole vector inverted once at the end.
        self.wi = np.zeros(self.n)
        self.wi[0] = 1
        for j in xrange(1, self.n):
            self.wi[:j] *= (self.xi[j]-self.xi[:j])
            self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])
        self.wi **= -1

    def set_yi(self, yi):
        """
        Update the y values to be interpolated

        The barycentric interpolation algorithm requires the calculation
        of weights, but these depend only on the xi. The yi can be changed
        at any time.

        Parameters
        ----------
        yi : array_like N by R
            The y coordinates of the points the polynomial should pass through;
            if R>1 the polynomial is vector-valued. If None the y values
            will be supplied later.

        """
        if yi is None:
            self.yi = None
            return
        yi = np.asarray(yi)
        # Normalize yi to shape (N, R); scalar-valued data gets R == 1.
        if len(yi.shape) == 1:
            self.vector_valued = False
            yi = yi[:, np.newaxis]
        elif len(yi.shape) > 2:
            raise ValueError("y coordinates must be either scalars or vectors")
        else:
            self.vector_valued = True

        n, r = yi.shape
        if n != len(self.xi):
            raise ValueError("yi dimensions do not match xi dimensions")
        self.yi = yi
        self.r = r

    def add_xi(self, xi, yi=None):
        """
        Add more x values to the set to be interpolated

        The barycentric interpolation algorithm allows easy updating by
        adding more points for the polynomial to pass through.

        Parameters
        ----------
        xi : array_like of length N1
            The x coordinates of the points the polynomial should pass through
        yi : array_like N1 by R or None
            The y coordinates of the points the polynomial should pass through;
            if R>1 the polynomial is vector-valued. If None the y values
            will be supplied later. The yi should be specified if and only if
            the interpolator has y values specified.

        """
        if yi is not None:
            if self.yi is None:
                raise ValueError("No previous yi value to update!")
            yi = np.asarray(yi)
            if len(yi.shape) == 1:
                if self.vector_valued:
                    raise ValueError("Cannot extend dimension %d y vectors with scalars" % self.r)
                yi = yi[:, np.newaxis]
            elif len(yi.shape) > 2:
                raise ValueError("y coordinates must be either scalars or vectors")
            else:
                n, r = yi.shape
                if r != self.r:
                    raise ValueError("Cannot extend dimension %d y vectors with dimension %d y vectors" % (self.r, r))

            self.yi = np.vstack((self.yi, yi))
        else:
            if self.yi is not None:
                raise ValueError("No update to yi provided!")
        old_n = self.n
        self.xi = np.concatenate((self.xi, xi))
        self.n = len(self.xi)
        # Undo the final inversion done by the constructor, so the stored
        # values are the raw products again and can be extended with the
        # same incremental recurrence; then re-invert at the end.
        self.wi **= -1
        old_wi = self.wi
        self.wi = np.zeros(self.n)
        self.wi[:old_n] = old_wi
        for j in xrange(old_n, self.n):
            self.wi[:j] *= (self.xi[j]-self.xi[:j])
            self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])
        self.wi **= -1

    def __call__(self, x):
        """Evaluate the interpolating polynomial at the points x

        Parameters
        ----------
        x : scalar or array-like of length M

        Returns
        -------
        y : scalar or array-like of length R or length M or M by R
            The shape of y depends on the shape of x and whether the
            interpolator is vector-valued or scalar-valued.

        Notes
        -----
        Currently the code computes an outer product between x and the
        weights, that is, it constructs an intermediate array of size
        N by M, where N is the degree of the polynomial.
        """
        scalar = _isscalar(x)
        x = np.atleast_1d(x)
        c = np.subtract.outer(x, self.xi)
        # Where an evaluation point coincides with a node, the barycentric
        # formula would divide by zero; mask those entries with 1 now and
        # overwrite the corresponding outputs with the exact yi below.
        z = c == 0
        c[z] = 1
        c = self.wi/c
        p = np.dot(c, self.yi)/np.sum(c, axis=-1)[:, np.newaxis]
        i, j = np.nonzero(z)
        p[i] = self.yi[j]
        # Strip the artificial dimensions added for uniform handling.
        if not self.vector_valued:
            if scalar:
                return p[0, 0]
            else:
                return p[:, 0]
        else:
            if scalar:
                return p[0]
            else:
                return p
def barycentric_interpolate(xi, yi, x):
    """
    Convenience function for polynomial interpolation

    Builds a barycentric interpolating polynomial through the points
    ``(xi, yi)`` and evaluates it at ``x``.  For reasons of numerical
    stability the coefficients of the polynomial are never computed.

    The "barycentric interpolation" method treats the problem as a
    special case of rational function interpolation and is numerically
    quite stable; nevertheless, unless the x coordinates are chosen very
    carefully (Chebyshev zeros, e.g. cos(i*pi/n), are a good choice),
    polynomial interpolation itself is very ill-conditioned due to the
    Runge phenomenon.

    Based on Berrut and Trefethen 2004, "Barycentric Lagrange Interpolation".

    Parameters
    ----------
    xi : array_like of length N
        The x coordinates of the points the polynomial should pass through
    yi : array_like N by R
        The y coordinates of the points the polynomial should pass through;
        if R>1 the polynomial is vector-valued.
    x : scalar or array_like of length M

    Returns
    -------
    y : scalar or array_like of length R or length M or M by R
        The shape of y depends on the shape of x and whether the
        interpolator is vector-valued or scalar-valued.

    Notes
    -----
    Construction of the interpolation weights is a relatively slow process.
    If you want to call this many times with the same xi (but possibly
    varying yi or x) you should use the class BarycentricInterpolator.
    This is what this function uses internally.

    """
    interpolator = BarycentricInterpolator(xi, yi)
    return interpolator(x)
class PiecewisePolynomial(object):
    """Piecewise polynomial curve specified by points and derivatives

    This class represents a curve that is a piecewise polynomial. It
    passes through a list of points and has specified derivatives at
    each point. The degree of the polynomial may vary from segment to
    segment, as may the number of derivatives available. The degree
    should not exceed about thirty.

    Appending points to the end of the curve is efficient.

    """
    def __init__(self, xi, yi, orders=None, direction=None):
        """Construct a piecewise polynomial

        Parameters
        ----------
        xi : array-like of length N
            a sorted list of x-coordinates
        yi : list of lists of length N
            yi[i] is the list of derivatives known at xi[i]
        orders : list of integers, or integer
            a list of polynomial orders, or a single universal order
        direction : {None, 1, -1}
            indicates whether the xi are increasing or decreasing
            +1 indicates increasing
            -1 indicates decreasing
            None indicates that it should be deduced from the first two xi

        Notes
        -----
        If orders is None, or orders[i] is None, then the degree of the
        polynomial segment is exactly the degree required to match all i
        available derivatives at both endpoints. If orders[i] is not None,
        then some derivatives will be ignored. The code will try to use an
        equal number of derivatives from each end; if the total number of
        derivatives needed is odd, it will prefer the rightmost endpoint. If
        not enough derivatives are available, an exception is raised.

        """
        # The shape of the first point's derivative array fixes whether the
        # curve is scalar- or vector-valued (R components).
        yi0 = np.asarray(yi[0])
        if len(yi0.shape) == 2:
            self.vector_valued = True
            self.r = yi0.shape[1]
        elif len(yi0.shape) == 1:
            self.vector_valued = False
            self.r = 1
        else:
            raise ValueError("Each derivative must be a vector, not a higher-rank array")

        # Seed the curve with the first point only; extend() appends the
        # rest and builds one KroghInterpolator per segment.
        self.xi = [xi[0]]
        self.yi = [yi0]
        self.n = 1

        self.direction = direction
        self.orders = []
        self.polynomials = []
        self.extend(xi[1:], yi[1:], orders)

    def _make_polynomial(self, x1, y1, x2, y2, order, direction):
        """Construct the interpolating polynomial object

        Deduces the number of derivatives to match at each end
        from order and the number of derivatives available. If
        possible it uses the same number of derivatives from
        each end; if the number is odd it tries to take the
        extra one from y2. In any case if not enough derivatives
        are available at one end or another it draws enough to
        make up the total from the other end.

        """
        n = order+1
        # Split the n conditions between the two endpoints; the double min
        # pass rebalances when one side has fewer derivatives available.
        n1 = min(n//2, len(y1))
        n2 = min(n-n1, len(y2))
        n1 = min(n-n2, len(y1))
        if n1+n2 != n:
            raise ValueError("Point %g has %d derivatives, point %g has %d derivatives, but order %d requested" % (x1, len(y1), x2, len(y2), order))
        if not (n1 <= len(y1) and n2 <= len(y2)):
            raise ValueError("`order` input incompatible with length y1 or y2.")

        # Repeated abscissas encode derivative constraints for Krogh.
        xi = np.zeros(n)
        if self.vector_valued:
            yi = np.zeros((n, self.r))
        else:
            yi = np.zeros((n,))

        xi[:n1] = x1
        yi[:n1] = y1[:n1]
        xi[n1:] = x2
        yi[n1:] = y2[:n2]

        return KroghInterpolator(xi, yi)

    def append(self, xi, yi, order=None):
        """
        Append a single point with derivatives to the PiecewisePolynomial

        Parameters
        ----------
        xi : float
        yi : array_like
            yi is the list of derivatives known at xi
        order : integer or None
            a polynomial order, or instructions to use the highest
            possible order

        """
        yi = np.asarray(yi)
        if self.vector_valued:
            if (len(yi.shape) != 2 or yi.shape[1] != self.r):
                raise ValueError("Each derivative must be a vector of length %d" % self.r)
        else:
            if len(yi.shape) != 1:
                raise ValueError("Each derivative must be a scalar")

        # Deduce (or enforce) monotone ordering of the abscissas.
        if self.direction is None:
            self.direction = np.sign(xi-self.xi[-1])
        elif (xi-self.xi[-1])*self.direction < 0:
            raise ValueError("x coordinates must be in the %d direction: %s" % (self.direction, self.xi))

        self.xi.append(xi)
        self.yi.append(yi)

        if order is None:
            # Highest order that uses every available derivative at both
            # ends of the new segment.
            n1 = len(self.yi[-2])
            n2 = len(self.yi[-1])
            n = n1+n2
            order = n-1

        self.orders.append(order)
        self.polynomials.append(self._make_polynomial(
            self.xi[-2], self.yi[-2],
            self.xi[-1], self.yi[-1],
            order, self.direction))
        self.n += 1

    def extend(self, xi, yi, orders=None):
        """
        Extend the PiecewisePolynomial by a list of points

        Parameters
        ----------
        xi : array_like of length N1
            a sorted list of x-coordinates
        yi : list of lists of length N1
            yi[i] is the list of derivatives known at xi[i]
        orders : list of integers, or integer
            a list of polynomial orders, or a single universal order

        """
        for i in xrange(len(xi)):
            if orders is None or _isscalar(orders):
                self.append(xi[i], yi[i], orders)
            else:
                self.append(xi[i], yi[i], orders[i])

    def __call__(self, x):
        """Evaluate the piecewise polynomial

        Parameters
        ----------
        x : scalar or array-like of length N

        Returns
        -------
        y : scalar or array-like of length R or length N or N by R
        """
        if _isscalar(x):
            # Locate the segment containing x (clipped to the valid range,
            # so out-of-range x extrapolates with the nearest segment).
            pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
            y = self.polynomials[pos](x)
        else:
            x = np.asarray(x)
            m = len(x)
            pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
            if self.vector_valued:
                y = np.zeros((m, self.r))
            else:
                y = np.zeros(m)
            # Evaluate each segment's polynomial on the points it owns.
            for i in xrange(self.n-1):
                c = pos == i
                y[c] = self.polynomials[i](x[c])
        return y

    def derivative(self, x, der):
        """
        Evaluate a derivative of the piecewise polynomial

        Parameters
        ----------
        x : scalar or array_like of length N
        der : integer
            which single derivative to extract

        Returns
        -------
        y : scalar or array_like of length R or length N or N by R

        Notes
        -----
        This currently computes (using self.derivatives()) all derivatives
        of the curve segment containing each x but returns only one.

        """
        return self.derivatives(x, der=der+1)[der]

    def derivatives(self, x, der):
        """
        Evaluate a derivative of the piecewise polynomial

        Parameters
        ----------
        x : scalar or array_like of length N
        der : integer
            how many derivatives (including the function value as
            0th derivative) to extract

        Returns
        -------
        y : array_like of shape der by R or der by N or der by N by R

        """
        if _isscalar(x):
            pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
            y = self.polynomials[pos].derivatives(x, der=der)
        else:
            x = np.asarray(x)
            m = len(x)
            pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
            if self.vector_valued:
                y = np.zeros((der, m, self.r))
            else:
                y = np.zeros((der, m))
            # Per-segment evaluation, as in __call__.
            for i in xrange(self.n-1):
                c = pos == i
                y[:, c] = self.polynomials[i].derivatives(x[c], der=der)
        return y
def piecewise_polynomial_interpolate(xi, yi, x, orders=None, der=0):
    """
    Convenience function for piecewise polynomial interpolation

    Parameters
    ----------
    xi : array_like
        A sorted list of x-coordinates, of length N.
    yi : list of lists
        yi[i] is the list of derivatives known at xi[i]. Of length N.
    x : scalar or array_like
        Of length M.
    orders : int or list of ints
        a list of polynomial orders, or a single universal order
    der : int
        Which single derivative to extract.

    Returns
    -------
    y : scalar or array_like
        The result, of length R or length M or M by R,

    Notes
    -----
    If orders is None, or orders[i] is None, then the degree of the
    polynomial segment is exactly the degree required to match all i
    available derivatives at both endpoints. If orders[i] is not None,
    then some derivatives will be ignored. The code will try to use an
    equal number of derivatives from each end; if the total number of
    derivatives needed is odd, it will prefer the rightmost endpoint. If
    not enough derivatives are available, an exception is raised.

    Construction of these piecewise polynomials can be an expensive process;
    if you repeatedly evaluate the same polynomial, consider using the class
    PiecewisePolynomial (which is what this function does).

    """
    interp = PiecewisePolynomial(xi, yi, orders)
    if der == 0:
        # Plain function value.
        return interp(x)
    if _isscalar(der):
        # A single derivative order.
        return interp.derivative(x, der=der)
    # A list of derivative orders: compute up to the highest requested and
    # select the wanted rows.
    return interp.derivatives(x, der=np.amax(der)+1)[der]
def _isscalar(x):
    """Return True if `x` is a scalar type or a 0-dimensional array."""
    if np.isscalar(x):
        return True
    # 0-d ndarrays (shape ()) also count as scalars for our purposes;
    # objects without a .shape attribute do not.
    return getattr(x, 'shape', None) == ()
def _edge_case(m0, d1):
return np.where((d1==0) | (m0==0), 0.0, 1.0/(1.0/m0+1.0/d1))
def _find_derivatives(x, y):
    """Return the PCHIP derivative values d_k at each data point.

    Determine the derivatives at the points y_k, d_k, by using the
    PCHIP algorithm:
      Let m_k be the slope of the kth segment (between k and k+1).
      If m_k == 0 or m_{k-1} == 0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0;
      else use the weighted harmonic mean
         w_1 = 2h_k + h_{k-1},  w_2 = h_k + 2h_{k-1}
         1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})
      where h_k is the spacing between x_k and x_{k+1}.
    """
    hk = x[1:] - x[:-1]
    mk = (y[1:] - y[:-1]) / hk
    smk = np.sign(mk)
    # Mask: d_k must be 0 where a neighbouring slope is 0 or the sign flips.
    condition = ((smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0))

    w1 = 2*hk[1:] + hk[:-1]
    w2 = hk[1:] + 2*hk[:-1]
    # whmean is only consumed where `condition` is False; silence the
    # divide-by-zero/invalid warnings the masked (flat or non-monotone)
    # entries would otherwise emit.
    with np.errstate(divide='ignore', invalid='ignore'):
        whmean = 1.0/(w1+w2)*(w1/mk[1:] + w2/mk[:-1])

    # BUGFIX: force a float result.  np.zeros_like(y) inherits y's dtype,
    # so integer-valued y silently truncated every derivative to an int.
    dk = np.zeros_like(y, dtype=float)
    dk[1:-1][condition] = 0.0
    dk[1:-1][~condition] = 1.0/whmean[~condition]

    # For end-points choose d_0 so that 1/d_0 = 1/m_0 + 1/d_1 unless
    # one of d_1 or m_0 is 0, then choose d_0 = 0
    dk[0] = _edge_case(mk[0], dk[1])
    dk[-1] = _edge_case(mk[-1], dk[-2])
    return dk
def pchip(x, y):
    """PCHIP 1-d monotonic cubic interpolation

    ``x`` and ``y`` are arrays of values used to approximate some function
    f, with ``y = f(x)``.  The returned object uses monotonic cubic
    interpolation to find the value of new points.

    Parameters
    ----------
    x : array
        A 1D array of monotonically increasing real values. x cannot
        include duplicate values (otherwise f is overspecified)
    y : array
        A 1-D array of real values. y's length along the interpolation
        axis must be equal to the length of x.

    Assumes x is sorted in monotonic order (e.g. ``x[1] > x[0]``).

    Returns
    -------
    pchip : PiecewisePolynomial instance
        The result of the interpolation.

    """
    # Shape-preserving derivative estimates at each knot, then a cubic
    # (order=3) Hermite segment between each pair of knots.
    slopes = _find_derivatives(x, y)
    knot_data = list(zip(y, slopes))
    return PiecewisePolynomial(x, knot_data, orders=3, direction=None)
| 34.514344 | 206 | 0.591759 | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.misc import factorial
from scipy.lib.six.moves import xrange
__all__ = ["KroghInterpolator", "krogh_interpolate", "BarycentricInterpolator", "barycentric_interpolate", "PiecewisePolynomial", "piecewise_polynomial_interpolate","approximate_taylor_polynomial", "pchip"]
class KroghInterpolator(object):
    """Polynomial interpolator through points (xi, yi) (Krogh, 1970).

    A repeated value in `xi` means the corresponding `yi` entries are
    successive derivative values at that point.  The polynomial
    coefficients are never formed explicitly; evaluation works from a
    divided-difference table for numerical stability.  Degrees above
    about thirty are numerically unreliable.
    """
    def __init__(self, xi, yi):
        """Build the divided-difference table for the points (xi, yi).

        Parameters
        ----------
        xi : array_like, length N
            Known x-coordinates; repeats denote derivative data.
        yi : array_like, N or N by R
            Values (vectors of length R give a vector-valued polynomial).
        """
        self.xi = np.asarray(xi)
        self.yi = np.asarray(yi)
        # Normalize yi to shape (N, R); scalar-valued data gets R == 1.
        if len(self.yi.shape) == 1:
            self.vector_valued = False
            self.yi = self.yi[:, np.newaxis]
        elif len(self.yi.shape) > 2:
            raise ValueError("y coordinates must be either scalars or vectors")
        else:
            self.vector_valued = True

        n = len(xi)
        self.n = n
        nn, r = self.yi.shape
        if nn != n:
            raise ValueError("%d x values provided and %d y values; must be equal" % (n, nn))
        self.r = r

        # Krogh's algorithm: build Newton-like coefficients c; s counts how
        # many times xi[k] has just repeated (derivative data), and the
        # value is scaled by s! accordingly.
        c = np.zeros((n+1, r))
        c[0] = yi[0]
        Vk = np.zeros((n, r))
        for k in xrange(1, n):
            s = 0
            while s <= k and xi[k-s] == xi[k]:
                s += 1
            s -= 1
            Vk[0] = yi[k]/float(factorial(s))
            for i in xrange(k-s):
                if xi[i] == xi[k]:
                    raise ValueError("Elements if `xi` can't be equal.")
                if s == 0:
                    Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])
                else:
                    Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])
            c[k] = Vk[k-s]
        self.c = c

    def __call__(self, x):
        """Evaluate the polynomial at scalar or vector `x`.

        Returns a scalar, length-R vector, length-N vector or N-by-R
        array depending on the shapes of `x` and the stored data.
        """
        if _isscalar(x):
            scalar = True
            m = 1
        else:
            scalar = False
            m = len(x)
        x = np.asarray(x)

        n = self.n
        pi = 1
        p = np.zeros((m, self.r))
        p += self.c[0, np.newaxis, :]
        # Horner-like accumulation over the Newton basis pi_k(x).
        for k in xrange(1, n):
            w = x - self.xi[k-1]
            pi = w*pi
            p = p + np.multiply.outer(pi, self.c[k])
        # Strip the artificial dimensions added for uniform handling.
        if not self.vector_valued:
            if scalar:
                return p[0, 0]
            else:
                return p[:, 0]
        else:
            if scalar:
                return p[0]
            else:
                return p

    def derivatives(self, x, der=None):
        """Evaluate derivatives of order 0..der-1 at `x`.

        der=None extracts all n potentially nonzero derivatives (the
        function value counts as the 0th derivative).
        """
        if _isscalar(x):
            scalar = True
            m = 1
        else:
            scalar = False
            m = len(x)
        x = np.asarray(x)

        n = self.n
        r = self.r

        if der is None:
            der = self.n
        # NOTE(review): dern is computed but never used.
        dern = min(self.n, der)
        pi = np.zeros((n, m))
        w = np.zeros((n, m))
        pi[0] = 1
        p = np.zeros((m, self.r))
        p += self.c[0, np.newaxis, :]

        for k in xrange(1, n):
            w[k-1] = x - self.xi[k-1]
            pi[k] = w[k-1]*pi[k-1]
            p += np.multiply.outer(pi[k], self.c[k])

        cn = np.zeros((max(der, n+1), m, r))
        cn[:n+1, ...] += self.c[:n+1, np.newaxis, :]
        cn[0] = p
        for k in xrange(1, n):
            for i in xrange(1, n-k+1):
                pi[i] = w[k+i-1]*pi[i-1]+pi[i]
                cn[k] = cn[k]+pi[i, :, np.newaxis]*cn[k+i]
            cn[k] *= factorial(k)

        # Derivatives of order >= n of a degree-(n-1) polynomial are zero.
        cn[n, ...] = 0
        if not self.vector_valued:
            if scalar:
                return cn[:der, 0, 0]
            else:
                return cn[:der, :, 0]
        else:
            if scalar:
                return cn[:der, 0]
            else:
                return cn[:der]

    def derivative(self, x, der):
        """Evaluate the single derivative of order `der` at `x`."""
        return self.derivatives(x, der=der+1)[der]
def krogh_interpolate(xi, yi, x, der=0):
  """Krogh-interpolate through `(xi, yi)` and evaluate at `x`.

  `der=0` returns function values; a scalar `der` returns that derivative;
  a sequence `der` returns the stacked selected derivatives.
  """
  interpolator = KroghInterpolator(xi, yi)
  if der == 0:
    return interpolator(x)
  if _isscalar(der):
    return interpolator.derivative(x, der=der)
  return interpolator.derivatives(x, der=np.amax(der) + 1)[der]
def approximate_taylor_polynomial(f,x,degree,scale,order=None):
  """Estimate the degree-`degree` Taylor polynomial of `f` around `x`.

  Samples `f` at `order+1` Chebyshev-spaced points within `scale` of `x`,
  fits a Krogh interpolating polynomial, and returns its Taylor expansion
  about `x` as an `np.poly1d` (highest power first).
  """
  if order is None:
    order=degree
  n = order+1
  # Choose n points that cluster near the endpoints of the interval in
  # a way that avoids the Runge phenomenon. Ensure, by including the
  # endpoint or not as appropriate, that one point always falls at x
  # exactly.
  # NOTE(review): `endpoint=n%1` is always falsy for integer n, so the
  # endpoint is never included -- this looks inconsistent with the comment
  # above (possibly `n%2` was intended); confirm against upstream before
  # changing.
  xs = scale*np.cos(np.linspace(0,np.pi,n,endpoint=n%1)) + x
  P = KroghInterpolator(xs, f(xs))
  d = P.derivatives(x,der=degree+1)
  return np.poly1d((d/factorial(np.arange(degree+1)))[::-1])
class BarycentricInterpolator(object):
  """Polynomial interpolation in (second) barycentric form.

  Precomputes the barycentric weights `wi` for the abscissae `xi`, making
  evaluation cheap and allowing y-values to be swapped (`set_yi`) or new
  points appended (`add_xi`) without a full recomputation.
  """
  def __init__(self, xi, yi=None):
    """Compute barycentric weights for `xi`; optionally store `yi`.

    Raises:
      ValueError: if `yi` is given but its length differs from `xi`'s.
    """
    self.n = len(xi)
    self.xi = np.asarray(xi)
    if yi is not None and len(yi) != len(self.xi):
      raise ValueError("yi dimensions do not match xi dimensions")
    self.set_yi(yi)
    # Incrementally build w_j = 1 / prod_{k != j} (x_j - x_k).
    self.wi = np.zeros(self.n)
    self.wi[0] = 1
    # `range`, not Python-2-only `xrange`, so this runs on Python 3.
    for j in range(1, self.n):
      self.wi[:j] *= (self.xi[j] - self.xi[:j])
      self.wi[j] = np.multiply.reduce(self.xi[:j] - self.xi[j])
    self.wi **= -1
  def set_yi(self, yi):
    """Replace the stored y-values (or clear them with `yi=None`).

    Raises:
      ValueError: if `yi` has rank > 2 or its length differs from `xi`'s.
    """
    if yi is None:
      self.yi = None
      return
    yi = np.asarray(yi)
    if len(yi.shape) == 1:
      self.vector_valued = False
      yi = yi[:, np.newaxis]
    elif len(yi.shape) > 2:
      raise ValueError("y coordinates must be either scalars or vectors")
    else:
      self.vector_valued = True
    n, r = yi.shape
    if n != len(self.xi):
      raise ValueError("yi dimensions do not match xi dimensions")
    self.yi = yi
    self.r = r
  def add_xi(self, xi, yi=None):
    """Append abscissae `xi` (and matching `yi`), updating the weights.

    Raises:
      ValueError: if `yi` presence or shape is inconsistent with the
        currently stored y-values.
    """
    if yi is not None:
      if self.yi is None:
        raise ValueError("No previous yi value to update!")
      yi = np.asarray(yi)
      if len(yi.shape) == 1:
        if self.vector_valued:
          raise ValueError(
              "Cannot extend dimension %d y vectors with scalars" % self.r)
        yi = yi[:, np.newaxis]
      elif len(yi.shape) > 2:
        raise ValueError("y coordinates must be either scalars or vectors")
      else:
        n, r = yi.shape
        if r != self.r:
          raise ValueError(
              "Cannot extend dimension %d y vectors with dimension %d y vectors" % (self.r, r))
      self.yi = np.vstack((self.yi, yi))
    else:
      if self.yi is not None:
        raise ValueError("No update to yi provided!")
    old_n = self.n
    self.xi = np.concatenate((self.xi, xi))
    self.n = len(self.xi)
    # Un-invert, extend with the new points, then re-invert the weights.
    self.wi **= -1
    old_wi = self.wi
    self.wi = np.zeros(self.n)
    self.wi[:old_n] = old_wi
    for j in range(old_n, self.n):
      self.wi[:j] *= (self.xi[j] - self.xi[:j])
      self.wi[j] = np.multiply.reduce(self.xi[:j] - self.xi[j])
    self.wi **= -1
  def __call__(self, x):
    """Evaluate the interpolant at `x` (scalar or 1-d array)."""
    scalar = _isscalar(x)
    x = np.atleast_1d(x)
    c = np.subtract.outer(x, self.xi)
    # Where x coincides with a node the barycentric formula is 0/0; mask
    # those entries and patch in the exact y-values afterwards.
    z = c == 0
    c[z] = 1
    c = self.wi / c
    p = np.dot(c, self.yi) / np.sum(c, axis=-1)[:, np.newaxis]
    i, j = np.nonzero(z)
    p[i] = self.yi[j]
    if not self.vector_valued:
      if scalar:
        return p[0, 0]
      else:
        return p[:, 0]
    else:
      if scalar:
        return p[0]
      else:
        return p
def barycentric_interpolate(xi, yi, x):
  """Evaluate at `x` the barycentric interpolant through `(xi, yi)`."""
  interpolator = BarycentricInterpolator(xi, yi)
  return interpolator(x)
class PiecewisePolynomial(object):
  """Piecewise polynomial curve through points with specified derivatives.

  Each breakpoint `xi[k]` carries a list `yi[k]` of the function value
  followed by derivative values; each interval is fit by a Krogh (Hermite)
  polynomial matching those derivatives at its two endpoints.
  """
  def __init__(self, xi, yi, orders=None, direction=None):
    """Build the curve from breakpoints `xi` and derivative lists `yi`.

    Args:
      xi: breakpoint abscissae, monotone in `direction`.
      yi: per-breakpoint list of `[value, 1st deriv, ...]`; entries are
        scalars or length-r vectors.
      orders: polynomial order per interval (scalar or sequence), or None
        to use all available derivative data.
      direction: +1/-1 for increasing/decreasing `xi`, or None to infer
        from the first two points.

    Raises:
      ValueError: if a `yi` entry has rank > 2.
    """
    yi0 = np.asarray(yi[0])
    if len(yi0.shape) == 2:
      self.vector_valued = True
      self.r = yi0.shape[1]
    elif len(yi0.shape) == 1:
      self.vector_valued = False
      self.r = 1
    else:
      raise ValueError(
          "Each derivative must be a vector, not a higher-rank array")
    self.xi = [xi[0]]
    self.yi = [yi0]
    self.n = 1
    self.direction = direction
    self.orders = []
    self.polynomials = []
    self.extend(xi[1:], yi[1:], orders)
  def _make_polynomial(self, x1, y1, x2, y2, order, direction):
    """Construct the order-`order` Hermite polynomial for one interval."""
    n = order + 1
    # Split the n interpolation conditions between the two endpoints,
    # bounded by how many derivatives each endpoint actually provides.
    n1 = min(n // 2, len(y1))
    n2 = min(n - n1, len(y2))
    n1 = min(n - n2, len(y1))
    if n1 + n2 != n:
      raise ValueError(
          "Point %g has %d derivatives, point %g has %d derivatives, but order %d requested" % (x1, len(y1), x2, len(y2), order))
    if not (n1 <= len(y1) and n2 <= len(y2)):
      raise ValueError("`order` input incompatible with length y1 or y2.")
    xi = np.zeros(n)
    if self.vector_valued:
      yi = np.zeros((n, self.r))
    else:
      yi = np.zeros((n,))
    xi[:n1] = x1
    yi[:n1] = y1[:n1]
    xi[n1:] = x2
    yi[n1:] = y2[:n2]
    return KroghInterpolator(xi, yi)
  def append(self, xi, yi, order=None):
    """Append one breakpoint `xi` with derivative list `yi`.

    Raises:
      ValueError: if `yi`'s shape disagrees with the curve, or `xi`
        violates the established direction.
    """
    yi = np.asarray(yi)
    if self.vector_valued:
      if (len(yi.shape) != 2 or yi.shape[1] != self.r):
        raise ValueError(
            "Each derivative must be a vector of length %d" % self.r)
    else:
      if len(yi.shape) != 1:
        raise ValueError("Each derivative must be a scalar")
    if self.direction is None:
      self.direction = np.sign(xi - self.xi[-1])
    elif (xi - self.xi[-1]) * self.direction < 0:
      raise ValueError(
          "x coordinates must be in the %d direction: %s" % (self.direction, self.xi))
    self.xi.append(xi)
    self.yi.append(yi)
    if order is None:
      # Default order: use every derivative supplied at both endpoints.
      n1 = len(self.yi[-2])
      n2 = len(self.yi[-1])
      n = n1 + n2
      order = n - 1
    self.orders.append(order)
    self.polynomials.append(self._make_polynomial(
        self.xi[-2], self.yi[-2],
        self.xi[-1], self.yi[-1],
        order, self.direction))
    self.n += 1
  def extend(self, xi, yi, orders=None):
    """Append many breakpoints; `orders` may be scalar or per-interval."""
    # `range`, not Python-2-only `xrange`, so this runs on Python 3.
    for i in range(len(xi)):
      if orders is None or _isscalar(orders):
        self.append(xi[i], yi[i], orders)
      else:
        self.append(xi[i], yi[i], orders[i])
  def __call__(self, x):
    """Evaluate the curve at `x` (scalar or 1-d array)."""
    if _isscalar(x):
      pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n - 2)
      y = self.polynomials[pos](x)
    else:
      x = np.asarray(x)
      m = len(x)
      pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n - 2)
      if self.vector_valued:
        y = np.zeros((m, self.r))
      else:
        y = np.zeros(m)
      # Evaluate each interval's polynomial on the points it owns.
      for i in range(self.n - 1):
        c = pos == i
        y[c] = self.polynomials[i](x[c])
    return y
  def derivative(self, x, der):
    """Evaluate only the `der`-th derivative of the curve at `x`."""
    return self.derivatives(x, der=der + 1)[der]
  def derivatives(self, x, der):
    """Evaluate derivatives 0..der-1 of the curve at `x`."""
    if _isscalar(x):
      pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n - 2)
      y = self.polynomials[pos].derivatives(x, der=der)
    else:
      x = np.asarray(x)
      m = len(x)
      pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n - 2)
      if self.vector_valued:
        y = np.zeros((der, m, self.r))
      else:
        y = np.zeros((der, m))
      for i in range(self.n - 1):
        c = pos == i
        y[:, c] = self.polynomials[i].derivatives(x[c], der=der)
    return y
def piecewise_polynomial_interpolate(xi, yi, x, orders=None, der=0):
  """Build a piecewise (Hermite) polynomial through `(xi, yi)`, evaluate at `x`.

  `der=0` returns values; a scalar `der` returns that derivative; a
  sequence `der` returns the stacked selected derivatives.
  """
  curve = PiecewisePolynomial(xi, yi, orders)
  if der == 0:
    return curve(x)
  if _isscalar(der):
    return curve.derivative(x, der=der)
  return curve.derivatives(x, der=np.amax(der) + 1)[der]
def _isscalar(x):
return np.isscalar(x) or hasattr(x, 'shape') and x.shape == ()
def _edge_case(m0, d1):
return np.where((d1==0) | (m0==0), 0.0, 1.0/(1.0/m0+1.0/d1))
def _find_derivatives(x, y):
  """Return PCHIP derivative estimates d_k at each data point.

  Determine the derivatives at the points y_k, d_k, by the PCHIP rule:
  Let m_k be the slope of the kth segment (between k and k+1).
  If m_k == 0 or m_{k-1} == 0 or sgn(m_k) != sgn(m_{k-1}) then d_k = 0;
  else use the weighted harmonic mean
      w_1 = 2h_k + h_{k-1},  w_2 = h_k + 2h_{k-1}
      1/d_k = 1/(w_1 + w_2) * (w_1/m_k + w_2/m_{k-1})
  where h_k is the spacing between x_k and x_{k+1}. This choice keeps the
  interpolant monotone on monotone data.
  """
  # Segment widths and slopes.
  hk = x[1:] - x[:-1]
  mk = (y[1:] - y[:-1]) / hk
  smk = np.sign(mk)
  # Interior points where the derivative is forced to 0: sign change or a
  # flat neighboring segment.
  condition = ((smk[1:] != smk[:-1]) | (mk[1:]==0) | (mk[:-1]==0))
  w1 = 2*hk[1:] + hk[:-1]
  w2 = hk[1:] + 2*hk[:-1]
  # Reciprocal of the weighted harmonic mean; masked entries below may
  # contain inf/nan from division by zero slopes, but they are never read.
  whmean = 1.0/(w1+w2)*(w1/mk[1:] + w2/mk[:-1])
  dk = np.zeros_like(y)
  dk[1:-1][condition] = 0.0
  dk[1:-1][~condition] = 1.0/whmean[~condition]
  # For end-points choose d_0 so that 1/d_0 = 1/m_0 + 1/d_1 unless
  # one of d_1 or m_0 is 0, then choose d_0 = 0
  dk[0] = _edge_case(mk[0],dk[1])
  dk[-1] = _edge_case(mk[-1],dk[-2])
  return dk
def pchip(x, y):
  """Monotonicity-preserving piecewise cubic Hermite interpolant (PCHIP)."""
  slopes = _find_derivatives(x, y)
  data = list(zip(y, slopes))
  return PiecewisePolynomial(x, data, orders=3, direction=None)
| true | true |
f717f5329e9080881c559dfd976b9a5f38d7606a | 670 | py | Python | 680_Valid_Palindrome_II.py | yuqingchen/Leetcode | 6cbcb36e66a10a226ddb0966701e61ce4c2434d4 | [
"MIT"
] | 1 | 2019-12-12T20:16:08.000Z | 2019-12-12T20:16:08.000Z | 680_Valid_Palindrome_II.py | yuqingchen/Leetcode | 6cbcb36e66a10a226ddb0966701e61ce4c2434d4 | [
"MIT"
] | null | null | null | 680_Valid_Palindrome_II.py | yuqingchen/Leetcode | 6cbcb36e66a10a226ddb0966701e61ce4c2434d4 | [
"MIT"
class Solution:
    """LeetCode 680 -- Valid Palindrome II.

    Decide whether a string can be made a palindrome by deleting at most
    one character. (Deduplicated: the source contained two identical,
    garbled copies of this class.)
    """

    def validPalindrome(self, s: str) -> bool:
        """Return True if `s` is a palindrome after at most one deletion."""
        left, right = self.twopointer(0, len(s) - 1, s)
        if left >= right:
            # Already a palindrome with zero deletions.
            return True
        # First mismatch at (left, right): try skipping either end once.
        return self.valid(left + 1, right, s) or self.valid(left, right - 1, s)

    def valid(self, left, right, s):
        """Return True if s[left:right+1] is a palindrome."""
        l, r = self.twopointer(left, right, s)
        return l >= r

    def twopointer(self, left, right, s):
        """Move both pointers inward until a mismatch or crossing; return them."""
        while left < right:
            if s[left] != s[right]:
                return left, right
            left += 1
            right -= 1
        return left, right
f717f561ebd073978d59e58a6e54a7189383291d | 24,623 | py | Python | tensorflow_probability/python/distributions/student_t_process.py | hendriksanta/probability | 6eedc0f01a539b3bee7be28ccd2a9cce15d92f7f | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/student_t_process.py | hendriksanta/probability | 6eedc0f01a539b3bee7be28ccd2a9cce15d92f7f | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/student_t_process.py | hendriksanta/probability | 6eedc0f01a539b3bee7be28ccd2a9cce15d92f7f | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The StudentTProcess distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import warnings
# Dependency imports
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import identity as identity_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import multivariate_student_t
from tensorflow_probability.python.distributions import student_t
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import
# Names exported by `from ... import *`; this module's public API.
__all__ = [
    'StudentTProcess',
]
def _add_diagonal_shift(matrix, shift):
  """Returns `matrix` with `shift` added along its main diagonal."""
  diagonal = tf.linalg.diag_part(matrix)
  return tf.linalg.set_diag(
      matrix, diagonal + shift, name='add_diagonal_shift')
def make_cholesky_factored_marginal_fn(jitter):
  """Construct a `marginal_fn` for use with `tfd.StudentTProcess`.

  The returned callable rescales the covariance by `(df - 2) / df`, adds
  `jitter` along the diagonal, Cholesky-factors the result, and uses that
  factor as the `scale` of a `tfd.MultivariateStudentTLinearOperator`.

  Args:
    jitter: `float` scalar `Tensor` added to the diagonal of the covariance
      matrix to ensure positive definiteness of the covariance matrix.

  Returns:
    marginal_fn: A Python function that takes `df`, a location, a
      covariance matrix, optional `validate_args`, `allow_nan_stats` and
      `name` arguments, and returns a
      `tfd.MultivariateStudentTLinearOperator`.
  """
  def marginal_fn(
      df,
      loc,
      covariance,
      validate_args=False,
      allow_nan_stats=False,
      name='marginal_distribution'):
    # Rescale so `covariance` is the covariance (not squared-scale) of the
    # resulting multivariate Student-T.
    df_factor = ((df - 2.) / df)[..., tf.newaxis, tf.newaxis]
    shifted = _add_diagonal_shift(df_factor * covariance, jitter)
    scale_operator = tf.linalg.LinearOperatorLowerTriangular(
        tf.linalg.cholesky(shifted),
        is_non_singular=True,
        name='StudentTProcessScaleLinearOperator')
    return multivariate_student_t.MultivariateStudentTLinearOperator(
        df=df,
        loc=loc,
        scale=scale_operator,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        name=name)
  return marginal_fn
class StudentTProcess(distribution.Distribution):
  """Marginal distribution of a Student's T process at finitely many points.
  A Student's T process (TP) is an indexed collection of random variables, any
  finite collection of which are jointly Multivariate Student's T. While this
  definition applies to finite index sets, it is typically implicit that the
  index set is infinite; in applications, it is often some finite dimensional
  real or complex vector space. In such cases, the TP may be thought of as a
  distribution over (real- or complex-valued) functions defined over the index
  set.
  Just as Student's T distributions are fully specified by their degrees of
  freedom, location and scale, a Student's T process can be completely specified
  by a degrees of freedom parameter, mean function and covariance function.
  Let `S` denote the index set and `K` the space in
  which each indexed random variable takes its values (again, often R or C).
  The mean function is then a map `m: S -> K`, and the covariance function,
  or kernel, is a positive-definite function `k: (S x S) -> K`. The properties
  of functions drawn from a TP are entirely dictated (up to translation) by
  the form of the kernel function.
  This `Distribution` represents the marginal joint distribution over function
  values at a given finite collection of points `[x[1], ..., x[N]]` from the
  index set `S`. By definition, this marginal distribution is just a
  multivariate Student's T distribution, whose mean is given by the vector
  `[ m(x[1]), ..., m(x[N]) ]` and whose covariance matrix is constructed from
  pairwise applications of the kernel function to the given inputs:
  ```none
      | k(x[1], x[1]) k(x[1], x[2]) ... k(x[1], x[N]) |
      | k(x[2], x[1]) k(x[2], x[2]) ... k(x[2], x[N]) |
      |      ...           ...                 ...    |
      | k(x[N], x[1]) k(x[N], x[2]) ... k(x[N], x[N]) |
  ```
  For this to be a valid covariance matrix, it must be symmetric and positive
  definite; hence the requirement that `k` be a positive definite function
  (which, by definition, says that the above procedure will yield PD matrices).
  Note also we use a parameterization as suggested in [1], which requires `df`
  to be greater than 2. This allows for the covariance for any finite
  dimensional marginal of the TP (a multivariate Student's T distribution) to
  just be the PD matrix generated by the kernel.
  #### Mathematical Details
  The probability density function (pdf) is a multivariate Student's T whose
  parameters are derived from the TP's properties:
  ```none
  pdf(x; df, index_points, mean_fn, kernel) = MultivariateStudentT(df, loc, K)
  K = (df - 2) / df * (kernel.matrix(index_points, index_points) +
       observation_noise_variance * eye(N))
  loc = (x - mean_fn(index_points))^T @ K @ (x - mean_fn(index_points))
  ```
  where:
  * `df` is the degrees of freedom parameter for the TP.
  * `index_points` are points in the index set over which the TP is defined,
  * `mean_fn` is a callable mapping the index set to the TP's mean values,
  * `kernel` is `PositiveSemidefiniteKernel`-like and represents the covariance
    function of the TP,
  * `observation_noise_variance` is a term added to the diagonal of the kernel
    matrix. In the limit of `df` to `inf`, this represents the observation noise
    of a gaussian likelihood.
  * `eye(N)` is an N-by-N identity matrix.
  #### Examples
  ##### Draw joint samples from a TP prior
  ```python
  import numpy as np
  import tensorflow.compat.v2 as tf
  import tensorflow_probability as tfp
  tf.enable_v2_behavior()
  tfd = tfp.distributions
  psd_kernels = tfp.math.psd_kernels
  num_points = 100
  # Index points should be a collection (100, here) of feature vectors. In this
  # example, we're using 1-d vectors, so we just need to reshape the output from
  # np.linspace, to give a shape of (100, 1).
  index_points = np.expand_dims(np.linspace(-1., 1., num_points), -1)
  # Define a kernel with default parameters.
  kernel = psd_kernels.ExponentiatedQuadratic()
  tp = tfd.StudentTProcess(3., kernel, index_points)
  samples = tp.sample(10)
  # ==> 10 independently drawn, joint samples at `index_points`
  noisy_tp = tfd.StudentTProcess(
      df=3.,
      kernel=kernel,
      index_points=index_points)
  noisy_samples = noisy_tp.sample(10)
  # ==> 10 independently drawn, noisy joint samples at `index_points`
  ```
  ##### Optimize kernel parameters via maximum marginal likelihood.
  ```python
  # Suppose we have some data from a known function. Note the index points in
  # general have shape `[b1, ..., bB, f1, ..., fF]` (here we assume `F == 1`),
  # so we need to explicitly consume the feature dimensions (just the last one
  # here).
  f = lambda x: np.sin(10*x[..., 0]) * np.exp(-x[..., 0]**2)
  observed_index_points = np.expand_dims(np.random.uniform(-1., 1., 50), -1)
  # Squeeze to take the shape from [50, 1] to [50].
  observed_values = f(observed_index_points)
  amplitude = tfp.util.TransformedVariable(
    1., tfp.bijectors.Softplus(), dtype=np.float64, name='amplitude')
  length_scale = tfp.util.TransformedVariable(
    1., tfp.bijectors.Softplus(), dtype=np.float64, name='length_scale')
  # Define a kernel with trainable parameters.
  kernel = psd_kernels.ExponentiatedQuadratic(
      amplitude=amplitude,
      length_scale=length_scale)
  tp = tfd.StudentTProcess(3., kernel, observed_index_points)
  optimizer = tf.optimizers.Adam()
  @tf.function
  def optimize():
    with tf.GradientTape() as tape:
      loss = -tp.log_prob(observed_values)
    grads = tape.gradient(loss, tp.trainable_variables)
    optimizer.apply_gradients(zip(grads, tp.trainable_variables))
    return loss
  for i in range(1000):
    nll = optimize()
    if i % 100 == 0:
      print("Step {}: NLL = {}".format(i, nll))
  print("Final NLL = {}".format(nll))
  ```
  #### References
  [1]: Amar Shah, Andrew Gordon Wilson, and Zoubin Ghahramani. Student-t
       Processes as Alternatives to Gaussian Processes. In _Artificial
       Intelligence and Statistics_, 2014.
       https://www.cs.cmu.edu/~andrewgw/tprocess.pdf
  """
  @deprecation.deprecated_args(
      '2021-06-26',
      '`jitter` is deprecated; please use `marginal_fn` directly.',
      'jitter')
  def __init__(self,
               df,
               kernel,
               index_points=None,
               mean_fn=None,
               observation_noise_variance=0.,
               marginal_fn=None,
               jitter=1e-6,
               validate_args=False,
               allow_nan_stats=False,
               name='StudentTProcess'):
    """Instantiate a StudentTProcess Distribution.
    Args:
      df: Positive Floating-point `Tensor` representing the degrees of freedom.
        Must be greater than 2.
      kernel: `PositiveSemidefiniteKernel`-like instance representing the
        TP's covariance function.
      index_points: `float` `Tensor` representing finite (batch of) vector(s) of
        points in the index set over which the TP is defined. Shape has the form
        `[b1, ..., bB, e, f1, ..., fF]` where `F` is the number of feature
        dimensions and must equal `kernel.feature_ndims` and `e` is the number
        (size) of index points in each batch. Ultimately this distribution
        corresponds to a `e`-dimensional multivariate Student's T. The batch
        shape must be broadcastable with `kernel.batch_shape` and any batch dims
        yielded by `mean_fn`.
      mean_fn: Python `callable` that acts on `index_points` to produce a (batch
        of) vector(s) of mean values at `index_points`. Takes a `Tensor` of
        shape `[b1, ..., bB, f1, ..., fF]` and returns a `Tensor` whose shape is
        broadcastable with `[b1, ..., bB]`. Default value: `None` implies
        constant zero function.
      observation_noise_variance: `float` `Tensor` representing (batch of)
        scalar variance(s) of the noise in the Normal likelihood
        distribution of the model. If batched, the batch shape must be
        broadcastable with the shapes of all other batched parameters
        (`kernel.batch_shape`, `index_points`, etc.).
        Default value: `0.`
      marginal_fn: A Python callable that takes a location, covariance matrix,
        optional `validate_args`, `allow_nan_stats` and `name` arguments, and
        returns a multivariate normal subclass of `tfd.Distribution`.
        Default value: `None`, in which case a Cholesky-factorizing function
        is created using `make_cholesky_factorizing_marginal_fn` and the
        `jitter` argument.
      jitter: `float` scalar `Tensor` added to the diagonal of the covariance
        matrix to ensure positive definiteness of the covariance matrix.
        Default value: `1e-6`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
        Default value: `False`.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
        Default value: `False`.
      name: Python `str` name prefixed to Ops created by this class.
        Default value: "StudentTProcess".
    Raises:
      ValueError: if `mean_fn` is not `None` and is not callable.
    """
    parameters = dict(locals())
    with tf.name_scope(name) as name:
      dtype = dtype_util.common_dtype(
          [df, index_points, observation_noise_variance, jitter], tf.float32)
      df = tensor_util.convert_nonref_to_tensor(df, dtype=dtype, name='df')
      observation_noise_variance = tensor_util.convert_nonref_to_tensor(
          observation_noise_variance,
          dtype=dtype,
          name='observation_noise_variance')
      index_points = tensor_util.convert_nonref_to_tensor(
          index_points, dtype=dtype, name='index_points')
      jitter = tensor_util.convert_nonref_to_tensor(
          jitter, dtype=dtype, name='jitter')
      self._kernel = kernel
      self._index_points = index_points
      # Default to a constant zero function, borrowing the dtype from
      # index_points to ensure consistency.
      if mean_fn is None:
        mean_fn = lambda x: tf.zeros([1], dtype=dtype)
      else:
        if not callable(mean_fn):
          raise ValueError('`mean_fn` must be a Python callable')
      self._df = df
      self._observation_noise_variance = observation_noise_variance
      self._mean_fn = mean_fn
      self._jitter = jitter
      if marginal_fn is None:
        self._marginal_fn = make_cholesky_factored_marginal_fn(jitter)
      else:
        self._marginal_fn = marginal_fn
      with tf.name_scope('init'):
        super(StudentTProcess, self).__init__(
            dtype=dtype,
            reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
            validate_args=validate_args,
            allow_nan_stats=allow_nan_stats,
            parameters=parameters,
            name=name)
  def _is_univariate_marginal(self, index_points):
    """True if the given index_points would yield a univariate marginal.
    Args:
      index_points: the set of index set locations at which to compute the
        marginal Student T distribution. If this set is of size 1, the marginal is
        univariate.
    Returns:
      is_univariate: Boolean indicating whether the marginal is univariate or
        multivariate. In the case of dynamic shape in the number of index points,
        defaults to "multivariate" since that's the best we can do.
    """
    num_index_points = tf.compat.dimension_value(
        index_points.shape[-(self.kernel.feature_ndims + 1)])
    if num_index_points is None:
      warnings.warn(
          'Unable to detect statically whether the number of index_points is '
          '1. As a result, defaulting to treating the marginal Student T '
          'Process at `index_points` as a multivariate Student T. This makes '
          'some methods, like `cdf` unavailable.')
    return num_index_points == 1
  def _compute_covariance(self, index_points):
    """Kernel matrix at `index_points` plus observation noise on the diagonal.

    Returns a scalar-per-batch value (kernel entry + noise) in the
    univariate case, otherwise a full `[..., e, e]` covariance matrix.
    """
    kernel_matrix = self.kernel.matrix(index_points, index_points)
    if self._is_univariate_marginal(index_points):
      # kernel_matrix thus has shape [..., 1, 1]; squeeze off the last dims and
      # tack on the observation noise variance.
      return (tf.squeeze(kernel_matrix, axis=[-2, -1]) +
              self.observation_noise_variance)
    else:
      observation_noise_variance = tf.convert_to_tensor(
          self.observation_noise_variance)
      # We are computing K + obs_noise_variance * I. The shape of this matrix
      # is going to be a broadcast of the shapes of K and obs_noise_variance *
      # I.
      broadcast_shape = distribution_util.get_broadcast_shape(
          kernel_matrix,
          # We pad with two single dimension since this represents a batch of
          # scaled identity matrices.
          observation_noise_variance[..., tf.newaxis, tf.newaxis])
      kernel_matrix = tf.broadcast_to(kernel_matrix, broadcast_shape)
      return _add_diagonal_shift(
          kernel_matrix, observation_noise_variance[..., tf.newaxis])
  def get_marginal_distribution(self, index_points=None):
    """Compute the marginal over function values at `index_points`.
    Args:
      index_points: `float` `Tensor` representing finite (batch of) vector(s) of
        points in the index set over which the TP is defined. Shape has the form
        `[b1, ..., bB, e, f1, ..., fF]` where `F` is the number of feature
        dimensions and must equal `kernel.feature_ndims` and `e` is the number
        (size) of index points in each batch. Ultimately this distribution
        corresponds to a `e`-dimensional multivariate student t. The batch shape
        must be broadcastable with `kernel.batch_shape` and any batch dims
        yielded by `mean_fn`.
    Returns:
      marginal: a `StudentT` or `MultivariateStudentT` distribution,
        according to whether `index_points` consists of one or many index
        points, respectively.
    """
    with self._name_and_control_scope('get_marginal_distribution'):
      df = tf.convert_to_tensor(self.df)
      index_points = self._get_index_points(index_points)
      covariance = self._compute_covariance(index_points)
      loc = self._mean_fn(index_points)
      # If we're sure the number of index points is 1, we can just construct a
      # scalar Normal. This has computational benefits and supports things like
      # CDF that aren't otherwise straightforward to provide.
      if self._is_univariate_marginal(index_points):
        squared_scale = (df - 2.) / df * covariance
        scale = tf.sqrt(squared_scale)
        # `loc` has a trailing 1 in the shape; squeeze it.
        loc = tf.squeeze(loc, axis=-1)
        return student_t.StudentT(
            df=df,
            loc=loc,
            scale=scale,
            validate_args=self.validate_args,
            allow_nan_stats=self.allow_nan_stats,
            name='marginal_distribution')
      else:
        return self._marginal_fn(
            df=df,
            loc=loc,
            covariance=covariance,
            validate_args=self.validate_args,
            allow_nan_stats=self.allow_nan_stats,
            name='marginal_distribution')
  @property
  def df(self):
    """Degrees-of-freedom parameter (must exceed 2)."""
    return self._df
  @property
  def observation_noise_variance(self):
    """Variance added to the kernel matrix diagonal."""
    return self._observation_noise_variance
  @property
  def mean_fn(self):
    """Mean function mapping index points to mean values."""
    return self._mean_fn
  @property
  def kernel(self):
    """Positive-semidefinite kernel (covariance function) of the process."""
    return self._kernel
  @property
  def index_points(self):
    """Default index points supplied at construction (may be `None`)."""
    return self._index_points
  @property
  def marginal_fn(self):
    """Callable that builds the marginal distribution from df/loc/covariance."""
    return self._marginal_fn
  @property
  def jitter(self):
    """Diagonal jitter used by the default Cholesky-factored marginal."""
    return self._jitter
  def _get_index_points(self, index_points=None):
    """Return `index_points` if not None, else `self._index_points`.
    Args:
      index_points: if given, this is what is returned; else,
        `self._index_points`
    Returns:
      index_points: the given arg, if not None, else the class member
        `self._index_points`.
    Raises:
      ValueError: if `index_points` and `self._index_points` are both `None`.
    """
    if self._index_points is None and index_points is None:
      raise ValueError(
          'This StudentTProcess instance was not instantiated with a value for '
          'index_points. One must therefore be provided when calling sample, '
          'log_prob, and other such methods.')
    return (index_points if index_points is not None
            else tf.convert_to_tensor(self._index_points))
  def _log_prob(self, value, index_points=None):
    """Log probability of `value` under the marginal at `index_points`."""
    return self.get_marginal_distribution(index_points).log_prob(value)
  def _batch_shape_tensor(self, index_points=None):
    """Dynamic broadcast of index-point, kernel, noise and df batch shapes."""
    index_points = self._get_index_points(index_points)
    return functools.reduce(tf.broadcast_dynamic_shape, [
        tf.shape(index_points)[:-(self.kernel.feature_ndims + 1)],
        self.kernel.batch_shape_tensor(),
        tf.shape(self.observation_noise_variance),
        tf.shape(self.df)
    ])
  def _batch_shape(self, index_points=None):
    """Static broadcast of index-point, kernel, noise and df batch shapes."""
    index_points = (
        index_points if index_points is not None else self._index_points)
    return functools.reduce(
        tf.broadcast_static_shape,
        [index_points.shape[:-(self.kernel.feature_ndims + 1)],
         self.kernel.batch_shape,
         self.observation_noise_variance.shape,
         self.df.shape])
  def _event_shape_tensor(self, index_points=None):
    """Dynamic event shape: scalar if univariate, else [num_index_points]."""
    index_points = self._get_index_points(index_points)
    if self._is_univariate_marginal(index_points):
      return tf.constant([], dtype=tf.int32)
    else:
      # The examples index is one position to the left of the feature dims.
      examples_index = -(self.kernel.feature_ndims + 1)
      return tf.shape(index_points)[examples_index:examples_index + 1]
  def _event_shape(self, index_points=None):
    """Static event shape: scalar if univariate, else [num_index_points]."""
    index_points = (
        index_points if index_points is not None else self._index_points)
    if self._is_univariate_marginal(index_points):
      return tf.TensorShape([])
    else:
      # The examples index is one position to the left of the feature dims.
      examples_index = -(self.kernel.feature_ndims + 1)
      shape = index_points.shape[examples_index:examples_index + 1]
      if tensorshape_util.rank(shape) is None:
        return tf.TensorShape([None])
      return shape
  def _sample_n(self, n, seed=None, index_points=None):
    """Draw `n` joint samples from the marginal at `index_points`."""
    return self.get_marginal_distribution(index_points).sample(n, seed=seed)
  def _log_survival_function(self, value, index_points=None):
    """Delegates to the marginal distribution at `index_points`."""
    return self.get_marginal_distribution(
        index_points).log_survival_function(value)
  def _survival_function(self, value, index_points=None):
    """Delegates to the marginal distribution at `index_points`."""
    return self.get_marginal_distribution(index_points).survival_function(value)
  def _log_cdf(self, value, index_points=None):
    """Delegates to the marginal distribution at `index_points`."""
    return self.get_marginal_distribution(index_points).log_cdf(value)
  def _entropy(self, index_points=None):
    """Delegates to the marginal distribution at `index_points`."""
    return self.get_marginal_distribution(index_points).entropy()
  def _mean(self, index_points=None):
    """Delegates to the marginal distribution at `index_points`."""
    return self.get_marginal_distribution(index_points).mean()
  def _quantile(self, value, index_points=None):
    """Delegates to the marginal distribution at `index_points`."""
    return self.get_marginal_distribution(index_points).quantile(value)
  def _stddev(self, index_points=None):
    """Pointwise standard deviation: sqrt of `_variance`."""
    return tf.sqrt(self._variance(index_points=index_points))
  def _variance(self, index_points=None):
    """Pointwise variance: kernel diagonal plus observation noise."""
    index_points = self._get_index_points(index_points)
    kernel_diag = self.kernel.apply(index_points, index_points, example_ndims=1)
    if self._is_univariate_marginal(index_points):
      return (tf.squeeze(kernel_diag, axis=[-1]) +
              self.observation_noise_variance)
    else:
      # We are computing diag(K + obs_noise_variance * I) = diag(K) +
      # obs_noise_variance. We pad obs_noise_variance with a dimension in order
      # to broadcast batch shapes of kernel_diag and obs_noise_variance (since
      # kernel_diag has an extra dimension corresponding to the number of index
      # points).
      return kernel_diag + self.observation_noise_variance[..., tf.newaxis]
  def _covariance(self, index_points=None):
    """Covariance matrix at `index_points` (computed directly from the kernel)."""
    # Using the result of get_marginal_distribution would involve an extra
    # matmul, and possibly even an unnecessary cholesky first. We can avoid that
    # by going straight through the kernel function.
    return self._compute_covariance(self._get_index_points(index_points))
  def _mode(self, index_points=None):
    """Delegates to the marginal distribution at `index_points`."""
    return self.get_marginal_distribution(index_points).mode()
  def _default_event_space_bijector(self):
    """Identity: TP function values are unconstrained reals."""
    return identity_bijector.Identity(validate_args=self.validate_args)
  def _parameter_control_dependencies(self, is_init):
    """Assertions enforcing `df > 2` (required by the parameterization in [1])."""
    if not self.validate_args:
      return []
    assertions = []
    if is_init != tensor_util.is_ref(self.df):
      assertions.append(
          assert_util.assert_greater(
              self.df, dtype_util.as_numpy_dtype(self.df.dtype)(2.),
              message='`df` must be greater than 2.'))
    return assertions
| 41.038333 | 92 | 0.699671 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import warnings
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import identity as identity_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import multivariate_student_t
from tensorflow_probability.python.distributions import student_t
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow.python.util import deprecation
__all__ = [
'StudentTProcess',
]
def _add_diagonal_shift(matrix, shift):
return tf.linalg.set_diag(
matrix, tf.linalg.diag_part(matrix) + shift, name='add_diagonal_shift')
def make_cholesky_factored_marginal_fn(jitter):
def marginal_fn(
df,
loc,
covariance,
validate_args=False,
allow_nan_stats=False,
name='marginal_distribution'):
squared_scale = ((df - 2.) / df)[
..., tf.newaxis, tf.newaxis] * covariance
scale = tf.linalg.LinearOperatorLowerTriangular(
tf.linalg.cholesky(_add_diagonal_shift(squared_scale, jitter)),
is_non_singular=True,
name='StudentTProcessScaleLinearOperator')
return multivariate_student_t.MultivariateStudentTLinearOperator(
df=df,
loc=loc,
scale=scale,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
return marginal_fn
class StudentTProcess(distribution.Distribution):
@deprecation.deprecated_args(
'2021-06-26',
'`jitter` is deprecated; please use `marginal_fn` directly.',
'jitter')
def __init__(self,
df,
kernel,
index_points=None,
mean_fn=None,
observation_noise_variance=0.,
marginal_fn=None,
jitter=1e-6,
validate_args=False,
allow_nan_stats=False,
name='StudentTProcess'):
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype(
[df, index_points, observation_noise_variance, jitter], tf.float32)
df = tensor_util.convert_nonref_to_tensor(df, dtype=dtype, name='df')
observation_noise_variance = tensor_util.convert_nonref_to_tensor(
observation_noise_variance,
dtype=dtype,
name='observation_noise_variance')
index_points = tensor_util.convert_nonref_to_tensor(
index_points, dtype=dtype, name='index_points')
jitter = tensor_util.convert_nonref_to_tensor(
jitter, dtype=dtype, name='jitter')
self._kernel = kernel
self._index_points = index_points
if mean_fn is None:
mean_fn = lambda x: tf.zeros([1], dtype=dtype)
else:
if not callable(mean_fn):
raise ValueError('`mean_fn` must be a Python callable')
self._df = df
self._observation_noise_variance = observation_noise_variance
self._mean_fn = mean_fn
self._jitter = jitter
if marginal_fn is None:
self._marginal_fn = make_cholesky_factored_marginal_fn(jitter)
else:
self._marginal_fn = marginal_fn
with tf.name_scope('init'):
super(StudentTProcess, self).__init__(
dtype=dtype,
reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
def _is_univariate_marginal(self, index_points):
num_index_points = tf.compat.dimension_value(
index_points.shape[-(self.kernel.feature_ndims + 1)])
if num_index_points is None:
warnings.warn(
'Unable to detect statically whether the number of index_points is '
'1. As a result, defaulting to treating the marginal Student T '
'Process at `index_points` as a multivariate Student T. This makes '
'some methods, like `cdf` unavailable.')
return num_index_points == 1
def _compute_covariance(self, index_points):
kernel_matrix = self.kernel.matrix(index_points, index_points)
if self._is_univariate_marginal(index_points):
return (tf.squeeze(kernel_matrix, axis=[-2, -1]) +
self.observation_noise_variance)
else:
observation_noise_variance = tf.convert_to_tensor(
self.observation_noise_variance)
broadcast_shape = distribution_util.get_broadcast_shape(
kernel_matrix,
observation_noise_variance[..., tf.newaxis, tf.newaxis])
kernel_matrix = tf.broadcast_to(kernel_matrix, broadcast_shape)
return _add_diagonal_shift(
kernel_matrix, observation_noise_variance[..., tf.newaxis])
def get_marginal_distribution(self, index_points=None):
with self._name_and_control_scope('get_marginal_distribution'):
df = tf.convert_to_tensor(self.df)
index_points = self._get_index_points(index_points)
covariance = self._compute_covariance(index_points)
loc = self._mean_fn(index_points)
# scalar Normal. This has computational benefits and supports things like
# CDF that aren't otherwise straightforward to provide.
if self._is_univariate_marginal(index_points):
squared_scale = (df - 2.) / df * covariance
scale = tf.sqrt(squared_scale)
loc = tf.squeeze(loc, axis=-1)
return student_t.StudentT(
df=df,
loc=loc,
scale=scale,
validate_args=self.validate_args,
allow_nan_stats=self.allow_nan_stats,
name='marginal_distribution')
else:
return self._marginal_fn(
df=df,
loc=loc,
covariance=covariance,
validate_args=self.validate_args,
allow_nan_stats=self.allow_nan_stats,
name='marginal_distribution')
@property
def df(self):
return self._df
@property
def observation_noise_variance(self):
return self._observation_noise_variance
@property
def mean_fn(self):
return self._mean_fn
@property
def kernel(self):
return self._kernel
@property
def index_points(self):
return self._index_points
@property
def marginal_fn(self):
return self._marginal_fn
@property
def jitter(self):
return self._jitter
def _get_index_points(self, index_points=None):
if self._index_points is None and index_points is None:
raise ValueError(
'This StudentTProcess instance was not instantiated with a value for '
'index_points. One must therefore be provided when calling sample, '
'log_prob, and other such methods.')
return (index_points if index_points is not None
else tf.convert_to_tensor(self._index_points))
def _log_prob(self, value, index_points=None):
return self.get_marginal_distribution(index_points).log_prob(value)
def _batch_shape_tensor(self, index_points=None):
index_points = self._get_index_points(index_points)
return functools.reduce(tf.broadcast_dynamic_shape, [
tf.shape(index_points)[:-(self.kernel.feature_ndims + 1)],
self.kernel.batch_shape_tensor(),
tf.shape(self.observation_noise_variance),
tf.shape(self.df)
])
def _batch_shape(self, index_points=None):
index_points = (
index_points if index_points is not None else self._index_points)
return functools.reduce(
tf.broadcast_static_shape,
[index_points.shape[:-(self.kernel.feature_ndims + 1)],
self.kernel.batch_shape,
self.observation_noise_variance.shape,
self.df.shape])
def _event_shape_tensor(self, index_points=None):
index_points = self._get_index_points(index_points)
if self._is_univariate_marginal(index_points):
return tf.constant([], dtype=tf.int32)
else:
examples_index = -(self.kernel.feature_ndims + 1)
return tf.shape(index_points)[examples_index:examples_index + 1]
def _event_shape(self, index_points=None):
index_points = (
index_points if index_points is not None else self._index_points)
if self._is_univariate_marginal(index_points):
return tf.TensorShape([])
else:
examples_index = -(self.kernel.feature_ndims + 1)
shape = index_points.shape[examples_index:examples_index + 1]
if tensorshape_util.rank(shape) is None:
return tf.TensorShape([None])
return shape
def _sample_n(self, n, seed=None, index_points=None):
return self.get_marginal_distribution(index_points).sample(n, seed=seed)
def _log_survival_function(self, value, index_points=None):
return self.get_marginal_distribution(
index_points).log_survival_function(value)
def _survival_function(self, value, index_points=None):
return self.get_marginal_distribution(index_points).survival_function(value)
def _log_cdf(self, value, index_points=None):
return self.get_marginal_distribution(index_points).log_cdf(value)
def _entropy(self, index_points=None):
return self.get_marginal_distribution(index_points).entropy()
def _mean(self, index_points=None):
return self.get_marginal_distribution(index_points).mean()
def _quantile(self, value, index_points=None):
return self.get_marginal_distribution(index_points).quantile(value)
def _stddev(self, index_points=None):
return tf.sqrt(self._variance(index_points=index_points))
def _variance(self, index_points=None):
index_points = self._get_index_points(index_points)
kernel_diag = self.kernel.apply(index_points, index_points, example_ndims=1)
if self._is_univariate_marginal(index_points):
return (tf.squeeze(kernel_diag, axis=[-1]) +
self.observation_noise_variance)
else:
return kernel_diag + self.observation_noise_variance[..., tf.newaxis]
def _covariance(self, index_points=None):
return self._compute_covariance(self._get_index_points(index_points))
def _mode(self, index_points=None):
return self.get_marginal_distribution(index_points).mode()
def _default_event_space_bijector(self):
return identity_bijector.Identity(validate_args=self.validate_args)
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
assertions = []
if is_init != tensor_util.is_ref(self.df):
assertions.append(
assert_util.assert_greater(
self.df, dtype_util.as_numpy_dtype(self.df.dtype)(2.),
message='`df` must be greater than 2.'))
return assertions
| true | true |
f717f5f0396b42207e68544fbe1af909acdf9d1d | 520 | py | Python | rest/test-credentials/test-calls-example-2/test-calls-example-2.6.x.py | Tshisuaka/api-snippets | 52b50037d4af0f3b96adf76197964725a1501e96 | [
"MIT"
] | 234 | 2016-01-27T03:04:38.000Z | 2022-02-25T20:13:43.000Z | rest/test-credentials/test-calls-example-2/test-calls-example-2.6.x.py | Tshisuaka/api-snippets | 52b50037d4af0f3b96adf76197964725a1501e96 | [
"MIT"
] | 351 | 2016-04-06T16:55:33.000Z | 2022-03-10T18:42:36.000Z | rest/test-credentials/test-calls-example-2/test-calls-example-2.6.x.py | Tshisuaka/api-snippets | 52b50037d4af0f3b96adf76197964725a1501e96 | [
"MIT"
] | 494 | 2016-03-30T15:28:20.000Z | 2022-03-28T19:39:36.000Z | # Download the Python helper library from twilio.com/docs/python/install
# Example: place an outbound call with the Twilio REST helper library.
import os
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
# NOTE(review): the +1500555xxxx numbers look like Twilio test-credential
# magic numbers (file path says "test-credentials") — confirm before reuse.
call = client.calls.create(
    url="http://demo.twilio.com/docs/voice.xml",
    to="+15005550003",
    from_="+15005550006"
)
# Print the SID identifying the created call resource.
print(call.sid)
| 27.368421 | 72 | 0.755769 |
import os
from twilio.rest import Client
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
call = client.calls.create(
url="http://demo.twilio.com/docs/voice.xml",
to="+15005550003",
from_="+15005550006"
)
print(call.sid)
| true | true |
f717f64d749aac3930348898cd007d5ac9c4b917 | 1,314 | py | Python | example/test_img_similarity.py | Pandinosaurus/img2vec | e80c2f46ee707fb95d7bd6944b5c224acc1ec8c0 | [
"MIT"
] | 1 | 2019-05-31T14:02:51.000Z | 2019-05-31T14:02:51.000Z | example/test_img_similarity.py | Pandinosaurus/img2vec | e80c2f46ee707fb95d7bd6944b5c224acc1ec8c0 | [
"MIT"
] | null | null | null | example/test_img_similarity.py | Pandinosaurus/img2vec | e80c2f46ee707fb95d7bd6944b5c224acc1ec8c0 | [
"MIT"
] | null | null | null | import sys
import os
sys.path.append("../img2vec_pytorch") # Adds higher directory to python modules path.
from img_to_vec import Img2Vec
from PIL import Image
from sklearn.metrics.pairwise import cosine_similarity
# Interactive demo: embed every image in ./test_images with Img2Vec, then
# repeatedly prompt for a filename and print cosine similarities to the rest.
input_path = './test_images'
print("Getting vectors for test images...\n")
img2vec = Img2Vec()
# For each test image, we store the filename and vector as key, value in a dictionary
pics = {}
for file in os.listdir(input_path):
    filename = os.fsdecode(file)
    img = Image.open(os.path.join(input_path, filename)).convert('RGB')
    vec = img2vec.get_vec(img)
    pics[filename] = vec
available_filenames = ", ".join(pics.keys())
pic_name = ""
# Loop until the user types "exit". NOTE(review): typing "exit" still runs the
# try block once, so it prints a "Could not find filename" message before quitting
# (assuming no image is literally named "exit").
while pic_name != "exit":
    pic_name = str(input("\nWhich filename would you like similarities for?\nAvailable options: " + available_filenames + "\n"))
    try:
        sims = {}
        # Cosine similarity between the chosen vector and every other vector.
        for key in list(pics.keys()):
            if key == pic_name:
                continue
            sims[key] = cosine_similarity(pics[pic_name].reshape((1, -1)), pics[key].reshape((1, -1)))[0][0]
        # Sort (similarity, filename) pairs most-similar first.
        d_view = [(v, k) for k, v in sims.items()]
        d_view.sort(reverse=True)
        for v, k in d_view:
            print(v, k)
    except KeyError as e:
        print('Could not find filename %s' % e)
    except Exception as e:
        print(e)
| 29.2 | 128 | 0.642314 | import sys
import os
sys.path.append("../img2vec_pytorch")
from img_to_vec import Img2Vec
from PIL import Image
from sklearn.metrics.pairwise import cosine_similarity
input_path = './test_images'
print("Getting vectors for test images...\n")
img2vec = Img2Vec()
pics = {}
for file in os.listdir(input_path):
filename = os.fsdecode(file)
img = Image.open(os.path.join(input_path, filename)).convert('RGB')
vec = img2vec.get_vec(img)
pics[filename] = vec
available_filenames = ", ".join(pics.keys())
pic_name = ""
while pic_name != "exit":
pic_name = str(input("\nWhich filename would you like similarities for?\nAvailable options: " + available_filenames + "\n"))
try:
sims = {}
for key in list(pics.keys()):
if key == pic_name:
continue
sims[key] = cosine_similarity(pics[pic_name].reshape((1, -1)), pics[key].reshape((1, -1)))[0][0]
d_view = [(v, k) for k, v in sims.items()]
d_view.sort(reverse=True)
for v, k in d_view:
print(v, k)
except KeyError as e:
print('Could not find filename %s' % e)
except Exception as e:
print(e)
| true | true |
f717f6ee21c9fa11dd8b2998e6722883254a2f34 | 8,734 | py | Python | testing/test_cde_io.py | eberharf/cfl | 077b99a05824f1371ac47d76dfed6bb160222668 | [
"BSD-3-Clause"
] | 6 | 2021-01-09T04:46:55.000Z | 2022-03-19T22:27:13.000Z | testing/test_cde_io.py | eberharf/cfl | 077b99a05824f1371ac47d76dfed6bb160222668 | [
"BSD-3-Clause"
] | 12 | 2021-01-11T16:32:58.000Z | 2022-03-19T13:21:30.000Z | testing/test_cde_io.py | eberharf/cfl | 077b99a05824f1371ac47d76dfed6bb160222668 | [
"BSD-3-Clause"
] | null | null | null | import os
import shutil
from shutil import Error
import unittest
import numpy as np
import tensorflow as tf
from cdes_for_testing import all_cdes
from cfl.dataset import Dataset
''' The following code runs all tests in CondExpIOTests on all implemented
CondExpXxxx classes.
'''
def make_cde_io_tests(cond_exp_class):
    """Build a unittest.TestCase subclass exercising the I/O surface
    (init / save / load / train / predict / evaluate / _build_model)
    of the given CondExpBase descendant `cond_exp_class`."""
    # generic test class for any CondExpBase descendant
    # (passed in as cond_exp_class)
    class CondExpIOTests(unittest.TestCase):
        """I/O tests parameterized over `cond_exp_class` via closure."""
        def setUp(self):  # overridden unittest.TestCase method that will be
            # called in initialization
            self.data_info = { 'X_dims' : (10,3),
                               'Y_dims' : (10,2),
                               'Y_type' : 'continuous'}
            self.params = { 'show_plot' : False,
                            'n_epochs' : 2}
            self.ceb = cond_exp_class(self.data_info, self.params)
        ## INIT ###############################################################
        def test_init_wrong_input_types(self):
            data_info = 'str is bad'
            params = 'these are not params'
            self.assertRaises(AssertionError, cond_exp_class, data_info, params)
        def test_init_wrong_data_info_keys(self):
            data_info = {}
            params = {}
            self.assertRaises(AssertionError, cond_exp_class, data_info,
                params)
        def test_init_wrong_data_info_value_types(self):
            data_info = {'X_dims' : None, 'Y_dims' : None, 'Y_type' : None}
            params = {}
            self.assertRaises(AssertionError, cond_exp_class, data_info,
                params)
        def test_init_wrong_data_info_values(self):
            data_info = { 'X_dims' : (0,0),
                          'Y_dims' : (0,0),
                          'Y_type' : 'continuous'}
            params = {}
            self.assertRaises(AssertionError, cond_exp_class, data_info,
                params)
            # mismatched first dimensions of X and Y should also fail
            data_info = { 'X_dims' : (10,3),
                          'Y_dims' : (12,2),
                          'Y_type' : 'continuous'}
            params = {}
            self.assertRaises(AssertionError, cond_exp_class, data_info,
                params)
        def test_init_correct_inputs(self):
            data_info = {'X_dims' : (10,3),
                         'Y_dims' : (10,2),
                         'Y_type' : 'continuous'}
            params = {}
            ceb = cond_exp_class(data_info, params)
        ## SAVE_BLOCK #########################################################
        def test_save_block_wrong_input_type(self):
            path = 123
            self.assertRaises(AssertionError, self.ceb.save_block, path)
        def test_save_block_correct_input_type(self):
            path = 'not/a/real/path'
            self.ceb.save_block(path)
            shutil.rmtree('not')
        ## LOAD_BLOCK #########################################################
        def test_load_block_wrong_input_type(self):
            path = 123
            self.assertRaises(AssertionError, self.ceb.load_block, path)
        def test_load_block_correct_input_type(self):
            # should only be run after test_save_block_correct_input_type so
            # there is something to load
            path = 'not/a/real/path'
            self.ceb.save_block(path)
            self.ceb.load_block(path)
            shutil.rmtree('not')
            # check and reset state
            assert self.ceb.trained, 'CDE should be trained after loading'
            self.ceb.trained = False
        ### TRAIN ############################################################
        def test_train_wrong_input_type(self):
            dataset = 'this is not a Dataset'
            prev_results = 'this is not a dict'
            self.assertRaises(AssertionError, self.ceb.train, dataset,
                prev_results)
        def test_train_correct_input_type(self):
            dataset = Dataset(X=np.ones(self.data_info['X_dims']),
                              Y=np.zeros(self.data_info['Y_dims']))
            # what we expect from train outputs
            tkeys = ['train_loss','val_loss','loss_plot','model_weights','pyx']
            tshapes = {'train_loss' : (self.params['n_epochs'],),
                       'val_loss' : (self.params['n_epochs'],),
                       'pyx' : (self.data_info['Y_dims'])
                       }
            for prev_results in [None, {}]:
                # reset
                self.ceb.trained = False
                train_results = self.ceb.train(dataset, prev_results)
                # check state
                assert self.ceb.trained, 'CDE should be trained after loading'
                # check outputs
                assert set(train_results.keys())==set(tkeys), \
                    f'train should return dict with keys: {tkeys}'
                for k in tshapes.keys():
                    assert tshapes[k]==np.array(train_results[k]).shape, \
                        f'expected {k} to have shape {tshapes[k]} but got \
                        {train_results[k].shape}'
        def test_train_twice(self):
            dataset = Dataset(X=np.ones(self.data_info['X_dims']),
                              Y=np.zeros(self.data_info['Y_dims']))
            prev_results = None
            # reset
            self.ceb.trained = False
            # what we expect from train outputs first time
            tkeys = ['train_loss','val_loss','loss_plot','model_weights','pyx']
            train_results = self.ceb.train(dataset, prev_results)
            # check state and outputs
            assert self.ceb.trained, 'CDE should be trained after loading'
            assert set(train_results.keys())==set(tkeys), \
                f'train should return dict with keys: {tkeys}'
            # what we expect from train outputs second time
            tkeys = ['pyx']
            train_results = self.ceb.train(dataset, prev_results)
            # check state and outputs
            assert self.ceb.trained, 'CDE should be trained after loading'
            assert set(train_results.keys())==set(tkeys), \
                f'train should return dict with keys: {tkeys}'
        ### PREDICT ##########################################################
        def test_predict_wrong_input_type(self):
            # artificially set CDE trained = True
            self.ceb.trained = True
            dataset = 'this is not a Dataset'
            prev_results = 'this is not a dict'
            self.assertRaises(AssertionError, self.ceb.predict, dataset,
                prev_results)
        def test_predict_correct_input_type(self):
            dataset = Dataset(X=np.ones(self.data_info['X_dims']),
                              Y=np.zeros(self.data_info['Y_dims']))
            prev_results = None
            for prev_results in [None, {}]:
                self.ceb.train(dataset, prev_results)
                pred_results = self.ceb.predict(dataset, prev_results)
                # check output
                assert set(pred_results.keys())==set(['pyx']), f'pred_results \
                    keys should contain pyx, but contains {pred_results.keys()}'
                assert pred_results['pyx'].shape==self.data_info['Y_dims'], \
                    f"expected {self.data_info['Y_dims']} but got \
                    {pred_results['pyx'].shape}"
        ### EVALUATE #########################################################
        def test_evaluate_wrong_input_type(self):
            # artificially set CDE trained = True
            self.ceb.trained = True
            dataset = 'this is not a Dataset'
            prev_results = 'this is not a dict'
            self.assertRaises(AssertionError, self.ceb.evaluate, dataset)
        def test_evaluate_correct_input_type(self):
            dataset = Dataset(X=np.ones(self.data_info['X_dims']),
                              Y=np.zeros(self.data_info['Y_dims']))
            prev_results = None
            self.ceb.train(dataset, prev_results)
            score = self.ceb.evaluate(dataset)
            assert score.shape==()
            assert score.dtype==np.float32
        ### BUILD_MODEL ######################################################
        def test_build_model(self):
            assert isinstance(self.ceb._build_model(), tf.keras.Sequential)
    return CondExpIOTests
for cond_exp_class in all_cdes:
    # Bug fix: the original rebound the single name `ConcreteIOTests` on each
    # iteration, so unittest discovery only ever collected the test class for
    # the LAST entry of `all_cdes`. Register one uniquely named subclass per
    # CDE at module level; `ConcreteIOTests` itself stays bound (to the last
    # generated class) for backward compatibility.
    class ConcreteIOTests(make_cde_io_tests(cond_exp_class)):
        pass
    ConcreteIOTests.__name__ = 'ConcreteIOTests_' + cond_exp_class.__name__
    ConcreteIOTests.__qualname__ = ConcreteIOTests.__name__
    globals()[ConcreteIOTests.__name__] = ConcreteIOTests
| 39.342342 | 80 | 0.517174 | import os
import shutil
from shutil import Error
import unittest
import numpy as np
import tensorflow as tf
from cdes_for_testing import all_cdes
from cfl.dataset import Dataset
def make_cde_io_tests(cond_exp_class):
class CondExpIOTests(unittest.TestCase):
def setUp(self):
self.data_info = { 'X_dims' : (10,3),
'Y_dims' : (10,2),
'Y_type' : 'continuous'}
self.params = { 'show_plot' : False,
'n_epochs' : 2}
self.ceb = cond_exp_class(self.data_info, self.params)
| true | true |
f717f76f6731c769b821c9ceaf17edbc8eba9b54 | 50,551 | py | Python | python/ccxt/async_support/bitbay.py | mariuszskon/ccxt | 13253de7346e33cd384f79abf7dfb64dcbfdc35f | [
"MIT"
] | 4 | 2021-09-24T09:18:36.000Z | 2022-03-15T16:47:09.000Z | python/ccxt/async_support/bitbay.py | mariuszskon/ccxt | 13253de7346e33cd384f79abf7dfb64dcbfdc35f | [
"MIT"
] | null | null | null | python/ccxt/async_support/bitbay.py | mariuszskon/ccxt | 13253de7346e33cd384f79abf7dfb64dcbfdc35f | [
"MIT"
] | 2 | 2021-10-01T21:51:37.000Z | 2021-10-02T16:23:05.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import OrderImmediatelyFillable
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
from ccxt.base.precise import Precise
class bitbay(Exchange):
    def describe(self):
        """Static exchange metadata merged over the base Exchange description:
        identifiers, capability flags, OHLCV timeframes, REST endpoint maps
        (legacy public/private API plus v1.0.1), fee schedules, error-code
        mapping, and currency-code overrides."""
        return self.deep_extend(super(bitbay, self).describe(), {
            'id': 'bitbay',
            'name': 'BitBay',
            'countries': ['MT', 'EU'],  # Malta
            'rateLimit': 1000,
            'has': {
                'cancelOrder': True,
                'CORS': True,
                'createOrder': True,
                'fetchBalance': True,
                'fetchLedger': True,
                'fetchMarkets': True,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrderBook': True,
                'fetchTicker': True,
                'fetchTrades': True,
                'withdraw': True,
            },
            'timeframes': {
                '1m': '60',
                '3m': '180',
                '5m': '300',
                '15m': '900',
                '30m': '1800',
                '1h': '3600',
                '2h': '7200',
                '4h': '14400',
                '6h': '21600',
                '12h': '43200',
                '1d': '86400',
                '3d': '259200',
                '1w': '604800',
            },
            'hostname': 'bitbay.net',
            'urls': {
                'referral': 'https://auth.bitbay.net/ref/jHlbB4mIkdS1',
                'logo': 'https://user-images.githubusercontent.com/1294454/27766132-978a7bd8-5ece-11e7-9540-bc96d1e9bbb8.jpg',
                'www': 'https://bitbay.net',
                'api': {
                    'public': 'https://{hostname}/API/Public',
                    'private': 'https://{hostname}/API/Trading/tradingApi.php',
                    'v1_01Public': 'https://api.{hostname}/rest',
                    'v1_01Private': 'https://api.{hostname}/rest',
                },
                'doc': [
                    'https://bitbay.net/public-api',
                    'https://bitbay.net/en/private-api',
                    'https://bitbay.net/account/tab-api',
                    'https://github.com/BitBayNet/API',
                    'https://docs.bitbay.net/v1.0.1-en/reference',
                ],
                'support': 'https://support.bitbay.net',
                'fees': 'https://bitbay.net/en/fees',
            },
            'api': {
                'public': {
                    'get': [
                        '{id}/all',
                        '{id}/market',
                        '{id}/orderbook',
                        '{id}/ticker',
                        '{id}/trades',
                    ],
                },
                'private': {
                    'post': [
                        'info',
                        'trade',
                        'cancel',
                        'orderbook',
                        'orders',
                        'transfer',
                        'withdraw',
                        'history',
                        'transactions',
                    ],
                },
                'v1_01Public': {
                    'get': [
                        'trading/ticker',
                        'trading/ticker/{symbol}',
                        'trading/stats',
                        'trading/orderbook/{symbol}',
                        'trading/transactions/{symbol}',
                        'trading/candle/history/{symbol}/{resolution}',
                    ],
                },
                'v1_01Private': {
                    'get': [
                        'payments/withdrawal/{detailId}',
                        'payments/deposit/{detailId}',
                        'trading/offer',
                        'trading/config/{symbol}',
                        'trading/history/transactions',
                        'balances/BITBAY/history',
                        'balances/BITBAY/balance',
                        'fiat_cantor/rate/{baseId}/{quoteId}',
                        'fiat_cantor/history',
                    ],
                    'post': [
                        'trading/offer/{symbol}',
                        'trading/config/{symbol}',
                        'balances/BITBAY/balance',
                        'balances/BITBAY/balance/transfer/{source}/{destination}',
                        'fiat_cantor/exchange',
                    ],
                    'delete': [
                        'trading/offer/{symbol}/{id}/{side}/{price}',
                    ],
                    'put': [
                        'balances/BITBAY/balance/{id}',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'maker': 0.0,
                    'taker': 0.1 / 100,
                    'percentage': True,
                    'tierBased': False,
                },
                'fiat': {
                    'maker': 0.30 / 100,
                    'taker': 0.43 / 100,
                    'percentage': True,
                    'tierBased': True,
                    'tiers': {
                        # [fee rate, 30-day volume threshold] pairs —
                        # presumably EUR-denominated tiers; verify against the fees page.
                        'taker': [
                            [0.0043, 0],
                            [0.0042, 1250],
                            [0.0041, 3750],
                            [0.0040, 7500],
                            [0.0039, 10000],
                            [0.0038, 15000],
                            [0.0037, 20000],
                            [0.0036, 25000],
                            [0.0035, 37500],
                            [0.0034, 50000],
                            [0.0033, 75000],
                            [0.0032, 100000],
                            [0.0031, 150000],
                            [0.0030, 200000],
                            [0.0029, 250000],
                            [0.0028, 375000],
                            [0.0027, 500000],
                            [0.0026, 625000],
                            [0.0025, 875000],
                        ],
                        'maker': [
                            [0.0030, 0],
                            [0.0029, 1250],
                            [0.0028, 3750],
                            [0.0028, 7500],
                            [0.0027, 10000],
                            [0.0026, 15000],
                            [0.0025, 20000],
                            [0.0025, 25000],
                            [0.0024, 37500],
                            [0.0023, 50000],
                            [0.0023, 75000],
                            [0.0022, 100000],
                            [0.0021, 150000],
                            [0.0021, 200000],
                            [0.0020, 250000],
                            [0.0019, 375000],
                            [0.0018, 500000],
                            [0.0018, 625000],
                            [0.0017, 875000],
                        ],
                    },
                },
                'funding': {
                    'withdraw': {
                        'BTC': 0.0009,
                        'LTC': 0.005,
                        'ETH': 0.00126,
                        'LSK': 0.2,
                        'BCH': 0.0006,
                        'GAME': 0.005,
                        'DASH': 0.001,
                        'BTG': 0.0008,
                        'PLN': 4,
                        'EUR': 1.5,
                    },
                },
            },
            'options': {
                'fiatCurrencies': ['EUR', 'USD', 'GBP', 'PLN'],
            },
            'exceptions': {
                '400': ExchangeError,  # At least one parameter wasn't set
                '401': InvalidOrder,  # Invalid order type
                '402': InvalidOrder,  # No orders with specified currencies
                '403': InvalidOrder,  # Invalid payment currency name
                '404': InvalidOrder,  # Error. Wrong transaction type
                '405': InvalidOrder,  # Order with self id doesn't exist
                '406': InsufficientFunds,  # Not enough money or crypto
                # code 407 is not specified in their docs
                '408': InvalidOrder,  # Invalid currency name
                '501': AuthenticationError,  # Invalid public key
                '502': AuthenticationError,  # Invalid sign
                '503': InvalidNonce,  # Invalid moment parameter. Request time doesn't match current server time
                '504': ExchangeError,  # Invalid method
                '505': AuthenticationError,  # Key has no permission for self action
                '506': AccountSuspended,  # Account locked. Please contact with customer service
                # codes 507 and 508 are not specified in their docs
                '509': ExchangeError,  # The BIC/SWIFT is required for self currency
                '510': BadSymbol,  # Invalid market name
                'FUNDS_NOT_SUFFICIENT': InsufficientFunds,
                'OFFER_FUNDS_NOT_EXCEEDING_MINIMUMS': InvalidOrder,
                'OFFER_NOT_FOUND': OrderNotFound,
                'OFFER_WOULD_HAVE_BEEN_PARTIALLY_FILLED': OrderImmediatelyFillable,
                'ACTION_LIMIT_EXCEEDED': RateLimitExceeded,
                'UNDER_MAINTENANCE': OnMaintenance,
                'REQUEST_TIMESTAMP_TOO_OLD': InvalidNonce,
                'PERMISSIONS_NOT_SUFFICIENT': PermissionDenied,
            },
            'commonCurrencies': {
                'GGC': 'Global Game Coin',
            },
        })
    async def fetch_markets(self, params={}):
        """Fetch the list of markets from the v1.0.1 ticker endpoint and map
        them into ccxt's unified market structure. Fiat pairs (base or quote
        in options['fiatCurrencies']) get the 'fiat' fee schedule instead of
        the default 'trading' one."""
        response = await self.v1_01PublicGetTradingTicker(params)
        fiatCurrencies = self.safe_value(self.options, 'fiatCurrencies', [])
        #
        # {
        #     status: 'Ok',
        #     items: {
        #         'BSV-USD': {
        #             market: {
        #                 code: 'BSV-USD',
        #                 first: {currency: 'BSV', minOffer: '0.00035', scale: 8},
        #                 second: {currency: 'USD', minOffer: '5', scale: 2}
        #             },
        #             time: '1557569762154',
        #             highestBid: '52.31',
        #             lowestAsk: '62.99',
        #             rate: '63',
        #             previousRate: '51.21',
        #         },
        #     },
        # }
        #
        result = []
        items = self.safe_value(response, 'items')
        keys = list(items.keys())
        for i in range(0, len(keys)):
            key = keys[i]
            item = items[key]
            market = self.safe_value(item, 'market', {})
            # 'first' is the base currency entry, 'second' the quote entry
            first = self.safe_value(market, 'first', {})
            second = self.safe_value(market, 'second', {})
            baseId = self.safe_string(first, 'currency')
            quoteId = self.safe_string(second, 'currency')
            id = baseId + quoteId  # NOTE: shadows the builtin `id` locally
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            symbol = base + '/' + quote
            precision = {
                'amount': self.safe_integer(first, 'scale'),
                'price': self.safe_integer(second, 'scale'),
            }
            fees = self.safe_value(self.fees, 'trading', {})
            if self.in_array(base, fiatCurrencies) or self.in_array(quote, fiatCurrencies):
                fees = self.safe_value(self.fees, 'fiat', {})
            maker = self.safe_number(fees, 'maker')
            taker = self.safe_number(fees, 'taker')
            # todo: check that the limits have been interpreted correctly
            # todo: parse the fees page
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'precision': precision,
                'active': None,
                'maker': maker,
                'taker': taker,
                'limits': {
                    'amount': {
                        'min': self.safe_number(first, 'minOffer'),
                        'max': None,
                    },
                    'price': {
                        'min': None,
                        'max': None,
                    },
                    'cost': {
                        'min': self.safe_number(second, 'minOffer'),
                        'max': None,
                    },
                },
                'info': item,
            })
        return result
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
response = await self.v1_01PrivateGetTradingOffer(self.extend(request, params))
items = self.safe_value(response, 'items', [])
return self.parse_orders(items, None, since, limit, {'status': 'open'})
    def parse_order(self, order, market=None):
        """Convert a raw exchange offer structure into ccxt's unified order
        structure. Fields the payload does not provide (status, cost, filled,
        fee, trades, ...) are left as None for safe_order to normalize."""
        #
        #     {
        #         market: 'ETH-EUR',
        #         offerType: 'Sell',
        #         id: '93d3657b-d616-11e9-9248-0242ac110005',
        #         currentAmount: '0.04',
        #         lockedAmount: '0.04',
        #         rate: '280',
        #         startAmount: '0.04',
        #         time: '1568372806924',
        #         postOnly: False,
        #         hidden: False,
        #         mode: 'limit',
        #         receivedAmount: '0.0',
        #         firstBalanceId: '5b816c3e-437c-4e43-9bef-47814ae7ebfc',
        #         secondBalanceId: 'ab43023b-4079-414c-b340-056e3430a3af'
        #     }
        #
        marketId = self.safe_string(order, 'market')
        symbol = self.safe_symbol(marketId, market, '-')
        timestamp = self.safe_integer(order, 'time')
        # startAmount is the original size; currentAmount is what remains open
        amount = self.safe_number(order, 'startAmount')
        remaining = self.safe_number(order, 'currentAmount')
        postOnly = self.safe_value(order, 'postOnly')
        return self.safe_order({
            'id': self.safe_string(order, 'id'),
            'clientOrderId': None,
            'info': order,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'status': None,
            'symbol': symbol,
            'type': self.safe_string(order, 'mode'),
            'timeInForce': None,
            'postOnly': postOnly,
            'side': self.safe_string_lower(order, 'offerType'),
            'price': self.safe_number(order, 'rate'),
            'stopPrice': None,
            'amount': amount,
            'cost': None,
            'filled': None,
            'remaining': remaining,
            'average': None,
            'fee': None,
            'trades': None,
        })
    async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
        """Fetch the account's trade history. When `symbol` is given, it is
        sent as a one-element 'markets' filter and the parsed result is also
        filtered client-side; the request is JSON-encoded into a 'query'
        parameter as the v1.0.1 API expects."""
        await self.load_markets()
        request = {}
        if symbol:
            markets = [self.market_id(symbol)]
            request['markets'] = markets
        query = {'query': self.json(self.extend(request, params))}
        response = await self.v1_01PrivateGetTradingHistoryTransactions(query)
        #
        #     {
        #         status: 'Ok',
        #         totalRows: '67',
        #         items: [
        #             {
        #                 id: 'b54659a0-51b5-42a0-80eb-2ac5357ccee2',
        #                 market: 'BTC-EUR',
        #                 time: '1541697096247',
        #                 amount: '0.00003',
        #                 rate: '4341.44',
        #                 initializedBy: 'Sell',
        #                 wasTaker: False,
        #                 userAction: 'Buy',
        #                 offerId: 'bd19804a-6f89-4a69-adb8-eb078900d006',
        #                 commissionValue: null
        #             },
        #         ]
        #     }
        #
        items = self.safe_value(response, 'items')
        result = self.parse_trades(items, None, since, limit)
        if symbol is None:
            return result
        return self.filter_by_symbol(result, symbol)
    async def fetch_balance(self, params={}):
        """Fetch account balances and map them to ccxt's unified balance
        structure ('free'/'used' per currency code).

        Raises ExchangeError when the response carries no 'balances' array.
        """
        await self.load_markets()
        response = await self.v1_01PrivateGetBalancesBITBAYBalance(params)
        balances = self.safe_value(response, 'balances')
        if balances is None:
            raise ExchangeError(self.id + ' empty balance response ' + self.json(response))
        result = {'info': response}
        for i in range(0, len(balances)):
            balance = balances[i]
            currencyId = self.safe_string(balance, 'currency')
            code = self.safe_currency_code(currencyId)
            account = self.account()
            # lockedFunds -> 'used', availableFunds -> 'free'; totals are
            # derived by parse_balance
            account['used'] = self.safe_string(balance, 'lockedFunds')
            account['free'] = self.safe_string(balance, 'availableFunds')
            result[code] = account
        return self.parse_balance(result, False)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
request = {
'id': self.market_id(symbol),
}
orderbook = await self.publicGetIdOrderbook(self.extend(request, params))
return self.parse_order_book(orderbook, symbol)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
request = {
'id': self.market_id(symbol),
}
ticker = await self.publicGetIdTicker(self.extend(request, params))
timestamp = self.milliseconds()
baseVolume = self.safe_number(ticker, 'volume')
vwap = self.safe_number(ticker, 'vwap')
quoteVolume = None
if baseVolume is not None and vwap is not None:
quoteVolume = baseVolume * vwap
last = self.safe_number(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'max'),
'low': self.safe_number(ticker, 'min'),
'bid': self.safe_number(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_number(ticker, 'ask'),
'askVolume': None,
'vwap': vwap,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': self.safe_number(ticker, 'average'),
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
async def fetch_ledger(self, code=None, since=None, limit=None, params={}):
balanceCurrencies = []
if code is not None:
currency = self.currency(code)
balanceCurrencies.append(currency['id'])
request = {
'balanceCurrencies': balanceCurrencies,
}
if since is not None:
request['fromTime'] = since
if limit is not None:
request['limit'] = limit
request = self.extend(request, params)
response = await self.v1_01PrivateGetBalancesBITBAYHistory({'query': self.json(request)})
items = response['items']
return self.parse_ledger(items, None, since, limit)
    def parse_ledger_entry(self, item, currency=None):
        """Parse a single raw ledger item into a unified ledger entry structure.

        All entry types share the same raw shape(observed types: FUNDS_MIGRATION,
        CREATE_BALANCE, BITCOIN_GOLD_FORK, ADD_FUNDS, TRANSACTION_PRE_LOCKING,
        TRANSACTION_POST_OUTCOME, TRANSACTION_POST_INCOME,
        TRANSACTION_COMMISSION_OUTCOME, TRANSACTION_COMMISSION_RETURN,
        TRANSACTION_OFFER_COMPLETED_RETURN, TRANSACTION_OFFER_ABORTED_RETURN,
        WITHDRAWAL_LOCK_FUNDS, WITHDRAWAL_SUBTRACT_FUNDS, WITHDRAWAL_UNLOCK_FUNDS):

            {
                "historyId": "84ea7a29-7da5-4de5-b0c0-871e83cad765",
                "balance": {
                    "id": "821ec166-cb88-4521-916c-f4eb44db98df",
                    "currency": "LTC",
                    "type": "CRYPTO",
                    "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
                    "name": "LTC"
                },
                "detailId": null,
                "time": 1506128252968,
                "type": "FUNDS_MIGRATION",
                "value": 0.0009957,
                "fundsBefore": {"total": 0, "available": 0, "locked": 0},
                "fundsAfter": {"total": 0.0009957, "available": 0.0009957, "locked": 0},
                "change": {"total": 0.0009957, "available": 0.0009957, "locked": 0}
            }

        The signed change in 'change.total' determines direction; the absolute
        value becomes the unified 'amount'.
        """
        timestamp = self.safe_integer(item, 'time')
        balance = self.safe_value(item, 'balance', {})
        currencyId = self.safe_string(balance, 'currency')
        code = self.safe_currency_code(currencyId)
        change = self.safe_value(item, 'change', {})
        # NOTE(review): if 'change.total' is missing, amount is None and the
        # comparison below would raise a TypeError — assumes it is always present
        amount = self.safe_number(change, 'total')
        direction = 'in'
        if amount < 0:
            direction = 'out'
            amount = -amount
        id = self.safe_string(item, 'historyId')
        # there are 2 undocumented api calls: (v1_01PrivateGetPaymentsDepositDetailId and v1_01PrivateGetPaymentsWithdrawalDetailId)
        # that can be used to enrich the transfers with txid, address etc(you need to use info.detailId as a parameter)
        referenceId = self.safe_string(item, 'detailId')
        type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
        fundsBefore = self.safe_value(item, 'fundsBefore', {})
        before = self.safe_number(fundsBefore, 'total')
        fundsAfter = self.safe_value(item, 'fundsAfter', {})
        after = self.safe_number(fundsAfter, 'total')
        return {
            'info': item,
            'id': id,
            'direction': direction,
            'account': None,
            'referenceId': referenceId,
            'referenceAccount': None,
            'type': type,
            'currency': code,
            'amount': amount,
            'before': before,
            'after': after,
            'status': 'ok',
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'fee': None,
        }
def parse_ledger_entry_type(self, type):
types = {
'ADD_FUNDS': 'transaction',
'BITCOIN_GOLD_FORK': 'transaction',
'CREATE_BALANCE': 'transaction',
'FUNDS_MIGRATION': 'transaction',
'WITHDRAWAL_LOCK_FUNDS': 'transaction',
'WITHDRAWAL_SUBTRACT_FUNDS': 'transaction',
'WITHDRAWAL_UNLOCK_FUNDS': 'transaction',
'TRANSACTION_COMMISSION_OUTCOME': 'fee',
'TRANSACTION_COMMISSION_RETURN': 'fee',
'TRANSACTION_OFFER_ABORTED_RETURN': 'trade',
'TRANSACTION_OFFER_COMPLETED_RETURN': 'trade',
'TRANSACTION_POST_INCOME': 'trade',
'TRANSACTION_POST_OUTCOME': 'trade',
'TRANSACTION_PRE_LOCKING': 'trade',
}
return self.safe_string(types, type, type)
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# '1582399800000',
# {
# o: '0.0001428',
# c: '0.0001428',
# h: '0.0001428',
# l: '0.0001428',
# v: '4',
# co: '1'
# }
# ]
#
first = self.safe_value(ohlcv, 1, {})
return [
self.safe_integer(ohlcv, 0),
self.safe_number(first, 'o'),
self.safe_number(first, 'h'),
self.safe_number(first, 'l'),
self.safe_number(first, 'c'),
self.safe_number(first, 'v'),
]
    async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Fetch OHLCV candles for a market.

        The endpoint requires an explicit [from, to] window in milliseconds; it is
        derived from `since`/`limit`(window anchored at `since` when given,
        otherwise ending now), defaulting to 100 candles.
        """
        await self.load_markets()
        market = self.market(symbol)
        # the candle endpoint uses 'BASE-QUOTE' ids, unlike the v0 public API
        tradingSymbol = market['baseId'] + '-' + market['quoteId']
        request = {
            'symbol': tradingSymbol,
            'resolution': self.timeframes[timeframe],
            # 'from': 1574709092000,  # unix timestamp in milliseconds, required
            # 'to': 1574709092000,  # unix timestamp in milliseconds, required
        }
        if limit is None:
            limit = 100
        duration = self.parse_timeframe(timeframe)  # timeframe length in seconds
        timerange = limit * duration * 1000  # window size in milliseconds
        if since is None:
            # no start given: take the most recent `limit` candles
            request['to'] = self.milliseconds()
            request['from'] = request['to'] - timerange
        else:
            # start given: take `limit` candles forward from `since`
            request['from'] = int(since)
            request['to'] = self.sum(request['from'], timerange)
        response = await self.v1_01PublicGetTradingCandleHistorySymbolResolution(self.extend(request, params))
        #
        # {
        #     "status":"Ok",
        #     "items":[
        #         ["1591503060000",{"o":"0.02509572","c":"0.02509438","h":"0.02509664","l":"0.02509438","v":"0.02082165","co":"17"}],
        #         ["1591503120000",{"o":"0.02509606","c":"0.02509515","h":"0.02509606","l":"0.02509487","v":"0.04971703","co":"13"}],
        #         ["1591503180000",{"o":"0.02509532","c":"0.02509589","h":"0.02509589","l":"0.02509454","v":"0.01332236","co":"7"}],
        #     ]
        # }
        #
        items = self.safe_value(response, 'items', [])
        return self.parse_ohlcvs(items, market, timeframe, since, limit)
    def parse_trade(self, trade, market=None):
        """Parse a raw trade(from createOrder, fetchMyTrades or fetchTrades) into
        a unified trade structure.

        The three raw shapes use different field names, hence the safe_*_2 lookups.
        """
        #
        # createOrder trades
        #
        #     {
        #         "rate": "0.02195928",
        #         "amount": "0.00167952"
        #     }
        #
        # fetchMyTrades(private)
        #
        #     {
        #         amount: "0.29285199",
        #         commissionValue: "0.00125927",
        #         id: "11c8203a-a267-11e9-b698-0242ac110007",
        #         initializedBy: "Buy",
        #         market: "ETH-EUR",
        #         offerId: "11c82038-a267-11e9-b698-0242ac110007",
        #         rate: "277",
        #         time: "1562689917517",
        #         userAction: "Buy",
        #         wasTaker: True,
        #     }
        #
        # fetchTrades(public)
        #
        #     {
        #         id: 'df00b0da-e5e0-11e9-8c19-0242ac11000a',
        #         t: '1570108958831',
        #         a: '0.04776653',
        #         r: '0.02145854',
        #         ty: 'Sell'
        #     }
        #
        timestamp = self.safe_integer_2(trade, 'time', 't')
        userAction = self.safe_string(trade, 'userAction')
        # NOTE(review): public trades have no 'userAction', so they all fall into
        # 'sell' here — the public 'ty' field is not consulted; confirm intended
        side = 'buy' if (userAction == 'Buy') else 'sell'
        wasTaker = self.safe_value(trade, 'wasTaker')
        takerOrMaker = None
        if wasTaker is not None:
            takerOrMaker = 'taker' if wasTaker else 'maker'
        # cost is computed with string-precise arithmetic to avoid float error
        priceString = self.safe_string_2(trade, 'rate', 'r')
        amountString = self.safe_string_2(trade, 'amount', 'a')
        price = self.parse_number(priceString)
        amount = self.parse_number(amountString)
        cost = self.parse_number(Precise.string_mul(priceString, amountString))
        feeCost = self.safe_number(trade, 'commissionValue')
        marketId = self.safe_string(trade, 'market')
        market = self.safe_market(marketId, market, '-')
        symbol = market['symbol']
        fee = None
        if feeCost is not None:
            # the commission is charged in the currency received by the user
            feeCcy = market['base'] if (side == 'buy') else market['quote']
            fee = {
                'currency': feeCcy,
                'cost': feeCost,
            }
        order = self.safe_string(trade, 'offerId')
        # todo: check self logic
        # NOTE(review): any non-empty offerId string is truthy, so this always
        # yields 'limit' when an offerId is present — verify against the API
        type = None
        if order is not None:
            type = 'limit' if order else 'market'
        return {
            'id': self.safe_string(trade, 'id'),
            'order': order,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'type': type,
            'side': side,
            'price': price,
            'amount': amount,
            'cost': cost,
            'takerOrMaker': takerOrMaker,
            'fee': fee,
            'info': trade,
        }
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
tradingSymbol = market['baseId'] + '-' + market['quoteId']
request = {
'symbol': tradingSymbol,
}
if since is not None:
request['fromTime'] = since - 1 # result does not include exactly `since` time therefore decrease by 1
if limit is not None:
request['limit'] = limit # default - 10, max - 300
response = await self.v1_01PublicGetTradingTransactionsSymbol(self.extend(request, params))
items = self.safe_value(response, 'items')
return self.parse_trades(items, market, since, limit)
    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place an order and build a unified order structure from the response.

        :param str symbol: unified market symbol
        :param str type: 'limit' or 'market'(sent as the endpoint's 'mode')
        :param str side: 'buy' or 'sell'(sent as 'offerType')
        :param amount: order amount in base currency
        :param price: limit price(required for limit orders)
        :param dict params: extra parameters merged into the request
        """
        await self.load_markets()
        market = self.market(symbol)
        tradingSymbol = market['baseId'] + '-' + market['quoteId']
        request = {
            'symbol': tradingSymbol,
            'offerType': side,
            'amount': amount,
            'mode': type,
        }
        if type == 'limit':
            request['rate'] = price
            # NOTE(review): these float conversions only happen in the limit
            # branch, so for market orders `amount - filled` below relies on
            # `amount` already being numeric — confirm callers never pass strings
            price = float(price)
            amount = float(amount)
        response = await self.v1_01PrivatePostTradingOfferSymbol(self.extend(request, params))
        #
        # unfilled(open order)
        #
        #     {
        #         status: 'Ok',
        #         completed: False,  # can deduce status from here
        #         offerId: 'ce9cc72e-d61c-11e9-9248-0242ac110005',
        #         transactions: [],  # can deduce order info from here
        #     }
        #
        # filled(closed order)
        #
        #     {
        #         "status": "Ok",
        #         "offerId": "942a4a3e-e922-11e9-8c19-0242ac11000a",
        #         "completed": True,
        #         "transactions": [
        #             {
        #                 "rate": "0.02195928",
        #                 "amount": "0.00167952"
        #             },
        #             {
        #                 "rate": "0.02195928",
        #                 "amount": "0.00167952"
        #             },
        #             {
        #                 "rate": "0.02196207",
        #                 "amount": "0.27704177"
        #             }
        #         ]
        #     }
        #
        # partially-filled(open order)
        #
        #     {
        #         "status": "Ok",
        #         "offerId": "d0ebefab-f4d7-11e9-8c19-0242ac11000a",
        #         "completed": False,
        #         "transactions": [
        #             {
        #                 "rate": "0.02106404",
        #                 "amount": "0.0019625"
        #             },
        #             {
        #                 "rate": "0.02106404",
        #                 "amount": "0.0019625"
        #             },
        #             {
        #                 "rate": "0.02105901",
        #                 "amount": "0.00975256"
        #             }
        #         ]
        #     }
        #
        timestamp = self.milliseconds()  # the real timestamp is missing in the response
        id = self.safe_string(response, 'offerId')
        completed = self.safe_value(response, 'completed', False)
        status = 'closed' if completed else 'open'
        filled = 0
        cost = None
        transactions = self.safe_value(response, 'transactions')
        trades = None
        if transactions is not None:
            # accumulate filled amount and cost over the immediate fills,
            # back-filling fields the raw fills lack(symbol, side, orderId, ...)
            trades = self.parse_trades(transactions, market, None, None, {
                'timestamp': timestamp,
                'datetime': self.iso8601(timestamp),
                'symbol': symbol,
                'side': side,
                'type': type,
                'orderId': id,
            })
            cost = 0
            for i in range(0, len(trades)):
                filled = self.sum(filled, trades[i]['amount'])
                cost = self.sum(cost, trades[i]['cost'])
        remaining = amount - filled
        return {
            'id': id,
            'info': response,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'status': status,
            'symbol': symbol,
            'type': type,
            'side': side,
            'price': price,
            'amount': amount,
            'cost': cost,
            'filled': filled,
            'remaining': remaining,
            'average': None,
            'fee': None,
            'trades': trades,
            'clientOrderId': None,
        }
async def cancel_order(self, id, symbol=None, params={}):
side = self.safe_string(params, 'side')
if side is None:
raise ExchangeError(self.id + ' cancelOrder() requires a `side` parameter("buy" or "sell")')
price = self.safe_value(params, 'price')
if price is None:
raise ExchangeError(self.id + ' cancelOrder() requires a `price` parameter(float or string)')
await self.load_markets()
market = self.market(symbol)
tradingSymbol = market['baseId'] + '-' + market['quoteId']
request = {
'symbol': tradingSymbol,
'id': id,
'side': side,
'price': price,
}
# {status: 'Fail', errors: ['NOT_RECOGNIZED_OFFER_TYPE']} -- if required params are missing
# {status: 'Ok', errors: []}
return self.v1_01PrivateDeleteTradingOfferSymbolIdSidePrice(self.extend(request, params))
def is_fiat(self, currency):
fiatCurrencies = {
'USD': True,
'EUR': True,
'PLN': True,
}
return self.safe_value(fiatCurrencies, currency, False)
    async def withdraw(self, code, amount, address, tag=None, params={}):
        """Withdraw funds to an external address or bank account.

        Fiat currencies go through privatePostWithdraw, crypto through
        privatePostTransfer. The response carries no withdrawal id, so the
        returned 'id' is always None.

        :param str code: unified currency code
        :param amount: amount to withdraw
        :param str address: destination address(crypto) — bank details for fiat
            go in `params`
        :param str|None tag: destination tag, appended to the address as '?dt=<tag>'
        :param dict params: extra endpoint parameters(e.g. 'account' for fiat)
        """
        self.check_address(address)
        await self.load_markets()
        method = None
        currency = self.currency(code)
        request = {
            'currency': currency['id'],
            'quantity': amount,
        }
        if self.is_fiat(code):
            method = 'privatePostWithdraw'
            # request['account'] = params['account']  # they demand an account number
            # request['express'] = params['express']  # whatever it means, they don't explain
            # request['bic'] = ''
        else:
            method = 'privatePostTransfer'
            if tag is not None:
                # the destination tag rides along in the address query string
                address += '?dt=' + str(tag)
            request['address'] = address
        response = await getattr(self, method)(self.extend(request, params))
        return {
            'info': response,
            'id': None,
        }
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the signed request(url, method, body, headers) for one of the
        four API groups.

        - 'public'/'v1_01Public': unauthenticated GETs, params become the query string
        - 'v1_01Private': HMAC-SHA512 over apiKey + nonce(+ JSON body for POST),
          sent via Request-Timestamp / API-Key / API-Hash headers
        - legacy 'private': form-encoded body signed with HMAC-SHA512 of the body
        """
        url = self.implode_params(self.urls['api'][api], {'hostname': self.hostname})
        if api == 'public':
            query = self.omit(params, self.extract_params(path))
            # legacy public endpoints take a '.json' suffix
            url += '/' + self.implode_params(path, params) + '.json'
            if query:
                url += '?' + self.urlencode(query)
        elif api == 'v1_01Public':
            query = self.omit(params, self.extract_params(path))
            url += '/' + self.implode_params(path, params)
            if query:
                url += '?' + self.urlencode(query)
        elif api == 'v1_01Private':
            self.check_required_credentials()
            query = self.omit(params, self.extract_params(path))
            url += '/' + self.implode_params(path, params)
            nonce = str(self.milliseconds())
            payload = None
            if method != 'POST':
                # non-POST requests: params in the query string, sign key+nonce only
                if query:
                    url += '?' + self.urlencode(query)
                payload = self.apiKey + nonce
            elif body is None:
                # POST: JSON body is included in the signed payload
                body = self.json(query)
                payload = self.apiKey + nonce + body
            headers = {
                'Request-Timestamp': nonce,
                'Operation-Id': self.uuid(),
                'API-Key': self.apiKey,
                'API-Hash': self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha512),
                'Content-Type': 'application/json',
            }
        else:
            # legacy private API: sign the form-encoded body itself
            self.check_required_credentials()
            body = self.urlencode(self.extend({
                'method': path,
                'moment': self.nonce(),
            }, params))
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded',
                'API-Key': self.apiKey,
                'API-Hash': self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512),
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
        """Map API error payloads onto ccxt exception classes.

        Handles both error formats: the legacy integer 'code' field and the
        v1_01 {'status': 'Fail', 'errors': [...]} shape. Raises the matched
        exception from self.exceptions, or a generic ExchangeError.
        """
        if response is None:
            return  # fallback to default error handler
        if 'code' in response:
            #
            # bitbay returns the integer 'success': 1 key from their private API
            # or an integer 'code' value from 0 to 510 and an error message
            #
            #     {'success': 1, ...}
            #     {'code': 502, 'message': 'Invalid sign'}
            #     {'code': 0, 'message': 'offer funds not exceeding minimums'}
            #
            #     400 At least one parameter wasn't set
            #     401 Invalid order type
            #     402 No orders with specified currencies
            #     403 Invalid payment currency name
            #     404 Error. Wrong transaction type
            #     405 Order with self id doesn't exist
            #     406 No enough money or crypto
            #     408 Invalid currency name
            #     501 Invalid public key
            #     502 Invalid sign
            #     503 Invalid moment parameter. Request time doesn't match current server time
            #     504 Invalid method
            #     505 Key has no permission for self action
            #     506 Account locked. Please contact with customer service
            #     509 The BIC/SWIFT is required for self currency
            #     510 Invalid market name
            #
            code = self.safe_string(response, 'code')  # always an integer
            feedback = self.id + ' ' + body
            self.throw_exactly_matched_exception(self.exceptions, code, feedback)
            raise ExchangeError(feedback)
        elif 'status' in response:
            #
            #     {"status":"Fail","errors":["OFFER_FUNDS_NOT_EXCEEDING_MINIMUMS"]}
            #
            status = self.safe_string(response, 'status')
            if status == 'Fail':
                errors = self.safe_value(response, 'errors')
                feedback = self.id + ' ' + body
                # raise on the first error string with a known mapping
                for i in range(0, len(errors)):
                    error = errors[i]
                    self.throw_exactly_matched_exception(self.exceptions, error, feedback)
                raise ExchangeError(feedback)
| 41.266122 | 137 | 0.46369 |
rt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import OrderImmediatelyFillable
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
from ccxt.base.precise import Precise
class bitbay(Exchange):
    def describe(self):
        """Return the static exchange descriptor: capabilities, endpoints,
        fee schedules(tiered for fiat pairs), and error-code mappings,
        merged over the base Exchange defaults."""
        return self.deep_extend(super(bitbay, self).describe(), {
            'id': 'bitbay',
            'name': 'BitBay',
            'countries': ['MT', 'EU'],
            'rateLimit': 1000,
            # unified capability flags
            'has': {
                'cancelOrder': True,
                'CORS': True,
                'createOrder': True,
                'fetchBalance': True,
                'fetchLedger': True,
                'fetchMarkets': True,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrderBook': True,
                'fetchTicker': True,
                'fetchTrades': True,
                'withdraw': True,
            },
            # candle resolutions, values are in seconds as strings
            'timeframes': {
                '1m': '60',
                '3m': '180',
                '5m': '300',
                '15m': '900',
                '30m': '1800',
                '1h': '3600',
                '2h': '7200',
                '4h': '14400',
                '6h': '21600',
                '12h': '43200',
                '1d': '86400',
                '3d': '259200',
                '1w': '604800',
            },
            'hostname': 'bitbay.net',
            'urls': {
                'referral': 'https://auth.bitbay.net/ref/jHlbB4mIkdS1',
                'logo': 'https://user-images.githubusercontent.com/1294454/27766132-978a7bd8-5ece-11e7-9540-bc96d1e9bbb8.jpg',
                'www': 'https://bitbay.net',
                'api': {
                    'public': 'https://{hostname}/API/Public',
                    'private': 'https://{hostname}/API/Trading/tradingApi.php',
                    'v1_01Public': 'https://api.{hostname}/rest',
                    'v1_01Private': 'https://api.{hostname}/rest',
                },
                'doc': [
                    'https://bitbay.net/public-api',
                    'https://bitbay.net/en/private-api',
                    'https://bitbay.net/account/tab-api',
                    'https://github.com/BitBayNet/API',
                    'https://docs.bitbay.net/v1.0.1-en/reference',
                ],
                'support': 'https://support.bitbay.net',
                'fees': 'https://bitbay.net/en/fees',
            },
            # endpoint lists per API group; implicit methods are generated from these
            'api': {
                'public': {
                    'get': [
                        '{id}/all',
                        '{id}/market',
                        '{id}/orderbook',
                        '{id}/ticker',
                        '{id}/trades',
                    ],
                },
                'private': {
                    'post': [
                        'info',
                        'trade',
                        'cancel',
                        'orderbook',
                        'orders',
                        'transfer',
                        'withdraw',
                        'history',
                        'transactions',
                    ],
                },
                'v1_01Public': {
                    'get': [
                        'trading/ticker',
                        'trading/ticker/{symbol}',
                        'trading/stats',
                        'trading/orderbook/{symbol}',
                        'trading/transactions/{symbol}',
                        'trading/candle/history/{symbol}/{resolution}',
                    ],
                },
                'v1_01Private': {
                    'get': [
                        'payments/withdrawal/{detailId}',
                        'payments/deposit/{detailId}',
                        'trading/offer',
                        'trading/config/{symbol}',
                        'trading/history/transactions',
                        'balances/BITBAY/history',
                        'balances/BITBAY/balance',
                        'fiat_cantor/rate/{baseId}/{quoteId}',
                        'fiat_cantor/history',
                    ],
                    'post': [
                        'trading/offer/{symbol}',
                        'trading/config/{symbol}',
                        'balances/BITBAY/balance',
                        'balances/BITBAY/balance/transfer/{source}/{destination}',
                        'fiat_cantor/exchange',
                    ],
                    'delete': [
                        'trading/offer/{symbol}/{id}/{side}/{price}',
                    ],
                    'put': [
                        'balances/BITBAY/balance/{id}',
                    ],
                },
            },
            'fees': {
                # flat fees for crypto/crypto markets
                'trading': {
                    'maker': 0.0,
                    'taker': 0.1 / 100,
                    'percentage': True,
                    'tierBased': False,
                },
                # volume-tiered fees for markets involving a fiat currency
                'fiat': {
                    'maker': 0.30 / 100,
                    'taker': 0.43 / 100,
                    'percentage': True,
                    'tierBased': True,
                    'tiers': {
                        # [fee rate, minimum volume] pairs
                        'taker': [
                            [0.0043, 0],
                            [0.0042, 1250],
                            [0.0041, 3750],
                            [0.0040, 7500],
                            [0.0039, 10000],
                            [0.0038, 15000],
                            [0.0037, 20000],
                            [0.0036, 25000],
                            [0.0035, 37500],
                            [0.0034, 50000],
                            [0.0033, 75000],
                            [0.0032, 100000],
                            [0.0031, 150000],
                            [0.0030, 200000],
                            [0.0029, 250000],
                            [0.0028, 375000],
                            [0.0027, 500000],
                            [0.0026, 625000],
                            [0.0025, 875000],
                        ],
                        'maker': [
                            [0.0030, 0],
                            [0.0029, 1250],
                            [0.0028, 3750],
                            [0.0028, 7500],
                            [0.0027, 10000],
                            [0.0026, 15000],
                            [0.0025, 20000],
                            [0.0025, 25000],
                            [0.0024, 37500],
                            [0.0023, 50000],
                            [0.0023, 75000],
                            [0.0022, 100000],
                            [0.0021, 150000],
                            [0.0021, 200000],
                            [0.0020, 250000],
                            [0.0019, 375000],
                            [0.0018, 500000],
                            [0.0018, 625000],
                            [0.0017, 875000],
                        ],
                    },
                },
                'funding': {
                    'withdraw': {
                        'BTC': 0.0009,
                        'LTC': 0.005,
                        'ETH': 0.00126,
                        'LSK': 0.2,
                        'BCH': 0.0006,
                        'GAME': 0.005,
                        'DASH': 0.001,
                        'BTG': 0.0008,
                        'PLN': 4,
                        'EUR': 1.5,
                    },
                },
            },
            'options': {
                # markets quoted in(or based on) these use the tiered fiat fee schedule
                'fiatCurrencies': ['EUR', 'USD', 'GBP', 'PLN'],
            },
            'exceptions': {
                '400': ExchangeError,  # At least one parameter wasn't set
                '401': InvalidOrder,  # Invalid order type
                '402': InvalidOrder,  # No orders with specified currencies
                '403': InvalidOrder,  # Invalid payment currency name
                '404': InvalidOrder,  # Error. Wrong transaction type
                '405': InvalidOrder,  # Order with self id doesn't exist
                '406': InsufficientFunds,  # No enough money or crypto
                '408': InvalidOrder,  # Invalid currency name
                '501': AuthenticationError,  # Invalid public key
                '502': AuthenticationError,  # Invalid sign
                '503': InvalidNonce,  # Invalid moment parameter. Request time doesn't match current server time
                '504': ExchangeError,  # Invalid method
                '505': AuthenticationError,  # Key has no permission for self action
                '506': AccountSuspended,  # Account locked. Please contact with customer service
                # codes 507 and 508 are not specified in their docs
                '509': ExchangeError,  # The BIC/SWIFT is required for self currency
                '510': BadSymbol,  # Invalid market name
                'FUNDS_NOT_SUFFICIENT': InsufficientFunds,
                'OFFER_FUNDS_NOT_EXCEEDING_MINIMUMS': InvalidOrder,
                'OFFER_NOT_FOUND': OrderNotFound,
                'OFFER_WOULD_HAVE_BEEN_PARTIALLY_FILLED': OrderImmediatelyFillable,
                'ACTION_LIMIT_EXCEEDED': RateLimitExceeded,
                'UNDER_MAINTENANCE': OnMaintenance,
                'REQUEST_TIMESTAMP_TOO_OLD': InvalidNonce,
                'PERMISSIONS_NOT_SUFFICIENT': PermissionDenied,
            },
            'commonCurrencies': {
                'GGC': 'Global Game Coin',
            },
        })
    async def fetch_markets(self, params={}):
        """Fetch the list of markets from the ticker endpoint and return
        unified market structures. Markets involving a currency from
        options['fiatCurrencies'] get the tiered fiat fee schedule."""
        response = await self.v1_01PublicGetTradingTicker(params)
        fiatCurrencies = self.safe_value(self.options, 'fiatCurrencies', [])
        #
        # {
        #     status: 'Ok',
        #     items: {
        #         'BSV-USD': {
        #             market: {
        #                 code: 'BSV-USD',
        #                 first: {currency: 'BSV', minOffer: '0.00035', scale: 8},
        #                 second: {currency: 'USD', minOffer: '5', scale: 2}
        #             },
        #             time: '1557569762154',
        #             highestBid: '52.31',
        #             lowestAsk: '62.99',
        #             rate: '63',
        #             previousRate: '51.21',
        #         },
        #     },
        # }
        #
        result = []
        items = self.safe_value(response, 'items')
        keys = list(items.keys())
        for i in range(0, len(keys)):
            key = keys[i]
            item = items[key]
            market = self.safe_value(item, 'market', {})
            # 'first' is the base currency side, 'second' the quote side
            first = self.safe_value(market, 'first', {})
            second = self.safe_value(market, 'second', {})
            baseId = self.safe_string(first, 'currency')
            quoteId = self.safe_string(second, 'currency')
            id = baseId + quoteId
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            symbol = base + '/' + quote
            precision = {
                'amount': self.safe_integer(first, 'scale'),
                'price': self.safe_integer(second, 'scale'),
            }
            fees = self.safe_value(self.fees, 'trading', {})
            if self.in_array(base, fiatCurrencies) or self.in_array(quote, fiatCurrencies):
                fees = self.safe_value(self.fees, 'fiat', {})
            maker = self.safe_number(fees, 'maker')
            taker = self.safe_number(fees, 'taker')
            # todo: check that the limits have been interpreted correctly
            # todo: parse the fees page
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'precision': precision,
                'active': None,
                'maker': maker,
                'taker': taker,
                'limits': {
                    'amount': {
                        'min': self.safe_number(first, 'minOffer'),
                        'max': None,
                    },
                    'price': {
                        'min': None,
                        'max': None,
                    },
                    'cost': {
                        'min': self.safe_number(second, 'minOffer'),
                        'max': None,
                    },
                },
                'info': item,
            })
        return result
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
response = await self.v1_01PrivateGetTradingOffer(self.extend(request, params))
items = self.safe_value(response, 'items', [])
return self.parse_orders(items, None, since, limit, {'status': 'open'})
def parse_order(self, order, market=None):
#
# {
# market: 'ETH-EUR',
# offerType: 'Sell',
# id: '93d3657b-d616-11e9-9248-0242ac110005',
# currentAmount: '0.04',
# lockedAmount: '0.04',
# rate: '280',
# startAmount: '0.04',
# time: '1568372806924',
# postOnly: False,
# hidden: False,
# mode: 'limit',
# receivedAmount: '0.0',
# firstBalanceId: '5b816c3e-437c-4e43-9bef-47814ae7ebfc',
# secondBalanceId: 'ab43023b-4079-414c-b340-056e3430a3af'
# }
#
marketId = self.safe_string(order, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(order, 'time')
amount = self.safe_number(order, 'startAmount')
remaining = self.safe_number(order, 'currentAmount')
postOnly = self.safe_value(order, 'postOnly')
return self.safe_order({
'id': self.safe_string(order, 'id'),
'clientOrderId': None,
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': None,
'symbol': symbol,
'type': self.safe_string(order, 'mode'),
'timeInForce': None,
'postOnly': postOnly,
'side': self.safe_string_lower(order, 'offerType'),
'price': self.safe_number(order, 'rate'),
'stopPrice': None,
'amount': amount,
'cost': None,
'filled': None,
'remaining': remaining,
'average': None,
'fee': None,
'trades': None,
})
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
if symbol:
markets = [self.market_id(symbol)]
request['markets'] = markets
query = {'query': self.json(self.extend(request, params))}
response = await self.v1_01PrivateGetTradingHistoryTransactions(query)
#
# {
# status: 'Ok',
# totalRows: '67',
# items: [
# {
# id: 'b54659a0-51b5-42a0-80eb-2ac5357ccee2',
# market: 'BTC-EUR',
# time: '1541697096247',
# amount: '0.00003',
# rate: '4341.44',
# initializedBy: 'Sell',
# wasTaker: False,
# userAction: 'Buy',
# offerId: 'bd19804a-6f89-4a69-adb8-eb078900d006',
# commissionValue: null
# },
# ]
# }
#
items = self.safe_value(response, 'items')
result = self.parse_trades(items, None, since, limit)
if symbol is None:
return result
return self.filter_by_symbol(result, symbol)
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.v1_01PrivateGetBalancesBITBAYBalance(params)
balances = self.safe_value(response, 'balances')
if balances is None:
raise ExchangeError(self.id + ' empty balance response ' + self.json(response))
result = {'info': response}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['used'] = self.safe_string(balance, 'lockedFunds')
account['free'] = self.safe_string(balance, 'availableFunds')
result[code] = account
return self.parse_balance(result, False)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
request = {
'id': self.market_id(symbol),
}
orderbook = await self.publicGetIdOrderbook(self.extend(request, params))
return self.parse_order_book(orderbook, symbol)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
request = {
'id': self.market_id(symbol),
}
ticker = await self.publicGetIdTicker(self.extend(request, params))
timestamp = self.milliseconds()
baseVolume = self.safe_number(ticker, 'volume')
vwap = self.safe_number(ticker, 'vwap')
quoteVolume = None
if baseVolume is not None and vwap is not None:
quoteVolume = baseVolume * vwap
last = self.safe_number(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'max'),
'low': self.safe_number(ticker, 'min'),
'bid': self.safe_number(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_number(ticker, 'ask'),
'askVolume': None,
'vwap': vwap,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': self.safe_number(ticker, 'average'),
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
async def fetch_ledger(self, code=None, since=None, limit=None, params={}):
balanceCurrencies = []
if code is not None:
currency = self.currency(code)
balanceCurrencies.append(currency['id'])
request = {
'balanceCurrencies': balanceCurrencies,
}
if since is not None:
request['fromTime'] = since
if limit is not None:
request['limit'] = limit
request = self.extend(request, params)
response = await self.v1_01PrivateGetBalancesBITBAYHistory({'query': self.json(request)})
items = response['items']
return self.parse_ledger(items, None, since, limit)
def parse_ledger_entry(self, item, currency=None):
#
# FUNDS_MIGRATION
# {
# "historyId": "84ea7a29-7da5-4de5-b0c0-871e83cad765",
# "balance": {
# "id": "821ec166-cb88-4521-916c-f4eb44db98df",
# "currency": "LTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "LTC"
# },
# "detailId": null,
# "time": 1506128252968,
# "type": "FUNDS_MIGRATION",
# "value": 0.0009957,
# "fundsBefore": {"total": 0, "available": 0, "locked": 0},
# "fundsAfter": {"total": 0.0009957, "available": 0.0009957, "locked": 0},
# "change": {"total": 0.0009957, "available": 0.0009957, "locked": 0}
# }
#
# CREATE_BALANCE
# {
# "historyId": "d0fabd8d-9107-4b5e-b9a6-3cab8af70d49",
# "balance": {
# "id": "653ffcf2-3037-4ebe-8e13-d5ea1a01d60d",
# "currency": "BTG",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTG"
# },
# "detailId": null,
# "time": 1508895244751,
# "type": "CREATE_BALANCE",
# "value": 0,
# "fundsBefore": {"total": null, "available": null, "locked": null},
# "fundsAfter": {"total": 0, "available": 0, "locked": 0},
# "change": {"total": 0, "available": 0, "locked": 0}
# }
#
# BITCOIN_GOLD_FORK
# {
# "historyId": "2b4d52d3-611c-473d-b92c-8a8d87a24e41",
# "balance": {
# "id": "653ffcf2-3037-4ebe-8e13-d5ea1a01d60d",
# "currency": "BTG",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTG"
# },
# "detailId": null,
# "time": 1508895244778,
# "type": "BITCOIN_GOLD_FORK",
# "value": 0.00453512,
# "fundsBefore": {"total": 0, "available": 0, "locked": 0},
# "fundsAfter": {"total": 0.00453512, "available": 0.00453512, "locked": 0},
# "change": {"total": 0.00453512, "available": 0.00453512, "locked": 0}
# }
#
# ADD_FUNDS
# {
# "historyId": "3158236d-dae5-4a5d-81af-c1fa4af340fb",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": "8e83a960-e737-4380-b8bb-259d6e236faa",
# "time": 1520631178816,
# "type": "ADD_FUNDS",
# "value": 0.628405,
# "fundsBefore": {"total": 0.00453512, "available": 0.00453512, "locked": 0},
# "fundsAfter": {"total": 0.63294012, "available": 0.63294012, "locked": 0},
# "change": {"total": 0.628405, "available": 0.628405, "locked": 0}
# }
#
# TRANSACTION_PRE_LOCKING
# {
# "historyId": "e7d19e0f-03b3-46a8-bc72-dde72cc24ead",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": null,
# "time": 1520706403868,
# "type": "TRANSACTION_PRE_LOCKING",
# "value": -0.1,
# "fundsBefore": {"total": 0.63294012, "available": 0.63294012, "locked": 0},
# "fundsAfter": {"total": 0.63294012, "available": 0.53294012, "locked": 0.1},
# "change": {"total": 0, "available": -0.1, "locked": 0.1}
# }
#
# TRANSACTION_POST_OUTCOME
# {
# "historyId": "c4010825-231d-4a9c-8e46-37cde1f7b63c",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": "bf2876bc-b545-4503-96c8-ef4de8233876",
# "time": 1520706404032,
# "type": "TRANSACTION_POST_OUTCOME",
# "value": -0.01771415,
# "fundsBefore": {"total": 0.63294012, "available": 0.53294012, "locked": 0.1},
# "fundsAfter": {"total": 0.61522597, "available": 0.53294012, "locked": 0.08228585},
# "change": {"total": -0.01771415, "available": 0, "locked": -0.01771415}
# }
#
# TRANSACTION_POST_INCOME
# {
# "historyId": "7f18b7af-b676-4125-84fd-042e683046f6",
# "balance": {
# "id": "ab43023b-4079-414c-b340-056e3430a3af",
# "currency": "EUR",
# "type": "FIAT",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "EUR"
# },
# "detailId": "f5fcb274-0cc7-4385-b2d3-bae2756e701f",
# "time": 1520706404035,
# "type": "TRANSACTION_POST_INCOME",
# "value": 628.78,
# "fundsBefore": {"total": 0, "available": 0, "locked": 0},
# "fundsAfter": {"total": 628.78, "available": 628.78, "locked": 0},
# "change": {"total": 628.78, "available": 628.78, "locked": 0}
# }
#
# TRANSACTION_COMMISSION_OUTCOME
# {
# "historyId": "843177fa-61bc-4cbf-8be5-b029d856c93b",
# "balance": {
# "id": "ab43023b-4079-414c-b340-056e3430a3af",
# "currency": "EUR",
# "type": "FIAT",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "EUR"
# },
# "detailId": "f5fcb274-0cc7-4385-b2d3-bae2756e701f",
# "time": 1520706404050,
# "type": "TRANSACTION_COMMISSION_OUTCOME",
# "value": -2.71,
# "fundsBefore": {"total": 766.06, "available": 766.06, "locked": 0},
# "fundsAfter": {"total": 763.35,"available": 763.35, "locked": 0},
# "change": {"total": -2.71, "available": -2.71, "locked": 0}
# }
#
# TRANSACTION_OFFER_COMPLETED_RETURN
# {
# "historyId": "cac69b04-c518-4dc5-9d86-e76e91f2e1d2",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": null,
# "time": 1520714886425,
# "type": "TRANSACTION_OFFER_COMPLETED_RETURN",
# "value": 0.00000196,
# "fundsBefore": {"total": 0.00941208, "available": 0.00941012, "locked": 0.00000196},
# "fundsAfter": {"total": 0.00941208, "available": 0.00941208, "locked": 0},
# "change": {"total": 0, "available": 0.00000196, "locked": -0.00000196}
# }
#
# WITHDRAWAL_LOCK_FUNDS
# {
# "historyId": "03de2271-66ab-4960-a786-87ab9551fc14",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": "6ad3dc72-1d6d-4ec2-8436-ca43f85a38a6",
# "time": 1522245654481,
# "type": "WITHDRAWAL_LOCK_FUNDS",
# "value": -0.8,
# "fundsBefore": {"total": 0.8, "available": 0.8, "locked": 0},
# "fundsAfter": {"total": 0.8, "available": 0, "locked": 0.8},
# "change": {"total": 0, "available": -0.8, "locked": 0.8}
# }
#
# WITHDRAWAL_SUBTRACT_FUNDS
# {
# "historyId": "b0308c89-5288-438d-a306-c6448b1a266d",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": "6ad3dc72-1d6d-4ec2-8436-ca43f85a38a6",
# "time": 1522246526186,
# "type": "WITHDRAWAL_SUBTRACT_FUNDS",
# "value": -0.8,
# "fundsBefore": {"total": 0.8, "available": 0, "locked": 0.8},
# "fundsAfter": {"total": 0, "available": 0, "locked": 0},
# "change": {"total": -0.8, "available": 0, "locked": -0.8}
# }
#
# TRANSACTION_OFFER_ABORTED_RETURN
# {
# "historyId": "b1a3c075-d403-4e05-8f32-40512cdd88c0",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": null,
# "time": 1522512298662,
# "type": "TRANSACTION_OFFER_ABORTED_RETURN",
# "value": 0.0564931,
# "fundsBefore": {"total": 0.44951311, "available": 0.39302001, "locked": 0.0564931},
# "fundsAfter": {"total": 0.44951311, "available": 0.44951311, "locked": 0},
# "change": {"total": 0, "available": 0.0564931, "locked": -0.0564931}
# }
#
# WITHDRAWAL_UNLOCK_FUNDS
# {
# "historyId": "0ed569a2-c330-482e-bb89-4cb553fb5b11",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": "0c7be256-c336-4111-bee7-4eb22e339700",
# "time": 1527866360785,
# "type": "WITHDRAWAL_UNLOCK_FUNDS",
# "value": 0.05045,
# "fundsBefore": {"total": 0.86001578, "available": 0.80956578, "locked": 0.05045},
# "fundsAfter": {"total": 0.86001578, "available": 0.86001578, "locked": 0},
# "change": {"total": 0, "available": 0.05045, "locked": -0.05045}
# }
#
# TRANSACTION_COMMISSION_RETURN
# {
# "historyId": "07c89c27-46f1-4d7a-8518-b73798bf168a",
# "balance": {
# "id": "ab43023b-4079-414c-b340-056e3430a3af",
# "currency": "EUR",
# "type": "FIAT",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "EUR"
# },
# "detailId": null,
# "time": 1528304043063,
# "type": "TRANSACTION_COMMISSION_RETURN",
# "value": 0.6,
# "fundsBefore": {"total": 0, "available": 0, "locked": 0},
# "fundsAfter": {"total": 0.6, "available": 0.6, "locked": 0},
# "change": {"total": 0.6, "available": 0.6, "locked": 0}
# }
#
timestamp = self.safe_integer(item, 'time')
balance = self.safe_value(item, 'balance', {})
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
change = self.safe_value(item, 'change', {})
amount = self.safe_number(change, 'total')
direction = 'in'
if amount < 0:
direction = 'out'
amount = -amount
id = self.safe_string(item, 'historyId')
# there are 2 undocumented api calls: (v1_01PrivateGetPaymentsDepositDetailId and v1_01PrivateGetPaymentsWithdrawalDetailId)
# that can be used to enrich the transfers with txid, address etc(you need to use info.detailId as a parameter)
referenceId = self.safe_string(item, 'detailId')
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
fundsBefore = self.safe_value(item, 'fundsBefore', {})
before = self.safe_number(fundsBefore, 'total')
fundsAfter = self.safe_value(item, 'fundsAfter', {})
after = self.safe_number(fundsAfter, 'total')
return {
'info': item,
'id': id,
'direction': direction,
'account': None,
'referenceId': referenceId,
'referenceAccount': None,
'type': type,
'currency': code,
'amount': amount,
'before': before,
'after': after,
'status': 'ok',
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': None,
}
def parse_ledger_entry_type(self, type):
types = {
'ADD_FUNDS': 'transaction',
'BITCOIN_GOLD_FORK': 'transaction',
'CREATE_BALANCE': 'transaction',
'FUNDS_MIGRATION': 'transaction',
'WITHDRAWAL_LOCK_FUNDS': 'transaction',
'WITHDRAWAL_SUBTRACT_FUNDS': 'transaction',
'WITHDRAWAL_UNLOCK_FUNDS': 'transaction',
'TRANSACTION_COMMISSION_OUTCOME': 'fee',
'TRANSACTION_COMMISSION_RETURN': 'fee',
'TRANSACTION_OFFER_ABORTED_RETURN': 'trade',
'TRANSACTION_OFFER_COMPLETED_RETURN': 'trade',
'TRANSACTION_POST_INCOME': 'trade',
'TRANSACTION_POST_OUTCOME': 'trade',
'TRANSACTION_PRE_LOCKING': 'trade',
}
return self.safe_string(types, type, type)
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# '1582399800000',
# {
# o: '0.0001428',
# c: '0.0001428',
# h: '0.0001428',
# l: '0.0001428',
# v: '4',
# co: '1'
# }
# ]
#
first = self.safe_value(ohlcv, 1, {})
return [
self.safe_integer(ohlcv, 0),
self.safe_number(first, 'o'),
self.safe_number(first, 'h'),
self.safe_number(first, 'l'),
self.safe_number(first, 'c'),
self.safe_number(first, 'v'),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
tradingSymbol = market['baseId'] + '-' + market['quoteId']
request = {
'symbol': tradingSymbol,
'resolution': self.timeframes[timeframe],
# 'from': 1574709092000, # unix timestamp in milliseconds, required
# 'to': 1574709092000, # unix timestamp in milliseconds, required
}
if limit is None:
limit = 100
duration = self.parse_timeframe(timeframe)
timerange = limit * duration * 1000
if since is None:
request['to'] = self.milliseconds()
request['from'] = request['to'] - timerange
else:
request['from'] = int(since)
request['to'] = self.sum(request['from'], timerange)
response = await self.v1_01PublicGetTradingCandleHistorySymbolResolution(self.extend(request, params))
#
# {
# "status":"Ok",
# "items":[
# ["1591503060000",{"o":"0.02509572","c":"0.02509438","h":"0.02509664","l":"0.02509438","v":"0.02082165","co":"17"}],
# ["1591503120000",{"o":"0.02509606","c":"0.02509515","h":"0.02509606","l":"0.02509487","v":"0.04971703","co":"13"}],
# ["1591503180000",{"o":"0.02509532","c":"0.02509589","h":"0.02509589","l":"0.02509454","v":"0.01332236","co":"7"}],
# ]
# }
#
items = self.safe_value(response, 'items', [])
return self.parse_ohlcvs(items, market, timeframe, since, limit)
def parse_trade(self, trade, market=None):
#
# createOrder trades
#
# {
# "rate": "0.02195928",
# "amount": "0.00167952"
# }
#
# fetchMyTrades(private)
#
# {
# amount: "0.29285199",
# commissionValue: "0.00125927",
# id: "11c8203a-a267-11e9-b698-0242ac110007",
# initializedBy: "Buy",
# market: "ETH-EUR",
# offerId: "11c82038-a267-11e9-b698-0242ac110007",
# rate: "277",
# time: "1562689917517",
# userAction: "Buy",
# wasTaker: True,
# }
#
# fetchTrades(public)
#
# {
# id: 'df00b0da-e5e0-11e9-8c19-0242ac11000a',
# t: '1570108958831',
# a: '0.04776653',
# r: '0.02145854',
# ty: 'Sell'
# }
#
timestamp = self.safe_integer_2(trade, 'time', 't')
userAction = self.safe_string(trade, 'userAction')
side = 'buy' if (userAction == 'Buy') else 'sell'
wasTaker = self.safe_value(trade, 'wasTaker')
takerOrMaker = None
if wasTaker is not None:
takerOrMaker = 'taker' if wasTaker else 'maker'
priceString = self.safe_string_2(trade, 'rate', 'r')
amountString = self.safe_string_2(trade, 'amount', 'a')
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
feeCost = self.safe_number(trade, 'commissionValue')
marketId = self.safe_string(trade, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
fee = None
if feeCost is not None:
feeCcy = market['base'] if (side == 'buy') else market['quote']
fee = {
'currency': feeCcy,
'cost': feeCost,
}
order = self.safe_string(trade, 'offerId')
# todo: check self logic
type = None
if order is not None:
type = 'limit' if order else 'market'
return {
'id': self.safe_string(trade, 'id'),
'order': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'takerOrMaker': takerOrMaker,
'fee': fee,
'info': trade,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
tradingSymbol = market['baseId'] + '-' + market['quoteId']
request = {
'symbol': tradingSymbol,
}
if since is not None:
request['fromTime'] = since - 1 # result does not include exactly `since` time therefore decrease by 1
if limit is not None:
request['limit'] = limit # default - 10, max - 300
response = await self.v1_01PublicGetTradingTransactionsSymbol(self.extend(request, params))
items = self.safe_value(response, 'items')
return self.parse_trades(items, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
tradingSymbol = market['baseId'] + '-' + market['quoteId']
request = {
'symbol': tradingSymbol,
'offerType': side,
'amount': amount,
'mode': type,
}
if type == 'limit':
request['rate'] = price
price = float(price)
amount = float(amount)
response = await self.v1_01PrivatePostTradingOfferSymbol(self.extend(request, params))
#
# unfilled(open order)
#
# {
# status: 'Ok',
# completed: False, # can deduce status from here
# offerId: 'ce9cc72e-d61c-11e9-9248-0242ac110005',
# transactions: [], # can deduce order info from here
# }
#
# filled(closed order)
#
# {
# "status": "Ok",
# "offerId": "942a4a3e-e922-11e9-8c19-0242ac11000a",
# "completed": True,
# "transactions": [
# {
# "rate": "0.02195928",
# "amount": "0.00167952"
# },
# {
# "rate": "0.02195928",
# "amount": "0.00167952"
# },
# {
# "rate": "0.02196207",
# "amount": "0.27704177"
# }
# ]
# }
#
# partially-filled(open order)
#
# {
# "status": "Ok",
# "offerId": "d0ebefab-f4d7-11e9-8c19-0242ac11000a",
# "completed": False,
# "transactions": [
# {
# "rate": "0.02106404",
# "amount": "0.0019625"
# },
# {
# "rate": "0.02106404",
# "amount": "0.0019625"
# },
# {
# "rate": "0.02105901",
# "amount": "0.00975256"
# }
# ]
# }
#
timestamp = self.milliseconds() # the real timestamp is missing in the response
id = self.safe_string(response, 'offerId')
completed = self.safe_value(response, 'completed', False)
status = 'closed' if completed else 'open'
filled = 0
cost = None
transactions = self.safe_value(response, 'transactions')
trades = None
if transactions is not None:
trades = self.parse_trades(transactions, market, None, None, {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'side': side,
'type': type,
'orderId': id,
})
cost = 0
for i in range(0, len(trades)):
filled = self.sum(filled, trades[i]['amount'])
cost = self.sum(cost, trades[i]['cost'])
remaining = amount - filled
return {
'id': id,
'info': response,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'filled': filled,
'remaining': remaining,
'average': None,
'fee': None,
'trades': trades,
'clientOrderId': None,
}
async def cancel_order(self, id, symbol=None, params={}):
side = self.safe_string(params, 'side')
if side is None:
raise ExchangeError(self.id + ' cancelOrder() requires a `side` parameter("buy" or "sell")')
price = self.safe_value(params, 'price')
if price is None:
raise ExchangeError(self.id + ' cancelOrder() requires a `price` parameter(float or string)')
await self.load_markets()
market = self.market(symbol)
tradingSymbol = market['baseId'] + '-' + market['quoteId']
request = {
'symbol': tradingSymbol,
'id': id,
'side': side,
'price': price,
}
# {status: 'Fail', errors: ['NOT_RECOGNIZED_OFFER_TYPE']} -- if required params are missing
# {status: 'Ok', errors: []}
return self.v1_01PrivateDeleteTradingOfferSymbolIdSidePrice(self.extend(request, params))
def is_fiat(self, currency):
fiatCurrencies = {
'USD': True,
'EUR': True,
'PLN': True,
}
return self.safe_value(fiatCurrencies, currency, False)
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
method = None
currency = self.currency(code)
request = {
'currency': currency['id'],
'quantity': amount,
}
if self.is_fiat(code):
method = 'privatePostWithdraw'
# request['account'] = params['account'] # they demand an account number
# request['express'] = params['express'] # whatever it means, they don't explain
else:
method = 'privatePostTransfer'
if tag is not None:
address += '?dt=' + str(tag)
request['address'] = address
response = await getattr(self, method)(self.extend(request, params))
return {
'info': response,
'id': None,
}
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the url/method/body/headers for a request.

        Four API flavors are handled: legacy 'public' (path + '.json'),
        'v1_01Public' (plain REST), 'v1_01Private' (HMAC-SHA512 over
        apiKey + timestamp [+ JSON body]) and the legacy private API
        (form-encoded body, HMAC-SHA512 over the body).
        """
        url = self.implode_params(self.urls['api'][api], {'hostname': self.hostname})
        if api == 'public':
            # legacy public API: path params are imploded and '.json' appended
            query = self.omit(params, self.extract_params(path))
            url += '/' + self.implode_params(path, params) + '.json'
            if query:
                url += '?' + self.urlencode(query)
        elif api == 'v1_01Public':
            query = self.omit(params, self.extract_params(path))
            url += '/' + self.implode_params(path, params)
            if query:
                url += '?' + self.urlencode(query)
        elif api == 'v1_01Private':
            self.check_required_credentials()
            query = self.omit(params, self.extract_params(path))
            url += '/' + self.implode_params(path, params)
            # millisecond timestamp doubles as the request nonce
            nonce = str(self.milliseconds())
            payload = None
            if method != 'POST':
                if query:
                    url += '?' + self.urlencode(query)
                payload = self.apiKey + nonce
            elif body is None:
                body = self.json(query)
                payload = self.apiKey + nonce + body
            # NOTE(review): a POST arriving here with a caller-supplied body
            # leaves payload as None, which would fail in self.encode below --
            # confirm that path is unreachable
            headers = {
                'Request-Timestamp': nonce,
                'Operation-Id': self.uuid(),
                'API-Key': self.apiKey,
                'API-Hash': self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha512),
                'Content-Type': 'application/json',
            }
        else:
            # legacy private API: sign the form-encoded body itself
            self.check_required_credentials()
            body = self.urlencode(self.extend({
                'method': path,
                'moment': self.nonce(),
            }, params))
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded',
                'API-Key': self.apiKey,
                'API-Hash': self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512),
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
        """Inspect a decoded response and raise a mapped exception on error.

        Legacy endpoints signal failure with a numeric 'code'; v1.0.1
        endpoints use {'status': 'Fail', 'errors': [...]}. Returns None when
        the response is absent or shows no error.
        """
        if response is None:
            return
        if 'code' in response:
            # 401 Invalid order type
            # 402 No orders with specified currencies
            # 403 Invalid payment currency name
            # 404 Error. Wrong transaction type
            # 405 Order with self id doesn't exist
            # 504 Invalid method
            # 505 Key has no permission for self action
            # 506 Account locked. Please contact with customer service
            # 509 The BIC/SWIFT is required for self currency
            # 510 Invalid market name
            #
            code = self.safe_string(response, 'code')  # always an integer
            feedback = self.id + ' ' + body
            self.throw_exactly_matched_exception(self.exceptions, code, feedback)
            # fall back to a generic error when the code is not mapped
            raise ExchangeError(feedback)
        elif 'status' in response:
            #
            # {"status":"Fail","errors":["OFFER_FUNDS_NOT_EXCEEDING_MINIMUMS"]}
            #
            status = self.safe_string(response, 'status')
            if status == 'Fail':
                errors = self.safe_value(response, 'errors')
                feedback = self.id + ' ' + body
                # raise the first error that matches a known exception
                for i in range(0, len(errors)):
                    error = errors[i]
                    self.throw_exactly_matched_exception(self.exceptions, error, feedback)
                raise ExchangeError(feedback)
| true | true |
f717f7d7f8f771201fee15a195eb1be65208493e | 207 | py | Python | CodeHS/Unit 1/1.5/pancakes.py | nitrospam/APCSP2020 | 275f576036805d244c3244f3f3646951940c9575 | [
"MIT"
] | null | null | null | CodeHS/Unit 1/1.5/pancakes.py | nitrospam/APCSP2020 | 275f576036805d244c3244f3f3646951940c9575 | [
"MIT"
] | null | null | null | CodeHS/Unit 1/1.5/pancakes.py | nitrospam/APCSP2020 | 275f576036805d244c3244f3f3646951940c9575 | [
"MIT"
] | null | null | null | def place_3_balls():
put_ball()
put_ball()
put_ball()
def move_twice():
    # NOTE(review): despite the name this advances THREE squares, matching the
    # original body -- confirm the name/body mismatch is intentional.
    for _ in range(3):
        move()
# Main sequence: drop three balls, advance three squares, repeat three times,
# then take one final step.
place_3_balls()
move_twice()
place_3_balls()
move_twice()
place_3_balls()
move()
| 10.35 | 20 | 0.618357 | def place_3_balls():
put_ball()
put_ball()
put_ball()
def move_twice():
move()
move()
move()
place_3_balls()
move_twice()
place_3_balls()
move_twice()
place_3_balls()
move()
| true | true |
f717f87eae6eff378694a1f1173d6bf41dba6abe | 505 | py | Python | 66-plus-one/66-plus-one.py | yuzhengcuhk/MyLeetcodeRecord | bd516c6f2946b922da53e587fc186935c6a8819c | [
"MIT"
] | 3 | 2022-02-07T12:47:43.000Z | 2022-03-13T16:40:12.000Z | 66-plus-one/66-plus-one.py | yuzhengcuhk/MyLeetcodeRecord | bd516c6f2946b922da53e587fc186935c6a8819c | [
"MIT"
] | null | null | null | 66-plus-one/66-plus-one.py | yuzhengcuhk/MyLeetcodeRecord | bd516c6f2946b922da53e587fc186935c6a8819c | [
"MIT"
] | null | null | null | class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
cnt = len(digits)
if digits[cnt-1] != 9:
digits[cnt-1] = digits[cnt-1] + 1
return digits
else:
for i in range(0, len(digits)):
digits[i] = str(digits[i])
intdig = ''.join(digits)
intdig = int(intdig) + 1
result = []
for item in str(intdig):
result.append(int(item))
return result | 33.666667 | 54 | 0.463366 | class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
cnt = len(digits)
if digits[cnt-1] != 9:
digits[cnt-1] = digits[cnt-1] + 1
return digits
else:
for i in range(0, len(digits)):
digits[i] = str(digits[i])
intdig = ''.join(digits)
intdig = int(intdig) + 1
result = []
for item in str(intdig):
result.append(int(item))
return result | true | true |
f717f8f11f852a6ff486c6c6a1bdbf3db226a42b | 849 | py | Python | Chapter_7_code/build/hector_quadrotor_controller_gazebo/catkin_generated/pkg.develspace.context.pc.py | crepuscularlight/ROSbyExample | fa7b1a60cacca9b1034e318a2ac16ce4c8530d7c | [
"MIT"
] | 1 | 2021-04-23T10:01:22.000Z | 2021-04-23T10:01:22.000Z | Chapter_7_code/build/hector_quadrotor_controller_gazebo/catkin_generated/pkg.develspace.context.pc.py | crepuscularlight/ROSbyExample | fa7b1a60cacca9b1034e318a2ac16ce4c8530d7c | [
"MIT"
] | null | null | null | Chapter_7_code/build/hector_quadrotor_controller_gazebo/catkin_generated/pkg.develspace.context.pc.py | crepuscularlight/ROSbyExample | fa7b1a60cacca9b1034e318a2ac16ce4c8530d7c | [
"MIT"
] | null | null | null | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_7_code/src/hector_quadrotor/hector_quadrotor_controller_gazebo/include".split(';') if "/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_7_code/src/hector_quadrotor/hector_quadrotor_controller_gazebo/include" != "" else []
PROJECT_CATKIN_DEPENDS = "gazebo_ros_control;hector_quadrotor_interface".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lhector_quadrotor_controller_gazebo".split(';') if "-lhector_quadrotor_controller_gazebo" != "" else []
PROJECT_NAME = "hector_quadrotor_controller_gazebo"
PROJECT_SPACE_DIR = "/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_7_code/devel/.private/hector_quadrotor_controller_gazebo"
PROJECT_VERSION = "0.3.5"
| 94.333333 | 319 | 0.829211 |
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_7_code/src/hector_quadrotor/hector_quadrotor_controller_gazebo/include".split(';') if "/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_7_code/src/hector_quadrotor/hector_quadrotor_controller_gazebo/include" != "" else []
PROJECT_CATKIN_DEPENDS = "gazebo_ros_control;hector_quadrotor_interface".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lhector_quadrotor_controller_gazebo".split(';') if "-lhector_quadrotor_controller_gazebo" != "" else []
PROJECT_NAME = "hector_quadrotor_controller_gazebo"
PROJECT_SPACE_DIR = "/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_7_code/devel/.private/hector_quadrotor_controller_gazebo"
PROJECT_VERSION = "0.3.5"
| true | true |
f717f935bd346a3daf5e4df75d97e3d4a4dd5155 | 1,221 | py | Python | tests/test_persistentkv.py | fakedrake/WikipediaBase | ab5aa92786bddcd7942ad3e3f1f4e433575ba3fb | [
"Apache-2.0"
] | 1 | 2017-11-26T17:57:59.000Z | 2017-11-26T17:57:59.000Z | tests/test_persistentkv.py | fakedrake/WikipediaBase | ab5aa92786bddcd7942ad3e3f1f4e433575ba3fb | [
"Apache-2.0"
] | 34 | 2015-03-23T10:28:59.000Z | 2021-12-13T20:16:48.000Z | tests/test_persistentkv.py | fakedrake/WikipediaBase | ab5aa92786bddcd7942ad3e3f1f4e433575ba3fb | [
"Apache-2.0"
] | 2 | 2015-05-17T00:56:45.000Z | 2015-06-27T22:10:59.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_persistentkv
----------------------------------
Tests for `persistentkv` module.
"""
try:
import unittest2 as unittest
except ImportError:
import unittest
import os
import common
from wikipediabase import persistentkv as pkv
DATABASE = "/tmp/remove-me.db"
class TestPersistentkv(unittest.TestCase):
    # Exercises pkv.PersistentDict round-trips against the on-disk DATABASE
    # file. NOTE(review): Python 2 code -- the '\xe2\x98\x83snowman' byte
    # string is .decode()d to unicode, which has no equivalent on py3 str.
    def setUp(self):
        # no fixture needed: each test creates its own store
        pass
    def test_non_persist(self):
        # fresh store accepts plain and unicode (snowman) keys
        ps = pkv.PersistentDict(DATABASE)
        ps['hello'] = "yes"
        ps["bye"] = "no"
        ps['\xe2\x98\x83snowman'.decode('utf8')] = "well"
        self.assertEqual(ps['hello'], "yes")
        self.assertEqual(ps['bye'], "no")
        del ps
        # Test persistence
        ps = pkv.PersistentDict(DATABASE)
        self.assertEqual(ps['hello'], "yes")
        self.assertEqual(ps['bye'], "no")
        self.assertEqual(ps['\xe2\x98\x83snowman'.decode('utf8')], "well")
        del ps
        # Test file dependency
        os.remove(DATABASE)
        ps = pkv.PersistentDict(DATABASE)
        with self.assertRaises(KeyError):
            bo = ps['hello'] == "yes"
    def tearDown(self):
        # drop the backing file so runs stay independent
        os.remove(DATABASE)
if __name__ == '__main__':
unittest.main()
| 22.611111 | 74 | 0.585586 |
try:
import unittest2 as unittest
except ImportError:
import unittest
import os
import common
from wikipediabase import persistentkv as pkv
DATABASE = "/tmp/remove-me.db"
class TestPersistentkv(unittest.TestCase):
def setUp(self):
pass
def test_non_persist(self):
ps = pkv.PersistentDict(DATABASE)
ps['hello'] = "yes"
ps["bye"] = "no"
ps['\xe2\x98\x83snowman'.decode('utf8')] = "well"
self.assertEqual(ps['hello'], "yes")
self.assertEqual(ps['bye'], "no")
del ps
ps = pkv.PersistentDict(DATABASE)
self.assertEqual(ps['hello'], "yes")
self.assertEqual(ps['bye'], "no")
self.assertEqual(ps['\xe2\x98\x83snowman'.decode('utf8')], "well")
del ps
os.remove(DATABASE)
ps = pkv.PersistentDict(DATABASE)
with self.assertRaises(KeyError):
bo = ps['hello'] == "yes"
def tearDown(self):
os.remove(DATABASE)
if __name__ == '__main__':
unittest.main()
| true | true |
f717f96bb0c2423c47e922ab54cdfb5493b76d10 | 2,593 | py | Python | seinfeld_laugh_corpus/humor_recogniser/data_generation_scripts/word_prevalence_calc.py | ranyadshalom/seinfeld_laugh_corpus | b1e1a5208d2d3499144743028205336f8ca34552 | [
"MIT"
] | null | null | null | seinfeld_laugh_corpus/humor_recogniser/data_generation_scripts/word_prevalence_calc.py | ranyadshalom/seinfeld_laugh_corpus | b1e1a5208d2d3499144743028205336f8ca34552 | [
"MIT"
] | 2 | 2018-09-04T05:32:22.000Z | 2018-09-17T10:58:11.000Z | seinfeld_laugh_corpus/humor_recogniser/data_generation_scripts/word_prevalence_calc.py | ranyadshalom/seinfeld_laugh_corpus | b1e1a5208d2d3499144743028205336f8ca34552 | [
"MIT"
] | null | null | null | import argparse
import re
import sys
from collections import Counter
sys.path.append("..")
from ml_humor_recogniser import read_data
from screenplay import Line
def run(data, output):
    """End-to-end pipeline: read screenplays, count words, write probabilities.

    :param data: folder containing the .merged training files.
    :param output: path of the word-probability file to create.
    """
    corpus = screenplays_to_txt(read_data(data))
    write_to_file(get_probabilities(get_word_counts(corpus)), output)
    # TODO take care of UNKs
def screenplays_to_txt(screenplays):
    """Concatenate the dialog text of every Line in every screenplay.

    Each dialog line is prefixed with a newline; entries that are not
    Line instances (e.g. laugh/timing markers) are skipped.
    """
    chunks = []
    for screenplay in screenplays:
        chunks.extend('\n' + line.txt
                      for line in screenplay if isinstance(line, Line))
    return ''.join(chunks)
def get_word_counts(txt):
    """Count word occurrences in *txt* (case-insensitive).

    Unknown-word methodology: the token list is split 90/10.  The first 90%
    are counted normally; any token in the remaining 10% that was never seen
    in the first part is counted under the pseudo-word 'UNK'.

    :param txt: raw text to tokenise.
    :return: Counter mapping word -> number of occurrences.
    """
    tokens = [t for t in re.split(r'[\s\,\.\?\!\;\:"]', txt.lower()) if t]
    split_at = int(len(tokens) * 0.9)
    counts = Counter(tokens[:split_at])   # bulk of the corpus
    for token in tokens[split_at:]:       # held-out tail
        counts[token if token in counts else 'UNK'] += 1
    return counts
def get_probabilities(word_counts):
    """Convert raw word counts into relative frequencies.

    :param word_counts: mapping word -> occurrence count.
    :return: dict mapping word -> probability (count / total count).
    """
    total = sum(word_counts.values())
    return {word: count / total for word, count in word_counts.items()}
def write_to_file(word_probabilities, output):
    """Write one 'word probability' pair per line to *output*.

    Probabilities are rendered with 9 decimal places.
    """
    with open(output, 'w') as out:
        out.writelines("%s %.9f\n" % pair for pair in word_probabilities.items())
if __name__ == '__main__':
    # CLI entry point: <data folder> <output file>.
    parser = argparse.ArgumentParser(description="A script to calculate the probabilities of words occurring in a "
                                                 "screenplay.")
    parser.add_argument('data', help='The folder where the training data is located. Training data is .merged '
                                     'files, created by the data_merger.py module and contain screenplays, '
                                     'laugh times & dialog times.')
    parser.add_argument('output', help='Output file.')
    args = parser.parse_args()
    run(args.data, args.output)
| 32.822785 | 120 | 0.644042 | import argparse
import re
import sys
from collections import Counter
sys.path.append("..")
from ml_humor_recogniser import read_data
from screenplay import Line
def run(data, output):
    """End-to-end pipeline: read screenplays, count words, write probabilities.

    :param data: folder containing the .merged training files.
    :param output: path of the word-probability file to create.
    """
    corpus = screenplays_to_txt(read_data(data))
    write_to_file(get_probabilities(get_word_counts(corpus)), output)


def screenplays_to_txt(screenplays):
    """Concatenate the dialog text of every Line in every screenplay.

    Each dialog line is prefixed with a newline; entries that are not
    Line instances are skipped.
    """
    chunks = []
    for screenplay in screenplays:
        chunks.extend('\n' + line.txt
                      for line in screenplay if isinstance(line, Line))
    return ''.join(chunks)


def get_word_counts(txt):
    """Count word occurrences in *txt* (case-insensitive).

    The token list is split 90/10: the first 90% are counted normally, and
    any token in the remaining 10% never seen before is counted as 'UNK'.

    :param txt: raw text to tokenise.
    :return: Counter mapping word -> number of occurrences.
    """
    tokens = [t for t in re.split(r'[\s\,\.\?\!\;\:"]', txt.lower()) if t]
    split_at = int(len(tokens) * 0.9)
    counts = Counter(tokens[:split_at])
    for token in tokens[split_at:]:
        counts[token if token in counts else 'UNK'] += 1
    return counts


def get_probabilities(word_counts):
    """Convert raw word counts into relative frequencies.

    :param word_counts: mapping word -> occurrence count.
    :return: dict mapping word -> probability (count / total count).
    """
    total = sum(word_counts.values())
    return {word: count / total for word, count in word_counts.items()}


def write_to_file(word_probabilities, output):
    """Write one 'word probability' pair per line to *output* (9 decimals)."""
    with open(output, 'w') as out:
        out.writelines("%s %.9f\n" % pair for pair in word_probabilities.items())


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="A script to calculate the probabilities of words occurring in a "
                                                 "screenplay.")
    parser.add_argument('data', help='The folder where the training data is located. Training data is .merged '
                                     'files, created by the data_merger.py module and contain screenplays, '
                                     'laugh times & dialog times.')
    parser.add_argument('output', help='Output file.')
    args = parser.parse_args()
    run(args.data, args.output)
| true | true |
f717f9a709ac6da00ce8729b7850f20a3de65921 | 59 | py | Python | uiSimple.py | smithgoo/python3Learn | d0c066c10887db3942ca285b86ce464463998aad | [
"MIT"
] | 1 | 2019-05-30T08:08:34.000Z | 2019-05-30T08:08:34.000Z | uiSimple.py | smithgoo/python3Learn | d0c066c10887db3942ca285b86ce464463998aad | [
"MIT"
] | null | null | null | uiSimple.py | smithgoo/python3Learn | d0c066c10887db3942ca285b86ce464463998aad | [
"MIT"
] | null | null | null | from _tkinter import *
root = tkinter.Tk()
root.mainloop() | 19.666667 | 23 | 0.728814 | from _tkinter import *
root = tkinter.Tk()
root.mainloop() | true | true |
f717f9e8df46378c8a416ad5be38e9da22664eeb | 798 | py | Python | deal/_cli/_main.py | toonarmycaptain/deal | 9dff86e1dc5c8607f02ded34b6d64e770f1959fa | [
"MIT"
] | null | null | null | deal/_cli/_main.py | toonarmycaptain/deal | 9dff86e1dc5c8607f02ded34b6d64e770f1959fa | [
"MIT"
] | null | null | null | deal/_cli/_main.py | toonarmycaptain/deal | 9dff86e1dc5c8607f02ded34b6d64e770f1959fa | [
"MIT"
] | null | null | null | # built-in
from argparse import ArgumentParser
from types import MappingProxyType
from typing import Callable, Mapping, Sequence
# app
from ._lint import lint_command
from ._memtest import memtest_command
from ._stub import stub_command
from ._test import test_command
# A sub-command is a callable taking argv and returning an exit code.
CommandsType = Mapping[str, Callable[[Sequence[str]], int]]

# Read-only registry of the supported `python -m deal` sub-commands.
COMMANDS: CommandsType = MappingProxyType({
    'lint': lint_command,
    'memtest': memtest_command,
    'stub': stub_command,
    'test': test_command,
})
def main(argv: Sequence[str], *, commands: CommandsType = COMMANDS) -> int:
    """Dispatch *argv* to the matching sub-command.

    The first positional argument selects the command; every remaining
    (unrecognised) argument is passed through to it untouched.

    :return: the sub-command's exit code.
    """
    parser = ArgumentParser(prog='python3 -m deal')
    parser.add_argument('command', choices=sorted(commands))
    known, rest = parser.parse_known_args(argv)
    return commands[known.command](rest)
| 27.517241 | 75 | 0.761905 |
from argparse import ArgumentParser
from types import MappingProxyType
from typing import Callable, Mapping, Sequence
from ._lint import lint_command
from ._memtest import memtest_command
from ._stub import stub_command
from ._test import test_command
# A sub-command is a callable taking argv and returning an exit code.
CommandsType = Mapping[str, Callable[[Sequence[str]], int]]

# Read-only registry of the supported `python -m deal` sub-commands.
COMMANDS: CommandsType = MappingProxyType({
    'lint': lint_command,
    'memtest': memtest_command,
    'stub': stub_command,
    'test': test_command,
})


def main(argv: Sequence[str], *, commands: CommandsType = COMMANDS) -> int:
    """Dispatch *argv* to the matching sub-command.

    The first positional argument selects the command; every remaining
    (unrecognised) argument is passed through to it untouched.

    :return: the sub-command's exit code.
    """
    parser = ArgumentParser(prog='python3 -m deal')
    parser.add_argument('command', choices=sorted(commands))
    known, rest = parser.parse_known_args(argv)
    return commands[known.command](rest)
| true | true |
f717fa3b3ac3c83afb7de4e2d210b524c7409f46 | 2,049 | py | Python | examples/extract_table_names.py | hugovk/sqlparse | 3598bf4670b0f4d80b7ca0557f156aa8bf87add4 | [
"BSD-3-Clause"
] | null | null | null | examples/extract_table_names.py | hugovk/sqlparse | 3598bf4670b0f4d80b7ca0557f156aa8bf87add4 | [
"BSD-3-Clause"
] | null | null | null | examples/extract_table_names.py | hugovk/sqlparse | 3598bf4670b0f4d80b7ca0557f156aa8bf87add4 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (C) 2009-2020 the sqlparse authors and contributors
# <see AUTHORS file>
#
# This example is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause
#
# This example illustrates how to extract table names from nested
# SELECT statements.
#
# See:
# https://groups.google.com/forum/#!forum/sqlparse/browse_thread/thread/b0bd9a022e9d4895
import sqlparse
from sqlparse.sql import IdentifierList, Identifier
from sqlparse.tokens import Keyword, DML
def is_subselect(parsed):
    """Return True if *parsed* is a grouped token that contains a SELECT."""
    if not parsed.is_group:
        return False
    return any(tok.ttype is DML and tok.value.upper() == 'SELECT'
               for tok in parsed.tokens)
def extract_from_part(parsed):
    """Yield the tokens that make up the FROM clause of *parsed*.

    Recurses into sub-selects; the first keyword after FROM (WHERE, GROUP,
    ORDER, ...) terminates the clause.
    """
    in_from = False
    for token in parsed.tokens:
        if not in_from:
            # Skip everything up to (and including) the FROM keyword.
            in_from = token.ttype is Keyword and token.value.upper() == 'FROM'
            continue
        if is_subselect(token):
            yield from extract_from_part(token)
        elif token.ttype is Keyword:
            return  # FROM clause is over
        else:
            yield token
def extract_table_identifiers(token_stream):
    """Yield the table names referenced by the tokens of a FROM clause."""
    for token in token_stream:
        if isinstance(token, IdentifierList):
            yield from (ident.get_name() for ident in token.get_identifiers())
        elif isinstance(token, Identifier):
            yield token.get_name()
        elif token.ttype is Keyword:
            # It's a bug to check for Keyword here, but in the example
            # above some tables names are identified as keywords...
            yield token.value
def extract_tables(sql):
    """Return the table names referenced by *sql* (first statement only)."""
    statement = sqlparse.parse(sql)[0]
    return list(extract_table_identifiers(extract_from_part(statement)))
if __name__ == '__main__':
    # Nested-SELECT smoke test: tables at every nesting level are reported.
    sql = """
    select K.a,K.b from (select H.b from (select G.c from (select F.d from
    (select E.e from A, B, C, D, E), F), G), H), I, J, K order by 1,2;
    """
    tables = ', '.join(extract_tables(sql))
    print(f'Tables: {tables}')
| 29.695652 | 88 | 0.650073 |
ist, Identifier
from sqlparse.tokens import Keyword, DML
def is_subselect(parsed):
    """Return True if *parsed* is a grouped token that contains a SELECT."""
    if not parsed.is_group:
        return False
    return any(tok.ttype is DML and tok.value.upper() == 'SELECT'
               for tok in parsed.tokens)


def extract_from_part(parsed):
    """Yield the tokens that make up the FROM clause of *parsed*.

    Recurses into sub-selects; the first keyword after FROM (WHERE, GROUP,
    ORDER, ...) terminates the clause.
    """
    in_from = False
    for token in parsed.tokens:
        if not in_from:
            # Skip everything up to (and including) the FROM keyword.
            in_from = token.ttype is Keyword and token.value.upper() == 'FROM'
            continue
        if is_subselect(token):
            yield from extract_from_part(token)
        elif token.ttype is Keyword:
            return  # FROM clause is over
        else:
            yield token


def extract_table_identifiers(token_stream):
    """Yield the table names referenced by the tokens of a FROM clause."""
    for token in token_stream:
        if isinstance(token, IdentifierList):
            yield from (ident.get_name() for ident in token.get_identifiers())
        elif isinstance(token, Identifier):
            yield token.get_name()
        elif token.ttype is Keyword:
            # above some tables names are identified as keywords...
            yield token.value


def extract_tables(sql):
    """Return the table names referenced by *sql* (first statement only)."""
    statement = sqlparse.parse(sql)[0]
    return list(extract_table_identifiers(extract_from_part(statement)))


if __name__ == '__main__':
    sql = """
    select K.a,K.b from (select H.b from (select G.c from (select F.d from
    (select E.e from A, B, C, D, E), F), G), H), I, J, K order by 1,2;
    """
    tables = ', '.join(extract_tables(sql))
    print(f'Tables: {tables}')
| true | true |
f717faec2a4bce7642e1b452032a060d7e5853ec | 3,970 | py | Python | 01-datamodeling/project02-data-modeling-with-cassandra/cassandra_mgr.py | ultranet1/DATA-ENGINEERING-NANODEGREE-UDACITY | d04e39e7312f04307f12257157c19ea40da2f11a | [
"Apache-2.0"
] | 33 | 2020-09-01T20:10:28.000Z | 2022-02-11T06:15:55.000Z | 01-datamodeling/project02-data-modeling-with-cassandra/cassandra_mgr.py | ultranet1/DATA-ENGINEERING-NANODEGREE-UDACITY | d04e39e7312f04307f12257157c19ea40da2f11a | [
"Apache-2.0"
] | null | null | null | 01-datamodeling/project02-data-modeling-with-cassandra/cassandra_mgr.py | ultranet1/DATA-ENGINEERING-NANODEGREE-UDACITY | d04e39e7312f04307f12257157c19ea40da2f11a | [
"Apache-2.0"
] | 64 | 2021-01-21T11:55:34.000Z | 2022-03-10T08:14:11.000Z | from cassandra.cluster import Cluster
class CassandraMgr:
    """
    Manage operations with Apache Cassandra.

    Thin convenience wrapper: keyspace/session management plus simple
    DDL/DML helpers.  Driver errors are printed (best effort) rather than
    raised, preserving the original behaviour.
    """

    def __init__(self, config):
        """
        Constructor.

        :param config: configuration of the cluster of Apache Cassandra ->
            ip (contact points), replication factor, replication class and
            key space.
        """
        self.ip = config['ip']
        self.replication_factor = config["replication_factor"]
        self.replication_class = config["replication_class"]
        self.key_space = config["key_space"]
        self.cluster = Cluster(self.ip)

    def connect(self):
        """
        Create a connection from the configuration passed in class constructor.

        Creates the keyspace if needed, binds the session to it and returns
        the session.

        :return: session.
        """
        session = self.cluster.connect()
        cql_create_keyspace = """
        CREATE KEYSPACE IF NOT EXISTS %s WITH REPLICATION = { 'class' : '%s', 'replication_factor' : %s }
        """ % (self.key_space, self.replication_class, self.replication_factor)
        try:
            session.execute(cql_create_keyspace)
        except Exception as e:
            print(e)
        try:
            session.set_keyspace(self.key_space)
        except Exception as e:
            print(e)
        return session

    def disconnect(self, session):
        """
        Finalise the session and cluster shutdown.

        :param session: session.
        """
        session.shutdown()
        self.cluster.shutdown()

    @staticmethod
    def create_table(session, table, fields, primary_key):
        """
        Create an Apache Cassandra table (IF NOT EXISTS).

        :param session: session.
        :param table: table to create.
        :param fields: iterable of column definitions (e.g. "name text").
        :param primary_key: primary key clause of the table, parentheses included.
        """
        query = "CREATE TABLE IF NOT EXISTS %s (%s , PRIMARY KEY %s)" % (
            table, ", ".join(fields), primary_key)
        try:
            session.execute(query)
        except Exception as e:
            print(e)

    @staticmethod
    def insert_cassandra_from_df(session, table, columns_table, df):
        """
        Insert a pandas dataframe into a Cassandra table, one row at a time.

        :param session: session.
        :param table: table where insert rows.
        :param columns_table: columns of the table, in dataframe column order.
        :param df: pandas dataframe to insert into the table.
        """
        query = CassandraMgr.get_insert_query(table, columns_table)
        for _, row in df.iterrows():
            session.execute(query, (row[column] for column in df.columns))

    @staticmethod
    def select(session, fields, table, filters):
        """
        Make a select to an Apache Cassandra table.

        :param session: session.
        :param fields: projection of the select statement.
        :param table: table to query.
        :param filters: raw expression for the WHERE clause.
        :return: the result rows, or None if the query failed.
        """
        query = "select %s from %s WHERE %s" % (", ".join(fields), table, filters)
        # Bug fix: `rows` used to be assigned only on success, so a failed
        # query raised UnboundLocalError at the return statement below.
        rows = None
        try:
            rows = session.execute(query)
        except Exception as e:
            print(e)
        return rows

    @staticmethod
    def get_insert_query(table: str, columns):
        """
        Build an INSERT statement with one %s placeholder per column.

        :param table: table.
        :param columns: columns to insert.
        :return: string with INSERT query.
        """
        query = "INSERT INTO %s (%s) " % (table, ", ".join(columns))
        query = query + " VALUES (" + ", ".join(["%s"] * len(columns)) + ") "
        return query

    @staticmethod
    def drop_table(session, table):
        """
        Drop an Apache Cassandra table.

        :param session: session.
        :param table: table to drop.
        """
        query = "drop table %s" % table
        try:
            session.execute(query)
        except Exception as e:
            print(e)
| 29.626866 | 119 | 0.58262 | from cassandra.cluster import Cluster
class CassandraMgr:
    """
    Manage operations with Apache Cassandra.

    Thin convenience wrapper: keyspace/session management plus simple
    DDL/DML helpers.  Driver errors are printed (best effort) rather than
    raised, preserving the original behaviour.
    """

    def __init__(self, config):
        """
        Constructor.

        :param config: configuration of the cluster of Apache Cassandra ->
            ip (contact points), replication factor, replication class and
            key space.
        """
        self.ip = config['ip']
        self.replication_factor = config["replication_factor"]
        self.replication_class = config["replication_class"]
        self.key_space = config["key_space"]
        self.cluster = Cluster(self.ip)

    def connect(self):
        """
        Create a connection from the configuration passed in class constructor.

        Creates the keyspace if needed, binds the session to it and returns
        the session.

        :return: session.
        """
        session = self.cluster.connect()
        cql_create_keyspace = """
        CREATE KEYSPACE IF NOT EXISTS %s WITH REPLICATION = { 'class' : '%s', 'replication_factor' : %s }
        """ % (self.key_space, self.replication_class, self.replication_factor)
        try:
            session.execute(cql_create_keyspace)
        except Exception as e:
            print(e)
        try:
            session.set_keyspace(self.key_space)
        except Exception as e:
            print(e)
        return session

    def disconnect(self, session):
        """
        Finalise the session and cluster shutdown.

        :param session: session.
        """
        session.shutdown()
        self.cluster.shutdown()

    @staticmethod
    def create_table(session, table, fields, primary_key):
        """
        Create an Apache Cassandra table (IF NOT EXISTS).

        :param session: session.
        :param table: table to create.
        :param fields: iterable of column definitions (e.g. "name text").
        :param primary_key: primary key clause of the table, parentheses included.
        """
        query = "CREATE TABLE IF NOT EXISTS %s (%s , PRIMARY KEY %s)" % (
            table, ", ".join(fields), primary_key)
        try:
            session.execute(query)
        except Exception as e:
            print(e)

    @staticmethod
    def insert_cassandra_from_df(session, table, columns_table, df):
        """
        Insert a pandas dataframe into a Cassandra table, one row at a time.

        :param session: session.
        :param table: table where insert rows.
        :param columns_table: columns of the table, in dataframe column order.
        :param df: pandas dataframe to insert into the table.
        """
        query = CassandraMgr.get_insert_query(table, columns_table)
        for _, row in df.iterrows():
            session.execute(query, (row[column] for column in df.columns))

    @staticmethod
    def select(session, fields, table, filters):
        """
        Make a select to an Apache Cassandra table.

        :param session: session.
        :param fields: projection of the select statement.
        :param table: table to query.
        :param filters: raw expression for the WHERE clause.
        :return: the result rows, or None if the query failed.
        """
        query = "select %s from %s WHERE %s" % (", ".join(fields), table, filters)
        # Bug fix: `rows` used to be assigned only on success, so a failed
        # query raised UnboundLocalError at the return statement below.
        rows = None
        try:
            rows = session.execute(query)
        except Exception as e:
            print(e)
        return rows

    @staticmethod
    def get_insert_query(table: str, columns):
        """
        Build an INSERT statement with one %s placeholder per column.

        :param table: table.
        :param columns: columns to insert.
        :return: string with INSERT query.
        """
        query = "INSERT INTO %s (%s) " % (table, ", ".join(columns))
        query = query + " VALUES (" + ", ".join(["%s"] * len(columns)) + ") "
        return query

    @staticmethod
    def drop_table(session, table):
        """
        Drop an Apache Cassandra table.

        :param session: session.
        :param table: table to drop.
        """
        query = "drop table %s" % table
        try:
            session.execute(query)
        except Exception as e:
            print(e)
| true | true |
f717fc05188de674e02f5c99af90516ab0930a2f | 814 | py | Python | backend/server/apps/notes/migrations/0001_initial.py | Bonifase/django-react | ea18c3192ee28ce2291d6cabb08addd8cf8eb27e | [
"MIT"
] | 508 | 2020-10-05T14:03:16.000Z | 2022-03-30T09:04:42.000Z | backend/server/apps/notes/migrations/0001_initial.py | Bonifase/django-react | ea18c3192ee28ce2291d6cabb08addd8cf8eb27e | [
"MIT"
] | 17 | 2020-12-10T08:23:55.000Z | 2022-03-20T17:10:37.000Z | backend/server/apps/notes/migrations/0001_initial.py | Bonifase/django-react | ea18c3192ee28ce2291d6cabb08addd8cf8eb27e | [
"MIT"
] | 80 | 2020-12-23T13:59:14.000Z | 2022-03-12T03:52:21.000Z | # Generated by Django 3.1.3 on 2020-11-09 10:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the initial ``Note`` model (content plus audit fields)."""

    # First migration of this app.
    initial = True

    # Notes reference the project's (possibly swapped) user model.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Note',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('content', models.TextField(blank=True)),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 30.148148 | 124 | 0.637592 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the initial ``Note`` model (content plus audit fields)."""

    # First migration of this app.
    initial = True

    # Notes reference the project's (possibly swapped) user model.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Note',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('content', models.TextField(blank=True)),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| true | true |
f717fceec4380b1677eb124c6b56d04232940628 | 8,716 | py | Python | tests/api_connexion/endpoints/test_task_endpoint.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 79 | 2021-10-15T07:32:27.000Z | 2022-03-28T04:10:19.000Z | tests/api_connexion/endpoints/test_task_endpoint.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 153 | 2021-10-15T05:23:46.000Z | 2022-02-23T06:07:10.000Z | tests/api_connexion/endpoints/test_task_endpoint.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 23 | 2021-10-15T02:36:37.000Z | 2022-03-17T02:59:27.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import unittest
from datetime import datetime
from airflow import DAG
from airflow.models import DagBag
from airflow.models.serialized_dag import SerializedDagModel
from airflow.operators.dummy import DummyOperator
from airflow.security import permissions
from airflow.www import app
from tests.test_utils.api_connexion_utils import assert_401, create_user, delete_user
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_dags, clear_db_runs, clear_db_serialized_dags
class TestTaskEndpoint(unittest.TestCase):
    """Shared fixture: a one-task DAG served by a test Flask app."""

    dag_id = "test_dag"
    task_id = "op1"

    @staticmethod
    def clean_db():
        # Reset every table the task endpoints can touch.
        clear_db_runs()
        clear_db_dags()
        clear_db_serialized_dags()

    @classmethod
    def setUpClass(cls) -> None:
        super().setUpClass()
        with conf_vars({("api", "auth_backend"): "tests.test_utils.remote_user_api_auth_backend"}):
            cls.app = app.create_app(testing=True)  # type:ignore
        # One user with read permissions, one with no permissions at all.
        create_user(
            cls.app,  # type: ignore
            username="test",
            role_name="Test",
            permissions=[
                (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
                (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
                (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
            ],
        )
        create_user(cls.app, username="test_no_permissions", role_name="TestNoPermissions")  # type: ignore

        # Register a minimal in-memory dag bag holding a single DummyOperator.
        with DAG(cls.dag_id, start_date=datetime(2020, 6, 15), doc_md="details") as dag:
            DummyOperator(task_id=cls.task_id)
        cls.dag = dag  # type:ignore
        dag_bag = DagBag(os.devnull, include_examples=False)
        dag_bag.dags = {dag.dag_id: dag}
        cls.app.dag_bag = dag_bag  # type:ignore

    @classmethod
    def tearDownClass(cls) -> None:
        delete_user(cls.app, username="test")  # type: ignore
        delete_user(cls.app, username="test_no_permissions")  # type: ignore

    def setUp(self) -> None:
        self.clean_db()
        self.client = self.app.test_client()  # type:ignore

    def tearDown(self) -> None:
        self.clean_db()
class TestGetTask(TestTaskEndpoint):
    """GET /dags/{dag_id}/tasks/{task_id}."""

    @staticmethod
    def _expected_task():
        # Serialized form of the fixture's single DummyOperator.
        return {
            "class_ref": {
                "class_name": "DummyOperator",
                "module_path": "airflow.operators.dummy",
            },
            "depends_on_past": False,
            "downstream_task_ids": [],
            "end_date": None,
            "execution_timeout": None,
            "extra_links": [],
            "owner": "airflow",
            "pool": "default_pool",
            "pool_slots": 1.0,
            "priority_weight": 1.0,
            "queue": "default",
            "retries": 0.0,
            "retry_delay": {"__type": "TimeDelta", "days": 0, "seconds": 300, "microseconds": 0},
            "retry_exponential_backoff": False,
            "start_date": "2020-06-15T00:00:00+00:00",
            "task_id": "op1",
            "template_fields": [],
            "trigger_rule": "all_success",
            "ui_color": "#e8f7e4",
            "ui_fgcolor": "#000",
            "wait_for_downstream": False,
            "weight_rule": "downstream",
        }

    def test_should_respond_200(self):
        response = self.client.get(
            f"/api/v1/dags/{self.dag_id}/tasks/{self.task_id}", environ_overrides={'REMOTE_USER': "test"}
        )
        assert response.status_code == 200
        assert response.json == self._expected_task()

    def test_should_respond_200_serialized(self):
        # Create empty app with empty dagbag to check if DAG is read from db
        with conf_vars({("api", "auth_backend"): "tests.test_utils.remote_user_api_auth_backend"}):
            app_serialized = app.create_app(testing=True)
        dag_bag = DagBag(os.devnull, include_examples=False, read_dags_from_db=True)
        app_serialized.dag_bag = dag_bag
        client = app_serialized.test_client()
        SerializedDagModel.write_dag(self.dag)

        response = client.get(
            f"/api/v1/dags/{self.dag_id}/tasks/{self.task_id}", environ_overrides={'REMOTE_USER': "test"}
        )
        assert response.status_code == 200
        assert response.json == self._expected_task()

    def test_should_respond_404(self):
        response = self.client.get(
            f"/api/v1/dags/{self.dag_id}/tasks/xxxx_not_existing", environ_overrides={'REMOTE_USER': "test"}
        )
        assert response.status_code == 404

    def test_should_raises_401_unauthenticated(self):
        # No REMOTE_USER header at all.
        response = self.client.get(f"/api/v1/dags/{self.dag_id}/tasks/{self.task_id}")
        assert_401(response)

    def test_should_raise_403_forbidden(self):
        response = self.client.get(
            f"/api/v1/dags/{self.dag_id}/tasks", environ_overrides={'REMOTE_USER': "test_no_permissions"}
        )
        assert response.status_code == 403
class TestGetTasks(TestTaskEndpoint):
    """GET /dags/{dag_id}/tasks."""

    def test_should_respond_200(self):
        response = self.client.get(
            f"/api/v1/dags/{self.dag_id}/tasks", environ_overrides={'REMOTE_USER': "test"}
        )
        assert response.status_code == 200
        # Exactly one task: the fixture's DummyOperator in serialized form.
        expected = {
            "tasks": [
                {
                    "class_ref": {
                        "class_name": "DummyOperator",
                        "module_path": "airflow.operators.dummy",
                    },
                    "depends_on_past": False,
                    "downstream_task_ids": [],
                    "end_date": None,
                    "execution_timeout": None,
                    "extra_links": [],
                    "owner": "airflow",
                    "pool": "default_pool",
                    "pool_slots": 1.0,
                    "priority_weight": 1.0,
                    "queue": "default",
                    "retries": 0.0,
                    "retry_delay": {"__type": "TimeDelta", "days": 0, "seconds": 300, "microseconds": 0},
                    "retry_exponential_backoff": False,
                    "start_date": "2020-06-15T00:00:00+00:00",
                    "task_id": "op1",
                    "template_fields": [],
                    "trigger_rule": "all_success",
                    "ui_color": "#e8f7e4",
                    "ui_fgcolor": "#000",
                    "wait_for_downstream": False,
                    "weight_rule": "downstream",
                }
            ],
            "total_entries": 1,
        }
        assert response.json == expected

    def test_should_respond_404(self):
        response = self.client.get(
            "/api/v1/dags/xxxx_not_existing/tasks", environ_overrides={'REMOTE_USER': "test"}
        )
        assert response.status_code == 404

    def test_should_raises_401_unauthenticated(self):
        response = self.client.get(f"/api/v1/dags/{self.dag_id}/tasks")
        assert_401(response)
| 38.566372 | 109 | 0.58559 |
import os
import unittest
from datetime import datetime
from airflow import DAG
from airflow.models import DagBag
from airflow.models.serialized_dag import SerializedDagModel
from airflow.operators.dummy import DummyOperator
from airflow.security import permissions
from airflow.www import app
from tests.test_utils.api_connexion_utils import assert_401, create_user, delete_user
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_dags, clear_db_runs, clear_db_serialized_dags
class TestTaskEndpoint(unittest.TestCase):
    """Shared fixture: a one-task DAG served by a test Flask app."""

    dag_id = "test_dag"
    task_id = "op1"

    @staticmethod
    def clean_db():
        # Reset every table the task endpoints can touch.
        clear_db_runs()
        clear_db_dags()
        clear_db_serialized_dags()

    @classmethod
    def setUpClass(cls) -> None:
        super().setUpClass()
        with conf_vars({("api", "auth_backend"): "tests.test_utils.remote_user_api_auth_backend"}):
            cls.app = app.create_app(testing=True)
        # One user with read permissions, one with no permissions at all.
        create_user(
            cls.app,
            username="test",
            role_name="Test",
            permissions=[
                (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
                (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
                (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
            ],
        )
        create_user(cls.app, username="test_no_permissions", role_name="TestNoPermissions")

        # Register a minimal in-memory dag bag holding a single DummyOperator.
        with DAG(cls.dag_id, start_date=datetime(2020, 6, 15), doc_md="details") as dag:
            DummyOperator(task_id=cls.task_id)
        cls.dag = dag
        dag_bag = DagBag(os.devnull, include_examples=False)
        dag_bag.dags = {dag.dag_id: dag}
        cls.app.dag_bag = dag_bag

    @classmethod
    def tearDownClass(cls) -> None:
        delete_user(cls.app, username="test")
        delete_user(cls.app, username="test_no_permissions")

    def setUp(self) -> None:
        self.clean_db()
        self.client = self.app.test_client()

    def tearDown(self) -> None:
        self.clean_db()
class TestGetTask(TestTaskEndpoint):
    """GET /dags/{dag_id}/tasks/{task_id}."""

    @staticmethod
    def _expected_task():
        # Serialized form of the fixture's single DummyOperator.
        return {
            "class_ref": {
                "class_name": "DummyOperator",
                "module_path": "airflow.operators.dummy",
            },
            "depends_on_past": False,
            "downstream_task_ids": [],
            "end_date": None,
            "execution_timeout": None,
            "extra_links": [],
            "owner": "airflow",
            "pool": "default_pool",
            "pool_slots": 1.0,
            "priority_weight": 1.0,
            "queue": "default",
            "retries": 0.0,
            "retry_delay": {"__type": "TimeDelta", "days": 0, "seconds": 300, "microseconds": 0},
            "retry_exponential_backoff": False,
            "start_date": "2020-06-15T00:00:00+00:00",
            "task_id": "op1",
            "template_fields": [],
            "trigger_rule": "all_success",
            "ui_color": "#e8f7e4",
            "ui_fgcolor": "#000",
            "wait_for_downstream": False,
            "weight_rule": "downstream",
        }

    def test_should_respond_200(self):
        response = self.client.get(
            f"/api/v1/dags/{self.dag_id}/tasks/{self.task_id}", environ_overrides={'REMOTE_USER': "test"}
        )
        assert response.status_code == 200
        assert response.json == self._expected_task()

    def test_should_respond_200_serialized(self):
        # Fresh app with an empty dag bag proves the DAG is read from the db.
        with conf_vars({("api", "auth_backend"): "tests.test_utils.remote_user_api_auth_backend"}):
            app_serialized = app.create_app(testing=True)
        dag_bag = DagBag(os.devnull, include_examples=False, read_dags_from_db=True)
        app_serialized.dag_bag = dag_bag
        client = app_serialized.test_client()
        SerializedDagModel.write_dag(self.dag)

        response = client.get(
            f"/api/v1/dags/{self.dag_id}/tasks/{self.task_id}", environ_overrides={'REMOTE_USER': "test"}
        )
        assert response.status_code == 200
        assert response.json == self._expected_task()

    def test_should_respond_404(self):
        response = self.client.get(
            f"/api/v1/dags/{self.dag_id}/tasks/xxxx_not_existing", environ_overrides={'REMOTE_USER': "test"}
        )
        assert response.status_code == 404

    def test_should_raises_401_unauthenticated(self):
        # No REMOTE_USER header at all.
        response = self.client.get(f"/api/v1/dags/{self.dag_id}/tasks/{self.task_id}")
        assert_401(response)

    def test_should_raise_403_forbidden(self):
        response = self.client.get(
            f"/api/v1/dags/{self.dag_id}/tasks", environ_overrides={'REMOTE_USER': "test_no_permissions"}
        )
        assert response.status_code == 403
class TestGetTasks(TestTaskEndpoint):
    """GET /dags/{dag_id}/tasks."""

    def test_should_respond_200(self):
        response = self.client.get(
            f"/api/v1/dags/{self.dag_id}/tasks", environ_overrides={'REMOTE_USER': "test"}
        )
        assert response.status_code == 200
        # Exactly one task: the fixture's DummyOperator in serialized form.
        expected = {
            "tasks": [
                {
                    "class_ref": {
                        "class_name": "DummyOperator",
                        "module_path": "airflow.operators.dummy",
                    },
                    "depends_on_past": False,
                    "downstream_task_ids": [],
                    "end_date": None,
                    "execution_timeout": None,
                    "extra_links": [],
                    "owner": "airflow",
                    "pool": "default_pool",
                    "pool_slots": 1.0,
                    "priority_weight": 1.0,
                    "queue": "default",
                    "retries": 0.0,
                    "retry_delay": {"__type": "TimeDelta", "days": 0, "seconds": 300, "microseconds": 0},
                    "retry_exponential_backoff": False,
                    "start_date": "2020-06-15T00:00:00+00:00",
                    "task_id": "op1",
                    "template_fields": [],
                    "trigger_rule": "all_success",
                    "ui_color": "#e8f7e4",
                    "ui_fgcolor": "#000",
                    "wait_for_downstream": False,
                    "weight_rule": "downstream",
                }
            ],
            "total_entries": 1,
        }
        assert response.json == expected

    def test_should_respond_404(self):
        response = self.client.get(
            "/api/v1/dags/xxxx_not_existing/tasks", environ_overrides={'REMOTE_USER': "test"}
        )
        assert response.status_code == 404

    def test_should_raises_401_unauthenticated(self):
        response = self.client.get(f"/api/v1/dags/{self.dag_id}/tasks")
        assert_401(response)
| true | true |
f717fe26b70d466a7b574b0a4659b534cb647013 | 961 | py | Python | testapp/app.py | movermeyer/Flask-Dropbox | bfc59c64a6a55b50cacb9b362ed520c50705778a | [
"BSD-3-Clause"
] | 22 | 2015-02-07T21:37:36.000Z | 2021-12-06T07:12:49.000Z | testapp/app.py | movermeyer/Flask-Dropbox | bfc59c64a6a55b50cacb9b362ed520c50705778a | [
"BSD-3-Clause"
] | 33 | 2020-03-16T03:48:37.000Z | 2021-08-02T03:40:08.000Z | testapp/app.py | movermeyer/Flask-Dropbox | bfc59c64a6a55b50cacb9b362ed520c50705778a | [
"BSD-3-Clause"
] | 6 | 2017-02-04T04:29:15.000Z | 2021-12-06T07:12:51.000Z | import os
import sys
from flask import Flask
# NOTE(review): the flask.ext.* namespace is the legacy extension import
# style; modern Flask uses e.g. `from flask_dropbox import Dropbox` -- confirm
# the pinned Flask version before upgrading.
from flask.ext.dropbox import Dropbox
from flask.ext.lazyviews import LazyViews
from flask.ext.script import Manager
import settings
# Initialize and configure Flask app from the local settings module
app = Flask(__name__)
app.config.from_object(settings)
# Setup Dropbox and script extensions; Dropbox auth views live under /dropbox
dropbox = Dropbox(app)
dropbox.register_blueprint(url_prefix='/dropbox')
manager = Manager(app)
# Add test project views, lazily resolved from the testapp.views module
views = LazyViews(app, 'testapp.views')
views.add('/', 'home')
views.add('/delete/<path:filename>', 'delete')
views.add('/download/<path:filename>', 'download', endpoint='download')
views.add('/files', 'files')
# Same view function as /download, but flagged as media via `defaults`
views.add('/media/<path:filename>',
          'download',
          defaults={'media': True},
          endpoint='media')
views.add('/session/clear', 'session_clear')
views.add('/session/dump', 'session_dump')
views.add('/success/<path:filename>', 'success')
views.add('/upload', 'upload', methods=('GET', 'POST'))
import sys
from flask import Flask
from flask.ext.dropbox import Dropbox
from flask.ext.lazyviews import LazyViews
from flask.ext.script import Manager
import settings
app = Flask(__name__)
app.config.from_object(settings)
dropbox = Dropbox(app)
dropbox.register_blueprint(url_prefix='/dropbox')
manager = Manager(app)
views = LazyViews(app, 'testapp.views')
views.add('/', 'home')
views.add('/delete/<path:filename>', 'delete')
views.add('/download/<path:filename>', 'download', endpoint='download')
views.add('/files', 'files')
views.add('/media/<path:filename>',
'download',
defaults={'media': True},
endpoint='media')
views.add('/session/clear', 'session_clear')
views.add('/session/dump', 'session_dump')
views.add('/success/<path:filename>', 'success')
views.add('/upload', 'upload', methods=('GET', 'POST'))
| true | true |
f7180103c1420b4319a7785c69d208a63ea1cce0 | 3,462 | py | Python | Code/all-starter-code/bases.py | stasi815/CS-1.3-Core-Data-Structures | 8586d92a841a80bbfbb0f4acfabda8552f04ff92 | [
"MIT"
] | null | null | null | Code/all-starter-code/bases.py | stasi815/CS-1.3-Core-Data-Structures | 8586d92a841a80bbfbb0f4acfabda8552f04ff92 | [
"MIT"
] | null | null | null | Code/all-starter-code/bases.py | stasi815/CS-1.3-Core-Data-Structures | 8586d92a841a80bbfbb0f4acfabda8552f04ff92 | [
"MIT"
] | null | null | null | #!python
import string
# Hint: Use these string constants to encode/decode hexadecimal digits and more
# string.digits is '0123456789'
# string.hexdigits is '0123456789abcdefABCDEF'
# string.ascii_lowercase is 'abcdefghijklmnopqrstuvwxyz'
# string.ascii_uppercase is 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# string.ascii_letters is ascii_lowercase + ascii_uppercase
# string.printable is digits + ascii_letters + punctuation + whitespace
def decode(digits, base):
    """Decode given digits in given base to number in base 10.

    digits: str -- string representation of number (in given base)
    base: int -- base of given number
    return: int -- integer representation of number (in base 10)"""
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base <= 36, f'base is out of range: {base}'
    # Horner's method: scanning left-to-right, each step shifts the running
    # value one position left (multiply by base) before adding the new digit.
    # int(ch, base=base) converts a single digit char, accepting 0-9a-zA-Z.
    value = 0
    for ch in digits:
        value = value * base + int(ch, base=base)
    return value
def encode(number, base):
    """Encode given number in base 10 to digits in given base.

    number: int -- non-negative integer representation of number (in base 10)
    base: int -- base to convert to (2 up to 36)
    return: str -- string representation of number (in given base),
        using digits 0-9 then a-z"""
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base <= 36, f'base is out of range: {base}'
    # Handle unsigned numbers only for now
    assert number >= 0, f'number is negative: {number}'
    # Bug fix: the original loop produced '' for number == 0; zero is
    # written as '0' in every base.
    if number == 0:
        return '0'
    # Digit alphabet: index n maps to the character for digit value n.
    digit_chars = string.digits + string.ascii_lowercase
    result = ''
    # Repeatedly peel off the least-significant digit and prepend it.
    while number > 0:
        number, remainder = divmod(number, base)
        result = digit_chars[remainder] + result
    return result
def convert(digits, base1, base2):
    """Convert given digits in base1 to digits in base2.

    digits: str -- string representation of number (in base1)
    base1: int -- base of given number
    base2: int -- base to convert to
    return: str -- string representation of number (in base2)"""
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base1 <= 36, f'base1 is out of range: {base1}'
    assert 2 <= base2 <= 36, f'base2 is out of range: {base2}'
    # Round-trip through base 10: decode to an int, then re-encode.
    as_base10 = decode(digits, base1)
    return encode(as_base10, base2)
def main():
    """Read command-line arguments and convert given digits between bases."""
    import sys
    argv = sys.argv[1:]  # drop the script file name
    # Guard clause: anything other than exactly three arguments prints usage.
    if len(argv) != 3:
        print(f'Usage: {sys.argv[0]} digits base1 base2')
        print('Converts digits from base1 to base2')
        return
    digits = argv[0]
    base1 = int(argv[1])
    base2 = int(argv[2])
    converted = convert(digits, base1, base2)
    print(f'{digits} in base {base1} is {converted} in base {base2}')


if __name__ == '__main__':
    main()
| 34.62 | 115 | 0.634027 |
import string
def decode(digits, base):
assert 2 <= base <= 36, f'base is out of range: {base}'
decimal_num = 0
digits = digits[::-1]
for i in range(len(digits)):
digit = int(digits[i], base=base)
decimal_num += digit * base ** i
return decimal_num
def encode(number, base):
assert 2 <= base <= 36, f'base is out of range: {base}'
assert number >= 0, f'number is negative: {number}'
result = ""
while number > 0:
remainder = number % base
number -= remainder
number = number // base
if remainder > 9:
remainder = string.ascii_lowercase[remainder-10]
result = str(remainder) + result
return result
def convert(digits, base1, base2):
assert 2 <= base1 <= 36, f'base1 is out of range: {base1}'
assert 2 <= base2 <= 36, f'base2 is out of range: {base2}'
decoded_base10 = decode(digits, base1)
result = encode(decoded_base10, base2)
return result
def main():
import sys
args = sys.argv[1:]
if len(args) == 3:
digits = args[0]
base1 = int(args[1])
base2 = int(args[2])
result = convert(digits, base1, base2)
print(f'{digits} in base {base1} is {result} in base {base2}')
else:
print(f'Usage: {sys.argv[0]} digits base1 base2')
print('Converts digits from base1 to base2')
if __name__ == '__main__':
main()
| true | true |
f71801af019ea004db2031fbf73a7074a38968cc | 6,665 | py | Python | eval_ke.py | naviocean/imgclsmob | f2993d3ce73a2f7ddba05da3891defb08547d504 | [
"MIT"
] | 2,649 | 2018-08-03T14:18:00.000Z | 2022-03-31T08:08:17.000Z | eval_ke.py | naviocean/imgclsmob | f2993d3ce73a2f7ddba05da3891defb08547d504 | [
"MIT"
] | 95 | 2018-08-13T01:46:03.000Z | 2022-03-13T08:38:14.000Z | eval_ke.py | naviocean/imgclsmob | f2993d3ce73a2f7ddba05da3891defb08547d504 | [
"MIT"
] | 549 | 2018-08-06T08:09:22.000Z | 2022-03-31T08:08:21.000Z | """
Script for evaluating trained model on Keras (validate/test).
"""
import argparse
import time
import logging
import keras
from common.logger_utils import initialize_logging
from keras_.utils import prepare_ke_context, prepare_model, get_data_rec, get_data_generator, backend_agnostic_compile
def parse_args():
    """
    Parse python script parameters.

    Returns:
    -------
    argparse.Namespace
        Parsed command-line arguments (argparse returns a Namespace, not
        the parser itself).
    """
    parser = argparse.ArgumentParser(
        description="Evaluate a model for image classification (Keras)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Dataset locations (MXNet RecordIO files + their indexes)
    parser.add_argument(
        "--rec-train",
        type=str,
        default="../imgclsmob_data/imagenet_rec/train.rec",
        help="the training data")
    parser.add_argument(
        "--rec-train-idx",
        type=str,
        default="../imgclsmob_data/imagenet_rec/train.idx",
        help="the index of training data")
    parser.add_argument(
        "--rec-val",
        type=str,
        default="../imgclsmob_data/imagenet_rec/val.rec",
        help="the validation data")
    parser.add_argument(
        "--rec-val-idx",
        type=str,
        default="../imgclsmob_data/imagenet_rec/val.idx",
        help="the index of validation data")
    # Model selection and checkpoint loading
    parser.add_argument(
        "--model",
        type=str,
        required=True,
        help="type of model to use. see model_provider for options")
    parser.add_argument(
        "--use-pretrained",
        action="store_true",
        help="enable using pretrained model from github repo")
    parser.add_argument(
        "--dtype",
        type=str,
        default="float32",
        help="data type for training")
    parser.add_argument(
        "--resume",
        type=str,
        default="",
        help="resume from previously saved parameters if not None")
    # Input preprocessing
    parser.add_argument(
        "--input-size",
        type=int,
        default=224,
        help="size of the input for model")
    parser.add_argument(
        "--resize-inv-factor",
        type=float,
        default=0.875,
        help="inverted ratio for input image crop")
    # Hardware / data-loading configuration
    parser.add_argument(
        "--num-gpus",
        type=int,
        default=0,
        help="number of gpus to use")
    parser.add_argument(
        "-j",
        "--num-data-workers",
        dest="num_workers",
        default=4,
        type=int,
        help="number of preprocessing workers")
    parser.add_argument(
        "--batch-size",
        type=int,
        default=512,
        help="training batch size per device (CPU/GPU)")
    # Logging configuration
    parser.add_argument(
        "--save-dir",
        type=str,
        default="",
        help="directory of saved models and log-files")
    parser.add_argument(
        "--logging-file-name",
        type=str,
        default="train.log",
        help="filename of training log")
    parser.add_argument(
        "--log-packages",
        type=str,
        default="keras, mxnet, tensorflow, tensorflow-gpu",
        help="list of python packages for logging")
    parser.add_argument(
        "--log-pip-packages",
        type=str,
        default="keras, keras-mxnet, mxnet, mxnet-cu110",
        help="list of pip packages for logging")
    args = parser.parse_args()
    return args
def test(net,
         val_gen,
         val_size,
         batch_size,
         num_gpus,
         calc_weight_count=False,
         extended_log=False):
    """
    Main test routine.

    Parameters:
    ----------
    net : Model
        Model.
    val_gen : generator
        Data loader.
    val_size : int
        Size of validation subset.
    batch_size : int
        Batch size.
    num_gpus : int
        Number of used GPUs.
    calc_weight_count : bool, default False
        Whether to calculate count of weights.
    extended_log : bool, default False
        Whether to log more precise accuracy values.
    """
    # Inference mode: disables training-only layers such as dropout/batchnorm
    # updates for the whole evaluation.
    keras.backend.set_learning_phase(0)
    # The SGD optimizer is required by compile() but unused during evaluation.
    backend_agnostic_compile(
        model=net,
        loss="categorical_crossentropy",
        optimizer=keras.optimizers.SGD(
            lr=0.01,
            momentum=0.0,
            decay=0.0,
            nesterov=False),
        metrics=[keras.metrics.categorical_accuracy, keras.metrics.top_k_categorical_accuracy],
        num_gpus=num_gpus)
    # net.summary()
    tic = time.time()
    score = net.evaluate_generator(
        generator=val_gen,
        steps=(val_size // batch_size),
        verbose=True)
    # score layout follows compile(): [loss, top-1 accuracy, top-5 accuracy]
    err_top1_val = 1.0 - score[1]
    err_top5_val = 1.0 - score[2]
    if calc_weight_count:
        weight_count = keras.utils.layer_utils.count_params(net.trainable_weights)
        logging.info("Model: {} trainable parameters".format(weight_count))
    if extended_log:
        logging.info("Test: err-top1={top1:.4f} ({top1})\terr-top5={top5:.4f} ({top5})".format(
            top1=err_top1_val, top5=err_top5_val))
    else:
        logging.info("Test: err-top1={top1:.4f}\terr-top5={top5:.4f}".format(
            top1=err_top1_val, top5=err_top5_val))
    logging.info("Time cost: {:.4f} sec".format(
        time.time() - tic))
def main():
    """
    Main body of script: parse arguments, build the model and validation
    pipeline, then run the evaluation.
    """
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    # prepare_ke_context may rescale the batch size for multi-GPU setups.
    batch_size = prepare_ke_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip())
    # Fall back to ImageNet-style defaults when the model does not declare
    # its own class count / input size.
    num_classes = net.classes if hasattr(net, "classes") else 1000
    input_image_size = net.in_size if hasattr(net, "in_size") else (args.input_size, args.input_size)

    train_data, val_data = get_data_rec(
        rec_train=args.rec_train,
        rec_train_idx=args.rec_train_idx,
        rec_val=args.rec_val,
        rec_val_idx=args.rec_val_idx,
        batch_size=batch_size,
        num_workers=args.num_workers,
        input_image_size=input_image_size,
        resize_inv_factor=args.resize_inv_factor,
        only_val=True)
    val_gen = get_data_generator(
        data_iterator=val_data,
        num_classes=num_classes)
    # Hard-coded validation-set size; matches the ImageNet-1k val split
    # implied by the default imagenet_rec paths -- adjust for other datasets.
    val_size = 50000

    # Evaluation needs either pretrained weights or a checkpoint to resume.
    assert (args.use_pretrained or args.resume.strip())
    test(
        net=net,
        val_gen=val_gen,
        val_size=val_size,
        batch_size=batch_size,
        num_gpus=args.num_gpus,
        calc_weight_count=True,
        extended_log=True)


if __name__ == "__main__":
    main()
import argparse
import time
import logging
import keras
from common.logger_utils import initialize_logging
from keras_.utils import prepare_ke_context, prepare_model, get_data_rec, get_data_generator, backend_agnostic_compile
def parse_args():
parser = argparse.ArgumentParser(
description="Evaluate a model for image classification (Keras)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"--rec-train",
type=str,
default="../imgclsmob_data/imagenet_rec/train.rec",
help="the training data")
parser.add_argument(
"--rec-train-idx",
type=str,
default="../imgclsmob_data/imagenet_rec/train.idx",
help="the index of training data")
parser.add_argument(
"--rec-val",
type=str,
default="../imgclsmob_data/imagenet_rec/val.rec",
help="the validation data")
parser.add_argument(
"--rec-val-idx",
type=str,
default="../imgclsmob_data/imagenet_rec/val.idx",
help="the index of validation data")
parser.add_argument(
"--model",
type=str,
required=True,
help="type of model to use. see model_provider for options")
parser.add_argument(
"--use-pretrained",
action="store_true",
help="enable using pretrained model from github repo")
parser.add_argument(
"--dtype",
type=str,
default="float32",
help="data type for training")
parser.add_argument(
"--resume",
type=str,
default="",
help="resume from previously saved parameters if not None")
parser.add_argument(
"--input-size",
type=int,
default=224,
help="size of the input for model")
parser.add_argument(
"--resize-inv-factor",
type=float,
default=0.875,
help="inverted ratio for input image crop")
parser.add_argument(
"--num-gpus",
type=int,
default=0,
help="number of gpus to use")
parser.add_argument(
"-j",
"--num-data-workers",
dest="num_workers",
default=4,
type=int,
help="number of preprocessing workers")
parser.add_argument(
"--batch-size",
type=int,
default=512,
help="training batch size per device (CPU/GPU)")
parser.add_argument(
"--save-dir",
type=str,
default="",
help="directory of saved models and log-files")
parser.add_argument(
"--logging-file-name",
type=str,
default="train.log",
help="filename of training log")
parser.add_argument(
"--log-packages",
type=str,
default="keras, mxnet, tensorflow, tensorflow-gpu",
help="list of python packages for logging")
parser.add_argument(
"--log-pip-packages",
type=str,
default="keras, keras-mxnet, mxnet, mxnet-cu110",
help="list of pip packages for logging")
args = parser.parse_args()
return args
def test(net,
val_gen,
val_size,
batch_size,
num_gpus,
calc_weight_count=False,
extended_log=False):
keras.backend.set_learning_phase(0)
backend_agnostic_compile(
model=net,
loss="categorical_crossentropy",
optimizer=keras.optimizers.SGD(
lr=0.01,
momentum=0.0,
decay=0.0,
nesterov=False),
metrics=[keras.metrics.categorical_accuracy, keras.metrics.top_k_categorical_accuracy],
num_gpus=num_gpus)
tic = time.time()
score = net.evaluate_generator(
generator=val_gen,
steps=(val_size // batch_size),
verbose=True)
err_top1_val = 1.0 - score[1]
err_top5_val = 1.0 - score[2]
if calc_weight_count:
weight_count = keras.utils.layer_utils.count_params(net.trainable_weights)
logging.info("Model: {} trainable parameters".format(weight_count))
if extended_log:
logging.info("Test: err-top1={top1:.4f} ({top1})\terr-top5={top5:.4f} ({top5})".format(
top1=err_top1_val, top5=err_top5_val))
else:
logging.info("Test: err-top1={top1:.4f}\terr-top5={top5:.4f}".format(
top1=err_top1_val, top5=err_top5_val))
logging.info("Time cost: {:.4f} sec".format(
time.time() - tic))
def main():
args = parse_args()
_, log_file_exist = initialize_logging(
logging_dir_path=args.save_dir,
logging_file_name=args.logging_file_name,
script_args=args,
log_packages=args.log_packages,
log_pip_packages=args.log_pip_packages)
batch_size = prepare_ke_context(
num_gpus=args.num_gpus,
batch_size=args.batch_size)
net = prepare_model(
model_name=args.model,
use_pretrained=args.use_pretrained,
pretrained_model_file_path=args.resume.strip())
num_classes = net.classes if hasattr(net, "classes") else 1000
input_image_size = net.in_size if hasattr(net, "in_size") else (args.input_size, args.input_size)
train_data, val_data = get_data_rec(
rec_train=args.rec_train,
rec_train_idx=args.rec_train_idx,
rec_val=args.rec_val,
rec_val_idx=args.rec_val_idx,
batch_size=batch_size,
num_workers=args.num_workers,
input_image_size=input_image_size,
resize_inv_factor=args.resize_inv_factor,
only_val=True)
val_gen = get_data_generator(
data_iterator=val_data,
num_classes=num_classes)
val_size = 50000
assert (args.use_pretrained or args.resume.strip())
test(
net=net,
val_gen=val_gen,
val_size=val_size,
batch_size=batch_size,
num_gpus=args.num_gpus,
calc_weight_count=True,
extended_log=True)
if __name__ == "__main__":
main()
| true | true |
f71802a5127f7c7fb60315e16f2f50fa2f4a7235 | 1,504 | py | Python | app/auth/views.py | Bchizi/Pitch-app | f52398d270e812eab70b66df9f7f80d579bab7d4 | [
"CNRI-Python",
"Info-ZIP"
] | null | null | null | app/auth/views.py | Bchizi/Pitch-app | f52398d270e812eab70b66df9f7f80d579bab7d4 | [
"CNRI-Python",
"Info-ZIP"
] | null | null | null | app/auth/views.py | Bchizi/Pitch-app | f52398d270e812eab70b66df9f7f80d579bab7d4 | [
"CNRI-Python",
"Info-ZIP"
] | null | null | null | from flask import render_template,redirect,url_for,flash,request
from . import auth
from flask_login import login_user,logout_user,login_required
from ..models import User
from .forms import LoginForm,RegistrationForm
from .. import db
from ..email import mail_message
@auth.route('/login',methods=['GET','POST'])
def login():
    """Log a user in.

    Renders the login form on GET; on a valid POST, verifies the email and
    password, then redirects to the requested page (or the index).
    """
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email = form.email.data).first()
        if user is not None and user.verify_password(form.password.data):
            login_user(user,form.remember.data)
            # NOTE(review): 'next' is taken from the query string unvalidated;
            # a potential open redirect -- consider restricting to local URLs.
            return redirect(request.args.get('next') or url_for('main.index'))
        flash('Invalid username or Password')
    title = "Login"
    return render_template('auth/login.html',form =form,title=title)
@auth.route('/register',methods = ["GET","POST"])
def register():
    """Register a new user account.

    Renders the registration form on GET; on a valid POST, persists the new
    User, sends a welcome email, and redirects to the login page.
    """
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(email = form.email.data, username = form.username.data,firstname= form.firstname.data,lastname= form.lastname.data,password = form.password.data)
        db.session.add(user)
        db.session.commit()
        mail_message("Welcome to one minute pitch","email/welcome_user",user.email,user=user)
        return redirect(url_for('auth.login'))
    title = "New Account"
    # Fix: `title` was assigned but never passed to the template (login()
    # does pass it), so the page title could never render.
    return render_template('auth/register.html',form =form,title=title)
@auth.route('/logout')
@login_required
def logout():
    """End the current session and redirect to the index page."""
    logout_user()
    return redirect(url_for("main.index"))
| 31.333333 | 165 | 0.696809 | from flask import render_template,redirect,url_for,flash,request
from . import auth
from flask_login import login_user,logout_user,login_required
from ..models import User
from .forms import LoginForm,RegistrationForm
from .. import db
from ..email import mail_message
@auth.route('/login',methods=['GET','POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email = form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user,form.remember.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or Password')
title = "Login"
return render_template('auth/login.html',form =form,title=title)
@auth.route('/register',methods = ["GET","POST"])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email = form.email.data, username = form.username.data,firstname= form.firstname.data,lastname= form.lastname.data,password = form.password.data)
db.session.add(user)
db.session.commit()
mail_message("Welcome to one minute pitch","email/welcome_user",user.email,user=user)
return redirect(url_for('auth.login'))
title = "New Account"
return render_template('auth/register.html',form =form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main.index"))
| true | true |
f71802e152bdc5a880b16e7aa88ec372a25c5854 | 193 | py | Python | mani_sales/mani_sales/doctype/linked_suppliers/linked_suppliers.py | Momscode-Technologies/mani_sales | e3c8de6b50367bfd15adadf38c658e89559e71ab | [
"MIT"
] | null | null | null | mani_sales/mani_sales/doctype/linked_suppliers/linked_suppliers.py | Momscode-Technologies/mani_sales | e3c8de6b50367bfd15adadf38c658e89559e71ab | [
"MIT"
] | null | null | null | mani_sales/mani_sales/doctype/linked_suppliers/linked_suppliers.py | Momscode-Technologies/mani_sales | e3c8de6b50367bfd15adadf38c658e89559e71ab | [
"MIT"
] | null | null | null | # Copyright (c) 2021, jan and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class LinkedSuppliers(Document):
pass
| 21.444444 | 49 | 0.792746 |
from frappe.model.document import Document
class LinkedSuppliers(Document):
pass
| true | true |
f718043592242ea890cc97835b3f6db1a9ca2b43 | 322 | py | Python | firmware/adafruit-circuitpython-bundle-5.x-mpy-20200915/examples/bd3491fs_simpletest.py | freeglow/microcontroller-cpy | 5adfda49da6eefaece81be2a2f26122d68736355 | [
"MIT"
] | null | null | null | firmware/adafruit-circuitpython-bundle-5.x-mpy-20200915/examples/bd3491fs_simpletest.py | freeglow/microcontroller-cpy | 5adfda49da6eefaece81be2a2f26122d68736355 | [
"MIT"
] | null | null | null | firmware/adafruit-circuitpython-bundle-5.x-mpy-20200915/examples/bd3491fs_simpletest.py | freeglow/microcontroller-cpy | 5adfda49da6eefaece81be2a2f26122d68736355 | [
"MIT"
] | null | null | null | import board
import busio
import adafruit_bd3491fs
# Wire up the BD3491FS audio processor over the board's default I2C bus.
i2c = busio.I2C(board.SCL, board.SDA)
bd3491fs = adafruit_bd3491fs.BD3491FS(i2c)
# Route input A through the chip with 20 dB of input gain.
bd3491fs.active_input = adafruit_bd3491fs.Input.A
bd3491fs.input_gain = adafruit_bd3491fs.Level.LEVEL_20DB
# No attenuation on either output channel.
bd3491fs.channel_1_attenuation = 0
bd3491fs.channel_2_attenuation = 0
| 26.833333 | 57 | 0.810559 | import board
import busio
import adafruit_bd3491fs
i2c = busio.I2C(board.SCL, board.SDA)
bd3491fs = adafruit_bd3491fs.BD3491FS(i2c)
bd3491fs.active_input = adafruit_bd3491fs.Input.A
bd3491fs.input_gain = adafruit_bd3491fs.Level.LEVEL_20DB
bd3491fs.channel_1_attenuation = 0
bd3491fs.channel_2_attenuation = 0
| true | true |
f71806245033ff31b7f8e029e27f81e487b11834 | 20,468 | py | Python | cadnano/views/pathview/tools/pathselection.py | sherwoodyao/cadnano2.5 | ce6ff019b88ee7728de947bd86b35861cf57848d | [
"BSD-3-Clause"
] | 69 | 2015-01-13T02:54:40.000Z | 2022-03-27T14:25:51.000Z | cadnano/views/pathview/tools/pathselection.py | scholer/cadnano2.5 | ce6ff019b88ee7728de947bd86b35861cf57848d | [
"BSD-3-Clause"
] | 127 | 2015-01-01T06:26:34.000Z | 2022-03-02T12:48:05.000Z | cadnano/views/pathview/tools/pathselection.py | scholer/cadnano2.5 | ce6ff019b88ee7728de947bd86b35861cf57848d | [
"BSD-3-Clause"
] | 48 | 2015-01-22T19:57:49.000Z | 2022-03-27T14:27:53.000Z | # -*- coding: utf-8 -*-
import logging
from math import floor
from PyQt5.QtCore import (
QPointF,
QRectF,
Qt
)
from PyQt5.QtGui import (
QPainterPath,
QKeyEvent,
QMouseEvent
)
from PyQt5.QtWidgets import (
QGraphicsItem,
QGraphicsItemGroup,
QGraphicsPathItem,
QGraphicsSceneMouseEvent,
)
from cadnano.gui.palette import getPenObj
from cadnano.views.pathview import pathstyles as styles
from cadnano.views.pathview import (
PathRootItemT,
)
from cadnano.cntypes import (
Vec2T,
DocT
)
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
class SelectionItemGroup(QGraphicsItemGroup):
    """Draggable group of selected path-view items.

    Items are queued via :meth:`pendToAdd`/:meth:`pendToRemove` and committed
    to both the view group and the model with
    :meth:`processPendingToAddList`, keeping the :class:`Document` selection
    in sync.  Dragging is constrained to a single axis chosen at
    construction time.

    Attributes:
        getR: maps an item-space point to the constrained axis value
        selectionbox: the box item drawn around the current selection
        translateR: translates the selection box along the constrained axis
        viewroot: path-view root item (also this group's QGraphicsItem parent)
    """

    def __init__(self, boxtype: QGraphicsItem,
                 constraint: str,
                 viewroot: PathRootItemT):
        """
        Args:
            boxtype: :class:`EndpointHandleSelectionBox` or
                :class:`VirtualHelixHandleSelectionBox` instance
            constraint: ``x`` or ``y``. Default to ``y`` (up and down)
            viewroot: view root item and object parent
        """
        super(SelectionItemGroup, self).__init__(viewroot)
        self.viewroot: PathRootItemT = viewroot
        self.setFiltersChildEvents(True)
        # LOOK at Qt Source for deprecated code to replace this behavior
        # self.setHandlesChildEvents(True) # commented out NC
        self.setFlag(QGraphicsItem.ItemIsSelectable)
        self.setFlag(QGraphicsItem.ItemIsFocusable)  # for keyPressEvents
        self.setFlag(QGraphicsItem.ItemHasNoContents)
        self._rect = QRectF()
        self._PEN = getPenObj(styles.BLUE_STROKE,
                              styles.PATH_SELECTBOX_STROKE_WIDTH)
        self.selectionbox = boxtype(self)
        self._drag_enable = False
        self._dragged = False
        # _r0 is the constrained-axis position at mouse-down; _r is the most
        # recent position seen while dragging.
        self._r0 = 0  # save original mousedown
        self._r = 0  # latest position for moving
        # self._lastKid = 0
        # this keeps track of mousePressEvents within the class
        # to aid in intelligently removing items from the group
        self._added_to_press_list = False
        self._pending_to_add_dict = {}
        # The drag constraint picks which selection-box accessors are used.
        if constraint == 'y':
            self.getR = self.selectionbox.getY
            self.translateR = self.selectionbox.translateY
        else:
            self.getR = self.selectionbox.getX
            self.translateR = self.selectionbox.translateX
        self._normal_select = True
        self.setZValue(styles.ZPATHSELECTION)
    # end def

    # def paint(self, painter, option, widget):
    #     painter.drawRect(self.boundingRect())
    # # end def

    def pendToAdd(self, item):
        """Queue *item* for addition on the next processPendingToAddList pass.

        Args:
            item: a selectable path-view item
        """
        self._pending_to_add_dict[item] = True
    # end def

    def isPending(self, item):
        """Return whether *item* is queued for addition to the selection.

        Args:
            item: a selectable path-view item

        Returns:
            bool: True if *item* is in the pending dict
        """
        return item in self._pending_to_add_dict
    # end def

    def document(self) -> DocT:
        """
        Returns:
            :class:`Document`
        """
        return self.viewroot.document()
    # end def

    def pendToRemove(self, item):
        """Remove *item* from the pending-addition queue, if present.

        Args:
            item: a selectable path-view item
        """
        if item in self._pending_to_add_dict:
            del self._pending_to_add_dict[item]
    # end def

    def setNormalSelect(self, bool_val: bool):
        """Enable or disable normal selection mode.

        Args:
            bool_val: True for normal selection behavior
        """
        self._normal_select = bool_val
    # end def

    def isNormalSelect(self) -> bool:
        """
        Returns:
            whether normal selection mode is active
        """
        return self._normal_select
    # end def

    def processPendingToAddList(self):
        """
        Adds to the local selection and the document if required
        """
        doc = self.document()
        p2add = self._pending_to_add_dict
        # logger.debug("processPendingToAddList")
        if len(p2add) > 0:
            plist = list(self._pending_to_add_dict.keys())
            for item in plist:
                if p2add[item]:
                    p2add[item] = False
                    # logger.debug("just checking1", item, item.group(), item.parentItem())
                    self.addToGroup(item)
                    item.modelSelect(doc)
            # end for
            # logger.debug('finished')
            self._pending_to_add_dict = {}
            doc.updateStrandSelection()
    # end def

    def selectionLock(self):
        """
        Returns:
            the group currently holding the view root's selection lock
            (may be None)
        """
        return self.viewroot.selectionLock()
    # end def

    def setSelectionLock(self, selection_group):
        """Hand the view root's selection lock to *selection_group*.

        Args:
            selection_group: a SelectionItemGroup, or None to release
        """
        self.viewroot.setSelectionLock(selection_group)
    # end def

    def keyPressEvent(self, event: QKeyEvent):
        """
        Must intercept invalid input events.  Delete/Backspace deletes the
        current selection; everything else falls through to Qt.

        Args:
            event: the key event
        """
        key = event.key()
        if key in [Qt.Key_Backspace, Qt.Key_Delete]:
            self.selectionbox.deleteSelection()
            self.clearSelection(False)
            return QGraphicsItemGroup.keyPressEvent(self, event)
        else:
            return QGraphicsItemGroup.keyPressEvent(self, event)
    # end def

    def mousePressEvent(self, event: QGraphicsSceneMouseEvent):
        """Handler for user mouse press.

        Left-button press begins a drag: the selection box is reset, shown,
        and this group is registered with the view's press list.

        Args:
            event: Contains item, scene, and screen
                coordinates of the event, and previous event.
        """
        # self.show()
        if event.button() != Qt.LeftButton:
            return QGraphicsItemGroup.mousePressEvent(self, event)
        else:
            self._drag_enable = True
            # required to get the itemChanged event to work
            # correctly for this
            self.setSelected(True)
            # self.selectionbox.resetTransform()
            self.selectionbox.resetPosition()
            self.selectionbox.refreshPath()
            # self.selectionbox.resetTransform()
            self.selectionbox.resetPosition()
            self.selectionbox.show()
            # for some reason we need to skip the first mouseMoveEvent
            self._dragged = False
            if self._added_to_press_list is False:
                self._added_to_press_list = True
                self.scene().views()[0].addToPressList(self)
            return QGraphicsItemGroup.mousePressEvent(self, event)
    # end def

    def mouseMoveEvent(self, event: QGraphicsSceneMouseEvent):
        """Track the drag along the constrained axis and move the box.

        Args:
            event: the move event; its scene position is mapped back into
                this group's coordinates
        """
        if self._drag_enable is True:
            # map the item to the scene coordinates
            # to help keep coordinates uniform
            rf = self.getR(self.mapFromScene(QPointF(event.scenePos())))
            # for some reason we need to skip the first mouseMoveEvent
            if self._dragged is False:
                self._dragged = True
                self._r0 = rf
            # end if
            else:
                delta = self.selectionbox.delta(rf, self._r0)
                self.translateR(delta)
                # logger.debug('mouse move path selectionbox', delta, rf, self._r0)
            # end else
            self._r = rf
        # end if
        else:
            QGraphicsItemGroup.mouseMoveEvent(self, event)
        # end else
    # end def

    def customMouseRelease(self, event: QMouseEvent):
        """Finish the drag: hide the box and apply the move to the selection.

        Args:
            event: the release event; keyboard modifiers are forwarded to
                the selection box's processSelectedItems
        """
        self.selectionbox.setParentItem(self.viewroot)
        self.selectionbox.hide()
        self.selectionbox.resetTransform()
        self._drag_enable = False
        # Only apply the move if the drag actually went somewhere.
        if not (self._r0 == 0 and self._r == 0):
            modifiers = event.modifiers()
            self.selectionbox.processSelectedItems(self._r0, self._r, modifiers)
        # end if
        self._r0 = 0  # reset
        self._r = 0  # reset
        self.setFocus()  # needed to get keyPresses post a move
        self._added_to_press_list = False
    # end def

    def resetSelection(self):
        """Clear the selection, release the lock, and re-parent to the root."""
        self._pending_to_add_dict = {}
        self._added_to_press_list = False
        self.clearSelection(False)
        self.setSelectionLock(None)
        self.selectionbox.setParentItem(self.viewroot)
        self.setParentItem(self.viewroot)
    # end def

    def clearSelection(self, value):
        """value is for keyPressEvents

        Arguments:
            value (QVariant): resolves in Python as an integer; falsy means
                fully tear down the selection, truthy keeps focus for
                delete keyPressEvents
        """
        if value == False:  # noqa
            self.selectionbox.hide()
            self.selectionbox.resetPosition()
            self.removeSelectedItems()
            self.viewroot.setSelectionLock(None)
            self.clearFocus()  # this is to disable delete keyPressEvents
            self.prepareGeometryChange()
            self._rect.setWidth(0)
            # self._rect = QRectF()
        # end if
        else:
            self.setFocus()  # this is to get delete keyPressEvents
        self.update(self.boundingRect())
    # end def

    def itemChange(self, change, value):
        """React to Qt item-state changes: deselection clears the selection,
        and the first child addition registers this group with the view's
        press list.

        Arguments:
            change (GraphicsItemChange): see http://doc.qt.io/qt-5/qgraphicsitem.html#GraphicsItemChange-enum
            value (QVariant): resolves in Python as an integer
        """
        # logger.debug("ps itemChange")
        if change == QGraphicsItem.ItemSelectedChange:
            # logger.debug("isc", value)
            if value == False:  # noqa
                self.clearSelection(False)
                return False
            else:
                return True
        elif change == QGraphicsItem.ItemChildAddedChange:
            # logger.debug("icac")
            if self._added_to_press_list is False:
                # logger.debug("kid added")
                self.setFocus()  # this is to get delete keyPressEvents
                self.selectionbox.boxParent()
                # self.setParentItem(self.selectionbox.boxParent())
                self._added_to_press_list = True
                self.scene().views()[0].addToPressList(self)
            return
        return QGraphicsItemGroup.itemChange(self, change, value)
    # end def

    def removeChild(self, child):
        """
        remove only the child and ask it to
        restore its original parent

        Args:
            child: the child item to remove and deselect in the model
        """
        doc = self.document()
        self.removeFromGroup(child)
        child.modelDeselect(doc)
    # end def

    def removeSelectedItems(self):
        """Remove every child from the group and deselect it in the model."""
        doc = self.document()
        for item in self.childItems():
            self.removeFromGroup(item)
            item.modelDeselect(doc)
        # end for
        doc.updateStrandSelection()
    # end def

    def setBoundingRect(self, rect):
        """Set the cached bounding rectangle.

        Args:
            rect (QRectF): new bounding rectangle
        """
        self.prepareGeometryChange()
        self._rect = rect
    # end def

    def boundingRect(self):
        """
        Returns:
            QRectF: the cached bounding rectangle
        """
        return self._rect
# end class
class VirtualHelixHandleSelectionBox(QGraphicsPathItem):
    """Rounded-rectangle outline drawn around a group of selected
    virtual-helix handle items while they are dragged vertically.
    """
    _HELIX_HEIGHT = styles.PATH_HELIX_HEIGHT + styles.PATH_HELIX_PADDING
    _RADIUS = styles.VIRTUALHELIXHANDLEITEM_RADIUS
    _PEN_WIDTH = styles.SELECTIONBOX_PEN_WIDTH
    _BOX_PEN = getPenObj(styles.BLUE_STROKE, _PEN_WIDTH)

    def __init__(self, item_group: SelectionItemGroup):
        """The ``item_group.parentItem()`` is expected to be a partItem.

        Args:
            item_group: the selection group this box outlines
        """
        super(VirtualHelixHandleSelectionBox, self).__init__(item_group.parentItem())
        self._item_group = item_group
        self._rect = item_group.boundingRect()
        self.hide()
        self.setPen(self._BOX_PEN)
        self.setZValue(styles.ZPATHSELECTION)
        self._bounds = None
        self._pos0 = QPointF()
    # end def

    def getY(self, pos):
        """Return the scene-space y coordinate of ``pos`` (item coords).

        Args:
            pos (TYPE): a point in item coordinates
        """
        return self._item_group.mapToScene(QPointF(pos)).y()
    # end def

    def translateY(self, delta):
        """Move the box vertically to ``delta``.

        Args:
            delta (TYPE): vertical position
        """
        self.setY(delta)
    # end def

    def refreshPath(self):
        """Rebuild the outline path and remember the current position."""
        self.prepareGeometryChange()
        self.setPath(self.painterPath())
        self._pos0 = self.pos()
    # end def

    def painterPath(self):
        """Build a rounded rectangle around the grouped children with a
        small tab sticking out of the right edge.
        """
        group = self._item_group
        # the childrenBoundingRect is necessary to get this to work
        rect = self.mapRectFromItem(group, group.childrenBoundingRect())
        corner = self._RADIUS
        path = QPainterPath()
        path.addRoundedRect(rect, corner, corner)
        mid_y = rect.center().y()
        path.moveTo(rect.right(), mid_y)
        path.lineTo(rect.right() + corner / 2, mid_y)
        return path
    # end def

    def processSelectedItems(self, r_start, r_end, modifiers):
        """Reorder the selected helices according to the dragged distance.

        Args:
            r_start (TYPE): drag start y
            r_end (TYPE): drag end y
            modifiers (TYPE): keyboard modifiers (unused)
        """
        margin = styles.VIRTUALHELIXHANDLEITEM_RADIUS
        delta = r_end - r_start
        mid_height = self.boundingRect().height() / 2 - margin
        helix_height = self._HELIX_HEIGHT

        if abs(delta) < mid_height:  # drag too short for a reorder
            return

        if delta > 0:  # moved down, delta is positive
            index_delta = int((delta - mid_height) / helix_height)
        else:          # moved up, delta is negative
            index_delta = int((delta + mid_height) / helix_height)

        # sort on y to determine the extremes of the selection group
        ordered = sorted(self._item_group.childItems(), key=lambda vhhi: vhhi.y())
        part_item = ordered[0].partItem()
        part_item.reorderHelices([item.idNum() for item in ordered],
                                 index_delta)
        part_item.updateStatusBar("")
    # end def

    def boxParent(self):
        """Reparent this box to the first child's partItem and return it."""
        new_parent = self._item_group.childItems()[0].partItem()
        self.setParentItem(new_parent)
        return new_parent
    # end def

    def deleteSelection(self):
        """Remove the selected virtual helices from their parts.

        Operates outside the document selection because virtual helices
        are not actually selected in the model.
        """
        handles = self._item_group.childItems()
        undo_stack = self._item_group.document().undoStack()
        undo_stack.beginMacro("delete Virtual Helices")
        for vhhi in handles:
            vhhi.part().removeVirtualHelix(vhhi.idNum())
        undo_stack.endMacro()
    # end def

    def bounds(self):
        """Return the cached bounds (``None`` until set)."""
        return self._bounds
    # end def

    def delta(self, yf, y0):
        """Return the raw vertical displacement ``yf - y0``.

        Args:
            yf (TYPE): final y
            y0 (TYPE): initial y
        """
        return yf - y0
    # end def

    def resetPosition(self):
        """Snap the box back to the position captured by refreshPath()."""
        self.setPos(self._pos0)
    # end def
# end class
class EndpointHandleSelectionBox(QGraphicsPathItem):
    """Outline drawn around selected strand endpoint handles while they
    are dragged horizontally; distances are measured in base widths.
    """
    _PEN_WIDTH = styles.SELECTIONBOX_PEN_WIDTH
    _BOX_PEN = getPenObj(styles.SELECTED_COLOR, _PEN_WIDTH)
    _BASE_WIDTH = styles.PATH_BASE_WIDTH

    def __init__(self, item_group: SelectionItemGroup):
        """The item_group.parentItem() is expected to be a partItem

        Args:
            item_group: the selection group this box outlines
        """
        super(EndpointHandleSelectionBox, self).__init__(item_group.parentItem())
        self._item_group = item_group
        self._rect = item_group.boundingRect()
        self.hide()
        self.setPen(self._BOX_PEN)
        self.setZValue(styles.ZPATHSELECTION)
        self._bounds = (0, 0)  # (low, high) clamp for delta(), in bases
        self._pos0 = QPointF()
    # end def

    def getX(self, pos: QPointF) -> float:
        """
        Args:
            pos: a point in item coordinates

        Returns:
            ``x`` position
        """
        return pos.x()
    # end def

    def translateX(self, delta: float):
        """Move the box horizontally by ``delta`` bases, echoing the signed
        distance in the part item's status bar.

        Args:
            delta: displacement in bases
        """
        children = self._item_group.childItems()
        if children:
            p_i = children[0].partItem()
            # Fix: original bound the message to the name ``str``, shadowing
            # the builtin within this scope
            status_msg = "+%d" % delta if delta >= 0 else "%d" % delta
            p_i.updateStatusBar(status_msg)
        self.setX(self._BASE_WIDTH * delta)
    # end def

    def resetPosition(self):
        """Snap the box back to the position captured by refreshPath()."""
        self.setPos(self._pos0)

    def delta(self, xf: float, x0: float) -> float:
        """Return the drag displacement in whole bases, clamped to the
        current selection bounds.

        Args:
            xf: final x position
            x0: initial x position

        Returns:
            change distance
        """
        bound_l, bound_h = self._bounds
        delta = int(floor((xf - x0) / self._BASE_WIDTH))
        if delta > 0 and delta > bound_h:
            delta = bound_h
        elif delta < 0 and abs(delta) > bound_l:
            delta = -bound_l
        return delta

    def refreshPath(self):
        """Re-query the document's selection bounds, rebuild the outline
        path and remember the current position.
        """
        temp_low, temp_high = self._item_group.viewroot.document().getSelectionBounds()
        self._bounds = (temp_low, temp_high)
        self.prepareGeometryChange()
        self.setPath(self.painterPath())
        self._pos0 = self.pos()
    # end def

    def painterPath(self) -> QPainterPath:
        """Build the rectangular outline around the grouped children and
        cache their bounding rect on the item group.

        Returns:
            :class:`QPainterPath`
        """
        bw = self._BASE_WIDTH
        i_g = self._item_group
        # the childrenBoundingRect is necessary to get this to work
        rect_IG = i_g.childrenBoundingRect()
        rect = self.mapRectFromItem(i_g, rect_IG)
        if rect.width() < bw:
            # widen a too-narrow box so it remains visible/grabbable
            rect.adjust(-bw / 4, 0, bw / 2, 0)
        path = QPainterPath()
        path.addRect(rect)
        self._item_group.setBoundingRect(rect_IG)
        return path
    # end def

    def processSelectedItems(self, r_start: float, r_end: float, modifiers):
        """Resize the document's strand selection by the dragged distance.

        Args:
            r_start: drag start position
            r_end: drag end position
            modifiers (TYPE): keyboard modifiers held during the drag (unused)
        """
        delta = self.delta(r_end, r_start)
        # TODO reenable do_maximize (previously toggled by Qt.AltModifier)?
        self._item_group.viewroot.document().resizeSelection(delta)
    # end def

    def deleteSelection(self):
        """Delete the currently selected strand fragments via the document."""
        self._item_group.document().deleteStrandSelection()

    def boxParent(self) -> QGraphicsItem:
        """Reparent this box to the first child's part-item proxy.

        Returns:
            :class:`ProxyParentItem`
        """
        temp = self._item_group.childItems()[0].partItem().proxy()
        self.setParentItem(temp)
        return temp
    # end def

    def bounds(self) -> Vec2T:
        """
        Returns:
            the (low, high) bounds last fetched by refreshPath()
        """
        return self._bounds
    # end def
# end class
| 28.467316 | 109 | 0.569572 |
import logging
from math import floor
from PyQt5.QtCore import (
QPointF,
QRectF,
Qt
)
from PyQt5.QtGui import (
QPainterPath,
QKeyEvent,
QMouseEvent
)
from PyQt5.QtWidgets import (
QGraphicsItem,
QGraphicsItemGroup,
QGraphicsPathItem,
QGraphicsSceneMouseEvent,
)
from cadnano.gui.palette import getPenObj
from cadnano.views.pathview import pathstyles as styles
from cadnano.views.pathview import (
PathRootItemT,
)
from cadnano.cntypes import (
Vec2T,
DocT
)
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
class SelectionItemGroup(QGraphicsItemGroup):
def __init__(self, boxtype: QGraphicsItem,
constraint: str,
viewroot: PathRootItemT):
super(SelectionItemGroup, self).__init__(viewroot)
self.viewroot: PathRootItemT = viewroot
self.setFiltersChildEvents(True)
Flag(QGraphicsItem.ItemIsSelectable)
self.setFlag(QGraphicsItem.ItemIsFocusable)
self.setFlag(QGraphicsItem.ItemHasNoContents)
self._rect = QRectF()
self._PEN = getPenObj(styles.BLUE_STROKE,
styles.PATH_SELECTBOX_STROKE_WIDTH)
self.selectionbox = boxtype(self)
self._drag_enable = False
self._dragged = False
self._r0 = 0
self._r = 0
self._added_to_press_list = False
self._pending_to_add_dict = {}
if constraint == 'y':
self.getR = self.selectionbox.getY
self.translateR = self.selectionbox.translateY
else:
self.getR = self.selectionbox.getX
self.translateR = self.selectionbox.translateX
self._normal_select = True
self.setZValue(styles.ZPATHSELECTION)
pendToAdd(self, item):
self._pending_to_add_dict[item] = True
def isPending(self, item):
return item in self._pending_to_add_dict
def document(self) -> DocT:
return self.viewroot.document()
def pendToRemove(self, item):
if item in self._pending_to_add_dict:
del self._pending_to_add_dict[item]
def setNormalSelect(self, bool_val: bool):
self._normal_select = bool_val
def isNormalSelect(self) -> bool:
return self._normal_select
def processPendingToAddList(self):
doc = self.document()
p2add = self._pending_to_add_dict
if len(p2add) > 0:
plist = list(self._pending_to_add_dict.keys())
for item in plist:
if p2add[item]:
p2add[item] = False
self.addToGroup(item)
item.modelSelect(doc)
self._pending_to_add_dict = {}
doc.updateStrandSelection()
def selectionLock(self):
return self.viewroot.selectionLock()
def setSelectionLock(self, selection_group):
self.viewroot.setSelectionLock(selection_group)
def keyPressEvent(self, event: QKeyEvent):
key = event.key()
if key in [Qt.Key_Backspace, Qt.Key_Delete]:
self.selectionbox.deleteSelection()
self.clearSelection(False)
return QGraphicsItemGroup.keyPressEvent(self, event)
else:
return QGraphicsItemGroup.keyPressEvent(self, event)
def mousePressEvent(self, event: QGraphicsSceneMouseEvent):
if event.button() != Qt.LeftButton:
return QGraphicsItemGroup.mousePressEvent(self, event)
else:
self._drag_enable = True
self.setSelected(True)
self.selectionbox.resetPosition()
self.selectionbox.refreshPath()
self.selectionbox.resetPosition()
self.selectionbox.show()
self._dragged = False
if self._added_to_press_list is False:
self._added_to_press_list = True
self.scene().views()[0].addToPressList(self)
return QGraphicsItemGroup.mousePressEvent(self, event)
def mouseMoveEvent(self, event: QGraphicsSceneMouseEvent):
if self._drag_enable is True:
rf = self.getR(self.mapFromScene(QPointF(event.scenePos())))
if self._dragged is False:
self._dragged = True
self._r0 = rf
else:
delta = self.selectionbox.delta(rf, self._r0)
self.translateR(delta)
self._r = rf
else:
QGraphicsItemGroup.mouseMoveEvent(self, event)
def customMouseRelease(self, event: QMouseEvent):
self.selectionbox.setParentItem(self.viewroot)
self.selectionbox.hide()
self.selectionbox.resetTransform()
self._drag_enable = False
if not (self._r0 == 0 and self._r == 0):
modifiers = event.modifiers()
self.selectionbox.processSelectedItems(self._r0, self._r, modifiers)
self._r0 = 0
self._r = 0
self.setFocus()
self._added_to_press_list = False
def resetSelection(self):
self._pending_to_add_dict = {}
self._added_to_press_list = False
self.clearSelection(False)
self.setSelectionLock(None)
self.selectionbox.setParentItem(self.viewroot)
self.setParentItem(self.viewroot)
def clearSelection(self, value):
if value == False:
self.selectionbox.hide()
self.selectionbox.resetPosition()
self.removeSelectedItems()
self.viewroot.setSelectionLock(None)
self.clearFocus()
self.prepareGeometryChange()
self._rect.setWidth(0)
else:
self.setFocus()
self.update(self.boundingRect())
def itemChange(self, change, value):
if change == QGraphicsItem.ItemSelectedChange:
if value == False:
self.clearSelection(False)
return False
else:
return True
elif change == QGraphicsItem.ItemChildAddedChange:
if self._added_to_press_list is False:
self.setFocus()
self.selectionbox.boxParent()
self._added_to_press_list = True
self.scene().views()[0].addToPressList(self)
return
return QGraphicsItemGroup.itemChange(self, change, value)
def removeChild(self, child):
doc = self.document()
self.removeFromGroup(child)
child.modelDeselect(doc)
def removeSelectedItems(self):
doc = self.document()
for item in self.childItems():
self.removeFromGroup(item)
item.modelDeselect(doc)
doc.updateStrandSelection()
def setBoundingRect(self, rect):
self.prepareGeometryChange()
self._rect = rect
def boundingRect(self):
return self._rect
class VirtualHelixHandleSelectionBox(QGraphicsPathItem):
_HELIX_HEIGHT = styles.PATH_HELIX_HEIGHT + styles.PATH_HELIX_PADDING
_RADIUS = styles.VIRTUALHELIXHANDLEITEM_RADIUS
_PEN_WIDTH = styles.SELECTIONBOX_PEN_WIDTH
_BOX_PEN = getPenObj(styles.BLUE_STROKE, _PEN_WIDTH)
def __init__(self, item_group: SelectionItemGroup):
super(VirtualHelixHandleSelectionBox, self).__init__(item_group.parentItem())
self._item_group = item_group
self._rect = item_group.boundingRect()
self.hide()
self.setPen(self._BOX_PEN)
self.setZValue(styles.ZPATHSELECTION)
self._bounds = None
self._pos0 = QPointF()
def getY(self, pos):
pos = self._item_group.mapToScene(QPointF(pos))
return pos.y()
def translateY(self, delta):
self.setY(delta)
def refreshPath(self):
self.prepareGeometryChange()
self.setPath(self.painterPath())
self._pos0 = self.pos()
def painterPath(self):
i_g = self._item_group
rect = self.mapRectFromItem(i_g, i_g.childrenBoundingRect())
radius = self._RADIUS
path = QPainterPath()
path.addRoundedRect(rect, radius, radius)
path.moveTo(rect.right(), rect.center().y())
path.lineTo(rect.right() + radius / 2, rect.center().y())
return path
def processSelectedItems(self, r_start, r_end, modifiers):
margin = styles.VIRTUALHELIXHANDLEITEM_RADIUS
delta = (r_end - r_start)
mid_height = (self.boundingRect().height()) / 2 - margin
helix_height = self._HELIX_HEIGHT
if abs(delta) < mid_height:
return
if delta > 0:
indexDelta = int((delta - mid_height) / helix_height)
else:
indexDelta = int((delta + mid_height) / helix_height)
items = sorted(self._item_group.childItems(), key=lambda vhhi: vhhi.y())
part_item = items[0].partItem()
part_item.reorderHelices([item.idNum() for item in items],
indexDelta)
part_item.updateStatusBar("")
def boxParent(self):
temp = self._item_group.childItems()[0].partItem()
self.setParentItem(temp)
return temp
def deleteSelection(self):
vh_handle_items = self._item_group.childItems()
u_s = self._item_group.document().undoStack()
u_s.beginMacro("delete Virtual Helices")
for vhhi in vh_handle_items:
part = vhhi.part()
part.removeVirtualHelix(vhhi.idNum())
u_s.endMacro()
def bounds(self):
return self._bounds
def delta(self, yf, y0):
return yf - y0
def resetPosition(self):
self.setPos(self._pos0)
class EndpointHandleSelectionBox(QGraphicsPathItem):
_PEN_WIDTH = styles.SELECTIONBOX_PEN_WIDTH
_BOX_PEN = getPenObj(styles.SELECTED_COLOR, _PEN_WIDTH)
_BASE_WIDTH = styles.PATH_BASE_WIDTH
def __init__(self, item_group: SelectionItemGroup):
super(EndpointHandleSelectionBox, self).__init__(item_group.parentItem())
self._item_group = item_group
self._rect = item_group.boundingRect()
self.hide()
self.setPen(self._BOX_PEN)
self.setZValue(styles.ZPATHSELECTION)
self._bounds = (0, 0)
self._pos0 = QPointF()
def getX(self, pos: QPointF) -> float:
return pos.x()
def translateX(self, delta: float):
children = self._item_group.childItems()
if children:
p_i = children[0].partItem()
str = "+%d" % delta if delta >= 0 else "%d" % delta
p_i.updateStatusBar(str)
self.setX(self._BASE_WIDTH * delta)
def resetPosition(self):
self.setPos(self._pos0)
def delta(self, xf: float, x0: float) -> float:
bound_l, bound_h = self._bounds
delta = int(floor((xf - x0) / self._BASE_WIDTH))
if delta > 0 and delta > bound_h:
delta = bound_h
elif delta < 0 and abs(delta) > bound_l:
delta = -bound_l
return delta
def refreshPath(self):
temp_low, temp_high = self._item_group.viewroot.document().getSelectionBounds()
self._bounds = (temp_low, temp_high)
self.prepareGeometryChange()
self.setPath(self.painterPath())
self._pos0 = self.pos()
def painterPath(self) -> QPainterPath:
bw = self._BASE_WIDTH
i_g = self._item_group
rect_IG = i_g.childrenBoundingRect()
rect = self.mapRectFromItem(i_g, rect_IG)
if rect.width() < bw:
rect.adjust(-bw / 4, 0, bw / 2, 0)
path = QPainterPath()
path.addRect(rect)
self._item_group.setBoundingRect(rect_IG)
return path
def processSelectedItems(self, r_start: float, r_end: float, modifiers):
delta = self.delta(r_end, r_start)
self._item_group.viewroot.document().resizeSelection(delta)
def deleteSelection(self):
self._item_group.document().deleteStrandSelection()
def boxParent(self) -> QGraphicsItem:
temp = self._item_group.childItems()[0].partItem().proxy()
self.setParentItem(temp)
return temp
def bounds(self) -> Vec2T:
return self._bounds
| true | true |
f7180731325d42a74cf0349d5377f43a897a9155 | 12,937 | py | Python | lama/elastix/invert_transforms.py | MiaRatkovic/LAMA | 3ccfed0864001c8c270861e23cc81bc43d7d25c9 | [
"Apache-2.0"
] | 6 | 2016-08-15T22:07:02.000Z | 2022-02-17T04:22:58.000Z | lama/elastix/invert_transforms.py | MiaRatkovic/LAMA | 3ccfed0864001c8c270861e23cc81bc43d7d25c9 | [
"Apache-2.0"
] | 25 | 2019-12-05T02:02:20.000Z | 2021-09-08T01:39:17.000Z | lama/elastix/invert_transforms.py | MiaRatkovic/LAMA | 3ccfed0864001c8c270861e23cc81bc43d7d25c9 | [
"Apache-2.0"
] | 5 | 2019-12-05T00:15:29.000Z | 2021-07-06T05:24:54.000Z | from pathlib import Path
import tempfile
import os
import subprocess
from collections import defaultdict
from multiprocessing import Pool
from os.path import join, abspath, isfile
from typing import Union, List, Dict
from logzero import logger as logging
import yaml
from lama import common
from lama.common import cfg_load
from lama.registration_pipeline.validate_config import LamaConfig
from lama.elastix import (ELX_TRANSFORM_NAME, ELX_PARAM_PREFIX, PROPAGATE_LABEL_TRANFORM,
PROPAGATE_IMAGE_TRANSFORM, PROPAGATE_CONFIG, RESOLUTION_IMGS_DIR, IMG_PYRAMID_DIR)
# Elastix parameter overrides applied when inverting transforms for *label*
# volumes: zero-order (nearest-neighbour) interpolation and integer pixel
# types so that discrete label values are never blended.
LABEL_REPLACEMENTS = {
    'FinalBSplineInterpolationOrder': '0',
    'FixedInternalImagePixelType': 'short',
    'MovingInternalImagePixelType': 'short',
    'ResultImagePixelType': 'unsigned char',
    'WriteTransformParametersEachResolution': 'false',
    'WriteResultImageAfterEachResolution': 'false'
}

# Overrides applied when inverting transforms for *intensity image* volumes:
# cubic B-spline interpolation with float pixel types.
IMAGE_REPLACEMENTS = {
    'FinalBSplineInterpolationOrder': '3',
    'FixedInternalImagePixelType': 'float',
    'MovingInternalImagePixelType': 'float',
    'ResultImagePixelType': 'float',
    'WriteTransformParametersEachResolution': 'false',
    'WriteResultImageAfterEachResolution': 'false'
}
def batch_invert_transform_parameters(config: Union[Path, LamaConfig],
                                      clobber=True, new_log: bool = False):
    """
    Create new elastix TransformParameter files that can then be used by transformix to invert labelmaps, stats etc

    Parameters
    ----------
    config
        path to original reg pipeline config file, or an already-loaded LamaConfig
    clobber
        if True overwrite inverted parameters present
    new_log:
        Whether to create a new log file. If called from another module, logging may happen there
    """
    common.test_installation('elastix')

    if isinstance(config, (Path, str)):
        config = LamaConfig(config)

    threads = str(config['threads'])

    if new_log:
        common.init_logging(config / 'invert_transforms.log')

    reg_dirs = get_reg_dirs(config)

    # Get the image basenames from the first stage registration folder (usually rigid),
    # ignoring images in non-relevant folders that may be present
    volume_names = [x.stem for x in common.get_file_paths(reg_dirs[0], ignore_folders=[RESOLUTION_IMGS_DIR, IMG_PYRAMID_DIR])]

    inv_outdir = config.mkdir('inverted_transforms')

    stages_to_invert = defaultdict(list)

    jobs: List[Dict] = []

    reg_stage_dir: Path

    for i, vol_id in enumerate(volume_names):

        for reg_stage_dir in reg_dirs:

            if not reg_stage_dir.is_dir():
                logging.error('cannot find {}'.format(reg_stage_dir))
                raise FileNotFoundError(f'Cannot find registration dir {reg_stage_dir}')

            inv_stage_dir = inv_outdir / reg_stage_dir.name

            specimen_stage_reg_dir = reg_stage_dir / vol_id
            specimen_stage_inversion_dir = inv_stage_dir / vol_id

            transform_file = common.getfile_startswith(specimen_stage_reg_dir, ELX_TRANSFORM_NAME)
            parameter_file = common.getfile_startswith(reg_stage_dir, ELX_PARAM_PREFIX)

            # Create the folder to put the specimen inversion parameter files in.
            inv_stage_dir.mkdir(exist_ok=True)

            # Add the stage to the inversion order config (in reverse order), if not already.
            if reg_stage_dir.name not in stages_to_invert['label_propagation_order']:
                stages_to_invert['label_propagation_order'].insert(0, reg_stage_dir.name)

            if clobber:
                common.mkdir_force(specimen_stage_inversion_dir)  # Overwrite any inversion file that exist for a single specimen

            # Each registration directory contains a metadata file, which contains the relative path to the fixed volume
            reg_metadata = cfg_load(specimen_stage_reg_dir / common.INDV_REG_METADATA)
            fixed_volume = (specimen_stage_reg_dir / reg_metadata['fixed_vol']).resolve()

            # Invert the Transform parameters with options for normal image inversion
            job = {
                'specimen_stage_inversion_dir': specimen_stage_inversion_dir,
                'parameter_file': abspath(parameter_file),
                'transform_file': transform_file,
                'fixed_volume': fixed_volume,
                'param_file_output_name': 'inversion_parameters.txt',
                'image_replacements': IMAGE_REPLACEMENTS,
                'label_replacements': LABEL_REPLACEMENTS,
                'image_transform_file': PROPAGATE_IMAGE_TRANSFORM,
                'label_transform_file': PROPAGATE_LABEL_TRANFORM,
                'clobber': clobber,
                'threads': threads
            }

            jobs.append(job)

    # By putting each inversion job (a single job per registration stage) we can speed things up a bit.
    # If we can get multithreaded inversion in elastix we can remove this python multithreading.
    # Fix: use the pool as a context manager so workers are always cleaned up;
    # the original never closed/joined the pool on the normal path.
    with Pool(8) as pool:
        try:
            pool.map(_invert_transform_parameters, jobs)

        except KeyboardInterrupt:
            print('terminating inversion')
            pool.terminate()
            pool.join()

    # TODO: Should we replace the need for this invert.yaml?
    # NOTE: relies on reg_stage_dir still holding the *last* stage from the loop above
    reg_dir = Path(os.path.relpath(reg_stage_dir, inv_outdir))
    stages_to_invert['registration_directory'] = str(reg_dir)  # Needed so label propagation can find the reg dirs
    # Create a yaml config file so that inversions can be run seperatley
    invert_config = config['inverted_transforms'] / PROPAGATE_CONFIG

    with open(invert_config, 'w') as yf:
        yf.write(yaml.dump(dict(stages_to_invert), default_flow_style=False))
def _invert_transform_parameters(args: Dict):
    """
    Generate a single inverted elastix transform parameter file. This can then be used to invert labels, masks etc.

    If any of the steps fail, return, as subsequent steps will also fail. The logging of failures is handled
    within each step function.

    Args:
        args: job description built by batch_invert_transform_parameters; keys include
            'specimen_stage_inversion_dir', 'parameter_file', 'transform_file',
            'fixed_volume', 'param_file_output_name', replacement maps, output file
            names, 'clobber' and 'threads'
    """
    # If we have both the image and label inverted transforms, don't do anything if clobber is False
    clobber = args['clobber']
    threads = args['threads']

    image_transform_param_path = abspath(join(args['specimen_stage_inversion_dir'], args['image_transform_file']))
    label_transform_param_path = abspath(join(args['specimen_stage_inversion_dir'], args['label_transform_file']))
    if not clobber and isfile(label_transform_param_path) and isfile(image_transform_param_path):
        # Fix: the original message contained an unfilled '{}' placeholder
        logging.info('skipping %s as clobber is False and inverted parameter files exist',
                     args['specimen_stage_inversion_dir'])
        return

    # Modify the elastix registration input parameter file to enable inversion (Change metric and don't write image results)
    inversion_params = abspath(join(args['specimen_stage_inversion_dir'], args['param_file_output_name'])) # The elastix registration parameters used for inversion
    make_elastix_inversion_parameter_file(abspath(args['parameter_file']), inversion_params, args['image_replacements'])

    # Do the inversion, making the inverted TransformParameters file
    fixed_vol = args['fixed_volume']
    forward_tform_file = abspath(args['transform_file'])
    invert_param_dir = args['specimen_stage_inversion_dir']

    if not invert_elastix_transform_parameters(fixed_vol, forward_tform_file, inversion_params, invert_param_dir, threads):
        return

    # Get the resulting TransformParameters file, and create a transform file suitable for inverting normal volumes
    image_inverted_tform = abspath(join(args['specimen_stage_inversion_dir'], 'TransformParameters.0.txt'))

    if not _modify_inverted_tform_file(image_inverted_tform, image_transform_param_path):
        return

    # Get the resulting TransformParameters file, and create a transform file suitable for inverting label volumes.
    # Replace the parameters in the image file with label-specific parameters and save in a new file. No need to
    # generate one from scratch
    if not make_elastix_inversion_parameter_file(image_transform_param_path, label_transform_param_path, args['label_replacements']):
        return
    _modify_inverted_tform_file(label_transform_param_path)
def get_reg_dirs(config: LamaConfig) -> List[Path]:
    """
    Get the registration output directory paths, in the order the stages were run.
    """
    root = config['root_reg_dir']
    return [root / stage['stage_id'] for stage in config['registration_stage_params']]
def make_elastix_inversion_parameter_file(elx_param_file: Path, newfile_name: str, replacements: Dict):
    """Write a copy of an elastix parameter file adapted for transform inversion.

    Swaps the metric for DisplacementMagnitudePenalty (needed for inverting),
    turns off writing of result images, and applies ``replacements`` to any
    matching parameters (e.g. interpolation order when inverting labels).

    Parameters
    ----------
    elx_param_file: str
        path to elastix input parameter file
    newfile_name: str
        path to save modified parameter file to
    replacements
        parameter-name -> new-value overrides

    Returns
    -------
    bool
        True on success, False if either file could not be read/written
    """
    try:
        with open(elx_param_file) as src, open(newfile_name, "w") as dest:
            for line in src:
                if line.startswith("(Metric "):
                    line = '(Metric "DisplacementMagnitudePenalty")\n'
                if line.startswith('(WriteResultImage '):
                    line = '(WriteResultImage "false")\n'
                if line.startswith('WriteResultImageAfterEachResolution '):
                    continue
                try:
                    key = line.split()[0][1:]
                except IndexError:
                    continue  # blank line: dropped from the output
                if key in replacements:
                    val = replacements[key]
                    try:
                        int(val)
                    except ValueError:
                        line = '({} "{}")\n'.format(key, val)  # non-numeric values need quotes
                    else:
                        line = '({} {})\n'.format(key, val)    # numeric values go bare
                dest.write(line)
    except IOError as e:
        logging.error("Error modifying the elastix parameter file: {}".format(e))
        return False
    return True
def invert_elastix_transform_parameters(fixed: Path, tform_file: Path, param: Path, outdir: Path, threads: str):
    """Run elastix to invert a transform, writing a new TransformParameters
    file into ``outdir``.

    Registers the fixed image onto itself, initialised with the forward
    transform, which yields the inverse transform.

    Returns:
        True on success, False if the elastix call failed
    """
    if not common.test_installation('elastix'):
        raise OSError('elastix not installed')

    cmd = ['elastix',
           '-t0', tform_file,
           '-p', param,
           '-f', fixed,
           '-m', fixed,
           '-out', outdir,
           '-threads', threads  # 11/09/18. This was set to 1. Can iversions take advantage of multithreading?
           ]
    try:
        subprocess.check_output(cmd)
        return True
    except Exception as e:  # includes subprocess.CalledProcessError
        msg = f'Inverting transform file failed. cmd: {cmd}\n{str(e)}:'
        logging.error(msg)
        logging.exception(msg)
        return False
def _modify_inverted_tform_file(elx_tform_file: Path, newfile_name: str=None):
"""
Remove "NoInitialTransform" from the output transform parameter file
Set output image format to unsigned char. Writes out a modified elastix transform parameter file
that can be used for inverting volumes
Parameters
----------
elx_tform_file: str
path to elastix transform file
newfile_mame: str
path to save modified transform file
"""
if not newfile_name: # Write to temporary file before overwriting
new_file = tempfile.NamedTemporaryFile().name
else:
new_file = newfile_name
try:
with open(new_file, "w+") as new_tform_param_fh, open(elx_tform_file, "r") as tform_param_fh:
for line in tform_param_fh:
if line.startswith('(InitialTransformParametersFileName'):
line = '(InitialTransformParametersFileName "NoInitialTransform")\n'
new_tform_param_fh.write(line)
new_tform_param_fh.close()
tform_param_fh.close()
except IOError:
logging.warning("Error reading or writing transform files {}".format(elx_tform_file))
return False
return True
# def is_euler_stage(tform_param):
# """
# Return True if the registration used to create this param file was a Euler transform. Can't currently invert
# Euler transforms with this method, and is usually not required
# :param tform_param:
# :return:
# """
# with open(tform_param, 'r') as fh:
# line = fh.readline()
# if 'EulerTransform' in line:
# return True
# else:
# return False | 38.84985 | 175 | 0.678287 | from pathlib import Path
import tempfile
import os
import subprocess
from collections import defaultdict
from multiprocessing import Pool
from os.path import join, abspath, isfile
from typing import Union, List, Dict
from logzero import logger as logging
import yaml
from lama import common
from lama.common import cfg_load
from lama.registration_pipeline.validate_config import LamaConfig
from lama.elastix import (ELX_TRANSFORM_NAME, ELX_PARAM_PREFIX, PROPAGATE_LABEL_TRANFORM,
PROPAGATE_IMAGE_TRANSFORM, PROPAGATE_CONFIG, RESOLUTION_IMGS_DIR, IMG_PYRAMID_DIR)
LABEL_REPLACEMENTS = {
'FinalBSplineInterpolationOrder': '0',
'FixedInternalImagePixelType': 'short',
'MovingInternalImagePixelType': 'short',
'ResultImagePixelType': 'unsigned char',
'WriteTransformParametersEachResolution': 'false',
'WriteResultImageAfterEachResolution': 'false'
}
IMAGE_REPLACEMENTS = {
'FinalBSplineInterpolationOrder': '3',
'FixedInternalImagePixelType': 'float',
'MovingInternalImagePixelType': 'float',
'ResultImagePixelType': 'float',
'WriteTransformParametersEachResolution': 'false',
'WriteResultImageAfterEachResolution': 'false'
}
def batch_invert_transform_parameters(config: Union[Path, LamaConfig],
clobber=True, new_log:bool=False):
common.test_installation('elastix')
if isinstance(config, (Path, str)):
config = LamaConfig(config)
threads = str(config['threads'])
if new_log:
common.init_logging(config / 'invert_transforms.log')
reg_dirs = get_reg_dirs(config)
volume_names = [x.stem for x in common.get_file_paths(reg_dirs[0], ignore_folders=[RESOLUTION_IMGS_DIR, IMG_PYRAMID_DIR])]
inv_outdir = config.mkdir('inverted_transforms')
stages_to_invert = defaultdict(list)
jobs: List[Dict] = []
reg_stage_dir: Path
for i, vol_id in enumerate(volume_names):
for reg_stage_dir in reg_dirs:
if not reg_stage_dir.is_dir():
logging.error('cannot find {}'.format(reg_stage_dir))
raise FileNotFoundError(f'Cannot find registration dir {reg_stage_dir}')
inv_stage_dir = inv_outdir / reg_stage_dir.name
specimen_stage_reg_dir = reg_stage_dir / vol_id
specimen_stage_inversion_dir = inv_stage_dir / vol_id
transform_file = common.getfile_startswith(specimen_stage_reg_dir, ELX_TRANSFORM_NAME)
parameter_file = common.getfile_startswith(reg_stage_dir, ELX_PARAM_PREFIX)
inv_stage_dir.mkdir(exist_ok=True)
if reg_stage_dir.name not in stages_to_invert['label_propagation_order']:
stages_to_invert['label_propagation_order'].insert(0, reg_stage_dir.name)
if clobber:
common.mkdir_force(specimen_stage_inversion_dir)
reg_metadata = cfg_load(specimen_stage_reg_dir / common.INDV_REG_METADATA)
fixed_volume = (specimen_stage_reg_dir / reg_metadata['fixed_vol']).resolve()
job = {
'specimen_stage_inversion_dir': specimen_stage_inversion_dir,
'parameter_file': abspath(parameter_file),
'transform_file': transform_file,
'fixed_volume': fixed_volume,
'param_file_output_name': 'inversion_parameters.txt',
'image_replacements': IMAGE_REPLACEMENTS,
'label_replacements': LABEL_REPLACEMENTS,
'image_transform_file': PROPAGATE_IMAGE_TRANSFORM,
'label_transform_file': PROPAGATE_LABEL_TRANFORM,
'clobber': clobber,
'threads': threads
}
jobs.append(job)
pool = Pool(8)
try:
pool.map(_invert_transform_parameters, jobs)
except KeyboardInterrupt:
print('terminating inversion')
pool.terminate()
pool.join()
reg_dir = Path(os.path.relpath(reg_stage_dir, inv_outdir))
stages_to_invert['registration_directory'] = str(reg_dir)
invert_config = config['inverted_transforms'] / PROPAGATE_CONFIG
with open(invert_config, 'w') as yf:
yf.write(yaml.dump(dict(stages_to_invert), default_flow_style=False))
def _invert_transform_parameters(args: Dict):
clobber = args['clobber']
threads = args['threads']
image_transform_param_path = abspath(join(args['specimen_stage_inversion_dir'], args['image_transform_file']))
label_transform_param_path = abspath(join(args['specimen_stage_inversion_dir'], args['label_transform_file']))
if not clobber and isfile(label_transform_param_path) and isfile(image_transform_param_path):
logging.info('skipping {} as noclobber is True and inverted parameter files exist')
return
# Modify the elastix registration input parameter file to enable inversion (Change metric and don't write image results)
inversion_params = abspath(join(args['specimen_stage_inversion_dir'], args['param_file_output_name']))
make_elastix_inversion_parameter_file(abspath(args['parameter_file']), inversion_params, args['image_replacements'])
# Do the inversion, making the inverted TransformParameters file
fixed_vol = args['fixed_volume']
forward_tform_file = abspath(args['transform_file'])
invert_param_dir = args['specimen_stage_inversion_dir']
if not invert_elastix_transform_parameters(fixed_vol, forward_tform_file, inversion_params, invert_param_dir, threads):
return
# Get the resulting TransformParameters file, and create a transform file suitable for inverting normal volumes
image_inverted_tform = abspath(join(args['specimen_stage_inversion_dir'], 'TransformParameters.0.txt'))
if not _modify_inverted_tform_file(image_inverted_tform, image_transform_param_path):
return
# Get the resulting TransformParameters file, and create a transform file suitable for inverting label volumes
# replace the parameter in the image file with label-specific parameters and save in new file. No need to
# generate one from scratch
if not make_elastix_inversion_parameter_file(image_transform_param_path, label_transform_param_path, args['label_replacements']):
return
_modify_inverted_tform_file(label_transform_param_path)
def get_reg_dirs(config: LamaConfig) -> List[Path]:
    """Return the output directory of each registration stage.

    Each directory is ``config['root_reg_dir'] / <stage_id>``, in the order
    the stages appear in ``config['registration_stage_params']``.
    """
    return [config['root_reg_dir'] / stage['stage_id']
            for stage in config['registration_stage_params']]
def make_elastix_inversion_parameter_file(elx_param_file: Path, newfile_name: str, replacements: Dict):
    """Write a copy of an elastix parameter file tweaked for transform inversion.

    The metric is forced to ``DisplacementMagnitudePenalty``, result-image
    writing is disabled, and any parameter named in ``replacements`` gets its
    value substituted (quoted unless the value parses as an int). Blank lines
    are dropped.

    Returns
    -------
    bool
        True on success, False if the files could not be read or written.
    """
    try:
        with open(elx_param_file) as src, open(newfile_name, "w") as dest:
            for line in src:
                if line.startswith("(Metric "):
                    line = '(Metric "DisplacementMagnitudePenalty")\n'
                elif line.startswith('(WriteResultImage '):
                    line = '(WriteResultImage "false")\n'
                elif line.startswith('WriteResultImageAfterEachResolution '):
                    # NOTE(review): unlike the other patterns this has no
                    # leading '(' - confirm that is intentional.
                    continue
                words = line.split()
                if not words:
                    continue  # blank line - drop it
                param_name = words[0][1:]  # strip the opening '('
                if param_name in replacements:
                    value = replacements[param_name]
                    try:
                        int(value)
                    except ValueError:
                        # Not an int - elastix needs the value quoted
                        line = '({} "{}")\n'.format(param_name, value)
                    else:
                        # An int - written unquoted
                        line = '({} {})\n'.format(param_name, value)
                dest.write(line)
    except IOError as e:
        logging.error("Error modifying the elastix parameter file: {}".format(e))
        return False
    return True
def invert_elastix_transform_parameters(fixed: Path, tform_file: Path, param: Path, outdir: Path, threads: str):
    """Invert an elastix transform by registering the fixed volume onto itself.

    elastix is invoked with ``-t0 tform_file`` so that the TransformParameters
    file it writes into ``outdir`` describes the inverse of the forward
    transform.

    Returns
    -------
    bool
        True on success, False if the elastix call failed.

    Raises
    ------
    OSError
        If elastix is not installed.
    """
    if not common.test_installation('elastix'):
        raise OSError('elastix not installed')
    cmd = ['elastix',
           '-t0', tform_file,
           '-p', param,
           '-f', fixed,
           '-m', fixed,  # moving == fixed: register the volume onto itself
           '-out', outdir,
           '-threads', threads  # 11/09/18. This was set to 1. Can iversions take advantage of multithreading?
           ]
    try:
        subprocess.check_output(cmd)
    except Exception as e:
        # Fix: the old `except (Exception, subprocess.CalledProcessError)` was
        # redundant (CalledProcessError subclasses Exception), and the message
        # was logged twice (logging.error followed by logging.exception).
        # logging.exception records the message plus the traceback in one call.
        msg = f'Inverting transform file failed. cmd: {cmd}\n{str(e)}:'
        logging.exception(msg)
        return False
    return True
def _modify_inverted_tform_file(elx_tform_file: Path, newfile_name: str=None):
if not newfile_name: # Write to temporary file before overwriting
new_file = tempfile.NamedTemporaryFile().name
else:
new_file = newfile_name
try:
with open(new_file, "w+") as new_tform_param_fh, open(elx_tform_file, "r") as tform_param_fh:
for line in tform_param_fh:
if line.startswith('(InitialTransformParametersFileName'):
line = '(InitialTransformParametersFileName "NoInitialTransform")\n'
new_tform_param_fh.write(line)
new_tform_param_fh.close()
tform_param_fh.close()
except IOError:
logging.warning("Error reading or writing transform files {}".format(elx_tform_file))
return False
return True
# def is_euler_stage(tform_param):
# """
# Return True if the registration used to create this param file was a Euler transform. Can't currently invert
# Euler transforms with this method, and is usually not required
# :param tform_param:
# :return:
# """
| true | true |
f718082ff8a1b480495d2fe2964e1b8479a5f70b | 3,677 | py | Python | tests/python/unittest/test_tir_ptx_ldmatrix.py | shengxinhu/tvm | 06c443e9959452c6da3a911fe0c11e08c5554477 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 4,640 | 2017-08-17T19:22:15.000Z | 2019-11-04T15:29:46.000Z | tests/python/unittest/test_tir_ptx_ldmatrix.py | shengxinhu/tvm | 06c443e9959452c6da3a911fe0c11e08c5554477 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2,863 | 2017-08-17T19:55:50.000Z | 2019-11-04T17:18:41.000Z | tests/python/unittest/test_tir_ptx_ldmatrix.py | shengxinhu/tvm | 06c443e9959452c6da3a911fe0c11e08c5554477 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1,352 | 2017-08-17T19:30:38.000Z | 2019-11-04T16:09:29.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm.script import tir as T
import numpy as np
import tvm.testing
@T.prim_func
def ptx_ldmatrix(
    A: T.Buffer[(16, 16), "float16"], B: T.Buffer[(16, 16), "float16"], num: T.int32, trans: T.uint8
) -> None:
    # Stage a 16x16 fp16 matrix A through shared memory, load `num` 8x8
    # fragments of it into per-thread registers via the PTX ldmatrix
    # instruction (transposed when `trans` is set), then write the register
    # fragments back out to B. The B indexing presumably follows the mma
    # m8n8 fragment ownership layout - confirm against the PTX ISA docs.
    T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
    bx = T.env_thread("blockIdx.x")
    tx = T.env_thread("threadIdx.x")
    T.launch_thread(bx, 1)
    T.launch_thread(tx, 32)  # a single warp
    with T.block():
        A_shared = T.alloc_buffer([16, 16], "float16", scope="shared")
        A_local = T.alloc_buffer([8], "float16", scope="local")
        # Each of the 32 threads copies 8 elements of A into shared memory
        # (two per row-pair, covering all 256 elements).
        for i in range(8):
            A_shared[i * 2 + tx // 16, tx % 16] = A[i * 2 + tx // 16, tx % 16]
        # ldmatrix: each thread supplies the shared-memory address of one
        # matrix row; results land in A_local registers.
        T.evaluate(
            T.ptx_ldmatrix(
                trans,
                num,
                ".b16",
                A_local.data,
                0,
                A_shared.data,
                16 * (tx % 16) + 8 * (tx // 16),
                dtype="float16",
            )
        )
        # Scatter the per-thread register fragments back into B.
        for k in range(2):
            for j in range(2):
                for i in range(2):
                    B[8 * j + tx // 4, 8 * k + (tx % 4) * 2 + i] = A_local[4 * k + 2 * j + i]
@tvm.testing.requires_cuda
def test_ptx_ldmatrix():
    """Run ptx_ldmatrix for every (num, trans) combination and compare the
    device result against a NumPy-built expectation."""
    f = ptx_ldmatrix
    _, _, param_num, param_trans = f.params
    arch = tvm.contrib.nvcc.get_target_compute_version()
    major, minor = tvm.contrib.nvcc.parse_compute_version(arch)
    if major * 10 + minor < 75:
        # ldmatrix requires at least SM75
        return
    # Which 8x8 tiles (row_block, col_block) each fragment count loads.
    tiles = {
        1: [(0, 0)],
        2: [(0, 0), (1, 0)],
        4: [(0, 0), (1, 0), (0, 1), (1, 1)],
    }
    for num in [1, 2, 4]:
        for trans in [False, True]:
            mod = tvm.build(f.specialize({param_num: num, param_trans: trans}), target="cuda")
            A_np = np.random.rand(16, 16).astype("float16")
            expected = np.zeros_like(A_np)
            for row_blk, col_blk in tiles[num]:
                rows = slice(8 * row_blk, 8 * row_blk + 8)
                cols = slice(8 * col_blk, 8 * col_blk + 8)
                tile = A_np[rows, cols]
                expected[rows, cols] = tile.T if trans else tile
            dev = tvm.cuda(0)
            A_nd = tvm.nd.array(A_np, device=dev)
            B_nd = tvm.nd.array(np.zeros((16, 16), dtype="float16"), device=dev)
            mod(A_nd, B_nd)
            tvm.testing.assert_allclose(B_nd.numpy(), expected)
if __name__ == "__main__":
test_ptx_ldmatrix()
| 36.04902 | 100 | 0.536035 |
import tvm
from tvm.script import tir as T
import numpy as np
import tvm.testing
@T.prim_func
def ptx_ldmatrix(
A: T.Buffer[(16, 16), "float16"], B: T.Buffer[(16, 16), "float16"], num: T.int32, trans: T.uint8
) -> None:
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
bx = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(bx, 1)
T.launch_thread(tx, 32)
with T.block():
A_shared = T.alloc_buffer([16, 16], "float16", scope="shared")
A_local = T.alloc_buffer([8], "float16", scope="local")
for i in range(8):
A_shared[i * 2 + tx // 16, tx % 16] = A[i * 2 + tx // 16, tx % 16]
T.evaluate(
T.ptx_ldmatrix(
trans,
num,
".b16",
A_local.data,
0,
A_shared.data,
16 * (tx % 16) + 8 * (tx // 16),
dtype="float16",
)
)
for k in range(2):
for j in range(2):
for i in range(2):
B[8 * j + tx // 4, 8 * k + (tx % 4) * 2 + i] = A_local[4 * k + 2 * j + i]
@tvm.testing.requires_cuda
def test_ptx_ldmatrix():
f = ptx_ldmatrix
_, _, param_num, param_trans = f.params
arch = tvm.contrib.nvcc.get_target_compute_version()
major, minor = tvm.contrib.nvcc.parse_compute_version(arch)
if major * 10 + minor < 75:
return
for num in [1, 2, 4]:
for trans in [False, True]:
mod = tvm.build(f.specialize({param_num: num, param_trans: trans}), target="cuda")
A_np = np.random.rand(16, 16).astype("float16")
A_mask_np = np.zeros_like(A_np)
if num == 1:
if trans:
A_mask_np[:8, :8] = A_np[:8, :8].T
else:
A_mask_np[:8, :8] = A_np[:8, :8]
elif num == 2:
if trans:
A_mask_np[:8, :8] = A_np[:8, :8].T
A_mask_np[8:16, :8] = A_np[8:16, :8].T
else:
A_mask_np[:16, :8] = A_np[:16, :8]
else:
if trans:
A_mask_np[:8, :8] = A_np[:8, :8].T
A_mask_np[8:16, :8] = A_np[8:16, :8].T
A_mask_np[:8, 8:16] = A_np[:8, 8:16].T
A_mask_np[8:16, 8:16] = A_np[8:16, 8:16].T
else:
A_mask_np[:16, :16] = A_np[:16, :16]
B_np = np.zeros((16, 16)).astype("float16")
dev = tvm.cuda(0)
A_nd = tvm.nd.array(A_np, device=dev)
B_nd = tvm.nd.array(B_np, device=dev)
mod(A_nd, B_nd)
tvm.testing.assert_allclose(B_nd.numpy(), A_mask_np)
if __name__ == "__main__":
test_ptx_ldmatrix()
| true | true |
f71808b9f205aac0b404b9509ad046d9f41b7eab | 12,174 | py | Python | google/ads/googleads/v10/services/services/conversion_value_rule_set_service/transports/grpc.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v10/services/services/conversion_value_rule_set_service/transports/grpc.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v10/services/services/conversion_value_rule_set_service/transports/grpc.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v10.services.types import (
conversion_value_rule_set_service,
)
from .base import ConversionValueRuleSetServiceTransport, DEFAULT_CLIENT_INFO
class ConversionValueRuleSetServiceGrpcTransport(
    ConversionValueRuleSetServiceTransport
):
    """gRPC backend transport for ConversionValueRuleSetService.
    Service to manage conversion value rule sets.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # Cache of per-RPC stub callables keyed by method name, populated
    # lazily by the stub properties below.
    _stubs: Dict[str, Callable]
    def __init__(
        self,
        *,
        host: str = "googleads.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn(
                "client_cert_source is deprecated", DeprecationWarning
            )
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            # No pre-made channel: work out which SSL credentials to use for
            # the channel created further below.
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = (
                        SslCredentials().ssl_credentials
                    )
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @classmethod
    def create_channel(
        cls,
        host: str = "googleads.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.
        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel
    @property
    def mutate_conversion_value_rule_sets(
        self,
    ) -> Callable[
        [
            conversion_value_rule_set_service.MutateConversionValueRuleSetsRequest
        ],
        conversion_value_rule_set_service.MutateConversionValueRuleSetsResponse,
    ]:
        r"""Return a callable for the mutate conversion value rule
        sets method over gRPC.
        Creates, updates or removes conversion value rule
        sets. Operation statuses are returned.
        Returns:
            Callable[[~.MutateConversionValueRuleSetsRequest],
                    ~.MutateConversionValueRuleSetsResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "mutate_conversion_value_rule_sets" not in self._stubs:
            self._stubs[
                "mutate_conversion_value_rule_sets"
            ] = self.grpc_channel.unary_unary(
                "/google.ads.googleads.v10.services.ConversionValueRuleSetService/MutateConversionValueRuleSets",
                request_serializer=conversion_value_rule_set_service.MutateConversionValueRuleSetsRequest.serialize,
                response_deserializer=conversion_value_rule_set_service.MutateConversionValueRuleSetsResponse.deserialize,
            )
        return self._stubs["mutate_conversion_value_rule_sets"]
    def close(self):
        """Close the underlying gRPC channel, releasing its resources."""
        self.grpc_channel.close()
__all__ = ("ConversionValueRuleSetServiceGrpcTransport",)
| 43.634409 | 122 | 0.637013 |
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth
from google.auth import credentials as ga_credentials
from google.auth.transport.grpc import SslCredentials
import grpc
from google.ads.googleads.v10.services.types import (
conversion_value_rule_set_service,
)
from .base import ConversionValueRuleSetServiceTransport, DEFAULT_CLIENT_INFO
class ConversionValueRuleSetServiceGrpcTransport(
ConversionValueRuleSetServiceTransport
):
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn(
"client_cert_source is deprecated", DeprecationWarning
)
if channel:
credentials = False
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = (
SslCredentials().ssl_credentials
)
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
return self._grpc_channel
@property
def mutate_conversion_value_rule_sets(
self,
) -> Callable[
[
conversion_value_rule_set_service.MutateConversionValueRuleSetsRequest
],
conversion_value_rule_set_service.MutateConversionValueRuleSetsResponse,
]:
if "mutate_conversion_value_rule_sets" not in self._stubs:
self._stubs[
"mutate_conversion_value_rule_sets"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v10.services.ConversionValueRuleSetService/MutateConversionValueRuleSets",
request_serializer=conversion_value_rule_set_service.MutateConversionValueRuleSetsRequest.serialize,
response_deserializer=conversion_value_rule_set_service.MutateConversionValueRuleSetsResponse.deserialize,
)
return self._stubs["mutate_conversion_value_rule_sets"]
def close(self):
self.grpc_channel.close()
__all__ = ("ConversionValueRuleSetServiceGrpcTransport",)
| true | true |
f7180a3e45377a91711d6c8fa67895d8d860641f | 1,780 | py | Python | mlprocessors/consolecapture.py | flatironinstitute/mountaintools | d5680599381e0810c4aa5b309b9ef9ec7f2d1b25 | [
"Apache-2.0"
] | 2 | 2019-11-07T14:09:02.000Z | 2021-09-23T01:09:04.000Z | mountaintools/mlprocessors/consolecapture.py | flatironinstitute/spikeforest_old | d9470194dc906b949178b9c44d14aea57a1f6c27 | [
"Apache-2.0"
] | 13 | 2019-05-04T09:34:53.000Z | 2019-06-23T07:05:58.000Z | mountaintools/mlprocessors/consolecapture.py | flatironinstitute/spikeforest_old | d9470194dc906b949178b9c44d14aea57a1f6c27 | [
"Apache-2.0"
] | 1 | 2021-09-23T01:07:21.000Z | 2021-09-23T01:07:21.000Z | from typing import Any
import sys
import time
import os
import tempfile
class Logger2():
    """A minimal file-like tee: every write/flush is duplicated to two
    underlying streams."""
    def __init__(self, file1: Any, file2: Any):
        self.file1 = file1
        self.file2 = file2
    def write(self, data: str) -> None:
        """Write *data* to both streams."""
        for stream in (self.file1, self.file2):
            stream.write(data)
    def flush(self) -> None:
        """Flush both streams."""
        for stream in (self.file1, self.file2):
            stream.flush()
class ConsoleCapture():
    """Tees stdout/stderr into a temporary file between start_capturing()
    and stop_capturing(), then exposes the captured text via consoleOut()
    and wall-clock timing via runtimeInfo()."""
    def __init__(self):
        self._console_out = ''
        self._tmp_fname = None        # path of the capture file while active
        self._file_handle = None      # open write handle to the capture file
        self._time_start = None
        self._time_stop = None
        self._original_stdout = sys.stdout
        self._original_stderr = sys.stderr
    def start_capturing(self) -> None:
        """Begin duplicating stdout/stderr into a temporary file."""
        # Fix: tempfile.mktemp is deprecated and race-prone (the name can be
        # claimed by another process before we open it); mkstemp atomically
        # creates and opens the file.
        fd, self._tmp_fname = tempfile.mkstemp(suffix='.txt')
        self._file_handle = os.fdopen(fd, 'w')
        sys.stdout = Logger2(self._file_handle, self._original_stdout)
        sys.stderr = Logger2(self._file_handle, self._original_stderr)
        self._time_start = time.time()
    def stop_capturing(self) -> None:
        """Restore stdout/stderr, read back the captured text and remove
        the temporary file."""
        assert self._tmp_fname is not None
        self._time_stop = time.time()
        sys.stdout = self._original_stdout
        sys.stderr = self._original_stderr
        self._file_handle.close()
        with open(self._tmp_fname, 'r') as f:
            self._console_out = f.read()
        os.unlink(self._tmp_fname)
    def addToConsoleOut(self, txt: str) -> None:
        """Append *txt* directly to the capture file (capturing must be active)."""
        self._file_handle.write(txt)
    def runtimeInfo(self) -> dict:
        """Return start/end/elapsed wall-clock times of the last capture."""
        assert self._time_start is not None
        return dict(
            start_time=self._time_start - 0,
            end_time=self._time_stop - 0,
            elapsed_sec=self._time_stop - self._time_start
        )
    def consoleOut(self) -> str:
        """The text captured during the last start/stop cycle."""
        return self._console_out
| 28.709677 | 70 | 0.626404 | from typing import Any
import sys
import time
import os
import tempfile
class Logger2():
def __init__(self, file1: Any, file2: Any):
self.file1 = file1
self.file2 = file2
def write(self, data: str) -> None:
self.file1.write(data)
self.file2.write(data)
def flush(self) -> None:
self.file1.flush()
self.file2.flush()
class ConsoleCapture():
def __init__(self):
self._console_out = ''
self._tmp_fname = None
self._file_handle = None
self._time_start = None
self._time_stop = None
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
def start_capturing(self) -> None:
self._tmp_fname = tempfile.mktemp(suffix='.txt')
self._file_handle = open(self._tmp_fname, 'w')
sys.stdout = Logger2(self._file_handle, self._original_stdout)
sys.stderr = Logger2(self._file_handle, self._original_stderr)
self._time_start = time.time()
def stop_capturing(self) -> None:
assert self._tmp_fname is not None
self._time_stop = time.time()
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._file_handle.close()
with open(self._tmp_fname, 'r') as f:
self._console_out = f.read()
os.unlink(self._tmp_fname)
def addToConsoleOut(self, txt: str) -> None:
self._file_handle.write(txt)
def runtimeInfo(self) -> dict:
assert self._time_start is not None
return dict(
start_time=self._time_start - 0,
end_time=self._time_stop - 0,
elapsed_sec=self._time_stop - self._time_start
)
def consoleOut(self) -> str:
return self._console_out
| true | true |
f7180a3fe9d499c15ed0c134b63d4d7772dbd786 | 3,357 | py | Python | profiles_project/settings.py | LaiZiSen/profiles_REST_API_course | 83662a33b3a318dc7e52c5d56b577e4863ed7c5d | [
"MIT"
] | null | null | null | profiles_project/settings.py | LaiZiSen/profiles_REST_API_course | 83662a33b3a318dc7e52c5d56b577e4863ed7c5d | [
"MIT"
] | null | null | null | profiles_project/settings.py | LaiZiSen/profiles_REST_API_course | 83662a33b3a318dc7e52c5d56b577e4863ed7c5d | [
"MIT"
] | null | null | null | """
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9%&3aa&mz9nkbfr0!b(^9a^((@_wbd&m3f$3wbyseq9ai9m!^v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(int(os.environ.get('DEBUG',1)))
ALLOWED_HOSTS = ['ec2-18-117-223-244.us-east-2.compute.amazonaws.com','127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
STATIC_ROOT = 'static/'
| 26.226563 | 91 | 0.699136 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '9%&3aa&mz9nkbfr0!b(^9a^((@_wbd&m3f$3wbyseq9ai9m!^v'
DEBUG = bool(int(os.environ.get('DEBUG',1)))
ALLOWED_HOSTS = ['ec2-18-117-223-244.us-east-2.compute.amazonaws.com','127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
STATIC_ROOT = 'static/'
| true | true |
f7180b932ca3d3eac6846747dbbfa23652d97e6d | 1,458 | py | Python | sa/profiles/AlliedTelesis/AT8100/get_version.py | xUndero/noc | 9fb34627721149fcf7064860bd63887e38849131 | [
"BSD-3-Clause"
] | 1 | 2019-09-20T09:36:48.000Z | 2019-09-20T09:36:48.000Z | sa/profiles/AlliedTelesis/AT8100/get_version.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | sa/profiles/AlliedTelesis/AT8100/get_version.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# AlliedTelesis.AT8100.get_version
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
"""
"""
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetversion import IGetVersion
import re
class Script(BaseScript):
name = "AlliedTelesis.AT8100.get_version"
cache = True
interface = IGetVersion
rx_plat = re.compile(
r"^Base\s+(?P<platform>AT-81\S+)\s+(?P<hardware>\S+)\s+(?P<serial>\S+)\s*\n", re.MULTILINE
)
rx_boot = re.compile(r"^Bootloader version\s+:\s+(?P<bootprom>\S+)\s*\n", re.MULTILINE)
rx_version = re.compile(r"^Software version\s+:\s+(?P<version>\S+)\s*\n", re.MULTILINE)
def execute_cli(self):
v = self.cli("show system")
match1 = self.rx_plat.search(v)
match2 = self.rx_boot.search(v)
match3 = self.rx_version.search(v)
return {
"vendor": "Allied Telesis",
"platform": match1.group("platform"),
"version": match3.group("version"),
"attributes": {
"Boot PROM": match2.group("bootprom"),
"HW version": match1.group("hardware"),
"Serial Number": match1.group("serial"),
},
}
| 34.714286 | 98 | 0.504801 |
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetversion import IGetVersion
import re
class Script(BaseScript):
name = "AlliedTelesis.AT8100.get_version"
cache = True
interface = IGetVersion
rx_plat = re.compile(
r"^Base\s+(?P<platform>AT-81\S+)\s+(?P<hardware>\S+)\s+(?P<serial>\S+)\s*\n", re.MULTILINE
)
rx_boot = re.compile(r"^Bootloader version\s+:\s+(?P<bootprom>\S+)\s*\n", re.MULTILINE)
rx_version = re.compile(r"^Software version\s+:\s+(?P<version>\S+)\s*\n", re.MULTILINE)
def execute_cli(self):
v = self.cli("show system")
match1 = self.rx_plat.search(v)
match2 = self.rx_boot.search(v)
match3 = self.rx_version.search(v)
return {
"vendor": "Allied Telesis",
"platform": match1.group("platform"),
"version": match3.group("version"),
"attributes": {
"Boot PROM": match2.group("bootprom"),
"HW version": match1.group("hardware"),
"Serial Number": match1.group("serial"),
},
}
| true | true |
f7180d77b8c3f9bb8f6b2d45b2d1f43aa01a1d41 | 673 | py | Python | net/wyun/tests/basic/test_basicfunction.py | michaelyin/im2markup-prep | 0613e4f77f1b50084a85e5c0b1511c9ae007211d | [
"Apache-2.0"
] | 3 | 2018-04-19T13:51:33.000Z | 2020-10-04T12:35:50.000Z | net/wyun/tests/basic/test_basicfunction.py | michaelyin/im2markup-prep | 0613e4f77f1b50084a85e5c0b1511c9ae007211d | [
"Apache-2.0"
] | null | null | null | net/wyun/tests/basic/test_basicfunction.py | michaelyin/im2markup-prep | 0613e4f77f1b50084a85e5c0b1511c9ae007211d | [
"Apache-2.0"
] | 1 | 2018-11-22T08:44:11.000Z | 2018-11-22T08:44:11.000Z | import unittest
from net.wyun.mer.basicfunction import BasicFunction
class TestBasicFunction(unittest.TestCase):
def setUp(self):
self.func = BasicFunction()
def test_1(self):
self.assertTrue(True)
def test_2(self):
self.assertTrue(True)
def test_3(self):
self.assertEqual(self.func.state, 0)
def test_4(self):
self.func.increment_state()
self.assertEqual(self.func.state, 1)
def test_5(self):
self.func.increment_state()
self.func.increment_state()
self.func.clear_state()
self.assertEqual(self.func.state, 0)
if __name__ == '__main__':
unittest.main() | 20.393939 | 52 | 0.649331 | import unittest
from net.wyun.mer.basicfunction import BasicFunction
class TestBasicFunction(unittest.TestCase):
def setUp(self):
self.func = BasicFunction()
def test_1(self):
self.assertTrue(True)
def test_2(self):
self.assertTrue(True)
def test_3(self):
self.assertEqual(self.func.state, 0)
def test_4(self):
self.func.increment_state()
self.assertEqual(self.func.state, 1)
def test_5(self):
self.func.increment_state()
self.func.increment_state()
self.func.clear_state()
self.assertEqual(self.func.state, 0)
if __name__ == '__main__':
unittest.main() | true | true |
f7180f9594e73d237384205187769766c8cda637 | 13,033 | py | Python | google-cloud-sdk/lib/surface/compute/ssh.py | bopopescu/Social-Lite | ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/lib/surface/compute/ssh.py | bopopescu/Social-Lite | ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf | [
"Apache-2.0"
] | 4 | 2020-07-21T12:51:46.000Z | 2022-01-22T10:29:25.000Z | google-cloud-sdk/lib/surface/compute/ssh.py | bopopescu/Social-Lite | ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf | [
"Apache-2.0"
] | 1 | 2020-07-25T18:17:57.000Z | 2020-07-25T18:17:57.000Z | # -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the command for SSHing into an instance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
import sys
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import completers
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute import iap_tunnel
from googlecloudsdk.command_lib.compute import scope as compute_scope
from googlecloudsdk.command_lib.compute import ssh_utils
from googlecloudsdk.command_lib.compute.instances import flags as instance_flags
from googlecloudsdk.command_lib.util.ssh import containers
from googlecloudsdk.command_lib.util.ssh import ssh
from googlecloudsdk.core import log
from googlecloudsdk.core.util import retry
def AddCommandArg(parser):
parser.add_argument(
'--command',
help="""\
A command to run on the virtual machine.
Runs the command on the target instance and then exits.
""")
def AddSSHArgs(parser):
"""Additional flags and positional args to be passed to *ssh(1)*."""
parser.add_argument(
'--ssh-flag',
action='append',
help="""\
Additional flags to be passed to *ssh(1)*. It is recommended that flags
be passed using an assignment operator and quotes. This flag will
replace occurences of ``%USER%'' and ``%INSTANCE%'' with their
dereferenced values. Example:
$ {command} example-instance --zone=us-central1-a --ssh-flag="-vvv" --ssh-flag="-L 80:%INSTANCE%:80"
is equivalent to passing the flags ``--vvv'' and ``-L
80:162.222.181.197:80'' to *ssh(1)* if the external IP address of
'example-instance' is 162.222.181.197.
If connecting to the instance's external IP, then %INSTANCE% is replaced
with that, otherwise it is replaced with the internal IP.
""")
parser.add_argument(
'user_host',
completer=completers.InstancesCompleter,
metavar='[USER@]INSTANCE',
help="""\
Specifies the instance to SSH into.
``USER'' specifies the username with which to SSH. If omitted,
the user login name is used. If using OS Login, USER will be replaced
by the OS Login user.
``INSTANCE'' specifies the name of the virtual machine instance to SSH
into.
""")
parser.add_argument(
'ssh_args',
nargs=argparse.REMAINDER,
help="""\
Flags and positionals passed to the underlying ssh implementation.
""",
example="""\
$ {command} example-instance --zone=us-central1-a -- -vvv -L 80:%INSTANCE%:80
""")
def AddContainerArg(parser):
parser.add_argument(
'--container',
help="""\
The name or ID of a container inside of the virtual machine instance
to connect to. This only applies to virtual machines that are using
a Google Container-Optimized virtual machine image. For more
information, see [](https://cloud.google.com/compute/docs/containers).
""")
def AddInternalIPArg(group):
group.add_argument(
'--internal-ip',
default=False,
action='store_true',
help="""\
Connect to instances using their internal IP addresses rather than their
external IP addresses. Use this to connect from one instance to another
on the same VPC network, over a VPN connection, or between two peered
VPC networks.
For this connection to work, you must configure your networks and
firewall to allow SSH connections to the internal IP address of
the instance to which you want to connect.
To learn how to use this flag, see
[](https://cloud.google.com/compute/docs/instances/connecting-advanced#sshbetweeninstances).
""")
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Ssh(base.Command):
"""SSH into a virtual machine instance."""
category = base.TOOLS_CATEGORY
@staticmethod
def Args(parser):
"""Set up arguments for this command.
Args:
parser: An argparse.ArgumentParser.
"""
ssh_utils.BaseSSHCLIHelper.Args(parser)
AddCommandArg(parser)
AddSSHArgs(parser)
AddContainerArg(parser)
flags.AddZoneFlag(
parser, resource_type='instance', operation_type='connect to')
routing_group = parser.add_mutually_exclusive_group()
AddInternalIPArg(routing_group)
iap_tunnel.AddSshTunnelArgs(parser, routing_group)
def Run(self, args):
"""See ssh_utils.BaseSSHCLICommand.Run."""
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
ssh_helper = ssh_utils.BaseSSHCLIHelper()
ssh_helper.Run(args)
user, instance_name = ssh_utils.GetUserAndInstance(args.user_host)
instance_ref = instance_flags.SSH_INSTANCE_RESOLVER.ResolveResources(
[instance_name], compute_scope.ScopeEnum.ZONE, args.zone,
holder.resources,
scope_lister=instance_flags.GetInstanceZoneScopeLister(client))[0]
instance = ssh_helper.GetInstance(client, instance_ref)
project = ssh_helper.GetProject(client, instance_ref.project)
host_keys = ssh_helper.GetHostKeysFromGuestAttributes(client, instance_ref,
instance, project)
if not host_keys and host_keys is not None:
# Only display this message if there was an attempt to retrieve
# host keys but it was unsuccessful. If Guest Attributes is disabled,
# there is no attempt to retrieve host keys.
log.status.Print('Unable to retrieve host keys from instance metadata. '
'Continuing.')
expiration, expiration_micros = ssh_utils.GetSSHKeyExpirationFromArgs(args)
if args.plain:
use_oslogin = False
else:
public_key = ssh_helper.keys.GetPublicKey().ToEntry(include_comment=True)
# If there is an '@' symbol in the user_host arg, the user is requesting
# to connect as a specific user. This may get overridden by OS Login.
username_requested = '@' in args.user_host
user, use_oslogin = ssh.CheckForOsloginAndGetUser(
instance, project, user, public_key, expiration_micros,
self.ReleaseTrack(), username_requested=username_requested)
iap_tunnel_args = iap_tunnel.SshTunnelArgs.FromArgs(
args, self.ReleaseTrack(), instance_ref,
ssh_utils.GetExternalInterface(instance, no_raise=True))
internal_address = ssh_utils.GetInternalIPAddress(instance)
if iap_tunnel_args:
# IAP Tunnel only uses instance_address for the purpose of --ssh-flag
# substitution. In this case, dest_addr doesn't do much, it just matches
# against entries in the user's ssh_config file. It's best to use
# something unique to avoid false positive matches, thus we use
# HostKeyAlias.
instance_address = internal_address
dest_addr = ssh_utils.HostKeyAlias(instance)
elif args.internal_ip:
instance_address = internal_address
dest_addr = instance_address
else:
instance_address = ssh_utils.GetExternalIPAddress(instance)
dest_addr = instance_address
remote = ssh.Remote(dest_addr, user)
identity_file = None
options = None
if not args.plain:
identity_file = ssh_helper.keys.key_file
options = ssh_helper.GetConfig(ssh_utils.HostKeyAlias(instance),
args.strict_host_key_checking,
host_keys_to_add=host_keys)
extra_flags = ssh.ParseAndSubstituteSSHFlags(args, remote, instance_address,
internal_address)
remainder = []
if args.ssh_args:
remainder.extend(args.ssh_args)
# Transform args.command into arg list or None if no command
command_list = args.command.split(' ') if args.command else None
tty = containers.GetTty(args.container, command_list)
remote_command = containers.GetRemoteCommand(args.container, command_list)
# Do not include default port since that will prevent users from
# specifying a custom port (b/121998342).
ssh_cmd_args = {'remote': remote,
'identity_file': identity_file,
'options': options,
'extra_flags': extra_flags,
'remote_command': remote_command,
'tty': tty,
'iap_tunnel_args': iap_tunnel_args,
'remainder': remainder}
cmd = ssh.SSHCommand(**ssh_cmd_args)
if args.dry_run:
log.out.Print(' '.join(cmd.Build(ssh_helper.env)))
return
if args.plain or use_oslogin:
keys_newly_added = False
else:
keys_newly_added = ssh_helper.EnsureSSHKeyExists(
client, remote.user, instance, project, expiration=expiration)
if keys_newly_added:
poller = ssh_utils.CreateSSHPoller(remote, identity_file, options,
iap_tunnel_args,
extra_flags=extra_flags)
log.status.Print('Waiting for SSH key to propagate.')
# TODO(b/35355795): Don't force_connect
try:
poller.Poll(ssh_helper.env, force_connect=True)
except retry.WaitException:
raise ssh_utils.NetworkError()
if args.internal_ip:
ssh_helper.PreliminarilyVerifyInstance(instance.id, remote, identity_file,
options)
# Errors from SSH itself result in an ssh.CommandError being raised
return_code = cmd.Run(ssh_helper.env, force_connect=True)
if return_code:
# This is the return code of the remote command. Problems with SSH itself
# will result in ssh.CommandError being raised above.
sys.exit(return_code)
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class SshBeta(Ssh):
"""SSH into a virtual machine instance (Beta)."""
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class SshAlpha(SshBeta):
"""SSH into a virtual machine instance (Alpha)."""
def DetailedHelp():
"""Construct help text based on the command release track."""
detailed_help = {
'brief': 'SSH into a virtual machine instance',
'DESCRIPTION': """\
*{command}* is a thin wrapper around the *ssh(1)* command that
takes care of authentication and the translation of the
instance name into an IP address.
Note, this command does not work when connecting to Windows VMs. To
connect to a Windows instance using a command-line method, refer to this
guide: https://cloud.google.com/compute/docs/instances/connecting-to-instance#windows_cli
The default network comes preconfigured to allow ssh access to
all VMs. If the default network was edited, or if not using the
default network, you may need to explicitly enable ssh access by adding
a firewall-rule:
$ gcloud compute firewall-rules create --network=NETWORK \
default-allow-ssh --allow=tcp:22
{command} ensures that the user's public SSH key is present
in the project's metadata. If the user does not have a public
SSH key, one is generated using *ssh-keygen(1)* (if the `--quiet`
flag is given, the generated key will have an empty passphrase).
""",
'EXAMPLES': """\
To SSH into 'example-instance' in zone ``us-central1-a'', run:
$ {command} example-instance --zone=us-central1-a
You can also run a command on the virtual machine. For
example, to get a snapshot of the guest's process tree, run:
$ {command} example-instance --zone=us-central1-a --command="ps -ejH"
If you are using the Google Container-Optimized virtual machine image,
you can SSH into one of your containers with:
$ {command} example-instance --zone=us-central1-a --container=CONTAINER
You can limit the allowed time to ssh. For example, to allow a key to be
used through 2019:
$ {command} example-instance --zone=us-central1-a --ssh-key-expiration="2020-01-01T00:00:00:00Z"
Or alternatively, allow access for the next two minutes:
$ {command} example-instance --zone=us-central1-a --ssh-key-expire-after=2m
""",
}
return detailed_help
Ssh.detailed_help = DetailedHelp()
| 38.559172 | 108 | 0.682115 |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
import sys
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import completers
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute import iap_tunnel
from googlecloudsdk.command_lib.compute import scope as compute_scope
from googlecloudsdk.command_lib.compute import ssh_utils
from googlecloudsdk.command_lib.compute.instances import flags as instance_flags
from googlecloudsdk.command_lib.util.ssh import containers
from googlecloudsdk.command_lib.util.ssh import ssh
from googlecloudsdk.core import log
from googlecloudsdk.core.util import retry
def AddCommandArg(parser):
parser.add_argument(
'--command',
help="""\
A command to run on the virtual machine.
Runs the command on the target instance and then exits.
""")
def AddSSHArgs(parser):
parser.add_argument(
'--ssh-flag',
action='append',
help="""\
Additional flags to be passed to *ssh(1)*. It is recommended that flags
be passed using an assignment operator and quotes. This flag will
replace occurences of ``%USER%'' and ``%INSTANCE%'' with their
dereferenced values. Example:
$ {command} example-instance --zone=us-central1-a --ssh-flag="-vvv" --ssh-flag="-L 80:%INSTANCE%:80"
is equivalent to passing the flags ``--vvv'' and ``-L
80:162.222.181.197:80'' to *ssh(1)* if the external IP address of
'example-instance' is 162.222.181.197.
If connecting to the instance's external IP, then %INSTANCE% is replaced
with that, otherwise it is replaced with the internal IP.
""")
parser.add_argument(
'user_host',
completer=completers.InstancesCompleter,
metavar='[USER@]INSTANCE',
help="""\
Specifies the instance to SSH into.
``USER'' specifies the username with which to SSH. If omitted,
the user login name is used. If using OS Login, USER will be replaced
by the OS Login user.
``INSTANCE'' specifies the name of the virtual machine instance to SSH
into.
""")
parser.add_argument(
'ssh_args',
nargs=argparse.REMAINDER,
help="""\
Flags and positionals passed to the underlying ssh implementation.
""",
example="""\
$ {command} example-instance --zone=us-central1-a -- -vvv -L 80:%INSTANCE%:80
""")
def AddContainerArg(parser):
parser.add_argument(
'--container',
help="""\
The name or ID of a container inside of the virtual machine instance
to connect to. This only applies to virtual machines that are using
a Google Container-Optimized virtual machine image. For more
information, see [](https://cloud.google.com/compute/docs/containers).
""")
def AddInternalIPArg(group):
group.add_argument(
'--internal-ip',
default=False,
action='store_true',
help="""\
Connect to instances using their internal IP addresses rather than their
external IP addresses. Use this to connect from one instance to another
on the same VPC network, over a VPN connection, or between two peered
VPC networks.
For this connection to work, you must configure your networks and
firewall to allow SSH connections to the internal IP address of
the instance to which you want to connect.
To learn how to use this flag, see
[](https://cloud.google.com/compute/docs/instances/connecting-advanced#sshbetweeninstances).
""")
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Ssh(base.Command):
category = base.TOOLS_CATEGORY
@staticmethod
def Args(parser):
ssh_utils.BaseSSHCLIHelper.Args(parser)
AddCommandArg(parser)
AddSSHArgs(parser)
AddContainerArg(parser)
flags.AddZoneFlag(
parser, resource_type='instance', operation_type='connect to')
routing_group = parser.add_mutually_exclusive_group()
AddInternalIPArg(routing_group)
iap_tunnel.AddSshTunnelArgs(parser, routing_group)
def Run(self, args):
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
ssh_helper = ssh_utils.BaseSSHCLIHelper()
ssh_helper.Run(args)
user, instance_name = ssh_utils.GetUserAndInstance(args.user_host)
instance_ref = instance_flags.SSH_INSTANCE_RESOLVER.ResolveResources(
[instance_name], compute_scope.ScopeEnum.ZONE, args.zone,
holder.resources,
scope_lister=instance_flags.GetInstanceZoneScopeLister(client))[0]
instance = ssh_helper.GetInstance(client, instance_ref)
project = ssh_helper.GetProject(client, instance_ref.project)
host_keys = ssh_helper.GetHostKeysFromGuestAttributes(client, instance_ref,
instance, project)
if not host_keys and host_keys is not None:
# Only display this message if there was an attempt to retrieve
# host keys but it was unsuccessful. If Guest Attributes is disabled,
# there is no attempt to retrieve host keys.
log.status.Print('Unable to retrieve host keys from instance metadata. '
'Continuing.')
expiration, expiration_micros = ssh_utils.GetSSHKeyExpirationFromArgs(args)
if args.plain:
use_oslogin = False
else:
public_key = ssh_helper.keys.GetPublicKey().ToEntry(include_comment=True)
# If there is an '@' symbol in the user_host arg, the user is requesting
# to connect as a specific user. This may get overridden by OS Login.
username_requested = '@' in args.user_host
user, use_oslogin = ssh.CheckForOsloginAndGetUser(
instance, project, user, public_key, expiration_micros,
self.ReleaseTrack(), username_requested=username_requested)
iap_tunnel_args = iap_tunnel.SshTunnelArgs.FromArgs(
args, self.ReleaseTrack(), instance_ref,
ssh_utils.GetExternalInterface(instance, no_raise=True))
internal_address = ssh_utils.GetInternalIPAddress(instance)
if iap_tunnel_args:
# IAP Tunnel only uses instance_address for the purpose of --ssh-flag
# substitution. In this case, dest_addr doesn't do much, it just matches
instance_address = internal_address
dest_addr = ssh_utils.HostKeyAlias(instance)
elif args.internal_ip:
instance_address = internal_address
dest_addr = instance_address
else:
instance_address = ssh_utils.GetExternalIPAddress(instance)
dest_addr = instance_address
remote = ssh.Remote(dest_addr, user)
identity_file = None
options = None
if not args.plain:
identity_file = ssh_helper.keys.key_file
options = ssh_helper.GetConfig(ssh_utils.HostKeyAlias(instance),
args.strict_host_key_checking,
host_keys_to_add=host_keys)
extra_flags = ssh.ParseAndSubstituteSSHFlags(args, remote, instance_address,
internal_address)
remainder = []
if args.ssh_args:
remainder.extend(args.ssh_args)
command_list = args.command.split(' ') if args.command else None
tty = containers.GetTty(args.container, command_list)
remote_command = containers.GetRemoteCommand(args.container, command_list)
ssh_cmd_args = {'remote': remote,
'identity_file': identity_file,
'options': options,
'extra_flags': extra_flags,
'remote_command': remote_command,
'tty': tty,
'iap_tunnel_args': iap_tunnel_args,
'remainder': remainder}
cmd = ssh.SSHCommand(**ssh_cmd_args)
if args.dry_run:
log.out.Print(' '.join(cmd.Build(ssh_helper.env)))
return
if args.plain or use_oslogin:
keys_newly_added = False
else:
keys_newly_added = ssh_helper.EnsureSSHKeyExists(
client, remote.user, instance, project, expiration=expiration)
if keys_newly_added:
poller = ssh_utils.CreateSSHPoller(remote, identity_file, options,
iap_tunnel_args,
extra_flags=extra_flags)
log.status.Print('Waiting for SSH key to propagate.')
try:
poller.Poll(ssh_helper.env, force_connect=True)
except retry.WaitException:
raise ssh_utils.NetworkError()
if args.internal_ip:
ssh_helper.PreliminarilyVerifyInstance(instance.id, remote, identity_file,
options)
# Errors from SSH itself result in an ssh.CommandError being raised
return_code = cmd.Run(ssh_helper.env, force_connect=True)
if return_code:
# This is the return code of the remote command. Problems with SSH itself
# will result in ssh.CommandError being raised above.
sys.exit(return_code)
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class SshBeta(Ssh):
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class SshAlpha(SshBeta):
def DetailedHelp():
detailed_help = {
'brief': 'SSH into a virtual machine instance',
'DESCRIPTION': """\
*{command}* is a thin wrapper around the *ssh(1)* command that
takes care of authentication and the translation of the
instance name into an IP address.
Note, this command does not work when connecting to Windows VMs. To
connect to a Windows instance using a command-line method, refer to this
guide: https://cloud.google.com/compute/docs/instances/connecting-to-instance#windows_cli
The default network comes preconfigured to allow ssh access to
all VMs. If the default network was edited, or if not using the
default network, you may need to explicitly enable ssh access by adding
a firewall-rule:
$ gcloud compute firewall-rules create --network=NETWORK \
default-allow-ssh --allow=tcp:22
{command} ensures that the user's public SSH key is present
in the project's metadata. If the user does not have a public
SSH key, one is generated using *ssh-keygen(1)* (if the `--quiet`
flag is given, the generated key will have an empty passphrase).
""",
'EXAMPLES': """\
To SSH into 'example-instance' in zone ``us-central1-a'', run:
$ {command} example-instance --zone=us-central1-a
You can also run a command on the virtual machine. For
example, to get a snapshot of the guest's process tree, run:
$ {command} example-instance --zone=us-central1-a --command="ps -ejH"
If you are using the Google Container-Optimized virtual machine image,
you can SSH into one of your containers with:
$ {command} example-instance --zone=us-central1-a --container=CONTAINER
You can limit the allowed time to ssh. For example, to allow a key to be
used through 2019:
$ {command} example-instance --zone=us-central1-a --ssh-key-expiration="2020-01-01T00:00:00:00Z"
Or alternatively, allow access for the next two minutes:
$ {command} example-instance --zone=us-central1-a --ssh-key-expire-after=2m
""",
}
return detailed_help
Ssh.detailed_help = DetailedHelp()
| true | true |
f7180fdca7de8d265e0d9027890060eff6ecc433 | 3,563 | py | Python | playground/algorithms/ddpg.py | brandontrabucco/playground | 069be961aaecb45d75f12f4a71cfa65d7152ea8a | [
"MIT"
] | 3 | 2019-12-06T19:22:22.000Z | 2020-01-20T01:57:26.000Z | playground/algorithms/ddpg.py | brandontrabucco/playground | 069be961aaecb45d75f12f4a71cfa65d7152ea8a | [
"MIT"
] | null | null | null | playground/algorithms/ddpg.py | brandontrabucco/playground | 069be961aaecb45d75f12f4a71cfa65d7152ea8a | [
"MIT"
] | null | null | null | """Author: Brandon Trabucco, Copyright 2019, MIT License"""
from playground.algorithms.algorithm import Algorithm
import tensorflow as tf
class DDPG(Algorithm):
def __init__(
self,
policy,
target_policy,
qf,
target_qf,
replay_buffer,
reward_scale=1.0,
discount=0.99,
observation_key="observation",
batch_size=32,
update_every=1,
update_after=0,
logger=None,
logging_prefix="ddpg/"
):
# train a policy using the deep deterministic policy gradient
Algorithm.__init__(
self,
replay_buffer,
batch_size=batch_size,
update_every=update_every,
update_after=update_after,
logger=logger,
logging_prefix=logging_prefix)
# each neural network is probabilistic
self.policy = policy
self.target_policy = target_policy
self.qf = qf
self.target_qf = target_qf
# select into the observation dictionary
self.observation_key = observation_key
# control some parameters that are important for ddpg
self.reward_scale = reward_scale
self.discount = discount
def update_algorithm(
self,
observations,
actions,
rewards,
next_observations,
terminals
):
# select from the observation dictionary
observations = observations[self.observation_key]
next_observations = next_observations[self.observation_key]
# build a tape to collect gradients from the policy and critics
with tf.GradientTape(persistent=True) as tape:
mean_actions, log_pi = self.policy.expected_value(observations)
next_mean_actions, next_log_pi = self.target_policy.expected_value(
next_observations)
# build the q function target value
inputs = tf.concat([next_observations, next_mean_actions], -1)
target_qf_value = self.target_qf(inputs)[..., 0]
self.record("target_qf_value", tf.reduce_mean(target_qf_value).numpy())
qf_targets = tf.stop_gradient(
self.reward_scale * rewards + terminals * self.discount * (
target_qf_value))
self.record("qf_targets", tf.reduce_mean(qf_targets).numpy())
# build the q function loss
inputs = tf.concat([observations, actions], -1)
qf_value = self.qf(inputs)[..., 0]
self.record("qf_value", tf.reduce_mean(qf_value).numpy())
qf_loss = tf.reduce_mean(tf.keras.losses.logcosh(qf_targets, qf_value))
self.record("qf_loss", qf_loss.numpy())
# build the policy loss
inputs = tf.concat([observations, mean_actions], -1)
policy_qf_value = self.qf(inputs)[..., 0]
self.record("policy_qf_value", tf.reduce_mean(policy_qf_value).numpy())
policy_loss = -tf.reduce_mean(policy_qf_value)
self.record("policy_loss", policy_loss.numpy())
# back prop gradients
self.policy.apply_gradients(
self.policy.compute_gradients(policy_loss, tape))
self.qf.apply_gradients(
self.qf.compute_gradients(qf_loss, tape))
# soft update target parameters
self.target_policy.soft_update(self.policy.get_weights())
self.target_qf.soft_update(self.qf.get_weights())
| 35.989899 | 83 | 0.609879 |
from playground.algorithms.algorithm import Algorithm
import tensorflow as tf
class DDPG(Algorithm):
def __init__(
self,
policy,
target_policy,
qf,
target_qf,
replay_buffer,
reward_scale=1.0,
discount=0.99,
observation_key="observation",
batch_size=32,
update_every=1,
update_after=0,
logger=None,
logging_prefix="ddpg/"
):
Algorithm.__init__(
self,
replay_buffer,
batch_size=batch_size,
update_every=update_every,
update_after=update_after,
logger=logger,
logging_prefix=logging_prefix)
self.policy = policy
self.target_policy = target_policy
self.qf = qf
self.target_qf = target_qf
self.observation_key = observation_key
self.reward_scale = reward_scale
self.discount = discount
def update_algorithm(
self,
observations,
actions,
rewards,
next_observations,
terminals
):
observations = observations[self.observation_key]
next_observations = next_observations[self.observation_key]
with tf.GradientTape(persistent=True) as tape:
mean_actions, log_pi = self.policy.expected_value(observations)
next_mean_actions, next_log_pi = self.target_policy.expected_value(
next_observations)
inputs = tf.concat([next_observations, next_mean_actions], -1)
target_qf_value = self.target_qf(inputs)[..., 0]
self.record("target_qf_value", tf.reduce_mean(target_qf_value).numpy())
qf_targets = tf.stop_gradient(
self.reward_scale * rewards + terminals * self.discount * (
target_qf_value))
self.record("qf_targets", tf.reduce_mean(qf_targets).numpy())
inputs = tf.concat([observations, actions], -1)
qf_value = self.qf(inputs)[..., 0]
self.record("qf_value", tf.reduce_mean(qf_value).numpy())
qf_loss = tf.reduce_mean(tf.keras.losses.logcosh(qf_targets, qf_value))
self.record("qf_loss", qf_loss.numpy())
inputs = tf.concat([observations, mean_actions], -1)
policy_qf_value = self.qf(inputs)[..., 0]
self.record("policy_qf_value", tf.reduce_mean(policy_qf_value).numpy())
policy_loss = -tf.reduce_mean(policy_qf_value)
self.record("policy_loss", policy_loss.numpy())
self.policy.apply_gradients(
self.policy.compute_gradients(policy_loss, tape))
self.qf.apply_gradients(
self.qf.compute_gradients(qf_loss, tape))
self.target_policy.soft_update(self.policy.get_weights())
self.target_qf.soft_update(self.qf.get_weights())
| true | true |
f71810217649e9ce7c57c78566cb40789c548173 | 3,089 | py | Python | google/ads/google_ads/v0/proto/resources/gender_view_pb2.py | jwygoda/google-ads-python | 863892b533240cb45269d9c2cceec47e2c5a8b68 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v0/proto/resources/gender_view_pb2.py | jwygoda/google-ads-python | 863892b533240cb45269d9c2cceec47e2c5a8b68 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v0/proto/resources/gender_view_pb2.py | jwygoda/google-ads-python | 863892b533240cb45269d9c2cceec47e2c5a8b68 | [
"Apache-2.0"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v0/proto/resources/gender_view.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v0/proto/resources/gender_view.proto',
package='google.ads.googleads.v0.resources',
syntax='proto3',
serialized_options=_b('\n%com.google.ads.googleads.v0.resourcesB\017GenderViewProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v0/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V0.Resources\312\002!Google\\Ads\\GoogleAds\\V0\\Resources\352\002%Google::Ads::GoogleAds::V0::Resources'),
serialized_pb=_b('\n9google/ads/googleads_v0/proto/resources/gender_view.proto\x12!google.ads.googleads.v0.resources\"#\n\nGenderView\x12\x15\n\rresource_name\x18\x01 \x01(\tB\xfc\x01\n%com.google.ads.googleads.v0.resourcesB\x0fGenderViewProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v0/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V0.Resources\xca\x02!Google\\Ads\\GoogleAds\\V0\\Resources\xea\x02%Google::Ads::GoogleAds::V0::Resourcesb\x06proto3')
)
_GENDERVIEW = _descriptor.Descriptor(
name='GenderView',
full_name='google.ads.googleads.v0.resources.GenderView',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v0.resources.GenderView.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=96,
serialized_end=131,
)
DESCRIPTOR.message_types_by_name['GenderView'] = _GENDERVIEW
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GenderView = _reflection.GeneratedProtocolMessageType('GenderView', (_message.Message,), dict(
DESCRIPTOR = _GENDERVIEW,
__module__ = 'google.ads.googleads_v0.proto.resources.gender_view_pb2'
,
__doc__ = """A gender view.
Attributes:
resource_name:
The resource name of the gender view. Gender view resource
names have the form: ``customers/{customer_id}/genderViews/{a
d_group_id}_{criterion_id}``
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v0.resources.GenderView)
))
_sym_db.RegisterMessage(GenderView)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 38.135802 | 488 | 0.767886 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v0/proto/resources/gender_view.proto',
package='google.ads.googleads.v0.resources',
syntax='proto3',
serialized_options=_b('\n%com.google.ads.googleads.v0.resourcesB\017GenderViewProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v0/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V0.Resources\312\002!Google\\Ads\\GoogleAds\\V0\\Resources\352\002%Google::Ads::GoogleAds::V0::Resources'),
serialized_pb=_b('\n9google/ads/googleads_v0/proto/resources/gender_view.proto\x12!google.ads.googleads.v0.resources\"#\n\nGenderView\x12\x15\n\rresource_name\x18\x01 \x01(\tB\xfc\x01\n%com.google.ads.googleads.v0.resourcesB\x0fGenderViewProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v0/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V0.Resources\xca\x02!Google\\Ads\\GoogleAds\\V0\\Resources\xea\x02%Google::Ads::GoogleAds::V0::Resourcesb\x06proto3')
)
_GENDERVIEW = _descriptor.Descriptor(
name='GenderView',
full_name='google.ads.googleads.v0.resources.GenderView',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v0.resources.GenderView.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=96,
serialized_end=131,
)
DESCRIPTOR.message_types_by_name['GenderView'] = _GENDERVIEW
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GenderView = _reflection.GeneratedProtocolMessageType('GenderView', (_message.Message,), dict(
DESCRIPTOR = _GENDERVIEW,
__module__ = 'google.ads.googleads_v0.proto.resources.gender_view_pb2'
,
__doc__ = """A gender view.
Attributes:
resource_name:
The resource name of the gender view. Gender view resource
names have the form: ``customers/{customer_id}/genderViews/{a
d_group_id}_{criterion_id}``
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v0.resources.GenderView)
))
_sym_db.RegisterMessage(GenderView)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| true | true |
f71810c4ecead08a935a56fcd5fb6be0cdf8d125 | 130 | py | Python | pythreshold/global_th/entropy/__init__.py | pedrogalher/pythreshold | 135e42fb4be1ff4d4c52ea05daca84be1acaa0fc | [
"MIT"
] | null | null | null | pythreshold/global_th/entropy/__init__.py | pedrogalher/pythreshold | 135e42fb4be1ff4d4c52ea05daca84be1acaa0fc | [
"MIT"
] | null | null | null | pythreshold/global_th/entropy/__init__.py | pedrogalher/pythreshold | 135e42fb4be1ff4d4c52ea05daca84be1acaa0fc | [
"MIT"
] | null | null | null | from .pun import pun_threshold
from .kapur import kapur_threshold, kapur_multithreshold
from .johannsen import johannsen_threshold | 43.333333 | 56 | 0.876923 | from .pun import pun_threshold
from .kapur import kapur_threshold, kapur_multithreshold
from .johannsen import johannsen_threshold | true | true |
f71811f3f0271780fcc16a9db631d5dee72d81ba | 1,078 | py | Python | src/python/pants/backend/codegen/jaxb/targets.py | stuhood/pants | 107b8335a03482516f64aefa98aadf9f5278b2ee | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/codegen/jaxb/targets.py | stuhood/pants | 107b8335a03482516f64aefa98aadf9f5278b2ee | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/codegen/jaxb/targets.py | stuhood/pants | 107b8335a03482516f64aefa98aadf9f5278b2ee | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.jvm.rules.targets import COMMON_JVM_FIELDS
from pants.engine.target import Sources, StringField, Target
class JaxbJavaPackage(StringField):
    """Java package (com.company.package) in which to generate the output Java files.
    If unspecified, Pants guesses it from the file path leading to the schema (xsd) file. This guess
    is accurate only if the .xsd file is in a path like `.../com/company/package/schema.xsd`. Pants
    looks for packages that start with 'com', 'org', or 'net'.
    """
    # Field name as it appears in BUILD files, e.g. `package="com.example"`.
    alias = "package"
class JaxbLanguage(StringField):
    """The language to use, which currently can only be `java`."""
    # Field name as it appears in BUILD files.
    alias = "language"
    # Only Java output is supported, so the choice set has a single entry.
    valid_choices = ("java",)
    default = "java"
    # Narrows the inherited value type: a default always exists, so never None.
    value: str
class JaxbLibrary(Target):
    """A Java library generated from JAXB xsd files."""
    # Target type name as it appears in BUILD files.
    alias = "jaxb_library"
    # Common JVM fields plus the JAXB-specific fields declared above.
    core_fields = (*COMMON_JVM_FIELDS, Sources, JaxbJavaPackage, JaxbLanguage)
    # NOTE(review): presumably restricts this target to the v1 engine — confirm
    # against the Target base class documentation.
    v1_only = True
| 31.705882 | 100 | 0.71243 |
from pants.backend.jvm.rules.targets import COMMON_JVM_FIELDS
from pants.engine.target import Sources, StringField, Target
class JaxbJavaPackage(StringField):
alias = "package"
class JaxbLanguage(StringField):
alias = "language"
valid_choices = ("java",)
default = "java"
value: str
class JaxbLibrary(Target):
alias = "jaxb_library"
core_fields = (*COMMON_JVM_FIELDS, Sources, JaxbJavaPackage, JaxbLanguage)
v1_only = True
| true | true |
f718134ad71e50e6db3ca760f1916747c1d91ee2 | 4,100 | py | Python | tests/plot_time_space.py | folk85/gen_turb | 4390938c4cefae334e95414f83b9c484991bff67 | [
"MIT"
] | 1 | 2020-09-10T07:42:29.000Z | 2020-09-10T07:42:29.000Z | tests/plot_time_space.py | folk85/gen_turb | 4390938c4cefae334e95414f83b9c484991bff67 | [
"MIT"
] | null | null | null | tests/plot_time_space.py | folk85/gen_turb | 4390938c4cefae334e95414f83b9c484991bff67 | [
"MIT"
] | 1 | 2019-08-08T20:08:49.000Z | 2019-08-08T20:08:49.000Z | # -*- coding: utf-8 -*-
import os
import numpy as np
import matplotlib as m
import matplotlib.pyplot as plt
from scipy.fftpack import *
from plot_spectr import *
def main_routine():
  """Report how many complete 64^3 snapshots are stored in ./store.dat.

  Streams the data file (one velocity sample per line) to count its lines,
  then derives the number of whole time steps it contains.
  """
  print(os.getcwd())
  nfile = './store.dat'
  # Stream line by line so the whole file is never held in memory; the
  # context manager handles closing (the old explicit close() was redundant).
  with open(nfile, 'r') as f:
    nel = sum(1 for _ in f)
  # One snapshot is a full 64x64x64 grid. Use integer division: a partial
  # snapshot at the end of the file is not a usable time step.
  nk = 64 * 64 * 64
  ntimes = nel // nk
def get_nel(nfile):
  """Return the number of lines in the file at path *nfile*.

  The file is consumed lazily, so arbitrarily large files can be counted
  without loading them into memory.
  """
  # The `with` block closes the file; the previous explicit close() inside
  # the context manager was redundant and has been removed.
  with open(nfile, 'r') as f:
    return sum(1 for _ in f)
def plot_spectr(uin,vin,win):
  """Compute and plot the 3-D energy spectrum and two-point correlations.

  Reads a cubic velocity field (u, v, w columns) from ./store.dat, bins
  the FFT energy into spherical wavenumber shells, plots the spectrum
  against the model curve Ek(), then plots the longitudinal/transverse
  correlation functions and prints the integral length scales.

  NOTE(review): the uin/vin/win parameters are never used — the data is
  re-read from ./store.dat instead; confirm whether they should replace
  the file read.
  """
  # Model-spectrum parameters forwarded to Ek() for the reference curve.
  alpha = 1.339e0
  L = 1.0e-1
  sigma = 1.0e+1
  # x,y,z = np.genfromtxt('tests/spectr.dat',unpack=True)
  # x,y,z = np.genfromtxt('../hita/spectrum.dat',unpack=True)
  # x1,y1,z1 = np.genfromtxt('../hita/spectrum_32.dat',unpack=True)
  uvel,vvel,wvel = np.genfromtxt('./store.dat',unpack=True)
  # The field is assumed cubic: nk is the grid size per dimension.
  nk = int(round(np.size(uvel)**(1./3.)))
  nel = nk
  ufft = fftn(uvel.reshape(nk,nk,nk))
  vfft = fftn(vvel.reshape(nk,nk,nk))
  wfft = fftn(wvel.reshape(nk,nk,nk))
  # Per-component power spectra, normalised by nel^6 (unscaled forward FFT).
  muu = ufft*np.conj(ufft) / nel**6
  mvv = vfft*np.conj(vfft) / nel**6
  mww = wfft*np.conj(wfft) / nel**6
  # calc std
  umean = np.array([np.mean(uvel),np.mean(vvel),np.mean(wvel)])
  std_i = np.array([np.std(uvel),np.std(vvel),np.std(wvel)])
  # sigma is re-bound here: from the model default to the measured RMS.
  sigma = np.sqrt(np.sum(std_i[:]**2))
  print(std_i[0],np.sqrt(np.mean((uvel[:]-umean[0])**2)), sigma)
  # Wavenumber axis with spacing dx, rolled so index 0 holds wavenumber 0
  # (matching the FFT's frequency ordering).
  dx = 10.
  k = np.arange(-nk//2,nk//2)*dx
  k = np.roll(k,nk//2)
  spectrum = np.zeros(nk)
  count = np.zeros(nk)
  # ?np.meshgrid(k,k,k)
  X,Y,Z = np.meshgrid(k,k,k)
  r = np.sqrt(X**2+Y**2+Z**2) #*dx
  # print(np.shape(r),r.min(),r.max(),k.max(),r[:,0,0])
  # Shell-average: sum the energy of all modes within dx/2 of each |k|.
  for i,ki in enumerate(k[:nk//2]):
    t = np.where((r<=ki+dx/2)&(r>ki-dx/2))
    spectrum[i] = np.sum(muu[t].real) + np.sum(mvv[t].real) + np.sum(mww[t].real)
    count[i] = np.size(t[0])
    # Spherical-surface weighting; the tiny epsilon guards empty shells.
    spectrum[i] *= 2.*np.pi*k[i]**2/dx**3/(count[i]+1.0e-30)
  font = {'family': 'Droid Sans',
          'weight': 'normal',
          'size': 12}
  m.rc('axes',linewidth=2)
  m.rc('font',**font)
  m.rc('lines',markeredgewidth=1.0)
  # First figure: measured shell spectrum vs. the model curve Ek().
  f,ax = plt.subplots()
  xf = np.linspace(np.log(k[1]/2),np.log(k[nk//2-1]*2.),100)
  xf = np.exp(xf)
  ax.loglog(xf,Ek(xf,alpha,L,sigma),c='g',lw=2)
  ax.loglog(k[:nk//2],spectrum[:nk//2],'bx-',lw=0.5,ms=8)
  # ax.loglog(x,y,'bx')
  # ax.loglog(x1,y1,'ro')
  ax.set_xlabel(u'$k, 1/м$',size='large')
  ax.set_ylabel(u'$E(k), м^3/с^2$',size='large')
  plt.grid()
  plt.tight_layout()
  plt.show()
  del(f)
  del(ax)
  plt.clf()
  Rij_x=(ufft*np.conj(ufft)) # compute velo. correlation tensor
  Rij_y=(vfft*np.conj(vfft))
  Rij_z=(wfft*np.conj(wfft))
  # Inverse FFT of the power spectrum gives the autocorrelation
  # (Wiener-Khinchin); normalised by the component variance.
  R1=ifftn(Rij_x)/np.std((uvel))**2/nel**3;
  R2=ifftn(Rij_y)/np.std((vvel))**2/nel**3;
  R3=ifftn(Rij_z)/np.std((wvel))**2/nel**3;
  NFFT=np.size(ufft,1)
  # Longitudinal correlation: each component along its own axis, averaged.
  R11 = (R3[0,0,:]+R2[0,:,0]+R1[:,0,0])/3.
  # R11 = R11[:np.size(ufft)//2+1]
  # Transverse correlation: each component across the other axes, averaged.
  R1_22 = (R1[0,:,0]+R3[0,:,0])/2.0e0
  R2_22 = (R2[:,0,0]+R3[:,0,0])/2.0e0
  R3_22 = (R1[0,0,:]+R2[0,0,:])/2.0e0
  R22 = (R1_22+R2_22+R3_22)/3.0e0
  # R22 = R22(1:size(u_fft)/2+1);
  # Separation axis normalised to the half-domain; integral scales follow
  # by trapezoidal integration of the correlation curves.
  Lx = 2.0*np.pi*1.0e-1
  r = np.linspace(0,Lx,NFFT)/(Lx/2);
  l11 = np.trapz(np.real(R11[:NFFT//2+1]),dx=r[1]-r[0])
  l22 = np.trapz(np.real(R22[:NFFT//2+1]),dx=r[1]-r[0])
  print("Integral Length Scale Longitudal: %g"%(l11))
  print("Integral Length Scale Tangent: %g"%(l22))
  # Second figure: correlation functions with exponential reference fits.
  f,ax = plt.subplots(1)
  ax.plot(r[:NFFT//2+1],R11[:NFFT//2+1],marker='>',mfc='w',lw=2,label=u'$R_{11}$')
  ax.plot(r[:NFFT//2+1],R22[:NFFT//2+1],marker='s',markerfacecolor='w',lw=2,label=u'$R_{22}$')
  ax.plot(r[:NFFT//2],np.exp(-r[:NFFT//2]/l11))
  ax.plot(r[:NFFT//2],1.e0+(1.0e0-R22[NFFT//2])*(np.exp(-r[:NFFT//2]/(l22-R22[NFFT//2]))-1.0e0))
  plt.legend()
  plt.tight_layout()
  ax.set_xlabel(u'$r$')
  ax.set_ylabel(u'$R_{11}, R_{22}$')
  plt.grid()
  plt.show()
  return [k[:nk//2],spectrum[:nk//2],r[:NFFT//2+1],R11[:NFFT//2+1],R22[:NFFT//2+1]]
def Ek(k, alpha=1.339, L=0.01, sigma=10.):
  """Model turbulence energy spectrum E(k) (von Karman-type form).

  Evaluates sigma^2 * L * 55 * (alpha*L*k)^4 / (27*pi * (1 + (alpha*L*k)^2)^(17/6)).
  Works elementwise when k is a numpy array.
  """
  kl2 = (alpha * L * k) ** 2
  numerator = sigma * sigma * L * kl2 * kl2 * 5.5e+1
  denominator = 27.0 * np.pi * (1.0 + kl2) ** (1.7e+1 / 6.0e0)
  return numerator / denominator
# Script entry point: run the analysis when executed directly.
if __name__ == '__main__':
  main_routine()
| 30.37037 | 96 | 0.580488 |
import os
import numpy as np
import matplotlib as m
import matplotlib.pyplot as plt
from scipy.fftpack import *
from plot_spectr import *
def main_routine():
print(os.getcwd())
nfile = './store.dat'
with open(nfile,'r') as f:
nel = sum(1 for _ in f)
f.close()
nk = 64*64 *64
ntimes = nel / nk
def get_nel(nfile):
with open(nfile,'r') as f:
nel = sum(1 for _ in f)
f.close()
return nel
def plot_spectr(uin,vin,win):
alpha = 1.339e0
L = 1.0e-1
sigma = 1.0e+1
uvel,vvel,wvel = np.genfromtxt('./store.dat',unpack=True)
nk = int(round(np.size(uvel)**(1./3.)))
nel = nk
ufft = fftn(uvel.reshape(nk,nk,nk))
vfft = fftn(vvel.reshape(nk,nk,nk))
wfft = fftn(wvel.reshape(nk,nk,nk))
muu = ufft*np.conj(ufft) / nel**6
mvv = vfft*np.conj(vfft) / nel**6
mww = wfft*np.conj(wfft) / nel**6
umean = np.array([np.mean(uvel),np.mean(vvel),np.mean(wvel)])
std_i = np.array([np.std(uvel),np.std(vvel),np.std(wvel)])
sigma = np.sqrt(np.sum(std_i[:]**2))
print(std_i[0],np.sqrt(np.mean((uvel[:]-umean[0])**2)), sigma)
dx = 10.
k = np.arange(-nk//2,nk//2)*dx
k = np.roll(k,nk//2)
spectrum = np.zeros(nk)
count = np.zeros(nk)
X,Y,Z = np.meshgrid(k,k,k)
r = np.sqrt(X**2+Y**2+Z**2)
for i,ki in enumerate(k[:nk//2]):
t = np.where((r<=ki+dx/2)&(r>ki-dx/2))
spectrum[i] = np.sum(muu[t].real) + np.sum(mvv[t].real) + np.sum(mww[t].real)
count[i] = np.size(t[0])
spectrum[i] *= 2.*np.pi*k[i]**2/dx**3/(count[i]+1.0e-30)
font = {'family': 'Droid Sans',
'weight': 'normal',
'size': 12}
m.rc('axes',linewidth=2)
m.rc('font',**font)
m.rc('lines',markeredgewidth=1.0)
f,ax = plt.subplots()
xf = np.linspace(np.log(k[1]/2),np.log(k[nk//2-1]*2.),100)
xf = np.exp(xf)
ax.loglog(xf,Ek(xf,alpha,L,sigma),c='g',lw=2)
ax.loglog(k[:nk//2],spectrum[:nk//2],'bx-',lw=0.5,ms=8)
ax.set_xlabel(u'$k, 1/м$',size='large')
ax.set_ylabel(u'$E(k), м^3/с^2$',size='large')
plt.grid()
plt.tight_layout()
plt.show()
del(f)
del(ax)
plt.clf()
Rij_x=(ufft*np.conj(ufft))
Rij_y=(vfft*np.conj(vfft))
Rij_z=(wfft*np.conj(wfft))
R1=ifftn(Rij_x)/np.std((uvel))**2/nel**3;
R2=ifftn(Rij_y)/np.std((vvel))**2/nel**3;
R3=ifftn(Rij_z)/np.std((wvel))**2/nel**3;
NFFT=np.size(ufft,1)
R11 = (R3[0,0,:]+R2[0,:,0]+R1[:,0,0])/3.
R1_22 = (R1[0,:,0]+R3[0,:,0])/2.0e0
R2_22 = (R2[:,0,0]+R3[:,0,0])/2.0e0
R3_22 = (R1[0,0,:]+R2[0,0,:])/2.0e0
R22 = (R1_22+R2_22+R3_22)/3.0e0
Lx = 2.0*np.pi*1.0e-1
r = np.linspace(0,Lx,NFFT)/(Lx/2);
l11 = np.trapz(np.real(R11[:NFFT//2+1]),dx=r[1]-r[0])
l22 = np.trapz(np.real(R22[:NFFT//2+1]),dx=r[1]-r[0])
print("Integral Length Scale Longitudal: %g"%(l11))
print("Integral Length Scale Tangent: %g"%(l22))
f,ax = plt.subplots(1)
ax.plot(r[:NFFT//2+1],R11[:NFFT//2+1],marker='>',mfc='w',lw=2,label=u'$R_{11}$')
ax.plot(r[:NFFT//2+1],R22[:NFFT//2+1],marker='s',markerfacecolor='w',lw=2,label=u'$R_{22}$')
ax.plot(r[:NFFT//2],np.exp(-r[:NFFT//2]/l11))
ax.plot(r[:NFFT//2],1.e0+(1.0e0-R22[NFFT//2])*(np.exp(-r[:NFFT//2]/(l22-R22[NFFT//2]))-1.0e0))
plt.legend()
plt.tight_layout()
ax.set_xlabel(u'$r$')
ax.set_ylabel(u'$R_{11}, R_{22}$')
plt.grid()
plt.show()
return [k[:nk//2],spectrum[:nk//2],r[:NFFT//2+1],R11[:NFFT//2+1],R22[:NFFT//2+1]]
def Ek(k,alpha=1.339,L=0.01,sigma=10.):
tmp = (alpha * L * k) **2
tmp = sigma*sigma*L * tmp * tmp * 5.5e+1/ (27.0 * np.pi * (1.0 + tmp)**(1.7e+1/6.0e0))
return tmp
if __name__ == '__main__':
main_routine()
| true | true |
f71813e7fa972b662bb12978d9498a527f879572 | 60 | py | Python | tacotron2/__init__.py | samia-mmx/T2_PT | 25ed08791f72492440e9a796d37c5e67a51aaf05 | [
"BSD-3-Clause"
] | null | null | null | tacotron2/__init__.py | samia-mmx/T2_PT | 25ed08791f72492440e9a796d37c5e67a51aaf05 | [
"BSD-3-Clause"
] | null | null | null | tacotron2/__init__.py | samia-mmx/T2_PT | 25ed08791f72492440e9a796d37c5e67a51aaf05 | [
"BSD-3-Clause"
] | null | null | null | from .entrypoints import nvidia_tacotron2, nvidia_tts_utils
| 30 | 59 | 0.883333 | from .entrypoints import nvidia_tacotron2, nvidia_tts_utils
| true | true |
f718170478283f6fd995f6b98c28ab10f3a084fa | 5,620 | py | Python | google/ads/google_ads/v4/proto/enums/change_status_resource_type_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | 1 | 2021-04-09T04:28:47.000Z | 2021-04-09T04:28:47.000Z | google/ads/google_ads/v4/proto/enums/change_status_resource_type_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v4/proto/enums/change_status_resource_type_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v4/proto/enums/change_status_resource_type.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v4/proto/enums/change_status_resource_type.proto',
package='google.ads.googleads.v4.enums',
syntax='proto3',
serialized_options=_b('\n!com.google.ads.googleads.v4.enumsB\035ChangeStatusResourceTypeProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v4/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V4.Enums\312\002\035Google\\Ads\\GoogleAds\\V4\\Enums\352\002!Google::Ads::GoogleAds::V4::Enums'),
serialized_pb=_b('\nEgoogle/ads/googleads_v4/proto/enums/change_status_resource_type.proto\x12\x1dgoogle.ads.googleads.v4.enums\x1a\x1cgoogle/api/annotations.proto\"\x90\x02\n\x1c\x43hangeStatusResourceTypeEnum\"\xef\x01\n\x18\x43hangeStatusResourceType\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x0c\n\x08\x41\x44_GROUP\x10\x03\x12\x0f\n\x0b\x41\x44_GROUP_AD\x10\x04\x12\x16\n\x12\x41\x44_GROUP_CRITERION\x10\x05\x12\x0c\n\x08\x43\x41MPAIGN\x10\x06\x12\x16\n\x12\x43\x41MPAIGN_CRITERION\x10\x07\x12\x08\n\x04\x46\x45\x45\x44\x10\t\x12\r\n\tFEED_ITEM\x10\n\x12\x11\n\rAD_GROUP_FEED\x10\x0b\x12\x11\n\rCAMPAIGN_FEED\x10\x0c\x12\x19\n\x15\x41\x44_GROUP_BID_MODIFIER\x10\rB\xf2\x01\n!com.google.ads.googleads.v4.enumsB\x1d\x43hangeStatusResourceTypeProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v4/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V4.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V4\\Enums\xea\x02!Google::Ads::GoogleAds::V4::Enumsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_CHANGESTATUSRESOURCETYPEENUM_CHANGESTATUSRESOURCETYPE = _descriptor.EnumDescriptor(
name='ChangeStatusResourceType',
full_name='google.ads.googleads.v4.enums.ChangeStatusResourceTypeEnum.ChangeStatusResourceType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AD_GROUP', index=2, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AD_GROUP_AD', index=3, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AD_GROUP_CRITERION', index=4, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAMPAIGN', index=5, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAMPAIGN_CRITERION', index=6, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FEED', index=7, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FEED_ITEM', index=8, number=10,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AD_GROUP_FEED', index=9, number=11,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAMPAIGN_FEED', index=10, number=12,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AD_GROUP_BID_MODIFIER', index=11, number=13,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=168,
serialized_end=407,
)
_sym_db.RegisterEnumDescriptor(_CHANGESTATUSRESOURCETYPEENUM_CHANGESTATUSRESOURCETYPE)
_CHANGESTATUSRESOURCETYPEENUM = _descriptor.Descriptor(
name='ChangeStatusResourceTypeEnum',
full_name='google.ads.googleads.v4.enums.ChangeStatusResourceTypeEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_CHANGESTATUSRESOURCETYPEENUM_CHANGESTATUSRESOURCETYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=135,
serialized_end=407,
)
_CHANGESTATUSRESOURCETYPEENUM_CHANGESTATUSRESOURCETYPE.containing_type = _CHANGESTATUSRESOURCETYPEENUM
DESCRIPTOR.message_types_by_name['ChangeStatusResourceTypeEnum'] = _CHANGESTATUSRESOURCETYPEENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ChangeStatusResourceTypeEnum = _reflection.GeneratedProtocolMessageType('ChangeStatusResourceTypeEnum', (_message.Message,), dict(
DESCRIPTOR = _CHANGESTATUSRESOURCETYPEENUM,
__module__ = 'google.ads.googleads_v4.proto.enums.change_status_resource_type_pb2'
,
__doc__ = """Container for enum describing supported resource types for the
ChangeStatus resource.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v4.enums.ChangeStatusResourceTypeEnum)
))
_sym_db.RegisterMessage(ChangeStatusResourceTypeEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 41.62963 | 1,005 | 0.775801 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v4/proto/enums/change_status_resource_type.proto',
package='google.ads.googleads.v4.enums',
syntax='proto3',
serialized_options=_b('\n!com.google.ads.googleads.v4.enumsB\035ChangeStatusResourceTypeProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v4/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V4.Enums\312\002\035Google\\Ads\\GoogleAds\\V4\\Enums\352\002!Google::Ads::GoogleAds::V4::Enums'),
serialized_pb=_b('\nEgoogle/ads/googleads_v4/proto/enums/change_status_resource_type.proto\x12\x1dgoogle.ads.googleads.v4.enums\x1a\x1cgoogle/api/annotations.proto\"\x90\x02\n\x1c\x43hangeStatusResourceTypeEnum\"\xef\x01\n\x18\x43hangeStatusResourceType\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x0c\n\x08\x41\x44_GROUP\x10\x03\x12\x0f\n\x0b\x41\x44_GROUP_AD\x10\x04\x12\x16\n\x12\x41\x44_GROUP_CRITERION\x10\x05\x12\x0c\n\x08\x43\x41MPAIGN\x10\x06\x12\x16\n\x12\x43\x41MPAIGN_CRITERION\x10\x07\x12\x08\n\x04\x46\x45\x45\x44\x10\t\x12\r\n\tFEED_ITEM\x10\n\x12\x11\n\rAD_GROUP_FEED\x10\x0b\x12\x11\n\rCAMPAIGN_FEED\x10\x0c\x12\x19\n\x15\x41\x44_GROUP_BID_MODIFIER\x10\rB\xf2\x01\n!com.google.ads.googleads.v4.enumsB\x1d\x43hangeStatusResourceTypeProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v4/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V4.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V4\\Enums\xea\x02!Google::Ads::GoogleAds::V4::Enumsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_CHANGESTATUSRESOURCETYPEENUM_CHANGESTATUSRESOURCETYPE = _descriptor.EnumDescriptor(
name='ChangeStatusResourceType',
full_name='google.ads.googleads.v4.enums.ChangeStatusResourceTypeEnum.ChangeStatusResourceType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AD_GROUP', index=2, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AD_GROUP_AD', index=3, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AD_GROUP_CRITERION', index=4, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAMPAIGN', index=5, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAMPAIGN_CRITERION', index=6, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FEED', index=7, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FEED_ITEM', index=8, number=10,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AD_GROUP_FEED', index=9, number=11,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAMPAIGN_FEED', index=10, number=12,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AD_GROUP_BID_MODIFIER', index=11, number=13,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=168,
serialized_end=407,
)
_sym_db.RegisterEnumDescriptor(_CHANGESTATUSRESOURCETYPEENUM_CHANGESTATUSRESOURCETYPE)
_CHANGESTATUSRESOURCETYPEENUM = _descriptor.Descriptor(
name='ChangeStatusResourceTypeEnum',
full_name='google.ads.googleads.v4.enums.ChangeStatusResourceTypeEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_CHANGESTATUSRESOURCETYPEENUM_CHANGESTATUSRESOURCETYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=135,
serialized_end=407,
)
_CHANGESTATUSRESOURCETYPEENUM_CHANGESTATUSRESOURCETYPE.containing_type = _CHANGESTATUSRESOURCETYPEENUM
DESCRIPTOR.message_types_by_name['ChangeStatusResourceTypeEnum'] = _CHANGESTATUSRESOURCETYPEENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ChangeStatusResourceTypeEnum = _reflection.GeneratedProtocolMessageType('ChangeStatusResourceTypeEnum', (_message.Message,), dict(
DESCRIPTOR = _CHANGESTATUSRESOURCETYPEENUM,
__module__ = 'google.ads.googleads_v4.proto.enums.change_status_resource_type_pb2'
,
__doc__ = """Container for enum describing supported resource types for the
ChangeStatus resource.
""",
))
_sym_db.RegisterMessage(ChangeStatusResourceTypeEnum)
DESCRIPTOR._options = None
| true | true |
f718190eca4cc66afac5d11490eec0b6d1f694cf | 10,310 | py | Python | tests/unit/Stories.py | rashmi43/platform-engine | dd9a22742bc8dc43a530ea5edef39b3c35db57c1 | [
"Apache-2.0"
] | null | null | null | tests/unit/Stories.py | rashmi43/platform-engine | dd9a22742bc8dc43a530ea5edef39b3c35db57c1 | [
"Apache-2.0"
] | null | null | null | tests/unit/Stories.py | rashmi43/platform-engine | dd9a22742bc8dc43a530ea5edef39b3c35db57c1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import pathlib
import time
from asyncy.Stories import MAX_BYTES_LOGGING, Stories
from asyncy.utils import Dict, Resolver
from pytest import mark
def test_stories_init(app, logger, story):
    """A freshly constructed story mirrors the app's registered story data."""
    expected_entrypoint = app.stories['hello.story']['entrypoint']
    assert story.entrypoint == expected_entrypoint
    assert story.app == app
    assert story.name == 'hello.story'
    assert story.logger == logger
    assert story.execution_id is not None
    assert story.results == {}
def test_stories_get_tmp_dir(story):
    """The per-execution temp dir is derived from the execution id."""
    story.execution_id = 'ex'
    tmp_dir = story.get_tmp_dir()
    assert tmp_dir == '/tmp/story.ex'
def test_stories_create_tmp_dir(patch, story):
    """create_tmp_dir() must be idempotent and create the dir securely."""
    patch.object(pathlib, 'Path')
    patch.object(story, 'get_tmp_dir')
    # Invoke twice on purpose: the directory must only be created once.
    for _ in range(2):
        story.create_tmp_dir()
    story.get_tmp_dir.assert_called_once()
    pathlib.Path.assert_called_with(story.get_tmp_dir())
    pathlib.Path().mkdir.assert_called_with(
        exist_ok=True, mode=0o700, parents=True)
@mark.parametrize('long', [True, False])
def test_get_str_for_logging(long):
    """Long strings are truncated for logging; short ones pass through."""
    # String repetition replaces the previous quadratic += append loop.
    test_str = 'a' * 1024 if long else 'hello world'
    actual_val = Stories.get_str_for_logging(test_str)
    if long:
        truncated = 1024 - MAX_BYTES_LOGGING
        assert actual_val == f'{test_str[:MAX_BYTES_LOGGING]} ... ' \
                             f'({truncated} bytes truncated)'
    else:
        assert actual_val == 'hello world'
def test_stories_line(magic, story):
    """line() returns the tree entry keyed by the line number."""
    story.tree = magic()
    assert story.line('1') == story.tree['1']
def test_stories_line_none(magic, story):
    """line() maps a None line number to None."""
    story.tree = magic()
    assert story.line(None) is None
def test_stories_first_line(patch, story):
    """first_line() resolves to the entrypoint's line number."""
    story.entrypoint = '16'
    story.tree = {
        '23': {'ln': '23'},
        '16': {'ln': '16'},
    }
    assert story.first_line() == '16'
def test_stories_function_line_by_name(patch, story):
    """function_line_by_name() looks up the function's line via line()."""
    patch.object(story, 'line')
    ret = story.function_line_by_name('execute')
    expected_arg = story.app.stories[story.name]['functions']['execute']
    story.line.assert_called_with(expected_arg)
    assert ret == story.line()
def test_stories_resolve(patch, logger, story):
    """resolve() passes the raw argument through the Resolver."""
    patch.object(Resolver, 'resolve')
    story.context = 'context'
    assert story.resolve('args') == 'args'
def test_command_arguments_list(patch, story):
    """Resolved argument values are collected into a list, encoded."""
    patch.object(Stories, 'resolve', return_value='something')
    argument = {'$OBJECT': 'string', 'string': 'string'}
    result = story.command_arguments_list([argument])
    Stories.resolve.assert_called_with(argument, encode=True)
    assert result == ['something']
def test_command_arguments_list_none(patch, story):
    """An argument that resolves to None is used literally (unencoded)."""
    patch.object(Stories, 'resolve', return_value=None)
    argument = {'$OBJECT': 'path', 'paths': ['literal']}
    result = story.command_arguments_list([argument])
    Stories.resolve.assert_called_with(argument)
    assert result == ['literal']
def _seed_results(story):
    """Seed the story with a single in-flight line result keyed '1'."""
    story.results = {'1': {'start': 'start'}}
def test_stories_start_line(patch, story):
    """start_line() records the (mocked) wall-clock start time."""
    patch.object(time, 'time')
    story.start_line('1')
    assert story.results['1'] == {'start': time.time()}
def test_stories_end_line(patch, story):
    """end_line() records an end time and defaults the output to None."""
    patch.object(time, 'time')
    _seed_results(story)
    story.end_line('1')
    entry = story.results['1']
    assert entry['output'] is None
    assert entry['end'] == time.time()
    assert entry['start'] == 'start'
def test_stories_end_line_output(patch, story):
    """An explicit output value is stored verbatim."""
    patch.object(time, 'time')
    _seed_results(story)
    story.end_line('1', output='output')
    assert story.results['1']['output'] == 'output'
def test_stories_end_line_output_assign(patch, story):
    """When an assignment is given, the output is written to the context."""
    patch.object(Dict, 'set')
    _seed_results(story)
    assign = {'paths': ['x']}
    story.end_line('1', output='output', assign=assign)
    assert story.results['1']['output'] == 'output'
    Dict.set.assert_called_with(story.context, assign['paths'], 'output')
def test_stories_end_line_output_as_list(patch, story):
    """List outputs are stored as-is."""
    patch.object(time, 'time')
    _seed_results(story)
    story.end_line('1', output=['a', 'b'])
    assert story.results['1']['output'] == ['a', 'b']
def test_stories_end_line_output_as_json_no_auto_convert(patch, story):
    """JSON-looking strings must NOT be parsed automatically."""
    patch.object(time, 'time')
    _seed_results(story)
    story.end_line('1', output='{"key":"value"}')
    assert story.results['1']['output'] == '{"key":"value"}'
def test_stories_end_line_output_as_sting(patch, story):
    """Whitespace in string outputs is preserved, not stripped."""
    # NOTE(review): "sting" in the test name looks like a typo for "string".
    patch.object(time, 'time')
    _seed_results(story)
    story.end_line('1', output=' foobar\n\t')
    assert story.results['1']['output'] == ' foobar\n\t'
def test_stories_end_line_output_as_bytes(patch, story):
    """Bytes outputs are stored without decoding."""
    patch.object(time, 'time')
    _seed_results(story)
    story.end_line('1', output=b'output')
    assert story.results['1']['output'] == b'output'
@mark.parametrize('input,output', [
(None, 'null'),
(False, 'false'),
(True, 'true'),
('string', "'string'"),
("st'ring", "'st\'ring'"),
(1, "'1'"),
({'foo': 'bar'}, "'{\"foo\": \"bar\"}'"),
(['foobar'], "'[\"foobar\"]'"),
])
def test_stories_encode(story, input, output):
assert story.encode(input) == output
def test_stories_argument_by_name_empty(story):
assert story.argument_by_name({}, 'foo') is None
def test_stories_argument_by_name_lookup(patch, story):
line = {
'args': [
{
'$OBJECT': 'argument',
'name': 'foo',
'argument': {'$OBJECT': 'string', 'string': 'bar'}
}
]
}
patch.object(story, 'resolve')
story.argument_by_name(line, 'foo')
story.resolve.assert_called_with(line['args'][0]['argument'], encode=False)
def test_stories_argument_by_name_missing(patch, story):
line = {'args': []}
assert story.argument_by_name(line, 'foo') is None
def test_stories_prepare(story):
story.prepare(None)
def test_stories_prepare_context(story, app):
story.app = app
context = {'app': app.app_context}
story.prepare(context=context)
assert story.environment == app.environment
assert story.context == context
def test_stories_next_block_simple(patch, story):
story.tree = {
'2': {'ln': '2', 'enter': '3', 'next': '3'},
'3': {'ln': '3', 'parent': '2', 'next': '4'},
'4': {'ln': '4'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) == story.tree['4']
def test_stories_next_block_as_lines(patch, story):
story.tree = {
'2': {'ln': '2', 'next': '3'},
'3': {'ln': '3', 'next': '4'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) == story.tree['3']
def test_stories_next_block_where_next_block_is_block(patch, story):
story.tree = {
'2': {'ln': '2', 'next': '3'},
'3': {'ln': '3', 'next': '4', 'enter': '4'},
'4': {'ln': '4', 'parent': '3'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) == story.tree['3']
def test_stories_next_block_only_block(patch, story):
story.tree = {
'2': {'ln': '2'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) is None
def test_stories_context_for_function_call(story):
assert story.context_for_function_call({}, {}) == {}
def test_stories_context_for_function_call_with_args(story):
line = {
'args': [
{
'$OBJECT': 'argument',
'name': 'foo',
'argument': {
'$OBJECT': 'string',
'string': 'bar'
}
},
{
'$OBJECT': 'argument',
'name': 'foo1',
'argument': {
'$OBJECT': 'string',
'string': 'bar1'
}
}
]
}
function_line = {
'args': [
{
'$OBJECT': 'argument',
'name': 'foo',
'argument': {
'$OBJECT': 'type',
'type': 'string'
}
},
{
'$OBJECT': 'argument',
'name': 'foo1',
'argument': {
'$OBJECT': 'type',
'type': 'string'
}
}
]
}
assert story.context_for_function_call(line, function_line) == {
'foo': 'bar',
'foo1': 'bar1'
}
def test_stories_next_block_nested(patch, story):
story.tree = {
'2': {'ln': '2', 'enter': '3', 'next': '3'},
'3': {'ln': '3', 'parent': '2', 'next': '4'},
'4': {'ln': '4', 'enter': '5', 'parent': '2', 'next': '5'},
'5': {'ln': '5', 'parent': '4', 'next': '6'},
'6': {'ln': '6', 'parent': '4', 'next': '7'},
'7': {'ln': '7'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) == story.tree['7']
def test_stories_next_block_last_line(patch, story):
story.tree = {
'2': {'ln': '2', 'enter': '3', 'next': '3'},
'3': {'ln': '3', 'parent': '2', 'next': '4'},
'4': {'ln': '4', 'enter': '5', 'parent': '2', 'next': '5'},
'5': {'ln': '5', 'parent': '4', 'next': '6'},
'6': {'ln': '6', 'parent': '4'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) is None
def test_stories_next_block_nested_inner(patch, story):
story.tree = {
'2': {'ln': '2', 'enter': '3', 'next': '3'},
'3': {'ln': '3', 'parent': '2', 'next': '4'},
'4': {'ln': '4', 'enter': '5', 'parent': '2', 'next': '5'},
'5': {'ln': '5', 'parent': '4', 'next': '6'},
'6': {'ln': '6', 'parent': '4', 'next': '7'},
'7': {'ln': '7', 'parent': '2', 'next': '8'},
'8': {'ln': '8', 'parent': '2'}
}
assert isinstance(story, Stories)
assert story.tree['7'] == story.next_block(story.line('4'))
| 28.169399 | 79 | 0.562949 |
import pathlib
import time
from asyncy.Stories import MAX_BYTES_LOGGING, Stories
from asyncy.utils import Dict, Resolver
from pytest import mark
def test_stories_init(app, logger, story):
assert story.entrypoint == app.stories['hello.story']['entrypoint']
assert story.app == app
assert story.name == 'hello.story'
assert story.logger == logger
assert story.execution_id is not None
assert story.results == {}
def test_stories_get_tmp_dir(story):
story.execution_id = 'ex'
assert story.get_tmp_dir() == '/tmp/story.ex'
def test_stories_create_tmp_dir(patch, story):
patch.object(pathlib, 'Path')
patch.object(story, 'get_tmp_dir')
story.create_tmp_dir()
story.create_tmp_dir()
story.get_tmp_dir.assert_called_once()
pathlib.Path.assert_called_with(story.get_tmp_dir())
pathlib.Path().mkdir.assert_called_with(
parents=True, mode=0o700, exist_ok=True)
@mark.parametrize('long', [True, False])
def test_get_str_for_logging(long):
def make_string(length):
out = ''
for i in range(0, length):
out += 'a'
return out
test_str = 'hello world'
if long:
test_str = make_string(1024)
actual_val = Stories.get_str_for_logging(test_str)
if long:
assert actual_val == f'{test_str[:MAX_BYTES_LOGGING]} ... ' \
f'({1024-MAX_BYTES_LOGGING} bytes truncated)'
else:
assert actual_val == 'hello world'
def test_stories_line(magic, story):
story.tree = magic()
line = story.line('1')
assert line == story.tree['1']
def test_stories_line_none(magic, story):
story.tree = magic()
line = story.line(None)
assert line is None
def test_stories_first_line(patch, story):
story.entrypoint = '16'
story.tree = {'23': {'ln': '23'}, '16': {'ln': '16'}}
result = story.first_line()
assert result == '16'
def test_stories_function_line_by_name(patch, story):
patch.object(story, 'line')
ret = story.function_line_by_name('execute')
story.line.assert_called_with(
story.app.stories[story.name]['functions']['execute'])
assert ret == story.line()
def test_stories_resolve(patch, logger, story):
patch.object(Resolver, 'resolve')
story.context = 'context'
result = story.resolve('args')
assert result == 'args'
def test_command_arguments_list(patch, story):
patch.object(Stories, 'resolve', return_value='something')
obj = {'$OBJECT': 'string', 'string': 'string'}
result = story.command_arguments_list([obj])
Stories.resolve.assert_called_with(obj, encode=True)
assert result == ['something']
def test_command_arguments_list_none(patch, story):
patch.object(Stories, 'resolve', return_value=None)
obj = {'$OBJECT': 'path', 'paths': ['literal']}
result = story.command_arguments_list([obj])
Stories.resolve.assert_called_with(obj)
assert result == ['literal']
def test_stories_start_line(patch, story):
patch.object(time, 'time')
story.start_line('1')
assert story.results['1'] == {'start': time.time()}
def test_stories_end_line(patch, story):
patch.object(time, 'time')
story.results = {'1': {'start': 'start'}}
story.end_line('1')
assert story.results['1']['output'] is None
assert story.results['1']['end'] == time.time()
assert story.results['1']['start'] == 'start'
def test_stories_end_line_output(patch, story):
patch.object(time, 'time')
story.results = {'1': {'start': 'start'}}
story.end_line('1', output='output')
assert story.results['1']['output'] == 'output'
def test_stories_end_line_output_assign(patch, story):
patch.object(Dict, 'set')
story.results = {'1': {'start': 'start'}}
assign = {'paths': ['x']}
story.end_line('1', output='output', assign=assign)
assert story.results['1']['output'] == 'output'
Dict.set.assert_called_with(story.context, assign['paths'], 'output')
def test_stories_end_line_output_as_list(patch, story):
patch.object(time, 'time')
story.results = {'1': {'start': 'start'}}
story.end_line('1', output=['a', 'b'])
assert story.results['1']['output'] == ['a', 'b']
def test_stories_end_line_output_as_json_no_auto_convert(patch, story):
patch.object(time, 'time')
story.results = {'1': {'start': 'start'}}
story.end_line('1', output='{"key":"value"}')
assert story.results['1']['output'] == '{"key":"value"}'
def test_stories_end_line_output_as_sting(patch, story):
patch.object(time, 'time')
story.results = {'1': {'start': 'start'}}
story.end_line('1', output=' foobar\n\t')
assert story.results['1']['output'] == ' foobar\n\t'
def test_stories_end_line_output_as_bytes(patch, story):
patch.object(time, 'time')
story.results = {'1': {'start': 'start'}}
story.end_line('1', output=b'output')
assert story.results['1']['output'] == b'output'
@mark.parametrize('input,output', [
(None, 'null'),
(False, 'false'),
(True, 'true'),
('string', "'string'"),
("st'ring", "'st\'ring'"),
(1, "'1'"),
({'foo': 'bar'}, "'{\"foo\": \"bar\"}'"),
(['foobar'], "'[\"foobar\"]'"),
])
def test_stories_encode(story, input, output):
assert story.encode(input) == output
def test_stories_argument_by_name_empty(story):
assert story.argument_by_name({}, 'foo') is None
def test_stories_argument_by_name_lookup(patch, story):
line = {
'args': [
{
'$OBJECT': 'argument',
'name': 'foo',
'argument': {'$OBJECT': 'string', 'string': 'bar'}
}
]
}
patch.object(story, 'resolve')
story.argument_by_name(line, 'foo')
story.resolve.assert_called_with(line['args'][0]['argument'], encode=False)
def test_stories_argument_by_name_missing(patch, story):
line = {'args': []}
assert story.argument_by_name(line, 'foo') is None
def test_stories_prepare(story):
story.prepare(None)
def test_stories_prepare_context(story, app):
story.app = app
context = {'app': app.app_context}
story.prepare(context=context)
assert story.environment == app.environment
assert story.context == context
def test_stories_next_block_simple(patch, story):
story.tree = {
'2': {'ln': '2', 'enter': '3', 'next': '3'},
'3': {'ln': '3', 'parent': '2', 'next': '4'},
'4': {'ln': '4'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) == story.tree['4']
def test_stories_next_block_as_lines(patch, story):
story.tree = {
'2': {'ln': '2', 'next': '3'},
'3': {'ln': '3', 'next': '4'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) == story.tree['3']
def test_stories_next_block_where_next_block_is_block(patch, story):
story.tree = {
'2': {'ln': '2', 'next': '3'},
'3': {'ln': '3', 'next': '4', 'enter': '4'},
'4': {'ln': '4', 'parent': '3'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) == story.tree['3']
def test_stories_next_block_only_block(patch, story):
story.tree = {
'2': {'ln': '2'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) is None
def test_stories_context_for_function_call(story):
assert story.context_for_function_call({}, {}) == {}
def test_stories_context_for_function_call_with_args(story):
line = {
'args': [
{
'$OBJECT': 'argument',
'name': 'foo',
'argument': {
'$OBJECT': 'string',
'string': 'bar'
}
},
{
'$OBJECT': 'argument',
'name': 'foo1',
'argument': {
'$OBJECT': 'string',
'string': 'bar1'
}
}
]
}
function_line = {
'args': [
{
'$OBJECT': 'argument',
'name': 'foo',
'argument': {
'$OBJECT': 'type',
'type': 'string'
}
},
{
'$OBJECT': 'argument',
'name': 'foo1',
'argument': {
'$OBJECT': 'type',
'type': 'string'
}
}
]
}
assert story.context_for_function_call(line, function_line) == {
'foo': 'bar',
'foo1': 'bar1'
}
def test_stories_next_block_nested(patch, story):
story.tree = {
'2': {'ln': '2', 'enter': '3', 'next': '3'},
'3': {'ln': '3', 'parent': '2', 'next': '4'},
'4': {'ln': '4', 'enter': '5', 'parent': '2', 'next': '5'},
'5': {'ln': '5', 'parent': '4', 'next': '6'},
'6': {'ln': '6', 'parent': '4', 'next': '7'},
'7': {'ln': '7'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) == story.tree['7']
def test_stories_next_block_last_line(patch, story):
story.tree = {
'2': {'ln': '2', 'enter': '3', 'next': '3'},
'3': {'ln': '3', 'parent': '2', 'next': '4'},
'4': {'ln': '4', 'enter': '5', 'parent': '2', 'next': '5'},
'5': {'ln': '5', 'parent': '4', 'next': '6'},
'6': {'ln': '6', 'parent': '4'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) is None
def test_stories_next_block_nested_inner(patch, story):
story.tree = {
'2': {'ln': '2', 'enter': '3', 'next': '3'},
'3': {'ln': '3', 'parent': '2', 'next': '4'},
'4': {'ln': '4', 'enter': '5', 'parent': '2', 'next': '5'},
'5': {'ln': '5', 'parent': '4', 'next': '6'},
'6': {'ln': '6', 'parent': '4', 'next': '7'},
'7': {'ln': '7', 'parent': '2', 'next': '8'},
'8': {'ln': '8', 'parent': '2'}
}
assert isinstance(story, Stories)
assert story.tree['7'] == story.next_block(story.line('4'))
| true | true |
f7181a51ac70864c0872ec1652625be1aa4f459a | 3,736 | py | Python | code/UNET_lowered.py | sagnik1511/U-Net-Lowered-with-keras | 364336b244ece288a52cf76df451501a665e745a | [
"MIT"
] | 6 | 2021-06-14T14:42:48.000Z | 2021-06-14T15:16:22.000Z | code/UNET_lowered.py | sagnik1511/U-Net-Reduced-with-TF-keras | 364336b244ece288a52cf76df451501a665e745a | [
"MIT"
] | null | null | null | code/UNET_lowered.py | sagnik1511/U-Net-Reduced-with-TF-keras | 364336b244ece288a52cf76df451501a665e745a | [
"MIT"
] | 2 | 2021-12-16T12:40:36.000Z | 2022-02-04T23:10:09.000Z | # -*- coding: utf-8 -*-
"""
UNET LOwered Model :
This customized UNet Model has been generated lowering the filters to their 25% .
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers import Input , Conv2D , MaxPooling2D , Dropout , concatenate , UpSampling2D
from tensorflow.keras import models
from tensorflow.keras import losses
from tensorflow.keras import optimizers
import numpy as np
def UNet(input_shape):
keras.backend.clear_session()
inputs = Input(input_shape)
conv1 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
conv1 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
conv2 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
conv3 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
conv4 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
conv5 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
up6 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
merge6 = concatenate([drop4,up6], axis = 3)
conv6 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
conv6 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
up7 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
merge7 = concatenate([conv3,up7], axis = 3)
conv7 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
conv7 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
up8 = Conv2D(32, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
merge8 = concatenate([conv2,up8], axis = 3)
conv8 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
conv8 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
up9 = Conv2D(16, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
merge9 = concatenate([conv1,up9], axis = 3)
conv9 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)
conv9 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
outputs = layers.Conv2D(1, 1, activation = 'sigmoid')(conv9)
model = keras.Model(inputs = inputs , outputs = outputs,name = 'UNet')
return model | 54.941176 | 131 | 0.677195 |
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers import Input , Conv2D , MaxPooling2D , Dropout , concatenate , UpSampling2D
from tensorflow.keras import models
from tensorflow.keras import losses
from tensorflow.keras import optimizers
import numpy as np
def UNet(input_shape):
keras.backend.clear_session()
inputs = Input(input_shape)
conv1 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
conv1 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
conv2 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
conv3 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
conv4 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
conv5 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
up6 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
merge6 = concatenate([drop4,up6], axis = 3)
conv6 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
conv6 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
up7 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
merge7 = concatenate([conv3,up7], axis = 3)
conv7 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
conv7 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
up8 = Conv2D(32, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
merge8 = concatenate([conv2,up8], axis = 3)
conv8 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
conv8 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
up9 = Conv2D(16, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
merge9 = concatenate([conv1,up9], axis = 3)
conv9 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)
conv9 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
outputs = layers.Conv2D(1, 1, activation = 'sigmoid')(conv9)
model = keras.Model(inputs = inputs , outputs = outputs,name = 'UNet')
return model | true | true |
f7181b13ca73f4b482d5f775d442f82f8780cd58 | 20,330 | py | Python | modin/experimental/engines/omnisci_on_ray/frame/calcite_builder.py | Rippling/modin | b2cf1d5fc704803a1ce6699e9a373dc7abeb409e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | modin/experimental/engines/omnisci_on_ray/frame/calcite_builder.py | Rippling/modin | b2cf1d5fc704803a1ce6699e9a373dc7abeb409e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | modin/experimental/engines/omnisci_on_ray/frame/calcite_builder.py | Rippling/modin | b2cf1d5fc704803a1ce6699e9a373dc7abeb409e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
from .expr import (
InputRefExpr,
LiteralExpr,
OpExpr,
AggregateExpr,
build_if_then_else,
build_row_idx_filter_expr,
)
from .calcite_algebra import (
CalciteBaseNode,
CalciteInputRefExpr,
CalciteInputIdxExpr,
CalciteScanNode,
CalciteProjectionNode,
CalciteFilterNode,
CalciteAggregateNode,
CalciteCollation,
CalciteSortNode,
CalciteJoinNode,
CalciteUnionNode,
)
from .df_algebra import (
FrameNode,
MaskNode,
GroupbyAggNode,
TransformNode,
JoinNode,
UnionNode,
SortNode,
FilterNode,
)
from collections import abc
from pandas.core.dtypes.common import _get_dtype
class CalciteBuilder:
class CompoundAggregate:
def __init__(self, builder, arg):
self._builder = builder
self._arg = arg
def gen_proj_exprs(self):
return []
def gen_agg_exprs(self):
pass
def gen_reduce_expr(self):
pass
class StdAggregate(CompoundAggregate):
def __init__(self, builder, arg):
assert isinstance(arg, InputRefExpr)
super().__init__(builder, arg)
self._quad_name = self._arg.column + "__quad__"
self._sum_name = self._arg.column + "__sum__"
self._quad_sum_name = self._arg.column + "__quad_sum__"
self._count_name = self._arg.column + "__count__"
def gen_proj_exprs(self):
expr = self._builder._translate(self._arg.mul(self._arg))
return {self._quad_name: expr}
def gen_agg_exprs(self):
count_expr = self._builder._translate(AggregateExpr("count", self._arg))
sum_expr = self._builder._translate(AggregateExpr("sum", self._arg))
self._sum_dtype = sum_expr._dtype
qsum_expr = AggregateExpr(
"SUM",
self._builder._ref_idx(self._arg.modin_frame, self._quad_name),
dtype=sum_expr._dtype,
)
return {
self._sum_name: sum_expr,
self._quad_sum_name: qsum_expr,
self._count_name: count_expr,
}
def gen_reduce_expr(self):
count_expr = self._builder._ref(self._arg.modin_frame, self._count_name)
count_expr._dtype = _get_dtype(int)
sum_expr = self._builder._ref(self._arg.modin_frame, self._sum_name)
sum_expr._dtype = self._sum_dtype
qsum_expr = self._builder._ref(self._arg.modin_frame, self._quad_sum_name)
qsum_expr._dtype = self._sum_dtype
null_expr = LiteralExpr(None)
count_or_null = build_if_then_else(
count_expr.eq(LiteralExpr(0)), null_expr, count_expr, count_expr._dtype
)
count_m_1_or_null = build_if_then_else(
count_expr.eq(LiteralExpr(1)),
null_expr,
count_expr.sub(LiteralExpr(1)),
count_expr._dtype,
)
# sqrt((sum(x * x) - sum(x) * sum(x) / n) / (n - 1))
return (
qsum_expr.sub(sum_expr.mul(sum_expr).truediv(count_or_null))
.truediv(count_m_1_or_null)
.pow(LiteralExpr(0.5))
)
class SkewAggregate(CompoundAggregate):
def __init__(self, builder, arg):
assert isinstance(arg, InputRefExpr)
super().__init__(builder, arg)
self._quad_name = self._arg.column + "__quad__"
self._cube_name = self._arg.column + "__cube__"
self._sum_name = self._arg.column + "__sum__"
self._quad_sum_name = self._arg.column + "__quad_sum__"
self._cube_sum_name = self._arg.column + "__cube_sum__"
self._count_name = self._arg.column + "__count__"
def gen_proj_exprs(self):
quad_expr = self._builder._translate(self._arg.mul(self._arg))
cube_expr = self._builder._translate(
self._arg.mul(self._arg).mul(self._arg)
)
return {self._quad_name: quad_expr, self._cube_name: cube_expr}
def gen_agg_exprs(self):
count_expr = self._builder._translate(AggregateExpr("count", self._arg))
sum_expr = self._builder._translate(AggregateExpr("sum", self._arg))
self._sum_dtype = sum_expr._dtype
qsum_expr = AggregateExpr(
"SUM",
self._builder._ref_idx(self._arg.modin_frame, self._quad_name),
dtype=sum_expr._dtype,
)
csum_expr = AggregateExpr(
"SUM",
self._builder._ref_idx(self._arg.modin_frame, self._cube_name),
dtype=sum_expr._dtype,
)
return {
self._sum_name: sum_expr,
self._quad_sum_name: qsum_expr,
self._cube_sum_name: csum_expr,
self._count_name: count_expr,
}
def gen_reduce_expr(self):
count_expr = self._builder._ref(self._arg.modin_frame, self._count_name)
count_expr._dtype = _get_dtype(int)
sum_expr = self._builder._ref(self._arg.modin_frame, self._sum_name)
sum_expr._dtype = self._sum_dtype
qsum_expr = self._builder._ref(self._arg.modin_frame, self._quad_sum_name)
qsum_expr._dtype = self._sum_dtype
csum_expr = self._builder._ref(self._arg.modin_frame, self._cube_sum_name)
csum_expr._dtype = self._sum_dtype
mean_expr = sum_expr.truediv(count_expr)
# n * sqrt(n - 1) / (n - 2)
# * (sum(x ** 3) - 3 * mean * sum(x * x) + 2 * mean * mean * sum(x))
# / (sum(x * x) - mean * sum(x)) ** 1.5
part1 = count_expr.mul(
count_expr.sub(LiteralExpr(1)).pow(LiteralExpr(0.5))
).truediv(count_expr.sub(LiteralExpr(2)))
part2 = csum_expr.sub(mean_expr.mul(qsum_expr).mul(LiteralExpr(3.0))).add(
mean_expr.mul(mean_expr).mul(sum_expr).mul(LiteralExpr(2.0))
)
part3 = qsum_expr.sub(mean_expr.mul(sum_expr)).pow(LiteralExpr(1.5))
skew_expr = part1.mul(part2).truediv(part3)
# The result is NULL if n <= 2
return build_if_then_else(
count_expr.le(LiteralExpr(2)),
LiteralExpr(None),
skew_expr,
skew_expr._dtype,
)
_compound_aggregates = {"std": StdAggregate, "skew": SkewAggregate}
class InputContext:
_simple_aggregates = {
"sum": "SUM",
"mean": "AVG",
"max": "MAX",
"min": "MIN",
"size": "COUNT",
"count": "COUNT",
}
_no_arg_aggregates = {"size"}
def __init__(self, input_frames, input_nodes):
self.input_nodes = input_nodes
self.frame_to_node = {x: y for x, y in zip(input_frames, input_nodes)}
self.input_offsets = {}
self.replacements = {}
offs = 0
for frame in input_frames:
self.input_offsets[frame] = offs
offs += len(frame._table_cols)
# Materialized frames have additional 'rowid' column
if isinstance(frame._op, FrameNode):
offs += 1
def replace_input_node(self, frame, node, new_cols):
self.replacements[frame] = new_cols
def _idx(self, frame, col):
assert (
frame in self.input_offsets
), f"unexpected reference to {frame.id_str()}"
offs = self.input_offsets[frame]
if frame in self.replacements:
return self.replacements[frame].index(col) + offs
if col == "__rowid__":
if not isinstance(self.frame_to_node[frame], CalciteScanNode):
raise NotImplementedError(
"rowid can be accessed in materialized frames only"
)
return len(frame._table_cols) + offs
assert (
col in frame._table_cols
), f"unexpected reference to '{col}' in {frame.id_str()}"
return frame._table_cols.index(col) + offs
def ref(self, frame, col):
return CalciteInputRefExpr(self._idx(frame, col))
def ref_idx(self, frame, col):
return CalciteInputIdxExpr(self._idx(frame, col))
def input_ids(self):
return [x.id for x in self.input_nodes]
def translate(self, expr):
"""Copy those parts of expr tree that have input references
and translate all references into CalciteInputRefExr"""
return self._maybe_copy_and_translate_expr(expr)
def _maybe_copy_and_translate_expr(self, expr, ref_idx=False):
if isinstance(expr, InputRefExpr):
if ref_idx:
return self.ref_idx(expr.modin_frame, expr.column)
else:
return self.ref(expr.modin_frame, expr.column)
if isinstance(expr, AggregateExpr):
expr = expr.copy()
if expr.agg in self._no_arg_aggregates:
expr.operands = []
else:
expr.operands[0] = self._maybe_copy_and_translate_expr(
expr.operands[0], True
)
expr.agg = self._simple_aggregates[expr.agg]
return expr
copied = False
for i, op in enumerate(getattr(expr, "operands", [])):
new_op = self._maybe_copy_and_translate_expr(op)
if new_op != op:
if not copied:
expr = expr.copy()
expr.operands[i] = new_op
return expr
class InputContextMgr:
def __init__(self, builder, input_frames, input_nodes):
self.builder = builder
self.input_frames = input_frames
self.input_nodes = input_nodes
def __enter__(self):
self.builder._input_ctx_stack.append(
self.builder.InputContext(self.input_frames, self.input_nodes)
)
return self.builder._input_ctx_stack[-1]
def __exit__(self, type, value, traceback):
self.builder._input_ctx_stack.pop()
type_strings = {
int: "INTEGER",
bool: "BOOLEAN",
}
def __init__(self):
self._input_ctx_stack = []
def build(self, op):
CalciteBaseNode.reset_id()
self.res = []
self._to_calcite(op)
return self.res
def _input_ctx(self):
return self._input_ctx_stack[-1]
def _set_input_ctx(self, op):
input_frames = getattr(op, "input", [])
input_nodes = [self._to_calcite(x._op) for x in input_frames]
return self.InputContextMgr(self, input_frames, input_nodes)
def _set_tmp_ctx(self, input_frames, input_nodes):
return self.InputContextMgr(self, input_frames, input_nodes)
def _ref(self, frame, col):
return self._input_ctx().ref(frame, col)
def _ref_idx(self, frame, col):
return self._input_ctx().ref_idx(frame, col)
def _translate(self, exprs):
if isinstance(exprs, abc.Iterable):
return [self._input_ctx().translate(x) for x in exprs]
return self._input_ctx().translate(exprs)
def _push(self, node):
self.res.append(node)
def _last(self):
return self.res[-1]
def _input_nodes(self):
return self._input_ctx().input_nodes
def _input_node(self, idx):
return self._input_nodes()[idx]
def _input_ids(self):
return self._input_ctx().input_ids()
def _to_calcite(self, op):
# This context translates input operands and setup current
# input context to translate input references (recursion
# over tree happens here).
with self._set_input_ctx(op):
if isinstance(op, FrameNode):
self._process_frame(op)
elif isinstance(op, MaskNode):
self._process_mask(op)
elif isinstance(op, GroupbyAggNode):
self._process_groupby(op)
elif isinstance(op, TransformNode):
self._process_transform(op)
elif isinstance(op, JoinNode):
self._process_join(op)
elif isinstance(op, UnionNode):
self._process_union(op)
elif isinstance(op, SortNode):
self._process_sort(op)
elif isinstance(op, FilterNode):
self._process_filter(op)
else:
raise NotImplementedError(
f"CalciteBuilder doesn't support {type(op).__name__}"
)
return self.res[-1]
def _process_frame(self, op):
self._push(CalciteScanNode(op.modin_frame))
def _process_mask(self, op):
if op.row_indices is not None:
raise NotImplementedError("row indices masking is not yet supported")
frame = op.input[0]
# select rows by rowid
rowid_col = self._ref(frame, "__rowid__")
condition = build_row_idx_filter_expr(op.row_numeric_idx, rowid_col)
self._push(CalciteFilterNode(condition))
# mask is currently always applied over scan, it means
# we need additional projection to remove rowid column
fields = frame._table_cols
exprs = [self._ref(frame, col) for col in frame._table_cols]
self._push(CalciteProjectionNode(fields, exprs))
def _process_groupby(self, op):
frame = op.input[0]
# Aggregation's input should always be a projection and
# group key columns should always go first
proj_cols = op.by.copy()
for col in frame._table_cols:
if col not in op.by:
proj_cols.append(col)
proj_exprs = [self._ref(frame, col) for col in proj_cols]
# Add expressions required for compound aggregates
compound_aggs = {}
for agg, expr in op.agg_exprs.items():
if expr.agg in self._compound_aggregates:
compound_aggs[agg] = self._compound_aggregates[expr.agg](
self, expr.operands[0]
)
extra_exprs = compound_aggs[agg].gen_proj_exprs()
proj_cols.extend(extra_exprs.keys())
proj_exprs.extend(extra_exprs.values())
proj = CalciteProjectionNode(proj_cols, proj_exprs)
self._push(proj)
self._input_ctx().replace_input_node(frame, proj, proj_cols)
group = [self._ref_idx(frame, col) for col in op.by]
fields = op.by.copy()
aggs = []
for agg, expr in op.agg_exprs.items():
if agg in compound_aggs:
extra_aggs = compound_aggs[agg].gen_agg_exprs()
fields.extend(extra_aggs.keys())
aggs.extend(extra_aggs.values())
else:
fields.append(agg)
aggs.append(self._translate(expr))
node = CalciteAggregateNode(fields, group, aggs)
self._push(node)
if compound_aggs:
self._input_ctx().replace_input_node(frame, node, fields)
proj_cols = op.by.copy()
proj_exprs = [self._ref(frame, col) for col in proj_cols]
proj_cols.extend(op.agg_exprs.keys())
for agg in op.agg_exprs:
if agg in compound_aggs:
proj_exprs.append(compound_aggs[agg].gen_reduce_expr())
else:
proj_exprs.append(self._ref(frame, agg))
proj = CalciteProjectionNode(proj_cols, proj_exprs)
self._push(proj)
if op.groupby_opts["sort"]:
collation = [CalciteCollation(col) for col in group]
self._push(CalciteSortNode(collation))
def _process_transform(self, op):
fields = list(op.exprs.keys())
exprs = self._translate(op.exprs.values())
self._push(CalciteProjectionNode(fields, exprs))
def _process_join(self, op):
left = op.input[0]
right = op.input[1]
assert (
op.on is not None
), "Merge with unspecified 'on' parameter is not supported in the engine"
for col in op.on:
assert (
col in left._table_cols and col in right._table_cols
), f"Column '{col}'' is missing in one of merge operands"
""" Join, only equal-join supported """
cmps = [self._ref(left, c).eq(self._ref(right, c)) for c in op.on]
if len(cmps) > 1:
condition = OpExpr("AND", cmps, _get_dtype(bool))
else:
condition = cmps[0]
node = CalciteJoinNode(
left_id=self._input_node(0).id,
right_id=self._input_node(1).id,
how=op.how,
condition=condition,
)
self._push(node)
"""Projection for both frames"""
fields = []
exprs = []
conflicting_cols = set(left.columns) & set(right.columns) - set(op.on)
"""First goes 'on' column then all left columns(+suffix for conflicting names)
but 'on' then all right columns(+suffix for conflicting names) but 'on'"""
on_idx = [-1] * len(op.on)
for c in left.columns:
if c in op.on:
on_idx[op.on.index(c)] = len(fields)
suffix = op.suffixes[0] if c in conflicting_cols else ""
fields.append(c + suffix)
exprs.append(self._ref(left, c))
for c in right.columns:
if c not in op.on:
suffix = op.suffixes[1] if c in conflicting_cols else ""
fields.append(c + suffix)
exprs.append(self._ref(right, c))
self._push(CalciteProjectionNode(fields, exprs))
# TODO: current input translation system doesn't work here
# because there is no frame to reference for index computation.
# We should build calcite tree to keep references to input
# nodes and keep scheme in calcite nodes. For now just use
# known index on_idx.
if op.sort is True:
"""Sort by key column"""
collation = [CalciteCollation(CalciteInputIdxExpr(x)) for x in on_idx]
self._push(CalciteSortNode(collation))
def _process_union(self, op):
self._push(CalciteUnionNode(self._input_ids(), True))
def _process_sort(self, op):
frame = op.input[0]
# Sort should be applied to projections.
if not isinstance(self._input_node(0), CalciteProjectionNode):
proj = CalciteProjectionNode(
frame._table_cols, [self._ref(frame, col) for col in frame._table_cols]
)
self._push(proj)
self._input_ctx().replace_input_node(frame, proj, frame._table_cols)
nulls = op.na_position.upper()
collations = []
for col, asc in zip(op.columns, op.ascending):
ascending = "ASCENDING" if asc else "DESCENDING"
collations.append(
CalciteCollation(self._ref_idx(frame, col), ascending, nulls)
)
self._push(CalciteSortNode(collations))
def _process_filter(self, op):
condition = self._translate(op.condition)
self._push(CalciteFilterNode(condition))
| 37.302752 | 87 | 0.588096 |
from .expr import (
InputRefExpr,
LiteralExpr,
OpExpr,
AggregateExpr,
build_if_then_else,
build_row_idx_filter_expr,
)
from .calcite_algebra import (
CalciteBaseNode,
CalciteInputRefExpr,
CalciteInputIdxExpr,
CalciteScanNode,
CalciteProjectionNode,
CalciteFilterNode,
CalciteAggregateNode,
CalciteCollation,
CalciteSortNode,
CalciteJoinNode,
CalciteUnionNode,
)
from .df_algebra import (
FrameNode,
MaskNode,
GroupbyAggNode,
TransformNode,
JoinNode,
UnionNode,
SortNode,
FilterNode,
)
from collections import abc
from pandas.core.dtypes.common import _get_dtype
class CalciteBuilder:
class CompoundAggregate:
def __init__(self, builder, arg):
self._builder = builder
self._arg = arg
def gen_proj_exprs(self):
return []
def gen_agg_exprs(self):
pass
def gen_reduce_expr(self):
pass
class StdAggregate(CompoundAggregate):
def __init__(self, builder, arg):
assert isinstance(arg, InputRefExpr)
super().__init__(builder, arg)
self._quad_name = self._arg.column + "__quad__"
self._sum_name = self._arg.column + "__sum__"
self._quad_sum_name = self._arg.column + "__quad_sum__"
self._count_name = self._arg.column + "__count__"
def gen_proj_exprs(self):
expr = self._builder._translate(self._arg.mul(self._arg))
return {self._quad_name: expr}
def gen_agg_exprs(self):
count_expr = self._builder._translate(AggregateExpr("count", self._arg))
sum_expr = self._builder._translate(AggregateExpr("sum", self._arg))
self._sum_dtype = sum_expr._dtype
qsum_expr = AggregateExpr(
"SUM",
self._builder._ref_idx(self._arg.modin_frame, self._quad_name),
dtype=sum_expr._dtype,
)
return {
self._sum_name: sum_expr,
self._quad_sum_name: qsum_expr,
self._count_name: count_expr,
}
def gen_reduce_expr(self):
count_expr = self._builder._ref(self._arg.modin_frame, self._count_name)
count_expr._dtype = _get_dtype(int)
sum_expr = self._builder._ref(self._arg.modin_frame, self._sum_name)
sum_expr._dtype = self._sum_dtype
qsum_expr = self._builder._ref(self._arg.modin_frame, self._quad_sum_name)
qsum_expr._dtype = self._sum_dtype
null_expr = LiteralExpr(None)
count_or_null = build_if_then_else(
count_expr.eq(LiteralExpr(0)), null_expr, count_expr, count_expr._dtype
)
count_m_1_or_null = build_if_then_else(
count_expr.eq(LiteralExpr(1)),
null_expr,
count_expr.sub(LiteralExpr(1)),
count_expr._dtype,
)
return (
qsum_expr.sub(sum_expr.mul(sum_expr).truediv(count_or_null))
.truediv(count_m_1_or_null)
.pow(LiteralExpr(0.5))
)
class SkewAggregate(CompoundAggregate):
def __init__(self, builder, arg):
assert isinstance(arg, InputRefExpr)
super().__init__(builder, arg)
self._quad_name = self._arg.column + "__quad__"
self._cube_name = self._arg.column + "__cube__"
self._sum_name = self._arg.column + "__sum__"
self._quad_sum_name = self._arg.column + "__quad_sum__"
self._cube_sum_name = self._arg.column + "__cube_sum__"
self._count_name = self._arg.column + "__count__"
def gen_proj_exprs(self):
quad_expr = self._builder._translate(self._arg.mul(self._arg))
cube_expr = self._builder._translate(
self._arg.mul(self._arg).mul(self._arg)
)
return {self._quad_name: quad_expr, self._cube_name: cube_expr}
def gen_agg_exprs(self):
count_expr = self._builder._translate(AggregateExpr("count", self._arg))
sum_expr = self._builder._translate(AggregateExpr("sum", self._arg))
self._sum_dtype = sum_expr._dtype
qsum_expr = AggregateExpr(
"SUM",
self._builder._ref_idx(self._arg.modin_frame, self._quad_name),
dtype=sum_expr._dtype,
)
csum_expr = AggregateExpr(
"SUM",
self._builder._ref_idx(self._arg.modin_frame, self._cube_name),
dtype=sum_expr._dtype,
)
return {
self._sum_name: sum_expr,
self._quad_sum_name: qsum_expr,
self._cube_sum_name: csum_expr,
self._count_name: count_expr,
}
def gen_reduce_expr(self):
count_expr = self._builder._ref(self._arg.modin_frame, self._count_name)
count_expr._dtype = _get_dtype(int)
sum_expr = self._builder._ref(self._arg.modin_frame, self._sum_name)
sum_expr._dtype = self._sum_dtype
qsum_expr = self._builder._ref(self._arg.modin_frame, self._quad_sum_name)
qsum_expr._dtype = self._sum_dtype
csum_expr = self._builder._ref(self._arg.modin_frame, self._cube_sum_name)
csum_expr._dtype = self._sum_dtype
mean_expr = sum_expr.truediv(count_expr)
part1 = count_expr.mul(
count_expr.sub(LiteralExpr(1)).pow(LiteralExpr(0.5))
).truediv(count_expr.sub(LiteralExpr(2)))
part2 = csum_expr.sub(mean_expr.mul(qsum_expr).mul(LiteralExpr(3.0))).add(
mean_expr.mul(mean_expr).mul(sum_expr).mul(LiteralExpr(2.0))
)
part3 = qsum_expr.sub(mean_expr.mul(sum_expr)).pow(LiteralExpr(1.5))
skew_expr = part1.mul(part2).truediv(part3)
return build_if_then_else(
count_expr.le(LiteralExpr(2)),
LiteralExpr(None),
skew_expr,
skew_expr._dtype,
)
_compound_aggregates = {"std": StdAggregate, "skew": SkewAggregate}
class InputContext:
_simple_aggregates = {
"sum": "SUM",
"mean": "AVG",
"max": "MAX",
"min": "MIN",
"size": "COUNT",
"count": "COUNT",
}
_no_arg_aggregates = {"size"}
def __init__(self, input_frames, input_nodes):
self.input_nodes = input_nodes
self.frame_to_node = {x: y for x, y in zip(input_frames, input_nodes)}
self.input_offsets = {}
self.replacements = {}
offs = 0
for frame in input_frames:
self.input_offsets[frame] = offs
offs += len(frame._table_cols)
if isinstance(frame._op, FrameNode):
offs += 1
def replace_input_node(self, frame, node, new_cols):
self.replacements[frame] = new_cols
def _idx(self, frame, col):
assert (
frame in self.input_offsets
), f"unexpected reference to {frame.id_str()}"
offs = self.input_offsets[frame]
if frame in self.replacements:
return self.replacements[frame].index(col) + offs
if col == "__rowid__":
if not isinstance(self.frame_to_node[frame], CalciteScanNode):
raise NotImplementedError(
"rowid can be accessed in materialized frames only"
)
return len(frame._table_cols) + offs
assert (
col in frame._table_cols
), f"unexpected reference to '{col}' in {frame.id_str()}"
return frame._table_cols.index(col) + offs
def ref(self, frame, col):
return CalciteInputRefExpr(self._idx(frame, col))
def ref_idx(self, frame, col):
return CalciteInputIdxExpr(self._idx(frame, col))
def input_ids(self):
return [x.id for x in self.input_nodes]
def translate(self, expr):
return self._maybe_copy_and_translate_expr(expr)
def _maybe_copy_and_translate_expr(self, expr, ref_idx=False):
if isinstance(expr, InputRefExpr):
if ref_idx:
return self.ref_idx(expr.modin_frame, expr.column)
else:
return self.ref(expr.modin_frame, expr.column)
if isinstance(expr, AggregateExpr):
expr = expr.copy()
if expr.agg in self._no_arg_aggregates:
expr.operands = []
else:
expr.operands[0] = self._maybe_copy_and_translate_expr(
expr.operands[0], True
)
expr.agg = self._simple_aggregates[expr.agg]
return expr
copied = False
for i, op in enumerate(getattr(expr, "operands", [])):
new_op = self._maybe_copy_and_translate_expr(op)
if new_op != op:
if not copied:
expr = expr.copy()
expr.operands[i] = new_op
return expr
class InputContextMgr:
def __init__(self, builder, input_frames, input_nodes):
self.builder = builder
self.input_frames = input_frames
self.input_nodes = input_nodes
def __enter__(self):
self.builder._input_ctx_stack.append(
self.builder.InputContext(self.input_frames, self.input_nodes)
)
return self.builder._input_ctx_stack[-1]
def __exit__(self, type, value, traceback):
self.builder._input_ctx_stack.pop()
type_strings = {
int: "INTEGER",
bool: "BOOLEAN",
}
def __init__(self):
self._input_ctx_stack = []
def build(self, op):
CalciteBaseNode.reset_id()
self.res = []
self._to_calcite(op)
return self.res
def _input_ctx(self):
return self._input_ctx_stack[-1]
def _set_input_ctx(self, op):
input_frames = getattr(op, "input", [])
input_nodes = [self._to_calcite(x._op) for x in input_frames]
return self.InputContextMgr(self, input_frames, input_nodes)
def _set_tmp_ctx(self, input_frames, input_nodes):
return self.InputContextMgr(self, input_frames, input_nodes)
def _ref(self, frame, col):
return self._input_ctx().ref(frame, col)
def _ref_idx(self, frame, col):
return self._input_ctx().ref_idx(frame, col)
def _translate(self, exprs):
if isinstance(exprs, abc.Iterable):
return [self._input_ctx().translate(x) for x in exprs]
return self._input_ctx().translate(exprs)
def _push(self, node):
self.res.append(node)
def _last(self):
return self.res[-1]
def _input_nodes(self):
return self._input_ctx().input_nodes
def _input_node(self, idx):
return self._input_nodes()[idx]
def _input_ids(self):
return self._input_ctx().input_ids()
def _to_calcite(self, op):
with self._set_input_ctx(op):
if isinstance(op, FrameNode):
self._process_frame(op)
elif isinstance(op, MaskNode):
self._process_mask(op)
elif isinstance(op, GroupbyAggNode):
self._process_groupby(op)
elif isinstance(op, TransformNode):
self._process_transform(op)
elif isinstance(op, JoinNode):
self._process_join(op)
elif isinstance(op, UnionNode):
self._process_union(op)
elif isinstance(op, SortNode):
self._process_sort(op)
elif isinstance(op, FilterNode):
self._process_filter(op)
else:
raise NotImplementedError(
f"CalciteBuilder doesn't support {type(op).__name__}"
)
return self.res[-1]
def _process_frame(self, op):
self._push(CalciteScanNode(op.modin_frame))
def _process_mask(self, op):
if op.row_indices is not None:
raise NotImplementedError("row indices masking is not yet supported")
frame = op.input[0]
# select rows by rowid
rowid_col = self._ref(frame, "__rowid__")
condition = build_row_idx_filter_expr(op.row_numeric_idx, rowid_col)
self._push(CalciteFilterNode(condition))
# mask is currently always applied over scan, it means
# we need additional projection to remove rowid column
fields = frame._table_cols
exprs = [self._ref(frame, col) for col in frame._table_cols]
self._push(CalciteProjectionNode(fields, exprs))
def _process_groupby(self, op):
frame = op.input[0]
# Aggregation's input should always be a projection and
proj_cols = op.by.copy()
for col in frame._table_cols:
if col not in op.by:
proj_cols.append(col)
proj_exprs = [self._ref(frame, col) for col in proj_cols]
compound_aggs = {}
for agg, expr in op.agg_exprs.items():
if expr.agg in self._compound_aggregates:
compound_aggs[agg] = self._compound_aggregates[expr.agg](
self, expr.operands[0]
)
extra_exprs = compound_aggs[agg].gen_proj_exprs()
proj_cols.extend(extra_exprs.keys())
proj_exprs.extend(extra_exprs.values())
proj = CalciteProjectionNode(proj_cols, proj_exprs)
self._push(proj)
self._input_ctx().replace_input_node(frame, proj, proj_cols)
group = [self._ref_idx(frame, col) for col in op.by]
fields = op.by.copy()
aggs = []
for agg, expr in op.agg_exprs.items():
if agg in compound_aggs:
extra_aggs = compound_aggs[agg].gen_agg_exprs()
fields.extend(extra_aggs.keys())
aggs.extend(extra_aggs.values())
else:
fields.append(agg)
aggs.append(self._translate(expr))
node = CalciteAggregateNode(fields, group, aggs)
self._push(node)
if compound_aggs:
self._input_ctx().replace_input_node(frame, node, fields)
proj_cols = op.by.copy()
proj_exprs = [self._ref(frame, col) for col in proj_cols]
proj_cols.extend(op.agg_exprs.keys())
for agg in op.agg_exprs:
if agg in compound_aggs:
proj_exprs.append(compound_aggs[agg].gen_reduce_expr())
else:
proj_exprs.append(self._ref(frame, agg))
proj = CalciteProjectionNode(proj_cols, proj_exprs)
self._push(proj)
if op.groupby_opts["sort"]:
collation = [CalciteCollation(col) for col in group]
self._push(CalciteSortNode(collation))
def _process_transform(self, op):
fields = list(op.exprs.keys())
exprs = self._translate(op.exprs.values())
self._push(CalciteProjectionNode(fields, exprs))
def _process_join(self, op):
left = op.input[0]
right = op.input[1]
assert (
op.on is not None
), "Merge with unspecified 'on' parameter is not supported in the engine"
for col in op.on:
assert (
col in left._table_cols and col in right._table_cols
), f"Column '{col}'' is missing in one of merge operands"
cmps = [self._ref(left, c).eq(self._ref(right, c)) for c in op.on]
if len(cmps) > 1:
condition = OpExpr("AND", cmps, _get_dtype(bool))
else:
condition = cmps[0]
node = CalciteJoinNode(
left_id=self._input_node(0).id,
right_id=self._input_node(1).id,
how=op.how,
condition=condition,
)
self._push(node)
fields = []
exprs = []
conflicting_cols = set(left.columns) & set(right.columns) - set(op.on)
on_idx = [-1] * len(op.on)
for c in left.columns:
if c in op.on:
on_idx[op.on.index(c)] = len(fields)
suffix = op.suffixes[0] if c in conflicting_cols else ""
fields.append(c + suffix)
exprs.append(self._ref(left, c))
for c in right.columns:
if c not in op.on:
suffix = op.suffixes[1] if c in conflicting_cols else ""
fields.append(c + suffix)
exprs.append(self._ref(right, c))
self._push(CalciteProjectionNode(fields, exprs))
# TODO: current input translation system doesn't work here
if op.sort is True:
collation = [CalciteCollation(CalciteInputIdxExpr(x)) for x in on_idx]
self._push(CalciteSortNode(collation))
def _process_union(self, op):
self._push(CalciteUnionNode(self._input_ids(), True))
def _process_sort(self, op):
frame = op.input[0]
if not isinstance(self._input_node(0), CalciteProjectionNode):
proj = CalciteProjectionNode(
frame._table_cols, [self._ref(frame, col) for col in frame._table_cols]
)
self._push(proj)
self._input_ctx().replace_input_node(frame, proj, frame._table_cols)
nulls = op.na_position.upper()
collations = []
for col, asc in zip(op.columns, op.ascending):
ascending = "ASCENDING" if asc else "DESCENDING"
collations.append(
CalciteCollation(self._ref_idx(frame, col), ascending, nulls)
)
self._push(CalciteSortNode(collations))
def _process_filter(self, op):
condition = self._translate(op.condition)
self._push(CalciteFilterNode(condition))
| true | true |
f7181b87434e6a3a078b7f233f6a61d24e5fe9cc | 3,374 | py | Python | data/test/python/f7181b87434e6a3a078b7f233f6a61d24e5fe9ccbase.py | harshp8l/deep-learning-lang-detection | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | [
"MIT"
] | 84 | 2017-10-25T15:49:21.000Z | 2021-11-28T21:25:54.000Z | data/test/python/f7181b87434e6a3a078b7f233f6a61d24e5fe9ccbase.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 5 | 2018-03-29T11:50:46.000Z | 2021-04-26T13:33:18.000Z | data/test/python/f7181b87434e6a3a078b7f233f6a61d24e5fe9ccbase.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 24 | 2017-11-22T08:31:00.000Z | 2022-03-27T01:22:31.000Z | from __future__ import absolute_import
import os
import sys
from django.core.management.base import BaseCommand
import celery
import djcelery
DB_SHARED_THREAD = """\
DatabaseWrapper objects created in a thread can only \
be used in that same thread. The object with alias '%s' \
was created in thread id %s and this is thread id %s.\
"""
def patch_thread_ident():
# monkey patch django.
# This patch make sure that we use real threads to get the ident which
# is going to happen if we are using gevent or eventlet.
# -- patch taken from gunicorn
if getattr(patch_thread_ident, 'called', False):
return
try:
from django.db.backends import BaseDatabaseWrapper, DatabaseError
if 'validate_thread_sharing' in BaseDatabaseWrapper.__dict__:
import thread
_get_ident = thread.get_ident
__old__init__ = BaseDatabaseWrapper.__init__
def _init(self, *args, **kwargs):
__old__init__(self, *args, **kwargs)
self._thread_ident = _get_ident()
def _validate_thread_sharing(self):
if (not self.allow_thread_sharing
and self._thread_ident != _get_ident()):
raise DatabaseError(
DB_SHARED_THREAD % (
self.alias, self._thread_ident, _get_ident()),
)
BaseDatabaseWrapper.__init__ = _init
BaseDatabaseWrapper.validate_thread_sharing = \
_validate_thread_sharing
patch_thread_ident.called = True
except ImportError:
pass
patch_thread_ident()
class CeleryCommand(BaseCommand):
options = BaseCommand.option_list
skip_opts = ['--app', '--loader', '--config']
keep_base_opts = False
def get_version(self):
return 'celery %s\ndjango-celery %s' % (celery.__version__,
djcelery.__version__)
def execute(self, *args, **options):
broker = options.get('broker')
if broker:
self.set_broker(broker)
super(CeleryCommand, self).execute(*args, **options)
def set_broker(self, broker):
os.environ['CELERY_BROKER_URL'] = broker
def run_from_argv(self, argv):
self.handle_default_options(argv[2:])
return super(CeleryCommand, self).run_from_argv(argv)
def handle_default_options(self, argv):
acc = []
broker = None
for i, arg in enumerate(argv):
if '--settings=' in arg:
_, settings_module = arg.split('=')
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
elif '--pythonpath=' in arg:
_, pythonpath = arg.split('=')
sys.path.insert(0, pythonpath)
elif '--broker=' in arg:
_, broker = arg.split('=')
elif arg == '-b':
broker = argv[i + 1]
else:
acc.append(arg)
if broker:
self.set_broker(broker)
return argv if self.keep_base_opts else acc
def die(self, msg):
sys.stderr.write(msg)
sys.stderr.write('\n')
sys.exit()
@property
def option_list(self):
return [x for x in self.options
if x._long_opts[0] not in self.skip_opts]
| 31.53271 | 74 | 0.590101 | from __future__ import absolute_import
import os
import sys
from django.core.management.base import BaseCommand
import celery
import djcelery
DB_SHARED_THREAD = """\
DatabaseWrapper objects created in a thread can only \
be used in that same thread. The object with alias '%s' \
was created in thread id %s and this is thread id %s.\
"""
def patch_thread_ident():
if getattr(patch_thread_ident, 'called', False):
return
try:
from django.db.backends import BaseDatabaseWrapper, DatabaseError
if 'validate_thread_sharing' in BaseDatabaseWrapper.__dict__:
import thread
_get_ident = thread.get_ident
__old__init__ = BaseDatabaseWrapper.__init__
def _init(self, *args, **kwargs):
__old__init__(self, *args, **kwargs)
self._thread_ident = _get_ident()
def _validate_thread_sharing(self):
if (not self.allow_thread_sharing
and self._thread_ident != _get_ident()):
raise DatabaseError(
DB_SHARED_THREAD % (
self.alias, self._thread_ident, _get_ident()),
)
BaseDatabaseWrapper.__init__ = _init
BaseDatabaseWrapper.validate_thread_sharing = \
_validate_thread_sharing
patch_thread_ident.called = True
except ImportError:
pass
patch_thread_ident()
class CeleryCommand(BaseCommand):
options = BaseCommand.option_list
skip_opts = ['--app', '--loader', '--config']
keep_base_opts = False
def get_version(self):
return 'celery %s\ndjango-celery %s' % (celery.__version__,
djcelery.__version__)
def execute(self, *args, **options):
broker = options.get('broker')
if broker:
self.set_broker(broker)
super(CeleryCommand, self).execute(*args, **options)
def set_broker(self, broker):
os.environ['CELERY_BROKER_URL'] = broker
def run_from_argv(self, argv):
self.handle_default_options(argv[2:])
return super(CeleryCommand, self).run_from_argv(argv)
def handle_default_options(self, argv):
acc = []
broker = None
for i, arg in enumerate(argv):
if '--settings=' in arg:
_, settings_module = arg.split('=')
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
elif '--pythonpath=' in arg:
_, pythonpath = arg.split('=')
sys.path.insert(0, pythonpath)
elif '--broker=' in arg:
_, broker = arg.split('=')
elif arg == '-b':
broker = argv[i + 1]
else:
acc.append(arg)
if broker:
self.set_broker(broker)
return argv if self.keep_base_opts else acc
def die(self, msg):
sys.stderr.write(msg)
sys.stderr.write('\n')
sys.exit()
@property
def option_list(self):
return [x for x in self.options
if x._long_opts[0] not in self.skip_opts]
| true | true |
f7181bc2790949201ed0b3f57763455f00d8b77a | 28,933 | py | Python | Simulador.py | edrhat/simulator | d243443c84ccb3e4efa880990d11b395125d16d3 | [
"MIT"
] | null | null | null | Simulador.py | edrhat/simulator | d243443c84ccb3e4efa880990d11b395125d16d3 | [
"MIT"
] | null | null | null | Simulador.py | edrhat/simulator | d243443c84ccb3e4efa880990d11b395125d16d3 | [
"MIT"
] | null | null | null | from tkinter import *
from tkinter import messagebox
import tkinter as tk
from tkinter import ttk
#IMAGENS DEFEITO: 240X240
class Tela:
def fechar(self, event):
janela.destroy()
exit()
def fecharPc(self, event):
self.lb_simulador.place_forget()
self.imgFundo.place_forget()
self.imgg2.place_forget()
self.lbGabinete.config(bg="white")
lbMonitor.place(x=100, y=30)
self.imggg.place_forget()
self.imgg3.place_forget()
self.imgg4.place_forget()
self.imgg5.place_forget()
self.imgg6.place_forget()
self.imgg7.place_forget()
self.imgg8.place_forget()
self.imgg9.place_forget()
def __init__(self, master):
global lbMonitor
monitor = PhotoImage(file="monitor.png")
lbMonitor = Label(image=monitor)
lbMonitor.monitor = monitor
lbMonitor.place(x=100, y=30)
gabinete = PhotoImage(file="gabinete.png")
self.lbGabinete = Label(janela, image=gabinete)
self.lbGabinete.gabinete = gabinete
self.lbGabinete.place(x=970, y=285)
self.lbGabinete.bind("<Enter>", self.abrirPc)
self.lbGabinete.bind("<Leave>", self.fecharPc)
self.lbGabinete.bind("<Button-1>", self.defeitos)
teclado = PhotoImage(file="teclado.png")
lbTeclado = Label(janela, image=teclado)
lbTeclado.teclado = teclado
lbTeclado.place(x=50, y=530)
delete = PhotoImage(file="delete.png")
lbDelete = Label(janela, image=delete)
lbDelete.delete = delete
lbDelete.config(bg="red")
lbDelete.bind("<Button-1>", self.bios)
lbDelete.place(x=842, y=722)
self.sair = Button(janela, text="[X]")
self.sair["font"] = ("Arial", "15")
self.sair.config(bg="red", foreground="white")
self.sair.place(x=1200, y=30)
self.sair.bind("<Button-1>", self.fechar)
def defeitos(self, event):
janela2 = Tk()
self.p = Label(janela2, text="O computador liga normalmente mas não aparece nada\n no monitor. Quais peças devem ser testadas ?")
self.p["font"] = ("Lucida console", "30")
self.p.config(bg="black", foreground="limegreen")
self.p.place(x=140, y=30)
img_monitor = PhotoImage(master=janela2, file="monitor2.png")
self.monitor2 = Label(janela2, image=img_monitor)
self.monitor2.img_monitor = img_monitor
self.monitor2.place(x=120,y=200)
img_placa = PhotoImage(master=janela2, file="placa2.png")
self.placa = Label(janela2, image=img_placa)
self.placa.img_placa = img_placa
self.placa.place(x=420,y=200)
img_hd = PhotoImage(master=janela2, file="hd2.png")
self.hd = Label(janela2, image=img_hd)
self.hd.img_hd = img_hd
self.hd.place(x=720,y=200)
img_gpu = PhotoImage(master=janela2, file="gpu2.png")
self.gpu = Label(janela2, image=img_gpu)
self.gpu.img_gpu = img_gpu
self.gpu.place(x=1020,y=200)
janela.title("Simulador de defeitos")
janela2.geometry("1400x830+50+5")
def abrirPc(self, event):
global lbMonitor
self.lb_simulador = Label(janela, text="Clique para iniciar\n simulador de defeitos")
self.lb_simulador["font"] = ("Arial", "20")
self.lb_simulador.config(bg="black", foreground="white")
self.lb_simulador.place(x=970, y=210)
lbMonitor.place(x=1800, y=10)
fundobranco = PhotoImage(file="fundobranco.png")
self.imgFundo = Label(janela, image=fundobranco)
self.imgFundo.fundobranco = fundobranco
self.imgFundo.config(bg="white")
self.imgFundo.place(x=80,y=30)
gabineteAberto = PhotoImage(file="gabineteAberto.png")
self.imggg = Label(janela, image=gabineteAberto)
self.imggg.gabineteAberto = gabineteAberto
self.lbGabinete.config(bg="green")
self.imggg.place(x=60,y=100)
hd = PhotoImage(file="hd.png")
self.imgg2 = Label(janela, image=hd)
self.imgg2.hd = hd
self.imgg2.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg2.place(x=500,y=30)
fonte = PhotoImage(file="fonte.png")
self.imgg3 = Label(janela, image=fonte)
self.imgg3.fonte = fonte
self.imgg3.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg3.place(x=650,y=30)
cpu = PhotoImage(file="cpu.png")
self.imgg4 = Label(janela, image=cpu)
self.imgg4.cpu = cpu
self.imgg4.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg4.place(x=800,y=30)
placa = PhotoImage(file="placa.png")
self.imgg5 = Label(janela, image=placa)
self.imgg5.placa = placa
self.imgg5.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg5.place(x=500,y=200)
memoria = PhotoImage(file="memoria.png")
self.imgg6 = Label(janela, image=memoria)
self.imgg6.memoria = memoria
self.imgg6.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg6.place(x=650,y=200)
sata = PhotoImage(file="sata.png")
self.imgg7 = Label(janela, image=sata)
self.imgg7.sata = sata
self.imgg7.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg7.place(x=800,y=200)
cooler = PhotoImage(file="cooler.png")
self.imgg8 = Label(janela, image=cooler)
self.imgg8.cooler = cooler
self.imgg8.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg8.place(x=500,y=370)
gpu = PhotoImage(file="gpu.png")
self.imgg9 = Label(janela, image=gpu)
self.imgg9.gpu = gpu
self.imgg9.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg9.place(x=650,y=370)
def bios(self, event):
janela2 = tk.Tk()
#Label inicial
p1 = tk.Label(janela2,foreground="white",background="#00008B",text="CMOS Setup Utility - Copyright (C) 1984-1999 Award Software")
p1["font"] = ("Lucida Console","18")
p1.pack(pady=7,padx=7,ipady=20,ipadx=7)
linhaH = tk.Label(janela2,foreground="white",background="#00008B",text="____________________________________________________________________")
linhaH["font"] = ("Lucida Console","18")
linhaH.place(x=0,y=60)
linhaV = tk.Label(janela2,foreground="white",background="#00008B",text="|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n")
linhaV["font"] = ("Lucida Console","12")
linhaV.place(x=470,y=90)
#Label 1
self.p2 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Standard CMOS Features")
self.p2["font"] = ("Lucida Console","15")
self.p2.place(x=80, y=100)
#Label 2
self.p3 = tk.Label(janela2,foreground="yellow",background="red",text="> Advanced BIOS Features")
self.p3["font"] = ("Lucida Console","15")
self.p3.place(x=80, y=140)
self.p3.bind("<Button-1>", self.bios2)
#Label 3
p4 = tk.Label(janela2, foreground="#FFD700",background="#00008B",text="> Advanced Chipset Features")
p4["font"] = ("Lucida Console","15")
p4.place(x=80, y=180)
#Label 4
p5 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Integrated Peripherials")
p5["font"] = ("Lucida Console","15")
p5.place(x=80, y=220)
#Label 5
p6 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Power Management Setup")
p6["font"] = ("Lucida Console","15")
p6.place(x=80, y=260)
#Label 6
p7 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> PnP/PCI Configurations")
p7["font"] = ("Lucida Console","15")
p7.place(x=80, y=300)
#Label 7
p8 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> PC Health Status")
p8["font"] = ("Lucida Console","15")
p8.place(x=80, y=340)
#///////////////////////////////////////////////////////////////////////////////////
#Label 8
p9 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Frequency/Voltage Control")
p9["font"] = ("Lucida Console","15")
p9.place(x=520, y=100)
#Label 9
p10 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Load Fail-Safe Defaults")
p10["font"] = ("Lucida Console","15")
p10.place(x=520, y=140)
#Label 10
p11 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Load Optimized Defaults")
p11["font"] = ("Lucida Console","15")
p11.place(x=520, y=180)
#Label 11
p12 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Set Supervisor Password")
p12["font"] = ("Lucida Console","15")
p12.place(x=520, y=220)
#Label 12
p13 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Set User Password")
p13["font"] = ("Lucida Console","15")
p13.place(x=520, y=260)
#Label 13
p14 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="Save & Exit Setup")
p14["font"] = ("Lucida Console","15")
p14.place(x=520, y=300)
#Label 14
p15 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="Exit Without Saving")
p15["font"] = ("Lucida Console","15")
p15.place(x=520, y=300)
#Esc
esc = tk.Label(janela2,foreground="white",background="#00008B",text="Esc : Quit")
esc["font"] = ("Lucida Console","15")
esc.place(x=23, y=470)
#F10
f10 = tk.Label(janela2,foreground="white",background="#00008B",text="F10 : Save & Exit Setup")
f10["font"] = ("Lucida Console","15")
f10.place(x=23, y=498)
#Rodapé
rodape = tk.Label(janela2, text="Time, Date, Hard Disk Type. . .")
rodape["font"] = ("Helvetica","16")
rodape.configure(background="#00008B", foreground="#FFD700")
rodape.place(x=280,y=580)
janela2.title("BIOS")
janela2.geometry("880x640+200+30")
janela2.config(bg="#00008B")
janela2.config(cursor="hand2")
janela2.resizable(width=False, height=False)
janela2.mainloop()
def fecharBios(self, event):
janela2.destroy()
def bios2(self, event):
jan2= tk.Tk()
jan2.configure(bg="#00008B")
jan2.geometry('880x700+200+20')
jan2.config(cursor="hand2")
jan2.resizable(width=False, height=False)
jan2.title("Ordem de Boot")
#Label inicial
self.lb1 = tk.Label(jan2,foreground="white",background="#00008B",text="Phoenix - Award BIOS CMOS Setup Utility\nAdvanced BIOS Features")
self.lb1["font"] = ("Lucida Console","18")
self.lb1.pack(pady=7,padx=7,ipady=15,ipadx=7)
#Linha horizontal
self.l1 = tk.Label(jan2,foreground="white",background="#00008B",text="____________________________________________________________________________")
self.l1["font"] = ("Lucida Console","18")
self.l1.place(x=0,y=70)
#Linha vertical
self.l2 = tk.Label(jan2,foreground="white",background="#00008B",text="|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|")
self.l2["font"] = ("Lucida Console","15")
self.l2.place(x=630, y=95)
#Label 1
self.lb3 = tk.Label(jan2,foreground="white",background="#00008B",text="Virus Warning")
self.lb3["font"] = ("Lucida Console","15")
self.lb3.place(x=30, y=100)
self.lb4 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.lb4["font"] = ("Lucida Console","15")
self.lb4.place(x=400, y=100)
self.lb5 = tk.Label(jan2,foreground="white",background="#00008B",text="CPU L1 Cache")
self.lb5["font"] = ("Lucida Console","15")
self.lb5.place(x=30, y=130)
self.lb6 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Enabled")
self.lb6["font"] = ("Lucida Console","15")
self.lb6.place(x=400, y=130)
self.lb7 = tk.Label(jan2,foreground="white",background="#00008B",text="CPU L2 Cache")
self.lb7["font"] = ("Lucida Console","15")
self.lb7.place(x=30, y=160)
self.lb8 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Enabled")
self.lb8["font"] = ("Lucida Console","15")
self.lb8.place(x=400, y=160)
self.lb9 = tk.Label(jan2,foreground="white",background="#00008B",text="Quick Power On Self Test")
self.lb9["font"] = ("Lucida Console","15")
self.lb9.place(x=30, y=190)
self.lb10 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Enabled")
self.lb10["font"] = ("Lucida Console","15")
self.lb10.place(x=400, y=190)
self.l11 = tk.Label(jan2,foreground="white",background="#00008B",text="HDD Boot Sprite")
self.l11["font"] = ("Lucida Console","15")
self.l11.place(x=30, y=220)
self.l12 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.l12["font"] = ("Lucida Console","15")
self.l12.place(x=400, y=220)
self.l13 = tk.Label(jan2,foreground="white",background="#00008B",text="First Boot Device")
self.l13["font"] = ("Lucida Console","15")
self.l13.place(x=30, y=250)
self.l14 = tk.Label(jan2,foreground="#FFD700",background="red",text="CD-ROM")
self.l14["font"] = ("Lucida Console","15")
self.l14.place(x=400, y=250)
self.l14.bind("<Button-1>", self.boot)
self.l15 = tk.Label(jan2,foreground="white",background="#00008B",text="Second Boot Device")
self.l15["font"] = ("Lucida Console","15")
self.l15.place(x=30, y=280)
self.l16 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="HDD-0")
self.l16["font"] = ("Lucida Console","15")
self.l16.place(x=400, y=280)
self.l17 = tk.Label(jan2,foreground="white",background="#00008B",text="Third Boot Device")
self.l17["font"] = ("Lucida Console","15")
self.l17.place(x=30, y=310)
self.l18 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.l18["font"] = ("Lucida Console","15")
self.l18.place(x=400, y=310)
self.l19 = tk.Label(jan2,foreground="white",background="#00008B",text="Boot Other Device")
self.l19["font"] = ("Lucida Console","15")
self.l19.place(x=30, y=340)
self.l20 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.l20["font"] = ("Lucida Console","15")
self.l20.place(x=400, y=340)
self.l21 = tk.Label(jan2,foreground="white",background="#00008B",text="Swap Floppy Seek")
self.l21["font"] = ("Lucida Console","15")
self.l21.place(x=30, y=370)
self.l22 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.l22["font"] = ("Lucida Console","15")
self.l22.place(x=400, y=370)
self.l23 = tk.Label(jan2,foreground="white",background="#00008B",text="Boot Up Floppy Seek")
self.l23["font"] = ("Lucida Console","15")
self.l23.place(x=30, y=400)
self.l24 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Enabled")
self.l24["font"] = ("Lucida Console","15")
self.l24.place(x=400, y=400)
self.l25 = tk.Label(jan2,foreground="white",background="#00008B",text="Boot Up NumLock Status")
self.l25["font"] = ("Lucida Console","15")
self.l25.place(x=30, y=430)
self.l26 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="On")
self.l26["font"] = ("Lucida Console","15")
self.l26.place(x=400, y=430)
self.l27 = tk.Label(jan2,foreground="white",background="#00008B",text="Gate A20 Option")
self.l27["font"] = ("Lucida Console","15")
self.l27.place(x=30, y=460)
self.l28 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Normal")
self.l28["font"] = ("Lucida Console","15")
self.l28.place(x=400, y=460)
self.l29 = tk.Label(jan2,foreground="white",background="#00008B",text="Typematic Rate Setting")
self.l29["font"] = ("Lucida Console","15")
self.l29.place(x=30, y=490)
self.l30 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.l30["font"] = ("Lucida Console","15")
self.l30.place(x=400, y=490)
self.l31 = tk.Label(jan2,foreground="#1E90FF",background="#00008B",text="x Typematic Rate (Chars/Sec)")
self.l31["font"] = ("Lucida Console","15")
self.l31.place(x=9, y=520)
self.l32 = tk.Label(jan2,foreground="#1E90FF",background="#00008B",text="6")
self.l32["font"] = ("Lucida Console","15")
self.l32.place(x=400, y=520)
self.l33 = tk.Label(jan2,foreground="#1E90FF",background="#00008B",text="x Typematic Delay (Msec)")
self.l33["font"] = ("Lucida Console","15")
self.l33.place(x=9, y=550)
self.l34 = tk.Label(jan2,foreground="#1E90FF",background="#00008B",text="250")
self.l34["font"] = ("Lucida Console","15")
self.l34.place(x=400, y=550)
self.l33 = tk.Label(jan2,foreground="white",background="#00008B",text="Security Option")
self.l33["font"] = ("Lucida Console","15")
self.l33.place(x=30, y=580)
self.l34 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Setup")
self.l34["font"] = ("Lucida Console","15")
self.l34.place(x=400, y=580)
self.l35 = tk.Label(jan2,foreground="white",background="#00008B",text="OS Select For DRAM > 64MB")
self.l35["font"] = ("Lucida Console","15")
self.l35.place(x=30, y=580)
self.l36 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Non-OS2")
self.l36["font"] = ("Lucida Console","15")
self.l36.place(x=400, y=580)
self.l35 = tk.Label(jan2,foreground="white",background="#00008B",text="HDD S.M.A.R.T. Capability")
self.l35["font"] = ("Lucida Console","15")
self.l35.place(x=30, y=610)
self.l36 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Enabled")
self.l36["font"] = ("Lucida Console","15")
self.l36.place(x=400, y=610)
self.l37 = tk.Label(jan2,foreground="white",background="#00008B",text="_____________________________________________________________________________________")
self.l37["font"] = ("Lucida Console","15")
self.l37.place(x=0, y=630)
self.f10 = tk.Label(jan2,foreground="white",background="#00008B",text="F10: Save & Exit")
self.f10["font"] = ("Lucida Console","15")
self.f10.place(x=25, y=665)
self.l38 = tk.Label(jan2,foreground="white",background="#00008B",text="Item Help")
self.l38["font"] = ("Lucida Console","15")
self.l38.place(x=705, y=120)
self.l1 = tk.Label(jan2,foreground="white",background="#00008B",text="---------------------------------")
self.l1["font"] = ("Lucida Console","15")
self.l1.place(x=640, y=152)
self.p17 = tk.Label(jan2,foreground="white",background="#00008B",text="-Menu Level >")
self.p17["font"] = ("Lucida Console","15")
self.p17.place(x=650, y=180)
jan2.mainloop()
def boot(self,event):
messagebox.showinfo("WINDOWS 10", "Iniciando instalação...")
#tux = PhotoImage(file="tux.png")
#self.img0 = Label(janela, image=tux)
#self.img0.tux = tux
#self.img0.place(x=20, y=800)
w1 = PhotoImage(file="w1.png")
self.img1 = Label(janela, image=w1)
self.img1.w1 = w1
self.img1.place(x=123, y=50)
abnts = ["(Português Brasil ABNT-2)", "(Português Brasil ABNT)"]
abnt = ttk.Combobox(values=abnts)
abnt.set("(Português Brasil ABNT)")
abnt.place(x=412, y=262, width=337, height=22)
btAvancar = PhotoImage(file="btAvancar.png")
self.img2 = Label(janela, image=btAvancar)
self.img2.btAvancar = btAvancar
self.img2.place(x=740, y=324)
self.img2.bind("<Button-1>", self.avancar)
def avancar(self, event):
w2 = PhotoImage(file="w2.png")
self.img3 = Label(janela, image=w2)
self.img3.w2 = w2
self.img3.place(x=123, y=50)
btInstalar = PhotoImage(file="btInstalar.png")
self.img4 = Label(janela, image=btInstalar)
self.img4.btInstalar = btInstalar
self.img4.place(x=400, y=205)
self.img4.bind("<Button-1>", self.instalar)
def instalar(self, event):
w3 = PhotoImage(file="w3.png")
self.img5 = Label(janela, image=w3)
self.img5.w3 = w3
self.img5.place(x=113, y=52)
chave = PhotoImage(file="chave.png")
self.img6 = Label(janela, image=chave)
self.img6.chave = chave
self.img6.place(x=485, y=290)
self.img6.bind("<Button-1>", self.chaveW)
btAvancar2 = PhotoImage(file="btAvancar2.png")
self.img7 = Label(janela, image=btAvancar2)
self.img7.btAvancar2 = btAvancar2
self.img7.place(x=726, y=300)
self.img7.bind("<Button-1>", self.avancar2)
def chaveW(self, event):
self.img6.config(bg="lightblue")
def avancar2(self, event):
w4 = PhotoImage(file="w4.png")
self.img8 = Label(janela, image=w4)
self.img8.w4 = w4
self.img8.place(x=112, y=49)
btAvancar3 = PhotoImage(file="btAvancar3.png")
self.img9 = Label(janela, image=btAvancar3)
self.img9.btAvancar3 = btAvancar3
self.img9.place(x=726, y=300)
self.img9.bind("<Button-1>", self.avancar3)
def avancar3(self, event):
w5 = PhotoImage(file="w5.png")
self.img10 = Label(janela, image=w5)
self.img10.w5 = w5
self.img10.place(x=112, y=49)
btAvancar4 = PhotoImage(file="btAvancar4.png")
self.img11 = Label(janela, image=btAvancar4)
self.img11.btAvancar4 = btAvancar4
self.img11.place(x=726, y=305)
self.img11.bind("<Button-1>", self.avancar4)
def avancar4(self, event):
w6 = PhotoImage(file="w6.png")
self.img12 = Label(janela, image=w6)
self.img12.w6 = w6
self.img12.place(x=112, y=49)
personalizada = PhotoImage(file="personalizada.png")
self.img13 = Label(janela, image=personalizada)
self.img13.personalizada = personalizada
self.img13.place(x=206, y=205)
self.img13.bind("<Button-1>", self.avancar5)
def avancar5(self, event):
w7 = PhotoImage(file="w7.png")
self.img14 = Label(janela, image=w7)
self.img14.w7 = w7
self.img14.place(x=112, y=49)
formatar = PhotoImage(file="formatar.png")
self.img15 = Label(janela, image=formatar)
self.img15.formatar = formatar
self.img15.place(x=460, y=238)
self.img15.bind("<Button-1>", self.formatarW)
btAvancar6 = PhotoImage(file="btAvancar6.png")
self.img16 = Label(janela, image=btAvancar6)
self.img16.btAvancar6 = btAvancar6
self.img16.place(x=726, y=310)
self.img16.bind("<Button-1>", self.avancar6)
def formatarW(self, event):
messagebox.showwarning("Formatação Windows 10", "TODOS OS DADOS DESSA PARTIÇÃO SERÃO EXCLUÍDOS !!")
def avancar6(self, event):
w8 = PhotoImage(file="w8.png")
self.img18 = Label(janela, image=w8)
self.img18.w8 = w8
self.img18.place(x=112, y=49)
self.img18.bind("<Button-1>", self.win)
def win(self, event):
w9 = PhotoImage(file="w9.png")
self.img19 = Label(janela, image=w9)
self.img19.w9 = w9
self.img19.place(x=112, y=49)
self.img19.bind("<Button-1>", self.win10)
def win10(self, event):
w10 = PhotoImage(file="w10.png")
self.img20 = Label(janela, image=w10)
self.img20.w10 = w10
self.img20.place(x=112, y=49)
iniciar = PhotoImage(file="iniciar.png")
self.img21 = Label(janela, image=iniciar)
self.img21.iniciar = iniciar
self.img21.place(x=112, y=354)
self.img21.bind("<Enter>", self.gerenciador)
self.img21.bind("<Leave>", self.fecharGerenciador)
chrome = PhotoImage(file="chrome.png")
self.img23 = Label(janela, image=chrome)
self.img23.chrome = chrome
self.img23.place(x=600, y=100)
self.img23.bind("<Enter>", self.chrome)
self.img23.bind("<Leave>", self.chromeSair)
winrar = PhotoImage(file="winrar.png")
self.img26 = Label(janela, image=winrar)
self.img26.winrar = winrar
self.img26.place(x=700, y=100)
self.img26.bind("<Enter>", self.winrar)
self.img26.bind("<Leave>", self.winrarSair)
reader = PhotoImage(file="reader.png")
self.img27 = Label(janela, image=reader)
self.img27.reader = reader
self.img27.place(x=600, y=200)
self.img27.bind("<Enter>", self.reader)
self.img27.bind("<Leave>", self.readerSair)
driver = PhotoImage(file="driver.png")
self.img28 = Label(janela, image=driver)
self.img28.driver = driver
self.img28.place(x=700, y=200)
self.img28.bind("<Enter>", self.driver)
self.img28.bind("<Leave>", self.driverSair)
def reader(self, event):
telaReader = PhotoImage(file="telaReader.png")
self.img27 = Label(janela, image=telaReader)
self.img27.telaReader = telaReader
self.img27.place(x=150, y=80)
def driver(self, event):
telaDriver = PhotoImage(file="telaDriver.png")
self.img28 = Label(janela, image=telaDriver)
self.img28.telaDriver = telaDriver
self.img28.place(x=150, y=80)
def chrome(self, event):
telaChrome = PhotoImage(file="telaChrome.png")
self.img24 = Label(janela, image=telaChrome)
self.img24.telaChrome = telaChrome
self.img24.place(x=150, y=80)
def winrar(self, event):
telaWinrar = PhotoImage(file="telaWinrar.png")
self.img26 = Label(janela, image=telaWinrar)
self.img26.telaWinrar = telaWinrar
self.img26.place(x=150, y=80)
def chromeSair(self, event):
self.img24.place(x=1900, y=80)
def driverSair(self, event):
self.img28.place(x=1900, y=80)
def readerSair(self, event):
self.img27.place(x=1900, y=80)
def winrarSair(self, event):
self.img26.place(x=1900, y=80)
def gerenciador(self, event):
gerenciador = PhotoImage(file="gerenciador.png")
self.img22 = Label(janela, image=gerenciador)
self.img22.gerenciador = gerenciador
self.img22.place(x=112, y=54)
def fecharGerenciador(self, event):
self.img22.place(x=1900, y=0)
janela = Tk()
Tela(janela)
janela.title("Simulador Formatação")
janela.geometry("1400x830+50+5")
janela.resizable(width=False, height=False)
janela.config(bg="white")
janela.config(cursor="hand2")
janela.iconbitmap("placa2.ico")
janela.mainloop()
| 35.544226 | 169 | 0.571631 | from tkinter import *
from tkinter import messagebox
import tkinter as tk
from tkinter import ttk
class Tela:
def fechar(self, event):
janela.destroy()
exit()
def fecharPc(self, event):
self.lb_simulador.place_forget()
self.imgFundo.place_forget()
self.imgg2.place_forget()
self.lbGabinete.config(bg="white")
lbMonitor.place(x=100, y=30)
self.imggg.place_forget()
self.imgg3.place_forget()
self.imgg4.place_forget()
self.imgg5.place_forget()
self.imgg6.place_forget()
self.imgg7.place_forget()
self.imgg8.place_forget()
self.imgg9.place_forget()
def __init__(self, master):
global lbMonitor
monitor = PhotoImage(file="monitor.png")
lbMonitor = Label(image=monitor)
lbMonitor.monitor = monitor
lbMonitor.place(x=100, y=30)
gabinete = PhotoImage(file="gabinete.png")
self.lbGabinete = Label(janela, image=gabinete)
self.lbGabinete.gabinete = gabinete
self.lbGabinete.place(x=970, y=285)
self.lbGabinete.bind("<Enter>", self.abrirPc)
self.lbGabinete.bind("<Leave>", self.fecharPc)
self.lbGabinete.bind("<Button-1>", self.defeitos)
teclado = PhotoImage(file="teclado.png")
lbTeclado = Label(janela, image=teclado)
lbTeclado.teclado = teclado
lbTeclado.place(x=50, y=530)
delete = PhotoImage(file="delete.png")
lbDelete = Label(janela, image=delete)
lbDelete.delete = delete
lbDelete.config(bg="red")
lbDelete.bind("<Button-1>", self.bios)
lbDelete.place(x=842, y=722)
self.sair = Button(janela, text="[X]")
self.sair["font"] = ("Arial", "15")
self.sair.config(bg="red", foreground="white")
self.sair.place(x=1200, y=30)
self.sair.bind("<Button-1>", self.fechar)
def defeitos(self, event):
janela2 = Tk()
self.p = Label(janela2, text="O computador liga normalmente mas não aparece nada\n no monitor. Quais peças devem ser testadas ?")
self.p["font"] = ("Lucida console", "30")
self.p.config(bg="black", foreground="limegreen")
self.p.place(x=140, y=30)
img_monitor = PhotoImage(master=janela2, file="monitor2.png")
self.monitor2 = Label(janela2, image=img_monitor)
self.monitor2.img_monitor = img_monitor
self.monitor2.place(x=120,y=200)
img_placa = PhotoImage(master=janela2, file="placa2.png")
self.placa = Label(janela2, image=img_placa)
self.placa.img_placa = img_placa
self.placa.place(x=420,y=200)
img_hd = PhotoImage(master=janela2, file="hd2.png")
self.hd = Label(janela2, image=img_hd)
self.hd.img_hd = img_hd
self.hd.place(x=720,y=200)
img_gpu = PhotoImage(master=janela2, file="gpu2.png")
self.gpu = Label(janela2, image=img_gpu)
self.gpu.img_gpu = img_gpu
self.gpu.place(x=1020,y=200)
janela.title("Simulador de defeitos")
janela2.geometry("1400x830+50+5")
def abrirPc(self, event):
global lbMonitor
self.lb_simulador = Label(janela, text="Clique para iniciar\n simulador de defeitos")
self.lb_simulador["font"] = ("Arial", "20")
self.lb_simulador.config(bg="black", foreground="white")
self.lb_simulador.place(x=970, y=210)
lbMonitor.place(x=1800, y=10)
fundobranco = PhotoImage(file="fundobranco.png")
self.imgFundo = Label(janela, image=fundobranco)
self.imgFundo.fundobranco = fundobranco
self.imgFundo.config(bg="white")
self.imgFundo.place(x=80,y=30)
gabineteAberto = PhotoImage(file="gabineteAberto.png")
self.imggg = Label(janela, image=gabineteAberto)
self.imggg.gabineteAberto = gabineteAberto
self.lbGabinete.config(bg="green")
self.imggg.place(x=60,y=100)
hd = PhotoImage(file="hd.png")
self.imgg2 = Label(janela, image=hd)
self.imgg2.hd = hd
self.imgg2.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg2.place(x=500,y=30)
fonte = PhotoImage(file="fonte.png")
self.imgg3 = Label(janela, image=fonte)
self.imgg3.fonte = fonte
self.imgg3.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg3.place(x=650,y=30)
cpu = PhotoImage(file="cpu.png")
self.imgg4 = Label(janela, image=cpu)
self.imgg4.cpu = cpu
self.imgg4.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg4.place(x=800,y=30)
placa = PhotoImage(file="placa.png")
self.imgg5 = Label(janela, image=placa)
self.imgg5.placa = placa
self.imgg5.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg5.place(x=500,y=200)
memoria = PhotoImage(file="memoria.png")
self.imgg6 = Label(janela, image=memoria)
self.imgg6.memoria = memoria
self.imgg6.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg6.place(x=650,y=200)
sata = PhotoImage(file="sata.png")
self.imgg7 = Label(janela, image=sata)
self.imgg7.sata = sata
self.imgg7.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg7.place(x=800,y=200)
cooler = PhotoImage(file="cooler.png")
self.imgg8 = Label(janela, image=cooler)
self.imgg8.cooler = cooler
self.imgg8.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg8.place(x=500,y=370)
gpu = PhotoImage(file="gpu.png")
self.imgg9 = Label(janela, image=gpu)
self.imgg9.gpu = gpu
self.imgg9.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg9.place(x=650,y=370)
def bios(self, event):
janela2 = tk.Tk()
p1 = tk.Label(janela2,foreground="white",background="#00008B",text="CMOS Setup Utility - Copyright (C) 1984-1999 Award Software")
p1["font"] = ("Lucida Console","18")
p1.pack(pady=7,padx=7,ipady=20,ipadx=7)
linhaH = tk.Label(janela2,foreground="white",background="#00008B",text="____________________________________________________________________")
linhaH["font"] = ("Lucida Console","18")
linhaH.place(x=0,y=60)
linhaV = tk.Label(janela2,foreground="white",background="#00008B",text="|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n")
linhaV["font"] = ("Lucida Console","12")
linhaV.place(x=470,y=90)
self.p2 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Standard CMOS Features")
self.p2["font"] = ("Lucida Console","15")
self.p2.place(x=80, y=100)
self.p3 = tk.Label(janela2,foreground="yellow",background="red",text="> Advanced BIOS Features")
self.p3["font"] = ("Lucida Console","15")
self.p3.place(x=80, y=140)
self.p3.bind("<Button-1>", self.bios2)
p4 = tk.Label(janela2, foreground="#FFD700",background="#00008B",text="> Advanced Chipset Features")
p4["font"] = ("Lucida Console","15")
p4.place(x=80, y=180)
p5 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Integrated Peripherials")
p5["font"] = ("Lucida Console","15")
p5.place(x=80, y=220)
p6 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Power Management Setup")
p6["font"] = ("Lucida Console","15")
p6.place(x=80, y=260)
p7 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> PnP/PCI Configurations")
p7["font"] = ("Lucida Console","15")
p7.place(x=80, y=300)
p8 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> PC Health Status")
p8["font"] = ("Lucida Console","15")
p8.place(x=80, y=340)
p9 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Frequency/Voltage Control")
p9["font"] = ("Lucida Console","15")
p9.place(x=520, y=100)
p10 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Load Fail-Safe Defaults")
p10["font"] = ("Lucida Console","15")
p10.place(x=520, y=140)
p11 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Load Optimized Defaults")
p11["font"] = ("Lucida Console","15")
p11.place(x=520, y=180)
p12 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Set Supervisor Password")
p12["font"] = ("Lucida Console","15")
p12.place(x=520, y=220)
p13 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Set User Password")
p13["font"] = ("Lucida Console","15")
p13.place(x=520, y=260)
p14 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="Save & Exit Setup")
p14["font"] = ("Lucida Console","15")
p14.place(x=520, y=300)
p15 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="Exit Without Saving")
p15["font"] = ("Lucida Console","15")
p15.place(x=520, y=300)
esc = tk.Label(janela2,foreground="white",background="#00008B",text="Esc : Quit")
esc["font"] = ("Lucida Console","15")
esc.place(x=23, y=470)
f10 = tk.Label(janela2,foreground="white",background="#00008B",text="F10 : Save & Exit Setup")
f10["font"] = ("Lucida Console","15")
f10.place(x=23, y=498)
rodape = tk.Label(janela2, text="Time, Date, Hard Disk Type. . .")
rodape["font"] = ("Helvetica","16")
rodape.configure(background="#00008B", foreground="#FFD700")
rodape.place(x=280,y=580)
janela2.title("BIOS")
janela2.geometry("880x640+200+30")
janela2.config(bg="#00008B")
janela2.config(cursor="hand2")
janela2.resizable(width=False, height=False)
janela2.mainloop()
def fecharBios(self, event):
janela2.destroy()
def bios2(self, event):
jan2= tk.Tk()
jan2.configure(bg="#00008B")
jan2.geometry('880x700+200+20')
jan2.config(cursor="hand2")
jan2.resizable(width=False, height=False)
jan2.title("Ordem de Boot")
self.lb1 = tk.Label(jan2,foreground="white",background="#00008B",text="Phoenix - Award BIOS CMOS Setup Utility\nAdvanced BIOS Features")
self.lb1["font"] = ("Lucida Console","18")
self.lb1.pack(pady=7,padx=7,ipady=15,ipadx=7)
self.l1 = tk.Label(jan2,foreground="white",background="#00008B",text="____________________________________________________________________________")
self.l1["font"] = ("Lucida Console","18")
self.l1.place(x=0,y=70)
self.l2 = tk.Label(jan2,foreground="white",background="#00008B",text="|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|")
self.l2["font"] = ("Lucida Console","15")
self.l2.place(x=630, y=95)
self.lb3 = tk.Label(jan2,foreground="white",background="#00008B",text="Virus Warning")
self.lb3["font"] = ("Lucida Console","15")
self.lb3.place(x=30, y=100)
self.lb4 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.lb4["font"] = ("Lucida Console","15")
self.lb4.place(x=400, y=100)
self.lb5 = tk.Label(jan2,foreground="white",background="#00008B",text="CPU L1 Cache")
self.lb5["font"] = ("Lucida Console","15")
self.lb5.place(x=30, y=130)
self.lb6 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Enabled")
self.lb6["font"] = ("Lucida Console","15")
self.lb6.place(x=400, y=130)
self.lb7 = tk.Label(jan2,foreground="white",background="#00008B",text="CPU L2 Cache")
self.lb7["font"] = ("Lucida Console","15")
self.lb7.place(x=30, y=160)
self.lb8 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Enabled")
self.lb8["font"] = ("Lucida Console","15")
self.lb8.place(x=400, y=160)
self.lb9 = tk.Label(jan2,foreground="white",background="#00008B",text="Quick Power On Self Test")
self.lb9["font"] = ("Lucida Console","15")
self.lb9.place(x=30, y=190)
self.lb10 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Enabled")
self.lb10["font"] = ("Lucida Console","15")
self.lb10.place(x=400, y=190)
self.l11 = tk.Label(jan2,foreground="white",background="#00008B",text="HDD Boot Sprite")
self.l11["font"] = ("Lucida Console","15")
self.l11.place(x=30, y=220)
self.l12 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.l12["font"] = ("Lucida Console","15")
self.l12.place(x=400, y=220)
self.l13 = tk.Label(jan2,foreground="white",background="#00008B",text="First Boot Device")
self.l13["font"] = ("Lucida Console","15")
self.l13.place(x=30, y=250)
self.l14 = tk.Label(jan2,foreground="#FFD700",background="red",text="CD-ROM")
self.l14["font"] = ("Lucida Console","15")
self.l14.place(x=400, y=250)
self.l14.bind("<Button-1>", self.boot)
self.l15 = tk.Label(jan2,foreground="white",background="#00008B",text="Second Boot Device")
self.l15["font"] = ("Lucida Console","15")
self.l15.place(x=30, y=280)
self.l16 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="HDD-0")
self.l16["font"] = ("Lucida Console","15")
self.l16.place(x=400, y=280)
self.l17 = tk.Label(jan2,foreground="white",background="#00008B",text="Third Boot Device")
self.l17["font"] = ("Lucida Console","15")
self.l17.place(x=30, y=310)
self.l18 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.l18["font"] = ("Lucida Console","15")
self.l18.place(x=400, y=310)
self.l19 = tk.Label(jan2,foreground="white",background="#00008B",text="Boot Other Device")
self.l19["font"] = ("Lucida Console","15")
self.l19.place(x=30, y=340)
self.l20 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.l20["font"] = ("Lucida Console","15")
self.l20.place(x=400, y=340)
self.l21 = tk.Label(jan2,foreground="white",background="#00008B",text="Swap Floppy Seek")
self.l21["font"] = ("Lucida Console","15")
self.l21.place(x=30, y=370)
self.l22 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.l22["font"] = ("Lucida Console","15")
self.l22.place(x=400, y=370)
self.l23 = tk.Label(jan2,foreground="white",background="#00008B",text="Boot Up Floppy Seek")
self.l23["font"] = ("Lucida Console","15")
self.l23.place(x=30, y=400)
self.l24 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Enabled")
self.l24["font"] = ("Lucida Console","15")
self.l24.place(x=400, y=400)
self.l25 = tk.Label(jan2,foreground="white",background="#00008B",text="Boot Up NumLock Status")
self.l25["font"] = ("Lucida Console","15")
self.l25.place(x=30, y=430)
self.l26 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="On")
self.l26["font"] = ("Lucida Console","15")
self.l26.place(x=400, y=430)
self.l27 = tk.Label(jan2,foreground="white",background="#00008B",text="Gate A20 Option")
self.l27["font"] = ("Lucida Console","15")
self.l27.place(x=30, y=460)
self.l28 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Normal")
self.l28["font"] = ("Lucida Console","15")
self.l28.place(x=400, y=460)
self.l29 = tk.Label(jan2,foreground="white",background="#00008B",text="Typematic Rate Setting")
self.l29["font"] = ("Lucida Console","15")
self.l29.place(x=30, y=490)
self.l30 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.l30["font"] = ("Lucida Console","15")
self.l30.place(x=400, y=490)
self.l31 = tk.Label(jan2,foreground="#1E90FF",background="#00008B",text="x Typematic Rate (Chars/Sec)")
self.l31["font"] = ("Lucida Console","15")
self.l31.place(x=9, y=520)
self.l32 = tk.Label(jan2,foreground="#1E90FF",background="#00008B",text="6")
self.l32["font"] = ("Lucida Console","15")
self.l32.place(x=400, y=520)
self.l33 = tk.Label(jan2,foreground="#1E90FF",background="#00008B",text="x Typematic Delay (Msec)")
self.l33["font"] = ("Lucida Console","15")
self.l33.place(x=9, y=550)
self.l34 = tk.Label(jan2,foreground="#1E90FF",background="#00008B",text="250")
self.l34["font"] = ("Lucida Console","15")
self.l34.place(x=400, y=550)
self.l33 = tk.Label(jan2,foreground="white",background="#00008B",text="Security Option")
self.l33["font"] = ("Lucida Console","15")
self.l33.place(x=30, y=580)
self.l34 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Setup")
self.l34["font"] = ("Lucida Console","15")
self.l34.place(x=400, y=580)
self.l35 = tk.Label(jan2,foreground="white",background="#00008B",text="OS Select For DRAM > 64MB")
self.l35["font"] = ("Lucida Console","15")
self.l35.place(x=30, y=580)
self.l36 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Non-OS2")
self.l36["font"] = ("Lucida Console","15")
self.l36.place(x=400, y=580)
self.l35 = tk.Label(jan2,foreground="white",background="#00008B",text="HDD S.M.A.R.T. Capability")
self.l35["font"] = ("Lucida Console","15")
self.l35.place(x=30, y=610)
self.l36 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Enabled")
self.l36["font"] = ("Lucida Console","15")
self.l36.place(x=400, y=610)
self.l37 = tk.Label(jan2,foreground="white",background="#00008B",text="_____________________________________________________________________________________")
self.l37["font"] = ("Lucida Console","15")
self.l37.place(x=0, y=630)
self.f10 = tk.Label(jan2,foreground="white",background="#00008B",text="F10: Save & Exit")
self.f10["font"] = ("Lucida Console","15")
self.f10.place(x=25, y=665)
self.l38 = tk.Label(jan2,foreground="white",background="#00008B",text="Item Help")
self.l38["font"] = ("Lucida Console","15")
self.l38.place(x=705, y=120)
self.l1 = tk.Label(jan2,foreground="white",background="#00008B",text="---------------------------------")
self.l1["font"] = ("Lucida Console","15")
self.l1.place(x=640, y=152)
self.p17 = tk.Label(jan2,foreground="white",background="#00008B",text="-Menu Level >")
self.p17["font"] = ("Lucida Console","15")
self.p17.place(x=650, y=180)
jan2.mainloop()
def boot(self,event):
messagebox.showinfo("WINDOWS 10", "Iniciando instalação...")
w1 = PhotoImage(file="w1.png")
self.img1 = Label(janela, image=w1)
self.img1.w1 = w1
self.img1.place(x=123, y=50)
abnts = ["(Português Brasil ABNT-2)", "(Português Brasil ABNT)"]
abnt = ttk.Combobox(values=abnts)
abnt.set("(Português Brasil ABNT)")
abnt.place(x=412, y=262, width=337, height=22)
btAvancar = PhotoImage(file="btAvancar.png")
self.img2 = Label(janela, image=btAvancar)
self.img2.btAvancar = btAvancar
self.img2.place(x=740, y=324)
self.img2.bind("<Button-1>", self.avancar)
def avancar(self, event):
    # Second installer screen with the "Instalar" (install now) button.
    w2 = PhotoImage(file="w2.png")
    self.img3 = Label(janela, image=w2)
    self.img3.w2 = w2  # keep a reference so tkinter does not garbage-collect the image
    self.img3.place(x=123, y=50)
    btInstalar = PhotoImage(file="btInstalar.png")
    self.img4 = Label(janela, image=btInstalar)
    self.img4.btInstalar = btInstalar
    self.img4.place(x=400, y=205)
    self.img4.bind("<Button-1>", self.instalar)
def instalar(self, event):
    # Product-key screen: clicking the key field highlights it (chaveW),
    # "Avançar" moves on to the license screen (avancar2).
    w3 = PhotoImage(file="w3.png")
    self.img5 = Label(janela, image=w3)
    self.img5.w3 = w3  # keep a reference so tkinter does not garbage-collect the image
    self.img5.place(x=113, y=52)
    chave = PhotoImage(file="chave.png")
    self.img6 = Label(janela, image=chave)
    self.img6.chave = chave
    self.img6.place(x=485, y=290)
    self.img6.bind("<Button-1>", self.chaveW)
    btAvancar2 = PhotoImage(file="btAvancar2.png")
    self.img7 = Label(janela, image=btAvancar2)
    self.img7.btAvancar2 = btAvancar2
    self.img7.place(x=726, y=300)
    self.img7.bind("<Button-1>", self.avancar2)
def chaveW(self, event):
    # Highlight the product-key field when it is clicked.
    self.img6.config(bg="lightblue")
def avancar2(self, event):
    # License-terms screen; "Avançar" continues to avancar3.
    w4 = PhotoImage(file="w4.png")
    self.img8 = Label(janela, image=w4)
    self.img8.w4 = w4  # keep a reference so tkinter does not garbage-collect the image
    self.img8.place(x=112, y=49)
    btAvancar3 = PhotoImage(file="btAvancar3.png")
    self.img9 = Label(janela, image=btAvancar3)
    self.img9.btAvancar3 = btAvancar3
    self.img9.place(x=726, y=300)
    self.img9.bind("<Button-1>", self.avancar3)
def avancar3(self, event):
    # Next installer screen; "Avançar" continues to avancar4.
    w5 = PhotoImage(file="w5.png")
    self.img10 = Label(janela, image=w5)
    self.img10.w5 = w5  # keep a reference so tkinter does not garbage-collect the image
    self.img10.place(x=112, y=49)
    btAvancar4 = PhotoImage(file="btAvancar4.png")
    self.img11 = Label(janela, image=btAvancar4)
    self.img11.btAvancar4 = btAvancar4
    self.img11.place(x=726, y=305)
    self.img11.bind("<Button-1>", self.avancar4)
def avancar4(self, event):
    # Installation-type screen: clicking "Personalizada" (custom install)
    # continues to the partition screen (avancar5).
    w6 = PhotoImage(file="w6.png")
    self.img12 = Label(janela, image=w6)
    self.img12.w6 = w6  # keep a reference so tkinter does not garbage-collect the image
    self.img12.place(x=112, y=49)
    personalizada = PhotoImage(file="personalizada.png")
    self.img13 = Label(janela, image=personalizada)
    self.img13.personalizada = personalizada
    self.img13.place(x=206, y=205)
    self.img13.bind("<Button-1>", self.avancar5)
def avancar5(self, event):
    # Partition screen: "Formatar" shows a data-loss warning (formatarW),
    # "Avançar" starts copying files (avancar6).
    w7 = PhotoImage(file="w7.png")
    self.img14 = Label(janela, image=w7)
    self.img14.w7 = w7  # keep a reference so tkinter does not garbage-collect the image
    self.img14.place(x=112, y=49)
    formatar = PhotoImage(file="formatar.png")
    self.img15 = Label(janela, image=formatar)
    self.img15.formatar = formatar
    self.img15.place(x=460, y=238)
    self.img15.bind("<Button-1>", self.formatarW)
    btAvancar6 = PhotoImage(file="btAvancar6.png")
    self.img16 = Label(janela, image=btAvancar6)
    self.img16.btAvancar6 = btAvancar6
    self.img16.place(x=726, y=310)
    self.img16.bind("<Button-1>", self.avancar6)
def formatarW(self, event):
    # Warn that formatting the partition erases all of its data.
    messagebox.showwarning("Formatação Windows 10", "TODOS OS DADOS DESSA PARTIÇÃO SERÃO EXCLUÍDOS !!")
def avancar6(self, event):
    # File-copy progress screen; clicking it advances to the reboot screen (win).
    w8 = PhotoImage(file="w8.png")
    self.img18 = Label(janela, image=w8)
    self.img18.w8 = w8  # keep a reference so tkinter does not garbage-collect the image
    self.img18.place(x=112, y=49)
    self.img18.bind("<Button-1>", self.win)
def win(self, event):
    # First-boot screen; clicking it advances to the desktop (win10).
    w9 = PhotoImage(file="w9.png")
    self.img19 = Label(janela, image=w9)
    self.img19.w9 = w9  # keep a reference so tkinter does not garbage-collect the image
    self.img19.place(x=112, y=49)
    self.img19.bind("<Button-1>", self.win10)
def win10(self, event):
    # Final "desktop" screen with hover-activated icons: each icon shows a
    # preview window on <Enter> and hides it on <Leave>.
    w10 = PhotoImage(file="w10.png")
    self.img20 = Label(janela, image=w10)
    self.img20.w10 = w10  # keep a reference so tkinter does not garbage-collect the image
    self.img20.place(x=112, y=49)
    # Start button: hovering opens the start menu, leaving closes it.
    iniciar = PhotoImage(file="iniciar.png")
    self.img21 = Label(janela, image=iniciar)
    self.img21.iniciar = iniciar
    self.img21.place(x=112, y=354)
    self.img21.bind("<Enter>", self.gerenciador)
    self.img21.bind("<Leave>", self.fecharGerenciador)
    # Chrome icon.
    chrome = PhotoImage(file="chrome.png")
    self.img23 = Label(janela, image=chrome)
    self.img23.chrome = chrome
    self.img23.place(x=600, y=100)
    self.img23.bind("<Enter>", self.chrome)
    self.img23.bind("<Leave>", self.chromeSair)
    # WinRAR icon.
    winrar = PhotoImage(file="winrar.png")
    self.img26 = Label(janela, image=winrar)
    self.img26.winrar = winrar
    self.img26.place(x=700, y=100)
    self.img26.bind("<Enter>", self.winrar)
    self.img26.bind("<Leave>", self.winrarSair)
    # Reader icon.
    reader = PhotoImage(file="reader.png")
    self.img27 = Label(janela, image=reader)
    self.img27.reader = reader
    self.img27.place(x=600, y=200)
    self.img27.bind("<Enter>", self.reader)
    self.img27.bind("<Leave>", self.readerSair)
    # Driver-tool icon.
    driver = PhotoImage(file="driver.png")
    self.img28 = Label(janela, image=driver)
    self.img28.driver = driver
    self.img28.place(x=700, y=200)
    self.img28.bind("<Enter>", self.driver)
    self.img28.bind("<Leave>", self.driverSair)
def reader(self, event):
    # Show the Reader preview on hover.
    # NOTE(review): this rebinds self.img27 (the desktop icon created in
    # win10) to the preview label, so the icon reference is lost — confirm
    # this is intentional.
    telaReader = PhotoImage(file="telaReader.png")
    self.img27 = Label(janela, image=telaReader)
    self.img27.telaReader = telaReader
    self.img27.place(x=150, y=80)
def driver(self, event):
    # Show the driver-tool preview on hover.
    # NOTE(review): rebinds self.img28 (the desktop icon) to the preview
    # label, losing the icon reference — confirm this is intentional.
    telaDriver = PhotoImage(file="telaDriver.png")
    self.img28 = Label(janela, image=telaDriver)
    self.img28.telaDriver = telaDriver
    self.img28.place(x=150, y=80)
def chrome(self, event):
    # Show the Chrome preview on hover (stored in self.img24; hidden again
    # by chromeSair).
    telaChrome = PhotoImage(file="telaChrome.png")
    self.img24 = Label(janela, image=telaChrome)
    self.img24.telaChrome = telaChrome
    self.img24.place(x=150, y=80)
def winrar(self, event):
    # Show the WinRAR preview on hover.
    # NOTE(review): rebinds self.img26 (the desktop icon) to the preview
    # label, losing the icon reference — confirm this is intentional.
    telaWinrar = PhotoImage(file="telaWinrar.png")
    self.img26 = Label(janela, image=telaWinrar)
    self.img26.telaWinrar = telaWinrar
    self.img26.place(x=150, y=80)
def chromeSair(self, event):
    # Hide the Chrome preview by moving it off-screen (assumes chrome()
    # already created self.img24 on a prior <Enter>).
    self.img24.place(x=1900, y=80)
def driverSair(self, event):
    # Hide the driver-tool preview by moving it off-screen.
    self.img28.place(x=1900, y=80)
def readerSair(self, event):
    # Hide the Reader preview by moving it off-screen.
    self.img27.place(x=1900, y=80)
def winrarSair(self, event):
    # Hide the WinRAR preview by moving it off-screen.
    self.img26.place(x=1900, y=80)
def gerenciador(self, event):
    # Show the start-menu overlay while the cursor is over the start button.
    gerenciador = PhotoImage(file="gerenciador.png")
    self.img22 = Label(janela, image=gerenciador)
    self.img22.gerenciador = gerenciador
    self.img22.place(x=112, y=54)
def fecharGerenciador(self, event):
    # Hide the start menu by moving it off-screen.
    self.img22.place(x=1900, y=0)
# Build the fixed-size main window and start the formatting simulator.
janela = Tk()
Tela(janela)
janela.title("Simulador Formatação")
janela.geometry("1400x830+50+5")
janela.resizable(width=False, height=False)
janela.config(bg="white")
janela.config(cursor="hand2")
janela.iconbitmap("placa2.ico")
janela.mainloop()
| true | true |
f7181c55922ded847f3c093a97e05cf3a83a7542 | 502 | py | Python | descarteslabs/vectors/exceptions.py | carderne/descarteslabs-python | 757b480efb8d58474a3bf07f1dbd90652b46ed64 | [
"Apache-2.0"
] | 167 | 2017-03-23T22:16:58.000Z | 2022-03-08T09:19:30.000Z | descarteslabs/vectors/exceptions.py | carderne/descarteslabs-python | 757b480efb8d58474a3bf07f1dbd90652b46ed64 | [
"Apache-2.0"
] | 93 | 2017-03-23T22:11:40.000Z | 2021-12-13T18:38:53.000Z | descarteslabs/vectors/exceptions.py | carderne/descarteslabs-python | 757b480efb8d58474a3bf07f1dbd90652b46ed64 | [
"Apache-2.0"
] | 46 | 2017-03-25T19:12:14.000Z | 2021-08-15T18:04:29.000Z | class VectorException(Exception):
"""Base exception for Vector operations"""
pass
class WaitTimeoutError(VectorException):
    """Raised when the timeout period for a wait operation has been exceeded."""
class FailedJobError(VectorException):
    """Raised to indicate that an asynchronous job has failed."""
class InvalidQueryException(VectorException):
    """Raised when the submitted query is invalid."""
# Backward-compatible alias kept for callers that still import
# FailedCopyError; new code should use FailedJobError.
FailedCopyError = FailedJobError
| 18.592593 | 67 | 0.737052 | class VectorException(Exception):
pass
class WaitTimeoutError(VectorException):
pass
class FailedJobError(VectorException):
pass
class InvalidQueryException(VectorException):
pass
FailedCopyError = FailedJobError
| true | true |
f7181d74255d1aac4659dd861c34a79c119960a0 | 2,097 | py | Python | permutation.py | kaixindelele/self_demo | cdde94de6d7fa2beb4d0cc9d14eedcb6228cf0af | [
"Apache-2.0"
] | null | null | null | permutation.py | kaixindelele/self_demo | cdde94de6d7fa2beb4d0cc9d14eedcb6228cf0af | [
"Apache-2.0"
] | null | null | null | permutation.py | kaixindelele/self_demo | cdde94de6d7fa2beb4d0cc9d14eedcb6228cf0af | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 9 14:56:07 2018
f(n) = n * f(n-1)
and if a is a string variable
a = "hello"
b = a
b = b+" world"
print(b): hello world
print(a): hello
so "=" equal "copy"
and creat a new date
@author: lele
"""
a = "1234"

# Number of digits being permuted. The original code printed `size` before it
# was ever assigned (a NameError); derive it from the input string instead.
size = len(a)
print("size:", size)

# Smallest arrangement of the digits 1..size as an integer (e.g. 1234) and,
# reversed, the largest arrangement as a string (e.g. "4321"), which marks
# the end of the permutation enumeration below.
ls = range(1, size + 1)
minimum = 0
for figure in ls:
    minimum += figure * (10 ** (size - 1 - ls.index(figure)))
maximum = list(str(minimum))
maximum.reverse()
maximum = "".join(maximum)
def swap(temp, a, b):
    """Return *temp* as a list with the elements at positions a and b exchanged.

    The input sequence itself is never mutated; a fresh list is returned.
    """
    result = list(temp)
    result[a], result[b] = result[b], result[a]
    return result
# Next-permutation enumeration: repeatedly transform temp_ls into its
# lexicographic successor until the largest arrangement (`maximum`) is
# reached, counting the permutations visited.
# NOTE(review): the commented-out line below is probably what was intended;
# with the hard-coded "123" the target `maximum` (computed from `a`, length 4)
# can never be matched, so the loop can only stop by raising — confirm.
#temp_ls = list(str(minimum))
temp_ls = list("123")
size = len(temp_ls)
print("a:",a)
print("original temp_ls:",temp_ls)
count = 0
while(1):
    if("".join(temp_ls) == maximum):
        break
    for i in range(size):
        # Scan from the right for the first position where the order
        # increases; the pivot sits at index size-i-2.
        # NOTE(review): when i == size-1 the index size-i-2 is -1 and wraps
        # to the last element — confirm that fallthrough is intended.
        if(temp_ls[size-i-1]>temp_ls[size-i-2]):
            roi = temp_ls[size-i-2:]
            a = size-i-2
            a_value = temp_ls[a]
            # Collect the suffix elements strictly larger than the pivot.
            second = []
            for j in roi:
                if(j>a_value):
                    second.append(j)
            print("second",second)
            b_value = min(second)
            # NOTE(review): index() searches from the start of the whole
            # list, not just the suffix — with repeated digits this could
            # pick the wrong swap position; confirm inputs are duplicate-free.
            b = temp_ls.index(b_value)
            print("a",a)
            print("b",b)
            temp_ls = swap(temp_ls,a,b)
            print("swap:",temp_ls)
            # Reverse the suffix after the pivot to obtain the smallest
            # arrangement greater than the previous one.
            rest = temp_ls[size-i-1:]
            print("rest",rest)
            rest.reverse()
            temp_ls[size-i-1:] = rest
            print("finally temp_ls",temp_ls)
            count += 1
            print("count:",count)
            print("--------------")
            break
| 19.063636 | 58 | 0.44206 |
a = "1234"
print("size:",size)
ls = range(1,size+1)
minimum = 0
for figure in ls:
minimum += figure * (10 ** (size-1-ls.index(figure)))
maximum = list(str(minimum))
maximum.reverse()
maximum = "".join(maximum)
def swap(temp,a,b):
temp = list(temp)
temp[a],temp[b] = temp[b],temp[a]
return temp
temp_ls = list("123")
size = len(temp_ls)
print("a:",a)
print("original temp_ls:",temp_ls)
count = 0
while(1):
if("".join(temp_ls) == maximum):
break
for i in range(size):
if(temp_ls[size-i-1]>temp_ls[size-i-2]):
roi = temp_ls[size-i-2:]
a = size-i-2
a_value = temp_ls[a]
second = []
for j in roi:
if(j>a_value):
second.append(j)
print("second",second)
b_value = min(second)
b = temp_ls.index(b_value)
print("a",a)
print("b",b)
temp_ls = swap(temp_ls,a,b)
print("swap:",temp_ls)
rest = temp_ls[size-i-1:]
print("rest",rest)
rest.reverse()
temp_ls[size-i-1:] = rest
print("finally temp_ls",temp_ls)
count += 1
print("count:",count)
print("--------------")
break
| true | true |
f7181e6bab2403dd9cc3515a9e46f280c4a1f683 | 4,961 | py | Python | airbyte-integrations/connectors/source-smartsheets/source_smartsheets/source.py | OTRI-Unipd/OTRI-airbyte | 50eeeb773f75246e86c6e167b0cd7d2dda6efe0d | [
"MIT"
] | 2 | 2022-03-02T13:46:05.000Z | 2022-03-05T12:31:28.000Z | airbyte-integrations/connectors/source-smartsheets/source_smartsheets/source.py | OTRI-Unipd/OTRI-airbyte | 50eeeb773f75246e86c6e167b0cd7d2dda6efe0d | [
"MIT"
] | 5 | 2022-02-22T14:49:48.000Z | 2022-03-19T10:43:08.000Z | airbyte-integrations/connectors/source-smartsheets/source_smartsheets/source.py | OTRI-Unipd/OTRI-airbyte | 50eeeb773f75246e86c6e167b0cd7d2dda6efe0d | [
"MIT"
] | 1 | 2022-03-11T06:21:24.000Z | 2022-03-11T06:21:24.000Z | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import json
from datetime import datetime
from typing import Dict, Generator
import smartsheet
from airbyte_cdk import AirbyteLogger
from airbyte_cdk.models import (
AirbyteCatalog,
AirbyteConnectionStatus,
AirbyteMessage,
AirbyteRecordMessage,
AirbyteStream,
ConfiguredAirbyteCatalog,
Status,
Type,
)
# helpers
from airbyte_cdk.sources import Source
def get_prop(col_type: str) -> Dict[str, str]:
    """Map a Smartsheet column type to its JSON-schema property definition.

    Unknown column types fall back to a plain string property. The original
    annotation `Dict[str, any]` referenced the builtin `any` function rather
    than a type; every returned value is a str, so `Dict[str, str]` is exact.
    """
    props = {
        "TEXT_NUMBER": {"type": "string"},
        "DATE": {"type": "string", "format": "date"},
        "DATETIME": {"type": "string", "format": "date-time"},
    }
    return props.get(col_type, {"type": "string"})
def get_json_schema(sheet: Dict) -> Dict:
    """Build a draft-07 JSON schema describing the columns of *sheet*.

    Each column title becomes a property whose definition is derived from
    the column's Smartsheet type via get_prop().
    """
    properties = {}
    for column in sheet["columns"]:
        properties[column["title"]] = get_prop(column["type"])
    return {
        "$schema": "http://json-schema.org/draft-07/schema#",
        "type": "object",
        "properties": properties,
    }
# main class definition
class SourceSmartsheets(Source):
    """Airbyte source connector reading a single Smartsheet spreadsheet."""

    def check(self, logger: AirbyteLogger, config: json) -> AirbyteConnectionStatus:
        # Verify the token and spreadsheet ID by fetching the sheet once.
        try:
            access_token = config["access_token"]
            spreadsheet_id = config["spreadsheet_id"]
            smartsheet_client = smartsheet.Smartsheet(access_token)
            smartsheet_client.errors_as_exceptions(True)
            smartsheet_client.Sheets.get_sheet(spreadsheet_id)
            return AirbyteConnectionStatus(status=Status.SUCCEEDED)
        except Exception as e:
            if isinstance(e, smartsheet.exceptions.ApiError):
                err = e.error.result
                # Smartsheet error code 1006 is surfaced as an HTTP-style 404
                # in the message; other codes pass through unchanged.
                code = 404 if err.code == 1006 else err.code
                reason = f"{err.name}: {code} - {err.message} | Check your spreadsheet ID."
            else:
                reason = str(e)
            logger.error(reason)
            return AirbyteConnectionStatus(status=Status.FAILED)

    def discover(self, logger: AirbyteLogger, config: json) -> AirbyteCatalog:
        # Expose the sheet as a single full-refresh stream whose JSON schema
        # is derived from the sheet's column definitions.
        access_token = config["access_token"]
        spreadsheet_id = config["spreadsheet_id"]
        streams = []
        smartsheet_client = smartsheet.Smartsheet(access_token)
        try:
            sheet = smartsheet_client.Sheets.get_sheet(spreadsheet_id)
            sheet = json.loads(str(sheet))  # make it subscriptable
            sheet_json_schema = get_json_schema(sheet)
            logger.info(f"Running discovery on sheet: {sheet['name']} with {spreadsheet_id}")
            stream = AirbyteStream(name=sheet["name"], json_schema=sheet_json_schema)
            stream.supported_sync_modes = ["full_refresh"]
            streams.append(stream)
        except Exception as e:
            raise Exception(f"Could not run discovery: {str(e)}")
        return AirbyteCatalog(streams=streams)

    def read(
        self, logger: AirbyteLogger, config: json, catalog: ConfiguredAirbyteCatalog, state: Dict[str, any]
    ) -> Generator[AirbyteMessage, None, None]:
        # Emit one RECORD message per sheet row for each configured stream.
        access_token = config["access_token"]
        spreadsheet_id = config["spreadsheet_id"]
        smartsheet_client = smartsheet.Smartsheet(access_token)
        for configured_stream in catalog.streams:
            stream = configured_stream.stream
            properties = stream.json_schema["properties"]
            if isinstance(properties, list):
                columns = tuple(key for dct in properties for key in dct.keys())
            elif isinstance(properties, dict):
                columns = tuple(i for i in properties.keys())
            else:
                # NOTE(review): if properties is neither list nor dict,
                # `columns` stays unbound and the zip below would raise a
                # NameError — confirm this branch is unreachable in practice.
                logger.error("Could not read properties from the JSONschema in this stream")
            name = stream.name
            try:
                sheet = smartsheet_client.Sheets.get_sheet(spreadsheet_id)
                sheet = json.loads(str(sheet))  # make it subscriptable
                logger.info(f"Starting syncing spreadsheet {sheet['name']}")
                logger.info(f"Row count: {sheet['totalRowCount']}")
                for row in sheet["rows"]:
                    # convert all data to string as it is only expected format in schema
                    values = tuple(str(i["value"]) if "value" in i else "" for i in row["cells"])
                    try:
                        data = dict(zip(columns, values))
                        yield AirbyteMessage(
                            type=Type.RECORD,
                            record=AirbyteRecordMessage(stream=name, data=data, emitted_at=int(datetime.now().timestamp()) * 1000),
                        )
                    except Exception as e:
                        logger.error(f"Unable to encode row into an AirbyteMessage with the following error: {e}")
            except Exception as e:
                logger.error(f"Could not read smartsheet: {name}")
                raise e
        logger.info(f"Finished syncing spreadsheet with ID: {spreadsheet_id}")
| 37.583333 | 131 | 0.610966 |
import json
from datetime import datetime
from typing import Dict, Generator
import smartsheet
from airbyte_cdk import AirbyteLogger
from airbyte_cdk.models import (
AirbyteCatalog,
AirbyteConnectionStatus,
AirbyteMessage,
AirbyteRecordMessage,
AirbyteStream,
ConfiguredAirbyteCatalog,
Status,
Type,
)
from airbyte_cdk.sources import Source
def get_prop(col_type: str) -> Dict[str, any]:
props = {
"TEXT_NUMBER": {"type": "string"},
"DATE": {"type": "string", "format": "date"},
"DATETIME": {"type": "string", "format": "date-time"},
}
return props.get(col_type, {"type": "string"})
def get_json_schema(sheet: Dict) -> Dict:
column_info = {i["title"]: get_prop(i["type"]) for i in sheet["columns"]}
json_schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": column_info,
}
return json_schema
class SourceSmartsheets(Source):
def check(self, logger: AirbyteLogger, config: json) -> AirbyteConnectionStatus:
try:
access_token = config["access_token"]
spreadsheet_id = config["spreadsheet_id"]
smartsheet_client = smartsheet.Smartsheet(access_token)
smartsheet_client.errors_as_exceptions(True)
smartsheet_client.Sheets.get_sheet(spreadsheet_id)
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
if isinstance(e, smartsheet.exceptions.ApiError):
err = e.error.result
code = 404 if err.code == 1006 else err.code
reason = f"{err.name}: {code} - {err.message} | Check your spreadsheet ID."
else:
reason = str(e)
logger.error(reason)
return AirbyteConnectionStatus(status=Status.FAILED)
def discover(self, logger: AirbyteLogger, config: json) -> AirbyteCatalog:
access_token = config["access_token"]
spreadsheet_id = config["spreadsheet_id"]
streams = []
smartsheet_client = smartsheet.Smartsheet(access_token)
try:
sheet = smartsheet_client.Sheets.get_sheet(spreadsheet_id)
sheet = json.loads(str(sheet))
sheet_json_schema = get_json_schema(sheet)
logger.info(f"Running discovery on sheet: {sheet['name']} with {spreadsheet_id}")
stream = AirbyteStream(name=sheet["name"], json_schema=sheet_json_schema)
stream.supported_sync_modes = ["full_refresh"]
streams.append(stream)
except Exception as e:
raise Exception(f"Could not run discovery: {str(e)}")
return AirbyteCatalog(streams=streams)
def read(
self, logger: AirbyteLogger, config: json, catalog: ConfiguredAirbyteCatalog, state: Dict[str, any]
) -> Generator[AirbyteMessage, None, None]:
access_token = config["access_token"]
spreadsheet_id = config["spreadsheet_id"]
smartsheet_client = smartsheet.Smartsheet(access_token)
for configured_stream in catalog.streams:
stream = configured_stream.stream
properties = stream.json_schema["properties"]
if isinstance(properties, list):
columns = tuple(key for dct in properties for key in dct.keys())
elif isinstance(properties, dict):
columns = tuple(i for i in properties.keys())
else:
logger.error("Could not read properties from the JSONschema in this stream")
name = stream.name
try:
sheet = smartsheet_client.Sheets.get_sheet(spreadsheet_id)
sheet = json.loads(str(sheet))
logger.info(f"Starting syncing spreadsheet {sheet['name']}")
logger.info(f"Row count: {sheet['totalRowCount']}")
for row in sheet["rows"]:
values = tuple(str(i["value"]) if "value" in i else "" for i in row["cells"])
try:
data = dict(zip(columns, values))
yield AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(stream=name, data=data, emitted_at=int(datetime.now().timestamp()) * 1000),
)
except Exception as e:
logger.error(f"Unable to encode row into an AirbyteMessage with the following error: {e}")
except Exception as e:
logger.error(f"Could not read smartsheet: {name}")
raise e
logger.info(f"Finished syncing spreadsheet with ID: {spreadsheet_id}")
| true | true |
f7181ee86ad8cc7e5af71dcdfa13dd1e97cf1945 | 4,658 | py | Python | python/download-all-data.py | wizzardz/vehicle-statistics-india | a54f84460ce3129d170510ce2c33799008b1a7a6 | [
"Apache-2.0"
] | null | null | null | python/download-all-data.py | wizzardz/vehicle-statistics-india | a54f84460ce3129d170510ce2c33799008b1a7a6 | [
"Apache-2.0"
] | null | null | null | python/download-all-data.py | wizzardz/vehicle-statistics-india | a54f84460ce3129d170510ce2c33799008b1a7a6 | [
"Apache-2.0"
] | null | null | null | import urllib.request
import json
import sys
import os
# URL template for exporting a data.gov.in datastore node as JSON.
url_format = 'https://data.gov.in/node/{0}/datastore/export/json'
# Census years matching, by position, the five node IDs listed per state below.
years = [2011, 2009, 2006, 2004, 2002]
# default data for constructing the urls for each States and union teritories
json_string = json.dumps({
"Data": [{
"Andaman and Nicobar Islands": [89524, 100624, 100681, 100729, 100794]
}, {
"Chandigarh": [89529, 100629, 100682, 100730, 100795]
}, {
"Dadra And Nagar Haveli": [
89531, 100626, 100683, 100731, 100796
]
}, {
"Daman and Diu": [89532, 100627, 100684, 100732, 100797]
}, {
"Delhi": [89533, 100628,
100685, 100733, 100798
]
}, {
"Lakshadweep": [89539, 100629, 100686, 100734, 100799]
}, {
"Puducherry": [89546, 100630, 100687, 100735, 100800]
}, {
"Bihar": [89528, 100599, 100656, 100704, 100769]
}, {
"Chhattisgarh": [
89530, 100600, 100657, 100705, 100770
]
}, {
"Goa": [89534, 100601, 100658, 100706, 100771]
}, {
"Gujarat": [89535, 100602, 100659, 100706, 100772]
}, {
"Haryana": [89536, 100603, 100660, 100708, 100773]
}, {
"Himachal Pradesh": [
89537, 100604, 100661, 100709, 100774
]
}, {
"Jammu and Kashmir": [
89555, 100605, 100662, 100710, 100775
]
}, {
"Jharkhand": [89556, 100606,
100663, 100711, 100776
]
}, {
"Karnataka": [89557, 100607,
100664, 100712, 100777
]
}, {
"Kerala": [89538, 100608, 100665, 100713, 100778]
}, {
"Madhya Pradesh": [
89558, 100609, 100666, 100714, 100779
]
}, {
"Maharashtra": [89540, 100610,
100667, 100715, 100780
]
}, {
"Manipur": [89541, 100611, 100668, 100716, 100781]
}, {
"Meghalaya": [89542, 100612,
100669, 100717, 100782
]
}, {
"Mizoram": [89543, 100613, 100670, 100718, 100783]
}, {
"Nagaland": [89544, 100614, 100671, 100719, 100784]
}, {
"Odisha": [89545, 100615, 100672, 100720, 100785]
}, {
"Punjab": [89547, 100616, 100673, 100721, 100786]
}, {
"Rajasthan": [89548, 100617,
100674, 100722, 100787
]
}, {
"Sikkim": [89549, 100618, 100675, 100723, 100788]
}, {
"Tamil Nadu": [89550, 100619,
100676, 100724, 100789
]
}, {
"Tripura": [89551, 100620, 100677, 100725, 100790]
}, {
"Uttarakhand": [89553, 100621,
100678, 100726, 100791
]
}, {
"Uttar Pradesh": [89552, 100622,
100679, 100727, 100792
]
}, {
"West Bengal": [89554, 100623, 100680, 100728, 100793]
}]
})
# Load the default node-ID table (JSON string defined above).
state_data = json.loads(json_string)

# If a JSON file is supplied on the command line, it overrides the
# built-in table.
if len(sys.argv) > 1:
    with open(sys.argv[1], 'r') as json_file:
        state_data = json.loads(json_file.read())
failed_urls = ''

# Download the JSON export for every (state, year) pair and write it to
# <state>/<year>.json, collecting any failures for a summary report.
for state in state_data["Data"]:
    # Each entry is a single-key dict: {state_name: [node ids, one per year]}.
    state_name = next(iter(state))
    # enumerate keeps years[index] aligned with its node ID even when a
    # download fails. The original only incremented the index inside the
    # try-block, so one failure shifted every later file of that state onto
    # the wrong year.
    for index, identifier in enumerate(state[state_name]):
        url = url_format.format(identifier)
        try:
            with urllib.request.urlopen(url) as response:
                downloaded_data = response.read().decode('utf-8')
            file_name = '{0}/{1}.json'.format(state_name, years[index])
            os.makedirs(os.path.dirname(file_name), exist_ok=True)
            with open(file_name, "w") as output_file:
                output_file.write(downloaded_data)
            print(
                'Downloading completed for {0}-{1}'.format(state_name, str(years[index])))
        except Exception:
            failed_urls += "{0} - {1}\n".format(state_name, url)

# Report any failed downloads so they can be retried manually.
if failed_urls:
    with open("failedurl.txt", 'w') as f:
        f.write(failed_urls)
    print('Failed url details has been written to failedurl.txt')
| 30.246753 | 84 | 0.542078 | import urllib.request
import json
import sys
import os
url_format = 'https://data.gov.in/node/{0}/datastore/export/json'
years = [2011, 2009, 2006, 2004, 2002]
json_string = json.dumps({
"Data": [{
"Andaman and Nicobar Islands": [89524, 100624, 100681, 100729, 100794]
}, {
"Chandigarh": [89529, 100629, 100682, 100730, 100795]
}, {
"Dadra And Nagar Haveli": [
89531, 100626, 100683, 100731, 100796
]
}, {
"Daman and Diu": [89532, 100627, 100684, 100732, 100797]
}, {
"Delhi": [89533, 100628,
100685, 100733, 100798
]
}, {
"Lakshadweep": [89539, 100629, 100686, 100734, 100799]
}, {
"Puducherry": [89546, 100630, 100687, 100735, 100800]
}, {
"Bihar": [89528, 100599, 100656, 100704, 100769]
}, {
"Chhattisgarh": [
89530, 100600, 100657, 100705, 100770
]
}, {
"Goa": [89534, 100601, 100658, 100706, 100771]
}, {
"Gujarat": [89535, 100602, 100659, 100706, 100772]
}, {
"Haryana": [89536, 100603, 100660, 100708, 100773]
}, {
"Himachal Pradesh": [
89537, 100604, 100661, 100709, 100774
]
}, {
"Jammu and Kashmir": [
89555, 100605, 100662, 100710, 100775
]
}, {
"Jharkhand": [89556, 100606,
100663, 100711, 100776
]
}, {
"Karnataka": [89557, 100607,
100664, 100712, 100777
]
}, {
"Kerala": [89538, 100608, 100665, 100713, 100778]
}, {
"Madhya Pradesh": [
89558, 100609, 100666, 100714, 100779
]
}, {
"Maharashtra": [89540, 100610,
100667, 100715, 100780
]
}, {
"Manipur": [89541, 100611, 100668, 100716, 100781]
}, {
"Meghalaya": [89542, 100612,
100669, 100717, 100782
]
}, {
"Mizoram": [89543, 100613, 100670, 100718, 100783]
}, {
"Nagaland": [89544, 100614, 100671, 100719, 100784]
}, {
"Odisha": [89545, 100615, 100672, 100720, 100785]
}, {
"Punjab": [89547, 100616, 100673, 100721, 100786]
}, {
"Rajasthan": [89548, 100617,
100674, 100722, 100787
]
}, {
"Sikkim": [89549, 100618, 100675, 100723, 100788]
}, {
"Tamil Nadu": [89550, 100619,
100676, 100724, 100789
]
}, {
"Tripura": [89551, 100620, 100677, 100725, 100790]
}, {
"Uttarakhand": [89553, 100621,
100678, 100726, 100791
]
}, {
"Uttar Pradesh": [89552, 100622,
100679, 100727, 100792
]
}, {
"West Bengal": [89554, 100623, 100680, 100728, 100793]
}]
})
state_data = json.loads(json_string)
if len(sys.argv) > 1:
with open(sys.argv[1], 'r') as json_file:
state_data = json.loads(json_file.read())
failed_urls = ''
for state in state_data["Data"]:
state_name = ''
for key in state.keys():
state_name = key
index = 0
for identifer in state[state_name]:
url = url_format.format(identifer)
try:
downloaded_data = ''
with urllib.request.urlopen(url) as response:
downloaded_data = response.read().decode('utf-8')
fille_name = '{0}/{1}.json'.format(state_name, years[index])
os.makedirs(os.path.dirname(fille_name), exist_ok=True)
with open(fille_name, "w") as output_file:
output_file.write(downloaded_data)
print(
'Downloading completed for {0}-{1}'.format(state_name, str(years[index])))
index += 1
except Exception as e:
failed_urls += "{0} - {1}\n".format(state_name, url)
if len(failed_urls) > 0:
with open("failedurl.txt", 'w') as f:
f.write(failed_urls)
print('Failed url details has been written to failedurl.txt')
| true | true |
f71820aea4c4ecfde9e0adb936a156185abd7e94 | 10,068 | py | Python | constph/gromos_factory.py | bbraunsfeld/const_pH_gromos | 6ef02da6fc0f451aa0082b726926c6fccabf324b | [
"MIT"
] | null | null | null | constph/gromos_factory.py | bbraunsfeld/const_pH_gromos | 6ef02da6fc0f451aa0082b726926c6fccabf324b | [
"MIT"
] | 1 | 2021-09-17T18:17:39.000Z | 2021-09-17T18:17:39.000Z | constph/gromos_factory.py | bbraunsfeld/const_pH_gromos | 6ef02da6fc0f451aa0082b726926c6fccabf324b | [
"MIT"
] | null | null | null | import datetime
from os import stat
class GromosFactory:
"""Class to build the string needed to create a Gromos input file (*.imd), a make_script fiel (*.arg) and a job file (*.job)"""
def __init__(self, configuration: dict, structure: str) -> None:
    # configuration: nested dict with "search_run"/"production_run" sections
    #                read by the body-generating methods.
    # structure: structure identifier/path; stored but not referenced by the
    #            methods visible here — presumably used elsewhere (confirm).
    self.configuration = configuration
    self.structure = structure
def _get_search_run_parameters(self):
    """Return a shallow copy of the search-run parameter mapping."""
    section = self.configuration["search_run"]["search_parameters"]
    return {name: section[name] for name in section}
def _get_production_run_parameters(self):
    """Return a shallow copy of the production-run parameter mapping."""
    section = self.configuration["production_run"]["production_parameters"]
    return {name: section[name] for name in section}
def generate_Gromos_search_input(self, env: str) -> str:
    """Return the complete Gromos *.imd text for a search run.

    Raises NotImplementedError for any env other than "search".
    """
    if env != "search":
        raise NotImplementedError(f"Something went wrong with {env} input.")
    return self._get_Gromos_input_header(env) + self._get_Gromos_search_body()
def generate_Gromos_production_input(self, env: str) -> str:
    """Return the complete Gromos *.imd text for a production run.

    Raises NotImplementedError for any env other than "production".
    """
    if env != "production":
        raise NotImplementedError(f"Something went wrong with {env} input.")
    return self._get_Gromos_input_header(env) + self._get_Gromos_production_body()
def _get_Gromos_input_header(self, env: str) -> str:
    """Build the TITLE block naming the run type and today's date."""
    date = datetime.date.today()
    header = f"""TITLE
Automatically generated input file for {env} run with constph
Version {date}
END
"""
    return header
def _get_Gromos_search_body(self) -> str:
    """Assemble the parameter blocks of a search-run *.imd from the configuration.

    All values are pulled from configuration["search_run"]["search_parameters"]
    and interpolated into a fixed Gromos block template.
    """
    NSM = self.configuration["search_run"]["search_parameters"]["NSM"]
    NSTLIM = self.configuration["search_run"]["search_parameters"]["NSTLIM"]
    DT = self.configuration["search_run"]["search_parameters"]["dt"]
    ATMNR1 = self.configuration["search_run"]["search_parameters"]["ATMNR1"]
    ATMNR2 = self.configuration["search_run"]["search_parameters"]["ATMNR2"]
    NTWX = self.configuration["search_run"]["search_parameters"]["NTWX"]
    NTWE = self.configuration["search_run"]["search_parameters"]["NTWE"]
    # FORM is hard-coded to "4" here rather than read from the configuration.
    FORM = "4"
    NSTATES = self.configuration["search_run"]["search_parameters"]["NSTATES"]
    # One zero EIR offset per end state.
    OFFSETS = "0 " * int(NSTATES)
    SIGMA = self.configuration["search_run"]["search_parameters"]["sigma"]
    ASTEPS = self.configuration["search_run"]["search_parameters"]["asteps"]
    BSTEPS = self.configuration["search_run"]["search_parameters"]["bsteps"]
    # NOTE(review): in the AEDS block below, {SIGMA} fills the BMAXTYPE
    # column and the literal 2 fills BMAX — confirm that column order is
    # intended and not swapped.
    body = f"""SYSTEM
# NPM NSM
1 {NSM}
END
STEP
# NSTLIM T DT
{NSTLIM} 0 {DT}
END
BOUNDCOND
# NTB NDFMIN
1 3
END
MULTIBATH
# NTBTYP:
# weak-coupling: use weak-coupling scheme
# nose-hoover: use Nose Hoover scheme
# nose-hoover-chains: use Nose Hoover chains scheme
# NUM: number of chains in Nose Hoover chains scheme
# !! only specify NUM when needed !!
# NBATHS: number of temperature baths to couple to
# NTBTYP
0
# NBATHS
2
# TEMP0(1 ... NBATHS) TAU(1 ... NBATHS)
300 0.1
300 0.1
# DOFSET: number of distinguishable sets of d.o.f.
2
# LAST(1 ... DOFSET) COMBATH(1 ... DOFSET) IRBATH(1 ... DOFSET)
{ATMNR1} 1 1 {ATMNR2} 2 2
END
PRESSURESCALE
# COUPLE SCALE COMP TAUP VIRIAL
2 1 0.0007624 0.5 2
# SEMIANISOTROPIC COUPLINGS(X, Y, Z)
1 1 2
# PRES0(1...3,1...3)
0.06102 0 0
0 0.06102 0
0 0 0.06102
END
FORCE
# NTF array
# bonds angles imp. dihe charge nonbonded
0 1 1 1 1 1
# NEGR NRE(1) NRE(2) ... NRE(NEGR)
2
{ATMNR1} {ATMNR2}
END
COVALENTFORM
# NTBBH NTBAH NTBDN
0 0 0
END
CONSTRAINT
# NTC
3
# NTCP NTCP0(1)
1 0.0001
# NTCS NTCS0(1)
1 0.0001
END
PAIRLIST
# algorithm NSNB RCUTP RCUTL SIZE TYPE
1 5 0.8 1.4 0.4 0
END
NONBONDED
# NLRELE
1
# APPAK RCRF EPSRF NSLFEXCL
0 1.4 78.5 1
# NSHAPE ASHAPE NA2CLC TOLA2 EPSLS
3 1.4 2 1e-10 0
# NKX NKY NKZ KCUT
10 10 10 100
# NGX NGY NGZ NASORD NFDORD NALIAS NSPORD
32 32 32 3 2 3 4
# NQEVAL FACCUR NRDGRD NWRGRD
100000 1.6 0 0
# NLRLJ SLVDNS
0 33.3
END
INITIALISE
# Default values for NTI values: 0
# NTIVEL NTISHK NTINHT NTINHB
0 0 0 0
# NTISHI NTIRTC NTICOM
0 0 0
# NTISTI
0
# IG TEMPI
210185 0
END
COMTRANSROT
# NSCM
1000
END
PRINTOUT
#NTPR: print out energies, etc. every NTPR steps
#NTPP: =1 perform dihedral angle transition monitoring
# NTPR NTPP
500 0
END
WRITETRAJ
# NTWX NTWSE NTWV NTWF NTWE NTWG NTWB
{NTWX} 0 0 0 {NTWE} 0 0
END
AEDS
# AEDS
1
# ALPHLJ ALPHCRF FORM NUMSTATES
0 0 {FORM} {NSTATES}
# EMAX EMIN
0 0
# EIR [1..NUMSTATES]
{OFFSETS}
# NTIAEDSS RESTREMIN BMAXTYPE BMAX ASTEPS BSTEPS
1 1 {SIGMA} 2 {ASTEPS} {BSTEPS}
END"""
    return body
def _get_Gromos_production_body(self) -> str:
NSM = self.configuration["production_run"]["_parameters"]["NSM"]
NSTLIM = self.configuration["production_run"]["production_parameters"]["NSTLIM"]
DT = self.configuration["production_run"]["production_parameters"]["dt"]
ATMNR1 = self.configuration["production_run"]["production_parameters"]["ATMNR1"]
ATMNR2 = self.configuration["production_run"]["production_parameters"]["ATMNR2"]
NTWX = self.configuration["production_run"]["production_parameters"]["NTWX"]
NTWE = self.configuration["production_run"]["production_parameters"]["NTWE"]
FORM = "4"
NSTATES = self.configuration["production_run"]["production_parameters"]["NSTATES"]
OFFSETS = "0 {new_offset}"
SIGMA = self.configuration["production_run"]["production_parameters"]["sigma"]
EMIN = "found in search"
EMAX = "found in search"
body = f"""SYSTEM
# NPM NSM
1 {NSM}
END
STEP
# NSTLIM T DT
{NSTLIM} 0 {DT}
END
BOUNDCOND
# NTB NDFMIN
1 3
END
MULTIBATH
# NTBTYP:
# weak-coupling: use weak-coupling scheme
# nose-hoover: use Nose Hoover scheme
# nose-hoover-chains: use Nose Hoover chains scheme
# NUM: number of chains in Nose Hoover chains scheme
# !! only specify NUM when needed !!
# NBATHS: number of temperature baths to couple to
# NTBTYP
0
# NBATHS
2
# TEMP0(1 ... NBATHS) TAU(1 ... NBATHS)
300 0.1
300 0.1
# DOFSET: number of distinguishable sets of d.o.f.
2
# LAST(1 ... DOFSET) COMBATH(1 ... DOFSET) IRBATH(1 ... DOFSET)
{ATMNR1} 1 1 {ATMNR2} 2 2
END
PRESSURESCALE
# COUPLE SCALE COMP TAUP VIRIAL
2 1 0.0007624 0.5 2
# SEMIANISOTROPIC COUPLINGS(X, Y, Z)
1 1 2
# PRES0(1...3,1...3)
0.06102 0 0
0 0.06102 0
0 0 0.06102
END
FORCE
# NTF array
# bonds angles imp. dihe charge nonbonded
0 1 1 1 1 1
# NEGR NRE(1) NRE(2) ... NRE(NEGR)
2
{ATMNR1} {ATMNR2}
END
COVALENTFORM
# NTBBH NTBAH NTBDN
0 0 0
END
CONSTRAINT
# NTC
3
# NTCP NTCP0(1)
1 0.0001
# NTCS NTCS0(1)
1 0.0001
END
PAIRLIST
# algorithm NSNB RCUTP RCUTL SIZE TYPE
1 5 0.8 1.4 0.4 0
END
NONBONDED
# NLRELE
1
# APPAK RCRF EPSRF NSLFEXCL
0 1.4 78.5 1
# NSHAPE ASHAPE NA2CLC TOLA2 EPSLS
3 1.4 2 1e-10 0
# NKX NKY NKZ KCUT
10 10 10 100
# NGX NGY NGZ NASORD NFDORD NALIAS NSPORD
32 32 32 3 2 3 4
# NQEVAL FACCUR NRDGRD NWRGRD
100000 1.6 0 0
# NLRLJ SLVDNS
0 33.3
END
INITIALISE
# Default values for NTI values: 0
# NTIVEL NTISHK NTINHT NTINHB
0 0 0 0
# NTISHI NTIRTC NTICOM
0 0 0
# NTISTI
0
# IG TEMPI
210185 0
END
COMTRANSROT
# NSCM
1000
END
PRINTOUT
#NTPR: print out energies, etc. every NTPR steps
#NTPP: =1 perform dihedral angle transition monitoring
# NTPR NTPP
500 0
END
WRITETRAJ
# NTWX NTWSE NTWV NTWF NTWE NTWG NTWB
{NTWX} 0 0 0 {NTWE} 0 0
END
AEDS
# AEDS
1
# ALPHLJ ALPHCRF FORM NUMSTATES
0 0 {FORM} {NSTATES}
# EMAX EMIN
{EMAX} {EMIN}
# EIR [1..NUMSTATES]
{OFFSETS}
# NTIAEDSS RESTREMIN BMAXTYPE BMAX ASTEPS BSTEPS
1 1 {SIGMA} 2 0 0
END"""
return body
| 29.786982 | 131 | 0.527712 | import datetime
from os import stat
class GromosFactory:
def __init__(self, configuration: dict, structure: str) -> None:
self.configuration = configuration
self.structure = structure
def _get_search_run_parameters(self):
prms = {}
for key in self.configuration["search_run"]["search_parameters"]:
prms[key] = self.configuration["search_run"]["search_parameters"][key]
return prms
def _get_production_run_parameters(self):
prms = {}
for key in self.configuration["production_run"]["production_parameters"]:
prms[key] = self.configuration["production_run"]["production_parameters"][key]
return prms
def generate_Gromos_search_input(self, env: str) -> str:
gromos_search_script = self._get_Gromos_input_header(env)
if env == "search":
gromos_search_script += (
self._get_Gromos_search_body()
)
else:
raise NotImplementedError(f"Something went wrong with {env} input.")
return gromos_search_script
def generate_Gromos_production_input(self, env: str) -> str:
gromos_search_script = self._get_Gromos_input_header(env)
if env == "production":
gromos_search_script += (
self._get_Gromos_production_body()
)
else:
raise NotImplementedError(f"Something went wrong with {env} input.")
return gromos_search_script
def _get_Gromos_input_header(self, env: str) -> str:
date = datetime.date.today()
header = f"""TITLE
Automatically generated input file for {env} run with constph
Version {date}
END
"""
return header
def _get_Gromos_search_body(self) -> str:
NSM = self.configuration["search_run"]["search_parameters"]["NSM"]
NSTLIM = self.configuration["search_run"]["search_parameters"]["NSTLIM"]
DT = self.configuration["search_run"]["search_parameters"]["dt"]
ATMNR1 = self.configuration["search_run"]["search_parameters"]["ATMNR1"]
ATMNR2 = self.configuration["search_run"]["search_parameters"]["ATMNR2"]
NTWX = self.configuration["search_run"]["search_parameters"]["NTWX"]
NTWE = self.configuration["search_run"]["search_parameters"]["NTWE"]
FORM = "4"
NSTATES = self.configuration["search_run"]["search_parameters"]["NSTATES"]
OFFSETS = "0 " * int(NSTATES)
SIGMA = self.configuration["search_run"]["search_parameters"]["sigma"]
ASTEPS = self.configuration["search_run"]["search_parameters"]["asteps"]
BSTEPS = self.configuration["search_run"]["search_parameters"]["bsteps"]
body = f"""SYSTEM
# NPM NSM
1 {NSM}
END
STEP
# NSTLIM T DT
{NSTLIM} 0 {DT}
END
BOUNDCOND
# NTB NDFMIN
1 3
END
MULTIBATH
# NTBTYP:
# weak-coupling: use weak-coupling scheme
# nose-hoover: use Nose Hoover scheme
# nose-hoover-chains: use Nose Hoover chains scheme
# NUM: number of chains in Nose Hoover chains scheme
# !! only specify NUM when needed !!
# NBATHS: number of temperature baths to couple to
# NTBTYP
0
# NBATHS
2
# TEMP0(1 ... NBATHS) TAU(1 ... NBATHS)
300 0.1
300 0.1
# DOFSET: number of distinguishable sets of d.o.f.
2
# LAST(1 ... DOFSET) COMBATH(1 ... DOFSET) IRBATH(1 ... DOFSET)
{ATMNR1} 1 1 {ATMNR2} 2 2
END
PRESSURESCALE
# COUPLE SCALE COMP TAUP VIRIAL
2 1 0.0007624 0.5 2
# SEMIANISOTROPIC COUPLINGS(X, Y, Z)
1 1 2
# PRES0(1...3,1...3)
0.06102 0 0
0 0.06102 0
0 0 0.06102
END
FORCE
# NTF array
# bonds angles imp. dihe charge nonbonded
0 1 1 1 1 1
# NEGR NRE(1) NRE(2) ... NRE(NEGR)
2
{ATMNR1} {ATMNR2}
END
COVALENTFORM
# NTBBH NTBAH NTBDN
0 0 0
END
CONSTRAINT
# NTC
3
# NTCP NTCP0(1)
1 0.0001
# NTCS NTCS0(1)
1 0.0001
END
PAIRLIST
# algorithm NSNB RCUTP RCUTL SIZE TYPE
1 5 0.8 1.4 0.4 0
END
NONBONDED
# NLRELE
1
# APPAK RCRF EPSRF NSLFEXCL
0 1.4 78.5 1
# NSHAPE ASHAPE NA2CLC TOLA2 EPSLS
3 1.4 2 1e-10 0
# NKX NKY NKZ KCUT
10 10 10 100
# NGX NGY NGZ NASORD NFDORD NALIAS NSPORD
32 32 32 3 2 3 4
# NQEVAL FACCUR NRDGRD NWRGRD
100000 1.6 0 0
# NLRLJ SLVDNS
0 33.3
END
INITIALISE
# Default values for NTI values: 0
# NTIVEL NTISHK NTINHT NTINHB
0 0 0 0
# NTISHI NTIRTC NTICOM
0 0 0
# NTISTI
0
# IG TEMPI
210185 0
END
COMTRANSROT
# NSCM
1000
END
PRINTOUT
#NTPR: print out energies, etc. every NTPR steps
#NTPP: =1 perform dihedral angle transition monitoring
# NTPR NTPP
500 0
END
WRITETRAJ
# NTWX NTWSE NTWV NTWF NTWE NTWG NTWB
{NTWX} 0 0 0 {NTWE} 0 0
END
AEDS
# AEDS
1
# ALPHLJ ALPHCRF FORM NUMSTATES
0 0 {FORM} {NSTATES}
# EMAX EMIN
0 0
# EIR [1..NUMSTATES]
{OFFSETS}
# NTIAEDSS RESTREMIN BMAXTYPE BMAX ASTEPS BSTEPS
1 1 {SIGMA} 2 {ASTEPS} {BSTEPS}
END"""
return body
def _get_Gromos_production_body(self) -> str:
NSM = self.configuration["production_run"]["_parameters"]["NSM"]
NSTLIM = self.configuration["production_run"]["production_parameters"]["NSTLIM"]
DT = self.configuration["production_run"]["production_parameters"]["dt"]
ATMNR1 = self.configuration["production_run"]["production_parameters"]["ATMNR1"]
ATMNR2 = self.configuration["production_run"]["production_parameters"]["ATMNR2"]
NTWX = self.configuration["production_run"]["production_parameters"]["NTWX"]
NTWE = self.configuration["production_run"]["production_parameters"]["NTWE"]
FORM = "4"
NSTATES = self.configuration["production_run"]["production_parameters"]["NSTATES"]
OFFSETS = "0 {new_offset}"
SIGMA = self.configuration["production_run"]["production_parameters"]["sigma"]
EMIN = "found in search"
EMAX = "found in search"
body = f"""SYSTEM
# NPM NSM
1 {NSM}
END
STEP
# NSTLIM T DT
{NSTLIM} 0 {DT}
END
BOUNDCOND
# NTB NDFMIN
1 3
END
MULTIBATH
# NTBTYP:
# weak-coupling: use weak-coupling scheme
# nose-hoover: use Nose Hoover scheme
# nose-hoover-chains: use Nose Hoover chains scheme
# NUM: number of chains in Nose Hoover chains scheme
# !! only specify NUM when needed !!
# NBATHS: number of temperature baths to couple to
# NTBTYP
0
# NBATHS
2
# TEMP0(1 ... NBATHS) TAU(1 ... NBATHS)
300 0.1
300 0.1
# DOFSET: number of distinguishable sets of d.o.f.
2
# LAST(1 ... DOFSET) COMBATH(1 ... DOFSET) IRBATH(1 ... DOFSET)
{ATMNR1} 1 1 {ATMNR2} 2 2
END
PRESSURESCALE
# COUPLE SCALE COMP TAUP VIRIAL
2 1 0.0007624 0.5 2
# SEMIANISOTROPIC COUPLINGS(X, Y, Z)
1 1 2
# PRES0(1...3,1...3)
0.06102 0 0
0 0.06102 0
0 0 0.06102
END
FORCE
# NTF array
# bonds angles imp. dihe charge nonbonded
0 1 1 1 1 1
# NEGR NRE(1) NRE(2) ... NRE(NEGR)
2
{ATMNR1} {ATMNR2}
END
COVALENTFORM
# NTBBH NTBAH NTBDN
0 0 0
END
CONSTRAINT
# NTC
3
# NTCP NTCP0(1)
1 0.0001
# NTCS NTCS0(1)
1 0.0001
END
PAIRLIST
# algorithm NSNB RCUTP RCUTL SIZE TYPE
1 5 0.8 1.4 0.4 0
END
NONBONDED
# NLRELE
1
# APPAK RCRF EPSRF NSLFEXCL
0 1.4 78.5 1
# NSHAPE ASHAPE NA2CLC TOLA2 EPSLS
3 1.4 2 1e-10 0
# NKX NKY NKZ KCUT
10 10 10 100
# NGX NGY NGZ NASORD NFDORD NALIAS NSPORD
32 32 32 3 2 3 4
# NQEVAL FACCUR NRDGRD NWRGRD
100000 1.6 0 0
# NLRLJ SLVDNS
0 33.3
END
INITIALISE
# Default values for NTI values: 0
# NTIVEL NTISHK NTINHT NTINHB
0 0 0 0
# NTISHI NTIRTC NTICOM
0 0 0
# NTISTI
0
# IG TEMPI
210185 0
END
COMTRANSROT
# NSCM
1000
END
PRINTOUT
#NTPR: print out energies, etc. every NTPR steps
#NTPP: =1 perform dihedral angle transition monitoring
# NTPR NTPP
500 0
END
WRITETRAJ
# NTWX NTWSE NTWV NTWF NTWE NTWG NTWB
{NTWX} 0 0 0 {NTWE} 0 0
END
AEDS
# AEDS
1
# ALPHLJ ALPHCRF FORM NUMSTATES
0 0 {FORM} {NSTATES}
# EMAX EMIN
{EMAX} {EMIN}
# EIR [1..NUMSTATES]
{OFFSETS}
# NTIAEDSS RESTREMIN BMAXTYPE BMAX ASTEPS BSTEPS
1 1 {SIGMA} 2 0 0
END"""
return body
| true | true |
f718217d51a3402d72204f81cd749070c51ae9c6 | 387 | py | Python | borsa/asgi.py | bozcani/borsa-scraper-app | 56c767a9b6d6c9be40046aa03763f13465860f6f | [
"MIT"
] | 3 | 2020-02-06T10:05:29.000Z | 2020-04-18T10:11:37.000Z | borsa/asgi.py | bozcani/borsa | 56c767a9b6d6c9be40046aa03763f13465860f6f | [
"MIT"
] | 10 | 2020-02-06T08:50:13.000Z | 2020-04-25T12:17:17.000Z | borsa/asgi.py | bozcani/borsa-scraper-app | 56c767a9b6d6c9be40046aa03763f13465860f6f | [
"MIT"
] | 1 | 2020-02-06T07:40:06.000Z | 2020-02-06T07:40:06.000Z | """
ASGI config for borsa project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'borsa.settings')
application = get_asgi_application()
| 22.764706 | 78 | 0.782946 |
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'borsa.settings')
application = get_asgi_application()
| true | true |
f71821ed4d2b1e66e27c2cefdf134e9907e7d2b1 | 8,869 | py | Python | src/python/main/segeval/ml/PercentageTest.py | anna-ka/segmentation.evaluation | b7eddc9067fc773f3d040dd5eef33dabac07abc0 | [
"BSD-3-Clause"
] | 1 | 2017-05-09T06:16:58.000Z | 2017-05-09T06:16:58.000Z | src/python/main/segeval/ml/PercentageTest.py | anna-ka/segmentation.evaluation | b7eddc9067fc773f3d040dd5eef33dabac07abc0 | [
"BSD-3-Clause"
] | null | null | null | src/python/main/segeval/ml/PercentageTest.py | anna-ka/segmentation.evaluation | b7eddc9067fc773f3d040dd5eef33dabac07abc0 | [
"BSD-3-Clause"
] | null | null | null | '''
Tests the WindowDiff evaluation metric.
.. moduleauthor:: Chris Fournier <chris.m.fournier@gmail.com>
'''
#===============================================================================
# Copyright (c) 2011-2012, Chris Fournier
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import unittest
from decimal import Decimal
from .Percentage import percentage, pairwise_percentage, \
find_boundary_position_freqs
from ..data.Samples import KAZANTSEVA2012_G5, KAZANTSEVA2012_G2, \
COMPLETE_AGREEMENT, LARGE_DISAGREEMENT
from .. import convert_positions_to_masses
class TestPercentage(unittest.TestCase):
'''
Test segmentation percentage.
'''
# pylint: disable=R0904
def test_identical(self):
'''
Test whether identical segmentations produce 1.0.
'''
# pylint: disable=C0324
segs_a = convert_positions_to_masses(
[1,1,1,1,1,2,2,2,3,3,3,3,3])
segs_b = convert_positions_to_masses(
[1,1,1,1,1,2,2,2,3,3,3,3,3])
self.assertEqual(percentage(segs_a, segs_b),1.0)
def test_no_boundaries(self):
'''
Test whether no segments versus some segments produce 0.0.
'''
# pylint: disable=C0324
segs_a = convert_positions_to_masses(
[1,1,1,1,1,1,1,1,1,1,1,1,1])
segs_b = convert_positions_to_masses(
[1,1,1,1,2,2,2,2,3,3,3,3,3])
self.assertEqual(percentage(segs_a, segs_b),0)
self.assertEqual(percentage(segs_b, segs_a),0)
def test_all_boundaries(self):
'''
Test whether all segments versus some segments produces 2/12, or 0.167.
'''
# pylint: disable=C0324
segs_a = convert_positions_to_masses(
[1,2,3,4,5,6,7,8,9,10,11,12,13])
segs_b = convert_positions_to_masses(
[1,1,1,1,2,2,2,2,3,3,3,3,3])
self.assertEqual(percentage(segs_a, segs_b),
Decimal('0.1666666666666666666666666667'))
self.assertEqual(percentage(segs_b, segs_a),
Decimal('0.1666666666666666666666666667'))
def test_all_and_no_boundaries(self):
'''
Test whether all segments versus no segments produces 0.0.
'''
# pylint: disable=C0324
segs_a = convert_positions_to_masses(
[1,2,3,4,5,6,7,8,9,10,11,12,13])
segs_b = convert_positions_to_masses(
[1,1,1,1,1,1,1,1,1,1,1,1,1])
self.assertEqual(percentage(segs_a, segs_b),0)
self.assertEqual(percentage(segs_b, segs_a),0)
def test_translated_boundary(self):
'''
Test whether 2/3 total segments participate in mis-alignment produces
0.33.
'''
# pylint: disable=C0324
segs_a = convert_positions_to_masses(
[1,1,1,1,1,2,2,2,3,3,3,3,3])
segs_b = convert_positions_to_masses(
[1,1,1,1,2,2,2,2,3,3,3,3,3])
self.assertEqual(percentage(segs_a, segs_b),
Decimal('0.3333333333333333333333333333'))
self.assertEqual(percentage(segs_b, segs_a),
Decimal('0.3333333333333333333333333333'))
def test_extra_boundary(self):
'''
Test whether 1/3 segments that are non-existent produces 0.66.
'''
# pylint: disable=C0324
segs_a = convert_positions_to_masses(
[1,1,1,1,1,2,2,2,3,3,3,3,3])
segs_b = convert_positions_to_masses(
[1,1,1,1,1,2,3,3,4,4,4,4,4])
self.assertEqual(percentage(segs_a, segs_b),
Decimal('0.6666666666666666666666666667'))
self.assertEqual(percentage(segs_b, segs_a),
Decimal('0.6666666666666666666666666667'))
def test_full_miss_and_misaligned(self):
'''
Test whether a full miss and a translated boundary out of 4 produces
0.25.
'''
# pylint: disable=C0324
segs_a = convert_positions_to_masses(
[1,1,1,1,2,2,2,2,3,3,3,3,3])
segs_b = convert_positions_to_masses(
[1,1,1,1,1,2,3,3,4,4,4,4,4])
self.assertEqual(percentage(segs_a, segs_b), Decimal('0.25'))
self.assertEqual(percentage(segs_b, segs_a), Decimal('0.25'))
class TestPairwisePercentage(unittest.TestCase):
# pylint: disable=R0904
'''
Test permuted pairwise percentage.
'''
def test_kazantseva2012_g5(self):
'''
Calculate permuted pairwise percentage on Group 5 from the dataset
collected in Kazantseva (2012).
'''
self.assertEqual(pairwise_percentage(KAZANTSEVA2012_G5),
(Decimal('0.1621263635243898401793138635'),
Decimal('0.1788409781886208812486660585'),
Decimal('0.03198409547946276978304443503'),
Decimal('0.03650576180519474391025947712')))
def test_kazantseva2012_g2(self):
'''
Calculate mean permuted pairwise percentage on Group 2 from the dataset
collected in Kazantseva (2012).
'''
self.assertEqual(pairwise_percentage(KAZANTSEVA2012_G2),
(Decimal('0.3398087832646656176067940768'),
Decimal('0.1948481072924021072633034332'),
Decimal('0.03796578491543144325163024138'),
Decimal('0.02515478248611697670879150623')))
def test_large_disagreement(self):
'''
Calculate mean permuted pairwise percentage on a theoretical dataset
containing large disagreement.
'''
self.assertEqual(pairwise_percentage(LARGE_DISAGREEMENT),
(0.0,
0.0,
0.0,
0.0))
def test_complete_agreement(self):
'''
Calculate mean permuted pairwise percentage on a theoretical dataset
containing complete agreement.
'''
self.assertEqual(pairwise_percentage(COMPLETE_AGREEMENT),
(1.0,
0.0,
0.0,
0.0))
class TestPercentageUtils(unittest.TestCase):
# pylint: disable=R0904
'''
Test utility functions used to calculate percentage.
'''
def test_find_seg_positions(self):
'''
Test segmentation position frequency counting.
'''
# pylint: disable=C0324
seg_positions = find_boundary_position_freqs([[1,2,3,3,2,1],
[1,2,2,4,2,1]])
self.assertEqual(seg_positions, { 1: 2,
3: 2,
5: 1,
6: 1,
9: 2,
11: 2})
| 42.033175 | 80 | 0.56658 |
import unittest
from decimal import Decimal
from .Percentage import percentage, pairwise_percentage, \
find_boundary_position_freqs
from ..data.Samples import KAZANTSEVA2012_G5, KAZANTSEVA2012_G2, \
COMPLETE_AGREEMENT, LARGE_DISAGREEMENT
from .. import convert_positions_to_masses
class TestPercentage(unittest.TestCase):
def test_identical(self):
segs_a = convert_positions_to_masses(
[1,1,1,1,1,2,2,2,3,3,3,3,3])
segs_b = convert_positions_to_masses(
[1,1,1,1,1,2,2,2,3,3,3,3,3])
self.assertEqual(percentage(segs_a, segs_b),1.0)
def test_no_boundaries(self):
segs_a = convert_positions_to_masses(
[1,1,1,1,1,1,1,1,1,1,1,1,1])
segs_b = convert_positions_to_masses(
[1,1,1,1,2,2,2,2,3,3,3,3,3])
self.assertEqual(percentage(segs_a, segs_b),0)
self.assertEqual(percentage(segs_b, segs_a),0)
def test_all_boundaries(self):
segs_a = convert_positions_to_masses(
[1,2,3,4,5,6,7,8,9,10,11,12,13])
segs_b = convert_positions_to_masses(
[1,1,1,1,2,2,2,2,3,3,3,3,3])
self.assertEqual(percentage(segs_a, segs_b),
Decimal('0.1666666666666666666666666667'))
self.assertEqual(percentage(segs_b, segs_a),
Decimal('0.1666666666666666666666666667'))
def test_all_and_no_boundaries(self):
segs_a = convert_positions_to_masses(
[1,2,3,4,5,6,7,8,9,10,11,12,13])
segs_b = convert_positions_to_masses(
[1,1,1,1,1,1,1,1,1,1,1,1,1])
self.assertEqual(percentage(segs_a, segs_b),0)
self.assertEqual(percentage(segs_b, segs_a),0)
def test_translated_boundary(self):
segs_a = convert_positions_to_masses(
[1,1,1,1,1,2,2,2,3,3,3,3,3])
segs_b = convert_positions_to_masses(
[1,1,1,1,2,2,2,2,3,3,3,3,3])
self.assertEqual(percentage(segs_a, segs_b),
Decimal('0.3333333333333333333333333333'))
self.assertEqual(percentage(segs_b, segs_a),
Decimal('0.3333333333333333333333333333'))
def test_extra_boundary(self):
segs_a = convert_positions_to_masses(
[1,1,1,1,1,2,2,2,3,3,3,3,3])
segs_b = convert_positions_to_masses(
[1,1,1,1,1,2,3,3,4,4,4,4,4])
self.assertEqual(percentage(segs_a, segs_b),
Decimal('0.6666666666666666666666666667'))
self.assertEqual(percentage(segs_b, segs_a),
Decimal('0.6666666666666666666666666667'))
def test_full_miss_and_misaligned(self):
segs_a = convert_positions_to_masses(
[1,1,1,1,2,2,2,2,3,3,3,3,3])
segs_b = convert_positions_to_masses(
[1,1,1,1,1,2,3,3,4,4,4,4,4])
self.assertEqual(percentage(segs_a, segs_b), Decimal('0.25'))
self.assertEqual(percentage(segs_b, segs_a), Decimal('0.25'))
class TestPairwisePercentage(unittest.TestCase):
def test_kazantseva2012_g5(self):
self.assertEqual(pairwise_percentage(KAZANTSEVA2012_G5),
(Decimal('0.1621263635243898401793138635'),
Decimal('0.1788409781886208812486660585'),
Decimal('0.03198409547946276978304443503'),
Decimal('0.03650576180519474391025947712')))
def test_kazantseva2012_g2(self):
self.assertEqual(pairwise_percentage(KAZANTSEVA2012_G2),
(Decimal('0.3398087832646656176067940768'),
Decimal('0.1948481072924021072633034332'),
Decimal('0.03796578491543144325163024138'),
Decimal('0.02515478248611697670879150623')))
def test_large_disagreement(self):
self.assertEqual(pairwise_percentage(LARGE_DISAGREEMENT),
(0.0,
0.0,
0.0,
0.0))
def test_complete_agreement(self):
self.assertEqual(pairwise_percentage(COMPLETE_AGREEMENT),
(1.0,
0.0,
0.0,
0.0))
class TestPercentageUtils(unittest.TestCase):
def test_find_seg_positions(self):
seg_positions = find_boundary_position_freqs([[1,2,3,3,2,1],
[1,2,2,4,2,1]])
self.assertEqual(seg_positions, { 1: 2,
3: 2,
5: 1,
6: 1,
9: 2,
11: 2})
| true | true |
f7182279ccd3d16543495752c131fb1fcf6fbcc0 | 5,356 | py | Python | torchmetrics/regression/pearson.py | lucadiliello/metrics | e98fbafd2af5d217596958f9cfe6152543a00b7f | [
"Apache-2.0"
] | null | null | null | torchmetrics/regression/pearson.py | lucadiliello/metrics | e98fbafd2af5d217596958f9cfe6152543a00b7f | [
"Apache-2.0"
] | null | null | null | torchmetrics/regression/pearson.py | lucadiliello/metrics | e98fbafd2af5d217596958f9cfe6152543a00b7f | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Tuple
import torch
from torch import Tensor
from torchmetrics.functional.regression.pearson import _pearson_corrcoef_compute, _pearson_corrcoef_update
from torchmetrics.metric import Metric
def _final_aggregation(
means_x: Tensor,
means_y: Tensor,
vars_x: Tensor,
vars_y: Tensor,
corrs_xy: Tensor,
nbs: Tensor,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Aggregate the statistics from multiple devices.
Formula taken from here: `Aggregate the statistics from multiple devices`_
"""
# assert len(means_x) > 1 and len(means_y) > 1 and len(vars_x) > 1 and len(vars_y) > 1 and len(corrs_xy) > 1
mx1, my1, vx1, vy1, cxy1, n1 = means_x[0], means_y[0], vars_x[0], vars_y[0], corrs_xy[0], nbs[0]
for i in range(1, len(means_x)):
mx2, my2, vx2, vy2, cxy2, n2 = means_x[i], means_y[i], vars_x[i], vars_y[i], corrs_xy[i], nbs[i]
nb = n1 + n2
mean_x = (n1 * mx1 + n2 * mx2) / nb
mean_y = (n1 * my1 + n2 * my2) / nb
var_x = 1 / (n1 + n2 - 1) * ((n1 - 1) * vx1 + (n2 - 1) * vx2 + ((n1 * n2) / (n1 + n2)) * (mx1 - mx2) ** 2)
var_y = 1 / (n1 + n2 - 1) * ((n1 - 1) * vy1 + (n2 - 1) * vy2 + ((n1 * n2) / (n1 + n2)) * (my1 - my2) ** 2)
corr1 = n1 * cxy1 + n1 * (mx1 - mean_x) * (my1 - mean_y)
corr2 = n2 * cxy2 + n2 * (mx2 - mean_x) * (my2 - mean_y)
corr_xy = (corr1 + corr2) / (n1 + n2)
mx1, my1, vx1, vy1, cxy1, n1 = mean_x, mean_y, var_x, var_y, corr_xy, nb
return var_x, var_y, corr_xy, nb
class PearsonCorrcoef(Metric):
r"""
Computes `Pearson Correlation Coefficient`_:
.. math::
P_{corr}(x,y) = \frac{cov(x,y)}{\sigma_x \sigma_y}
Where :math:`y` is a tensor of target values, and :math:`x` is a
tensor of predictions.
Forward accepts
- ``preds`` (float tensor): ``(N,)``
- ``target``(float tensor): ``(N,)``
Args:
compute_on_step:
Forward only calls ``update()`` and return None if this is set to False. default: True
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step. default: False
process_group:
Specify the process group on which synchronization is called. default: None (which selects the entire world)
Example:
>>> from torchmetrics import PearsonCorrcoef
>>> target = torch.tensor([3, -0.5, 2, 7])
>>> preds = torch.tensor([2.5, 0.0, 2, 8])
>>> pearson = PearsonCorrcoef()
>>> pearson(preds, target)
tensor(0.9849)
"""
is_differentiable = True
higher_is_better = None # both -1 and 1 are optimal
preds: List[Tensor]
target: List[Tensor]
mean_x: Tensor
mean_y: Tensor
var_x: Tensor
var_y: Tensor
corr_xy: Tensor
n_total: Tensor
def __init__(
self,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
) -> None:
super().__init__(
compute_on_step=compute_on_step,
dist_sync_on_step=dist_sync_on_step,
process_group=process_group,
)
self.add_state("mean_x", default=torch.tensor(0.0), dist_reduce_fx=None)
self.add_state("mean_y", default=torch.tensor(0.0), dist_reduce_fx=None)
self.add_state("var_x", default=torch.tensor(0.0), dist_reduce_fx=None)
self.add_state("var_y", default=torch.tensor(0.0), dist_reduce_fx=None)
self.add_state("corr_xy", default=torch.tensor(0.0), dist_reduce_fx=None)
self.add_state("n_total", default=torch.tensor(0.0), dist_reduce_fx=None)
def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
"""Update state with predictions and targets.
Args:
preds: Predictions from model
target: Ground truth values
"""
self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total = _pearson_corrcoef_update(
preds, target, self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total
)
def compute(self) -> Tensor:
"""Computes pearson correlation coefficient over state."""
if self.mean_x.numel() > 1: # multiple devices, need further reduction
var_x, var_y, corr_xy, n_total = _final_aggregation(
self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total
)
else:
var_x = self.var_x
var_y = self.var_y
corr_xy = self.corr_xy
n_total = self.n_total
return _pearson_corrcoef_compute(var_x, var_y, corr_xy, n_total)
| 37.71831 | 120 | 0.626027 |
from typing import Any, List, Optional, Tuple
import torch
from torch import Tensor
from torchmetrics.functional.regression.pearson import _pearson_corrcoef_compute, _pearson_corrcoef_update
from torchmetrics.metric import Metric
def _final_aggregation(
means_x: Tensor,
means_y: Tensor,
vars_x: Tensor,
vars_y: Tensor,
corrs_xy: Tensor,
nbs: Tensor,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
mx1, my1, vx1, vy1, cxy1, n1 = means_x[0], means_y[0], vars_x[0], vars_y[0], corrs_xy[0], nbs[0]
for i in range(1, len(means_x)):
mx2, my2, vx2, vy2, cxy2, n2 = means_x[i], means_y[i], vars_x[i], vars_y[i], corrs_xy[i], nbs[i]
nb = n1 + n2
mean_x = (n1 * mx1 + n2 * mx2) / nb
mean_y = (n1 * my1 + n2 * my2) / nb
var_x = 1 / (n1 + n2 - 1) * ((n1 - 1) * vx1 + (n2 - 1) * vx2 + ((n1 * n2) / (n1 + n2)) * (mx1 - mx2) ** 2)
var_y = 1 / (n1 + n2 - 1) * ((n1 - 1) * vy1 + (n2 - 1) * vy2 + ((n1 * n2) / (n1 + n2)) * (my1 - my2) ** 2)
corr1 = n1 * cxy1 + n1 * (mx1 - mean_x) * (my1 - mean_y)
corr2 = n2 * cxy2 + n2 * (mx2 - mean_x) * (my2 - mean_y)
corr_xy = (corr1 + corr2) / (n1 + n2)
mx1, my1, vx1, vy1, cxy1, n1 = mean_x, mean_y, var_x, var_y, corr_xy, nb
return var_x, var_y, corr_xy, nb
class PearsonCorrcoef(Metric):
is_differentiable = True
higher_is_better = None
preds: List[Tensor]
target: List[Tensor]
mean_x: Tensor
mean_y: Tensor
var_x: Tensor
var_y: Tensor
corr_xy: Tensor
n_total: Tensor
def __init__(
self,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
) -> None:
super().__init__(
compute_on_step=compute_on_step,
dist_sync_on_step=dist_sync_on_step,
process_group=process_group,
)
self.add_state("mean_x", default=torch.tensor(0.0), dist_reduce_fx=None)
self.add_state("mean_y", default=torch.tensor(0.0), dist_reduce_fx=None)
self.add_state("var_x", default=torch.tensor(0.0), dist_reduce_fx=None)
self.add_state("var_y", default=torch.tensor(0.0), dist_reduce_fx=None)
self.add_state("corr_xy", default=torch.tensor(0.0), dist_reduce_fx=None)
self.add_state("n_total", default=torch.tensor(0.0), dist_reduce_fx=None)
def update(self, preds: Tensor, target: Tensor) -> None:
self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total = _pearson_corrcoef_update(
preds, target, self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total
)
def compute(self) -> Tensor:
if self.mean_x.numel() > 1:
var_x, var_y, corr_xy, n_total = _final_aggregation(
self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total
)
else:
var_x = self.var_x
var_y = self.var_y
corr_xy = self.corr_xy
n_total = self.n_total
return _pearson_corrcoef_compute(var_x, var_y, corr_xy, n_total)
| true | true |
f718227f7c7f9f79e60bd34c13ee360426d8cedb | 12,071 | py | Python | 8.SAC/SAC-continuous.py | Lizhi-sjtu/DRL-code-pytorch | 2ca05f4ed64d2d032e161fc3a2d2a68c818c4337 | [
"MIT"
] | 2 | 2022-03-27T01:56:48.000Z | 2022-03-31T05:02:39.000Z | 8.SAC/SAC-continuous.py | Lizhi-sjtu/DRL-code-pytorch | 2ca05f4ed64d2d032e161fc3a2d2a68c818c4337 | [
"MIT"
] | null | null | null | 8.SAC/SAC-continuous.py | Lizhi-sjtu/DRL-code-pytorch | 2ca05f4ed64d2d032e161fc3a2d2a68c818c4337 | [
"MIT"
] | null | null | null | import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import copy
from torch.utils.tensorboard import SummaryWriter
from torch.distributions import Normal
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, hidden_width, max_action):
super(Actor, self).__init__()
self.max_action = max_action
self.l1 = nn.Linear(state_dim, hidden_width)
self.l2 = nn.Linear(hidden_width, hidden_width)
self.mean_layer = nn.Linear(hidden_width, action_dim)
self.log_std_layer = nn.Linear(hidden_width, action_dim)
def forward(self, x, deterministic=False, with_logprob=True):
x = F.relu(self.l1(x))
x = F.relu(self.l2(x))
mean = self.mean_layer(x)
log_std = self.log_std_layer(x) # We output the log_std to ensure that std=exp(log_std)>0
log_std = torch.clamp(log_std, -20, 2)
std = torch.exp(log_std)
dist = Normal(mean, std) # Generate a Gaussian distribution
if deterministic: # When evaluating,we use the deterministic policy
a = mean
else:
a = dist.rsample() # reparameterization trick: mean+std*N(0,1)
if with_logprob: # The method refers to Open AI Spinning up, which is more stable.
log_pi = dist.log_prob(a).sum(dim=1, keepdim=True)
log_pi -= (2 * (np.log(2) - a - F.softplus(-2 * a))).sum(dim=1, keepdim=True)
else:
log_pi = None
a = self.max_action * torch.tanh(a) # Use tanh to compress the unbounded Gaussian distribution into a bounded action interval.
return a, log_pi
class Critic(nn.Module):
    """Twin Q-networks: given (s, a), return two independent Q(s, a) estimates."""
    def __init__(self, state_dim, action_dim, hidden_width):
        super(Critic, self).__init__()
        # First Q-network
        self.l1 = nn.Linear(state_dim + action_dim, hidden_width)
        self.l2 = nn.Linear(hidden_width, hidden_width)
        self.l3 = nn.Linear(hidden_width, 1)
        # Second Q-network
        self.l4 = nn.Linear(state_dim + action_dim, hidden_width)
        self.l5 = nn.Linear(hidden_width, hidden_width)
        self.l6 = nn.Linear(hidden_width, 1)
    def forward(self, s, a):
        """Return ``(q1, q2)``, each of shape (batch, 1)."""
        joint = torch.cat([s, a], 1)
        q1 = self.l3(F.relu(self.l2(F.relu(self.l1(joint)))))
        q2 = self.l6(F.relu(self.l5(F.relu(self.l4(joint)))))
        return q1, q2
class ReplayBuffer(object):
    """Fixed-capacity FIFO experience store backed by pre-allocated numpy arrays."""
    def __init__(self, state_dim, action_dim):
        self.max_size = int(1e6)  # ring-buffer capacity
        self.count = 0  # next write position
        self.size = 0  # number of valid transitions currently stored
        self.s = np.zeros((self.max_size, state_dim))
        self.a = np.zeros((self.max_size, action_dim))
        self.r = np.zeros((self.max_size, 1))
        self.s_ = np.zeros((self.max_size, state_dim))
        self.dw = np.zeros((self.max_size, 1))
    def store(self, s, a, r, s_, dw):
        """Insert one transition, overwriting the oldest slot once full."""
        pos = self.count
        self.s[pos] = s
        self.a[pos] = a
        self.r[pos] = r
        self.s_[pos] = s_
        self.dw[pos] = dw
        self.count = (pos + 1) % self.max_size  # wrap around at capacity
        self.size = min(self.size + 1, self.max_size)
    def sample(self, batch_size):
        """Draw a random batch (with replacement) as float32 tensors."""
        index = np.random.choice(self.size, size=batch_size)
        def pick(arr):
            return torch.tensor(arr[index], dtype=torch.float)
        return pick(self.s), pick(self.a), pick(self.r), pick(self.s_), pick(self.dw)
class SAC(object):
    """Soft Actor-Critic agent with twin critics, a soft-updated target critic
    and (optionally) automatic tuning of the entropy temperature alpha."""
    def __init__(self, state_dim, action_dim, max_action):
        self.max_action = max_action
        self.hidden_width = 256  # The number of neurons in hidden layers of the neural network
        self.batch_size = 256  # batch size
        self.GAMMA = 0.99  # discount factor
        self.TAU = 0.005  # Softly update the target network
        self.lr = 3e-4  # learning rate
        self.adaptive_alpha = True  # Whether to automatically learn the temperature alpha
        if self.adaptive_alpha:
            # Target Entropy = −dim(A) (e.g. , -6 for HalfCheetah-v2) as given in the paper
            self.target_entropy = -action_dim
            # We learn log_alpha instead of alpha to ensure that alpha=exp(log_alpha)>0
            self.log_alpha = torch.zeros(1, requires_grad=True)
            self.alpha = self.log_alpha.exp()
            self.alpha_optimizer = torch.optim.Adam([self.log_alpha], lr=self.lr)
        else:
            self.alpha = 0.2
        self.actor = Actor(state_dim, action_dim, self.hidden_width, max_action)
        self.critic = Critic(state_dim, action_dim, self.hidden_width)
        self.critic_target = copy.deepcopy(self.critic)  # target critic starts as an exact copy
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.lr)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.lr)
    def choose_action(self, s, deterministic=False):
        """Return a single action (flat numpy array) for one state ``s``."""
        s = torch.unsqueeze(torch.tensor(s, dtype=torch.float), 0)
        a, _ = self.actor(s, deterministic, False)  # When choosing actions, we do not need to compute log_pi
        return a.data.numpy().flatten()
    def learn(self, relay_buffer):
        """Run one gradient step on critics, actor and (optionally) alpha,
        then softly update the target critic.

        NOTE(review): parameter name is a typo for "replay_buffer"; renaming
        would break keyword callers, so it is kept.
        """
        batch_s, batch_a, batch_r, batch_s_, batch_dw = relay_buffer.sample(self.batch_size)  # Sample a batch
        with torch.no_grad():
            batch_a_, log_pi_ = self.actor(batch_s_)  # a' from the current policy
            # Compute target Q
            target_Q1, target_Q2 = self.critic_target(batch_s_, batch_a_)
            target_Q = batch_r + self.GAMMA * (1 - batch_dw) * (torch.min(target_Q1, target_Q2) - self.alpha * log_pi_)
        # Compute current Q
        current_Q1, current_Q2 = self.critic(batch_s, batch_a)
        # Compute critic loss
        critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
        # Optimize the critic
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()
        # Freeze critic networks so you don't waste computational effort
        for params in self.critic.parameters():
            params.requires_grad = False
        # Compute actor loss
        a, log_pi = self.actor(batch_s)
        Q1, Q2 = self.critic(batch_s, a)
        Q = torch.min(Q1, Q2)
        actor_loss = (self.alpha * log_pi - Q).mean()
        # Optimize the actor
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()
        # Unfreeze critic networks
        for params in self.critic.parameters():
            params.requires_grad = True
        # Update alpha
        if self.adaptive_alpha:
            # We learn log_alpha instead of alpha to ensure that alpha=exp(log_alpha)>0
            alpha_loss = -(self.log_alpha.exp() * (log_pi + self.target_entropy).detach()).mean()
            self.alpha_optimizer.zero_grad()
            alpha_loss.backward()
            self.alpha_optimizer.step()
            self.alpha = self.log_alpha.exp()
        # Softly update target networks
        for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
            target_param.data.copy_(self.TAU * param.data + (1 - self.TAU) * target_param.data)
def evaluate_policy(env, agent):
    """Run a few deterministic episodes and return the truncated mean episode reward."""
    times = 3  # Perform three evaluations and calculate the average
    total = 0
    for _ in range(times):
        state = env.reset()
        episode_reward = 0
        done = False
        while not done:
            # use the deterministic (greedy) policy during evaluation
            action = agent.choose_action(state, deterministic=True)
            state, r, done, _ = env.step(action)
            episode_reward += r
        total += episode_reward
    return int(total / times)
def reward_adapter(r, env_index):
    """Rescale/clip the raw environment reward for more stable training."""
    if env_index == 0:  # Pendulum-v1: shift/scale rewards towards roughly [-1, 1]
        return (r + 8) / 8
    if env_index == 1 and r <= -100:  # BipedalWalker-v3: soften the large fall penalty
        return -1
    return r
if __name__ == '__main__':
    # Training entry point: builds the environment, agent and replay buffer,
    # then runs the standard off-policy loop (random warm-up, learn each step,
    # periodic deterministic evaluation with TensorBoard logging).
    env_name = ['Pendulum-v1', 'BipedalWalker-v3', 'HalfCheetah-v2', 'Hopper-v2', 'Walker2d-v2']
    env_index = 0
    env = gym.make(env_name[env_index])
    env_evaluate = gym.make(env_name[env_index])  # When evaluating the policy, we need to rebuild an environment
    number = 1
    seed = 0
    # Set random seed
    env.seed(seed)
    env.action_space.seed(seed)
    env_evaluate.seed(seed)
    env_evaluate.action_space.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    max_action = float(env.action_space.high[0])
    max_episode_steps = env._max_episode_steps  # Maximum number of steps per episode
    print("env={}".format(env_name[env_index]))
    print("state_dim={}".format(state_dim))
    print("action_dim={}".format(action_dim))
    print("max_action={}".format(max_action))
    print("max_episode_steps={}".format(max_episode_steps))
    agent = SAC(state_dim, action_dim, max_action)
    replay_buffer = ReplayBuffer(state_dim, action_dim)
    # Build a tensorboard
    writer = SummaryWriter(log_dir='runs/SAC/SAC_env_{}_number_{}_seed_{}'.format(env_name[env_index], number, seed))
    max_train_steps = 3e6  # Maximum number of training steps
    random_steps = 25e3  # Take the random actions in the beginning for the better exploration
    evaluate_freq = 5e3  # Evaluate the policy every 'evaluate_freq' steps
    evaluate_num = 0  # Record the number of evaluations
    evaluate_rewards = []  # Record the rewards during the evaluating
    total_steps = 0  # Record the total steps during the training
    while total_steps < max_train_steps:
        s = env.reset()
        episode_steps = 0
        done = False
        while not done:
            episode_steps += 1
            if total_steps < random_steps:  # Take the random actions in the beginning for the better exploration
                a = env.action_space.sample()
            else:
                a = agent.choose_action(s)
            s_, r, done, _ = env.step(a)
            r = reward_adapter(r, env_index)  # Adjust rewards for better performance
            # When dead or win or reaching the max_episode_steps, done will be True, we need to distinguish them;
            # dw means dead or win,there is no next state s';
            # but when reaching the max_episode_steps,there is a next state s' actually.
            if done and episode_steps != max_episode_steps:
                dw = True
            else:
                dw = False
            replay_buffer.store(s, a, r, s_, dw)  # Store the transition
            s = s_
            if total_steps >= random_steps:
                agent.learn(replay_buffer)
            # Evaluate the policy every 'evaluate_freq' steps
            if (total_steps + 1) % evaluate_freq == 0:
                evaluate_num += 1
                evaluate_reward = evaluate_policy(env_evaluate, agent)
                evaluate_rewards.append(evaluate_reward)
                print("evaluate_num:{} \t evaluate_reward:{}".format(evaluate_num, evaluate_reward))
                writer.add_scalar('step_rewards_{}'.format(env_name[env_index]), evaluate_reward, global_step=total_steps)
                # Save the rewards
                if evaluate_num % 10 == 0:
                    np.save('./data_train/SAC_env_{}_number_{}_seed_{}.npy'.format(env_name[env_index], number, seed), np.array(evaluate_rewards))
            total_steps += 1
| 42.65371 | 147 | 0.618341 | import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import copy
from torch.utils.tensorboard import SummaryWriter
from torch.distributions import Normal
class Actor(nn.Module):
    """Gaussian policy network: maps a state to a bounded, tanh-squashed action.

    Produces the mean and (clamped) log standard deviation of a diagonal
    Gaussian; actions are drawn with the reparameterization trick and
    squashed into [-max_action, max_action] by tanh.
    """
    def __init__(self, state_dim, action_dim, hidden_width, max_action):
        super(Actor, self).__init__()
        self.max_action = max_action  # scalar bound of the action interval
        self.l1 = nn.Linear(state_dim, hidden_width)
        self.l2 = nn.Linear(hidden_width, hidden_width)
        self.mean_layer = nn.Linear(hidden_width, action_dim)
        self.log_std_layer = nn.Linear(hidden_width, action_dim)
    def forward(self, x, deterministic=False, with_logprob=True):
        """Return ``(action, log_pi)`` for a batch of states ``x``.

        deterministic: if True, return the distribution mean (evaluation mode).
        with_logprob: if False, skip the log-probability computation and
            return ``log_pi=None``.
        """
        x = F.relu(self.l1(x))
        x = F.relu(self.l2(x))
        mean = self.mean_layer(x)
        log_std = self.log_std_layer(x)  # predict log_std so that std = exp(log_std) > 0
        log_std = torch.clamp(log_std, -20, 2)  # keep std in a numerically safe range
        std = torch.exp(log_std)
        dist = Normal(mean, std)
        if deterministic:
            a = mean
        else:
            a = dist.rsample()  # reparameterization trick: mean + std * N(0, 1)
        if with_logprob:
            # log-prob of the pre-tanh sample plus the tanh change-of-variables
            # correction, written in a numerically stable softplus form.
            log_pi = dist.log_prob(a).sum(dim=1, keepdim=True)
            log_pi -= (2 * (np.log(2) - a - F.softplus(-2 * a))).sum(dim=1, keepdim=True)
        else:
            log_pi = None
        a = self.max_action * torch.tanh(a)  # squash into [-max_action, max_action]
        return a, log_pi
class Critic(nn.Module):
    """Twin Q-networks: given (s, a), return two independent Q(s, a) estimates."""
    def __init__(self, state_dim, action_dim, hidden_width):
        super(Critic, self).__init__()
        # First Q-network
        self.l1 = nn.Linear(state_dim + action_dim, hidden_width)
        self.l2 = nn.Linear(hidden_width, hidden_width)
        self.l3 = nn.Linear(hidden_width, 1)
        # Second Q-network
        self.l4 = nn.Linear(state_dim + action_dim, hidden_width)
        self.l5 = nn.Linear(hidden_width, hidden_width)
        self.l6 = nn.Linear(hidden_width, 1)
    def forward(self, s, a):
        """Return ``(q1, q2)``, each of shape (batch, 1)."""
        joint = torch.cat([s, a], 1)
        q1 = self.l3(F.relu(self.l2(F.relu(self.l1(joint)))))
        q2 = self.l6(F.relu(self.l5(F.relu(self.l4(joint)))))
        return q1, q2
class ReplayBuffer(object):
    """Fixed-capacity FIFO experience store backed by pre-allocated numpy arrays."""
    def __init__(self, state_dim, action_dim):
        self.max_size = int(1e6)  # ring-buffer capacity
        self.count = 0  # next write position
        self.size = 0  # number of valid transitions currently stored
        self.s = np.zeros((self.max_size, state_dim))
        self.a = np.zeros((self.max_size, action_dim))
        self.r = np.zeros((self.max_size, 1))
        self.s_ = np.zeros((self.max_size, state_dim))
        self.dw = np.zeros((self.max_size, 1))
    def store(self, s, a, r, s_, dw):
        """Insert one transition, overwriting the oldest slot once full."""
        pos = self.count
        self.s[pos] = s
        self.a[pos] = a
        self.r[pos] = r
        self.s_[pos] = s_
        self.dw[pos] = dw
        self.count = (pos + 1) % self.max_size  # wrap around at capacity
        self.size = min(self.size + 1, self.max_size)
    def sample(self, batch_size):
        """Draw a random batch (with replacement) as float32 tensors."""
        index = np.random.choice(self.size, size=batch_size)
        def pick(arr):
            return torch.tensor(arr[index], dtype=torch.float)
        return pick(self.s), pick(self.a), pick(self.r), pick(self.s_), pick(self.dw)
class SAC(object):
    """Soft Actor-Critic agent with twin critics, a soft-updated target critic
    and (optionally) automatic tuning of the entropy temperature alpha."""
    def __init__(self, state_dim, action_dim, max_action):
        self.max_action = max_action
        self.hidden_width = 256  # neurons per hidden layer
        self.batch_size = 256
        self.GAMMA = 0.99  # discount factor
        self.TAU = 0.005  # soft target-update coefficient
        self.lr = 3e-4  # learning rate shared by all optimizers
        self.adaptive_alpha = True  # automatically learn the temperature alpha
        if self.adaptive_alpha:
            # target entropy = -dim(A), as in the SAC paper
            self.target_entropy = -action_dim
            # learn log_alpha so that alpha = exp(log_alpha) > 0
            self.log_alpha = torch.zeros(1, requires_grad=True)
            self.alpha = self.log_alpha.exp()
            self.alpha_optimizer = torch.optim.Adam([self.log_alpha], lr=self.lr)
        else:
            self.alpha = 0.2
        self.actor = Actor(state_dim, action_dim, self.hidden_width, max_action)
        self.critic = Critic(state_dim, action_dim, self.hidden_width)
        self.critic_target = copy.deepcopy(self.critic)  # target critic starts as an exact copy
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.lr)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.lr)
    def choose_action(self, s, deterministic=False):
        """Return a single action (flat numpy array) for one state ``s``."""
        s = torch.unsqueeze(torch.tensor(s, dtype=torch.float), 0)
        a, _ = self.actor(s, deterministic, False)  # log_pi is not needed here
        return a.data.numpy().flatten()
    def learn(self, relay_buffer):
        """Run one gradient step on critics, actor and (optionally) alpha,
        then softly update the target critic.

        NOTE(review): parameter name is a typo for "replay_buffer"; renaming
        would break keyword callers, so it is kept.
        """
        batch_s, batch_a, batch_r, batch_s_, batch_dw = relay_buffer.sample(self.batch_size)
        with torch.no_grad():
            batch_a_, log_pi_ = self.actor(batch_s_)
            # Compute target Q
            target_Q1, target_Q2 = self.critic_target(batch_s_, batch_a_)
            target_Q = batch_r + self.GAMMA * (1 - batch_dw) * (torch.min(target_Q1, target_Q2) - self.alpha * log_pi_)
        # Compute current Q
        current_Q1, current_Q2 = self.critic(batch_s, batch_a)
        # Compute critic loss
        critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
        # Optimize the critic
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()
        # Freeze critic networks so you don't waste computational effort
        for params in self.critic.parameters():
            params.requires_grad = False
        # actor loss: maximize Q while keeping entropy (alpha-weighted log_pi)
        a, log_pi = self.actor(batch_s)
        Q1, Q2 = self.critic(batch_s, a)
        Q = torch.min(Q1, Q2)
        actor_loss = (self.alpha * log_pi - Q).mean()
        # optimize the actor
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()
        # unfreeze critic networks again
        for params in self.critic.parameters():
            params.requires_grad = True
        # update the temperature alpha towards the target entropy
        if self.adaptive_alpha:
            alpha_loss = -(self.log_alpha.exp() * (log_pi + self.target_entropy).detach()).mean()
            self.alpha_optimizer.zero_grad()
            alpha_loss.backward()
            self.alpha_optimizer.step()
            self.alpha = self.log_alpha.exp()
        # softly update the target critic
        for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
            target_param.data.copy_(self.TAU * param.data + (1 - self.TAU) * target_param.data)
def evaluate_policy(env, agent):
    """Run a few deterministic episodes and return the truncated mean episode reward."""
    times = 3  # number of evaluation episodes to average over
    total = 0
    for _ in range(times):
        state = env.reset()
        episode_reward = 0
        done = False
        while not done:
            # use the deterministic (greedy) policy during evaluation
            action = agent.choose_action(state, deterministic=True)
            state, r, done, _ = env.step(action)
            episode_reward += r
        total += episode_reward
    return int(total / times)
def reward_adapter(r, env_index):
    """Rescale/clip the raw environment reward for more stable training."""
    if env_index == 0:  # Pendulum-v1: shift/scale rewards towards roughly [-1, 1]
        return (r + 8) / 8
    if env_index == 1 and r <= -100:  # BipedalWalker-v3: soften the large fall penalty
        return -1
    return r
if __name__ == '__main__':
    # Training entry point: builds the environment, agent and replay buffer,
    # then runs the standard off-policy loop (random warm-up, learn each step,
    # periodic deterministic evaluation with TensorBoard logging).
    env_name = ['Pendulum-v1', 'BipedalWalker-v3', 'HalfCheetah-v2', 'Hopper-v2', 'Walker2d-v2']
    env_index = 0
    env = gym.make(env_name[env_index])
    env_evaluate = gym.make(env_name[env_index])  # separate environment for evaluation
    number = 1
    seed = 0
    # seed everything for reproducibility
    env.seed(seed)
    env.action_space.seed(seed)
    env_evaluate.seed(seed)
    env_evaluate.action_space.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    max_action = float(env.action_space.high[0])
    max_episode_steps = env._max_episode_steps  # maximum number of steps per episode
    print("env={}".format(env_name[env_index]))
    print("state_dim={}".format(state_dim))
    print("action_dim={}".format(action_dim))
    print("max_action={}".format(max_action))
    print("max_episode_steps={}".format(max_episode_steps))
    agent = SAC(state_dim, action_dim, max_action)
    replay_buffer = ReplayBuffer(state_dim, action_dim)
    writer = SummaryWriter(log_dir='runs/SAC/SAC_env_{}_number_{}_seed_{}'.format(env_name[env_index], number, seed))
    max_train_steps = 3e6  # maximum number of training steps
    random_steps = 25e3  # take random actions at first for better exploration
    evaluate_freq = 5e3  # evaluate the policy every 'evaluate_freq' steps
    evaluate_num = 0  # number of evaluations performed so far
    evaluate_rewards = []  # rewards recorded during evaluation
    total_steps = 0  # total environment steps taken
    while total_steps < max_train_steps:
        s = env.reset()
        episode_steps = 0
        done = False
        while not done:
            episode_steps += 1
            if total_steps < random_steps:  # warm-up: uniform random actions
                a = env.action_space.sample()
            else:
                a = agent.choose_action(s)
            s_, r, done, _ = env.step(a)
            r = reward_adapter(r, env_index)  # adjust rewards for better performance
            # dw ("dead or win") distinguishes terminal states (no next state s')
            # from episodes truncated by the step limit,
            # but when reaching the max_episode_steps,there is a next state s' actually.
            if done and episode_steps != max_episode_steps:
                dw = True
            else:
                dw = False
            replay_buffer.store(s, a, r, s_, dw)  # store the transition
            s = s_
            if total_steps >= random_steps:
                agent.learn(replay_buffer)
            # periodic deterministic evaluation
            if (total_steps + 1) % evaluate_freq == 0:
                evaluate_num += 1
                evaluate_reward = evaluate_policy(env_evaluate, agent)
                evaluate_rewards.append(evaluate_reward)
                print("evaluate_num:{} \t evaluate_reward:{}".format(evaluate_num, evaluate_reward))
                writer.add_scalar('step_rewards_{}'.format(env_name[env_index]), evaluate_reward, global_step=total_steps)
                # persist the reward curve every 10 evaluations
                if evaluate_num % 10 == 0:
                    np.save('./data_train/SAC_env_{}_number_{}_seed_{}.npy'.format(env_name[env_index], number, seed), np.array(evaluate_rewards))
            total_steps += 1
| true | true |
f71823696e8d384656f678616b79009f7bcd95a6 | 14,225 | py | Python | esrally/client.py | Kua-Fu/rally | 7c58ef6f81f618fbc142dfa58b0ed00a5b05fbae | [
"Apache-2.0"
] | 1,577 | 2016-04-19T12:38:58.000Z | 2022-03-31T07:18:25.000Z | esrally/client.py | Kua-Fu/rally | 7c58ef6f81f618fbc142dfa58b0ed00a5b05fbae | [
"Apache-2.0"
] | 1,079 | 2016-04-19T12:09:16.000Z | 2022-03-31T05:38:50.000Z | esrally/client.py | Kua-Fu/rally | 7c58ef6f81f618fbc142dfa58b0ed00a5b05fbae | [
"Apache-2.0"
] | 300 | 2016-04-19T18:27:12.000Z | 2022-03-23T07:54:16.000Z | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextvars
import logging
import time
import certifi
import urllib3
from esrally import doc_link, exceptions
from esrally.utils import console, convert
class RequestContextManager:
    """
    Scopes a request context and supports nesting: a nested context's timings
    are propagated to its parent so the outermost context still measures the
    total request duration while inner contexts measure individual requests.
    """

    def __init__(self, request_context_holder):
        self.ctx_holder = request_context_holder
        self.ctx = None
        self.token = None

    async def __aenter__(self):
        self.ctx, self.token = self.ctx_holder.init_request_context()
        return self

    @property
    def request_start(self):
        return self.ctx["request_start"]

    @property
    def request_end(self):
        return self.ctx["request_end"]

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # Remember this scope's boundaries before tearing the context down,
        # then push them up to the enclosing request context (if any).
        start, end = self.request_start, self.request_end
        self.ctx_holder.restore_context(self.token)
        # A MISSING old_value means this was the top-level context: there is
        # no parent to propagate the timings to.
        if self.token.old_value != contextvars.Token.MISSING:
            self.ctx_holder.update_request_start(start)
            self.ctx_holder.update_request_end(end)
        self.token = None
        return False
class RequestContextHolder:
    """
    Stores per-request context variables. Only meant to be used together with
    RequestContextManager.
    """

    request_context = contextvars.ContextVar("rally_request_context")

    def new_request_context(self):
        return RequestContextManager(self)

    @classmethod
    def init_request_context(cls):
        ctx = {}
        return ctx, cls.request_context.set(ctx)

    @classmethod
    def restore_context(cls, token):
        cls.request_context.reset(token)

    @classmethod
    def update_request_start(cls, new_request_start):
        meta = cls.request_context.get()
        # multiple wire-level requests may back one logical request (e.g. scrolls);
        # keep the earliest start that was recorded.
        meta.setdefault("request_start", new_request_start)

    @classmethod
    def update_request_end(cls, new_request_end):
        cls.request_context.get()["request_end"] = new_request_end

    @classmethod
    def on_request_start(cls):
        cls.update_request_start(time.perf_counter())

    @classmethod
    def on_request_end(cls):
        cls.update_request_end(time.perf_counter())

    @classmethod
    def return_raw_response(cls):
        cls.request_context.get()["raw_response"] = True
class EsClientFactory:
    """
    Abstracts how the Elasticsearch client is created. Intended for testing.
    """

    def __init__(self, hosts, client_options):
        self.hosts = hosts
        self.client_options = dict(client_options)
        self.ssl_context = None
        self.logger = logging.getLogger(__name__)

        # never log credentials
        masked_client_options = dict(client_options)
        if "basic_auth_password" in masked_client_options:
            masked_client_options["basic_auth_password"] = "*****"
        if "http_auth" in masked_client_options:
            masked_client_options["http_auth"] = (masked_client_options["http_auth"][0], "*****")
        self.logger.info("Creating ES client connected to %s with options [%s]", hosts, masked_client_options)

        # we're using an SSL context now and it is not allowed to have use_ssl present in client options anymore
        if self.client_options.pop("use_ssl", False):
            # pylint: disable=import-outside-toplevel
            import ssl

            self.logger.info("SSL support: on")
            self.client_options["scheme"] = "https"

            # ssl.Purpose.CLIENT_AUTH allows presenting client certs and can only be enabled during instantiation
            # but can be disabled via the verify_mode property later on.
            self.ssl_context = ssl.create_default_context(
                ssl.Purpose.CLIENT_AUTH, cafile=self.client_options.pop("ca_certs", certifi.where())
            )

            if not self.client_options.pop("verify_certs", True):
                self.logger.info("SSL certificate verification: off")
                # order matters to avoid ValueError: check_hostname needs a SSL context with either CERT_OPTIONAL or CERT_REQUIRED
                self.ssl_context.verify_mode = ssl.CERT_NONE
                self.ssl_context.check_hostname = False

                self.logger.warning(
                    "User has enabled SSL but disabled certificate verification. This is dangerous but may be ok for a "
                    "benchmark. Disabling urllib warnings now to avoid a logging storm. "
                    "See https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings for details."
                )
                # disable: "InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly \
                # advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings"
                urllib3.disable_warnings()
            else:
                self.ssl_context.verify_mode = ssl.CERT_REQUIRED
                self.ssl_context.check_hostname = True
                self.logger.info("SSL certificate verification: on")

            # When using SSL_context, all SSL related kwargs in client options get ignored
            client_cert = self.client_options.pop("client_cert", False)
            client_key = self.client_options.pop("client_key", False)

            if not client_cert and not client_key:
                self.logger.info("SSL client authentication: off")
            elif bool(client_cert) != bool(client_key):
                # client_cert and client_key must always be supplied as a pair
                self.logger.error("Supplied client-options contain only one of client_cert/client_key. ")
                defined_client_ssl_option = "client_key" if client_key else "client_cert"
                missing_client_ssl_option = "client_cert" if client_key else "client_key"
                console.println(
                    "'{}' is missing from client-options but '{}' has been specified.\n"
                    "If your Elasticsearch setup requires client certificate verification both need to be supplied.\n"
                    "Read the documentation at {}\n".format(
                        missing_client_ssl_option,
                        defined_client_ssl_option,
                        console.format.link(doc_link("command_line_reference.html#client-options")),
                    )
                )
                raise exceptions.SystemSetupError(
                    "Cannot specify '{}' without also specifying '{}' in client-options.".format(
                        defined_client_ssl_option, missing_client_ssl_option
                    )
                )
            elif client_cert and client_key:
                self.logger.info("SSL client authentication: on")
                self.ssl_context.load_cert_chain(certfile=client_cert, keyfile=client_key)
        else:
            self.logger.info("SSL support: off")
            self.client_options["scheme"] = "http"

        if self._is_set(self.client_options, "basic_auth_user") and self._is_set(self.client_options, "basic_auth_password"):
            self.logger.info("HTTP basic authentication: on")
            self.client_options["http_auth"] = (self.client_options.pop("basic_auth_user"), self.client_options.pop("basic_auth_password"))
        else:
            self.logger.info("HTTP basic authentication: off")

        if self._is_set(self.client_options, "compressed"):
            # fix: the original warning contained a stray typographic quote ("compressed‘")
            console.warn("You set the deprecated client option 'compressed'. Please use 'http_compress' instead.", logger=self.logger)
            self.client_options["http_compress"] = self.client_options.pop("compressed")

        if self._is_set(self.client_options, "http_compress"):
            self.logger.info("HTTP compression: on")
        else:
            self.logger.info("HTTP compression: off")

        if self._is_set(self.client_options, "enable_cleanup_closed"):
            self.client_options["enable_cleanup_closed"] = convert.to_bool(self.client_options.pop("enable_cleanup_closed"))

    def _is_set(self, client_opts, k):
        """Return the value of ``k`` in ``client_opts``, or ``False`` if absent."""
        try:
            return client_opts[k]
        except KeyError:
            return False

    def create(self):
        """Create a synchronous Elasticsearch client."""
        # pylint: disable=import-outside-toplevel
        import elasticsearch

        return elasticsearch.Elasticsearch(hosts=self.hosts, ssl_context=self.ssl_context, **self.client_options)

    def create_async(self):
        """Create an asynchronous Elasticsearch client with Rally-specific instrumentation
        (request timing hooks and optional lazy response parsing)."""
        # pylint: disable=import-outside-toplevel
        import io

        import aiohttp
        import elasticsearch
        from elasticsearch.serializer import JSONSerializer

        import esrally.async_connection

        class LazyJSONSerializer(JSONSerializer):
            # defer JSON parsing: return the raw bytes when the request context
            # asked for the raw response (e.g. for the bulk API fast path)
            def loads(self, s):
                meta = RallyAsyncElasticsearch.request_context.get()
                if "raw_response" in meta:
                    return io.BytesIO(s)
                else:
                    return super().loads(s)

        async def on_request_start(session, trace_config_ctx, params):
            RallyAsyncElasticsearch.on_request_start()

        async def on_request_end(session, trace_config_ctx, params):
            RallyAsyncElasticsearch.on_request_end()

        trace_config = aiohttp.TraceConfig()
        trace_config.on_request_start.append(on_request_start)
        trace_config.on_request_end.append(on_request_end)
        # ensure that we also stop the timer when a request "ends" with an exception (e.g. a timeout)
        trace_config.on_request_exception.append(on_request_end)

        # override the builtin JSON serializer
        self.client_options["serializer"] = LazyJSONSerializer()
        self.client_options["trace_config"] = trace_config

        class VerifiedAsyncTransport(elasticsearch.AsyncTransport):
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                # skip verification at this point; we've already verified this earlier with the synchronous client.
                # The async client is used in the hot code path and we use customized overrides (such as that we don't
                # parse response bodies in some cases for performance reasons, e.g. when using the bulk API).
                self._verified_elasticsearch = True

        class RallyAsyncElasticsearch(elasticsearch.AsyncElasticsearch, RequestContextHolder):
            pass

        return RallyAsyncElasticsearch(
            hosts=self.hosts,
            transport_class=VerifiedAsyncTransport,
            connection_class=esrally.async_connection.AIOHttpConnection,
            ssl_context=self.ssl_context,
            **self.client_options,
        )
def wait_for_rest_layer(es, max_attempts=40):
    """
    Waits for ``max_attempts`` until Elasticsearch's REST API is available.

    :param es: Elasticsearch client to use for connecting.
    :param max_attempts: The maximum number of attempts to check whether the REST API is available.
    :return: True iff Elasticsearch's REST API is available.
    """
    # assume that at least the hosts that we expect to contact should be available. Note that this is not 100%
    # bullet-proof as a cluster could have e.g. dedicated masters which are not contained in our list of target hosts
    # but this is still better than just checking for any random node's REST API being reachable.
    expected_node_count = len(es.transport.hosts)
    logger = logging.getLogger(__name__)
    # loop-invariant import hoisted out of the retry loop (was re-imported on every attempt)
    # pylint: disable=import-outside-toplevel
    import elasticsearch

    for attempt in range(max_attempts):
        # NOTE(review): this logs before the check has run, so "is available" is
        # optimistic; message text kept for log compatibility.
        logger.debug("REST API is available after %s attempts", attempt)
        try:
            # see also WaitForHttpResource in Elasticsearch tests. Contrary to the ES tests we consider the API also
            # available when the cluster status is RED (as long as all required nodes are present)
            es.cluster.health(wait_for_nodes=">={}".format(expected_node_count))
            logger.info("REST API is available for >= [%s] nodes after [%s] attempts.", expected_node_count, attempt)
            return True
        except elasticsearch.ConnectionError as e:
            if "SSL: UNKNOWN_PROTOCOL" in str(e):
                raise exceptions.SystemSetupError("Could not connect to cluster via https. Is this an https endpoint?", e)
            else:
                logger.debug("Got connection error on attempt [%s]. Sleeping...", attempt)
                time.sleep(3)
        except elasticsearch.TransportError as e:
            # cluster block, x-pack not initialized yet, our wait condition is not reached
            if e.status_code in (503, 401, 408):
                logger.debug("Got status code [%s] on attempt [%s]. Sleeping...", e.status_code, attempt)
                time.sleep(3)
            else:
                logger.warning("Got unexpected status code [%s] on attempt [%s].", e.status_code, attempt)
                raise e
    return False
| 44.873817 | 139 | 0.664745 |
import contextvars
import logging
import time
import certifi
import urllib3
from esrally import doc_link, exceptions
from esrally.utils import console, convert
class RequestContextManager:
    """
    Scopes a request context and supports nesting: a nested context's timings
    are propagated to its parent so the outermost context still measures the
    total request duration while inner contexts measure individual requests.
    """

    def __init__(self, request_context_holder):
        self.ctx_holder = request_context_holder
        self.ctx = None
        self.token = None

    async def __aenter__(self):
        self.ctx, self.token = self.ctx_holder.init_request_context()
        return self

    @property
    def request_start(self):
        return self.ctx["request_start"]

    @property
    def request_end(self):
        return self.ctx["request_end"]

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # Remember this scope's boundaries before tearing the context down,
        # then push them up to the enclosing request context (if any).
        start, end = self.request_start, self.request_end
        self.ctx_holder.restore_context(self.token)
        # A MISSING old_value means this was the top-level context: there is
        # no parent to propagate the timings to.
        if self.token.old_value != contextvars.Token.MISSING:
            self.ctx_holder.update_request_start(start)
            self.ctx_holder.update_request_end(end)
        self.token = None
        return False
class RequestContextHolder:
    """
    Stores per-request context variables. Only meant to be used together with
    RequestContextManager.
    """

    request_context = contextvars.ContextVar("rally_request_context")

    def new_request_context(self):
        return RequestContextManager(self)

    @classmethod
    def init_request_context(cls):
        ctx = {}
        return ctx, cls.request_context.set(ctx)

    @classmethod
    def restore_context(cls, token):
        cls.request_context.reset(token)

    @classmethod
    def update_request_start(cls, new_request_start):
        meta = cls.request_context.get()
        # multiple wire-level requests may back one logical request (e.g. scrolls);
        # keep the earliest start that was recorded.
        meta.setdefault("request_start", new_request_start)

    @classmethod
    def update_request_end(cls, new_request_end):
        cls.request_context.get()["request_end"] = new_request_end

    @classmethod
    def on_request_start(cls):
        cls.update_request_start(time.perf_counter())

    @classmethod
    def on_request_end(cls):
        cls.update_request_end(time.perf_counter())

    @classmethod
    def return_raw_response(cls):
        cls.request_context.get()["raw_response"] = True
class EsClientFactory:
    """Creates Elasticsearch client instances (sync or async) from a list of
    hosts and Rally client options, normalizing TLS, client-certificate and
    HTTP basic auth configuration up front."""

    def __init__(self, hosts, client_options):
        """
        :param hosts: List of host definitions to connect to.
        :param client_options: Client options dict. Keys such as ``use_ssl``,
            ``verify_certs``, ``ca_certs``, ``client_cert``, ``client_key``,
            ``basic_auth_user`` / ``basic_auth_password``, ``compressed`` and
            ``enable_cleanup_closed`` are consumed/normalized here; everything
            else is passed through to the Elasticsearch client unchanged.
        :raises exceptions.SystemSetupError: if only one of ``client_cert`` /
            ``client_key`` has been specified.
        """
        self.hosts = hosts
        # Copy so we can pop() consumed options without mutating the caller's dict.
        self.client_options = dict(client_options)
        self.ssl_context = None
        self.logger = logging.getLogger(__name__)

        # Log a masked copy so credentials never end up in the log file.
        masked_client_options = dict(client_options)
        if "basic_auth_password" in masked_client_options:
            masked_client_options["basic_auth_password"] = "*****"
        if "http_auth" in masked_client_options:
            masked_client_options["http_auth"] = (masked_client_options["http_auth"][0], "*****")
        self.logger.info("Creating ES client connected to %s with options [%s]", hosts, masked_client_options)

        if self.client_options.pop("use_ssl", False):
            # pylint: disable=import-outside-toplevel
            import ssl
            self.logger.info("SSL support: on")
            self.client_options["scheme"] = "https"
            # ssl.Purpose.CLIENT_AUTH allows presenting client certs and can only be enabled during instantiation
            # but can be disabled via the verify_mode property later on.
            self.ssl_context = ssl.create_default_context(
                ssl.Purpose.CLIENT_AUTH, cafile=self.client_options.pop("ca_certs", certifi.where())
            )
            if not self.client_options.pop("verify_certs", True):
                self.logger.info("SSL certificate verification: off")
                # order matters to avoid ValueError: check_hostname needs a SSL context with either CERT_OPTIONAL or CERT_REQUIRED
                self.ssl_context.verify_mode = ssl.CERT_NONE
                self.ssl_context.check_hostname = False
                self.logger.warning(
                    "User has enabled SSL but disabled certificate verification. This is dangerous but may be ok for a "
                    "benchmark. Disabling urllib warnings now to avoid a logging storm. "
                    "See https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings for details."
                )
                # disable: "InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly \
                # advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings"
                urllib3.disable_warnings()
            else:
                self.ssl_context.verify_mode = ssl.CERT_REQUIRED
                self.ssl_context.check_hostname = True
                self.logger.info("SSL certificate verification: on")

            # When using SSL_context, all SSL related kwargs in client options get ignored
            client_cert = self.client_options.pop("client_cert", False)
            client_key = self.client_options.pop("client_key", False)
            if not client_cert and not client_key:
                self.logger.info("SSL client authentication: off")
            elif bool(client_cert) != bool(client_key):
                # Exactly one of the pair was supplied -- report precisely which
                # option is missing before bailing out.
                self.logger.error("Supplied client-options contain only one of client_cert/client_key. ")
                defined_client_ssl_option = "client_key" if client_key else "client_cert"
                missing_client_ssl_option = "client_cert" if client_key else "client_key"
                console.println(
                    "'{}' is missing from client-options but '{}' has been specified.\n"
                    "If your Elasticsearch setup requires client certificate verification both need to be supplied.\n"
                    "Read the documentation at {}\n".format(
                        missing_client_ssl_option,
                        defined_client_ssl_option,
                        console.format.link(doc_link("command_line_reference.html#client-options")),
                    )
                )
                raise exceptions.SystemSetupError(
                    "Cannot specify '{}' without also specifying '{}' in client-options.".format(
                        defined_client_ssl_option, missing_client_ssl_option
                    )
                )
            elif client_cert and client_key:
                self.logger.info("SSL client authentication: on")
                self.ssl_context.load_cert_chain(certfile=client_cert, keyfile=client_key)
        else:
            self.logger.info("SSL support: off")
            self.client_options["scheme"] = "http"

        if self._is_set(self.client_options, "basic_auth_user") and self._is_set(self.client_options, "basic_auth_password"):
            self.logger.info("HTTP basic authentication: on")
            self.client_options["http_auth"] = (self.client_options.pop("basic_auth_user"), self.client_options.pop("basic_auth_password"))
        else:
            self.logger.info("HTTP basic authentication: off")

        if self._is_set(self.client_options, "compressed"):
            # Fix: the deprecation warning contained a mojibake right quote ('compressed‘).
            console.warn("You set the deprecated client option 'compressed'. Please use 'http_compress' instead.", logger=self.logger)
            self.client_options["http_compress"] = self.client_options.pop("compressed")
        if self._is_set(self.client_options, "http_compress"):
            self.logger.info("HTTP compression: on")
        else:
            self.logger.info("HTTP compression: off")

        if self._is_set(self.client_options, "enable_cleanup_closed"):
            self.client_options["enable_cleanup_closed"] = convert.to_bool(self.client_options.pop("enable_cleanup_closed"))

    def _is_set(self, client_opts, k):
        """Return the (truthy) option value for ``k``, or ``False`` when absent."""
        try:
            return client_opts[k]
        except KeyError:
            return False

    def create(self):
        """Create a synchronous Elasticsearch client."""
        # pylint: disable=import-outside-toplevel
        import elasticsearch

        return elasticsearch.Elasticsearch(hosts=self.hosts, ssl_context=self.ssl_context, **self.client_options)

    def create_async(self):
        """Create an async Elasticsearch client wired up with Rally's
        request-context timing and lazy (optionally raw) response parsing."""
        # pylint: disable=import-outside-toplevel
        import io

        import aiohttp
        import elasticsearch
        from elasticsearch.serializer import JSONSerializer

        import esrally.async_connection

        class LazyJSONSerializer(JSONSerializer):
            def loads(self, s):
                meta = RallyAsyncElasticsearch.request_context.get()
                if "raw_response" in meta:
                    # Caller asked for the raw bytes; skip JSON parsing entirely.
                    return io.BytesIO(s)
                else:
                    return super().loads(s)

        async def on_request_start(session, trace_config_ctx, params):
            RallyAsyncElasticsearch.on_request_start()

        async def on_request_end(session, trace_config_ctx, params):
            RallyAsyncElasticsearch.on_request_end()

        trace_config = aiohttp.TraceConfig()
        trace_config.on_request_start.append(on_request_start)
        trace_config.on_request_end.append(on_request_end)
        # Also record an end timestamp when the request terminates with an exception.
        trace_config.on_request_exception.append(on_request_end)

        self.client_options["serializer"] = LazyJSONSerializer()
        self.client_options["trace_config"] = trace_config

        class VerifiedAsyncTransport(elasticsearch.AsyncTransport):
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                # The async client is used in the hot code path with customized
                # overrides; flag it as verified so it is not re-checked later
                # (presumably consumed by calling code -- confirm).
                self._verified_elasticsearch = True

        class RallyAsyncElasticsearch(elasticsearch.AsyncElasticsearch, RequestContextHolder):
            pass

        return RallyAsyncElasticsearch(
            hosts=self.hosts,
            transport_class=VerifiedAsyncTransport,
            connection_class=esrally.async_connection.AIOHttpConnection,
            ssl_context=self.ssl_context,
            **self.client_options,
        )
def wait_for_rest_layer(es, max_attempts=40):
    """Block until the REST API of the cluster behind ``es`` responds.

    Retries up to ``max_attempts`` times, sleeping 3 seconds between failed
    attempts, until a cluster health call sees at least as many nodes as we
    expect to contact.

    :param es: Elasticsearch client instance.
    :param max_attempts: Maximum number of health-check attempts (default 40).
    :return: ``True`` when the REST API became available in time, ``False`` otherwise.
    :raises exceptions.SystemSetupError: when the connection fails with an
        "SSL: UNKNOWN_PROTOCOL" error (http/https mismatch).
    """
    # We expect at least the hosts we were configured to contact to be present.
    expected_node_count = len(es.transport.hosts)
    logger = logging.getLogger(__name__)
    for attempt in range(max_attempts):
        # NOTE(review): this is logged *before* the health check runs, so the
        # wording ("is available") is misleading on failing attempts -- confirm intent.
        logger.debug("REST API is available after %s attempts", attempt)
        # pylint: disable=import-outside-toplevel
        import elasticsearch
        try:
            # see also WaitForHttpResource in Elasticsearch tests. Contrary to the ES tests we consider the API also
            # available when the cluster status is RED (as long as all required nodes are present)
            es.cluster.health(wait_for_nodes=">={}".format(expected_node_count))
            logger.info("REST API is available for >= [%s] nodes after [%s] attempts.", expected_node_count, attempt)
            return True
        except elasticsearch.ConnectionError as e:
            if "SSL: UNKNOWN_PROTOCOL" in str(e):
                raise exceptions.SystemSetupError("Could not connect to cluster via https. Is this an https endpoint?", e)
            else:
                # Transient connectivity problem -- retry after a short pause.
                logger.debug("Got connection error on attempt [%s]. Sleeping...", attempt)
                time.sleep(3)
        except elasticsearch.TransportError as e:
            # cluster block, x-pack not initialized yet, our wait condition is not reached
            if e.status_code in (503, 401, 408):
                logger.debug("Got status code [%s] on attempt [%s]. Sleeping...", e.status_code, attempt)
                time.sleep(3)
            else:
                # Anything else is unexpected -- surface it to the caller.
                logger.warning("Got unexpected status code [%s] on attempt [%s].", e.status_code, attempt)
                raise e
    return False
| true | true |
f71823bb0a9512d2ba7d2b03e46696bf17185a01 | 2,937 | py | Python | cloudfeaster/privacy.py | simonsdave/clf | 643ce7e6ba9bd47c35b235cb24264dbc9024367c | [
"MIT"
] | 4 | 2015-12-17T17:32:23.000Z | 2022-01-02T20:31:08.000Z | cloudfeaster/privacy.py | simonsdave/clf | 643ce7e6ba9bd47c35b235cb24264dbc9024367c | [
"MIT"
] | 61 | 2015-05-25T10:16:55.000Z | 2022-01-15T23:49:38.000Z | cloudfeaster/privacy.py | simonsdave/clf | 643ce7e6ba9bd47c35b235cb24264dbc9024367c | [
"MIT"
] | 2 | 2015-12-10T18:18:10.000Z | 2021-01-30T15:29:13.000Z | """This module exists as a place to centralize functionality and
configuration related to privacy.
"""
import hashlib
import logging
class RedactingFormatter(object):
    """Logging formatter wrapper that redacts crawl args from formatted records.

    Credits - this formatter was heavily inspired by https://relaxdiego.com/2014/07/logging-in-python.html
    """

    @classmethod
    def install_for_all_handlers(cls, crawl_args):
        # :TODO: can this be configured when configuring logging
        # this is inspired by https://gist.github.com/acdha/9238791
        for handler in logging.root.handlers:
            handler.setFormatter(cls(handler.formatter, crawl_args))

    def __init__(self, original_formatter, crawl_args):
        self.original_formatter = original_formatter
        self._patterns_and_replacements = []
        for crawl_arg in crawl_args:
            hashed = hash_crawl_arg(crawl_arg)
            self._patterns_and_replacements.append((crawl_arg, hashed))
            # Also redact the value when its characters appear as a quoted,
            # comma-separated sequence (e.g. '"p", "a", "s", "s"') -- presumably
            # how argument lists show up in some log lines.
            quoted_pattern = '"' + '", "'.join(crawl_arg) + '"'
            quoted_replacement = '"' + '", "'.join(hashed) + '"'
            self._patterns_and_replacements.append((quoted_pattern, quoted_replacement))

    def format(self, record):
        """Format via the wrapped formatter, then redact sensitive substrings."""
        formatted = self.original_formatter.format(record)
        for pattern, replacement in self._patterns_and_replacements:
            formatted = formatted.replace(pattern, replacement)
        return formatted

    def __getattr__(self, attr):
        # Delegate everything else to the wrapped formatter.
        return getattr(self.original_formatter, attr)
class RedactingFilter(logging.Filter):
    """Logging filter that replaces crawl args in log records with their hashes."""

    def __init__(self, crawl_args):
        """
        :param crawl_args: iterable of identifying/authenticating factors that
            must never appear in the logs in clear text.
        """
        super(RedactingFilter, self).__init__()
        self._patterns_and_replacements = []
        for crawl_arg in crawl_args:
            replacement = hash_crawl_arg(crawl_arg)
            self._patterns_and_replacements.append((crawl_arg, replacement))
            # Also redact the value when its characters appear as a quoted,
            # comma-separated sequence (e.g. '"p", "a", "s", "s"') -- presumably
            # how argument lists show up in some log lines.
            pattern = '"' + '", "'.join(crawl_arg) + '"'
            replacement = '"' + '", "'.join(hash_crawl_arg(crawl_arg)) + '"'
            self._patterns_and_replacements.append((pattern, replacement))

    def filter(self, record):
        # Fix: the original called the non-existent ``self.redact()``.
        record.msg = self._redact(record.msg)
        if isinstance(record.args, dict):
            for k in record.args.keys():
                record.args[k] = self._redact(record.args[k])
        elif record.args:
            # ``record.args`` may be None or empty; only rewrite when present.
            record.args = tuple(self._redact(arg) for arg in record.args)
        return True

    def _redact(self, msg):
        # Fix: the original discarded the message (``msg = None``) and then
        # crashed calling ``.replace`` on None. Coerce non-strings instead.
        msg = msg if isinstance(msg, str) else str(msg)
        for (pattern, replacement) in self._patterns_and_replacements:
            msg = msg.replace(pattern, replacement)
        return msg
def hash_crawl_arg(crawl_arg):
    """Hash a crawl argument (an identifying or authenticating factor).

    :return: a string of the form ``<hash function name>:<hash digest>``.
    """
    digest = hashlib.sha256(str(crawl_arg).encode('utf-8'))
    return digest.name + ':' + digest.hexdigest()
| 36.7125 | 106 | 0.657133 |
import hashlib
import logging
class RedactingFormatter(object):
@classmethod
def install_for_all_handlers(self, crawl_args):
for handler in logging.root.handlers:
handler.setFormatter(self(handler.formatter, crawl_args))
def __init__(self, original_formatter, crawl_args):
self.original_formatter = original_formatter
self._patterns_and_replacements = []
for crawl_arg in crawl_args:
replacement = hash_crawl_arg(crawl_arg)
self._patterns_and_replacements.append((crawl_arg, replacement))
pattern = '"' + '", "'.join(crawl_arg) + '"'
replacement = '"' + '", "'.join(hash_crawl_arg(crawl_arg)) + '"'
self._patterns_and_replacements.append((pattern, replacement))
def format(self, record):
msg = self.original_formatter.format(record)
for (pattern, replacement) in self._patterns_and_replacements:
msg = msg.replace(pattern, replacement)
return msg
def __getattr__(self, attr):
return getattr(self.original_formatter, attr)
class RedactingFilter(logging.Filter):
def __init__(self, crawl_args):
super(RedactingFilter, self).__init__()
self._patterns_and_replacements = []
for crawl_arg in crawl_args:
replacement = hash_crawl_arg(crawl_arg)
self._patterns_and_replacements.append((crawl_arg, replacement))
pattern = '"' + '", "'.join(crawl_arg) + '"'
replacement = '"' + '", "'.join(hash_crawl_arg(crawl_arg)) + '"'
self._patterns_and_replacements.append((pattern, replacement))
def filter(self, record):
record.msg = self.redact(record.msg)
if isinstance(record.args, dict):
for k in record.args.keys():
record.args[k] = self._redact(record.args[k])
else:
record.args = tuple(self._redact(arg) for arg in record.args)
return True
def _redact(self, msg):
msg = None
for (pattern, replacement) in self._patterns_and_replacements:
msg = msg.replace(pattern, replacement)
return msg
def hash_crawl_arg(crawl_arg):
hash = hashlib.sha256(str(crawl_arg).encode('utf-8'))
return '{hash_name}:{hash_digest}'.format(hash_name=hash.name, hash_digest=hash.hexdigest())
| true | true |
f718246c9b97a010fbc0d6588245bd4852b549f4 | 1,163 | py | Python | www/tests/test_import.py | sejalseth/brython | 0b59368eac40a3b1eef7b13f2102b18cb5629687 | [
"BSD-3-Clause"
] | 5,926 | 2015-01-01T07:45:08.000Z | 2022-03-31T12:34:38.000Z | www/tests/test_import.py | sejalseth/brython | 0b59368eac40a3b1eef7b13f2102b18cb5629687 | [
"BSD-3-Clause"
] | 1,728 | 2015-01-01T01:09:12.000Z | 2022-03-30T23:25:22.000Z | www/tests/test_import.py | sejalseth/brython | 0b59368eac40a3b1eef7b13f2102b18cb5629687 | [
"BSD-3-Clause"
] | 574 | 2015-01-02T01:36:10.000Z | 2022-03-26T10:18:48.000Z | import simple
class Simple2:
    """Stand-alone local class, unrelated to the imported ``simple`` module."""
    def __init__(self):
        self.info = "SimpleClass2"
class Simple3(simple.Simple):
    """Subclass of the imported ``simple.Simple``; defers to the parent initializer."""
    def __init__(self):
        # Explicit parent-class call (rather than super()) -- exercises
        # invoking the imported class's __init__ directly.
        simple.Simple.__init__(self)
text = "text in simple"
assert simple.text == text
# Instances of the imported class and of a local subclass must share state set
# by simple.Simple.__init__.
_s = simple.Simple()
_s3 = Simple3()
assert _s.info == _s3.info
import recursive_import
_s = recursive_import.myClass()
assert str(_s) == "success!"
# Submodule imports of the same package must see consistent module state.
import from_import_test.b
assert from_import_test.b.v == 1
import from_import_test.c
assert from_import_test.c.v == 1
# test of keyword "global" in functions of an imported module
import global_in_imported
assert global_in_imported.X == 15
from delegator import Delegator
delegate = Delegator([])
# issue 768
import modtest
# issue 1261
import colorsys
colorsys.ONE_THIRD # no AttributeError
from colorsys import *
# ONE_THIRD is reachable as an attribute but must NOT be bound by the star
# import (it is filtered out of the star-imported names).
try:
    ONE_THIRD
    raise Exception("should have raised NameError")
except NameError:
    pass
# use "__getattr__" and "__dir__" at module level (PEP 562)
assert simple.strange == "a strange name"
assert dir(simple) == ["Simple", "text", "strange", "unknown"]
# issue 1483
from foobar import *
assert str(Foo()) == "foo"
print('passed all tests')
| 18.758065 | 62 | 0.72485 | import simple
class Simple2:
def __init__(self):
self.info = "SimpleClass2"
class Simple3(simple.Simple):
def __init__(self):
simple.Simple.__init__(self)
text = "text in simple"
assert simple.text == text
_s = simple.Simple()
_s3 = Simple3()
assert _s.info == _s3.info
import recursive_import
_s = recursive_import.myClass()
assert str(_s) == "success!"
import from_import_test.b
assert from_import_test.b.v == 1
import from_import_test.c
assert from_import_test.c.v == 1
import global_in_imported
assert global_in_imported.X == 15
from delegator import Delegator
delegate = Delegator([])
import modtest
import colorsys
colorsys.ONE_THIRD
from colorsys import *
try:
ONE_THIRD
raise Exception("should have raised NameError")
except NameError:
pass
assert simple.strange == "a strange name"
assert dir(simple) == ["Simple", "text", "strange", "unknown"]
from foobar import *
assert str(Foo()) == "foo"
print('passed all tests')
| true | true |
f71824b21dd9aad49d682f6da45462de71c6c6b0 | 403 | py | Python | mozillians/api/urls.py | caktus/mozillians | 312eb5d993b60092fa4f8eb94548c1db4b21fa01 | [
"BSD-3-Clause"
] | null | null | null | mozillians/api/urls.py | caktus/mozillians | 312eb5d993b60092fa4f8eb94548c1db4b21fa01 | [
"BSD-3-Clause"
] | null | null | null | mozillians/api/urls.py | caktus/mozillians | 312eb5d993b60092fa4f8eb94548c1db4b21fa01 | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import include, patterns, url
from tastypie.api import Api
import mozillians.groups.api
import mozillians.users.api
v1_api = Api(api_name='v1')
v1_api.register(mozillians.users.api.UserResource())
v1_api.register(mozillians.groups.api.GroupResource())
v1_api.register(mozillians.groups.api.SkillResource())
urlpatterns = patterns(
'',
url(r'', include(v1_api.urls)),)
| 23.705882 | 54 | 0.769231 | from django.conf.urls import include, patterns, url
from tastypie.api import Api
import mozillians.groups.api
import mozillians.users.api
v1_api = Api(api_name='v1')
v1_api.register(mozillians.users.api.UserResource())
v1_api.register(mozillians.groups.api.GroupResource())
v1_api.register(mozillians.groups.api.SkillResource())
urlpatterns = patterns(
'',
url(r'', include(v1_api.urls)),)
| true | true |
f71826ef8902c67bed889b8698b64504138920f2 | 10,060 | py | Python | graspologic/layouts/render.py | tliu68/graspologic | d1cf7678bc63ab9769828a82a90f66bf1dfa0eff | [
"MIT"
] | 1 | 2021-07-06T15:36:27.000Z | 2021-07-06T15:36:27.000Z | graspologic/layouts/render.py | tliu68/graspologic | d1cf7678bc63ab9769828a82a90f66bf1dfa0eff | [
"MIT"
] | null | null | null | graspologic/layouts/render.py | tliu68/graspologic | d1cf7678bc63ab9769828a82a90f66bf1dfa0eff | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import networkx as nx
from typing import Any, Dict, List, Optional, Tuple
from graspologic.layouts.classes import NodePosition
import matplotlib.pyplot as plt
def _calculate_x_y_domain(
positions: List[NodePosition],
) -> Tuple[Tuple[float, float], Tuple[float, float]]:
"""calculate the overall x/y domain, converting to a square
so we can have a consistent scale
"""
min_x = min_y = float("inf")
max_x = max_y = float("-inf")
for node_position in positions:
min_x = min(min_x, node_position.x - node_position.size)
max_x = max(max_x, node_position.x + node_position.size)
min_y = min(min_y, node_position.y - node_position.size)
max_y = max(max_y, node_position.y + node_position.size)
x_delta = max_x - min_x
y_delta = max_y - min_y
max_delta = max(x_delta, y_delta)
if max_delta == x_delta:
difference = (max_delta - y_delta) / 2
min_y = min_y - difference
max_y = max_y + difference
elif max_delta == y_delta:
difference = (max_delta - x_delta) / 2
min_x = min_x - difference
max_x = max_x + difference
return (min_x, max_x), (min_y, max_y)
def _scale_value(
domain: Tuple[float, float], data_range: Tuple[float, float], value: float
) -> float:
return data_range[0] + (data_range[1] - data_range[0]) * (
(value - domain[0]) / (domain[1] - domain[0])
)
def _scale_node_sizes_for_rendering(
    sizes: List[float],
    spatial_domain: Tuple[float, float],
    spatial_range: Tuple[float, float],
    dpi: float,
):
    """Scale layout-space node sizes into rendered marker sizes.

    The underlying viz framework expects node size as the bounding box of the
    rendered marker in points, so each size is doubled (radius -> diameter),
    converted from pixels to points (72 points per inch, i.e. multiplied by
    72 / dpi), mapped through the spatial domain into the pixel range, and
    finally squared (matplotlib node sizes are in points**2).
    """
    # Use a zero-based domain of the same width as the spatial domain.
    zero_based_domain = (0, spatial_domain[1] - spatial_domain[0])
    scaled_sizes = []
    for size in sizes:
        in_points = size * 2 * 72.0 / dpi
        scaled_sizes.append(_scale_value(zero_based_domain, spatial_range, in_points) ** 2)
    return scaled_sizes
def _draw_graph(
    graph: nx.Graph,
    positions: List[NodePosition],
    node_colors: Dict[Any, str],
    vertex_alpha: float,
    edge_line_width: float,
    edge_alpha: float,
    figure_width: float,
    figure_height: float,
    vertex_line_width: float = 0.01,
    vertex_shape: str = "o",
    arrows: bool = False,
    dpi: int = 100,
):
    """Render ``graph`` onto the current matplotlib figure.

    Validates that ``positions`` matches the graph's node set, sizes the
    figure, scales node sizes from layout space into rendered points, and
    draws edges (colored like their source node) before nodes.

    :raises ValueError: if ``positions`` and ``graph`` disagree on the node set.
    """
    if len(positions) != len(graph.nodes()):
        raise ValueError(
            f"The number of positions provided {len(positions)} is not the same as the "
            f"number of nodes in the graph {len(graph.nodes())}"
        )
    for position in positions:
        if position.node_id not in graph:
            raise ValueError(
                f"The node position provided for {position.node_id} references a node "
                f"not found in our graph"
            )
    plt.rcParams["figure.dpi"] = dpi  # TODO, test at different dpi
    plt.clf()
    figure = plt.gcf()
    ax = plt.gca()
    ax.set_axis_off()
    figure.set_size_inches(figure_width, figure_height)
    # Rendered axes width in pixels; used as the target range when scaling node sizes.
    window_extent_width = ax.get_window_extent().width
    x_domain, y_domain = _calculate_x_y_domain(positions)
    position_map = {position.node_id: position for position in positions}
    node_positions = {
        position.node_id: (position.x, position.y) for position in positions
    }
    vertices = []
    vertex_sizes = []
    node_color_list = []
    edge_color_list = []
    for node in graph.nodes():
        vertices.append(node)
        vertex_sizes.append(position_map[node].size)
        node_color_list.append(node_colors[node])
    vertex_sizes = _scale_node_sizes_for_rendering(
        vertex_sizes, x_domain, (0, window_extent_width), dpi
    )
    # Each edge is drawn in the color of its *source* node.
    for source, target in graph.edges():
        edge_color_list.append(node_colors[source])
    # Fix the axis limits to the square domain before drawing.
    ax.set_xbound(x_domain)
    ax.set_xlim(x_domain)
    ax.set_ybound(y_domain)
    ax.set_ylim(y_domain)
    # Draw edges first so nodes are rendered on top of them.
    nx.draw_networkx_edges(
        graph,
        pos=node_positions,
        alpha=edge_alpha,
        width=edge_line_width,
        edge_color=edge_color_list,
        arrows=arrows,
        ax=ax,
    )
    nx.draw_networkx_nodes(
        graph,
        pos=node_positions,
        nodelist=vertices,
        node_color=node_color_list,
        alpha=vertex_alpha,
        linewidths=vertex_line_width,
        node_size=vertex_sizes,
        node_shape=vertex_shape,
        ax=ax,
    )
def show_graph(
    graph: nx.Graph,
    positions: List[NodePosition],
    node_colors: Dict[Any, str],
    vertex_line_width: float = 0.01,
    vertex_alpha: float = 0.55,
    edge_line_width: float = 0.5,
    edge_alpha: float = 0.02,
    figure_width: float = 15.0,
    figure_height: float = 15.0,
    light_background: bool = True,
    vertex_shape: str = "o",
    arrows: bool = False,
    dpi: int = 500,
):
    """
    Renders and displays a graph.

    Attempts to display it via the platform-specific display library such as TkInter

    Edges will be displayed with the same color as the source node.

    Parameters
    ----------
    graph : nx.Graph
        The graph to be displayed. If the networkx Graph contains only nodes, no
        edges will be displayed.
    positions : List[:class:`graspologic.layouts.NodePosition`]
        The positions for every node in the graph.
    node_colors : Dict[Any, str]
        A mapping of node id to colors. Must contain an entry for every node in the
        graph.
    vertex_line_width : float
        Line width of vertex outline. Default is ``0.01``.
    vertex_alpha : float
        Alpha (transparency) of vertices in visualization. Default is ``0.55``.
    edge_line_width : float
        Line width of edge. Default is ``0.5``.
    edge_alpha : float
        Alpha (transparency) of edges in visualization. Default is ``0.02``.
    figure_width : float
        Width of figure. Default is ``15.0``.
    figure_height : float
        Height of figure. Default is ``15.0``.
    light_background : bool
        Light background or dark background. Default is ``True``.
    vertex_shape : str
        Matplotlib Marker for the vertex shape. See
        `https://matplotlib.org/api/markers_api.html <https://matplotlib.org/api/markers_api.html>`_
        for a list of allowed values. Default is ``o`` (i.e: a circle)
    arrows : bool
        For directed graphs, if ``True``, draw arrow heads. Default is ``False``
    dpi : float
        Dots per inch of the figure. Default is ``500``.
    """
    # Capture the facecolor before _draw_graph clears the figure (plt.clf()).
    ax = plt.gca()
    if light_background:
        facecolor = ax.get_facecolor()
    else:
        facecolor = "#030303"
    _draw_graph(
        graph=graph,
        positions=positions,
        node_colors=node_colors,
        vertex_line_width=vertex_line_width,
        vertex_alpha=vertex_alpha,
        edge_line_width=edge_line_width,
        edge_alpha=edge_alpha,
        figure_width=figure_width,
        figure_height=figure_height,
        vertex_shape=vertex_shape,
        arrows=arrows,
        dpi=dpi,
    )
    plt.gcf().set_facecolor(facecolor)
    plt.show()
    # Release all figures so repeated calls do not accumulate memory.
    plt.close("all")
def save_graph(
    output_path: str,
    graph: nx.Graph,
    positions: List[NodePosition],
    node_colors: Dict[Any, str],
    vertex_line_width: float = 0.01,
    vertex_alpha: float = 0.55,
    edge_line_width: float = 0.5,
    edge_alpha: float = 0.02,
    figure_width: float = 15.0,
    figure_height: float = 15.0,
    light_background: bool = True,
    vertex_shape: str = "o",
    arrows: bool = False,
    dpi: int = 100,
):
    """
    Renders a graph to file.

    Edges will be displayed with the same color as the source node.

    Parameters
    ----------
    output_path : str
        The output path to write the rendered graph to. Suggested file extension is
        ``.png``.
    graph : nx.Graph
        The graph to be displayed. If the networkx Graph contains only nodes, no
        edges will be displayed.
    positions : List[:class:`graspologic.layouts.NodePosition`]
        The positions for every node in the graph.
    node_colors : Dict[Any, str]
        A mapping of node id to colors. Must contain an entry for every node in the
        graph.
    vertex_line_width : float
        Line width of vertex outline. Default is ``0.01``.
    vertex_alpha : float
        Alpha (transparency) of vertices in visualization. Default is ``0.55``.
    edge_line_width : float
        Line width of edge. Default is ``0.5``.
    edge_alpha : float
        Alpha (transparency) of edges in visualization. Default is ``0.02``.
    figure_width : float
        Width of figure. Default is ``15.0``.
    figure_height : float
        Height of figure. Default is ``15.0``.
    light_background : bool
        Light background or dark background. Default is ``True``.
    vertex_shape : str
        Matplotlib Marker for the vertex shape. See
        `https://matplotlib.org/api/markers_api.html <https://matplotlib.org/api/markers_api.html>`_
        for a list of allowed values. Default is ``o`` (i.e: a circle)
    arrows : bool
        For directed graphs, if ``True``, draw arrow heads. Default is ``False``
    dpi : float
        Dots per inch of the figure. Default is ``100``.
    """
    _draw_graph(
        graph=graph,
        positions=positions,
        node_colors=node_colors,
        vertex_line_width=vertex_line_width,
        vertex_alpha=vertex_alpha,
        edge_line_width=edge_line_width,
        edge_alpha=edge_alpha,
        figure_width=figure_width,
        figure_height=figure_height,
        vertex_shape=vertex_shape,
        arrows=arrows,
        dpi=dpi,
    )
    # Determine the background color from the (already drawn) axes.
    ax = plt.gca()
    if light_background:
        facecolor = ax.get_facecolor()
    else:
        facecolor = "#030303"
    plt.savefig(output_path, facecolor=facecolor)
    # Release all figures so repeated calls do not accumulate memory.
    plt.close("all")
| 31.53605 | 103 | 0.641451 |
import networkx as nx
from typing import Any, Dict, List, Optional, Tuple
from graspologic.layouts.classes import NodePosition
import matplotlib.pyplot as plt
def _calculate_x_y_domain(
positions: List[NodePosition],
) -> Tuple[Tuple[float, float], Tuple[float, float]]:
min_x = min_y = float("inf")
max_x = max_y = float("-inf")
for node_position in positions:
min_x = min(min_x, node_position.x - node_position.size)
max_x = max(max_x, node_position.x + node_position.size)
min_y = min(min_y, node_position.y - node_position.size)
max_y = max(max_y, node_position.y + node_position.size)
x_delta = max_x - min_x
y_delta = max_y - min_y
max_delta = max(x_delta, y_delta)
if max_delta == x_delta:
difference = (max_delta - y_delta) / 2
min_y = min_y - difference
max_y = max_y + difference
elif max_delta == y_delta:
difference = (max_delta - x_delta) / 2
min_x = min_x - difference
max_x = max_x + difference
return (min_x, max_x), (min_y, max_y)
def _scale_value(
domain: Tuple[float, float], data_range: Tuple[float, float], value: float
) -> float:
return data_range[0] + (data_range[1] - data_range[0]) * (
(value - domain[0]) / (domain[1] - domain[0])
)
def _scale_node_sizes_for_rendering(
sizes: List[float],
spatial_domain: Tuple[float, float],
spatial_range: Tuple[float, float],
dpi: float,
):
spatial_domain = (0, spatial_domain[1] - spatial_domain[0])
return list(
map(
lambda s: _scale_value(spatial_domain, spatial_range, s * 2 * 72.0 / dpi)
** 2,
sizes,
)
)
def _draw_graph(
graph: nx.Graph,
positions: List[NodePosition],
node_colors: Dict[Any, str],
vertex_alpha: float,
edge_line_width: float,
edge_alpha: float,
figure_width: float,
figure_height: float,
vertex_line_width: float = 0.01,
vertex_shape: str = "o",
arrows: bool = False,
dpi: int = 100,
):
if len(positions) != len(graph.nodes()):
raise ValueError(
f"The number of positions provided {len(positions)} is not the same as the "
f"number of nodes in the graph {len(graph.nodes())}"
)
for position in positions:
if position.node_id not in graph:
raise ValueError(
f"The node position provided for {position.node_id} references a node "
f"not found in our graph"
)
plt.rcParams["figure.dpi"] = dpi
plt.clf()
figure = plt.gcf()
ax = plt.gca()
ax.set_axis_off()
figure.set_size_inches(figure_width, figure_height)
window_extent_width = ax.get_window_extent().width
x_domain, y_domain = _calculate_x_y_domain(positions)
position_map = {position.node_id: position for position in positions}
node_positions = {
position.node_id: (position.x, position.y) for position in positions
}
vertices = []
vertex_sizes = []
node_color_list = []
edge_color_list = []
for node in graph.nodes():
vertices.append(node)
vertex_sizes.append(position_map[node].size)
node_color_list.append(node_colors[node])
vertex_sizes = _scale_node_sizes_for_rendering(
vertex_sizes, x_domain, (0, window_extent_width), dpi
)
for source, target in graph.edges():
edge_color_list.append(node_colors[source])
ax.set_xbound(x_domain)
ax.set_xlim(x_domain)
ax.set_ybound(y_domain)
ax.set_ylim(y_domain)
nx.draw_networkx_edges(
graph,
pos=node_positions,
alpha=edge_alpha,
width=edge_line_width,
edge_color=edge_color_list,
arrows=arrows,
ax=ax,
)
nx.draw_networkx_nodes(
graph,
pos=node_positions,
nodelist=vertices,
node_color=node_color_list,
alpha=vertex_alpha,
linewidths=vertex_line_width,
node_size=vertex_sizes,
node_shape=vertex_shape,
ax=ax,
)
def show_graph(
graph: nx.Graph,
positions: List[NodePosition],
node_colors: Dict[Any, str],
vertex_line_width: float = 0.01,
vertex_alpha: float = 0.55,
edge_line_width: float = 0.5,
edge_alpha: float = 0.02,
figure_width: float = 15.0,
figure_height: float = 15.0,
light_background: bool = True,
vertex_shape: str = "o",
arrows: bool = False,
dpi: int = 500,
):
ax = plt.gca()
if light_background:
facecolor = ax.get_facecolor()
else:
facecolor = "#030303"
_draw_graph(
graph=graph,
positions=positions,
node_colors=node_colors,
vertex_line_width=vertex_line_width,
vertex_alpha=vertex_alpha,
edge_line_width=edge_line_width,
edge_alpha=edge_alpha,
figure_width=figure_width,
figure_height=figure_height,
vertex_shape=vertex_shape,
arrows=arrows,
dpi=dpi,
)
plt.gcf().set_facecolor(facecolor)
plt.show()
plt.close("all")
def save_graph(
output_path: str,
graph: nx.Graph,
positions: List[NodePosition],
node_colors: Dict[Any, str],
vertex_line_width: float = 0.01,
vertex_alpha: float = 0.55,
edge_line_width: float = 0.5,
edge_alpha: float = 0.02,
figure_width: float = 15.0,
figure_height: float = 15.0,
light_background: bool = True,
vertex_shape: str = "o",
arrows: bool = False,
dpi: int = 100,
):
_draw_graph(
graph=graph,
positions=positions,
node_colors=node_colors,
vertex_line_width=vertex_line_width,
vertex_alpha=vertex_alpha,
edge_line_width=edge_line_width,
edge_alpha=edge_alpha,
figure_width=figure_width,
figure_height=figure_height,
vertex_shape=vertex_shape,
arrows=arrows,
dpi=dpi,
)
ax = plt.gca()
if light_background:
facecolor = ax.get_facecolor()
else:
facecolor = "#030303"
plt.savefig(output_path, facecolor=facecolor)
plt.close("all")
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.