hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 958k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c479e5971d949fcf67c534f48a3d16b3e4c4a28 | 2,063 | py | Python | zfused_maya/zfused_maya/core/color.py | qinningfx/zfused_outsource | bfc5558f05e3d6005653794a47bd863b61b009b1 | [
"Apache-2.0"
] | 2 | 2019-02-22T03:33:26.000Z | 2019-02-23T03:29:26.000Z | zfused_maya/zfused_maya/core/color.py | qinningfx/zfused_outsource | bfc5558f05e3d6005653794a47bd863b61b009b1 | [
"Apache-2.0"
] | null | null | null | zfused_maya/zfused_maya/core/color.py | qinningfx/zfused_outsource | bfc5558f05e3d6005653794a47bd863b61b009b1 | [
"Apache-2.0"
] | null | null | null | # coding:utf-8
# --author-- lanhua.zhou
import os
import json
import logging
__all__ = ["get_component_color_data", "LetterColor"]
DIRNAME = os.path.dirname(__file__)
MENU_DIRNAME = os.path.dirname(os.path.dirname(DIRNAME))
COMPONENT_COLOR_FILE = "{}/conf/componentcolor.json".format(MENU_DIRNAME)
logger = logging.getLogger(__name__)
def get_component_color_data():
    """Load component color data from the JSON configuration file.

    Returns:
        The parsed JSON content of ``COMPONENT_COLOR_FILE`` (a list of
        component color definitions).

    Raises:
        IOError: If the configuration file cannot be opened.
        ValueError: If the file does not contain valid JSON.
    """
    # Original docstring/log said "menu scripts" — copy-paste from a menu
    # module; this function reads component colors.
    logger.info("read component color json file data")
    with open(COMPONENT_COLOR_FILE, "r") as _file_handle:
        # json.load parses straight from the file handle; no need for an
        # intermediate read() buffer.
        return json.load(_file_handle)
class LetterColor(object):
    """Maps a Latin letter to a fixed hex color string.

    The palette is keyed by lowercase letters a-z; lookups are
    case-insensitive.
    """

    # Fixed palette: one hex color per lowercase letter.
    _color_dict = {
        "a": "#E5A3B4",
        "b": "#EDC89A",
        "c": "#F2F08F",
        "d": "#E0E67A",
        "e": "#BBDB97",
        "f": "#ACD9BA",
        "g": "#A1DAE1",
        "h": "#C19FCA",
        "i": "#CF2027",
        "j": "#D96927",
        "k": "#ECDA42",
        "l": "#A5C33B",
        "m": "#77C258",
        "n": "#54958C",
        "o": "#486EB6",
        "p": "#77449A",
        "q": "#7F7E80",
        "r": "#7C1214",
        "s": "#83421B",
        "t": "#86792F",
        "u": "#587232",
        "v": "#417135",
        "w": "#3D6C4C",
        "x": "#253676",
        "y": "#462165",
        "z": "#1D1D1D"
    }

    @classmethod
    def color(cls, letter):
        """Return the hex color string (e.g. ``"#E5A3B4"``) for *letter*.

        Args:
            letter: A single Latin letter; upper- or lowercase.

        Raises:
            KeyError: If *letter* is not a letter a-z (after lowercasing).
        """
        # Normalize so 'A' and 'a' map to the same palette entry
        # (previously uppercase input raised KeyError).
        return cls._color_dict[letter.lower()]
def convert(value):
    """Convert between an RGB tuple and a ``'#RRGGBB'`` hex string.

    Args:
        value: Either a 3-tuple of ints in 0-255 (converted to a hex
            string), or a ``'#RRGGBB'`` string (converted to an int
            tuple). Hex digits may be upper- or lowercase.

    Returns:
        str or tuple: The converted color value, or None for any other
        input type (matches the original implicit behavior).
    """
    digit = list(map(str, range(10))) + list("ABCDEF")
    if isinstance(value, tuple):
        string = '#'
        for component in value:
            # Split each 0-255 channel into its two hex nibbles.
            high, low = divmod(component, 16)
            string += digit[high] + digit[low]
        return string
    elif isinstance(value, str):
        # Uppercase first so lowercase hex input ('#e5a3b4') works;
        # previously digit.index('e') raised ValueError.
        value = value.upper()
        red = digit.index(value[1]) * 16 + digit.index(value[2])
        green = digit.index(value[3]) * 16 + digit.index(value[4])
        blue = digit.index(value[5]) * 16 + digit.index(value[6])
        return (red, green, blue)
import os
import json
import logging
__all__ = ["get_component_color_data", "LetterColor"]
DIRNAME = os.path.dirname(__file__)
MENU_DIRNAME = os.path.dirname(os.path.dirname(DIRNAME))
COMPONENT_COLOR_FILE = "{}/conf/componentcolor.json".format(MENU_DIRNAME)
logger = logging.getLogger(__name__)
def get_component_color_data():
_menu_data = []
logger.info("read menu json file data")
with open(COMPONENT_COLOR_FILE, "r") as _file_handle:
_data = _file_handle.read()
_menu_data = json.loads(_data)
return _menu_data
class LetterColor(object):
_color_dict = {
"a":"#E5A3B4",
"b":"#EDC89A",
"c":"#F2F08F",
"d":"#E0E67A",
"e":"#BBDB97",
"f":"#ACD9BA",
"g":"#A1DAE1",
"h":"#C19FCA",
"i":"#CF2027",
"j":"#D96927",
"k":"#ECDA42",
"l":"#A5C33B",
"m":"#77C258",
"n":"#54958C",
"o":"#486EB6",
"p":"#77449A",
"q":"#7F7E80",
"r":"#7C1214",
"s":"#83421B",
"t":"#86792F",
"u":"#587232",
"v":"#417135",
"w":"#3D6C4C",
"x":"#253676",
"y":"#462165",
"z":"#1D1D1D"
}
@classmethod
def color(cls, letter):
return cls._color_dict[letter]
def convert(value):
digit = list(map(str, range(10))) + list("ABCDEF")
if isinstance(value, tuple):
string = '#'
for i in value:
a1 = i // 16
a2 = i % 16
string += digit[a1] + digit[a2]
return string
elif isinstance(value, str):
a1 = digit.index(value[1]) * 16 + digit.index(value[2])
a2 = digit.index(value[3]) * 16 + digit.index(value[4])
a3 = digit.index(value[5]) * 16 + digit.index(value[6])
return (a1, a2, a3) | true | true |
1c479e9907ca8ed897efe7210ee012940850571b | 181 | py | Python | DJANGO PROJECT/Configurator/ConfigWebApp/forms.py | BobbyElmes/Fusion-Configurator-Source-Code | 08e6c14789a2e8d073b312422ce893ee463369f5 | [
"MIT"
] | null | null | null | DJANGO PROJECT/Configurator/ConfigWebApp/forms.py | BobbyElmes/Fusion-Configurator-Source-Code | 08e6c14789a2e8d073b312422ce893ee463369f5 | [
"MIT"
] | null | null | null | DJANGO PROJECT/Configurator/ConfigWebApp/forms.py | BobbyElmes/Fusion-Configurator-Source-Code | 08e6c14789a2e8d073b312422ce893ee463369f5 | [
"MIT"
] | null | null | null | from django import forms
class Register(forms.Form):
    """Registration form capturing a username and password.

    Both fields are capped at 35 characters. The password field uses a
    password input widget so the value is masked in the browser instead
    of being rendered as plain text.
    """
    username = forms.CharField(label='username', max_length=35)
    # widget=PasswordInput masks the typed value on the rendered form.
    password = forms.CharField(
        label='password', max_length=35, widget=forms.PasswordInput)
class Register(forms.Form):
username = forms.CharField(label='username', max_length=35)
password = forms.CharField(label='password', max_length=35) | true | true |
1c479fcc08d0b2f40c0963da403abaa4ff01ae81 | 4,853 | py | Python | qa/rpc-tests/httpbasics.py | PapicoinProject/Papicoin | c971fcd1f81d07fe9de2e2c3893f362d9a8529f5 | [
"MIT"
] | 1 | 2022-03-19T16:50:57.000Z | 2022-03-19T16:50:57.000Z | qa/rpc-tests/httpbasics.py | PapicoinProject/Papicoin | c971fcd1f81d07fe9de2e2c3893f362d9a8529f5 | [
"MIT"
] | null | null | null | qa/rpc-tests/httpbasics.py | PapicoinProject/Papicoin | c971fcd1f81d07fe9de2e2c3893f362d9a8529f5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test rpc http basics
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import http.client
import urllib.parse
class HTTPBasicsTest (BitcoinTestFramework):
    """Functional test for the JSON-RPC HTTP layer.

    Exercises HTTP/1.1 persistent-connection behaviour with raw
    http.client connections: default keep-alive, explicit keep-alive,
    explicit "Connection: close", a node started with keep-alive
    disabled, and rejection of oversized request URIs.
    """
    def __init__(self):
        super().__init__()
        # Three nodes: per the comments below, node1 runs with
        # keep-alive disabled; node0/node2 use default settings.
        self.num_nodes = 3
        self.setup_clean_chain = False
    def setup_network(self):
        self.nodes = self.setup_nodes()
    def run_test(self):
        """Drive raw HTTP requests against each node's RPC port."""
        #################################################
        # lowlevel check for http persistent connection #
        #################################################
        url = urllib.parse.urlparse(self.nodes[0].url)
        authpair = url.username + ':' + url.password
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)
        assert(conn.sock!=None) #according to http/1.1 connection must still be open!
        #send 2nd request without closing connection
        conn.request('POST', '/', '{"method": "getchaintips"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1) #must also response with a correct json-rpc message
        assert(conn.sock!=None) #according to http/1.1 connection must still be open!
        conn.close()
        #same should be if we add keep-alive because this should be the std. behaviour
        headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)
        assert(conn.sock!=None) #according to http/1.1 connection must still be open!
        #send 2nd request without closing connection
        conn.request('POST', '/', '{"method": "getchaintips"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1) #must also response with a correct json-rpc message
        assert(conn.sock!=None) #according to http/1.1 connection must still be open!
        conn.close()
        #now do the same with "Connection: close"
        headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)
        assert(conn.sock==None) #now the connection must be closed after the response
        #node1 (2nd node) is running with disabled keep-alive option
        urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
        authpair = urlNode1.username + ':' + urlNode1.password
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
        conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)
        #node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
        urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
        authpair = urlNode2.username + ':' + urlNode2.password
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
        conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)
        assert(conn.sock!=None) #connection must be closed because bitcoind should use keep-alive by default
        # Check excessive request size
        # A 1000-char path is expected to be routed (404); a 10000-char
        # path must be rejected outright (400 Bad Request).
        conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
        conn.connect()
        conn.request('GET', '/' + ('x'*1000), '', headers)
        out1 = conn.getresponse()
        assert_equal(out1.status, http.client.NOT_FOUND)
        conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
        conn.connect()
        conn.request('GET', '/' + ('x'*10000), '', headers)
        out1 = conn.getresponse()
        assert_equal(out1.status, http.client.BAD_REQUEST)
# Script entry point (PEP 8: no space before the call parentheses).
if __name__ == '__main__':
    HTTPBasicsTest().main()
| 42.570175 | 108 | 0.632186 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import http.client
import urllib.parse
class HTTPBasicsTest (BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 3
self.setup_clean_chain = False
def setup_network(self):
self.nodes = self.setup_nodes()
def run_test(self):
thpair)}
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None)
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*1000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.NOT_FOUND)
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*10000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.BAD_REQUEST)
if __name__ == '__main__':
HTTPBasicsTest ().main ()
| true | true |
1c47a0df09096f9bfb11acb7116db2b3e4c3ba4a | 1,076 | py | Python | api/views.py | masoodmomin/django-react-todoapp | 06fb4f7603bba726e6b0b13cf7dfc5e0aa068f0c | [
"MIT"
] | 1 | 2020-12-06T12:32:23.000Z | 2020-12-06T12:32:23.000Z | api/views.py | masoodmomin/django-react-todoapp | 06fb4f7603bba726e6b0b13cf7dfc5e0aa068f0c | [
"MIT"
] | null | null | null | api/views.py | masoodmomin/django-react-todoapp | 06fb4f7603bba726e6b0b13cf7dfc5e0aa068f0c | [
"MIT"
] | null | null | null | from django.http import request
from django.shortcuts import render
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .serializers import TodoSerializer
from .models import Todo
from django.http import JsonResponse
@api_view(['GET'])
def all(request):
    """Return every Todo, serialized, as a JSON array response."""
    # NOTE(review): the name shadows the builtin `all`, but it is kept
    # because URL configs elsewhere reference this view by name.
    serializer = TodoSerializer(Todo.objects.all(), many=True)
    # safe=False permits a top-level JSON array (not just an object).
    return JsonResponse(serializer.data, safe=False)
@api_view(['POST'])
def create(request):
    """Create a Todo from the request payload.

    Returns:
        Response: a success message on creation, or the serializer's
        validation errors with HTTP 400 when the payload is invalid
        (previously invalid input was not reported as an error).
    """
    serializer = TodoSerializer(data=request.data)
    if serializer.is_valid():
        serializer.save()
        return Response("Created successfully.")
    # Surface validation problems instead of silently claiming success.
    return Response(serializer.errors, status=400)
@api_view(['DELETE'])
def delete(request):
    """Delete the Todo whose text matches request.data['text'].

    Returns:
        Response: a success message, or a 404 response when no Todo
        with the given text exists (previously this raised an unhandled
        DoesNotExist and produced a server error).
    """
    text = request.data["text"]
    try:
        todo = Todo.objects.get(text=text)
    except Todo.DoesNotExist:
        return Response("Todo not found.", status=404)
    todo.delete()
    return Response("Deleted successfully.")
@api_view(['PUT'])
def status(request):
    """Update the Todo identified by request.data['text'].

    Returns:
        Response: the saved serializer data on success, or the
        validation errors with HTTP 400 when the payload is invalid
        (previously the invalid path could return unsaved data or no
        response at all).
    """
    text = request.data["text"]
    todo = Todo.objects.get(text=text)
    serializer = TodoSerializer(instance=todo, data=request.data)
    if serializer.is_valid():
        serializer.save()
        return Response(serializer.data)
    return Response(serializer.errors, status=400)
| 29.081081 | 66 | 0.727695 | from django.http import request
from django.shortcuts import render
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .serializers import TodoSerializer
from .models import Todo
from django.http import JsonResponse
@api_view(['GET'])
def all(request):
todo = Todo.objects.all()
serializer = TodoSerializer(todo, many=True)
return JsonResponse(serializer.data, safe=False)
@api_view(['POST'])
def create(request):
serializer = TodoSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response("Created successfully.")
@api_view(['DELETE'])
def delete(request):
text = request.data["text"]
todo = Todo.objects.get(text=text)
todo.delete()
return Response("Deleted successfully.")
@api_view(['PUT'])
def status(request):
text = request.data["text"]
todo = Todo.objects.get(text=text)
serializer = TodoSerializer(instance = todo,data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
| true | true |
1c47a10742a03a90e69f50b632ec06af813dc613 | 18,268 | py | Python | core/controllers/suggestion.py | ReshuKumari/oppia | cb89b633275b3d0b2d02e0d22e0c472d8b8da0e1 | [
"Apache-2.0"
] | null | null | null | core/controllers/suggestion.py | ReshuKumari/oppia | cb89b633275b3d0b2d02e0d22e0c472d8b8da0e1 | [
"Apache-2.0"
] | null | null | null | core/controllers/suggestion.py | ReshuKumari/oppia | cb89b633275b3d0b2d02e0d22e0c472d8b8da0e1 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for suggestions."""
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from constants import constants
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import fs_services
from core.domain import html_cleaner
from core.domain import image_validation_services
from core.domain import opportunity_services
from core.domain import skill_fetchers
from core.domain import state_domain
from core.domain import suggestion_services
import feconf
import python_utils
import utils
def _get_target_id_to_exploration_opportunity_dict(suggestions):
    """Builds a mapping from suggestion target IDs to exploration
    opportunity summary dicts.

    Args:
        suggestions: list(BaseSuggestion). Suggestions whose targets'
            opportunity summaries should be fetched.

    Returns:
        dict. Maps each target_id to its exploration opportunity summary
        dict, or None when no summary exists for that id.
    """
    unique_target_ids = {suggestion.target_id for suggestion in suggestions}
    summaries_by_id = (
        opportunity_services.get_exploration_opportunity_summaries_by_ids(
            list(unique_target_ids)))
    return {
        opportunity_id: None if summary is None else summary.to_dict()
        for opportunity_id, summary in summaries_by_id.items()
    }
def _get_target_id_to_skill_opportunity_dict(suggestions):
    """Builds a mapping from suggestion target IDs to skill opportunity
    dicts, augmenting each opportunity with its skill's rubrics.

    Args:
        suggestions: list(BaseSuggestion). Suggestions whose targets'
            skill opportunities should be fetched.

    Returns:
        dict. Maps each target_id to its skill opportunity dict (with a
        'skill_rubrics' entry added), or None when no opportunity
        exists for that id.
    """
    unique_target_ids = {suggestion.target_id for suggestion in suggestions}
    opportunities_by_id = {
        opportunity_id: None if opportunity is None else opportunity.to_dict()
        for opportunity_id, opportunity in (
            opportunity_services.get_skill_opportunities_by_ids(
                list(unique_target_ids)).items())
    }
    existing_opportunity_ids = [
        opportunity['id']
        for opportunity in opportunities_by_id.values()
        if opportunity is not None
    ]
    # Each skill's id doubles as its opportunity id, so attach the
    # rubrics directly onto the matching opportunity dict.
    for skill in skill_fetchers.get_multi_skills(existing_opportunity_ids):
        if skill is not None:
            opportunities_by_id[skill.id]['skill_rubrics'] = [
                rubric.to_dict() for rubric in skill.rubrics
            ]
    return opportunities_by_id
class SuggestionHandler(base.BaseHandler):
    """Handles operations relating to suggestions."""

    @acl_decorators.can_suggest_changes
    def post(self):
        """Handles POST requests.

        Creates a suggestion from the request payload, saves any new
        images uploaded with it, and copies images referenced by the
        target entity into the suggestion's image context.

        Raises:
            InvalidInputException. The suggestion type is a (no longer
                supported) content edit, the payload fails validation,
                or a referenced image is missing/invalid.
        """
        if (self.payload.get('suggestion_type') ==
                feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT):
            raise self.InvalidInputException(
                'Content suggestion submissions are no longer supported.')
        try:
            suggestion = suggestion_services.create_suggestion(
                self.payload.get('suggestion_type'),
                self.payload.get('target_type'), self.payload.get('target_id'),
                self.payload.get('target_version_at_submission'),
                self.user_id, self.payload.get('change'),
                self.payload.get('description'))
        except utils.ValidationError as e:
            raise self.InvalidInputException(e)
        # TODO(#10513) : Find a way to save the images before the suggestion is
        # created.
        suggestion_image_context = suggestion.image_context
        # Each new image must have been uploaded alongside the request;
        # validate and persist both original and compressed versions.
        new_image_filenames = (
            suggestion.get_new_image_filenames_added_in_suggestion())
        for filename in new_image_filenames:
            image = self.request.get(filename)
            if not image:
                logging.exception(
                    'Image not provided for file with name %s when the '
                    ' suggestion with target id %s was created.' % (
                        filename, suggestion.target_id))
                raise self.InvalidInputException(
                    'No image data provided for file with name %s.'
                    % (filename))
            try:
                file_format = (
                    image_validation_services.validate_image_and_filename(
                        image, filename))
            except utils.ValidationError as e:
                raise self.InvalidInputException('%s' % (e))
            image_is_compressible = (
                file_format in feconf.COMPRESSIBLE_IMAGE_FORMATS)
            fs_services.save_original_and_compressed_versions_of_image(
                filename, suggestion_image_context, suggestion.target_id,
                image, 'image', image_is_compressible)
        # Copy images already present in the target entity's HTML so the
        # suggestion renders with the same assets.
        target_entity_html_list = suggestion.get_target_entity_html_strings()
        target_image_filenames = (
            html_cleaner.get_image_filenames_from_html_strings(
                target_entity_html_list))
        fs_services.copy_images(
            suggestion.target_type, suggestion.target_id,
            suggestion_image_context, suggestion.target_id,
            target_image_filenames)
        self.render_json(self.values)
class SuggestionToExplorationActionHandler(base.BaseHandler):
    """Handles actions performed on suggestions to explorations."""

    @acl_decorators.get_decorator_for_accepting_suggestion(
        acl_decorators.can_edit_exploration)
    def put(self, target_id, suggestion_id):
        """Handles PUT requests.

        Args:
            target_id: str. The ID of the suggestion target.
            suggestion_id: str. The ID of the suggestion.

        Raises:
            InvalidInputException. The suggestion is not targeted at an
                exploration, the ids are inconsistent, the commit
                message is too long, or the action is invalid.
            UnauthorizedUserException. The reviewer is the suggestion's
                author.
        """
        # Suggestion ids are dot-delimited: the first segment is the
        # target entity type and the second the target entity id.
        if (
                suggestion_id.split('.')[0] !=
                feconf.ENTITY_TYPE_EXPLORATION):
            raise self.InvalidInputException(
                'This handler allows actions only'
                ' on suggestions to explorations.')
        if suggestion_id.split('.')[1] != target_id:
            raise self.InvalidInputException(
                'The exploration id provided does not match the exploration id '
                'present as part of the suggestion_id')
        action = self.payload.get('action')
        suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
        if suggestion.author_id == self.user_id:
            raise self.UnauthorizedUserException(
                'You cannot accept/reject your own suggestion.')
        if action == constants.ACTION_ACCEPT_SUGGESTION:
            commit_message = self.payload.get('commit_message')
            if (commit_message is not None and
                    len(commit_message) > constants.MAX_COMMIT_MESSAGE_LENGTH):
                raise self.InvalidInputException(
                    'Commit messages must be at most %s characters long.'
                    % constants.MAX_COMMIT_MESSAGE_LENGTH)
            suggestion_services.accept_suggestion(
                suggestion_id, self.user_id, self.payload.get('commit_message'),
                self.payload.get('review_message'))
        elif action == constants.ACTION_REJECT_SUGGESTION:
            suggestion_services.reject_suggestion(
                suggestion_id, self.user_id, self.payload.get('review_message'))
        else:
            raise self.InvalidInputException('Invalid action.')
        self.render_json(self.values)
class ResubmitSuggestionHandler(base.BaseHandler):
    """Handler to reopen a rejected suggestion."""

    @acl_decorators.can_resubmit_suggestion
    def put(self, suggestion_id):
        """Handles PUT requests.

        Args:
            suggestion_id: str. The ID of the suggestion.
        """
        suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
        # Rebuild the change with the same concrete change class as the
        # original suggestion so validation rules match.
        change_object = type(suggestion.change)(self.payload.get('change'))
        suggestion_services.resubmit_rejected_suggestion(
            suggestion_id, self.payload.get('summary_message'),
            self.user_id, change_object)
        self.render_json(self.values)
class SuggestionToSkillActionHandler(base.BaseHandler):
    """Handles actions performed on suggestions to skills."""

    @acl_decorators.get_decorator_for_accepting_suggestion(
        acl_decorators.can_edit_skill)
    def put(self, target_id, suggestion_id):
        """Handles PUT requests.

        Args:
            target_id: str. The ID of the suggestion target.
            suggestion_id: str. The ID of the suggestion.

        Raises:
            InvalidInputException. The suggestion is not targeted at a
                skill, the skill ids are inconsistent, or the action is
                invalid.
        """
        # Suggestion ids are dot-delimited: entity type, then entity id.
        if suggestion_id.split('.')[0] != feconf.ENTITY_TYPE_SKILL:
            raise self.InvalidInputException(
                'This handler allows actions only on suggestions to skills.')
        if suggestion_id.split('.')[1] != target_id:
            raise self.InvalidInputException(
                'The skill id provided does not match the skill id present as '
                'part of the suggestion_id')
        action = self.payload.get('action')
        if action == constants.ACTION_ACCEPT_SUGGESTION:
            # Question suggestions do not use commit messages.
            suggestion_services.accept_suggestion(
                suggestion_id, self.user_id, 'UNUSED_COMMIT_MESSAGE',
                self.payload.get('review_message'))
        elif action == constants.ACTION_REJECT_SUGGESTION:
            suggestion_services.reject_suggestion(
                suggestion_id, self.user_id, self.payload.get('review_message'))
        else:
            raise self.InvalidInputException('Invalid action.')
        self.render_json(self.values)
class SuggestionsProviderHandler(base.BaseHandler):
    """Provides suggestions for a user and given suggestion type.

    Base class: subclasses supply the query (e.g. reviewable vs
    submitted suggestions) and reuse the validation/rendering helpers.
    """

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON

    def _require_valid_suggestion_and_target_types(
            self, target_type, suggestion_type):
        """Checks whether the given target_type and suggestion_type are valid.

        Args:
            target_type: str. The type of the suggestion target.
            suggestion_type: str. The type of the suggestion.

        Raises:
            InvalidInputException. If the given target_type of suggestion_type
                are invalid.
        """
        if target_type not in feconf.SUGGESTION_TARGET_TYPE_CHOICES:
            raise self.InvalidInputException(
                'Invalid target_type: %s' % target_type)
        if suggestion_type not in feconf.SUGGESTION_TYPE_CHOICES:
            raise self.InvalidInputException(
                'Invalid suggestion_type: %s' % suggestion_type)

    def _render_suggestions(self, target_type, suggestions):
        """Renders retrieved suggestions.

        Args:
            target_type: str. The suggestion type.
            suggestions: list(BaseSuggestion). A list of suggestions to render.
        """
        if target_type == feconf.ENTITY_TYPE_EXPLORATION:
            target_id_to_opportunity_dict = (
                _get_target_id_to_exploration_opportunity_dict(suggestions))
            self.render_json({
                'suggestions': [s.to_dict() for s in suggestions],
                'target_id_to_opportunity_dict':
                    target_id_to_opportunity_dict
            })
        elif target_type == feconf.ENTITY_TYPE_SKILL:
            target_id_to_opportunity_dict = (
                _get_target_id_to_skill_opportunity_dict(suggestions))
            self.render_json({
                'suggestions': [s.to_dict() for s in suggestions],
                'target_id_to_opportunity_dict':
                    target_id_to_opportunity_dict
            })
        else:
            # Other target types render an empty payload; type validity
            # is enforced separately by
            # _require_valid_suggestion_and_target_types.
            self.render_json({})
class ReviewableSuggestionsHandler(SuggestionsProviderHandler):
    """Provides all suggestions which can be reviewed by the user for a given
    suggestion type.
    """

    @acl_decorators.can_view_reviewable_suggestions
    def get(self, target_type, suggestion_type):
        """Fetches and renders the suggestions the user may review.

        Args:
            target_type: str. The type of the suggestion target.
            suggestion_type: str. The type of the suggestion.
        """
        self._require_valid_suggestion_and_target_types(
            target_type, suggestion_type)
        reviewable_suggestions = (
            suggestion_services.get_reviewable_suggestions(
                self.user_id, suggestion_type))
        self._render_suggestions(target_type, reviewable_suggestions)
class UserSubmittedSuggestionsHandler(SuggestionsProviderHandler):
    """Provides all suggestions which are submitted by the user for a given
    suggestion type.
    """

    @acl_decorators.can_suggest_changes
    def get(self, target_type, suggestion_type):
        """Fetches and renders the suggestions the user has submitted.

        Args:
            target_type: str. The type of the suggestion target.
            suggestion_type: str. The type of the suggestion.
        """
        self._require_valid_suggestion_and_target_types(
            target_type, suggestion_type)
        submitted_suggestions = (
            suggestion_services.get_submitted_suggestions(
                self.user_id, suggestion_type))
        self._render_suggestions(target_type, submitted_suggestions)
class SuggestionListHandler(base.BaseHandler):
    """Handles list operations on suggestions."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON

    @acl_decorators.open_access
    def get(self):
        """Handles GET requests.

        Query parameters arrive in the URL as
        ?field1=value1&field2=value2...; each (field, value) pair is
        used as a suggestion query filter. Only fields listed in
        feconf.ALLOWED_SUGGESTION_QUERY_FIELDS may be queried.
        """
        query_fields_and_values = list(self.request.GET.items())
        for field, _ in query_fields_and_values:
            if field not in feconf.ALLOWED_SUGGESTION_QUERY_FIELDS:
                raise self.InvalidInputException(
                    'Not allowed to query on field %s' % field)
        matching_suggestions = suggestion_services.query_suggestions(
            query_fields_and_values)
        self.values.update(
            {'suggestions': [s.to_dict() for s in matching_suggestions]})
        self.render_json(self.values)
class UpdateTranslationSuggestionHandler(base.BaseHandler):
    """Handles update operations relating to translation suggestions."""

    @acl_decorators.can_update_suggestion
    def put(self, suggestion_id):
        """Handles PUT requests.

        Raises:
            InvalidInputException. The suggestion is already handled.
            InvalidInputException. The 'translation_html' parameter is missing.
            InvalidInputException. The 'translation_html' parameter is not a
                string.
        """
        suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
        # Only pending suggestions may be edited.
        if suggestion.is_handled:
            raise self.InvalidInputException(
                'The suggestion with id %s has been accepted or rejected'
                % (suggestion_id)
            )
        translation_html = self.payload.get('translation_html')
        if translation_html is None:
            raise self.InvalidInputException(
                'The parameter \'translation_html\' is missing.'
            )
        if not isinstance(translation_html, python_utils.BASESTRING):
            raise self.InvalidInputException(
                'The parameter \'translation_html\' should be a string.'
            )
        suggestion_services.update_translation_suggestion(
            suggestion_id, translation_html)
        self.render_json(self.values)
class UpdateQuestionSuggestionHandler(base.BaseHandler):
    """Handles update operations relating to question suggestions."""

    @acl_decorators.can_update_suggestion
    def put(self, suggestion_id):
        """Handles PUT requests.

        Raises:
            InvalidInputException. The suggestion is already handled.
            InvalidInputException. The 'skill_difficulty' parameter is missing.
            InvalidInputException. The 'skill_difficulty' is not a decimal.
            InvalidInputException. The 'question_state_data' parameter is
                missing.
            InvalidInputException. The 'question_state_data' parameter is
                invalid.
        """
        suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
        # Only pending suggestions may be edited.
        if suggestion.is_handled:
            raise self.InvalidInputException(
                'The suggestion with id %s has been accepted or rejected'
                % (suggestion_id)
            )
        skill_difficulty = self.payload.get('skill_difficulty')
        if skill_difficulty is None:
            raise self.InvalidInputException(
                'The parameter \'skill_difficulty\' is missing.'
            )
        if not isinstance(skill_difficulty, float):
            raise self.InvalidInputException(
                'The parameter \'skill_difficulty\' should be a decimal.'
            )
        question_state_data = self.payload.get('question_state_data')
        if question_state_data is None:
            raise self.InvalidInputException(
                'The parameter \'question_state_data\' is missing.'
            )
        # Round-trip through the domain object to validate the payload
        # before persisting the update.
        question_state_data_obj = state_domain.State.from_dict(
            question_state_data)
        question_state_data_obj.validate(None, False)
        suggestion_services.update_question_suggestion(
            suggestion_id, skill_difficulty, question_state_data)
        self.render_json(self.values)
| 39.117773 | 80 | 0.661047 |
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from constants import constants
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import fs_services
from core.domain import html_cleaner
from core.domain import image_validation_services
from core.domain import opportunity_services
from core.domain import skill_fetchers
from core.domain import state_domain
from core.domain import suggestion_services
import feconf
import python_utils
import utils
def _get_target_id_to_exploration_opportunity_dict(suggestions):
target_ids = set([s.target_id for s in suggestions])
opportunity_id_to_opportunity_dict = {
opp_id: (opp.to_dict() if opp is not None else None)
for opp_id, opp in (
opportunity_services.get_exploration_opportunity_summaries_by_ids(
list(target_ids)).items())
}
return opportunity_id_to_opportunity_dict
def _get_target_id_to_skill_opportunity_dict(suggestions):
target_ids = set([s.target_id for s in suggestions])
opportunity_id_to_opportunity_dict = {
opp_id: (opp.to_dict() if opp is not None else None)
for opp_id, opp in opportunity_services.get_skill_opportunities_by_ids(
list(target_ids)).items()
}
opportunity_id_to_skill = {
skill.id: skill
for skill in skill_fetchers.get_multi_skills([
opp['id']
for opp in opportunity_id_to_opportunity_dict.values()
if opp is not None])
}
for opp_id, skill in opportunity_id_to_skill.items():
if skill is not None:
opportunity_id_to_opportunity_dict[opp_id]['skill_rubrics'] = [
rubric.to_dict() for rubric in skill.rubrics]
return opportunity_id_to_opportunity_dict
class SuggestionHandler(base.BaseHandler):
    """Handles creation of new suggestions via POST."""

    @acl_decorators.can_suggest_changes
    def post(self):
        """Create a suggestion from the request payload.

        Also saves any new images uploaded with the suggestion and copies
        images referenced by the target entity's HTML into the suggestion's
        image context.

        Raises:
            InvalidInputException. The suggestion type is the retired
                edit-state-content type, the suggestion fails validation,
                or an expected image is missing/invalid.
        """
        # Edit-state-content suggestions are no longer accepted.
        if (self.payload.get('suggestion_type') ==
                feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT):
            raise self.InvalidInputException(
                'Content suggestion submissions are no longer supported.')
        try:
            suggestion = suggestion_services.create_suggestion(
                self.payload.get('suggestion_type'),
                self.payload.get('target_type'), self.payload.get('target_id'),
                self.payload.get('target_version_at_submission'),
                self.user_id, self.payload.get('change'),
                self.payload.get('description'))
        except utils.ValidationError as e:
            raise self.InvalidInputException(e)
        # NOTE(review): the next line looks like a garbled remnant of an
        # assignment -- presumably of `suggestion_image_context`, which is
        # used below but never defined here. As written, the bare name
        # `ntext` raises NameError at runtime; restore the original
        # assignment from upstream.
        ntext
        new_image_filenames = (
            suggestion.get_new_image_filenames_added_in_suggestion())
        for filename in new_image_filenames:
            image = self.request.get(filename)
            if not image:
                logging.exception(
                    'Image not provided for file with name %s when the '
                    ' suggestion with target id %s was created.' % (
                        filename, suggestion.target_id))
                raise self.InvalidInputException(
                    'No image data provided for file with name %s.'
                    % (filename))
            try:
                file_format = (
                    image_validation_services.validate_image_and_filename(
                        image, filename))
            except utils.ValidationError as e:
                raise self.InvalidInputException('%s' % (e))
            # True when the validated format is listed as compressible in
            # feconf; controls whether a compressed copy is also saved.
            image_is_compressible = (
                file_format in feconf.COMPRESSIBLE_IMAGE_FORMATS)
            fs_services.save_original_and_compressed_versions_of_image(
                filename, suggestion_image_context, suggestion.target_id,
                image, 'image', image_is_compressible)
        # Copy images already referenced by the target entity's HTML so the
        # suggestion renders with the same assets.
        target_entity_html_list = suggestion.get_target_entity_html_strings()
        target_image_filenames = (
            html_cleaner.get_image_filenames_from_html_strings(
                target_entity_html_list))
        fs_services.copy_images(
            suggestion.target_type, suggestion.target_id,
            suggestion_image_context, suggestion.target_id,
            target_image_filenames)
        self.render_json(self.values)
class SuggestionToExplorationActionHandler(base.BaseHandler):
    """Accepts or rejects a suggestion that targets an exploration."""

    @acl_decorators.get_decorator_for_accepting_suggestion(
        acl_decorators.can_edit_exploration)
    def put(self, target_id, suggestion_id):
        """Apply the requested accept/reject action to the suggestion.

        Raises:
            InvalidInputException. The suggestion id does not target an
                exploration, does not match target_id, the commit message
                is too long, or the action is unrecognised.
            UnauthorizedUserException. The reviewer authored the suggestion.
        """
        suggestion_id_parts = suggestion_id.split('.')
        if suggestion_id_parts[0] != feconf.ENTITY_TYPE_EXPLORATION:
            raise self.InvalidInputException(
                'This handler allows actions only'
                ' on suggestions to explorations.')
        if suggestion_id_parts[1] != target_id:
            raise self.InvalidInputException(
                'The exploration id provided does not match the exploration id '
                'present as part of the suggestion_id')

        # Authors may never review their own suggestions.
        suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
        if suggestion.author_id == self.user_id:
            raise self.UnauthorizedUserException(
                'You cannot accept/reject your own suggestion.')

        action = self.payload.get('action')
        if action == constants.ACTION_ACCEPT_SUGGESTION:
            commit_message = self.payload.get('commit_message')
            if (commit_message is not None and
                    len(commit_message) > constants.MAX_COMMIT_MESSAGE_LENGTH):
                raise self.InvalidInputException(
                    'Commit messages must be at most %s characters long.'
                    % constants.MAX_COMMIT_MESSAGE_LENGTH)
            suggestion_services.accept_suggestion(
                suggestion_id, self.user_id, self.payload.get('commit_message'),
                self.payload.get('review_message'))
        elif action == constants.ACTION_REJECT_SUGGESTION:
            suggestion_services.reject_suggestion(
                suggestion_id, self.user_id, self.payload.get('review_message'))
        else:
            raise self.InvalidInputException('Invalid action.')
        self.render_json(self.values)
class ResubmitSuggestionHandler(base.BaseHandler):
    """Lets an author revise and resubmit a rejected suggestion."""

    @acl_decorators.can_resubmit_suggestion
    def put(self, suggestion_id):
        """Resubmit the suggestion with an updated change and summary."""
        suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
        # Rebuild the change with the same change class as the original
        # suggestion, so the same validation rules apply.
        updated_change = type(suggestion.change)(self.payload.get('change'))
        suggestion_services.resubmit_rejected_suggestion(
            suggestion_id, self.payload.get('summary_message'), self.user_id,
            updated_change)
        self.render_json(self.values)
class SuggestionToSkillActionHandler(base.BaseHandler):
    """Accepts or rejects a suggestion that targets a skill."""

    @acl_decorators.get_decorator_for_accepting_suggestion(
        acl_decorators.can_edit_skill)
    def put(self, target_id, suggestion_id):
        """Apply the requested accept/reject action to the suggestion.

        Raises:
            InvalidInputException. The suggestion id does not target a
                skill, does not match target_id, or the action is
                unrecognised.
        """
        id_parts = suggestion_id.split('.')
        if id_parts[0] != feconf.ENTITY_TYPE_SKILL:
            raise self.InvalidInputException(
                'This handler allows actions only on suggestions to skills.')
        if id_parts[1] != target_id:
            raise self.InvalidInputException(
                'The skill id provided does not match the skill id present as '
                'part of the suggestion_id')

        action = self.payload.get('action')
        if action == constants.ACTION_ACCEPT_SUGGESTION:
            # Skill suggestions carry no user-facing commit message, so a
            # placeholder is passed through.
            suggestion_services.accept_suggestion(
                suggestion_id, self.user_id, 'UNUSED_COMMIT_MESSAGE',
                self.payload.get('review_message'))
        elif action == constants.ACTION_REJECT_SUGGESTION:
            suggestion_services.reject_suggestion(
                suggestion_id, self.user_id, self.payload.get('review_message'))
        else:
            raise self.InvalidInputException('Invalid action.')
        self.render_json(self.values)
class SuggestionsProviderHandler(base.BaseHandler):
    """Shared base for handlers that return lists of suggestions."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON

    def _require_valid_suggestion_and_target_types(
            self, target_type, suggestion_type):
        """Raise InvalidInputException unless both types are recognised."""
        if target_type not in feconf.SUGGESTION_TARGET_TYPE_CHOICES:
            raise self.InvalidInputException(
                'Invalid target_type: %s' % target_type)
        if suggestion_type not in feconf.SUGGESTION_TYPE_CHOICES:
            raise self.InvalidInputException(
                'Invalid suggestion_type: %s' % suggestion_type)

    def _render_suggestions(self, target_type, suggestions):
        """Render the suggestions along with per-target opportunity data.

        Unknown target types render an empty JSON object.
        """
        if target_type == feconf.ENTITY_TYPE_EXPLORATION:
            opportunity_dict = (
                _get_target_id_to_exploration_opportunity_dict(suggestions))
        elif target_type == feconf.ENTITY_TYPE_SKILL:
            opportunity_dict = (
                _get_target_id_to_skill_opportunity_dict(suggestions))
        else:
            self.render_json({})
            return
        self.render_json({
            'suggestions': [s.to_dict() for s in suggestions],
            'target_id_to_opportunity_dict': opportunity_dict
        })
class ReviewableSuggestionsHandler(SuggestionsProviderHandler):
    """Returns suggestions the current user may review."""

    @acl_decorators.can_view_reviewable_suggestions
    def get(self, target_type, suggestion_type):
        """Render reviewable suggestions of the given type."""
        self._require_valid_suggestion_and_target_types(
            target_type, suggestion_type)
        reviewable = suggestion_services.get_reviewable_suggestions(
            self.user_id, suggestion_type)
        self._render_suggestions(target_type, reviewable)
class UserSubmittedSuggestionsHandler(SuggestionsProviderHandler):
    """Returns suggestions submitted by the current user."""

    @acl_decorators.can_suggest_changes
    def get(self, target_type, suggestion_type):
        """Render the current user's submitted suggestions of this type."""
        self._require_valid_suggestion_and_target_types(
            target_type, suggestion_type)
        submitted = suggestion_services.get_submitted_suggestions(
            self.user_id, suggestion_type)
        self._render_suggestions(target_type, submitted)
class SuggestionListHandler(base.BaseHandler):
    """Lists suggestions matching the GET query parameters."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON

    @acl_decorators.open_access
    def get(self):
        """Render all suggestions matching the whitelisted query fields."""
        query_fields_and_values = list(self.request.GET.items())
        for field, _ in query_fields_and_values:
            if field not in feconf.ALLOWED_SUGGESTION_QUERY_FIELDS:
                raise self.InvalidInputException(
                    'Not allowed to query on field %s' % field)
        matching = suggestion_services.query_suggestions(
            query_fields_and_values)
        self.values.update(
            {'suggestions': [s.to_dict() for s in matching]})
        self.render_json(self.values)
class UpdateTranslationSuggestionHandler(base.BaseHandler):
    """Updates the translation_html of a pending translation suggestion."""

    @acl_decorators.can_update_suggestion
    def put(self, suggestion_id):
        """Validate the payload and apply the translation update.

        Raises:
            InvalidInputException. The suggestion was already handled, or
                'translation_html' is missing or not a string.
        """
        suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
        if suggestion.is_handled:
            raise self.InvalidInputException(
                'The suggestion with id %s has been accepted or rejected'
                % (suggestion_id)
            )
        translation_html = self.payload.get('translation_html')
        if translation_html is None:
            raise self.InvalidInputException(
                'The parameter \'translation_html\' is missing.'
            )
        if not isinstance(translation_html, python_utils.BASESTRING):
            raise self.InvalidInputException(
                'The parameter \'translation_html\' should be a string.'
            )
        suggestion_services.update_translation_suggestion(
            suggestion_id, translation_html)
        self.render_json(self.values)
class UpdateQuestionSuggestionHandler(base.BaseHandler):
    """Updates the difficulty and state data of a pending question
    suggestion."""

    @acl_decorators.can_update_suggestion
    def put(self, suggestion_id):
        """Validate the payload and apply the question update.

        Raises:
            InvalidInputException. The suggestion was already handled,
                'skill_difficulty' is missing or not a float, or
                'question_state_data' is missing.
        """
        suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
        if suggestion.is_handled:
            raise self.InvalidInputException(
                'The suggestion with id %s has been accepted or rejected'
                % (suggestion_id)
            )
        skill_difficulty = self.payload.get('skill_difficulty')
        if skill_difficulty is None:
            raise self.InvalidInputException(
                'The parameter \'skill_difficulty\' is missing.'
            )
        if not isinstance(skill_difficulty, float):
            raise self.InvalidInputException(
                'The parameter \'skill_difficulty\' should be a decimal.'
            )
        question_state_data = self.payload.get('question_state_data')
        if question_state_data is None:
            raise self.InvalidInputException(
                'The parameter \'question_state_data\' is missing.'
            )
        # Validate the state before committing the update; invalid state
        # data raises here instead of being persisted.
        state_domain.State.from_dict(question_state_data).validate(None, False)
        suggestion_services.update_question_suggestion(
            suggestion_id,
            skill_difficulty,
            question_state_data)
        self.render_json(self.values)
| true | true |
1c47a212af9aebe31a9460335ed92e68251a9076 | 3,160 | py | Python | Exec/testing/Viscous-Vortex/check.py | darylbond/cerberus | a1b99f6b50ba6876d4705f26e6be98ed6e1c5c6a | [
"MIT"
] | 5 | 2021-05-10T01:21:52.000Z | 2022-03-10T17:26:41.000Z | Exec/testing/Viscous-Vortex/check.py | darylbond/cerberus | a1b99f6b50ba6876d4705f26e6be98ed6e1c5c6a | [
"MIT"
] | 3 | 2021-05-26T01:12:12.000Z | 2021-12-14T00:34:06.000Z | Exec/testing/Viscous-Vortex/check.py | darylbond/cerberus | a1b99f6b50ba6876d4705f26e6be98ed6e1c5c6a | [
"MIT"
] | 3 | 2021-05-11T02:45:27.000Z | 2021-09-06T12:08:23.000Z |
import sys
cmd_folder = "../../../vis"
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
from get_boxlib import ReadBoxLib, get_files
import numpy as np
import pylab as plt
import matplotlib.ticker as ticker
def check():
    """Compare the simulated viscous-vortex velocity profile against the
    analytical solution and save a comparison plot.

    Returns 0 on success and 1 when the maximum relative error exceeds 1%
    (the return value is used directly as the process exit code).
    """
    #==============================================================================
    # Simulation results
    #==============================================================================

    # get a list of all the files in this directory
    files = get_files('.', include=['plt'], exclude=["temp"], get_all=True)

    # use the last (latest-time) plot file
    f = files[-1]

    data = ReadBoxLib(f)
    t = data.time
    data = ReadBoxLib(f, max_level=-1)

    xc, u = data.get("x_vel-air")
    xc, v = data.get("y_vel-air")

    # velocity magnitude on the cell-centred grid
    vel = np.sqrt(u**2 + v**2)

    yc, xc = np.meshgrid(xc[1], xc[0])
    R = np.sqrt(xc**2 + yc**2)

    R_linear = np.ravel(R)
    vel_linear = np.ravel(vel)

    # restrict the comparison to radii within r_max
    r_max = 8.0
    R_linear = np.ma.masked_where(R_linear>r_max, R_linear)
    # note: the condition reuses the already-masked R_linear
    vel_linear = np.ma.masked_where(R_linear>r_max, vel_linear)

    # sort samples by radius for plotting/comparison
    I = np.argsort(R_linear)
    R_linear = R_linear[I]
    vel_linear = vel_linear[I]

    # =============================================================================
    # analytical solution
    # =============================================================================
    # D. J. Munoz, V. Springel, R. Marcus, M. Vogelsberger, L. Hernquist,
    # Multidimensional, compressible viscous flow on a moving Voronoi mesh,
    # Monthly Notices of the Royal Astronomical Society,
    # Volume 428, Issue 1, 1 January 2013, Pages 254-279,
    # https://doi.org/10.1093/mnras/sts015

    G = 1.0
    mu0 = 0.08
    rho0 = 1.0
    nu = mu0/rho0  # kinematic viscosity
    t0 = 10.0  # time offset of the initial condition

    def vtheta(R,t):
        # analytical azimuthal velocity of the decaying vortex
        return G/(2*np.pi*R)*(1-np.exp(-R**2/(4*nu*t)))

    vt = vtheta(R_linear, data.time+t0)

    # =============================================================================
    # check
    # =============================================================================

    # 0 = pass; non-zero exit code signals failure to the test harness
    success = 0

    rel_err = np.abs((vel_linear - vt)/vt)

    if np.max(rel_err) > 0.01:
        success = 1

    # =============================================================================
    # plot
    # =============================================================================

    plt.rc("font", family="serif")
    plt.rc("font", size=8)
    plt.rc("mathtext", fontset="cm")
    # matplotlib.rc('text', usetex = True)
    # NOTE(review): a list value for 'text.latex.preamble' is deprecated in
    # newer matplotlib releases (a plain string is expected) -- confirm
    # against the pinned matplotlib version.
    params= {'text.latex.preamble' : [r'\usepackage{amsmath}']}
    plt.rcParams.update(params)

    fig = plt.figure(figsize=(5,2))

    ax = fig.add_subplot(111)

    ax.plot(R_linear, vel_linear,'.', ms=2, mfc='none')
    ax.plot(R_linear, vt, 'k--', lw=1)

    ax.set_xlabel(r"$r$")
    ax.set_ylabel(r"$v_\theta$")

    # secondary axis: relative error (scaled by 1000)
    ax = ax.twinx()
    ax.plot(R_linear, rel_err*1000, 'r.', ms=0.5)
    ax.set_ylabel(r'$\left| \frac{\hat{v}_\theta - v_\theta}{v_\theta} \right|\times 10^3$')

    ax.set_xlim(0,8)
    ylim = ax.get_ylim()
    ax.set_ylim(0, ylim[1])

    fig.tight_layout()
    fig.savefig("plot.pdf", dpi=300)

    return success

if __name__ == "__main__":
    # exit code 0 on success, 1 on failure
    sys.exit(check())
import sys
cmd_folder = "../../../vis"
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
from get_boxlib import ReadBoxLib, get_files
import numpy as np
import pylab as plt
import matplotlib.ticker as ticker
def check():
    """Compare the simulated viscous-vortex velocity profile against the
    analytical solution and save a comparison plot.

    Returns 0 on success and 1 when the maximum relative error exceeds 1%
    (the return value is used directly as the process exit code).
    """
    # --- simulation results: load the latest plot file ---
    files = get_files('.', include=['plt'], exclude=["temp"], get_all=True)
    f = files[-1]
    data = ReadBoxLib(f)
    t = data.time
    data = ReadBoxLib(f, max_level=-1)
    xc, u = data.get("x_vel-air")
    xc, v = data.get("y_vel-air")
    # velocity magnitude and radius on the cell-centred grid
    vel = np.sqrt(u**2 + v**2)
    yc, xc = np.meshgrid(xc[1], xc[0])
    R = np.sqrt(xc**2 + yc**2)
    R_linear = np.ravel(R)
    vel_linear = np.ravel(vel)
    # restrict the comparison to radii within r_max
    r_max = 8.0
    R_linear = np.ma.masked_where(R_linear>r_max, R_linear)
    vel_linear = np.ma.masked_where(R_linear>r_max, vel_linear)
    # sort samples by radius for plotting/comparison
    I = np.argsort(R_linear)
    R_linear = R_linear[I]
    vel_linear = vel_linear[I]
    # --- analytical solution (Munoz et al. 2013, MNRAS 428, 254-279,
    # https://doi.org/10.1093/mnras/sts015) ---
    G = 1.0
    mu0 = 0.08
    rho0 = 1.0
    nu = mu0/rho0  # kinematic viscosity
    t0 = 10.0  # time offset of the initial condition
    def vtheta(R,t):
        # analytical azimuthal velocity of the decaying vortex
        return G/(2*np.pi*R)*(1-np.exp(-R**2/(4*nu*t)))
    vt = vtheta(R_linear, data.time+t0)
    # --- check: 0 = pass, 1 = failure (used as the exit code) ---
    success = 0
    rel_err = np.abs((vel_linear - vt)/vt)
    if np.max(rel_err) > 0.01:
        success = 1
    # --- plot simulated vs analytical profile plus relative error ---
    plt.rc("font", family="serif")
    plt.rc("font", size=8)
    plt.rc("mathtext", fontset="cm")
    # NOTE(review): a list value for 'text.latex.preamble' is deprecated in
    # newer matplotlib releases -- confirm against the pinned version.
    params= {'text.latex.preamble' : [r'\usepackage{amsmath}']}
    plt.rcParams.update(params)
    fig = plt.figure(figsize=(5,2))
    ax = fig.add_subplot(111)
    ax.plot(R_linear, vel_linear,'.', ms=2, mfc='none')
    ax.plot(R_linear, vt, 'k--', lw=1)
    ax.set_xlabel(r"$r$")
    ax.set_ylabel(r"$v_\theta$")
    # secondary axis: relative error scaled by 1000
    ax = ax.twinx()
    ax.plot(R_linear, rel_err*1000, 'r.', ms=0.5)
    ax.set_ylabel(r'$\left| \frac{\hat{v}_\theta - v_\theta}{v_\theta} \right|\times 10^3$')
    ax.set_xlim(0,8)
    ylim = ax.get_ylim()
    ax.set_ylim(0, ylim[1])
    fig.tight_layout()
    fig.savefig("plot.pdf", dpi=300)
    return success
if __name__ == "__main__":
    # exit code 0 on success, 1 on failure
    sys.exit(check())
1c47a21bcb817eb9aae5fdc55c17b7fec9d7bcef | 1,310 | py | Python | auger_cli/commands/experiment_sessions.py | deeplearninc/auger-cli | afa52224043834e11f40d69d2042d53dfccc5ae5 | [
"MIT"
] | 1 | 2019-04-17T12:40:58.000Z | 2019-04-17T12:40:58.000Z | auger_cli/commands/experiment_sessions.py | deeplearninc/auger-cli | afa52224043834e11f40d69d2042d53dfccc5ae5 | [
"MIT"
] | 25 | 2019-03-06T08:20:04.000Z | 2019-07-07T06:00:20.000Z | auger_cli/commands/experiment_sessions.py | deeplearninc/auger-cli | afa52224043834e11f40d69d2042d53dfccc5ae5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import click
from auger_cli.cli_client import pass_client
from auger_cli.formatter import (
print_list,
print_record,
print_table
)
from auger_cli.api import experiment_sessions
@click.group(
    'experiment_sessions',
    invoke_without_command=True,
    short_help='Manage Auger project experiment sessions.'
)
@click.option(
    '--project-id',
    '-p',
    default='',
    help='Experiment sessions project ID.'
)
@click.option(
    '--experiment-id',
    '-e',
    default='',
    help='Experiment sessions experiment ID.'
)
@click.pass_context
def experiment_sessions_group(ctx, project_id, experiment_id):
    """List experiment sessions when invoked without a subcommand.

    When a subcommand is given, click dispatches to it and this body does
    nothing (the redundant ``else: pass`` branch was removed in favour of
    a guard clause).
    """
    if ctx.invoked_subcommand is not None:
        return
    with ctx.obj.cli_error_handler():
        print_table(
            experiment_sessions.list(ctx.obj, project_id, experiment_id),
            attributes=experiment_sessions.display_list_attributes
        )
@click.command(short_help='Display experiment session details.')
@click.argument('experiment_session_id')
@pass_client
def show(client, experiment_session_id):
    """Print a single experiment session record."""
    with client.cli_error_handler():
        record = experiment_sessions.read(client, experiment_session_id)
        print_record(record, experiment_sessions.display_attributes)


experiment_sessions_group.add_command(show)
| 24.716981 | 117 | 0.714504 |
import click
from auger_cli.cli_client import pass_client
from auger_cli.formatter import (
print_list,
print_record,
print_table
)
from auger_cli.api import experiment_sessions
@click.group(
    'experiment_sessions',
    invoke_without_command=True,
    short_help='Manage Auger project experiment sessions.'
)
@click.option(
    '--project-id',
    '-p',
    default='',
    help='Experiment sessions project ID.'
)
@click.option(
    '--experiment-id',
    '-e',
    default='',
    help='Experiment sessions experiment ID.'
)
@click.pass_context
def experiment_sessions_group(ctx, project_id, experiment_id):
    """List experiment sessions when invoked without a subcommand;
    otherwise click dispatches to the subcommand and this body is a no-op.
    """
    if ctx.invoked_subcommand is None:
        with ctx.obj.cli_error_handler():
            print_table(
                experiment_sessions.list(ctx.obj, project_id, experiment_id),
                attributes=experiment_sessions.display_list_attributes
            )
    else:
        # A subcommand was invoked; nothing to do here.
        pass
@click.command(short_help='Display experiment session details.')
@click.argument('experiment_session_id')
@pass_client
def show(client, experiment_session_id):
    """Print a single experiment session record."""
    with client.cli_error_handler():
        print_record(experiment_sessions.read(client, experiment_session_id), experiment_sessions.display_attributes)

# Register `show` as a subcommand of the group.
experiment_sessions_group.add_command(show)
| true | true |
1c47a2331be5ca842b9b76d50b82dda69ffca458 | 5,055 | py | Python | test/functional/test_framework/netutil.py | knotcoin/knotcoin | 3f4ade4e2cabf94acd80bc043deec3d9a4209938 | [
"MIT"
] | null | null | null | test/functional/test_framework/netutil.py | knotcoin/knotcoin | 3f4ade4e2cabf94acd80bc043deec3d9a4209938 | [
"MIT"
] | null | null | null | test/functional/test_framework/netutil.py | knotcoin/knotcoin | 3f4ade4e2cabf94acd80bc043deec3d9a4209938 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Knotcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Linux network utilities.
Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
"""
import sys
import socket
import fcntl
import struct
import array
import os
from binascii import unhexlify, hexlify
# STATE_ESTABLISHED = '01'
# STATE_SYN_SENT = '02'
# STATE_SYN_RECV = '03'
# STATE_FIN_WAIT1 = '04'
# STATE_FIN_WAIT2 = '05'
# STATE_TIME_WAIT = '06'
# STATE_CLOSE = '07'
# STATE_CLOSE_WAIT = '08'
# STATE_LAST_ACK = '09'
STATE_LISTEN = '0A'
# STATE_CLOSING = '0B'
def get_socket_inodes(pid):
    '''
    Get list of socket inodes for process pid.
    '''
    fd_dir = '/proc/%i/fd' % pid
    inodes = []
    for fd_name in os.listdir(fd_dir):
        link = os.readlink(os.path.join(fd_dir, fd_name))
        # Socket file descriptors read back as 'socket:[<inode>]'.
        if link.startswith('socket:'):
            inodes.append(int(link[8:-1]))
    return inodes
def _remove_empty(array):
return [x for x in array if x !='']
def _convert_ip_port(array):
host,port = array.split(':')
# convert host from mangled-per-four-bytes form as used by kernel
host = unhexlify(host)
host_out = ''
for x in range(0, len(host) // 4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
host_out += '%08x' % val
return host_out,int(port,16)
def netstat(typ='tcp'):
    '''
    Function to return a list with status of tcp connections at linux systems
    To get pid of all network process running on system, you must run this script
    as superuser
    '''
    with open('/proc/net/'+typ,'r',encoding='utf8') as f:
        lines = f.readlines()
    rows = []
    # The first line of /proc/net/tcp* is the column header; skip it.
    for line in lines[1:]:
        fields = _remove_empty(line.split(' '))
        rows.append([
            fields[0],                    # slot id
            _convert_ip_port(fields[1]),  # local address
            _convert_ip_port(fields[2]),  # remote address
            fields[3],                    # connection state
            int(fields[9]),               # inode, used to match a pid
        ])
    return rows
def get_bind_addrs(pid):
    '''
    Get bind addresses as (host,port) tuples for process pid.
    '''
    inodes = set(get_socket_inodes(pid))
    bound = []
    # A socket belongs to the process when its inode matches, and it is a
    # bind address when the connection is in the LISTEN state.
    for conn in netstat('tcp') + netstat('tcp6'):
        state, inode = conn[3], conn[4]
        if state == STATE_LISTEN and inode in inodes:
            bound.append(conn[1])
    return bound
# from: http://code.activestate.com/recipes/439093/
def all_interfaces():
    '''
    Return all interfaces that are up, as (name_bytes, ipv4_string) tuples.

    Uses the SIOCGIFCONF ioctl; the buffer is doubled until the kernel
    returns fewer bytes than the buffer size, i.e. nothing was truncated.
    '''
    is_64bits = sys.maxsize > 2**32
    struct_size = 40 if is_64bits else 32  # sizeof(struct ifreq)
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    max_possible = 8 # initial value
    while True:
        bytes = max_possible * struct_size
        names = array.array('B', b'\0' * bytes)
        outbytes = struct.unpack('iL', fcntl.ioctl(
            s.fileno(),
            0x8912,  # SIOCGIFCONF
            struct.pack('iL', bytes, names.buffer_info()[0])
        ))[0]
        if outbytes == bytes:
            max_possible *= 2
        else:
            break
    # Close the probe socket (it was previously leaked).
    s.close()
    # array.tostring() was removed in Python 3.9; tobytes() is the
    # equivalent replacement.
    namestr = names.tobytes()
    return [(namestr[i:i+16].split(b'\0', 1)[0],
             socket.inet_ntoa(namestr[i+20:i+24]))
            for i in range(0, outbytes, struct_size)]
def addr_to_hex(addr):
    '''
    Convert string IPv4 or IPv6 address to binary address as returned by
    get_bind_addrs.
    Very naive implementation that certainly doesn't work for all IPv6 variants.
    '''
    if '.' in addr:  # IPv4 dotted quad
        byte_values = [int(octet) for octet in addr.split('.')]
    elif ':' in addr:  # IPv6
        halves = [[], []]  # bytes before and after the '::' gap
        side = 0
        groups = addr.split(':')
        for index, group in enumerate(groups):
            if group == '':
                # '' at either end comes from a leading/trailing ':'.
                if index == 0 or index == (len(groups) - 1):
                    continue
                side += 1  # '::' switches accumulation to the suffix half
                assert side < 2
            else:  # two bytes per 16-bit group
                value = int(group, 16)
                halves[side].append(value >> 8)
                halves[side].append(value & 0xff)
        pad = 16 - len(halves[0]) - len(halves[1])
        assert (side == 0 and pad == 0) or (side == 1 and pad > 0)
        byte_values = halves[0] + ([0] * pad) + halves[1]
    else:
        raise ValueError('Could not parse address %s' % addr)
    return hexlify(bytearray(byte_values)).decode('ascii')
def test_ipv6_local():
    '''
    Check for (local) IPv6 support.
    '''
    import socket
    # A SOCK_DGRAM connect() performs no handshake; it only fails when
    # there is no route to the IPv6 loopback address.
    try:
        probe = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        probe.connect(('::1', 0))
    except socket.error:
        return False
    return True
| 32.197452 | 111 | 0.600198 |
import sys
import socket
import fcntl
import struct
import array
import os
from binascii import unhexlify, hexlify
STATE_LISTEN = '0A'
def get_socket_inodes(pid):
base = '/proc/%i/fd' % pid
inodes = []
for item in os.listdir(base):
target = os.readlink(os.path.join(base, item))
if target.startswith('socket:'):
inodes.append(int(target[8:-1]))
return inodes
def _remove_empty(array):
return [x for x in array if x !='']
def _convert_ip_port(array):
host,port = array.split(':')
host = unhexlify(host)
host_out = ''
for x in range(0, len(host) // 4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
host_out += '%08x' % val
return host_out,int(port,16)
def netstat(typ='tcp'):
with open('/proc/net/'+typ,'r',encoding='utf8') as f:
content = f.readlines()
content.pop(0)
result = []
for line in content:
line_array = _remove_empty(line.split(' '))
tcp_id = line_array[0]
l_addr = _convert_ip_port(line_array[1])
r_addr = _convert_ip_port(line_array[2])
state = line_array[3]
inode = int(line_array[9])
nline = [tcp_id, l_addr, r_addr, state, inode]
result.append(nline)
return result
def get_bind_addrs(pid):
inodes = get_socket_inodes(pid)
bind_addrs = []
for conn in netstat('tcp') + netstat('tcp6'):
if conn[3] == STATE_LISTEN and conn[4] in inodes:
bind_addrs.append(conn[1])
return bind_addrs
def all_interfaces():
is_64bits = sys.maxsize > 2**32
struct_size = 40 if is_64bits else 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
max_possible = 8
while True:
bytes = max_possible * struct_size
names = array.array('B', b'\0' * bytes)
outbytes = struct.unpack('iL', fcntl.ioctl(
s.fileno(),
0x8912,
struct.pack('iL', bytes, names.buffer_info()[0])
))[0]
if outbytes == bytes:
max_possible *= 2
else:
break
namestr = names.tostring()
return [(namestr[i:i+16].split(b'\0', 1)[0],
socket.inet_ntoa(namestr[i+20:i+24]))
for i in range(0, outbytes, struct_size)]
def addr_to_hex(addr):
if '.' in addr:
addr = [int(x) for x in addr.split('.')]
elif ':' in addr:
sub = [[], []]
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1):
continue
x += 1
assert(x < 2)
else:
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
addr = sub[0] + ([0] * nullbytes) + sub[1]
else:
raise ValueError('Could not parse address %s' % addr)
return hexlify(bytearray(addr)).decode('ascii')
def test_ipv6_local():
import socket
have_ipv6 = True
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(('::1', 0))
except socket.error:
have_ipv6 = False
return have_ipv6
| true | true |
1c47a240eda919b8a1cb429d2d0afedc165532f8 | 263 | py | Python | Statistics/randomNum.py | ssm29njit/calculator601SheethalJedidiah | 2812fbabcf5249eeee8a2f34edd6152cfa2d175e | [
"MIT"
] | 1 | 2020-11-08T05:11:27.000Z | 2020-11-08T05:11:27.000Z | Statistics/randomNum.py | ssm29njit/calculator601SheethalJedidiah | 2812fbabcf5249eeee8a2f34edd6152cfa2d175e | [
"MIT"
] | null | null | null | Statistics/randomNum.py | ssm29njit/calculator601SheethalJedidiah | 2812fbabcf5249eeee8a2f34edd6152cfa2d175e | [
"MIT"
] | 1 | 2020-12-09T15:37:51.000Z | 2020-12-09T15:37:51.000Z | from random import random
def getRandomNum(data, sample_size):
    """Return ``sample_size - 1`` distinct elements drawn randomly from data.

    Bug fix: the module does ``from random import random``, which binds the
    name ``random`` to a *function*, so the original ``random.sample`` call
    raised AttributeError. The module is imported locally under a private
    name instead, leaving the module-level binding untouched.

    NOTE(review): ``k=sample_size-1`` (rather than ``sample_size``) matches
    the original code; confirm the off-by-one is intended.
    """
    import random as _random
    return _random.sample(data, k=sample_size - 1)
#def getSample(data, sample_size):
# random_values = random.sample(data, k=sample_size)
# return random_values | 26.3 | 56 | 0.760456 | from random import random
def getRandomNum(data, sample_size):
    """Return ``sample_size - 1`` distinct elements drawn randomly from data.

    Bug fix: the module does ``from random import random``, which binds the
    name ``random`` to a *function*, so the original ``random.sample`` call
    raised AttributeError. The module is imported locally under a private
    name instead, leaving the module-level binding untouched.

    NOTE(review): ``k=sample_size-1`` (rather than ``sample_size``) matches
    the original code; confirm the off-by-one is intended.
    """
    import random as _random
    return _random.sample(data, k=sample_size - 1)
| true | true |
1c47a26a1e9d995623a6018575abb2f888b8d25f | 11,579 | py | Python | tests/learning/test_rumelhart_semantic_network.py | bdsinger/PsyNeuLink | 71d8a0bb1691ff85061d4ad3de866d9930a69a73 | [
"Apache-2.0"
] | null | null | null | tests/learning/test_rumelhart_semantic_network.py | bdsinger/PsyNeuLink | 71d8a0bb1691ff85061d4ad3de866d9930a69a73 | [
"Apache-2.0"
] | null | null | null | tests/learning/test_rumelhart_semantic_network.py | bdsinger/PsyNeuLink | 71d8a0bb1691ff85061d4ad3de866d9930a69a73 | [
"Apache-2.0"
] | null | null | null | import pytest
import psyneulink as pnl
import psyneulink.core.components.functions.transferfunctions
def validate_learning_mechs(sys):
    """Assert that *sys* wired up the expected LearningMechanisms and
    error-signal Projections for the semantic network."""

    def find_lm(projection_name):
        # Look up the LearningMechanism attached to the named projection.
        full_name = (
            'LearningMechanism for MappingProjection from ' + projection_name)
        return next(
            lm for lm in sys.learning_mechanisms if lm.name == full_name)

    rep_in_to_rep_hidden = find_lm('REP_IN to REP_HIDDEN')
    rep_hidden_to_rel_hidden = find_lm('REP_HIDDEN to REL_HIDDEN')
    rel_in_to_rel_hidden = find_lm('REL_IN to REL_HIDDEN')
    output_lms = {
        find_lm('REL_HIDDEN to REP_OUT'),
        find_lm('REL_HIDDEN to PROP_OUT'),
        find_lm('REL_HIDDEN to QUAL_OUT'),
        find_lm('REL_HIDDEN to ACT_OUT'),
    }

    # REP_IN->REP_HIDDEN learns from the error signal of the next layer up.
    assert len(rep_in_to_rep_hidden.input_states) == 3
    assert (rep_in_to_rep_hidden.input_states[pnl.ERROR_SIGNAL]
            .path_afferents[0].sender.owner) == rep_hidden_to_rel_hidden

    # Both LearningMechanisms feeding REL_HIDDEN receive error signals from
    # every output-layer LearningMechanism.
    for hidden_lm in (rep_hidden_to_rel_hidden, rel_in_to_rel_hidden):
        senders = [input_state.path_afferents[0].sender.owner
                   for input_state in hidden_lm.input_states]
        assert all(lm in senders for lm in output_lms)
class TestRumelhartSemanticNetwork:
"""
Tests construction and training of network with both convergent and divergent pathways
with the following structure:
# Semantic Network:
# _
# REP PROP QUAL ACT |
# \___\__/____/ |
# | _ | Output Processes
# HIDDEN | _|
# / \ |
# HIDDEN REL_IN | Input Processes
# / |
# REP_IN _|
"""
    def test_rumelhart_semantic_network_sequential(self):
        """Build the semantic network from sequential (chained) Processes,
        validate the learning wiring, and run one trial."""
        # Input, hidden and output layers (Logistic hidden/output units).
        rep_in = pnl.TransferMechanism(size=10, name='REP_IN')
        rel_in = pnl.TransferMechanism(size=11, name='REL_IN')
        rep_hidden = pnl.TransferMechanism(size=4, function=psyneulink.core.components.functions.transferfunctions
                                           .Logistic, name='REP_HIDDEN')
        rel_hidden = pnl.TransferMechanism(size=5, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='REL_HIDDEN')
        rep_out = pnl.TransferMechanism(size=10, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='REP_OUT')
        prop_out = pnl.TransferMechanism(size=12, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='PROP_OUT')
        qual_out = pnl.TransferMechanism(size=13, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='QUAL_OUT')
        act_out = pnl.TransferMechanism(size=14, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='ACT_OUT')
        # Input pathways: REP_IN passes through its own hidden layer into
        # REL_HIDDEN; REL_IN feeds REL_HIDDEN directly. All learn.
        rep_hidden_proc = pnl.Process(pathway=[rep_in, rep_hidden, rel_hidden],
                                      learning=pnl.LEARNING,
                                      name='REP_HIDDEN_PROC')
        rel_hidden_proc = pnl.Process(pathway=[rel_in, rel_hidden],
                                      learning=pnl.LEARNING,
                                      name='REL_HIDDEN_PROC')
        # Output pathways: REL_HIDDEN diverges to the four output layers.
        rel_rep_proc = pnl.Process(pathway=[rel_hidden, rep_out],
                                   learning=pnl.LEARNING,
                                   name='REL_REP_PROC')
        rel_prop_proc = pnl.Process(pathway=[rel_hidden, prop_out],
                                    learning=pnl.LEARNING,
                                    name='REL_PROP_PROC')
        rel_qual_proc = pnl.Process(pathway=[rel_hidden, qual_out],
                                    learning=pnl.LEARNING,
                                    name='REL_QUAL_PROC')
        rel_act_proc = pnl.Process(pathway=[rel_hidden, act_out],
                                   learning=pnl.LEARNING,
                                   name='REL_ACT_PROC')
        S = pnl.System(processes=[rep_hidden_proc,
                                  rel_hidden_proc,
                                  rel_rep_proc,
                                  rel_prop_proc,
                                  rel_qual_proc,
                                  rel_act_proc])
        # S.show_graph(show_learning=pnl.ALL, show_dimensions=True)
        # Confirm the error-signal wiring before running.
        validate_learning_mechs(S)
        print(S.origin_mechanisms)
        print(S.terminal_mechanisms)
        # Run a single trial with all-ones inputs (targets left disabled).
        S.run(inputs={rel_in: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
                      rep_in: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]},
              # targets={rep_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
              #          prop_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
              #          qual_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
              #          act_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]}
              )
    def test_rumelhart_semantic_network_convergent(self):
        """Build the semantic network with one Process spanning input to
        output (REP_IN -> ... -> REP_OUT) plus convergent/divergent side
        Processes, validate the learning wiring, and run one trial."""
        # Input, hidden and output layers (Logistic hidden/output units).
        rep_in = pnl.TransferMechanism(size=10, name='REP_IN')
        rel_in = pnl.TransferMechanism(size=11, name='REL_IN')
        rep_hidden = pnl.TransferMechanism(size=4, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='REP_HIDDEN')
        rel_hidden = pnl.TransferMechanism(size=5, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='REL_HIDDEN')
        rep_out = pnl.TransferMechanism(size=10, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='REP_OUT')
        prop_out = pnl.TransferMechanism(size=12, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='PROP_OUT')
        qual_out = pnl.TransferMechanism(size=13, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='QUAL_OUT')
        act_out = pnl.TransferMechanism(size=14, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='ACT_OUT')
        # Full-depth pathway from REP_IN through both hidden layers to
        # REP_OUT; REL_IN converges onto the shared REL_HIDDEN layer.
        rep_proc = pnl.Process(pathway=[rep_in, rep_hidden, rel_hidden, rep_out],
                               learning=pnl.LEARNING,
                               name='REP_PROC')
        rel_proc = pnl.Process(pathway=[rel_in, rel_hidden],
                               learning=pnl.LEARNING,
                               name='REL_PROC')
        # Divergent pathways from REL_HIDDEN to the remaining output layers.
        rel_prop_proc = pnl.Process(pathway=[rel_hidden, prop_out],
                                    learning=pnl.LEARNING,
                                    name='REL_PROP_PROC')
        rel_qual_proc = pnl.Process(pathway=[rel_hidden, qual_out],
                                    learning=pnl.LEARNING,
                                    name='REL_QUAL_PROC')
        rel_act_proc = pnl.Process(pathway=[rel_hidden, act_out],
                                   learning=pnl.LEARNING,
                                   name='REL_ACT_PROC')
        S = pnl.System(processes=[rep_proc,
                                  rel_proc,
                                  rel_prop_proc,
                                  rel_qual_proc,
                                  rel_act_proc])
        # S.show_graph(show_learning=pnl.ALL, show_dimensions=True)
        # Confirm the error-signal wiring before running.
        validate_learning_mechs(S)
        print(S.origin_mechanisms)
        print(S.terminal_mechanisms)
        # Run a single trial with all-ones inputs (targets left disabled).
        S.run(inputs={rel_in: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
                      rep_in: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]},
              # targets={rep_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
              #          prop_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
              #          qual_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
              #          act_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]}
              )
def test_rumelhart_semantic_network_crossing(self):
    """Crossing topology: REP path crosses through REL_HIDDEN to REP_OUT
    while the REL path continues on to PROP_OUT; QUAL/ACT get their own
    learning Processes."""
    logistic = psyneulink.core.components.functions.transferfunctions.Logistic

    def transfer(n, label):
        # All hidden/output mechanisms share the Logistic transfer function.
        return pnl.TransferMechanism(size=n, function=logistic, name=label)

    rep_in = pnl.TransferMechanism(size=10, name='REP_IN')
    rel_in = pnl.TransferMechanism(size=11, name='REL_IN')
    rep_hidden = transfer(4, 'REP_HIDDEN')
    rel_hidden = transfer(5, 'REL_HIDDEN')
    rep_out = transfer(10, 'REP_OUT')
    prop_out = transfer(12, 'PROP_OUT')
    qual_out = transfer(13, 'QUAL_OUT')
    act_out = transfer(14, 'ACT_OUT')

    def learning_proc(pathway, label):
        return pnl.Process(pathway=pathway, learning=pnl.LEARNING, name=label)

    rep_proc = learning_proc([rep_in, rep_hidden, rel_hidden, rep_out], 'REP_PROC')
    rel_proc = learning_proc([rel_in, rel_hidden, prop_out], 'REL_PROC')
    rel_qual_proc = learning_proc([rel_hidden, qual_out], 'REL_QUAL_PROC')
    rel_act_proc = learning_proc([rel_hidden, act_out], 'REL_ACT_PROC')

    S = pnl.System(processes=[rep_proc, rel_proc, rel_qual_proc, rel_act_proc])
    validate_learning_mechs(S)
    print(S.origin_mechanisms)
    print(S.terminal_mechanisms)
    S.run(inputs={rel_in: [1.0] * 11,
                  rep_in: [1.0] * 10},
          )
| 62.589189 | 143 | 0.586925 | import pytest
import psyneulink as pnl
import psyneulink.core.components.functions.transferfunctions
def validate_learning_mechs(sys):
    """Assert that the expected LearningMechanism wiring exists in *sys*.

    Checks that the REP_IN->REP_HIDDEN learner receives its error signal
    from the REP_HIDDEN->REL_HIDDEN learner, and that both hidden-layer
    learners receive error signals from all four output-layer learners.
    """

    def lm_named(label):
        # Look a LearningMechanism up by its exact (auto-generated) name.
        return next(m for m in sys.learning_mechanisms if m.name == label)

    template = 'LearningMechanism for MappingProjection from {} to {}'
    rep_in_to_rep_hidden = lm_named(template.format('REP_IN', 'REP_HIDDEN'))
    rep_hidden_to_rel_hidden = lm_named(template.format('REP_HIDDEN', 'REL_HIDDEN'))
    rel_in_to_rel_hidden = lm_named(template.format('REL_IN', 'REL_HIDDEN'))
    output_learners = {
        lm_named(template.format('REL_HIDDEN', out))
        for out in ('REP_OUT', 'PROP_OUT', 'QUAL_OUT', 'ACT_OUT')
    }

    assert len(rep_in_to_rep_hidden.input_states) == 3
    error_sender = (rep_in_to_rep_hidden.input_states[pnl.ERROR_SIGNAL]
                    .path_afferents[0].sender.owner)
    assert error_sender == rep_hidden_to_rel_hidden

    for receiver in (rep_hidden_to_rel_hidden, rel_in_to_rel_hidden):
        senders = [state.path_afferents[0].sender.owner
                   for state in receiver.input_states]
        assert all(lm in senders for lm in output_learners)
class TestRumelhartSemanticNetwork:
    """System-construction tests for the Rumelhart & Todd semantic network.

    Each test wires the same eight mechanisms into a System using a
    different Process topology (sequential, convergent, crossing), validates
    the resulting LearningMechanism wiring, and runs one pass on all-ones
    inputs.  The mechanism/process/run boilerplate, previously triplicated
    verbatim across the three tests, is factored into private helpers.
    """

    @staticmethod
    def _make_mechanisms():
        """Build the shared input/hidden/output mechanisms used by every topology."""
        logistic = psyneulink.core.components.functions.transferfunctions.Logistic
        rep_in = pnl.TransferMechanism(size=10, name='REP_IN')
        rel_in = pnl.TransferMechanism(size=11, name='REL_IN')
        rep_hidden = pnl.TransferMechanism(size=4, function=logistic, name='REP_HIDDEN')
        rel_hidden = pnl.TransferMechanism(size=5, function=logistic, name='REL_HIDDEN')
        rep_out = pnl.TransferMechanism(size=10, function=logistic, name='REP_OUT')
        prop_out = pnl.TransferMechanism(size=12, function=logistic, name='PROP_OUT')
        qual_out = pnl.TransferMechanism(size=13, function=logistic, name='QUAL_OUT')
        act_out = pnl.TransferMechanism(size=14, function=logistic, name='ACT_OUT')
        return (rep_in, rel_in, rep_hidden, rel_hidden,
                rep_out, prop_out, qual_out, act_out)

    @staticmethod
    def _learning_process(pathway, name):
        """Create a Process with learning enabled along *pathway*."""
        return pnl.Process(pathway=pathway, learning=pnl.LEARNING, name=name)

    @staticmethod
    def _validate_and_run(S, rep_in, rel_in):
        """Validate LearningMechanism wiring, then run the System once."""
        validate_learning_mechs(S)
        print(S.origin_mechanisms)
        print(S.terminal_mechanisms)
        S.run(inputs={rel_in: [1.0] * 11,
                      rep_in: [1.0] * 10},
              )

    def test_rumelhart_semantic_network_sequential(self):
        (rep_in, rel_in, rep_hidden, rel_hidden,
         rep_out, prop_out, qual_out, act_out) = self._make_mechanisms()
        S = pnl.System(processes=[
            self._learning_process([rep_in, rep_hidden, rel_hidden], 'REP_HIDDEN_PROC'),
            self._learning_process([rel_in, rel_hidden], 'REL_HIDDEN_PROC'),
            self._learning_process([rel_hidden, rep_out], 'REL_REP_PROC'),
            self._learning_process([rel_hidden, prop_out], 'REL_PROP_PROC'),
            self._learning_process([rel_hidden, qual_out], 'REL_QUAL_PROC'),
            self._learning_process([rel_hidden, act_out], 'REL_ACT_PROC'),
        ])
        self._validate_and_run(S, rep_in, rel_in)

    def test_rumelhart_semantic_network_convergent(self):
        (rep_in, rel_in, rep_hidden, rel_hidden,
         rep_out, prop_out, qual_out, act_out) = self._make_mechanisms()
        S = pnl.System(processes=[
            self._learning_process([rep_in, rep_hidden, rel_hidden, rep_out], 'REP_PROC'),
            self._learning_process([rel_in, rel_hidden], 'REL_PROC'),
            self._learning_process([rel_hidden, prop_out], 'REL_PROP_PROC'),
            self._learning_process([rel_hidden, qual_out], 'REL_QUAL_PROC'),
            self._learning_process([rel_hidden, act_out], 'REL_ACT_PROC'),
        ])
        self._validate_and_run(S, rep_in, rel_in)

    def test_rumelhart_semantic_network_crossing(self):
        (rep_in, rel_in, rep_hidden, rel_hidden,
         rep_out, prop_out, qual_out, act_out) = self._make_mechanisms()
        S = pnl.System(processes=[
            self._learning_process([rep_in, rep_hidden, rel_hidden, rep_out], 'REP_PROC'),
            self._learning_process([rel_in, rel_hidden, prop_out], 'REL_PROC'),
            self._learning_process([rel_hidden, qual_out], 'REL_QUAL_PROC'),
            self._learning_process([rel_hidden, act_out], 'REL_ACT_PROC'),
        ])
        self._validate_and_run(S, rep_in, rel_in)
| true | true |
1c47a3c567eca3d2d1212e401d44eb434aeea753 | 124 | py | Python | blog/blog/api/urls.py | akiracadet/django-rest-sandbox | d5eb8667328b20b85b41b814e1071aad4627fac3 | [
"MIT"
] | null | null | null | blog/blog/api/urls.py | akiracadet/django-rest-sandbox | d5eb8667328b20b85b41b814e1071aad4627fac3 | [
"MIT"
] | 4 | 2021-04-08T19:39:29.000Z | 2021-09-22T19:33:36.000Z | blog/blog/api/urls.py | akiracadet/django-rest-sandbox | d5eb8667328b20b85b41b814e1071aad4627fac3 | [
"MIT"
] | null | null | null | from django.urls import include
from django.urls import path
urlpatterns = [
path('posts/', include('posts.urls')),
]
| 15.5 | 42 | 0.701613 | from django.urls import include
from django.urls import path
urlpatterns = [
path('posts/', include('posts.urls')),
]
| true | true |
1c47a4d77aa127fc90e8639b68a267f11d0041c2 | 403 | py | Python | bookshop_proj/asgi.py | ravenda900/bookshop-django | d66308a75c69854d55f8093aa8d35d4940cb5689 | [
"MIT"
] | null | null | null | bookshop_proj/asgi.py | ravenda900/bookshop-django | d66308a75c69854d55f8093aa8d35d4940cb5689 | [
"MIT"
] | null | null | null | bookshop_proj/asgi.py | ravenda900/bookshop-django | d66308a75c69854d55f8093aa8d35d4940cb5689 | [
"MIT"
] | null | null | null | """
"""
ASGI config for bookshop_proj project.

It exposes the ASGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os

from django.core.asgi import get_asgi_application

# Only set the settings module when the environment has not already chosen one.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bookshop_proj.settings')

application = get_asgi_application()
| 23.705882 | 78 | 0.791563 |
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bookshop_proj.settings')
application = get_asgi_application()
| true | true |
1c47a53589ababd0727d0971d389fb95baaeab43 | 4,168 | py | Python | research/minigo/evaluation.py | SimiaCryptus/models | c652a23a650070b71e286f1ded93726670161940 | [
"Apache-2.0"
] | null | null | null | research/minigo/evaluation.py | SimiaCryptus/models | c652a23a650070b71e286f1ded93726670161940 | [
"Apache-2.0"
] | null | null | null | research/minigo/evaluation.py | SimiaCryptus/models | c652a23a650070b71e286f1ded93726670161940 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluation of playing games between two neural nets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import go
import sgf_wrapper
from gtp_wrapper import MCTSPlayer
def play_match(params, black_net, white_net, games, readouts,
               sgf_dir, verbosity):
    """Play evaluation matches between two neural nets.

    Plays *games* games sequentially; for every move the side to move runs
    *readouts* additional MCTS readouts.  Each finished game is written out
    as an SGF file into *sgf_dir*.

    Args:
      params: An object of hyperparameters (board_size, simultaneous_leaves,
        eval_win_rate).
      black_net: Instance of the DualNetRunner class to play as black.
      white_net: Instance of the DualNetRunner class to play as white.
      games: Number of games to play.
      readouts: Number of readouts to perform for each step in each game.
      sgf_dir: Directory to write the sgf results.
      verbosity: Verbosity to show evaluation process.

    Returns:
      go.BLACK_NAME if black's win margin exceeds eval_win_rate * games,
      otherwise go.WHITE_NAME.
    """
    player_black = MCTSPlayer(
        params.board_size, black_net, verbosity=verbosity, two_player_mode=True,
        num_parallel=params.simultaneous_leaves)
    player_white = MCTSPlayer(
        params.board_size, white_net, verbosity=verbosity, two_player_mode=True,
        num_parallel=params.simultaneous_leaves)

    name_black = os.path.basename(black_net.save_file)
    name_white = os.path.basename(white_net.save_file)

    wins_black = 0
    wins_white = 0
    for game_idx in range(games):
        move_count = 0  # move number within the current game
        player_black.initialize_game()
        player_white.initialize_game()

        while True:
            step_start = time.time()
            # Black moves on even move numbers, white on odd ones.
            if move_count % 2:
                active, inactive = player_white, player_black
            else:
                active, inactive = player_black, player_white

            # Run `readouts` additional tree-search readouts for the mover.
            target_visits = active.root.N + readouts
            while active.root.N < target_visits:
                active.tree_search()

            if verbosity >= 3:
                print(active.root.position)

            # Check the root for a hopeless position and force a resignation.
            if active.should_resign():
                active.set_result(-active.root.position.to_play, was_resign=True)
                inactive.set_result(
                    active.root.position.to_play, was_resign=True)

            if active.is_done():
                fname = '{:d}-{:s}-vs-{:s}-{:d}.sgf'.format(
                    int(time.time()), name_white, name_black, game_idx)
                with open(os.path.join(sgf_dir, fname), 'w') as f:
                    sgfstr = sgf_wrapper.make_sgf(
                        params.board_size, active.position.recent, active.result_string,
                        black_name=name_black, white_name=name_white)
                    f.write(sgfstr)
                print('Finished game', game_idx, active.result_string)
                if active.result_string is not None:
                    if active.result_string[0] == 'B':
                        wins_black += 1
                    elif active.result_string[0] == 'W':
                        wins_white += 1
                break

            move = active.pick_move()
            active.play_move(move)
            inactive.play_move(move)

            elapsed = time.time() - step_start
            move_count += 1
            if (verbosity > 1) or (verbosity == 1 and move_count % 10 == 9):
                per_hundred = (elapsed / readouts) * 100.0
                print(active.root.position)
                print('{:d}: {:d} readouts, {:.3f} s/100. ({:.2f} sec)'.format(
                    move_count, readouts, per_hundred, elapsed))

    if (wins_black - wins_white) > params.eval_win_rate * games:
        return go.BLACK_NAME
    return go.WHITE_NAME
| 35.02521 | 80 | 0.669626 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import go
import sgf_wrapper
from gtp_wrapper import MCTSPlayer
def play_match(params, black_net, white_net, games, readouts,
sgf_dir, verbosity):
black = MCTSPlayer(
params.board_size, black_net, verbosity=verbosity, two_player_mode=True,
num_parallel=params.simultaneous_leaves)
white = MCTSPlayer(
params.board_size, white_net, verbosity=verbosity, two_player_mode=True,
num_parallel=params.simultaneous_leaves)
black_name = os.path.basename(black_net.save_file)
white_name = os.path.basename(white_net.save_file)
black_win_counts = 0
white_win_counts = 0
for i in range(games):
num_move = 0
black.initialize_game()
white.initialize_game()
while True:
start = time.time()
active = white if num_move % 2 else black
inactive = black if num_move % 2 else white
current_readouts = active.root.N
while active.root.N < current_readouts + readouts:
active.tree_search()
if verbosity >= 3:
print(active.root.position)
if active.should_resign():
active.set_result(-active.root.position.to_play, was_resign=True)
inactive.set_result(
active.root.position.to_play, was_resign=True)
if active.is_done():
fname = '{:d}-{:s}-vs-{:s}-{:d}.sgf'.format(
int(time.time()), white_name, black_name, i)
with open(os.path.join(sgf_dir, fname), 'w') as f:
sgfstr = sgf_wrapper.make_sgf(
params.board_size, active.position.recent, active.result_string,
black_name=black_name, white_name=white_name)
f.write(sgfstr)
print('Finished game', i, active.result_string)
if active.result_string is not None:
if active.result_string[0] == 'B':
black_win_counts += 1
elif active.result_string[0] == 'W':
white_win_counts += 1
break
move = active.pick_move()
active.play_move(move)
inactive.play_move(move)
dur = time.time() - start
num_move += 1
if (verbosity > 1) or (verbosity == 1 and num_move % 10 == 9):
timeper = (dur / readouts) * 100.0
print(active.root.position)
print('{:d}: {:d} readouts, {:.3f} s/100. ({:.2f} sec)'.format(
num_move, readouts, timeper, dur))
if (black_win_counts - white_win_counts) > params.eval_win_rate * games:
return go.BLACK_NAME
else:
return go.WHITE_NAME
| true | true |
1c47a5a2c3724fb74c6a56157c990c41856f9b53 | 401 | py | Python | tweepy/error.py | skoczen/tweepy | 3b4bbabe1ecafee40d9d5942fbd59c4056c8997c | [
"MIT"
] | 24 | 2015-11-12T06:33:24.000Z | 2019-04-16T11:11:13.000Z | tweepy/error.py | skoczen/tweepy | 3b4bbabe1ecafee40d9d5942fbd59c4056c8997c | [
"MIT"
] | 3 | 2015-11-12T22:16:22.000Z | 2021-08-09T07:00:27.000Z | tweepy/error.py | skoczen/tweepy | 3b4bbabe1ecafee40d9d5942fbd59c4056c8997c | [
"MIT"
] | 7 | 2015-11-12T20:09:56.000Z | 2020-12-16T17:59:02.000Z | # Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from __future__ import print_function
import six
class TweepError(Exception):
    """Base exception raised for Tweepy / Twitter API failures.

    ``reason`` is coerced to the text type (via ``six``) and used as the
    string representation; ``response`` optionally carries the HTTP
    response that triggered the error.
    """

    def __init__(self, reason, response=None):
        self.reason = six.text_type(reason)
        self.response = response
        super(TweepError, self).__init__(reason)

    def __str__(self):
        return self.reason
| 20.05 | 46 | 0.693267 |
from __future__ import print_function
import six
class TweepError(Exception):
def __init__(self, reason, response=None):
self.reason = six.text_type(reason)
self.response = response
Exception.__init__(self, reason)
def __str__(self):
return self.reason
| true | true |
1c47a9283d75ce997bacf6c8e784da408d98f090 | 164 | py | Python | python/kyu-6/detect-pangram/test_detect_pangram.py | ledwindra/codewars | 0552669a69e801cfe5f9a3696a4d98be63a96951 | [
"WTFPL"
] | 1 | 2020-11-13T16:55:04.000Z | 2020-11-13T16:55:04.000Z | python/kyu-6/detect-pangram/test_detect_pangram.py | ledwindra/codewars | 0552669a69e801cfe5f9a3696a4d98be63a96951 | [
"WTFPL"
] | 1 | 2020-01-28T15:48:17.000Z | 2020-01-28T15:48:17.000Z | python/kyu-6/detect-pangram/test_detect_pangram.py | ledwindra/codewars | 0552669a69e801cfe5f9a3696a4d98be63a96951 | [
"WTFPL"
] | null | null | null | from detect_pangram import is_pangram
class TestPangram:
def test_0(self):
assert is_pangram('The quick, brown fox jumps over the lazy dog!') == True | 23.428571 | 82 | 0.719512 | from detect_pangram import is_pangram
class TestPangram:
def test_0(self):
assert is_pangram('The quick, brown fox jumps over the lazy dog!') == True | true | true |
1c47a928f0a5aff8aff873bfd002dda97fcd6bb1 | 15,931 | py | Python | notebooks/__code/metadata_overlapping_images/metadata_overlapping_images.py | ornlneutronimaging/notebooks | d219cdc9ec103fd8bb45891b984f45d3d6facecd | [
"BSD-3-Clause"
] | null | null | null | notebooks/__code/metadata_overlapping_images/metadata_overlapping_images.py | ornlneutronimaging/notebooks | d219cdc9ec103fd8bb45891b984f45d3d6facecd | [
"BSD-3-Clause"
] | null | null | null | notebooks/__code/metadata_overlapping_images/metadata_overlapping_images.py | ornlneutronimaging/notebooks | d219cdc9ec103fd8bb45891b984f45d3d6facecd | [
"BSD-3-Clause"
] | null | null | null | from IPython.core.display import HTML
from IPython.core.display import display
import os
import copy
from qtpy.QtWidgets import QMainWindow, QFileDialog
from qtpy import QtGui
from collections import OrderedDict
from __code import load_ui
from .initialization import Initializer
from .event_handler import MetadataTableHandler
from __code.metadata_overlapping_images.export_images import ExportImages
from .display import DisplayImages, DisplayScalePyqtUi, DisplayMetadataPyqtUi
from .export_table import ExportTable
from __code.metadata_overlapping_images import HELP_PAGE
class MetadataOverlappingImagesUi(QMainWindow):
x_axis_column_index = 0
y_axis_column_index = 2
xy_axis_menu_logo = {'enable': u"\u2713 ", # \u25CF (dark circle)
'disable': " "}
metadata_operation = {0: {"first_part_of_string_to_remove": "",
"last_part_of_string_to_remove": "",
"math_1": "+",
"value_1": "",
"math_2": "+",
"value_2": "",
"index_of_metadata": -1,
},
2: {"first_part_of_string_to_remove": "",
"last_part_of_string_to_remove": "",
"math_1": "+",
"value_1": "",
"math_2": "+",
"value_2": "",
"index_of_metadata": -1,
},
3: {"first_part_of_string_to_remove": "",
"last_part_of_string_to_remove": "",
"math_1": "+",
"value_1": "",
"math_2": "+",
"value_2": "",
"index_of_metadata": -1,
},
}
data_dict = {}
data_dict_raw = {}
timestamp_dict = {}
default_scale_roi = None
rotation_angle = 0
histogram_level = []
# scale pyqtgraph
scale_pyqt_ui = None
scale_legend_pyqt_ui = None
metadata1_pyqt_ui = None # metadata 1 text
metadata2_pyqt_ui = None # metadata 2 text
graph_pyqt_ui = None
# size of tables
guide_table_width = [40, 400, 150, 150]
live_image = []
display_ui = []
# guide and profile pg ROIs
list_guide_pyqt_roi = list()
list_profile_pyqt_roi = list()
list_table_widget_checkbox = list()
list_metadata = []
dict_list_metadata = OrderedDict() # {0: '10', 1: 'hfir', ...}
list_scale_units = ["mm", u"\u00B5m", "nm"]
list_scale_units = {'string': ["mm", u"\u00B5m", "nm"],
'html': ["mm", "<span>µm</span>", "nm"]}
rgba_color = {'white': (255, 255, 255, 255, None),
'red': (255, 0, 0, 255, None),
'green': (0, 255, 0, 255, None),
'blue': (0, 0, 255, 255, None),
'black': (0, 0, 0, 255, None)}
rgb_color = {'white': (255, 255, 255),
'red': (255, 0, 0),
'green': (0, 255, 0),
'blue': (0, 0, 255),
'black': (0, 0, 0)}
html_color = {'white': "#FFF",
'red': "#F00",
'green': "#0F0",
'blue': "#00F",
'black': "#000"}
# ui of pop up window that allows to define metadata column value (format it)
metadata_string_format_ui = None
def __init__(self, parent=None, working_dir='', data_dict=None):
    """Build the main window, load the .ui layout and show the first image.

    Args:
        parent: optional Qt parent widget.
        working_dir: starting folder used by the export file dialogs.
        data_dict: normalization dictionary {'file_name': [], 'data': [...],
            'metadata': [], 'shape': {}} produced by the loading notebook.
    """
    # Fix: the original used a backslash line-continuation *inside* the
    # string literal, which embedded the source indentation into the
    # displayed HTML; implicit string concatenation keeps a single space.
    display(HTML('<span style="font-size: 20px; color:blue">Check UI that popped up '
                 '(maybe hidden behind this browser!)</span>'))
    super(MetadataOverlappingImagesUi, self).__init__(parent)

    ui_full_path = os.path.join(
        os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
        'ui', 'ui_metadata_overlapping_images.ui')
    self.ui = load_ui(ui_full_path, baseinstance=self)
    self.setWindowTitle("Metadata Overlapping Images")

    self.working_dir = working_dir
    self.data_dict = data_dict
    # Untouched copy of the images, kept as the reference when moving and
    # rotating images later on.
    self.data_dict_raw = copy.deepcopy(data_dict)

    # Delegate all widget/plot/table setup to the Initializer helper.
    o_initialization = Initializer(parent=self)
    o_initialization.pyqtgraph()
    o_initialization.parameters()
    o_initialization.statusbar()
    o_initialization.table()
    o_initialization.widgets()
    o_initialization.event()

    # Display the first image and sync metadata text with the checkboxes.
    self.slider_file_changed(0)
    self.text_metadata_1_enable_pressed(self.ui.checkBox.isChecked())
    self.text_metadata_2_enable_pressed(self.ui.checkBox_2.isChecked())
# ========================================================================================
# MAIN UI EVENTs
def metadata_table_right_click(self, position):
o_metadata_table = MetadataTableHandler(parent=self)
o_metadata_table.right_click(position)
def previous_image_button_clicked(self):
self.change_slider(offset=-1)
self.update_metadata_pyqt_ui()
def next_image_button_clicked(self):
self.change_slider(offset = +1)
self.update_metadata_pyqt_ui()
def help_button_clicked(self):
import webbrowser
webbrowser.open(HELP_PAGE)
def closeEvent(self, event=None):
if self.metadata_string_format_ui:
self.metadata_string_format_ui.close()
def slider_file_changed(self, slider_value):
self.display_image()
self.ui.image_slider_value.setText(str(slider_value))
self.check_status_next_prev_image_button()
self.update_metadata_pyqt_ui()
def slider_file_clicked(self):
current_slider_value = self.ui.file_slider.value()
self.slider_file_changed(current_slider_value)
self.update_metadata_pyqt_ui()
def scale_checkbox_clicked(self, status):
self.ui.scale_groupbox.setEnabled(status)
self.ui.scale_position_frame.setEnabled(status)
o_display = DisplayScalePyqtUi(parent=self)
o_display.run()
def metadata_checkbox_clicked(self, status):
    """Enable/disable all metadata-related widget groups, then redraw the overlay."""
    for widget in (self.ui.metadata_groupbox,
                   self.ui.metadata_position_frame,
                   self.ui.enable_graph_checkbox,
                   self.ui.text_graph_tabWidget,
                   self.ui.toolBox):
        widget.setEnabled(status)
    # The graph group additionally honors its own enable checkbox.
    if status:
        self.ui.graph_groupBox.setEnabled(self.ui.enable_graph_checkbox.isChecked())
    else:
        self.ui.graph_groupBox.setEnabled(False)
    DisplayMetadataPyqtUi(parent=self).run()
def select_metadata_checkbox_clicked(self, status):
self.ui.select_metadata_combobox.setEnabled(status)
self.update_metadata_pyqt_ui()
def font_size_slider_pressed(self):
self.update_metadata_pyqt_ui()
def font_size_slider_moved(self, value):
self.update_metadata_pyqt_ui()
def graph_font_size_slider_pressed(self):
self.update_metadata_pyqt_ui()
def graph_font_size_slider_moved(self, value):
self.update_metadata_pyqt_ui()
def metadata_list_changed(self, index, column):
o_event = MetadataTableHandler(parent=self)
o_event.metadata_list_changed(index, column)
def scale_orientation_clicked(self):
o_init = Initializer(parent=self)
o_init.set_scale_spinbox_max_value()
self.update_scale_pyqt_ui()
def scale_thickness_value_changed(self, value):
self.update_scale_pyqt_ui()
def scale_color_changed(self, value):
self.update_scale_pyqt_ui()
def scale_size_changed(self, value):
self.update_scale_pyqt_ui()
def scale_real_size_changed(self):
"""update the label of the scale"""
self.update_scale_pyqt_ui()
def scale_units_changed(self):
self.update_scale_pyqt_ui()
def scale_position_moved(self, new_value):
self.update_scale_pyqt_ui()
def scale_position_clicked(self):
self.update_scale_pyqt_ui()
def metadata_position_moved(self, new_value):
self.update_metadata_pyqt_ui()
def metadata_position_clicked(self):
self.update_metadata_pyqt_ui()
def metadata2_position_moved(self, new_value):
self.update_metadata_pyqt_ui()
def metadata2_position_clicked(self):
self.update_metadata_pyqt_ui()
def metadata_color_changed(self, value):
self.update_metadata_pyqt_ui()
def metadata_name_return_pressed(self):
self.update_metadata_pyqt_ui()
def graph_position_moved(self, value):
self.update_metadata_pyqt_ui()
def graph_position_clicked(self):
self.update_metadata_pyqt_ui()
def graph_color_changed(self, value):
self.update_metadata_pyqt_ui()
def graph_axis_label_changed(self, new_value):
self.update_metadata_pyqt_ui()
def metadata_text_or_graph_clicked(self):
status = self.ui.metadata_graph_option.isChecked()
self.ui.metadata_graph_size_label.setVisible(status)
self.ui.metadata_graph_size_slider.setVisible(status)
self.update_metadata_pyqt_ui()
def metadata_graph_size_pressed(self):
self.update_metadata_pyqt_ui()
def metadata_graph_size_moved(self, slider_value):
self.update_metadata_pyqt_ui()
def table_cell_changed(self, row, column):
self.update_metadata_pyqt_ui()
def export_table_clicked(self):
_export_folder = QFileDialog.getExistingDirectory(self,
directory=os.path.dirname(self.working_dir),
caption="Select Output Folder",
options=QFileDialog.ShowDirsOnly)
QtGui.QGuiApplication.processEvents()
if _export_folder:
o_export = ExportTable(parent=self,
export_folder=_export_folder)
o_export.run()
def export_button_clicked(self):
_export_folder = QFileDialog.getExistingDirectory(self,
directory=os.path.dirname(self.working_dir),
caption="Select Output Folder",
options=QFileDialog.ShowDirsOnly)
QtGui.QGuiApplication.processEvents()
if _export_folder:
o_export = ExportImages(parent=self,
export_folder=_export_folder)
o_export.run()
# def import_table_pressed(self):
# _table_file = QFileDialog.getOpenFileName(self,
# directory=os.path.dirname(self.working_dir),
# caption="Select Input File")
# QtGui.QGuiApplication.processEvents()
#
# if type(_table_file) is tuple:
# _table_file = _table_file[0]
#
# if _table_file:
# o_import = TableLoader(parent=self,
# filename=str(_table_file))
# o_import.load_table()
# o_import.populate()
# self.update_metadata_pyqt_ui()
def enable_graph_button_clicked(self, new_state):
self.ui.graph_groupBox.setEnabled(new_state)
self.ui.metadata_position_frame_3.setEnabled(new_state)
self.ui.graph_position_y.setEnabled(new_state)
self.ui.graph_position_x.setEnabled(new_state)
self.ui.label_15.setEnabled(new_state)
self.ui.label_16.setEnabled(new_state)
self.update_metadata_pyqt_ui()
def display_red_vertical_marker_clicked(self):
self.update_metadata_pyqt_ui()
def text_metadata_1_enable_pressed(self, status):
self.ui.metadata_position_frame.setEnabled(status)
self.ui.metadata_position_x.setEnabled(status)
self.ui.metadata_position_y.setEnabled(status)
self.ui.label_10.setEnabled(status)
self.ui.label_11.setEnabled(status)
self.ui.label_14.setEnabled(status)
self.ui.font_size_slider.setEnabled(status)
self.ui.prefix_label_1.setEnabled(status)
self.ui.suffix_label_1.setEnabled(status)
self.ui.prefix_lineEdit_1.setEnabled(status)
self.ui.suffix_lineEdit_1.setEnabled(status)
self.ui.metadata_1_name_groupBox.setEnabled(status)
self.update_metadata_pyqt_ui()
def text_metadata_2_enable_pressed(self, status):
self.ui.metadata_position_frame_2.setEnabled(status)
self.ui.metadata_position_x_2.setEnabled(status)
self.ui.metadata_position_y_2.setEnabled(status)
self.ui.label_18.setEnabled(status)
self.ui.label_19.setEnabled(status)
self.ui.label_20.setEnabled(status)
self.ui.font_size_slider_2.setEnabled(status)
self.ui.prefix_label_2.setEnabled(status)
self.ui.suffix_label_2.setEnabled(status)
self.ui.prefix_lineEdit_2.setEnabled(status)
self.ui.suffix_lineEdit_2.setEnabled(status)
self.ui.metadata_2_name_groupBox.setEnabled(status)
self.update_metadata_pyqt_ui()
def metadata_1_suffix_prefix_changed(self, new_text):
self.update_metadata_pyqt_ui()
def metadata_2_suffix_prefix_changed(self, new_text):
self.update_metadata_pyqt_ui()
# ========================================================================================
def update_metadata_pyqt_ui(self):
o_display = DisplayMetadataPyqtUi(parent=self)
o_display.clear_pyqt_items()
o_display.run()
def update_scale_pyqt_ui(self):
# if self.scale_pyqt_ui:
# self.ui.image_view.removeItem(self.scale_pyqt_ui)
# if self.scale_legend_pyqt_ui:
# self.ui.image_view.removeItem(self.scale_legend_pyqt_ui)
o_display = DisplayScalePyqtUi(parent=self)
o_display.clear_pyqt_items()
o_display.run()
def display_image(self, recalculate_image=False):
"""display the image selected by the file slider"""
DisplayImages(parent=self, recalculate_image=recalculate_image)
def check_status_next_prev_image_button(self):
    """Enable/disable the prev/next buttons flanking the file-image slider.

    "Previous" is disabled on the first image and "next" on the last one.
    Fix: the original used ``if/elif`` on equality with min/max, so with a
    single image (min == max) only "previous" was disabled and "next"
    stayed enabled; independent range comparisons disable both.
    """
    current_value = self.ui.file_slider.value()
    allow_prev = current_value > self.ui.file_slider.minimum()
    allow_next = current_value < self.ui.file_slider.maximum()
    self.ui.previous_image_button.setEnabled(allow_prev)
    self.ui.next_image_button.setEnabled(allow_next)
def change_slider(self, offset=+1):
self.ui.file_slider.blockSignals(True)
current_slider_value = self.ui.file_slider.value()
new_row_selected = current_slider_value + offset
self.ui.image_slider_value.setText(str(new_row_selected))
self.ui.file_slider.setValue(new_row_selected)
self.check_status_next_prev_image_button()
self.display_image()
self.ui.file_slider.blockSignals(False)
| 37.751185 | 102 | 0.612203 | from IPython.core.display import HTML
from IPython.core.display import display
import os
import copy
from qtpy.QtWidgets import QMainWindow, QFileDialog
from qtpy import QtGui
from collections import OrderedDict
from __code import load_ui
from .initialization import Initializer
from .event_handler import MetadataTableHandler
from __code.metadata_overlapping_images.export_images import ExportImages
from .display import DisplayImages, DisplayScalePyqtUi, DisplayMetadataPyqtUi
from .export_table import ExportTable
from __code.metadata_overlapping_images import HELP_PAGE
class MetadataOverlappingImagesUi(QMainWindow):
    """Main window of the "Metadata Overlapping Images" notebook UI.

    Loads a Qt ``.ui`` layout and wires many widget slots that all end up
    redrawing one of the pyqtgraph overlays on the displayed image: two
    metadata text overlays, a scale bar, and a metadata graph.
    """
    # Column indices of the metadata table used for the x/y axis selection.
    x_axis_column_index = 0
    y_axis_column_index = 2
    # Prefix glyphs used in the axis right-click menu (check mark vs. blank).
    xy_axis_menu_logo = {'enable': u"\u2713 ",
                         'disable': " "}
    # Per-column cleanup applied to raw metadata values: strip a leading and a
    # trailing substring, then apply up to two "<math_n> <value_n>" operations.
    metadata_operation = {0: {"first_part_of_string_to_remove": "",
                              "last_part_of_string_to_remove": "",
                              "math_1": "+",
                              "value_1": "",
                              "math_2": "+",
                              "value_2": "",
                              "index_of_metadata": -1,
                              },
                          2: {"first_part_of_string_to_remove": "",
                              "last_part_of_string_to_remove": "",
                              "math_1": "+",
                              "value_1": "",
                              "math_2": "+",
                              "value_2": "",
                              "index_of_metadata": -1,
                              },
                          3: {"first_part_of_string_to_remove": "",
                              "last_part_of_string_to_remove": "",
                              "math_1": "+",
                              "value_1": "",
                              "math_2": "+",
                              "value_2": "",
                              "index_of_metadata": -1,
                              },
                          }
    # Image data and bookkeeping state.
    data_dict = {}
    data_dict_raw = {}  # pristine deep copy of data_dict (see __init__)
    timestamp_dict = {}
    default_scale_roi = None
    rotation_angle = 0
    histogram_level = []
    # Handles of the pyqtgraph items currently drawn on the image view.
    scale_pyqt_ui = None
    scale_legend_pyqt_ui = None
    metadata1_pyqt_ui = None
    metadata2_pyqt_ui = None
    graph_pyqt_ui = None
    guide_table_width = [40, 400, 150, 150]
    live_image = []
    display_ui = []
    list_guide_pyqt_roi = list()
    list_profile_pyqt_roi = list()
    list_table_widget_checkbox = list()
    list_metadata = []
    dict_list_metadata = OrderedDict()
    # NOTE(review): this first assignment is dead — it is immediately
    # overwritten by the dict version on the next statement.
    list_scale_units = ["mm", u"\u00B5m", "nm"]
    list_scale_units = {'string': ["mm", u"\u00B5m", "nm"],
                        'html': ["mm", "<span>µm</span>", "nm"]}
    # Color tables keyed by color name: RGBA tuples, RGB tuples, HTML hex.
    rgba_color = {'white': (255, 255, 255, 255, None),
                  'red': (255, 0, 0, 255, None),
                  'green': (0, 255, 0, 255, None),
                  'blue': (0, 0, 255, 255, None),
                  'black': (0, 0, 0, 255, None)}
    rgb_color = {'white': (255, 255, 255),
                 'red': (255, 0, 0),
                 'green': (0, 255, 0),
                 'blue': (0, 0, 255),
                 'black': (0, 0, 0)}
    html_color = {'white': "#FFF",
                  'red': "#F00",
                  'green': "#0F0",
                  'blue': "#00F",
                  'black': "#000"}
    metadata_string_format_ui = None  # child window, closed in closeEvent
    def __init__(self, parent=None, working_dir='', data_dict=None):
        """Load the .ui layout, initialize all sub-systems and show image 0.

        :param working_dir: base folder used for the export dialogs
        :param data_dict: images/metadata payload; a deep copy is kept in
            data_dict_raw as an untouched backup
        """
        display(HTML('<span style="font-size: 20px; color:blue">Check UI that popped up \
            (maybe hidden behind this browser!)</span>'))
        super(MetadataOverlappingImagesUi, self).__init__(parent)
        ui_full_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
                                    os.path.join('ui', 'ui_metadata_overlapping_images.ui'))
        self.ui = load_ui(ui_full_path, baseinstance=self)
        self.setWindowTitle("Metadata Overlapping Images")
        self.working_dir = working_dir
        self.data_dict = data_dict
        self.data_dict_raw = copy.deepcopy(data_dict)
        o_initialization = Initializer(parent=self)
        o_initialization.pyqtgraph()
        o_initialization.parameters()
        o_initialization.statusbar()
        o_initialization.table()
        o_initialization.widgets()
        o_initialization.event()
        self.slider_file_changed(0)
        # Sync the enable/disable state of both metadata overlays with the
        # check boxes as restored from the .ui file.
        self.text_metadata_1_enable_pressed(self.ui.checkBox.isChecked())
        self.text_metadata_2_enable_pressed(self.ui.checkBox_2.isChecked())
    def metadata_table_right_click(self, position):
        o_metadata_table = MetadataTableHandler(parent=self)
        o_metadata_table.right_click(position)
    def previous_image_button_clicked(self):
        self.change_slider(offset=-1)
        self.update_metadata_pyqt_ui()
    def next_image_button_clicked(self):
        self.change_slider(offset = +1)
        self.update_metadata_pyqt_ui()
    def help_button_clicked(self):
        import webbrowser
        webbrowser.open(HELP_PAGE)
    def closeEvent(self, event=None):
        # Close the child "string format" window, if one was opened.
        if self.metadata_string_format_ui:
            self.metadata_string_format_ui.close()
    def slider_file_changed(self, slider_value):
        """Display the image at ``slider_value`` and refresh buttons/overlays."""
        self.display_image()
        self.ui.image_slider_value.setText(str(slider_value))
        self.check_status_next_prev_image_button()
        self.update_metadata_pyqt_ui()
    def slider_file_clicked(self):
        current_slider_value = self.ui.file_slider.value()
        self.slider_file_changed(current_slider_value)
        self.update_metadata_pyqt_ui()
    def scale_checkbox_clicked(self, status):
        self.ui.scale_groupbox.setEnabled(status)
        self.ui.scale_position_frame.setEnabled(status)
        o_display = DisplayScalePyqtUi(parent=self)
        o_display.run()
    def metadata_checkbox_clicked(self, status):
        self.ui.metadata_groupbox.setEnabled(status)
        self.ui.metadata_position_frame.setEnabled(status)
        self.ui.enable_graph_checkbox.setEnabled(status)
        self.ui.text_graph_tabWidget.setEnabled(status)
        self.ui.toolBox.setEnabled(status)
        if status:
            # Graph group follows its own check box when metadata is enabled.
            self.ui.graph_groupBox.setEnabled(self.ui.enable_graph_checkbox.isChecked())
        else:
            self.ui.graph_groupBox.setEnabled(False)
        o_display = DisplayMetadataPyqtUi(parent=self)
        o_display.run()
    def select_metadata_checkbox_clicked(self, status):
        self.ui.select_metadata_combobox.setEnabled(status)
        self.update_metadata_pyqt_ui()
    # --- Qt slots: each of the following tweaks triggers a redraw of the
    # --- metadata or scale overlay.
    def font_size_slider_pressed(self):
        self.update_metadata_pyqt_ui()
    def font_size_slider_moved(self, value):
        self.update_metadata_pyqt_ui()
    def graph_font_size_slider_pressed(self):
        self.update_metadata_pyqt_ui()
    def graph_font_size_slider_moved(self, value):
        self.update_metadata_pyqt_ui()
    def metadata_list_changed(self, index, column):
        o_event = MetadataTableHandler(parent=self)
        o_event.metadata_list_changed(index, column)
    def scale_orientation_clicked(self):
        o_init = Initializer(parent=self)
        o_init.set_scale_spinbox_max_value()
        self.update_scale_pyqt_ui()
    def scale_thickness_value_changed(self, value):
        self.update_scale_pyqt_ui()
    def scale_color_changed(self, value):
        self.update_scale_pyqt_ui()
    def scale_size_changed(self, value):
        self.update_scale_pyqt_ui()
    def scale_real_size_changed(self):
        self.update_scale_pyqt_ui()
    def scale_units_changed(self):
        self.update_scale_pyqt_ui()
    def scale_position_moved(self, new_value):
        self.update_scale_pyqt_ui()
    def scale_position_clicked(self):
        self.update_scale_pyqt_ui()
    def metadata_position_moved(self, new_value):
        self.update_metadata_pyqt_ui()
    def metadata_position_clicked(self):
        self.update_metadata_pyqt_ui()
    def metadata2_position_moved(self, new_value):
        self.update_metadata_pyqt_ui()
    def metadata2_position_clicked(self):
        self.update_metadata_pyqt_ui()
    def metadata_color_changed(self, value):
        self.update_metadata_pyqt_ui()
    def metadata_name_return_pressed(self):
        self.update_metadata_pyqt_ui()
    def graph_position_moved(self, value):
        self.update_metadata_pyqt_ui()
    def graph_position_clicked(self):
        self.update_metadata_pyqt_ui()
    def graph_color_changed(self, value):
        self.update_metadata_pyqt_ui()
    def graph_axis_label_changed(self, new_value):
        self.update_metadata_pyqt_ui()
    def metadata_text_or_graph_clicked(self):
        # Graph-size widgets are only meaningful in "graph" mode.
        status = self.ui.metadata_graph_option.isChecked()
        self.ui.metadata_graph_size_label.setVisible(status)
        self.ui.metadata_graph_size_slider.setVisible(status)
        self.update_metadata_pyqt_ui()
    def metadata_graph_size_pressed(self):
        self.update_metadata_pyqt_ui()
    def metadata_graph_size_moved(self, slider_value):
        self.update_metadata_pyqt_ui()
    def table_cell_changed(self, row, column):
        self.update_metadata_pyqt_ui()
    def export_table_clicked(self):
        """Ask for an output folder and export the metadata table to it."""
        _export_folder = QFileDialog.getExistingDirectory(self,
                                                          directory=os.path.dirname(self.working_dir),
                                                          caption="Select Output Folder",
                                                          options=QFileDialog.ShowDirsOnly)
        QtGui.QGuiApplication.processEvents()
        if _export_folder:
            o_export = ExportTable(parent=self,
                                   export_folder=_export_folder)
            o_export.run()
    def export_button_clicked(self):
        """Ask for an output folder and export the overlaid images to it."""
        _export_folder = QFileDialog.getExistingDirectory(self,
                                                          directory=os.path.dirname(self.working_dir),
                                                          caption="Select Output Folder",
                                                          options=QFileDialog.ShowDirsOnly)
        QtGui.QGuiApplication.processEvents()
        if _export_folder:
            o_export = ExportImages(parent=self,
                                    export_folder=_export_folder)
            o_export.run()
    def enable_graph_button_clicked(self, new_state):
        self.ui.graph_groupBox.setEnabled(new_state)
        self.ui.metadata_position_frame_3.setEnabled(new_state)
        self.ui.graph_position_y.setEnabled(new_state)
        self.ui.graph_position_x.setEnabled(new_state)
        self.ui.label_15.setEnabled(new_state)
        self.ui.label_16.setEnabled(new_state)
        self.update_metadata_pyqt_ui()
    def display_red_vertical_marker_clicked(self):
        self.update_metadata_pyqt_ui()
    def text_metadata_1_enable_pressed(self, status):
        """Enable/disable every widget of the first metadata text overlay."""
        self.ui.metadata_position_frame.setEnabled(status)
        self.ui.metadata_position_x.setEnabled(status)
        self.ui.metadata_position_y.setEnabled(status)
        self.ui.label_10.setEnabled(status)
        self.ui.label_11.setEnabled(status)
        self.ui.label_14.setEnabled(status)
        self.ui.font_size_slider.setEnabled(status)
        self.ui.prefix_label_1.setEnabled(status)
        self.ui.suffix_label_1.setEnabled(status)
        self.ui.prefix_lineEdit_1.setEnabled(status)
        self.ui.suffix_lineEdit_1.setEnabled(status)
        self.ui.metadata_1_name_groupBox.setEnabled(status)
        self.update_metadata_pyqt_ui()
    def text_metadata_2_enable_pressed(self, status):
        """Enable/disable every widget of the second metadata text overlay."""
        self.ui.metadata_position_frame_2.setEnabled(status)
        self.ui.metadata_position_x_2.setEnabled(status)
        self.ui.metadata_position_y_2.setEnabled(status)
        self.ui.label_18.setEnabled(status)
        self.ui.label_19.setEnabled(status)
        self.ui.label_20.setEnabled(status)
        self.ui.font_size_slider_2.setEnabled(status)
        self.ui.prefix_label_2.setEnabled(status)
        self.ui.suffix_label_2.setEnabled(status)
        self.ui.prefix_lineEdit_2.setEnabled(status)
        self.ui.suffix_lineEdit_2.setEnabled(status)
        self.ui.metadata_2_name_groupBox.setEnabled(status)
        self.update_metadata_pyqt_ui()
    def metadata_1_suffix_prefix_changed(self, new_text):
        self.update_metadata_pyqt_ui()
    def metadata_2_suffix_prefix_changed(self, new_text):
        self.update_metadata_pyqt_ui()
    def update_metadata_pyqt_ui(self):
        """Clear the metadata overlay items and rebuild them."""
        o_display = DisplayMetadataPyqtUi(parent=self)
        o_display.clear_pyqt_items()
        o_display.run()
    def update_scale_pyqt_ui(self):
        """Clear the scale-bar items and rebuild them."""
        o_display = DisplayScalePyqtUi(parent=self)
        o_display.clear_pyqt_items()
        o_display.run()
    def display_image(self, recalculate_image=False):
        """Display the image selected by the file slider."""
        DisplayImages(parent=self, recalculate_image=recalculate_image)
    def check_status_next_prev_image_button(self):
        """Enable/disable the previous/next buttons from the slider position."""
        current_slider_value = self.ui.file_slider.value()
        min_slider_value = self.ui.file_slider.minimum()
        max_slider_value = self.ui.file_slider.maximum()
        _prev = True
        _next = True
        # NOTE(review): because of the elif, a single-position slider
        # (minimum == maximum) only disables _prev; _next stays enabled.
        # These two checks look like they should be independent ifs.
        if current_slider_value == min_slider_value:
            _prev = False
        elif current_slider_value == max_slider_value:
            _next = False
        self.ui.previous_image_button.setEnabled(_prev)
        self.ui.next_image_button.setEnabled(_next)
    def change_slider(self, offset=+1):
        """Move the file slider by ``offset`` and refresh the display.

        Signals are blocked so slider_file_changed does not fire a second
        time; the refresh is performed explicitly here.
        """
        self.ui.file_slider.blockSignals(True)
        current_slider_value = self.ui.file_slider.value()
        new_row_selected = current_slider_value + offset
        self.ui.image_slider_value.setText(str(new_row_selected))
        self.ui.file_slider.setValue(new_row_selected)
        self.check_status_next_prev_image_button()
        self.display_image()
        self.ui.file_slider.blockSignals(False)
| true | true |
1c47a9ba768369e5fcda639a537396d54a754795 | 142 | py | Python | mysite/users/apps.py | saademad200/SE_Visualri | f01e22a5e47a44eb9219199027b68d1bd0bb4bca | [
"BSL-1.0"
] | null | null | null | mysite/users/apps.py | saademad200/SE_Visualri | f01e22a5e47a44eb9219199027b68d1bd0bb4bca | [
"BSL-1.0"
] | null | null | null | mysite/users/apps.py | saademad200/SE_Visualri | f01e22a5e47a44eb9219199027b68d1bd0bb4bca | [
"BSL-1.0"
] | null | null | null | from django.apps import AppConfig
class UsersConfig(AppConfig):
    """Django app configuration for the ``users`` app."""
    name = 'users'
    def ready(self):
        # Imported only for its side effects — presumably connects the app's
        # signal receivers once the app registry is ready (module not visible
        # here; confirm in users/signals.py).
        import users.signals
| 17.75 | 34 | 0.65493 | from django.apps import AppConfig
class UsersConfig(AppConfig):
    """Django app configuration for the ``users`` app."""
    name = 'users'
    def ready(self):
        # Side-effect import: presumably registers the app's signal
        # receivers when the app registry is ready — confirm in users/signals.py.
        import users.signals
| true | true |
1c47aaec11d06eff56c121c77fb592d8b28a697b | 13,676 | py | Python | tornado/autoreload.py | DengJackNo1/tornado | 895a4fa69817c24fbf6ada6c5fb07351c6e91cd5 | [
"Apache-2.0"
] | 640 | 2018-09-12T03:14:13.000Z | 2022-03-30T04:38:09.000Z | tornado/autoreload.py | DengJackNo1/tornado | 895a4fa69817c24fbf6ada6c5fb07351c6e91cd5 | [
"Apache-2.0"
] | 242 | 2019-01-29T15:48:27.000Z | 2022-03-31T22:09:21.000Z | tornado/autoreload.py | DengJackNo1/tornado | 895a4fa69817c24fbf6ada6c5fb07351c6e91cd5 | [
"Apache-2.0"
] | 230 | 2018-09-13T02:40:49.000Z | 2022-03-29T11:53:58.000Z | #
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Automatically restart the server when a source file is modified.
Most applications should not access this module directly. Instead,
pass the keyword argument ``autoreload=True`` to the
`tornado.web.Application` constructor (or ``debug=True``, which
enables this setting and several others). This will enable autoreload
mode as well as checking for changes to templates and static
resources. Note that restarting is a destructive operation and any
requests in progress will be aborted when the process restarts. (If
you want to disable autoreload while using other debug-mode features,
pass both ``debug=True`` and ``autoreload=False``).
This module can also be used as a command-line wrapper around scripts
such as unit test runners. See the `main` method for details.
The command-line wrapper and Application debug modes can be used together.
This combination is encouraged as the wrapper catches syntax errors and
other import-time failures, while debug mode catches changes once
the server has started.
This module will not work correctly when `.HTTPServer`'s multi-process
mode is used.
Reloading loses any Python interpreter command-line arguments (e.g. ``-u``)
because it re-executes Python using ``sys.executable`` and ``sys.argv``.
Additionally, modifying these variables will cause reloading to behave
incorrectly.
"""
import os
import sys
# sys.path handling
# -----------------
#
# If a module is run with "python -m", the current directory (i.e. "")
# is automatically prepended to sys.path, but not if it is run as
# "path/to/file.py". The processing for "-m" rewrites the former to
# the latter, so subsequent executions won't have the same path as the
# original.
#
# Conversely, when run as path/to/file.py, the directory containing
# file.py gets added to the path, which can cause confusion as imports
# may become relative in spite of the future import.
#
# We address the former problem by reconstructing the original command
# line (Python >= 3.4) or by setting the $PYTHONPATH environment
# variable (Python < 3.4) before re-execution so the new process will
# see the correct path. We attempt to address the latter problem when
# tornado.autoreload is run as __main__.
if __name__ == "__main__":
    # This sys.path manipulation must come before our imports (as much
    # as possible - if we introduced a tornado.sys or tornado.os
    # module we'd be in trouble), or else our imports would become
    # relative again despite the future import.
    #
    # There is a separate __main__ block at the end of the file to call main().
    # (Undoes the implicit sys.path[0] entry added when run as a script.)
    if sys.path[0] == os.path.dirname(__file__):
        del sys.path[0]
import functools
import logging
import os
import pkgutil # type: ignore
import sys
import traceback
import types
import subprocess
import weakref
from tornado import ioloop
from tornado.log import gen_log
from tornado import process
from tornado.util import exec_in
try:
import signal
except ImportError:
signal = None # type: ignore
import typing
from typing import Callable, Dict
if typing.TYPE_CHECKING:
from typing import List, Optional, Union # noqa: F401
# os.execv is broken on Windows and can't properly parse command line
# arguments and executable name if they contain whitespaces. subprocess
# fixes that behavior.
_has_execv = sys.platform != "win32"
# Extra file paths registered via watch(), checked alongside sys.modules.
_watched_files = set()
# Callables registered via add_reload_hook(), run just before re-exec.
_reload_hooks = []
# Set once a reload has been attempted, so a failed reload is not retried.
_reload_attempted = False
_io_loops = weakref.WeakKeyDictionary()  # type: ignore
# Original invocation details, recorded by main() for faithful re-exec.
_autoreload_is_main = False
_original_argv = None  # type: Optional[List[str]]
_original_spec = None
def start(check_time: int = 500) -> None:
    """Begin watching source files for changes.

    ``check_time`` is the polling interval in milliseconds.

    .. versionchanged:: 5.0
       The ``io_loop`` argument (deprecated since version 4.1) has been removed.
    """
    current_loop = ioloop.IOLoop.current()
    if current_loop not in _io_loops:
        _io_loops[current_loop] = True
        if len(_io_loops) > 1:
            gen_log.warning("tornado.autoreload started more than once in the same process")
        modify_times = {}  # type: Dict[str, float]
        poller = ioloop.PeriodicCallback(
            functools.partial(_reload_on_update, modify_times), check_time
        )
        poller.start()
def wait() -> None:
    """Block until a watched file changes, then restart the process.

    Intended for the end of scripts such as unit test runners, so the
    run repeats after any source change (see also the command-line
    interface in `main`).
    """
    loop = ioloop.IOLoop()
    loop.add_callback(start)
    loop.start()
def watch(filename: str) -> None:
    """Add a file to the watch list.

    All imported modules are watched by default.  ``filename`` is stored
    as given; a relative path is stat'd relative to the process working
    directory on each poll.
    """
    _watched_files.add(filename)
def add_reload_hook(fn: Callable[[], None]) -> None:
    """Add a function to be called before reloading the process.

    Hooks run in registration order, in the process that is about to
    re-exec itself.

    Note that for open file and socket handles it is generally
    preferable to set the ``FD_CLOEXEC`` flag (using `fcntl` or
    ``tornado.platform.auto.set_close_exec``) instead
    of using a reload hook to close them.
    """
    _reload_hooks.append(fn)
def _reload_on_update(modify_times: Dict[str, float]) -> None:
    """Poll every known source file; trigger a reload if any changed."""
    if _reload_attempted:
        # A previous reload did not work, so don't spin retrying it.
        return
    if process.task_id() is not None:
        # Child process created by fork_processes: restarting here would
        # make every child restart and then fork again.
        return
    for mod in list(sys.modules.values()):
        # Some modules play games with sys.modules (e.g. email/__init__.py
        # in the standard library) and getattr on them can occasionally
        # fail strangely; only ordinary modules are considered.
        if not isinstance(mod, types.ModuleType):
            continue
        filename = getattr(mod, "__file__", None)
        if not filename:
            continue
        if filename.endswith((".pyc", ".pyo")):
            # Map compiled files back to their source file.
            filename = filename[:-1]
        _check_file(modify_times, filename)
    for filename in _watched_files:
        _check_file(modify_times, filename)
def _check_file(modify_times: Dict[str, float], path: str) -> None:
try:
modified = os.stat(path).st_mtime
except Exception:
return
if path not in modify_times:
modify_times[path] = modified
return
if modify_times[path] != modified:
gen_log.info("%s modified; restarting server", path)
_reload()
def _reload() -> None:
    """Re-exec the current process with the same (reconstructed) command line.

    Runs the registered reload hooks first; never returns on success.
    """
    global _reload_attempted
    _reload_attempted = True
    for fn in _reload_hooks:
        fn()
    if hasattr(signal, "setitimer"):
        # Clear the alarm signal set by
        # ioloop.set_blocking_log_threshold so it doesn't fire
        # after the exec.
        signal.setitimer(signal.ITIMER_REAL, 0, 0)
    # sys.path fixes: see comments at top of file. If __main__.__spec__
    # exists, we were invoked with -m and the effective path is about to
    # change on re-exec. Reconstruct the original command line to
    # ensure that the new process sees the same path we did. If
    # __spec__ is not available (Python < 3.4), check instead if
    # sys.path[0] is an empty string and add the current directory to
    # $PYTHONPATH.
    if _autoreload_is_main:
        assert _original_argv is not None
        spec = _original_spec
        argv = _original_argv
    else:
        spec = getattr(sys.modules["__main__"], "__spec__", None)
        argv = sys.argv
    if spec:
        argv = ["-m", spec.name] + argv[1:]
    else:
        path_prefix = "." + os.pathsep
        if sys.path[0] == "" and not os.environ.get("PYTHONPATH", "").startswith(
            path_prefix
        ):
            os.environ["PYTHONPATH"] = path_prefix + os.environ.get("PYTHONPATH", "")
    if not _has_execv:
        # Windows: os.execv mishandles whitespace in arguments, so spawn a
        # fresh process instead and exit this one.
        subprocess.Popen([sys.executable] + argv)
        os._exit(0)
    else:
        try:
            os.execv(sys.executable, [sys.executable] + argv)
        except OSError:
            # Mac OS X versions prior to 10.6 do not support execv in
            # a process that contains multiple threads. Instead of
            # re-executing in the current process, start a new one
            # and cause the current process to exit. This isn't
            # ideal since the new process is detached from the parent
            # terminal and thus cannot easily be killed with ctrl-C,
            # but it's better than not being able to autoreload at
            # all.
            # Unfortunately the errno returned in this case does not
            # appear to be consistent, so we can't easily check for
            # this error specifically.
            os.spawnv(  # type: ignore
                os.P_NOWAIT, sys.executable, [sys.executable] + argv
            )
            # At this point the IOLoop has been closed and finally
            # blocks will experience errors if we allow the stack to
            # unwind, so just exit uncleanly.
            os._exit(0)
_USAGE = """\
Usage:
python -m tornado.autoreload -m module.to.run [args...]
python -m tornado.autoreload path/to/script.py [args...]
"""
def main() -> None:
    """Command-line wrapper to re-run a script whenever its source changes.

    Scripts may be specified by filename or module name::

        python -m tornado.autoreload -m tornado.test.runtests
        python -m tornado.autoreload tornado/test/runtests.py

    Running a script with this wrapper is similar to calling
    `tornado.autoreload.wait` at the end of the script, but this wrapper
    can catch import-time problems like syntax errors that would otherwise
    prevent the script from reaching its call to `wait`.
    """
    # Remember that we were launched with autoreload as main.
    # The main module can be tricky; set the variables both in our globals
    # (which may be __main__) and the real importable version.
    import tornado.autoreload

    global _autoreload_is_main
    global _original_argv, _original_spec
    tornado.autoreload._autoreload_is_main = _autoreload_is_main = True
    original_argv = sys.argv
    tornado.autoreload._original_argv = _original_argv = original_argv
    original_spec = getattr(sys.modules["__main__"], "__spec__", None)
    tornado.autoreload._original_spec = _original_spec = original_spec
    # Rebind sys.argv to a copy so the mutations below don't touch the
    # original_argv list saved above.
    sys.argv = sys.argv[:]
    if len(sys.argv) >= 3 and sys.argv[1] == "-m":
        mode = "module"
        module = sys.argv[2]
        del sys.argv[1:3]
    elif len(sys.argv) >= 2:
        mode = "script"
        script = sys.argv[1]
        sys.argv = sys.argv[1:]
    else:
        print(_USAGE, file=sys.stderr)
        sys.exit(1)
    try:
        if mode == "module":
            import runpy

            runpy.run_module(module, run_name="__main__", alter_sys=True)
        elif mode == "script":
            with open(script) as f:
                # Execute the script in our namespace instead of creating
                # a new one so that something that tries to import __main__
                # (e.g. the unittest module) will see names defined in the
                # script instead of just those defined in this module.
                global __file__
                __file__ = script
                # If __package__ is defined, imports may be incorrectly
                # interpreted as relative to this module.
                global __package__
                del __package__
                exec_in(f.read(), globals(), globals())
    except SystemExit as e:
        logging.basicConfig()
        gen_log.info("Script exited with status %s", e.code)
    except Exception as e:
        logging.basicConfig()
        gen_log.warning("Script exited with uncaught exception", exc_info=True)
        # If an exception occurred at import time, the file with the error
        # never made it into sys.modules and so we won't know to watch it.
        # Just to make sure we've covered everything, walk the stack trace
        # from the exception and watch every file.
        for (filename, lineno, name, line) in traceback.extract_tb(sys.exc_info()[2]):
            watch(filename)
        if isinstance(e, SyntaxError):
            # SyntaxErrors are special: their innermost stack frame is fake
            # so extract_tb won't see it and we have to get the filename
            # from the exception object.
            watch(e.filename)
    else:
        logging.basicConfig()
        gen_log.info("Script exited normally")
    # restore sys.argv so subsequent executions will include autoreload
    sys.argv = original_argv
    if mode == "module":
        # runpy did a fake import of the module as __main__, but now it's
        # no longer in sys.modules. Figure out where it is and watch it.
        loader = pkgutil.get_loader(module)
        if loader is not None:
            watch(loader.get_filename())  # type: ignore
    wait()
if __name__ == "__main__":
    # Entry point for ``python -m tornado.autoreload ...``.
    # See also the other __main__ block at the top of the file, which modifies
    # sys.path before our imports
    main()
| 37.468493 | 88 | 0.674101 |
import os
import sys
# original.
#
# Conversely, when run as path/to/file.py, the directory containing
# file.py gets added to the path, which can cause confusion as imports
# may become relative in spite of the future import.
#
# We address the former problem by reconstructing the original command
# line (Python >= 3.4) or by setting the $PYTHONPATH environment
# variable (Python < 3.4) before re-execution so the new process will
# see the correct path. We attempt to address the latter problem when
# tornado.autoreload is run as __main__.
if __name__ == "__main__":
    # This sys.path manipulation must come before our imports (as much
    # as possible - if we introduced a tornado.sys or tornado.os
    # module we'd be in trouble), or else our imports would become
    # relative again despite the future import.  A second __main__ block
    # at the end of the file calls main().
    if sys.path[0] == os.path.dirname(__file__):
        del sys.path[0]
import functools
import logging
import os
import pkgutil
import sys
import traceback
import types
import subprocess
import weakref
from tornado import ioloop
from tornado.log import gen_log
from tornado import process
from tornado.util import exec_in
try:
import signal
except ImportError:
signal = None
import typing
from typing import Callable, Dict
if typing.TYPE_CHECKING:
from typing import List, Optional, Union
# arguments and executable name if they contain whitespaces. subprocess
# fixes that behavior.
_has_execv = sys.platform != "win32"
# Extra file paths registered via watch(), checked alongside sys.modules.
_watched_files = set()
# Callables registered via add_reload_hook(), run just before re-exec.
_reload_hooks = []
# Set once a reload has been attempted, so a failed reload is not retried.
_reload_attempted = False
_io_loops = weakref.WeakKeyDictionary()  # type: ignore
# Original invocation details, recorded by main() for faithful re-exec.
_autoreload_is_main = False
_original_argv = None  # type: Optional[List[str]]
_original_spec = None
def start(check_time: int = 500) -> None:
    """Begin watching source files for changes on the current IOLoop.

    ``check_time`` is the polling interval in milliseconds; idempotent per
    IOLoop.
    """
    io_loop = ioloop.IOLoop.current()
    if io_loop in _io_loops:
        return
    _io_loops[io_loop] = True
    if len(_io_loops) > 1:
        gen_log.warning("tornado.autoreload started more than once in the same process")
    modify_times = {}  # type: Dict[str, float]
    callback = functools.partial(_reload_on_update, modify_times)
    scheduler = ioloop.PeriodicCallback(callback, check_time)
    scheduler.start()
def wait() -> None:
    """Block until a watched file changes, then restart the process."""
    io_loop = ioloop.IOLoop()
    io_loop.add_callback(start)
    io_loop.start()
def watch(filename: str) -> None:
    """Add a file to the watch list (all imported modules are watched by default)."""
    _watched_files.add(filename)
def add_reload_hook(fn: Callable[[], None]) -> None:
    """Register ``fn`` to be called (in order) just before the process re-execs."""
    _reload_hooks.append(fn)
def _reload_on_update(modify_times: Dict[str, float]) -> None:
    """Poll every known source file; trigger a reload if any changed."""
    if _reload_attempted:
        # We already tried to reload and it didn't work, so don't try again.
        return
    if process.task_id() is not None:
        # We're in a child process created by fork_processes. If child
        # processes restarted themselves, they'd all restart and then
        # all call fork_processes again.
        return
    for module in list(sys.modules.values()):
        # Some modules play games with sys.modules (e.g. email/__init__.py
        # in the standard library), and occasionally this can cause strange
        # failures in getattr. Just ignore anything that's not an ordinary
        # module.
        if not isinstance(module, types.ModuleType):
            continue
        path = getattr(module, "__file__", None)
        if not path:
            continue
        if path.endswith(".pyc") or path.endswith(".pyo"):
            # Map compiled files back to their source file.
            path = path[:-1]
        _check_file(modify_times, path)
    for path in _watched_files:
        _check_file(modify_times, path)
def _check_file(modify_times: Dict[str, float], path: str) -> None:
    """Record ``path``'s mtime; reload if it changed since last recorded.

    First sighting only records the mtime; unreadable paths are skipped.
    """
    try:
        modified = os.stat(path).st_mtime
    except Exception:
        return
    if path not in modify_times:
        modify_times[path] = modified
        return
    if modify_times[path] != modified:
        gen_log.info("%s modified; restarting server", path)
        _reload()
def _reload() -> None:
    """Re-exec the current process with the same (reconstructed) command line.

    Runs the registered reload hooks first; never returns on success.
    """
    global _reload_attempted
    _reload_attempted = True
    for fn in _reload_hooks:
        fn()
    if hasattr(signal, "setitimer"):
        # Clear any ITIMER_REAL alarm so it doesn't fire
        # after the exec.
        signal.setitimer(signal.ITIMER_REAL, 0, 0)
    # sys.path fixes: see comments at top of file. If __main__.__spec__
    # exists, we were invoked with -m and the effective path is about to
    # change on re-exec. Reconstruct the original command line to
    # ensure that the new process sees the same path we did. If
    # __spec__ is not available (Python < 3.4), check instead if
    # sys.path[0] is an empty string and add the current directory to
    # $PYTHONPATH.
    if _autoreload_is_main:
        assert _original_argv is not None
        spec = _original_spec
        argv = _original_argv
    else:
        spec = getattr(sys.modules["__main__"], "__spec__", None)
        argv = sys.argv
    if spec:
        argv = ["-m", spec.name] + argv[1:]
    else:
        path_prefix = "." + os.pathsep
        if sys.path[0] == "" and not os.environ.get("PYTHONPATH", "").startswith(
            path_prefix
        ):
            os.environ["PYTHONPATH"] = path_prefix + os.environ.get("PYTHONPATH", "")
    if not _has_execv:
        # Windows: os.execv mishandles whitespace in arguments, so spawn a
        # fresh process and exit this one instead.
        subprocess.Popen([sys.executable] + argv)
        os._exit(0)
    else:
        try:
            os.execv(sys.executable, [sys.executable] + argv)
        except OSError:
            # Mac OS X versions prior to 10.6 do not support execv in
            # a process that contains multiple threads. Instead of
            # re-executing in the current process, start a new one
            # and cause the current process to exit. This isn't
            # ideal (the child is detached from the terminal) but better
            # than not being able to autoreload at all.
            # Unfortunately the errno returned in this case does not
            # appear to be consistent, so we can't easily check for
            # this error specifically.
            os.spawnv(
                os.P_NOWAIT, sys.executable, [sys.executable] + argv
            )
            # The IOLoop is closed at this point; exit without unwinding.
            os._exit(0)
_USAGE = """\
Usage:
python -m tornado.autoreload -m module.to.run [args...]
python -m tornado.autoreload path/to/script.py [args...]
"""
def main() -> None:
    """Command-line wrapper to re-run a script (or module) whenever its
    source changes; also catches import-time errors such as syntax errors.
    """
    # Record the original invocation both in our globals (which may be
    # __main__) and in the importable module, for faithful re-exec.
    import tornado.autoreload
    global _autoreload_is_main
    global _original_argv, _original_spec
    tornado.autoreload._autoreload_is_main = _autoreload_is_main = True
    original_argv = sys.argv
    tornado.autoreload._original_argv = _original_argv = original_argv
    original_spec = getattr(sys.modules["__main__"], "__spec__", None)
    tornado.autoreload._original_spec = _original_spec = original_spec
    # Rebind to a copy so the mutations below don't touch original_argv.
    sys.argv = sys.argv[:]
    if len(sys.argv) >= 3 and sys.argv[1] == "-m":
        mode = "module"
        module = sys.argv[2]
        del sys.argv[1:3]
    elif len(sys.argv) >= 2:
        mode = "script"
        script = sys.argv[1]
        sys.argv = sys.argv[1:]
    else:
        print(_USAGE, file=sys.stderr)
        sys.exit(1)
    try:
        if mode == "module":
            import runpy
            runpy.run_module(module, run_name="__main__", alter_sys=True)
        elif mode == "script":
            with open(script) as f:
                # Run the script in *this* namespace so anything importing
                # __main__ sees the script's names.
                global __file__
                __file__ = script
                # A defined __package__ would make imports relative to us.
                global __package__
                del __package__
                exec_in(f.read(), globals(), globals())
    except SystemExit as e:
        logging.basicConfig()
        gen_log.info("Script exited with status %s", e.code)
    except Exception as e:
        logging.basicConfig()
        gen_log.warning("Script exited with uncaught exception", exc_info=True)
        # Just to make sure we've covered everything, walk the stack trace
        # from the exception and watch every file it mentions.
        for (filename, lineno, name, line) in traceback.extract_tb(sys.exc_info()[2]):
            watch(filename)
        if isinstance(e, SyntaxError):
            # SyntaxErrors keep the offending file on the exception itself,
            # not in the traceback — watch it
            # from the exception object.
            watch(e.filename)
    else:
        logging.basicConfig()
        gen_log.info("Script exited normally")
    # restore sys.argv so subsequent executions will include autoreload
    sys.argv = original_argv
    if mode == "module":
        # runpy did a fake import of the module as __main__, but now it's
        # gone from sys.modules; locate it via its loader and watch it.
        loader = pkgutil.get_loader(module)
        if loader is not None:
            watch(loader.get_filename())
    wait()
if __name__ == "__main__":
    # Entry point for ``python -m tornado.autoreload ...``.
    main()
| true | true |
1c47ab394fb23448ceb4c13702c16990ae7535cf | 649 | py | Python | Ex056.py | andrade-lcs/ex_curso_em_video_python | f2d029efe7a20cdf0fcb5b602f9992e27d37c263 | [
"MIT"
] | null | null | null | Ex056.py | andrade-lcs/ex_curso_em_video_python | f2d029efe7a20cdf0fcb5b602f9992e27d37c263 | [
"MIT"
] | null | null | null | Ex056.py | andrade-lcs/ex_curso_em_video_python | f2d029efe7a20cdf0fcb5b602f9992e27d37c263 | [
"MIT"
] | null | null | null | m = int()
q = int()
ma = int()
mm = int()
me = float()
nma = str()
a = int(input('Digite quantas pessoas tem o Grupo: '))
for c in range(0, a):
n = str(input('Digite o nome: '))
i = int(input('Digite a idade: '))
s = int(input('Digite o sexo:\n[1] para masculino\n[2]para feminino\n'))
m = m + i
q = q + 1
me = float(m / q)
if s == 1 and i > ma:
ma = i
nma = n
elif s == 2 and i < 20:
mm = mm + 1
print('A média de idade das pessoas digitas é {:.2f} anos.\nO homem mais velho é o {} com {} anos.\nE no grupo há {} '
'mulheres com menos de 20 anos.'.format(me, nma, ma, mm))
print('FIM')
| 28.217391 | 118 | 0.534669 | m = int()
q = int()   # number of people processed so far
ma = int()  # age of the oldest man seen so far
mm = int()  # number of women younger than 20
me = float()  # running average age
nma = str()   # name of the oldest man
a = int(input('Digite quantas pessoas tem o Grupo: '))  # group size
for c in range(0, a):
    n = str(input('Digite o nome: '))
    i = int(input('Digite a idade: '))
    s = int(input('Digite o sexo:\n[1] para masculino\n[2]para feminino\n'))
    m = m + i  # accumulate ages (m is initialised just above this block)
    q = q + 1
    me = float(m / q)  # average recomputed each pass; final value used below
    if s == 1 and i > ma:
        # New oldest man.
        ma = i
        nma = n
    elif s == 2 and i < 20:
        # Woman under 20 years old.
        mm = mm + 1
print('A média de idade das pessoas digitas é {:.2f} anos.\nO homem mais velho é o {} com {} anos.\nE no grupo há {} '
      'mulheres com menos de 20 anos.'.format(me, nma, ma, mm))
print('FIM')
| true | true |
1c47ac62262bb7d3b7efc480a2952496dfd81d53 | 571 | py | Python | core/da/sqlitedriver.py | ramkj/xman | 8ab14b0754e0ef3c44c27259c0df7c10697d3502 | [
"Apache-2.0"
] | null | null | null | core/da/sqlitedriver.py | ramkj/xman | 8ab14b0754e0ef3c44c27259c0df7c10697d3502 | [
"Apache-2.0"
] | null | null | null | core/da/sqlitedriver.py | ramkj/xman | 8ab14b0754e0ef3c44c27259c0df7c10697d3502 | [
"Apache-2.0"
] | null | null | null | import sqlite3
class SQLiteDriver:
def __init__(self, dbname: str ):
self.config = dbname
def __enter__(self) -> 'cursor':
self.connection = sqlite3.connect(self.config)
assert self.connection is not None, 'failed getting connection from DB'
self.connection.execute( 'PRAGMA foreign_keys=ON ' )
self.cursor = self.connection.cursor()
return self.cursor
def __exit__(self, exc_type, exc_value, exc_trace) -> None:
self.connection.commit()
self.cursor.close()
self.connection.close()
| 30.052632 | 79 | 0.654991 | import sqlite3
class SQLiteDriver:
def __init__(self, dbname: str ):
self.config = dbname
def __enter__(self) -> 'cursor':
self.connection = sqlite3.connect(self.config)
assert self.connection is not None, 'failed getting connection from DB'
self.connection.execute( 'PRAGMA foreign_keys=ON ' )
self.cursor = self.connection.cursor()
return self.cursor
def __exit__(self, exc_type, exc_value, exc_trace) -> None:
self.connection.commit()
self.cursor.close()
self.connection.close()
| true | true |
1c47ac976cbf51fb5ea1439ce4c43e00aa534a40 | 1,027 | py | Python | salt/runners/mine.py | bruce-one/salt | 0715f6c29a8e19c3cf7a67ad41aff84801c9f5ae | [
"Apache-2.0"
] | 1 | 2016-04-20T08:18:07.000Z | 2016-04-20T08:18:07.000Z | salt/runners/mine.py | quantonganh/salt | 8f1df678573153970c08b33978fe185d9ed1b71c | [
"Apache-2.0"
] | null | null | null | salt/runners/mine.py | quantonganh/salt | 8f1df678573153970c08b33978fe185d9ed1b71c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
A runner to access data from the salt mine
'''
# Import python libs
import os
# Import salt libs
import salt.payload
import salt.utils.minions
import salt.utils
def get(tgt, fun, tgt_type='glob'):
'''
Gathers the data from the specified minions' mine, pass in the target,
function to look up and the target type
CLI Example::
salt-run mine.get '*' network.interfaces
'''
ret = {}
serial = salt.payload.Serial(__opts__)
checker = salt.utils.minions.CkMinions(__opts__)
minions = checker.check_minions(
tgt,
tgt_type)
for minion in minions:
mine = os.path.join(
__opts__['cachedir'],
'minions',
minion,
'mine.p')
try:
with salt.utils.fopen(mine) as fp_:
fdata = serial.load(fp_).get(fun)
if fdata:
ret[minion] = fdata
except Exception:
continue
return ret
| 23.883721 | 74 | 0.558909 |
import os
import salt.payload
import salt.utils.minions
import salt.utils
def get(tgt, fun, tgt_type='glob'):
ret = {}
serial = salt.payload.Serial(__opts__)
checker = salt.utils.minions.CkMinions(__opts__)
minions = checker.check_minions(
tgt,
tgt_type)
for minion in minions:
mine = os.path.join(
__opts__['cachedir'],
'minions',
minion,
'mine.p')
try:
with salt.utils.fopen(mine) as fp_:
fdata = serial.load(fp_).get(fun)
if fdata:
ret[minion] = fdata
except Exception:
continue
return ret
| true | true |
1c47ade190e28d7400249b8c5dab37fe86d3fefc | 1,489 | py | Python | parser/team08/Tytus_SQLPARSER_G8/Instrucciones/FunctionTrigonometric/Tanh.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 35 | 2020-12-07T03:11:43.000Z | 2021-04-15T17:38:16.000Z | parser/team08/Tytus_SQLPARSER_G8/Instrucciones/FunctionTrigonometric/Tanh.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 47 | 2020-12-09T01:29:09.000Z | 2021-01-13T05:37:50.000Z | parser/team08/Tytus_SQLPARSER_G8/Instrucciones/FunctionTrigonometric/Tanh.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 556 | 2020-12-07T03:13:31.000Z | 2021-06-17T17:41:10.000Z | import math
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.TablaSimbolos.Tipo import Tipo_Dato, Tipo
from Instrucciones.Excepcion import Excepcion
class Tanh(Instruccion):
def __init__(self, valor, strGram, linea, columna):
Instruccion.__init__(self,Tipo(Tipo_Dato.DOUBLE_PRECISION),linea,columna,strGram)
self.valor = valor
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
resultado = self.valor.ejecutar(tabla,arbol)
if isinstance(resultado, Excepcion):
return resultado
if self.valor.tipo.tipo != Tipo_Dato.SMALLINT and self.valor.tipo.tipo != Tipo_Dato.INTEGER and self.valor.tipo.tipo != Tipo_Dato.BIGINT and self.valor.tipo.tipo != Tipo_Dato.DECIMAL and self.valor.tipo.tipo != Tipo_Dato.NUMERIC and self.valor.tipo.tipo != Tipo_Dato.REAL and self.valor.tipo.tipo != Tipo_Dato.DOUBLE_PRECISION:
error = Excepcion('42883',"Semántico","No existe la función tanh("+self.valor.tipo.toString()+")",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
try:
return math.tanh(resultado)
except ValueError as c:
error = Excepcion('22003',"Semántico","La entrada está fuera de rango",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error | 55.148148 | 335 | 0.697112 | import math
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.TablaSimbolos.Tipo import Tipo_Dato, Tipo
from Instrucciones.Excepcion import Excepcion
class Tanh(Instruccion):
def __init__(self, valor, strGram, linea, columna):
Instruccion.__init__(self,Tipo(Tipo_Dato.DOUBLE_PRECISION),linea,columna,strGram)
self.valor = valor
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
resultado = self.valor.ejecutar(tabla,arbol)
if isinstance(resultado, Excepcion):
return resultado
if self.valor.tipo.tipo != Tipo_Dato.SMALLINT and self.valor.tipo.tipo != Tipo_Dato.INTEGER and self.valor.tipo.tipo != Tipo_Dato.BIGINT and self.valor.tipo.tipo != Tipo_Dato.DECIMAL and self.valor.tipo.tipo != Tipo_Dato.NUMERIC and self.valor.tipo.tipo != Tipo_Dato.REAL and self.valor.tipo.tipo != Tipo_Dato.DOUBLE_PRECISION:
error = Excepcion('42883',"Semántico","No existe la función tanh("+self.valor.tipo.toString()+")",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
try:
return math.tanh(resultado)
except ValueError as c:
error = Excepcion('22003',"Semántico","La entrada está fuera de rango",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error | true | true |
1c47ae46f17c882072873a49257d173aa670600d | 6,395 | py | Python | examples/benchmarks/json/errors.py | eerimoq/textparser | cc4a85f8b7e6d6be83f5072f45af4a7baf6c35df | [
"MIT"
] | 23 | 2018-09-01T14:39:07.000Z | 2021-11-08T11:52:43.000Z | examples/benchmarks/json/errors.py | risingdeveloper007/TextParser | c0f7b0268f86b77f4eb8366016987140792faff8 | [
"MIT"
] | 1 | 2020-07-06T13:19:25.000Z | 2020-08-01T08:16:34.000Z | examples/benchmarks/json/errors.py | risingdeveloper007/TextParser | c0f7b0268f86b77f4eb8366016987140792faff8 | [
"MIT"
] | 6 | 2019-05-01T21:31:03.000Z | 2021-08-24T11:57:21.000Z | #!/usr/bin/env python
"""Parse error comparsion for a few JSON parsers.
Example execution:
$ env PYTHONPATH=. python3 examples/benchmarks/json/errors.py
-----------------------------------------------------------------
Input string between BEGIN and END:
BEGIN
END
textparser: "Invalid syntax at line 1, column 1: ">>!<<""
lark_lalr: "'NoneType' object has no attribute 'pos_in_stream'"
lark_earley: "Incomplete parse: Could not find a solution to input"
pyparsing: "Expected {string enclosed in double quotes | real number with scientific notation | real number | signed integer | Group:(Forward: ...) | Group:({Suppress:("[") [Forward: ... [, Forward: ...]...] Suppress:("]")}) | "true" | "false" | "null"} (at char 0), (line:1, col:1)"
parsita: "No exception raised!"
funcparserlib: "no tokens left in the stream: <EOF>"
parsy: "expected one of '"', '-?(0|[1-9][0-9]*)([.][0-9]+)?([eE][+-]?[0-9]+)?', '[', 'false', 'null', 'true', '{' at 0:0"
parsimonious: "Rule 'json_file' didn't match at '' (line 1, column 1)."
pyleri: "No exception raised!"
textx: "None:1:1: error: Expected '[' or '{' at position (1, 1) => '*'."
-----------------------------------------------------------------
Input string between BEGIN and END:
BEGIN
[
1,
{"a": {]}
]
END
textparser: "Invalid syntax at line 3, column 10: " {"a": {>>!<<]}""
lark_lalr: "Unexpected token Token(RSQB, ']') at line 3, column 10.
Expected: ESCAPED_STRING, RBRACE, string, pair
"
lark_earley: "Unexpected token Token(RSQB, ']') at line 3, column 10.
Expected: ESCAPED_STRING, RBRACE
"
pyparsing: "Expected {string enclosed in double quotes | real number with scientific notation | real number | signed integer | Group:(Forward: ...) | Group:({Suppress:("[") [Forward: ... [, Forward: ...]...] Suppress:("]")}) | "true" | "false" | "null"} (at char 5), (line:2, col:4)"
parsita: "No exception raised!"
funcparserlib: "got unexpected token: 3,10-3,10: Op ']'"
parsy: "expected one of '"', '}' at 2:9"
parsimonious: "Rule 'members' didn't match at ']}
]
' (line 3, column 10)."
pyleri: "No exception raised!"
textx: "None:3:10: error: Expected STRING or '}' at position (3, 10) => ' {"a": {*]} ] '."
-----------------------------------------------------------------
Input string between BEGIN and END:
BEGIN
[
1,
{3: null}
]
END
textparser: "Invalid syntax at line 3, column 4: " {>>!<<3: null}""
lark_lalr: "Unexpected token Token(SIGNED_NUMBER, '3') at line 3, column 4.
Expected: RBRACE, pair, string, ESCAPED_STRING
"
lark_earley: "Unexpected token Token(SIGNED_NUMBER, '3') at line 3, column 4.
Expected: ESCAPED_STRING, RBRACE
"
pyparsing: "Expected {string enclosed in double quotes | real number with scientific notation | real number | signed integer | Group:(Forward: ...) | Group:({Suppress:("[") [Forward: ... [, Forward: ...]...] Suppress:("]")}) | "true" | "false" | "null"} (at char 5), (line:2, col:4)"
parsita: "No exception raised!"
funcparserlib: "got unexpected token: 3,4-3,4: Number '3'"
parsy: "expected one of '"', '}' at 2:3"
parsimonious: "Rule 'members' didn't match at '3: null}
]
' (line 3, column 4)."
pyleri: "No exception raised!"
textx: "None:3:4: error: Expected STRING or '}' at position (3, 4) => '[ 1, {*3: null} ]'."
-----------------------------------------------------------------
Input string between BEGIN and END:
BEGIN
nul
END
textparser: "Invalid syntax at line 1, column 1: ">>!<<nul""
lark_lalr: "No terminal defined for 'n' at line 1 col 1
nul
^
"
lark_earley: "No terminal defined for 'n' at line 1 col 1
nul
^
"
pyparsing: "Expected {string enclosed in double quotes | real number with scientific notation | real number | signed integer | Group:(Forward: ...) | Group:({Suppress:("[") [Forward: ... [, Forward: ...]...] Suppress:("]")}) | "true" | "false" | "null"} (at char 0), (line:1, col:1)"
parsita: "No exception raised!"
funcparserlib: "got unexpected token: 1,1-1,3: Name 'nul'"
parsy: "expected one of '"', '-?(0|[1-9][0-9]*)([.][0-9]+)?([eE][+-]?[0-9]+)?', '[', 'false', 'null', 'true', '{' at 0:0"
parsimonious: "Rule 'json_file' didn't match at 'nul
' (line 1, column 1)."
pyleri: "No exception raised!"
textx: "None:1:1: error: Expected '[' or '{' at position (1, 1) => '*nul '."
$
"""
from __future__ import print_function
from parsers import textparser_json
from parsers import lark_json
from parsers import pyparsing_json
from parsers import funcparserlib_json
from parsers import parsimonious_json
from parsers import textx_json
try:
from parsers import parsita_json
except:
class parsita_json(object):
@staticmethod
def parse(_json_string):
raise Exception('Import failed!')
try:
from parsers import parsy_json
except:
class parsy_json(object):
@staticmethod
def parse(_json_string):
raise Exception('Import failed!')
try:
from parsers import pyleri_json
except:
class pyleri_json(object):
@staticmethod
def parse(_json_string):
raise Exception('Import failed!')
def parse(string):
def _parse(function):
try:
function(string)
except Exception as e:
return str(e)
return 'No exception raised!'
results = [
('textparser', _parse(textparser_json.parse)),
('lark_lalr', _parse(lark_json.parse_lalr)),
('lark_earley', _parse(lark_json.parse_earley)),
('pyparsing', _parse(pyparsing_json.parse)),
('parsita', _parse(parsita_json.parse)),
('funcparserlib', _parse(funcparserlib_json.parse)),
('parsy', _parse(parsy_json.parse)),
('parsimonious', _parse(parsimonious_json.parse)),
('pyleri', _parse(pyleri_json.parse)),
('textx', _parse(textx_json.parse))
]
print('-----------------------------------------------------------------')
print()
print('Input string between BEGIN and END:')
print()
print('BEGIN')
print(string, end='')
print('END')
print()
for parser, error in results:
print('{}: "{}"'.format(parser, error))
print()
EMPTY_STRING = '''\
'''
BAD_DICT_END_STRING = '''\
[
1,
{"a": {]}
]
'''
BAD_DICT_KEY_STRING = '''\
[
1,
{3: null}
]
'''
BAD_NULL_STRING = '''\
nul
'''
parse(EMPTY_STRING)
parse(BAD_DICT_END_STRING)
parse(BAD_DICT_KEY_STRING)
parse(BAD_NULL_STRING)
| 25.682731 | 283 | 0.602033 |
from __future__ import print_function
from parsers import textparser_json
from parsers import lark_json
from parsers import pyparsing_json
from parsers import funcparserlib_json
from parsers import parsimonious_json
from parsers import textx_json
try:
from parsers import parsita_json
except:
class parsita_json(object):
@staticmethod
def parse(_json_string):
raise Exception('Import failed!')
try:
from parsers import parsy_json
except:
class parsy_json(object):
@staticmethod
def parse(_json_string):
raise Exception('Import failed!')
try:
from parsers import pyleri_json
except:
class pyleri_json(object):
@staticmethod
def parse(_json_string):
raise Exception('Import failed!')
def parse(string):
def _parse(function):
try:
function(string)
except Exception as e:
return str(e)
return 'No exception raised!'
results = [
('textparser', _parse(textparser_json.parse)),
('lark_lalr', _parse(lark_json.parse_lalr)),
('lark_earley', _parse(lark_json.parse_earley)),
('pyparsing', _parse(pyparsing_json.parse)),
('parsita', _parse(parsita_json.parse)),
('funcparserlib', _parse(funcparserlib_json.parse)),
('parsy', _parse(parsy_json.parse)),
('parsimonious', _parse(parsimonious_json.parse)),
('pyleri', _parse(pyleri_json.parse)),
('textx', _parse(textx_json.parse))
]
print('-----------------------------------------------------------------')
print()
print('Input string between BEGIN and END:')
print()
print('BEGIN')
print(string, end='')
print('END')
print()
for parser, error in results:
print('{}: "{}"'.format(parser, error))
print()
EMPTY_STRING = '''\
'''
BAD_DICT_END_STRING = '''\
[
1,
{"a": {]}
]
'''
BAD_DICT_KEY_STRING = '''\
[
1,
{3: null}
]
'''
BAD_NULL_STRING = '''\
nul
'''
parse(EMPTY_STRING)
parse(BAD_DICT_END_STRING)
parse(BAD_DICT_KEY_STRING)
parse(BAD_NULL_STRING)
| true | true |
1c47af64e57d9e011aed97ff68c6f130de74836b | 1,067 | py | Python | setup.py | timmypidashev/poilet | 40535f9d22f1722de130458e9e487a945abd653f | [
"MIT"
] | null | null | null | setup.py | timmypidashev/poilet | 40535f9d22f1722de130458e9e487a945abd653f | [
"MIT"
] | null | null | null | setup.py | timmypidashev/poilet | 40535f9d22f1722de130458e9e487a945abd653f | [
"MIT"
] | null | null | null | import re
from setuptools import setup
# README will be shown on PyPi
with open('README.md') as file:
readme = file.read()
# Track version number
with open('poilet/__init__.py') as file:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', file.read(), re.MULTILINE)
setup(
name='poilet',
author='timmypidashev',
url='https://github.com/timmypidashev/poilet',
project_urls={
'Discussions': 'https://github.com/timmypidashev/poilet/discussions',
'Issues': 'https://github.com/timmypidashev/poilet/issues',
},
version=version,
packages=['poilet'],
license='MIT',
description='Python variant of The Other Implementation of figLET',
long_description=readme,
long_description_content_type='text/markdown',
python_requires='>=3.10.4',
classifiers=[
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.10'
]
)
| 30.485714 | 93 | 0.645736 | import re
from setuptools import setup
with open('README.md') as file:
readme = file.read()
with open('poilet/__init__.py') as file:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', file.read(), re.MULTILINE)
setup(
name='poilet',
author='timmypidashev',
url='https://github.com/timmypidashev/poilet',
project_urls={
'Discussions': 'https://github.com/timmypidashev/poilet/discussions',
'Issues': 'https://github.com/timmypidashev/poilet/issues',
},
version=version,
packages=['poilet'],
license='MIT',
description='Python variant of The Other Implementation of figLET',
long_description=readme,
long_description_content_type='text/markdown',
python_requires='>=3.10.4',
classifiers=[
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.10'
]
)
| true | true |
1c47affeae4e58845137235341df557f0710b03f | 50,416 | py | Python | mypy/main.py | noudald/mypy | ecdd4b2e81945d998eb1e1116fb901ff7b63a703 | [
"PSF-2.0"
] | null | null | null | mypy/main.py | noudald/mypy | ecdd4b2e81945d998eb1e1116fb901ff7b63a703 | [
"PSF-2.0"
] | null | null | null | mypy/main.py | noudald/mypy | ecdd4b2e81945d998eb1e1116fb901ff7b63a703 | [
"PSF-2.0"
] | null | null | null | """Mypy type checker command line tool."""
import argparse
import ast
import configparser
import os
import re
import subprocess
import sys
import time
from typing import Any, Dict, List, Mapping, Optional, Tuple, Callable
from mypy import build
from mypy import defaults
from mypy import experiments
from mypy import util
from mypy.build import BuildResult
from mypy.modulefinder import BuildSource, FindModuleCache, mypy_path, SearchPaths
from mypy.find_sources import create_source_list, InvalidSourceList
from mypy.fscache import FileSystemCache
from mypy.errors import CompileError
from mypy.options import Options, BuildType, PER_MODULE_OPTIONS
from mypy.report import reporter_classes
from mypy.version import __version__
# MYPY is False at runtime; the guarded import below therefore only runs
# under the type checker, so there is no runtime dependency on
# typing_extensions.
MYPY = False
if MYPY:
    from typing_extensions import Final
# Saved reference to the real os.stat, so stat_proxy (below) can delegate
# to it even after os.stat has been monkey-patched for debugging.
orig_stat = os.stat  # type: Final
MEM_PROFILE = False  # type: Final  # If True, dump memory profile
def stat_proxy(path: str) -> os.stat_result:
    """Logging stand-in for os.stat (debug aid).

    Install with ``os.stat = stat_proxy``; every call is delegated to the
    saved ``orig_stat`` and either the result fields or the error are
    printed before the result/exception propagates.
    """
    try:
        result = orig_stat(path)
    except os.error as err:
        print("stat(%r) -> %s" % (path, err))
        raise
    print("stat(%r) -> (st_mode=%o, st_mtime=%d, st_size=%d)" %
          (path, result.st_mode, result.st_mtime, result.st_size))
    return result
def main(script_path: Optional[str], args: Optional[List[str]] = None) -> None:
    """Main entry point to the type checker.

    Runs the build, echoes/collects error messages, optionally writes a
    junit-xml report, and exits with status 0 (no errors), 1 (type
    errors) or 2 (blocking errors, or a broken output pipe).

    Args:
        script_path: Path to the 'mypy' script (used for finding data files).
        args: Custom command-line arguments. If not given, sys.argv[1:] will
              be used.
    """
    # Check for known bad Python versions.
    if sys.version_info[:2] < (3, 4):
        sys.exit("Running mypy with Python 3.3 or lower is not supported; "
                 "please upgrade to 3.4 or newer")
    if sys.version_info[:3] == (3, 5, 0):
        sys.exit("Running mypy with Python 3.5.0 is not supported; "
                 "please upgrade to 3.5.1 or newer")
    t0 = time.time()  # Start time, used for the junit-xml report duration.
    # To log stat() calls: os.stat = stat_proxy
    sys.setrecursionlimit(2 ** 14)
    if args is None:
        args = sys.argv[1:]
    fscache = FileSystemCache()
    sources, options = process_options(args, fscache=fscache)
    # Accumulates every error message seen, for the junit report and exit code.
    messages = []
    def flush_errors(new_messages: List[str], serious: bool) -> None:
        # Build callback: record messages and echo them as they arrive
        # (serious ones go to stderr, the rest to stdout).
        messages.extend(new_messages)
        f = sys.stderr if serious else sys.stdout
        try:
            for msg in new_messages:
                f.write(msg + '\n')
            f.flush()
        except BrokenPipeError:
            sys.exit(2)
    serious = False
    blockers = False
    res = None
    try:
        # Keep a dummy reference (res) for memory profiling below, as otherwise
        # the result could be freed.
        res = build.build(sources, options, None, flush_errors, fscache)
    except CompileError as e:
        # Blocking error; its messages were already emitted via flush_errors.
        blockers = True
        if not e.use_stdout:
            serious = True
    if options.warn_unused_configs and options.unused_configs:
        print("Warning: unused section(s) in %s: %s" %
              (options.config_file,
               ", ".join("[mypy-%s]" % glob for glob in options.per_module_options.keys()
                         if glob in options.unused_configs)),
              file=sys.stderr)
    if options.junit_xml:
        t1 = time.time()
        util.write_junit_xml(t1 - t0, serious, messages, options.junit_xml)
    if MEM_PROFILE:
        from mypy.memprofile import print_memory_profile
        print_memory_profile()
    del res  # Now it's safe to delete
    # Exit status: 0 = no errors, 1 = type errors, 2 = blocking errors.
    code = 0
    if messages:
        code = 2 if blockers else 1
    if options.fast_exit:
        # Exit without freeing objects -- it's faster.
        #
        # NOTE: We don't flush all open files on exit (or run other destructors)!
        util.hard_exit(code)
    elif code:
        sys.exit(code)
def readlinkabs(link: str) -> str:
    """Return the absolute path of the destination of symbolic link `link`."""
    # Adapted from code by Greg Smith.
    assert os.path.islink(link)
    target = os.readlink(link)
    if not os.path.isabs(target):
        # Relative link targets are resolved against the link's directory.
        target = os.path.join(os.path.dirname(link), target)
    return target
class SplitNamespace(argparse.Namespace):
    """Namespace that routes attribute access to one of two namespaces.

    Attribute names starting with ``alt_prefix`` are stored on/read from
    ``alt_namespace`` (with the prefix stripped); all other names go to
    ``standard_namespace``.
    """

    def __init__(self, standard_namespace: object, alt_namespace: object, alt_prefix: str) -> None:
        # Write straight into __dict__ to bypass our own __setattr__.
        self.__dict__['_standard_namespace'] = standard_namespace
        self.__dict__['_alt_namespace'] = alt_namespace
        self.__dict__['_alt_prefix'] = alt_prefix

    def _get(self) -> Tuple[Any, Any]:
        # Expose the two underlying namespaces.
        return (self._standard_namespace, self._alt_namespace)

    def __setattr__(self, name: str, value: Any) -> None:
        prefix = self._alt_prefix
        if name.startswith(prefix):
            setattr(self._alt_namespace, name[len(prefix):], value)
        else:
            setattr(self._standard_namespace, name, value)

    def __getattr__(self, name: str) -> Any:
        # Only called when normal lookup fails, i.e. for routed names.
        prefix = self._alt_prefix
        if name.startswith(prefix):
            return getattr(self._alt_namespace, name[len(prefix):])
        return getattr(self._standard_namespace, name)
def parse_version(v: str) -> Tuple[int, int]:
    """Parse a version string like '3.5' into a (major, minor) tuple.

    Raises argparse.ArgumentTypeError for malformed strings and for
    versions outside the supported range (2.7, or 3.x at or above
    defaults.PYTHON3_VERSION_MIN).
    """
    m = re.match(r'\A(\d)\.(\d+)\Z', v)
    if m is None:
        raise argparse.ArgumentTypeError(
            "Invalid python version '{}' (expected format: 'x.y')".format(v))
    major = int(m.group(1))
    minor = int(m.group(2))
    if major == 2 and minor != 7:
        raise argparse.ArgumentTypeError(
            "Python 2.{} is not supported (must be 2.7)".format(minor))
    if major == 3 and minor < defaults.PYTHON3_VERSION_MIN[1]:
        raise argparse.ArgumentTypeError(
            "Python 3.{0} is not supported (must be {1}.{2} or higher)".format(minor,
                                                   *defaults.PYTHON3_VERSION_MIN))
    if major not in (2, 3):
        raise argparse.ArgumentTypeError(
            "Python major version '{}' out of range (must be 2 or 3)".format(major))
    return major, minor
# Make the help output a little less jarring.
class AugmentedHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """Help formatter that preserves explicit line breaks.

    Text containing a newline is treated as manually formatted and passed
    through verbatim; single-paragraph text is wrapped by argparse as
    usual.
    """

    def __init__(self, prog: str) -> None:
        # Widen the column where option help strings start.
        super().__init__(prog=prog, max_help_position=28)

    def _fill_text(self, text: str, width: int, indent: str) -> str:
        """Render description/epilog text.

        Note: argparse passes ``indent`` as a string of spaces (it is
        prepended to each output line), not an int -- the annotation here
        previously said ``int``, which was wrong.
        """
        if '\n' in text:
            # Assume we want to manually format the text
            return super()._fill_text(text, width, indent)
        else:
            # Assume we want argparse to manage wrapping, indentating, and
            # formatting the text for us.
            return argparse.HelpFormatter._fill_text(self, text, width, indent)
# Define pairs of flag prefixes with inverse meaning.
flag_prefix_pairs = [
    ('allow', 'disallow'),
    ('show', 'hide'),
]  # type: Final
# Bidirectional lookup table built from the pairs above.
flag_prefix_map = {}  # type: Final[Dict[str, str]]
for a, b in flag_prefix_pairs:
    flag_prefix_map[a] = b
    flag_prefix_map[b] = a


def invert_flag_name(flag: str) -> str:
    """Return the flag spelling with the opposite meaning.

    E.g. '--allow-x' <-> '--disallow-x' and '--no-y' <-> '--y'; any
    other flag '--z' becomes '--no-z'.
    """
    body = flag[2:]
    prefix, sep, rest = body.partition('-')
    if sep:
        if prefix in flag_prefix_map:
            return '--{}-{}'.format(flag_prefix_map[prefix], rest)
        if prefix == 'no':
            return '--{}'.format(rest)
    return '--no-{}'.format(body)
class PythonExecutableInferenceError(Exception):
    """Represents a failure to infer the version or executable while searching.

    Raised by the --python-version/--python-executable inference helpers
    below when the two options are inconsistent or no matching
    interpreter can be found.
    """
def python_executable_prefix(v: str) -> List[str]:
    """Return the argv prefix that runs Python version `v` (e.g. '3.5')."""
    if sys.platform != 'win32':
        return ['python{}'.format(v)]
    # on Windows, all Python executables are named `python`. To handle this, there
    # is the `py` launcher, which can be passed a version e.g. `py -3.5`, and it will
    # execute an installed Python 3.5 interpreter. See also:
    # https://docs.python.org/3/using/windows.html#python-launcher-for-windows
    return ['py', '-{}'.format(v)]
def _python_version_from_executable(python_executable: str) -> Tuple[int, int]:
try:
check = subprocess.check_output([python_executable, '-c',
'import sys; print(repr(sys.version_info[:2]))'],
stderr=subprocess.STDOUT).decode()
return ast.literal_eval(check)
except (subprocess.CalledProcessError, FileNotFoundError):
raise PythonExecutableInferenceError(
'invalid Python executable {}'.format(python_executable))
def _python_executable_from_version(python_version: Tuple[int, int]) -> str:
if sys.version_info[:2] == python_version:
return sys.executable
str_ver = '.'.join(map(str, python_version))
try:
sys_exe = subprocess.check_output(python_executable_prefix(str_ver) +
['-c', 'import sys; print(sys.executable)'],
stderr=subprocess.STDOUT).decode().strip()
return sys_exe
except (subprocess.CalledProcessError, FileNotFoundError):
raise PythonExecutableInferenceError(
'failed to find a Python executable matching version {},'
' perhaps try --python-executable, or --no-site-packages?'.format(python_version))
def infer_python_version_and_executable(options: Options,
                                        special_opts: argparse.Namespace) -> None:
    """Fill in options.python_version/options.python_executable from special_opts.

    Each of the two settings can be inferred from the other; when both are
    given they are checked for consistency.  Mutates `options` in place;
    raises PythonExecutableInferenceError on mismatch.
    """
    # TODO: (ethanhs) Look at folding these checks and the site packages subprocess calls into
    # one subprocess call for speed.
    exe = special_opts.python_executable
    version = special_opts.python_version
    if exe is not None and version is not None:
        # Both given: verify the executable really is that version.
        actual = _python_version_from_executable(exe)
        if actual != version:
            raise PythonExecutableInferenceError(
                'Python version {} did not match executable {}, got version {}.'.format(
                    version, exe, actual
                ))
        options.python_version = version
        options.python_executable = exe
    elif version is not None:
        # Only the version given: find a matching executable (unless disabled).
        options.python_version = version
        if special_opts.no_executable:
            options.python_executable = None
        else:
            options.python_executable = _python_executable_from_version(version)
    elif exe is not None:
        # Only the executable given: query it for its version.
        options.python_version = _python_version_from_executable(exe)
        options.python_executable = exe
HEADER = """%(prog)s [-h] [-v] [-V] [more options; see below]
[-m MODULE] [-p PACKAGE] [-c PROGRAM_TEXT] [files ...]""" # type: Final
DESCRIPTION = """
Mypy is a program that will type check your Python code.
Pass in any files or folders you want to type check. Mypy will
recursively traverse any provided folders to find .py files:
$ mypy my_program.py my_src_folder
For more information on getting started, see:
- http://mypy.readthedocs.io/en/latest/getting_started.html
For more details on both running mypy and using the flags below, see:
- http://mypy.readthedocs.io/en/latest/running_mypy.html
- http://mypy.readthedocs.io/en/latest/command_line.html
You can also use a config file to configure mypy instead of using
command line flags. For more details, see:
- http://mypy.readthedocs.io/en/latest/config_file.html
""" # type: Final
FOOTER = """Environment variables:
Define MYPYPATH for additional module search path entries.""" # type: Final
def process_options(args: List[str],
                    require_targets: bool = True,
                    server_options: bool = False,
                    fscache: Optional[FileSystemCache] = None,
                    ) -> Tuple[List[BuildSource], Options]:
    """Parse command line arguments.

    Returns a tuple (build sources to type check, fully populated Options).

    If a FileSystemCache is passed in, and package_root options are given,
    call fscache.set_package_root() to set the cache's package root.
    """
    parser = argparse.ArgumentParser(prog='mypy',
                                     usage=HEADER,
                                     description=DESCRIPTION,
                                     epilog=FOOTER,
                                     fromfile_prefix_chars='@',
                                     formatter_class=AugmentedHelpFormatter,
                                     add_help=False)

    # Flags implied by --strict; filled in by add_invertible_flag() calls
    # below that pass strict_flag=True.
    strict_flag_names = []  # type: List[str]
    strict_flag_assignments = []  # type: List[Tuple[str, bool]]

    def add_invertible_flag(flag: str,
                            *,
                            inverse: Optional[str] = None,
                            default: bool,
                            dest: Optional[str] = None,
                            help: str,
                            strict_flag: bool = False,
                            group: Optional[argparse._ActionsContainer] = None
                            ) -> None:
        """Register a boolean flag together with a hidden inverse flag.

        The inverse flag shares the same dest but its help is suppressed;
        strict_flag=True additionally records the flag as part of --strict.
        """
        if inverse is None:
            inverse = invert_flag_name(flag)
        if group is None:
            group = parser
        if help is not argparse.SUPPRESS:
            help += " (inverse: {})".format(inverse)
        arg = group.add_argument(flag,
                                 action='store_false' if default else 'store_true',
                                 dest=dest,
                                 help=help)
        dest = arg.dest
        arg = group.add_argument(inverse,
                                 action='store_true' if default else 'store_false',
                                 dest=dest,
                                 help=argparse.SUPPRESS)
        if strict_flag:
            assert dest is not None
            strict_flag_names.append(flag)
            strict_flag_assignments.append((dest, not default))

    # Unless otherwise specified, arguments will be parsed directly onto an
    # Options object.  Options that require further processing should have
    # their `dest` prefixed with `special-opts:`, which will cause them to be
    # parsed into the separate special_opts namespace object.

    # Note: we have a style guide for formatting the mypy --help text. See
    # https://github.com/python/mypy/wiki/Documentation-Conventions
    general_group = parser.add_argument_group(
        title='Optional arguments')
    general_group.add_argument(
        '-h', '--help', action='help',
        help="Show this help message and exit")
    general_group.add_argument(
        '-v', '--verbose', action='count', dest='verbosity',
        help="More verbose messages")
    general_group.add_argument(
        '-V', '--version', action='version',
        version='%(prog)s ' + __version__,
        help="Show program's version number and exit")

    config_group = parser.add_argument_group(
        title='Config file',
        description="Use a config file instead of command line arguments. "
                    "This is useful if you are using many flags or want "
                    "to set different options per each module.")
    config_group.add_argument(
        '--config-file',
        help="Configuration file, must have a [mypy] section "
             "(defaults to {})".format(', '.join(defaults.CONFIG_FILES)))
    add_invertible_flag('--warn-unused-configs', default=False, strict_flag=True,
                        help="Warn about unused '[mypy-<pattern>]' config sections",
                        group=config_group)

    imports_group = parser.add_argument_group(
        title='Import discovery',
        description="Configure how imports are discovered and followed.")
    imports_group.add_argument(
        '--ignore-missing-imports', action='store_true',
        help="Silently ignore imports of missing modules")
    imports_group.add_argument(
        '--follow-imports', choices=['normal', 'silent', 'skip', 'error'],
        default='normal', help="How to treat imports (default normal)")
    imports_group.add_argument(
        '--python-executable', action='store', metavar='EXECUTABLE',
        help="Python executable used for finding PEP 561 compliant installed"
             " packages and stubs",
        dest='special-opts:python_executable')
    imports_group.add_argument(
        '--no-site-packages', action='store_true',
        dest='special-opts:no_executable',
        help="Do not search for installed PEP 561 compliant packages")
    imports_group.add_argument(
        '--no-silence-site-packages', action='store_true',
        help="Do not silence errors in PEP 561 compliant installed packages")
    add_invertible_flag(
        '--namespace-packages', default=False,
        help="Support namespace packages (PEP 420, __init__.py-less)",
        group=imports_group)

    platform_group = parser.add_argument_group(
        title='Platform configuration',
        description="Type check code assuming it will be run under certain "
                    "runtime conditions. By default, mypy assumes your code "
                    "will be run using the same operating system and Python "
                    "version you are using to run mypy itself.")
    platform_group.add_argument(
        '--python-version', type=parse_version, metavar='x.y',
        help='Type check code assuming it will be running on Python x.y',
        dest='special-opts:python_version')
    platform_group.add_argument(
        '-2', '--py2', dest='special-opts:python_version', action='store_const',
        const=defaults.PYTHON2_VERSION,
        help="Use Python 2 mode (same as --python-version 2.7)")
    platform_group.add_argument(
        '--platform', action='store', metavar='PLATFORM',
        help="Type check special-cased code for the given OS platform "
             "(defaults to sys.platform)")
    platform_group.add_argument(
        '--always-true', metavar='NAME', action='append', default=[],
        help="Additional variable to be considered True (may be repeated)")
    platform_group.add_argument(
        '--always-false', metavar='NAME', action='append', default=[],
        help="Additional variable to be considered False (may be repeated)")

    disallow_any_group = parser.add_argument_group(
        title='Dynamic typing',
        description="Disallow the use of the dynamic 'Any' type under certain conditions.")
    disallow_any_group.add_argument(
        '--disallow-any-unimported', default=False, action='store_true',
        help="Disallow Any types resulting from unfollowed imports")
    add_invertible_flag('--disallow-subclassing-any', default=False, strict_flag=True,
                        help="Disallow subclassing values of type 'Any' when defining classes",
                        group=disallow_any_group)
    disallow_any_group.add_argument(
        '--disallow-any-expr', default=False, action='store_true',
        help='Disallow all expressions that have type Any')
    disallow_any_group.add_argument(
        '--disallow-any-decorated', default=False, action='store_true',
        help='Disallow functions that have Any in their signature '
             'after decorator transformation')
    disallow_any_group.add_argument(
        '--disallow-any-explicit', default=False, action='store_true',
        help='Disallow explicit Any in type positions')
    disallow_any_group.add_argument(
        '--disallow-any-generics', default=False, action='store_true',
        help='Disallow usage of generic types that do not specify explicit '
             'type parameters')

    untyped_group = parser.add_argument_group(
        title='Untyped definitions and calls',
        description="Configure how untyped definitions and calls are handled. "
                    "Note: by default, mypy ignores any untyped function definitions "
                    "and assumes any calls to such functions have a return "
                    "type of 'Any'.")
    add_invertible_flag('--disallow-untyped-calls', default=False, strict_flag=True,
                        help="Disallow calling functions without type annotations"
                             " from functions with type annotations",
                        group=untyped_group)
    add_invertible_flag('--disallow-untyped-defs', default=False, strict_flag=True,
                        help="Disallow defining functions without type annotations"
                             " or with incomplete type annotations",
                        group=untyped_group)
    add_invertible_flag('--disallow-incomplete-defs', default=False, strict_flag=True,
                        help="Disallow defining functions with incomplete type annotations",
                        group=untyped_group)
    add_invertible_flag('--check-untyped-defs', default=False, strict_flag=True,
                        help="Type check the interior of functions without type annotations",
                        group=untyped_group)
    add_invertible_flag('--disallow-untyped-decorators', default=False, strict_flag=True,
                        help="Disallow decorating typed functions with untyped decorators",
                        group=untyped_group)

    none_group = parser.add_argument_group(
        title='None and Optional handling',
        description="Adjust how values of type 'None' are handled. For more context on "
                    "how mypy handles values of type 'None', see: "
                    "mypy.readthedocs.io/en/latest/kinds_of_types.html#no-strict-optional")
    add_invertible_flag('--no-implicit-optional', default=False, strict_flag=True,
                        help="Don't assume arguments with default values of None are Optional",
                        group=none_group)
    none_group.add_argument(
        '--strict-optional', action='store_true',
        help=argparse.SUPPRESS)
    none_group.add_argument(
        '--no-strict-optional', action='store_false', dest='strict_optional',
        help="Disable strict Optional checks (inverse: --strict-optional)")
    none_group.add_argument(
        '--strict-optional-whitelist', metavar='GLOB', nargs='*',
        help="Suppress strict Optional errors in all but the provided files; "
             "implies --strict-optional (may suppress certain other errors "
             "in non-whitelisted files)")

    lint_group = parser.add_argument_group(
        title='Warnings',
        description="Detect code that is sound but redundant or problematic.")
    add_invertible_flag('--warn-redundant-casts', default=False, strict_flag=True,
                        help="Warn about casting an expression to its inferred type",
                        group=lint_group)
    add_invertible_flag('--warn-unused-ignores', default=False, strict_flag=True,
                        help="Warn about unneeded '# type: ignore' comments",
                        group=lint_group)
    add_invertible_flag('--no-warn-no-return', dest='warn_no_return', default=True,
                        help="Do not warn about functions that end without returning",
                        group=lint_group)
    add_invertible_flag('--warn-return-any', default=False, strict_flag=True,
                        help="Warn about returning values of type Any"
                             " from non-Any typed functions",
                        group=lint_group)

    # Note: this group is intentionally added here even though we don't add
    # --strict to this group near the end.
    #
    # That way, this group will appear after the various strictness groups
    # but before the remaining flags.
    # We add `--strict` near the end so we don't accidentally miss any strictness
    # flags that are added after this group.
    strictness_group = parser.add_argument_group(
        title='Other strictness checks')
    add_invertible_flag('--allow-untyped-globals', default=False, strict_flag=False,
                        help="Suppress toplevel errors caused by missing annotations",
                        group=strictness_group)

    incremental_group = parser.add_argument_group(
        title='Incremental mode',
        description="Adjust how mypy incrementally type checks and caches modules. "
                    "Mypy caches type information about modules into a cache to "
                    "let you speed up future invocations of mypy. Also see "
                    "mypy's daemon mode: "
                    "mypy.readthedocs.io/en/latest/mypy_daemon.html#mypy-daemon")
    incremental_group.add_argument(
        '-i', '--incremental', action='store_true',
        help=argparse.SUPPRESS)
    incremental_group.add_argument(
        '--no-incremental', action='store_false', dest='incremental',
        help="Disable module cache (inverse: --incremental)")
    incremental_group.add_argument(
        '--cache-dir', action='store', metavar='DIR',
        help="Store module cache info in the given folder in incremental mode "
             "(defaults to '{}')".format(defaults.CACHE_DIR))
    incremental_group.add_argument(
        '--cache-fine-grained', action='store_true',
        help="Include fine-grained dependency information in the cache for the mypy daemon")
    incremental_group.add_argument(
        '--quick-and-dirty', action='store_true',
        help="Use cache even if dependencies out of date (implies --incremental)")
    incremental_group.add_argument(
        '--skip-version-check', action='store_true',
        help="Allow using cache written by older mypy version")

    internals_group = parser.add_argument_group(
        title='Mypy internals',
        description="Debug and customize mypy internals.")
    internals_group.add_argument(
        '--pdb', action='store_true', help="Invoke pdb on fatal error")
    internals_group.add_argument(
        '--show-traceback', '--tb', action='store_true',
        help="Show traceback on fatal error")
    internals_group.add_argument(
        '--custom-typing', metavar='MODULE', dest='custom_typing_module',
        help="Use a custom typing module")
    internals_group.add_argument(
        '--custom-typeshed-dir', metavar='DIR',
        help="Use the custom typeshed in DIR")
    add_invertible_flag('--warn-incomplete-stub', default=False,
                        help="Warn if missing type annotation in typeshed, only relevant with"
                             " --disallow-untyped-defs or --disallow-incomplete-defs enabled",
                        group=internals_group)
    internals_group.add_argument(
        '--shadow-file', nargs=2, metavar=('SOURCE_FILE', 'SHADOW_FILE'),
        dest='shadow_file', action='append',
        help="When encountering SOURCE_FILE, read and type check "
             "the contents of SHADOW_FILE instead.")
    add_invertible_flag('--fast-exit', default=False, help=argparse.SUPPRESS,
                        group=internals_group)

    error_group = parser.add_argument_group(
        title='Error reporting',
        description="Adjust the amount of detail shown in error messages.")
    add_invertible_flag('--show-error-context', default=False,
                        dest='show_error_context',
                        help='Precede errors with "note:" messages explaining context',
                        group=error_group)
    add_invertible_flag('--show-column-numbers', default=False,
                        help="Show column numbers in error messages",
                        group=error_group)

    # --strict is registered late, after every strict_flag=True flag above
    # has been collected, so its help text lists all of them.
    strict_help = "Strict mode; enables the following flags: {}".format(
        ", ".join(strict_flag_names))
    strictness_group.add_argument(
        '--strict', action='store_true', dest='special-opts:strict',
        help=strict_help)

    report_group = parser.add_argument_group(
        title='Report generation',
        description='Generate a report in the specified format.')
    for report_type in sorted(reporter_classes):
        report_group.add_argument('--%s-report' % report_type.replace('_', '-'),
                                  metavar='DIR',
                                  dest='special-opts:%s_report' % report_type)

    other_group = parser.add_argument_group(
        title='Miscellaneous')
    other_group.add_argument(
        '--junit-xml', help="Write junit.xml to the given file")
    other_group.add_argument(
        '--scripts-are-modules', action='store_true',
        help="Script x becomes module x instead of __main__")
    other_group.add_argument(
        '--find-occurrences', metavar='CLASS.MEMBER',
        dest='special-opts:find_occurrences',
        help="Print out all usages of a class member (experimental)")

    if server_options:
        # TODO: This flag is superfluous; remove after a short transition (2018-03-16)
        other_group.add_argument(
            '--experimental', action='store_true', dest='fine_grained_incremental',
            help="Enable fine-grained incremental mode")
        other_group.add_argument(
            '--use-fine-grained-cache', action='store_true',
            help="Use the cache in fine-grained incremental mode")

    # hidden options
    parser.add_argument(
        '--stats', action='store_true', dest='dump_type_stats', help=argparse.SUPPRESS)
    parser.add_argument(
        '--inferstats', action='store_true', dest='dump_inference_stats',
        help=argparse.SUPPRESS)
    # --debug-cache will disable any cache-related compressions/optimizations,
    # which will make the cache writing process output pretty-printed JSON (which
    # is easier to debug).
    parser.add_argument('--debug-cache', action='store_true', help=argparse.SUPPRESS)
    # --dump-deps will dump all fine-grained dependencies to stdout
    parser.add_argument('--dump-deps', action='store_true', help=argparse.SUPPRESS)
    # --dump-graph will dump the contents of the graph of SCCs and exit.
    parser.add_argument('--dump-graph', action='store_true', help=argparse.SUPPRESS)
    # --semantic-analysis-only does exactly that.
    parser.add_argument('--semantic-analysis-only', action='store_true', help=argparse.SUPPRESS)
    # --local-partial-types disallows partial types spanning module top level and a function
    # (implicitly defined in fine-grained incremental mode)
    parser.add_argument('--local-partial-types', action='store_true', help=argparse.SUPPRESS)
    # --logical-deps adds some more dependencies that are not semantically needed, but
    # may be helpful to determine relative importance of classes and functions for overall
    # type precision in a code base. It also _removes_ some deps, so this flag should be never
    # used except for generating code stats. This also automatically enables --cache-fine-grained.
    # NOTE: This is an experimental option that may be modified or removed at any time.
    parser.add_argument('--logical-deps', action='store_true', help=argparse.SUPPRESS)
    # --bazel changes some behaviors for use with Bazel (https://bazel.build).
    parser.add_argument('--bazel', action='store_true', help=argparse.SUPPRESS)
    # --package-root adds a directory below which directories are considered
    # packages even without __init__.py.  May be repeated.
    parser.add_argument('--package-root', metavar='ROOT', action='append', default=[],
                        help=argparse.SUPPRESS)
    # --cache-map FILE ... gives a mapping from source files to cache files.
    # Each triple of arguments is a source file, a cache meta file, and a cache data file.
    # Modules not mentioned in the file will go through cache_dir.
    # Must be followed by another flag or by '--' (and then only file args may follow).
    parser.add_argument('--cache-map', nargs='+', dest='special-opts:cache_map',
                        help=argparse.SUPPRESS)

    # deprecated options
    parser.add_argument('--disallow-any', dest='special-opts:disallow_any',
                        help=argparse.SUPPRESS)
    add_invertible_flag('--strict-boolean', default=False,
                        help=argparse.SUPPRESS)
    parser.add_argument('-f', '--dirty-stubs', action='store_true',
                        dest='special-opts:dirty_stubs',
                        help=argparse.SUPPRESS)
    parser.add_argument('--use-python-path', action='store_true',
                        dest='special-opts:use_python_path',
                        help=argparse.SUPPRESS)
    parser.add_argument('-s', '--silent-imports', action='store_true',
                        dest='special-opts:silent_imports',
                        help=argparse.SUPPRESS)
    parser.add_argument('--almost-silent', action='store_true',
                        dest='special-opts:almost_silent',
                        help=argparse.SUPPRESS)
    parser.add_argument('--fast-parser', action='store_true', dest='special-opts:fast_parser',
                        help=argparse.SUPPRESS)
    parser.add_argument('--no-fast-parser', action='store_true',
                        dest='special-opts:no_fast_parser',
                        help=argparse.SUPPRESS)

    code_group = parser.add_argument_group(
        title="Running code",
        description="Specify the code you want to type check. For more details, see "
                    "mypy.readthedocs.io/en/latest/running_mypy.html#running-mypy")
    code_group.add_argument(
        '-m', '--module', action='append', metavar='MODULE',
        default=[],
        dest='special-opts:modules',
        help="Type-check module; can repeat for more modules")
    code_group.add_argument(
        '-p', '--package', action='append', metavar='PACKAGE',
        default=[],
        dest='special-opts:packages',
        help="Type-check package recursively; can be repeated")
    code_group.add_argument(
        '-c', '--command', action='append', metavar='PROGRAM_TEXT',
        dest='special-opts:command',
        help="Type-check program passed in as string")
    code_group.add_argument(
        metavar='files', nargs='*', dest='special-opts:files',
        help="Type-check given files or directories")

    # Parse arguments once into a dummy namespace so we can get the
    # filename for the config file and know if the user requested all strict options.
    dummy = argparse.Namespace()
    parser.parse_args(args, dummy)
    config_file = dummy.config_file
    if config_file is not None and not os.path.exists(config_file):
        parser.error("Cannot find config file '%s'" % config_file)

    # Parse config file first, so command line can override.
    options = Options()
    parse_config_file(options, config_file)

    # Set strict flags before parsing (if strict mode enabled), so other command
    # line options can override.
    if getattr(dummy, 'special-opts:strict'):
        for dest, value in strict_flag_assignments:
            setattr(options, dest, value)

    # Parse command line for real, using a split namespace.
    special_opts = argparse.Namespace()
    parser.parse_args(args, SplitNamespace(options, special_opts, 'special-opts:'))

    # --use-python-path is no longer supported; explain why.
    if special_opts.use_python_path:
        parser.error("Sorry, --use-python-path is no longer supported.\n"
                     "If you are trying this because your code depends on a library module,\n"
                     "you should really investigate how to obtain stubs for that module.\n"
                     "See https://github.com/python/mypy/issues/1411 for more discussion."
                     )
    # Process deprecated options
    if special_opts.disallow_any:
        print("--disallow-any option was split up into multiple flags. "
              "See http://mypy.readthedocs.io/en/latest/command_line.html#disallow-dynamic-typing")
    if options.strict_boolean:
        print("Warning: --strict-boolean is deprecated; "
              "see https://github.com/python/mypy/issues/3195", file=sys.stderr)
    if special_opts.almost_silent:
        print("Warning: --almost-silent has been replaced by "
              "--follow-imports=errors", file=sys.stderr)
        if options.follow_imports == 'normal':
            options.follow_imports = 'errors'
    elif special_opts.silent_imports:
        print("Warning: --silent-imports has been replaced by "
              "--ignore-missing-imports --follow-imports=skip", file=sys.stderr)
        options.ignore_missing_imports = True
        if options.follow_imports == 'normal':
            options.follow_imports = 'skip'
    if special_opts.dirty_stubs:
        print("Warning: -f/--dirty-stubs is deprecated and no longer necessary. Mypy no longer "
              "checks the git status of stubs.",
              file=sys.stderr)
    if special_opts.fast_parser:
        print("Warning: --fast-parser is now the default (and only) parser.")
    if special_opts.no_fast_parser:
        print("Warning: --no-fast-parser no longer has any effect.  The fast parser "
              "is now mypy's default and only parser.")

    try:
        infer_python_version_and_executable(options, special_opts)
    except PythonExecutableInferenceError as e:
        parser.error(str(e))

    if special_opts.no_executable:
        options.python_executable = None

    # Check for invalid argument combinations.
    if require_targets:
        code_methods = sum(bool(c) for c in [special_opts.modules + special_opts.packages,
                                             special_opts.command,
                                             special_opts.files])
        if code_methods == 0:
            parser.error("Missing target module, package, files, or command.")
        elif code_methods > 1:
            parser.error("May only specify one of: module/package, files, or command.")

    # Check for overlapping `--always-true` and `--always-false` flags.
    overlap = set(options.always_true) & set(options.always_false)
    if overlap:
        parser.error("You can't make a variable always true and always false (%s)" %
                     ', '.join(sorted(overlap)))

    # Set build flags.
    if options.strict_optional_whitelist is not None:
        # TODO: Deprecate, then kill this flag
        options.strict_optional = True
    if special_opts.find_occurrences:
        experiments.find_occurrences = special_opts.find_occurrences.split('.')
        assert experiments.find_occurrences is not None
        if len(experiments.find_occurrences) < 2:
            parser.error("Can only find occurrences of class members.")
        if len(experiments.find_occurrences) != 2:
            parser.error("Can only find occurrences of non-nested class members.")

    # Set reports.
    for flag, val in vars(special_opts).items():
        if flag.endswith('_report') and val is not None:
            report_type = flag[:-7].replace('_', '-')
            report_dir = val
            options.report_dirs[report_type] = report_dir

    # Process --package-root.
    if options.package_root:
        process_package_roots(fscache, parser, options)

    # Process --cache-map.
    if special_opts.cache_map:
        process_cache_map(parser, special_opts, options)

    # Let quick_and_dirty imply incremental.
    if options.quick_and_dirty:
        options.incremental = True

    # Let logical_deps imply cache_fine_grained (otherwise the former is useless).
    if options.logical_deps:
        options.cache_fine_grained = True

    # Set target.
    if special_opts.modules + special_opts.packages:
        options.build_type = BuildType.MODULE
        search_paths = SearchPaths((os.getcwd(),), tuple(mypy_path()), (), ())
        targets = []
        # TODO: use the same cache that the BuildManager will
        cache = FindModuleCache(search_paths, fscache)
        for p in special_opts.packages:
            if os.sep in p or os.altsep and os.altsep in p:
                fail("Package name '{}' cannot have a slash in it.".format(p))
            p_targets = cache.find_modules_recursive(p)
            if not p_targets:
                fail("Can't find package '{}'".format(p))
            targets.extend(p_targets)
        for m in special_opts.modules:
            targets.append(BuildSource(None, m, None))
        return targets, options
    elif special_opts.command:
        options.build_type = BuildType.PROGRAM_TEXT
        targets = [BuildSource(None, None, '\n'.join(special_opts.command))]
        return targets, options
    else:
        try:
            targets = create_source_list(special_opts.files, options, fscache)
        except InvalidSourceList as e:
            fail(str(e))
        return targets, options
def process_package_roots(fscache: Optional[FileSystemCache],
                          parser: argparse.ArgumentParser,
                          options: Options) -> None:
    """Validate and normalize package_root."""
    if fscache is None:
        parser.error("--package-root does not work here (no fscache)")
    assert fscache is not None  # parser.error() never returns, but mypy can't know that.
    # Drive-letter handling keeps Windows (esp. tests) happy; on POSIX
    # os.path.splitdrive always yields an empty drive.
    cwd_drive, _ = os.path.splitdrive(os.getcwd())
    here = os.curdir
    here_slash = os.curdir + os.sep
    up_slash = os.pardir + os.sep
    no_op_roots = {here, here_slash}
    normalized = []
    for raw in options.package_root:
        if os.path.isabs(raw):
            parser.error("Package root cannot be absolute: %r" % raw)
        drive, rel = os.path.splitdrive(raw)
        if drive and drive != cwd_drive:
            parser.error("Package root must be on current drive: %r" % (drive + rel))
        # An empty package root is always okay.
        if rel:
            rel = os.path.relpath(rel)  # Normalize the heck out of it.
            if rel.startswith(up_slash):
                parser.error("Package root cannot be above current directory: %r" % rel)
            if rel in no_op_roots:
                rel = ''
            elif not rel.endswith(os.sep):
                rel = rel + os.sep
        normalized.append(rel)
    options.package_root = normalized
    # Pass the normalized roots on to the filesystem cache as well.
    fscache.set_package_root(normalized)
def process_cache_map(parser: argparse.ArgumentParser,
                      special_opts: argparse.Namespace,
                      options: Options) -> None:
    """Validate cache_map and copy into options.cache_map.

    special_opts.cache_map is a flat list of (source, meta_file, data_file)
    triples; each validated triple is stored as
    options.cache_map[source] = (meta_file, data_file).
    Any validation failure is reported via parser.error() (which exits).
    """
    n = len(special_opts.cache_map)
    if n % 3 != 0:
        parser.error("--cache-map requires one or more triples (see source)")
    for i in range(0, n, 3):
        source, meta_file, data_file = special_opts.cache_map[i:i + 3]
        if source in options.cache_map:
            # BUG FIX: the message previously ended with a stray ')'.
            parser.error("Duplicate --cache-map source %s" % source)
        if not source.endswith(('.py', '.pyi')):
            parser.error("Invalid --cache-map source %s (triple[0] must be *.py[i])" % source)
        if not meta_file.endswith('.meta.json'):
            parser.error("Invalid --cache-map meta_file %s (triple[1] must be *.meta.json)" %
                         meta_file)
        if not data_file.endswith('.data.json'):
            parser.error("Invalid --cache-map data_file %s (triple[2] must be *.data.json)" %
                         data_file)
        options.cache_map[source] = (meta_file, data_file)
# For most options, the type of the default value set in options.py is
# sufficient, and we don't have to do anything here.  This table
# exists to specify types for values initialized to None or container
# types.
# Each value is either a type or a callable that converts the raw config
# string into the option's value (see parse_section()).
config_types = {
    'python_version': parse_version,
    'strict_optional_whitelist': lambda s: s.split(),
    'custom_typing_module': str,
    'custom_typeshed_dir': str,
    # mypy_path entries may be separated by commas or colons.
    'mypy_path': lambda s: [p.strip() for p in re.split('[,:]', s)],
    'junit_xml': str,
    # These two are for backwards compatibility
    'silent_imports': bool,
    'almost_silent': bool,
    'plugins': lambda s: [p.strip() for p in s.split(',')],
    'always_true': lambda s: [p.strip() for p in s.split(',')],
    'always_false': lambda s: [p.strip() for p in s.split(',')],
    'package_root': lambda s: [p.strip() for p in s.split(',')],
}  # type: Final
def parse_config_file(options: Options, filename: Optional[str]) -> None:
    """Parse a config file into an Options object.

    Errors are written to stderr but are not fatal.

    If filename is None, fall back to default config files.
    """
    if filename is not None:
        config_files = (filename,)  # type: Tuple[str, ...]
    else:
        config_files = tuple(map(os.path.expanduser, defaults.CONFIG_FILES))
    parser = configparser.RawConfigParser()
    # Read the first config file that exists and parses cleanly; the for/else
    # returns early when none does.  file_read is only used after a break,
    # so it is always bound where referenced.
    for config_file in config_files:
        if not os.path.exists(config_file):
            continue
        try:
            parser.read(config_file)
        except configparser.Error as err:
            print("%s: %s" % (config_file, err), file=sys.stderr)
        else:
            file_read = config_file
            options.config_file = file_read
            break
    else:
        return
    if 'mypy' not in parser:
        # Only complain about a missing [mypy] section for an explicitly named
        # config file or a mypy-specific default file (not shared ones).
        if filename or file_read not in defaults.SHARED_CONFIG_FILES:
            print("%s: No [mypy] section in config file" % file_read, file=sys.stderr)
    else:
        section = parser['mypy']
        prefix = '%s: [%s]' % (file_read, 'mypy')
        updates, report_dirs = parse_section(prefix, options, section)
        for k, v in updates.items():
            setattr(options, k, v)
        options.report_dirs.update(report_dirs)
    # Per-module sections are named [mypy-<glob>[,<glob>...]].
    for name, section in parser.items():
        if name.startswith('mypy-'):
            prefix = '%s: [%s]' % (file_read, name)
            updates, report_dirs = parse_section(prefix, options, section)
            if report_dirs:
                print("%s: Per-module sections should not specify reports (%s)" %
                      (prefix, ', '.join(s + '_report' for s in sorted(report_dirs))),
                      file=sys.stderr)
            if set(updates) - PER_MODULE_OPTIONS:
                print("%s: Per-module sections should only specify per-module flags (%s)" %
                      (prefix, ', '.join(sorted(set(updates) - PER_MODULE_OPTIONS))),
                      file=sys.stderr)
                updates = {k: v for k, v in updates.items() if k in PER_MODULE_OPTIONS}
            globs = name[5:]
            for glob in globs.split(','):
                # For backwards compatibility, replace (back)slashes with dots.
                glob = glob.replace(os.sep, '.')
                if os.altsep:
                    glob = glob.replace(os.altsep, '.')
                if (any(c in glob for c in '?[]!') or
                        any('*' in x and x != '*' for x in glob.split('.'))):
                    print("%s: Patterns must be fully-qualified module names, optionally "
                          "with '*' in some components (e.g spam.*.eggs.*)"
                          % prefix,
                          file=sys.stderr)
                else:
                    options.per_module_options[glob] = updates
def parse_section(prefix: str, template: Options,
                  section: Mapping[str, str]) -> Tuple[Dict[str, object], Dict[str, str]]:
    """Parse one section of a config file.

    prefix is used only for error messages; template supplies default values
    (and thereby the expected type) for options not listed in config_types.

    Returns a dict of option values encountered, and a dict of report directories.
    """
    results = {}  # type: Dict[str, object]
    report_dirs = {}  # type: Dict[str, str]
    for key in section:
        # Determine the converter (ct) for this key: explicit table entry,
        # or the type of the template's default value.
        if key in config_types:
            ct = config_types[key]
        else:
            dv = getattr(template, key, None)
            if dv is None:
                if key.endswith('_report'):
                    report_type = key[:-7].replace('_', '-')
                    if report_type in reporter_classes:
                        report_dirs[report_type] = section[key]
                    else:
                        print("%s: Unrecognized report type: %s" % (prefix, key),
                              file=sys.stderr)
                    continue
                if key.startswith('x_'):
                    continue  # Don't complain about `x_blah` flags
                elif key == 'strict':
                    print("%s: Strict mode is not supported in configuration files: specify "
                          "individual flags instead (see 'mypy -h' for the list of flags enabled "
                          "in strict mode)" % prefix, file=sys.stderr)
                else:
                    print("%s: Unrecognized option: %s = %s" % (prefix, key, section[key]),
                          file=sys.stderr)
                continue
            ct = type(dv)
        v = None  # type: Any
        try:
            if ct is bool:
                v = section.getboolean(key)  # type: ignore  # Until better stub
            elif callable(ct):
                try:
                    v = ct(section.get(key))
                except argparse.ArgumentTypeError as err:
                    print("%s: %s: %s" % (prefix, key, err), file=sys.stderr)
                    continue
            else:
                print("%s: Don't know what type %s should have" % (prefix, key), file=sys.stderr)
                continue
        except ValueError as err:
            print("%s: %s: %s" % (prefix, key, err), file=sys.stderr)
            continue
        # Expand deprecated aliases into their modern equivalents, without
        # overriding values the section set explicitly.
        if key == 'silent_imports':
            print("%s: silent_imports has been replaced by "
                  "ignore_missing_imports=True; follow_imports=skip" % prefix, file=sys.stderr)
            if v:
                if 'ignore_missing_imports' not in results:
                    results['ignore_missing_imports'] = True
                if 'follow_imports' not in results:
                    results['follow_imports'] = 'skip'
        if key == 'almost_silent':
            print("%s: almost_silent has been replaced by "
                  "follow_imports=error" % prefix, file=sys.stderr)
            if v:
                if 'follow_imports' not in results:
                    results['follow_imports'] = 'error'
        results[key] = v
    return results, report_dirs
def fail(msg: str) -> None:
    """Write msg (plus a newline) to stderr and exit with status 1."""
    print(msg, file=sys.stderr)
    sys.exit(1)
# | 45.708069 | 99 | 0.623433 |  -- dataset metadata residue (avg/max line length, alphanum fraction); not code
import argparse
import ast
import configparser
import os
import re
import subprocess
import sys
import time
from typing import Any, Dict, List, Mapping, Optional, Tuple, Callable
from mypy import build
from mypy import defaults
from mypy import experiments
from mypy import util
from mypy.build import BuildResult
from mypy.modulefinder import BuildSource, FindModuleCache, mypy_path, SearchPaths
from mypy.find_sources import create_source_list, InvalidSourceList
from mypy.fscache import FileSystemCache
from mypy.errors import CompileError
from mypy.options import Options, BuildType, PER_MODULE_OPTIONS
from mypy.report import reporter_classes
from mypy.version import __version__
# MYPY is only True when this module is analyzed by mypy itself, so the
# typing_extensions import below is never executed at runtime.
# (Fix: the guarded import had lost its indentation, which is a syntax error.)
MYPY = False
if MYPY:
    from typing_extensions import Final
# Keep a reference to the real os.stat so the proxy below can delegate to it.
orig_stat = os.stat

# Debug switch; the visible body of stat_proxy prints every stat call, and
# main() dumps a memory profile when this is True.
MEM_PROFILE = False


def stat_proxy(path: str) -> os.stat_result:
    """Logging wrapper around os.stat, for debugging stat calls.

    Fix: the original line fused ``MEM_PROFILE = False`` with this function's
    ``def`` header (comment-stripping corruption); the two statements are
    reconstructed here.
    """
    try:
        st = orig_stat(path)
    except os.error as err:
        print("stat(%r) -> %s" % (path, err))
        raise
    else:
        print("stat(%r) -> (st_mode=%o, st_mtime=%d, st_size=%d)" %
              (path, st.st_mode, st.st_mtime, st.st_size))
        return st
def main(script_path: Optional[str], args: Optional[List[str]] = None) -> None:
    """Main entry point to the type checker.

    args defaults to sys.argv[1:] when None.  script_path is accepted for
    interface compatibility; it is not used in this function body.
    (Fix: this copy of the function had lost all indentation; reconstructed.)
    """
    if sys.version_info[:2] < (3, 4):
        sys.exit("Running mypy with Python 3.3 or lower is not supported; "
                 "please upgrade to 3.4 or newer")
    if sys.version_info[:3] == (3, 5, 0):
        sys.exit("Running mypy with Python 3.5.0 is not supported; "
                 "please upgrade to 3.5.1 or newer")
    t0 = time.time()
    # To log stat() calls: os.stat = stat_proxy
    sys.setrecursionlimit(2 ** 14)
    if args is None:
        args = sys.argv[1:]
    fscache = FileSystemCache()
    sources, options = process_options(args, fscache=fscache)
    messages = []

    def flush_errors(new_messages: List[str], serious: bool) -> None:
        # Serious errors go to stderr, ordinary ones to stdout.
        messages.extend(new_messages)
        f = sys.stderr if serious else sys.stdout
        try:
            for msg in new_messages:
                f.write(msg + '\n')
            f.flush()
        except BrokenPipeError:
            sys.exit(2)

    serious = False
    blockers = False
    res = None
    try:
        res = build.build(sources, options, None, flush_errors, fscache)
    except CompileError as e:
        blockers = True
        if not e.use_stdout:
            serious = True
    if options.warn_unused_configs and options.unused_configs:
        print("Warning: unused section(s) in %s: %s" %
              (options.config_file,
               ", ".join("[mypy-%s]" % glob for glob in options.per_module_options.keys()
                         if glob in options.unused_configs)),
              file=sys.stderr)
    if options.junit_xml:
        t1 = time.time()
        util.write_junit_xml(t1 - t0, serious, messages, options.junit_xml)
    if MEM_PROFILE:
        from mypy.memprofile import print_memory_profile
        print_memory_profile()
    del res  # Drop the build result before computing the exit code.
    code = 0
    if messages:
        code = 2 if blockers else 1
    if options.fast_exit:
        # Exit without freeing objects -- it's faster.
        util.hard_exit(code)
    elif code:
        sys.exit(code)
def readlinkabs(link: str) -> str:
    """Return an absolute path to the target of symlink *link*.

    Adapted from code by Greg Smith.
    """
    assert os.path.islink(link)
    target = os.readlink(link)
    if not os.path.isabs(target):
        # A relative target is resolved against the directory holding the link.
        target = os.path.join(os.path.dirname(link), target)
    return target
class SplitNamespace(argparse.Namespace):
    """Namespace that routes attribute access to one of two target namespaces.

    Attribute names starting with ``alt_prefix`` are stored on
    ``alt_namespace`` (with the prefix stripped); all other attributes go
    to ``standard_namespace``.
    """

    def __init__(self, standard_namespace: object, alt_namespace: object, alt_prefix: str) -> None:
        # Write through __dict__ directly so our own __setattr__ is bypassed.
        self.__dict__['_standard_namespace'] = standard_namespace
        self.__dict__['_alt_namespace'] = alt_namespace
        self.__dict__['_alt_prefix'] = alt_prefix

    def _get(self) -> Tuple[Any, Any]:
        """Return the pair (standard_namespace, alt_namespace)."""
        return (self._standard_namespace, self._alt_namespace)

    def _route(self, name: str) -> Tuple[Any, str]:
        """Pick the target namespace and the attribute name to use on it."""
        prefix = self._alt_prefix
        if name.startswith(prefix):
            return self._alt_namespace, name[len(prefix):]
        return self._standard_namespace, name

    def __setattr__(self, name: str, value: Any) -> None:
        target, attr = self._route(name)
        setattr(target, attr, value)

    def __getattr__(self, name: str) -> Any:
        target, attr = self._route(name)
        return getattr(target, attr)
def parse_version(v: str) -> Tuple[int, int]:
    """Parse a version string such as '3.6' into a (major, minor) tuple.

    Raises argparse.ArgumentTypeError for malformed strings and for Python
    versions mypy cannot type check for (anything other than 2.7 and
    supported 3.x releases).
    """
    m = re.match(r'\A(\d)\.(\d+)\Z', v)
    if m is None:
        raise argparse.ArgumentTypeError(
            "Invalid python version '{}' (expected format: 'x.y')".format(v))
    major = int(m.group(1))
    minor = int(m.group(2))
    if major == 2:
        if minor != 7:
            raise argparse.ArgumentTypeError(
                "Python 2.{} is not supported (must be 2.7)".format(minor))
        return major, minor
    if major == 3:
        if minor < defaults.PYTHON3_VERSION_MIN[1]:
            raise argparse.ArgumentTypeError(
                "Python 3.{0} is not supported (must be {1}.{2} or higher)".format(minor,
                    *defaults.PYTHON3_VERSION_MIN))
        return major, minor
    raise argparse.ArgumentTypeError(
        "Python major version '{}' out of range (must be 2 or 3)".format(major))
class AugmentedHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """Help formatter tweaked to make mypy's --help output a little less jarring."""

    def __init__(self, prog: str) -> None:
        super().__init__(prog=prog, max_help_position=28)

    def _fill_text(self, text: str, width: int, indent: int) -> str:
        if '\n' not in text:
            # Single-paragraph text: let argparse manage wrapping,
            # indenting, and formatting for us.
            return argparse.HelpFormatter._fill_text(self, text, width, indent)
        # Text with explicit newlines is assumed to be manually formatted.
        return super()._fill_text(text, width, indent)
# Pairs of flag prefixes with opposite meaning; the map below takes each
# member of a pair to the other one.
flag_prefix_pairs = [
    ('allow', 'disallow'),
    ('show', 'hide'),
]  # type: Final
flag_prefix_map = {}  # type: Final[Dict[str, str]]
for a, b in flag_prefix_pairs:
    flag_prefix_map.update({a: b, b: a})


def invert_flag_name(flag: str) -> str:
    """Return the command-line spelling that negates *flag*.

    '--allow-x' <-> '--disallow-x', '--show-x' <-> '--hide-x',
    '--no-x' -> '--x', and anything else gains a 'no-' prefix.
    """
    body = flag[2:]
    prefix, sep, rest = body.partition('-')
    if sep:
        if prefix in flag_prefix_map:
            return '--{}-{}'.format(flag_prefix_map[prefix], rest)
        if prefix == 'no':
            return '--{}'.format(rest)
    return '--no-{}'.format(body)
class PythonExecutableInferenceError(Exception):
    """Represents a failure to infer the Python version or executable."""
def python_executable_prefix(v: str) -> List[str]:
    """Return the command prefix that runs Python version *v* (e.g. '3.6').

    On Windows all Python executables are named `python`, so we go through
    the `py` launcher with an explicit version (e.g. `py -3.5`); elsewhere
    versioned executables such as `python3.6` are expected on PATH.  See:
    https://docs.python.org/3/using/windows.html#python-launcher-for-windows
    """
    if sys.platform != 'win32':
        return ['python{}'.format(v)]
    return ['py', '-{}'.format(v)]
def _python_version_from_executable(python_executable: str) -> Tuple[int, int]:
try:
check = subprocess.check_output([python_executable, '-c',
'import sys; print(repr(sys.version_info[:2]))'],
stderr=subprocess.STDOUT).decode()
return ast.literal_eval(check)
except (subprocess.CalledProcessError, FileNotFoundError):
raise PythonExecutableInferenceError(
'invalid Python executable {}'.format(python_executable))
def _python_executable_from_version(python_version: Tuple[int, int]) -> str:
if sys.version_info[:2] == python_version:
return sys.executable
str_ver = '.'.join(map(str, python_version))
try:
sys_exe = subprocess.check_output(python_executable_prefix(str_ver) +
['-c', 'import sys; print(sys.executable)'],
stderr=subprocess.STDOUT).decode().strip()
return sys_exe
except (subprocess.CalledProcessError, FileNotFoundError):
raise PythonExecutableInferenceError(
'failed to find a Python executable matching version {},'
' perhaps try --python-executable, or --no-site-packages?'.format(python_version))
def infer_python_version_and_executable(options: Options,
                                        special_opts: argparse.Namespace) -> None:
    """Fill in options.python_version and options.python_executable.

    Either value may be given on the command line; the missing one is
    inferred from the other.  When both are given they are cross-checked.
    Raises PythonExecutableInferenceError on mismatch or lookup failure.
    """
    # TODO: (ethanhs) Look at folding these checks and the site packages
    # subprocess calls into one subprocess call for speed.
    version = special_opts.python_version
    executable = special_opts.python_executable
    if executable is not None and version is not None:
        # Both given: verify that they agree before accepting them.
        actual = _python_version_from_executable(executable)
        if actual != version:
            raise PythonExecutableInferenceError(
                'Python version {} did not match executable {}, got version {}.'.format(
                    version, executable, actual
                ))
        options.python_version = version
        options.python_executable = executable
    elif version is not None:
        # Only the version given: look up a matching interpreter, unless the
        # user explicitly asked for none (--no-site-packages).
        options.python_version = version
        py_exe = None
        if not special_opts.no_executable:
            py_exe = _python_executable_from_version(version)
        options.python_executable = py_exe
    elif executable is not None:
        # Only the executable given: query it for its version.
        options.python_version = _python_version_from_executable(executable)
        options.python_executable = executable
HEADER = """%(prog)s [-h] [-v] [-V] [more options; see below]
[-m MODULE] [-p PACKAGE] [-c PROGRAM_TEXT] [files ...]""" # type: Final
DESCRIPTION = """
Mypy is a program that will type check your Python code.
Pass in any files or folders you want to type check. Mypy will
recursively traverse any provided folders to find .py files:
$ mypy my_program.py my_src_folder
For more information on getting started, see:
- http://mypy.readthedocs.io/en/latest/getting_started.html
For more details on both running mypy and using the flags below, see:
- http://mypy.readthedocs.io/en/latest/running_mypy.html
- http://mypy.readthedocs.io/en/latest/command_line.html
You can also use a config file to configure mypy instead of using
command line flags. For more details, see:
- http://mypy.readthedocs.io/en/latest/config_file.html
""" # type: Final
FOOTER = """Environment variables:
Define MYPYPATH for additional module search path entries.""" # type: Final
def process_options(args: List[str],
                    require_targets: bool = True,
                    server_options: bool = False,
                    fscache: Optional[FileSystemCache] = None,
                    ) -> Tuple[List[BuildSource], Options]:
    """Parse command-line arguments into build sources and an Options object.

    If *fscache* is passed in and --package-root options are given, the
    package roots are installed into the cache (via process_package_roots).
    """
    parser = argparse.ArgumentParser(prog='mypy',
                                     usage=HEADER,
                                     description=DESCRIPTION,
                                     epilog=FOOTER,
                                     fromfile_prefix_chars='@',
                                     formatter_class=AugmentedHelpFormatter,
                                     add_help=False)
    strict_flag_names = []  # type: List[str]
    strict_flag_assignments = []  # type: List[Tuple[str, bool]]

    # Register a boolean flag together with its inverse (e.g. --foo /
    # --no-foo); the inverse is hidden from --help.  Flags registered with
    # strict_flag=True are also recorded so --strict can enable them.
    def add_invertible_flag(flag: str,
                            *,
                            inverse: Optional[str] = None,
                            default: bool,
                            dest: Optional[str] = None,
                            help: str,
                            strict_flag: bool = False,
                            group: Optional[argparse._ActionsContainer] = None
                            ) -> None:
        if inverse is None:
            inverse = invert_flag_name(flag)
        if group is None:
            group = parser
        if help is not argparse.SUPPRESS:
            help += " (inverse: {})".format(inverse)
        arg = group.add_argument(flag,
                                 action='store_false' if default else 'store_true',
                                 dest=dest,
                                 help=help)
        dest = arg.dest
        arg = group.add_argument(inverse,
                                 action='store_true' if default else 'store_false',
                                 dest=dest,
                                 help=argparse.SUPPRESS)
        if strict_flag:
            assert dest is not None
            strict_flag_names.append(flag)
            strict_flag_assignments.append((dest, not default))
    # Unless otherwise specified, arguments will be parsed directly onto an
    # Options object.  Options that require further processing should have
    # their `dest` prefixed with `special-opts:`, which will cause them to be
    # parsed into the separate special_opts namespace object.
    # Note: we have a style guide for formatting the mypy --help text. See
    # https://github.com/python/mypy/wiki/Documentation-Conventions
    general_group = parser.add_argument_group(
        title='Optional arguments')
    general_group.add_argument(
        '-h', '--help', action='help',
        help="Show this help message and exit")
    general_group.add_argument(
        '-v', '--verbose', action='count', dest='verbosity',
        help="More verbose messages")
    general_group.add_argument(
        '-V', '--version', action='version',
        version='%(prog)s ' + __version__,
        help="Show program's version number and exit")
    config_group = parser.add_argument_group(
        title='Config file',
        description="Use a config file instead of command line arguments. "
                    "This is useful if you are using many flags or want "
                    "to set different options per each module.")
    config_group.add_argument(
        '--config-file',
        help="Configuration file, must have a [mypy] section "
             "(defaults to {})".format(', '.join(defaults.CONFIG_FILES)))
    add_invertible_flag('--warn-unused-configs', default=False, strict_flag=True,
                        help="Warn about unused '[mypy-<pattern>]' config sections",
                        group=config_group)
    imports_group = parser.add_argument_group(
        title='Import discovery',
        description="Configure how imports are discovered and followed.")
    imports_group.add_argument(
        '--ignore-missing-imports', action='store_true',
        help="Silently ignore imports of missing modules")
    imports_group.add_argument(
        '--follow-imports', choices=['normal', 'silent', 'skip', 'error'],
        default='normal', help="How to treat imports (default normal)")
    imports_group.add_argument(
        '--python-executable', action='store', metavar='EXECUTABLE',
        help="Python executable used for finding PEP 561 compliant installed"
             " packages and stubs",
        dest='special-opts:python_executable')
    imports_group.add_argument(
        '--no-site-packages', action='store_true',
        dest='special-opts:no_executable',
        help="Do not search for installed PEP 561 compliant packages")
    imports_group.add_argument(
        '--no-silence-site-packages', action='store_true',
        help="Do not silence errors in PEP 561 compliant installed packages")
    add_invertible_flag(
        '--namespace-packages', default=False,
        help="Support namespace packages (PEP 420, __init__.py-less)",
        group=imports_group)
    platform_group = parser.add_argument_group(
        title='Platform configuration',
        description="Type check code assuming it will be run under certain "
                    "runtime conditions. By default, mypy assumes your code "
                    "will be run using the same operating system and Python "
                    "version you are using to run mypy itself.")
    platform_group.add_argument(
        '--python-version', type=parse_version, metavar='x.y',
        help='Type check code assuming it will be running on Python x.y',
        dest='special-opts:python_version')
    platform_group.add_argument(
        '-2', '--py2', dest='special-opts:python_version', action='store_const',
        const=defaults.PYTHON2_VERSION,
        help="Use Python 2 mode (same as --python-version 2.7)")
    platform_group.add_argument(
        '--platform', action='store', metavar='PLATFORM',
        help="Type check special-cased code for the given OS platform "
             "(defaults to sys.platform)")
    platform_group.add_argument(
        '--always-true', metavar='NAME', action='append', default=[],
        help="Additional variable to be considered True (may be repeated)")
    platform_group.add_argument(
        '--always-false', metavar='NAME', action='append', default=[],
        help="Additional variable to be considered False (may be repeated)")
    disallow_any_group = parser.add_argument_group(
        title='Dynamic typing',
        description="Disallow the use of the dynamic 'Any' type under certain conditions.")
    disallow_any_group.add_argument(
        '--disallow-any-unimported', default=False, action='store_true',
        help="Disallow Any types resulting from unfollowed imports")
    add_invertible_flag('--disallow-subclassing-any', default=False, strict_flag=True,
                        help="Disallow subclassing values of type 'Any' when defining classes",
                        group=disallow_any_group)
    disallow_any_group.add_argument(
        '--disallow-any-expr', default=False, action='store_true',
        help='Disallow all expressions that have type Any')
    disallow_any_group.add_argument(
        '--disallow-any-decorated', default=False, action='store_true',
        help='Disallow functions that have Any in their signature '
             'after decorator transformation')
    disallow_any_group.add_argument(
        '--disallow-any-explicit', default=False, action='store_true',
        help='Disallow explicit Any in type positions')
    disallow_any_group.add_argument(
        '--disallow-any-generics', default=False, action='store_true',
        help='Disallow usage of generic types that do not specify explicit '
             'type parameters')
    untyped_group = parser.add_argument_group(
        title='Untyped definitions and calls',
        description="Configure how untyped definitions and calls are handled. "
                    "Note: by default, mypy ignores any untyped function definitions "
                    "and assumes any calls to such functions have a return "
                    "type of 'Any'.")
    add_invertible_flag('--disallow-untyped-calls', default=False, strict_flag=True,
                        help="Disallow calling functions without type annotations"
                             " from functions with type annotations",
                        group=untyped_group)
    add_invertible_flag('--disallow-untyped-defs', default=False, strict_flag=True,
                        help="Disallow defining functions without type annotations"
                             " or with incomplete type annotations",
                        group=untyped_group)
    add_invertible_flag('--disallow-incomplete-defs', default=False, strict_flag=True,
                        help="Disallow defining functions with incomplete type annotations",
                        group=untyped_group)
    add_invertible_flag('--check-untyped-defs', default=False, strict_flag=True,
                        help="Type check the interior of functions without type annotations",
                        group=untyped_group)
    add_invertible_flag('--disallow-untyped-decorators', default=False, strict_flag=True,
                        help="Disallow decorating typed functions with untyped decorators",
                        group=untyped_group)
    none_group = parser.add_argument_group(
        title='None and Optional handling',
        description="Adjust how values of type 'None' are handled. For more context on "
                    "how mypy handles values of type 'None', see: "
                    "mypy.readthedocs.io/en/latest/kinds_of_types.html#no-strict-optional")
    add_invertible_flag('--no-implicit-optional', default=False, strict_flag=True,
                        help="Don't assume arguments with default values of None are Optional",
                        group=none_group)
    none_group.add_argument(
        '--strict-optional', action='store_true',
        help=argparse.SUPPRESS)
    none_group.add_argument(
        '--no-strict-optional', action='store_false', dest='strict_optional',
        help="Disable strict Optional checks (inverse: --strict-optional)")
    none_group.add_argument(
        '--strict-optional-whitelist', metavar='GLOB', nargs='*',
        help="Suppress strict Optional errors in all but the provided files; "
             "implies --strict-optional (may suppress certain other errors "
             "in non-whitelisted files)")
    lint_group = parser.add_argument_group(
        title='Warnings',
        description="Detect code that is sound but redundant or problematic.")
    add_invertible_flag('--warn-redundant-casts', default=False, strict_flag=True,
                        help="Warn about casting an expression to its inferred type",
                        group=lint_group)
    add_invertible_flag('--warn-unused-ignores', default=False, strict_flag=True,
                        help="Warn about unneeded '# type: ignore' comments",
                        group=lint_group)
    add_invertible_flag('--no-warn-no-return', dest='warn_no_return', default=True,
                        help="Do not warn about functions that end without returning",
                        group=lint_group)
    add_invertible_flag('--warn-return-any', default=False, strict_flag=True,
                        help="Warn about returning values of type Any"
                             " from non-Any typed functions",
                        group=lint_group)
    # Note: this group is intentionally added here even though we don't add
    # flags that are added after this group.
    strictness_group = parser.add_argument_group(
        title='Other strictness checks')
    add_invertible_flag('--allow-untyped-globals', default=False, strict_flag=False,
                        help="Suppress toplevel errors caused by missing annotations",
                        group=strictness_group)
    incremental_group = parser.add_argument_group(
        title='Incremental mode',
        description="Adjust how mypy incrementally type checks and caches modules. "
                    "Mypy caches type information about modules into a cache to "
                    "let you speed up future invocations of mypy. Also see "
                    "mypy's daemon mode: "
                    "mypy.readthedocs.io/en/latest/mypy_daemon.html#mypy-daemon")
    incremental_group.add_argument(
        '-i', '--incremental', action='store_true',
        help=argparse.SUPPRESS)
    incremental_group.add_argument(
        '--no-incremental', action='store_false', dest='incremental',
        help="Disable module cache (inverse: --incremental)")
    incremental_group.add_argument(
        '--cache-dir', action='store', metavar='DIR',
        help="Store module cache info in the given folder in incremental mode "
             "(defaults to '{}')".format(defaults.CACHE_DIR))
    incremental_group.add_argument(
        '--cache-fine-grained', action='store_true',
        help="Include fine-grained dependency information in the cache for the mypy daemon")
    incremental_group.add_argument(
        '--quick-and-dirty', action='store_true',
        help="Use cache even if dependencies out of date (implies --incremental)")
    incremental_group.add_argument(
        '--skip-version-check', action='store_true',
        help="Allow using cache written by older mypy version")
    internals_group = parser.add_argument_group(
        title='Mypy internals',
        description="Debug and customize mypy internals.")
    internals_group.add_argument(
        '--pdb', action='store_true', help="Invoke pdb on fatal error")
    internals_group.add_argument(
        '--show-traceback', '--tb', action='store_true',
        help="Show traceback on fatal error")
    internals_group.add_argument(
        '--custom-typing', metavar='MODULE', dest='custom_typing_module',
        help="Use a custom typing module")
    internals_group.add_argument(
        '--custom-typeshed-dir', metavar='DIR',
        help="Use the custom typeshed in DIR")
    add_invertible_flag('--warn-incomplete-stub', default=False,
                        help="Warn if missing type annotation in typeshed, only relevant with"
                             " --disallow-untyped-defs or --disallow-incomplete-defs enabled",
                        group=internals_group)
    internals_group.add_argument(
        '--shadow-file', nargs=2, metavar=('SOURCE_FILE', 'SHADOW_FILE'),
        dest='shadow_file', action='append',
        help="When encountering SOURCE_FILE, read and type check "
             "the contents of SHADOW_FILE instead.")
    add_invertible_flag('--fast-exit', default=False, help=argparse.SUPPRESS,
                        group=internals_group)
    error_group = parser.add_argument_group(
        title='Error reporting',
        description="Adjust the amount of detail shown in error messages.")
    add_invertible_flag('--show-error-context', default=False,
                        dest='show_error_context',
                        help='Precede errors with "note:" messages explaining context',
                        group=error_group)
    add_invertible_flag('--show-column-numbers', default=False,
                        help="Show column numbers in error messages",
                        group=error_group)
    strict_help = "Strict mode; enables the following flags: {}".format(
        ", ".join(strict_flag_names))
    strictness_group.add_argument(
        '--strict', action='store_true', dest='special-opts:strict',
        help=strict_help)
    report_group = parser.add_argument_group(
        title='Report generation',
        description='Generate a report in the specified format.')
    for report_type in sorted(reporter_classes):
        report_group.add_argument('--%s-report' % report_type.replace('_', '-'),
                                  metavar='DIR',
                                  dest='special-opts:%s_report' % report_type)
    other_group = parser.add_argument_group(
        title='Miscellaneous')
    other_group.add_argument(
        '--junit-xml', help="Write junit.xml to the given file")
    other_group.add_argument(
        '--scripts-are-modules', action='store_true',
        help="Script x becomes module x instead of __main__")
    other_group.add_argument(
        '--find-occurrences', metavar='CLASS.MEMBER',
        dest='special-opts:find_occurrences',
        help="Print out all usages of a class member (experimental)")
    if server_options:
        other_group.add_argument(
            '--experimental', action='store_true', dest='fine_grained_incremental',
            help="Enable fine-grained incremental mode")
        other_group.add_argument(
            '--use-fine-grained-cache', action='store_true',
            help="Use the cache in fine-grained incremental mode")
    # Hidden (undocumented) debugging options.
    parser.add_argument(
        '--stats', action='store_true', dest='dump_type_stats', help=argparse.SUPPRESS)
    parser.add_argument(
        '--inferstats', action='store_true', dest='dump_inference_stats',
        help=argparse.SUPPRESS)
    parser.add_argument('--debug-cache', action='store_true', help=argparse.SUPPRESS)
    parser.add_argument('--dump-deps', action='store_true', help=argparse.SUPPRESS)
    parser.add_argument('--dump-graph', action='store_true', help=argparse.SUPPRESS)
    parser.add_argument('--semantic-analysis-only', action='store_true', help=argparse.SUPPRESS)
    parser.add_argument('--local-partial-types', action='store_true', help=argparse.SUPPRESS)
    parser.add_argument('--logical-deps', action='store_true', help=argparse.SUPPRESS)
    parser.add_argument('--bazel', action='store_true', help=argparse.SUPPRESS)
    parser.add_argument('--package-root', metavar='ROOT', action='append', default=[],
                        help=argparse.SUPPRESS)
    parser.add_argument('--cache-map', nargs='+', dest='special-opts:cache_map',
                        help=argparse.SUPPRESS)
    # Deprecated / replaced flags, kept hidden for backwards compatibility.
    parser.add_argument('--disallow-any', dest='special-opts:disallow_any',
                        help=argparse.SUPPRESS)
    add_invertible_flag('--strict-boolean', default=False,
                        help=argparse.SUPPRESS)
    parser.add_argument('-f', '--dirty-stubs', action='store_true',
                        dest='special-opts:dirty_stubs',
                        help=argparse.SUPPRESS)
    parser.add_argument('--use-python-path', action='store_true',
                        dest='special-opts:use_python_path',
                        help=argparse.SUPPRESS)
    parser.add_argument('-s', '--silent-imports', action='store_true',
                        dest='special-opts:silent_imports',
                        help=argparse.SUPPRESS)
    parser.add_argument('--almost-silent', action='store_true',
                        dest='special-opts:almost_silent',
                        help=argparse.SUPPRESS)
    parser.add_argument('--fast-parser', action='store_true', dest='special-opts:fast_parser',
                        help=argparse.SUPPRESS)
    parser.add_argument('--no-fast-parser', action='store_true',
                        dest='special-opts:no_fast_parser',
                        help=argparse.SUPPRESS)
    code_group = parser.add_argument_group(
        title="Running code",
        description="Specify the code you want to type check. For more details, see "
                    "mypy.readthedocs.io/en/latest/running_mypy.html#running-mypy")
    code_group.add_argument(
        '-m', '--module', action='append', metavar='MODULE',
        default=[],
        dest='special-opts:modules',
        help="Type-check module; can repeat for more modules")
    code_group.add_argument(
        '-p', '--package', action='append', metavar='PACKAGE',
        default=[],
        dest='special-opts:packages',
        help="Type-check package recursively; can be repeated")
    code_group.add_argument(
        '-c', '--command', action='append', metavar='PROGRAM_TEXT',
        dest='special-opts:command',
        help="Type-check program passed in as string")
    code_group.add_argument(
        metavar='files', nargs='*', dest='special-opts:files',
        help="Type-check given files or directories")
    # Parse arguments once into a dummy namespace so we can get the
    # special-cased --config-file and --strict values first.
    dummy = argparse.Namespace()
    parser.parse_args(args, dummy)
    config_file = dummy.config_file
    if config_file is not None and not os.path.exists(config_file):
        parser.error("Cannot find config file '%s'" % config_file)
    # Parse the config file first, so the command line can override it.
    options = Options()
    parse_config_file(options, config_file)
    # Set strict flags before parsing (if strict mode enabled), so other
    # command line options can override them.
    if getattr(dummy, 'special-opts:strict'):
        for dest, value in strict_flag_assignments:
            setattr(options, dest, value)
    # Parse command line for real, using a split namespace: regular options
    # land on `options`, `special-opts:`-prefixed ones on `special_opts`.
    special_opts = argparse.Namespace()
    parser.parse_args(args, SplitNamespace(options, special_opts, 'special-opts:'))
    # Handle removed and deprecated flags with explicit messages.
    if special_opts.use_python_path:
        parser.error("Sorry, --use-python-path is no longer supported.\n"
                     "If you are trying this because your code depends on a library module,\n"
                     "you should really investigate how to obtain stubs for that module.\n"
                     "See https://github.com/python/mypy/issues/1411 for more discussion."
                     )
    if special_opts.disallow_any:
        print("--disallow-any option was split up into multiple flags. "
              "See http://mypy.readthedocs.io/en/latest/command_line.html#disallow-dynamic-typing")
    if options.strict_boolean:
        print("Warning: --strict-boolean is deprecated; "
              "see https://github.com/python/mypy/issues/3195", file=sys.stderr)
    if special_opts.almost_silent:
        print("Warning: --almost-silent has been replaced by "
              "--follow-imports=errors", file=sys.stderr)
        if options.follow_imports == 'normal':
            options.follow_imports = 'errors'
    elif special_opts.silent_imports:
        print("Warning: --silent-imports has been replaced by "
              "--ignore-missing-imports --follow-imports=skip", file=sys.stderr)
        options.ignore_missing_imports = True
        if options.follow_imports == 'normal':
            options.follow_imports = 'skip'
    if special_opts.dirty_stubs:
        print("Warning: -f/--dirty-stubs is deprecated and no longer necessary. Mypy no longer "
              "checks the git status of stubs.",
              file=sys.stderr)
    if special_opts.fast_parser:
        print("Warning: --fast-parser is now the default (and only) parser.")
    if special_opts.no_fast_parser:
        print("Warning: --no-fast-parser no longer has any effect.  The fast parser "
              "is now mypy's default and only parser.")
    try:
        infer_python_version_and_executable(options, special_opts)
    except PythonExecutableInferenceError as e:
        parser.error(str(e))
    if special_opts.no_executable:
        options.python_executable = None
    # Check for invalid argument combinations.
    if require_targets:
        code_methods = sum(bool(c) for c in [special_opts.modules + special_opts.packages,
                                             special_opts.command,
                                             special_opts.files])
        if code_methods == 0:
            parser.error("Missing target module, package, files, or command.")
        elif code_methods > 1:
            parser.error("May only specify one of: module/package, files, or command.")
    # Check for overlapping `--always-true` and `--always-false` flags.
    overlap = set(options.always_true) & set(options.always_false)
    if overlap:
        parser.error("You can't make a variable always true and always false (%s)" %
                     ', '.join(sorted(overlap)))
    if options.strict_optional_whitelist is not None:
        options.strict_optional = True
    if special_opts.find_occurrences:
        experiments.find_occurrences = special_opts.find_occurrences.split('.')
        assert experiments.find_occurrences is not None
        if len(experiments.find_occurrences) < 2:
            parser.error("Can only find occurrences of class members.")
        if len(experiments.find_occurrences) != 2:
            parser.error("Can only find occurrences of non-nested class members.")
    # Collect '--<type>-report DIR' values into options.report_dirs.
    for flag, val in vars(special_opts).items():
        if flag.endswith('_report') and val is not None:
            report_type = flag[:-7].replace('_', '-')
            report_dir = val
            options.report_dirs[report_type] = report_dir
    if options.package_root:
        process_package_roots(fscache, parser, options)
    if special_opts.cache_map:
        process_cache_map(parser, special_opts, options)
    if options.quick_and_dirty:
        options.incremental = True
    # Let logical_deps imply cache_fine_grained (otherwise the former is useless).
    if options.logical_deps:
        options.cache_fine_grained = True
    # Set build targets from whichever of -m/-p, -c, or files was given.
    if special_opts.modules + special_opts.packages:
        options.build_type = BuildType.MODULE
        search_paths = SearchPaths((os.getcwd(),), tuple(mypy_path()), (), ())
        targets = []
        cache = FindModuleCache(search_paths, fscache)
        for p in special_opts.packages:
            if os.sep in p or os.altsep and os.altsep in p:
                fail("Package name '{}' cannot have a slash in it.".format(p))
            p_targets = cache.find_modules_recursive(p)
            if not p_targets:
                fail("Can't find package '{}'".format(p))
            targets.extend(p_targets)
        for m in special_opts.modules:
            targets.append(BuildSource(None, m, None))
        return targets, options
    elif special_opts.command:
        options.build_type = BuildType.PROGRAM_TEXT
        targets = [BuildSource(None, None, '\n'.join(special_opts.command))]
        return targets, options
    else:
        try:
            targets = create_source_list(special_opts.files, options, fscache)
        except InvalidSourceList as e:
            fail(str(e))
        return targets, options
def process_package_roots(fscache: Optional[FileSystemCache],
                          parser: argparse.ArgumentParser,
                          options: Options) -> None:
    """Validate and normalize --package-root options, then install them.

    Each root must be a relative path on the current drive that does not
    escape the current directory.  Roots are normalized to '' (for the
    current directory) or to a path with a trailing separator, written back
    to options.package_root and handed to the file system cache.
    """
    if fscache is None:
        parser.error("--package-root does not work here (no fscache)")
    assert fscache is not None  # Since mypy doesn't know parser.error() raises.
    # All package roots must be relative to the current directory and on
    # the current drive.
    current_drive, _ = os.path.splitdrive(os.getcwd())
    here = os.curdir
    here_slash = os.curdir + os.sep
    up_slash = os.pardir + os.sep
    normalized = []
    for raw in options.package_root:
        if os.path.isabs(raw):
            parser.error("Package root cannot be absolute: %r" % raw)
        drive, root = os.path.splitdrive(raw)
        if drive and drive != current_drive:
            parser.error("Package root must be on current drive: %r" % (drive + root))
        if root:
            root = os.path.relpath(root)
            if root.startswith(up_slash):
                parser.error("Package root cannot be above current directory: %r" % root)
            if root in (here, here_slash):
                # The current directory is represented by the empty string.
                root = ''
            elif not root.endswith(os.sep):
                root = root + os.sep
        normalized.append(root)
    options.package_root = normalized
    fscache.set_package_root(normalized)
def process_cache_map(parser: argparse.ArgumentParser,
                      special_opts: argparse.Namespace,
                      options: Options) -> None:
    """Validate --cache-map triples and record them in options.cache_map.

    The flag takes (source, meta_file, data_file) triples: source must end
    in .py/.pyi, meta_file in .meta.json, and data_file in .data.json.
    Any violation is reported via parser.error().
    """
    entries = special_opts.cache_map
    if len(entries) % 3 != 0:
        parser.error("--cache-map requires one or more triples (see source)")
    for source, meta_file, data_file in zip(entries[::3], entries[1::3], entries[2::3]):
        if source in options.cache_map:
            parser.error("Duplicate --cache-map source %s)" % source)
        if not source.endswith(('.py', '.pyi')):
            parser.error("Invalid --cache-map source %s (triple[0] must be *.py[i])" % source)
        if not meta_file.endswith('.meta.json'):
            parser.error("Invalid --cache-map meta_file %s (triple[1] must be *.meta.json)" %
                         meta_file)
        if not data_file.endswith('.data.json'):
            parser.error("Invalid --cache-map data_file %s (triple[2] must be *.data.json)" %
                         data_file)
        options.cache_map[source] = (meta_file, data_file)
# Map of config-file option name -> converter callable.  This table exists
# to specify parsers for options whose default value's type (often None or
# a container) is not enough to infer how to parse the raw config string.
config_types = {
    'python_version': parse_version,
    'strict_optional_whitelist': lambda s: s.split(),
    'custom_typing_module': str,
    'custom_typeshed_dir': str,
    'mypy_path': lambda s: [p.strip() for p in re.split('[,:]', s)],
    'junit_xml': str,
    # These two are for backwards compatibility
    'silent_imports': bool,
    'almost_silent': bool,
    'plugins': lambda s: [p.strip() for p in s.split(',')],
    'always_true': lambda s: [p.strip() for p in s.split(',')],
    'always_false': lambda s: [p.strip() for p in s.split(',')],
    'package_root': lambda s: [p.strip() for p in s.split(',')],
}  # type: Final
def parse_config_file(options: Options, filename: Optional[str]) -> None:
    """Parse a config file into an Options object.

    Errors are written to stderr but are not fatal.  If *filename* is None,
    the default config file locations are tried in order and the first one
    that parses successfully is used; its path is recorded in
    options.config_file.
    """
    if filename is not None:
        config_files = (filename,)  # type: Tuple[str, ...]
    else:
        config_files = tuple(map(os.path.expanduser, defaults.CONFIG_FILES))
    parser = configparser.RawConfigParser()
    for config_file in config_files:
        if not os.path.exists(config_file):
            continue
        try:
            parser.read(config_file)
        except configparser.Error as err:
            print("%s: %s" % (config_file, err), file=sys.stderr)
        else:
            file_read = config_file
            options.config_file = file_read
            break
    else:
        # No config file was found (or all candidates failed to parse).
        return
    if 'mypy' not in parser:
        # Shared config files (e.g. setup.cfg) may legitimately lack a
        # [mypy] section; only complain for explicitly-requested files.
        if filename or file_read not in defaults.SHARED_CONFIG_FILES:
            print("%s: No [mypy] section in config file" % file_read, file=sys.stderr)
    else:
        section = parser['mypy']
        prefix = '%s: [%s]' % (file_read, 'mypy')
        updates, report_dirs = parse_section(prefix, options, section)
        for k, v in updates.items():
            setattr(options, k, v)
        options.report_dirs.update(report_dirs)
    # [mypy-<glob>] sections carry per-module option overrides.
    for name, section in parser.items():
        if name.startswith('mypy-'):
            prefix = '%s: [%s]' % (file_read, name)
            updates, report_dirs = parse_section(prefix, options, section)
            if report_dirs:
                print("%s: Per-module sections should not specify reports (%s)" %
                      (prefix, ', '.join(s + '_report' for s in sorted(report_dirs))),
                      file=sys.stderr)
            if set(updates) - PER_MODULE_OPTIONS:
                print("%s: Per-module sections should only specify per-module flags (%s)" %
                      (prefix, ', '.join(sorted(set(updates) - PER_MODULE_OPTIONS))),
                      file=sys.stderr)
                updates = {k: v for k, v in updates.items() if k in PER_MODULE_OPTIONS}
            globs = name[5:]
            for glob in globs.split(','):
                # For backwards compatibility, replace (back)slashes with dots.
                glob = glob.replace(os.sep, '.')
                if os.altsep:
                    glob = glob.replace(os.altsep, '.')
                if (any(c in glob for c in '?[]!') or
                        any('*' in x and x != '*' for x in glob.split('.'))):
                    print("%s: Patterns must be fully-qualified module names, optionally "
                          "with '*' in some components (e.g spam.*.eggs.*)"
                          % prefix,
                          file=sys.stderr)
                else:
                    options.per_module_options[glob] = updates
def parse_section(prefix: str, template: Options,
                  section: Mapping[str, str]) -> Tuple[Dict[str, object], Dict[str, str]]:
    """Parse one section of a config file.

    Returns a dict of option values encountered, and a dict of report
    directories (keyed by report type).  Unknown/untypable keys are
    reported to stderr and skipped.
    """
    results = {}  # type: Dict[str, object]
    report_dirs = {}  # type: Dict[str, str]
    for key in section:
        # Determine the expected type of this option: either from the
        # explicit table, or from the type of the template's default value.
        if key in config_types:
            ct = config_types[key]
        else:
            dv = getattr(template, key, None)
            if dv is None:
                if key.endswith('_report'):
                    report_type = key[:-7].replace('_', '-')
                    if report_type in reporter_classes:
                        report_dirs[report_type] = section[key]
                    else:
                        print("%s: Unrecognized report type: %s" % (prefix, key),
                              file=sys.stderr)
                    continue
                if key.startswith('x_'):
                    continue  # Don't complain about `x_blah` flags
                elif key == 'strict':
                    print("%s: Strict mode is not supported in configuration files: specify "
                          "individual flags instead (see 'mypy -h' for the list of flags enabled "
                          "in strict mode)" % prefix, file=sys.stderr)
                else:
                    print("%s: Unrecognized option: %s = %s" % (prefix, key, section[key]),
                          file=sys.stderr)
                continue
            ct = type(dv)
        v = None
        try:
            # FIX: this conditional had been mangled into a single broken
            # line ("... allable(ct):"); restored the bool branch and the
            # `elif callable(ct):` branch that converts via the type/parser.
            if ct is bool:
                v = section.getboolean(key)
            elif callable(ct):
                try:
                    v = ct(section.get(key))
                except argparse.ArgumentTypeError as err:
                    print("%s: %s: %s" % (prefix, key, err), file=sys.stderr)
                    continue
            else:
                print("%s: Don't know what type %s should have" % (prefix, key), file=sys.stderr)
                continue
        except ValueError as err:
            print("%s: %s: %s" % (prefix, key, err), file=sys.stderr)
            continue
        # Legacy option translations (kept for backwards compatibility).
        if key == 'silent_imports':
            print("%s: silent_imports has been replaced by "
                  "ignore_missing_imports=True; follow_imports=skip" % prefix, file=sys.stderr)
            if v:
                if 'ignore_missing_imports' not in results:
                    results['ignore_missing_imports'] = True
                if 'follow_imports' not in results:
                    results['follow_imports'] = 'skip'
        if key == 'almost_silent':
            print("%s: almost_silent has been replaced by "
                  "follow_imports=error" % prefix, file=sys.stderr)
            if v:
                if 'follow_imports' not in results:
                    results['follow_imports'] = 'error'
        results[key] = v
    return results, report_dirs
def fail(msg: str) -> None:
    """Print *msg* (plus a newline) to stderr and exit with status 1."""
    print(msg, file=sys.stderr)
    sys.exit(1)
| true | true |
1c47b20f4f8dc841c057a6f528ecd4be3beca08f | 10,390 | py | Python | wbb/modules/misc.py | TAMILVIP007/WilliamButcherBot | e7a02edcd57ec62c7f80c601484e92e257e1d5bf | [
"MIT"
] | 1 | 2021-06-30T07:09:45.000Z | 2021-06-30T07:09:45.000Z | wbb/modules/misc.py | fakeenemy01/GroupBot | e7a02edcd57ec62c7f80c601484e92e257e1d5bf | [
"MIT"
] | null | null | null | wbb/modules/misc.py | fakeenemy01/GroupBot | e7a02edcd57ec62c7f80c601484e92e257e1d5bf | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2021 TheHamkerCat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import secrets
import string
import aiohttp
from cryptography.fernet import Fernet
from pyrogram import filters
from wbb import FERNET_ENCRYPTION_KEY, app, arq
from wbb.core.decorators.errors import capture_err
from wbb.utils import random_line
from wbb.utils.fetch import fetch
from wbb.utils.json_prettify import json_prettify
from wbb.utils.pastebin import paste
__MODULE__ = "Misc"
__HELP__ = """
/commit - Generate Funny Commit Messages
/runs - Idk Test Yourself
/id - Get Chat_ID or User_ID
/random [Length] - Generate Random Complex Passwords
/encrypt - Encrypt Text [Can Only Be Decrypted By This Bot]
/decrypt - Decrypt Text
/cheat [Language] [Query] - Get Programming Related Help
/weather [City] - To Get Weather Info
/tr [en] - Translate A Message
/json [URL] - Get JSON Response From An API or Something.
/arq - Statistics Of ARQ API.
/webss [URL] - Take A Screenshot Of A Webpage
/reverse - Reverse search an image.
/carbon - Make Carbon from code.
#RTFM - Tell noobs to read the manual
"""
@app.on_message(filters.command("commit") & ~filters.edited)
async def commit(_, message):
    """Reply with a random funny commit message."""
    line = await random_line("wbb/utils/commit.txt")
    await message.reply_text(line)
@app.on_message(filters.command("RTFM", "#"))
async def rtfm(_, message):
    """Delete the #RTFM trigger message and scold the quoted user."""
    await message.delete()
    replied = message.reply_to_message
    if not replied:
        return await message.reply_text("Reply To A Message lol")
    await replied.reply_text(
        "Are You Lost? READ THE FUCKING DOCS!"
    )
@app.on_message(filters.command("runs") & ~filters.edited)
async def runs(_, message):
    """Reply with a random line from the runs list."""
    line = await random_line("wbb/utils/runs.txt")
    await message.reply_text(line)
@app.on_message(filters.command("id"))
async def getid(_, message):
    """Reply with the chat, message and user ids for this message.

    With an argument (/id <username_or_id>) it looks the user up and
    replies with that user's id; otherwise it reports ids for the current
    chat/message, the replied-to message, and any forward origin.
    """
    if len(message.command) == 2:
        try:
            # Renamed from `id` to avoid shadowing the builtin.
            uid = (
                await app.get_users(
                    message.text.split(None, 1)[1].strip()
                )
            ).id
        except Exception:
            return await message.reply_text("No Such User")
        # FIX: this reply is sent with parse_mode="html"; the old Markdown
        # markup (**...** / backticks) was rendered literally.
        text = f"<b>ID:</b> <code>{uid}</code>"
        return await message.reply_text(text, parse_mode="html")
    # Two variants of the report are built: `text_unping` uses t.me links
    # (no notification), `text_ping` uses tg://user links (mentions).
    text_unping = "<b>Chat ID:</b>"
    if message.chat.username:
        text_unping = f'<a href="https://t.me/{message.chat.username}">{text_unping}</a>'
    text_unping += f" <code>{message.chat.id}</code>\n"
    text = "<b>Message ID:</b>"
    if message.link:
        text = f'<a href="{message.link}">{text}</a>'
    text += f" <code>{message.message_id}</code>\n"
    text_unping += text
    if message.from_user:
        text_unping += f'<b><a href="tg://user?id={message.from_user.id}">User ID:</a></b> <code>{message.from_user.id}</code>\n'
    text_ping = text_unping
    reply = message.reply_to_message
    if not getattr(reply, "empty", True):
        text_unping += "\n"
        text = "<b>Replied Message ID:</b>"
        if reply.link:
            text = f'<a href="{reply.link}">{text}</a>'
        text += f" <code>{reply.message_id}</code>\n"
        text_unping += text
        text_ping = text_unping
        if reply.from_user:
            text = "<b>Replied User ID:</b>"
            if reply.from_user.username:
                text = f'<a href="https://t.me/{reply.from_user.username}">{text}</a>'
            text += f" <code>{reply.from_user.id}</code>\n"
            text_unping += text
            text_ping += f'<b><a href="tg://user?id={reply.from_user.id}">Replied User ID:</a></b> <code>{reply.from_user.id}</code>\n'
        if reply.forward_from:
            text_unping += "\n"
            text = "<b>Forwarded User ID:</b>"
            if reply.forward_from.username:
                text = f'<a href="https://t.me/{reply.forward_from.username}">{text}</a>'
            text += f" <code>{reply.forward_from.id}</code>\n"
            text_unping += text
            text_ping += f'\n<b><a href="tg://user?id={reply.forward_from.id}">Forwarded User ID:</a></b> <code>{reply.forward_from.id}</code>\n'
    # Send the non-pinging variant first, then edit in the pinging links
    # (editing avoids a second message while still notifying mentioned users).
    reply = await message.reply_text(
        text_unping, disable_web_page_preview=True, parse_mode="html"
    )
    if text_unping != text_ping:
        await reply.edit_text(
            text_ping,
            disable_web_page_preview=True,
            parse_mode="html",
        )
# Random
@app.on_message(filters.command("random") & ~filters.edited)
@capture_err
async def random(_, message):
    """Generate a random alphanumeric password: /random <length>."""
    if len(message.command) != 2:
        return await message.reply_text(
            # FIX: typo "Argurment" in the user-facing error text.
            '"/random" Needs An Argument.' " Ex: `/random 5`"
        )
    length = message.text.split(None, 1)[1]
    # Parse once up front instead of re-converting inside the condition.
    try:
        n = int(length)
    except ValueError:
        return await message.reply_text(
            "Strings Won't Work!, Pass A Positive Integer Less Than 1000"
        )
    # NOTE(review): both bounds are exclusive, so 1 and 1000 themselves are
    # rejected even though the messages suggest otherwise — confirm intent
    # before changing the texts or the bounds.
    if 1 < n < 1000:
        alphabet = string.ascii_letters + string.digits
        password = "".join(secrets.choice(alphabet) for _ in range(n))
        await message.reply_text(f"`{password}`")
    else:
        await message.reply_text(
            "Specify A Length Between 1-1000"
        )
# Encrypt
@app.on_message(filters.command("encrypt") & ~filters.edited)
@capture_err
async def encrypt(_, message):
    """Encrypt the replied-to message's text with the bot's Fernet key."""
    replied = message.reply_to_message
    if not replied:
        return await message.reply_text(
            "Reply To A Message To Encrypt It."
        )
    cipher = Fernet(FERNET_ENCRYPTION_KEY)
    token = cipher.encrypt(replied.text.encode("utf-8"))
    await message.reply_text(token.decode("utf-8"))
# Decrypt
@app.on_message(filters.command("decrypt") & ~filters.edited)
@capture_err
async def decrypt(_, message):
    """Decrypt Fernet tokens previously produced by /encrypt."""
    replied = message.reply_to_message
    if not replied:
        return await message.reply_text(
            "Reply To A Message To Decrypt It."
        )
    cipher = Fernet(FERNET_ENCRYPTION_KEY)
    try:
        plain = cipher.decrypt(replied.text.encode("utf-8"))
    except Exception:
        return await message.reply_text("Incorrect token")
    await message.reply_text(plain.decode("utf-8"))
async def fetch_text(url):
    """GET *url* with a curl user-agent and return the response body as text."""
    headers = {"user-agent": "curl"}
    async with aiohttp.ClientSession(headers=headers) as session:
        async with session.get(url) as resp:
            return await resp.text()
# Cheat.sh
@app.on_message(filters.command("cheat") & ~filters.edited)
@capture_err
async def cheat(_, message):
    """Query cheat.sh: /cheat <language> <query words...>."""
    if len(message.command) < 3:
        return await message.reply_text("/cheat [language] [query]")
    m = await message.reply_text("Searching")
    try:
        language = message.command[1]
        # FIX: the old code only used the second word, silently dropping the
        # rest of a multi-word query.  cht.sh uses '+' as its word separator.
        query = "+".join(message.command[2:])
        data = await fetch_text(
            f"http://cht.sh/{language}/{query}?QT"
        )
        if not data:
            return await m.edit("Found Literally Nothing!")
        await m.edit(f"`{data}`")
    except Exception as e:
        await m.edit(str(e))
        print(str(e))
# Translate
@app.on_message(filters.command("tr") & ~filters.edited)
@capture_err
async def tr(_, message):
    """Translate the replied-to message: /tr [LANGUAGE_CODE]."""
    if len(message.command) != 2:
        return await message.reply_text("/tr [LANGUAGE_CODE]")
    lang = message.text.split(None, 1)[1]
    if not message.reply_to_message or not lang:
        return await message.reply_text(
            "Reply to a message with /tr [language code]"
            + "\nGet supported language list from here -"
            + " https://py-googletrans.readthedocs.io/en"
            + "/latest/#googletrans-languages"
        )
    reply = message.reply_to_message
    # FIX: the old code read `message.text or message.reply`, i.e. the
    # "/tr xx" command itself (always truthy), so the replied message was
    # never the thing translated.  Use the replied message's text.
    text = reply.text
    if not text:
        return await message.reply_text(
            "Reply to a text to translate it"
        )
    result = await arq.translate(text, lang)
    if not result.ok:
        return await message.reply_text(result.result)
    await message.reply_text(result.result.translatedText)
@app.on_message(filters.command("json") & ~filters.edited)
@capture_err
async def json_fetch(_, message):
    """Fetch a URL, pretty-print the JSON; paste it if too long for Telegram."""
    if len(message.command) != 2:
        return await message.reply_text("/json [URL]")
    url = message.text.split(None, 1)[1]
    status = await message.reply_text("Fetching")
    try:
        pretty = await json_prettify(await fetch(url))
        if len(pretty) < 4090:
            await status.edit(pretty)
        else:
            link = await paste(pretty)
            await status.edit(
                f"[OUTPUT_TOO_LONG]({link})",
                disable_web_page_preview=True,
            )
    except Exception as e:
        await status.edit(str(e))
@app.on_message(filters.command("webss"))
@capture_err
async def take_ss(_, message):
    """Send a screenshot of the given web page: /webss <url>."""
    if len(message.command) != 2:
        return await message.reply_text(
            "Give A Url To Fetch Screenshot."
        )
    url = message.text.split(None, 1)[1]
    status = await message.reply_text("**Uploading**")
    try:
        await app.send_photo(
            message.chat.id,
            photo=f"https://webshot.amanoteam.com/print?q={url}",
        )
    except Exception:
        return await status.edit("No Such Website.")
    await status.delete()
| 33.516129 | 145 | 0.64052 | import secrets
import string
import aiohttp
from cryptography.fernet import Fernet
from pyrogram import filters
from wbb import FERNET_ENCRYPTION_KEY, app, arq
from wbb.core.decorators.errors import capture_err
from wbb.utils import random_line
from wbb.utils.fetch import fetch
from wbb.utils.json_prettify import json_prettify
from wbb.utils.pastebin import paste
__MODULE__ = "Misc"
__HELP__ = """
/commit - Generate Funny Commit Messages
/runs - Idk Test Yourself
/id - Get Chat_ID or User_ID
/random [Length] - Generate Random Complex Passwords
/encrypt - Encrypt Text [Can Only Be Decrypted By This Bot]
/decrypt - Decrypt Text
/cheat [Language] [Query] - Get Programming Related Help
/weather [City] - To Get Weather Info
/tr [en] - Translate A Message
/json [URL] - Get JSON Response From An API or Something.
/arq - Statistics Of ARQ API.
/webss [URL] - Take A Screenshot Of A Webpage
/reverse - Reverse search an image.
/carbon - Make Carbon from code.
#RTFM - Tell noobs to read the manual
"""
@app.on_message(filters.command("commit") & ~filters.edited)
async def commit(_, message):
await message.reply_text(
(await random_line("wbb/utils/commit.txt"))
)
@app.on_message(filters.command("RTFM", "#"))
async def rtfm(_, message):
await message.delete()
if not message.reply_to_message:
return await message.reply_text("Reply To A Message lol")
await message.reply_to_message.reply_text(
"Are You Lost? READ THE FUCKING DOCS!"
)
@app.on_message(filters.command("runs") & ~filters.edited)
async def runs(_, message):
await message.reply_text(
(await random_line("wbb/utils/runs.txt"))
)
@app.on_message(filters.command("id"))
async def getid(_, message):
if len(message.command) == 2:
try:
id = (
await app.get_users(
message.text.split(None, 1)[1].strip()
)
).id
except Exception:
return await message.reply_text("No Such User")
text = f"**ID:** `{id}`"
return await message.reply_text(text, parse_mode="html")
text_unping = "<b>Chat ID:</b>"
if message.chat.username:
text_unping = f'<a href="https://t.me/{message.chat.username}">{text_unping}</a>'
text_unping += f" <code>{message.chat.id}</code>\n"
text = "<b>Message ID:</b>"
if message.link:
text = f'<a href="{message.link}">{text}</a>'
text += f" <code>{message.message_id}</code>\n"
text_unping += text
if message.from_user:
text_unping += f'<b><a href="tg://user?id={message.from_user.id}">User ID:</a></b> <code>{message.from_user.id}</code>\n'
text_ping = text_unping
reply = message.reply_to_message
if not getattr(reply, "empty", True):
text_unping += "\n"
text = "<b>Replied Message ID:</b>"
if reply.link:
text = f'<a href="{reply.link}">{text}</a>'
text += f" <code>{reply.message_id}</code>\n"
text_unping += text
text_ping = text_unping
if reply.from_user:
text = "<b>Replied User ID:</b>"
if reply.from_user.username:
text = f'<a href="https://t.me/{reply.from_user.username}">{text}</a>'
text += f" <code>{reply.from_user.id}</code>\n"
text_unping += text
text_ping += f'<b><a href="tg://user?id={reply.from_user.id}">Replied User ID:</a></b> <code>{reply.from_user.id}</code>\n'
if reply.forward_from:
text_unping += "\n"
text = "<b>Forwarded User ID:</b>"
if reply.forward_from.username:
text = f'<a href="https://t.me/{reply.forward_from.username}">{text}</a>'
text += f" <code>{reply.forward_from.id}</code>\n"
text_unping += text
text_ping += f'\n<b><a href="tg://user?id={reply.forward_from.id}">Forwarded User ID:</a></b> <code>{reply.forward_from.id}</code>\n'
reply = await message.reply_text(
text_unping, disable_web_page_preview=True, parse_mode="html"
)
if text_unping != text_ping:
await reply.edit_text(
text_ping,
disable_web_page_preview=True,
parse_mode="html",
)
@app.on_message(filters.command("random") & ~filters.edited)
@capture_err
async def random(_, message):
if len(message.command) != 2:
return await message.reply_text(
'"/random" Needs An Argurment.' " Ex: `/random 5`"
)
length = message.text.split(None, 1)[1]
try:
if 1 < int(length) < 1000:
alphabet = string.ascii_letters + string.digits
password = "".join(
secrets.choice(alphabet) for i in range(int(length))
)
await message.reply_text(f"`{password}`")
else:
await message.reply_text(
"Specify A Length Between 1-1000"
)
except ValueError:
await message.reply_text(
"Strings Won't Work!, Pass A Positive Integer Less Than 1000"
)
# Encrypt
@app.on_message(filters.command("encrypt") & ~filters.edited)
@capture_err
async def encrypt(_, message):
if not message.reply_to_message:
return await message.reply_text(
"Reply To A Message To Encrypt It."
)
text = message.reply_to_message.text
text_in_bytes = bytes(text, "utf-8")
cipher_suite = Fernet(FERNET_ENCRYPTION_KEY)
encrypted_text = cipher_suite.encrypt(text_in_bytes)
bytes_in_text = encrypted_text.decode("utf-8")
await message.reply_text(bytes_in_text)
# Decrypt
@app.on_message(filters.command("decrypt") & ~filters.edited)
@capture_err
async def decrypt(_, message):
if not message.reply_to_message:
return await message.reply_text(
"Reply To A Message To Decrypt It."
)
text = message.reply_to_message.text
text_in_bytes = bytes(text, "utf-8")
cipher_suite = Fernet(FERNET_ENCRYPTION_KEY)
try:
decoded_text = cipher_suite.decrypt(text_in_bytes)
except Exception:
return await message.reply_text("Incorrect token")
bytes_in_text = decoded_text.decode("utf-8")
await message.reply_text(bytes_in_text)
async def fetch_text(url):
async with aiohttp.ClientSession(
headers={"user-agent": "curl"}
) as session:
async with session.get(url) as resp:
data = await resp.text()
return data
# Cheat.sh
@app.on_message(filters.command("cheat") & ~filters.edited)
@capture_err
async def cheat(_, message):
if len(message.command) < 3:
return await message.reply_text("/cheat [language] [query]")
text = message.text.split(None, 1)[1]
m = await message.reply_text("Searching")
try:
ftext = text.split()
language = ftext[0]
query = ftext[1]
data = await fetch_text(
f"http://cht.sh/{language}/{query}?QT"
)
if not data:
return await m.edit("Found Literally Nothing!")
await m.edit(f"`{data}`")
except Exception as e:
await m.edit(str(e))
print(str(e))
# Translate
@app.on_message(filters.command("tr") & ~filters.edited)
@capture_err
async def tr(_, message):
if len(message.command) != 2:
return await message.reply_text("/tr [LANGUAGE_CODE]")
lang = message.text.split(None, 1)[1]
if not message.reply_to_message or not lang:
return await message.reply_text(
"Reply to a message with /tr [language code]"
+ "\nGet supported language list from here -"
+ " https://py-googletrans.readthedocs.io/en"
+ "/latest/#googletrans-languages"
)
reply = message.reply_to_message
text = message.text or message.reply
if not text:
return await message.reply_text(
"Reply to a text to translate it"
)
result = await arq.translate(text, lang)
if not result.ok:
return await message.reply_text(result.result)
await message.reply_text(result.result.translatedText)
@app.on_message(filters.command("json") & ~filters.edited)
@capture_err
async def json_fetch(_, message):
if len(message.command) != 2:
return await message.reply_text("/json [URL]")
url = message.text.split(None, 1)[1]
m = await message.reply_text("Fetching")
try:
data = await fetch(url)
data = await json_prettify(data)
if len(data) < 4090:
await m.edit(data)
else:
link = await paste(data)
await m.edit(
f"[OUTPUT_TOO_LONG]({link})",
disable_web_page_preview=True,
)
except Exception as e:
await m.edit(str(e))
@app.on_message(filters.command("webss"))
@capture_err
async def take_ss(_, message):
if len(message.command) != 2:
return await message.reply_text(
"Give A Url To Fetch Screenshot."
)
url = message.text.split(None, 1)[1]
m = await message.reply_text("**Uploading**")
try:
await app.send_photo(
message.chat.id,
photo=f"https://webshot.amanoteam.com/print?q={url}",
)
except Exception:
return await m.edit("No Such Website.")
await m.delete()
| true | true |
1c47b21893ab3220005fe7fa5a3318ed874a4750 | 592 | py | Python | python/tests/test_merge_sort.py | YahyaOmari/data-structures-and-algorithms | 86c1bc892ef3b62238555548f460065ac24c5ce3 | [
"MIT"
] | null | null | null | python/tests/test_merge_sort.py | YahyaOmari/data-structures-and-algorithms | 86c1bc892ef3b62238555548f460065ac24c5ce3 | [
"MIT"
] | 1 | 2021-05-04T21:33:34.000Z | 2021-05-04T21:33:34.000Z | python/tests/test_merge_sort.py | YahyaOmari/data-structures-and-algorithms | 86c1bc892ef3b62238555548f460065ac24c5ce3 | [
"MIT"
] | null | null | null | import pytest
from challenges.merge_sort.merge_sort import merge_sort
def test_merge_sort():
    """merge_sort orders a small unsorted list ascending."""
    expected = [0, 2, 5, 6]
    assert merge_sort([5, 2, 6, 0]) == expected
def test_merge_sort2():
    """Handles a reverse-sorted list containing a negative value."""
    expected = [-2, 5, 8, 12, 18, 20]
    assert merge_sort([20, 18, 12, 8, 5, -2]) == expected
def test_merge_sort3():
    """Keeps duplicate values and orders them ascending."""
    expected = [5, 5, 5, 7, 7, 12]
    assert merge_sort([5, 12, 7, 5, 5, 7]) == expected
def test_merge_sort4():
    """Sorts an almost-sorted list with one out-of-place pair."""
    expected = [2, 3, 5, 7, 11, 13]
    assert merge_sort([2, 3, 5, 7, 13, 11]) == expected
from challenges.merge_sort.merge_sort import merge_sort
def test_merge_sort():
actual = merge_sort([5,2,6,0])
excpected = [0, 2, 5, 6]
assert excpected == actual
def test_merge_sort2():
actual = merge_sort([20,18,12,8,5,-2])
excpected = [-2, 5, 8, 12, 18, 20]
assert excpected == actual
def test_merge_sort3():
actual = merge_sort([5,12,7,5,5,7])
excpected = [5, 5, 5, 7, 7, 12]
assert excpected == actual
def test_merge_sort4():
actual = merge_sort([2,3,5,7,13,11])
excpected = [2, 3, 5, 7, 11, 13]
assert excpected == actual | true | true |
1c47b32f4ca4a9f1fa63baf4c55c2e109438b7d7 | 3,730 | py | Python | pychron/dashboard/process_value.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 31 | 2016-03-07T02:38:17.000Z | 2022-02-14T18:23:43.000Z | pychron/dashboard/process_value.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 1,626 | 2015-01-07T04:52:35.000Z | 2022-03-25T19:15:59.000Z | pychron/dashboard/process_value.py | UIllinoisHALPychron/pychron | f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc | [
"Apache-2.0"
] | 26 | 2015-05-23T00:10:06.000Z | 2022-03-07T16:51:57.000Z | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import time
from traits.api import HasTraits, Str, Either, Property, Float, Int, Bool, List, Enum
from traitsui.api import (
View,
VGroup,
HGroup,
UItem,
ListEditor,
InstanceEditor,
Readonly,
)
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.core.helpers.datetime_tools import convert_timestamp
from pychron.dashboard.conditional import DashboardConditional
from pychron.dashboard.constants import NOERROR, CRITICAL, WARNING
class ProcessValue(HasTraits):
    """A single dashboard-monitored process value (e.g. a sensor reading)."""

    name = Str
    units = Str
    tag = Str
    func_name = Str

    # minimum |new - last| that counts as a real change
    change_threshold = Float(1e-20)
    period = Either(Float, Str)  # "on_change" or number of seconds

    last_time = Float
    last_time_str = Property(depends_on="last_time")
    enabled = Bool
    last_value = Float
    timeout = Float
    plotid = Int

    conditionals = List(DashboardConditional)
    flag = Enum(NOERROR, WARNING, CRITICAL)

    path = Str
    record = Bool(False)
    display_name = Property

    def is_different(self, v):
        """Return True (and remember *v*) when it should be recorded.

        A value counts as "different" when it moved more than
        ``change_threshold`` from the last recorded value, or when more
        than an hour has passed since the last measurement.  Returns
        None otherwise.  (Removed the commented-out debug prints that
        used to live here.)
        """
        ret = None
        ct = time.time()
        tt = 60 * 60  # max time (s) allowed without a measurement taken
        # even if the current value is the same as the last value
        threshold = self.change_threshold
        if abs(self.last_value - v) > threshold or (
            self.last_time and ct - self.last_time > tt
        ):
            self.last_value = v
            ret = True
        return ret

    def _get_display_name(self):
        # Property getter: "name (units)" when units are set, else just name.
        n = self.name
        if self.units:
            n = "{} ({})".format(n, self.units)
        return n

    def traits_view(self):
        # Default TraitsUI layout: enable toggle plus read-only metadata and
        # the list of conditionals attached to this value.
        v = View(
            VGroup(
                HGroup(UItem("enabled"), Readonly("name")),
                VGroup(
                    HGroup(Readonly("tag"), Readonly("period")),
                    HGroup(Readonly("last_time_str"), Readonly("last_value")),
                    VGroup(
                        UItem(
                            "conditionals",
                            editor=ListEditor(
                                editor=InstanceEditor(), style="custom", mutable=False
                            ),
                        ),
                        show_border=True,
                        label="Conditionals",
                    ),
                    enabled_when="enabled",
                ),
            )
        )
        return v

    def _get_last_time_str(self):
        # Property getter: human-readable form of ``last_time`` ("" if unset).
        r = ""
        if self.last_time:
            r = convert_timestamp(self.last_time)
        return r
| 32.434783 | 90 | 0.531635 |
from __future__ import absolute_import
import time
from traits.api import HasTraits, Str, Either, Property, Float, Int, Bool, List, Enum
from traitsui.api import (
View,
VGroup,
HGroup,
UItem,
ListEditor,
InstanceEditor,
Readonly,
)
from pychron.core.helpers.datetime_tools import convert_timestamp
from pychron.dashboard.conditional import DashboardConditional
from pychron.dashboard.constants import NOERROR, CRITICAL, WARNING
class ProcessValue(HasTraits):
name = Str
units = Str
tag = Str
func_name = Str
change_threshold = Float(1e-20)
period = Either(Float, Str)
last_time = Float
last_time_str = Property(depends_on="last_time")
enabled = Bool
last_value = Float
timeout = Float
plotid = Int
conditionals = List(DashboardConditional)
flag = Enum(NOERROR, WARNING, CRITICAL)
path = Str
record = Bool(False)
display_name = Property
def is_different(self, v):
ret = None
ct = time.time()
tt = 60 * 60
threshold = self.change_threshold
if abs(self.last_value - v) > threshold or (
self.last_time and ct - self.last_time > tt
):
self.last_value = v
ret = True
return ret
def _get_display_name(self):
n = self.name
if self.units:
n = "{} ({})".format(n, self.units)
return n
def traits_view(self):
v = View(
VGroup(
HGroup(UItem("enabled"), Readonly("name")),
VGroup(
HGroup(Readonly("tag"), Readonly("period")),
HGroup(Readonly("last_time_str"), Readonly("last_value")),
VGroup(
UItem(
"conditionals",
editor=ListEditor(
editor=InstanceEditor(), style="custom", mutable=False
),
),
show_border=True,
label="Conditionals",
),
enabled_when="enabled",
),
)
)
return v
def _get_last_time_str(self):
r = ""
if self.last_time:
r = convert_timestamp(self.last_time)
return r
| true | true |
1c47b4039bfa2cc4e0a27db2b332508a8ada0804 | 1,964 | py | Python | facelib/InsightFace/models/data/data_pipe.py | ffletcherr/FaceLib | fc1b8496f90ba2c6a76bfb8a59e2e2af7a439a63 | [
"MIT"
] | null | null | null | facelib/InsightFace/models/data/data_pipe.py | ffletcherr/FaceLib | fc1b8496f90ba2c6a76bfb8a59e2e2af7a439a63 | [
"MIT"
] | null | null | null | facelib/InsightFace/models/data/data_pipe.py | ffletcherr/FaceLib | fc1b8496f90ba2c6a76bfb8a59e2e2af7a439a63 | [
"MIT"
] | null | null | null | from torch.utils.data import Dataset, ConcatDataset, DataLoader
from torchvision import transforms as trans
from torchvision.datasets import ImageFolder
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import numpy as np
def de_preprocess(tensor):
    """Invert the Normalize(mean=0.5, std=0.5) step: map [-1, 1] back to [0, 1]."""
    return 0.5 * tensor + 0.5
def get_train_dataset(imgs_folder):
    """Build an ImageFolder training dataset with flip augmentation.

    Returns (dataset, class_num).  NOTE(review): class_num is derived from
    the label of the *last* sample, which assumes ImageFolder's sorted,
    contiguous class labelling — confirm if the layout ever changes.
    """
    augmentation = trans.Compose([
        trans.RandomHorizontalFlip(),
        trans.ToTensor(),
        trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
    ])
    dataset = ImageFolder(imgs_folder, augmentation)
    num_classes = dataset[-1][1] + 1
    return dataset, num_classes
def get_train_loader(conf):
    """Build the training DataLoader for the configured data_mode.

    Supported modes: 'ms1m', 'vgg', 'concat' (ms1m + vgg with offset
    labels) and 'emore'.  Returns (loader, class_num).
    """
    if conf.data_mode in ['ms1m', 'concat']:
        ms1m_ds, ms1m_class_num = get_train_dataset(conf.ms1m_folder / 'imgs')
        print('ms1m loader generated')
    if conf.data_mode in ['vgg', 'concat']:
        vgg_ds, vgg_class_num = get_train_dataset(conf.vgg_folder / 'imgs')
        print('vgg loader generated')
    if conf.data_mode == 'vgg':
        ds = vgg_ds
        class_num = vgg_class_num
    elif conf.data_mode == 'ms1m':
        ds = ms1m_ds
        class_num = ms1m_class_num
    elif conf.data_mode == 'concat':
        # Shift vgg labels past the ms1m range so the label spaces don't collide.
        for i, (url, label) in enumerate(vgg_ds.imgs):
            vgg_ds.imgs[i] = (url, label + ms1m_class_num)
        ds = ConcatDataset([ms1m_ds, vgg_ds])
        class_num = vgg_class_num + ms1m_class_num
    elif conf.data_mode == 'emore':
        ds, class_num = get_train_dataset(conf.emore_folder / 'imgs')
    else:
        # FIX: an unknown mode previously fell through and crashed later
        # with an opaque NameError on `ds`; fail fast with a clear message.
        raise ValueError('unknown data_mode: {!r}'.format(conf.data_mode))
    loader = DataLoader(ds, batch_size=conf.batch_size, shuffle=True, pin_memory=conf.pin_memory,
                        num_workers=conf.num_workers)
    return loader, class_num
def get_val_data(data_path):
    """Load the three validation benchmarks (agedb_30, cfp_fp, lfw).

    Returns the three datasets followed by their is-same-pair label arrays.

    NOTE(review): ``get_val_pair`` is neither defined nor imported in this
    module as shown — confirm where it is meant to come from (it is likely
    a missing import from the package's utils module).
    """
    agedb_30, agedb_30_issame = get_val_pair(data_path, 'agedb_30')
    cfp_fp, cfp_fp_issame = get_val_pair(data_path, 'cfp_fp')
    lfw, lfw_issame = get_val_pair(data_path, 'lfw')
    return agedb_30, cfp_fp, lfw, agedb_30_issame, cfp_fp_issame, lfw_issame
| 34.45614 | 97 | 0.679735 | from torch.utils.data import Dataset, ConcatDataset, DataLoader
from torchvision import transforms as trans
from torchvision.datasets import ImageFolder
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import numpy as np
def de_preprocess(tensor):
return tensor * 0.5 + 0.5
def get_train_dataset(imgs_folder):
train_transform = trans.Compose([
trans.RandomHorizontalFlip(),
trans.ToTensor(),
trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
ds = ImageFolder(imgs_folder, train_transform)
class_num = ds[-1][1] + 1
return ds, class_num
def get_train_loader(conf):
if conf.data_mode in ['ms1m', 'concat']:
ms1m_ds, ms1m_class_num = get_train_dataset(conf.ms1m_folder / 'imgs')
print('ms1m loader generated')
if conf.data_mode in ['vgg', 'concat']:
vgg_ds, vgg_class_num = get_train_dataset(conf.vgg_folder / 'imgs')
print('vgg loader generated')
if conf.data_mode == 'vgg':
ds = vgg_ds
class_num = vgg_class_num
elif conf.data_mode == 'ms1m':
ds = ms1m_ds
class_num = ms1m_class_num
elif conf.data_mode == 'concat':
for i, (url, label) in enumerate(vgg_ds.imgs):
vgg_ds.imgs[i] = (url, label + ms1m_class_num)
ds = ConcatDataset([ms1m_ds, vgg_ds])
class_num = vgg_class_num + ms1m_class_num
elif conf.data_mode == 'emore':
ds, class_num = get_train_dataset(conf.emore_folder / 'imgs')
loader = DataLoader(ds, batch_size=conf.batch_size, shuffle=True, pin_memory=conf.pin_memory,
num_workers=conf.num_workers)
return loader, class_num
def get_val_data(data_path):
agedb_30, agedb_30_issame = get_val_pair(data_path, 'agedb_30')
cfp_fp, cfp_fp_issame = get_val_pair(data_path, 'cfp_fp')
lfw, lfw_issame = get_val_pair(data_path, 'lfw')
return agedb_30, cfp_fp, lfw, agedb_30_issame, cfp_fp_issame, lfw_issame
| true | true |
1c47b4fbe727de9a582c4425c5640a77c610d033 | 588 | py | Python | Challenge 1/script.py | kutyel/tuenti-challenge-6 | 63b4f1843cc55c0d409dd610a3b297c276b63a83 | [
"MIT"
] | 1 | 2016-06-27T18:28:37.000Z | 2016-06-27T18:28:37.000Z | Challenge 1/script.py | kutyel/tuenti-challenge-6 | 63b4f1843cc55c0d409dd610a3b297c276b63a83 | [
"MIT"
] | null | null | null | Challenge 1/script.py | kutyel/tuenti-challenge-6 | 63b4f1843cc55c0d409dd610a3b297c276b63a83 | [
"MIT"
] | null | null | null | from __future__ import print_function
with open('output.txt', 'w') as output:
with open('submitInput.txt', 'r') as input_:
cases = int(input_.readline())
lines = input_.readlines()
for test, line in enumerate(lines):
result = 0
people = int(line)
if people == 4:
result = 1
else:
while people > 0:
people -= 4 if result < 1 else 2
result += 1
print("Case #{0}: {1}".format(test+1, result), file=output)
| 28 | 72 | 0.472789 | from __future__ import print_function
with open('output.txt', 'w') as output:
with open('submitInput.txt', 'r') as input_:
cases = int(input_.readline())
lines = input_.readlines()
for test, line in enumerate(lines):
result = 0
people = int(line)
if people == 4:
result = 1
else:
while people > 0:
people -= 4 if result < 1 else 2
result += 1
print("Case #{0}: {1}".format(test+1, result), file=output)
| true | true |
1c47b4fd441724e07fa4f7a33443a0d5dca4808b | 1,228 | py | Python | zclassifiershiftedae/prepare_data.py | VAShibaev/text_style_transfer | 42a4a653d7c47b5f04fe8c2b043f70a28b924e1f | [
"Apache-2.0"
] | 38 | 2019-09-05T16:39:19.000Z | 2022-03-07T18:04:06.000Z | zclassifiershiftedae/prepare_data.py | VAShibaev/text_style_transfer | 42a4a653d7c47b5f04fe8c2b043f70a28b924e1f | [
"Apache-2.0"
] | 1 | 2020-12-08T05:12:29.000Z | 2020-12-08T05:12:29.000Z | zclassifiershiftedae/prepare_data.py | VAShibaev/text_style_transfer | 42a4a653d7c47b5f04fe8c2b043f70a28b924e1f | [
"Apache-2.0"
] | 5 | 2019-10-21T22:46:05.000Z | 2020-10-20T02:28:45.000Z | # -*- coding: utf-8 -*-
# It's a code from
# Toward Controlled Generation of Text, ICML2017
# Zhiting Hu, Zichao Yang, Xiaodan Liang, Ruslan Salakhutdinov, Eric Xing
# https://github.com/asyml/texar/tree/master/examples/text_style_transfer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Downloads data.
"""
import texar as tx
# pylint: disable=invalid-name
def prepare_data():
"""Downloads data.
"""
tx.data.maybe_download(
urls='https://drive.google.com/file/d/'
'1HaUKEYDBEk6GlJGmXwqYteB-4rS9q8Lg/view?usp=sharing',
path='./',
filenames='yelp.zip',
extract=True)
def main():
"""Entrypoint.
"""
prepare_data()
if __name__ == '__main__':
main()
| 27.909091 | 74 | 0.694625 |
# Toward Controlled Generation of Text, ICML2017
# Zhiting Hu, Zichao Yang, Xiaodan Liang, Ruslan Salakhutdinov, Eric Xing
# https://github.com/asyml/texar/tree/master/examples/text_style_transfer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import texar as tx
# pylint: disable=invalid-name
def prepare_data():
tx.data.maybe_download(
urls='https://drive.google.com/file/d/'
'1HaUKEYDBEk6GlJGmXwqYteB-4rS9q8Lg/view?usp=sharing',
path='./',
filenames='yelp.zip',
extract=True)
def main():
prepare_data()
if __name__ == '__main__':
main()
| true | true |
1c47b6c5780ab8f0347dbfcc2cf7a16e0039e94d | 450 | py | Python | _app/posts/serializers.py | OmarThinks/DRF-Social-Project | e012c0d9e42e07948ef2fd7e391211ecf566a79a | [
"MIT"
] | null | null | null | _app/posts/serializers.py | OmarThinks/DRF-Social-Project | e012c0d9e42e07948ef2fd7e391211ecf566a79a | [
"MIT"
] | null | null | null | _app/posts/serializers.py | OmarThinks/DRF-Social-Project | e012c0d9e42e07948ef2fd7e391211ecf566a79a | [
"MIT"
] | null | null | null | from rest_framework import serializers
from .models import Post
from comments.serializers import CommentSerializer
#from django.conf import settings
# Serializers define the API representation.
class PostSerializer(serializers.HyperlinkedModelSerializer):
#comments = CommentSerializer(many=True, read_only=True)
class Meta:
model = Post
#fields = "__all__"
fields = ('id',"author" ,'content', "comments","url")
| 25 | 61 | 0.735556 | from rest_framework import serializers
from .models import Post
from comments.serializers import CommentSerializer
class PostSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Post
fields = ('id',"author" ,'content', "comments","url")
| true | true |
1c47b7a20cbcab7a8a56ae19a8d8c0cabb9a422d | 577 | py | Python | Class Work/composing-methods-more/burger_toppings.py | Pondorasti/SPD-2.3 | 42728c1f2dfc371fb6bdf1ba008c5d41266f2fa8 | [
"MIT"
] | null | null | null | Class Work/composing-methods-more/burger_toppings.py | Pondorasti/SPD-2.3 | 42728c1f2dfc371fb6bdf1ba008c5d41266f2fa8 | [
"MIT"
] | null | null | null | Class Work/composing-methods-more/burger_toppings.py | Pondorasti/SPD-2.3 | 42728c1f2dfc371fb6bdf1ba008c5d41266f2fa8 | [
"MIT"
] | null | null | null | # by Kami Bigdely
# Split temporary variable
patty = 70 # [gr]
pickle = 20 # [gr]
tomatoes = 25 # [gr]
lettuce = 15 # [gr]
buns = 95 # [gr]
ny_burger_weight = (2 * patty + 4 * pickle + 3 *
tomatoes + 2 * lettuce + 2 * buns)
print("NY Burger Weight", ny_burger_weight)
kimchi = 30 # [gr]
mayo = 5 # [gr]
golden_fried_onion = 20 # [gr]
seoul_kimchi_burger_weight = (2 * patty + 4 * pickle + 3 * tomatoes
+ kimchi + mayo + golden_fried_onion + 2 * buns)
print("Seoul Kimchi Burger Weight", seoul_kimchi_burger_weight)
| 27.47619 | 78 | 0.59792 |
patty = 70
pickle = 20
tomatoes = 25
lettuce = 15
buns = 95
ny_burger_weight = (2 * patty + 4 * pickle + 3 *
tomatoes + 2 * lettuce + 2 * buns)
print("NY Burger Weight", ny_burger_weight)
kimchi = 30
mayo = 5
golden_fried_onion = 20
seoul_kimchi_burger_weight = (2 * patty + 4 * pickle + 3 * tomatoes
+ kimchi + mayo + golden_fried_onion + 2 * buns)
print("Seoul Kimchi Burger Weight", seoul_kimchi_burger_weight)
| true | true |
1c47b7b8a1f8b36aa064bd1292aa46d379b22d4a | 67 | py | Python | ApplicationServer/descriptors/__init__.py | paltmey/scias | 9006b85ad5a0084d7501413649e0679ba8adbe63 | [
"MIT"
] | null | null | null | ApplicationServer/descriptors/__init__.py | paltmey/scias | 9006b85ad5a0084d7501413649e0679ba8adbe63 | [
"MIT"
] | null | null | null | ApplicationServer/descriptors/__init__.py | paltmey/scias | 9006b85ad5a0084d7501413649e0679ba8adbe63 | [
"MIT"
] | null | null | null | from calculateDescriptors_cython import calculateDescriptors_cython | 67 | 67 | 0.955224 | from calculateDescriptors_cython import calculateDescriptors_cython | true | true |
1c47b8b7abc09b5031051f41169039d786791bfa | 10,082 | py | Python | configs/vrd/VRD_SgDet_heth_area_gnn_faster_rcnn_x101_64x4d_fpn_1x.py | yizhe-ang/MMSceneGraph | d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba | [
"MIT"
] | 24 | 2021-10-14T03:28:28.000Z | 2022-03-29T09:30:04.000Z | configs/vrd/VRD_SgDet_heth_area_gnn_faster_rcnn_x101_64x4d_fpn_1x.py | yizhe-ang/MMSceneGraph | d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba | [
"MIT"
] | 4 | 2021-12-14T15:04:49.000Z | 2022-02-19T09:54:42.000Z | configs/vrd/VRD_SgDet_heth_area_gnn_faster_rcnn_x101_64x4d_fpn_1x.py | yizhe-ang/MMSceneGraph | d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba | [
"MIT"
] | 4 | 2021-10-31T11:23:06.000Z | 2021-12-17T06:38:50.000Z | # dataset settings
dataset_type = 'VrdDataset'
data_root = 'data/vrd/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_rel=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_rels', 'gt_relmaps']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
# Since the forward process may need gt info, annos must be loaded.
dict(type='LoadAnnotations', with_bbox=True, with_rel=True),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
# NOTE: Do not change the img to DC.
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['gt_bboxes', 'gt_labels']),
dict(type='ToDataContainer', fields=(dict(key='gt_bboxes'), dict(key='gt_labels'))),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
])
]
data = dict(
imgs_per_gpu=8,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'sg_annotations/train_sgs.json',
dict_file=data_root + 'sg_annotations/labels.json',
image_file=data_root + 'sg_annotations/train_images.json',
pipeline=train_pipeline,
num_im=-1,
split='train',
img_prefix=data_root + 'sg_dataset/sg_train_images'),
val=dict(
type=dataset_type,
ann_file=data_root + 'sg_annotations/test_sgs.json',
dict_file=data_root + 'sg_annotations/labels.json',
image_file=data_root + 'sg_annotations/test_images.json',
pipeline=test_pipeline,
num_im=-1,
split='val',
img_prefix=data_root + 'sg_dataset/sg_test_images/'),
test=dict(
type=dataset_type,
ann_file=data_root + 'sg_annotations/test_sgs.json',
dict_file=data_root + 'sg_annotations/labels.json',
image_file=data_root + 'sg_annotations/test_images.json',
pipeline=test_pipeline,
num_im=-1,
split='test',
img_prefix=data_root + 'sg_dataset/sg_test_images/'))
# model settings
dataset_config = data['train'].copy()
dataset_config.update(dict(cache=data_root + 'VRD_statistics.cache'))
model = dict(
type='FasterRCNN',
pretrained='checkpoints/mmlab/imnet/resnext101_64x4d-ee2c6f71.pth',
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=101,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
relation_head=dict(
type='HETHead',
dataset_config=dataset_config,
num_classes=101,
num_predicates=71,
use_bias=True,
head_config=dict(
use_gt_box=False,
use_gt_label=False,
use_vision=True,
embed_dim=200,
hidden_dim=512,
roi_dim=1024,
context_pooling_dim=4096,
dropout_rate=0.2,
context_object_layer=1,
context_edge_layer=2,
glove_dir='data/glove/',
pick_parent='area',
isc_thresh=0.9,
child_order='confidence',
chain_style='GNN',
causal_effect_analysis=False),
bbox_roi_extractor=dict(
type='VisualSpatialExtractor',
bbox_roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
# mask_roi_layer=dict(type='ShapeAwareRoIAlign', out_size=7, sample_num=2),
with_visual_bbox=True,
with_visual_mask=False,
with_visual_point=False,
with_spatial=False,
in_channels=256,
fc_out_channels=1024,
featmap_strides=[4, 8, 16, 32]),
relation_roi_extractor=dict(
type='VisualSpatialExtractor',
bbox_roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
# mask_roi_layer=dict(type='ShapeAwareRoIAlign', out_size=7, sample_num=2),
with_visual_bbox=True,
with_visual_mask=False,
with_visual_point=False,
with_spatial=True,
separate_spatial=False,
in_channels=256,
fc_out_channels=1024,
featmap_strides=[4, 8, 16, 32]),
relation_sampler=dict(
type='Motif',
pos_iou_thr=0.5,
require_overlap=False, # for sgdet training, not require
num_sample_per_gt_rel=4,
num_rel_per_image=1024,
pos_fraction=0.25,
test_overlap=True # for testing
),
loss_object=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_relation=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=50, # Follow the setting in TDE, 80 Bboxes are selected.
mask_thr_binary=0.5,
rle_mask_encode=False, # do not transform the mask into rle.
crop_mask=True, # so that the mask shape is the same as bbox, instead of image shape
format_mask_result=False, # do not transform to the result format like bbox
to_tensor=True))
find_unused_parameters = True
evaluation = dict(interval=1, metric='sgdet', relation_mode=True, classwise=True, nogc_thres_num=[10, 70])
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001,
freeze_modules=['backbone', 'neck', 'rpn_head', 'bbox_head', 'mask_head'])
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=50,
warmup_ratio=1.0 / 3,
step=[7, 10])
checkpoint_config = dict(interval=1)
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './new_experiments/VRD_SgDet_heth_area_gnn_faster_rcnn_x101_64x4d_fpn_1x'
load_from = './experiments/VRD_Detection_faster_rcnn_x101_64x4d_fpn_1x_ftCOCO/latest.pth'
# load_mapping = dict(align_dict={'relation_head.bbox_roi_extractor.visual_bbox_head': 'bbox_head.shared_fcs',
# 'relation_head.relation_roi_extractor.visual_bbox_head': 'bbox_head.shared_fcs'})
resume_from = None
workflow = [('train', 1), ('val', 1)]
# yapf:disable
log_config = dict(
interval=10,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook'),
dict(type='WandbLoggerHook',
init_kwargs=dict(
project=work_dir.split('/')[-1],
name='train-1',
config=work_dir + '/cfg.yaml'))
])
| 36.930403 | 115 | 0.587681 |
dataset_type = 'VrdDataset'
data_root = 'data/vrd/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_rel=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_rels', 'gt_relmaps']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_rel=True),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['gt_bboxes', 'gt_labels']),
dict(type='ToDataContainer', fields=(dict(key='gt_bboxes'), dict(key='gt_labels'))),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
])
]
data = dict(
imgs_per_gpu=8,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'sg_annotations/train_sgs.json',
dict_file=data_root + 'sg_annotations/labels.json',
image_file=data_root + 'sg_annotations/train_images.json',
pipeline=train_pipeline,
num_im=-1,
split='train',
img_prefix=data_root + 'sg_dataset/sg_train_images'),
val=dict(
type=dataset_type,
ann_file=data_root + 'sg_annotations/test_sgs.json',
dict_file=data_root + 'sg_annotations/labels.json',
image_file=data_root + 'sg_annotations/test_images.json',
pipeline=test_pipeline,
num_im=-1,
split='val',
img_prefix=data_root + 'sg_dataset/sg_test_images/'),
test=dict(
type=dataset_type,
ann_file=data_root + 'sg_annotations/test_sgs.json',
dict_file=data_root + 'sg_annotations/labels.json',
image_file=data_root + 'sg_annotations/test_images.json',
pipeline=test_pipeline,
num_im=-1,
split='test',
img_prefix=data_root + 'sg_dataset/sg_test_images/'))
dataset_config = data['train'].copy()
dataset_config.update(dict(cache=data_root + 'VRD_statistics.cache'))
model = dict(
type='FasterRCNN',
pretrained='checkpoints/mmlab/imnet/resnext101_64x4d-ee2c6f71.pth',
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=101,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
relation_head=dict(
type='HETHead',
dataset_config=dataset_config,
num_classes=101,
num_predicates=71,
use_bias=True,
head_config=dict(
use_gt_box=False,
use_gt_label=False,
use_vision=True,
embed_dim=200,
hidden_dim=512,
roi_dim=1024,
context_pooling_dim=4096,
dropout_rate=0.2,
context_object_layer=1,
context_edge_layer=2,
glove_dir='data/glove/',
pick_parent='area',
isc_thresh=0.9,
child_order='confidence',
chain_style='GNN',
causal_effect_analysis=False),
bbox_roi_extractor=dict(
type='VisualSpatialExtractor',
bbox_roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
with_visual_bbox=True,
with_visual_mask=False,
with_visual_point=False,
with_spatial=False,
in_channels=256,
fc_out_channels=1024,
featmap_strides=[4, 8, 16, 32]),
relation_roi_extractor=dict(
type='VisualSpatialExtractor',
bbox_roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
with_visual_bbox=True,
with_visual_mask=False,
with_visual_point=False,
with_spatial=True,
separate_spatial=False,
in_channels=256,
fc_out_channels=1024,
featmap_strides=[4, 8, 16, 32]),
relation_sampler=dict(
type='Motif',
pos_iou_thr=0.5,
require_overlap=False,
num_sample_per_gt_rel=4,
num_rel_per_image=1024,
pos_fraction=0.25,
test_overlap=True
),
loss_object=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_relation=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=50,
mask_thr_binary=0.5,
rle_mask_encode=False,
crop_mask=True,
format_mask_result=False,
to_tensor=True))
find_unused_parameters = True
evaluation = dict(interval=1, metric='sgdet', relation_mode=True, classwise=True, nogc_thres_num=[10, 70])
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001,
freeze_modules=['backbone', 'neck', 'rpn_head', 'bbox_head', 'mask_head'])
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=50,
warmup_ratio=1.0 / 3,
step=[7, 10])
checkpoint_config = dict(interval=1)
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './new_experiments/VRD_SgDet_heth_area_gnn_faster_rcnn_x101_64x4d_fpn_1x'
load_from = './experiments/VRD_Detection_faster_rcnn_x101_64x4d_fpn_1x_ftCOCO/latest.pth'
resume_from = None
workflow = [('train', 1), ('val', 1)]
log_config = dict(
interval=10,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook'),
dict(type='WandbLoggerHook',
init_kwargs=dict(
project=work_dir.split('/')[-1],
name='train-1',
config=work_dir + '/cfg.yaml'))
])
| true | true |
1c47b9e4144242f50539d655abe8afb3386e443d | 1,209 | py | Python | examples/MERAOpt.py | vnechaev/QGOpt | 697f02d89df67a576cd6953ffdd2db62970727da | [
"Apache-2.0"
] | null | null | null | examples/MERAOpt.py | vnechaev/QGOpt | 697f02d89df67a576cd6953ffdd2db62970727da | [
"Apache-2.0"
] | null | null | null | examples/MERAOpt.py | vnechaev/QGOpt | 697f02d89df67a576cd6953ffdd2db62970727da | [
"Apache-2.0"
] | null | null | null | import QGOpt.manifolds as m
from tensorflow.python.keras.optimizer_v2 import optimizer_v2 as opt
import tensorflow as tf
def adj(A):
"""Correct adjoint
Args:
A: tf.tensor of shape (..., n, m)
Returns:
tf tensor of shape (..., m, n), adjoint matrix"""
return tf.math.conj(tf.linalg.matrix_transpose(A))
class MERAOpt(opt.OptimizerV2):
def __init__(self,
name="Fast"):
"""Constructs a new MERA inspired optimizer.
Returns:
object of class MERAOpt"""
super(MERAOpt, self).__init__(name)
def _create_slots(self, var_list):
# MERAOpt does not need slots
pass
def _resource_apply_dense(self, grad, var):
# Complex version of grad
complex_grad = m.real_to_complex(grad)
# MERA like update
_, u, v = tf.linalg.svd(adj(complex_grad))
var.assign(m.convert.complex_to_real(-v @ adj(u)))
def _resource_apply_sparse(self, grad, var):
raise NotImplementedError("Sparse gradient updates are not supported.")
def get_config(self):
config = super(MERAOpt, self).get_config()
config.update({
})
return config
| 25.723404 | 79 | 0.623656 | import QGOpt.manifolds as m
from tensorflow.python.keras.optimizer_v2 import optimizer_v2 as opt
import tensorflow as tf
def adj(A):
return tf.math.conj(tf.linalg.matrix_transpose(A))
class MERAOpt(opt.OptimizerV2):
def __init__(self,
name="Fast"):
super(MERAOpt, self).__init__(name)
def _create_slots(self, var_list):
pass
def _resource_apply_dense(self, grad, var):
complex_grad = m.real_to_complex(grad)
_, u, v = tf.linalg.svd(adj(complex_grad))
var.assign(m.convert.complex_to_real(-v @ adj(u)))
def _resource_apply_sparse(self, grad, var):
raise NotImplementedError("Sparse gradient updates are not supported.")
def get_config(self):
config = super(MERAOpt, self).get_config()
config.update({
})
return config
| true | true |
1c47b9f5723d75dc27d382fcc620139929908569 | 5,099 | py | Python | sdk/AsposeEmailCloudSdk/models/object_exist.py | aspose-email-cloud/aspose-email-cloud-python | c5c13839cbbbfa5b6617bd1aedf3cf30cd664227 | [
"MIT"
] | 1 | 2020-02-26T13:19:06.000Z | 2020-02-26T13:19:06.000Z | sdk/AsposeEmailCloudSdk/models/object_exist.py | aspose-email-cloud/aspose-email-cloud-python | c5c13839cbbbfa5b6617bd1aedf3cf30cd664227 | [
"MIT"
] | null | null | null | sdk/AsposeEmailCloudSdk/models/object_exist.py | aspose-email-cloud/aspose-email-cloud-python | c5c13839cbbbfa5b6617bd1aedf3cf30cd664227 | [
"MIT"
] | null | null | null | # coding: utf-8
# ----------------------------------------------------------------------------
# <copyright company="Aspose" file="ObjectExist.py">
# Copyright (c) 2018-2020 Aspose Pty Ltd. All rights reserved.
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# </summary>
# ----------------------------------------------------------------------------
import pprint
import re
import six
from typing import List, Set, Dict, Tuple, Optional
from datetime import datetime
class ObjectExist(object):
"""Object exists
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'exists': 'bool',
'is_folder': 'bool'
}
attribute_map = {
'exists': 'exists',
'is_folder': 'isFolder'
}
def __init__(self, exists: bool = None, is_folder: bool = None):
"""
Object exists
:param exists: Indicates that the file or folder exists.
:type exists: bool
:param is_folder: True if it is a folder, false if it is a file.
:type is_folder: bool
"""
self._exists = None
self._is_folder = None
if exists is not None:
self.exists = exists
if is_folder is not None:
self.is_folder = is_folder
@property
def exists(self) -> bool:
"""
Indicates that the file or folder exists.
:return: The exists of this ObjectExist.
:rtype: bool
"""
return self._exists
@exists.setter
def exists(self, exists: bool):
"""
Indicates that the file or folder exists.
:param exists: The exists of this ObjectExist.
:type: bool
"""
if exists is None:
raise ValueError("Invalid value for `exists`, must not be `None`")
self._exists = exists
@property
def is_folder(self) -> bool:
"""
True if it is a folder, false if it is a file.
:return: The is_folder of this ObjectExist.
:rtype: bool
"""
return self._is_folder
@is_folder.setter
def is_folder(self, is_folder: bool):
"""
True if it is a folder, false if it is a file.
:param is_folder: The is_folder of this ObjectExist.
:type: bool
"""
if is_folder is None:
raise ValueError("Invalid value for `is_folder`, must not be `None`")
self._is_folder = is_folder
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ObjectExist):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 32.069182 | 81 | 0.576388 |
import pprint
import re
import six
from typing import List, Set, Dict, Tuple, Optional
from datetime import datetime
class ObjectExist(object):
swagger_types = {
'exists': 'bool',
'is_folder': 'bool'
}
attribute_map = {
'exists': 'exists',
'is_folder': 'isFolder'
}
def __init__(self, exists: bool = None, is_folder: bool = None):
self._exists = None
self._is_folder = None
if exists is not None:
self.exists = exists
if is_folder is not None:
self.is_folder = is_folder
@property
def exists(self) -> bool:
return self._exists
@exists.setter
def exists(self, exists: bool):
if exists is None:
raise ValueError("Invalid value for `exists`, must not be `None`")
self._exists = exists
@property
def is_folder(self) -> bool:
return self._is_folder
@is_folder.setter
def is_folder(self, is_folder: bool):
if is_folder is None:
raise ValueError("Invalid value for `is_folder`, must not be `None`")
self._is_folder = is_folder
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ObjectExist):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
1c47ba7cf688f8310a64c27916a4b31c58e71077 | 178 | py | Python | PythonDocs/src/002.py | Bean-jun/LearnGuide | 30a8567b222d18b15d3e9027a435b5bfe640a046 | [
"MIT"
] | 1 | 2022-02-23T13:42:01.000Z | 2022-02-23T13:42:01.000Z | PythonDocs/src/002.py | Bean-jun/LearnGuide | 30a8567b222d18b15d3e9027a435b5bfe640a046 | [
"MIT"
] | null | null | null | PythonDocs/src/002.py | Bean-jun/LearnGuide | 30a8567b222d18b15d3e9027a435b5bfe640a046 | [
"MIT"
] | null | null | null | # 单个变量赋值
name = "小明"
print(name)
# 多变量赋统一值
tom_age = jerry_age = 10
print(f"tom的年龄为{tom_age}, jerry的年龄为{jerry_age}")
# 多个变量赋不同值
name, age = "小明", 23
print(f"{name}的年龄是{age}岁")
| 14.833333 | 48 | 0.679775 |
name = "小明"
print(name)
tom_age = jerry_age = 10
print(f"tom的年龄为{tom_age}, jerry的年龄为{jerry_age}")
name, age = "小明", 23
print(f"{name}的年龄是{age}岁")
| true | true |
1c47bc9b26db9cf25c8c537f793dfeaff97f5c14 | 4,813 | py | Python | homeassistant/components/ecobee/sensor.py | ottersen/home-assistant | 7a57c3a66af0e47cb6a1f9971dd2b14e6acae1bf | [
"Apache-2.0"
] | 2 | 2017-06-18T15:09:59.000Z | 2017-06-18T15:11:09.000Z | homeassistant/components/ecobee/sensor.py | ottersen/home-assistant | 7a57c3a66af0e47cb6a1f9971dd2b14e6acae1bf | [
"Apache-2.0"
] | null | null | null | homeassistant/components/ecobee/sensor.py | ottersen/home-assistant | 7a57c3a66af0e47cb6a1f9971dd2b14e6acae1bf | [
"Apache-2.0"
] | null | null | null | """Support for Ecobee sensors."""
from pyecobee.const import ECOBEE_STATE_CALIBRATING, ECOBEE_STATE_UNKNOWN
from homeassistant.const import (
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers.entity import Entity
from .const import DOMAIN, ECOBEE_MODEL_TO_NAME, MANUFACTURER, _LOGGER
SENSOR_TYPES = {
"temperature": ["Temperature", TEMP_FAHRENHEIT],
"humidity": ["Humidity", "%"],
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Old way of setting up ecobee sensors."""
pass
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up ecobee (temperature and humidity) sensors."""
data = hass.data[DOMAIN]
dev = list()
for index in range(len(data.ecobee.thermostats)):
for sensor in data.ecobee.get_remote_sensors(index):
for item in sensor["capability"]:
if item["type"] not in ("temperature", "humidity"):
continue
dev.append(EcobeeSensor(data, sensor["name"], item["type"], index))
async_add_entities(dev, True)
class EcobeeSensor(Entity):
    """Representation of an Ecobee sensor."""
    def __init__(self, data, sensor_name, sensor_type, sensor_index):
        """Initialize the sensor.

        data -- shared ecobee data object (hass.data[DOMAIN])
        sensor_name -- sensor name as reported by the ecobee API
        sensor_type -- "temperature" or "humidity" (a SENSOR_TYPES key)
        sensor_index -- index of the owning thermostat in the ecobee data
        """
        self.data = data
        # Friendly name: "<sensor name> <Temperature|Humidity>".
        self._name = "{} {}".format(sensor_name, SENSOR_TYPES[sensor_type][0])
        self.sensor_name = sensor_name
        self.type = sensor_type
        self.index = sensor_index
        # Raw value from the ecobee API; refreshed by async_update.
        self._state = None
        self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
    @property
    def name(self):
        """Return the name of the Ecobee sensor."""
        return self._name
    @property
    def unique_id(self):
        """Return a unique identifier for this sensor.

        Sensors with a "code" entry use it directly; otherwise the id is
        derived from the owning thermostat.  Implicitly returns None if the
        sensor is no longer reported by the API.
        """
        for sensor in self.data.ecobee.get_remote_sensors(self.index):
            if sensor["name"] == self.sensor_name:
                if "code" in sensor:
                    return f"{sensor['code']}-{self.device_class}"
                thermostat = self.data.ecobee.get_thermostat(self.index)
                return f"{thermostat['identifier']}-{sensor['id']}-{self.device_class}"
    @property
    def device_info(self):
        """Return device information for this sensor, or None if unknown."""
        identifier = None
        model = None
        for sensor in self.data.ecobee.get_remote_sensors(self.index):
            if sensor["name"] != self.sensor_name:
                continue
            # A "code" entry marks a separate room sensor; anything else is
            # treated as a sensor built into the thermostat itself.
            if "code" in sensor:
                identifier = sensor["code"]
                model = "ecobee Room Sensor"
            else:
                thermostat = self.data.ecobee.get_thermostat(self.index)
                identifier = thermostat["identifier"]
                try:
                    model = (
                        f"{ECOBEE_MODEL_TO_NAME[thermostat['modelNumber']]} Thermostat"
                    )
                except KeyError:
                    # Unknown model number: ask the user to report it so the
                    # mapping can be extended; device info is omitted below.
                    _LOGGER.error(
                        "Model number for ecobee thermostat %s not recognized. "
                        "Please visit this link and provide the following information: "
                        "https://github.com/home-assistant/home-assistant/issues/27172 "
                        "Unrecognized model number: %s",
                        thermostat["name"],
                        thermostat["modelNumber"],
                    )
            break
        if identifier is not None and model is not None:
            return {
                "identifiers": {(DOMAIN, identifier)},
                "name": self.sensor_name,
                "manufacturer": MANUFACTURER,
                "model": model,
            }
        return None
    @property
    def device_class(self):
        """Return the device class of the sensor."""
        if self.type in (DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_TEMPERATURE):
            return self.type
        return None
    @property
    def state(self):
        """Return the state of the sensor."""
        # Report no state while the sensor is calibrating or unknown.
        if self._state in [ECOBEE_STATE_CALIBRATING, ECOBEE_STATE_UNKNOWN]:
            return None
        if self.type == "temperature":
            # The raw API value appears to be in tenths of a degree
            # (divided by 10 here) -- TODO confirm against the ecobee API.
            return float(self._state) / 10
        return self._state
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement this sensor expresses itself in."""
        return self._unit_of_measurement
    async def async_update(self):
        """Get the latest state of the sensor."""
        await self.data.update()
        # Locate this sensor's matching capability entry and cache its value.
        for sensor in self.data.ecobee.get_remote_sensors(self.index):
            for item in sensor["capability"]:
                if item["type"] == self.type and self.sensor_name == sensor["name"]:
                    self._state = item["value"]
| 35.651852 | 88 | 0.5909 | from pyecobee.const import ECOBEE_STATE_CALIBRATING, ECOBEE_STATE_UNKNOWN
from homeassistant.const import (
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers.entity import Entity
from .const import DOMAIN, ECOBEE_MODEL_TO_NAME, MANUFACTURER, _LOGGER
SENSOR_TYPES = {
"temperature": ["Temperature", TEMP_FAHRENHEIT],
"humidity": ["Humidity", "%"],
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
pass
async def async_setup_entry(hass, config_entry, async_add_entities):
data = hass.data[DOMAIN]
dev = list()
for index in range(len(data.ecobee.thermostats)):
for sensor in data.ecobee.get_remote_sensors(index):
for item in sensor["capability"]:
if item["type"] not in ("temperature", "humidity"):
continue
dev.append(EcobeeSensor(data, sensor["name"], item["type"], index))
async_add_entities(dev, True)
class EcobeeSensor(Entity):
def __init__(self, data, sensor_name, sensor_type, sensor_index):
self.data = data
self._name = "{} {}".format(sensor_name, SENSOR_TYPES[sensor_type][0])
self.sensor_name = sensor_name
self.type = sensor_type
self.index = sensor_index
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def name(self):
return self._name
@property
def unique_id(self):
for sensor in self.data.ecobee.get_remote_sensors(self.index):
if sensor["name"] == self.sensor_name:
if "code" in sensor:
return f"{sensor['code']}-{self.device_class}"
thermostat = self.data.ecobee.get_thermostat(self.index)
return f"{thermostat['identifier']}-{sensor['id']}-{self.device_class}"
@property
def device_info(self):
identifier = None
model = None
for sensor in self.data.ecobee.get_remote_sensors(self.index):
if sensor["name"] != self.sensor_name:
continue
if "code" in sensor:
identifier = sensor["code"]
model = "ecobee Room Sensor"
else:
thermostat = self.data.ecobee.get_thermostat(self.index)
identifier = thermostat["identifier"]
try:
model = (
f"{ECOBEE_MODEL_TO_NAME[thermostat['modelNumber']]} Thermostat"
)
except KeyError:
_LOGGER.error(
"Model number for ecobee thermostat %s not recognized. "
"Please visit this link and provide the following information: "
"https://github.com/home-assistant/home-assistant/issues/27172 "
"Unrecognized model number: %s",
thermostat["name"],
thermostat["modelNumber"],
)
break
if identifier is not None and model is not None:
return {
"identifiers": {(DOMAIN, identifier)},
"name": self.sensor_name,
"manufacturer": MANUFACTURER,
"model": model,
}
return None
@property
def device_class(self):
if self.type in (DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_TEMPERATURE):
return self.type
return None
@property
def state(self):
if self._state in [ECOBEE_STATE_CALIBRATING, ECOBEE_STATE_UNKNOWN]:
return None
if self.type == "temperature":
return float(self._state) / 10
return self._state
@property
def unit_of_measurement(self):
return self._unit_of_measurement
async def async_update(self):
await self.data.update()
for sensor in self.data.ecobee.get_remote_sensors(self.index):
for item in sensor["capability"]:
if item["type"] == self.type and self.sensor_name == sensor["name"]:
self._state = item["value"]
| true | true |
1c47bca4679357156bbce5a4240e93b0d106e17f | 1,959 | py | Python | pycontracts/forward_solidity.py | rpip/contracts | a2d831e1ac4a728bc7342f8d2856bdeb79c37cc4 | [
"MIT"
] | null | null | null | pycontracts/forward_solidity.py | rpip/contracts | a2d831e1ac4a728bc7342f8d2856bdeb79c37cc4 | [
"MIT"
] | null | null | null | pycontracts/forward_solidity.py | rpip/contracts | a2d831e1ac4a728bc7342f8d2856bdeb79c37cc4 | [
"MIT"
] | 4 | 2019-02-01T13:46:47.000Z | 2020-01-17T00:46:44.000Z | from web3 import Web3
from pycontracts import contracts
from pycontracts.forward import Forward, CallReverted
class ForwardSolidity(Forward):
    """Forward implementation backed by the deployed Solidity contract."""
    def __init__(self, contract, owner = None):
        """Wrap an instantiated web3 contract object.

        owner may be supplied to skip the on-chain lookup; when omitted it
        is fetched lazily by the `owner` property.
        """
        self.contract = contract
        super().__init__(contract.address)
        self._owner = owner
    @staticmethod
    def wrap(w3, address, owner = None):
        """Attach to an already-deployed Forward contract at `address`."""
        return ForwardSolidity(
            contract = w3.eth.contract(
                address = address,
                abi = contracts['Forward']['abi'],
            ),
            owner = owner
        )
    @staticmethod
    def deploy(w3, owner, originator = None):
        """Deploy a new Forward contract owned by `owner`.

        The deployment transaction is sent from `originator` (defaulting to
        w3.eth.defaultAccount) and this call blocks until it is mined.
        """
        c = w3.eth.contract(
            bytecode = contracts['Forward']['deploy'],
            abi = contracts['Forward']['abi'],
        )
        tx_hash = c.constructor(owner).transact({
            'from': originator or w3.eth.defaultAccount,
        })
        r = w3.eth.waitForTransactionReceipt(tx_hash)
        return ForwardSolidity.wrap(w3, r.contractAddress, owner = owner)
    @property
    def owner(self):
        """Owner address, fetched from the contract on demand and cached."""
        if not self._owner:
            self._owner = self.contract.functions.getOwner().call()
        return self._owner
    def nonce(self):
        """Return the contract's current nonce (as reported by getNonce)."""
        return self.contract.functions.getNonce().call()
    def _build(self, call):
        """Build the bound forward(...) contract function for `call`.

        The signature's recovery id is offset by 27, matching Ethereum's
        conventional v encoding (v in {27, 28}).
        """
        return self.contract.functions.forward(
            27 + call.signature.v,
            call.signature.r.to_bytes(32, "big"),
            call.signature.s.to_bytes(32, "big"),
            call.target, call.value, call.data
        )
    def build(self, call):
        """Return the raw calldata bytes encoding the forward(...) invocation."""
        t = self._build(call).buildTransaction({"nonce": 0, "gas": 0, "gasPrice": 0})
        return Web3.toBytes(hexstr = t["data"])
    def transact(self, call, originator):
        """Submit the forwarded call on-chain from `originator`."""
        return self._build(call).transact({ 'from': originator })
    def call(self, call, type=bytes):
        """Simulate the forwarded call (eth_call) and decode its result.

        Delegates to _handle_result (defined on the base class), which is
        presumably where CallReverted is raised on failure -- confirm there.
        """
        success, return_data = self._build(call).call()
        return self._handle_result(success, return_data, call, type)
| 32.114754 | 85 | 0.600306 | from web3 import Web3
from pycontracts import contracts
from pycontracts.forward import Forward, CallReverted
class ForwardSolidity(Forward):
def __init__(self, contract, owner = None):
self.contract = contract
super().__init__(contract.address)
self._owner = owner
@staticmethod
def wrap(w3, address, owner = None):
return ForwardSolidity(
contract = w3.eth.contract(
address = address,
abi = contracts['Forward']['abi'],
),
owner = owner
)
@staticmethod
def deploy(w3, owner, originator = None):
c = w3.eth.contract(
bytecode = contracts['Forward']['deploy'],
abi = contracts['Forward']['abi'],
)
tx_hash = c.constructor(owner).transact({
'from': originator or w3.eth.defaultAccount,
})
r = w3.eth.waitForTransactionReceipt(tx_hash)
return ForwardSolidity.wrap(w3, r.contractAddress, owner = owner)
@property
def owner(self):
if not self._owner:
self._owner = self.contract.functions.getOwner().call()
return self._owner
def nonce(self):
return self.contract.functions.getNonce().call()
def _build(self, call):
return self.contract.functions.forward(
27 + call.signature.v,
call.signature.r.to_bytes(32, "big"),
call.signature.s.to_bytes(32, "big"),
call.target, call.value, call.data
)
def build(self, call):
t = self._build(call).buildTransaction({"nonce": 0, "gas": 0, "gasPrice": 0})
return Web3.toBytes(hexstr = t["data"])
def transact(self, call, originator):
return self._build(call).transact({ 'from': originator })
def call(self, call, type=bytes):
success, return_data = self._build(call).call()
return self._handle_result(success, return_data, call, type)
| true | true |
1c47bcf3b91293c8818a278695ef22bba118cc44 | 605 | py | Python | setup.py | lmijovic/pylhe | afd270044a5c37fec409daa1be45e67ac5fe9c82 | [
"Apache-2.0"
] | 1 | 2020-05-18T17:25:58.000Z | 2020-05-18T17:25:58.000Z | setup.py | 8me/pylhe | a165fba7f9cda1d3f28ae679e41571d52534dc9d | [
"Apache-2.0"
] | null | null | null | setup.py | 8me/pylhe | a165fba7f9cda1d3f28ae679e41571d52534dc9d | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
# Optional dependency groups exposed as pip "extras".
extras_require = {
    "test": [
        "pytest",
        "pytest-cov>=2.5.1",
        "scikit-hep-testdata>=0.3.1",
        "pydocstyle",
        "check-manifest",
        "flake8",
    ],
}
# The environment marker keeps black off interpreters older than 3.6.
extras_require["lint"] = sorted({"pyflakes", "black;python_version>='3.6'"})
extras_require["develop"] = sorted(
    {*extras_require["test"], "pre-commit", "check-manifest", "twine"}
)
# "complete" is the union of every group defined so far.
extras_require["complete"] = sorted(
    {req for reqs in extras_require.values() for req in reqs}
)
setup(
    extras_require=extras_require,
    use_scm_version=lambda: {"local_scheme": lambda version: ""},
)
| 26.304348 | 81 | 0.618182 | from setuptools import setup
extras_require = {
"test": [
"pytest",
"pytest-cov>=2.5.1",
"scikit-hep-testdata>=0.3.1",
"pydocstyle",
"check-manifest",
"flake8",
],
}
extras_require["lint"] = sorted(set(["pyflakes", "black;python_version>='3.6'"]))
extras_require["develop"] = sorted(
set(extras_require["test"] + ["pre-commit", "check-manifest", "twine"])
)
extras_require["complete"] = sorted(set(sum(extras_require.values(), [])))
setup(
extras_require=extras_require,
use_scm_version=lambda: {"local_scheme": lambda version: ""},
)
| true | true |
1c47bd99ea1abbad60f1ccb8e2ccf3f9e0e37943 | 7,863 | py | Python | tests/vhdl/test_decoder.py | jvanstraten/vhdmmio | f166b07074a9159311a01af88497df91c19e09d1 | [
"Apache-2.0"
] | 4 | 2019-07-01T14:41:38.000Z | 2021-11-28T12:54:49.000Z | tests/vhdl/test_decoder.py | jvanstraten/vhdmmio | f166b07074a9159311a01af88497df91c19e09d1 | [
"Apache-2.0"
] | 4 | 2019-08-23T15:05:24.000Z | 2020-12-16T10:02:20.000Z | tests/vhdl/test_decoder.py | jvanstraten/vhdmmio | f166b07074a9159311a01af88497df91c19e09d1 | [
"Apache-2.0"
] | 1 | 2021-07-16T13:41:21.000Z | 2021-07-16T13:41:21.000Z | """Unit tests for the VHDL address decoder generator."""
from unittest import TestCase
from vhdmmio.vhdl.address_decoder import AddressDecoder
from vhdmmio.core.address import MaskedAddress
from vhdmmio.template import TemplateEngine
class TestVhdlDecoder(TestCase):
    """Unit tests for the VHDL address decoder generator."""
    # Show full diffs for the long multi-line VHDL comparisons.
    maxDiff = None
    def _test_decoder(self, addresses, match=None,
                      optimize=False, allow_overlap=False, allow_duplicate=False):
        """Build a 32-bit decoder for ``addresses`` and optionally check it.

        Each entry of ``addresses`` is a ``MaskedAddress`` config spec
        (e.g. ``'8|3'`` -- presumably address plus don't-care mask; see
        ``MaskedAddress.parse_config``).  When ``match`` is given, the
        generated VHDL must equal its elements joined with newlines.
        """
        dec = AddressDecoder('address', 32, optimize, allow_overlap, allow_duplicate)
        for address in addresses:
            dec[MaskedAddress.parse_config(address)] = str(address)
        result = str(dec)
        if match is not None:
            self.assertEqual(result, '\n'.join(match))
        return dec
    def test_empty(self):
        """tests constructing an empty address decoder"""
        self._test_decoder([], [''])
    def test_if(self):
        """tests address decoder if statement construction"""
        self._test_decoder(['8|3'], [
            'if address(31 downto 2) = "000000000000000000000000000010" then',
            '  -- address = 000000000000000000000000000010--',
            '',
            '  8|3',
            '',
            'end if;',
        ])
        self._test_decoder(['8|3'], optimize=True, match=[
            '-- address = 000000000000000000000000000010--',
            '',
            '8|3',
        ])
    def test_if_else(self):
        """tests address decoder if-else statement construction"""
        self._test_decoder(['4|3', '0|3'], match=[
            'if address(31 downto 3) = "00000000000000000000000000000" then',
            '  if address(2) = \'0\' then',
            '    -- address = 000000000000000000000000000000--',
            '',
            '    0|3',
            '',
            '  else',
            '    -- address = 000000000000000000000000000001--',
            '',
            '    4|3',
            '',
            '  end if;',
            'end if;',
        ])
        self._test_decoder(['4|3', '0|3'], optimize=True, match=[
            'if address(2) = \'0\' then',
            '  -- address = 000000000000000000000000000000--',
            '',
            '  0|3',
            '',
            'else',
            '  -- address = 000000000000000000000000000001--',
            '',
            '  4|3',
            '',
            'end if;',
        ])
    def test_if_elsif(self):
        """tests address decoder if-elsif statement construction"""
        self._test_decoder(['8|7', '4|3', '0|3'], optimize=True, match=[
            'if address(3) = \'1\' then',
            '  -- address = 00000000000000000000000000001---',
            '',
            '  8|7',
            '',
            'elsif address(2) = \'0\' then',
            '  -- address = 000000000000000000000000000000--',
            '',
            '  0|3',
            '',
            'else',
            '  -- address = 000000000000000000000000000001--',
            '',
            '  4|3',
            '',
            'end if;',
        ])
        self._test_decoder(['12|3', '8|3', '0|7'], optimize=True, match=[
            'if address(3) = \'0\' then',
            '  -- address = 00000000000000000000000000000---',
            '',
            '  0|7',
            '',
            'elsif address(2) = \'0\' then',
            '  -- address = 000000000000000000000000000010--',
            '',
            '  8|3',
            '',
            'else',
            '  -- address = 000000000000000000000000000011--',
            '',
            '  12|3',
            '',
            'end if;',
        ])
    def test_case_statement(self):
        """tests address decoder case statement construction"""
        self._test_decoder(['8|3', '4|3'], match=[
            'if address(31 downto 4) = "0000000000000000000000000000" then',
            '  case address(3 downto 2) is',
            '    when "01" =>',
            '      -- address = 000000000000000000000000000001--',
            '',
            '      4|3',
            '',
            '    when "10" =>',
            '      -- address = 000000000000000000000000000010--',
            '',
            '      8|3',
            '',
            '    when others =>',
            '      null;',
            '  end case;',
            'end if;',
        ])
        self._test_decoder(['8|3', '4|3'], optimize=True, match=[
            'case address(3 downto 2) is',
            '  when "01" =>',
            '    -- address = 000000000000000000000000000001--',
            '',
            '    4|3',
            '',
            '  when others => -- "10"',
            '    -- address = 000000000000000000000000000010--',
            '',
            '    8|3',
            '',
            'end case;',
        ])
    def test_common_suffix(self):
        """tests address decoder common suffix detection"""
        self._test_decoder([16, 32], match=[
            'if address(31 downto 6) = "00000000000000000000000000" then',
            '  if address(3 downto 0) = "0000" then',
            '    case address(5 downto 4) is',
            '      when "01" =>',
            '        -- address = 00000000000000000000000000010000',
            '',
            '        16',
            '',
            '      when "10" =>',
            '        -- address = 00000000000000000000000000100000',
            '',
            '        32',
            '',
            '      when others =>',
            '        null;',
            '    end case;',
            '  end if;',
            'end if;',
        ])
        self._test_decoder([16, 32], optimize=True, match=[
            'case address(5 downto 4) is',
            '  when "01" =>',
            '    -- address = 00000000000000000000000000010000',
            '',
            '    16',
            '',
            '  when others => -- "10"',
            '    -- address = 00000000000000000000000000100000',
            '',
            '    32',
            '',
            'end case;',
        ])
    def test_duplicate(self):
        """tests address decoder duplicate address error"""
        with self.assertRaisesRegex(ValueError, 'duplicate'):
            self._test_decoder([3, '3|0'])
        self._test_decoder([3, '3|0'], allow_duplicate=True, match=[
            'if address(31 downto 0) = "00000000000000000000000000000011" then',
            '  -- address = 00000000000000000000000000000011',
            '',
            '  3',
            '',
            '  3|0',
            '',
            'end if;',
        ])
    def test_overlapping(self):
        """tests address decoder overlapping address error"""
        with self.assertRaisesRegex(ValueError, 'overlap'):
            self._test_decoder([3, '3|3'])
        self._test_decoder([3, '3|3'], allow_overlap=True, match=[
            'if address(31 downto 2) = "000000000000000000000000000000" then',
            '  if address(1 downto 0) = "11" then',
            '    -- address = 00000000000000000000000000000011',
            '',
            '    3',
            '',
            '  end if;',
            '',
            '  -- address = 000000000000000000000000000000--',
            '',
            '  3|3',
            '',
            'end if;',
        ])
    def test_template(self):
        """tests adding decoders to templates"""
        tple = TemplateEngine()
        self._test_decoder([3]).append_to_template(tple, 'BLOCK', 'comment for decoder')
        self.assertEqual(tple.apply_str_to_str('$BLOCK', comment='-- '), '\n'.join([
            '-- comment for decoder',
            'if address(31 downto 0) = "00000000000000000000000000000011" then',
            '  -- address = 00000000000000000000000000000011',
            '',
            '  3',
            '',
            'end if;',
            ''
        ]))
| 32.626556 | 88 | 0.452499 |
from unittest import TestCase
from vhdmmio.vhdl.address_decoder import AddressDecoder
from vhdmmio.core.address import MaskedAddress
from vhdmmio.template import TemplateEngine
class TestVhdlDecoder(TestCase):
maxDiff = None
def _test_decoder(self, addresses, match=None,
optimize=False, allow_overlap=False, allow_duplicate=False):
dec = AddressDecoder('address', 32, optimize, allow_overlap, allow_duplicate)
for address in addresses:
dec[MaskedAddress.parse_config(address)] = str(address)
result = str(dec)
if match is not None:
self.assertEqual(result, '\n'.join(match))
return dec
def test_empty(self):
self._test_decoder([], [''])
def test_if(self):
self._test_decoder(['8|3'], [
'if address(31 downto 2) = "000000000000000000000000000010" then',
' -- address = 000000000000000000000000000010--',
'',
' 8|3',
'',
'end if;',
])
self._test_decoder(['8|3'], optimize=True, match=[
'-- address = 000000000000000000000000000010--',
'',
'8|3',
])
def test_if_else(self):
self._test_decoder(['4|3', '0|3'], match=[
'if address(31 downto 3) = "00000000000000000000000000000" then',
' if address(2) = \'0\' then',
' -- address = 000000000000000000000000000000--',
'',
' 0|3',
'',
' else',
' -- address = 000000000000000000000000000001--',
'',
' 4|3',
'',
' end if;',
'end if;',
])
self._test_decoder(['4|3', '0|3'], optimize=True, match=[
'if address(2) = \'0\' then',
' -- address = 000000000000000000000000000000--',
'',
' 0|3',
'',
'else',
' -- address = 000000000000000000000000000001--',
'',
' 4|3',
'',
'end if;',
])
def test_if_elsif(self):
self._test_decoder(['8|7', '4|3', '0|3'], optimize=True, match=[
'if address(3) = \'1\' then',
' -- address = 00000000000000000000000000001---',
'',
' 8|7',
'',
'elsif address(2) = \'0\' then',
' -- address = 000000000000000000000000000000--',
'',
' 0|3',
'',
'else',
' -- address = 000000000000000000000000000001--',
'',
' 4|3',
'',
'end if;',
])
self._test_decoder(['12|3', '8|3', '0|7'], optimize=True, match=[
'if address(3) = \'0\' then',
' -- address = 00000000000000000000000000000---',
'',
' 0|7',
'',
'elsif address(2) = \'0\' then',
' -- address = 000000000000000000000000000010--',
'',
' 8|3',
'',
'else',
' -- address = 000000000000000000000000000011--',
'',
' 12|3',
'',
'end if;',
])
def test_case_statement(self):
self._test_decoder(['8|3', '4|3'], match=[
'if address(31 downto 4) = "0000000000000000000000000000" then',
' case address(3 downto 2) is',
' when "01" =>',
' -- address = 000000000000000000000000000001--',
'',
' 4|3',
'',
' when "10" =>',
' -- address = 000000000000000000000000000010--',
'',
' 8|3',
'',
' when others =>',
' null;',
' end case;',
'end if;',
])
self._test_decoder(['8|3', '4|3'], optimize=True, match=[
'case address(3 downto 2) is',
' when "01" =>',
' -- address = 000000000000000000000000000001--',
'',
' 4|3',
'',
' when others => -- "10"',
' -- address = 000000000000000000000000000010--',
'',
' 8|3',
'',
'end case;',
])
def test_common_suffix(self):
self._test_decoder([16, 32], match=[
'if address(31 downto 6) = "00000000000000000000000000" then',
' if address(3 downto 0) = "0000" then',
' case address(5 downto 4) is',
' when "01" =>',
' -- address = 00000000000000000000000000010000',
'',
' 16',
'',
' when "10" =>',
' -- address = 00000000000000000000000000100000',
'',
' 32',
'',
' when others =>',
' null;',
' end case;',
' end if;',
'end if;',
])
self._test_decoder([16, 32], optimize=True, match=[
'case address(5 downto 4) is',
' when "01" =>',
' -- address = 00000000000000000000000000010000',
'',
' 16',
'',
' when others => -- "10"',
' -- address = 00000000000000000000000000100000',
'',
' 32',
'',
'end case;',
])
def test_duplicate(self):
with self.assertRaisesRegex(ValueError, 'duplicate'):
self._test_decoder([3, '3|0'])
self._test_decoder([3, '3|0'], allow_duplicate=True, match=[
'if address(31 downto 0) = "00000000000000000000000000000011" then',
' -- address = 00000000000000000000000000000011',
'',
' 3',
'',
' 3|0',
'',
'end if;',
])
def test_overlapping(self):
with self.assertRaisesRegex(ValueError, 'overlap'):
self._test_decoder([3, '3|3'])
self._test_decoder([3, '3|3'], allow_overlap=True, match=[
'if address(31 downto 2) = "000000000000000000000000000000" then',
' if address(1 downto 0) = "11" then',
' -- address = 00000000000000000000000000000011',
'',
' 3',
'',
' end if;',
'',
' -- address = 000000000000000000000000000000--',
'',
' 3|3',
'',
'end if;',
])
def test_template(self):
tple = TemplateEngine()
self._test_decoder([3]).append_to_template(tple, 'BLOCK', 'comment for decoder')
self.assertEqual(tple.apply_str_to_str('$BLOCK', comment='-- '), '\n'.join([
'-- comment for decoder',
'if address(31 downto 0) = "00000000000000000000000000000011" then',
' -- address = 00000000000000000000000000000011',
'',
' 3',
'',
'end if;',
''
]))
| true | true |
1c47bd9fc2b2b2f8e378fb299617e772a61d05cc | 704 | py | Python | 0x0F-python-object_relational_mapping/4-cities_by_state.py | Rmolimock/holbertonschool-higher_level_programming | cf0421cbb6463b3960dc581badf7d4bbe1622b7d | [
"MIT"
] | 1 | 2019-05-21T09:34:41.000Z | 2019-05-21T09:34:41.000Z | 0x0F-python-object_relational_mapping/4-cities_by_state.py | Rmolimock/holbertonschool-higher_level_programming | cf0421cbb6463b3960dc581badf7d4bbe1622b7d | [
"MIT"
] | null | null | null | 0x0F-python-object_relational_mapping/4-cities_by_state.py | Rmolimock/holbertonschool-higher_level_programming | cf0421cbb6463b3960dc581badf7d4bbe1622b7d | [
"MIT"
] | null | null | null | #!/usr/bin/python3
'''
Lists all states from a given database with given name
protect against sql injection
'''
import MySQLdb
from sys import argv
if __name__ == "__main__":
    # Credentials and database name come from the command line:
    # argv[1]=user, argv[2]=password, argv[3]=database.
    connection = MySQLdb.connect(host="localhost", port=3306, charset="utf8",
                                 user=argv[1], passwd=argv[2], db=argv[3])
    cursor = connection.cursor()
    # Static query with no interpolated user input, so no injection risk;
    # LEFT JOIN keeps cities even if their state_id has no matching state.
    cursor.execute("SELECT cities.id, cities.name, states.name"
                   " FROM cities LEFT JOIN states"
                   " ON cities.state_id = states.id"
                   " ORDER BY cities.id ASC")
    rows = cursor.fetchall()
    for eachRow in rows:
        print(eachRow)
    cursor.close()
    connection.close()
| 30.608696 | 77 | 0.599432 |
import MySQLdb
from sys import argv
if __name__ == "__main__":
connection = MySQLdb.connect(host="localhost", port=3306, charset="utf8",
user=argv[1], passwd=argv[2], db=argv[3])
cursor = connection.cursor()
cursor.execute("SELECT cities.id, cities.name, states.name"
" FROM cities LEFT JOIN states"
" ON cities.state_id = states.id"
" ORDER BY cities.id ASC")
rows = cursor.fetchall()
for eachRow in rows:
print(eachRow)
cursor.close()
connection.close()
| true | true |
1c47be45651c7c68c942bf5b7c7f590e320b1cd0 | 49,438 | py | Python | homeassistant/components/google_assistant/trait.py | unverbraucht/core | 312af53935a1bffd58b3b35e82e31292a6ec22ad | [
"Apache-2.0"
] | 2 | 2019-11-20T20:56:59.000Z | 2021-01-03T08:52:18.000Z | homeassistant/components/google_assistant/trait.py | shownor/core | b50281a9173e7fb4a37b3f813ca92876088eaac3 | [
"Apache-2.0"
] | 5 | 2020-04-26T10:50:01.000Z | 2021-03-16T21:19:46.000Z | homeassistant/components/google_assistant/trait.py | winterscar/core | 5a55d508791aae65f16396691d014c73fb2095f0 | [
"Apache-2.0"
] | 1 | 2021-04-18T19:36:34.000Z | 2021-04-18T19:36:34.000Z | """Implement the Google Smart Home traits."""
import logging
from homeassistant.components import (
alarm_control_panel,
binary_sensor,
camera,
cover,
fan,
group,
input_boolean,
light,
lock,
media_player,
scene,
script,
sensor,
switch,
vacuum,
)
from homeassistant.components.climate import const as climate
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_CODE,
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_CUSTOM_BYPASS,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_ARM_NIGHT,
SERVICE_ALARM_DISARM,
SERVICE_ALARM_TRIGGER,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
STATE_LOCKED,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import DOMAIN as HA_DOMAIN
from homeassistant.util import color as color_util, temperature as temp_util
from .const import (
CHALLENGE_ACK_NEEDED,
CHALLENGE_FAILED_PIN_NEEDED,
CHALLENGE_PIN_NEEDED,
ERR_ALREADY_ARMED,
ERR_ALREADY_DISARMED,
ERR_CHALLENGE_NOT_SETUP,
ERR_FUNCTION_NOT_SUPPORTED,
ERR_NOT_SUPPORTED,
ERR_VALUE_OUT_OF_RANGE,
)
from .error import ChallengeNeeded, SmartHomeError
_LOGGER = logging.getLogger(__name__)
PREFIX_TRAITS = "action.devices.traits."
TRAIT_CAMERA_STREAM = PREFIX_TRAITS + "CameraStream"
TRAIT_ONOFF = PREFIX_TRAITS + "OnOff"
TRAIT_DOCK = PREFIX_TRAITS + "Dock"
TRAIT_STARTSTOP = PREFIX_TRAITS + "StartStop"
TRAIT_BRIGHTNESS = PREFIX_TRAITS + "Brightness"
TRAIT_COLOR_SETTING = PREFIX_TRAITS + "ColorSetting"
TRAIT_SCENE = PREFIX_TRAITS + "Scene"
TRAIT_TEMPERATURE_SETTING = PREFIX_TRAITS + "TemperatureSetting"
TRAIT_LOCKUNLOCK = PREFIX_TRAITS + "LockUnlock"
TRAIT_FANSPEED = PREFIX_TRAITS + "FanSpeed"
TRAIT_MODES = PREFIX_TRAITS + "Modes"
TRAIT_OPENCLOSE = PREFIX_TRAITS + "OpenClose"
TRAIT_VOLUME = PREFIX_TRAITS + "Volume"
TRAIT_ARMDISARM = PREFIX_TRAITS + "ArmDisarm"
TRAIT_HUMIDITY_SETTING = PREFIX_TRAITS + "HumiditySetting"
PREFIX_COMMANDS = "action.devices.commands."
COMMAND_ONOFF = PREFIX_COMMANDS + "OnOff"
COMMAND_GET_CAMERA_STREAM = PREFIX_COMMANDS + "GetCameraStream"
COMMAND_DOCK = PREFIX_COMMANDS + "Dock"
COMMAND_STARTSTOP = PREFIX_COMMANDS + "StartStop"
COMMAND_PAUSEUNPAUSE = PREFIX_COMMANDS + "PauseUnpause"
COMMAND_BRIGHTNESS_ABSOLUTE = PREFIX_COMMANDS + "BrightnessAbsolute"
COMMAND_COLOR_ABSOLUTE = PREFIX_COMMANDS + "ColorAbsolute"
COMMAND_ACTIVATE_SCENE = PREFIX_COMMANDS + "ActivateScene"
COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT = (
PREFIX_COMMANDS + "ThermostatTemperatureSetpoint"
)
COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE = (
PREFIX_COMMANDS + "ThermostatTemperatureSetRange"
)
COMMAND_THERMOSTAT_SET_MODE = PREFIX_COMMANDS + "ThermostatSetMode"
COMMAND_LOCKUNLOCK = PREFIX_COMMANDS + "LockUnlock"
COMMAND_FANSPEED = PREFIX_COMMANDS + "SetFanSpeed"
COMMAND_MODES = PREFIX_COMMANDS + "SetModes"
COMMAND_OPENCLOSE = PREFIX_COMMANDS + "OpenClose"
COMMAND_SET_VOLUME = PREFIX_COMMANDS + "setVolume"
COMMAND_VOLUME_RELATIVE = PREFIX_COMMANDS + "volumeRelative"
COMMAND_ARMDISARM = PREFIX_COMMANDS + "ArmDisarm"
TRAITS = []
def register_trait(trait):
    """Class decorator: add *trait* to the module-level TRAITS registry."""
    TRAITS.append(trait)
    return trait
def _google_temp_unit(units):
    """Map a Home Assistant temperature unit to Google's "F"/"C" code."""
    return "F" if units == TEMP_FAHRENHEIT else "C"
class _Trait:
    """Represents a Trait inside Google Assistant skill."""
    # Google command identifiers this trait is able to handle.
    commands = []
    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return if the trait might ask for 2FA."""
        return False
    def __init__(self, hass, state, config):
        """Initialize a trait for a state."""
        self.hass = hass
        self.state = state
        self.config = config
    def sync_attributes(self):
        """Return attributes for a sync request."""
        raise NotImplementedError
    def query_attributes(self):
        """Return the attributes of this trait for this entity."""
        raise NotImplementedError
    def can_execute(self, command, params):
        """Test if command can be executed (i.e. is in self.commands)."""
        return command in self.commands
    async def execute(self, command, data, params, challenge):
        """Execute a trait command."""
        raise NotImplementedError
@register_trait
class BrightnessTrait(_Trait):
    """Trait to control brightness of a device.

    https://developers.google.com/actions/smarthome/traits/brightness
    """

    name = TRAIT_BRIGHTNESS
    commands = [COMMAND_BRIGHTNESS_ABSOLUTE]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        if domain != light.DOMAIN:
            return False
        return features & light.SUPPORT_BRIGHTNESS

    def sync_attributes(self):
        """Return brightness attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return brightness query attributes."""
        response = {}
        if self.state.domain == light.DOMAIN:
            # HA brightness is 0-255; Google expects a 0-100 percentage.
            brightness = self.state.attributes.get(light.ATTR_BRIGHTNESS)
            response["brightness"] = (
                int(100 * (brightness / 255)) if brightness is not None else 0
            )
        return response

    async def execute(self, command, data, params, challenge):
        """Execute a brightness command."""
        if self.state.domain == light.DOMAIN:
            service_data = {
                ATTR_ENTITY_ID: self.state.entity_id,
                light.ATTR_BRIGHTNESS_PCT: params["brightness"],
            }
            await self.hass.services.async_call(
                light.DOMAIN,
                light.SERVICE_TURN_ON,
                service_data,
                blocking=True,
                context=data.context,
            )
@register_trait
class CameraStreamTrait(_Trait):
    """Trait to stream from cameras.

    https://developers.google.com/actions/smarthome/traits/camerastream
    """

    name = TRAIT_CAMERA_STREAM
    commands = [COMMAND_GET_CAMERA_STREAM]

    # Filled in by execute(); holds the most recently requested stream URL.
    stream_info = None

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        if domain != camera.DOMAIN:
            return False
        return features & camera.SUPPORT_STREAM

    def sync_attributes(self):
        """Return stream attributes for a sync request."""
        return {
            "cameraStreamSupportedProtocols": ["hls"],
            "cameraStreamNeedAuthToken": False,
            "cameraStreamNeedDrmEncryption": False,
        }

    def query_attributes(self):
        """Return camera stream attributes."""
        return self.stream_info or {}

    async def execute(self, command, data, params, challenge):
        """Execute a get camera stream command."""
        entity_id = self.state.entity_id
        url = await self.hass.components.camera.async_request_stream(entity_id, "hls")
        self.stream_info = {
            "cameraStreamAccessUrl": self.hass.config.api.base_url + url
        }
@register_trait
class OnOffTrait(_Trait):
    """Trait to offer basic on and off functionality.

    https://developers.google.com/actions/smarthome/traits/onoff
    """

    name = TRAIT_ONOFF
    commands = [COMMAND_ONOFF]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain in (
            group.DOMAIN,
            input_boolean.DOMAIN,
            switch.DOMAIN,
            fan.DOMAIN,
            light.DOMAIN,
            media_player.DOMAIN,
        )

    def sync_attributes(self):
        """Return OnOff attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return OnOff query attributes."""
        # Anything not explicitly "off" is reported as on.
        return {"on": self.state.state != STATE_OFF}

    async def execute(self, command, data, params, challenge):
        """Execute an OnOff command."""
        domain = self.state.domain
        # The original had two identical branches; the only real difference
        # is that groups are switched through the generic
        # homeassistant.turn_on/turn_off services.
        service_domain = HA_DOMAIN if domain == group.DOMAIN else domain
        service = SERVICE_TURN_ON if params["on"] else SERVICE_TURN_OFF
        await self.hass.services.async_call(
            service_domain,
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class ColorSettingTrait(_Trait):
    """Trait to offer color setting (hue/saturation and color temperature) functionality.
    https://developers.google.com/actions/smarthome/traits/colortemperature
    """
    name = TRAIT_COLOR_SETTING
    commands = [COMMAND_COLOR_ABSOLUTE]
    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        if domain != light.DOMAIN:
            return False
        return features & light.SUPPORT_COLOR_TEMP or features & light.SUPPORT_COLOR
    def sync_attributes(self):
        """Return color temperature attributes for a sync request."""
        attrs = self.state.attributes
        features = attrs.get(ATTR_SUPPORTED_FEATURES, 0)
        response = {}
        if features & light.SUPPORT_COLOR:
            response["colorModel"] = "hsv"
        if features & light.SUPPORT_COLOR_TEMP:
            # Max Kelvin is Min Mireds K = 1000000 / mireds
            # Min Kelvin is Max Mireds K = 1000000 / mireds
            response["colorTemperatureRange"] = {
                "temperatureMaxK": color_util.color_temperature_mired_to_kelvin(
                    attrs.get(light.ATTR_MIN_MIREDS)
                ),
                "temperatureMinK": color_util.color_temperature_mired_to_kelvin(
                    attrs.get(light.ATTR_MAX_MIREDS)
                ),
            }
        return response
    def query_attributes(self):
        """Return color temperature query attributes."""
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        color = {}
        if features & light.SUPPORT_COLOR:
            color_hs = self.state.attributes.get(light.ATTR_HS_COLOR)
            brightness = self.state.attributes.get(light.ATTR_BRIGHTNESS, 1)
            if color_hs is not None:
                # Google expects saturation/value as 0-1 fractions; HA stores
                # saturation 0-100 and brightness 0-255.
                color["spectrumHsv"] = {
                    "hue": color_hs[0],
                    "saturation": color_hs[1] / 100,
                    "value": brightness / 255,
                }
        if features & light.SUPPORT_COLOR_TEMP:
            temp = self.state.attributes.get(light.ATTR_COLOR_TEMP)
            # Some faulty integrations might put 0 in here, raising exception.
            if temp == 0:
                _LOGGER.warning(
                    "Entity %s has incorrect color temperature %s",
                    self.state.entity_id,
                    temp,
                )
            elif temp is not None:
                color["temperatureK"] = color_util.color_temperature_mired_to_kelvin(
                    temp
                )
        response = {}
        if color:
            response["color"] = color
        return response
    async def execute(self, command, data, params, challenge):
        """Execute a color temperature command."""
        if "temperature" in params["color"]:
            # Google sends Kelvin; lights are driven in mireds.
            temp = color_util.color_temperature_kelvin_to_mired(
                params["color"]["temperature"]
            )
            min_temp = self.state.attributes[light.ATTR_MIN_MIREDS]
            max_temp = self.state.attributes[light.ATTR_MAX_MIREDS]
            if temp < min_temp or temp > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    f"Temperature should be between {min_temp} and {max_temp}",
                )
            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_COLOR_TEMP: temp},
                blocking=True,
                context=data.context,
            )
        elif "spectrumRGB" in params["color"]:
            # Convert integer to hex format and left pad with 0's till length 6
            hex_value = f"{params['color']['spectrumRGB']:06x}"
            color = color_util.color_RGB_to_hs(
                *color_util.rgb_hex_to_rgb_list(hex_value)
            )
            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_HS_COLOR: color},
                blocking=True,
                context=data.context,
            )
        elif "spectrumHSV" in params["color"]:
            # Scale Google's 0-1 fractions back to HA's 0-100 / 0-255 ranges.
            color = params["color"]["spectrumHSV"]
            saturation = color["saturation"] * 100
            brightness = color["value"] * 255
            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    light.ATTR_HS_COLOR: [color["hue"], saturation],
                    light.ATTR_BRIGHTNESS: brightness,
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class SceneTrait(_Trait):
    """Trait to offer scene functionality.
    https://developers.google.com/actions/smarthome/traits/scene
    """
    name = TRAIT_SCENE
    commands = [COMMAND_ACTIVATE_SCENE]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain == scene.DOMAIN or domain == script.DOMAIN

    def sync_attributes(self):
        """Return scene attributes for a sync request."""
        # sceneReversible is not supported by either domain.
        return {}

    def query_attributes(self):
        """Return scene query attributes."""
        return {}

    async def execute(self, command, data, params, challenge):
        """Execute a scene command."""
        # Scripts can run for a long time; don't block on them.
        should_block = self.state.domain != script.DOMAIN
        await self.hass.services.async_call(
            self.state.domain,
            SERVICE_TURN_ON,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=should_block,
            context=data.context,
        )
@register_trait
class DockTrait(_Trait):
    """Trait to offer dock functionality.
    https://developers.google.com/actions/smarthome/traits/dock
    """
    name = TRAIT_DOCK
    commands = [COMMAND_DOCK]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain == vacuum.DOMAIN

    def sync_attributes(self):
        """Return dock attributes for a sync request."""
        # Dock exposes no sync-time attributes.
        return {}

    def query_attributes(self):
        """Return dock query attributes."""
        docked = self.state.state == vacuum.STATE_DOCKED
        return {"isDocked": docked}

    async def execute(self, command, data, params, challenge):
        """Execute a dock command by sending the vacuum back to its base."""
        await self.hass.services.async_call(
            self.state.domain,
            vacuum.SERVICE_RETURN_TO_BASE,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class StartStopTrait(_Trait):
    """Trait to offer StartStop functionality.
    https://developers.google.com/actions/smarthome/traits/startstop
    """
    name = TRAIT_STARTSTOP
    commands = [COMMAND_STARTSTOP, COMMAND_PAUSEUNPAUSE]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain == vacuum.DOMAIN

    def sync_attributes(self):
        """Return StartStop attributes for a sync request."""
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        return {"pausable": (features & vacuum.SUPPORT_PAUSE) != 0}

    def query_attributes(self):
        """Return StartStop query attributes."""
        current = self.state.state
        return {
            "isRunning": current == vacuum.STATE_CLEANING,
            "isPaused": current == vacuum.STATE_PAUSED,
        }

    async def execute(self, command, data, params, challenge):
        """Execute a StartStop command."""
        if command == COMMAND_STARTSTOP:
            service = vacuum.SERVICE_START if params["start"] else vacuum.SERVICE_STOP
        elif command == COMMAND_PAUSEUNPAUSE:
            # Unpausing simply starts the vacuum again.
            service = vacuum.SERVICE_PAUSE if params["pause"] else vacuum.SERVICE_START
        else:
            return
        await self.hass.services.async_call(
            self.state.domain,
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class TemperatureSettingTrait(_Trait):
    """Trait to offer handling both temperature point and modes functionality.
    https://developers.google.com/actions/smarthome/traits/temperaturesetting
    """
    name = TRAIT_TEMPERATURE_SETTING
    commands = [
        COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT,
        COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE,
        COMMAND_THERMOSTAT_SET_MODE,
    ]
    # We do not support "on" as we are unable to know how to restore
    # the last mode.
    hvac_to_google = {
        climate.HVAC_MODE_HEAT: "heat",
        climate.HVAC_MODE_COOL: "cool",
        climate.HVAC_MODE_OFF: "off",
        climate.HVAC_MODE_AUTO: "auto",
        climate.HVAC_MODE_HEAT_COOL: "heatcool",
        climate.HVAC_MODE_FAN_ONLY: "fan-only",
        climate.HVAC_MODE_DRY: "dry",
    }
    # Reverse lookup tables for translating Google modes back to HA.
    google_to_hvac = {value: key for key, value in hvac_to_google.items()}
    preset_to_google = {climate.PRESET_ECO: "eco"}
    google_to_preset = {value: key for key, value in preset_to_google.items()}
    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        if domain == climate.DOMAIN:
            return True
        # Temperature sensors are exposed as query-only thermostats.
        return (
            domain == sensor.DOMAIN and device_class == sensor.DEVICE_CLASS_TEMPERATURE
        )
    @property
    def climate_google_modes(self):
        """Return supported Google modes."""
        modes = []
        attrs = self.state.attributes
        for mode in attrs.get(climate.ATTR_HVAC_MODES, []):
            google_mode = self.hvac_to_google.get(mode)
            if google_mode and google_mode not in modes:
                modes.append(google_mode)
        for preset in attrs.get(climate.ATTR_PRESET_MODES, []):
            google_mode = self.preset_to_google.get(preset)
            if google_mode and google_mode not in modes:
                modes.append(google_mode)
        return modes
    def sync_attributes(self):
        """Return temperature point and modes attributes for a sync request."""
        response = {}
        attrs = self.state.attributes
        domain = self.state.domain
        response["thermostatTemperatureUnit"] = _google_temp_unit(
            self.hass.config.units.temperature_unit
        )
        if domain == sensor.DOMAIN:
            device_class = attrs.get(ATTR_DEVICE_CLASS)
            if device_class == sensor.DEVICE_CLASS_TEMPERATURE:
                response["queryOnlyTemperatureSetting"] = True
        elif domain == climate.DOMAIN:
            modes = self.climate_google_modes
            # Some integrations don't support modes (e.g. opentherm), but Google doesn't
            # support changing the temperature if we don't have any modes. If there's
            # only one Google doesn't support changing it, so the default mode here is
            # only cosmetic.
            if len(modes) == 0:
                modes.append("heat")
            if "off" in modes and any(
                mode in modes for mode in ("heatcool", "heat", "cool")
            ):
                modes.append("on")
            response["availableThermostatModes"] = ",".join(modes)
        return response
    def query_attributes(self):
        """Return temperature point and modes query attributes."""
        response = {}
        attrs = self.state.attributes
        domain = self.state.domain
        unit = self.hass.config.units.temperature_unit
        if domain == sensor.DOMAIN:
            device_class = attrs.get(ATTR_DEVICE_CLASS)
            if device_class == sensor.DEVICE_CLASS_TEMPERATURE:
                current_temp = self.state.state
                if current_temp not in (STATE_UNKNOWN, STATE_UNAVAILABLE):
                    response["thermostatTemperatureAmbient"] = round(
                        temp_util.convert(float(current_temp), unit, TEMP_CELSIUS), 1
                    )
        elif domain == climate.DOMAIN:
            operation = self.state.state
            preset = attrs.get(climate.ATTR_PRESET_MODE)
            supported = attrs.get(ATTR_SUPPORTED_FEATURES, 0)
            # An active known preset (e.g. eco) takes precedence over the HVAC mode.
            if preset in self.preset_to_google:
                response["thermostatMode"] = self.preset_to_google[preset]
            else:
                response["thermostatMode"] = self.hvac_to_google.get(operation)
            current_temp = attrs.get(climate.ATTR_CURRENT_TEMPERATURE)
            if current_temp is not None:
                response["thermostatTemperatureAmbient"] = round(
                    temp_util.convert(current_temp, unit, TEMP_CELSIUS), 1
                )
            current_humidity = attrs.get(climate.ATTR_CURRENT_HUMIDITY)
            if current_humidity is not None:
                response["thermostatHumidityAmbient"] = current_humidity
            if operation in (climate.HVAC_MODE_AUTO, climate.HVAC_MODE_HEAT_COOL):
                if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
                    response["thermostatTemperatureSetpointHigh"] = round(
                        temp_util.convert(
                            attrs[climate.ATTR_TARGET_TEMP_HIGH], unit, TEMP_CELSIUS
                        ),
                        1,
                    )
                    response["thermostatTemperatureSetpointLow"] = round(
                        temp_util.convert(
                            attrs[climate.ATTR_TARGET_TEMP_LOW], unit, TEMP_CELSIUS
                        ),
                        1,
                    )
                else:
                    # No real range support: report the single target for both ends.
                    target_temp = attrs.get(ATTR_TEMPERATURE)
                    if target_temp is not None:
                        target_temp = round(
                            temp_util.convert(target_temp, unit, TEMP_CELSIUS), 1
                        )
                    response["thermostatTemperatureSetpointHigh"] = target_temp
                    response["thermostatTemperatureSetpointLow"] = target_temp
            else:
                target_temp = attrs.get(ATTR_TEMPERATURE)
                if target_temp is not None:
                    response["thermostatTemperatureSetpoint"] = round(
                        temp_util.convert(target_temp, unit, TEMP_CELSIUS), 1
                    )
        return response
    async def execute(self, command, data, params, challenge):
        """Execute a temperature point or mode command."""
        domain = self.state.domain
        if domain == sensor.DOMAIN:
            raise SmartHomeError(
                ERR_NOT_SUPPORTED, "Execute is not supported by sensor"
            )
        # All sent in temperatures are always in Celsius
        unit = self.hass.config.units.temperature_unit
        min_temp = self.state.attributes[climate.ATTR_MIN_TEMP]
        max_temp = self.state.attributes[climate.ATTR_MAX_TEMP]
        if command == COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT:
            temp = temp_util.convert(
                params["thermostatTemperatureSetpoint"], TEMP_CELSIUS, unit
            )
            if unit == TEMP_FAHRENHEIT:
                # Fahrenheit thermostats work in whole degrees.
                temp = round(temp)
            if temp < min_temp or temp > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    f"Temperature should be between {min_temp} and {max_temp}",
                )
            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_TEMPERATURE,
                {ATTR_ENTITY_ID: self.state.entity_id, ATTR_TEMPERATURE: temp},
                blocking=True,
                context=data.context,
            )
        elif command == COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE:
            temp_high = temp_util.convert(
                params["thermostatTemperatureSetpointHigh"], TEMP_CELSIUS, unit
            )
            if unit == TEMP_FAHRENHEIT:
                temp_high = round(temp_high)
            if temp_high < min_temp or temp_high > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    (
                        f"Upper bound for temperature range should be between "
                        f"{min_temp} and {max_temp}"
                    ),
                )
            temp_low = temp_util.convert(
                params["thermostatTemperatureSetpointLow"], TEMP_CELSIUS, unit
            )
            if unit == TEMP_FAHRENHEIT:
                temp_low = round(temp_low)
            if temp_low < min_temp or temp_low > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    (
                        f"Lower bound for temperature range should be between "
                        f"{min_temp} and {max_temp}"
                    ),
                )
            supported = self.state.attributes.get(ATTR_SUPPORTED_FEATURES)
            svc_data = {ATTR_ENTITY_ID: self.state.entity_id}
            if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
                svc_data[climate.ATTR_TARGET_TEMP_HIGH] = temp_high
                svc_data[climate.ATTR_TARGET_TEMP_LOW] = temp_low
            else:
                # Devices without range support get the midpoint as target.
                svc_data[ATTR_TEMPERATURE] = (temp_high + temp_low) / 2
            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_TEMPERATURE,
                svc_data,
                blocking=True,
                context=data.context,
            )
        elif command == COMMAND_THERMOSTAT_SET_MODE:
            target_mode = params["thermostatMode"]
            supported = self.state.attributes.get(ATTR_SUPPORTED_FEATURES)
            if target_mode == "on":
                await self.hass.services.async_call(
                    climate.DOMAIN,
                    SERVICE_TURN_ON,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
                return
            if target_mode == "off":
                await self.hass.services.async_call(
                    climate.DOMAIN,
                    SERVICE_TURN_OFF,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
                return
            # Known presets (e.g. eco) map to set_preset_mode instead of an
            # HVAC mode change.
            if target_mode in self.google_to_preset:
                await self.hass.services.async_call(
                    climate.DOMAIN,
                    climate.SERVICE_SET_PRESET_MODE,
                    {
                        climate.ATTR_PRESET_MODE: self.google_to_preset[target_mode],
                        ATTR_ENTITY_ID: self.state.entity_id,
                    },
                    blocking=True,
                    context=data.context,
                )
                return
            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_HVAC_MODE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    climate.ATTR_HVAC_MODE: self.google_to_hvac[target_mode],
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class HumiditySettingTrait(_Trait):
    """Trait to offer humidity setting functionality.
    https://developers.google.com/actions/smarthome/traits/humiditysetting
    """
    name = TRAIT_HUMIDITY_SETTING
    commands = []

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain == sensor.DOMAIN and device_class == sensor.DEVICE_CLASS_HUMIDITY

    def _is_humidity_sensor(self):
        """Return True when the wrapped entity is a humidity sensor."""
        return (
            self.state.domain == sensor.DOMAIN
            and self.state.attributes.get(ATTR_DEVICE_CLASS)
            == sensor.DEVICE_CLASS_HUMIDITY
        )

    def sync_attributes(self):
        """Return humidity attributes for a sync request."""
        # Sensors are read-only, so only a query-only setting is exposed.
        if self._is_humidity_sensor():
            return {"queryOnlyHumiditySetting": True}
        return {}

    def query_attributes(self):
        """Return humidity query attributes."""
        if not self._is_humidity_sensor():
            return {}
        current_humidity = self.state.state
        if current_humidity in (STATE_UNKNOWN, STATE_UNAVAILABLE):
            return {}
        return {"humidityAmbientPercent": round(float(current_humidity))}

    async def execute(self, command, data, params, challenge):
        """Execute a humidity command."""
        if self.state.domain == sensor.DOMAIN:
            raise SmartHomeError(
                ERR_NOT_SUPPORTED, "Execute is not supported by sensor"
            )
@register_trait
class LockUnlockTrait(_Trait):
    """Trait to lock or unlock a lock.
    https://developers.google.com/actions/smarthome/traits/lockunlock
    """
    name = TRAIT_LOCKUNLOCK
    commands = [COMMAND_LOCKUNLOCK]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain == lock.DOMAIN

    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return if the trait might ask for 2FA."""
        return True

    def sync_attributes(self):
        """Return LockUnlock attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return LockUnlock query attributes."""
        locked = self.state.state == STATE_LOCKED
        return {"isLocked": locked}

    async def execute(self, command, data, params, challenge):
        """Execute an LockUnlock command."""
        if params["lock"]:
            service = lock.SERVICE_LOCK
        else:
            # Unlocking is the sensitive direction; it may require a PIN.
            _verify_pin_challenge(data, self.state, challenge)
            service = lock.SERVICE_UNLOCK
        await self.hass.services.async_call(
            lock.DOMAIN,
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class ArmDisArmTrait(_Trait):
    """Trait to Arm or Disarm a Security System.
    https://developers.google.com/actions/smarthome/traits/armdisarm
    """
    name = TRAIT_ARMDISARM
    commands = [COMMAND_ARMDISARM]
    # Maps an alarm panel state to the service that puts it in that state.
    state_to_service = {
        STATE_ALARM_ARMED_HOME: SERVICE_ALARM_ARM_HOME,
        STATE_ALARM_ARMED_AWAY: SERVICE_ALARM_ARM_AWAY,
        STATE_ALARM_ARMED_NIGHT: SERVICE_ALARM_ARM_NIGHT,
        STATE_ALARM_ARMED_CUSTOM_BYPASS: SERVICE_ALARM_ARM_CUSTOM_BYPASS,
        STATE_ALARM_TRIGGERED: SERVICE_ALARM_TRIGGER,
    }
    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain == alarm_control_panel.DOMAIN
    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return if the trait might ask for 2FA."""
        return True
    def sync_attributes(self):
        """Return ArmDisarm attributes for a sync request."""
        response = {}
        levels = []
        for state in self.state_to_service:
            # level synonyms are generated from state names
            # 'armed_away' becomes 'armed away' or 'away'
            level_synonym = [state.replace("_", " ")]
            if state != STATE_ALARM_TRIGGERED:
                level_synonym.append(state.split("_")[1])
            level = {
                "level_name": state,
                "level_values": [{"level_synonym": level_synonym, "lang": "en"}],
            }
            levels.append(level)
        response["availableArmLevels"] = {"levels": levels, "ordered": False}
        return response
    def query_attributes(self):
        """Return ArmDisarm query attributes."""
        # While a state change is pending, report the state it will end up in.
        if "post_pending_state" in self.state.attributes:
            armed_state = self.state.attributes["post_pending_state"]
        else:
            armed_state = self.state.state
        response = {"isArmed": armed_state in self.state_to_service}
        if response["isArmed"]:
            response.update({"currentArmLevel": armed_state})
        return response
    async def execute(self, command, data, params, challenge):
        """Execute an ArmDisarm command."""
        if params["arm"] and not params.get("cancel"):
            if self.state.state == params["armLevel"]:
                raise SmartHomeError(ERR_ALREADY_ARMED, "System is already armed")
            if self.state.attributes["code_arm_required"]:
                _verify_pin_challenge(data, self.state, challenge)
            service = self.state_to_service[params["armLevel"]]
        # disarm the system without asking for code when
        # 'cancel' arming action is received while current status is pending
        elif (
            params["arm"]
            and params.get("cancel")
            and self.state.state == STATE_ALARM_PENDING
        ):
            service = SERVICE_ALARM_DISARM
        else:
            if self.state.state == STATE_ALARM_DISARMED:
                raise SmartHomeError(ERR_ALREADY_DISARMED, "System is already disarmed")
            # Disarming always requires the PIN challenge.
            _verify_pin_challenge(data, self.state, challenge)
            service = SERVICE_ALARM_DISARM
        await self.hass.services.async_call(
            alarm_control_panel.DOMAIN,
            service,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                ATTR_CODE: data.config.secure_devices_pin,
            },
            blocking=True,
            context=data.context,
        )
@register_trait
class FanSpeedTrait(_Trait):
    """Trait to control speed of Fan.
    https://developers.google.com/actions/smarthome/traits/fanspeed
    """
    name = TRAIT_FANSPEED
    commands = [COMMAND_FANSPEED]
    # Spoken synonyms Google accepts for each supported fan speed.
    speed_synonyms = {
        fan.SPEED_OFF: ["stop", "off"],
        fan.SPEED_LOW: ["slow", "low", "slowest", "lowest"],
        fan.SPEED_MEDIUM: ["medium", "mid", "middle"],
        fan.SPEED_HIGH: ["high", "max", "fast", "highest", "fastest", "maximum"],
    }

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        if domain != fan.DOMAIN:
            return False
        return features & fan.SUPPORT_SET_SPEED

    def sync_attributes(self):
        """Return speed point and modes attributes for a sync request."""
        supported_modes = self.state.attributes.get(fan.ATTR_SPEED_LIST, [])
        # Only expose speeds we have synonyms for.
        speeds = [
            {
                "speed_name": mode,
                "speed_values": [
                    {"speed_synonym": self.speed_synonyms.get(mode), "lang": "en"}
                ],
            }
            for mode in supported_modes
            if mode in self.speed_synonyms
        ]
        reversible = bool(
            self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
            & fan.SUPPORT_DIRECTION
        )
        return {
            "availableFanSpeeds": {"speeds": speeds, "ordered": True},
            "reversible": reversible,
        }

    def query_attributes(self):
        """Return speed point and modes query attributes."""
        current_speed = self.state.attributes.get(fan.ATTR_SPEED)
        if current_speed is None:
            return {}
        return {
            "on": current_speed != fan.SPEED_OFF,
            "online": True,
            "currentFanSpeedSetting": current_speed,
        }

    async def execute(self, command, data, params, challenge):
        """Execute an SetFanSpeed command."""
        await self.hass.services.async_call(
            fan.DOMAIN,
            fan.SERVICE_SET_SPEED,
            {ATTR_ENTITY_ID: self.state.entity_id, fan.ATTR_SPEED: params["fanSpeed"]},
            blocking=True,
            context=data.context,
        )
@register_trait
class ModesTrait(_Trait):
    """Trait to set modes.
    https://developers.google.com/actions/smarthome/traits/modes
    """
    name = TRAIT_MODES
    commands = [COMMAND_MODES]
    # Extra spoken synonyms for the mode names exposed to Google.
    SYNONYMS = {
        "input source": ["input source", "input", "source"],
        "sound mode": ["sound mode", "effects"],
    }
    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        if domain != media_player.DOMAIN:
            return False
        return (
            features & media_player.SUPPORT_SELECT_SOURCE
            or features & media_player.SUPPORT_SELECT_SOUND_MODE
        )
    def sync_attributes(self):
        """Return mode attributes for a sync request."""
        def _generate(name, settings):
            """Build one Google mode description from a list of setting names."""
            mode = {
                "name": name,
                "name_values": [
                    {"name_synonym": self.SYNONYMS.get(name, [name]), "lang": "en"}
                ],
                "settings": [],
                "ordered": False,
            }
            for setting in settings:
                mode["settings"].append(
                    {
                        "setting_name": setting,
                        "setting_values": [
                            {
                                "setting_synonym": self.SYNONYMS.get(
                                    setting, [setting]
                                ),
                                "lang": "en",
                            }
                        ],
                    }
                )
            return mode
        attrs = self.state.attributes
        modes = []
        if media_player.ATTR_INPUT_SOURCE_LIST in attrs:
            modes.append(
                _generate("input source", attrs[media_player.ATTR_INPUT_SOURCE_LIST])
            )
        if media_player.ATTR_SOUND_MODE_LIST in attrs:
            modes.append(
                _generate("sound mode", attrs[media_player.ATTR_SOUND_MODE_LIST])
            )
        payload = {"availableModes": modes}
        return payload
    def query_attributes(self):
        """Return current modes."""
        attrs = self.state.attributes
        response = {}
        mode_settings = {}
        if media_player.ATTR_INPUT_SOURCE_LIST in attrs:
            mode_settings["input source"] = attrs.get(media_player.ATTR_INPUT_SOURCE)
        if media_player.ATTR_SOUND_MODE_LIST in attrs:
            mode_settings["sound mode"] = attrs.get(media_player.ATTR_SOUND_MODE)
        if mode_settings:
            response["on"] = self.state.state != STATE_OFF
            response["online"] = True
            response["currentModeSettings"] = mode_settings
        return response
    async def execute(self, command, data, params, challenge):
        """Execute an SetModes command."""
        settings = params.get("updateModeSettings")
        requested_source = settings.get("input source")
        sound_mode = settings.get("sound mode")
        if requested_source:
            await self.hass.services.async_call(
                media_player.DOMAIN,
                media_player.SERVICE_SELECT_SOURCE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    media_player.ATTR_INPUT_SOURCE: requested_source,
                },
                blocking=True,
                context=data.context,
            )
        if sound_mode:
            await self.hass.services.async_call(
                media_player.DOMAIN,
                media_player.SERVICE_SELECT_SOUND_MODE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    media_player.ATTR_SOUND_MODE: sound_mode,
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class OpenCloseTrait(_Trait):
    """Trait to open and close a cover.
    https://developers.google.com/actions/smarthome/traits/openclose
    """
    # Cover device classes that require 2FA
    COVER_2FA = (cover.DEVICE_CLASS_DOOR, cover.DEVICE_CLASS_GARAGE)
    name = TRAIT_OPENCLOSE
    commands = [COMMAND_OPENCLOSE]
    # Position to report for assumed-state/unknown covers; remembered from
    # the last successful execute because the real state can't be queried.
    override_position = None
    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        if domain == cover.DOMAIN:
            return True
        # Door/window-style binary sensors are exposed as query-only covers.
        return domain == binary_sensor.DOMAIN and device_class in (
            binary_sensor.DEVICE_CLASS_DOOR,
            binary_sensor.DEVICE_CLASS_GARAGE_DOOR,
            binary_sensor.DEVICE_CLASS_LOCK,
            binary_sensor.DEVICE_CLASS_OPENING,
            binary_sensor.DEVICE_CLASS_WINDOW,
        )
    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return if the trait might ask for 2FA."""
        return domain == cover.DOMAIN and device_class in OpenCloseTrait.COVER_2FA
    def sync_attributes(self):
        """Return opening direction."""
        response = {}
        if self.state.domain == binary_sensor.DOMAIN:
            response["queryOnlyOpenClose"] = True
        return response
    def query_attributes(self):
        """Return state query attributes."""
        domain = self.state.domain
        response = {}
        if self.override_position is not None:
            response["openPercent"] = self.override_position
        elif domain == cover.DOMAIN:
            # When it's an assumed state, we will return that querying state
            # is not supported.
            if self.state.attributes.get(ATTR_ASSUMED_STATE):
                raise SmartHomeError(
                    ERR_NOT_SUPPORTED, "Querying state is not supported"
                )
            if self.state.state == STATE_UNKNOWN:
                raise SmartHomeError(
                    ERR_NOT_SUPPORTED, "Querying state is not supported"
                )
            position = self.override_position or self.state.attributes.get(
                cover.ATTR_CURRENT_POSITION
            )
            if position is not None:
                response["openPercent"] = position
            elif self.state.state != cover.STATE_CLOSED:
                # No position reporting: anything not closed counts as open.
                response["openPercent"] = 100
            else:
                response["openPercent"] = 0
        elif domain == binary_sensor.DOMAIN:
            if self.state.state == STATE_ON:
                response["openPercent"] = 100
            else:
                response["openPercent"] = 0
        return response
    async def execute(self, command, data, params, challenge):
        """Execute an Open, close, Set position command."""
        domain = self.state.domain
        if domain == cover.DOMAIN:
            svc_params = {ATTR_ENTITY_ID: self.state.entity_id}
            if params["openPercent"] == 0:
                service = cover.SERVICE_CLOSE_COVER
                should_verify = False
            elif params["openPercent"] == 100:
                service = cover.SERVICE_OPEN_COVER
                should_verify = True
            elif (
                self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
                & cover.SUPPORT_SET_POSITION
            ):
                service = cover.SERVICE_SET_COVER_POSITION
                should_verify = True
                svc_params[cover.ATTR_POSITION] = params["openPercent"]
            else:
                raise SmartHomeError(
                    ERR_FUNCTION_NOT_SUPPORTED, "Setting a position is not supported"
                )
            # Only opening (fully or partially) a door/garage requires 2FA;
            # closing is always allowed.
            if (
                should_verify
                and self.state.attributes.get(ATTR_DEVICE_CLASS)
                in OpenCloseTrait.COVER_2FA
            ):
                _verify_pin_challenge(data, self.state, challenge)
            await self.hass.services.async_call(
                cover.DOMAIN, service, svc_params, blocking=True, context=data.context
            )
            # Remember the commanded position for covers we cannot query.
            if (
                self.state.attributes.get(ATTR_ASSUMED_STATE)
                or self.state.state == STATE_UNKNOWN
            ):
                self.override_position = params["openPercent"]
@register_trait
class VolumeTrait(_Trait):
    """Trait to control the volume of a media player.

    https://developers.google.com/actions/smarthome/traits/volume
    """

    name = TRAIT_VOLUME
    commands = [COMMAND_SET_VOLUME, COMMAND_VOLUME_RELATIVE]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        if domain == media_player.DOMAIN:
            return features & media_player.SUPPORT_VOLUME_SET
        return False

    def sync_attributes(self):
        """Return volume attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return volume query attributes."""
        response = {}
        level = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL)
        muted = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_MUTED)
        if level is not None:
            # Convert 0.0-1.0 to 0-100
            response["currentVolume"] = int(level * 100)
            response["isMuted"] = bool(muted)
        return response

    async def _call_volume_set(self, data, level):
        """Call media_player.volume_set with a 0.0-1.0 level."""
        await self.hass.services.async_call(
            media_player.DOMAIN,
            media_player.SERVICE_VOLUME_SET,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                media_player.ATTR_MEDIA_VOLUME_LEVEL: level,
            },
            blocking=True,
            context=data.context,
        )

    async def _execute_set_volume(self, data, params):
        """Handle an absolute volume command (volumeLevel is 0-100)."""
        await self._call_volume_set(data, params["volumeLevel"] / 100)

    async def _execute_volume_relative(self, data, params):
        # This could also support up/down commands using relativeSteps
        relative = params["volumeRelativeLevel"]
        # NOTE(review): assumes the volume level attribute is present when
        # relative volume is requested — confirm against callers.
        current = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL)
        # Clamp to the valid 0.0-1.0 range so a large relative step near the
        # limits doesn't produce an out-of-range volume_set call.
        target = min(1.0, max(0.0, current + relative / 100))
        await self._call_volume_set(data, target)

    async def execute(self, command, data, params, challenge):
        """Execute a volume command."""
        if command == COMMAND_SET_VOLUME:
            await self._execute_set_volume(data, params)
        elif command == COMMAND_VOLUME_RELATIVE:
            await self._execute_volume_relative(data, params)
        else:
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")
def _verify_pin_challenge(data, state, challenge):
    """Verify a pin challenge, raising when the pin is absent or wrong."""
    if not data.config.should_2fa(state):
        return
    if not data.config.secure_devices_pin:
        # 2FA is required but no pin has been configured for secure devices.
        raise SmartHomeError(ERR_CHALLENGE_NOT_SETUP, "Challenge is not set up")
    if not challenge:
        raise ChallengeNeeded(CHALLENGE_PIN_NEEDED)
    if challenge.get("pin") != data.config.secure_devices_pin:
        raise ChallengeNeeded(CHALLENGE_FAILED_PIN_NEEDED)
def _verify_ack_challenge(data, state, challenge):
    """Verify an ack challenge, raising when it has not been acknowledged."""
    if not data.config.should_2fa(state):
        return
    acknowledged = bool(challenge and challenge.get("ack"))
    if not acknowledged:
        raise ChallengeNeeded(CHALLENGE_ACK_NEEDED)
import logging
from homeassistant.components import (
alarm_control_panel,
binary_sensor,
camera,
cover,
fan,
group,
input_boolean,
light,
lock,
media_player,
scene,
script,
sensor,
switch,
vacuum,
)
from homeassistant.components.climate import const as climate
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_CODE,
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_CUSTOM_BYPASS,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_ARM_NIGHT,
SERVICE_ALARM_DISARM,
SERVICE_ALARM_TRIGGER,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
STATE_LOCKED,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import DOMAIN as HA_DOMAIN
from homeassistant.util import color as color_util, temperature as temp_util
from .const import (
CHALLENGE_ACK_NEEDED,
CHALLENGE_FAILED_PIN_NEEDED,
CHALLENGE_PIN_NEEDED,
ERR_ALREADY_ARMED,
ERR_ALREADY_DISARMED,
ERR_CHALLENGE_NOT_SETUP,
ERR_FUNCTION_NOT_SUPPORTED,
ERR_NOT_SUPPORTED,
ERR_VALUE_OUT_OF_RANGE,
)
from .error import ChallengeNeeded, SmartHomeError
_LOGGER = logging.getLogger(__name__)
# Identifiers of the Google Smart Home traits this integration can expose.
PREFIX_TRAITS = "action.devices.traits."
TRAIT_CAMERA_STREAM = PREFIX_TRAITS + "CameraStream"
TRAIT_ONOFF = PREFIX_TRAITS + "OnOff"
TRAIT_DOCK = PREFIX_TRAITS + "Dock"
TRAIT_STARTSTOP = PREFIX_TRAITS + "StartStop"
TRAIT_BRIGHTNESS = PREFIX_TRAITS + "Brightness"
TRAIT_COLOR_SETTING = PREFIX_TRAITS + "ColorSetting"
TRAIT_SCENE = PREFIX_TRAITS + "Scene"
TRAIT_TEMPERATURE_SETTING = PREFIX_TRAITS + "TemperatureSetting"
TRAIT_LOCKUNLOCK = PREFIX_TRAITS + "LockUnlock"
TRAIT_FANSPEED = PREFIX_TRAITS + "FanSpeed"
TRAIT_MODES = PREFIX_TRAITS + "Modes"
TRAIT_OPENCLOSE = PREFIX_TRAITS + "OpenClose"
TRAIT_VOLUME = PREFIX_TRAITS + "Volume"
TRAIT_ARMDISARM = PREFIX_TRAITS + "ArmDisarm"
TRAIT_HUMIDITY_SETTING = PREFIX_TRAITS + "HumiditySetting"
# Identifiers of the Google Smart Home commands the traits above handle.
PREFIX_COMMANDS = "action.devices.commands."
COMMAND_ONOFF = PREFIX_COMMANDS + "OnOff"
COMMAND_GET_CAMERA_STREAM = PREFIX_COMMANDS + "GetCameraStream"
COMMAND_DOCK = PREFIX_COMMANDS + "Dock"
COMMAND_STARTSTOP = PREFIX_COMMANDS + "StartStop"
COMMAND_PAUSEUNPAUSE = PREFIX_COMMANDS + "PauseUnpause"
COMMAND_BRIGHTNESS_ABSOLUTE = PREFIX_COMMANDS + "BrightnessAbsolute"
COMMAND_COLOR_ABSOLUTE = PREFIX_COMMANDS + "ColorAbsolute"
COMMAND_ACTIVATE_SCENE = PREFIX_COMMANDS + "ActivateScene"
COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT = (
    PREFIX_COMMANDS + "ThermostatTemperatureSetpoint"
)
COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE = (
    PREFIX_COMMANDS + "ThermostatTemperatureSetRange"
)
COMMAND_THERMOSTAT_SET_MODE = PREFIX_COMMANDS + "ThermostatSetMode"
COMMAND_LOCKUNLOCK = PREFIX_COMMANDS + "LockUnlock"
COMMAND_FANSPEED = PREFIX_COMMANDS + "SetFanSpeed"
COMMAND_MODES = PREFIX_COMMANDS + "SetModes"
COMMAND_OPENCLOSE = PREFIX_COMMANDS + "OpenClose"
COMMAND_SET_VOLUME = PREFIX_COMMANDS + "setVolume"
COMMAND_VOLUME_RELATIVE = PREFIX_COMMANDS + "volumeRelative"
COMMAND_ARMDISARM = PREFIX_COMMANDS + "ArmDisarm"
# Registry of all trait classes; populated via the @register_trait decorator.
TRAITS = []
def register_trait(trait):
    """Decorate a trait class to register it as supported."""
    TRAITS.append(trait)
    return trait
def _google_temp_unit(units):
    """Return the Google temperature unit string for a Home Assistant unit."""
    return "F" if units == TEMP_FAHRENHEIT else "C"
class _Trait:
    """Represents a trait inside a Google Assistant device."""
    # Google command identifiers the trait can execute; set by subclasses.
    commands = []
    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return if the trait might ask for 2FA."""
        return False
    def __init__(self, hass, state, config):
        """Initialize a trait for a state."""
        self.hass = hass
        self.state = state
        self.config = config
    def sync_attributes(self):
        """Return attributes for a sync request."""
        raise NotImplementedError
    def query_attributes(self):
        """Return the attributes of this trait for this entity."""
        raise NotImplementedError
    def can_execute(self, command, params):
        """Test if command can be executed."""
        return command in self.commands
    async def execute(self, command, data, params, challenge):
        """Execute a trait command."""
        raise NotImplementedError
@register_trait
class BrightnessTrait(_Trait):
    """Trait to control the brightness of a light.

    https://developers.google.com/actions/smarthome/traits/brightness
    """

    name = TRAIT_BRIGHTNESS
    commands = [COMMAND_BRIGHTNESS_ABSOLUTE]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if the state is supported by this trait."""
        if domain != light.DOMAIN:
            return False
        return features & light.SUPPORT_BRIGHTNESS

    def sync_attributes(self):
        """Return brightness attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return brightness query attributes."""
        response = {}
        if self.state.domain == light.DOMAIN:
            # Home Assistant brightness is 0-255; Google wants 0-100.
            raw = self.state.attributes.get(light.ATTR_BRIGHTNESS)
            response["brightness"] = 0 if raw is None else int(100 * (raw / 255))
        return response

    async def execute(self, command, data, params, challenge):
        """Execute a brightness command."""
        if self.state.domain == light.DOMAIN:
            service_data = {
                ATTR_ENTITY_ID: self.state.entity_id,
                light.ATTR_BRIGHTNESS_PCT: params["brightness"],
            }
            await self.hass.services.async_call(
                light.DOMAIN,
                light.SERVICE_TURN_ON,
                service_data,
                blocking=True,
                context=data.context,
            )
@register_trait
class CameraStreamTrait(_Trait):
    """Trait to stream from a camera.

    https://developers.google.com/actions/smarthome/traits/camerastream
    """

    name = TRAIT_CAMERA_STREAM
    commands = [COMMAND_GET_CAMERA_STREAM]

    # Populated by the first GetCameraStream execution.
    stream_info = None

    @staticmethod
    def supported(domain, features, device_class):
        """Test if the state is supported by this trait."""
        if domain != camera.DOMAIN:
            return False
        return features & camera.SUPPORT_STREAM

    def sync_attributes(self):
        """Return stream attributes for a sync request."""
        return {
            "cameraStreamSupportedProtocols": ["hls"],
            "cameraStreamNeedAuthToken": False,
            "cameraStreamNeedDrmEncryption": False,
        }

    def query_attributes(self):
        """Return the last requested stream info, if any."""
        if not self.stream_info:
            return {}
        return self.stream_info

    async def execute(self, command, data, params, challenge):
        """Execute a GetCameraStream command."""
        url = await self.hass.components.camera.async_request_stream(
            self.state.entity_id, "hls"
        )
        base_url = self.hass.config.api.base_url
        self.stream_info = {"cameraStreamAccessUrl": base_url + url}
@register_trait
class OnOffTrait(_Trait):
    """Trait to offer basic on and off functionality.

    https://developers.google.com/actions/smarthome/traits/onoff
    """

    name = TRAIT_ONOFF
    commands = [COMMAND_ONOFF]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if the state is supported by this trait."""
        return domain in (
            group.DOMAIN,
            input_boolean.DOMAIN,
            switch.DOMAIN,
            fan.DOMAIN,
            light.DOMAIN,
            media_player.DOMAIN,
        )

    def sync_attributes(self):
        """Return OnOff attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return OnOff query attributes."""
        return {"on": self.state.state != STATE_OFF}

    async def execute(self, command, data, params, challenge):
        """Execute an OnOff command.

        Fix: both branches of the original if/else computed the identical
        `service` expression; only the service domain actually differed.
        """
        domain = self.state.domain
        # Groups are toggled through the homeassistant domain so each
        # member entity is switched regardless of its own domain.
        service_domain = HA_DOMAIN if domain == group.DOMAIN else domain
        service = SERVICE_TURN_ON if params["on"] else SERVICE_TURN_OFF
        await self.hass.services.async_call(
            service_domain,
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class ColorSettingTrait(_Trait):
    """Trait to offer color and color-temperature control of a light.

    https://developers.google.com/actions/smarthome/traits/colorsetting
    """

    name = TRAIT_COLOR_SETTING
    commands = [COMMAND_COLOR_ABSOLUTE]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if the state is supported by this trait."""
        if domain != light.DOMAIN:
            return False
        return features & light.SUPPORT_COLOR_TEMP or features & light.SUPPORT_COLOR

    def sync_attributes(self):
        """Return color setting attributes for a sync request."""
        attrs = self.state.attributes
        features = attrs.get(ATTR_SUPPORTED_FEATURES, 0)
        response = {}
        if features & light.SUPPORT_COLOR:
            response["colorModel"] = "hsv"
        if features & light.SUPPORT_COLOR_TEMP:
            # Mireds and Kelvin are inverses: the light's *minimum* mired
            # value corresponds to its *maximum* Kelvin value, and vice
            # versa, hence the crossed min/max mapping below.
            response["colorTemperatureRange"] = {
                "temperatureMaxK": color_util.color_temperature_mired_to_kelvin(
                    attrs.get(light.ATTR_MIN_MIREDS)
                ),
                "temperatureMinK": color_util.color_temperature_mired_to_kelvin(
                    attrs.get(light.ATTR_MAX_MIREDS)
                ),
            }
        return response

    def query_attributes(self):
        """Return color setting query attributes."""
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        color = {}
        if features & light.SUPPORT_COLOR:
            color_hs = self.state.attributes.get(light.ATTR_HS_COLOR)
            brightness = self.state.attributes.get(light.ATTR_BRIGHTNESS, 1)
            if color_hs is not None:
                # HA stores hue 0-360 and saturation 0-100; Google wants
                # saturation and value scaled to 0-1.
                color["spectrumHsv"] = {
                    "hue": color_hs[0],
                    "saturation": color_hs[1] / 100,
                    "value": brightness / 255,
                }
        if features & light.SUPPORT_COLOR_TEMP:
            temp = self.state.attributes.get(light.ATTR_COLOR_TEMP)
            # Some lights transiently report 0 mireds; converting that to
            # Kelvin would divide by zero, so warn and skip it instead.
            if temp == 0:
                _LOGGER.warning(
                    "Entity %s has incorrect color temperature %s",
                    self.state.entity_id,
                    temp,
                )
            elif temp is not None:
                color["temperatureK"] = color_util.color_temperature_mired_to_kelvin(
                    temp
                )
        response = {}
        if color:
            response["color"] = color
        return response

    async def execute(self, command, data, params, challenge):
        """Execute a ColorAbsolute command for one of the color models."""
        if "temperature" in params["color"]:
            temp = color_util.color_temperature_kelvin_to_mired(
                params["color"]["temperature"]
            )
            min_temp = self.state.attributes[light.ATTR_MIN_MIREDS]
            max_temp = self.state.attributes[light.ATTR_MAX_MIREDS]
            if temp < min_temp or temp > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    f"Temperature should be between {min_temp} and {max_temp}",
                )
            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_COLOR_TEMP: temp},
                blocking=True,
                context=data.context,
            )
        elif "spectrumRGB" in params["color"]:
            # spectrumRGB arrives as a packed 24-bit integer; format it as
            # a 6-digit hex string before converting to hue/saturation.
            hex_value = f"{params['color']['spectrumRGB']:06x}"
            color = color_util.color_RGB_to_hs(
                *color_util.rgb_hex_to_rgb_list(hex_value)
            )
            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_HS_COLOR: color},
                blocking=True,
                context=data.context,
            )
        elif "spectrumHSV" in params["color"]:
            # Scale Google's 0-1 saturation/value back to HA's 0-100
            # saturation and 0-255 brightness.
            color = params["color"]["spectrumHSV"]
            saturation = color["saturation"] * 100
            brightness = color["value"] * 255
            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    light.ATTR_HS_COLOR: [color["hue"], saturation],
                    light.ATTR_BRIGHTNESS: brightness,
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class SceneTrait(_Trait):
    """Trait to offer scene functionality.

    https://developers.google.com/actions/smarthome/traits/scene
    """

    name = TRAIT_SCENE
    commands = [COMMAND_ACTIVATE_SCENE]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if the state is supported by this trait."""
        return domain in (scene.DOMAIN, script.DOMAIN)

    def sync_attributes(self):
        """Return scene attributes for a sync request."""
        # Neither scenes nor scripts can be reversed.
        return {}

    def query_attributes(self):
        """Return scene query attributes."""
        return {}

    async def execute(self, command, data, params, challenge):
        """Execute an ActivateScene command."""
        # Scripts may run for a long time, so only block on scenes.
        should_block = self.state.domain != script.DOMAIN
        await self.hass.services.async_call(
            self.state.domain,
            SERVICE_TURN_ON,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=should_block,
            context=data.context,
        )
@register_trait
class DockTrait(_Trait):
    """Trait to offer dock functionality.

    https://developers.google.com/actions/smarthome/traits/dock
    """

    name = TRAIT_DOCK
    commands = [COMMAND_DOCK]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if the state is supported by this trait."""
        return domain == vacuum.DOMAIN

    def sync_attributes(self):
        """Return dock attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return dock query attributes."""
        docked = self.state.state == vacuum.STATE_DOCKED
        return {"isDocked": docked}

    async def execute(self, command, data, params, challenge):
        """Execute a Dock command by sending the vacuum back to base."""
        service_data = {ATTR_ENTITY_ID: self.state.entity_id}
        await self.hass.services.async_call(
            self.state.domain,
            vacuum.SERVICE_RETURN_TO_BASE,
            service_data,
            blocking=True,
            context=data.context,
        )
@register_trait
class StartStopTrait(_Trait):
    """Trait to offer StartStop functionality.

    https://developers.google.com/actions/smarthome/traits/startstop
    """

    name = TRAIT_STARTSTOP
    commands = [COMMAND_STARTSTOP, COMMAND_PAUSEUNPAUSE]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if the state is supported by this trait."""
        return domain == vacuum.DOMAIN

    def sync_attributes(self):
        """Return StartStop attributes for a sync request."""
        return {
            "pausable": self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
            & vacuum.SUPPORT_PAUSE
            != 0
        }

    def query_attributes(self):
        """Return StartStop query attributes."""
        return {
            "isRunning": self.state.state == vacuum.STATE_CLEANING,
            "isPaused": self.state.state == vacuum.STATE_PAUSED,
        }

    async def execute(self, command, data, params, challenge):
        """Execute a StartStop or PauseUnpause command.

        Fix: the original contained four near-identical copy-pasted
        service-call blocks; they are collapsed into one call after
        resolving the target service.
        """
        if command == COMMAND_STARTSTOP:
            service = vacuum.SERVICE_START if params["start"] else vacuum.SERVICE_STOP
        elif command == COMMAND_PAUSEUNPAUSE:
            # Unpausing is performed by (re)starting the vacuum.
            service = vacuum.SERVICE_PAUSE if params["pause"] else vacuum.SERVICE_START
        else:
            # Unknown command: preserve the original no-op behavior.
            return
        await self.hass.services.async_call(
            self.state.domain,
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class TemperatureSettingTrait(_Trait):
    """Trait to offer thermostat temperature point and mode functionality.

    Climate entities get full control; temperature sensors are exposed as
    query-only thermostats.
    https://developers.google.com/actions/smarthome/traits/temperaturesetting
    """

    name = TRAIT_TEMPERATURE_SETTING
    commands = [
        COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT,
        COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE,
        COMMAND_THERMOSTAT_SET_MODE,
    ]
    # Mapping between Home Assistant HVAC modes and Google thermostat modes.
    hvac_to_google = {
        climate.HVAC_MODE_HEAT: "heat",
        climate.HVAC_MODE_COOL: "cool",
        climate.HVAC_MODE_OFF: "off",
        climate.HVAC_MODE_AUTO: "auto",
        climate.HVAC_MODE_HEAT_COOL: "heatcool",
        climate.HVAC_MODE_FAN_ONLY: "fan-only",
        climate.HVAC_MODE_DRY: "dry",
    }
    google_to_hvac = {value: key for key, value in hvac_to_google.items()}
    # Home Assistant presets exposed as extra Google modes.
    preset_to_google = {climate.PRESET_ECO: "eco"}
    google_to_preset = {value: key for key, value in preset_to_google.items()}

    @staticmethod
    def supported(domain, features, device_class):
        """Test if the state is supported by this trait."""
        if domain == climate.DOMAIN:
            return True
        # Temperature sensors get a query-only thermostat.
        return (
            domain == sensor.DOMAIN and device_class == sensor.DEVICE_CLASS_TEMPERATURE
        )

    @property
    def climate_google_modes(self):
        """Return the Google modes supported by this climate entity."""
        modes = []
        attrs = self.state.attributes
        for mode in attrs.get(climate.ATTR_HVAC_MODES, []):
            google_mode = self.hvac_to_google.get(mode)
            if google_mode and google_mode not in modes:
                modes.append(google_mode)
        for preset in attrs.get(climate.ATTR_PRESET_MODES, []):
            google_mode = self.preset_to_google.get(preset)
            if google_mode and google_mode not in modes:
                modes.append(google_mode)
        return modes

    def sync_attributes(self):
        """Return temperature point and modes attributes for a sync request."""
        response = {}
        attrs = self.state.attributes
        domain = self.state.domain
        response["thermostatTemperatureUnit"] = _google_temp_unit(
            self.hass.config.units.temperature_unit
        )
        if domain == sensor.DOMAIN:
            device_class = attrs.get(ATTR_DEVICE_CLASS)
            if device_class == sensor.DEVICE_CLASS_TEMPERATURE:
                response["queryOnlyTemperatureSetting"] = True
        elif domain == climate.DOMAIN:
            modes = self.climate_google_modes
            # Google requires at least one mode; this placeholder is
            # only cosmetic.
            if len(modes) == 0:
                modes.append("heat")
            # Advertise "on" so Google can turn the unit back on when it
            # exposes an off state plus at least one active mode.
            if "off" in modes and any(
                mode in modes for mode in ("heatcool", "heat", "cool")
            ):
                modes.append("on")
            response["availableThermostatModes"] = ",".join(modes)
        return response

    def query_attributes(self):
        """Return temperature point and modes query attributes."""
        response = {}
        attrs = self.state.attributes
        domain = self.state.domain
        unit = self.hass.config.units.temperature_unit
        if domain == sensor.DOMAIN:
            device_class = attrs.get(ATTR_DEVICE_CLASS)
            if device_class == sensor.DEVICE_CLASS_TEMPERATURE:
                current_temp = self.state.state
                if current_temp not in (STATE_UNKNOWN, STATE_UNAVAILABLE):
                    # Google expects Celsius regardless of the unit system.
                    response["thermostatTemperatureAmbient"] = round(
                        temp_util.convert(float(current_temp), unit, TEMP_CELSIUS), 1
                    )
        elif domain == climate.DOMAIN:
            operation = self.state.state
            preset = attrs.get(climate.ATTR_PRESET_MODE)
            supported = attrs.get(ATTR_SUPPORTED_FEATURES, 0)
            # An active preset (e.g. eco) takes precedence over the HVAC mode.
            if preset in self.preset_to_google:
                response["thermostatMode"] = self.preset_to_google[preset]
            else:
                response["thermostatMode"] = self.hvac_to_google.get(operation)
            current_temp = attrs.get(climate.ATTR_CURRENT_TEMPERATURE)
            if current_temp is not None:
                response["thermostatTemperatureAmbient"] = round(
                    temp_util.convert(current_temp, unit, TEMP_CELSIUS), 1
                )
            current_humidity = attrs.get(climate.ATTR_CURRENT_HUMIDITY)
            if current_humidity is not None:
                response["thermostatHumidityAmbient"] = current_humidity
            if operation in (climate.HVAC_MODE_AUTO, climate.HVAC_MODE_HEAT_COOL):
                if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
                    response["thermostatTemperatureSetpointHigh"] = round(
                        temp_util.convert(
                            attrs[climate.ATTR_TARGET_TEMP_HIGH], unit, TEMP_CELSIUS
                        ),
                        1,
                    )
                    response["thermostatTemperatureSetpointLow"] = round(
                        temp_util.convert(
                            attrs[climate.ATTR_TARGET_TEMP_LOW], unit, TEMP_CELSIUS
                        ),
                        1,
                    )
                else:
                    # No real range: report the single target for both bounds.
                    target_temp = attrs.get(ATTR_TEMPERATURE)
                    if target_temp is not None:
                        target_temp = round(
                            temp_util.convert(target_temp, unit, TEMP_CELSIUS), 1
                        )
                        response["thermostatTemperatureSetpointHigh"] = target_temp
                        response["thermostatTemperatureSetpointLow"] = target_temp
            else:
                target_temp = attrs.get(ATTR_TEMPERATURE)
                if target_temp is not None:
                    response["thermostatTemperatureSetpoint"] = round(
                        temp_util.convert(target_temp, unit, TEMP_CELSIUS), 1
                    )
        return response

    async def execute(self, command, data, params, challenge):
        """Execute a temperature point or mode command."""
        domain = self.state.domain
        if domain == sensor.DOMAIN:
            raise SmartHomeError(
                ERR_NOT_SUPPORTED, "Execute is not supported by sensor"
            )
        # All sent in temperatures are always in Celsius
        unit = self.hass.config.units.temperature_unit
        min_temp = self.state.attributes[climate.ATTR_MIN_TEMP]
        max_temp = self.state.attributes[climate.ATTR_MAX_TEMP]
        if command == COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT:
            temp = temp_util.convert(
                params["thermostatTemperatureSetpoint"], TEMP_CELSIUS, unit
            )
            # Fahrenheit thermostats work in whole degrees.
            if unit == TEMP_FAHRENHEIT:
                temp = round(temp)
            if temp < min_temp or temp > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    f"Temperature should be between {min_temp} and {max_temp}",
                )
            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_TEMPERATURE,
                {ATTR_ENTITY_ID: self.state.entity_id, ATTR_TEMPERATURE: temp},
                blocking=True,
                context=data.context,
            )
        elif command == COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE:
            temp_high = temp_util.convert(
                params["thermostatTemperatureSetpointHigh"], TEMP_CELSIUS, unit
            )
            if unit == TEMP_FAHRENHEIT:
                temp_high = round(temp_high)
            if temp_high < min_temp or temp_high > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    (
                        f"Upper bound for temperature range should be between "
                        f"{min_temp} and {max_temp}"
                    ),
                )
            temp_low = temp_util.convert(
                params["thermostatTemperatureSetpointLow"], TEMP_CELSIUS, unit
            )
            if unit == TEMP_FAHRENHEIT:
                temp_low = round(temp_low)
            if temp_low < min_temp or temp_low > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    (
                        f"Lower bound for temperature range should be between "
                        f"{min_temp} and {max_temp}"
                    ),
                )
            supported = self.state.attributes.get(ATTR_SUPPORTED_FEATURES)
            svc_data = {ATTR_ENTITY_ID: self.state.entity_id}
            if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
                svc_data[climate.ATTR_TARGET_TEMP_HIGH] = temp_high
                svc_data[climate.ATTR_TARGET_TEMP_LOW] = temp_low
            else:
                # Entities without range support get the midpoint.
                svc_data[ATTR_TEMPERATURE] = (temp_high + temp_low) / 2
            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_TEMPERATURE,
                svc_data,
                blocking=True,
                context=data.context,
            )
        elif command == COMMAND_THERMOSTAT_SET_MODE:
            target_mode = params["thermostatMode"]
            supported = self.state.attributes.get(ATTR_SUPPORTED_FEATURES)
            # "on"/"off" map to plain turn_on/turn_off services.
            if target_mode == "on":
                await self.hass.services.async_call(
                    climate.DOMAIN,
                    SERVICE_TURN_ON,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
                return
            if target_mode == "off":
                await self.hass.services.async_call(
                    climate.DOMAIN,
                    SERVICE_TURN_OFF,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
                return
            # Preset-backed modes (e.g. eco) set a preset instead of an
            # HVAC mode.
            if target_mode in self.google_to_preset:
                await self.hass.services.async_call(
                    climate.DOMAIN,
                    climate.SERVICE_SET_PRESET_MODE,
                    {
                        climate.ATTR_PRESET_MODE: self.google_to_preset[target_mode],
                        ATTR_ENTITY_ID: self.state.entity_id,
                    },
                    blocking=True,
                    context=data.context,
                )
                return
            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_HVAC_MODE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    climate.ATTR_HVAC_MODE: self.google_to_hvac[target_mode],
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class HumiditySettingTrait(_Trait):
    """Trait to offer (query-only) humidity functionality.

    https://developers.google.com/actions/smarthome/traits/humiditysetting
    """

    name = TRAIT_HUMIDITY_SETTING
    commands = []

    @staticmethod
    def supported(domain, features, device_class):
        """Test if the state is supported by this trait."""
        return domain == sensor.DOMAIN and device_class == sensor.DEVICE_CLASS_HUMIDITY

    def _is_humidity_sensor(self):
        """Return True if this entity is a humidity sensor."""
        return (
            self.state.domain == sensor.DOMAIN
            and self.state.attributes.get(ATTR_DEVICE_CLASS)
            == sensor.DEVICE_CLASS_HUMIDITY
        )

    def sync_attributes(self):
        """Return humidity attributes for a sync request."""
        response = {}
        if self._is_humidity_sensor():
            response["queryOnlyHumiditySetting"] = True
        return response

    def query_attributes(self):
        """Return humidity query attributes."""
        response = {}
        if self._is_humidity_sensor():
            humidity = self.state.state
            if humidity not in (STATE_UNKNOWN, STATE_UNAVAILABLE):
                response["humidityAmbientPercent"] = round(float(humidity))
        return response

    async def execute(self, command, data, params, challenge):
        """Execute a humidity command (sensors are query-only)."""
        if self.state.domain == sensor.DOMAIN:
            raise SmartHomeError(
                ERR_NOT_SUPPORTED, "Execute is not supported by sensor"
            )
@register_trait
class LockUnlockTrait(_Trait):
    """Trait to lock or unlock a lock.

    https://developers.google.com/actions/smarthome/traits/lockunlock
    """

    name = TRAIT_LOCKUNLOCK
    commands = [COMMAND_LOCKUNLOCK]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if the state is supported by this trait."""
        return domain == lock.DOMAIN

    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return True: unlocking may ask for a 2FA challenge."""
        return True

    def sync_attributes(self):
        """Return LockUnlock attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return LockUnlock query attributes."""
        return {"isLocked": self.state.state == STATE_LOCKED}

    async def execute(self, command, data, params, challenge):
        """Execute a LockUnlock command."""
        locking = params["lock"]
        if not locking:
            # Unlocking is security sensitive and may require a PIN.
            _verify_pin_challenge(data, self.state, challenge)
        service = lock.SERVICE_LOCK if locking else lock.SERVICE_UNLOCK
        await self.hass.services.async_call(
            lock.DOMAIN,
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class ArmDisArmTrait(_Trait):
    """Trait to arm or disarm a security system.

    https://developers.google.com/actions/smarthome/traits/armdisarm
    """

    name = TRAIT_ARMDISARM
    commands = [COMMAND_ARMDISARM]

    # Maps each supported alarm state to the service that produces it.
    state_to_service = {
        STATE_ALARM_ARMED_HOME: SERVICE_ALARM_ARM_HOME,
        STATE_ALARM_ARMED_AWAY: SERVICE_ALARM_ARM_AWAY,
        STATE_ALARM_ARMED_NIGHT: SERVICE_ALARM_ARM_NIGHT,
        STATE_ALARM_ARMED_CUSTOM_BYPASS: SERVICE_ALARM_ARM_CUSTOM_BYPASS,
        STATE_ALARM_TRIGGERED: SERVICE_ALARM_TRIGGER,
    }

    @staticmethod
    def supported(domain, features, device_class):
        """Test if the state is supported by this trait."""
        return domain == alarm_control_panel.DOMAIN

    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return True: arm/disarm may ask for a 2FA challenge."""
        return True

    def sync_attributes(self):
        """Return ArmDisarm attributes for a sync request."""
        response = {}
        levels = []
        for state in self.state_to_service:
            # level synonyms are generated from state names
            # 'armed_away' becomes 'armed away' or 'away'
            level_synonym = [state.replace("_", " ")]
            if state != STATE_ALARM_TRIGGERED:
                level_synonym.append(state.split("_")[1])
            level = {
                "level_name": state,
                "level_values": [{"level_synonym": level_synonym, "lang": "en"}],
            }
            levels.append(level)
        response["availableArmLevels"] = {"levels": levels, "ordered": False}
        return response

    def query_attributes(self):
        """Return ArmDisarm query attributes."""
        # While arming is pending, report the state that will be reached.
        if "post_pending_state" in self.state.attributes:
            armed_state = self.state.attributes["post_pending_state"]
        else:
            armed_state = self.state.state
        response = {"isArmed": armed_state in self.state_to_service}
        if response["isArmed"]:
            response.update({"currentArmLevel": armed_state})
        return response

    async def execute(self, command, data, params, challenge):
        """Execute an ArmDisarm command."""
        if params["arm"] and not params.get("cancel"):
            # Arming: verify the PIN only if the panel requires a code.
            if self.state.state == params["armLevel"]:
                raise SmartHomeError(ERR_ALREADY_ARMED, "System is already armed")
            if self.state.attributes["code_arm_required"]:
                _verify_pin_challenge(data, self.state, challenge)
            service = self.state_to_service[params["armLevel"]]
        # disarm the system without asking for code when
        # 'cancel' arming action is received while current status is pending
        elif (
            params["arm"]
            and params.get("cancel")
            and self.state.state == STATE_ALARM_PENDING
        ):
            service = SERVICE_ALARM_DISARM
        else:
            # Plain disarm always requires the 2FA PIN.
            if self.state.state == STATE_ALARM_DISARMED:
                raise SmartHomeError(ERR_ALREADY_DISARMED, "System is already disarmed")
            _verify_pin_challenge(data, self.state, challenge)
            service = SERVICE_ALARM_DISARM
        await self.hass.services.async_call(
            alarm_control_panel.DOMAIN,
            service,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                ATTR_CODE: data.config.secure_devices_pin,
            },
            blocking=True,
            context=data.context,
        )
@register_trait
class FanSpeedTrait(_Trait):
    """Trait to control the speed of a fan.

    https://developers.google.com/actions/smarthome/traits/fanspeed
    """

    name = TRAIT_FANSPEED
    commands = [COMMAND_FANSPEED]

    # Spoken synonyms Google users may say for each fan speed.
    speed_synonyms = {
        fan.SPEED_OFF: ["stop", "off"],
        fan.SPEED_LOW: ["slow", "low", "slowest", "lowest"],
        fan.SPEED_MEDIUM: ["medium", "mid", "middle"],
        fan.SPEED_HIGH: ["high", "max", "fast", "highest", "fastest", "maximum"],
    }

    @staticmethod
    def supported(domain, features, device_class):
        """Test if the state is supported by this trait."""
        if domain != fan.DOMAIN:
            return False
        return features & fan.SUPPORT_SET_SPEED

    def sync_attributes(self):
        """Return speed point and modes attributes for a sync request."""
        supported_modes = self.state.attributes.get(fan.ATTR_SPEED_LIST, [])
        speeds = [
            {
                "speed_name": mode,
                "speed_values": [
                    {"speed_synonym": self.speed_synonyms.get(mode), "lang": "en"}
                ],
            }
            for mode in supported_modes
            if mode in self.speed_synonyms
        ]
        reversible = bool(
            self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
            & fan.SUPPORT_DIRECTION
        )
        return {
            "availableFanSpeeds": {"speeds": speeds, "ordered": True},
            "reversible": reversible,
        }

    def query_attributes(self):
        """Return speed point and modes query attributes."""
        response = {}
        current = self.state.attributes.get(fan.ATTR_SPEED)
        if current is not None:
            response["on"] = current != fan.SPEED_OFF
            response["online"] = True
            response["currentFanSpeedSetting"] = current
        return response

    async def execute(self, command, data, params, challenge):
        """Execute a SetFanSpeed command."""
        await self.hass.services.async_call(
            fan.DOMAIN,
            fan.SERVICE_SET_SPEED,
            {ATTR_ENTITY_ID: self.state.entity_id, fan.ATTR_SPEED: params["fanSpeed"]},
            blocking=True,
            context=data.context,
        )
@register_trait
class ModesTrait(_Trait):
    """Trait to set modes on a media player.

    https://developers.google.com/actions/smarthome/traits/modes
    """

    name = TRAIT_MODES
    commands = [COMMAND_MODES]

    # Spoken synonyms Google users may say for each mode/setting name.
    SYNONYMS = {
        "input source": ["input source", "input", "source"],
        "sound mode": ["sound mode", "effects"],
    }

    @staticmethod
    def supported(domain, features, device_class):
        """Test if the state is supported by this trait."""
        if domain != media_player.DOMAIN:
            return False
        return (
            features & media_player.SUPPORT_SELECT_SOURCE
            or features & media_player.SUPPORT_SELECT_SOUND_MODE
        )

    def sync_attributes(self):
        """Return mode attributes for a sync request."""

        def _generate(name, settings):
            """Build the Google mode descriptor for one mode."""
            mode = {
                "name": name,
                "name_values": [
                    {"name_synonym": self.SYNONYMS.get(name, [name]), "lang": "en"}
                ],
                "settings": [],
                "ordered": False,
            }
            for setting in settings:
                mode["settings"].append(
                    {
                        "setting_name": setting,
                        "setting_values": [
                            {
                                "setting_synonym": self.SYNONYMS.get(
                                    setting, [setting]
                                ),
                                "lang": "en",
                            }
                        ],
                    }
                )
            return mode

        attrs = self.state.attributes
        modes = []
        if media_player.ATTR_INPUT_SOURCE_LIST in attrs:
            modes.append(
                _generate("input source", attrs[media_player.ATTR_INPUT_SOURCE_LIST])
            )
        if media_player.ATTR_SOUND_MODE_LIST in attrs:
            modes.append(
                _generate("sound mode", attrs[media_player.ATTR_SOUND_MODE_LIST])
            )
        payload = {"availableModes": modes}
        return payload

    def query_attributes(self):
        """Return current mode query attributes."""
        attrs = self.state.attributes
        response = {}
        mode_settings = {}
        if media_player.ATTR_INPUT_SOURCE_LIST in attrs:
            mode_settings["input source"] = attrs.get(media_player.ATTR_INPUT_SOURCE)
        if media_player.ATTR_SOUND_MODE_LIST in attrs:
            mode_settings["sound mode"] = attrs.get(media_player.ATTR_SOUND_MODE)
        if mode_settings:
            response["on"] = self.state.state != STATE_OFF
            response["online"] = True
            response["currentModeSettings"] = mode_settings
        return response

    async def execute(self, command, data, params, challenge):
        """Execute a SetModes command.

        Fix: params.get("updateModeSettings") may be None when Google
        sends no settings payload; the original then raised
        AttributeError on the subsequent .get() calls. Fall back to an
        empty mapping so the command becomes a harmless no-op instead.
        """
        settings = params.get("updateModeSettings") or {}
        requested_source = settings.get("input source")
        sound_mode = settings.get("sound mode")
        if requested_source:
            await self.hass.services.async_call(
                media_player.DOMAIN,
                media_player.SERVICE_SELECT_SOURCE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    media_player.ATTR_INPUT_SOURCE: requested_source,
                },
                blocking=True,
                context=data.context,
            )
        if sound_mode:
            await self.hass.services.async_call(
                media_player.DOMAIN,
                media_player.SERVICE_SELECT_SOUND_MODE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    media_player.ATTR_SOUND_MODE: sound_mode,
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class OpenCloseTrait(_Trait):
    """Trait to open and close a cover (or report a binary sensor).

    https://developers.google.com/actions/smarthome/traits/openclose
    """

    # Cover device classes that require 2FA
    COVER_2FA = (cover.DEVICE_CLASS_DOOR, cover.DEVICE_CLASS_GARAGE)

    name = TRAIT_OPENCLOSE
    commands = [COMMAND_OPENCLOSE]

    # Remembered position for assumed-state covers, whose real position
    # cannot be queried back from the device.
    override_position = None

    @staticmethod
    def supported(domain, features, device_class):
        """Test if the state is supported by this trait."""
        if domain == cover.DOMAIN:
            return True
        # Binary sensors of door-like classes get query-only support.
        return domain == binary_sensor.DOMAIN and device_class in (
            binary_sensor.DEVICE_CLASS_DOOR,
            binary_sensor.DEVICE_CLASS_GARAGE_DOOR,
            binary_sensor.DEVICE_CLASS_LOCK,
            binary_sensor.DEVICE_CLASS_OPENING,
            binary_sensor.DEVICE_CLASS_WINDOW,
        )

    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return True for covers whose device class requires 2FA."""
        return domain == cover.DOMAIN and device_class in OpenCloseTrait.COVER_2FA

    def sync_attributes(self):
        """Return OpenClose attributes for a sync request."""
        response = {}
        if self.state.domain == binary_sensor.DOMAIN:
            response["queryOnlyOpenClose"] = True
        return response

    def query_attributes(self):
        """Return OpenClose query attributes."""
        domain = self.state.domain
        response = {}
        if self.override_position is not None:
            response["openPercent"] = self.override_position
        elif domain == cover.DOMAIN:
            # Assumed-state covers don't know their real position, so
            # refuse to answer queries rather than guess.
            if self.state.attributes.get(ATTR_ASSUMED_STATE):
                raise SmartHomeError(
                    ERR_NOT_SUPPORTED, "Querying state is not supported"
                )
            if self.state.state == STATE_UNKNOWN:
                raise SmartHomeError(
                    ERR_NOT_SUPPORTED, "Querying state is not supported"
                )
            # NOTE(review): `self.override_position or` is redundant here -
            # this branch only runs when override_position is None.
            position = self.override_position or self.state.attributes.get(
                cover.ATTR_CURRENT_POSITION
            )
            if position is not None:
                response["openPercent"] = position
            elif self.state.state != cover.STATE_CLOSED:
                response["openPercent"] = 100
            else:
                response["openPercent"] = 0
        elif domain == binary_sensor.DOMAIN:
            # Binary sensors only report fully open or fully closed.
            if self.state.state == STATE_ON:
                response["openPercent"] = 100
            else:
                response["openPercent"] = 0
        return response

    async def execute(self, command, data, params, challenge):
        """Execute an OpenClose command."""
        domain = self.state.domain
        if domain == cover.DOMAIN:
            svc_params = {ATTR_ENTITY_ID: self.state.entity_id}
            if params["openPercent"] == 0:
                service = cover.SERVICE_CLOSE_COVER
                should_verify = False
            elif params["openPercent"] == 100:
                service = cover.SERVICE_OPEN_COVER
                should_verify = True
            elif (
                self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
                & cover.SUPPORT_SET_POSITION
            ):
                service = cover.SERVICE_SET_COVER_POSITION
                should_verify = True
                svc_params[cover.ATTR_POSITION] = params["openPercent"]
            else:
                raise SmartHomeError(
                    ERR_FUNCTION_NOT_SUPPORTED, "Setting a position is not supported"
                )
            # Opening (fully or partially) a door/garage cover needs 2FA;
            # closing never does.
            if (
                should_verify
                and self.state.attributes.get(ATTR_DEVICE_CLASS)
                in OpenCloseTrait.COVER_2FA
            ):
                _verify_pin_challenge(data, self.state, challenge)
            await self.hass.services.async_call(
                cover.DOMAIN, service, svc_params, blocking=True, context=data.context
            )
            # Remember the commanded position for covers that can't
            # report their own state back.
            if (
                self.state.attributes.get(ATTR_ASSUMED_STATE)
                or self.state.state == STATE_UNKNOWN
            ):
                self.override_position = params["openPercent"]
@register_trait
class VolumeTrait(_Trait):
    """Trait to control the volume of a media player.

    https://developers.google.com/actions/smarthome/traits/volume
    """

    name = TRAIT_VOLUME
    commands = [COMMAND_SET_VOLUME, COMMAND_VOLUME_RELATIVE]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if the state is supported by this trait."""
        if domain == media_player.DOMAIN:
            return features & media_player.SUPPORT_VOLUME_SET
        return False

    def sync_attributes(self):
        """Return volume attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return volume query attributes."""
        response = {}
        level = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL)
        muted = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_MUTED)
        if level is not None:
            # Home Assistant volume is 0.0-1.0; Google wants 0-100.
            response["currentVolume"] = int(level * 100)
            response["isMuted"] = bool(muted)
        return response

    async def _execute_set_volume(self, data, params):
        """Set the absolute volume (Google sends 0-100)."""
        level = params["volumeLevel"]
        await self.hass.services.async_call(
            media_player.DOMAIN,
            media_player.SERVICE_VOLUME_SET,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                media_player.ATTR_MEDIA_VOLUME_LEVEL: level / 100,
            },
            blocking=True,
            context=data.context,
        )

    async def _execute_volume_relative(self, data, params):
        """Adjust the volume by a relative step (Google sends -100..100)."""
        relative = params["volumeRelativeLevel"]
        current = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL)
        # Fix: clamp the target into the valid 0.0-1.0 range so a large
        # relative step cannot request an out-of-range volume.
        target = max(0.0, min(1.0, current + relative / 100))
        await self.hass.services.async_call(
            media_player.DOMAIN,
            media_player.SERVICE_VOLUME_SET,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                media_player.ATTR_MEDIA_VOLUME_LEVEL: target,
            },
            blocking=True,
            context=data.context,
        )

    async def execute(self, command, data, params, challenge):
        """Execute a volume command."""
        if command == COMMAND_SET_VOLUME:
            await self._execute_set_volume(data, params)
        elif command == COMMAND_VOLUME_RELATIVE:
            await self._execute_volume_relative(data, params)
        else:
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")
def _verify_pin_challenge(data, state, challenge):
    """Verify a PIN challenge, raising if it is missing or wrong."""
    if not data.config.should_2fa(state):
        return
    if not data.config.secure_devices_pin:
        raise SmartHomeError(ERR_CHALLENGE_NOT_SETUP, "Challenge is not set up")
    if not challenge:
        raise ChallengeNeeded(CHALLENGE_PIN_NEEDED)
    if challenge.get("pin") != data.config.secure_devices_pin:
        raise ChallengeNeeded(CHALLENGE_FAILED_PIN_NEEDED)
def _verify_ack_challenge(data, state, challenge):
    """Verify an acknowledge challenge, raising if not acknowledged."""
    if not data.config.should_2fa(state):
        return
    if not (challenge and challenge.get("ack")):
        raise ChallengeNeeded(CHALLENGE_ACK_NEEDED)
| true | true |
1c47be6838a559b898608b686a690144038060ab | 811 | py | Python | mysite/mysite/urls.py | xinkaiwang/robotJump | 622e97451f450b755aecbd60e15b2cd47d875f47 | [
"MIT"
] | null | null | null | mysite/mysite/urls.py | xinkaiwang/robotJump | 622e97451f450b755aecbd60e15b2cd47d875f47 | [
"MIT"
] | null | null | null | mysite/mysite/urls.py | xinkaiwang/robotJump | 622e97451f450b755aecbd60e15b2cd47d875f47 | [
"MIT"
] | null | null | null | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    # Django admin site.
    url(r'^admin/', admin.site.urls),
    # Delegate all remaining URLs to the buckets app.
    url('^', include('buckets.urls')),
]
| 35.26087 | 79 | 0.696671 | from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url('^', include('buckets.urls')),
]
| true | true |
1c47be6c708b01f8c5d2442695b7f5df61fef530 | 1,547 | py | Python | tests/python/gaia-ui-tests/gaiatest/tests/functional/clock/test_clock_run_stopwatch_laps.py | NickProgramm/gaia | 975a35c0f5010df341e96d6c5ec60217f5347412 | [
"Apache-2.0"
] | 3 | 2016-08-17T08:52:51.000Z | 2020-03-29T04:56:45.000Z | tests/python/gaia-ui-tests/gaiatest/tests/functional/clock/test_clock_run_stopwatch_laps.py | NickProgramm/gaia | 975a35c0f5010df341e96d6c5ec60217f5347412 | [
"Apache-2.0"
] | null | null | null | tests/python/gaia-ui-tests/gaiatest/tests/functional/clock/test_clock_run_stopwatch_laps.py | NickProgramm/gaia | 975a35c0f5010df341e96d6c5ec60217f5347412 | [
"Apache-2.0"
] | 1 | 2021-11-18T21:21:19.000Z | 2021-11-18T21:21:19.000Z | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
from gaiatest.apps.clock.app import Clock
import time
class TestClockRunStopWatch(GaiaTestCase):
    """UI test: run the Clock app stopwatch, record laps, pause/resume/reset."""
    def setUp(self):
        GaiaTestCase.setUp(self)
        # Launch the Clock app before every test.
        self.clock = Clock(self.marionette)
        self.clock.launch()
    def test_click_run_stopwatch_laps(self):
        """Start the stopwatch, take a lap, pause/resume, then reset."""
        stopwatch_view = self.clock.switch_view("stopwatch")
        self.assertEqual(stopwatch_view.current_time, '00:00.00')
        stopwatch_view.tap_start()
        # Short sleeps let the stopwatch advance between interactions.
        time.sleep(0.2)
        self.assertNotEqual(stopwatch_view.current_time, '00:00.00')
        stopwatch_view.tap_lap()
        time.sleep(0.2)
        # Tapping lap once yields two entries: the finished lap and the
        # currently running one.
        self.assertEqual(len(stopwatch_view.lap_items), 2)
        self.assertNotEqual(stopwatch_view.lap_items[0].time, '00:00.00')
        self.assertNotEqual(stopwatch_view.lap_items[1].time, '00:00.00')
        self.assertNotEqual(stopwatch_view.lap_items[0].time, stopwatch_view.lap_items[1].time)
        stopwatch_view.tap_pause()
        recorded_time = stopwatch_view.current_time
        stopwatch_view.tap_resume()
        time.sleep(0.2)
        # After resuming, the displayed time must have moved on.
        self.assertNotEqual(stopwatch_view.current_time, recorded_time)
        stopwatch_view.tap_pause()
        stopwatch_view.tap_reset()
        # Reset clears all laps and returns the display to zero.
        self.assertEqual(len(stopwatch_view.lap_items), 0)
        self.assertEqual(stopwatch_view.current_time, '00:00.00')
| 30.94 | 95 | 0.701357 |
from gaiatest import GaiaTestCase
from gaiatest.apps.clock.app import Clock
import time
class TestClockRunStopWatch(GaiaTestCase):
    """UI test: run the Clock app stopwatch through start/lap/pause/resume/reset."""
    def setUp(self):
        """Launch the Clock app before the test runs."""
        GaiaTestCase.setUp(self)
        self.clock = Clock(self.marionette)
        self.clock.launch()
    def test_click_run_stopwatch_laps(self):
        """Start the stopwatch, record a lap, pause/resume, then reset to zero."""
        stopwatch_view = self.clock.switch_view("stopwatch")
        # A fresh stopwatch shows the zero reading.
        self.assertEqual(stopwatch_view.current_time, '00:00.00')
        stopwatch_view.tap_start()
        # Short sleeps give the ticking display time to advance between checks.
        time.sleep(0.2)
        self.assertNotEqual(stopwatch_view.current_time, '00:00.00')
        stopwatch_view.tap_lap()
        time.sleep(0.2)
        # One lap tap produces two rows with distinct, non-zero times.
        self.assertEqual(len(stopwatch_view.lap_items), 2)
        self.assertNotEqual(stopwatch_view.lap_items[0].time, '00:00.00')
        self.assertNotEqual(stopwatch_view.lap_items[1].time, '00:00.00')
        self.assertNotEqual(stopwatch_view.lap_items[0].time, stopwatch_view.lap_items[1].time)
        stopwatch_view.tap_pause()
        recorded_time = stopwatch_view.current_time
        stopwatch_view.tap_resume()
        time.sleep(0.2)
        # After resume the reading must have moved past the paused value.
        self.assertNotEqual(stopwatch_view.current_time, recorded_time)
        stopwatch_view.tap_pause()
        stopwatch_view.tap_reset()
        # Reset clears all laps and returns the display to zero.
        self.assertEqual(len(stopwatch_view.lap_items), 0)
        self.assertEqual(stopwatch_view.current_time, '00:00.00')
| true | true |
1c47be9cac33d18c0c0a8c405deb236cf91a9e3f | 14,282 | py | Python | test/functional/p2p_unrequested_blocks.py | Quirky-Turt-Crypto/Quirky-Turt-Coin | 2fce9fe4f3be715a8ad3269ed9cefb4e5b6fad59 | [
"MIT"
] | null | null | null | test/functional/p2p_unrequested_blocks.py | Quirky-Turt-Crypto/Quirky-Turt-Coin | 2fce9fe4f3be715a8ad3269ed9cefb4e5b6fad59 | [
"MIT"
] | null | null | null | test/functional/p2p_unrequested_blocks.py | Quirky-Turt-Crypto/Quirky-Turt-Coin | 2fce9fe4f3be715a8ad3269ed9cefb4e5b6fad59 | [
"MIT"
] | 1 | 2021-05-16T16:09:23.000Z | 2021-05-16T16:09:23.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Setup: two nodes, node0+node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
We have one P2PInterface connection to node0 called test_node, and one to node1
called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance for node0, but node1 should skip processing due to
nMinimumChainWork.
Node1 is unused in tests 3-7:
3. Mine a block that forks from the genesis block, and deliver to test_node.
Node0 should not process this block (just accept the header), because it
is unrequested and doesn't have more or equal work to the tip.
4a,b. Send another two blocks that build on the forking block.
Node0 should process the second block but be stuck on the shorter chain,
because it's missing an intermediate block.
4c.Send 288 more blocks on the longer chain (the number of blocks ahead
we currently store).
Node0 should process all but the last block (too far ahead in height).
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
8. Create a fork which is invalid at a height longer than the current chain
(ie to which the node will try to reorg) but which has headers built on top
of the invalid block. Check that we get disconnected if we send more headers
on the chain the node now knows to be invalid.
9. Test Node1 is able to sync when connected to node0 (which should have sufficient
work on its chain).
"""
from test_framework.mininode import *
from test_framework.test_framework import quirkyturtTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase, create_transaction
class AcceptBlockTest(quirkyturtTestFramework):
    """Functional test of unrequested-block processing (see module docstring).
    node0 runs with default settings; node1 runs with -minimumchainwork=0x10
    so it skips processing of low-work unrequested blocks.
    """
    def add_options(self, parser):
        """Allow overriding the daemon binary under test from the command line."""
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("BITCOIND", "quirkyturtd"),
                          help="quirkyturtd binary to test")
    def set_test_params(self):
        """Two fresh nodes; node1 gets a non-zero nMinimumChainWork."""
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [[], ["-minimumchainwork=0x10"]]
    def setup_network(self):
        # Node0 tests default processing of unrequested blocks; Node1 tests the
        # interaction with nMinimumChainWork (-minimumchainwork=0x10).
        # The two nodes are deliberately NOT connected to each other here;
        # they are only connected in step 9 at the end of run_test.
        self.setup_nodes()
    def run_test(self):
        """Drive scenario steps 1-9 described in the module docstring."""
        # Setup the p2p connections and start up the network thread.
        # test_node connects to node0 (not whitelisted)
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        # min_work_node connects to node1 (runs with -minimumchainwork=0x10)
        min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
        network_thread_start()
        # Test logic begins here
        test_node.wait_for_verack()
        min_work_node.wait_for_verack()
        # 1. Have nodes mine a block (leave IBD)
        [ n.generate(1) for n in self.nodes ]
        tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
        # 2. Send one block that builds on each tip.
        # This should be accepted by node0
        blocks_h2 = [] # the height 2 blocks on each node's chain
        block_time = int(time.time()) + 1
        for i in range(2):
            blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(msg_block(blocks_h2[0]))
        min_work_node.send_message(msg_block(blocks_h2[1]))
        for x in [test_node, min_work_node]:
            x.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 1)
        self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
        # 3. Send another block that builds on genesis.
        block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
        block_time += 1
        block_h1f.solve()
        test_node.send_message(msg_block(block_h1f))
        test_node.sync_with_ping()
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h1f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
        # 4. Send another two block that build on the fork.
        block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
        block_time += 1
        block_h2f.solve()
        test_node.send_message(msg_block(block_h2f))
        test_node.sync_with_ping()
        # Since the earlier block was not processed by node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h2f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)
        # But this block should be accepted by node since it has equal work.
        self.nodes[0].getblock(block_h2f.hash)
        self.log.info("Second height 2 block accepted, but not reorg'ed to")
        # 4b. Now send another block that builds on the forking chain.
        block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
        block_h3.solve()
        test_node.send_message(msg_block(block_h3))
        test_node.sync_with_ping()
        # Since the earlier block was not processed by node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h3.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)
        self.nodes[0].getblock(block_h3.hash)
        # But this block should be accepted by node since it has more work.
        self.nodes[0].getblock(block_h3.hash)
        self.log.info("Unrequested more-work block accepted")
        # 4c. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node (as long as its not missing any headers)
        tip = block_h3
        all_blocks = []
        for i in range(288):
            next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
            next_block.solve()
            all_blocks.append(next_block)
            tip = next_block
        # Now send the block at height 5 and check that it wasn't accepted (missing header)
        test_node.send_message(msg_block(all_blocks[1]))
        test_node.sync_with_ping()
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
        # The block at height 5 should be accepted if we provide the missing header, though
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(all_blocks[0]))
        test_node.send_message(headers_message)
        test_node.send_message(msg_block(all_blocks[1]))
        test_node.sync_with_ping()
        self.nodes[0].getblock(all_blocks[1].hash)
        # Now send the blocks in all_blocks
        for i in range(288):
            test_node.send_message(msg_block(all_blocks[i]))
        test_node.sync_with_ping()
        # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
        for x in all_blocks[:-1]:
            self.nodes[0].getblock(x.hash)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
        # 5. Test handling of unrequested block on the node that didn't process
        # Should still not be processed (even though it has a child that has more
        # work).
        # The node should have requested the blocks at some point, so
        # disconnect/reconnect first
        self.nodes[0].disconnect_p2ps()
        self.nodes[1].disconnect_p2ps()
        network_thread_join()
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        network_thread_start()
        test_node.wait_for_verack()
        test_node.send_message(msg_block(block_h1f))
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        self.log.info("Unrequested block that would complete more-work chain was ignored")
        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_message.pop("getdata", None)
        test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_message["getdata"]
        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, block_h1f.sha256)
        self.log.info("Inv at tip triggered getdata for unprocessed block")
        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(msg_block(block_h1f))
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 290)
        self.nodes[0].getblock(all_blocks[286].hash)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
        self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
        # 8. Create a chain which is invalid at a height longer than the
        # current chain, but which has more blocks on top of that
        block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
        block_289f.solve()
        block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
        block_290f.solve()
        block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
        # block_291 spends a coinbase below maturity!
        block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1))
        block_291.hashMerkleRoot = block_291.calc_merkle_root()
        block_291.solve()
        block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
        block_292.solve()
        # Now send all the headers on the chain and enough blocks to trigger reorg
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_289f))
        headers_message.headers.append(CBlockHeader(block_290f))
        headers_message.headers.append(CBlockHeader(block_291))
        headers_message.headers.append(CBlockHeader(block_292))
        test_node.send_message(headers_message)
        test_node.sync_with_ping()
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_292.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
        test_node.send_message(msg_block(block_289f))
        test_node.send_message(msg_block(block_290f))
        test_node.sync_with_ping()
        self.nodes[0].getblock(block_289f.hash)
        self.nodes[0].getblock(block_290f.hash)
        test_node.send_message(msg_block(block_291))
        # At this point we've sent an obviously-bogus block, wait for full processing
        # without assuming whether we will be disconnected or not
        try:
            # Only wait a short while so the test doesn't take forever if we do get
            # disconnected
            test_node.sync_with_ping(timeout=1)
        except AssertionError:
            test_node.wait_for_disconnect()
            self.nodes[0].disconnect_p2ps()
            test_node = self.nodes[0].add_p2p_connection(P2PInterface())
            network_thread_start()
            test_node.wait_for_verack()
        # We should have failed reorg and switched back to 290 (but have block 291)
        assert_equal(self.nodes[0].getblockcount(), 290)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
        # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
        block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
        block_293.solve()
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_293))
        test_node.send_message(headers_message)
        test_node.wait_for_disconnect()
        # 9. Connect node1 to node0 and ensure it is able to sync
        connect_nodes(self.nodes[0], 1)
        self.sync_blocks([self.nodes[0], self.nodes[1]])
        self.log.info("Successfully synced nodes 1 and 0")
# Standard script entry point: run the functional test when executed directly.
if __name__ == '__main__':
    AcceptBlockTest().main()
| 44.080247 | 113 | 0.676096 |
from test_framework.mininode import *
from test_framework.test_framework import quirkyturtTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase, create_transaction
class AcceptBlockTest(quirkyturtTestFramework):
    """Functional test of unrequested-block processing.
    node0 runs with default settings; node1 runs with -minimumchainwork=0x10
    so it skips processing of low-work unrequested blocks.
    """
    def add_options(self, parser):
        """Allow overriding the daemon binary under test from the command line."""
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("BITCOIND", "quirkyturtd"),
                          help="quirkyturtd binary to test")
    def set_test_params(self):
        """Two fresh nodes; node1 gets a non-zero nMinimumChainWork."""
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [[], ["-minimumchainwork=0x10"]]
    def setup_network(self):
        """Start the nodes without connecting them (connected in step 9)."""
        self.setup_nodes()
    def run_test(self):
        """Drive the unrequested-block scenario (steps 1-9)."""
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
        network_thread_start()
        test_node.wait_for_verack()
        min_work_node.wait_for_verack()
        [ n.generate(1) for n in self.nodes ]
        tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
        blocks_h2 = []
        block_time = int(time.time()) + 1
        for i in range(2):
            blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(msg_block(blocks_h2[0]))
        min_work_node.send_message(msg_block(blocks_h2[1]))
        for x in [test_node, min_work_node]:
            x.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 1)
        self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
        # 3. Send another block that builds on genesis.
        block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
        block_time += 1
        block_h1f.solve()
        test_node.send_message(msg_block(block_h1f))
        test_node.sync_with_ping()
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h1f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
        # 4. Send another two block that build on the fork.
        block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
        block_time += 1
        block_h2f.solve()
        test_node.send_message(msg_block(block_h2f))
        test_node.sync_with_ping()
        # Since the earlier block was not processed by node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h2f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)
        self.nodes[0].getblock(block_h2f.hash)
        self.log.info("Second height 2 block accepted, but not reorg'ed to")
        # 4b. Now send another block that builds on the forking chain.
        block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
        block_h3.solve()
        test_node.send_message(msg_block(block_h3))
        test_node.sync_with_ping()
        # Since the earlier block was not processed by node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h3.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)
        self.nodes[0].getblock(block_h3.hash)
        self.nodes[0].getblock(block_h3.hash)
        self.log.info("Unrequested more-work block accepted")
        tip = block_h3
        all_blocks = []
        for i in range(288):
            next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
            next_block.solve()
            all_blocks.append(next_block)
            tip = next_block
        test_node.send_message(msg_block(all_blocks[1]))
        test_node.sync_with_ping()
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
        # The block at height 5 should be accepted if we provide the missing header, though
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(all_blocks[0]))
        test_node.send_message(headers_message)
        test_node.send_message(msg_block(all_blocks[1]))
        test_node.sync_with_ping()
        self.nodes[0].getblock(all_blocks[1].hash)
        # Now send the blocks in all_blocks
        for i in range(288):
            test_node.send_message(msg_block(all_blocks[i]))
        test_node.sync_with_ping()
        # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
        for x in all_blocks[:-1]:
            self.nodes[0].getblock(x.hash)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
        # Should still not be processed (even though it has a child that has more
        # work).
        # The node should have requested the blocks at some point, so
        # disconnect/reconnect first
        self.nodes[0].disconnect_p2ps()
        self.nodes[1].disconnect_p2ps()
        network_thread_join()
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        network_thread_start()
        test_node.wait_for_verack()
        test_node.send_message(msg_block(block_h1f))
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        self.log.info("Unrequested block that would complete more-work chain was ignored")
        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_message.pop("getdata", None)
        test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_message["getdata"]
        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, block_h1f.sha256)
        self.log.info("Inv at tip triggered getdata for unprocessed block")
        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(msg_block(block_h1f))
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 290)
        self.nodes[0].getblock(all_blocks[286].hash)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
        self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
        # 8. Create a chain which is invalid at a height longer than the
        # current chain, but which has more blocks on top of that
        block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
        block_289f.solve()
        block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
        block_290f.solve()
        block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
        # block_291 spends a coinbase below maturity!
        block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1))
        block_291.hashMerkleRoot = block_291.calc_merkle_root()
        block_291.solve()
        block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
        block_292.solve()
        # Now send all the headers on the chain and enough blocks to trigger reorg
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_289f))
        headers_message.headers.append(CBlockHeader(block_290f))
        headers_message.headers.append(CBlockHeader(block_291))
        headers_message.headers.append(CBlockHeader(block_292))
        test_node.send_message(headers_message)
        test_node.sync_with_ping()
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_292.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
        test_node.send_message(msg_block(block_289f))
        test_node.send_message(msg_block(block_290f))
        test_node.sync_with_ping()
        self.nodes[0].getblock(block_289f.hash)
        self.nodes[0].getblock(block_290f.hash)
        test_node.send_message(msg_block(block_291))
        # At this point we've sent an obviously-bogus block, wait for full processing
        try:
            # disconnected
            test_node.sync_with_ping(timeout=1)
        except AssertionError:
            test_node.wait_for_disconnect()
            self.nodes[0].disconnect_p2ps()
            test_node = self.nodes[0].add_p2p_connection(P2PInterface())
            network_thread_start()
            test_node.wait_for_verack()
        # We should have failed reorg and switched back to 290 (but have block 291)
        assert_equal(self.nodes[0].getblockcount(), 290)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
        # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
        block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
        block_293.solve()
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_293))
        test_node.send_message(headers_message)
        test_node.wait_for_disconnect()
        connect_nodes(self.nodes[0], 1)
        self.sync_blocks([self.nodes[0], self.nodes[1]])
        self.log.info("Successfully synced nodes 1 and 0")
# Standard script entry point: run the functional test when executed directly.
if __name__ == '__main__':
    AcceptBlockTest().main()
| true | true |
1c47beb831ed519d0ec874e7fd8ab065c7a7379d | 6,290 | py | Python | patent_example/patent_example.py | RobKraft/dedupe-examples | bf02a805f8d1a0581b07c1eb81503c769b9541f1 | [
"MIT"
] | null | null | null | patent_example/patent_example.py | RobKraft/dedupe-examples | bf02a805f8d1a0581b07c1eb81503c769b9541f1 | [
"MIT"
] | null | null | null | patent_example/patent_example.py | RobKraft/dedupe-examples | bf02a805f8d1a0581b07c1eb81503c769b9541f1 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This code demonstrates how to use dedupe to disambiguate patent
authors and demonstrates the Set and LatLong data types.
"""
import os
import csv
import logging
import optparse
import dedupe
def readData(filename, set_delim='**'):
    """Load the patent CSV and remap columns for dedupe.

    Remapping performed per row (all values lower-cased first):
      - Lat and Lng collapse into a single LatLong tuple; both being '0.0'
        is treated as "no location" and becomes None.
      - Class and Coauthor are *set_delim*-delimited strings parsed into
        sorted tuples ('none' coauthors are dropped).
      - An empty Name becomes None.
    Returns a dict mapping the row index to the remapped record dict.
    """
    records = {}
    with open(filename) as source:
        for row_id, raw in enumerate(csv.DictReader(source)):
            # Normalize every value to lower case for comparison.
            record = {key: value.lower() for key, value in raw.items()}
            lat, lng = record['Lat'], record['Lng']
            if lat == lng == '0.0':
                record['LatLong'] = None
            else:
                record['LatLong'] = (float(lat), float(lng))
            if record['Class']:
                record['Class'] = tuple(sorted(record['Class'].split(set_delim)))
            else:
                record['Class'] = None
            record['Coauthor'] = tuple(sorted(
                name for name in record['Coauthor'].split(set_delim)
                if name != 'none'))
            if not record['Name']:
                record['Name'] = None
            records[row_id] = record
    return records
# These generators will give us the corpora setting up the Set
# distance metrics
def classes(data):
    """Yield each record's 'Class' tuple (corpus for the Set distance)."""
    yield from (record['Class'] for record in data.values())
def coauthors(data):
    """Yield each record's 'Coauthor' tuple (corpus for the Set distance)."""
    yield from (record['Coauthor'] for record in data.values())
def names(data):
    """Yield each record's 'Name' value (corpus for the Text distance)."""
    yield from (record['Name'] for record in data.values())
# Script entry point: train (or load) a dedupe model for patent authors and
# write the clustered results back out as CSV.
if __name__ == '__main__':
    # ## Logging
    # Dedupe uses Python logging to show or suppress verbose output. Added
    # for convenience. To enable verbose logging, run `python
    # patent_example.py -v`
    optp = optparse.OptionParser()
    optp.add_option('-v', '--verbose', dest='verbose', action='count',
                    help='Increase verbosity (specify multiple times for more)'
                    )
    (opts, args) = optp.parse_args()
    log_level = logging.WARNING
    if opts.verbose:
        if opts.verbose == 1:
            log_level = logging.INFO
        elif opts.verbose > 1:
            log_level = logging.DEBUG
    logging.getLogger().setLevel(log_level)
    # All input/output/settings/training files live next to this script.
    input_file = 'patstat_input.csv'
    output_file = 'patstat_output.csv'
    settings_file = 'patstat_settings.json'
    training_file = 'patstat_training.json'
    scriptpath = os.path.dirname(__file__)
    input_file = os.path.join(scriptpath, input_file)
    output_file = os.path.join(scriptpath, output_file)
    settings_file = os.path.join(scriptpath, settings_file)
    training_file = os.path.join(scriptpath, training_file)
    print('importing data ...')
    data_d = readData(input_file)
    # ## Training
    if os.path.exists(settings_file):
        print('reading from', settings_file)
        with open(settings_file, 'rb') as sf:
            deduper = dedupe.StaticDedupe(sf, num_cores=2)
    else:
        # Define the fields dedupe will pay attention to
        fields = [
            {'field': 'Name',
             'variable name': 'Name',
             'type': 'String',
             'has missing': True},
            {'field': 'LatLong',
             'type': 'LatLong',
             'has missing': True},
            {'field': 'Class',
             'variable name': 'Class',
             'type': 'Set',
             'corpus': classes(data_d),
             'has missing': True},
            {'field': 'Coauthor',
             'variable name': 'Coauthor',
             'type': 'Set',
             'corpus': coauthors(data_d),
             'has missing': True},
            {'field': 'Name',
             'variable name': 'Name Text',
             'type': 'Text',
             'corpus': names(data_d),
             'has missing': True},
            {'type': 'Interaction',
             'interaction variables': ['Name', 'Name Text']}
        ]
        # Create a new deduper object and pass our data model to it.
        deduper = dedupe.Dedupe(fields, num_cores=2)
        # If we have training data saved from a previous run of dedupe,
        # look for it and load it in.
        if os.path.exists(training_file):
            print('reading labeled examples from ', training_file)
            with open(training_file) as tf:
                deduper.prepare_training(data_d, training_file=tf)
        else:
            deduper.prepare_training(data_d)
        # ## Active learning
        # Starts the training loop. Dedupe will find the next pair of records
        # it is least certain about and ask you to label them as duplicates
        # or not.
        # use 'y', 'n' and 'u' keys to flag duplicates
        # press 'f' when you are finished
        print('starting active labeling...')
        dedupe.console_label(deduper)
        deduper.train()
        # When finished, save our training away to disk
        with open(training_file, 'w') as tf:
            deduper.write_training(tf)
        # Save our weights and predicates to disk. If the settings file
        # exists, we will skip all the training and learning next time we run
        # this file.
        with open(settings_file, 'wb') as sf:
            deduper.write_settings(sf)
    clustered_dupes = deduper.partition(data_d, 0.5)
    print('# duplicate sets', len(clustered_dupes))
    # ## Writing Results
    # Write our original data back out to a CSV with a new column called
    # 'Cluster ID' which indicates which records refer to each other.
    cluster_membership = {}
    for cluster_id, (records, scores) in enumerate(clustered_dupes):
        for record_id, score in zip(records, scores):
            cluster_membership[record_id] = {
                "Cluster ID": cluster_id,
                "confidence_score": score
            }
    with open(output_file, 'w') as f_output, open(input_file) as f_input:
        reader = csv.DictReader(f_input)
        fieldnames = ['Cluster ID', 'confidence_score'] + reader.fieldnames
        writer = csv.DictWriter(f_output, fieldnames=fieldnames)
        writer.writeheader()
        for row_id, row in enumerate(reader):
            row.update(cluster_membership[row_id])
            writer.writerow(row)
| 31.767677 | 97 | 0.583466 |
import os
import csv
import logging
import optparse
import dedupe
def readData(filename, set_delim='**'):
    """Read the patent CSV and remap columns for dedupe.

    Lat/Lng collapse into a single LatLong tuple (None when both are '0.0');
    Class and Coauthor are *set_delim*-delimited strings parsed into sorted
    tuples ('none' coauthors dropped); empty names become None.
    Returns {row index: record dict}.
    """
    data_d = {}
    with open(filename) as f:
        reader = csv.DictReader(f)
        for idx, row in enumerate(reader):
            # Normalize every value to lower case for comparison.
            row = dict((k, v.lower()) for k, v in row.items())
            if row['Lat'] == row['Lng'] == '0.0':
                row['LatLong'] = None
            else:
                row['LatLong'] = (float(row['Lat']), float(row['Lng']))
            row['Class'] = tuple(sorted(row['Class'].split(set_delim))) if row['Class'] else None
            row['Coauthor'] = tuple(sorted([author for author
                                            in row['Coauthor'].split(set_delim)
                                            if author != 'none']))
            if row['Name'] == '':
                row['Name'] = None
            data_d[idx] = row
    return data_d
def classes(data):
    """Yield each record's 'Class' tuple (corpus for the Set distance)."""
    for record in data.values():
        yield record['Class']
def coauthors(data):
    """Yield each record's 'Coauthor' tuple (corpus for the Set distance)."""
    for record in data.values():
        yield record['Coauthor']
def names(data):
    """Yield each record's 'Name' value (corpus for the Text distance)."""
    for record in data.values():
        yield record['Name']
# Script entry point: train (or load) a dedupe model for patent authors and
# write the clustered results back out as CSV.
# FIX: two lines in this copy had been truncated ("ts(settings_file):" had
# lost "if os.path.exis" and "= {}" had lost "cluster_membership "), which
# made the script a SyntaxError. Both lines are restored below.
if __name__ == '__main__':
    # Command-line parsing: a repeatable -v flag raises logging verbosity.
    optp = optparse.OptionParser()
    optp.add_option('-v', '--verbose', dest='verbose', action='count',
                    help='Increase verbosity (specify multiple times for more)'
                    )
    (opts, args) = optp.parse_args()
    log_level = logging.WARNING
    if opts.verbose:
        if opts.verbose == 1:
            log_level = logging.INFO
        elif opts.verbose > 1:
            log_level = logging.DEBUG
    logging.getLogger().setLevel(log_level)
    # All input/output/settings/training files live next to this script.
    input_file = 'patstat_input.csv'
    output_file = 'patstat_output.csv'
    settings_file = 'patstat_settings.json'
    training_file = 'patstat_training.json'
    scriptpath = os.path.dirname(__file__)
    input_file = os.path.join(scriptpath, input_file)
    output_file = os.path.join(scriptpath, output_file)
    settings_file = os.path.join(scriptpath, settings_file)
    training_file = os.path.join(scriptpath, training_file)
    print('importing data ...')
    data_d = readData(input_file)
    # Reuse saved weights/predicates when present; otherwise train anew.
    if os.path.exists(settings_file):
        print('reading from', settings_file)
        with open(settings_file, 'rb') as sf:
            deduper = dedupe.StaticDedupe(sf, num_cores=2)
    else:
        # Fields dedupe will compare; Set/Text fields get a corpus generator.
        fields = [
            {'field': 'Name',
             'variable name': 'Name',
             'type': 'String',
             'has missing': True},
            {'field': 'LatLong',
             'type': 'LatLong',
             'has missing': True},
            {'field': 'Class',
             'variable name': 'Class',
             'type': 'Set',
             'corpus': classes(data_d),
             'has missing': True},
            {'field': 'Coauthor',
             'variable name': 'Coauthor',
             'type': 'Set',
             'corpus': coauthors(data_d),
             'has missing': True},
            {'field': 'Name',
             'variable name': 'Name Text',
             'type': 'Text',
             'corpus': names(data_d),
             'has missing': True},
            {'type': 'Interaction',
             'interaction variables': ['Name', 'Name Text']}
        ]
        deduper = dedupe.Dedupe(fields, num_cores=2)
        # Load previously labeled training examples when available.
        if os.path.exists(training_file):
            print('reading labeled examples from ', training_file)
            with open(training_file) as tf:
                deduper.prepare_training(data_d, training_file=tf)
        else:
            deduper.prepare_training(data_d)
        # Active learning: label pairs with 'y'/'n'/'u'; press 'f' to finish.
        print('starting active labeling...')
        dedupe.console_label(deduper)
        deduper.train()
        # Persist the labeled pairs and the learned settings for future runs.
        with open(training_file, 'w') as tf:
            deduper.write_training(tf)
        with open(settings_file, 'wb') as sf:
            deduper.write_settings(sf)
    clustered_dupes = deduper.partition(data_d, 0.5)
    print('# duplicate sets', len(clustered_dupes))
    # Map each record id to its cluster id and confidence score.
    cluster_membership = {}
    for cluster_id, (records, scores) in enumerate(clustered_dupes):
        for record_id, score in zip(records, scores):
            cluster_membership[record_id] = {
                "Cluster ID": cluster_id,
                "confidence_score": score
            }
    # Write the input CSV back out with the cluster columns prepended.
    with open(output_file, 'w') as f_output, open(input_file) as f_input:
        reader = csv.DictReader(f_input)
        fieldnames = ['Cluster ID', 'confidence_score'] + reader.fieldnames
        writer = csv.DictWriter(f_output, fieldnames=fieldnames)
        writer.writeheader()
        for row_id, row in enumerate(reader):
            row.update(cluster_membership[row_id])
            writer.writerow(row)
| true | true |
1c47c26b2239aaaa497597e10ff585638018c10a | 446 | py | Python | oo/teste_carro.py | vladimirvinicius/pythonbirds | 2c0c6bfcda6fbeaffc36f6f04ccd94ab704e0b1a | [
"MIT"
] | 1 | 2020-10-04T03:29:20.000Z | 2020-10-04T03:29:20.000Z | oo/teste_carro.py | JosemarBrito/pythonbirds | eaa80f98bd4365b1146556b5f144dbab03fbf9bb | [
"MIT"
] | null | null | null | oo/teste_carro.py | JosemarBrito/pythonbirds | eaa80f98bd4365b1146556b5f144dbab03fbf9bb | [
"MIT"
] | null | null | null | from unittest import TestCase
from oo.carro import Motor
class CarroTestCase(TestCase):
def teste_velocidade_inicial(self):
motor = Motor()
self.assertEqual(0, motor.velocidade)
def teste_acelerar(self):
motor = Motor()
motor.acelerar()
self.assertEqual(1, motor.velocidade)
def teste_frear(self):
motor = Motor()
motor.frear()
self.assertEqual(0, motor.velocidade) | 23.473684 | 45 | 0.650224 | from unittest import TestCase
from oo.carro import Motor
class CarroTestCase(TestCase):
def teste_velocidade_inicial(self):
motor = Motor()
self.assertEqual(0, motor.velocidade)
def teste_acelerar(self):
motor = Motor()
motor.acelerar()
self.assertEqual(1, motor.velocidade)
def teste_frear(self):
motor = Motor()
motor.frear()
self.assertEqual(0, motor.velocidade) | true | true |
1c47c3ee33915e701135e1412bec7e390f756847 | 2,676 | py | Python | gamma_cloudinary/config.py | barakaVictor/django-gamma-cloudinary | 598af46844ca7b2de3cc832cb0d8dd3f9742e625 | [
"BSD-3-Clause"
] | 1 | 2022-03-13T13:44:19.000Z | 2022-03-13T13:44:19.000Z | gamma_cloudinary/config.py | barakaVictor/django-gamma-cloudinary | 598af46844ca7b2de3cc832cb0d8dd3f9742e625 | [
"BSD-3-Clause"
] | 4 | 2021-09-22T11:44:24.000Z | 2022-01-13T11:06:54.000Z | gamma_cloudinary/config.py | barakaVictor/django-gamma-cloudinary | 598af46844ca7b2de3cc832cb0d8dd3f9742e625 | [
"BSD-3-Clause"
] | null | null | null | import os
import cloudinary
from operator import itemgetter
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
#Execute setup code for cloudinary configuration
def setup_cloudinary():
    """Configure the cloudinary SDK from Django settings or the environment.

    Prefers a ``CLOUDINARY_STORAGE`` dict in the Django settings module,
    which must contain at least CLOUD_NAME, API_KEY and API_SECRET.  If it
    is absent, the function accepts either the ``CLOUDINARY_URL`` environment
    variable or the (CLOUDINARY_CLOUD_NAME, CLOUDINARY_API_KEY,
    CLOUDINARY_API_SECRET) trio, which the cloudinary SDK reads on its own.

    Raises:
        ImproperlyConfigured: if no usable configuration source is found,
            or if CLOUDINARY_STORAGE is present but missing a required key.
    """
    if not settings.configured:
        return
    try:
        cloudinary_settings = getattr(settings, 'CLOUDINARY_STORAGE')
        # Fail fast (KeyError) if any of the minimum required keys is missing.
        itemgetter('CLOUD_NAME', 'API_KEY', 'API_SECRET')(cloudinary_settings)
    except AttributeError:
        # CLOUDINARY_STORAGE is not set: fall back to the environment.
        # BUG FIX: the original used two independent ``if`` statements here,
        # so a set CLOUDINARY_URL still raised ImproperlyConfigured whenever
        # the three individual variables were unset. ``elif`` makes either
        # source sufficient, matching the documented intent.
        if os.environ.get('CLOUDINARY_URL'):
            pass
        elif (os.environ.get('CLOUDINARY_CLOUD_NAME')
              and os.environ.get('CLOUDINARY_API_KEY')
              and os.environ.get('CLOUDINARY_API_SECRET')):
            pass
        else:
            raise ImproperlyConfigured('In order to use cloudinary storage, you need to provide '
                                       'CLOUDINARY_STORAGE dictionary with CLOUD_NAME, API_SECRET '
                                       'and API_KEY in the django settings module or set CLOUDINARY_URL'
                                       '(or CLOUDINARY_CLOUD_NAME, CLOUDINARY_API_KEY, CLOUDINARY_API_SECRET) '
                                       'environment variables).')
    except KeyError as e:
        # CLOUDINARY_STORAGE exists but lacks one of the required keys.
        raise ImproperlyConfigured(f'{e.args[0]} is a required setting in the cloudinary config.')
    else:
        # cloudinary expects snake_case config keys, so lower-case them here.
        cloudinary.config(**{key.lower(): value for key, value in cloudinary_settings.items()})
| 58.173913 | 142 | 0.664051 | import os
import cloudinary
from operator import itemgetter
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def setup_cloudinary():
if settings.configured:
try:
cloudinary_settings = getattr(settings, 'CLOUDINARY_STORAGE')
itemgetter('CLOUD_NAME', 'API_KEY', 'API_SECRET')(cloudinary_settings)
except AttributeError:
if os.environ.get('CLOUDINARY_URL'):
pass
if (os.environ.get('CLOUDINARY_CLOUD_NAME') and os.environ.get('CLOUDINARY_API_KEY') and os.environ.get('CLOUDINARY_API_SECRET')):
pass
else:
raise ImproperlyConfigured('In order to use cloudinary storage, you need to provide '
'CLOUDINARY_STORAGE dictionary with CLOUD_NAME, API_SECRET '
'and API_KEY in the django settings module or set CLOUDINARY_URL'
'(or CLOUDINARY_CLOUD_NAME, CLOUDINARY_API_KEY, CLOUDINARY_API_SECRET) '
'environment variables).')
except KeyError as e:
raise ImproperlyConfigured(f'{e.args[0]} is a required setting in the cloudinary config.')
else:
cloudinary.config(**{key.lower(): value for key, value in cloudinary_settings.items()})
| true | true |
1c47c4f4f4455be041aae5c83a2d2cfc01c700b7 | 1,554 | py | Python | pytest_curl_report/plugin.py | t2y/pytest-curl-report | 8690d8e6b78ad578af07ffad592556119304dac8 | [
"Apache-2.0"
] | null | null | null | pytest_curl_report/plugin.py | t2y/pytest-curl-report | 8690d8e6b78ad578af07ffad592556119304dac8 | [
"Apache-2.0"
] | null | null | null | pytest_curl_report/plugin.py | t2y/pytest-curl-report | 8690d8e6b78ad578af07ffad592556119304dac8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
from .curl import Curl
from .utils import get_inspect_functions
PLUGIN_NAMESPACE = 'curl_report'
def pytest_addoption(parser):
    """Register the curl-report command line flags with pytest."""
    group = parser.getgroup('curlreport', 'curl report')
    flag_specs = (
        ('--no-curl-report', 'no_curl_report',
         'not generate curl report when a testcase is failed'),
        ('--curl-report-only', 'curl_report_only',
         'strip pytest assertion log and generate curl report only'),
    )
    for flag, dest, help_text in flag_specs:
        group.addoption(
            flag, dest=dest,
            action='store_true', default=False,
            help=help_text
        )
def pytest_runtest_makereport(__multicall__, item, call):
    """Append a 'How to reproduce with curl' section to failure reports.

    Scans the local variables of the first traceback frame for objects that
    one of the registered inspect functions recognises and, for each match,
    adds the equivalent curl command line to the test's long report.
    """
    # Plugin disabled entirely via --no-curl-report.
    if item.config.option.no_curl_report:
        return
    report = __multicall__.execute()
    # longrepr is only populated for failed tests; nothing to decorate here.
    if report.longrepr is None:
        return report
    if item.config.option.curl_report_only:
        if hasattr(report, 'longrepr'):
            if hasattr(report.longrepr, 'reprtraceback'):
                # HACK: set dummy reporting function for traceback report so
                # the assertion log is suppressed and only sections remain.
                report.longrepr.reprtraceback.toterminal = lambda x: None
    # Extra info optionally stashed on the pytest module under our namespace;
    # a plain object() sentinel when nothing was registered.
    extra_info = getattr(pytest, PLUGIN_NAMESPACE, object())
    inspect_funcs = get_inspect_functions()
    # Try each inspect function against every local of the failing frame;
    # the first function that yields a result for a given local wins.
    for _, obj in call.excinfo.traceback[0].frame.f_locals.items():
        for func in inspect_funcs:
            r = func(obj)
            if r is not None:
                cmd = Curl(r, extra_info).make_command()
                report.longrepr.addsection('How to reproduce with curl', cmd)
                break
    return report
| 31.08 | 77 | 0.651866 |
import pytest
from .curl import Curl
from .utils import get_inspect_functions
PLUGIN_NAMESPACE = 'curl_report'
def pytest_addoption(parser):
group = parser.getgroup('curlreport', 'curl report')
group.addoption(
'--no-curl-report', dest='no_curl_report',
action='store_true', default=False,
help='not generate curl report when a testcase is failed'
)
group.addoption(
'--curl-report-only', dest='curl_report_only',
action='store_true', default=False,
help='strip pytest assertion log and generate curl report only'
)
def pytest_runtest_makereport(__multicall__, item, call):
if item.config.option.no_curl_report:
return
report = __multicall__.execute()
if report.longrepr is None:
return report
if item.config.option.curl_report_only:
if hasattr(report, 'longrepr'):
if hasattr(report.longrepr, 'reprtraceback'):
report.longrepr.reprtraceback.toterminal = lambda x: None
extra_info = getattr(pytest, PLUGIN_NAMESPACE, object())
inspect_funcs = get_inspect_functions()
for _, obj in call.excinfo.traceback[0].frame.f_locals.items():
for func in inspect_funcs:
r = func(obj)
if r is not None:
cmd = Curl(r, extra_info).make_command()
report.longrepr.addsection('How to reproduce with curl', cmd)
break
return report
| true | true |
1c47c521e31ebadae1e4b554a33840207018eda8 | 336 | py | Python | quilljs_example/example/models.py | muke5hy/django-quill | 16250b9c9418907123c8b40ddc66523af5d4e4d4 | [
"BSD-3-Clause"
] | 11 | 2019-02-20T08:58:43.000Z | 2021-01-03T16:41:07.000Z | quilljs_example/example/models.py | muke5hy/django-quill | 16250b9c9418907123c8b40ddc66523af5d4e4d4 | [
"BSD-3-Clause"
] | null | null | null | quilljs_example/example/models.py | muke5hy/django-quill | 16250b9c9418907123c8b40ddc66523af5d4e4d4 | [
"BSD-3-Clause"
] | 3 | 2019-10-08T18:04:01.000Z | 2020-11-02T12:15:03.000Z | from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from quilljs.fields import RichTextField
@python_2_unicode_compatible
class ExampleModel(models.Model):
    """Demo model showing two Quill rich-text fields."""

    # Rich-text field using the default Quill configuration.
    editor = RichTextField()
    # Rich-text field created with the config preset named 'basic'.
    editor2 = RichTextField(config='basic')

    def __str__(self):
        return 'This is just an example'
| 24 | 61 | 0.77381 | from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from quilljs.fields import RichTextField
@python_2_unicode_compatible
class ExampleModel(models.Model):
editor = RichTextField()
editor2 = RichTextField(config='basic')
def __str__(self):
return 'This is just an example'
| true | true |
1c47c5651fa334d977285c340e3c9f7fa5d3f735 | 2,263 | py | Python | setup.py | RobertDeRose/python-robin-srv | dcb3b8a0dff71f2b63695fdab48b322998328fc2 | [
"BSD-2-Clause"
] | null | null | null | setup.py | RobertDeRose/python-robin-srv | dcb3b8a0dff71f2b63695fdab48b322998328fc2 | [
"BSD-2-Clause"
] | null | null | null | setup.py | RobertDeRose/python-robin-srv | dcb3b8a0dff71f2b63695fdab48b322998328fc2 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
    """Return the text of a file located relative to this script.

    Path components in *names* are joined onto the directory containing
    this file.  An ``encoding`` keyword may be supplied (default: utf8).
    """
    # Use a context manager so the handle is closed deterministically
    # (the original leaked it until garbage collection).
    with io.open(join(dirname(__file__), *names),
                 encoding=kwargs.get('encoding', 'utf8')) as fh:
        return fh.read()
# Package metadata for robin-srv.  The long description is assembled from
# README.rst (badge block stripped) and CHANGELOG.rst (ReST roles flattened
# to plain double-backtick literals).
setup(
    name='robin-srv',
    version='0.1.0',
    license='BSD',
    description='A utility library to help with client-side load balancing based on SRV records.',
    long_description='%s\n%s' % (
        re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')),
        re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
    ),
    author='Robert DeRose',
    author_email='RobertDeRose@gmail.com',
    url='https://github.com/RobertDeRose/python-robin-srv',
    packages=find_packages('src'),
    package_dir={'': 'src'},
    # Every top-level module under src/ is also exposed as a py_module.
    py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        # complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Utilities',
    ],
    keywords=[
        # eg: 'keyword1', 'keyword2', 'keyword3',
    ],
    install_requires=[
        'dnspython'
    ],
    extras_require={
        # eg:
        # 'rst': ['docutils>=0.11'],
        # ':python_version=="2.6"': ['argparse'],
    },
    entry_points={
        # Installs the `robin-srv` console command.
        'console_scripts': [
            'robin-srv = robin_srv.cli:main',
        ]
    },
)
| 30.581081 | 98 | 0.606717 |
from __future__ import absolute_import
from __future__ import print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
return io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
).read()
setup(
name='robin-srv',
version='0.1.0',
license='BSD',
description='A utility library to help with client-side load balancing based on SRV records.',
long_description='%s\n%s' % (
re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')),
re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
),
author='Robert DeRose',
author_email='RobertDeRose@gmail.com',
url='https://github.com/RobertDeRose/python-robin-srv',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Utilities',
],
keywords=[
],
install_requires=[
'dnspython'
],
extras_require={
},
entry_points={
'console_scripts': [
'robin-srv = robin_srv.cli:main',
]
},
)
| true | true |
1c47c5ded622153fdda38f1bf3179dad8b91b2a3 | 2,653 | py | Python | tests/test_build.py | martinruenz/pytorch3d | 7f1e63aed1252ba8145d4a66ce2272331d60cdae | [
"BSD-3-Clause"
] | 3 | 2022-03-09T08:12:54.000Z | 2022-03-10T01:57:03.000Z | tests/test_build.py | martinruenz/pytorch3d | 7f1e63aed1252ba8145d4a66ce2272331d60cdae | [
"BSD-3-Clause"
] | null | null | null | tests/test_build.py | martinruenz/pytorch3d | 7f1e63aed1252ba8145d4a66ce2272331d60cdae | [
"BSD-3-Clause"
] | 1 | 2020-09-15T06:01:18.000Z | 2020-09-15T06:01:18.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import os
import unittest
from collections import Counter
from pathlib import Path
# This file groups together tests which look at the code without running it.
# When running the tests inside conda's build, the code is not available.
in_conda_build = os.environ.get("CONDA_BUILD_STATE", "") == "TEST"
class TestBuild(unittest.TestCase):
    """Static checks over the repository source tree (no code is executed)."""

    @unittest.skipIf(in_conda_build, "In conda build")
    def test_name_clash(self):
        """Each .cu/.cpp translation unit must have a unique stem."""
        # For setup.py, all translation units need distinct names, so we
        # cannot have foo.cu and foo.cpp, even in different directories.
        test_dir = Path(__file__).resolve().parent
        source_dir = test_dir.parent / "pytorch3d"
        stems = []
        for extension in [".cu", ".cpp"]:
            files = source_dir.glob(f"**/*{extension}")
            stems.extend(f.stem for f in files)
        counter = Counter(stems)
        for k, v in counter.items():
            self.assertEqual(v, 1, f"Too many files with stem {k}.")

    @unittest.skipIf(in_conda_build, "In conda build")
    def test_deprecated_usage(self):
        """No deprecated tensor accessors may appear in csrc sources."""
        # Check certain expressions do not occur in the csrc code
        test_dir = Path(__file__).resolve().parent
        source_dir = test_dir.parent / "pytorch3d" / "csrc"
        files = sorted(source_dir.glob("**/*.*"))
        # Sanity check that the glob actually found the source tree.
        self.assertGreater(len(files), 4)
        patterns = [".type()", ".data()"]
        for file in files:
            with open(file) as f:
                text = f.read()
            for pattern in patterns:
                found = pattern in text
                msg = (
                    f"{pattern} found in {file.name}"
                    + ", this has been deprecated."
                )
                self.assertFalse(found, msg)

    @unittest.skipIf(in_conda_build, "In conda build")
    def test_copyright(self):
        """Every source file must carry the copyright header on its first
        meaningful line (shebang / coding lines are skipped)."""
        test_dir = Path(__file__).resolve().parent
        root_dir = test_dir.parent
        extensions = ("py", "cu", "cuh", "cpp", "h", "hpp", "sh")
        expect = (
            "Copyright (c) Facebook, Inc. and its affiliates."
            + " All rights reserved.\n"
        )
        for extension in extensions:
            for i in root_dir.glob(f"**/*.{extension}"):
                with open(i) as f:
                    firstline = f.readline()
                    if firstline.startswith(("# -*-", "#!")):
                        firstline = f.readline()
                    self.assertTrue(
                        firstline.endswith(expect), f"{i} missing copyright header."
                    )
| 36.342466 | 84 | 0.560498 |
import os
import unittest
from collections import Counter
from pathlib import Path
in_conda_build = os.environ.get("CONDA_BUILD_STATE", "") == "TEST"
class TestBuild(unittest.TestCase):
@unittest.skipIf(in_conda_build, "In conda build")
def test_name_clash(self):
# For setup.py, all translation units need distinct names, so we
# cannot have foo.cu and foo.cpp, even in different directories.
test_dir = Path(__file__).resolve().parent
source_dir = test_dir.parent / "pytorch3d"
stems = []
for extension in [".cu", ".cpp"]:
files = source_dir.glob(f"**/*{extension}")
stems.extend(f.stem for f in files)
counter = Counter(stems)
for k, v in counter.items():
self.assertEqual(v, 1, f"Too many files with stem {k}.")
@unittest.skipIf(in_conda_build, "In conda build")
def test_deprecated_usage(self):
# Check certain expressions do not occur in the csrc code
test_dir = Path(__file__).resolve().parent
source_dir = test_dir.parent / "pytorch3d" / "csrc"
files = sorted(source_dir.glob("**/*.*"))
self.assertGreater(len(files), 4)
patterns = [".type()", ".data()"]
for file in files:
with open(file) as f:
text = f.read()
for pattern in patterns:
found = pattern in text
msg = (
f"{pattern} found in {file.name}"
+ ", this has been deprecated."
)
self.assertFalse(found, msg)
@unittest.skipIf(in_conda_build, "In conda build")
def test_copyright(self):
test_dir = Path(__file__).resolve().parent
root_dir = test_dir.parent
extensions = ("py", "cu", "cuh", "cpp", "h", "hpp", "sh")
expect = (
"Copyright (c) Facebook, Inc. and its affiliates."
+ " All rights reserved.\n"
)
for extension in extensions:
for i in root_dir.glob(f"**/*.{extension}"):
with open(i) as f:
firstline = f.readline()
if firstline.startswith(("# -*-", "#!")):
firstline = f.readline()
self.assertTrue(
firstline.endswith(expect), f"{i} missing copyright header."
)
| true | true |
1c47c5f47a3efdc6396fd4dbe3e492f94d567901 | 8,567 | py | Python | pytorch_toolkit/face_recognition/dump_features.py | xzry6/openvino_training_extensions | b8b17bbcc352633b0f0d3a99d6179a9ec616e426 | [
"Apache-2.0"
] | 158 | 2019-03-01T15:47:39.000Z | 2022-02-10T15:10:48.000Z | dump_features.py | sacchinbhg/face_recognition.pytorch | 05cb9b30e8220445fcb27988926d88f330091c12 | [
"Apache-2.0"
] | 6 | 2020-03-08T22:58:13.000Z | 2022-03-12T00:15:14.000Z | dump_features.py | sacchinbhg/face_recognition.pytorch | 05cb9b30e8220445fcb27988926d88f330091c12 | [
"Apache-2.0"
] | 23 | 2019-03-02T09:18:19.000Z | 2021-11-06T22:01:56.000Z | """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import argparse
import os
import os.path as osp
from tqdm import tqdm
import numpy as np
import glog as log
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import transforms as t
from scripts.matio import save_mat
from model.common import models_backbones
from datasets.megaface import MegaFace
from datasets.trillion_pairs import TrillionPairs
from utils.utils import load_model_state
from utils.augmentation import ResizeNumpy, NumpyToTensor
def clean_megaface(filenames, features, noises_list_path):
    """Append a noise-flag column to the MegaFace feature matrix.

    Copies ``features`` into a matrix with one extra column; rows whose
    file name contains an entry from the noises list get the value 100.0
    in that extra column, all other rows keep 0.0 there.
    """
    with open(noises_list_path, 'r') as f:
        noises = [entry.strip() for entry in f.readlines()]
    num_feat = features.shape[1]
    clean_features = np.zeros((features.shape[0], num_feat + 1), dtype=np.float32)
    for row, name in enumerate(tqdm(filenames)):
        clean_features[row, :num_feat] = features[row, :]
        if any(noise in name for noise in noises):
            clean_features[row, num_feat] = 100.0
    return clean_features
def clean_facescrub(filenames, features, noises_list_path):
    """Replaces wrong instances of identities from the Facescrub with the centroids of these identities.

    First pass accumulates, per identity (the part of the basename before the
    first '_'), the sum of its feature rows (with one extra zero column
    appended).  Second pass replaces every row whose name matches the noises
    list with that identity's accumulated vector plus small uniform jitter,
    then L2-normalizes the replaced row.  Note the accumulated sums are never
    divided by the count, so "centroid" here is an unnormalized sum; the
    final L2 normalization makes the direction what matters.
    """
    # Output rows carry one extra column compared to the input features.
    clean_feature_size = features.shape[1] + 1
    with open(noises_list_path, 'r') as f:
        noises_list = f.readlines()
    # Noise entries are matched by name without their file extension.
    noises_list = [osp.splitext(line.strip())[0] for line in noises_list]
    clean_features = np.zeros((features.shape[0], clean_feature_size), dtype=np.float32)
    centroids = {}
    # Pass 1: copy features and accumulate per-identity sums.
    for i, filename in enumerate(tqdm(filenames)):
        clean_features[i, 0: features.shape[1]] = features[i, :]
        id_name = osp.basename(filename).split('_')[0]
        if not id_name in centroids:
            centroids[id_name] = np.zeros(clean_feature_size, dtype=np.float32)
        centroids[id_name] += clean_features[i, :]
    # Pass 2: overwrite noisy rows with the identity sum + jitter, normalized.
    for i, file_path in enumerate(tqdm(filenames)):
        filename = osp.basename(file_path)
        for line in noises_list:
            if line in filename.replace(' ', '_'):
                id_name = filename.split('_')[0]
                # Jitter keeps replaced rows distinct from one another.
                clean_features[i, :] = centroids[id_name] + np.random.uniform(-0.001, 0.001, clean_feature_size)
                clean_features[i, :] /= np.linalg.norm(clean_features[i, :])
                break
    return clean_features
@torch.no_grad()
def main(args):
    """Extract face embeddings and dump them as MegaFace/Trillion-Pairs features.

    Reads image paths (plus optional landmarks and bounding boxes) either
    from a MegaFace-style list file or from the Trillion Pairs landmark
    file, runs the selected backbone over every image, L2-normalizes the
    embeddings, optionally cleans known noise entries, and writes one
    feature file per image (or a single matrix for the trillion format).
    """
    input_filenames = []
    output_filenames = []
    input_dir = os.path.abspath(args.input_dir)
    output_dir = os.path.abspath(args.output_dir)
    if not args.trillion_format:
        log.info('Reading info...')
        with open(os.path.join(args.input_dir, os.path.basename(args.input_list)), 'r') as f:
            lines = f.readlines()
        for line in tqdm(lines):
            # Each line: <relative path> [ | <landmarks> | <bbox> ]
            info = line.strip().split('|')
            file = info[0].strip()
            filename = os.path.join(input_dir, file)
            path, _ = osp.split(filename)
            # Mirror the input directory layout under the output directory.
            out_folder = path.replace(input_dir, output_dir)
            if not osp.isdir(out_folder):
                os.makedirs(out_folder)
            landmarks = None
            bbox = None
            if len(info) > 2:
                landmarks = info[1].strip().split(' ')
                landmarks = [float(x) for x in landmarks]
                bbox = info[2].strip().split(' ')
                bbox = [int(float(x)) for x in bbox]
            outname = filename.replace(input_dir, output_dir) + args.file_ending
            input_filenames.append({'path': filename, 'landmarks': landmarks, 'bbox': bbox})
            output_filenames += [outname]
        nrof_images = len(input_filenames)
        # BUG FIX: the count was passed as a stray positional argument with no
        # format placeholder, which breaks logging's lazy %-formatting.
        log.info('Total number of images: %d', nrof_images)
        dataset = MegaFace(input_filenames)
    else:
        dataset = TrillionPairs(args.input_dir, osp.join(args.input_dir, 'testdata_lmk.txt'), test_mode=True)
        nrof_images = len(dataset)
    emb_array = np.zeros((nrof_images, args.embedding_size), dtype=np.float32)
    # Resize to the backbone's input resolution and convert BGR->RGB tensors.
    dataset.transform = t.Compose([ResizeNumpy(models_backbones[args.model].get_input_res()),
                                   NumpyToTensor(switch_rb=True)])
    val_loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=5, shuffle=False)
    model = models_backbones[args.model](embedding_size=args.embedding_size, feature=True)
    assert args.snap is not None
    log.info('Snapshot ' + args.snap + ' ...')
    log.info('Extracting embeddings ...')
    model = load_model_state(model, args.snap, args.devices[0], eval_state=True)
    model = torch.nn.DataParallel(model, device_ids=args.devices, output_device=args.devices[0])
    f_output_filenames = []
    with torch.cuda.device(args.devices[0]):
        for i, data in enumerate(tqdm(val_loader), 0):
            idxs, imgs = data['idx'], data['img']
            # L2-normalize so downstream comparisons use cosine similarity.
            batch_embeddings = F.normalize(model(imgs), p=2, dim=1).data.cpu().numpy()
            batch_embeddings = batch_embeddings.reshape(batch_embeddings.shape[0], -1)
            path_indices = idxs.data.cpu().numpy()
            start_index = i*args.batch_size
            end_index = min((i+1)*args.batch_size, nrof_images)
            # The loader must deliver samples strictly in order (shuffle=False).
            assert start_index == path_indices[0]
            assert end_index == path_indices[-1] + 1
            assert emb_array[start_index:end_index, :].shape == batch_embeddings.shape
            emb_array[start_index:end_index, :] = batch_embeddings
            if not args.trillion_format:
                for index in path_indices:
                    f_output_filenames.append(output_filenames[index])
        # BUG FIX: the original compared output_filenames against itself
        # (always true); the intent is to check every image was processed.
        assert len(f_output_filenames) == len(output_filenames)
    log.info('Extracting features Done.')
    if args.trillion_format:
        # Trillion Pairs expects a single matrix file.
        save_mat(args.file_ending, emb_array)
    else:
        if 'megaface_noises.txt' in args.noises_list:
            log.info('Cleaning Megaface features')
            emb_array = clean_megaface(f_output_filenames, emb_array, args.noises_list)
        elif 'facescrub_noises.txt' in args.noises_list:
            log.info('Cleaning Facescrub features')
            emb_array = clean_facescrub(f_output_filenames, emb_array, args.noises_list)
        else:
            log.info('Megaface features are not cleaned up.')
        log.info('Saving features to files...')
        # One feature file per input image, mirroring the input tree.
        for i in tqdm(range(len(f_output_filenames))):
            save_mat(f_output_filenames[i], emb_array[i, :])
def parse_argument(argv):
    """Parse command line arguments for the feature-dump script.

    Args:
        argv: argument list without the program name (e.g. sys.argv[1:]).

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description='Save embeddings to MegaFace features files')
    parser.add_argument('--model', choices=models_backbones.keys(), type=str, default='rmnet', help='Model type.')
    parser.add_argument('input_dir', help='Path to MegaFace Features')
    parser.add_argument('output_dir', help='Path to FaceScrub Features')
    parser.add_argument('--input_list', default='list.txt', type=str, required=False)
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--embedding_size', type=int, default=128)
    parser.add_argument('--devices', type=int, nargs='+', default=[0], help='CUDA devices to use.')
    parser.add_argument('--snap', type=str, required=True, help='Snapshot to evaluate.')
    parser.add_argument('--noises_list', type=str, default='', required=False, help='A list of the Megaface or Facescrub noises produced by insightface. \
                        See https://github.com/deepinsight/insightface/blob/master/src/megaface/README.md')
    parser.add_argument('--file_ending', help='Ending appended to original photo files. i.e.\
                        11084833664_0.jpg_LBP_100x100.bin => _LBP_100x100.bin', default='_rmnet.bin')
    parser.add_argument('--trillion_format', action='store_true')
    return parser.parse_args(argv)
if __name__ == '__main__':
    # Script entry point: drop argv[0] (the program name) before parsing.
    main(parse_argument(sys.argv[1:]))
| 44.159794 | 155 | 0.65834 |
import sys
import argparse
import os
import os.path as osp
from tqdm import tqdm
import numpy as np
import glog as log
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import transforms as t
from scripts.matio import save_mat
from model.common import models_backbones
from datasets.megaface import MegaFace
from datasets.trillion_pairs import TrillionPairs
from utils.utils import load_model_state
from utils.augmentation import ResizeNumpy, NumpyToTensor
def clean_megaface(filenames, features, noises_list_path):
with open(noises_list_path, 'r') as f:
noises_list = f.readlines()
noises_list = [line.strip() for line in noises_list]
clean_features = np.zeros((features.shape[0], features.shape[1] + 1), dtype=np.float32)
for i, filename in enumerate(tqdm(filenames)):
clean_features[i, 0: features.shape[1]] = features[i, :]
for line in noises_list:
if line in filename:
clean_features[i, features.shape[1]] = 100.0
break
return clean_features
def clean_facescrub(filenames, features, noises_list_path):
clean_feature_size = features.shape[1] + 1
with open(noises_list_path, 'r') as f:
noises_list = f.readlines()
noises_list = [osp.splitext(line.strip())[0] for line in noises_list]
clean_features = np.zeros((features.shape[0], clean_feature_size), dtype=np.float32)
centroids = {}
for i, filename in enumerate(tqdm(filenames)):
clean_features[i, 0: features.shape[1]] = features[i, :]
id_name = osp.basename(filename).split('_')[0]
if not id_name in centroids:
centroids[id_name] = np.zeros(clean_feature_size, dtype=np.float32)
centroids[id_name] += clean_features[i, :]
for i, file_path in enumerate(tqdm(filenames)):
filename = osp.basename(file_path)
for line in noises_list:
if line in filename.replace(' ', '_'):
id_name = filename.split('_')[0]
clean_features[i, :] = centroids[id_name] + np.random.uniform(-0.001, 0.001, clean_feature_size)
clean_features[i, :] /= np.linalg.norm(clean_features[i, :])
break
return clean_features
@torch.no_grad()
def main(args):
input_filenames = []
output_filenames = []
input_dir = os.path.abspath(args.input_dir)
output_dir = os.path.abspath(args.output_dir)
if not args.trillion_format:
log.info('Reading info...')
with open(os.path.join(args.input_dir, os.path.basename(args.input_list)), 'r') as f:
lines = f.readlines()
for line in tqdm(lines):
info = line.strip().split('|')
file = info[0].strip()
filename = os.path.join(input_dir, file)
path, _ = osp.split(filename)
out_folder = path.replace(input_dir, output_dir)
if not osp.isdir(out_folder):
os.makedirs(out_folder)
landmarks = None
bbox = None
if len(info) > 2:
landmarks = info[1].strip().split(' ')
landmarks = [float(x) for x in landmarks]
bbox = info[2].strip().split(' ')
bbox = [int(float(x)) for x in bbox]
outname = filename.replace(input_dir, output_dir) + args.file_ending
input_filenames.append({'path': filename, 'landmarks': landmarks, 'bbox': bbox})
output_filenames += [outname]
nrof_images = len(input_filenames)
log.info("Total number of images: ", nrof_images)
dataset = MegaFace(input_filenames)
else:
dataset = TrillionPairs(args.input_dir, osp.join(args.input_dir, 'testdata_lmk.txt'), test_mode=True)
nrof_images = len(dataset)
emb_array = np.zeros((nrof_images, args.embedding_size), dtype=np.float32)
dataset.transform = t.Compose([ResizeNumpy(models_backbones[args.model].get_input_res()),
NumpyToTensor(switch_rb=True)])
val_loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=5, shuffle=False)
model = models_backbones[args.model](embedding_size=args.embedding_size, feature=True)
assert args.snap is not None
log.info('Snapshot ' + args.snap + ' ...')
log.info('Extracting embeddings ...')
model = load_model_state(model, args.snap, args.devices[0], eval_state=True)
model = torch.nn.DataParallel(model, device_ids=args.devices, output_device=args.devices[0])
f_output_filenames = []
with torch.cuda.device(args.devices[0]):
for i, data in enumerate(tqdm(val_loader), 0):
idxs, imgs = data['idx'], data['img']
batch_embeddings = F.normalize(model(imgs), p=2, dim=1).data.cpu().numpy()
batch_embeddings = batch_embeddings.reshape(batch_embeddings.shape[0], -1)
path_indices = idxs.data.cpu().numpy()
start_index = i*args.batch_size
end_index = min((i+1)*args.batch_size, nrof_images)
assert start_index == path_indices[0]
assert end_index == path_indices[-1] + 1
assert emb_array[start_index:end_index, :].shape == batch_embeddings.shape
emb_array[start_index:end_index, :] = batch_embeddings
if not args.trillion_format:
for index in path_indices:
f_output_filenames.append(output_filenames[index])
assert len(output_filenames) == len(output_filenames)
log.info('Extracting features Done.')
if args.trillion_format:
save_mat(args.file_ending, emb_array)
else:
if 'megaface_noises.txt' in args.noises_list:
log.info('Cleaning Megaface features')
emb_array = clean_megaface(f_output_filenames, emb_array, args.noises_list)
elif 'facescrub_noises.txt' in args.noises_list:
log.info('Cleaning Facescrub features')
emb_array = clean_facescrub(f_output_filenames, emb_array, args.noises_list)
else:
log.info('Megaface features are not cleaned up.')
log.info('Saving features to files...')
for i in tqdm(range(len(f_output_filenames))):
save_mat(f_output_filenames[i], emb_array[i, :])
def parse_argument(argv):
    """Parse command-line options for the MegaFace feature-export script.

    @param argv: argument list excluding the program name (e.g. sys.argv[1:])
    @return: argparse.Namespace with the parsed options
    """
    parser = argparse.ArgumentParser(description='Save embeddings to MegaFace features files')
    # Model backbone to instantiate; choices come from the project's registry.
    parser.add_argument('--model', choices=models_backbones.keys(), type=str, default='rmnet', help='Model type.')
    parser.add_argument('input_dir', help='Path to MegaFace Features')
    parser.add_argument('output_dir', help='Path to FaceScrub Features')
    parser.add_argument('--input_list', default='list.txt', type=str, required=False)
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--embedding_size', type=int, default=128)
    parser.add_argument('--devices', type=int, nargs='+', default=[0], help='CUDA devices to use.')
    parser.add_argument('--snap', type=str, required=True, help='Snapshot to evaluate.')
    parser.add_argument('--noises_list', type=str, default='', required=False, help='A list of the Megaface or Facescrub noises produced by insightface. \
                        See https://github.com/deepinsight/insightface/blob/master/src/megaface/README.md')
    parser.add_argument('--file_ending', help='Ending appended to original photo files. i.e.\
                            11084833664_0.jpg_LBP_100x100.bin => _LBP_100x100.bin', default='_rmnet.bin')
    parser.add_argument('--trillion_format', action='store_true')
    return parser.parse_args(argv)
# Script entry point: parse CLI args and run the feature extraction.
if __name__ == '__main__':
    main(parse_argument(sys.argv[1:]))
| true | true |
1c47c6a57ba1e3e281016c90e86575d8ae9b3a68 | 11,286 | py | Python | fpga/lib/eth/tb/test_axis_gmii_tx.py | totuwei/corundum | e983ad519fb4523d0ffca32f5e436195bcfc945c | [
"BSD-2-Clause-FreeBSD"
] | 1,121 | 2015-05-26T14:41:44.000Z | 2022-03-31T07:17:48.000Z | tb/test_axis_gmii_tx.py | yuzu762/verilog-ethernet | 108c02d721aada8a8f51e22328f6ca6c64b70d33 | [
"MIT"
] | 98 | 2016-02-12T21:15:45.000Z | 2022-03-31T03:13:00.000Z | tb/test_axis_gmii_tx.py | yuzu762/verilog-ethernet | 108c02d721aada8a8f51e22328f6ca6c64b70d33 | [
"MIT"
] | 368 | 2015-05-05T20:49:01.000Z | 2022-03-31T09:43:53.000Z | #!/usr/bin/env python
"""
Copyright (c) 2015-2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axis_ep
import eth_ep
import gmii_ep
# Name of the RTL module under test and the matching testbench top level.
module = 'axis_gmii_tx'
testbench = 'test_%s' % module
# Verilog sources compiled into the simulation: the DUT, its LFSR (CRC)
# helper, and the HDL testbench wrapper.
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("../rtl/lfsr.v")
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
# Icarus Verilog compile command producing <testbench>.vvp for co-simulation.
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
    """Build the MyHDL/Icarus co-simulation testbench for axis_gmii_tx.

    Wires an AXI-stream frame source and a GMII sink around the Verilog DUT,
    then drives three test scenarios (single frame, back-to-back frames,
    tuser-marked bad frame) over a range of payload lengths and clock-enable
    rates (GMII and MII modes).  Returns the MyHDL instances to simulate.
    """
    # Parameters
    DATA_WIDTH = 8
    ENABLE_PADDING = 1
    MIN_FRAME_LENGTH = 64
    PTP_TS_ENABLE = 0
    PTP_TS_WIDTH = 96
    PTP_TAG_ENABLE = PTP_TS_ENABLE
    PTP_TAG_WIDTH = 16
    # tuser carries the PTP tag (when enabled) plus one "bad frame" bit.
    USER_WIDTH = (PTP_TAG_WIDTH if PTP_TAG_ENABLE else 0) + 1
    # Inputs
    clk = Signal(bool(0))
    rst = Signal(bool(0))
    current_test = Signal(intbv(0)[8:])
    s_axis_tdata = Signal(intbv(0)[DATA_WIDTH:])
    s_axis_tvalid = Signal(bool(0))
    s_axis_tlast = Signal(bool(0))
    s_axis_tuser = Signal(intbv(0)[USER_WIDTH:])
    ptp_ts = Signal(intbv(0)[PTP_TS_WIDTH:])
    clk_enable = Signal(bool(1))
    mii_select = Signal(bool(0))
    ifg_delay = Signal(intbv(0)[8:])
    # Outputs
    s_axis_tready = Signal(bool(0))
    gmii_txd = Signal(intbv(0)[DATA_WIDTH:])
    gmii_tx_en = Signal(bool(0))
    gmii_tx_er = Signal(bool(0))
    m_axis_ptp_ts = Signal(intbv(0)[PTP_TS_WIDTH:])
    m_axis_ptp_ts_tag = Signal(intbv(0)[PTP_TAG_WIDTH:])
    m_axis_ptp_ts_valid = Signal(bool(0))
    start_packet = Signal(bool(0))
    error_underflow = Signal(bool(0))
    # sources and sinks
    source_pause = Signal(bool(0))
    source = axis_ep.AXIStreamSource()
    source_logic = source.create_logic(
        clk,
        rst,
        tdata=s_axis_tdata,
        tvalid=s_axis_tvalid,
        tready=s_axis_tready,
        tlast=s_axis_tlast,
        tuser=s_axis_tuser,
        pause=source_pause,
        name='source'
    )
    # GMII sink monitors the DUT's transmit pins as if it were a receiver.
    sink = gmii_ep.GMIISink()
    sink_logic = sink.create_logic(
        clk,
        rst,
        rxd=gmii_txd,
        rx_dv=gmii_tx_en,
        rx_er=gmii_tx_er,
        clk_enable=clk_enable,
        mii_select=mii_select,
        name='sink'
    )
    # DUT
    if os.system(build_cmd):
        raise Exception("Error running build command")
    dut = Cosimulation(
        "vvp -m myhdl %s.vvp -lxt2" % testbench,
        clk=clk,
        rst=rst,
        current_test=current_test,
        s_axis_tdata=s_axis_tdata,
        s_axis_tvalid=s_axis_tvalid,
        s_axis_tready=s_axis_tready,
        s_axis_tlast=s_axis_tlast,
        s_axis_tuser=s_axis_tuser,
        gmii_txd=gmii_txd,
        gmii_tx_en=gmii_tx_en,
        gmii_tx_er=gmii_tx_er,
        ptp_ts=ptp_ts,
        m_axis_ptp_ts=m_axis_ptp_ts,
        m_axis_ptp_ts_tag=m_axis_ptp_ts_tag,
        m_axis_ptp_ts_valid=m_axis_ptp_ts_valid,
        clk_enable=clk_enable,
        mii_select=mii_select,
        ifg_delay=ifg_delay,
        start_packet=start_packet,
        error_underflow=error_underflow
    )
    @always(delay(4))
    def clkgen():
        # Free-running clock: toggle every 4 time units (8-unit period).
        clk.next = not clk
    # clk_enable_rate N asserts clk_enable once every N clock cycles,
    # emulating slower interface rates (e.g. MII nibble timing).
    clk_enable_rate = Signal(int(1))
    clk_enable_div = Signal(int(0))
    @always(clk.posedge)
    def clk_enable_gen():
        # NOTE(review): reads .next (the value scheduled this delta) rather
        # than the current value — presumably intentional to avoid an extra
        # cycle of latency in the divider; confirm against MyHDL semantics.
        if clk_enable_div.next > 0:
            clk_enable.next = 0
            clk_enable_div.next = clk_enable_div - 1
        else:
            clk_enable.next = 1
            clk_enable_div.next = clk_enable_rate - 1
    @instance
    def check():
        """Stimulus/checker process: reset, then run all test scenarios."""
        yield delay(100)
        yield clk.posedge
        # Pulse reset for one clock cycle.
        rst.next = 1
        yield clk.posedge
        rst.next = 0
        yield clk.posedge
        yield delay(100)
        yield clk.posedge
        # Standard 12-byte inter-frame gap.
        ifg_delay.next = 12
        # testbench stimulus
        # (rate, mii): full-rate GMII, 1/10-rate GMII, and 1/5-rate MII.
        for rate, mii in [(1, 0), (10, 0), (5, 1)]:
            clk_enable_rate.next = rate
            mii_select.next = mii
            yield delay(100)
            # Sweep short (unpadded->padded) and near-minimum frame sizes.
            for payload_len in list(range(1,18))+list(range(64,82)):
                yield clk.posedge
                print("test 1: test packet, length %d" % payload_len)
                current_test.next = 1
                test_frame = eth_ep.EthFrame()
                test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
                test_frame.eth_src_mac = 0x5A5152535455
                test_frame.eth_type = 0x8000
                test_frame.payload = bytearray(range(payload_len))
                test_frame.update_fcs()
                axis_frame = test_frame.build_axis()
                source.send(axis_frame)
                yield sink.wait()
                rx_frame = sink.recv()
                # First 8 octets must be the Ethernet preamble + SFD.
                assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')
                eth_frame = eth_ep.EthFrame()
                eth_frame.parse_axis_fcs(rx_frame.data[8:])
                print(hex(eth_frame.eth_fcs))
                print(hex(eth_frame.calc_fcs()))
                # Payload must be padded up to the 46-byte minimum.
                assert len(eth_frame.payload.data) == max(payload_len, 46)
                assert eth_frame.eth_fcs == eth_frame.calc_fcs()
                assert eth_frame.eth_dest_mac == test_frame.eth_dest_mac
                assert eth_frame.eth_src_mac == test_frame.eth_src_mac
                assert eth_frame.eth_type == test_frame.eth_type
                assert eth_frame.payload.data.index(test_frame.payload.data) == 0
                assert sink.empty()
                yield delay(100)
                yield clk.posedge
                print("test 2: back-to-back packets, length %d" % payload_len)
                current_test.next = 2
                test_frame1 = eth_ep.EthFrame()
                test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
                test_frame1.eth_src_mac = 0x5A5152535455
                test_frame1.eth_type = 0x8000
                test_frame1.payload = bytearray(range(payload_len))
                test_frame1.update_fcs()
                test_frame2 = eth_ep.EthFrame()
                test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
                test_frame2.eth_src_mac = 0x5A5152535455
                test_frame2.eth_type = 0x8000
                test_frame2.payload = bytearray(range(payload_len))
                test_frame2.update_fcs()
                axis_frame1 = test_frame1.build_axis()
                axis_frame2 = test_frame2.build_axis()
                source.send(axis_frame1)
                source.send(axis_frame2)
                yield sink.wait()
                rx_frame = sink.recv()
                assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')
                eth_frame = eth_ep.EthFrame()
                eth_frame.parse_axis_fcs(rx_frame.data[8:])
                print(hex(eth_frame.eth_fcs))
                print(hex(eth_frame.calc_fcs()))
                assert len(eth_frame.payload.data) == max(payload_len, 46)
                assert eth_frame.eth_fcs == eth_frame.calc_fcs()
                assert eth_frame.eth_dest_mac == test_frame1.eth_dest_mac
                assert eth_frame.eth_src_mac == test_frame1.eth_src_mac
                assert eth_frame.eth_type == test_frame1.eth_type
                assert eth_frame.payload.data.index(test_frame1.payload.data) == 0
                yield sink.wait()
                rx_frame = sink.recv()
                assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')
                eth_frame = eth_ep.EthFrame()
                eth_frame.parse_axis_fcs(rx_frame.data[8:])
                print(hex(eth_frame.eth_fcs))
                print(hex(eth_frame.calc_fcs()))
                assert len(eth_frame.payload.data) == max(payload_len, 46)
                assert eth_frame.eth_fcs == eth_frame.calc_fcs()
                assert eth_frame.eth_dest_mac == test_frame2.eth_dest_mac
                assert eth_frame.eth_src_mac == test_frame2.eth_src_mac
                assert eth_frame.eth_type == test_frame2.eth_type
                assert eth_frame.payload.data.index(test_frame2.payload.data) == 0
                assert sink.empty()
                yield delay(100)
                yield clk.posedge
                print("test 3: tuser assert, length %d" % payload_len)
                current_test.next = 3
                test_frame1 = eth_ep.EthFrame()
                test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
                test_frame1.eth_src_mac = 0x5A5152535455
                test_frame1.eth_type = 0x8000
                test_frame1.payload = bytearray(range(payload_len))
                test_frame1.update_fcs()
                test_frame2 = eth_ep.EthFrame()
                test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
                test_frame2.eth_src_mac = 0x5A5152535455
                test_frame2.eth_type = 0x8000
                test_frame2.payload = bytearray(range(payload_len))
                test_frame2.update_fcs()
                axis_frame1 = test_frame1.build_axis()
                axis_frame2 = test_frame2.build_axis()
                # Mark the first frame bad via tuser; the MAC should abort it
                # (error indication on the line) and still send the second.
                axis_frame1.user = 1
                source.send(axis_frame1)
                source.send(axis_frame2)
                yield sink.wait()
                rx_frame = sink.recv()
                assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')
                assert rx_frame.error[-1]
                # bad packet
                yield sink.wait()
                rx_frame = sink.recv()
                assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')
                eth_frame = eth_ep.EthFrame()
                eth_frame.parse_axis_fcs(rx_frame.data[8:])
                print(hex(eth_frame.eth_fcs))
                print(hex(eth_frame.calc_fcs()))
                assert len(eth_frame.payload.data) == max(payload_len, 46)
                assert eth_frame.eth_fcs == eth_frame.calc_fcs()
                assert eth_frame.eth_dest_mac == test_frame2.eth_dest_mac
                assert eth_frame.eth_src_mac == test_frame2.eth_src_mac
                assert eth_frame.eth_type == test_frame2.eth_type
                assert eth_frame.payload.data.index(test_frame2.payload.data) == 0
                assert sink.empty()
                yield delay(100)
        raise StopSimulation
    return instances()
def test_bench():
    """Construct the testbench and run the MyHDL simulation to completion."""
    Simulation(bench()).run()
# Allow running the testbench directly (outside a test runner).
if __name__ == '__main__':
    print("Running test...")
    test_bench()
| 32.153846 | 91 | 0.601985 |
from myhdl import *
import os
import axis_ep
import eth_ep
import gmii_ep
module = 'axis_gmii_tx'
testbench = 'test_%s' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("../rtl/lfsr.v")
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
DATA_WIDTH = 8
ENABLE_PADDING = 1
MIN_FRAME_LENGTH = 64
PTP_TS_ENABLE = 0
PTP_TS_WIDTH = 96
PTP_TAG_ENABLE = PTP_TS_ENABLE
PTP_TAG_WIDTH = 16
USER_WIDTH = (PTP_TAG_WIDTH if PTP_TAG_ENABLE else 0) + 1
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
s_axis_tdata = Signal(intbv(0)[DATA_WIDTH:])
s_axis_tvalid = Signal(bool(0))
s_axis_tlast = Signal(bool(0))
s_axis_tuser = Signal(intbv(0)[USER_WIDTH:])
ptp_ts = Signal(intbv(0)[PTP_TS_WIDTH:])
clk_enable = Signal(bool(1))
mii_select = Signal(bool(0))
ifg_delay = Signal(intbv(0)[8:])
s_axis_tready = Signal(bool(0))
gmii_txd = Signal(intbv(0)[DATA_WIDTH:])
gmii_tx_en = Signal(bool(0))
gmii_tx_er = Signal(bool(0))
m_axis_ptp_ts = Signal(intbv(0)[PTP_TS_WIDTH:])
m_axis_ptp_ts_tag = Signal(intbv(0)[PTP_TAG_WIDTH:])
m_axis_ptp_ts_valid = Signal(bool(0))
start_packet = Signal(bool(0))
error_underflow = Signal(bool(0))
source_pause = Signal(bool(0))
source = axis_ep.AXIStreamSource()
source_logic = source.create_logic(
clk,
rst,
tdata=s_axis_tdata,
tvalid=s_axis_tvalid,
tready=s_axis_tready,
tlast=s_axis_tlast,
tuser=s_axis_tuser,
pause=source_pause,
name='source'
)
sink = gmii_ep.GMIISink()
sink_logic = sink.create_logic(
clk,
rst,
rxd=gmii_txd,
rx_dv=gmii_tx_en,
rx_er=gmii_tx_er,
clk_enable=clk_enable,
mii_select=mii_select,
name='sink'
)
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
s_axis_tdata=s_axis_tdata,
s_axis_tvalid=s_axis_tvalid,
s_axis_tready=s_axis_tready,
s_axis_tlast=s_axis_tlast,
s_axis_tuser=s_axis_tuser,
gmii_txd=gmii_txd,
gmii_tx_en=gmii_tx_en,
gmii_tx_er=gmii_tx_er,
ptp_ts=ptp_ts,
m_axis_ptp_ts=m_axis_ptp_ts,
m_axis_ptp_ts_tag=m_axis_ptp_ts_tag,
m_axis_ptp_ts_valid=m_axis_ptp_ts_valid,
clk_enable=clk_enable,
mii_select=mii_select,
ifg_delay=ifg_delay,
start_packet=start_packet,
error_underflow=error_underflow
)
@always(delay(4))
def clkgen():
clk.next = not clk
clk_enable_rate = Signal(int(1))
clk_enable_div = Signal(int(0))
@always(clk.posedge)
def clk_enable_gen():
if clk_enable_div.next > 0:
clk_enable.next = 0
clk_enable_div.next = clk_enable_div - 1
else:
clk_enable.next = 1
clk_enable_div.next = clk_enable_rate - 1
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
ifg_delay.next = 12
for rate, mii in [(1, 0), (10, 0), (5, 1)]:
clk_enable_rate.next = rate
mii_select.next = mii
yield delay(100)
for payload_len in list(range(1,18))+list(range(64,82)):
yield clk.posedge
print("test 1: test packet, length %d" % payload_len)
current_test.next = 1
test_frame = eth_ep.EthFrame()
test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame.eth_src_mac = 0x5A5152535455
test_frame.eth_type = 0x8000
test_frame.payload = bytearray(range(payload_len))
test_frame.update_fcs()
axis_frame = test_frame.build_axis()
source.send(axis_frame)
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')
eth_frame = eth_ep.EthFrame()
eth_frame.parse_axis_fcs(rx_frame.data[8:])
print(hex(eth_frame.eth_fcs))
print(hex(eth_frame.calc_fcs()))
assert len(eth_frame.payload.data) == max(payload_len, 46)
assert eth_frame.eth_fcs == eth_frame.calc_fcs()
assert eth_frame.eth_dest_mac == test_frame.eth_dest_mac
assert eth_frame.eth_src_mac == test_frame.eth_src_mac
assert eth_frame.eth_type == test_frame.eth_type
assert eth_frame.payload.data.index(test_frame.payload.data) == 0
assert sink.empty()
yield delay(100)
yield clk.posedge
print("test 2: back-to-back packets, length %d" % payload_len)
current_test.next = 2
test_frame1 = eth_ep.EthFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x8000
test_frame1.payload = bytearray(range(payload_len))
test_frame1.update_fcs()
test_frame2 = eth_ep.EthFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x8000
test_frame2.payload = bytearray(range(payload_len))
test_frame2.update_fcs()
axis_frame1 = test_frame1.build_axis()
axis_frame2 = test_frame2.build_axis()
source.send(axis_frame1)
source.send(axis_frame2)
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')
eth_frame = eth_ep.EthFrame()
eth_frame.parse_axis_fcs(rx_frame.data[8:])
print(hex(eth_frame.eth_fcs))
print(hex(eth_frame.calc_fcs()))
assert len(eth_frame.payload.data) == max(payload_len, 46)
assert eth_frame.eth_fcs == eth_frame.calc_fcs()
assert eth_frame.eth_dest_mac == test_frame1.eth_dest_mac
assert eth_frame.eth_src_mac == test_frame1.eth_src_mac
assert eth_frame.eth_type == test_frame1.eth_type
assert eth_frame.payload.data.index(test_frame1.payload.data) == 0
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')
eth_frame = eth_ep.EthFrame()
eth_frame.parse_axis_fcs(rx_frame.data[8:])
print(hex(eth_frame.eth_fcs))
print(hex(eth_frame.calc_fcs()))
assert len(eth_frame.payload.data) == max(payload_len, 46)
assert eth_frame.eth_fcs == eth_frame.calc_fcs()
assert eth_frame.eth_dest_mac == test_frame2.eth_dest_mac
assert eth_frame.eth_src_mac == test_frame2.eth_src_mac
assert eth_frame.eth_type == test_frame2.eth_type
assert eth_frame.payload.data.index(test_frame2.payload.data) == 0
assert sink.empty()
yield delay(100)
yield clk.posedge
print("test 3: tuser assert, length %d" % payload_len)
current_test.next = 3
test_frame1 = eth_ep.EthFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x8000
test_frame1.payload = bytearray(range(payload_len))
test_frame1.update_fcs()
test_frame2 = eth_ep.EthFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x8000
test_frame2.payload = bytearray(range(payload_len))
test_frame2.update_fcs()
axis_frame1 = test_frame1.build_axis()
axis_frame2 = test_frame2.build_axis()
axis_frame1.user = 1
source.send(axis_frame1)
source.send(axis_frame2)
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')
assert rx_frame.error[-1]
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')
eth_frame = eth_ep.EthFrame()
eth_frame.parse_axis_fcs(rx_frame.data[8:])
print(hex(eth_frame.eth_fcs))
print(hex(eth_frame.calc_fcs()))
assert len(eth_frame.payload.data) == max(payload_len, 46)
assert eth_frame.eth_fcs == eth_frame.calc_fcs()
assert eth_frame.eth_dest_mac == test_frame2.eth_dest_mac
assert eth_frame.eth_src_mac == test_frame2.eth_src_mac
assert eth_frame.eth_type == test_frame2.eth_type
assert eth_frame.payload.data.index(test_frame2.payload.data) == 0
assert sink.empty()
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
| true | true |
1c47c743603061855d979f37a27b7acaf2a74e4b | 7,496 | py | Python | python-modules/twisted/twisted/internet/ssl.py | stormtheh4ck3r/python-for-android | b9ea9161392f60566b81482b1e25cd77004d5c45 | [
"Apache-2.0"
] | 267 | 2015-03-22T15:23:48.000Z | 2022-03-05T21:57:34.000Z | python-modules/twisted/twisted/internet/ssl.py | rockyzhang/zhangyanhit-python-for-android-mips | 799dd5ca16f72135f2eab71e144a68842e2aaee0 | [
"Apache-2.0"
] | 133 | 2015-03-21T15:13:43.000Z | 2021-12-11T23:37:58.000Z | python-modules/twisted/twisted/internet/ssl.py | rockyzhang/zhangyanhit-python-for-android-mips | 799dd5ca16f72135f2eab71e144a68842e2aaee0 | [
"Apache-2.0"
] | 119 | 2015-04-28T16:07:10.000Z | 2022-03-18T03:49:48.000Z | # -*- test-case-name: twisted.test.test_ssl -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
SSL transport. Requires PyOpenSSL (http://pyopenssl.sf.net).
SSL connections require a ContextFactory so they can create SSL contexts.
End users should only use the ContextFactory classes directly - for SSL
connections use the reactor.connectSSL/listenSSL and so on, as documented
in IReactorSSL.
All server context factories should inherit from ContextFactory, and all
client context factories should inherit from ClientContextFactory. At the
moment this is not enforced, but in the future it might be.
Future Plans:
- split module so reactor-specific classes are in a separate module
- support for switching TCP into SSL
- more options
Maintainer: Itamar Shtull-Trauring
"""
# If something goes wrong, most notably an OpenSSL import failure,
# sys.modules['twisted.internet.ssl'] will be bound to a partially
# initialized module object. This is wacko, but we will take advantage
# of it to publish whether or not SSL is available.
# See the end of this module for the other half of this solution.
# The correct idiom to import this module is thus:
# try:
# from twisted.internet import ssl
# except ImportError:
# # happens the first time the interpreter tries to import it
# ssl = None
# if ssl and not ssl.supported:
# # happens second and later times
# ssl = None
supported = False
# System imports
from OpenSSL import SSL
from zope.interface import implements, implementsOnly, implementedBy
# Twisted imports
from twisted.internet import tcp, interfaces, base, address
class ContextFactory:
    """
    Base class for factories that build server-side SSL context objects.

    Subclasses must override ``getContext`` to return a configured
    ``SSL.Context``.
    """
    # Server-side marker; client factories advertise isClient = 1.
    isClient = 0

    def getContext(self):
        """Return an ``SSL.Context``; must be overridden in subclasses."""
        raise NotImplementedError
class DefaultOpenSSLContextFactory(ContextFactory):
    """
    L{DefaultOpenSSLContextFactory} is a factory for server-side SSL context
    objects.  These objects define certain parameters related to SSL
    handshakes and the subsequent connection.

    @ivar _contextFactory: A callable which will be used to create new
        context objects.  This is typically L{SSL.Context}.
    """
    # Class-level default; the per-instance cached context is created in
    # __init__ (via cacheContext) and dropped by __getstate__ on pickling.
    _context = None

    def __init__(self, privateKeyFileName, certificateFileName,
                 sslmethod=SSL.SSLv23_METHOD, _contextFactory=SSL.Context):
        """
        @param privateKeyFileName: Name of a file containing a private key
        @param certificateFileName: Name of a file containing a certificate
        @param sslmethod: The SSL method to use
        """
        self.privateKeyFileName = privateKeyFileName
        self.certificateFileName = certificateFileName
        self.sslmethod = sslmethod
        self._contextFactory = _contextFactory

        # Create a context object right now.  This is to force validation of
        # the given parameters so that errors are detected earlier rather
        # than later.
        self.cacheContext()

    def cacheContext(self):
        """Build and cache the SSL context if none is cached yet (no-op otherwise)."""
        if self._context is None:
            ctx = self._contextFactory(self.sslmethod)
            # Disallow SSLv2!  It's insecure!  SSLv3 has been around since
            # 1996.  It's time to move on.
            ctx.set_options(SSL.OP_NO_SSLv2)
            ctx.use_certificate_file(self.certificateFileName)
            ctx.use_privatekey_file(self.privateKeyFileName)
            self._context = ctx

    def __getstate__(self):
        # SSL.Context objects cannot be pickled; drop the cached context and
        # let getContext rebuild it after unpickling.
        d = self.__dict__.copy()
        del d['_context']
        return d

    def __setstate__(self, state):
        self.__dict__ = state

    def getContext(self):
        """
        Return an SSL context.
        """
        # Fix: after unpickling, __getstate__ has removed the instance's
        # '_context' attribute, so the class-level None would be returned.
        # Rebuild on demand; cacheContext is a no-op when a context exists.
        self.cacheContext()
        return self._context
class ClientContextFactory:
    """A context factory for SSL clients."""
    # Client-side marker; server factories (ContextFactory) use isClient = 0.
    isClient = 1
    # SSLv23_METHOD allows SSLv2, SSLv3, and TLSv1.  We disable SSLv2 below,
    # though.
    method = SSL.SSLv23_METHOD
    # Callable used to create context objects; typically SSL.Context.
    _contextFactory = SSL.Context
    def getContext(self):
        """Return a new client-side SSL context with SSLv2 disabled."""
        ctx = self._contextFactory(self.method)
        # See comment in DefaultOpenSSLContextFactory about SSLv2.
        ctx.set_options(SSL.OP_NO_SSLv2)
        return ctx
class Client(tcp.Client):
    """I am an SSL client."""
    # Declare ISSLTransport plus everything tcp.Client provides, EXCEPT
    # ITLSTransport: once the connection is SSL from the start, switching
    # to TLS later no longer applies.
    implementsOnly(interfaces.ISSLTransport,
                   *[i for i in implementedBy(tcp.Client) if i != interfaces.ITLSTransport])
    def __init__(self, host, port, bindAddress, ctxFactory, connector, reactor=None):
        # tcp.Client.__init__ depends on self.ctxFactory being set
        self.ctxFactory = ctxFactory
        tcp.Client.__init__(self, host, port, bindAddress, connector, reactor)
    def getHost(self):
        """Returns the address from which I am connecting."""
        h, p = self.socket.getsockname()
        return address.IPv4Address('TCP', h, p, 'SSL')
    def getPeer(self):
        """Returns the address that I am connected."""
        return address.IPv4Address('TCP', self.addr[0], self.addr[1], 'SSL')
    def _connectDone(self):
        # Start TLS before resuming writes so no plaintext escapes, then let
        # the base class finish connection setup.
        self.startTLS(self.ctxFactory)
        self.startWriting()
        tcp.Client._connectDone(self)
class Server(tcp.Server):
    """
    Server-side SSL transport.
    """
    implements(interfaces.ISSLTransport)

    def getHost(self):
        """Return the address this server socket is bound to."""
        host, port = self.socket.getsockname()
        return address.IPv4Address('TCP', host, port, 'SSL')

    def getPeer(self):
        """Return the address of the connected peer."""
        host, port = self.client
        return address.IPv4Address('TCP', host, port, 'SSL')
class Port(tcp.Port):
    """I am an SSL port."""
    # Method name used to shut down the underlying socket object.
    _socketShutdownMethod = 'sock_shutdown'
    # Accepted connections are wrapped in the SSL Server transport.
    transport = Server
    def __init__(self, port, factory, ctxFactory, backlog=50, interface='', reactor=None):
        # ctxFactory supplies the server-side SSL context for every
        # connection accepted on this port.
        tcp.Port.__init__(self, port, factory, backlog, interface, reactor)
        self.ctxFactory = ctxFactory
    def createInternetSocket(self):
        """(internal) create an SSL socket
        """
        sock = tcp.Port.createInternetSocket(self)
        return SSL.Connection(self.ctxFactory.getContext(), sock)
    def _preMakeConnection(self, transport):
        # *Don't* call startTLS here
        # The transport already has the SSL.Connection object from above
        transport._startTLS()
        return tcp.Port._preMakeConnection(self, transport)
class Connector(base.BaseConnector):
    """Connector that establishes SSL client connections."""

    def __init__(self, host, port, factory, contextFactory, timeout, bindAddress, reactor=None):
        self.host = host
        self.port = port
        self.bindAddress = bindAddress
        self.contextFactory = contextFactory
        base.BaseConnector.__init__(self, factory, timeout, reactor)

    def _makeTransport(self):
        """Create the SSL client transport for this connection attempt."""
        return Client(self.host, self.port, self.bindAddress,
                      self.contextFactory, self, self.reactor)

    def getDestination(self):
        """Return the address this connector targets."""
        return address.IPv4Address('TCP', self.host, self.port, 'SSL')
from twisted.internet._sslverify import DistinguishedName, DN, Certificate
from twisted.internet._sslverify import CertificateRequest, PrivateCertificate
from twisted.internet._sslverify import KeyPair
from twisted.internet._sslverify import OpenSSLCertificateOptions as CertificateOptions
__all__ = [
"ContextFactory", "DefaultOpenSSLContextFactory", "ClientContextFactory",
'DistinguishedName', 'DN',
'Certificate', 'CertificateRequest', 'PrivateCertificate',
'KeyPair',
'CertificateOptions',
]
supported = True
| 32.034188 | 102 | 0.689301 |
plementsOnly, implementedBy
from twisted.internet import tcp, interfaces, base, address
class ContextFactory:
isClient = 0
def getContext(self):
raise NotImplementedError
class DefaultOpenSSLContextFactory(ContextFactory):
_context = None
def __init__(self, privateKeyFileName, certificateFileName,
sslmethod=SSL.SSLv23_METHOD, _contextFactory=SSL.Context):
self.privateKeyFileName = privateKeyFileName
self.certificateFileName = certificateFileName
self.sslmethod = sslmethod
self._contextFactory = _contextFactory
self.cacheContext()
def cacheContext(self):
if self._context is None:
ctx = self._contextFactory(self.sslmethod)
# 1996. It's time to move on.
ctx.set_options(SSL.OP_NO_SSLv2)
ctx.use_certificate_file(self.certificateFileName)
ctx.use_privatekey_file(self.privateKeyFileName)
self._context = ctx
def __getstate__(self):
d = self.__dict__.copy()
del d['_context']
return d
def __setstate__(self, state):
self.__dict__ = state
def getContext(self):
return self._context
class ClientContextFactory:
isClient = 1
method = SSL.SSLv23_METHOD
_contextFactory = SSL.Context
def getContext(self):
ctx = self._contextFactory(self.method)
ctx.set_options(SSL.OP_NO_SSLv2)
return ctx
class Client(tcp.Client):
implementsOnly(interfaces.ISSLTransport,
*[i for i in implementedBy(tcp.Client) if i != interfaces.ITLSTransport])
def __init__(self, host, port, bindAddress, ctxFactory, connector, reactor=None):
self.ctxFactory = ctxFactory
tcp.Client.__init__(self, host, port, bindAddress, connector, reactor)
def getHost(self):
h, p = self.socket.getsockname()
return address.IPv4Address('TCP', h, p, 'SSL')
def getPeer(self):
return address.IPv4Address('TCP', self.addr[0], self.addr[1], 'SSL')
def _connectDone(self):
self.startTLS(self.ctxFactory)
self.startWriting()
tcp.Client._connectDone(self)
class Server(tcp.Server):
implements(interfaces.ISSLTransport)
def getHost(self):
h, p = self.socket.getsockname()
return address.IPv4Address('TCP', h, p, 'SSL')
def getPeer(self):
h, p = self.client
return address.IPv4Address('TCP', h, p, 'SSL')
class Port(tcp.Port):
_socketShutdownMethod = 'sock_shutdown'
transport = Server
def __init__(self, port, factory, ctxFactory, backlog=50, interface='', reactor=None):
tcp.Port.__init__(self, port, factory, backlog, interface, reactor)
self.ctxFactory = ctxFactory
def createInternetSocket(self):
sock = tcp.Port.createInternetSocket(self)
return SSL.Connection(self.ctxFactory.getContext(), sock)
def _preMakeConnection(self, transport):
# The transport already has the SSL.Connection object from above
transport._startTLS()
return tcp.Port._preMakeConnection(self, transport)
class Connector(base.BaseConnector):
def __init__(self, host, port, factory, contextFactory, timeout, bindAddress, reactor=None):
self.host = host
self.port = port
self.bindAddress = bindAddress
self.contextFactory = contextFactory
base.BaseConnector.__init__(self, factory, timeout, reactor)
def _makeTransport(self):
return Client(self.host, self.port, self.bindAddress, self.contextFactory, self, self.reactor)
def getDestination(self):
return address.IPv4Address('TCP', self.host, self.port, 'SSL')
from twisted.internet._sslverify import DistinguishedName, DN, Certificate
from twisted.internet._sslverify import CertificateRequest, PrivateCertificate
from twisted.internet._sslverify import KeyPair
from twisted.internet._sslverify import OpenSSLCertificateOptions as CertificateOptions
__all__ = [
"ContextFactory", "DefaultOpenSSLContextFactory", "ClientContextFactory",
'DistinguishedName', 'DN',
'Certificate', 'CertificateRequest', 'PrivateCertificate',
'KeyPair',
'CertificateOptions',
]
supported = True
| true | true |
1c47c7b7ad1cb5f4dbaadc84f69896248dc1ef93 | 1,850 | py | Python | wikum-env3/lib/python3.7/site-packages/mwparserfromhell/nodes/text.py | xuericlin/wikum | f0171f1697efa91d6957f976f473c9201db85648 | [
"MIT"
] | 8 | 2021-04-29T16:49:45.000Z | 2021-08-09T18:56:35.000Z | wikum-env3/lib/python3.7/site-packages/mwparserfromhell/nodes/text.py | xuericlin/wikum | f0171f1697efa91d6957f976f473c9201db85648 | [
"MIT"
] | null | null | null | wikum-env3/lib/python3.7/site-packages/mwparserfromhell/nodes/text.py | xuericlin/wikum | f0171f1697efa91d6957f976f473c9201db85648 | [
"MIT"
] | 2 | 2020-08-03T13:02:06.000Z | 2020-11-04T03:15:44.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2019 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import unicode_literals
from . import Node
from ..compat import str
__all__ = ["Text"]
class Text(Node):
    """A node holding plain, unformatted wikitext with no special properties."""

    def __init__(self, value):
        super(Text, self).__init__()
        self.value = value

    def __unicode__(self):
        return self.value

    def __strip__(self, **kwargs):
        # Plain text strips to itself.
        return self

    def __showtree__(self, write, get, mark):
        escaped = str(self).encode("unicode_escape").decode("utf8")
        write(escaped)

    @property
    def value(self):
        """The actual text itself."""
        return self._value

    @value.setter
    def value(self, new_value):
        # Coerce whatever we are given to the text type.
        self._value = str(new_value)
| 34.259259 | 79 | 0.718378 |
from __future__ import unicode_literals
from . import Node
from ..compat import str
__all__ = ["Text"]
class Text(Node):
def __init__(self, value):
super(Text, self).__init__()
self.value = value
def __unicode__(self):
return self.value
def __strip__(self, **kwargs):
return self
def __showtree__(self, write, get, mark):
write(str(self).encode("unicode_escape").decode("utf8"))
@property
def value(self):
return self._value
@value.setter
def value(self, newval):
self._value = str(newval)
| true | true |
1c47c8b56a82daffb467121923485a7868336d49 | 981 | py | Python | ratelimit/rule.py | abersheeran/asgi-ratelimit | 504de6dca1eb99762114a0886d502679a608799e | [
"Apache-2.0"
] | 136 | 2020-06-08T10:38:19.000Z | 2022-03-24T14:45:51.000Z | ratelimit/rule.py | abersheeran/asgi-ratelimit | 504de6dca1eb99762114a0886d502679a608799e | [
"Apache-2.0"
] | 38 | 2020-07-12T15:35:15.000Z | 2022-03-25T03:27:45.000Z | ratelimit/rule.py | abersheeran/asgi-ratelimit | 504de6dca1eb99762114a0886d502679a608799e | [
"Apache-2.0"
] | 15 | 2021-01-19T13:48:37.000Z | 2022-03-18T02:34:52.000Z | from dataclasses import dataclass
from typing import Dict, Optional, Tuple
@dataclass
class Rule:
    """Rate-limit thresholds for a route, one optional limit per time window.

    Attributes:
        group: Name of the rule group this rule belongs to.
        second/minute/hour/day/month: Maximum number of requests allowed in
            the corresponding window, or None when that window is unlimited.
        block_time: Seconds a client stays blocked after exceeding a limit,
            or None for no blocking.
        zone: Optional zone name, or None.
    """

    group: str = "default"
    second: Optional[int] = None
    minute: Optional[int] = None
    hour: Optional[int] = None
    day: Optional[int] = None
    month: Optional[int] = None
    block_time: Optional[int] = None
    zone: Optional[str] = None

    def ruleset(self, path: str, user: str) -> Dict[str, Tuple[int, int]]:
        """Build the redis ruleset for this rule.

        Returns a mapping whose keys are redis keys of the form
        ``"{path}:{user}:{window}"`` and whose values are ``(limit, ttl)``
        tuples, with one entry per window that has a configured limit.
        """
        # Pair each window name with its configured limit; windows left as
        # None are unlimited and therefore omitted from the ruleset.
        pairs = ((name, getattr(self, name)) for name in RULENAMES)
        return {
            f"{path}:{user}:{name}": (limit, TTL[name])
            for name, limit in pairs
            if limit is not None
        }


# Window name -> time-to-live in seconds for the corresponding redis key.
TTL = {
    "second": 1,
    "minute": 60,
    "hour": 60 * 60,
    "day": 24 * 60 * 60,
    "month": 31 * 24 * 60 * 60,
}

# Names of the Rule fields that represent time windows (order preserved).
RULENAMES: Tuple[str, ...] = ("second", "minute", "hour", "day", "month")
| 24.525 | 87 | 0.566769 | from dataclasses import dataclass
from typing import Dict, Optional, Tuple
@dataclass
class Rule:
group: str = "default"
second: Optional[int] = None
minute: Optional[int] = None
hour: Optional[int] = None
day: Optional[int] = None
month: Optional[int] = None
block_time: Optional[int] = None
zone: Optional[str] = None
def ruleset(self, path: str, user: str) -> Dict[str, Tuple[int, int]]:
return {
f"{path}:{user}:{name}": (limit, TTL[name])
for name, limit in map(lambda name: (name, getattr(self, name)), RULENAMES)
if limit is not None
}
TTL = {
"second": 1,
"minute": 60,
"hour": 60 * 60,
"day": 24 * 60 * 60,
"month": 31 * 24 * 60 * 60,
}
RULENAMES: Tuple[str, ...] = ("second", "minute", "hour", "day", "month")
| true | true |
1c47c90a7ae040e58e2550f867ee1a2872a42dce | 32,446 | py | Python | cirq/sim/simulator.py | zchen088/Cirq | 8cf782554adbafed724987de3067de7ca565fa0c | [
"Apache-2.0"
] | 1 | 2019-12-18T17:42:14.000Z | 2019-12-18T17:42:14.000Z | cirq/sim/simulator.py | zchen088/Cirq | 8cf782554adbafed724987de3067de7ca565fa0c | [
"Apache-2.0"
] | null | null | null | cirq/sim/simulator.py | zchen088/Cirq | 8cf782554adbafed724987de3067de7ca565fa0c | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract base classes for different types of simulators.
Simulator types include:
SimulatesSamples: mimics the interface of quantum hardware.
SimulatesAmplitudes: computes amplitudes of desired bitstrings in the
final state of the simulation.
SimulatesFinalState: allows access to the final state of the simulation.
SimulatesIntermediateState: allows for access to the state of the simulation
as the simulation iterates through the moments of a cirq.
"""
from typing import (
Any,
Dict,
Iterator,
List,
Sequence,
Tuple,
Union,
Optional,
TYPE_CHECKING,
Set,
cast,
Callable,
TypeVar,
Generic,
)
import abc
import collections
import numpy as np
from cirq import circuits, ops, protocols, study, value, work
from cirq._compat import deprecated
if TYPE_CHECKING:
import cirq
TStepResult = TypeVar('TStepResult', bound='StepResult')
TSimulationTrialResult = TypeVar('TSimulationTrialResult', bound='SimulationTrialResult')
TSimulatorState = TypeVar('TSimulatorState')
class SimulatesSamples(work.Sampler, metaclass=abc.ABCMeta):
    """Simulator that mimics running on quantum hardware.
    Implementors of this interface should implement the _run method.
    """
    def run_sweep(
        self,
        program: 'cirq.Circuit',
        params: study.Sweepable,
        repetitions: int = 1,
    ) -> List[study.Result]:
        """Runs the supplied Circuit, mimicking quantum hardware.
        In contrast to run, this allows for sweeping over different parameter
        values.
        Args:
            program: The circuit to simulate.
            params: Parameters to run with the program.
            repetitions: The number of repetitions to simulate.
        Returns:
            Result list for this run; one for each possible parameter
            resolver.
        Raises:
            ValueError: If the circuit has no measurements.
        """
        if not program.has_measurements():
            raise ValueError("Circuit has no measurements to sample.")
        _verify_unique_measurement_keys(program)
        trial_results: List[study.Result] = []
        for param_resolver in study.to_resolvers(params):
            measurements = {}
            if repetitions == 0:
                # With zero repetitions, skip _run entirely and report an
                # empty (0 x 1) array per measurement key, so callers still
                # see every key present in the result.
                for _, op, _ in program.findall_operations_with_gate_type(ops.MeasurementGate):
                    measurements[protocols.measurement_key(op)] = np.empty([0, 1])
            else:
                measurements = self._run(
                    circuit=program, param_resolver=param_resolver, repetitions=repetitions
                )
            trial_results.append(
                study.Result.from_single_parameter_set(
                    params=param_resolver, measurements=measurements
                )
            )
        return trial_results
    @abc.abstractmethod
    def _run(
        self, circuit: circuits.Circuit, param_resolver: study.ParamResolver, repetitions: int
    ) -> Dict[str, np.ndarray]:
        """Run a simulation, mimicking quantum hardware.
        Args:
            circuit: The circuit to simulate.
            param_resolver: Parameters to run with the program.
            repetitions: Number of times to repeat the run. It is expected that
                this is validated greater than zero before calling this method.
        Returns:
            A dictionary from measurement gate key to measurement
            results. Measurement results are stored in a 2-dimensional
            numpy array, the first dimension corresponding to the repetition
            and the second to the actual boolean measurement results (ordered
            by the qubits being measured.)
        """
        raise NotImplementedError()
class SimulatesAmplitudes(metaclass=abc.ABCMeta):
    """Simulator that computes final amplitudes of given bitstrings.
    Given a circuit and a list of bitstrings, computes the amplitudes
    of the given bitstrings in the state obtained by applying the circuit
    to the all zeros state. Implementors of this interface should implement
    the compute_amplitudes_sweep method.
    """
    def compute_amplitudes(
        self,
        program: 'cirq.Circuit',
        bitstrings: Sequence[int],
        param_resolver: 'study.ParamResolverOrSimilarType' = None,
        qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
    ) -> Sequence[complex]:
        """Computes the desired amplitudes.
        The initial state is assumed to be the all zeros state.
        Args:
            program: The circuit to simulate.
            bitstrings: The bitstrings whose amplitudes are desired, input
                as an integer array where each integer is formed from measured
                qubit values according to `qubit_order` from most to least
                significant qubit, i.e. in big-endian ordering.
            param_resolver: Parameters to run with the program.
            qubit_order: Determines the canonical ordering of the qubits. This
                is often used in specifying the initial state, i.e. the
                ordering of the computational basis states.
        Returns:
            List of amplitudes.
        """
        # Wrap the single resolver in a one-element sweep and unpack the
        # sole result.
        return self.compute_amplitudes_sweep(
            program, bitstrings, study.ParamResolver(param_resolver), qubit_order
        )[0]
    @abc.abstractmethod
    def compute_amplitudes_sweep(
        self,
        program: 'cirq.Circuit',
        bitstrings: Sequence[int],
        params: study.Sweepable,
        qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
    ) -> Sequence[Sequence[complex]]:
        """Computes the desired amplitudes.
        The initial state is assumed to be the all zeros state.
        Args:
            program: The circuit to simulate.
            bitstrings: The bitstrings whose amplitudes are desired, input
                as an integer array where each integer is formed from measured
                qubit values according to `qubit_order` from most to least
                significant qubit, i.e. in big-endian ordering.
            params: Parameters to run with the program.
            qubit_order: Determines the canonical ordering of the qubits. This
                is often used in specifying the initial state, i.e. the
                ordering of the computational basis states.
        Returns:
            List of lists of amplitudes. The outer dimension indexes the
            circuit parameters and the inner dimension indexes the bitstrings.
        """
        raise NotImplementedError()
class SimulatesExpectationValues(metaclass=abc.ABCMeta):
    """Simulator that computes exact expectation values of observables.
    Given a circuit and an observable map, computes exact (to float precision)
    expectation values for each observable at the end of the circuit.
    Implementors of this interface should implement the
    simulate_expectation_values_sweep method.
    """
    def simulate_expectation_values(
        self,
        program: 'cirq.Circuit',
        observables: Union['cirq.PauliSumLike', List['cirq.PauliSumLike']],
        param_resolver: 'study.ParamResolverOrSimilarType' = None,
        qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
        initial_state: Any = None,
        permit_terminal_measurements: bool = False,
    ) -> List[float]:
        """Simulates the supplied circuit and calculates exact expectation
        values for the given observables on its final state.
        This method has no perfect analogy in hardware. Instead compare with
        Sampler.sample_expectation_values, which calculates estimated
        expectation values by sampling multiple times.
        Args:
            program: The circuit to simulate.
            observables: An observable or list of observables.
            param_resolver: Parameters to run with the program.
            qubit_order: Determines the canonical ordering of the qubits. This
                is often used in specifying the initial state, i.e. the
                ordering of the computational basis states.
            initial_state: The initial state for the simulation. The form of
                this state depends on the simulation implementation. See
                documentation of the implementing class for details.
            permit_terminal_measurements: If the provided circuit ends with
                measurement(s), this method will generate an error unless this
                is set to True. This is meant to prevent measurements from
                ruining expectation value calculations.
        Returns:
            A list of expectation values, with the value at index `n`
            corresponding to `observables[n]` from the input.
        Raises:
            ValueError if 'program' has terminal measurement(s) and
            'permit_terminal_measurements' is False.
        """
        # Wrap the single resolver in a one-element sweep and unpack the
        # sole result list.
        return self.simulate_expectation_values_sweep(
            program,
            observables,
            study.ParamResolver(param_resolver),
            qubit_order,
            initial_state,
            permit_terminal_measurements,
        )[0]
    @abc.abstractmethod
    def simulate_expectation_values_sweep(
        self,
        program: 'cirq.Circuit',
        observables: Union['cirq.PauliSumLike', List['cirq.PauliSumLike']],
        params: 'study.Sweepable',
        qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
        initial_state: Any = None,
        permit_terminal_measurements: bool = False,
    ) -> List[List[float]]:
        """Simulates the supplied circuit and calculates exact expectation
        values for the given observables on its final state, sweeping over the
        given params.
        This method has no perfect analogy in hardware. Instead compare with
        Sampler.sample_expectation_values, which calculates estimated
        expectation values by sampling multiple times.
        Args:
            program: The circuit to simulate.
            observables: An observable or list of observables.
            params: Parameters to run with the program.
            qubit_order: Determines the canonical ordering of the qubits. This
                is often used in specifying the initial state, i.e. the
                ordering of the computational basis states.
            initial_state: The initial state for the simulation. The form of
                this state depends on the simulation implementation. See
                documentation of the implementing class for details.
            permit_terminal_measurements: If the provided circuit ends in a
                measurement, this method will generate an error unless this
                is set to True. This is meant to prevent measurements from
                ruining expectation value calculations.
        Returns:
            A list of expectation-value lists. The outer index determines the
            sweep, and the inner index determines the observable. For instance,
            results[1][3] would select the fourth observable measured in the
            second sweep.
        Raises:
            ValueError if 'program' has terminal measurement(s) and
            'permit_terminal_measurements' is False.
        """
class SimulatesFinalState(Generic[TSimulationTrialResult], metaclass=abc.ABCMeta):
    """Simulator that allows access to the simulator's final state.
    Implementors of this interface should implement the simulate_sweep
    method. This simulator only returns the state of the quantum system
    for the final step of a simulation. This simulator state may be a state
    vector, the density matrix, or another representation, depending on the
    implementation. For simulators that also allow stepping through
    a circuit see `SimulatesIntermediateState`.
    """
    def simulate(
        self,
        program: 'cirq.Circuit',
        param_resolver: 'study.ParamResolverOrSimilarType' = None,
        qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
        initial_state: Any = None,
    ) -> TSimulationTrialResult:
        """Simulates the supplied Circuit.
        This method returns a result which allows access to the entire
        simulator's final state.
        Args:
            program: The circuit to simulate.
            param_resolver: Parameters to run with the program.
            qubit_order: Determines the canonical ordering of the qubits. This
                is often used in specifying the initial state, i.e. the
                ordering of the computational basis states.
            initial_state: The initial state for the simulation. The form of
                this state depends on the simulation implementation. See
                documentation of the implementing class for details.
        Returns:
            SimulationTrialResults for the simulation. Includes the final state.
        """
        # Wrap the single resolver in a one-element sweep and unpack the
        # sole result.
        return self.simulate_sweep(
            program, study.ParamResolver(param_resolver), qubit_order, initial_state
        )[0]
    @abc.abstractmethod
    def simulate_sweep(
        self,
        program: 'cirq.Circuit',
        params: study.Sweepable,
        qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
        initial_state: Any = None,
    ) -> List[TSimulationTrialResult]:
        """Simulates the supplied Circuit.
        This method returns a result which allows access to the entire final
        simulator state. In contrast to simulate, this allows for sweeping
        over different parameter values.
        Args:
            program: The circuit to simulate.
            params: Parameters to run with the program.
            qubit_order: Determines the canonical ordering of the qubits. This
                is often used in specifying the initial state, i.e. the
                ordering of the computational basis states.
            initial_state: The initial state for the simulation. The form of
                this state depends on the simulation implementation. See
                documentation of the implementing class for details.
        Returns:
            List of SimulationTrialResults for this run, one for each
            possible parameter resolver.
        """
        raise NotImplementedError()
class SimulatesIntermediateState(
    Generic[TStepResult, TSimulationTrialResult, TSimulatorState],
    SimulatesFinalState[TSimulationTrialResult],
    metaclass=abc.ABCMeta,
):
    """A SimulatesFinalState that simulates a circuit by moments.
    Whereas a general SimulatesFinalState may return the entire simulator
    state at the end of a circuit, a SimulatesIntermediateState can
    simulate stepping through the moments of a circuit.
    Implementors of this interface should implement the _base_iterator
    method.
    Note that state here refers to simulator state, which is not necessarily
    a state vector.
    """
    def simulate_sweep(
        self,
        program: 'cirq.Circuit',
        params: study.Sweepable,
        qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
        initial_state: Any = None,
    ) -> List[TSimulationTrialResult]:
        """Simulates the supplied Circuit.
        This method returns a result which allows access to the entire
        state vector. In contrast to simulate, this allows for sweeping
        over different parameter values.
        Args:
            program: The circuit to simulate.
            params: Parameters to run with the program.
            qubit_order: Determines the canonical ordering of the qubits. This
                is often used in specifying the initial state, i.e. the
                ordering of the computational basis states.
            initial_state: The initial state for the simulation. The form of
                this state depends on the simulation implementation. See
                documentation of the implementing class for details.
        Returns:
            List of SimulationTrialResults for this run, one for each
            possible parameter resolver.
        """
        trial_results = []
        qubit_order = ops.QubitOrder.as_qubit_order(qubit_order)
        for param_resolver in study.to_resolvers(params):
            all_step_results = self.simulate_moment_steps(
                program, param_resolver, qubit_order, initial_state
            )
            measurements: Dict[str, np.ndarray] = {}
            for step_result in all_step_results:
                for k, v in step_result.measurements.items():
                    measurements[k] = np.array(v, dtype=np.uint8)
            # `step_result` still refers to the last yielded step here; per
            # the simulate_moment_steps contract at least one step is yielded
            # even for an empty circuit, so its state is the final state.
            trial_results.append(
                self._create_simulator_trial_result(
                    params=param_resolver,
                    measurements=measurements,
                    final_simulator_state=step_result._simulator_state(),
                )
            )
        return trial_results
    def simulate_moment_steps(
        self,
        circuit: circuits.Circuit,
        param_resolver: 'study.ParamResolverOrSimilarType' = None,
        qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
        initial_state: Any = None,
    ) -> Iterator[TStepResult]:
        """Returns an iterator of StepResults for each moment simulated.
        If the circuit being simulated is empty, a single step result should
        be returned with the state being set to the initial state.
        Args:
            circuit: The Circuit to simulate.
            param_resolver: A ParamResolver for determining values of Symbols.
            qubit_order: Determines the canonical ordering of the qubits. This
                is often used in specifying the initial state, i.e. the
                ordering of the computational basis states.
            initial_state: The initial state for the simulation. The form of
                this state depends on the simulation implementation. See
                documentation of the implementing class for details.
        Returns:
            Iterator that steps through the simulation, simulating each
            moment and returning a StepResult for each moment.
        Raises:
            ValueError: If the resolved circuit still contains unresolved
                parameters (via check_all_resolved).
        """
        param_resolver = study.ParamResolver(param_resolver)
        resolved_circuit = protocols.resolve_parameters(circuit, param_resolver)
        check_all_resolved(resolved_circuit)
        # None means "start in the all-zeros computational basis state".
        actual_initial_state = 0 if initial_state is None else initial_state
        return self._base_iterator(resolved_circuit, qubit_order, actual_initial_state)
    @deprecated(deadline='v0.11', fix='Override _base_iterator instead')
    def _simulator_iterator(
        self,
        circuit: circuits.Circuit,
        param_resolver: study.ParamResolver,
        qubit_order: ops.QubitOrderOrList,
        initial_state: Any,
    ) -> Iterator[TStepResult]:
        """Iterator over StepResult from Moments of a Circuit.
        Deprecated: delegates to simulate_moment_steps.
        If the initial state is an int, the state is set to the computational
        basis state corresponding to this state. Otherwise if the initial
        state is a np.ndarray it is the full initial state, either a pure state
        or the full density matrix. If it is the pure state it must be the
        correct size, be normalized (an L2 norm of 1), and be safely castable
        to an appropriate dtype for the simulator. If it is a mixed state
        it must be correctly sized and positive semidefinite with trace one.
        Args:
            circuit: The circuit to simulate.
            param_resolver: A ParamResolver for determining values of
                Symbols.
            qubit_order: Determines the canonical ordering of the qubits. This
                is often used in specifying the initial state, i.e. the
                ordering of the computational basis states.
            initial_state: The initial state for the simulation. The form of
                this state depends on the simulation implementation. See
                documentation of the implementing class for details.
        Yields:
            StepResults from simulating a Moment of the Circuit.
        """
        return self.simulate_moment_steps(circuit, param_resolver, qubit_order, initial_state)
    @abc.abstractmethod
    def _base_iterator(
        self,
        circuit: circuits.Circuit,
        qubit_order: ops.QubitOrderOrList,
        initial_state: Any,
    ) -> Iterator[TStepResult]:
        """Iterator over StepResult from Moments of a Circuit.
        Args:
            circuit: The circuit to simulate.
            qubit_order: Determines the canonical ordering of the qubits. This
                is often used in specifying the initial state, i.e. the
                ordering of the computational basis states.
            initial_state: The initial state for the simulation. The form of
                this state depends on the simulation implementation. See
                documentation of the implementing class for details.
        Yields:
            StepResults from simulating a Moment of the Circuit.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def _create_simulator_trial_result(
        self,
        params: study.ParamResolver,
        measurements: Dict[str, np.ndarray],
        final_simulator_state: TSimulatorState,
    ) -> TSimulationTrialResult:
        """This method can be implemented to create a trial result.
        Args:
            params: The ParamResolver for this trial.
            measurements: The measurement results for this trial.
            final_simulator_state: The final state of the simulator for the
                StepResult.
        Returns:
            The SimulationTrialResult.
        """
        raise NotImplementedError()
class StepResult(Generic[TSimulatorState], metaclass=abc.ABCMeta):
    """Results of a step of a SimulatesIntermediateState.
    Attributes:
        measurements: A dictionary from measurement gate key to measurement
            results, ordered by the qubits that the measurement operates on.
    """
    def __init__(self, measurements: Optional[Dict[str, List[int]]] = None) -> None:
        # defaultdict lets implementations append per-key results without
        # having to pre-register each measurement key.
        self.measurements = measurements or collections.defaultdict(list)
    @abc.abstractmethod
    def _simulator_state(self) -> TSimulatorState:
        """Returns the simulator state of the simulator after this step.
        This method starts with an underscore to indicate that it is private.
        To access public state, see public methods on StepResult.
        The form of the simulator_state depends on the implementation of the
        simulation,see documentation for the implementing class for the form of
        details.
        """
    @abc.abstractmethod
    def sample(
        self,
        qubits: List[ops.Qid],
        repetitions: int = 1,
        seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
    ) -> np.ndarray:
        """Samples from the system at this point in the computation.
        Note that this does not collapse the state vector.
        Args:
            qubits: The qubits to be sampled in an order that influence the
                returned measurement results.
            repetitions: The number of samples to take.
            seed: A seed for the pseudorandom number generator.
        Returns:
            Measurement results with True corresponding to the ``|1⟩`` state.
            The outer list is for repetitions, and the inner corresponds to
            measurements ordered by the supplied qubits. These lists
            are wrapped as an numpy ndarray.
        """
        raise NotImplementedError()
    def sample_measurement_ops(
        self,
        measurement_ops: List[ops.GateOperation],
        repetitions: int = 1,
        seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
    ) -> Dict[str, np.ndarray]:
        """Samples from the system at this point in the computation.
        Note that this does not collapse the state vector.
        In contrast to `sample` which samples qubits, this takes a list of
        `cirq.GateOperation` instances whose gates are `cirq.MeasurementGate`
        instances and then returns a mapping from the key in the measurement
        gate to the resulting bit strings. Different measurement operations must
        not act on the same qubits.
        Args:
            measurement_ops: `GateOperation` instances whose gates are
                `MeasurementGate` instances to be sampled form.
            repetitions: The number of samples to take.
            seed: A seed for the pseudorandom number generator.
        Returns: A dictionary from measurement gate key to measurement
            results. Measurement results are stored in a 2-dimensional
            numpy array, the first dimension corresponding to the repetition
            and the second to the actual boolean measurement results (ordered
            by the qubits being measured.)
        Raises:
            ValueError: If the operation's gates are not `MeasurementGate`
                instances or a qubit is acted upon multiple times by different
                operations from `measurement_ops`.
        """
        # Sanity checks.
        seen_measurement_keys: Set[str] = set()
        for op in measurement_ops:
            gate = op.gate
            if not isinstance(gate, ops.MeasurementGate):
                raise ValueError(f'{op.gate} was not a MeasurementGate')
            key = protocols.measurement_key(gate)
            if key in seen_measurement_keys:
                raise ValueError(f'Duplicate MeasurementGate with key {key}')
            seen_measurement_keys.add(key)
        # Find measured qubits, ensuring a consistent ordering.
        measured_qubits = []
        seen_qubits: Set[cirq.Qid] = set()
        for op in measurement_ops:
            for q in op.qubits:
                if q not in seen_qubits:
                    seen_qubits.add(q)
                    measured_qubits.append(q)
        # Perform whole-system sampling of the measured qubits.
        indexed_sample = self.sample(measured_qubits, repetitions, seed=seed)
        # Extract results for each measurement.
        results: Dict[str, np.ndarray] = {}
        qubits_to_index = {q: i for i, q in enumerate(measured_qubits)}
        for op in measurement_ops:
            gate = cast(ops.MeasurementGate, op.gate)
            out = np.zeros(shape=(repetitions, len(op.qubits)), dtype=np.int8)
            inv_mask = gate.full_invert_mask()
            for i, q in enumerate(op.qubits):
                out[:, i] = indexed_sample[:, qubits_to_index[q]]
                if inv_mask[i]:
                    # XOR with (value < 2): flips 0 <-> 1, but leaves qudit
                    # outcomes >= 2 untouched.
                    out[:, i] ^= out[:, i] < 2
            results[gate.key] = out
        return results
@value.value_equality(unhashable=True)
class SimulationTrialResult:
    """Results of a simulation by a SimulatesFinalState.
    Unlike Result these results contain the final simulator_state of the
    system. This simulator_state is dependent on the simulation implementation
    and may be, for example, the state vector or the density matrix of the
    system.
    Attributes:
        params: A ParamResolver of settings used for this result.
        measurements: A dictionary from measurement gate key to measurement
            results. Measurement results are a numpy ndarray of actual boolean
            measurement results (ordered by the qubits acted on by the
            measurement gate.)
    """
    def __init__(
        self,
        params: study.ParamResolver,
        measurements: Dict[str, np.ndarray],
        final_simulator_state: Any,
    ) -> None:
        self.params = params
        self.measurements = measurements
        self._final_simulator_state = final_simulator_state
    def __repr__(self) -> str:
        return (
            f'cirq.SimulationTrialResult(params={self.params!r}, '
            f'measurements={self.measurements!r}, '
            f'final_simulator_state={self._final_simulator_state!r})'
        )
    def __str__(self) -> str:
        def bitstring(vals):
            # Use a space separator once any outcome needs more than one
            # digit (qudit dimensions above 10), so digits stay unambiguous.
            separator = ' ' if np.max(vals) >= 10 else ''
            return separator.join(str(int(v)) for v in vals)
        results = sorted([(key, bitstring(val)) for key, val in self.measurements.items()])
        if not results:
            return '(no measurements)'
        return ' '.join([f'{key}={val}' for key, val in results])
    def _repr_pretty_(self, p: Any, cycle: bool) -> None:
        """Text output in Jupyter."""
        if cycle:
            # There should never be a cycle. This is just in case.
            p.text('SimulationTrialResult(...)')
        else:
            p.text(str(self))
    def _value_equality_values_(self) -> Any:
        # Convert arrays to lists so equality compares values, not identity.
        measurements = {k: v.tolist() for k, v in sorted(self.measurements.items())}
        return (self.params, measurements, self._final_simulator_state)
    @property
    def qubit_map(self) -> Dict[ops.Qid, int]:
        """A map from Qid to index used to define the ordering of the basis in
        the result.
        """
        return self._final_simulator_state.qubit_map
    def _qid_shape_(self) -> Tuple[int, ...]:
        return _qubit_map_to_shape(self.qubit_map)
def _qubit_map_to_shape(qubit_map: Dict[ops.Qid, int]) -> Tuple[int, ...]:
    """Convert a qubit->index map into the qid shape tuple it implies.

    Raises:
        ValueError: If an index is out of bounds or two qubits share an index.
    """
    unset = -1  # sentinel marking basis positions not yet claimed by a qubit
    shape: List[int] = [unset] * len(qubit_map)
    for qid, index in qubit_map.items():
        try:
            shape[index] = qid.dimension
        except IndexError:
            raise ValueError(f'Invalid qubit_map. Qubit index out of bounds. Map is <{qubit_map!r}>.')
    if unset in shape:
        # Some position was never filled, so two qubits mapped to one index.
        raise ValueError(f'Invalid qubit_map. Duplicate qubit index. Map is <{qubit_map!r}>.')
    return tuple(shape)
def _verify_unique_measurement_keys(circuit: circuits.Circuit):
    """Raises ValueError if any measurement key appears more than once.

    Args:
        circuit: The circuit whose measurement keys are checked.
    """
    counts = collections.Counter(
        key for op in ops.flatten_op_tree(iter(circuit)) for key in protocols.measurement_keys(op)
    )
    # Counter.most_common() on an empty counter is just [], so no separate
    # emptiness check is needed.
    duplicates = [key for key, count in counts.most_common() if count > 1]
    if duplicates:
        raise ValueError(f"Measurement key {','.join(duplicates)} repeated")
def check_all_resolved(circuit):
    """Raises ValueError if the circuit contains unresolved symbols.

    Args:
        circuit: The (already parameter-resolved) circuit to validate.
    """
    if protocols.is_parameterized(circuit):
        # Collect the offending operations so the error is actionable.
        unresolved = [op for moment in circuit for op in moment if protocols.is_parameterized(op)]
        raise ValueError(
            f'Circuit contains ops whose symbols were not specified in '
            f'parameter sweep. Ops: {unresolved}'
        )
def split_into_matching_protocol_then_general(
    circuit: 'cirq.Circuit',
    predicate: Callable[['cirq.Operation'], bool],
) -> Tuple['cirq.Circuit', 'cirq.Circuit']:
    """Splits the circuit into a matching prefix and non-matching suffix.
    The splitting happens in a per-qubit fashion. A non-matching operation on
    qubit A will cause later operations on A to be part of the non-matching
    suffix, but later operations on other qubits will continue to be put into
    the matching part (as long as those qubits have had no non-matching operation
    up to that point).
    """
    blocked_qubits: Set[cirq.Qid] = set()
    matching_prefix = circuits.Circuit()
    general_suffix = circuits.Circuit()
    for moment in circuit:
        matching_part = []
        general_part = []
        for op in moment:
            qs = set(op.qubits)
            # Block these qubits if the op fails the predicate or touches an
            # already-blocked qubit; from here on, everything on blocked
            # qubits belongs to the general suffix.
            if not predicate(op) or not qs.isdisjoint(blocked_qubits):
                blocked_qubits |= qs
            # Note: an op that just caused blocking is itself no longer
            # disjoint from blocked_qubits, so it lands in the general part.
            if qs.isdisjoint(blocked_qubits):
                matching_part.append(op)
            else:
                general_part.append(op)
        if matching_part:
            matching_prefix.append(ops.Moment(matching_part))
        if general_part:
            general_suffix.append(ops.Moment(general_part))
    return matching_prefix, general_suffix
| 40.355721 | 98 | 0.654719 |
from typing import (
Any,
Dict,
Iterator,
List,
Sequence,
Tuple,
Union,
Optional,
TYPE_CHECKING,
Set,
cast,
Callable,
TypeVar,
Generic,
)
import abc
import collections
import numpy as np
from cirq import circuits, ops, protocols, study, value, work
from cirq._compat import deprecated
if TYPE_CHECKING:
import cirq
TStepResult = TypeVar('TStepResult', bound='StepResult')
TSimulationTrialResult = TypeVar('TSimulationTrialResult', bound='SimulationTrialResult')
TSimulatorState = TypeVar('TSimulatorState')
class SimulatesSamples(work.Sampler, metaclass=abc.ABCMeta):
def run_sweep(
self,
program: 'cirq.Circuit',
params: study.Sweepable,
repetitions: int = 1,
) -> List[study.Result]:
if not program.has_measurements():
raise ValueError("Circuit has no measurements to sample.")
_verify_unique_measurement_keys(program)
trial_results = []
for param_resolver in study.to_resolvers(params):
measurements = {}
if repetitions == 0:
for _, op, _ in program.findall_operations_with_gate_type(ops.MeasurementGate):
measurements[protocols.measurement_key(op)] = np.empty([0, 1])
else:
measurements = self._run(
circuit=program, param_resolver=param_resolver, repetitions=repetitions
)
trial_results.append(
study.Result.from_single_parameter_set(
params=param_resolver, measurements=measurements
)
)
return trial_results
@abc.abstractmethod
def _run(
self, circuit: circuits.Circuit, param_resolver: study.ParamResolver, repetitions: int
) -> Dict[str, np.ndarray]:
raise NotImplementedError()
class SimulatesAmplitudes(metaclass=abc.ABCMeta):
def compute_amplitudes(
self,
program: 'cirq.Circuit',
bitstrings: Sequence[int],
param_resolver: 'study.ParamResolverOrSimilarType' = None,
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
) -> Sequence[complex]:
return self.compute_amplitudes_sweep(
program, bitstrings, study.ParamResolver(param_resolver), qubit_order
)[0]
@abc.abstractmethod
def compute_amplitudes_sweep(
self,
program: 'cirq.Circuit',
bitstrings: Sequence[int],
params: study.Sweepable,
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
) -> Sequence[Sequence[complex]]:
raise NotImplementedError()
class SimulatesExpectationValues(metaclass=abc.ABCMeta):
    """Simulator interface for computing expectation values of observables.

    Implementers supply the sweep variant; the single-parameter-set variant
    delegates to it.
    """

    def simulate_expectation_values(
        self,
        program: 'cirq.Circuit',
        observables: Union['cirq.PauliSumLike', List['cirq.PauliSumLike']],
        param_resolver: 'study.ParamResolverOrSimilarType' = None,
        qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
        initial_state: Any = None,
        permit_terminal_measurements: bool = False,
    ) -> List[float]:
        """Compute expectation values for a single parameter assignment.

        Returns:
            One value per observable, taken from the first (only) sweep point.
        """
        return self.simulate_expectation_values_sweep(
            program,
            observables,
            study.ParamResolver(param_resolver),
            qubit_order,
            initial_state,
            permit_terminal_measurements,
        )[0]

    @abc.abstractmethod
    def simulate_expectation_values_sweep(
        self,
        program: 'cirq.Circuit',
        observables: Union['cirq.PauliSumLike', List['cirq.PauliSumLike']],
        params: 'study.Sweepable',
        qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
        initial_state: Any = None,
        permit_terminal_measurements: bool = False,
    ) -> List[List[float]]:
        """Compute expectation values for each sweep point.

        Fix: this abstract method previously had no body at all (the next
        source line started another class), which is a SyntaxError; a body
        consistent with the sibling interfaces is restored.
        """
        raise NotImplementedError()
class SimulatesFinalState(Generic[TSimulationTrialResult], metaclass=abc.ABCMeta):
    """Simulator interface that yields only the final result of a simulation.

    Implementers supply `simulate_sweep`; the single-run `simulate` is
    derived from it.
    """

    def simulate(
        self,
        program: 'cirq.Circuit',
        param_resolver: 'study.ParamResolverOrSimilarType' = None,
        qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
        initial_state: Any = None,
    ) -> TSimulationTrialResult:
        """Simulate the program for a single parameter assignment."""
        resolver = study.ParamResolver(param_resolver)
        sweep_results = self.simulate_sweep(program, resolver, qubit_order, initial_state)
        return sweep_results[0]

    @abc.abstractmethod
    def simulate_sweep(
        self,
        program: 'cirq.Circuit',
        params: study.Sweepable,
        qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
        initial_state: Any = None,
    ) -> List[TSimulationTrialResult]:
        """Simulate the program once per resolver generated by `params`."""
        raise NotImplementedError()
class SimulatesIntermediateState(
    Generic[TStepResult, TSimulationTrialResult, TSimulatorState],
    SimulatesFinalState[TSimulationTrialResult],
    metaclass=abc.ABCMeta,
):
    """A SimulatesFinalState whose simulation can be iterated moment by moment.

    Implementers supply `_base_iterator` (the per-moment step iterator) and
    `_create_simulator_trial_result`; `simulate_sweep` is derived from them.
    """

    def simulate_sweep(
        self,
        program: 'cirq.Circuit',
        params: study.Sweepable,
        qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
        initial_state: Any = None,
    ) -> List[TSimulationTrialResult]:
        """Simulate `program` once for every resolver generated by `params`.

        Returns:
            One trial result per parameter resolver, in resolver order.
        """
        trial_results = []
        qubit_order = ops.QubitOrder.as_qubit_order(qubit_order)
        for param_resolver in study.to_resolvers(params):
            all_step_results = self.simulate_moment_steps(
                program, param_resolver, qubit_order, initial_state
            )
            measurements = {}
            # Drain the step iterator, keeping the latest value seen for each
            # measurement key.
            for step_result in all_step_results:
                for k, v in step_result.measurements.items():
                    measurements[k] = np.array(v, dtype=np.uint8)
            # NOTE(review): relies on `step_result` still naming the *last*
            # step after the loop; an empty step iterator would leave it
            # unbound (NameError) — confirm that cannot happen upstream.
            trial_results.append(
                self._create_simulator_trial_result(
                    params=param_resolver,
                    measurements=measurements,
                    final_simulator_state=step_result._simulator_state(),
                )
            )
        return trial_results

    def simulate_moment_steps(
        self,
        circuit: circuits.Circuit,
        param_resolver: 'study.ParamResolverOrSimilarType' = None,
        qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
        initial_state: Any = None,
    ) -> Iterator[TStepResult]:
        """Yield a StepResult after each moment of the resolved circuit.

        Raises:
            ValueError: via `check_all_resolved` if unresolved symbols remain.
        """
        param_resolver = study.ParamResolver(param_resolver)
        resolved_circuit = protocols.resolve_parameters(circuit, param_resolver)
        check_all_resolved(resolved_circuit)
        # None means "use the default initial state" (0) for the iterator.
        actual_initial_state = 0 if initial_state is None else initial_state
        return self._base_iterator(resolved_circuit, qubit_order, actual_initial_state)

    @deprecated(deadline='v0.11', fix='Override _base_iterator instead')
    def _simulator_iterator(
        self,
        circuit: circuits.Circuit,
        param_resolver: study.ParamResolver,
        qubit_order: ops.QubitOrderOrList,
        initial_state: Any,
    ) -> Iterator[TStepResult]:
        """Deprecated shim kept for subclasses that still call it."""
        return self.simulate_moment_steps(circuit, param_resolver, qubit_order, initial_state)

    @abc.abstractmethod
    def _base_iterator(
        self,
        circuit: circuits.Circuit,
        qubit_order: ops.QubitOrderOrList,
        initial_state: Any,
    ) -> Iterator[TStepResult]:
        """Iterate simulation steps for an already-resolved circuit."""
        raise NotImplementedError()

    @abc.abstractmethod
    def _create_simulator_trial_result(
        self,
        params: study.ParamResolver,
        measurements: Dict[str, np.ndarray],
        final_simulator_state: TSimulatorState,
    ) -> TSimulationTrialResult:
        """Package one sweep point's outputs into a trial result."""
        raise NotImplementedError()
class StepResult(Generic[TSimulatorState], metaclass=abc.ABCMeta):
    """Result of one simulation step of a SimulatesIntermediateState.

    Attributes:
        measurements: Mapping from measurement gate key to the list of
            results recorded for that key so far.
    """

    def __init__(self, measurements: Optional[Dict[str, List[int]]] = None) -> None:
        self.measurements = measurements or collections.defaultdict(list)

    @abc.abstractmethod
    def _simulator_state(self) -> TSimulatorState:
        """Return the simulator state as of this step (implementation-defined).

        Fix: this abstract method previously had no body (the next line was
        the following decorator), which is a SyntaxError; a body consistent
        with the other abstract methods is restored.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def sample(
        self,
        qubits: List[ops.Qid],
        repetitions: int = 1,
        seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
    ) -> np.ndarray:
        """Sample the given qubits at this point in the computation."""
        raise NotImplementedError()

    def sample_measurement_ops(
        self,
        measurement_ops: List[ops.GateOperation],
        repetitions: int = 1,
        seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
    ) -> Dict[str, np.ndarray]:
        """Sample once jointly for a list of measurement operations.

        Returns:
            Mapping from measurement key to an int8 array of shape
            (repetitions, num qubits of that op).

        Raises:
            ValueError: if an op's gate is not a MeasurementGate, or two ops
                share a measurement key.
        """
        # Validate up front: measurement gates only, no duplicate keys.
        seen_measurement_keys: Set[str] = set()
        for op in measurement_ops:
            gate = op.gate
            if not isinstance(gate, ops.MeasurementGate):
                raise ValueError(f'{op.gate} was not a MeasurementGate')
            key = protocols.measurement_key(gate)
            if key in seen_measurement_keys:
                raise ValueError(f'Duplicate MeasurementGate with key {key}')
            seen_measurement_keys.add(key)
        # Collect measured qubits in first-appearance order, de-duplicated.
        measured_qubits = []
        seen_qubits: Set[cirq.Qid] = set()
        for op in measurement_ops:
            for q in op.qubits:
                if q not in seen_qubits:
                    seen_qubits.add(q)
                    measured_qubits.append(q)
        # One joint sample over all qubits, then scatter columns per op.
        indexed_sample = self.sample(measured_qubits, repetitions, seed=seed)
        results: Dict[str, np.ndarray] = {}
        qubits_to_index = {q: i for i, q in enumerate(measured_qubits)}
        for op in measurement_ops:
            gate = cast(ops.MeasurementGate, op.gate)
            out = np.zeros(shape=(repetitions, len(op.qubits)), dtype=np.int8)
            inv_mask = gate.full_invert_mask()
            for i, q in enumerate(op.qubits):
                out[:, i] = indexed_sample[:, qubits_to_index[q]]
                if inv_mask[i]:
                    # Invert flips only values < 2 (qubit-like 0/1); larger
                    # qudit values are left untouched.
                    out[:, i] ^= out[:, i] < 2
            results[gate.key] = out
        return results
@value.value_equality(unhashable=True)
class SimulationTrialResult:
    """Results of a simulation for a single parameter assignment.

    Attributes:
        params: The resolver used for this trial.
        measurements: Mapping from measurement key to result arrays.
    """

    def __init__(
        self,
        params: study.ParamResolver,
        measurements: Dict[str, np.ndarray],
        final_simulator_state: Any,
    ) -> None:
        self.params = params
        self.measurements = measurements
        self._final_simulator_state = final_simulator_state

    def __repr__(self) -> str:
        return (
            f'cirq.SimulationTrialResult(params={self.params!r}, '
            f'measurements={self.measurements!r}, '
            f'final_simulator_state={self._final_simulator_state!r})'
        )

    def __str__(self) -> str:
        def bitstring(vals):
            # Space-separate digits only when a value needs more than one
            # digit (any entry >= 10), so qubit results stay compact.
            separator = ' ' if np.max(vals) >= 10 else ''
            return separator.join(str(int(v)) for v in vals)
        # Sort by key so the rendering is deterministic.
        results = sorted([(key, bitstring(val)) for key, val in self.measurements.items()])
        if not results:
            return '(no measurements)'
        return ' '.join([f'{key}={val}' for key, val in results])

    def _repr_pretty_(self, p: Any, cycle: bool) -> None:
        # IPython pretty-printing hook; guard against self-referential cycles.
        if cycle:
            p.text('SimulationTrialResult(...)')
        else:
            p.text(str(self))

    def _value_equality_values_(self) -> Any:
        # Arrays are converted to lists so equality is by value, not identity.
        measurements = {k: v.tolist() for k, v in sorted(self.measurements.items())}
        return (self.params, measurements, self._final_simulator_state)

    @property
    def qubit_map(self) -> Dict[ops.Qid, int]:
        # Delegates to the final simulator state's qubit ordering.
        return self._final_simulator_state.qubit_map

    def _qid_shape_(self) -> Tuple[int, ...]:
        return _qubit_map_to_shape(self.qubit_map)
def _qubit_map_to_shape(qubit_map: Dict[ops.Qid, int]) -> Tuple[int, ...]:
    """Return the qid dimensions ordered by each qubit's index in `qubit_map`.

    Raises:
        ValueError: if an index is out of bounds or two qubits share an index.
    """
    dims: List[int] = [-1] * len(qubit_map)
    try:
        for qubit, index in qubit_map.items():
            dims[index] = qubit.dimension
    except IndexError:
        raise ValueError(f'Invalid qubit_map. Qubit index out of bounds. Map is <{qubit_map!r}>.')
    if any(dim == -1 for dim in dims):
        raise ValueError(f'Invalid qubit_map. Duplicate qubit index. Map is <{qubit_map!r}>.')
    return tuple(dims)
def _verify_unique_measurement_keys(circuit: circuits.Circuit):
    """Raise ValueError if any measurement key occurs more than once in `circuit`."""
    key_counts = collections.Counter(
        key
        for op in ops.flatten_op_tree(iter(circuit))
        for key in protocols.measurement_keys(op)
    )
    # most_common() on an empty Counter is just [], so no emptiness guard
    # is needed.
    repeated = [key for key, count in key_counts.most_common() if count > 1]
    if repeated:
        raise ValueError(f"Measurement key {','.join(repeated)} repeated")
def check_all_resolved(circuit):
    """Raise ValueError if the circuit still contains parameterized operations."""
    if not protocols.is_parameterized(circuit):
        return
    unresolved = [
        op for moment in circuit for op in moment if protocols.is_parameterized(op)
    ]
    raise ValueError(
        'Circuit contains ops whose symbols were not specified in '
        'parameter sweep. Ops: {}'.format(unresolved)
    )
def split_into_matching_protocol_then_general(
    circuit: 'cirq.Circuit',
    predicate: Callable[['cirq.Operation'], bool],
) -> Tuple['cirq.Circuit', 'cirq.Circuit']:
    """Split `circuit` into a predicate-matching prefix and a general suffix.

    An op goes to the suffix if it fails `predicate` or touches a qubit that
    has already fallen into the suffix; once a qubit is blocked it stays
    blocked for the rest of the circuit. Moment grouping is preserved within
    each part.
    """
    blocked_qubits: Set[cirq.Qid] = set()
    matching_prefix = circuits.Circuit()
    general_suffix = circuits.Circuit()
    for moment in circuit:
        matching_part = []
        general_part = []
        for op in moment:
            qs = set(op.qubits)
            # A non-matching op, or any op overlapping blocked qubits,
            # taints all of its qubits.
            if not predicate(op) or not qs.isdisjoint(blocked_qubits):
                blocked_qubits |= qs
            # Deliberately re-checks against the *updated* blocked set, so
            # an op that just blocked its own qubits lands in the suffix.
            if qs.isdisjoint(blocked_qubits):
                matching_part.append(op)
            else:
                general_part.append(op)
        if matching_part:
            matching_prefix.append(ops.Moment(matching_part))
        if general_part:
            general_suffix.append(ops.Moment(general_part))
    return matching_prefix, general_suffix
| true | true |
1c47cab40dab1478d28390903e21858b737bfe1a | 1,859 | py | Python | tools/site_compare/commands/scrape.py | rwatson/chromium-capsicum | b03da8e897f897c6ad2cda03ceda217b760fd528 | [
"BSD-3-Clause"
] | 11 | 2015-03-20T04:08:08.000Z | 2021-11-15T15:51:36.000Z | tools/site_compare/commands/scrape.py | changbai1980/chromium | c4625eefca763df86471d798ee5a4a054b4716ae | [
"BSD-3-Clause"
] | null | null | null | tools/site_compare/commands/scrape.py | changbai1980/chromium | c4625eefca763df86471d798ee5a4a054b4716ae | [
"BSD-3-Clause"
] | 1 | 2020-04-13T05:45:10.000Z | 2020-04-13T05:45:10.000Z | #!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Command for scraping images from a URL or list of URLs.
Prerequisites:
1. The command_line package from tools/site_compare
2. Either the IE BHO or Firefox extension (or both)
Installation:
1. Build the IE BHO, or call regsvr32 on a prebuilt binary
2. Add a file called "measurepageloadtimeextension@google.com" to
the default Firefox profile directory under extensions, containing
the path to the Firefox extension root
Invoke with the command line arguments as documented within
the command line.
"""
import command_line
from drivers import windowing
from utils import browser_iterate
def CreateCommand(cmdline):
  """Inserts the command and arguments into a command line for parsing.

  Registers the "scrape" command with ExecuteScrape as its handler, adds the
  shared browser-iteration options, then the scrape-specific -log/-out flags.

  Args:
    cmdline: command_line parser object to register the command with.
  """
  cmd = cmdline.AddCommand(
    ["scrape"],
    "Scrapes an image from a URL or series of URLs.",
    None,
    ExecuteScrape)
  # Shared iteration flags are registered before the scrape-specific flags.
  browser_iterate.SetupIterationCommandLine(cmd)
  cmd.AddArgument(
    ["-log", "--logfile"], "File to write text output", type="string")
  cmd.AddArgument(
    ["-out", "--outdir"], "Directory to store scrapes", type="string", required=True)
def ExecuteScrape(command):
  """Executes the Scrape command: capture each visited page to a .bmp file."""
  def ScrapeResult(url, proc, wnd, result):
    """Capture and save the scrape.

    Note: `log_file` is a closure over the variable assigned below — it is
    bound before Iterate() first invokes this callback.
    """
    if log_file: log_file.write(result)
    # Scrape the page
    image = windowing.ScrapeWindow(wnd)
    filename = windowing.URLtoFilename(url, command["--outdir"], ".bmp")
    image.save(filename)
  if command["--logfile"]: log_file = open(command["--logfile"], "w")
  else: log_file = None
  browser_iterate.Iterate(command, ScrapeResult)
  # Close the log file and return. We're done.
  if log_file: log_file.close()
| 29.983871 | 85 | 0.720818 |
import command_line
from drivers import windowing
from utils import browser_iterate
def CreateCommand(cmdline):
  """Register the "scrape" command, its shared iteration options, and flags."""
  cmd = cmdline.AddCommand(
    ["scrape"],
    "Scrapes an image from a URL or series of URLs.",
    None,
    ExecuteScrape)
  browser_iterate.SetupIterationCommandLine(cmd)
  cmd.AddArgument(
    ["-log", "--logfile"], "File to write text output", type="string")
  cmd.AddArgument(
    ["-out", "--outdir"], "Directory to store scrapes", type="string", required=True)
def ExecuteScrape(command):
  """Run the scrape command: capture each visited page to a .bmp file."""
  def ScrapeResult(url, proc, wnd, result):
    # `log_file` is a closure over the variable assigned below, before
    # Iterate() invokes this callback.
    if log_file: log_file.write(result)
    image = windowing.ScrapeWindow(wnd)
    filename = windowing.URLtoFilename(url, command["--outdir"], ".bmp")
    image.save(filename)
  if command["--logfile"]: log_file = open(command["--logfile"], "w")
  else: log_file = None
  browser_iterate.Iterate(command, ScrapeResult)
  if log_file: log_file.close()
| true | true |
1c47cad05b01e57c60e8dd11e39f42258a462d95 | 2,910 | py | Python | examples/orbslam_mono_kitti.py | frasermcghan/ORB_SLAM3-PythonBindings | a4fca4dbfbd70f31490e593f6c9e54c570827524 | [
"BSD-2-Clause",
"MIT"
] | 3 | 2021-11-12T06:11:19.000Z | 2022-03-17T04:24:25.000Z | examples/orbslam_mono_kitti.py | frasermcghan/ORB_SLAM3-PythonBindings | a4fca4dbfbd70f31490e593f6c9e54c570827524 | [
"BSD-2-Clause",
"MIT"
] | null | null | null | examples/orbslam_mono_kitti.py | frasermcghan/ORB_SLAM3-PythonBindings | a4fca4dbfbd70f31490e593f6c9e54c570827524 | [
"BSD-2-Clause",
"MIT"
] | 1 | 2021-11-12T06:11:23.000Z | 2021-11-12T06:11:23.000Z | #!/usr/bin/env python3
import sys
import os.path
import orbslam3
import time
import cv2
def main(vocab_path, settings_path, sequence_path):
    """Run monocular ORB-SLAM3 over a KITTI-style image sequence.

    Processes every frame, throttles to the recorded timestamps, writes the
    trajectory to trajectory.txt and prints tracking-time statistics.

    Returns:
        0 on success, 1 if any image fails to load.
    """
    image_filenames, timestamps = load_images(sequence_path)
    num_images = len(image_filenames)
    slam = orbslam3.System(vocab_path, settings_path, orbslam3.Sensor.MONOCULAR)
    slam.set_use_viewer(False)
    slam.initialize()
    times_track = [0 for _ in range(num_images)]
    print("-----")
    print("Start processing sequence ...")
    print("Images in the sequence: {0}".format(num_images))
    for idx in range(num_images):
        image = cv2.imread(image_filenames[idx], cv2.IMREAD_UNCHANGED)
        tframe = timestamps[idx]
        if image is None:
            print("failed to load image at {0}".format(image_filenames[idx]))
            return 1
        t1 = time.time()
        slam.process_image_mono(image, tframe)
        t2 = time.time()
        ttrack = t2 - t1
        times_track[idx] = ttrack
        # Inter-frame interval from the timestamps (last frame reuses the
        # previous interval); sleep off any leftover time to run real-time.
        t = 0
        if idx < num_images - 1:
            t = timestamps[idx + 1] - tframe
        elif idx > 0:
            t = tframe - timestamps[idx - 1]
        if ttrack < t:
            time.sleep(t - ttrack)
    save_trajectory(slam.get_trajectory_points(), "trajectory.txt")
    slam.shutdown()
    # Median = middle element of the sorted per-frame tracking times.
    # NOTE(review): divides by num_images — an empty sequence would raise.
    times_track = sorted(times_track)
    total_time = sum(times_track)
    print("-----")
    print("median tracking time: {0}".format(times_track[num_images // 2]))
    print("mean tracking time: {0}".format(total_time / num_images))
    return 0
def load_images(path_to_sequence):
    """Return ([image paths], [timestamps]) for a KITTI-style sequence directory.

    Timestamps are read one per line from `times.txt`; image paths are the
    zero-padded frames 000000.png, 000001.png, ... under `image_0`.
    """
    times_path = os.path.join(path_to_sequence, "times.txt")
    with open(times_path) as times_file:
        timestamps = [float(line) for line in times_file if len(line) > 0]
    image_dir = os.path.join(path_to_sequence, "image_0")
    image_filenames = [
        os.path.join(image_dir, "{0:06}.png".format(frame))
        for frame in range(len(timestamps))
    ]
    return image_filenames, timestamps
def save_trajectory(trajectory, filename):
    """Write trajectory rows to `filename`.

    Each row is a 13-tuple (t, r00, r01, r02, t0, r10, r11, r12, t1, r20,
    r21, r22, t2) and is emitted as one space-separated repr() line.
    """
    with open(filename, "w") as traj_file:
        for t, r00, r01, r02, t0, r10, r11, r12, t1, r20, r21, r22, t2 in trajectory:
            fields = (t, r00, r01, r02, t0, r10, r11, r12, t1, r20, r21, r22, t2)
            traj_file.write(" ".join(repr(field) for field in fields) + "\n")
if __name__ == "__main__":
if len(sys.argv) != 4:
print(
"Usage: ./orbslam_mono_kitti path_to_vocabulary path_to_settings path_to_sequence"
)
main(sys.argv[1], sys.argv[2], sys.argv[3])
| 27.980769 | 99 | 0.56323 |
import sys
import os.path
import orbslam3
import time
import cv2
def main(vocab_path, settings_path, sequence_path):
    """Run monocular ORB-SLAM3 over a KITTI-style sequence; return 0 or 1."""
    image_filenames, timestamps = load_images(sequence_path)
    num_images = len(image_filenames)
    slam = orbslam3.System(vocab_path, settings_path, orbslam3.Sensor.MONOCULAR)
    slam.set_use_viewer(False)
    slam.initialize()
    times_track = [0 for _ in range(num_images)]
    print("-----")
    print("Start processing sequence ...")
    print("Images in the sequence: {0}".format(num_images))
    for idx in range(num_images):
        image = cv2.imread(image_filenames[idx], cv2.IMREAD_UNCHANGED)
        tframe = timestamps[idx]
        if image is None:
            print("failed to load image at {0}".format(image_filenames[idx]))
            return 1
        t1 = time.time()
        slam.process_image_mono(image, tframe)
        t2 = time.time()
        ttrack = t2 - t1
        times_track[idx] = ttrack
        # Sleep off any leftover inter-frame time to run in real time.
        t = 0
        if idx < num_images - 1:
            t = timestamps[idx + 1] - tframe
        elif idx > 0:
            t = tframe - timestamps[idx - 1]
        if ttrack < t:
            time.sleep(t - ttrack)
    save_trajectory(slam.get_trajectory_points(), "trajectory.txt")
    slam.shutdown()
    times_track = sorted(times_track)
    total_time = sum(times_track)
    print("-----")
    print("median tracking time: {0}".format(times_track[num_images // 2]))
    print("mean tracking time: {0}".format(total_time / num_images))
    return 0
def load_images(path_to_sequence):
    """Return ([frame image paths], [timestamps]) for a KITTI-style sequence."""
    timestamps = []
    with open(os.path.join(path_to_sequence, "times.txt")) as times_file:
        for line in times_file:
            if len(line) > 0:
                timestamps.append(float(line))
    return (
        [
            os.path.join(path_to_sequence, "image_0", "{0:06}.png".format(idx))
            for idx in range(len(timestamps))
        ],
        timestamps,
    )
def save_trajectory(trajectory, filename):
    """Write each 13-tuple trajectory row as one space-separated repr() line."""
    with open(filename, "w") as traj_file:
        traj_file.writelines(
            "{time} {r00} {r01} {r02} {t0} {r10} {r11} {r12} {t1} {r20} {r21} {r22} {t2}\n".format(
                time=repr(t),
                r00=repr(r00),
                r01=repr(r01),
                r02=repr(r02),
                t0=repr(t0),
                r10=repr(r10),
                r11=repr(r11),
                r12=repr(r12),
                t1=repr(t1),
                r20=repr(r20),
                r21=repr(r21),
                r22=repr(r22),
                t2=repr(t2),
            )
            for t, r00, r01, r02, t0, r10, r11, r12, t1, r20, r21, r22, t2 in trajectory
        )
if len(sys.argv) != 4:
print(
"Usage: ./orbslam_mono_kitti path_to_vocabulary path_to_settings path_to_sequence"
)
main(sys.argv[1], sys.argv[2], sys.argv[3])
| true | true |
1c47cae1d8d4dc028de321451ca5cca46d806629 | 2,498 | py | Python | utils/nodes_key_pair_updator/NodesKeyPairUpdator.py | dawidsielski/medical-data-share | e462ffcfe0650b4fed2bb113c331a2a7438a8509 | [
"MIT"
] | null | null | null | utils/nodes_key_pair_updator/NodesKeyPairUpdator.py | dawidsielski/medical-data-share | e462ffcfe0650b4fed2bb113c331a2a7438a8509 | [
"MIT"
] | null | null | null | utils/nodes_key_pair_updator/NodesKeyPairUpdator.py | dawidsielski/medical-data-share | e462ffcfe0650b4fed2bb113c331a2a7438a8509 | [
"MIT"
] | null | null | null | import os
import requests
import logging
from logging.handlers import TimedRotatingFileHandler
from urllib.parse import urljoin
from configparser import ConfigParser
from data_share import DataShare
from data_share.KeyGeneration import KeyGeneration
from nodes_available.NodesChecker import NodesChecker
from utils.request_id_generator.RequestIdGenerator import RequestIdGenerator
# Helper: path of a key file inside the local `keys/` directory.
key_path = lambda name: os.path.join('keys', name)
# Load node configuration from config.ini next to the working directory.
config = ConfigParser()
config.read(os.path.join(os.getcwd(), 'config.ini'), encoding='utf-8')
os.makedirs('logs', exist_ok=True)
# Module logger writing to a file rotated nightly under logs/.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s:%(name)s:%(funcName)s:%(message)s')
website_file_rotating_handler = TimedRotatingFileHandler('logs/node_updates.log', when="midnight", interval=1)
website_file_rotating_handler.setLevel(logging.INFO)
website_file_rotating_handler.setFormatter(formatter)
website_file_rotating_handler.suffix = "%Y-%m-%d"
logger.addHandler(website_file_rotating_handler)
class NodeKeyPairUpdator(object):
    """Rotates this node's RSA key pair and pushes the new public key to peers."""

    @staticmethod
    def rename_old_keys():
        """Keep the current pair as *.old.key so it can still sign the rollout."""
        os.rename(key_path('public.key'), key_path('public.old.key'))
        os.rename(key_path('private.key'), key_path('private.old.key'))

    @staticmethod
    def update_keys():
        """Full rotation: archive old keys, generate/save new ones, notify peers.

        NOTE(review): the archived old keys are deleted even if a peer push
        failed inside update_key_on_available_nodes — confirm that is intended.
        """
        NodeKeyPairUpdator().rename_old_keys()
        kg = KeyGeneration()
        kg.generate_keys()
        kg.save_keys()
        # Peers verify the message with the *old* public key, so this must
        # run before the old pair is removed below.
        NodeKeyPairUpdator.update_key_on_available_nodes()
        os.remove(key_path('public.old.key'))
        os.remove(key_path('private.old.key'))
        logger.info('New_keys_generated')

    @staticmethod
    def update_key_on_available_nodes():
        """POST the new public key, signed with the old private key, to each peer."""
        available_nodes = NodesChecker.get_all_nodes_availability()
        logger.info(available_nodes)
        for key, value in available_nodes.items():
            url = urljoin(value['address'], 'update-keys')
            logger.info('Sending for {}'.format(key))
            # NOTE(review): keys are reloaded on every iteration; could be
            # hoisted out of the loop.
            keys = KeyGeneration()
            keys.load_keys()
            data = {
                'node': config.get('NODE', 'LABORATORY_NAME'),
                'public_key': keys.public_key.exportKey().decode(),
                'request_id': RequestIdGenerator.generate_request_id(),
            }
            # Sign with the archived old private key so peers can verify.
            data.update({'signature': DataShare.get_signature_for_message(data, filename='private.old.key').decode()})
            # NOTE(review): no timeout on the request — a hung peer blocks
            # the whole rotation; status code is logged but not checked.
            r = requests.post(url, json=data)
            logger.info('{} {} {} {}'.format(key, url, r.status_code, data))
| 33.756757 | 118 | 0.700961 | import os
import requests
import logging
from logging.handlers import TimedRotatingFileHandler
from urllib.parse import urljoin
from configparser import ConfigParser
from data_share import DataShare
from data_share.KeyGeneration import KeyGeneration
from nodes_available.NodesChecker import NodesChecker
from utils.request_id_generator.RequestIdGenerator import RequestIdGenerator
# Helper: path of a key file inside the local `keys/` directory.
key_path = lambda name: os.path.join('keys', name)
config = ConfigParser()
config.read(os.path.join(os.getcwd(), 'config.ini'), encoding='utf-8')
os.makedirs('logs', exist_ok=True)
# Module logger writing to a file rotated nightly under logs/.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s:%(name)s:%(funcName)s:%(message)s')
website_file_rotating_handler = TimedRotatingFileHandler('logs/node_updates.log', when="midnight", interval=1)
website_file_rotating_handler.setLevel(logging.INFO)
website_file_rotating_handler.setFormatter(formatter)
website_file_rotating_handler.suffix = "%Y-%m-%d"
logger.addHandler(website_file_rotating_handler)
class NodeKeyPairUpdator(object):
    """Rotates this node's RSA key pair and pushes the new public key to peers."""

    @staticmethod
    def rename_old_keys():
        """Archive the current pair as *.old.key for signing the rollout."""
        os.rename(key_path('public.key'), key_path('public.old.key'))
        os.rename(key_path('private.key'), key_path('private.old.key'))

    @staticmethod
    def update_keys():
        """Archive old keys, generate/save new ones, notify peers, clean up."""
        NodeKeyPairUpdator().rename_old_keys()
        kg = KeyGeneration()
        kg.generate_keys()
        kg.save_keys()
        NodeKeyPairUpdator.update_key_on_available_nodes()
        os.remove(key_path('public.old.key'))
        os.remove(key_path('private.old.key'))
        logger.info('New_keys_generated')

    @staticmethod
    def update_key_on_available_nodes():
        """POST the new public key, signed with the old private key, to each peer."""
        available_nodes = NodesChecker.get_all_nodes_availability()
        logger.info(available_nodes)
        for key, value in available_nodes.items():
            url = urljoin(value['address'], 'update-keys')
            logger.info('Sending for {}'.format(key))
            keys = KeyGeneration()
            keys.load_keys()
            data = {
                'node': config.get('NODE', 'LABORATORY_NAME'),
                'public_key': keys.public_key.exportKey().decode(),
                'request_id': RequestIdGenerator.generate_request_id(),
            }
            data.update({'signature': DataShare.get_signature_for_message(data, filename='private.old.key').decode()})
            r = requests.post(url, json=data)
            logger.info('{} {} {} {}'.format(key, url, r.status_code, data))
| true | true |
1c47cc9cf70b865d84b86c603de769862667adeb | 1,701 | py | Python | pioneer/temp/mujoco_test.py | xdralex/pioneer | 1fb9ea947d1b1cc2eb1f27bc4e8a7f206019b607 | [
"MIT"
] | 2 | 2020-07-29T07:49:06.000Z | 2021-04-13T20:57:45.000Z | pioneer/temp/mujoco_test.py | xdralex/pioneer | 1fb9ea947d1b1cc2eb1f27bc4e8a7f206019b607 | [
"MIT"
] | null | null | null | pioneer/temp/mujoco_test.py | xdralex/pioneer | 1fb9ea947d1b1cc2eb1f27bc4e8a7f206019b607 | [
"MIT"
] | 2 | 2020-07-25T11:45:54.000Z | 2021-01-11T07:12:07.000Z | import mujoco_py
import numpy as np
from gym import spaces
# Scratch script: load the Pioneer MuJoCo model and inspect simulator state.
model = mujoco_py.load_model_from_path('pioneer/envs/assets/pioneer2.xml')
sim = mujoco_py.MjSim(model)
print(f'timestep: {model.opt.timestep}')
# Joint limits -> gym Box space over joint positions.
bounds = model.jnt_range.copy().astype(np.float32)
low, high = bounds.T
position_space = spaces.Box(low=low, high=high, dtype=np.float32)
print(f'bounds: {bounds}')
print(f'nq={model.nq}, nv={model.nv}')
a0 = sim.get_state()
# NOTE(review): the label says "nv" but the value printed is a0.qvel.
print(f'qpos={a0.qpos}, nv={a0.qvel}')
# Re-seed the state with explicit joint velocities, then advance one step.
a1 = mujoco_py.MjSimState(a0.time, a0.qpos, [0.2, -0.2], a0.act, a0.udd_state)
sim.set_state(a1)
sim.step()
sim.forward()
print(sim.data.qpos.flat[:])
print(sim.data.qvel.flat[:2])
# NOTE(review): exit(0) here makes everything below unreachable.
exit(0)
#
# print(position_space.sample())
#
# sim.step()
#
# print(f"{sim.data.get_body_xpos('pointer')}")
#
# a0 = sim.get_state()
# print(a0)
#
# a1 = mujoco_py.MjSimState(a0.time, -1.0, 0.0, a0.act, a0.udd_state)
# print(a1)
# sim.set_state(a1)
#
# bounds = model.actuator_ctrlrange.copy().astype(np.float32)
# print(bounds)
# print(sim.data.ctrl)
#
# # sim.data.ctrl[:] = [10.0]
#
# sim.step()
# sim.forward()
# a1 = mujoco_py.MjSimState(a0.time, 0.0, 1.0, a0.act, a0.udd_state)
# sim.set_state(a1)
#
# sim.step()
# sim.forward()
#
# NOTE(review): unreachable — the script calls exit(0) earlier; kept for
# interactive experimentation (remove the exit above to use the viewer).
viewer = mujoco_py.mjviewer.MjViewer(sim)
DEFAULT_CAMERA_CONFIG = {
    'trackbodyid': 0,
    'distance': 20.0,
    'lookat': np.array((0.0, 0.0, 0.0)),
    'elevation': -35.0,
    'azimuth': 135.0
}
# Apply camera settings; array-valued entries are copied element-wise.
for key, value in DEFAULT_CAMERA_CONFIG.items():
    if isinstance(value, np.ndarray):
        getattr(viewer.cam, key)[:] = value
    else:
        setattr(viewer.cam, key, value)
# Render loop (blocks forever).
while True:
    sim.step()
    viewer.render()
    # print(f'{sim.get_state()} - {sim.data.get_body_xpos("pointer")}')
| 21 | 78 | 0.661376 | import mujoco_py
import numpy as np
from gym import spaces
# Scratch MuJoCo inspection script (comment-stripped copy).
model = mujoco_py.load_model_from_path('pioneer/envs/assets/pioneer2.xml')
sim = mujoco_py.MjSim(model)
print(f'timestep: {model.opt.timestep}')
bounds = model.jnt_range.copy().astype(np.float32)
low, high = bounds.T
position_space = spaces.Box(low=low, high=high, dtype=np.float32)
print(f'bounds: {bounds}')
print(f'nq={model.nq}, nv={model.nv}')
a0 = sim.get_state()
print(f'qpos={a0.qpos}, nv={a0.qvel}')
a1 = mujoco_py.MjSimState(a0.time, a0.qpos, [0.2, -0.2], a0.act, a0.udd_state)
sim.set_state(a1)
sim.step()
sim.forward()
print(sim.data.qpos.flat[:])
print(sim.data.qvel.flat[:2])
# Everything below is unreachable until this exit is removed.
exit(0)
# Fix: this assignment had been mangled to `_py.mjviewer.MjViewer(sim)`
# (the `viewer = mujoco` prefix was lost), which would raise NameError and
# leave `viewer` unbound for the loop below. Restored from the intact copy
# of this script earlier in the file.
viewer = mujoco_py.mjviewer.MjViewer(sim)
DEFAULT_CAMERA_CONFIG = {
    'trackbodyid': 0,
    'distance': 20.0,
    'lookat': np.array((0.0, 0.0, 0.0)),
    'elevation': -35.0,
    'azimuth': 135.0
}
for key, value in DEFAULT_CAMERA_CONFIG.items():
    if isinstance(value, np.ndarray):
        getattr(viewer.cam, key)[:] = value
    else:
        setattr(viewer.cam, key, value)
while True:
    sim.step()
    viewer.render()
| true | true |
1c47cd19af43c4d1becad7a2ad917dd2ed58f098 | 10,620 | py | Python | aliyun-python-sdk-ess/aliyunsdkess/request/v20140828/DescribeScalingGroupsRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | 1 | 2019-12-23T12:36:43.000Z | 2019-12-23T12:36:43.000Z | aliyun-python-sdk-ess/aliyunsdkess/request/v20140828/DescribeScalingGroupsRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-ess/aliyunsdkess/request/v20140828/DescribeScalingGroupsRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | 1 | 2021-02-23T11:27:54.000Z | 2021-02-23T11:27:54.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeScalingGroupsRequest(RpcRequest):
	def __init__(self):
		"""Build the ESS DescribeScalingGroups RPC request (product 'Ess', API version 2014-08-28)."""
		RpcRequest.__init__(self, 'Ess', '2014-08-28', 'DescribeScalingGroups','ess')
	def get_ResourceOwnerId(self):
		"""Return the ResourceOwnerId query parameter, or None if unset."""
		return self.get_query_params().get('ResourceOwnerId')
	def set_ResourceOwnerId(self,ResourceOwnerId):
		"""Set the ResourceOwnerId query parameter."""
		self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ScalingGroupId10(self):
return self.get_query_params().get('ScalingGroupId.10')
def set_ScalingGroupId10(self,ScalingGroupId10):
self.add_query_param('ScalingGroupId.10',ScalingGroupId10)
def get_ScalingGroupId12(self):
return self.get_query_params().get('ScalingGroupId.12')
def set_ScalingGroupId12(self,ScalingGroupId12):
self.add_query_param('ScalingGroupId.12',ScalingGroupId12)
def get_ScalingGroupId13(self):
return self.get_query_params().get('ScalingGroupId.13')
def set_ScalingGroupId13(self,ScalingGroupId13):
self.add_query_param('ScalingGroupId.13',ScalingGroupId13)
def get_ScalingGroupId14(self):
return self.get_query_params().get('ScalingGroupId.14')
def set_ScalingGroupId14(self,ScalingGroupId14):
self.add_query_param('ScalingGroupId.14',ScalingGroupId14)
def get_ScalingGroupId15(self):
return self.get_query_params().get('ScalingGroupId.15')
def set_ScalingGroupId15(self,ScalingGroupId15):
self.add_query_param('ScalingGroupId.15',ScalingGroupId15)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_ScalingGroupName20(self):
return self.get_query_params().get('ScalingGroupName.20')
def set_ScalingGroupName20(self,ScalingGroupName20):
self.add_query_param('ScalingGroupName.20',ScalingGroupName20)
def get_ScalingGroupName19(self):
return self.get_query_params().get('ScalingGroupName.19')
def set_ScalingGroupName19(self,ScalingGroupName19):
self.add_query_param('ScalingGroupName.19',ScalingGroupName19)
def get_ScalingGroupId20(self):
return self.get_query_params().get('ScalingGroupId.20')
def set_ScalingGroupId20(self,ScalingGroupId20):
self.add_query_param('ScalingGroupId.20',ScalingGroupId20)
def get_ScalingGroupName18(self):
return self.get_query_params().get('ScalingGroupName.18')
def set_ScalingGroupName18(self,ScalingGroupName18):
self.add_query_param('ScalingGroupName.18',ScalingGroupName18)
def get_ScalingGroupName17(self):
return self.get_query_params().get('ScalingGroupName.17')
def set_ScalingGroupName17(self,ScalingGroupName17):
self.add_query_param('ScalingGroupName.17',ScalingGroupName17)
def get_ScalingGroupName16(self):
return self.get_query_params().get('ScalingGroupName.16')
def set_ScalingGroupName16(self,ScalingGroupName16):
self.add_query_param('ScalingGroupName.16',ScalingGroupName16)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_ScalingGroupName(self):
return self.get_query_params().get('ScalingGroupName')
def set_ScalingGroupName(self,ScalingGroupName):
self.add_query_param('ScalingGroupName',ScalingGroupName)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_ScalingGroupName1(self):
return self.get_query_params().get('ScalingGroupName.1')
def set_ScalingGroupName1(self,ScalingGroupName1):
self.add_query_param('ScalingGroupName.1',ScalingGroupName1)
def get_ScalingGroupName2(self):
return self.get_query_params().get('ScalingGroupName.2')
def set_ScalingGroupName2(self,ScalingGroupName2):
self.add_query_param('ScalingGroupName.2',ScalingGroupName2)
def get_ScalingGroupId2(self):
return self.get_query_params().get('ScalingGroupId.2')
def set_ScalingGroupId2(self,ScalingGroupId2):
self.add_query_param('ScalingGroupId.2',ScalingGroupId2)
def get_ScalingGroupId1(self):
return self.get_query_params().get('ScalingGroupId.1')
def set_ScalingGroupId1(self,ScalingGroupId1):
self.add_query_param('ScalingGroupId.1',ScalingGroupId1)
def get_ScalingGroupId6(self):
return self.get_query_params().get('ScalingGroupId.6')
def set_ScalingGroupId6(self,ScalingGroupId6):
self.add_query_param('ScalingGroupId.6',ScalingGroupId6)
def get_ScalingGroupId16(self):
return self.get_query_params().get('ScalingGroupId.16')
def set_ScalingGroupId16(self,ScalingGroupId16):
self.add_query_param('ScalingGroupId.16',ScalingGroupId16)
def get_ScalingGroupName7(self):
return self.get_query_params().get('ScalingGroupName.7')
def set_ScalingGroupName7(self,ScalingGroupName7):
self.add_query_param('ScalingGroupName.7',ScalingGroupName7)
def get_ScalingGroupName11(self):
return self.get_query_params().get('ScalingGroupName.11')
def set_ScalingGroupName11(self,ScalingGroupName11):
self.add_query_param('ScalingGroupName.11',ScalingGroupName11)
def get_ScalingGroupId5(self):
return self.get_query_params().get('ScalingGroupId.5')
def set_ScalingGroupId5(self,ScalingGroupId5):
self.add_query_param('ScalingGroupId.5',ScalingGroupId5)
def get_ScalingGroupId17(self):
return self.get_query_params().get('ScalingGroupId.17')
def set_ScalingGroupId17(self,ScalingGroupId17):
self.add_query_param('ScalingGroupId.17',ScalingGroupId17)
def get_ScalingGroupName8(self):
return self.get_query_params().get('ScalingGroupName.8')
def set_ScalingGroupName8(self,ScalingGroupName8):
self.add_query_param('ScalingGroupName.8',ScalingGroupName8)
def get_ScalingGroupName10(self):
return self.get_query_params().get('ScalingGroupName.10')
def set_ScalingGroupName10(self,ScalingGroupName10):
self.add_query_param('ScalingGroupName.10',ScalingGroupName10)
def get_ScalingGroupId4(self):
return self.get_query_params().get('ScalingGroupId.4')
def set_ScalingGroupId4(self,ScalingGroupId4):
self.add_query_param('ScalingGroupId.4',ScalingGroupId4)
def get_ScalingGroupId18(self):
return self.get_query_params().get('ScalingGroupId.18')
def set_ScalingGroupId18(self,ScalingGroupId18):
self.add_query_param('ScalingGroupId.18',ScalingGroupId18)
def get_ScalingGroupName9(self):
return self.get_query_params().get('ScalingGroupName.9')
def set_ScalingGroupName9(self,ScalingGroupName9):
self.add_query_param('ScalingGroupName.9',ScalingGroupName9)
def get_ScalingGroupId3(self):
return self.get_query_params().get('ScalingGroupId.3')
def set_ScalingGroupId3(self,ScalingGroupId3):
self.add_query_param('ScalingGroupId.3',ScalingGroupId3)
def get_ScalingGroupId19(self):
return self.get_query_params().get('ScalingGroupId.19')
def set_ScalingGroupId19(self,ScalingGroupId19):
self.add_query_param('ScalingGroupId.19',ScalingGroupId19)
def get_ScalingGroupName3(self):
return self.get_query_params().get('ScalingGroupName.3')
def set_ScalingGroupName3(self,ScalingGroupName3):
self.add_query_param('ScalingGroupName.3',ScalingGroupName3)
def get_ScalingGroupName15(self):
return self.get_query_params().get('ScalingGroupName.15')
def set_ScalingGroupName15(self,ScalingGroupName15):
self.add_query_param('ScalingGroupName.15',ScalingGroupName15)
def get_ScalingGroupId9(self):
return self.get_query_params().get('ScalingGroupId.9')
def set_ScalingGroupId9(self,ScalingGroupId9):
self.add_query_param('ScalingGroupId.9',ScalingGroupId9)
def get_ScalingGroupName4(self):
return self.get_query_params().get('ScalingGroupName.4')
def set_ScalingGroupName4(self,ScalingGroupName4):
self.add_query_param('ScalingGroupName.4',ScalingGroupName4)
def get_ScalingGroupName14(self):
return self.get_query_params().get('ScalingGroupName.14')
def set_ScalingGroupName14(self,ScalingGroupName14):
self.add_query_param('ScalingGroupName.14',ScalingGroupName14)
def get_ScalingGroupId8(self):
return self.get_query_params().get('ScalingGroupId.8')
def set_ScalingGroupId8(self,ScalingGroupId8):
self.add_query_param('ScalingGroupId.8',ScalingGroupId8)
def get_ScalingGroupName5(self):
return self.get_query_params().get('ScalingGroupName.5')
def set_ScalingGroupName5(self,ScalingGroupName5):
self.add_query_param('ScalingGroupName.5',ScalingGroupName5)
def get_ScalingGroupName13(self):
return self.get_query_params().get('ScalingGroupName.13')
def set_ScalingGroupName13(self,ScalingGroupName13):
self.add_query_param('ScalingGroupName.13',ScalingGroupName13)
def get_ScalingGroupId7(self):
return self.get_query_params().get('ScalingGroupId.7')
def set_ScalingGroupId7(self,ScalingGroupId7):
self.add_query_param('ScalingGroupId.7',ScalingGroupId7)
def get_ScalingGroupName6(self):
return self.get_query_params().get('ScalingGroupName.6')
def set_ScalingGroupName6(self,ScalingGroupName6):
self.add_query_param('ScalingGroupName.6',ScalingGroupName6)
def get_ScalingGroupName12(self):
return self.get_query_params().get('ScalingGroupName.12')
def set_ScalingGroupName12(self,ScalingGroupName12):
self.add_query_param('ScalingGroupName.12',ScalingGroupName12) | 35.4 | 80 | 0.789642 |
from aliyunsdkcore.request import RpcRequest
class DescribeScalingGroupsRequest(RpcRequest):
	"""RPC request for the ESS ``DescribeScalingGroups`` API (version 2014-08-28).

	Boilerplate accessors in the style of generated SDK code: each
	``get_X``/``set_X`` pair reads or writes one query parameter on the
	underlying ``RpcRequest``.  The numbered accessors map onto the API's
	repeated filter parameters ``ScalingGroupId.1`` .. ``ScalingGroupId.20``
	and ``ScalingGroupName.1`` .. ``ScalingGroupName.20``, so up to 20
	scaling-group ids/names can be queried in a single call.  ``PageNumber``
	and ``PageSize`` control result pagination.
	"""
	def __init__(self):
		RpcRequest.__init__(self, 'Ess', '2014-08-28', 'DescribeScalingGroups','ess')
	def get_ResourceOwnerId(self):
		return self.get_query_params().get('ResourceOwnerId')
	def set_ResourceOwnerId(self,ResourceOwnerId):
		self.add_query_param('ResourceOwnerId',ResourceOwnerId)
	def get_ScalingGroupId10(self):
		return self.get_query_params().get('ScalingGroupId.10')
	def set_ScalingGroupId10(self,ScalingGroupId10):
		self.add_query_param('ScalingGroupId.10',ScalingGroupId10)
	def get_ScalingGroupId12(self):
		return self.get_query_params().get('ScalingGroupId.12')
	def set_ScalingGroupId12(self,ScalingGroupId12):
		self.add_query_param('ScalingGroupId.12',ScalingGroupId12)
	def get_ScalingGroupId13(self):
		return self.get_query_params().get('ScalingGroupId.13')
	def set_ScalingGroupId13(self,ScalingGroupId13):
		self.add_query_param('ScalingGroupId.13',ScalingGroupId13)
	def get_ScalingGroupId14(self):
		return self.get_query_params().get('ScalingGroupId.14')
	def set_ScalingGroupId14(self,ScalingGroupId14):
		self.add_query_param('ScalingGroupId.14',ScalingGroupId14)
	def get_ScalingGroupId15(self):
		return self.get_query_params().get('ScalingGroupId.15')
	def set_ScalingGroupId15(self,ScalingGroupId15):
		self.add_query_param('ScalingGroupId.15',ScalingGroupId15)
	def get_OwnerId(self):
		return self.get_query_params().get('OwnerId')
	def set_OwnerId(self,OwnerId):
		self.add_query_param('OwnerId',OwnerId)
	def get_PageNumber(self):
		return self.get_query_params().get('PageNumber')
	def set_PageNumber(self,PageNumber):
		self.add_query_param('PageNumber',PageNumber)
	def get_PageSize(self):
		return self.get_query_params().get('PageSize')
	def set_PageSize(self,PageSize):
		self.add_query_param('PageSize',PageSize)
	def get_ScalingGroupName20(self):
		return self.get_query_params().get('ScalingGroupName.20')
	def set_ScalingGroupName20(self,ScalingGroupName20):
		self.add_query_param('ScalingGroupName.20',ScalingGroupName20)
	def get_ScalingGroupName19(self):
		return self.get_query_params().get('ScalingGroupName.19')
	def set_ScalingGroupName19(self,ScalingGroupName19):
		self.add_query_param('ScalingGroupName.19',ScalingGroupName19)
	def get_ScalingGroupId20(self):
		return self.get_query_params().get('ScalingGroupId.20')
	def set_ScalingGroupId20(self,ScalingGroupId20):
		self.add_query_param('ScalingGroupId.20',ScalingGroupId20)
	def get_ScalingGroupName18(self):
		return self.get_query_params().get('ScalingGroupName.18')
	def set_ScalingGroupName18(self,ScalingGroupName18):
		self.add_query_param('ScalingGroupName.18',ScalingGroupName18)
	def get_ScalingGroupName17(self):
		return self.get_query_params().get('ScalingGroupName.17')
	def set_ScalingGroupName17(self,ScalingGroupName17):
		self.add_query_param('ScalingGroupName.17',ScalingGroupName17)
	def get_ScalingGroupName16(self):
		return self.get_query_params().get('ScalingGroupName.16')
	def set_ScalingGroupName16(self,ScalingGroupName16):
		self.add_query_param('ScalingGroupName.16',ScalingGroupName16)
	def get_ResourceOwnerAccount(self):
		return self.get_query_params().get('ResourceOwnerAccount')
	def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
		self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
	def get_ScalingGroupName(self):
		return self.get_query_params().get('ScalingGroupName')
	def set_ScalingGroupName(self,ScalingGroupName):
		self.add_query_param('ScalingGroupName',ScalingGroupName)
	def get_OwnerAccount(self):
		return self.get_query_params().get('OwnerAccount')
	def set_OwnerAccount(self,OwnerAccount):
		self.add_query_param('OwnerAccount',OwnerAccount)
	def get_ScalingGroupName1(self):
		return self.get_query_params().get('ScalingGroupName.1')
	def set_ScalingGroupName1(self,ScalingGroupName1):
		self.add_query_param('ScalingGroupName.1',ScalingGroupName1)
	def get_ScalingGroupName2(self):
		return self.get_query_params().get('ScalingGroupName.2')
	def set_ScalingGroupName2(self,ScalingGroupName2):
		self.add_query_param('ScalingGroupName.2',ScalingGroupName2)
	def get_ScalingGroupId2(self):
		return self.get_query_params().get('ScalingGroupId.2')
	def set_ScalingGroupId2(self,ScalingGroupId2):
		self.add_query_param('ScalingGroupId.2',ScalingGroupId2)
	def get_ScalingGroupId1(self):
		return self.get_query_params().get('ScalingGroupId.1')
	def set_ScalingGroupId1(self,ScalingGroupId1):
		self.add_query_param('ScalingGroupId.1',ScalingGroupId1)
	def get_ScalingGroupId6(self):
		return self.get_query_params().get('ScalingGroupId.6')
	def set_ScalingGroupId6(self,ScalingGroupId6):
		self.add_query_param('ScalingGroupId.6',ScalingGroupId6)
	def get_ScalingGroupId16(self):
		return self.get_query_params().get('ScalingGroupId.16')
	def set_ScalingGroupId16(self,ScalingGroupId16):
		self.add_query_param('ScalingGroupId.16',ScalingGroupId16)
	def get_ScalingGroupName7(self):
		return self.get_query_params().get('ScalingGroupName.7')
	def set_ScalingGroupName7(self,ScalingGroupName7):
		self.add_query_param('ScalingGroupName.7',ScalingGroupName7)
	def get_ScalingGroupName11(self):
		return self.get_query_params().get('ScalingGroupName.11')
	def set_ScalingGroupName11(self,ScalingGroupName11):
		self.add_query_param('ScalingGroupName.11',ScalingGroupName11)
	def get_ScalingGroupId5(self):
		return self.get_query_params().get('ScalingGroupId.5')
	def set_ScalingGroupId5(self,ScalingGroupId5):
		self.add_query_param('ScalingGroupId.5',ScalingGroupId5)
	def get_ScalingGroupId17(self):
		return self.get_query_params().get('ScalingGroupId.17')
	def set_ScalingGroupId17(self,ScalingGroupId17):
		self.add_query_param('ScalingGroupId.17',ScalingGroupId17)
	def get_ScalingGroupName8(self):
		return self.get_query_params().get('ScalingGroupName.8')
	def set_ScalingGroupName8(self,ScalingGroupName8):
		self.add_query_param('ScalingGroupName.8',ScalingGroupName8)
	def get_ScalingGroupName10(self):
		return self.get_query_params().get('ScalingGroupName.10')
	def set_ScalingGroupName10(self,ScalingGroupName10):
		self.add_query_param('ScalingGroupName.10',ScalingGroupName10)
	def get_ScalingGroupId4(self):
		return self.get_query_params().get('ScalingGroupId.4')
	def set_ScalingGroupId4(self,ScalingGroupId4):
		self.add_query_param('ScalingGroupId.4',ScalingGroupId4)
	def get_ScalingGroupId18(self):
		return self.get_query_params().get('ScalingGroupId.18')
	def set_ScalingGroupId18(self,ScalingGroupId18):
		self.add_query_param('ScalingGroupId.18',ScalingGroupId18)
	def get_ScalingGroupName9(self):
		return self.get_query_params().get('ScalingGroupName.9')
	def set_ScalingGroupName9(self,ScalingGroupName9):
		self.add_query_param('ScalingGroupName.9',ScalingGroupName9)
	def get_ScalingGroupId3(self):
		return self.get_query_params().get('ScalingGroupId.3')
	def set_ScalingGroupId3(self,ScalingGroupId3):
		self.add_query_param('ScalingGroupId.3',ScalingGroupId3)
	def get_ScalingGroupId19(self):
		return self.get_query_params().get('ScalingGroupId.19')
	def set_ScalingGroupId19(self,ScalingGroupId19):
		self.add_query_param('ScalingGroupId.19',ScalingGroupId19)
	def get_ScalingGroupName3(self):
		return self.get_query_params().get('ScalingGroupName.3')
	def set_ScalingGroupName3(self,ScalingGroupName3):
		self.add_query_param('ScalingGroupName.3',ScalingGroupName3)
	def get_ScalingGroupName15(self):
		return self.get_query_params().get('ScalingGroupName.15')
	def set_ScalingGroupName15(self,ScalingGroupName15):
		self.add_query_param('ScalingGroupName.15',ScalingGroupName15)
	def get_ScalingGroupId9(self):
		return self.get_query_params().get('ScalingGroupId.9')
	def set_ScalingGroupId9(self,ScalingGroupId9):
		self.add_query_param('ScalingGroupId.9',ScalingGroupId9)
	def get_ScalingGroupName4(self):
		return self.get_query_params().get('ScalingGroupName.4')
	def set_ScalingGroupName4(self,ScalingGroupName4):
		self.add_query_param('ScalingGroupName.4',ScalingGroupName4)
	def get_ScalingGroupName14(self):
		return self.get_query_params().get('ScalingGroupName.14')
	def set_ScalingGroupName14(self,ScalingGroupName14):
		self.add_query_param('ScalingGroupName.14',ScalingGroupName14)
	def get_ScalingGroupId8(self):
		return self.get_query_params().get('ScalingGroupId.8')
	def set_ScalingGroupId8(self,ScalingGroupId8):
		self.add_query_param('ScalingGroupId.8',ScalingGroupId8)
	def get_ScalingGroupName5(self):
		return self.get_query_params().get('ScalingGroupName.5')
	def set_ScalingGroupName5(self,ScalingGroupName5):
		self.add_query_param('ScalingGroupName.5',ScalingGroupName5)
	def get_ScalingGroupName13(self):
		return self.get_query_params().get('ScalingGroupName.13')
	def set_ScalingGroupName13(self,ScalingGroupName13):
		self.add_query_param('ScalingGroupName.13',ScalingGroupName13)
	def get_ScalingGroupId7(self):
		return self.get_query_params().get('ScalingGroupId.7')
	def set_ScalingGroupId7(self,ScalingGroupId7):
		self.add_query_param('ScalingGroupId.7',ScalingGroupId7)
	def get_ScalingGroupName6(self):
		return self.get_query_params().get('ScalingGroupName.6')
	def set_ScalingGroupName6(self,ScalingGroupName6):
		self.add_query_param('ScalingGroupName.6',ScalingGroupName6)
	def get_ScalingGroupName12(self):
		return self.get_query_params().get('ScalingGroupName.12')
	def set_ScalingGroupName12(self,ScalingGroupName12):
		self.add_query_param('ScalingGroupName.12',ScalingGroupName12)
1c47cf10532163a7f13c7f50d06cffe8e0a01ccb | 580 | py | Python | chapter_4_MorphologicalAnalysis/30.0.py | takumi34/nlp_100 | b02e926627006c1004c611d249928de5ef22b60e | [
"BSD-3-Clause"
] | null | null | null | chapter_4_MorphologicalAnalysis/30.0.py | takumi34/nlp_100 | b02e926627006c1004c611d249928de5ef22b60e | [
"BSD-3-Clause"
] | null | null | null | chapter_4_MorphologicalAnalysis/30.0.py | takumi34/nlp_100 | b02e926627006c1004c611d249928de5ef22b60e | [
"BSD-3-Clause"
# Morphologically analyse Natsume Soseki's "I Am a Cat" with MeCab and save
# the result; later exercises reuse this parsed data.
from natto import MeCab
import os
import csv

# Corpus file from <http://www.cl.ecei.tohoku.ac.jp/nlp100/>.
# 'with' guarantees the handle is closed (the original called `fp.close`
# without parentheses, which never closed the file).
with open("neko.txt", "r", encoding="utf-8") as fp:
    doc = fp.read()

# ChaSen output format: one token per line with tab-separated fields.
# (The original also built a plain `MeCab()` first and immediately
# discarded it - removed as dead code.)
nm = MeCab("-Ochasen")
list_keitaiso = [i.split() for i in nm.parse(doc).splitlines()]

# Store the parse in <neko.txt.mecab>.  The original leaked an extra
# handle from a redundant open('neko.txt.mecab', 'ab') that was shadowed
# on the next line; only one open is needed.
# NOTE(review): mode 'a' appends, so re-running the script duplicates
# rows - kept to preserve the original behaviour; confirm 'w' was not
# intended.
with open('neko.txt.mecab', 'a') as fp2:
    writer = csv.writer(fp2, lineterminator="\n")
    for row in list_keitaiso:
        writer.writerow(row)
| 22.307692 | 94 | 0.684483 |
from natto import MeCab
import os
import csv
fp = open("neko.txt","r", encoding="utf-8")
doc = fp.read()
nm = MeCab()
nm = MeCab("-Ochasen")
list_keitaiso = [i.split() for i in nm.parse(doc).splitlines()]
fp2= open('neko.txt.mecab', 'ab')
with open('neko.txt.mecab','a') as fp2:
writer = csv.writer((fp2), lineterminator="\n")
for i in range(0,len(list_keitaiso)):
writer.writerow(list_keitaiso[i])
fp.close
| false | true |
1c47d132a6791395267f3791dfb59ca1076cee0c | 360 | py | Python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/models.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2021-06-02T08:01:35.000Z | 2021-06-02T08:01:35.000Z | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/models.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2019-06-04T18:12:16.000Z | 2019-06-04T18:12:16.000Z | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/models.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .v2018_06_01.models import *
| 45 | 76 | 0.444444 |
from .v2018_06_01.models import *
| true | true |
1c47d191e4ced18e1fb9d2ca1bfe78d40d28d1ae | 2,572 | py | Python | tests/Modules/Indexer/test_DIALS_indexer.py | xia2/xia2 | 18554e9b4d442e7c23a0c4ce93f51b491f77d4b7 | [
"BSD-3-Clause"
] | 10 | 2015-10-30T06:36:55.000Z | 2021-12-10T20:06:22.000Z | tests/Modules/Indexer/test_DIALS_indexer.py | xia2/xia2 | 18554e9b4d442e7c23a0c4ce93f51b491f77d4b7 | [
"BSD-3-Clause"
] | 528 | 2015-11-24T08:20:12.000Z | 2022-03-21T21:47:29.000Z | tests/Modules/Indexer/test_DIALS_indexer.py | xia2/xia2 | 18554e9b4d442e7c23a0c4ce93f51b491f77d4b7 | [
"BSD-3-Clause"
] | 14 | 2016-03-15T22:07:03.000Z | 2020-12-14T07:13:35.000Z | from unittest import mock
import os
import pytest
import sys
from dxtbx.model import ExperimentList
from xia2.Handlers.Phil import PhilIndex
from xia2.Modules.Indexer.DialsIndexer import DialsIndexer
from xia2.Schema.XCrystal import XCrystal
from xia2.Schema.XWavelength import XWavelength
from xia2.Schema.XSweep import XSweep
from xia2.Schema.XSample import XSample
def exercise_dials_indexer(dials_data, tmp_dir, nproc=None):
    """Index the insulin test sweep with DialsIndexer and check regression values.

    Builds an imageset from the insulin image template, runs indexing, and
    asserts the unit cell, solution statistics and beam centre against
    recorded reference values; then round-trips the indexer through JSON,
    re-indexes the copy, and checks that eliminating the current solution
    drops both copies to the hR lattice.

    :param dials_data: dials test-data fixture providing the insulin images
    :param tmp_dir: working directory for indexer output
    :param nproc: optional override for the multiprocessing setting
    """
    if nproc is not None:
        # override the global multiprocessing setting for this run
        PhilIndex.params.xia2.settings.multiprocessing.nproc = nproc
    template = dials_data("insulin").join("insulin_1_###.img").strpath
    indexer = DialsIndexer()
    indexer.set_working_directory(tmp_dir)
    experiments = ExperimentList.from_templates([template])
    imageset = experiments.imagesets()[0]
    indexer.add_indexer_imageset(imageset)
    # minimal crystal/wavelength/sample hierarchy required to build an XSweep
    cryst = XCrystal("CRYST1", None)
    wav = XWavelength("WAVE1", cryst, imageset.get_beam().get_wavelength())
    samp = XSample("X1", cryst)
    directory, image = os.path.split(imageset.get_path(1))
    sweep = XSweep("SWEEP1", wav, samp, directory=directory, image=image)
    indexer.set_indexer_sweep(sweep)
    indexer.index()
    # regression values recorded for this dataset (cubic cell, cI lattice)
    assert indexer.get_indexer_cell() == pytest.approx(
        (78.14, 78.14, 78.14, 90, 90, 90), rel=1e-3
    )
    solution = indexer.get_solution()
    assert solution["rmsd"] == pytest.approx(0.03545, abs=1e-3)
    assert solution["metric"] == pytest.approx(0.02517, abs=5e-3)
    assert solution["number"] == 22
    assert solution["lattice"] == "cI"
    beam_centre = indexer.get_indexer_beam_centre()
    assert beam_centre == pytest.approx(
        (94.41567208118963, 94.51337522659865), abs=1e-3
    )
    # printed (not asserted) for inspection in the test log
    print(indexer.get_indexer_experiment_list()[0].crystal)
    print(indexer.get_indexer_experiment_list()[0].detector)
    # test serialization of indexer
    json_str = indexer.as_json()
    indexer2 = DialsIndexer.from_json(string=json_str)
    indexer2.index()
    # the deserialized copy must reproduce the original's results
    assert indexer.get_indexer_cell() == pytest.approx(indexer2.get_indexer_cell())
    assert indexer.get_indexer_beam_centre() == pytest.approx(
        indexer2.get_indexer_beam_centre()
    )
    # eliminating the current cI solution should move both to the next
    # lattice in the list (hR)
    indexer.eliminate()
    indexer2.eliminate()
    assert indexer.get_indexer_cell() == pytest.approx(indexer2.get_indexer_cell())
    assert indexer.get_indexer_lattice() == "hR"
    assert indexer2.get_indexer_lattice() == "hR"
def test_dials_indexer_serial(regression_test, ccp4, dials_data, run_in_tmpdir):
    # Run the indexing exercise single-processor.  sys.argv is blanked,
    # presumably so command-line parsing inside the indexing stack does not
    # see pytest's own arguments - confirm against xia2's argv handling.
    with mock.patch.object(sys, "argv", []):
        exercise_dials_indexer(dials_data, run_in_tmpdir.strpath, nproc=1)
| 34.293333 | 83 | 0.728616 | from unittest import mock
import os
import pytest
import sys
from dxtbx.model import ExperimentList
from xia2.Handlers.Phil import PhilIndex
from xia2.Modules.Indexer.DialsIndexer import DialsIndexer
from xia2.Schema.XCrystal import XCrystal
from xia2.Schema.XWavelength import XWavelength
from xia2.Schema.XSweep import XSweep
from xia2.Schema.XSample import XSample
def exercise_dials_indexer(dials_data, tmp_dir, nproc=None):
if nproc is not None:
PhilIndex.params.xia2.settings.multiprocessing.nproc = nproc
template = dials_data("insulin").join("insulin_1_###.img").strpath
indexer = DialsIndexer()
indexer.set_working_directory(tmp_dir)
experiments = ExperimentList.from_templates([template])
imageset = experiments.imagesets()[0]
indexer.add_indexer_imageset(imageset)
cryst = XCrystal("CRYST1", None)
wav = XWavelength("WAVE1", cryst, imageset.get_beam().get_wavelength())
samp = XSample("X1", cryst)
directory, image = os.path.split(imageset.get_path(1))
sweep = XSweep("SWEEP1", wav, samp, directory=directory, image=image)
indexer.set_indexer_sweep(sweep)
indexer.index()
assert indexer.get_indexer_cell() == pytest.approx(
(78.14, 78.14, 78.14, 90, 90, 90), rel=1e-3
)
solution = indexer.get_solution()
assert solution["rmsd"] == pytest.approx(0.03545, abs=1e-3)
assert solution["metric"] == pytest.approx(0.02517, abs=5e-3)
assert solution["number"] == 22
assert solution["lattice"] == "cI"
beam_centre = indexer.get_indexer_beam_centre()
assert beam_centre == pytest.approx(
(94.41567208118963, 94.51337522659865), abs=1e-3
)
print(indexer.get_indexer_experiment_list()[0].crystal)
print(indexer.get_indexer_experiment_list()[0].detector)
json_str = indexer.as_json()
indexer2 = DialsIndexer.from_json(string=json_str)
indexer2.index()
assert indexer.get_indexer_cell() == pytest.approx(indexer2.get_indexer_cell())
assert indexer.get_indexer_beam_centre() == pytest.approx(
indexer2.get_indexer_beam_centre()
)
indexer.eliminate()
indexer2.eliminate()
assert indexer.get_indexer_cell() == pytest.approx(indexer2.get_indexer_cell())
assert indexer.get_indexer_lattice() == "hR"
assert indexer2.get_indexer_lattice() == "hR"
def test_dials_indexer_serial(regression_test, ccp4, dials_data, run_in_tmpdir):
with mock.patch.object(sys, "argv", []):
exercise_dials_indexer(dials_data, run_in_tmpdir.strpath, nproc=1)
| true | true |
1c47d2457497fd988ef9644f3fcee1f778042ce5 | 1,002 | py | Python | mayan/apps/common/tests/runner.py | eshbeata/open-paperless | 6b9ed1f21908116ad2795b3785b2dbd66713d66e | [
"Apache-2.0"
] | 2,743 | 2017-12-18T07:12:30.000Z | 2022-03-27T17:21:25.000Z | mayan/apps/common/tests/runner.py | eshbeata/open-paperless | 6b9ed1f21908116ad2795b3785b2dbd66713d66e | [
"Apache-2.0"
] | 15 | 2017-12-18T14:58:07.000Z | 2021-03-01T20:05:05.000Z | mayan/apps/common/tests/runner.py | eshbeata/open-paperless | 6b9ed1f21908116ad2795b3785b2dbd66713d66e | [
"Apache-2.0"
] | 257 | 2017-12-18T03:12:58.000Z | 2022-03-25T08:59:10.000Z | from __future__ import unicode_literals
from django import apps
from django.test.runner import DiscoverRunner
class MayanTestRunner(DiscoverRunner):
    """Django test runner with an option to restrict discovery to Mayan apps.

    When ``--mayan-apps`` is passed, the positional test labels are replaced
    with the names of every installed app that declares ``has_tests``.
    """

    @classmethod
    def add_arguments(cls, parser):
        # Keep the stock runner's options and register our flag on top.
        DiscoverRunner.add_arguments(parser)
        parser.add_argument(
            '--mayan-apps', action='store_true', default=False,
            dest='mayan_apps',
            help='Test all Mayan apps that report to have tests.'
        )

    def __init__(self, *args, **kwargs):
        # Pop our custom option before delegating; the parent runner does
        # not know about it.
        mayan_apps = kwargs.pop('mayan_apps')
        self.mayan_apps = mayan_apps
        super(MayanTestRunner, self).__init__(*args, **kwargs)

    def build_suite(self, *args, **kwargs):
        if self.mayan_apps:
            # Collect the apps that report they have tests.
            labels = []
            for app_config in apps.apps.get_app_configs():
                if getattr(app_config, 'has_tests', False):
                    labels.append(app_config.name)
            args = list(args)
            args[0] = labels
        return super(MayanTestRunner, self).build_suite(*args, **kwargs)
| 31.3125 | 75 | 0.610778 | from __future__ import unicode_literals
from django import apps
from django.test.runner import DiscoverRunner
class MayanTestRunner(DiscoverRunner):
@classmethod
def add_arguments(cls, parser):
DiscoverRunner.add_arguments(parser)
parser.add_argument(
'--mayan-apps', action='store_true', default=False,
dest='mayan_apps',
help='Test all Mayan apps that report to have tests.'
)
def __init__(self, *args, **kwargs):
self.mayan_apps = kwargs.pop('mayan_apps')
super(MayanTestRunner, self).__init__(*args, **kwargs)
def build_suite(self, *args, **kwargs):
if self.mayan_apps:
args = list(args)
args[0] = [
app.name for app in apps.apps.get_app_configs() if getattr(
app, 'has_tests', False
)
]
return super(MayanTestRunner, self).build_suite(*args, **kwargs)
| true | true |
1c47d318a097cab72248e74942e5b67d49d708de | 2,197 | py | Python | admin/gen-parameter-document.py | mmadsen/axelrod-ct | 90ea4319dd571546888c4d2a50255514e7d7fb94 | [
"Apache-2.0"
] | 5 | 2015-05-03T08:49:11.000Z | 2022-03-23T11:44:00.000Z | admin/gen-parameter-document.py | mmadsen/axelrod-ct | 90ea4319dd571546888c4d2a50255514e7d7fb94 | [
"Apache-2.0"
] | null | null | null | admin/gen-parameter-document.py | mmadsen/axelrod-ct | 90ea4319dd571546888c4d2a50255514e7d7fb94 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2013. Mark E. Madsen <mark@madsenlab.org>
#
# This work is licensed under the terms of the Apache Software License, Version 2.0. See the file LICENSE for details.
"""
Generates a parameter document (to STDOUT) for a given configuration file. Allows a choice of
LaTeX and Markdown (Pandoc) formats.
"""
import logging as log
import argparse
import madsenlab.axelrod.utils as utils
def setup():
global args
parser = argparse.ArgumentParser()
parser.add_argument("--configuration", help="Configuration file for experiment", required=True)
parser.add_argument("--experiment", help="provide name for experiment", required=True)
parser.add_argument("--format", choices=['latex', 'pandoc'], help="Format for output ", required=True)
parser.add_argument("--debug", help="turn on debugging output")
parser.add_argument("--caption", help="Optional table caption (one will be autogenerated otherwise)")
parser.add_argument("--model", choices=['axelrod', 'extensible', 'treestructured'], required=True)
args = parser.parse_args()
if args.debug:
log.basicConfig(level=log.DEBUG, format='%(asctime)s %(levelname)s: %(message)s')
else:
log.basicConfig(level=log.INFO, format='%(asctime)s %(levelname)s: %(message)s')
####### main loop #######
if __name__ == "__main__":
setup()
if args.model == 'axelrod':
simconfig = utils.AxelrodConfiguration(args.configuration)
elif args.model == 'extensible':
simconfig = utils.AxelrodExtensibleConfiguration(args.configuration)
elif args.model == 'treestructured':
simconfig = utils.TreeStructuredConfiguration(args.configuration)
else:
log.error("This shouldn't happen - args.model = %s", args.model)
if args.caption:
caption = args.caption
else:
caption = None
if args.format == 'pandoc':
print simconfig.to_pandoc_table(args.experiment, caption=caption)
elif args.format == 'latex':
print simconfig.to_latex_table(args.experiment, caption=caption)
else:
print "Unrecognized format: %s" % args.format
exit(1)
exit(0)
| 28.166667 | 119 | 0.682294 |
"""
Generates a parameter document (to STDOUT) for a given configuration file. Allows a choice of
LaTeX and Markdown (Pandoc) formats.
"""
import logging as log
import argparse
import madsenlab.axelrod.utils as utils
def setup():
global args
parser = argparse.ArgumentParser()
parser.add_argument("--configuration", help="Configuration file for experiment", required=True)
parser.add_argument("--experiment", help="provide name for experiment", required=True)
parser.add_argument("--format", choices=['latex', 'pandoc'], help="Format for output ", required=True)
parser.add_argument("--debug", help="turn on debugging output")
parser.add_argument("--caption", help="Optional table caption (one will be autogenerated otherwise)")
parser.add_argument("--model", choices=['axelrod', 'extensible', 'treestructured'], required=True)
args = parser.parse_args()
if args.debug:
log.basicConfig(level=log.DEBUG, format='%(asctime)s %(levelname)s: %(message)s')
else:
log.basicConfig(level=log.INFO, format='%(asctime)s %(levelname)s: %(message)s')
del == 'extensible':
simconfig = utils.AxelrodExtensibleConfiguration(args.configuration)
elif args.model == 'treestructured':
simconfig = utils.TreeStructuredConfiguration(args.configuration)
else:
log.error("This shouldn't happen - args.model = %s", args.model)
if args.caption:
caption = args.caption
else:
caption = None
if args.format == 'pandoc':
print simconfig.to_pandoc_table(args.experiment, caption=caption)
elif args.format == 'latex':
print simconfig.to_latex_table(args.experiment, caption=caption)
else:
print "Unrecognized format: %s" % args.format
exit(1)
exit(0)
| false | true |
1c47d4df07c1c10285d70b8e964f1a6a01f4327e | 6,932 | py | Python | kf_d3m_primitives/natural_language_processing/sent2vec/sent2vec.py | Zac-hills/d3m-primitives | 1829fc98042dddfcbee3cfbbb8cb75dd452f1e8d | [
"Apache-2.0"
] | 1 | 2020-05-22T14:00:09.000Z | 2020-05-22T14:00:09.000Z | kf_d3m_primitives/natural_language_processing/sent2vec/sent2vec.py | Zac-hills/d3m-primitives | 1829fc98042dddfcbee3cfbbb8cb75dd452f1e8d | [
"Apache-2.0"
] | 18 | 2020-07-20T07:00:45.000Z | 2022-03-12T00:37:57.000Z | kf_d3m_primitives/natural_language_processing/sent2vec/sent2vec.py | Zac-hills/d3m-primitives | 1829fc98042dddfcbee3cfbbb8cb75dd452f1e8d | [
"Apache-2.0"
] | 6 | 2020-06-03T20:13:24.000Z | 2021-12-06T18:21:32.000Z | import os.path
from typing import Sequence, Optional, Dict
import numpy as np
import pandas as pd
from nk_sent2vec import Sent2Vec as _Sent2Vec
from d3m import container, utils
from d3m.primitive_interfaces.transformer import TransformerPrimitiveBase
from d3m.primitive_interfaces.base import CallResult
from d3m.container import DataFrame as d3m_DataFrame
from d3m.metadata import hyperparams, base as metadata_base, params
__author__ = "Distil"
__version__ = "1.3.0"
__contact__ = "mailto:cbethune@uncharted.software"
Inputs = container.pandas.DataFrame
Outputs = container.pandas.DataFrame
class Hyperparams(hyperparams.Hyperparams):
    # Optional explicit selection of text columns; when empty (the default)
    # the primitive operates on every column tagged http://schema.org/Text.
    use_columns = hyperparams.Set(
        elements=hyperparams.Hyperparameter[int](-1),
        default=(),
        semantic_types=[
            "https://metadata.datadrivendiscovery.org/types/ControlParameter"
        ],
        description="A set of column indices to force primitive to operate on. If any specified \
        column cannot be parsed, it is skipped.",
    )
class Sent2VecPrimitive(TransformerPrimitiveBase[Inputs, Outputs, Hyperparams]):
    """
    This primitive produces numerical representations of text data using a model
    that was pre-trained on English Twitter bi-grams.

    Each selected text column contributes one embedding block; the blocks are
    appended to the input dataframe as new ``vector_<i>`` float columns.
    """

    metadata = metadata_base.PrimitiveMetadata(
        {
            "id": "cf450079-9333-4a3f-aed4-b77a4e8c7be7",
            "version": __version__,
            "name": "sent2vec_wrapper",
            "keywords": ["Sent2Vec", "Embedding", "NLP", "Natural Language Processing"],
            "source": {
                "name": __author__,
                "contact": __contact__,
                "uris": ["https://github.com/kungfuai/d3m-primitives"],
            },
            "installation": [
                {"type": "PIP", "package": "cython", "version": "0.29.16"},
                {
                    "type": metadata_base.PrimitiveInstallationType.PIP,
                    "package_uri": "git+https://github.com/kungfuai/d3m-primitives.git@{git_commit}#egg=kf-d3m-primitives".format(
                        git_commit=utils.current_git_commit(os.path.dirname(__file__)),
                    ),
                },
                {
                    "type": "FILE",
                    "key": "sent2vec_model",
                    "file_uri": "http://public.datadrivendiscovery.org/twitter_bigrams.bin",
                    "file_digest": "9e8ccfea2aaa4435ca61b05b11b60e1a096648d56fff76df984709339f423dd6",
                },
            ],
            "python_path": "d3m.primitives.feature_extraction.nk_sent2vec.Sent2Vec",
            "algorithm_types": [metadata_base.PrimitiveAlgorithmType.VECTORIZATION],
            "primitive_family": metadata_base.PrimitiveFamily.FEATURE_EXTRACTION,
        }
    )

    # class instance to avoid unnecessary re-init on subsequent produce calls
    _vectorizer: Optional[_Sent2Vec] = None

    def __init__(
        self,
        *,
        hyperparams: Hyperparams,
        random_seed: int = 0,
        volumes: Dict[str, str] = None
    ) -> None:
        super().__init__(
            hyperparams=hyperparams, random_seed=random_seed, volumes=volumes
        )
        # static file volumes; "sent2vec_model" points at the pre-trained
        # .bin declared in the installation metadata above
        self.volumes = volumes

    def produce(
        self, *, inputs: Inputs, timeout: float = None, iterations: int = None
    ) -> CallResult[Outputs]:
        """
        Produce numerical representations (features) for short texts or sentences.

        Parameters
        ----------
        inputs: D3M dataframe

        Returns
        -------
        Outputs: Input D3M dataframe with vector components appended as additional columns
        """
        # figure out which text columns to operate on
        cols = self._get_operating_columns(
            inputs, self.hyperparams["use_columns"], ("http://schema.org/Text",)
        )
        outputs = inputs.copy()
        if not cols:
            # No text columns selected: return the inputs unchanged.  The
            # original fell through to a NameError here because the
            # embedding loop never ran.
            return CallResult(outputs)
        frame = inputs.iloc[:, cols]
        try:
            # lazy load the model and keep it around for subsequent produce calls
            if Sent2VecPrimitive._vectorizer is None:
                Sent2VecPrimitive._vectorizer = _Sent2Vec(
                    path=self.volumes["sent2vec_model"]
                )
            # one (n_rows, embedding_dim) array per embedded text column
            per_column_vectors = []
            for col in range(frame.shape[1]):
                text = frame.iloc[:, col].tolist()
                per_column_vectors.append(
                    Sent2VecPrimitive._vectorizer.embed_sentences(sentences=text)
                )
            # np.hstack keeps row alignment: row i of the result concatenates
            # the embeddings of input row i across all embedded columns.  The
            # previous np.array(...).reshape(n_rows, -1) flattened the
            # (n_cols, n_rows, dim) stack in C order, interleaving values
            # from different rows whenever more than one column was embedded.
            embedded_df = pd.DataFrame(np.hstack(per_column_vectors))
        except ValueError:
            # vectorization failed: return the untouched copy of the inputs
            return CallResult(outputs)
        # wrap as a D3M dataframe and describe each new column's metadata
        embedded_df = d3m_DataFrame(embedded_df)
        for col in range(embedded_df.shape[1]):
            col_dict = dict(
                embedded_df.metadata.query((metadata_base.ALL_ELEMENTS, col))
            )
            col_dict["structural_type"] = type(1.0)
            col_dict["name"] = "vector_" + str(col)
            col_dict["semantic_types"] = (
                "http://schema.org/Float",
                "https://metadata.datadrivendiscovery.org/types/Attribute",
            )
            embedded_df.metadata = embedded_df.metadata.update(
                (metadata_base.ALL_ELEMENTS, col), col_dict
            )
        # table-level metadata: record the column dimension of the new frame
        df_dict = dict(embedded_df.metadata.query((metadata_base.ALL_ELEMENTS,)))
        df_dict_1 = dict(embedded_df.metadata.query((metadata_base.ALL_ELEMENTS,)))
        df_dict["dimension"] = df_dict_1
        df_dict_1["name"] = "columns"
        df_dict_1["semantic_types"] = (
            "https://metadata.datadrivendiscovery.org/types/TabularColumn",
        )
        df_dict_1["length"] = embedded_df.shape[1]
        embedded_df.metadata = embedded_df.metadata.update(
            (metadata_base.ALL_ELEMENTS,), df_dict
        )
        return CallResult(outputs.append_columns(embedded_df))

    @classmethod
    def _get_operating_columns(
        cls,
        inputs: container.DataFrame,
        use_columns: Sequence[int],
        semantic_types: Sequence[str],
        require_attribute: bool = True,
    ) -> Sequence[int]:
        """Resolve the column indices the primitive should operate on.

        Columns must carry one of *semantic_types* (and, when
        *require_attribute* is True, also the Attribute semantic type).  A
        non-empty *use_columns* further restricts the result to the
        caller-supplied indices.
        """
        # use caller supplied columns if supplied
        cols = set(use_columns)
        type_cols = set(
            inputs.metadata.list_columns_with_semantic_types(semantic_types)
        )
        if require_attribute:
            attributes = set(
                inputs.metadata.list_columns_with_semantic_types(
                    ("https://metadata.datadrivendiscovery.org/types/Attribute",)
                )
            )
            type_cols = type_cols & attributes
        if len(cols) > 0:
            cols = type_cols & cols
        else:
            cols = type_cols
        return list(cols)
from typing import Sequence, Optional, Dict
import numpy as np
import pandas as pd
from nk_sent2vec import Sent2Vec as _Sent2Vec
from d3m import container, utils
from d3m.primitive_interfaces.transformer import TransformerPrimitiveBase
from d3m.primitive_interfaces.base import CallResult
from d3m.container import DataFrame as d3m_DataFrame
from d3m.metadata import hyperparams, base as metadata_base, params
__author__ = "Distil"
__version__ = "1.3.0"
__contact__ = "mailto:cbethune@uncharted.software"
Inputs = container.pandas.DataFrame
Outputs = container.pandas.DataFrame
class Hyperparams(hyperparams.Hyperparams):
    """Hyperparameter declarations for Sent2VecPrimitive (d3m config class)."""
    # Optional whitelist of column indices; empty tuple means "auto-detect
    # text columns" (see Sent2VecPrimitive._get_operating_columns).
    use_columns = hyperparams.Set(
        elements=hyperparams.Hyperparameter[int](-1),
        default=(),
        semantic_types=[
            "https://metadata.datadrivendiscovery.org/types/ControlParameter"
        ],
        description="A set of column indices to force primitive to operate on. If any specified \
        column cannot be parsed, it is skipped.",
    )
class Sent2VecPrimitive(TransformerPrimitiveBase[Inputs, Outputs, Hyperparams]):
    """d3m transformer that embeds short texts/sentences into numeric vectors
    using a pretrained Sent2Vec model, appending one float column per
    embedding component to the input dataframe."""
    # Registration metadata consumed by the d3m runtime: primitive id,
    # pip install steps, and the pretrained model file volume.
    metadata = metadata_base.PrimitiveMetadata(
        {
            "id": "cf450079-9333-4a3f-aed4-b77a4e8c7be7",
            "version": __version__,
            "name": "sent2vec_wrapper",
            "keywords": ["Sent2Vec", "Embedding", "NLP", "Natural Language Processing"],
            "source": {
                "name": __author__,
                "contact": __contact__,
                "uris": ["https://github.com/kungfuai/d3m-primitives"],
            },
            "installation": [
                {"type": "PIP", "package": "cython", "version": "0.29.16"},
                {
                    "type": metadata_base.PrimitiveInstallationType.PIP,
                    "package_uri": "git+https://github.com/kungfuai/d3m-primitives.git@{git_commit}#egg=kf-d3m-primitives".format(
                        git_commit=utils.current_git_commit(os.path.dirname(__file__)),
                    ),
                },
                {
                    "type": "FILE",
                    "key": "sent2vec_model",
                    "file_uri": "http://public.datadrivendiscovery.org/twitter_bigrams.bin",
                    "file_digest": "9e8ccfea2aaa4435ca61b05b11b60e1a096648d56fff76df984709339f423dd6",
                },
            ],
            "python_path": "d3m.primitives.feature_extraction.nk_sent2vec.Sent2Vec",
            "algorithm_types": [metadata_base.PrimitiveAlgorithmType.VECTORIZATION],
            "primitive_family": metadata_base.PrimitiveFamily.FEATURE_EXTRACTION,
        }
    )
    # Class-level cache of the loaded Sent2Vec model, shared across instances
    # so the (large) model file is only loaded once per process.
    _vectorizer: Optional[_Sent2Vec] = None
    def __init__(
        self,
        *,
        hyperparams: Hyperparams,
        random_seed: int = 0,
        volumes: Dict[str, str] = None
    ) -> None:
        """Store *volumes* (static file paths; expects key 'sent2vec_model')."""
        super().__init__(
            hyperparams=hyperparams, random_seed=random_seed, volumes=volumes
        )
        self.volumes = volumes
    def produce(
        self, *, inputs: Inputs, timeout: float = None, iterations: int = None
    ) -> CallResult[Outputs]:
        """Produce numerical representations (features) for short texts or sentences.

        Parameters
        ----------
        inputs: D3M dataframe

        Returns
        -------
        Outputs: Input D3M dataframe with vector components appended as additional columns
        """
        # figure out which text columns to operate on
        cols = self._get_operating_columns(
            inputs, self.hyperparams["use_columns"], ("http://schema.org/Text",)
        )
        frame = inputs.iloc[:, cols]
        outputs = inputs.copy()
        try:
            # lazy load the model and keep it around for subsequent produce calls
            if Sent2VecPrimitive._vectorizer is None:
                Sent2VecPrimitive._vectorizer = _Sent2Vec(
                    path=self.volumes["sent2vec_model"]
                )
            output_vectors = []
            for col in range(frame.shape[1]):
                text = frame.iloc[:, col].tolist()
                embedded_sentences = Sent2VecPrimitive._vectorizer.embed_sentences(
                    sentences=text
                )
                output_vectors.append(embedded_sentences)
            # stack per-column embeddings side by side: one row per input row
            embedded_df = pd.DataFrame(
                np.array(output_vectors).reshape(len(embedded_sentences), -1)
            )
        except ValueError:
            # embedding failed (e.g. unusable input) -> return inputs unchanged
            return CallResult(outputs)
        # tag every appended column as a Float attribute in d3m metadata
        embedded_df = d3m_DataFrame(embedded_df)
        for col in range(embedded_df.shape[1]):
            col_dict = dict(
                embedded_df.metadata.query((metadata_base.ALL_ELEMENTS, col))
            )
            col_dict["structural_type"] = type(1.0)
            col_dict["name"] = "vector_" + str(col)
            col_dict["semantic_types"] = (
                "http://schema.org/Float",
                "https://metadata.datadrivendiscovery.org/types/Attribute",
            )
            embedded_df.metadata = embedded_df.metadata.update(
                (metadata_base.ALL_ELEMENTS, col), col_dict
            )
        # rebuild the table-level 'dimension' metadata to reflect the new
        # column count before appending to the original inputs
        df_dict = dict(embedded_df.metadata.query((metadata_base.ALL_ELEMENTS,)))
        df_dict_1 = dict(embedded_df.metadata.query((metadata_base.ALL_ELEMENTS,)))
        df_dict["dimension"] = df_dict_1
        df_dict_1["name"] = "columns"
        df_dict_1["semantic_types"] = (
            "https://metadata.datadrivendiscovery.org/types/TabularColumn",
        )
        df_dict_1["length"] = embedded_df.shape[1]
        embedded_df.metadata = embedded_df.metadata.update(
            (metadata_base.ALL_ELEMENTS,), df_dict
        )
        return CallResult(outputs.append_columns(embedded_df))
    @classmethod
    def _get_operating_columns(
        cls,
        inputs: container.DataFrame,
        use_columns: Sequence[int],
        semantic_types: Sequence[str],
        require_attribute: bool = True,
    ) -> Sequence[int]:
        """Return indices of columns tagged with *semantic_types* (optionally
        Attribute-restricted), intersected with *use_columns* when given."""
        # use caller supplied columns if supplied
        cols = set(use_columns)
        type_cols = set(
            inputs.metadata.list_columns_with_semantic_types(semantic_types)
        )
        if require_attribute:
            attributes = set(
                inputs.metadata.list_columns_with_semantic_types(
                    ("https://metadata.datadrivendiscovery.org/types/Attribute",)
                )
            )
            type_cols = type_cols & attributes
        if len(cols) > 0:
            cols = type_cols & cols
        else:
            cols = type_cols
        return list(cols)
1c47d526b70baa1b7149d593ed8aec9074118df1 | 1,016 | py | Python | setup.py | PrabhuLoganathan/pro.developers.PySelFame-6 | 3ee45e672f84965f0b8b3ccf7f8daf0c7d871261 | [
"BSD-3-Clause"
] | null | null | null | setup.py | PrabhuLoganathan/pro.developers.PySelFame-6 | 3ee45e672f84965f0b8b3ccf7f8daf0c7d871261 | [
"BSD-3-Clause"
] | null | null | null | setup.py | PrabhuLoganathan/pro.developers.PySelFame-6 | 3ee45e672f84965f0b8b3ccf7f8daf0c7d871261 | [
"BSD-3-Clause"
] | null | null | null |
from setuptools import setup
# Packaging metadata for the "knitter" Selenium-based web UI test framework.
setup(
    name='knitter',
    version='1.0.2',
    author='Henry Wang',
    author_email='skymatrix@126.com',
    maintainer='Henry Wang',
    maintainer_email='skymatrix@126.com',
    url='https://github.com/hw712/Knitter',
    description='A Web Automation Test Framework Based On Selenium WebDriver',
    long_description="Knitter['nitə] is a web automation test framework, with which you can develop "
                     "the web ui automation with good maintainability and extendability.",
    # https://pypi.org/classifiers/
    classifiers=['License :: OSI Approved :: BSD License',
                 'Topic :: Software Development :: Testing',
                 'Topic :: Software Development :: Quality Assurance',
                 'Topic :: Software Development :: Libraries :: Application Frameworks'],
    platforms=['linux', 'windows'],
    license='BSD License',
    packages=['knitter'],
    install_requires=['selenium', 'xlrd'],
)
| 22.577778 | 101 | 0.626969 |
from setuptools import setup
setup(
name='knitter',
version='1.0.2',
author='Henry Wang',
author_email='skymatrix@126.com',
maintainer='Henry Wang',
maintainer_email='skymatrix@126.com',
url='https://github.com/hw712/Knitter',
description='A Web Automation Test Framework Based On Selenium WebDriver',
long_description="Knitter['nitə] is a web automation test framework, with which you can develop "
"the web ui automation with good maintainability and extendability.",
# https://pypi.org/classifiers/
classifiers=['License :: OSI Approved :: BSD License',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Libraries :: Application Frameworks'],
platforms=['linux', 'windows'],
license='BSD License',
packages=['knitter'],
install_requires=['selenium', 'xlrd'],
)
| true | true |
1c47d61e29b517b660c0f0ee0e55960b22da7061 | 202 | py | Python | mywebsite/users/tests/test_models.py | NyntoFive/mywebsite | 07af16c564f8a7c77763187cc4cd8742c91c6534 | [
"MIT"
] | null | null | null | mywebsite/users/tests/test_models.py | NyntoFive/mywebsite | 07af16c564f8a7c77763187cc4cd8742c91c6534 | [
"MIT"
] | null | null | null | mywebsite/users/tests/test_models.py | NyntoFive/mywebsite | 07af16c564f8a7c77763187cc4cd8742c91c6534 | [
"MIT"
] | null | null | null | import pytest
from mywebsite.users.models import User
pytestmark = pytest.mark.django_db
def test_user_get_absolute_url(user: User):
    """The canonical detail URL for a user is ``/users/<username>/``."""
    expected_url = f"/users/{user.username}/"
    assert user.get_absolute_url() == expected_url
| 20.2 | 64 | 0.772277 | import pytest
from mywebsite.users.models import User
pytestmark = pytest.mark.django_db
def test_user_get_absolute_url(user: User):
    """get_absolute_url() must point at the user's detail page."""
    url = user.get_absolute_url()
    assert url == f"/users/{user.username}/"
| true | true |
1c47d67efdc69d1364d3f7859468a66ce98d53af | 6,336 | py | Python | tests/integration/test_es.py | roguesupport/localstack | 087abb05fcb360297431ad8e5790c8014e0a80d7 | [
"Apache-2.0"
] | null | null | null | tests/integration/test_es.py | roguesupport/localstack | 087abb05fcb360297431ad8e5790c8014e0a80d7 | [
"Apache-2.0"
] | null | null | null | tests/integration/test_es.py | roguesupport/localstack | 087abb05fcb360297431ad8e5790c8014e0a80d7 | [
"Apache-2.0"
] | null | null | null | import logging
import threading
import botocore.exceptions
import pytest
from localstack import config
from localstack.constants import ELASTICSEARCH_DEFAULT_VERSION, OPENSEARCH_DEFAULT_VERSION
from localstack.services.install import install_elasticsearch, install_opensearch
from localstack.utils.common import safe_requests as requests
from localstack.utils.common import short_uid, start_worker_thread
LOG = logging.getLogger(__name__)
# Common headers used when sending requests to OpenSearch
COMMON_HEADERS = {"content-type": "application/json", "Accept-encoding": "identity"}
# Lock and event to ensure that the installation is executed before the tests
INIT_LOCK = threading.Lock()
installed = threading.Event()
def install_async():
    """
    Install the default elasticsearch and opensearch versions in a worker
    thread. Used by conftest.py to make sure the binaries are downloaded
    once the tests arrive here; a no-op when installation already happened.
    """
    if installed.is_set():
        return

    def _do_install(*_):
        # Double-checked guard: the lock serializes workers, and the event
        # re-check avoids repeating the download after another worker won.
        with INIT_LOCK:
            if not installed.is_set():
                LOG.info("installing elasticsearch default version")
                install_elasticsearch()
                LOG.info("done installing elasticsearch default version")
                LOG.info("installing opensearch default version")
                install_opensearch()
                LOG.info("done installing opensearch default version")
                installed.set()

    start_worker_thread(_do_install)
@pytest.fixture(autouse=True)
def elasticsearch():
    """Block every test until the elasticsearch/opensearch install is done."""
    if not installed.is_set():
        install_async()
    timed_out = not installed.wait(timeout=5 * 60)
    assert not timed_out, "gave up waiting for elasticsearch to install"
    yield
def try_cluster_health(cluster_url: str):
    """Probe the cluster root and health endpoints; assert both respond OK
    and that the reported cluster status is a recognized value."""
    root_response = requests.get(cluster_url)
    assert root_response.ok, f"cluster endpoint returned an error: {root_response.text}"
    health_response = requests.get(f"{cluster_url}/_cluster/health")
    assert health_response.ok, f"cluster health endpoint returned an error: {health_response.text}"
    valid_states = [
        "orange",
        "yellow",
        "green",
    ]
    assert (
        health_response.json()["status"] in valid_states
    ), "expected cluster state to be in a valid state"
class TestElasticsearchProvider:
    """Integration tests for localstack's Elasticsearch-compatible API
    (version listing, domain lifecycle, endpoint strategies)."""
    def test_list_versions(self, es_client):
        """Known ES and OpenSearch versions are advertised."""
        response = es_client.list_elasticsearch_versions()
        assert "ElasticsearchVersions" in response
        versions = response["ElasticsearchVersions"]
        assert "OpenSearch_1.0" in versions
        assert "OpenSearch_1.1" in versions
        assert "7.10" in versions
    def test_get_compatible_versions(self, es_client):
        """The upgrade-compatibility matrix contains the expected entries."""
        response = es_client.get_compatible_elasticsearch_versions()
        assert "CompatibleElasticsearchVersions" in response
        versions = response["CompatibleElasticsearchVersions"]
        assert len(versions) == 18
        assert {"SourceVersion": "OpenSearch_1.0", "TargetVersions": ["OpenSearch_1.1"]} in versions
        assert {
            "SourceVersion": "7.10",
            "TargetVersions": ["OpenSearch_1.0", "OpenSearch_1.1"],
        } in versions
        assert {
            "SourceVersion": "7.7",
            "TargetVersions": ["7.8", "7.9", "7.10", "OpenSearch_1.0", "OpenSearch_1.1"],
        } in versions
    @pytest.mark.skip_offline
    def test_get_compatible_version_for_domain(self, es_client, opensearch_domain):
        """A domain on the default (latest) version has no upgrade targets."""
        response = es_client.get_compatible_elasticsearch_versions(DomainName=opensearch_domain)
        assert "CompatibleElasticsearchVersions" in response
        versions = response["CompatibleElasticsearchVersions"]
        # The default version is the latest version, which is not compatible with any previous versions
        assert len(versions) == 0
    @pytest.mark.skip_offline
    def test_create_domain(self, es_client, opensearch_create_domain):
        """A freshly created ES-engine domain shows up in list_domain_names."""
        es_domain = opensearch_create_domain(EngineVersion=ELASTICSEARCH_DEFAULT_VERSION)
        response = es_client.list_domain_names(EngineType="Elasticsearch")
        domain_names = [domain["DomainName"] for domain in response["DomainNames"]]
        assert es_domain in domain_names
    @pytest.mark.skip_offline
    def test_create_existing_domain_causes_exception(self, es_client, opensearch_create_domain):
        """Re-creating an existing domain raises ResourceAlreadyExistsException."""
        domain_name = opensearch_create_domain(EngineVersion=ELASTICSEARCH_DEFAULT_VERSION)
        with pytest.raises(botocore.exceptions.ClientError) as exc_info:
            es_client.create_elasticsearch_domain(DomainName=domain_name)
        assert exc_info.type.__name__ == "ResourceAlreadyExistsException"
    @pytest.mark.skip_offline
    def test_describe_domains(self, es_client, opensearch_create_domain):
        """describe_elasticsearch_domains returns exactly the queried domain."""
        opensearch_domain = opensearch_create_domain(EngineVersion=ELASTICSEARCH_DEFAULT_VERSION)
        response = es_client.describe_elasticsearch_domains(DomainNames=[opensearch_domain])
        assert len(response["DomainStatusList"]) == 1
        assert response["DomainStatusList"][0]["DomainName"] == opensearch_domain
    @pytest.mark.skip_offline
    def test_domain_version(self, es_client, opensearch_domain, opensearch_create_domain):
        """Domains report the engine version they were created with."""
        response = es_client.describe_elasticsearch_domain(DomainName=opensearch_domain)
        assert "DomainStatus" in response
        status = response["DomainStatus"]
        assert "ElasticsearchVersion" in status
        assert status["ElasticsearchVersion"] == OPENSEARCH_DEFAULT_VERSION
        domain_name = opensearch_create_domain(EngineVersion=ELASTICSEARCH_DEFAULT_VERSION)
        response = es_client.describe_elasticsearch_domain(DomainName=domain_name)
        assert "DomainStatus" in response
        status = response["DomainStatus"]
        assert "ElasticsearchVersion" in status
        assert status["ElasticsearchVersion"] == "7.10"
    @pytest.mark.skip_offline
    def test_path_endpoint_strategy(self, monkeypatch, opensearch_create_domain, es_client):
        """With the 'path' endpoint strategy the endpoint ends in /<domain>."""
        monkeypatch.setattr(config, "OPENSEARCH_ENDPOINT_STRATEGY", "path")
        monkeypatch.setattr(config, "OPENSEARCH_MULTI_CLUSTER", True)
        domain_name = f"es-domain-{short_uid()}"
        opensearch_create_domain(DomainName=domain_name)
        status = es_client.describe_elasticsearch_domain(DomainName=domain_name)["DomainStatus"]
        assert "Endpoint" in status
        endpoint = status["Endpoint"]
        assert endpoint.endswith(f"/{domain_name}")
| 40.877419 | 103 | 0.72601 | import logging
import threading
import botocore.exceptions
import pytest
from localstack import config
from localstack.constants import ELASTICSEARCH_DEFAULT_VERSION, OPENSEARCH_DEFAULT_VERSION
from localstack.services.install import install_elasticsearch, install_opensearch
from localstack.utils.common import safe_requests as requests
from localstack.utils.common import short_uid, start_worker_thread
LOG = logging.getLogger(__name__)
COMMON_HEADERS = {"content-type": "application/json", "Accept-encoding": "identity"}
INIT_LOCK = threading.Lock()
installed = threading.Event()
def install_async():
    """Install the default elasticsearch/opensearch versions in a worker
    thread; returns immediately when installation already happened."""
    if installed.is_set():
        return
    def run_install(*args):
        # Lock plus re-check so only one worker performs the download.
        with INIT_LOCK:
            if installed.is_set():
                return
            LOG.info("installing elasticsearch default version")
            install_elasticsearch()
            LOG.info("done installing elasticsearch default version")
            LOG.info("installing opensearch default version")
            install_opensearch()
            LOG.info("done installing opensearch default version")
            installed.set()
    start_worker_thread(run_install)
@pytest.fixture(autouse=True)
def elasticsearch():
    """Autouse fixture: wait (up to 5 min) until the install has finished."""
    if not installed.is_set():
        install_async()
    assert installed.wait(timeout=5 * 60), "gave up waiting for elasticsearch to install"
    yield
def try_cluster_health(cluster_url: str):
    """Assert the cluster root and _cluster/health endpoints respond OK and
    report a recognized cluster status."""
    response = requests.get(cluster_url)
    assert response.ok, f"cluster endpoint returned an error: {response.text}"
    response = requests.get(f"{cluster_url}/_cluster/health")
    assert response.ok, f"cluster health endpoint returned an error: {response.text}"
    assert response.json()["status"] in [
        "orange",
        "yellow",
        "green",
    ], "expected cluster state to be in a valid state"
class TestElasticsearchProvider:
    """Integration tests for the Elasticsearch-compatible provider API."""
    def test_list_versions(self, es_client):
        """Known ES and OpenSearch versions are advertised."""
        response = es_client.list_elasticsearch_versions()
        assert "ElasticsearchVersions" in response
        versions = response["ElasticsearchVersions"]
        assert "OpenSearch_1.0" in versions
        assert "OpenSearch_1.1" in versions
        assert "7.10" in versions
    def test_get_compatible_versions(self, es_client):
        """The upgrade-compatibility matrix contains the expected entries."""
        response = es_client.get_compatible_elasticsearch_versions()
        assert "CompatibleElasticsearchVersions" in response
        versions = response["CompatibleElasticsearchVersions"]
        assert len(versions) == 18
        assert {"SourceVersion": "OpenSearch_1.0", "TargetVersions": ["OpenSearch_1.1"]} in versions
        assert {
            "SourceVersion": "7.10",
            "TargetVersions": ["OpenSearch_1.0", "OpenSearch_1.1"],
        } in versions
        assert {
            "SourceVersion": "7.7",
            "TargetVersions": ["7.8", "7.9", "7.10", "OpenSearch_1.0", "OpenSearch_1.1"],
        } in versions
    @pytest.mark.skip_offline
    def test_get_compatible_version_for_domain(self, es_client, opensearch_domain):
        """A domain on the default (latest) version has no upgrade targets."""
        response = es_client.get_compatible_elasticsearch_versions(DomainName=opensearch_domain)
        assert "CompatibleElasticsearchVersions" in response
        versions = response["CompatibleElasticsearchVersions"]
        # latest version -> nothing newer to upgrade to
        assert len(versions) == 0
    @pytest.mark.skip_offline
    def test_create_domain(self, es_client, opensearch_create_domain):
        """A freshly created ES-engine domain shows up in list_domain_names."""
        es_domain = opensearch_create_domain(EngineVersion=ELASTICSEARCH_DEFAULT_VERSION)
        response = es_client.list_domain_names(EngineType="Elasticsearch")
        domain_names = [domain["DomainName"] for domain in response["DomainNames"]]
        assert es_domain in domain_names
    @pytest.mark.skip_offline
    def test_create_existing_domain_causes_exception(self, es_client, opensearch_create_domain):
        """Re-creating an existing domain raises ResourceAlreadyExistsException."""
        domain_name = opensearch_create_domain(EngineVersion=ELASTICSEARCH_DEFAULT_VERSION)
        with pytest.raises(botocore.exceptions.ClientError) as exc_info:
            es_client.create_elasticsearch_domain(DomainName=domain_name)
        assert exc_info.type.__name__ == "ResourceAlreadyExistsException"
    @pytest.mark.skip_offline
    def test_describe_domains(self, es_client, opensearch_create_domain):
        """describe_elasticsearch_domains returns exactly the queried domain."""
        opensearch_domain = opensearch_create_domain(EngineVersion=ELASTICSEARCH_DEFAULT_VERSION)
        response = es_client.describe_elasticsearch_domains(DomainNames=[opensearch_domain])
        assert len(response["DomainStatusList"]) == 1
        assert response["DomainStatusList"][0]["DomainName"] == opensearch_domain
    @pytest.mark.skip_offline
    def test_domain_version(self, es_client, opensearch_domain, opensearch_create_domain):
        """Domains report the engine version they were created with."""
        response = es_client.describe_elasticsearch_domain(DomainName=opensearch_domain)
        assert "DomainStatus" in response
        status = response["DomainStatus"]
        assert "ElasticsearchVersion" in status
        assert status["ElasticsearchVersion"] == OPENSEARCH_DEFAULT_VERSION
        domain_name = opensearch_create_domain(EngineVersion=ELASTICSEARCH_DEFAULT_VERSION)
        response = es_client.describe_elasticsearch_domain(DomainName=domain_name)
        assert "DomainStatus" in response
        status = response["DomainStatus"]
        assert "ElasticsearchVersion" in status
        assert status["ElasticsearchVersion"] == "7.10"
    @pytest.mark.skip_offline
    def test_path_endpoint_strategy(self, monkeypatch, opensearch_create_domain, es_client):
        """With the 'path' endpoint strategy the endpoint ends in /<domain>."""
        monkeypatch.setattr(config, "OPENSEARCH_ENDPOINT_STRATEGY", "path")
        monkeypatch.setattr(config, "OPENSEARCH_MULTI_CLUSTER", True)
        domain_name = f"es-domain-{short_uid()}"
        opensearch_create_domain(DomainName=domain_name)
        status = es_client.describe_elasticsearch_domain(DomainName=domain_name)["DomainStatus"]
        assert "Endpoint" in status
        endpoint = status["Endpoint"]
        assert endpoint.endswith(f"/{domain_name}")
| true | true |
1c47d7142612605ef5ca8a8c2d042e3d2166f135 | 5,609 | py | Python | aio_pika/robust_channel.py | askabelin/aio-pika | 38fd5897c556dd41624b8571b061f486e8e7508e | [
"Apache-2.0"
] | null | null | null | aio_pika/robust_channel.py | askabelin/aio-pika | 38fd5897c556dd41624b8571b061f486e8e7508e | [
"Apache-2.0"
] | null | null | null | aio_pika/robust_channel.py | askabelin/aio-pika | 38fd5897c556dd41624b8571b061f486e8e7508e | [
"Apache-2.0"
] | null | null | null | import asyncio
from typing import Callable, Any, Generator, Union
from logging import getLogger
from aio_pika.tools import create_future
from .compat import Awaitable
from .exchange import Exchange, ExchangeType
from .message import IncomingMessage
from .queue import Queue
from .common import BaseChannel, FutureStore
from .channel import Channel
from .robust_queue import RobustQueue
from .robust_exchange import RobustExchange
log = getLogger(__name__)
FunctionOrCoroutine = Union[Callable[[IncomingMessage], Any], Awaitable[IncomingMessage]]
class RobustChannel(Channel):
    """Channel abstraction that survives reconnects.

    Keeps a registry of robust exchanges/queues and the last QoS settings so
    that :meth:`on_reconnect` can re-declare them on a fresh channel after
    the underlying connection is re-established.
    """
    QUEUE_CLASS = RobustQueue
    EXCHANGE_CLASS = RobustExchange
    def __init__(self, connection, loop: asyncio.AbstractEventLoop,
                 future_store: FutureStore, channel_number: int=None,
                 publisher_confirms: bool=True, on_return_raises=False):
        """
        :param connection: :class:`aio_pika.adapter.AsyncioConnection` instance
        :param loop: Event loop (:func:`asyncio.get_event_loop()` when :class:`None`)
        :param future_store: :class:`aio_pika.common.FutureStore` instance
        :param publisher_confirms: False if you don't need delivery confirmations (in pursuit of performance)
        """
        super().__init__(
            loop=loop,
            future_store=future_store.get_child(),
            connection=connection,
            channel_number=channel_number,
            publisher_confirms=publisher_confirms,
            on_return_raises=on_return_raises,
        )
        self._closed = False
        self._exchanges = dict()  # name -> RobustExchange, restored on reconnect
        self._queues = dict()     # name -> RobustQueue, restored on reconnect
        self._qos = 0, 0          # (prefetch_count, prefetch_size), replayed on reconnect
    @asyncio.coroutine
    def on_reconnect(self, connection, channel_number):
        """Re-open this channel on *connection* and restore declared entities."""
        exc = ConnectionError('Auto Reconnect Error')
        # Fail everything still waiting on the old channel.
        if not self._closing.done():
            self._closing.set_exception(exc)
        self._closing = create_future(loop=self.loop)
        self._futures.reject_all(exc)
        self._connection = connection
        self._channel_number = channel_number
        yield from self.initialize()
        for exchange in self._exchanges.values():
            yield from exchange.on_reconnect(self)
        for queue in self._queues.values():
            yield from queue.on_reconnect(self)
    @asyncio.coroutine
    def initialize(self, timeout=None):
        """Initialize the channel and re-apply the remembered QoS settings."""
        result = yield from super().initialize()
        prefetch_count, prefetch_size = self._qos
        yield from self.set_qos(
            prefetch_count=prefetch_count,
            prefetch_size=prefetch_size
        )
        return result
    @asyncio.coroutine
    def set_qos(self, prefetch_count: int = 0, prefetch_size: int = 0, all_channels=False, timeout: int = None):
        """Set QoS, remembering the values so reconnects can replay them."""
        if all_channels:
            raise NotImplementedError("Not available to RobustConnection")
        self._qos = prefetch_count, prefetch_size
        return (yield from super().set_qos(
            prefetch_count=prefetch_count,
            prefetch_size=prefetch_size,
            timeout=timeout,
        ))
    @BaseChannel._ensure_channel_is_open
    @asyncio.coroutine
    def close(self) -> None:
        """Close the channel permanently (idempotent; no reconnect after this)."""
        if self._closed:
            return
        with (yield from self._write_lock):
            self._closed = True
            self._channel.close()
            yield from self.closing
            self._channel = None
    @asyncio.coroutine
    def declare_exchange(self, name: str, type: ExchangeType = ExchangeType.DIRECT,
                         durable: bool = None, auto_delete: bool = False,
                         internal: bool = False, passive: bool = False,
                         arguments: dict = None, timeout: int = None,
                         robust: bool = True) -> Generator[Any, None, Exchange]:
        """Declare an exchange; robust non-internal exchanges are registered
        for automatic re-declaration on reconnect."""
        exchange = yield from super().declare_exchange(
            name=name, type=type, durable=durable, auto_delete=auto_delete,
            internal=internal, passive=passive, arguments=arguments,
            timeout=timeout,
        )
        if not internal and robust:
            self._exchanges[name] = exchange
        return exchange
    @asyncio.coroutine
    def exchange_delete(self, exchange_name: str, timeout: int = None, if_unused=False, nowait=False):
        """Delete an exchange and drop it from the reconnect registry."""
        result = yield from super().exchange_delete(
            exchange_name=exchange_name, timeout=timeout,
            if_unused=if_unused, nowait=nowait
        )
        self._exchanges.pop(exchange_name, None)
        return result
    @asyncio.coroutine
    def declare_queue(self, name: str = None, *, durable: bool = None, exclusive: bool = False,
                      passive: bool = False, auto_delete: bool = False,
                      arguments: dict = None, timeout: int = None,
                      robust: bool = True) -> Generator[Any, None, Queue]:
        """Declare a queue; robust queues are registered for automatic
        re-declaration on reconnect."""
        queue = yield from super().declare_queue(
            name=name, durable=durable, exclusive=exclusive,
            passive=passive, auto_delete=auto_delete,
            arguments=arguments, timeout=timeout,
        )
        if robust:
            # Key by the *actual* queue name: when ``name`` is None the broker
            # assigns a generated name, and keying by ``None`` would make
            # server-named queues overwrite each other in the registry and
            # leave stale entries that ``queue_delete`` could never remove.
            self._queues[queue.name] = queue
        return queue
    @asyncio.coroutine
    def queue_delete(self, queue_name: str, timeout: int = None,
                     if_unused: bool = False, if_empty: bool = False, nowait: bool = False):
        """Delete a queue and drop it from the reconnect registry."""
        result = yield from super().queue_delete(
            queue_name=queue_name, timeout=timeout,
            if_unused=if_unused, if_empty=if_empty, nowait=nowait
        )
        self._queues.pop(queue_name, None)
        return result
__all__ = ('RobustChannel',)
| 33.189349 | 112 | 0.63826 | import asyncio
from typing import Callable, Any, Generator, Union
from logging import getLogger
from aio_pika.tools import create_future
from .compat import Awaitable
from .exchange import Exchange, ExchangeType
from .message import IncomingMessage
from .queue import Queue
from .common import BaseChannel, FutureStore
from .channel import Channel
from .robust_queue import RobustQueue
from .robust_exchange import RobustExchange
log = getLogger(__name__)
FunctionOrCoroutine = Union[Callable[[IncomingMessage], Any], Awaitable[IncomingMessage]]
class RobustChannel(Channel):
    """Reconnect-aware channel: re-declares registered exchanges, queues and
    QoS settings on a fresh channel whenever the connection is restored."""
    QUEUE_CLASS = RobustQueue
    EXCHANGE_CLASS = RobustExchange
    def __init__(self, connection, loop: asyncio.AbstractEventLoop,
                 future_store: FutureStore, channel_number: int=None,
                 publisher_confirms: bool=True, on_return_raises=False):
        """Create the channel; ``publisher_confirms=False`` trades delivery
        confirmations for performance."""
        super().__init__(
            loop=loop,
            future_store=future_store.get_child(),
            connection=connection,
            channel_number=channel_number,
            publisher_confirms=publisher_confirms,
            on_return_raises=on_return_raises,
        )
        self._closed = False
        self._exchanges = dict()  # name -> exchange, restored on reconnect
        self._queues = dict()     # name -> queue, restored on reconnect
        self._qos = 0, 0          # (prefetch_count, prefetch_size)
    @asyncio.coroutine
    def on_reconnect(self, connection, channel_number):
        """Re-open on *connection*, then restore exchanges and queues."""
        exc = ConnectionError('Auto Reconnect Error')
        if not self._closing.done():
            self._closing.set_exception(exc)
        self._closing = create_future(loop=self.loop)
        self._futures.reject_all(exc)
        self._connection = connection
        self._channel_number = channel_number
        yield from self.initialize()
        for exchange in self._exchanges.values():
            yield from exchange.on_reconnect(self)
        for queue in self._queues.values():
            yield from queue.on_reconnect(self)
    @asyncio.coroutine
    def initialize(self, timeout=None):
        """Initialize and re-apply remembered QoS."""
        result = yield from super().initialize()
        prefetch_count, prefetch_size = self._qos
        yield from self.set_qos(
            prefetch_count=prefetch_count,
            prefetch_size=prefetch_size
        )
        return result
    @asyncio.coroutine
    def set_qos(self, prefetch_count: int = 0, prefetch_size: int = 0, all_channels=False, timeout: int = None):
        """Set QoS; values are remembered for replay after reconnect."""
        if all_channels:
            raise NotImplementedError("Not available to RobustConnection")
        self._qos = prefetch_count, prefetch_size
        return (yield from super().set_qos(
            prefetch_count=prefetch_count,
            prefetch_size=prefetch_size,
            timeout=timeout,
        ))
    @BaseChannel._ensure_channel_is_open
    @asyncio.coroutine
    def close(self) -> None:
        """Permanently close the channel (idempotent)."""
        if self._closed:
            return
        with (yield from self._write_lock):
            self._closed = True
            self._channel.close()
            yield from self.closing
            self._channel = None
    @asyncio.coroutine
    def declare_exchange(self, name: str, type: ExchangeType = ExchangeType.DIRECT,
                         durable: bool = None, auto_delete: bool = False,
                         internal: bool = False, passive: bool = False,
                         arguments: dict = None, timeout: int = None,
                         robust: bool = True) -> Generator[Any, None, Exchange]:
        """Declare an exchange; register robust non-internal ones."""
        exchange = yield from super().declare_exchange(
            name=name, type=type, durable=durable, auto_delete=auto_delete,
            internal=internal, passive=passive, arguments=arguments,
            timeout=timeout,
        )
        if not internal and robust:
            self._exchanges[name] = exchange
        return exchange
    @asyncio.coroutine
    def exchange_delete(self, exchange_name: str, timeout: int = None, if_unused=False, nowait=False):
        """Delete an exchange and unregister it."""
        result = yield from super().exchange_delete(
            exchange_name=exchange_name, timeout=timeout,
            if_unused=if_unused, nowait=nowait
        )
        self._exchanges.pop(exchange_name, None)
        return result
    @asyncio.coroutine
    def declare_queue(self, name: str = None, *, durable: bool = None, exclusive: bool = False,
                      passive: bool = False, auto_delete: bool = False,
                      arguments: dict = None, timeout: int = None,
                      robust: bool = True) -> Generator[Any, None, Queue]:
        """Declare a queue; register robust ones for restore on reconnect."""
        queue = yield from super().declare_queue(
            name=name, durable=durable, exclusive=exclusive,
            passive=passive, auto_delete=auto_delete,
            arguments=arguments, timeout=timeout,
        )
        if robust:
            # Use the broker-assigned queue.name: ``name`` may be None for
            # server-named queues, which would collide under a single key
            # and leave entries queue_delete could never pop.
            self._queues[queue.name] = queue
        return queue
    @asyncio.coroutine
    def queue_delete(self, queue_name: str, timeout: int = None,
                     if_unused: bool = False, if_empty: bool = False, nowait: bool = False):
        """Delete a queue and unregister it."""
        result = yield from super().queue_delete(
            queue_name=queue_name, timeout=timeout,
            if_unused=if_unused, if_empty=if_empty, nowait=nowait
        )
        self._queues.pop(queue_name, None)
        return result
__all__ = ('RobustChannel',)
| true | true |
1c47d7c374e86f2955d404bda2c09808e815f342 | 4,040 | py | Python | recipes/b2/portable/conanfile.py | Aypahyo/conan-center-index | c41d64960c66d3d81274d4189534f6fcb7bc4a36 | [
"MIT"
] | null | null | null | recipes/b2/portable/conanfile.py | Aypahyo/conan-center-index | c41d64960c66d3d81274d4189534f6fcb7bc4a36 | [
"MIT"
] | 1 | 2021-11-22T13:54:48.000Z | 2021-11-22T14:09:45.000Z | recipes/b2/portable/conanfile.py | Aypahyo/conan-center-index | c41d64960c66d3d81274d4189534f6fcb7bc4a36 | [
"MIT"
] | null | null | null | from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.33.0"
class B2Conan(ConanFile):
    """Conan recipe that builds the B2 build system from source and packages
    it in the portable install layout."""
    name = "b2"
    homepage = "https://www.bfgroup.xyz/b2/"
    description = "B2 makes it easy to build C++ projects, everywhere."
    topics = ("b2", "installer", "builder", "build", "build-system")
    license = "BSL-1.0"
    settings = "os", "arch"
    url = "https://github.com/conan-io/conan-center-index"
    # NOTE(review): the following triple-quoted block is a bare string
    # expression kept as in-source documentation of the two options below.
    '''
    * use_cxx_env: False, True
       Indicates if the build will use the CXX and
       CXXFLAGS environment variables. The common use is to add additional flags
       for building on specific platforms or for additional optimization options.

    * toolset: 'auto', 'cxx', 'cross-cxx',
       'acc', 'borland', 'clang', 'como', 'gcc-nocygwin', 'gcc',
       'intel-darwin', 'intel-linux', 'intel-win32', 'kcc', 'kylix',
       'mingw', 'mipspro', 'pathscale', 'pgi', 'qcc', 'sun', 'sunpro',
       'tru64cxx', 'vacpp', 'vc12', 'vc14', 'vc141', 'vc142', 'vc143'
       Specifies the toolset to use for building. The default of 'auto' detects
       a usable compiler for building and should be preferred. The 'cxx' toolset
       uses the 'CXX' and 'CXXFLAGS' solely for building. Using the 'cxx'
       toolset will also turn on the 'use_cxx_env' option. And the 'cross-cxx'
       toolset uses the 'BUILD_CXX' and 'BUILD_CXXFLAGS' vars. This frees the
       'CXX' and 'CXXFLAGS' variables for use in subprocesses.
    '''
    options = {
        'use_cxx_env': [False, True],
        'toolset': [
            'auto', 'cxx', 'cross-cxx',
            'acc', 'borland', 'clang', 'como', 'gcc-nocygwin', 'gcc',
            'intel-darwin', 'intel-linux', 'intel-win32', 'kcc', 'kylix',
            'mingw', 'mipspro', 'pathscale', 'pgi', 'qcc', 'sun', 'sunpro',
            'tru64cxx', 'vacpp', 'vc12', 'vc14', 'vc141', 'vc142', 'vc143']
    }
    default_options = {
        'use_cxx_env': False,
        'toolset': 'auto'
    }
    def validate(self):
        # The cxx/cross-cxx toolsets read the CXX* env vars, so use_cxx_env
        # must be enabled for them.
        if (self.options.toolset == 'cxx' or self.options.toolset == 'cross-cxx') and not self.options.use_cxx_env:
            raise ConanInvalidConfiguration(
                "Option toolset 'cxx' and 'cross-cxx' requires 'use_cxx_env=True'")
    def source(self):
        # Fetch the release tarball declared in conandata.yml into ./source.
        tools.get(**self.conan_data["sources"][self.version],
                  strip_root=True, destination="source")
    def build(self):
        # Phase 1: bootstrap the b2 engine (build.bat on Windows, build.sh
        # elsewhere), optionally with an explicit toolset argument.
        use_windows_commands = os.name == 'nt'
        command = "build" if use_windows_commands else "./build.sh"
        if self.options.toolset != 'auto':
            command += " "+str(self.options.toolset)
        build_dir = os.path.join(self.source_folder, "source")
        engine_dir = os.path.join(build_dir, "src", "engine")
        os.chdir(engine_dir)
        with tools.environment_append({"VSCMD_START_DIR": os.curdir}):
            if self.options.use_cxx_env:
                # Allow use of CXX env vars.
                self.run(command)
            else:
                # To avoid using the CXX env vars we clear them out for the build.
                with tools.environment_append({"CXX": "", "CXXFLAGS": ""}):
                    self.run(command)
        # Phase 2: use the bootstrapped b2 to install itself into ../output
        # using the portable layout.
        os.chdir(build_dir)
        command = os.path.join(
            engine_dir, "b2.exe" if use_windows_commands else "b2")
        full_command = \
            "{0} --ignore-site-config --prefix=../output --abbreviate-paths install b2-install-layout=portable".format(
                command)
        self.run(full_command)
    def package(self):
        # License plus the b2 binary and its .jam rule files go into the package.
        self.copy("LICENSE.txt", dst="licenses", src="source")
        self.copy(pattern="*b2", dst="bin", src="output")
        self.copy(pattern="*b2.exe", dst="bin", src="output")
        self.copy(pattern="*.jam", dst="bin", src="output")
    def package_info(self):
        # Expose bin/ on PATH for consumers.
        self.cpp_info.bindirs = ["bin"]
        self.env_info.path = [os.path.join(
            self.package_folder, "bin")]
    def package_id(self):
        # Build-time-only options do not affect the produced binary's id.
        del self.info.options.use_cxx_env
        del self.info.options.toolset
| 41.22449 | 119 | 0.602723 | from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.33.0"
class B2Conan(ConanFile):
name = "b2"
homepage = "https://www.bfgroup.xyz/b2/"
description = "B2 makes it easy to build C++ projects, everywhere."
topics = ("b2", "installer", "builder", "build", "build-system")
license = "BSL-1.0"
settings = "os", "arch"
url = "https://github.com/conan-io/conan-center-index"
options = {
'use_cxx_env': [False, True],
'toolset': [
'auto', 'cxx', 'cross-cxx',
'acc', 'borland', 'clang', 'como', 'gcc-nocygwin', 'gcc',
'intel-darwin', 'intel-linux', 'intel-win32', 'kcc', 'kylix',
'mingw', 'mipspro', 'pathscale', 'pgi', 'qcc', 'sun', 'sunpro',
'tru64cxx', 'vacpp', 'vc12', 'vc14', 'vc141', 'vc142', 'vc143']
}
default_options = {
'use_cxx_env': False,
'toolset': 'auto'
}
def validate(self):
if (self.options.toolset == 'cxx' or self.options.toolset == 'cross-cxx') and not self.options.use_cxx_env:
raise ConanInvalidConfiguration(
"Option toolset 'cxx' and 'cross-cxx' requires 'use_cxx_env=True'")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
strip_root=True, destination="source")
def build(self):
use_windows_commands = os.name == 'nt'
command = "build" if use_windows_commands else "./build.sh"
if self.options.toolset != 'auto':
command += " "+str(self.options.toolset)
build_dir = os.path.join(self.source_folder, "source")
engine_dir = os.path.join(build_dir, "src", "engine")
os.chdir(engine_dir)
with tools.environment_append({"VSCMD_START_DIR": os.curdir}):
if self.options.use_cxx_env:
self.run(command)
else:
with tools.environment_append({"CXX": "", "CXXFLAGS": ""}):
self.run(command)
os.chdir(build_dir)
command = os.path.join(
engine_dir, "b2.exe" if use_windows_commands else "b2")
full_command = \
"{0} --ignore-site-config --prefix=../output --abbreviate-paths install b2-install-layout=portable".format(
command)
self.run(full_command)
def package(self):
self.copy("LICENSE.txt", dst="licenses", src="source")
self.copy(pattern="*b2", dst="bin", src="output")
self.copy(pattern="*b2.exe", dst="bin", src="output")
self.copy(pattern="*.jam", dst="bin", src="output")
def package_info(self):
self.cpp_info.bindirs = ["bin"]
self.env_info.path = [os.path.join(
self.package_folder, "bin")]
def package_id(self):
del self.info.options.use_cxx_env
del self.info.options.toolset
| true | true |
1c47d83c488b457f490f24ffef2a609a22042fe3 | 2,173 | py | Python | tests/importer/onnx/basic/test_gemm.py | louareg/nncase | 0125654eb57b7ff753fe9c396c84b264c01f34d3 | [
"Apache-2.0"
] | null | null | null | tests/importer/onnx/basic/test_gemm.py | louareg/nncase | 0125654eb57b7ff753fe9c396c84b264c01f34d3 | [
"Apache-2.0"
] | null | null | null | tests/importer/onnx/basic/test_gemm.py | louareg/nncase | 0125654eb57b7ff753fe9c396c84b264c01f34d3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019-2021 Canaan Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, unused-argument, import-outside-toplevel
import pytest
import onnx
from onnx import helper
from onnx import AttributeProto, TensorProto, GraphProto
import numpy as np
from onnx_test_runner import OnnxTestRunner
def _make_module():
input_A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [112, 224])
input_B = helper.make_tensor("B", TensorProto.FLOAT,
dims=(56, 224),
vals=np.random.randn(56, 224).astype(np.float32).flatten().tolist())
input_C = helper.make_tensor("C", TensorProto.FLOAT,
dims=(56,),
vals=np.random.randn(56,).astype(np.float32).flatten().tolist())
initializers = []
initializers.append(input_B)
initializers.append(input_C)
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [112, 56])
node_def = helper.make_node(
'Gemm',
['A', 'B', 'C'],
['output'],
alpha=2.0,
beta=3.0,
transA=0,
transB=1
)
graph_def = helper.make_graph(
[node_def],
'test-model',
[input_A],
[output],
initializer=initializers
)
model_def = helper.make_model(graph_def, producer_name='kendryte')
return model_def
def test_gemm(request):
model_def = _make_module()
runner = OnnxTestRunner(request.node.name)
model_file = runner.from_onnx_helper(model_def)
runner.run(model_file)
if __name__ == "__main__":
pytest.main(['-vv', 'test_gemm.py']) | 32.432836 | 100 | 0.655315 |
import pytest
import onnx
from onnx import helper
from onnx import AttributeProto, TensorProto, GraphProto
import numpy as np
from onnx_test_runner import OnnxTestRunner
def _make_module():
input_A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [112, 224])
input_B = helper.make_tensor("B", TensorProto.FLOAT,
dims=(56, 224),
vals=np.random.randn(56, 224).astype(np.float32).flatten().tolist())
input_C = helper.make_tensor("C", TensorProto.FLOAT,
dims=(56,),
vals=np.random.randn(56,).astype(np.float32).flatten().tolist())
initializers = []
initializers.append(input_B)
initializers.append(input_C)
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [112, 56])
node_def = helper.make_node(
'Gemm',
['A', 'B', 'C'],
['output'],
alpha=2.0,
beta=3.0,
transA=0,
transB=1
)
graph_def = helper.make_graph(
[node_def],
'test-model',
[input_A],
[output],
initializer=initializers
)
model_def = helper.make_model(graph_def, producer_name='kendryte')
return model_def
def test_gemm(request):
model_def = _make_module()
runner = OnnxTestRunner(request.node.name)
model_file = runner.from_onnx_helper(model_def)
runner.run(model_file)
if __name__ == "__main__":
pytest.main(['-vv', 'test_gemm.py']) | true | true |
1c47d8bd9f5b530094b55d25e5a8c3f6233d8908 | 140 | py | Python | src/pyggui/defaults/structures/__init__.py | 15minutOdmora/python-pyggui | 6675aeecfc7c47dac54a475dfb87d9e6b641041c | [
"MIT"
] | null | null | null | src/pyggui/defaults/structures/__init__.py | 15minutOdmora/python-pyggui | 6675aeecfc7c47dac54a475dfb87d9e6b641041c | [
"MIT"
] | null | null | null | src/pyggui/defaults/structures/__init__.py | 15minutOdmora/python-pyggui | 6675aeecfc7c47dac54a475dfb87d9e6b641041c | [
"MIT"
] | null | null | null | from pathlib import Path
# Define path constant at import time
PATH = Path(__file__).parent # Parent will fetch this files parent package
| 28 | 75 | 0.785714 | from pathlib import Path
PATH = Path(__file__).parent
| true | true |
1c47dad798962eed2a8ddb76b9b3510f811c3e95 | 1,682 | py | Python | sensors/tfmini_ros/scripts/ros_tfmini_laser_scanner.py | mascaaj/rosdonkeycar | 2e98b837d9ad3a7dd73a3083f0866476501a73e7 | [
"MIT"
] | null | null | null | sensors/tfmini_ros/scripts/ros_tfmini_laser_scanner.py | mascaaj/rosdonkeycar | 2e98b837d9ad3a7dd73a3083f0866476501a73e7 | [
"MIT"
] | null | null | null | sensors/tfmini_ros/scripts/ros_tfmini_laser_scanner.py | mascaaj/rosdonkeycar | 2e98b837d9ad3a7dd73a3083f0866476501a73e7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
from sensor_msgs.msg import LaserScan
from tfmini_servo_scanner import *
import math
SERVO_GPIO = 18
SRV_ANGLE_MIN = math.radians(-85)
SRV_ANGLE_MAX = math.radians(85)
SRV_DUTY_ANGLE_MIN = 2350
SRV_DUTY_ANGLE_MAX = 700
SRV_TIME_MIN_MAX = 0.7
LASER_ANGLE_SAMPLES = 50
def tfmini_laserscan_publisher():
scan_pup= rospy.Publisher('tfmini_laser', LaserScan, queue_size=0)
scan = LaserScan()
#-- Convention: counter clockwise is positive (left positive, right negative)
tfminiscanner = TfminiServoScanner(SERVO_GPIO, SRV_ANGLE_MIN, SRV_ANGLE_MAX,
SRV_DUTY_ANGLE_MIN, SRV_DUTY_ANGLE_MAX, LASER_ANGLE_SAMPLES,
SRV_TIME_MIN_MAX)
frame_id = rospy.get_param('~frame_id', '/map')
#-- Initialize the message
scan.header.frame_id = frame_id
scan.range_min = tfminiscanner.laser.distance_min*0.01
scan.range_max = tfminiscanner.laser.distance_max*0.01
tfminiscanner.reset_servo()
time.sleep(1)
counter = 0
while not rospy.is_shutdown():
ini_angle, end_angle, time_increment, angle_increment, ranges = tfminiscanner.scan(scale_factor=0.01, reset=True)
scan.angle_min = ini_angle
scan.angle_max = end_angle
scan.angle_increment = angle_increment
scan.time_increment = time_increment
scan.ranges = ranges
scan_pup.publish(scan)
if __name__ == "__main__":
rospy.init_node("tfmini_laserscan")
tfmini_laserscan_publisher()
| 30.035714 | 121 | 0.648038 |
import rospy
from sensor_msgs.msg import LaserScan
from tfmini_servo_scanner import *
import math
SERVO_GPIO = 18
SRV_ANGLE_MIN = math.radians(-85)
SRV_ANGLE_MAX = math.radians(85)
SRV_DUTY_ANGLE_MIN = 2350
SRV_DUTY_ANGLE_MAX = 700
SRV_TIME_MIN_MAX = 0.7
LASER_ANGLE_SAMPLES = 50
def tfmini_laserscan_publisher():
scan_pup= rospy.Publisher('tfmini_laser', LaserScan, queue_size=0)
scan = LaserScan()
tfminiscanner = TfminiServoScanner(SERVO_GPIO, SRV_ANGLE_MIN, SRV_ANGLE_MAX,
SRV_DUTY_ANGLE_MIN, SRV_DUTY_ANGLE_MAX, LASER_ANGLE_SAMPLES,
SRV_TIME_MIN_MAX)
frame_id = rospy.get_param('~frame_id', '/map')
scan.header.frame_id = frame_id
scan.range_min = tfminiscanner.laser.distance_min*0.01
scan.range_max = tfminiscanner.laser.distance_max*0.01
tfminiscanner.reset_servo()
time.sleep(1)
counter = 0
while not rospy.is_shutdown():
ini_angle, end_angle, time_increment, angle_increment, ranges = tfminiscanner.scan(scale_factor=0.01, reset=True)
scan.angle_min = ini_angle
scan.angle_max = end_angle
scan.angle_increment = angle_increment
scan.time_increment = time_increment
scan.ranges = ranges
scan_pup.publish(scan)
if __name__ == "__main__":
rospy.init_node("tfmini_laserscan")
tfmini_laserscan_publisher()
| true | true |
1c47dbc173fc346ee1f5f5043ff56d7fb45daca5 | 633 | py | Python | backend/manage.py | crowdbotics-apps/apptest-33096 | ab08576d017c0ba776394073ffaeeac46d72b8d2 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/manage.py | crowdbotics-apps/apptest-33096 | ab08576d017c0ba776394073ffaeeac46d72b8d2 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/manage.py | crowdbotics-apps/apptest-33096 | ab08576d017c0ba776394073ffaeeac46d72b8d2 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apptest_33096.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.772727 | 77 | 0.685624 |
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apptest_33096.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true | true |
1c47dbeb5a981b28bb4113e5889393e507765b42 | 8,223 | py | Python | discord/ext/flags/_command.py | CircuitsBots/Flag-Parsing | e5e997ef4a4642d15066df1ee9b62de05e2c2bc2 | [
"MIT"
] | 3 | 2021-03-16T20:54:37.000Z | 2021-11-11T11:01:20.000Z | discord/ext/flags/_command.py | CircuitsBots/Flag-Parsing | e5e997ef4a4642d15066df1ee9b62de05e2c2bc2 | [
"MIT"
] | null | null | null | discord/ext/flags/_command.py | CircuitsBots/Flag-Parsing | e5e997ef4a4642d15066df1ee9b62de05e2c2bc2 | [
"MIT"
] | 2 | 2021-09-17T04:24:57.000Z | 2022-02-05T17:11:25.000Z | import shlex
from collections import namedtuple
import argparse
import sys
import discord
from discord.ext import commands
from discord.ext.commands import converter
from . import _parser
__all__ = ["add_flag", "command", "group", "FlagCommand", "FlagGroup"]
argument = namedtuple("argument", "args kwargs")
def command(**kwargs):
def inner(func):
cls = kwargs.pop('cls', FlagCommand)
return cls(func, **kwargs)
return inner
def group(**kwargs):
def inner(func):
cls = kwargs.pop('cls', FlagGroup)
return cls(func, **kwargs)
return inner
def add_flag(*flag_names, **kwargs):
def inner(func):
if isinstance(func, commands.Command):
nfunc = func.callback
else:
nfunc = func
if not hasattr(nfunc, '_def_parser'):
nfunc._def_parser = _parser.DontExitArgumentParser()
nfunc._def_parser.add_argument(*flag_names, **kwargs)
return func
return inner
class FlagCommand(commands.Command):
async def _parse_flag_arguments(self, ctx):
if not hasattr(self.callback, '_def_parser'):
return
arg = ctx.view.read_rest()
try:
namespace = self.callback._def_parser.parse_args(shlex.split(arg), ctx=ctx)
except ValueError:
raise commands.ExpectedClosingQuoteError("quote")
flags = vars(namespace)
async def do_convertion(value):
# Would only call if a value is from _get_value else it is already a value.
if type(value) is _parser.ParserResult:
try:
value = await discord.utils.maybe_coroutine(value.result)
# ArgumentTypeErrors indicate errors
except argparse.ArgumentTypeError:
msg = str(sys.exc_info()[1])
raise argparse.ArgumentError(value.action, msg)
# TypeErrors or ValueErrors also indicate errors
except (TypeError, ValueError):
name = getattr(value.action.type, '__name__', repr(value.action.type))
args = {'type': name, 'value': value.arg_string}
msg = 'invalid %(type)s value: %(value)r'
raise argparse.ArgumentError(value.action, msg % args)
return value
for flag, value in flags.items():
# iterate if value is a list, this happens when nargs = '+'
if type(value) is list:
value = [await do_convertion(v) for v in value]
else:
value = await do_convertion(value)
ctx.kwargs.update({flag: value})
@property
def old_signature(self):
if self.usage is not None:
return self.usage
params = self.clean_params
if not params:
return ''
result = []
for name, param in params.items():
greedy = isinstance(param.annotation, converter._Greedy)
if param.default is not param.empty:
# We don't want None or '' to trigger the [name=value] case and instead it should
# do [name] since [name=None] or [name=] are not exactly useful for the user.
should_print = param.default if isinstance(param.default, str) else param.default is not None
if should_print:
result.append('[%s=%s]' % (name, param.default) if not greedy else
'[%s=%s]...' % (name, param.default))
continue
else:
result.append('[%s]' % name)
elif param.kind == param.VAR_POSITIONAL:
result.append('[%s...]' % name)
elif greedy:
result.append('[%s]...' % name)
elif self._is_typing_optional(param.annotation):
result.append('[%s]' % name)
elif param.kind == param.VAR_KEYWORD:
pass
else:
result.append('<%s>' % name)
return ' '.join(result)
@property
def signature(self):
result = self.old_signature
to_append = [result]
parser = self.callback._def_parser # type: _parser.DontExitArgumentParser
for action in parser._actions:
# in argparse, options are done before positionals
# so we need to loop over it twice unfortunately
if action.option_strings:
name = action.dest.upper()
flag = action.option_strings[0].lstrip('-').replace('-', '_')
k = '-' if len(flag) == 1 else '--'
should_print = action.default is not None and action.default != ''
if action.required:
if should_print:
to_append.append('<%s%s %s=%s>' % (k, flag, name, action.default))
else:
to_append.append('<%s%s %s>' % (k, flag, name))
else:
if should_print:
to_append.append('[%s%s %s=%s]' % (k, flag, name, action.default))
else:
to_append.append('[%s%s %s]' % (k, flag, name))
for action in parser._actions:
# here we do the positionals
if not action.option_strings:
name = action.dest
should_print = action.default is not None and action.default != ''
if action.nargs in ('*', '?'): # optional narg types
if should_print:
to_append.append('[%s=%s]' % (name, action.default))
else:
to_append.append('[%s]' % name)
else:
if should_print:
to_append.append('<%s=%s>' % (name, action.default))
else:
to_append.append('<%s>' % name)
return ' '.join(to_append)
async def _parse_arguments(self, ctx):
ctx.args = [ctx] if self.cog is None else [self.cog, ctx]
ctx.kwargs = {}
args = ctx.args
kwargs = ctx.kwargs
view = ctx.view
iterator = iter(self.params.items())
if self.cog is not None:
# we have 'self' as the first parameter so just advance
# the iterator and resume parsing
try:
next(iterator)
except StopIteration:
fmt = 'Callback for {0.name} command is missing "self" parameter.'
raise discord.ClientException(fmt.format(self))
# next we have the 'ctx' as the next parameter
try:
next(iterator)
except StopIteration:
fmt = 'Callback for {0.name} command is missing "ctx" parameter.'
raise discord.ClientException(fmt.format(self))
for name, param in iterator:
if param.kind == param.POSITIONAL_OR_KEYWORD:
transformed = await self.transform(ctx, param)
args.append(transformed)
elif param.kind == param.KEYWORD_ONLY:
# kwarg only param denotes "consume rest" semantics
if self.rest_is_raw:
converter = self._get_converter(param)
argument = view.read_rest()
kwargs[name] = await self.do_conversion(ctx, converter, argument, param)
else:
kwargs[name] = await self.transform(ctx, param)
break
elif param.kind == param.VAR_POSITIONAL:
while not view.eof:
try:
transformed = await self.transform(ctx, param)
args.append(transformed)
except RuntimeError:
break
elif param.kind == param.VAR_KEYWORD:
await self._parse_flag_arguments(ctx)
break
if not self.ignore_extra:
if not view.eof:
raise commands.TooManyArguments('Too many arguments passed to ' + self.qualified_name)
class FlagGroup(FlagCommand, commands.Group):
pass
| 37.377273 | 109 | 0.538246 | import shlex
from collections import namedtuple
import argparse
import sys
import discord
from discord.ext import commands
from discord.ext.commands import converter
from . import _parser
__all__ = ["add_flag", "command", "group", "FlagCommand", "FlagGroup"]
argument = namedtuple("argument", "args kwargs")
def command(**kwargs):
def inner(func):
cls = kwargs.pop('cls', FlagCommand)
return cls(func, **kwargs)
return inner
def group(**kwargs):
def inner(func):
cls = kwargs.pop('cls', FlagGroup)
return cls(func, **kwargs)
return inner
def add_flag(*flag_names, **kwargs):
def inner(func):
if isinstance(func, commands.Command):
nfunc = func.callback
else:
nfunc = func
if not hasattr(nfunc, '_def_parser'):
nfunc._def_parser = _parser.DontExitArgumentParser()
nfunc._def_parser.add_argument(*flag_names, **kwargs)
return func
return inner
class FlagCommand(commands.Command):
async def _parse_flag_arguments(self, ctx):
if not hasattr(self.callback, '_def_parser'):
return
arg = ctx.view.read_rest()
try:
namespace = self.callback._def_parser.parse_args(shlex.split(arg), ctx=ctx)
except ValueError:
raise commands.ExpectedClosingQuoteError("quote")
flags = vars(namespace)
async def do_convertion(value):
if type(value) is _parser.ParserResult:
try:
value = await discord.utils.maybe_coroutine(value.result)
except argparse.ArgumentTypeError:
msg = str(sys.exc_info()[1])
raise argparse.ArgumentError(value.action, msg)
except (TypeError, ValueError):
name = getattr(value.action.type, '__name__', repr(value.action.type))
args = {'type': name, 'value': value.arg_string}
msg = 'invalid %(type)s value: %(value)r'
raise argparse.ArgumentError(value.action, msg % args)
return value
for flag, value in flags.items():
if type(value) is list:
value = [await do_convertion(v) for v in value]
else:
value = await do_convertion(value)
ctx.kwargs.update({flag: value})
@property
def old_signature(self):
if self.usage is not None:
return self.usage
params = self.clean_params
if not params:
return ''
result = []
for name, param in params.items():
greedy = isinstance(param.annotation, converter._Greedy)
if param.default is not param.empty:
# do [name] since [name=None] or [name=] are not exactly useful for the user.
should_print = param.default if isinstance(param.default, str) else param.default is not None
if should_print:
result.append('[%s=%s]' % (name, param.default) if not greedy else
'[%s=%s]...' % (name, param.default))
continue
else:
result.append('[%s]' % name)
elif param.kind == param.VAR_POSITIONAL:
result.append('[%s...]' % name)
elif greedy:
result.append('[%s]...' % name)
elif self._is_typing_optional(param.annotation):
result.append('[%s]' % name)
elif param.kind == param.VAR_KEYWORD:
pass
else:
result.append('<%s>' % name)
return ' '.join(result)
@property
def signature(self):
result = self.old_signature
to_append = [result]
parser = self.callback._def_parser # type: _parser.DontExitArgumentParser
for action in parser._actions:
# in argparse, options are done before positionals
# so we need to loop over it twice unfortunately
if action.option_strings:
name = action.dest.upper()
flag = action.option_strings[0].lstrip('-').replace('-', '_')
k = '-' if len(flag) == 1 else '--'
should_print = action.default is not None and action.default != ''
if action.required:
if should_print:
to_append.append('<%s%s %s=%s>' % (k, flag, name, action.default))
else:
to_append.append('<%s%s %s>' % (k, flag, name))
else:
if should_print:
to_append.append('[%s%s %s=%s]' % (k, flag, name, action.default))
else:
to_append.append('[%s%s %s]' % (k, flag, name))
for action in parser._actions:
# here we do the positionals
if not action.option_strings:
name = action.dest
should_print = action.default is not None and action.default != ''
if action.nargs in ('*', '?'): # optional narg types
if should_print:
to_append.append('[%s=%s]' % (name, action.default))
else:
to_append.append('[%s]' % name)
else:
if should_print:
to_append.append('<%s=%s>' % (name, action.default))
else:
to_append.append('<%s>' % name)
return ' '.join(to_append)
async def _parse_arguments(self, ctx):
ctx.args = [ctx] if self.cog is None else [self.cog, ctx]
ctx.kwargs = {}
args = ctx.args
kwargs = ctx.kwargs
view = ctx.view
iterator = iter(self.params.items())
if self.cog is not None:
# we have 'self' as the first parameter so just advance
# the iterator and resume parsing
try:
next(iterator)
except StopIteration:
fmt = 'Callback for {0.name} command is missing "self" parameter.'
raise discord.ClientException(fmt.format(self))
# next we have the 'ctx' as the next parameter
try:
next(iterator)
except StopIteration:
fmt = 'Callback for {0.name} command is missing "ctx" parameter.'
raise discord.ClientException(fmt.format(self))
for name, param in iterator:
if param.kind == param.POSITIONAL_OR_KEYWORD:
transformed = await self.transform(ctx, param)
args.append(transformed)
elif param.kind == param.KEYWORD_ONLY:
# kwarg only param denotes "consume rest" semantics
if self.rest_is_raw:
converter = self._get_converter(param)
argument = view.read_rest()
kwargs[name] = await self.do_conversion(ctx, converter, argument, param)
else:
kwargs[name] = await self.transform(ctx, param)
break
elif param.kind == param.VAR_POSITIONAL:
while not view.eof:
try:
transformed = await self.transform(ctx, param)
args.append(transformed)
except RuntimeError:
break
elif param.kind == param.VAR_KEYWORD:
await self._parse_flag_arguments(ctx)
break
if not self.ignore_extra:
if not view.eof:
raise commands.TooManyArguments('Too many arguments passed to ' + self.qualified_name)
class FlagGroup(FlagCommand, commands.Group):
pass
| true | true |
1c47dc256aaf34a46737b9baba2f30bbed33feaf | 2,184 | py | Python | nuagevsdsim/simentities/nusimsapegressqosprofile.py | pdellaert/vspk-sim | 459a84366a9bdde82d74aca18ea866e3d55d62ee | [
"BSD-3-Clause"
] | null | null | null | nuagevsdsim/simentities/nusimsapegressqosprofile.py | pdellaert/vspk-sim | 459a84366a9bdde82d74aca18ea866e3d55d62ee | [
"BSD-3-Clause"
] | null | null | null | nuagevsdsim/simentities/nusimsapegressqosprofile.py | pdellaert/vspk-sim | 459a84366a9bdde82d74aca18ea866e3d55d62ee | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017, Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
NUSimSAPEgressQoSProfile
"""
from vspk import v5_0 as vsdk
from nuagevsdsim.simentities.nusimresource import NUSimResource
class NUSimSAPEgressQoSProfile(NUSimResource):
""" Represents a SAPEgressQoSProfile
Notes:
7x50 SAP Egress QoS profile
"""
__vspk_class__ = vsdk.NUSAPEgressQoSProfile
__unique_fields__ = ['externalID']
__mandatory_fields__ = []
__default_fields__ = {
}
__get_parents__ = ['gateway', 'redundancygroup']
__create_parents__ = []
def __init__(self):
super(NUSimSAPEgressQoSProfile, self).__init__() | 39.709091 | 80 | 0.755952 |
from vspk import v5_0 as vsdk
from nuagevsdsim.simentities.nusimresource import NUSimResource
class NUSimSAPEgressQoSProfile(NUSimResource):
__vspk_class__ = vsdk.NUSAPEgressQoSProfile
__unique_fields__ = ['externalID']
__mandatory_fields__ = []
__default_fields__ = {
}
__get_parents__ = ['gateway', 'redundancygroup']
__create_parents__ = []
def __init__(self):
super(NUSimSAPEgressQoSProfile, self).__init__() | true | true |
1c47dc6133a02215bada1a3fbda451034cee9f3a | 2,563 | py | Python | python_module/SuperGLU/Services/DataTransforms/TableTransformService.py | GeneralizedLearningUtilities/SuperGLU | 1c373d1358431fb96dd70b324b26a14fc8ed1fcb | [
"MIT"
] | 8 | 2015-07-13T23:07:20.000Z | 2020-11-13T21:09:55.000Z | python_module/SuperGLU/Services/DataTransforms/TableTransformService.py | GeneralizedLearningUtilities/SuperGLU | 1c373d1358431fb96dd70b324b26a14fc8ed1fcb | [
"MIT"
] | 7 | 2016-01-13T12:13:56.000Z | 2021-12-14T21:12:28.000Z | python_module/SuperGLU/Services/DataTransforms/TableTransformService.py | GeneralizedLearningUtilities/SuperGLU | 1c373d1358431fb96dd70b324b26a14fc8ed1fcb | [
"MIT"
] | 6 | 2015-09-23T17:53:32.000Z | 2020-04-30T07:27:01.000Z | """
This service converts one table into another table,
by changing the headings (e.g., add, remove, re-sort),
and possibly adding calculated fields to the table.
"""
from SuperGLU.Core.MessagingGateway import BaseService
from SuperGLU.Services.DataTransforms.TableTransforms import TableTransformer
class TableTransformService(BaseService, TableTransformer):
TABLE_KEY = 'table'
OUTPUT_FIELDS_KEY = 'outputFields'
def __init__(self, functions=None, anId=None, gateway=None, authenticator=None):
"""
Receive a list of TableTransform instances,
These will be turned into a map of the function name to
the instance, so that these can be used to evaluate data.
"""
BaseService.__init__(self, anId, gateway, authenticator)
TableTransformer.__init__(self, functions)
def transformJSONObj(self, jsonObj):
jsonObj[self.TABLE_KEY] = self.transform(**jsonObj)
return jsonObj
if __name__ == '__main__':
import json
from SuperGLU.Services.DataTransforms.TableTransforms import AverageFieldsTransform
# Setup
averager = AverageFieldsTransform()
functions = [averager]
transformer = TableTransformService(functions)
# Make some sample data
myData = [['A', 'B', 'C', 'D']]
for i in xrange(3):
if i%2 == 0:
myData.append(range(0,4,1))
else:
myData.append(range(3,-1,-1))
print "Original"
print myData
print
# Re-arrange Columns
sortColsFunct = ['B', 'C', 'D', 'A']
myDataJSON = json.dumps({'table' : myData, 'outputFields' : sortColsFunct,
'miscDoNothing' : None})
myDataJSONObj = json.loads(myDataJSON)
myData = transformer.transformJSONObj(myDataJSONObj)
print "Rearranged"
print myData['table']
print
# Add identity columns that duplicate rows B and D
outfieldIdentities = ['A', 'B', 'C', 'D',
('B2', 'Identity', {'fieldName':'B'})]
myDataJSONObj['outputFields'] = outfieldIdentities
myData = transformer.transformJSONObj(myDataJSONObj)
print "Added Col"
print myData['table']
print
# Add column that is an average of A and C, and remove original B
averageACnoB = ['A', 'C', 'D', 'B2',
('Avg(A, C)', 'Average', {'fieldNames': ['A', 'C']})]
myDataJSONObj['outputFields'] = averageACnoB
myData = transformer.transformJSONObj(myDataJSONObj)
print "Averaged A and C, Removed Original B"
print myData['table']
print
| 34.173333 | 88 | 0.65002 | """
This service converts one table into another table,
by changing the headings (e.g., add, remove, re-sort),
and possibly adding calculated fields to the table.
"""
from SuperGLU.Core.MessagingGateway import BaseService
from SuperGLU.Services.DataTransforms.TableTransforms import TableTransformer
class TableTransformService(BaseService, TableTransformer):
TABLE_KEY = 'table'
OUTPUT_FIELDS_KEY = 'outputFields'
def __init__(self, functions=None, anId=None, gateway=None, authenticator=None):
"""
Receive a list of TableTransform instances,
These will be turned into a map of the function name to
the instance, so that these can be used to evaluate data.
"""
BaseService.__init__(self, anId, gateway, authenticator)
TableTransformer.__init__(self, functions)
def transformJSONObj(self, jsonObj):
jsonObj[self.TABLE_KEY] = self.transform(**jsonObj)
return jsonObj
if __name__ == '__main__':
import json
from SuperGLU.Services.DataTransforms.TableTransforms import AverageFieldsTransform
averager = AverageFieldsTransform()
functions = [averager]
transformer = TableTransformService(functions)
myData = [['A', 'B', 'C', 'D']]
for i in xrange(3):
if i%2 == 0:
myData.append(range(0,4,1))
else:
myData.append(range(3,-1,-1))
print "Original"
print myData
print
sortColsFunct = ['B', 'C', 'D', 'A']
myDataJSON = json.dumps({'table' : myData, 'outputFields' : sortColsFunct,
'miscDoNothing' : None})
myDataJSONObj = json.loads(myDataJSON)
myData = transformer.transformJSONObj(myDataJSONObj)
print "Rearranged"
print myData['table']
print
outfieldIdentities = ['A', 'B', 'C', 'D',
('B2', 'Identity', {'fieldName':'B'})]
myDataJSONObj['outputFields'] = outfieldIdentities
myData = transformer.transformJSONObj(myDataJSONObj)
print "Added Col"
print myData['table']
print
averageACnoB = ['A', 'C', 'D', 'B2',
('Avg(A, C)', 'Average', {'fieldNames': ['A', 'C']})]
myDataJSONObj['outputFields'] = averageACnoB
myData = transformer.transformJSONObj(myDataJSONObj)
print "Averaged A and C, Removed Original B"
print myData['table']
print
| false | true |
1c47dccf6915887e22c7722e4d7c64ee109f4851 | 10,838 | py | Python | CTFd/views.py | ws4/TopCTFd | 3b1e25df1318e86ff163a0b546f6e9b7f8305a5a | [
"Apache-2.0"
] | 1 | 2019-06-25T09:24:29.000Z | 2019-06-25T09:24:29.000Z | CTFd/views.py | ws4/TopCTFd | 3b1e25df1318e86ff163a0b546f6e9b7f8305a5a | [
"Apache-2.0"
] | null | null | null | CTFd/views.py | ws4/TopCTFd | 3b1e25df1318e86ff163a0b546f6e9b7f8305a5a | [
"Apache-2.0"
] | null | null | null | import os
import re
from flask import current_app as app, render_template, request, redirect, abort, jsonify, url_for, session, Blueprint, Response, send_file
from flask.helpers import safe_join
from jinja2.exceptions import TemplateNotFound
from passlib.hash import bcrypt_sha256
from CTFd.models import db, Teams, Solves, Awards, Files, Pages
from CTFd.utils import cache, markdown
from CTFd import utils
views = Blueprint('views', __name__)
@views.route('/setup', methods=['GET', 'POST'])
def setup():
    """First-run setup: create the admin user, index page and default config.

    Once the CTF is already set up, requests are redirected to the index.

    Bug fix: the custom-CSS default was written to the 'start' config key
    instead of 'css', so `views.custom_css` (which reads 'css') always
    served an unset value.
    """
    if not utils.is_setup():
        if not session.get('nonce'):
            session['nonce'] = utils.sha512(os.urandom(10))
        if request.method == 'POST':
            utils.set_config('ctf_name', request.form['ctf_name'])
            # Custom CSS served by views.custom_css.
            utils.set_config('css', '')
            # Admin user (banned so it does not appear on the scoreboard).
            admin = Teams(request.form['name'],
                          request.form['email'],
                          request.form['password'])
            admin.admin = True
            admin.banned = True
            # Default index page.  NOTE(review): the template below has no
            # '{}' placeholder, so .format(request.script_root) is a no-op;
            # confirm whether the logo src was meant to be prefixed with it.
            page = Pages('index', """<div class="container main-container">
<img class="logo" src="themes/original/static/img/logo.png" />
<h3 class="text-center" >
<p>A11111111 cool CTF platform from <a href="https://ctfd.io">ctfd.io</a></p>
<p style="">Follow us on social media:</p>
<a href="https://twitter.com/ctfdio"><i class="fa fa-twitter fa-2x" aria-hidden="true"></i></a>
<a href="https://facebook.com/ctfdio"><i class="fa fa-facebook-official fa-2x" aria-hidden="true"></i></a>
<a href="https://github.com/ctfd"><i class="fa fa-github fa-2x" aria-hidden="true"></i></a>
</h3>
<br>
<h4 class="text-center">
<a href="admin">Click here</a> to login and setup your CTF
</h4>
</div>""".format(request.script_root))
            # Challenge attempt limit (0 = unlimited) and CTF time window.
            utils.set_config('max_tries', 0)
            utils.set_config('start', None)
            utils.set_config('end', None)
            utils.set_config('freeze', None)
            # Visibility / registration policies.
            utils.set_config('view_challenges_unregistered', None)
            utils.set_config('prevent_registration', None)
            # Email verification and SMTP settings.
            utils.set_config('verify_emails', None)
            utils.set_config('mail_server', None)
            utils.set_config('mail_port', None)
            utils.set_config('mail_tls', None)
            utils.set_config('mail_ssl', None)
            utils.set_config('mail_username', None)
            utils.set_config('mail_password', None)
            utils.set_config('mail_useauth', None)
            utils.set_config('setup', True)
            db.session.add(page)
            db.session.add(admin)
            db.session.commit()
            # Log the freshly created admin in.
            session['username'] = admin.name
            session['id'] = admin.id
            session['admin'] = admin.admin
            session['nonce'] = utils.sha512(os.urandom(10))
            db.session.close()
            app.setup = False
            with app.app_context():
                cache.clear()
            return redirect(url_for('views.static_html'))
        return render_template('setup.html', nonce=session.get('nonce'))
    return redirect(url_for('views.static_html'))
# Serve the admin-configurable stylesheet stored under the 'css' config key.
@views.route('/static/user.css')
def custom_css():
    """Return the user-provided custom CSS as a text/css response."""
    css_body = utils.get_config('css')
    return Response(css_body, mimetype='text/css')
# Static theme templates and user-defined pages.
@views.route("/", defaults={'template': 'index'})
@views.route("/<template>")
def static_html(template):
    """Render a theme template, falling back to a stored Pages entry."""
    try:
        return render_template('%s.html' % template)
    except TemplateNotFound:
        # No theme template of that name -- look for a DB-backed page.
        page = utils.get_page(template)
        if page is None:
            abort(404)
        rendered = markdown(page.html)
        return render_template('page.html', content=rendered)
@views.route('/teams', defaults={'page': '1'})
@views.route('/teams/<int:page>')
def teams(page):
    """Paginated listing of visible (non-banned, optionally verified) teams."""
    page = abs(int(page))
    results_per_page = 50
    page_start = results_per_page * (page - 1)
    page_end = page_start + results_per_page
    # Hide unverified teams whenever email verification is enforced.
    filters = {'banned': False}
    if utils.get_config('verify_emails'):
        filters['verified'] = True
    query = Teams.query.filter_by(**filters)
    count = query.count()
    teams = query.slice(page_start, page_end).all()
    # Ceiling division: number of pages needed to show `count` teams.
    pages = int(count / results_per_page) + (count % results_per_page > 0)
    return render_template('teams.html', teams=teams, team_pages=pages, curr_page=page)
@views.route('/team/<int:teamid>', methods=['GET', 'POST'])
def team(teamid):
    """Public team page; POST returns the team's solves as JSON.

    Bug fix: the scoreboard-visibility config key had been mangled by a
    bad search/replace into 'view_scoreboard_if_utils.authed' (a key that
    is never set), which silently disabled the login redirect.  Restore
    the real key name 'view_scoreboard_if_authed'.
    """
    if utils.get_config('view_scoreboard_if_authed') and not utils.authed():
        return redirect(url_for('auth.login', next=request.path))
    errors = []
    freeze = utils.get_config('freeze')
    user = Teams.query.filter_by(id=teamid).first_or_404()
    solves = Solves.query.filter_by(teamid=teamid)
    awards = Awards.query.filter_by(teamid=teamid)
    place = user.place()
    score = user.score()
    # While the scoreboard is frozen, other users only see pre-freeze
    # solves/awards; a team still sees its own full history.
    if freeze:
        freeze = utils.unix_time_to_utc(freeze)
        if teamid != session.get('id'):
            solves = solves.filter(Solves.date < freeze)
            awards = awards.filter(Awards.date < freeze)
    solves = solves.all()
    awards = awards.all()
    db.session.close()
    if utils.hide_scores() and teamid != session.get('id'):
        errors.append('Scores are currently hidden')
    if errors:
        return render_template('team.html', team=user, errors=errors)
    if request.method == 'GET':
        return render_template('team.html', solves=solves, awards=awards, team=user, score=score, place=place, score_frozen=utils.is_scoreboard_frozen())
    elif request.method == 'POST':
        json = {'solves': []}
        for x in solves:
            json['solves'].append({'id': x.id, 'chal': x.chalid, 'team': x.teamid})
        return jsonify(json)
@views.route('/profile', methods=['POST', 'GET'])
def profile():
    """View and edit the logged-in team's profile.

    GET renders the form pre-filled with the team's current data; POST
    validates and applies the submitted changes.  Anonymous users are
    redirected to the login page.

    Robustness fix: `names` and `name_len` were only assigned when name
    changes are allowed, relying on short-circuit evaluation further down
    to avoid a NameError; they are now always initialized.
    """
    if not utils.authed():
        return redirect(url_for('auth.login'))
    if request.method == "POST":
        errors = []
        name = request.form.get('name')
        email = request.form.get('email')
        website = request.form.get('website')
        affiliation = request.form.get('affiliation')
        country = request.form.get('country')
        user = Teams.query.filter_by(id=session['id']).first()
        names = None
        name_len = False
        if not utils.get_config('prevent_name_change'):
            names = Teams.query.filter_by(name=name).first()
            name_len = len(request.form['name']) == 0
        emails = Teams.query.filter_by(email=email).first()
        valid_email = re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email)
        # Changing the password requires the current one in the 'confirm'
        # field; an empty password field means "leave unchanged".
        if ('password' in request.form.keys() and not len(request.form['password']) == 0) and \
                (not bcrypt_sha256.verify(request.form.get('confirm').strip(), user.password)):
            errors.append("Your old password doesn't match what we have.")
        if not valid_email:
            errors.append("That email doesn't look right")
        if not utils.get_config('prevent_name_change') and names and name != session['username']:
            errors.append('That team name is already taken')
        if emails and emails.id != session['id']:
            errors.append('That email has already been used')
        if not utils.get_config('prevent_name_change') and name_len:
            errors.append('Pick a longer team name')
        if website.strip() and not utils.validate_url(website):
            errors.append("That doesn't look like a valid URL")
        if len(errors) > 0:
            return render_template('profile.html', name=name, email=email, website=website,
                                   affiliation=affiliation, country=country, errors=errors)
        # All checks passed: apply the changes to the already-loaded row.
        team = user
        if not utils.get_config('prevent_name_change'):
            team.name = name
        if team.email != email.lower():
            team.email = email.lower()
            # A changed address must be re-verified when verification is on.
            if utils.get_config('verify_emails'):
                team.verified = False
        session['username'] = team.name
        if 'password' in request.form.keys() and not len(request.form['password']) == 0:
            team.password = bcrypt_sha256.encrypt(request.form.get('password'))
        team.website = website
        team.affiliation = affiliation
        team.country = country
        db.session.commit()
        db.session.close()
        return redirect(url_for('views.profile'))
    # GET: pre-fill the form with the team's current values.
    user = Teams.query.filter_by(id=session['id']).first()
    prevent_name_change = utils.get_config('prevent_name_change')
    confirm_email = utils.get_config('verify_emails') and not user.verified
    return render_template('profile.html', name=user.name, email=user.email, website=user.website,
                           affiliation=user.affiliation, country=user.country,
                           prevent_name_change=prevent_name_change, confirm_email=confirm_email)
@views.route('/files', defaults={'path': ''})
@views.route('/files/<path:path>')
def file_handler(path):
    """Serve an uploaded file; challenge files are gated by CTF timing."""
    f = Files.query.filter_by(location=path).first_or_404()
    # Challenge attachments are only downloadable by admins, while the CTF
    # is running, or afterwards when post-CTF viewing is enabled.  The
    # short-circuit order matches the original nested checks.
    if f.chal and not utils.is_admin() and not utils.ctftime():
        allowed_after_ctf = utils.view_after_ctf() and utils.ctf_started()
        if not allowed_after_ctf:
            abort(403)
    upload_folder = os.path.join(app.root_path, app.config['UPLOAD_FOLDER'])
    return send_file(safe_join(upload_folder, f.location))
@views.route('/themes/<theme>/static/<path:path>')
def themes_handler(theme, path):
    """Serve a static asset from the given theme's directory."""
    # safe_join rejects path traversal outside the theme's static dir.
    filename = safe_join(app.root_path, 'themes', theme, 'static', path)
    if not os.path.isfile(filename):
        abort(404)
    return send_file(filename)
| 40.140741 | 153 | 0.605462 | import os
import re
from flask import current_app as app, render_template, request, redirect, abort, jsonify, url_for, session, Blueprint, Response, send_file
from flask.helpers import safe_join
from jinja2.exceptions import TemplateNotFound
from passlib.hash import bcrypt_sha256
from CTFd.models import db, Teams, Solves, Awards, Files, Pages
from CTFd.utils import cache, markdown
from CTFd import utils
views = Blueprint('views', __name__)
@views.route('/setup', methods=['GET', 'POST'])
def setup():
if not utils.is_setup():
if not session.get('nonce'):
session['nonce'] = utils.sha512(os.urandom(10))
if request.method == 'POST':
ctf_name = request.form['ctf_name']
ctf_name = utils.set_config('ctf_name', ctf_name)
css = utils.set_config('start', '')
name = request.form['name']
email = request.form['email']
password = request.form['password']
admin = Teams(name, email, password)
admin.admin = True
admin.banned = True
page = Pages('index', """<div class="container main-container">
<img class="logo" src="themes/original/static/img/logo.png" />
<h3 class="text-center" >
<p>A11111111 cool CTF platform from <a href="https://ctfd.io">ctfd.io</a></p>
<p style="">Follow us on social media:</p>
<a href="https://twitter.com/ctfdio"><i class="fa fa-twitter fa-2x" aria-hidden="true"></i></a>
<a href="https://facebook.com/ctfdio"><i class="fa fa-facebook-official fa-2x" aria-hidden="true"></i></a>
<a href="https://github.com/ctfd"><i class="fa fa-github fa-2x" aria-hidden="true"></i></a>
</h3>
<br>
<h4 class="text-center">
<a href="admin">Click here</a> to login and setup your CTF
</h4>
</div>""".format(request.script_root))
max_tries = utils.set_config('max_tries', 0)
start = utils.set_config('start', None)
end = utils.set_config('end', None)
freeze = utils.set_config('freeze', None)
view_challenges_unregistered = utils.set_config('view_challenges_unregistered', None)
prevent_registration = utils.set_config('prevent_registration', None)
verify_emails = utils.set_config('verify_emails', None)
mail_server = utils.set_config('mail_server', None)
mail_port = utils.set_config('mail_port', None)
mail_tls = utils.set_config('mail_tls', None)
mail_ssl = utils.set_config('mail_ssl', None)
mail_username = utils.set_config('mail_username', None)
mail_password = utils.set_config('mail_password', None)
mail_useauth = utils.set_config('mail_useauth', None)
setup = utils.set_config('setup', True)
db.session.add(page)
db.session.add(admin)
db.session.commit()
session['username'] = admin.name
session['id'] = admin.id
session['admin'] = admin.admin
session['nonce'] = utils.sha512(os.urandom(10))
db.session.close()
app.setup = False
with app.app_context():
cache.clear()
return redirect(url_for('views.static_html'))
return render_template('setup.html', nonce=session.get('nonce'))
return redirect(url_for('views.static_html'))
@views.route('/static/user.css')
def custom_css():
return Response(utils.get_config('css'), mimetype='text/css')
@views.route("/", defaults={'template': 'index'})
@views.route("/<template>")
def static_html(template):
try:
return render_template('%s.html' % template)
except TemplateNotFound:
page = utils.get_page(template)
if page is None:
abort(404)
return render_template('page.html', content=markdown(page.html))
@views.route('/teams', defaults={'page': '1'})
@views.route('/teams/<int:page>')
def teams(page):
page = abs(int(page))
results_per_page = 50
page_start = results_per_page * (page - 1)
page_end = results_per_page * (page - 1) + results_per_page
if utils.get_config('verify_emails'):
count = Teams.query.filter_by(verified=True, banned=False).count()
teams = Teams.query.filter_by(verified=True, banned=False).slice(page_start, page_end).all()
else:
count = Teams.query.filter_by(banned=False).count()
teams = Teams.query.filter_by(banned=False).slice(page_start, page_end).all()
pages = int(count / results_per_page) + (count % results_per_page > 0)
return render_template('teams.html', teams=teams, team_pages=pages, curr_page=page)
@views.route('/team/<int:teamid>', methods=['GET', 'POST'])
def team(teamid):
if utils.get_config('view_scoreboard_if_utils.authed') and not utils.authed():
return redirect(url_for('auth.login', next=request.path))
errors = []
freeze = utils.get_config('freeze')
user = Teams.query.filter_by(id=teamid).first_or_404()
solves = Solves.query.filter_by(teamid=teamid)
awards = Awards.query.filter_by(teamid=teamid)
place = user.place()
score = user.score()
if freeze:
freeze = utils.unix_time_to_utc(freeze)
if teamid != session.get('id'):
solves = solves.filter(Solves.date < freeze)
awards = awards.filter(Awards.date < freeze)
solves = solves.all()
awards = awards.all()
db.session.close()
if utils.hide_scores() and teamid != session.get('id'):
errors.append('Scores are currently hidden')
if errors:
return render_template('team.html', team=user, errors=errors)
if request.method == 'GET':
return render_template('team.html', solves=solves, awards=awards, team=user, score=score, place=place, score_frozen=utils.is_scoreboard_frozen())
elif request.method == 'POST':
json = {'solves': []}
for x in solves:
json['solves'].append({'id': x.id, 'chal': x.chalid, 'team': x.teamid})
return jsonify(json)
@views.route('/profile', methods=['POST', 'GET'])
def profile():
if utils.authed():
if request.method == "POST":
errors = []
name = request.form.get('name')
email = request.form.get('email')
website = request.form.get('website')
affiliation = request.form.get('affiliation')
country = request.form.get('country')
user = Teams.query.filter_by(id=session['id']).first()
if not utils.get_config('prevent_name_change'):
names = Teams.query.filter_by(name=name).first()
name_len = len(request.form['name']) == 0
emails = Teams.query.filter_by(email=email).first()
valid_email = re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email)
if ('password' in request.form.keys() and not len(request.form['password']) == 0) and \
(not bcrypt_sha256.verify(request.form.get('confirm').strip(), user.password)):
errors.append("Your old password doesn't match what we have.")
if not valid_email:
errors.append("That email doesn't look right")
if not utils.get_config('prevent_name_change') and names and name != session['username']:
errors.append('That team name is already taken')
if emails and emails.id != session['id']:
errors.append('That email has already been used')
if not utils.get_config('prevent_name_change') and name_len:
errors.append('Pick a longer team name')
if website.strip() and not utils.validate_url(website):
errors.append("That doesn't look like a valid URL")
if len(errors) > 0:
return render_template('profile.html', name=name, email=email, website=website,
affiliation=affiliation, country=country, errors=errors)
else:
team = Teams.query.filter_by(id=session['id']).first()
if not utils.get_config('prevent_name_change'):
team.name = name
if team.email != email.lower():
team.email = email.lower()
if utils.get_config('verify_emails'):
team.verified = False
session['username'] = team.name
if 'password' in request.form.keys() and not len(request.form['password']) == 0:
team.password = bcrypt_sha256.encrypt(request.form.get('password'))
team.website = website
team.affiliation = affiliation
team.country = country
db.session.commit()
db.session.close()
return redirect(url_for('views.profile'))
else:
user = Teams.query.filter_by(id=session['id']).first()
name = user.name
email = user.email
website = user.website
affiliation = user.affiliation
country = user.country
prevent_name_change = utils.get_config('prevent_name_change')
confirm_email = utils.get_config('verify_emails') and not user.verified
return render_template('profile.html', name=name, email=email, website=website, affiliation=affiliation,
country=country, prevent_name_change=prevent_name_change, confirm_email=confirm_email)
else:
return redirect(url_for('auth.login'))
@views.route('/files', defaults={'path': ''})
@views.route('/files/<path:path>')
def file_handler(path):
f = Files.query.filter_by(location=path).first_or_404()
if f.chal:
if not utils.is_admin():
if not utils.ctftime():
if utils.view_after_ctf() and utils.ctf_started():
pass
else:
abort(403)
upload_folder = os.path.join(app.root_path, app.config['UPLOAD_FOLDER'])
return send_file(safe_join(upload_folder, f.location))
@views.route('/themes/<theme>/static/<path:path>')
def themes_handler(theme, path):
filename = safe_join(app.root_path, 'themes', theme, 'static', path)
if os.path.isfile(filename):
return send_file(filename)
else:
abort(404)
| true | true |
1c47ddab6db9862eac3c601311b8498882a12edb | 1,209 | py | Python | test.py | yyht/OpenAttack | 637e81a9c60874ec35f923d7c62687cbd6ee3633 | [
"MIT"
] | null | null | null | test.py | yyht/OpenAttack | 637e81a9c60874ec35f923d7c62687cbd6ee3633 | [
"MIT"
] | null | null | null | test.py | yyht/OpenAttack | 637e81a9c60874ec35f923d7c62687cbd6ee3633 | [
"MIT"
] | 1 | 2020-09-01T11:14:42.000Z | 2020-09-01T11:14:42.000Z | import OpenAttack
# Smoke test: run every listed attacker against a BiLSTM victim classifier
# on a 5-example slice of SST, printing each evaluation summary.
dataset = OpenAttack.loadDataset("SST")[0][:5]
clsf = OpenAttack.loadVictim("BiLSTM.SST")
# SEA rules and UAT triggers must be precomputed from the victim + data
# before their attackers can be constructed.
rules = OpenAttack.attackers.SEAAttacker.get_rules(clsf, dataset)
triggers = OpenAttack.attackers.UATAttacker.get_triggers(clsf, dataset, word2id=clsf.config["word2id"], embedding=clsf.config["embedding"])
attackers = [
    OpenAttack.attackers.FDAttacker(word2id=clsf.config["word2id"], embedding=clsf.config["embedding"]),
    OpenAttack.attackers.SEAAttacker(rules=rules),
    OpenAttack.attackers.UATAttacker(triggers=triggers),
    OpenAttack.attackers.TextBuggerAttacker(),
    OpenAttack.attackers.TextFoolerAttacker(),
    OpenAttack.attackers.VIPERAttacker(),
    OpenAttack.attackers.DeepWordBugAttacker(),
    OpenAttack.attackers.GANAttacker(),
    OpenAttack.attackers.GeneticAttacker(),
    OpenAttack.attackers.HotFlipAttacker(),
    OpenAttack.attackers.PWWSAttacker(),
    OpenAttack.attackers.SCPNAttacker(),
]
for attacker in attackers:
    print(attacker.__class__.__name__)
    try:
        print(
            OpenAttack.attack_evals.DefaultAttackEval(attacker, clsf, progress_bar=False).eval(dataset)
        )
    except Exception as e:
        # Best effort: one attacker failing (e.g. missing resources) should
        # not abort the whole sweep; report the error and continue.
        print(e)
    print("\n")
| 36.636364 | 139 | 0.74359 | import OpenAttack
dataset = OpenAttack.loadDataset("SST")[0][:5]
clsf = OpenAttack.loadVictim("BiLSTM.SST")
rules = OpenAttack.attackers.SEAAttacker.get_rules(clsf, dataset)
triggers = OpenAttack.attackers.UATAttacker.get_triggers(clsf, dataset, word2id=clsf.config["word2id"], embedding=clsf.config["embedding"])
attackers = [
OpenAttack.attackers.FDAttacker(word2id=clsf.config["word2id"], embedding=clsf.config["embedding"]),
OpenAttack.attackers.SEAAttacker(rules=rules),
OpenAttack.attackers.UATAttacker(triggers=triggers),
OpenAttack.attackers.TextBuggerAttacker(),
OpenAttack.attackers.TextFoolerAttacker(),
OpenAttack.attackers.VIPERAttacker(),
OpenAttack.attackers.DeepWordBugAttacker(),
OpenAttack.attackers.GANAttacker(),
OpenAttack.attackers.GeneticAttacker(),
OpenAttack.attackers.HotFlipAttacker(),
OpenAttack.attackers.PWWSAttacker(),
OpenAttack.attackers.SCPNAttacker(),
]
for attacker in attackers:
print(attacker.__class__.__name__)
try:
print(
OpenAttack.attack_evals.DefaultAttackEval(attacker, clsf, progress_bar=False).eval(dataset)
)
except Exception as e:
print(e)
print("\n")
| true | true |
1c47ded1a344d1cd51b13949d45a9106e2b325a5 | 1,220 | py | Python | algos/rl/ppo_ray_random.py | XiaoSanchez/autophase | 3d8d173ad27b9786e36efd22d0ceacbcf1cb1dfb | [
"BSD-3-Clause"
] | 14 | 2020-04-03T12:41:50.000Z | 2022-02-04T00:05:01.000Z | algos/rl/ppo_ray_random.py | XiaoSanchez/autophase | 3d8d173ad27b9786e36efd22d0ceacbcf1cb1dfb | [
"BSD-3-Clause"
] | 2 | 2020-03-02T04:32:58.000Z | 2021-09-15T20:02:25.000Z | algos/rl/ppo_ray_random.py | XiaoSanchez/autophase | 3d8d173ad27b9786e36efd22d0ceacbcf1cb1dfb | [
"BSD-3-Clause"
] | 8 | 2020-03-02T10:30:36.000Z | 2021-08-03T02:29:38.000Z | import ray
# PPO tuning driver for randomly sampled HLS benchmarks (gym-hls envs).
import ray.tune as tune
from ray.rllib.agents import ppo
from gym_hls.envs.hls_env import HLSEnv
from gym_hls.envs.hls_multi_env import HLSMultiEnv
ray.init()
env_configs = {}
from gym_hls.envs.random_bm import get_random
# Sample benchmark programs; each entry is (program name, source files).
num_pgms = 1000
bms = get_random(N=num_pgms)
# NOTE(review): env_configs is overwritten on every iteration, so only the
# LAST sampled benchmark is used by the run below -- confirm whether
# tune.run_experiments was meant to sit inside this loop.
for i, bm in enumerate(bms):
  pgm, files= bm
  env_configs['pgm'] = pgm
  env_configs['pgm_files'] = files
  env_configs['run_dir'] = 'run_'+pgm.replace(".c","")
  #env_configs['feature_type'] = 'act_hist'
  env_configs['verbose'] = True
  env_configs['log_results'] = True
# `pgm` here is the loop variable leaking out of the for-loop above.
print("Tune for {}".format(pgm))
tune.run_experiments({
    "my_experiment": {
        "run": "PPO",
        "env":HLSEnv,
        "checkpoint_freq": 50,
        "stop": {"episodes_total": 500},
        "config": {
          "sample_batch_size": 10,
          "train_batch_size": 100,
          "sgd_minibatch_size": 8,
          "num_sgd_iter": 10,
          "horizon": 12,
          "num_gpus": 1,
          "num_workers": 5,
          "lr": 1e-3,
          #"lr": tune.grid_search([0.01, 0.001, 0.0001]),
          "vf_clip_param": 1e5,
          "env_config": env_configs,
        },
    },
})
| 27.727273 | 61 | 0.568852 | import ray
import ray.tune as tune
from ray.rllib.agents import ppo
from gym_hls.envs.hls_env import HLSEnv
from gym_hls.envs.hls_multi_env import HLSMultiEnv
ray.init()
env_configs = {}
from gym_hls.envs.random_bm import get_random
num_pgms = 1000
bms = get_random(N=num_pgms)
for i, bm in enumerate(bms):
pgm, files= bm
env_configs['pgm'] = pgm
env_configs['pgm_files'] = files
env_configs['run_dir'] = 'run_'+pgm.replace(".c","")
env_configs['verbose'] = True
env_configs['log_results'] = True
print("Tune for {}".format(pgm))
tune.run_experiments({
"my_experiment": {
"run": "PPO",
"env":HLSEnv,
"checkpoint_freq": 50,
"stop": {"episodes_total": 500},
"config": {
"sample_batch_size": 10,
"train_batch_size": 100,
"sgd_minibatch_size": 8,
"num_sgd_iter": 10,
"horizon": 12,
"num_gpus": 1,
"num_workers": 5,
"lr": 1e-3,
"vf_clip_param": 1e5,
"env_config": env_configs,
},
},
})
| true | true |
1c47df1a360b94aa764852cc2aa26849ae5db656 | 178 | py | Python | src/users/admin.py | chiliseed/hub | 83f29fbdd12e2260397e18e635f508459fa4990e | [
"Apache-2.0"
] | null | null | null | src/users/admin.py | chiliseed/hub | 83f29fbdd12e2260397e18e635f508459fa4990e | [
"Apache-2.0"
] | 4 | 2021-04-08T20:10:15.000Z | 2021-06-10T20:18:17.000Z | src/users/admin.py | chiliseed/hub | 83f29fbdd12e2260397e18e635f508459fa4990e | [
"Apache-2.0"
] | null | null | null | """Django admin ui for users."""
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import User
admin.site.register(User, UserAdmin)
| 22.25 | 47 | 0.786517 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import User
admin.site.register(User, UserAdmin)
| true | true |
1c47e205f85d9c9e839f341b0ccf620d6cab46cc | 421 | py | Python | Ejemplos/ejemplo13/src/main2.py | ampotty/uip-pc3 | 8362680226df6629791e7a4c6cdf1b738eadc5de | [
"MIT"
] | 10 | 2015-10-27T18:29:06.000Z | 2019-04-03T04:05:31.000Z | Ejemplos/ejemplo13/src/main2.py | abdelgmartinezl/uip-pc3 | 8362680226df6629791e7a4c6cdf1b738eadc5de | [
"MIT"
] | 5 | 2015-10-13T01:12:51.000Z | 2016-10-08T18:01:17.000Z | Ejemplos/ejemplo13/src/main2.py | ampotty/uip-pc3 | 8362680226df6629791e7a4c6cdf1b738eadc5de | [
"MIT"
] | 25 | 2015-09-19T00:40:17.000Z | 2018-02-08T02:54:55.000Z | total = 0
cont1 = 0
while True:
nota = int(input("Calificacion (negativo para salir): "))
if nota < 0:
break
cont1 += 1
total = total + nota
if cont1 != 0:
promedio = total / cont1
else:
promedio = 0
if promedio >= 91:
print("Saca A",promedio)
elif promedio >= 81:
print("Saca B",promedio)
elif promedio >= 71:
print("Saca C",promedio)
else:
print("#TeQuedaste\a",promedio) | 21.05 | 61 | 0.605701 | total = 0
cont1 = 0
while True:
nota = int(input("Calificacion (negativo para salir): "))
if nota < 0:
break
cont1 += 1
total = total + nota
if cont1 != 0:
promedio = total / cont1
else:
promedio = 0
if promedio >= 91:
print("Saca A",promedio)
elif promedio >= 81:
print("Saca B",promedio)
elif promedio >= 71:
print("Saca C",promedio)
else:
print("#TeQuedaste\a",promedio) | true | true |
1c47e2bbbb869849f0c26e0f9ceb36f00f7e3eca | 671 | py | Python | manage.py | Lioncat2002/confession_site | 3b1d209b06cb1eac4b43f5c59cb70d2cfb49d25c | [
"MIT"
] | null | null | null | manage.py | Lioncat2002/confession_site | 3b1d209b06cb1eac4b43f5c59cb70d2cfb49d25c | [
"MIT"
] | null | null | null | manage.py | Lioncat2002/confession_site | 3b1d209b06cb1eac4b43f5c59cb70d2cfb49d25c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings before any framework import.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'confession_site.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint; the usual cause is a missing or inactive
        # virtual environment.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29.173913 | 79 | 0.682563 |
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'confession_site.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true | true |
1c47e4640389cb25f44fd436d0754a0c9ecffe9f | 33,651 | py | Python | trax/rl/actor_critic.py | YannickWehr/trax | 67dda3b236339a7f6de803a3f84a9e92d0f0442c | [
"Apache-2.0"
] | null | null | null | trax/rl/actor_critic.py | YannickWehr/trax | 67dda3b236339a7f6de803a3f84a9e92d0f0442c | [
"Apache-2.0"
] | null | null | null | trax/rl/actor_critic.py | YannickWehr/trax | 67dda3b236339a7f6de803a3f84a9e92d0f0442c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Classes for RL training in Trax."""
import functools
import os
import gym
import numpy as np
import tensorflow as tf
from trax import data
from trax import fastmath
from trax import layers as tl
from trax import shapes
from trax import supervised
from trax.fastmath import numpy as jnp
from trax.rl import advantages as rl_advantages
from trax.rl import training as rl_training
from trax.supervised import lr_schedules as lr
class ActorCriticAgent(rl_training.PolicyAgent):
"""Trains policy and value models using actor-critic methods.
Attrs:
on_policy (bool): Whether the algorithm is on-policy. Used in the data
generators. Should be set in derived classes.
"""
on_policy = None
  def __init__(self, task,
               value_model=None,
               value_optimizer=None,
               value_lr_schedule=lr.multifactor,
               value_batch_size=64,
               value_train_steps_per_epoch=500,
               value_evals_per_epoch=1,
               value_eval_steps=1,
               n_shared_layers=0,
               added_policy_slice_length=0,
               n_replay_epochs=1,
               scale_value_targets=False,
               q_value=False,
               q_value_aggregate_max=True,
               q_value_n_samples=1,
               **kwargs):  # Arguments of PolicyAgent come here.
    """Configures the actor-critic trainer.
    Args:
      task: `RLTask` instance to use.
      value_model: Model to use for the value function.
      value_optimizer: Optimizer to train the value model.
      value_lr_schedule: lr schedule for value model training.
      value_batch_size: Batch size for value model training.
      value_train_steps_per_epoch: Number of steps are we using to train the
          value model in each epoch.
      value_evals_per_epoch: Number of value trainer evaluations per RL epoch;
          only affects metric reporting.
      value_eval_steps: Number of value trainer steps per evaluation; only
          affects metric reporting.
      n_shared_layers: Number of layers to share between value and policy
          models.
      added_policy_slice_length: How much longer should slices of
          trajectories be for policy than for value training; this
          is useful for TD calculations and only affect the length
          of elements produced for policy batches; value batches
          have maximum length set by `max_slice_length` in `**kwargs`.
      n_replay_epochs: Number of last epochs to take into the replay buffer;
          only makes sense for off-policy algorithms.
      scale_value_targets: If `True`, scale value function targets by
          `1 / (1 - gamma)`.
      q_value: If `True`, use Q-values as baselines.
      q_value_aggregate_max: If `True`, aggregate Q-values with max (or mean).
      q_value_n_samples: Number of samples to average over when calculating
          baselines based on Q-values.
      **kwargs: Arguments for `PolicyAgent` superclass.
    """
    self._n_shared_layers = n_shared_layers
    self._value_batch_size = value_batch_size
    self._value_train_steps_per_epoch = value_train_steps_per_epoch
    self._value_evals_per_epoch = value_evals_per_epoch
    self._value_eval_steps = value_eval_steps
    # The 2 below will be initialized in super.__init__ anyway, but are needed
    # to construct value batches which are needed before PolicyAgent init
    # since policy input creation calls the value model -- hence this code.
    self._task = task
    self._max_slice_length = kwargs.get('max_slice_length', 1)
    self._added_policy_slice_length = added_policy_slice_length
    self._n_replay_epochs = n_replay_epochs
    task.set_n_replay_epochs(n_replay_epochs)
    # Optionally normalize value targets; 1/(1-gamma) is the scale of the
    # maximum discounted return for per-step rewards of order 1.
    if scale_value_targets:
      self._value_network_scale = 1 / (1 - self._task.gamma)
    else:
      self._value_network_scale = 1
    self._q_value = q_value
    self._q_value_aggregate_max = q_value_aggregate_max
    self._q_value_n_samples = q_value_n_samples
    is_discrete = isinstance(self._task.action_space, gym.spaces.Discrete)
    self._is_discrete = is_discrete
    self._vocab_size = None
    self._sample_all_discrete_actions = False
    if q_value and is_discrete:
      self._vocab_size = self.task.action_space.n
      # TODO(lukaszkaiser): the code below is specific to AWR, move it.
      # If n_samples = n_actions, we'll take them all in actor and reweight.
      if self._q_value_n_samples == self._vocab_size:
        # TODO(lukaszkaiser): set this explicitly once it's in AWR Trainer.
        self._sample_all_discrete_actions = True
    # Q-networks additionally consume actions (and, for discrete spaces,
    # need the action vocabulary size); wrap the model constructor so.
    if q_value:
      value_model = functools.partial(value_model,
                                      inject_actions=True,
                                      is_discrete=is_discrete,
                                      vocab_size=self._vocab_size)
    # Separate eval-mode copy of the value network, jitted across all
    # available devices; used when computing baselines for policy inputs.
    self._value_eval_model = value_model(mode='eval')
    self._value_eval_model.init(self._value_model_signature)
    self._value_eval_jit = tl.jit_forward(
        self._value_eval_model.pure_fn, fastmath.device_count(), do_mean=False)
    # Initialize policy training.
    super().__init__(task, **kwargs)
    # Initialize training of the value function.
    value_output_dir = kwargs.get('output_dir', None)
    if value_output_dir is not None:
      value_output_dir = os.path.join(value_output_dir, 'value')
      # If needed, create value_output_dir and missing parent directories.
      if not tf.io.gfile.isdir(value_output_dir):
        tf.io.gfile.makedirs(value_output_dir)
    self._value_inputs = data.inputs.Inputs(
        train_stream=lambda _: self.value_batches_stream())
    self._value_trainer = supervised.Trainer(
        model=value_model,
        optimizer=value_optimizer,
        lr_schedule=value_lr_schedule(),
        loss_fn=tl.L2Loss(),
        inputs=self._value_inputs,
        output_dir=value_output_dir,
        metrics={'value_loss': tl.L2Loss()})
@property
def _value_model_signature(self):
obs_sig = shapes.signature(self._task.observation_space)
target_sig = mask_sig = shapes.ShapeDtype(
shape=(1, 1, 1),
)
inputs_sig = (obs_sig.replace(shape=(1, 1) + obs_sig.shape),)
if self._q_value:
act_sig = shapes.signature(self._task.action_space)
inputs_sig += (act_sig.replace(shape=(1, 1) + act_sig.shape),)
return (*inputs_sig, target_sig, mask_sig)
@property
def _replay_epochs(self):
if self.on_policy:
assert self._n_replay_epochs == 1, (
'Non-unit replay buffer size only makes sense for off-policy '
'algorithms.'
)
return [-(ep + 1) for ep in range(self._n_replay_epochs)]
  def _run_value_model(self, observations, dist_inputs):
    """Runs the target value network on a batch of observations.

    Args:
      observations: batch of observations, leading axes (batch, time).
      dist_inputs: policy-distribution parameters for the batch, or None
        (then zeros of the policy-distribution input size are substituted).

    Returns:
      Tuple (values, actions, log_probs): value estimates multiplied back by
      `self._value_network_scale`; in Q-value mode, the actions used to query
      the Q-network and their log-probabilities under the current policy
      (both None otherwise).
    """
    if dist_inputs is None:
      dist_inputs = jnp.zeros(
          observations.shape[:2] + (self._policy_dist.n_inputs,)
      )
    actions = None
    if self._q_value:
      if self._sample_all_discrete_actions:
        # Since we want to sample all actions, start by creating their list.
        act = np.arange(self._vocab_size)
        # Now act is a vector [0, ..., vocab_size-1], but we'll need to tile it.
        # Add extra dimensions so it's the same dimensionality as dist_inputs.
        act = jnp.reshape(act, [-1] + [1] * (len(dist_inputs.shape) - 1))
        # Now act is [vocab_size, 1, ..., 1], dimensionality of dist_inputs.
      dist_inputs = jnp.broadcast_to(
          dist_inputs, (self._q_value_n_samples,) + dist_inputs.shape)
      if self._sample_all_discrete_actions:
        # Broadcast act against dist_inputs to get one copy per sample.
        actions = act + jnp.zeros(dist_inputs.shape[:-1], dtype=jnp.int32)
        actions = jnp.swapaxes(actions, 0, 1)
      # Swapping the n_samples and batch_size axes, so the input is split
      # between accelerators along the batch_size axis.
      dist_inputs = jnp.swapaxes(dist_inputs, 0, 1)
      if not self._sample_all_discrete_actions:
        actions = self._policy_dist.sample(dist_inputs)
      log_probs = self._policy_dist.log_prob(dist_inputs, actions)
      obs = observations
      # Insert a singleton n_samples axis after batch so obs broadcasts
      # against the sampled actions.
      obs = jnp.reshape(obs, [obs.shape[0], 1] + list(obs.shape[1:]))
      inputs = (obs, actions)
    else:
      log_probs = None
      inputs = (observations,)
    # Replicate target-network weights/state across accelerators and run the
    # jitted forward pass.
    n_devices = fastmath.device_count()
    weights = tl.for_n_devices(self._value_eval_model.weights, n_devices)
    state = tl.for_n_devices(self._value_eval_model.state, n_devices)
    rng = self._value_eval_model.rng
    values, _ = self._value_eval_jit(inputs, weights, state, rng)
    # Undo the target scaling applied during training.
    values *= self._value_network_scale
    values = jnp.squeeze(values, axis=-1)  # Remove the singleton depth dim.
    return (values, actions, log_probs)
def _aggregate_values(self, values, aggregate_max, act_log_probs):
if self._q_value:
if aggregate_max:
values = jnp.max(values, axis=1)
elif self._sample_all_discrete_actions:
values = jnp.sum(values * jnp.exp(act_log_probs), axis=1)
else:
values = jnp.mean(values, axis=1)
return np.array(values) # Move the values to CPU.
  def value_batches_stream(self):
    """Use the RLTask self._task to create inputs to the value model.

    Yields:
      Tuples (observations[, actions], scaled_target_returns, mask) used as
      supervised batches for value-network training. Targets are
      advantage-corrected returns (TD targets) divided by
      `self._value_network_scale`.
    """
    # Slices must be long enough to compute n-step advantages on top of the
    # policy slice length.
    max_slice_length = self._max_slice_length + self._added_policy_slice_length
    for np_trajectory in self._task.trajectory_batch_stream(
        self._value_batch_size,
        max_slice_length=max_slice_length,
        min_slice_length=(1 + self._added_policy_slice_length),
        margin=self._added_policy_slice_length,
        epochs=self._replay_epochs,
    ):
      (values, _, act_log_probs) = self._run_value_model(
          np_trajectory.observations, np_trajectory.dist_inputs
      )
      values = self._aggregate_values(
          values, self._q_value_aggregate_max, act_log_probs)
      # TODO(pkozakowski): Add some shape assertions and docs.
      # Calculate targets based on the advantages over the target network - this
      # allows TD learning for value networks.
      advantages = self._advantage_estimator(
          rewards=np_trajectory.rewards,
          returns=np_trajectory.returns,
          values=values,
          dones=np_trajectory.dones,
          gamma=self._task.gamma,
          n_extra_steps=self._added_policy_slice_length,
      )
      # Advantage estimation may shorten the time axis; trim everything else
      # to match.
      length = advantages.shape[1]
      values = values[:, :length]
      target_returns = values + advantages
      inputs = (np_trajectory.observations[:, :length],)
      if self._q_value:
        inputs += (np_trajectory.actions[:, :length],)
      # Insert an extra depth dimension, so the target shape is consistent with
      # the network output shape.
      yield (
          # Inputs: observations and maybe actions.
          *inputs,
          # Targets: computed returns.
          target_returns[:, :, None] / self._value_network_scale,
          # Mask to zero-out padding.
          np_trajectory.mask[:, :length, None],
      )
def policy_inputs(self, trajectory, values):
"""Create inputs to policy model from a TrajectoryNp and values.
Args:
trajectory: a TrajectoryNp, the trajectory to create inputs from
values: a numpy array: value function computed on trajectory
Returns:
a tuple of numpy arrays of the form (inputs, x1, x2, ...) that will be
passed to the policy model; policy model will compute outputs from
inputs and (outputs, x1, x2, ...) will be passed to self.policy_loss
which should be overridden accordingly.
"""
return NotImplementedError
  def policy_batches_stream(self):
    """Use the RLTask self._task to create inputs to the policy model.

    Yields:
      Whatever `self.policy_inputs` produces for each replayed trajectory
      batch, with mean-aggregated (never max-aggregated) value estimates.
    """
    # Maximum slice length for policy is max_slice_len + the added policy len.
    max_slice_length = self._max_slice_length + self._added_policy_slice_length
    for np_trajectory in self._task.trajectory_batch_stream(
        self._policy_batch_size,
        epochs=self._replay_epochs,
        max_slice_length=max_slice_length,
        margin=self._added_policy_slice_length,
        include_final_state=False):
      (values, _, act_log_probs) = self._run_value_model(
          np_trajectory.observations, np_trajectory.dist_inputs)
      # aggregate_max=False: policy training always uses the mean/expected
      # value, regardless of self._q_value_aggregate_max.
      values = self._aggregate_values(values, False, act_log_probs)
      # Shape checks to help debugging.
      if len(values.shape) != 2:
        raise ValueError('Values are expected to have shape ' +
                         '[batch_size, length], got: %s' % str(values.shape))
      if values.shape[0] != self._policy_batch_size:
        raise ValueError('Values first dimension should = policy batch size, ' +
                         '%d != %d' %(values.shape[0], self._policy_batch_size))
      yield self.policy_inputs(np_trajectory, values)
  def train_epoch(self):
    """Trains RL for one epoch.

    Runs value-network training first, then policy training, copying the
    first `self._n_shared_layers` layers' weights/state between the two
    trainers in both directions, and refreshing the target value network.
    """
    # Copy policy state accumulated during data collection to the trainer.
    self._policy_trainer.model_state = self._policy_collect_model.state
    # Copy policy weights and state to value trainer.
    if self._n_shared_layers > 0:
      _copy_model_weights_and_state(
          0, self._n_shared_layers, self._policy_trainer, self._value_trainer
      )
    # Update the target value network.
    self._value_eval_model.weights = self._value_trainer.model_weights
    self._value_eval_model.state = self._value_trainer.model_state
    # remaining_evals makes the epoch restart-safe: after a crash mid-epoch,
    # only the not-yet-done eval chunks are re-run.
    n_value_evals = rl_training.remaining_evals(
        self._value_trainer.step,
        self._epoch,
        self._value_train_steps_per_epoch,
        self._value_evals_per_epoch)
    for _ in range(n_value_evals):
      self._value_trainer.train_epoch(
          self._value_train_steps_per_epoch // self._value_evals_per_epoch,
          self._value_eval_steps,
      )
    # Copy value weights and state to policy trainer.
    # NOTE(review): this unconditional copy appears to defeat the
    # `should_copy_weights` guard below, which deliberately skips the copy
    # after a restart mid-policy-training (stopped_after_value) - by then the
    # shared layers have already been overwritten here. Confirm whether this
    # first copy is intentional or leftover duplication.
    if self._n_shared_layers > 0:
      _copy_model_weights_and_state(
          0, self._n_shared_layers, self._value_trainer, self._policy_trainer
      )
    n_policy_evals = rl_training.remaining_evals(
        self._policy_trainer.step,
        self._epoch,
        self._policy_train_steps_per_epoch,
        self._policy_evals_per_epoch)
    # Check if there was a restart after value training finishes and policy not.
    stopped_after_value = (n_value_evals == 0 and
                           n_policy_evals < self._policy_evals_per_epoch)
    should_copy_weights = self._n_shared_layers > 0 and not stopped_after_value
    if should_copy_weights:
      _copy_model_weights_and_state(
          0, self._n_shared_layers, self._value_trainer, self._policy_trainer
      )
    # Update the target value network.
    self._value_eval_model.weights = self._value_trainer.model_weights
    self._value_eval_model.state = self._value_trainer.model_state
    for _ in range(n_policy_evals):
      self._policy_trainer.train_epoch(
          self._policy_train_steps_per_epoch // self._policy_evals_per_epoch,
          self._policy_eval_steps,
      )
  def close(self):
    """Closes the value trainer, then the base (policy) agent."""
    self._value_trainer.close()
    super().close()
def _copy_model_weights_and_state(  # pylint: disable=invalid-name
    start, end, from_trainer, to_trainer, copy_optimizer_slots=False
):
  """Copy model weights[start:end] from from_trainer to to_trainer.

  Args:
    start: int, index of the first shared layer to copy.
    end: int, index one past the last shared layer to copy.
    from_trainer: Trainer to copy weights and state from.
    to_trainer: Trainer to copy weights and state into (mutated in place).
    copy_optimizer_slots: bool, whether to also copy the optimizer slots
      (e.g. gradient moments) of the shared layers.
  """
  from_weights = from_trainer.model_weights
  to_weights = list(to_trainer.model_weights)
  shared_weights = from_weights[start:end]
  to_weights[start:end] = shared_weights
  to_trainer.model_weights = to_weights
  from_state = from_trainer.model_state
  to_state = list(to_trainer.model_state)
  shared_state = from_state[start:end]
  to_state[start:end] = shared_state
  to_trainer.model_state = to_state
  if copy_optimizer_slots:
    # TODO(lukaszkaiser): make a nicer API in Trainer to support this.
    # Currently we use the hack below. Note [0] since that's the model w/o loss.
    # pylint: disable=protected-access
    from_slots = from_trainer._opt_state.slots[0][start:end]
    to_slots = to_trainer._opt_state.slots[0]
    # The lines below do to_slots[start:end] = from_slots, but on tuples.
    # Fix: from_slots is already sliced to [start:end] above, so slicing it
    # again (the original wrote `from_slots[start:end]`) selected the wrong
    # elements whenever start > 0.
    new_slots = to_slots[:start] + from_slots + to_slots[end:]
    new_slots = tuple([new_slots] + list(to_trainer._opt_state.slots[1:]))
    to_trainer._opt_state = to_trainer._opt_state._replace(slots=new_slots)
    # pylint: enable=protected-access
### Implementations of common actor-critic algorithms.
class AdvantageBasedActorCriticAgent(ActorCriticAgent):
  """Base class for advantage-based actor-critic algorithms.

  Subclasses provide `policy_loss_given_log_probs`; this class wires up
  advantage estimation/normalization and the shared policy-loss input
  pipeline (log_probs, advantages, old_log_probs, mask).
  """

  def __init__(
      self,
      task,
      advantage_estimator=rl_advantages.td_lambda,
      advantage_normalization=True,
      advantage_normalization_epsilon=1e-5,
      **kwargs
  ):
    """Initializes the agent.

    Args:
      task: RLTask to train on.
      advantage_estimator: advantage-estimation function from rl.advantages.
      advantage_normalization: bool, whether to standardize advantages.
      advantage_normalization_epsilon: float, added to the advantage std
        before dividing, for numerical stability.
      **kwargs: forwarded to ActorCriticAgent.
    """
    self._advantage_estimator = advantage_estimator
    self._advantage_normalization = advantage_normalization
    self._advantage_normalization_epsilon = advantage_normalization_epsilon
    super().__init__(task, **kwargs)

  def policy_inputs(self, trajectory, values):
    """Create inputs to policy model from a TrajectoryNp and values."""
    # How much TD to use is determined by the added policy slice length,
    # as the policy batches need to be this much longer to calculate TD.
    advantages = self._advantage_estimator(
        rewards=trajectory.rewards,
        returns=trajectory.returns,
        values=values,
        dones=trajectory.dones,
        gamma=self._task.gamma,
        n_extra_steps=self._added_policy_slice_length,
    )
    # Observations should be the same length as advantages - so if we are
    # using n_extra_steps, we need to trim the length to match.
    obs = trajectory.observations[:, :advantages.shape[1]]
    act = trajectory.actions[:, :advantages.shape[1]]
    mask = trajectory.mask[:, :advantages.shape[1]]  # Mask to zero-out padding.
    if trajectory.dist_inputs is not None:
      dist_inputs = trajectory.dist_inputs[:, :advantages.shape[1]]
    else:
      # No stored distribution parameters (e.g. the first evaluations);
      # substitute zeros of the right shape.
      dist_inputs = jnp.zeros(advantages.shape + (self._policy_dist.n_inputs,))
    # Shape checks to help debugging.
    if len(advantages.shape) != 2:
      raise ValueError('Advantages are expected to have shape ' +
                       '[batch_size, length], got: %s' % str(advantages.shape))
    if act.shape[0:2] != advantages.shape:
      raise ValueError('First 2 dimensions of actions should be the same as in '
                       'advantages, %s != %s' % (act.shape[0:2],
                                                 advantages.shape))
    if obs.shape[0:2] != advantages.shape:
      raise ValueError('First 2 dimensions of observations should be the same '
                       'as in advantages, %s != %s' % (obs.shape[0:2],
                                                       advantages.shape))
    if dist_inputs.shape[:2] != advantages.shape:
      raise ValueError('First 2 dimensions of dist_inputs should be the same '
                       'as in advantages, %s != %s' % (dist_inputs.shape[:2],
                                                       advantages.shape))
    if mask.shape != advantages.shape:
      raise ValueError('Mask and advantages shapes should be the same'
                       ', %s != %s' % (mask.shape, advantages.shape))
    return (obs, act, advantages, dist_inputs, mask)

  @property
  def policy_loss_given_log_probs(self):
    """Policy loss given action log-probabilities."""
    raise NotImplementedError

  def _preprocess_advantages(self, advantages):
    # Standardize advantages (zero mean, unit std) when enabled; epsilon
    # keeps the division stable for near-constant advantages.
    if self._advantage_normalization:
      advantages = (
          (advantages - jnp.mean(advantages)) /
          (jnp.std(advantages) + self._advantage_normalization_epsilon)
      )
    return advantages

  @property
  def policy_loss(self, **unused_kwargs):
    """Policy loss."""
    def LossInput(dist_inputs, actions, advantages, old_dist_inputs):  # pylint: disable=invalid-name
      """Calculates action log probabilities and normalizes advantages."""
      advantages = self._preprocess_advantages(advantages)
      log_probs = self._policy_dist.log_prob(dist_inputs, actions)
      old_log_probs = self._policy_dist.log_prob(old_dist_inputs, actions)
      return (log_probs, advantages, old_log_probs)
    return tl.Serial(
        tl.Fn('LossInput', LossInput, n_out=3),
        # Policy loss is expected to consume
        # (log_probs, advantages, old_log_probs, mask).
        self.policy_loss_given_log_probs,
    )

  @property
  def policy_metrics(self):
    # Extend the base metrics with advantage statistics.
    metrics = super().policy_metrics
    metrics.update({
        'advantage_mean': self.advantage_mean,
        'advantage_std': self.advantage_std,
    })
    return metrics

  @property
  def advantage_mean(self):
    # Metric layer: mean of the (unnormalized) advantages.
    return tl.Serial([
        # (dist_inputs, advantages, old_dist_inputs, mask)
        tl.Select([1]),  # Select just the advantages.
        tl.Fn('AdvantageMean', lambda x: jnp.mean(x)),  # pylint: disable=unnecessary-lambda
    ])

  @property
  def advantage_std(self):
    # Metric layer: std of the (unnormalized) advantages.
    return tl.Serial([
        # (dist_inputs, advantages, old_dist_inputs, mask)
        tl.Select([1]),  # Select just the advantages.
        tl.Fn('AdvantageStd', lambda x: jnp.std(x)),  # pylint: disable=unnecessary-lambda
    ])
class A2C(AdvantageBasedActorCriticAgent):
  """Trains policy and value models using the A2C algorithm."""

  on_policy = True

  def __init__(self, task, entropy_coeff=0.01, **kwargs):
    """Configures the A2C Trainer.

    Args:
      task: RLTask to train on.
      entropy_coeff: float, scale of the entropy regularization bonus.
      **kwargs: forwarded to AdvantageBasedActorCriticAgent.
    """
    self._entropy_coeff = entropy_coeff
    super().__init__(task, **kwargs)

  @property
  def policy_loss_given_log_probs(self):
    """Definition of the Advantage Actor Critic (A2C) loss."""
    # A2C is one of the most basic actor-critic RL algorithms.
    # TODO(henrykm) re-factor f into rl_layers and finally share code between
    # actor_critic.py and actor_critic_joint.py - requires change of inputs
    # in actor_critic_joint.py from dist_inputs to log_probs.
    def f(log_probs, advantages, old_log_probs, mask):
      del old_log_probs  # Not used in A2C.
      # log_probs of the shape float32[128,1]
      # advantages of the shape int32[128,1]
      # mask of the shape int32[128,1]
      if log_probs.shape != advantages.shape:
        raise ValueError('New log-probs and advantages shapes '
                         'should be the same, %s != %s' % (log_probs.shape,
                                                           advantages.shape))
      if log_probs.shape != mask.shape:
        raise ValueError('New log-probs and mask shapes should be the same'
                         ', %s != %s' % (log_probs.shape, mask.shape))
      # Masked mean of the policy-gradient objective, negated for
      # minimization.
      a2c_objective = -jnp.sum(log_probs * advantages * mask) / jnp.sum(mask)
      # NOTE(review): entropy() is fed log-probs here rather than
      # distribution parameters - confirm this matches the Distribution
      # entropy() contract used elsewhere (actor_critic_joint.py passes
      # dist_inputs).
      entropy_vec = self._policy_dist.entropy(log_probs) * self._entropy_coeff
      entropy_loss = jnp.mean(entropy_vec)
      # Subtracting entropy_loss encourages exploration.
      combined_loss = a2c_objective - entropy_loss
      return combined_loss
    return tl.Fn('A2CLoss', f)
class PPO(AdvantageBasedActorCriticAgent):
  """The Proximal Policy Optimization Algorithm aka PPO.

  Trains policy and value models using the PPO algorithm.
  """

  on_policy = True

  def __init__(self, task, epsilon=0.2, entropy_coeff=0.01, **kwargs):
    """Configures the PPO Trainer.

    Args:
      task: RLTask to train on.
      epsilon: float, clipping range for the probability ratio.
      entropy_coeff: float, scale of the entropy regularization bonus.
      **kwargs: forwarded to AdvantageBasedActorCriticAgent.
    """
    self._entropy_coeff = entropy_coeff
    self._epsilon = epsilon
    super().__init__(task, **kwargs)

  @property
  def policy_loss_given_log_probs(self):
    """Definition of the Proximal Policy Optimization loss."""
    def f(new_log_probs, advantages, old_log_probs, mask):
      # All arguments are (batch_size, length)-shaped arrays, e.g.
      # new_log_probs: float32[128,1], advantages/mask: int32[128,1].
      if new_log_probs.shape != advantages.shape:
        raise ValueError('New log-probs and advantages shapes '
                         'should be the same, %s != %s' % (new_log_probs.shape,
                                                           advantages.shape))
      if new_log_probs.shape != old_log_probs.shape:
        raise ValueError('New log-probs and old log-probs shapes '
                         'should be the same, %s != %s' % (new_log_probs.shape,
                                                           old_log_probs.shape))
      if new_log_probs.shape != mask.shape:
        raise ValueError('New log-probs and mask shapes should be the same'
                         ', %s != %s' % (new_log_probs.shape, mask.shape))
      # The ratio between new_probs and old_probs expressed
      # using log_probs and exponentiation.
      probs_ratio = jnp.exp(new_log_probs - old_log_probs)
      if advantages.shape != probs_ratio.shape:
        # Fix: the message previously talked about old log-probs; report the
        # quantities actually being compared.
        raise ValueError('Advantages and probability-ratio shapes '
                         'should be the same, %s != %s' % (advantages.shape,
                                                           probs_ratio.shape))
      unclipped_objective = probs_ratio * advantages
      clipped_objective = jnp.clip(probs_ratio,
                                   1 - self._epsilon,
                                   1 + self._epsilon) * advantages
      if unclipped_objective.shape != probs_ratio.shape:
        # Fix: the message previously named and printed clipped_objective
        # while the condition checks probs_ratio.
        raise ValueError('unclipped_objective and probs_ratio shapes '
                         'should be the same, %s != %s' % (
                             unclipped_objective.shape,
                             probs_ratio.shape))
      # Pessimistic (clipped) surrogate objective of PPO.
      ppo_objective = jnp.minimum(unclipped_objective, clipped_objective)
      if ppo_objective.shape != mask.shape:
        raise ValueError('ppo_objective and mask shapes '
                         'should be the same, %s != %s' % (
                             ppo_objective.shape,
                             mask.shape))
      # Masked mean, negated for minimization.
      ppo_loss = -jnp.sum(ppo_objective * mask) / jnp.sum(mask)
      # NOTE(review): entropy() is fed log-probs here rather than
      # distribution parameters - confirm against the Distribution entropy()
      # contract.
      entropy_vec = self._policy_dist.entropy(
          new_log_probs) * self._entropy_coeff
      entropy_loss = jnp.mean(entropy_vec)
      # Subtracting entropy_loss encourages exploration.
      combined_loss = ppo_loss - entropy_loss
      return combined_loss
    return tl.Fn('PPOLoss', f)
# AWR is an off-policy actor-critic RL algorithm.
def awr_weights(advantages, beta):
  """Exponential AWR weights: exp(advantages / beta)."""
  scaled_advantages = advantages / beta
  return jnp.exp(scaled_advantages)
# Helper functions for computing AWR metrics.
def awr_metrics(beta, preprocess_layer=None):
  """Builds mean/std/min/max metric layers over the AWR weights."""
  stat_fns = (
      ('mean', jnp.mean),
      ('std', jnp.std),
      ('min', jnp.min),
      ('max', jnp.max),
  )
  metrics = {}
  for (name, fn) in stat_fns:
    metrics['awr_weight_' + name] = awr_weight_stat(
        name, fn, beta, preprocess_layer)
  return metrics
def awr_weight_stat(stat_name, stat_fn, beta, preprocess_layer):
  """Layer computing `stat_fn` over the AWR weights of the advantages."""
  if preprocess_layer is None:
    # Default preprocessing: select just the advantages.
    preprocess = tl.Select([1])
  else:
    preprocess = preprocess_layer
  def weight_stat(x):
    return stat_fn(awr_weights(x, beta))
  return tl.Serial([
      preprocess,
      tl.Fn('AWRWeight' + stat_name.capitalize(), weight_stat),
  ])
def AWRLoss(beta, w_max):  # pylint: disable=invalid-name
  """Definition of the Advantage Weighted Regression (AWR) loss."""
  def loss_fn(log_probs, advantages, old_log_probs, mask):
    # Old log-probs are ignored: plain AWR does not reweight by the policy.
    del old_log_probs
    # Exponential advantage weights, clipped from above at w_max.
    weights = jnp.minimum(awr_weights(advantages, beta), w_max)
    # Weighted, masked negative log-likelihood.
    return -jnp.sum(log_probs * weights * mask) / jnp.sum(mask)
  return tl.Fn('AWRLoss', loss_fn)
class AWR(AdvantageBasedActorCriticAgent):
  """Advantage Weighted Regression (AWR) actor-critic trainer.

  An off-policy algorithm: the policy regresses towards its actions with
  exponentiated-advantage weights, clipped at `w_max`.
  """

  on_policy = False

  def __init__(self, task, beta=1.0, w_max=20.0, **kwargs):
    """Configures the AWR Trainer.

    Args:
      task: RLTask to train on.
      beta: float, temperature of the exponential advantage weights.
      w_max: float, upper clip for the advantage weights.
      **kwargs: forwarded to AdvantageBasedActorCriticAgent.
    """
    self._w_max = w_max
    self._beta = beta
    super().__init__(task, **kwargs)

  @property
  def policy_loss_given_log_probs(self):
    """The AWR policy-loss layer."""
    return AWRLoss(beta=self._beta, w_max=self._w_max)  # pylint: disable=no-value-for-parameter

  @property
  def policy_metrics(self):
    """Base policy metrics extended with AWR-weight statistics."""
    metrics = super().policy_metrics
    metrics.update(awr_metrics(self._beta))
    return metrics
def SamplingAWRLoss(beta, w_max, reweight=False, sampled_all_discrete=False):  # pylint: disable=invalid-name
  """Definition of the Advantage Weighted Regression (AWR) loss."""
  def loss_fn(log_probs, advantages, old_log_probs, mask):
    if reweight:
      # Use new policy weights for sampled actions instead.
      mask *= jnp.exp(fastmath.stop_gradient(log_probs) - old_log_probs)
    if sampled_all_discrete:
      # Actions were sampled uniformly; weight them.
      mask *= jnp.exp(old_log_probs)
    # Exponential advantage weights, clipped from above at w_max.
    weights = jnp.minimum(awr_weights(advantages, beta), w_max)
    # Weighted, masked negative log-likelihood.
    return -jnp.sum(log_probs * weights * mask) / jnp.sum(mask)
  return tl.Fn('SamplingAWRLoss', loss_fn)
class SamplingAWR(AdvantageBasedActorCriticAgent):
  """Trains policy and value models using Sampling AWR.

  Like AWR, but estimates advantages by sampling several actions per state
  from the current policy and querying a Q-network (q_value mode is forced
  on in __init__).
  """

  on_policy = False

  def __init__(self, task, beta=1.0, w_max=20.0, reweight=False, **kwargs):
    """Configures the AWR Trainer.

    Args:
      task: RLTask to train on.
      beta: float, temperature of the exponential advantage weights.
      w_max: float, upper clip for the advantage weights.
      reweight: bool, whether to reweight sampled actions by the new policy.
      **kwargs: forwarded to AdvantageBasedActorCriticAgent (with
        q_value=True).
    """
    self._beta = beta
    self._w_max = w_max
    self._reweight = reweight
    super().__init__(task, q_value=True, **kwargs)

  def _policy_inputs_to_advantages(self, preprocess):
    """A layer that computes advantages from policy inputs."""
    def fn(dist_inputs, actions, q_values, act_log_probs, mask):
      del dist_inputs, actions, mask
      # (batch_size, n_samples, ...) -> (n_samples, batch_size, ...)
      q_values = jnp.swapaxes(q_values, 0, 1)
      act_log_probs = jnp.swapaxes(act_log_probs, 0, 1)
      if self._sample_all_discrete_actions:
        # Expectation over all actions, weighted by policy probabilities.
        values = jnp.sum(q_values * jnp.exp(act_log_probs), axis=0)
      else:
        values = jnp.mean(q_values, axis=0)
      advantages = q_values - values  # Broadcasting values over n_samples
      if preprocess:
        advantages = self._preprocess_advantages(advantages)
      return advantages
    return tl.Fn('PolicyInputsToAdvantages', fn)

  @property
  def policy_metrics(self):
    """Policy metrics: loss, advantage stats and AWR-weight stats."""
    metrics = {
        'policy_loss': self.policy_loss,
        'advantage_mean': tl.Serial(
            self._policy_inputs_to_advantages(False),
            tl.Fn('Mean', lambda x: jnp.mean(x))  # pylint: disable=unnecessary-lambda
        ),
        'advantage_std': tl.Serial(
            self._policy_inputs_to_advantages(False),
            tl.Fn('Std', lambda x: jnp.std(x))  # pylint: disable=unnecessary-lambda
        )
    }
    metrics.update(awr_metrics(
        self._beta, preprocess_layer=self._policy_inputs_to_advantages(True)))
    return metrics

  @property
  def policy_loss(self, **unused_kwargs):
    """Policy loss."""
    def LossInput(dist_inputs, actions, q_values, act_log_probs, mask):  # pylint: disable=invalid-name
      """Calculates action log probabilities and normalizes advantages."""
      # (batch_size, n_samples, ...) -> (n_samples, batch_size, ...)
      q_values = jnp.swapaxes(q_values, 0, 1)
      mask = jnp.swapaxes(mask, 0, 1)
      actions = jnp.swapaxes(actions, 0, 1)
      act_log_probs = jnp.swapaxes(act_log_probs, 0, 1)
      # TODO(pkozakowski,lukaszkaiser): Try max here, or reweighting?
      if self._sample_all_discrete_actions:
        values = jnp.sum(q_values * jnp.exp(act_log_probs), axis=0)
      else:
        values = jnp.mean(q_values, axis=0)
      advantages = q_values - values  # Broadcasting values over n_samples
      advantages = self._preprocess_advantages(advantages)
      # Broadcast inputs and calculate log-probs
      dist_inputs = jnp.broadcast_to(
          dist_inputs, (self._q_value_n_samples,) + dist_inputs.shape)
      log_probs = self._policy_dist.log_prob(dist_inputs, actions)
      return (log_probs, advantages, act_log_probs, mask)
    return tl.Serial(
        tl.Fn('LossInput', LossInput, n_out=4),
        # Policy loss is expected to consume
        # (log_probs, advantages, old_log_probs, mask).
        SamplingAWRLoss(
            beta=self._beta, w_max=self._w_max, reweight=self._reweight,
            sampled_all_discrete=self._sample_all_discrete_actions)
    )

  def policy_batches_stream(self):
    """Use the RLTask self._task to create inputs to the policy model."""
    # For now TD-0 estimation of the value. TODO(pkozakowski): Support others?
    for np_trajectory in self._task.trajectory_batch_stream(
        self._policy_batch_size,
        epochs=self._replay_epochs,
        max_slice_length=self._max_slice_length,
        include_final_state=False,
    ):
      (q_values, actions, act_log_probs) = self._run_value_model(
          np_trajectory.observations, np_trajectory.dist_inputs)
      shapes.assert_same_shape(q_values, act_log_probs)
      # q_values shape: (batch_size, n_samples, length)
      if len(q_values.shape) != 3:
        raise ValueError('Q-values are expected to have shape [batch_size, ' +
                         'n_samples, length], got: %s' % str(q_values.shape))
      if q_values.shape[1] != self._q_value_n_samples:
        raise ValueError('Q-values dimension 1 should = n_samples, %d != %d'
                         % (q_values.shape[1], self._q_value_n_samples))
      if q_values.shape[0] != self._policy_batch_size:
        # Fix: the message used to format q_values.shape[1] although the
        # condition checks dimension 0.
        raise ValueError('Q-values dimension 0 should = policy batch size, ' +
                         '%d!=%d' %(q_values.shape[0], self._policy_batch_size))
      mask = np_trajectory.mask
      # Insert a singleton n_samples axis and broadcast the mask to match
      # the Q-values.
      mask = np.reshape(mask, [mask.shape[0], 1] + list(mask.shape[1:]))
      mask = jnp.broadcast_to(mask, q_values.shape)
      shapes.assert_same_shape(mask, q_values)
      yield (np_trajectory.observations, actions, q_values, act_log_probs, mask)
| 41.238971 | 109 | 0.673799 |
import functools
import os
import gym
import numpy as np
import tensorflow as tf
from trax import data
from trax import fastmath
from trax import layers as tl
from trax import shapes
from trax import supervised
from trax.fastmath import numpy as jnp
from trax.rl import advantages as rl_advantages
from trax.rl import training as rl_training
from trax.supervised import lr_schedules as lr
class ActorCriticAgent(rl_training.PolicyAgent):
on_policy = None
def __init__(self, task,
value_model=None,
value_optimizer=None,
value_lr_schedule=lr.multifactor,
value_batch_size=64,
value_train_steps_per_epoch=500,
value_evals_per_epoch=1,
value_eval_steps=1,
n_shared_layers=0,
added_policy_slice_length=0,
n_replay_epochs=1,
scale_value_targets=False,
q_value=False,
q_value_aggregate_max=True,
q_value_n_samples=1,
**kwargs):
self._n_shared_layers = n_shared_layers
self._value_batch_size = value_batch_size
self._value_train_steps_per_epoch = value_train_steps_per_epoch
self._value_evals_per_epoch = value_evals_per_epoch
self._value_eval_steps = value_eval_steps
self._task = task
self._max_slice_length = kwargs.get('max_slice_length', 1)
self._added_policy_slice_length = added_policy_slice_length
self._n_replay_epochs = n_replay_epochs
task.set_n_replay_epochs(n_replay_epochs)
if scale_value_targets:
self._value_network_scale = 1 / (1 - self._task.gamma)
else:
self._value_network_scale = 1
self._q_value = q_value
self._q_value_aggregate_max = q_value_aggregate_max
self._q_value_n_samples = q_value_n_samples
is_discrete = isinstance(self._task.action_space, gym.spaces.Discrete)
self._is_discrete = is_discrete
self._vocab_size = None
self._sample_all_discrete_actions = False
if q_value and is_discrete:
self._vocab_size = self.task.action_space.n
if self._q_value_n_samples == self._vocab_size:
# TODO(lukaszkaiser): set this explicitly once it's in AWR Trainer.
self._sample_all_discrete_actions = True
if q_value:
value_model = functools.partial(value_model,
inject_actions=True,
is_discrete=is_discrete,
vocab_size=self._vocab_size)
self._value_eval_model = value_model(mode='eval')
self._value_eval_model.init(self._value_model_signature)
self._value_eval_jit = tl.jit_forward(
self._value_eval_model.pure_fn, fastmath.device_count(), do_mean=False)
super().__init__(task, **kwargs)
value_output_dir = kwargs.get('output_dir', None)
if value_output_dir is not None:
value_output_dir = os.path.join(value_output_dir, 'value')
if not tf.io.gfile.isdir(value_output_dir):
tf.io.gfile.makedirs(value_output_dir)
self._value_inputs = data.inputs.Inputs(
train_stream=lambda _: self.value_batches_stream())
self._value_trainer = supervised.Trainer(
model=value_model,
optimizer=value_optimizer,
lr_schedule=value_lr_schedule(),
loss_fn=tl.L2Loss(),
inputs=self._value_inputs,
output_dir=value_output_dir,
metrics={'value_loss': tl.L2Loss()})
@property
def _value_model_signature(self):
obs_sig = shapes.signature(self._task.observation_space)
target_sig = mask_sig = shapes.ShapeDtype(
shape=(1, 1, 1),
)
inputs_sig = (obs_sig.replace(shape=(1, 1) + obs_sig.shape),)
if self._q_value:
act_sig = shapes.signature(self._task.action_space)
inputs_sig += (act_sig.replace(shape=(1, 1) + act_sig.shape),)
return (*inputs_sig, target_sig, mask_sig)
@property
def _replay_epochs(self):
if self.on_policy:
assert self._n_replay_epochs == 1, (
'Non-unit replay buffer size only makes sense for off-policy '
'algorithms.'
)
return [-(ep + 1) for ep in range(self._n_replay_epochs)]
def _run_value_model(self, observations, dist_inputs):
if dist_inputs is None:
dist_inputs = jnp.zeros(
observations.shape[:2] + (self._policy_dist.n_inputs,)
)
actions = None
if self._q_value:
if self._sample_all_discrete_actions:
act = np.arange(self._vocab_size)
# Add extra dimenstions so it's the same dimensionality as dist_inputs.
act = jnp.reshape(act, [-1] + [1] * (len(dist_inputs.shape) - 1))
dist_inputs = jnp.broadcast_to(
dist_inputs, (self._q_value_n_samples,) + dist_inputs.shape)
if self._sample_all_discrete_actions:
actions = act + jnp.zeros(dist_inputs.shape[:-1], dtype=jnp.int32)
actions = jnp.swapaxes(actions, 0, 1)
dist_inputs = jnp.swapaxes(dist_inputs, 0, 1)
if not self._sample_all_discrete_actions:
actions = self._policy_dist.sample(dist_inputs)
log_probs = self._policy_dist.log_prob(dist_inputs, actions)
obs = observations
obs = jnp.reshape(obs, [obs.shape[0], 1] + list(obs.shape[1:]))
inputs = (obs, actions)
else:
log_probs = None
inputs = (observations,)
n_devices = fastmath.device_count()
weights = tl.for_n_devices(self._value_eval_model.weights, n_devices)
state = tl.for_n_devices(self._value_eval_model.state, n_devices)
rng = self._value_eval_model.rng
values, _ = self._value_eval_jit(inputs, weights, state, rng)
values *= self._value_network_scale
values = jnp.squeeze(values, axis=-1)
return (values, actions, log_probs)
def _aggregate_values(self, values, aggregate_max, act_log_probs):
if self._q_value:
if aggregate_max:
values = jnp.max(values, axis=1)
elif self._sample_all_discrete_actions:
values = jnp.sum(values * jnp.exp(act_log_probs), axis=1)
else:
values = jnp.mean(values, axis=1)
return np.array(values)
def value_batches_stream(self):
max_slice_length = self._max_slice_length + self._added_policy_slice_length
for np_trajectory in self._task.trajectory_batch_stream(
self._value_batch_size,
max_slice_length=max_slice_length,
min_slice_length=(1 + self._added_policy_slice_length),
margin=self._added_policy_slice_length,
epochs=self._replay_epochs,
):
(values, _, act_log_probs) = self._run_value_model(
np_trajectory.observations, np_trajectory.dist_inputs
)
values = self._aggregate_values(
values, self._q_value_aggregate_max, act_log_probs)
advantages = self._advantage_estimator(
rewards=np_trajectory.rewards,
returns=np_trajectory.returns,
values=values,
dones=np_trajectory.dones,
gamma=self._task.gamma,
n_extra_steps=self._added_policy_slice_length,
)
length = advantages.shape[1]
values = values[:, :length]
target_returns = values + advantages
inputs = (np_trajectory.observations[:, :length],)
if self._q_value:
inputs += (np_trajectory.actions[:, :length],)
yield (
*inputs,
target_returns[:, :, None] / self._value_network_scale,
np_trajectory.mask[:, :length, None],
)
def policy_inputs(self, trajectory, values):
return NotImplementedError
def policy_batches_stream(self):
max_slice_length = self._max_slice_length + self._added_policy_slice_length
for np_trajectory in self._task.trajectory_batch_stream(
self._policy_batch_size,
epochs=self._replay_epochs,
max_slice_length=max_slice_length,
margin=self._added_policy_slice_length,
include_final_state=False):
(values, _, act_log_probs) = self._run_value_model(
np_trajectory.observations, np_trajectory.dist_inputs)
values = self._aggregate_values(values, False, act_log_probs)
if len(values.shape) != 2:
raise ValueError('Values are expected to have shape ' +
'[batch_size, length], got: %s' % str(values.shape))
if values.shape[0] != self._policy_batch_size:
raise ValueError('Values first dimension should = policy batch size, ' +
'%d != %d' %(values.shape[0], self._policy_batch_size))
yield self.policy_inputs(np_trajectory, values)
def train_epoch(self):
self._policy_trainer.model_state = self._policy_collect_model.state
if self._n_shared_layers > 0:
_copy_model_weights_and_state(
0, self._n_shared_layers, self._policy_trainer, self._value_trainer
)
self._value_eval_model.weights = self._value_trainer.model_weights
self._value_eval_model.state = self._value_trainer.model_state
n_value_evals = rl_training.remaining_evals(
self._value_trainer.step,
self._epoch,
self._value_train_steps_per_epoch,
self._value_evals_per_epoch)
for _ in range(n_value_evals):
self._value_trainer.train_epoch(
self._value_train_steps_per_epoch // self._value_evals_per_epoch,
self._value_eval_steps,
)
if self._n_shared_layers > 0:
_copy_model_weights_and_state(
0, self._n_shared_layers, self._value_trainer, self._policy_trainer
)
n_policy_evals = rl_training.remaining_evals(
self._policy_trainer.step,
self._epoch,
self._policy_train_steps_per_epoch,
self._policy_evals_per_epoch)
stopped_after_value = (n_value_evals == 0 and
n_policy_evals < self._policy_evals_per_epoch)
should_copy_weights = self._n_shared_layers > 0 and not stopped_after_value
if should_copy_weights:
_copy_model_weights_and_state(
0, self._n_shared_layers, self._value_trainer, self._policy_trainer
)
self._value_eval_model.weights = self._value_trainer.model_weights
self._value_eval_model.state = self._value_trainer.model_state
for _ in range(n_policy_evals):
self._policy_trainer.train_epoch(
self._policy_train_steps_per_epoch // self._policy_evals_per_epoch,
self._policy_eval_steps,
)
def close(self):
self._value_trainer.close()
super().close()
def _copy_model_weights_and_state(
start, end, from_trainer, to_trainer, copy_optimizer_slots=False
):
from_weights = from_trainer.model_weights
to_weights = list(to_trainer.model_weights)
shared_weights = from_weights[start:end]
to_weights[start:end] = shared_weights
to_trainer.model_weights = to_weights
from_state = from_trainer.model_state
to_state = list(to_trainer.model_state)
shared_state = from_state[start:end]
to_state[start:end] = shared_state
to_trainer.model_state = to_state
if copy_optimizer_slots:
# pylint: disable=protected-access
from_slots = from_trainer._opt_state.slots[0][start:end]
to_slots = to_trainer._opt_state.slots[0]
# The lines below do to_slots[start:end] = from_slots, but on tuples.
new_slots = to_slots[:start] + from_slots[start:end] + to_slots[end:]
new_slots = tuple([new_slots] + list(to_trainer._opt_state.slots[1:]))
to_trainer._opt_state = to_trainer._opt_state._replace(slots=new_slots)
# pylint: enable=protected-access
### Implementations of common actor-critic algorithms.
class AdvantageBasedActorCriticAgent(ActorCriticAgent):
    """Base class for advantage-based actor-critic agents (A2C, PPO, AWR).

    Handles advantage estimation, optional advantage normalization and the
    shared policy-input pipeline; subclasses provide the concrete loss via
    ``policy_loss_given_log_probs``.
    """

    def __init__(
        self,
        task,
        advantage_estimator=rl_advantages.td_lambda,
        advantage_normalization=True,
        advantage_normalization_epsilon=1e-5,
        **kwargs
    ):
        """Configures the agent.

        Args:
          task: RL task to train on.
          advantage_estimator: Callable estimating advantages from rewards,
            returns, values, dones, gamma and n_extra_steps; defaults to
            TD(lambda).
          advantage_normalization: If True, standardize advantages to zero
            mean / unit variance before computing the policy loss.
          advantage_normalization_epsilon: Stabilizer added to the advantage
            standard deviation during normalization.
          **kwargs: Forwarded to ``ActorCriticAgent.__init__``.
        """
        self._advantage_estimator = advantage_estimator
        self._advantage_normalization = advantage_normalization
        self._advantage_normalization_epsilon = advantage_normalization_epsilon
        super().__init__(task, **kwargs)

    def policy_inputs(self, trajectory, values):
        """Builds policy-model inputs from a trajectory batch and values.

        Returns a tuple (observations, actions, advantages, dist_inputs,
        mask), all trimmed to the advantage length and shape-checked.
        """
        # How much TD to use is determined by the added policy slice length,
        # as the policy batches need to be this much longer to calculate TD.
        advantages = self._advantage_estimator(
            rewards=trajectory.rewards,
            returns=trajectory.returns,
            values=values,
            dones=trajectory.dones,
            gamma=self._task.gamma,
            n_extra_steps=self._added_policy_slice_length,
        )
        # Observations should be the same length as advantages - so if we are
        # using n_extra_steps, we need to trim the length to match.
        obs = trajectory.observations[:, :advantages.shape[1]]
        act = trajectory.actions[:, :advantages.shape[1]]
        mask = trajectory.mask[:, :advantages.shape[1]]  # Mask to zero-out padding.
        if trajectory.dist_inputs is not None:
            dist_inputs = trajectory.dist_inputs[:, :advantages.shape[1]]
        else:
            # No behavior-policy inputs recorded; use zeros of the right shape.
            dist_inputs = jnp.zeros(advantages.shape + (self._policy_dist.n_inputs,))
        # Shape checks to help debugging.
        if len(advantages.shape) != 2:
            raise ValueError('Advantages are expected to have shape ' +
                             '[batch_size, length], got: %s' % str(advantages.shape))
        if act.shape[0:2] != advantages.shape:
            raise ValueError('First 2 dimensions of actions should be the same as in '
                             'advantages, %s != %s' % (act.shape[0:2],
                                                       advantages.shape))
        if obs.shape[0:2] != advantages.shape:
            raise ValueError('First 2 dimensions of observations should be the same '
                             'as in advantages, %s != %s' % (obs.shape[0:2],
                                                             advantages.shape))
        if dist_inputs.shape[:2] != advantages.shape:
            raise ValueError('First 2 dimensions of dist_inputs should be the same '
                             'as in advantages, %s != %s' % (dist_inputs.shape[:2],
                                                             advantages.shape))
        if mask.shape != advantages.shape:
            raise ValueError('Mask and advantages shapes should be the same'
                             ', %s != %s' % (mask.shape, advantages.shape))
        return (obs, act, advantages, dist_inputs, mask)

    @property
    def policy_loss_given_log_probs(self):
        """Policy loss layer consuming (log_probs, advantages, old_log_probs,
        mask); must be provided by subclasses."""
        raise NotImplementedError

    def _preprocess_advantages(self, advantages):
        # Standardize advantages to zero mean / unit variance when enabled;
        # the epsilon prevents division by a near-zero std.
        if self._advantage_normalization:
            advantages = (
                (advantages - jnp.mean(advantages)) /
                (jnp.std(advantages) + self._advantage_normalization_epsilon)
            )
        return advantages

    @property
    def policy_loss(self, **unused_kwargs):
        """Policy loss layer: computes action log-probs and normalized
        advantages, then delegates to the subclass-specific loss."""
        def LossInput(dist_inputs, actions, advantages, old_dist_inputs):  # pylint: disable=invalid-name
            advantages = self._preprocess_advantages(advantages)
            log_probs = self._policy_dist.log_prob(dist_inputs, actions)
            old_log_probs = self._policy_dist.log_prob(old_dist_inputs, actions)
            return (log_probs, advantages, old_log_probs)
        return tl.Serial(
            tl.Fn('LossInput', LossInput, n_out=3),
            # Policy loss is expected to consume
            # (log_probs, advantages, old_log_probs, mask).
            self.policy_loss_given_log_probs,
        )

    @property
    def policy_metrics(self):
        """Base-class metrics plus raw advantage mean/std."""
        metrics = super().policy_metrics
        metrics.update({
            'advantage_mean': self.advantage_mean,
            'advantage_std': self.advantage_std,
        })
        return metrics

    @property
    def advantage_mean(self):
        """Metric layer: mean of the (unnormalized) advantages."""
        return tl.Serial([
            # (dist_inputs, advantages, old_dist_inputs, mask)
            tl.Select([1]),  # Select just the advantages.
            tl.Fn('AdvantageMean', lambda x: jnp.mean(x)),  # pylint: disable=unnecessary-lambda
        ])

    @property
    def advantage_std(self):
        """Metric layer: standard deviation of the (unnormalized) advantages."""
        return tl.Serial([
            # (dist_inputs, advantages, old_dist_inputs, mask)
            tl.Select([1]),  # Select just the advantages.
            tl.Fn('AdvantageStd', lambda x: jnp.std(x)),  # pylint: disable=unnecessary-lambda
        ])
class A2C(AdvantageBasedActorCriticAgent):
    """Advantage Actor-Critic (on-policy)."""

    on_policy = True

    def __init__(self, task, entropy_coeff=0.01, **kwargs):
        """Configures the A2C agent.

        Args:
          task: RL task to train on.
          entropy_coeff: Coefficient of the entropy bonus subtracted from the
            objective (encourages exploration).
          **kwargs: Forwarded to ``AdvantageBasedActorCriticAgent.__init__``.
        """
        self._entropy_coeff = entropy_coeff
        super().__init__(task, **kwargs)

    @property
    def policy_loss_given_log_probs(self):
        """Definition of the Advantage Actor-Critic (A2C) loss."""
        # A2C is one of the most basic actor-critic RL algorithms.
        # TODO(henrykm) re-factor f into rl_layers and finally share code between
        # actor_critic.py and actor_critic_joint.py - requires change of inputs
        # in actor_critic_joint.py from dist_inputs to log_probs.
        def f(log_probs, advantages, old_log_probs, mask):
            del old_log_probs  # Not used in A2C.
            # log_probs of the shape float32[128,1]
            # advantages of the shape int32[128,1]
            # mask of the shape int32[128,1]
            if log_probs.shape != advantages.shape:
                raise ValueError('New log-probs and advantages shapes '
                                 'should be the same, %s != %s' % (log_probs.shape,
                                                                   advantages.shape))
            if log_probs.shape != mask.shape:
                raise ValueError('New log-probs and mask shapes should be the same'
                                 ', %s != %s' % (log_probs.shape, mask.shape))
            # Policy-gradient objective: masked mean of log_prob * advantage.
            a2c_objective = -jnp.sum(log_probs * advantages * mask) / jnp.sum(mask)
            # NOTE(review): entropy is computed from log_probs rather than
            # dist_inputs - confirm this matches the distribution API.
            entropy_vec = self._policy_dist.entropy(log_probs) * self._entropy_coeff
            entropy_loss = jnp.mean(entropy_vec)
            # Subtracting the entropy term encourages exploration.
            combined_loss = a2c_objective - entropy_loss
            return combined_loss
        return tl.Fn('A2CLoss', f)
class PPO(AdvantageBasedActorCriticAgent):
    """Proximal Policy Optimization (on-policy).

    Uses the clipped surrogate objective with an entropy bonus.
    """

    on_policy = True

    def __init__(self, task, epsilon=0.2, entropy_coeff=0.01, **kwargs):
        """Configures the PPO agent.

        Args:
          task: RL task to train on.
          epsilon: Clipping range for the new/old probability ratio.
          entropy_coeff: Coefficient of the entropy bonus.
          **kwargs: Forwarded to ``AdvantageBasedActorCriticAgent.__init__``.
        """
        self._entropy_coeff = entropy_coeff
        self._epsilon = epsilon
        super().__init__(task, **kwargs)

    @property
    def policy_loss_given_log_probs(self):
        """Definition of the Proximal Policy Optimization loss."""
        def f(new_log_probs, advantages, old_log_probs, mask):
            # new_log_probs of the shape float32[128,1]
            # advantages of the shape int32[128,1]
            # old_log_probs of the shape int32[128,1]
            # mask of the shape int32[128,1]
            if new_log_probs.shape != advantages.shape:
                raise ValueError('New log-probs and advantages shapes '
                                 'should be the same, %s != %s' % (new_log_probs.shape,
                                                                   advantages.shape))
            if new_log_probs.shape != old_log_probs.shape:
                raise ValueError('New log-probs and old log-probs shapes '
                                 'should be the same, %s != %s' % (new_log_probs.shape,
                                                                   old_log_probs.shape))
            if new_log_probs.shape != mask.shape:
                raise ValueError('New log-probs and mask shapes should be the same'
                                 ', %s != %s' % (new_log_probs.shape, mask.shape))
            # The ratio between new_probs and old_probs expressed
            # using log_probs and exponentiation.
            probs_ratio = jnp.exp(new_log_probs - old_log_probs)
            if advantages.shape != probs_ratio.shape:
                raise ValueError('New log-probs and old log probs shapes '
                                 'should be the same, %s != %s' % (advantages.shape,
                                                                   probs_ratio.shape))
            unclipped_objective = probs_ratio * advantages
            clipped_objective = jnp.clip(probs_ratio,
                                         1 - self._epsilon,
                                         1 + self._epsilon) * advantages
            if unclipped_objective.shape != probs_ratio.shape:
                raise ValueError('unclipped_objective and clipped_objective shapes '
                                 'should be the same, %s != %s' % (
                                     unclipped_objective.shape,
                                     clipped_objective.shape))
            # PPO's pessimistic bound: elementwise minimum of the two objectives.
            ppo_objective = jnp.minimum(unclipped_objective, clipped_objective)
            if ppo_objective.shape != mask.shape:
                raise ValueError('ppo_objective and mask shapes '
                                 'should be the same, %s != %s' % (
                                     ppo_objective.shape,
                                     mask.shape))
            ppo_loss = -jnp.sum(ppo_objective * mask) / jnp.sum(mask)
            # NOTE(review): entropy is computed from new_log_probs rather than
            # dist_inputs - confirm this matches the distribution API.
            entropy_vec = self._policy_dist.entropy(
                new_log_probs) * self._entropy_coeff
            entropy_loss = jnp.mean(entropy_vec)
            combined_loss = ppo_loss - entropy_loss
            return combined_loss
        return tl.Fn('PPOLoss', f)
# AWR is an off-policy actor-critic RL algorithm.
def awr_weights(advantages, beta):
    """Return exponential AWR weights: exp(advantages / beta)."""
    scaled_advantages = advantages / beta
    return jnp.exp(scaled_advantages)
# Helper functions for computing AWR metrics.
def awr_metrics(beta, preprocess_layer=None):
    """Build metric layers for AWR-weight statistics (mean/std/min/max)."""
    stat_fns = {
        'mean': jnp.mean,
        'std': jnp.std,
        'min': jnp.min,
        'max': jnp.max,
    }
    metrics = {}
    for stat_name, stat_fn in stat_fns.items():
        key = 'awr_weight_' + stat_name
        metrics[key] = awr_weight_stat(stat_name, stat_fn, beta, preprocess_layer)
    return metrics
def awr_weight_stat(stat_name, stat_fn, beta, preprocess_layer):
    """Layer computing one statistic (``stat_fn``) of the AWR weights."""
    if preprocess_layer is None:
        # Default preprocessing: select just the advantages from the inputs.
        preprocess = tl.Select([1])
    else:
        preprocess = preprocess_layer
    layer_name = 'AWRWeight' + stat_name.capitalize()

    def compute_stat(x):
        return stat_fn(awr_weights(x, beta))

    return tl.Serial([preprocess, tl.Fn(layer_name, compute_stat)])
def AWRLoss(beta, w_max):  # pylint: disable=invalid-name
    """Definition of the Advantage Weighted Regression (AWR) loss."""
    def f(log_probs, advantages, old_log_probs, mask):
        del old_log_probs  # AWR does not use the behavior-policy log-probs.
        # Exponential advantage weights, clipped from above at w_max.
        clipped_weights = jnp.minimum(awr_weights(advantages, beta), w_max)
        weighted_log_probs = log_probs * clipped_weights * mask
        return -jnp.sum(weighted_log_probs) / jnp.sum(mask)
    return tl.Fn('AWRLoss', f)
class AWR(AdvantageBasedActorCriticAgent):
    """Trains policy and value models with Advantage Weighted Regression."""

    on_policy = False

    def __init__(self, task, beta=1.0, w_max=20.0, **kwargs):
        """Configures AWR with temperature ``beta`` and weight cap ``w_max``."""
        self._w_max = w_max
        self._beta = beta
        super().__init__(task, **kwargs)

    @property
    def policy_loss_given_log_probs(self):
        """Policy loss: the AWR loss parameterized by beta and w_max."""
        return AWRLoss(beta=self._beta, w_max=self._w_max)  # pylint: disable=no-value-for-parameter

    @property
    def policy_metrics(self):
        """Base metrics plus AWR-weight statistics."""
        metrics = super().policy_metrics
        awr_stats = awr_metrics(self._beta)
        metrics.update(awr_stats)
        return metrics
def SamplingAWRLoss(beta, w_max, reweight=False, sampled_all_discrete=False):  # pylint: disable=invalid-name
    """Definition of the sampling-based AWR loss.

    Args:
      beta: Temperature of the exponential advantage weights.
      w_max: Upper clip for the advantage weights.
      reweight: If True, reweight the mask by exp(new_log_probs -
        old_log_probs); the gradient is stopped through the new log-probs, so
        this only rescales sample importance.
      sampled_all_discrete: If True, actions were sampled uniformly over the
        discrete action set, so each sample is additionally weighted by its
        probability under the old policy.
    """
    def f(log_probs, advantages, old_log_probs, mask):
        if reweight:  # Use new policy weights for sampled actions instead.
            mask *= jnp.exp(fastmath.stop_gradient(log_probs) - old_log_probs)
        if sampled_all_discrete:  # Actions were sampled uniformly; weight them.
            mask *= jnp.exp(old_log_probs)
        weights = jnp.minimum(awr_weights(advantages, beta), w_max)
        return -jnp.sum(log_probs * weights * mask) / jnp.sum(mask)
    return tl.Fn('SamplingAWRLoss', f)
class SamplingAWR(AdvantageBasedActorCriticAgent):
    """AWR variant that estimates advantages by sampling actions (off-policy).

    Q-values for several sampled actions form the baseline: their mean, or
    their probability-weighted sum when all discrete actions are enumerated.
    """

    on_policy = False

    def __init__(self, task, beta=1.0, w_max=20.0, reweight=False, **kwargs):
        """Configures the agent.

        Args:
          task: RL task to train on.
          beta: Temperature of the exponential advantage weights.
          w_max: Upper clip for the advantage weights.
          reweight: Whether to importance-reweight sampled actions by the new
            policy (see ``SamplingAWRLoss``).
          **kwargs: Forwarded to the base class; ``q_value=True`` is forced.
        """
        self._beta = beta
        self._w_max = w_max
        self._reweight = reweight
        super().__init__(task, q_value=True, **kwargs)

    def _policy_inputs_to_advantages(self, preprocess):
        """Layer mapping policy inputs to (optionally normalized) advantages."""
        def fn(dist_inputs, actions, q_values, act_log_probs, mask):
            del dist_inputs, actions, mask
            # (batch_size, n_samples, ...) -> (n_samples, batch_size, ...)
            q_values = jnp.swapaxes(q_values, 0, 1)
            act_log_probs = jnp.swapaxes(act_log_probs, 0, 1)
            # Baseline: probability-weighted sum over all discrete actions,
            # or the mean over sampled actions.
            if self._sample_all_discrete_actions:
                values = jnp.sum(q_values * jnp.exp(act_log_probs), axis=0)
            else:
                values = jnp.mean(q_values, axis=0)
            advantages = q_values - values  # Broadcasting values over n_samples
            if preprocess:
                advantages = self._preprocess_advantages(advantages)
            return advantages
        return tl.Fn('PolicyInputsToAdvantages', fn)

    @property
    def policy_metrics(self):
        """Policy loss, advantage stats and AWR-weight stats."""
        metrics = {
            'policy_loss': self.policy_loss,
            'advantage_mean': tl.Serial(
                self._policy_inputs_to_advantages(False),
                tl.Fn('Mean', lambda x: jnp.mean(x))  # pylint: disable=unnecessary-lambda
            ),
            'advantage_std': tl.Serial(
                self._policy_inputs_to_advantages(False),
                tl.Fn('Std', lambda x: jnp.std(x))  # pylint: disable=unnecessary-lambda
            )
        }
        metrics.update(awr_metrics(
            self._beta, preprocess_layer=self._policy_inputs_to_advantages(True)))
        return metrics

    @property
    def policy_loss(self, **unused_kwargs):
        """Policy loss layer: sampling-based AWR."""
        def LossInput(dist_inputs, actions, q_values, act_log_probs, mask):  # pylint: disable=invalid-name
            # (batch_size, n_samples, ...) -> (n_samples, batch_size, ...)
            q_values = jnp.swapaxes(q_values, 0, 1)
            mask = jnp.swapaxes(mask, 0, 1)
            actions = jnp.swapaxes(actions, 0, 1)
            act_log_probs = jnp.swapaxes(act_log_probs, 0, 1)
            # TODO(pkozakowski,lukaszkaiser): Try max here, or reweighting?
            if self._sample_all_discrete_actions:
                values = jnp.sum(q_values * jnp.exp(act_log_probs), axis=0)
            else:
                values = jnp.mean(q_values, axis=0)
            advantages = q_values - values  # Broadcasting values over n_samples
            advantages = self._preprocess_advantages(advantages)
            # Broadcast inputs and calculate log-probs
            dist_inputs = jnp.broadcast_to(
                dist_inputs, (self._q_value_n_samples,) + dist_inputs.shape)
            log_probs = self._policy_dist.log_prob(dist_inputs, actions)
            return (log_probs, advantages, act_log_probs, mask)
        return tl.Serial(
            tl.Fn('LossInput', LossInput, n_out=4),
            # Policy loss is expected to consume
            # (log_probs, advantages, old_log_probs, mask).
            SamplingAWRLoss(
                beta=self._beta, w_max=self._w_max, reweight=self._reweight,
                sampled_all_discrete=self._sample_all_discrete_actions)
        )

    def policy_batches_stream(self):
        """Yields batches of policy inputs from the replay buffer."""
        # For now TD-0 estimation of the value. TODO(pkozakowski): Support others?
        for np_trajectory in self._task.trajectory_batch_stream(
            self._policy_batch_size,
            epochs=self._replay_epochs,
            max_slice_length=self._max_slice_length,
            include_final_state=False,
        ):
            (q_values, actions, act_log_probs) = self._run_value_model(
                np_trajectory.observations, np_trajectory.dist_inputs)
            shapes.assert_same_shape(q_values, act_log_probs)

            # q_values shape: (batch_size, n_samples, length)
            if len(q_values.shape) != 3:
                raise ValueError('Q-values are expected to have shape [batch_size, ' +
                                 'n_samples, length], got: %s' % str(q_values.shape))
            if q_values.shape[1] != self._q_value_n_samples:
                raise ValueError('Q-values dimension 1 should = n_samples, %d != %d'
                                 % (q_values.shape[1], self._q_value_n_samples))
            if q_values.shape[0] != self._policy_batch_size:
                # Bug fix: the message previously printed shape[1] although the
                # check is on shape[0].
                raise ValueError('Q-values dimension 0 should = policy batch size, ' +
                                 '%d!=%d' % (q_values.shape[0], self._policy_batch_size))

            mask = np_trajectory.mask
            mask = np.reshape(mask, [mask.shape[0], 1] + list(mask.shape[1:]))
            mask = jnp.broadcast_to(mask, q_values.shape)
            shapes.assert_same_shape(mask, q_values)
            yield (np_trajectory.observations, actions, q_values, act_log_probs, mask)
| true | true |
1c47e51dc09808eb307a0a839a939f5466b84977 | 576 | py | Python | centralized/cbs/EdgeConstraint.py | mengwei1/multi_agent_path_planning | 079a4af80c074e571a802af4506d416db5c6946a | [
"MIT"
] | null | null | null | centralized/cbs/EdgeConstraint.py | mengwei1/multi_agent_path_planning | 079a4af80c074e571a802af4506d416db5c6946a | [
"MIT"
] | null | null | null | centralized/cbs/EdgeConstraint.py | mengwei1/multi_agent_path_planning | 079a4af80c074e571a802af4506d416db5c6946a | [
"MIT"
] | null | null | null | class EdgeConstraint(object):
def __init__(self, time, location_1, location_2):
    """Edge constraint: a (time, location_1, location_2) triple.

    Presumably (CBS multi-agent path planning) it forbids traversing the
    edge location_1 -> location_2 at the given time step - confirm against
    the planner that consumes it.

    Args:
        time: Discrete time step of the constraint.
        location_1: Start location of the edge.
        location_2: End location of the edge.
    """
    self.time = time
    self.location_1 = location_1
    self.location_2 = location_2
def __eq__(self, other):
return self.time == other.time and self.location_1 == other.location_1 \
and self.location_2 == other.location_2
def __hash__(self):
return hash(str(self.time) + str(self.location_1) + str(self.location_2))
def __str__(self):
return '(' + str(self.time) + ', '+ str(self.location_1) +', '+ str(self.location_2) + ')'
| 36 | 98 | 0.631944 | class EdgeConstraint(object):
def __init__(self, time, location_1, location_2):
self.time = time
self.location_1 = location_1
self.location_2 = location_2
def __eq__(self, other):
return self.time == other.time and self.location_1 == other.location_1 \
and self.location_2 == other.location_2
def __hash__(self):
return hash(str(self.time) + str(self.location_1) + str(self.location_2))
def __str__(self):
return '(' + str(self.time) + ', '+ str(self.location_1) +', '+ str(self.location_2) + ')'
| true | true |
1c47e54c14f7d0bfeffe7567306b5ecd63f6ed02 | 473 | py | Python | old/markup_dude.py | karuvally/markup_dude | 2307a3e35ed0b899aa5c2256d4bedece09de8299 | [
"MIT"
] | 1 | 2021-04-04T08:59:17.000Z | 2021-04-04T08:59:17.000Z | old/markup_dude.py | karuvally/markup_dude | 2307a3e35ed0b899aa5c2256d4bedece09de8299 | [
"MIT"
] | null | null | null | old/markup_dude.py | karuvally/markup_dude | 2307a3e35ed0b899aa5c2256d4bedece09de8299 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Convert Markdown to HTML, made for https://karuvally.github.io
# Import required libraries
import argparse
# Parse markdown
def parse_md(md_file):
    """Parse a Markdown file and return its contents converted to HTML.

    The original definition had an empty body, which is a SyntaxError in
    Python; raise explicitly until conversion logic is written.

    Args:
        md_file: Path to the Markdown document to convert.

    Raises:
        NotImplementedError: Always, until the conversion is implemented.
    """
    raise NotImplementedError('Markdown-to-HTML conversion is not implemented yet.')
# The main function
def main():
    """Program entry point (currently a stub that does nothing)."""
    pass
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description = "Convert Markdown to HTML"
)
parser.add_argument(
"INPUT",
type = str,
help = "Markdown document for conversion"
)
main()
| 17.518519 | 64 | 0.64482 |
import argparse
def parse_md(md_file):
def main():
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description = "Convert Markdown to HTML"
)
parser.add_argument(
"INPUT",
type = str,
help = "Markdown document for conversion"
)
main()
| false | true |
1c47e7970da9d61be8311b50ed64ed5185e5dfd2 | 4,712 | py | Python | tests/test_albumentations_pytorch.py | brandongk-ubco/autoalbument | 1735ea4376694c2179ac62ce7d100a10b26f2558 | [
"MIT"
] | 135 | 2020-11-03T15:48:30.000Z | 2022-03-16T10:52:57.000Z | tests/test_albumentations_pytorch.py | brandongk-ubco/autoalbument | 1735ea4376694c2179ac62ce7d100a10b26f2558 | [
"MIT"
] | 31 | 2020-11-04T10:20:56.000Z | 2022-03-24T13:46:07.000Z | tests/test_albumentations_pytorch.py | brandongk-ubco/autoalbument | 1735ea4376694c2179ac62ce7d100a10b26f2558 | [
"MIT"
] | 16 | 2020-11-16T08:33:48.000Z | 2022-03-17T18:34:24.000Z | import albumentations.augmentations.functional as F
import pytest
import torch
from torch.autograd import gradcheck
import autoalbument.albumentations_pytorch.functional as PF
from tests.utils import assert_batches_match
class Base:
    """Shared harness comparing albumentations (NumPy) transforms with their
    AutoAlbument PyTorch counterparts.

    Subclasses override ``albumentations_fn`` / ``albumentations_pytorch_fn``
    and are parametrized over ``arg`` (the augmentation parameter).
    """

    def scalar_to_tensor(self, arg, requires_grad=False, dtype=torch.float32):
        """Convert a scalar/list arg to a tensor; pass None through unchanged."""
        if arg is None:
            return None
        return torch.tensor(arg, requires_grad=requires_grad, dtype=dtype)

    def test_albumentations_match(self, image_batches, arg):
        """The PyTorch batch op must match the per-image NumPy results."""
        np_images, pytorch_batch = image_batches
        tensor_arg = self.scalar_to_tensor(arg)
        augmented_np_images = [self.albumentations_fn(image, arg) for image in np_images]
        augmented_pytorch_batch = self.albumentations_pytorch_fn(pytorch_batch, tensor_arg)
        assert_batches_match(augmented_np_images, augmented_pytorch_batch)

    def test_gradients(self, gradcheck_batch, arg):
        """Gradients w.r.t. the parameter must pass torch.autograd.gradcheck."""
        tensor_arg = self.scalar_to_tensor(arg, requires_grad=True, dtype=torch.float64)
        gradcheck(self.albumentations_pytorch_fn, (gradcheck_batch, tensor_arg))

    def albumentations_fn(self, image, arg):
        # Reference (NumPy) implementation; provided by subclasses.
        raise NotImplementedError

    def albumentations_pytorch_fn(self, pytorch_batch, arg):
        # PyTorch implementation under test; provided by subclasses.
        raise NotImplementedError
@pytest.mark.parametrize("arg", [0.2, 0.4, 0.8])
class TestSolarize(Base):
def albumentations_fn(self, image, arg):
return F.solarize(image, threshold=arg)
def albumentations_pytorch_fn(self, pytorch_batch, arg):
return PF.solarize(pytorch_batch, threshold=arg)
def test_gradients(self, gradcheck_batch, arg):
pass
@pytest.mark.parametrize("arg", [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [-1.0, -1.0, -1.0], [0.0, 0.7, -0.2]])
class TestShiftRgb(Base):
def albumentations_fn(self, image, arg):
return F.shift_rgb(image, r_shift=arg[0], g_shift=arg[1], b_shift=arg[2])
def albumentations_pytorch_fn(self, pytorch_batch, arg):
return PF.shift_rgb(pytorch_batch, r_shift=arg[0], g_shift=arg[1], b_shift=arg[2])
@pytest.mark.parametrize("arg", [-1.0, 0.1, 0.5, 1.0])
class TestBrightnessAdjust(Base):
def albumentations_fn(self, image, arg):
return F.brightness_contrast_adjust(image, beta=arg, beta_by_max=True)
def albumentations_pytorch_fn(self, pytorch_batch, arg):
return PF.brightness_adjust(pytorch_batch, beta=arg)
@pytest.mark.parametrize("arg", [-1.0, 0.1, 0.5, 1.0])
class TestContrastAdjust(Base):
def albumentations_fn(self, image, arg):
return F.brightness_contrast_adjust(image, alpha=arg)
def albumentations_pytorch_fn(self, pytorch_batch, arg):
return PF.contrast_adjust(pytorch_batch, alpha=arg)
@pytest.mark.parametrize("arg", [None])
class TestVflip(Base):
def albumentations_fn(self, image, arg):
return F.vflip(image)
def albumentations_pytorch_fn(self, pytorch_batch, arg):
return PF.vflip(pytorch_batch)
def test_gradients(self, gradcheck_batch, arg):
pass
@pytest.mark.parametrize("arg", [None])
class TestHflip(Base):
def albumentations_fn(self, image, arg):
return F.hflip(image)
def albumentations_pytorch_fn(self, pytorch_batch, arg):
return PF.hflip(pytorch_batch)
def test_gradients(self, gradcheck_batch, arg):
pass
@pytest.mark.parametrize(
    "arg",
    [
        [0.01],
        [-0.5],
        [0.5],
        [1.0 - 1e-6],
        [-1.0 + 1e-6],
    ],
)
class TestShiftX(Base):
    """Horizontal translation; only gradients are verified."""

    def albumentations_pytorch_fn(self, pytorch_batch, arg):
        return PF.shift_x(pytorch_batch, dx=arg)

    def test_albumentations_match(self, image_batches, arg):
        # No albumentations reference implementation is wired up for this op.
        pass
@pytest.mark.parametrize(
    "arg",
    [
        [0.01],
        [-0.5],
        [0.5],
        [1.0 - 1e-6],
        [-1.0 + 1e-6],
    ],
)
class TestShiftY(Base):
    """Vertical translation; only gradients are verified."""

    def albumentations_pytorch_fn(self, pytorch_batch, arg):
        return PF.shift_y(pytorch_batch, dy=arg)

    def test_albumentations_match(self, image_batches, arg):
        # No albumentations reference implementation is wired up for this op.
        pass
@pytest.mark.parametrize(
    "arg",
    [
        [0.1],
        [-0.5],
        [0.5],
        [1.0 - 1e-6],
        [-1.0 + 1e-6],
    ],
)
class TestScale(Base):
    """Scaling; only gradients are verified."""

    def albumentations_pytorch_fn(self, pytorch_batch, arg):
        return PF.scale(pytorch_batch, scale=arg)

    def test_albumentations_match(self, image_batches, arg):
        # No albumentations reference implementation is wired up for this op.
        pass
@pytest.mark.parametrize(
    "arg",
    [
        [0.1],
        [-0.5],
        [0.5],
        [1.0 - 1e-6],
        [-1.0 + 1e-6],
    ],
)
class TestRotate(Base):
    """Rotation; only gradients are verified."""

    def albumentations_pytorch_fn(self, pytorch_batch, arg):
        return PF.rotate(pytorch_batch, angle=arg)

    def test_albumentations_match(self, image_batches, arg):
        # No albumentations reference implementation is wired up for this op.
        pass
| 28.215569 | 105 | 0.66702 | import albumentations.augmentations.functional as F
import pytest
import torch
from torch.autograd import gradcheck
import autoalbument.albumentations_pytorch.functional as PF
from tests.utils import assert_batches_match
class Base:
def scalar_to_tensor(self, arg, requires_grad=False, dtype=torch.float32):
if arg is None:
return None
return torch.tensor(arg, requires_grad=requires_grad, dtype=dtype)
def test_albumentations_match(self, image_batches, arg):
np_images, pytorch_batch = image_batches
tensor_arg = self.scalar_to_tensor(arg)
augmented_np_images = [self.albumentations_fn(image, arg) for image in np_images]
augmented_pytorch_batch = self.albumentations_pytorch_fn(pytorch_batch, tensor_arg)
assert_batches_match(augmented_np_images, augmented_pytorch_batch)
def test_gradients(self, gradcheck_batch, arg):
tensor_arg = self.scalar_to_tensor(arg, requires_grad=True, dtype=torch.float64)
gradcheck(self.albumentations_pytorch_fn, (gradcheck_batch, tensor_arg))
def albumentations_fn(self, image, arg):
raise NotImplementedError
def albumentations_pytorch_fn(self, pytorch_batch, arg):
raise NotImplementedError
@pytest.mark.parametrize("arg", [0.2, 0.4, 0.8])
class TestSolarize(Base):
def albumentations_fn(self, image, arg):
return F.solarize(image, threshold=arg)
def albumentations_pytorch_fn(self, pytorch_batch, arg):
return PF.solarize(pytorch_batch, threshold=arg)
def test_gradients(self, gradcheck_batch, arg):
pass
@pytest.mark.parametrize("arg", [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [-1.0, -1.0, -1.0], [0.0, 0.7, -0.2]])
class TestShiftRgb(Base):
def albumentations_fn(self, image, arg):
return F.shift_rgb(image, r_shift=arg[0], g_shift=arg[1], b_shift=arg[2])
def albumentations_pytorch_fn(self, pytorch_batch, arg):
return PF.shift_rgb(pytorch_batch, r_shift=arg[0], g_shift=arg[1], b_shift=arg[2])
@pytest.mark.parametrize("arg", [-1.0, 0.1, 0.5, 1.0])
class TestBrightnessAdjust(Base):
def albumentations_fn(self, image, arg):
return F.brightness_contrast_adjust(image, beta=arg, beta_by_max=True)
def albumentations_pytorch_fn(self, pytorch_batch, arg):
return PF.brightness_adjust(pytorch_batch, beta=arg)
@pytest.mark.parametrize("arg", [-1.0, 0.1, 0.5, 1.0])
class TestContrastAdjust(Base):
def albumentations_fn(self, image, arg):
return F.brightness_contrast_adjust(image, alpha=arg)
def albumentations_pytorch_fn(self, pytorch_batch, arg):
return PF.contrast_adjust(pytorch_batch, alpha=arg)
@pytest.mark.parametrize("arg", [None])
class TestVflip(Base):
def albumentations_fn(self, image, arg):
return F.vflip(image)
def albumentations_pytorch_fn(self, pytorch_batch, arg):
return PF.vflip(pytorch_batch)
def test_gradients(self, gradcheck_batch, arg):
pass
@pytest.mark.parametrize("arg", [None])
class TestHflip(Base):
def albumentations_fn(self, image, arg):
return F.hflip(image)
def albumentations_pytorch_fn(self, pytorch_batch, arg):
return PF.hflip(pytorch_batch)
def test_gradients(self, gradcheck_batch, arg):
pass
@pytest.mark.parametrize(
"arg",
[
[0.01],
[-0.5],
[0.5],
[1.0 - 1e-6],
[-1.0 + 1e-6],
],
)
class TestShiftX(Base):
def albumentations_pytorch_fn(self, pytorch_batch, arg):
return PF.shift_x(pytorch_batch, dx=arg)
def test_albumentations_match(self, image_batches, arg):
pass
@pytest.mark.parametrize(
"arg",
[
[0.01],
[-0.5],
[0.5],
[1.0 - 1e-6],
[-1.0 + 1e-6],
],
)
class TestShiftY(Base):
def albumentations_pytorch_fn(self, pytorch_batch, arg):
return PF.shift_y(pytorch_batch, dy=arg)
def test_albumentations_match(self, image_batches, arg):
pass
@pytest.mark.parametrize(
"arg",
[
[0.1],
[-0.5],
[0.5],
[1.0 - 1e-6],
[-1.0 + 1e-6],
],
)
class TestScale(Base):
def albumentations_pytorch_fn(self, pytorch_batch, arg):
return PF.scale(pytorch_batch, scale=arg)
def test_albumentations_match(self, image_batches, arg):
pass
@pytest.mark.parametrize(
"arg",
[
[0.1],
[-0.5],
[0.5],
[1.0 - 1e-6],
[-1.0 + 1e-6],
],
)
class TestRotate(Base):
def albumentations_pytorch_fn(self, pytorch_batch, arg):
return PF.rotate(pytorch_batch, angle=arg)
def test_albumentations_match(self, image_batches, arg):
pass
| true | true |
1c47e7ea31e47aede41afd960aa65ae5de620bf1 | 1,379 | py | Python | notifications/migrations/0001_initial.py | ABERT-NOLA/App-Instagram | f1394a96baa8e19a5b4b8b1c96917b9da5f3fe43 | [
"MIT"
] | 1 | 2020-11-17T09:00:59.000Z | 2020-11-17T09:00:59.000Z | notifications/migrations/0001_initial.py | kahenya-anita/Insta-Clone | 4894e959c17170505e73aee6dc497aeb29d55a71 | [
"MIT"
] | null | null | null | notifications/migrations/0001_initial.py | kahenya-anita/Insta-Clone | 4894e959c17170505e73aee6dc497aeb29d55a71 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-10-15 12:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the Notification model.

    A Notification links a sender and a receiving user (and optionally a
    post) with a type code: 1 = Like, 2 = Comment, 3 = Follow. Do not edit
    the operations by hand; generate a follow-up migration instead.
    """

    initial = True

    dependencies = [
        ('post', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Notification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('notification_type', models.IntegerField(choices=[(1, 'Like'), (2, 'Comment'), (3, 'Follow')])),
                ('text_preview', models.CharField(blank=True, max_length=90)),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('is_seen', models.BooleanField(default=False)),
                ('post', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='noti_post', to='post.post')),
                ('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='noti_from_user', to=settings.AUTH_USER_MODEL)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='noti_to_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 43.09375 | 154 | 0.643945 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('post', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('notification_type', models.IntegerField(choices=[(1, 'Like'), (2, 'Comment'), (3, 'Follow')])),
('text_preview', models.CharField(blank=True, max_length=90)),
('date', models.DateTimeField(auto_now_add=True)),
('is_seen', models.BooleanField(default=False)),
('post', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='noti_post', to='post.post')),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='noti_from_user', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='noti_to_user', to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
1c47e85f18698ee4a933fa7ead51a9efaadf9d71 | 6,007 | py | Python | views.py | penzance/student_locations | 44618a237a9061dbc9d705810ad88d255781f44d | [
"MIT"
] | 1 | 2015-06-12T13:48:42.000Z | 2015-06-12T13:48:42.000Z | views.py | penzance/student_locations | 44618a237a9061dbc9d705810ad88d255781f44d | [
"MIT"
] | null | null | null | views.py | penzance/student_locations | 44618a237a9061dbc9d705810ad88d255781f44d | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect, render_to_response
from django.views.decorators.http import require_http_methods
from ims_lti_py.tool_config import ToolConfig
from django.conf import settings
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from student_locations.forms import StudentLocationForm
from student_locations.models import Locations
from student_locations.utils import validaterequiredltiparams, getparamfromsession
import logging
logger = logging.getLogger(__name__)
@require_http_methods(['GET'])
def index(request):
    """Render the tool's static index page."""
    template_name = 'student_locations/index.html'
    return render(request, template_name)
@login_required()
@require_http_methods(['POST'])
def lti_launch(request):
    """Handle the LTI launch POST from the LMS.

    This method is here to build the LTI_LAUNCH dictionary containing all
    the LTI parameters and place it into the session (done inside
    ``validaterequiredltiparams``). This is necessary as we need to access
    these parameters throughout the application and they are only available
    the first time the application loads. Redirects to the main view on
    success; renders an error page when required LTI parameters are missing
    or the user is not authenticated.
    """
    # NOTE(review): is_authenticated is called as a method, which only works
    # on Django < 1.10 (it became a property afterwards) - confirm on upgrade.
    if request.user.is_authenticated():
        if validaterequiredltiparams(request):
            return redirect('sl:main')
        else:
            return render(request, 'student_locations/error.html', {'message': 'Error: The LTI parameter lis_course_offering_sourcedid is required by this LTI tool.'})
    else:
        # Normally unreachable behind @login_required; kept as a safety net.
        return render(request, 'student_locations/error.html', {'message': 'Error: user is not authenticated!'})
@login_required()
@require_http_methods(['GET'])
def main(request):
    """Display the default view, which is the map view."""
    api_key = settings.STUDENT_LOCATIONS_TOOL.get('google_map_api_v3_key')
    context = {'request': request, 'api_key': api_key}
    return render(request, 'student_locations/map_view.html', context)
@login_required()
@require_http_methods(['GET'])
def user_edit_view(request):
    """Display the form letting a user enter contact and location data.

    The form is pre-populated with the user's existing ``Locations`` record
    (looked up by the resource_link_id/user_id pair stored in the session by
    the LTI launch) if one exists; otherwise an empty form is shown.
    """
    resource_link_id = getparamfromsession(request, 'resource_link_id')
    user_id = getparamfromsession(request, 'user_id')
    if not resource_link_id or not user_id:
        # Session lost (or tool opened outside an LTI launch).
        return render(request, 'student_locations/error.html', {'message': 'Unable to retrieve params from session. You might want to try reloading the tool.'})
    try:
        student = Locations.objects.get(resource_link_id=resource_link_id, user_id=user_id)
    except Locations.DoesNotExist:
        student = None
    if student:
        # Edit the existing record.
        form = StudentLocationForm(instance=student)
    else:
        # First visit: start from a blank form.
        form = StudentLocationForm()
    return render(request, 'student_locations/user_edit_view.html', {'request': request, 'form': form})
@login_required()
def addoredituser(request):
    """Form-submit action for ``user_edit_view``: create or update a record.

    On a valid form, saves the ``Locations`` record (stamped with the
    session's resource_link_id/user_id) and renders the map view; otherwise
    re-renders the edit form with validation errors.
    """
    # NOTE(review): unlike the other views this one is not restricted with
    # @require_http_methods(['POST']) - confirm whether GET should be allowed.
    resource_link_id = getparamfromsession(request, 'resource_link_id')
    user_id = getparamfromsession(request, 'user_id')
    try:
        student = Locations.objects.get(resource_link_id=resource_link_id, user_id=user_id)
    except Locations.DoesNotExist:
        student = None
    if student:
        # Bind the POST data to the existing record.
        form = StudentLocationForm(instance=student, user_id=user_id, resource_link_id=resource_link_id, data=request.POST)
    else:
        logger.debug('student is None')
        form = StudentLocationForm(user_id=user_id, resource_link_id=resource_link_id, data=request.POST)
    if form.is_valid():
        # Stamp the record with the session identity before saving.
        theform = form.save(commit=False)
        theform.user_id = user_id
        theform.resource_link_id = resource_link_id
        theform.save()
        key = settings.STUDENT_LOCATIONS_TOOL.get('google_map_api_v3_key')
        return render(request, 'student_locations/map_view.html', {'request': request, 'api_key' : key})
    else:
        # Validation failed: show the form again with errors.
        return render(request, 'student_locations/user_edit_view.html', {'request': request, 'form': form})
@login_required()
@require_http_methods(['GET'])
def table_view(request):
    """Render the table listing of every student in the current LTI context."""
    link_id = getparamfromsession(request, 'resource_link_id')
    roster = Locations.objects.filter(resource_link_id=link_id)
    return render(request, 'student_locations/table_view.html',
                  {'request': request, 'data': roster})
@login_required()
@require_http_methods(['GET'])
def markers_class_xml(request):
    """
    Render the XML document containing the location data consumed by the
    google map view.
    """
    resource_link_id = getparamfromsession(request, 'resource_link_id')
    students = Locations.objects.filter(resource_link_id=resource_link_id)
    # render() replaces render_to_response(..., context_instance=RequestContext),
    # which was removed in modern Django; it is also what every other view in
    # this module uses. content_type is declared so clients parse the response
    # as XML instead of the default text/html.
    return render(request, 'student_locations/markers.xml',
                  {'data': students}, content_type='text/xml')
@require_http_methods(['GET'])
def tool_config(request):
    """
    Produce the Canvas-specific LTI XML configuration that can be used to
    register this tool with the Canvas LMS.
    """
    # Build the absolute launch URL with the scheme of the current request.
    scheme = 'https://' if request.is_secure() else 'http://'
    host = scheme + request.get_host()
    url = host + reverse('sl:lti_launch')
    lti_tool_config = ToolConfig(
        title='Student Locations',
        launch_url=url,
        secure_launch_url=url,
    )
    course_nav = {
        'enabled': 'true',
        # optionally, a different URL for the link may be supplied via 'url'
        'text': 'Student Locations',
    }
    lti_tool_config.set_ext_param('canvas.instructure.com', 'privacy_level', 'public')
    lti_tool_config.set_ext_param('canvas.instructure.com', 'course_navigation', course_nav)
    lti_tool_config.description = 'This LTI tool facilitates the display of Student Locations.'
    return HttpResponse(lti_tool_config.to_xml(), content_type='text/xml', status=200)
| 37.080247 | 167 | 0.715832 | from django.shortcuts import render, redirect, render_to_response
from django.views.decorators.http import require_http_methods
from ims_lti_py.tool_config import ToolConfig
from django.conf import settings
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from student_locations.forms import StudentLocationForm
from student_locations.models import Locations
from student_locations.utils import validaterequiredltiparams, getparamfromsession
import logging
logger = logging.getLogger(__name__)
@require_http_methods(['GET'])
def index(request):
return render(request, 'student_locations/index.html')
@login_required()
@require_http_methods(['POST'])
def lti_launch(request):
if request.user.is_authenticated():
if validaterequiredltiparams(request):
return redirect('sl:main')
else:
return render(request, 'student_locations/error.html', {'message': 'Error: The LTI parameter lis_course_offering_sourcedid is required by this LTI tool.'})
else:
return render(request, 'student_locations/error.html', {'message': 'Error: user is not authenticated!'})
@login_required()
@require_http_methods(['GET'])
def main(request):
key = settings.STUDENT_LOCATIONS_TOOL.get('google_map_api_v3_key')
return render(request, 'student_locations/map_view.html', {'request': request, 'api_key': key})
@login_required()
@require_http_methods(['GET'])
def user_edit_view(request):
resource_link_id = getparamfromsession(request, 'resource_link_id')
user_id = getparamfromsession(request, 'user_id')
if not resource_link_id or not user_id:
return render(request, 'student_locations/error.html', {'message': 'Unable to retrieve params from session. You might want to try reloading the tool.'})
try:
student = Locations.objects.get(resource_link_id=resource_link_id, user_id=user_id)
except Locations.DoesNotExist:
student = None
if student:
form = StudentLocationForm(instance=student)
else:
form = StudentLocationForm()
return render(request, 'student_locations/user_edit_view.html', {'request': request, 'form': form})
@login_required()
def addoredituser(request):
resource_link_id = getparamfromsession(request, 'resource_link_id')
user_id = getparamfromsession(request, 'user_id')
try:
student = Locations.objects.get(resource_link_id=resource_link_id, user_id=user_id)
except Locations.DoesNotExist:
student = None
if student:
form = StudentLocationForm(instance=student, user_id=user_id, resource_link_id=resource_link_id, data=request.POST)
else:
logger.debug('student is None')
form = StudentLocationForm(user_id=user_id, resource_link_id=resource_link_id, data=request.POST)
if form.is_valid():
theform = form.save(commit=False)
theform.user_id = user_id
theform.resource_link_id = resource_link_id
theform.save()
key = settings.STUDENT_LOCATIONS_TOOL.get('google_map_api_v3_key')
return render(request, 'student_locations/map_view.html', {'request': request, 'api_key' : key})
else:
return render(request, 'student_locations/user_edit_view.html', {'request': request, 'form': form})
@login_required()
@require_http_methods(['GET'])
def table_view(request):
resource_link_id = getparamfromsession(request, 'resource_link_id')
students = Locations.objects.filter(resource_link_id=resource_link_id)
return render(request, 'student_locations/table_view.html', {'request': request, 'data' : students})
@login_required()
@require_http_methods(['GET'])
def markers_class_xml(request):
resource_link_id = getparamfromsession(request, 'resource_link_id')
students = Locations.objects.filter(resource_link_id=resource_link_id)
return render_to_response('student_locations/markers.xml',
{'data' : students},
context_instance=RequestContext(request))
@require_http_methods(['GET'])
def tool_config(request):
if request.is_secure():
host = 'https://' + request.get_host()
else:
host = 'http://' + request.get_host()
url = host + reverse('sl:lti_launch')
lti_tool_config = ToolConfig(
title='Student Locations',
launch_url=url,
secure_launch_url=url,
)
account_nav_params = {
'enabled': 'true',
'text': 'Student Locations',
}
lti_tool_config.set_ext_param('canvas.instructure.com', 'privacy_level', 'public')
lti_tool_config.set_ext_param('canvas.instructure.com', 'course_navigation', account_nav_params)
lti_tool_config.description = 'This LTI tool facilitates the display of Student Locations.'
resp = HttpResponse(lti_tool_config.to_xml(), content_type='text/xml', status=200)
return resp
| true | true |
1c47e8a94158577824e18e7417db7b1bfc6b3d7a | 3,111 | py | Python | sigeco/settings.py | edusantana/sigeco | 5e9e612e2721b170d770b73a36df4b92c94bed6f | [
"MIT"
] | null | null | null | sigeco/settings.py | edusantana/sigeco | 5e9e612e2721b170d770b73a36df4b92c94bed6f | [
"MIT"
] | null | null | null | sigeco/settings.py | edusantana/sigeco | 5e9e612e2721b170d770b73a36df4b92c94bed6f | [
"MIT"
] | null | null | null | """
Django settings for sigeco project.
Generated by 'django-admin startproject' using Django 2.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR resolves to the repository root (two levels above this file).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '9l%mu66ff)sre+8$(j%&3%(3an+i+-di$o(v^wk0)y)!i92jit'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty is acceptable while DEBUG is True; production hostnames must be added.
ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'core'  # project application
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'sigeco.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'sigeco.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# SQLite file in the repository root -- development only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/

LANGUAGE_CODE = 'pt-br'

TIME_ZONE = 'America/Recife'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/

STATIC_URL = '/static/'
| 25.292683 | 91 | 0.693346 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '9l%mu66ff)sre+8$(j%&3%(3an+i+-di$o(v^wk0)y)!i92jit'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sigeco.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sigeco.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Recife'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| true | true |
1c47e8b8f9a966f714a9afcbcc891fafd92fb95b | 1,232 | py | Python | tests/models.py | viralogic/py-queryable | 12034ee04eaa176676df84cb49665c6a9f741f01 | [
"MIT"
] | null | null | null | tests/models.py | viralogic/py-queryable | 12034ee04eaa176676df84cb49665c6a9f741f01 | [
"MIT"
] | 1 | 2018-10-04T22:13:18.000Z | 2018-10-18T04:01:59.000Z | tests/models.py | viralogic/py-queryable | 12034ee04eaa176676df84cb49665c6a9f741f01 | [
"MIT"
] | null | null | null | from py_queryable import Model
from py_queryable import Column, PrimaryKey, ForeignKey
# Minimal Model subclasses used as fixtures for the py_queryable test suite.
# NOTE: the `unicode` builtin means this file targets Python 2.

class StubModel(Model):
    """Fixture: one int column with an explicit database column name."""
    __table_name__ = u'test_table'
    test_int_column = Column(int, 'int_column')


class StubModel2(Model):
    """Fixture: int column whose database name defaults to the attribute name."""
    __table_name__ = u'test_table'
    test_int_column = Column(int)


class StubPrimary(Model):
    """Fixture: integer primary key with an explicit column name."""
    __table_name__ = u"test_table"
    test_pk = PrimaryKey(int, 'int_pk')


class StubPrimaryString(Model):
    """Fixture: text (unicode) primary key."""
    __table_name__ = u"test_table"
    test_pk = PrimaryKey(unicode, 'unicode_pk')


class StubIntUnique(Model):
    """Fixture: default-named primary key plus a UNIQUE-constrained column."""
    __table_name__ = u"test_table"
    test_pk = PrimaryKey(int)
    test_unique = Column(int, 'int_column', is_unique=True)


class StubForeignKey(Model):
    """Fixture: non-nullable foreign key referencing StubPrimary."""
    __table_name__ = u"foreign_key_table"
    test_pk = PrimaryKey(int, 'int_pk')
    test_fk = ForeignKey(StubPrimary, 'test_fk', is_nullable=False)


class StubUpdateModel(Model):
    """Fixture used by UPDATE-statement tests."""
    __table_name__ = u"test_update_table"
    key = PrimaryKey(int, 'key_column')
    update_col = Column(int, 'update_column')


class Student(Model):
    """Realistic sample model used in query examples."""
    __table_name__ = u"student"
    student_id = PrimaryKey(int, "student_id")
    first_name = Column(unicode, "first_name")
    last_name = Column(unicode, "last_name")
    gpa = Column(int, "gpa")
| 25.142857 | 67 | 0.722403 | from py_queryable import Model
from py_queryable import Column, PrimaryKey, ForeignKey
class StubModel(Model):
__table_name__ = u'test_table'
test_int_column = Column(int, 'int_column')
class StubModel2(Model):
__table_name__ = u'test_table'
test_int_column = Column(int)
class StubPrimary(Model):
__table_name__ = u"test_table"
test_pk = PrimaryKey(int, 'int_pk')
class StubPrimaryString(Model):
__table_name__ = u"test_table"
test_pk = PrimaryKey(unicode, 'unicode_pk')
class StubIntUnique(Model):
__table_name__ = u"test_table"
test_pk = PrimaryKey(int)
test_unique = Column(int, 'int_column', is_unique=True)
class StubForeignKey(Model):
__table_name__ = u"foreign_key_table"
test_pk = PrimaryKey(int, 'int_pk')
test_fk = ForeignKey(StubPrimary, 'test_fk', is_nullable=False)
class StubUpdateModel(Model):
__table_name__ = u"test_update_table"
key = PrimaryKey(int, 'key_column')
update_col = Column(int, 'update_column')
class Student(Model):
__table_name__ = u"student"
student_id = PrimaryKey(int, "student_id")
first_name = Column(unicode, "first_name")
last_name = Column(unicode, "last_name")
gpa = Column(int, "gpa")
| true | true |
1c47e91ab2f55e136165d27492cfb7b3e29d06ea | 2,056 | py | Python | scripts/_helpers.py | fneum/core-tso-data | 480b1ea7524adc5d5425165668989fc9dd412e90 | [
"MIT"
] | null | null | null | scripts/_helpers.py | fneum/core-tso-data | 480b1ea7524adc5d5425165668989fc9dd412e90 | [
"MIT"
] | 1 | 2022-02-07T14:41:52.000Z | 2022-02-07T14:41:52.000Z | scripts/_helpers.py | fneum/core-tso-data | 480b1ea7524adc5d5425165668989fc9dd412e90 | [
"MIT"
] | 4 | 2022-01-12T17:09:16.000Z | 2022-02-07T14:36:21.000Z | # from https://github.com/PyPSA/pypsa-eur/blob/master/scripts/_helpers.py
import pypsa
from pathlib import Path
def mock_snakemake(rulename, **wildcards):
    """
    This function is expected to be executed from the 'scripts'-directory of
    the snakemake project. It returns a snakemake.script.Snakemake object,
    based on the Snakefile.

    If a rule has wildcards, you have to specify them in **wildcards.

    Parameters
    ----------
    rulename: str
        name of the rule for which the snakemake object should be generated
    **wildcards:
        keyword arguments fixing the wildcards. Only necessary if wildcards are
        needed.
    """
    # Local imports so merely importing this helpers module does not require
    # snakemake to be installed.
    import snakemake as sm
    import os
    from pypsa.descriptors import Dict
    from snakemake.script import Snakemake

    script_dir = Path(__file__).parent.resolve()
    assert (
        Path.cwd().resolve() == script_dir
    ), f"mock_snakemake has to be run from the repository scripts directory {script_dir}"
    # Work from the repository root so relative paths in the Snakefile resolve.
    os.chdir(script_dir.parent)
    # Use the first Snakefile spelling that exists (e.g. 'Snakefile', 'snakefile').
    for p in sm.SNAKEFILE_CHOICES:
        if os.path.exists(p):
            snakefile = p
            break
    # Parse the workflow and build a single-rule DAG for the requested rule.
    workflow = sm.Workflow(snakefile, overwrite_configfiles=[])
    workflow.include(snakefile)
    workflow.global_resources = {}
    rule = workflow.get_rule(rulename)
    dag = sm.dag.DAG(workflow, rules=[rule])
    wc = Dict(wildcards)
    job = sm.jobs.Job(rule, dag, wc)

    def make_accessable(*ios):
        # Rewrite every input/output/log path in place as an absolute path so
        # the mocked job works regardless of the caller's working directory.
        for io in ios:
            for i in range(len(io)):
                io[i] = os.path.abspath(io[i])

    make_accessable(job.input, job.output, job.log)
    snakemake = Snakemake(
        job.input,
        job.output,
        job.params,
        job.wildcards,
        job.threads,
        job.resources,
        job.log,
        job.dag.workflow.config,
        job.rule.name,
        None,
    )
    # create log and output dirs if not existent
    for path in list(snakemake.log) + list(snakemake.output):
        Path(path).parent.mkdir(parents=True, exist_ok=True)
    # Restore the original working directory for the calling script.
    os.chdir(script_dir)
    return snakemake
| 30.686567 | 89 | 0.646401 |
import pypsa
from pathlib import Path
def mock_snakemake(rulename, **wildcards):
import snakemake as sm
import os
from pypsa.descriptors import Dict
from snakemake.script import Snakemake
script_dir = Path(__file__).parent.resolve()
assert (
Path.cwd().resolve() == script_dir
), f"mock_snakemake has to be run from the repository scripts directory {script_dir}"
os.chdir(script_dir.parent)
for p in sm.SNAKEFILE_CHOICES:
if os.path.exists(p):
snakefile = p
break
workflow = sm.Workflow(snakefile, overwrite_configfiles=[])
workflow.include(snakefile)
workflow.global_resources = {}
rule = workflow.get_rule(rulename)
dag = sm.dag.DAG(workflow, rules=[rule])
wc = Dict(wildcards)
job = sm.jobs.Job(rule, dag, wc)
def make_accessable(*ios):
for io in ios:
for i in range(len(io)):
io[i] = os.path.abspath(io[i])
make_accessable(job.input, job.output, job.log)
snakemake = Snakemake(
job.input,
job.output,
job.params,
job.wildcards,
job.threads,
job.resources,
job.log,
job.dag.workflow.config,
job.rule.name,
None,
)
for path in list(snakemake.log) + list(snakemake.output):
Path(path).parent.mkdir(parents=True, exist_ok=True)
os.chdir(script_dir)
return snakemake
| true | true |
1c47e955191e482356b4dccd864bc429173b5548 | 4,999 | py | Python | frontend/app/export.py | emsch/femida | 48de931c5d563e7fd354e6593a702f2397fd566f | [
"Apache-2.0"
] | null | null | null | frontend/app/export.py | emsch/femida | 48de931c5d563e7fd354e6593a702f2397fd566f | [
"Apache-2.0"
] | 52 | 2018-09-16T13:08:09.000Z | 2020-06-07T09:30:27.000Z | frontend/app/export.py | emsch/femida | 48de931c5d563e7fd354e6593a702f2397fd566f | [
"Apache-2.0"
] | 10 | 2018-09-26T20:06:41.000Z | 2020-06-16T17:35:59.000Z | #!/usr/bin/env python3
import datetime
import json
import xlsxwriter
from io import BytesIO
from flask import (
Blueprint,
send_file
)
from bson import json_util
from collections import Counter
mod_export = Blueprint('export', __name__)
from database import mongo # noqa
pdfs = mongo.db.pdfs
answers = mongo.db.answers
class Col:
    """Incrementing column counter for laying out worksheet cells.

    Calling the instance yields successive zero-based column indices;
    current() peeks at the next index without advancing.
    """

    def __init__(self, start_from=0):
        self.i = start_from  # index handed out by the next call

    def __call__(self):
        self.i += 1
        return self.i - 1

    def current(self):
        """Return the next column index without consuming it."""
        return self.i
class Question:
    """Aggregates the answer options recorded for a single question.

    The initial recognised option and every manual update are folded into a
    Counter of cleaned options. Manual updates are also kept verbatim so
    that contradicting reviewer decisions can be detected and highlighted.
    """

    def __init__(
        self, id_=None, option=None, banned_options="",
        yellow=None, red=None,
    ):
        self.id = id_
        self.banned_options = banned_options
        # Counter of cleaned options seen so far (empty if no initial option).
        self.options = Counter() if option is None else Counter([self.clean_option(option)])
        self.has_updates = False
        self.has_contradicting_updates = False
        self.updates = []  # raw (uncleaned) manual updates, in order
        self.yellow = yellow  # worksheet format for "manually changed"
        self.red = red        # worksheet format for "contradicting updates"

    def clean_option(self, option):
        """Return *option* with every banned character stripped out."""
        cleaned = option
        for banned in self.banned_options:
            cleaned = cleaned.replace(banned, "")
        return cleaned

    def update(self, option):
        """Record one manual update and refresh the update/contradiction flags."""
        # A contradiction is a new update that differs from all earlier ones.
        if self.updates and option not in self.updates:
            self.has_contradicting_updates = True
        self.updates.append(option)
        self.options.update([self.clean_option(option)])
        if len(self.options) > 1:
            self.has_updates = True

    def get_res_style(self):
        """Return (result, cell format): red for contradictions, yellow when
        updates changed the answer, and no format otherwise."""
        result = self.get_res()
        if self.has_contradicting_updates:
            # PROBLEM: reviewers disagreed.
            return result, self.red
        if self.has_updates:
            return result, self.yellow
        return result, None

    def get_res(self):
        """Best known answer: the latest manual update wins, otherwise the
        most common recognised option; None when nothing was recorded."""
        if not self.options:
            return None
        if self.updates:
            return self.updates[-1]
        return self.options.most_common(1)[0][0]
@mod_export.route('/export')
def export():
    """Export every scanned answer sheet from MongoDB as an .xlsx download.

    One worksheet row is written per document in the ``answers`` collection:
    personal fields and the 40 test answers are aggregated across OCR
    results and manual updates via Question, with cells highlighted yellow
    (answer was manually changed) or red (contradicting manual updates /
    pending manual checks).
    """
    output = BytesIO()
    # Create an new Excel file (in memory) and add a worksheet.
    workbook = xlsxwriter.Workbook(output)
    worksheet = workbook.add_worksheet()
    # Cell formats used to highlight problem cells.
    red = workbook.add_format({'bold': True, 'bg_color': 'red'})
    yellow = workbook.add_format({'bold': False, 'bg_color': 'yellow'})
    bold = workbook.add_format({'bold': True})
    header = ["№", "status", "surname", "name", "patronumic", "class", "type",
              "variant", "requested_manual", "manual_checks", "img_test_form",
              "img_fio", "UUID"]
    header.extend([str(i) for i in range(1, 41)])
    header.extend(["raw_json"])
    for i, v in enumerate(header):
        worksheet.write(0, i, v, bold)
    for row, r in enumerate(answers.find()):
        # Col tracks the next worksheet column; each col() call both returns
        # and advances it, so write order below defines the column layout.
        col = Col()
        try:
            worksheet.write(1+row, col(), 1+row)
            worksheet.write(1+row, col(), r.get('status', ""))
            # Personal data (surname / name / patronymic / class / type /
            # variant), aggregated over every 'personal' OCR record.
            for field in ['surname', 'name', 'patronymic', 'class', 'type', 'variant']:
                question = Question(field, yellow=yellow, red=red)
                for personal in r['personal']:
                    question.update(personal.get(field, ""))
                worksheet.write(1+row, col(), *question.get_res_style())
            # Counts of requested / performed manual checks.
            requested_manual = len(r.get('requested_manual', []))
            if requested_manual > 0:
                # Pending manual checks -> highlight red.
                worksheet.write(1+row, col(), requested_manual, red)
            else:
                worksheet.write(1+row, col(), requested_manual)
            manual_checks = len(r.get('manual_checks', []))
            worksheet.write(1+row, col(), manual_checks)
            # Absolute links to the scanned images.
            worksheet.write(1+row, col(), 'http://femida.emsch.ru' + r.get('img_test_form', ""))
            worksheet.write(1+row, col(), 'http://femida.emsch.ru' + r.get('img_fio', ""))
            worksheet.write(1+row, col(), r.get('UUID', ""))
            # The 40 test answers; "F" is stripped from recognised options.
            start = col.current()
            for q in range(1, 41):
                question = Question(
                    q, r.get('test_results', {}).get(str(q), ""), "F",
                    yellow, red
                )
                for update in r.get('test_updates', []):
                    if str(q) in update['updates']:
                        question.update(update['updates'][str(q)])
                worksheet.write(1+row, start+q-1, *question.get_res_style())
                col()  # keep the column counter in sync with the cell written
            worksheet.write(1+row, col(), json.dumps(r, default=json_util.default))
        except Exception as e:
            # A malformed document must not abort the whole export; mark the
            # row with the error instead.
            worksheet.write(1+row, 0, 'ERROR OCCURED: ' + str(e))
    workbook.close()
    output.seek(0)
    # finally return the in-memory workbook as a timestamped download
    attachment_filename = 'femida_%s.xlsx' % datetime.datetime.now().isoformat()[:19]
    return send_file(output, attachment_filename=attachment_filename, as_attachment=True)
| 32.888158 | 96 | 0.561312 |
import datetime
import json
import xlsxwriter
from io import BytesIO
from flask import (
Blueprint,
send_file
)
from bson import json_util
from collections import Counter
mod_export = Blueprint('export', __name__)
from database import mongo
pdfs = mongo.db.pdfs
answers = mongo.db.answers
class Col:
def __init__(self, start_from=0):
self.i = start_from
def __call__(self):
val = self.i
self.i += 1
return val
def current(self):
return self.i
class Question:
def __init__(
self, id_=None, option=None, banned_options="",
yellow=None, red=None,
):
self.id = id_
self.banned_options = banned_options
if option is None:
self.options = Counter()
else:
self.options = Counter([self.clean_option(option)])
self.has_updates = False
self.has_contradicting_updates = False
self.updates = []
self.yellow = yellow
self.red = red
def clean_option(self, option):
for i in self.banned_options:
option = option.replace(i, "")
return option
def update(self, option):
if len(self.updates) > 0 and option not in self.updates:
self.has_contradicting_updates = True
self.updates.append(option)
self.options.update([self.clean_option(option)])
if len(self.options) > 1:
self.has_updates = True
def get_res_style(self):
res = self.get_res()
if not self.has_contradicting_updates:
if self.has_updates:
return res, self.yellow
else:
return res, None
else:
return res, self.red
def get_res(self):
if len(self.options) == 0:
return None
elif len(self.updates) > 0:
return self.updates[-1]
else:
return self.options.most_common(1)[0][0]
@mod_export.route('/export')
def export():
output = BytesIO()
workbook = xlsxwriter.Workbook(output)
worksheet = workbook.add_worksheet()
red = workbook.add_format({'bold': True, 'bg_color': 'red'})
yellow = workbook.add_format({'bold': False, 'bg_color': 'yellow'})
bold = workbook.add_format({'bold': True})
header = ["№", "status", "surname", "name", "patronumic", "class", "type",
"variant", "requested_manual", "manual_checks", "img_test_form",
"img_fio", "UUID"]
header.extend([str(i) for i in range(1, 41)])
header.extend(["raw_json"])
for i, v in enumerate(header):
worksheet.write(0, i, v, bold)
for row, r in enumerate(answers.find()):
col = Col()
try:
worksheet.write(1+row, col(), 1+row)
worksheet.write(1+row, col(), r.get('status', ""))
for field in ['surname', 'name', 'patronymic', 'class', 'type', 'variant']:
question = Question(field, yellow=yellow, red=red)
for personal in r['personal']:
question.update(personal.get(field, ""))
worksheet.write(1+row, col(), *question.get_res_style())
requested_manual = len(r.get('requested_manual', []))
if requested_manual > 0:
worksheet.write(1+row, col(), requested_manual, red)
else:
worksheet.write(1+row, col(), requested_manual)
manual_checks = len(r.get('manual_checks', []))
worksheet.write(1+row, col(), manual_checks)
worksheet.write(1+row, col(), 'http://femida.emsch.ru' + r.get('img_test_form', ""))
worksheet.write(1+row, col(), 'http://femida.emsch.ru' + r.get('img_fio', ""))
worksheet.write(1+row, col(), r.get('UUID', ""))
start = col.current()
for q in range(1, 41):
question = Question(
q, r.get('test_results', {}).get(str(q), ""), "F",
yellow, red
)
for update in r.get('test_updates', []):
if str(q) in update['updates']:
question.update(update['updates'][str(q)])
worksheet.write(1+row, start+q-1, *question.get_res_style())
col()
worksheet.write(1+row, col(), json.dumps(r, default=json_util.default))
except Exception as e:
worksheet.write(1+row, 0, 'ERROR OCCURED: ' + str(e))
workbook.close()
output.seek(0)
attachment_filename = 'femida_%s.xlsx' % datetime.datetime.now().isoformat()[:19]
return send_file(output, attachment_filename=attachment_filename, as_attachment=True)
| true | true |
1c47ea8507d115500d6e829758a110c6bab5ce7d | 4,322 | py | Python | anima/env/fusion/render_merger.py | MehmetErer/anima | f92ae599b5a4c181fc8e131a9ccdde537e635303 | [
"MIT"
] | 101 | 2015-02-08T22:20:11.000Z | 2022-03-21T18:56:42.000Z | anima/env/fusion/render_merger.py | Khosiyat/anima | f631c08400547f49ac5f1feeb730f22c255eb771 | [
"MIT"
] | 23 | 2016-11-30T08:33:21.000Z | 2021-01-26T12:11:12.000Z | anima/env/fusion/render_merger.py | Khosiyat/anima | f631c08400547f49ac5f1feeb730f22c255eb771 | [
"MIT"
] | 27 | 2015-01-03T06:49:45.000Z | 2021-12-28T03:30:54.000Z | # -*- coding: utf-8 -*-
"""Merges sliced renders in to one big plate
"""
try:
# for Fusion 6 and 7
import PeyeonScript as bmf
except ImportError:
# for Fusion 8+
import BlackmagicFusion as bmf
from anima.env.fusion.utils import NodeUtils
class RenderMerger(object):
    """A tool to merge sliced renders back into one full plate.

    A render split into ``slices_in_x * slices_in_y`` tiles (one frame per
    tile in the slice sequence) is reassembled inside the current Fusion
    composition by stacking one Loader + Merge pair per tile over a
    transparent Background of the full plate size.
    """

    def __init__(self, path="", slices_in_x=5, slices_in_y=5, plate_width=0, plate_height=0):
        """
        :param str path: Path to the sliced image sequence.
        :param int slices_in_x: Number of horizontal slices.
        :param int slices_in_y: Number of vertical slices.
        :param int plate_width: Optional full plate width hint.
        :param int plate_height: Optional full plate height hint.
        """
        self.fusion = bmf.scriptapp("Fusion")
        self.comp = self.fusion.GetCurrentComp()
        # Major version only, e.g. "8.2" -> 8.0.
        self.fusion_version = float(self.fusion.GetAttrs("FUSIONS_Version").split(".")[0])
        self.path = path
        self.slices_in_x = slices_in_x
        self.slices_in_y = slices_in_y
        # BUGFIX: these two assignments were cross-swapped (plate_width
        # stored plate_height and vice versa).
        self.plate_width = plate_width
        self.plate_height = plate_height

    def ui(self):
        """Show the dialog asking for the slice sequence and counts, then merge."""
        result = self.comp.AskUser(
            'Chose Slices',
            {
                1: {
                    1: 'Slice Sequence',
                    2: 'FileBrowse',
                    'Save': False
                },
                2: {
                    1: 'Slice In Width',
                    2: 'Slider',
                    'Min': 1,
                    'Max': 10,
                    'Default': 5,
                    'Integer': True,
                },
                3: {
                    1: 'Slice In Height',
                    2: 'Slider',
                    'Min': 1,
                    'Max': 10,
                    'Default': 5,
                    'Integer': True,
                }
            }
        )
        if result is None:
            # User cancelled the dialog; previously this raised a TypeError.
            return
        self.path = result['Slice Sequence']
        self.slices_in_x = int(result['Slice In Width'])
        self.slices_in_y = int(result['Slice In Height'])
        self.do_merge()

    def calculate_total_width_height(self):
        """Calculate the merged plate size from one slice frame.

        Loads the first frame of the slice sequence to read a single tile's
        resolution, then multiplies by the slice counts.

        :return (int, int): Width and height of the resulting plate.
        """
        self.comp.Lock()
        loader = self.comp.Loader()
        self.comp.Unlock()
        NodeUtils.set_node_attr(loader, 'Clip', self.path)
        # Raw Loader input indices: 10 = clip path, 15 = ClipTimeStart,
        # 16 = ClipTimeEnd. NOTE(review): index meanings carried over from
        # the original code -- verify against the installed Fusion version.
        loader.GetInputList()[10][0] = self.path
        loader.GetInputList()[15][0] = 0
        loader.GetInputList()[16][0] = 0
        attrs = loader.GetAttrs()
        plate_width = attrs['TOOLIT_Clip_Width'][1] * self.slices_in_x
        plate_height = attrs['TOOLIT_Clip_Height'][1] * self.slices_in_y
        # NOTE(review): the temporary loader is left in the comp (the
        # original loader.Delete() call was disabled) -- re-enable it if the
        # probe node should be cleaned up.
        return plate_width, plate_height

    def do_merge(self):
        """Build the Loader/Merge node tree that reassembles the slices.

        Slices are consumed in frame order (t = 0, 1, ...), row by row:
        the inner loop walks horizontally, the outer loop vertically, and
        each tile is placed by its normalized center offset.
        """
        width, height = self.calculate_total_width_height()
        bg = self.comp.Background()
        # Full-plate canvas...
        NodeUtils.set_node_attr(bg, "Width", width)
        NodeUtils.set_node_attr(bg, "Height", height)
        # ...made transparent black so tiles composite cleanly over it.
        NodeUtils.set_node_attr(bg, "TopLeftRed", 0)
        NodeUtils.set_node_attr(bg, "TopLeftGreen", 0)
        NodeUtils.set_node_attr(bg, "TopLeftBlue", 0)
        NodeUtils.set_node_attr(bg, "TopLeftAlpha", 0)
        prev_merge = bg
        t = 0
        self.comp.Lock()
        for i in range(self.slices_in_y):
            # vertical position of the current row of tiles
            for j in range(self.slices_in_x):
                # horizontal position of the current tile
                loader = self.comp.Loader()
                NodeUtils.set_node_attr(loader, 'Clip', self.path)
                # Pin the loader to the single frame holding this tile.
                NodeUtils.set_node_attr(loader, "ClipTimeStart", t)
                NodeUtils.set_node_attr(loader, "ClipTimeEnd", t)
                merge = self.comp.Merge()
                # Normalized center of tile (j, i) on the full plate.
                h_offset = 0.5 / self.slices_in_x + j * 1.0 / self.slices_in_x
                v_offset = 0.5 / self.slices_in_y + i * 1.0 / self.slices_in_y
                NodeUtils.set_node_attr(merge, "Center", {
                    1.0: h_offset,
                    2.0: v_offset,
                    3.0: 0.0
                })
                # Chain onto the previous merge's output.
                merge.Background = prev_merge
                merge.Foreground = loader
                prev_merge = merge
                t += 1
        self.comp.Unlock()
| 30.013889 | 93 | 0.519435 |
try:
import PeyeonScript as bmf
except ImportError:
import BlackmagicFusion as bmf
from anima.env.fusion.utils import NodeUtils
class RenderMerger(object):
def __init__(self, path="", slices_in_x=5, slices_in_y=5, plate_width=0, plate_height=0):
self.fusion = bmf.scriptapp("Fusion")
self.comp = self.fusion.GetCurrentComp()
self.fusion_version = float(self.fusion.GetAttrs("FUSIONS_Version").split(".")[0])
self.path = path
self.slices_in_x = slices_in_x
self.slices_in_y = slices_in_y
self.plate_width = plate_height
self.plate_height = plate_width
def ui(self):
result = self.comp.AskUser(
'Chose Slices',
{
1: {
1: 'Slice Sequence',
2: 'FileBrowse',
'Save': False
},
2: {
1: 'Slice In Width',
2: 'Slider',
'Min': 1,
'Max': 10,
'Default': 5,
'Integer': True,
},
3: {
1: 'Slice In Height',
2: 'Slider',
'Min': 1,
'Max': 10,
'Default': 5,
'Integer': True,
}
}
)
self.path = result['Slice Sequence']
self.slices_in_x = int(result['Slice In Width'])
self.slices_in_y = int(result['Slice In Height'])
self.do_merge()
def calculate_total_width_height(self):
self.comp.Lock()
loader = self.comp.Loader()
self.comp.Unlock()
NodeUtils.set_node_attr(loader, 'Clip', self.path)
loader.GetInputList()[10][0] = self.path
loader.GetInputList()[15][0] = 0
loader.GetInputList()[16][0] = 0
attrs = loader.GetAttrs()
plate_width = attrs['TOOLIT_Clip_Width'][1] * self.slices_in_x
plate_height = attrs['TOOLIT_Clip_Height'][1] * self.slices_in_y
return plate_width, plate_height
def do_merge(self):
width, height = self.calculate_total_width_height()
bg = self.comp.Background()
NodeUtils.set_node_attr(bg, "Width", width)
NodeUtils.set_node_attr(bg, "Height", height)
NodeUtils.set_node_attr(bg, "TopLeftRed", 0)
NodeUtils.set_node_attr(bg, "TopLeftGreen", 0)
NodeUtils.set_node_attr(bg, "TopLeftBlue", 0)
NodeUtils.set_node_attr(bg, "TopLeftAlpha", 0)
prev_merge = bg
t = 0
self.comp.Lock()
for i in range(self.slices_in_y):
for j in range(self.slices_in_x):
loader = self.comp.Loader()
NodeUtils.set_node_attr(loader, 'Clip', self.path)
NodeUtils.set_node_attr(loader, "ClipTimeStart", t)
NodeUtils.set_node_attr(loader, "ClipTimeEnd", t)
merge = self.comp.Merge()
h_offset = 0.5 / self.slices_in_x + j * 1.0 / self.slices_in_x
v_offset = 0.5 / self.slices_in_y + i * 1.0 / self.slices_in_y
NodeUtils.set_node_attr(merge, "Center", {
1.0: h_offset,
2.0: v_offset,
3.0: 0.0
})
merge.Background = prev_merge
merge.Foreground = loader
prev_merge = merge
t += 1
self.comp.Unlock()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.