content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
'''
Preprocessor for Foliant documentation authoring tool.
Calls Elasticsearch API to generate an index based on Markdown content.
'''
import re
import json
from os import getenv
from pathlib import Path
from urllib import request
from urllib.error import HTTPError
from markdown import markdown
from bs4 import BeautifulSoup
from foliant.preprocessors.base import BasePreprocessor
class Preprocessor(BasePreprocessor):
    """Elasticsearch preprocessor for Foliant.

    Calls the Elasticsearch REST API to build a search index from the
    project's Markdown content.
    """

    # Default options; each can be overridden from the Foliant project config.
    defaults = {
        'es_url': 'http://127.0.0.1:9200/',  # Elasticsearch endpoint
        'index_name': '',
        'index_copy_name': '',
        'index_properties': {},
        'actions': [
            'delete',
            'create'
        ],
        'use_chapters': True,      # index only files listed in chapters
        'format': 'plaintext',     # plaintext, html, or raw markdown
        'escape_html': True,
        # Raw strings: these patterns contain backslash sequences
        # (\/, \., \g) that are invalid escape sequences in ordinary
        # string literals (DeprecationWarning, later SyntaxError).
        # Values are byte-identical to the previous non-raw literals.
        'url_transform': [
            {r'\/?index\.md$': '/'},
            {r'\.md$': '/'},
            {r'^([^\/]+)': r'/\g<1>'}
        ],
        'require_env': False,
        'targets': []
    }
    def __init__(self, *args, **kwargs):
        """Initialize the preprocessor and attach a named child logger."""
        super().__init__(*args, **kwargs)

        # Child logger so messages are tagged with the preprocessor name.
        self.logger = self.logger.getChild('elasticsearch')

        self.logger.debug(f'Preprocessor inited: {self.__dict__}')
def _get_url(self, markdown_file_path: str) -> str:
url = str(markdown_file_path.relative_to(self.working_dir))
url_transformation_rules = self.options['url_transform']
if not isinstance(url_transformation_rules, list):
url_transformation_rules = [url_transformation_rules]
for url_transformation_rule in url_transformation_rules:
for pattern, replacement in url_transformation_rule.items():
url = re.sub(pattern, replacement, url)
return url
def _get_title(self, markdown_content: str) -> str or None:
headings_found = re.search(
r'^\#{1,6}\s+(.+?)(?:\s+\{\#\S+\})?\s*$',
markdown_content,
flags=re.MULTILINE
)
if headings_found:
return headings_found.group(1)
return None
def _get_chapters_paths(self) -> list:
def _recursive_process_chapters(chapters_subset):
if isinstance(chapters_subset, dict):
processed_chapters_subset = {}
for key, value in chapters_subset.items():
processed_chapters_subset[key] = _recursive_process_chapters(value)
elif isinstance(chapters_subset, list):
processed_chapters_subset = []
for item in chapters_subset:
processed_chapters_subset.append(_recursive_process_chapters(item))
elif isinstance(chapters_subset, str):
if chapters_subset.endswith('.md'):
chapters_paths.append(self.working_dir / chapters_subset)
processed_chapters_subset = chapters_subset
else:
processed_chapters_subset = chapters_subset
return processed_chapters_subset
chapters_paths = []
_recursive_process_chapters(self.config['chapters'])
self.logger.debug(f'Chapters files paths: {chapters_paths}')
return chapters_paths
def _http_request(
self,
request_url: str,
request_method: str = 'GET',
request_headers: dict or None = None,
request_data: bytes or None = None
) -> dict:
http_request = request.Request(request_url, method=request_method)
if request_headers:
http_request.headers = request_headers
if request_data:
http_request.data = request_data
try:
with request.urlopen(http_request) as http_response:
response_status = http_response.getcode()
response_headers = http_response.info()
response_data = http_response.read()
except HTTPError as http_response_not_ok:
response_status = http_response_not_ok.getcode()
response_headers = http_response_not_ok.info()
response_data = http_response_not_ok.read()
return {
'status': response_status,
'headers': response_headers,
'data': response_data
}
def _escape_html(self, content: str) -> str:
return content.replace('&', '&').replace('<', '<').replace('>', '>').replace('"', '"')
    def _create_index(self, index_name: str) -> None:
        """Create the index (if needed) and fill it with page data.

        If ``index_properties`` is set, the index is first created
        explicitly via a PUT request; an already existing index is not
        treated as an error. Markdown files are then converted according
        to the ``format`` option and pushed to the ``_bulk`` endpoint.

        :param index_name: name of the Elasticsearch index
        :raises RuntimeError: if index creation or the bulk update fails
        """
        if self.options['index_properties']:
            create_request_url = f'{self.options["es_url"].rstrip("/")}/{index_name}/'

            self.logger.debug(
                'Calling Elasticsearch API to create an index with specified properties, ' +
                f'URL: {create_request_url}'
            )

            create_response = self._http_request(
                create_request_url,
                'PUT',
                {
                    'Content-Type': 'application/json; charset=utf-8'
                },
                json.dumps(self.options['index_properties'], ensure_ascii=False).encode('utf-8')
            )

            create_response_data = json.loads(create_response['data'].decode('utf-8'))

            self.logger.debug(f'Response received, status: {create_response["status"]}')
            self.logger.debug(f'Response headers: {create_response["headers"]}')
            self.logger.debug(f'Response data: {create_response_data}')

            if create_response['status'] == 200 and create_response_data.get('acknowledged', None) is True:
                self.logger.debug('Index created')
            # A 400 "resource_already_exists_exception" is fine: reuse the index.
            elif create_response['status'] == 400 and create_response_data.get(
                'error', {}
            ).get(
                'type', ''
            ) == 'resource_already_exists_exception':
                self.logger.debug('Index already exists')
            else:
                error_message = 'Failed to create an index'
                self.logger.error(f'{error_message}')
                raise RuntimeError(f'{error_message}')
        else:
            # Without explicit properties, the bulk request below will
            # auto-create the index with Elasticsearch defaults.
            self.logger.debug('An index without specific properties will be created')

        if self.options['use_chapters']:
            self.logger.debug('Only files mentioned in chapters will be indexed')
            markdown_files_paths = self._get_chapters_paths()
        else:
            self.logger.debug('All files of the project will be indexed')
            markdown_files_paths = self.working_dir.rglob('*.md')

        data_for_indexing = ''

        for markdown_file_path in markdown_files_paths:
            self.logger.debug(f'Processing the file: {markdown_file_path}')

            with open(markdown_file_path, encoding='utf8') as markdown_file:
                markdown_content = markdown_file.read()

            if markdown_content:
                url = self._get_url(markdown_file_path)
                title = self._get_title(markdown_content)

                if self.options['format'] == 'html' or self.options['format'] == 'plaintext':
                    self.logger.debug(f'Converting source Markdown content to: {self.options["format"]}')
                    content = markdown(markdown_content)

                    if self.options['format'] == 'plaintext':
                        # Strip all tags; drop style/script bodies entirely.
                        soup = BeautifulSoup(content, 'lxml')

                        for non_text_node in soup(['style', 'script']):
                            non_text_node.extract()

                        content = soup.get_text()

                    if self.options['escape_html']:
                        self.logger.debug('Escaping HTML syntax')

                        if title:
                            title = self._escape_html(title)

                        content = self._escape_html(content)
                else:
                    self.logger.debug('Leaving source Markdown content unchanged')
                    content = markdown_content

                self.logger.debug(f'Adding the page, URL: {url}, title: {title}')

                # Elasticsearch bulk format: an action line, then the document.
                data_for_indexing += '{"index": {}}\n' + json.dumps(
                    {
                        'url': url,
                        'title': title,
                        'content': content
                    },
                    ensure_ascii=False
                ) + '\n'
            else:
                self.logger.debug('It seems that the file has no content')

        self.logger.debug(f'Data for indexing: {data_for_indexing}')

        # ?refresh makes the new documents searchable immediately.
        update_request_url = f'{self.options["es_url"].rstrip("/")}/{index_name}/_bulk?refresh'

        self.logger.debug(f'Calling Elasticsearch API to add the content to the index, URL: {update_request_url}')

        update_response = self._http_request(
            update_request_url,
            'POST',
            {
                'Content-Type': 'application/json; charset=utf-8'
            },
            data_for_indexing.encode('utf-8')
        )

        update_response_data = json.loads(update_response['data'].decode('utf-8'))

        self.logger.debug(f'Response received, status: {update_response["status"]}')
        self.logger.debug(f'Response headers: {update_response["headers"]}')
        self.logger.debug(f'Response data: {update_response_data}')

        # ``errors`` defaults to True so a malformed response counts as failure.
        if update_response['status'] != 200 or update_response_data.get('errors', True):
            error_message = 'Failed to add content to the index'
            self.logger.error(f'{error_message}')
            raise RuntimeError(f'{error_message}')

        return None
def _delete_index(self, index_name: str) -> None:
delete_request_url = f'{self.options["es_url"].rstrip("/")}/{index_name}/'
self.logger.debug(f'Calling Elasticsearch API to delete the index, URL: {delete_request_url}')
delete_response = self._http_request(
delete_request_url,
'DELETE'
)
delete_response_data = json.loads(delete_response['data'].decode('utf-8'))
self.logger.debug(f'Response received, status: {delete_response["status"]}')
self.logger.debug(f'Response headers: {delete_response["headers"]}')
self.logger.debug(f'Response data: {delete_response_data}')
if delete_response['status'] == 200 and delete_response_data.get('acknowledged', None) is True:
self.logger.debug('Index deleted')
elif delete_response['status'] == 404 and delete_response_data.get(
'error', {}
).get(
'type', ''
) == 'index_not_found_exception':
self.logger.debug('Index does not exist')
else:
error_message = 'Failed to delete the index'
self.logger.error(f'{error_message}')
raise RuntimeError(f'{error_message}')
return None
def _update_index_setting(self, index_name: str, settings_to_update: dict) -> None:
update_request_url = f'{self.options["es_url"].rstrip("/")}/{index_name}/_settings/'
self.logger.debug(f'Calling Elasticsearch API to update the index settings, URL: {update_request_url}')
update_response = self._http_request(
update_request_url,
'PUT',
{
'Content-Type': 'application/json; charset=utf-8'
},
json.dumps(
settings_to_update,
ensure_ascii=False
).encode('utf-8')
)
update_response_data = json.loads(update_response['data'].decode('utf-8'))
self.logger.debug(f'Response received, status: {update_response["status"]}')
self.logger.debug(f'Response headers: {update_response["headers"]}')
self.logger.debug(f'Response data: {update_response_data}')
if update_response['status'] == 200 and update_response_data.get('acknowledged', None) is True:
self.logger.debug('Index settings updated')
else:
error_message = 'Failed to update the index settings'
self.logger.error(f'{error_message}')
raise RuntimeError(f'{error_message}')
return None
def _clone_index(self, index_name: str, index_copy_name: str) -> None:
clone_request_url = f'{self.options["es_url"].rstrip("/")}/{index_name}/_clone/{index_copy_name}/'
self.logger.debug(f'Calling Elasticsearch API to clone the index, URL: {clone_request_url}')
clone_response = self._http_request(
clone_request_url,
'POST'
)
clone_response_data = json.loads(clone_response['data'].decode('utf-8'))
self.logger.debug(f'Response received, status: {clone_response["status"]}')
self.logger.debug(f'Response headers: {clone_response["headers"]}')
self.logger.debug(f'Response data: {clone_response_data}')
if clone_response['status'] == 200 and clone_response_data.get('acknowledged', None) is True:
self.logger.debug('Index cloned')
else:
error_message = 'Failed to clone the index'
self.logger.error(f'{error_message}')
raise RuntimeError(f'{error_message}')
return None
def _copy_index(self, index_name: str, index_copy_name: str) -> None:
if not index_copy_name:
index_copy_name = index_name + '_copy'
self.logger.debug(f'Copying the index {index_name} to {index_copy_name}')
self.logger.debug(f'First, marking the index {index_name} as read-only')
self._update_index_setting(
index_name,
{
'settings': {
'index.blocks.write': True
}
}
)
self.logger.debug(f'Second, deleting the index {index_copy_name}, if exists')
self._delete_index(index_copy_name)
self.logger.debug(f'Third, cloning the index {index_name} as {index_copy_name}')
self._clone_index(index_name, index_copy_name)
self.logger.debug(f'Fourth, unmarking the index {index_name} as read-only')
self._update_index_setting(
index_name,
{
'settings': {
'index.blocks.write': False
}
}
)
self.logger.debug(f'Fifth, also unmarking the index {index_copy_name} as read-only')
self._update_index_setting(
index_copy_name,
{
'settings': {
'index.blocks.write': False
}
}
)
return None
def apply(self):
self.logger.info('Applying preprocessor')
envvar = 'FOLIANT_ELASTICSEARCH'
if not self.options['require_env'] or getenv(envvar) is not None:
self.logger.debug(
f'Allowed targets: {self.options["targets"]}, ' +
f'current target: {self.context["target"]}'
)
if not self.options['targets'] or self.context['target'] in self.options['targets']:
actions = self.options['actions']
if not isinstance(self.options['actions'], list):
actions = [actions]
for action in actions:
self.logger.debug(f'Applying action: {action}')
if action == 'create':
self._create_index(self.options['index_name'])
elif action == 'delete':
self._delete_index(self.options['index_name'])
elif action == 'copy':
self._copy_index(self.options['index_name'], self.options['index_copy_name'])
else:
self.logger.debug('Unknown action, skipping')
else:
self.logger.debug(f'Environment variable {envvar} is not set, skipping')
self.logger.info('Preprocessor applied')
| foliant/preprocessors/elasticsearch.py | 15,822 | Preprocessor for Foliant documentation authoring tool.
Calls Elasticsearch API to generate an index based on Markdown content. | 127 | en | 0.676076 |
"""
Settings for different models.
"""
import attr
@attr.s  # pylint: disable=too-many-instance-attributes,too-few-public-methods
class BaseSettings:
    """
    Base configuration settings.

    Options shared by all model-specific settings subclasses.
    """

    # val_split / test_split: presumably fractions of the data held out
    # for validation and testing — TODO confirm against the data loader.
    val_split = attr.ib(default=0.0)
    test_split = attr.ib(default=0.15)
    detect_type = attr.ib(default="dual")
    verbose = attr.ib(default=0)
    # The following three are marked deprecated in their metadata and
    # kept for backward compatibility only.
    print_summary = attr.ib(default=False, metadata={"deprecated": True})
    plot_model = attr.ib(default=False, metadata={"deprecated": True})
    plot_dpi = attr.ib(default=400, metadata={"deprecated": True})
    # Directory for generated artifacts.
    output_path = attr.ib(default="./output")
@attr.s  # pylint: disable=too-many-instance-attributes,too-few-public-methods
class NeuralSettings(BaseSettings):
    """
    Neural model settings.
    """

    language = attr.ib(default="")
    series = attr.ib(default="")
    batch_size = attr.ib(default=32)
    skip_step = attr.ib(default=5)
    token_maxlen = attr.ib(default=30)  # maximum token length in symbols
    model_type = attr.ib(default="recurrent")  # recurrent
    fraction = attr.ib(default=0.995)  # For Native model.
    prediction_policy = attr.ib(default="zero")  # zero, accuracy, fscore
    # Beta for the F-score when prediction_policy == "fscore".
    fscore_beta = attr.ib(default=1.0)
@attr.s  # pylint: disable=too-many-instance-attributes,too-few-public-methods
class EntropiesSettings(NeuralSettings):
    """
    Entropy model settings.
    """

    # While not strictly a child of NeuralSettings, it seems more convenient.
    tf_verbose = attr.ib(default=0)  # presumably TensorFlow verbosity — TODO confirm
    basis = attr.ib(default="all")
@attr.s  # pylint: disable=too-many-instance-attributes,too-few-public-methods
class RecurrentSettings(EntropiesSettings):
    """
    Recurrent model settings.
    """

    # Architecture parameters
    embedding_len = attr.ib(default=32)
    rnn_output_len = attr.ib(default=32)
    rnn_cell_type = attr.ib(default="GRU")  # GRU, LSTM
    rnn_levels = attr.ib(default=1)  # 1, 2
    # Dropout and regulation parameters
    embedding_dropout = attr.ib(default=0.0)
    recurrent_l2 = attr.ib(default=0.001)
    rnn_activity_l2 = attr.ib(default=0.0)
    recurrent_dropout = attr.ib(default=0.0)
    rnn_output_dropout = attr.ib(default=0.2)
    merge_embedding_dropout = attr.ib(default=0.2)
    # Model fitting parameters
    epochs = attr.ib(default=45)
    learning_rate = attr.ib(default=0.01)
    learning_rate_decay = attr.ib(default=0.95)  # Adjust for batch size, data len.
    restore_best_weights = attr.ib(default=True)
@attr.s  # pylint: disable=too-many-instance-attributes,too-few-public-methods
class MarkovSettings(BaseSettings):
    """
    Markov model settings.
    """

    # Smoothing model name; "kni" presumably Kneser–Ney interpolated — TODO confirm.
    model = attr.ib(default="kni")
    order = attr.ib(default=3)  # n-gram order
    p = attr.ib(default=0.995)  # pylint: disable=invalid-name
    smoothing = attr.ib(default=0.3)
| src/pybor/config.py | 2,765 | Base configuration settings.
Entropy model settings.
Markov model settings.
Neural model settings.
Recurrent model settings.
Settings for different models.
pylint: disable=too-many-instance-attributes,too-few-public-methods pylint: disable=too-many-instance-attributes,too-few-public-methods recurrent For Native model. zero, accuracy, fscore pylint: disable=too-many-instance-attributes,too-few-public-methods While not strictly a child of NeuralSettings, it seems more convenient. pylint: disable=too-many-instance-attributes,too-few-public-methods Architecture parameters GRU, LSTM 1, 2 Dropout and regulation parameters Model fitting parameters Adjust for batch size, data len. pylint: disable=too-many-instance-attributes,too-few-public-methods pylint: disable=invalid-name | 780 | en | 0.676754 |
#!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
# Parsed key=value configuration, populated by the __main__ block below.
settings = {}
class BitcoinRPC:
    """Minimal JSON-RPC client for bitcoind (Python 2, httplib-based)."""

    def __init__(self, host, port, username, password):
        # Build the HTTP Basic auth header from the RPC credentials.
        credentials = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(credentials))
        # host, port, strict=False, timeout=30 seconds.
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def execute(self, obj):
        """POST a JSON-RPC object (or batch) and return the decoded reply."""
        headers = {'Authorization': self.authhdr,
                   'Content-type': 'application/json'}
        self.conn.request('POST', '/', json.dumps(obj), headers)

        resp = self.conn.getresponse()
        if resp is None:
            print("JSON-RPC: no response", file=sys.stderr)
            return None

        return json.loads(resp.read())

    @staticmethod
    def build_request(idx, method, params):
        """Build a single JSON-RPC 1.1 request object."""
        return {'version': '1.1',
                'method': method,
                'id': idx,
                'params': params if params is not None else []}

    @staticmethod
    def response_is_error(resp_obj):
        """True when the reply carries a non-null ``error`` member."""
        return resp_obj.get('error') is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
    """Print the hash of every block in [min_height, max_height], in order."""
    rpc = BitcoinRPC(settings['host'], settings['port'],
                     settings['rpcuser'], settings['rpcpassword'])

    height = settings['min_height']
    end_height = settings['max_height'] + 1

    while height < end_height:
        num_blocks = min(end_height - height, max_blocks_per_call)

        # One JSON-RPC batch per chunk keeps the request count low.
        batch = [rpc.build_request(idx, 'getblockhash', [height + idx])
                 for idx in range(num_blocks)]

        reply = rpc.execute(batch)

        for idx, resp_obj in enumerate(reply):
            if rpc.response_is_error(resp_obj):
                print('JSON-RPC: error at height', height + idx, ': ', resp_obj['error'], file=sys.stderr)
                exit(1)
            assert(resp_obj['id'] == idx)  # assume replies are in-sequence
            print(resp_obj['result'])

        height += num_blocks
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: linearize-hashes.py CONFIG-FILE")
        sys.exit(1)

    # Parse the simple key=value config file; the file is closed by the
    # context manager (the original leaked it on early exceptions).
    with open(sys.argv[1]) as f:
        for line in f:
            # skip comment lines
            if re.search(r'^\s*#', line):
                continue

            # parse key=value lines
            m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
            if m is None:
                continue
            settings[m.group(1)] = m.group(2)

    # Fill in defaults for anything the config did not provide.
    settings.setdefault('host', '127.0.0.1')
    settings.setdefault('port', 32585)
    settings.setdefault('min_height', 0)
    settings.setdefault('max_height', 313000)

    if 'rpcuser' not in settings or 'rpcpassword' not in settings:
        # Bug fix: the original wrote ``file=stderr`` (a NameError, since
        # only the ``sys`` module is imported) — use sys.stderr.
        print("Missing username and/or password in cfg file", file=sys.stderr)
        sys.exit(1)

    settings['port'] = int(settings['port'])
    settings['min_height'] = int(settings['min_height'])
    settings['max_height'] = int(settings['max_height'])

    get_block_hashes(settings)
| contrib/linearize/linearize-hashes.py | 3,037 | !/usr/bin/python linearize-hashes.py: List blocks in a linear, no-fork version of the chain. Copyright (c) 2013-2014 The Bitcoin developers Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. assume replies are in-sequence skip comment lines parse key=value lines | 349 | en | 0.711358 |
from rest_framework import parsers, renderers, status
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.compat import coreapi, coreschema
from rest_framework.generics import DestroyAPIView, RetrieveAPIView
from rest_framework.permissions import IsAdminUser, IsAuthenticated
from rest_framework.response import Response
from rest_framework.schemas import ManualSchema
from rest_framework.views import APIView
from django.contrib.auth import get_user_model
from django.contrib.auth import login as auth_login
from django.contrib.auth import logout as auth_logout
from django.contrib.auth import views as auth_views
from django.contrib.auth.views import LoginView as AuthLoginView
from django.contrib.auth.views import LogoutView as AuthLogoutView
from django.contrib.sites.shortcuts import get_current_site
from django.core import signing
from django.http import HttpResponseRedirect
from django.shortcuts import redirect
from django.template.loader import render_to_string
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic import FormView, TemplateView
import auditor
import conf
from api.users.forms import RegistrationForm
from api.users.utils import login_user, logout_user
from api.utils.views.post import PostAPIView
from db.models.tokens import Token
from event_manager.events.superuser import SUPERUSER_ROLE_GRANTED, SUPERUSER_ROLE_REVOKED
from event_manager.events.user import USER_ACTIVATED, USER_DELETED
from schemas.user import UserConfig
from signals import users as users_signals
class ObtainAuthToken(APIView):
    """Issue an auth token for valid username/password credentials.

    Throttling and permission checks are disabled so anonymous users
    can authenticate.
    """

    throttle_classes = ()
    permission_classes = ()
    parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
    renderer_classes = (renderers.JSONRenderer,)
    serializer_class = AuthTokenSerializer

    # Manual coreapi schema describing the username/password form fields;
    # built only when the optional coreapi/coreschema packages are present.
    if coreapi is not None and coreschema is not None:
        schema = ManualSchema(
            fields=[
                coreapi.Field(
                    name="username",
                    required=True,
                    location='form',
                    schema=coreschema.String(
                        title="Username",
                        description="Valid username for authentication",
                    ),
                ),
                coreapi.Field(
                    name="password",
                    required=True,
                    location='form',
                    schema=coreschema.String(
                        title="Password",
                        description="Valid password for authentication",
                    ),
                ),
            ],
            encoding="application/json",
        )

    def post(self, request, *args, **kwargs):
        """Validate credentials and return the user's token key."""
        serializer = self.serializer_class(data=request.data,
                                           context={'request': request})
        serializer.is_valid(raise_exception=True)
        user = serializer.validated_data['user']
        # get_or_create: reuse an existing token rather than rotating it.
        token, _ = Token.objects.get_or_create(user=user)
        return Response({'token': token.key})


obtain_auth_token = ObtainAuthToken.as_view()
class AuthTokenLogin(ObtainAuthToken):
    """Login user and return user's token."""

    def post(self, request, *args, **kwargs):
        """Authenticate, refresh an expired token, and optionally log in.

        When the request carries a truthy ``login`` field, a session is
        opened and the token/username are also set as cookies.
        """
        serializer = self.serializer_class(data=request.data,
                                           context={'request': request})
        serializer.is_valid(raise_exception=True)
        user = serializer.validated_data['user']
        token, created = Token.objects.get_or_create(user=user)

        # A pre-existing token may have expired; refresh it in place.
        if not created and token.is_expired:
            token.refresh()

        response = Response({'token': token.key})

        if request.data.get('login'):
            auth_login(self.request, user)
            response.set_cookie('token', value=token.key)
            response.set_cookie('user', value=user.username)

        return response
class AuthTokenLogout(APIView):
    """End the session and clear the token/user auth cookies."""

    throttle_classes = ()
    permission_classes = ()

    def get(self, request, *args, **kwargs):
        """Log the user out and expire the auth cookies."""
        auth_logout(request)
        response = Response()
        response.delete_cookie('token')
        response.delete_cookie('user')
        return response
class RefreshSessionView(APIView):
    """Re-login the current authenticated user to renew the session."""

    permission_classes = (IsAuthenticated,)

    def post(self, request, *args, **kwargs):
        # Logging in again resets the session for the current user.
        auth_login(self.request, request.user)
        return Response()
class LoginView(AuthLoginView):
    """Django auth login view extended with project login handling."""

    @method_decorator(sensitive_post_parameters())
    @method_decorator(csrf_protect)
    @method_decorator(never_cache)
    def dispatch(self, request, *args, **kwargs):
        # Already-authenticated users are sent straight to the home page.
        if not request.user.is_anonymous:
            return HttpResponseRedirect('/')
        response = super().dispatch(request, *args, **kwargs)
        # login=False: the session login already happened in the parent
        # view; login_user presumably only augments the response (e.g.
        # cookies) — TODO confirm against api.users.utils.login_user.
        login_user(request=request, response=response, user=request.user, login=False)
        return response
class LogoutView(AuthLogoutView):
    """Django auth logout view extended with project logout handling."""

    @method_decorator(never_cache)
    def dispatch(self, request, *args, **kwargs):
        response = super().dispatch(request, *args, **kwargs)
        # logout=False: the session logout already happened in the parent
        # view; logout_user presumably only cleans the response (e.g.
        # cookies) — TODO confirm against api.users.utils.logout_user.
        logout_user(request=request, response=response, logout=False)
        return response
class RegistrationView(FormView):
    """Register a new (inactive) user account, generate an activation key and email it to the user.

    This is different from the model-based activation workflow in that
    the activation key is the username, signed using Django's
    TimestampSigner, with HMAC verification on activation.
    """

    form_class = RegistrationForm
    template_name = 'users/register.html'
    email_body_template = 'users/activation_email.txt'
    email_subject_template = 'users/activation_email_subject.txt'
    success_url = 'users:registration_complete'

    # Salt namespacing the signatures; must match ActivationView.key_salt.
    key_salt = 'users.tokens.RegistrationView'

    def form_valid(self, form):
        """Register the user and redirect to the success page."""
        self.register(form)
        return redirect(self.get_success_url())

    def register(self, form):
        """Create the inactive user and emit the ``user_registered`` signal."""
        new_user = self.create_inactive_user(form)
        users_signals.user_registered.send(
            sender=self.__class__,
            user=new_user,
            request=self.request)
        return new_user

    def create_inactive_user(self, form):
        """
        Create the inactive user account and send an email containing
        activation instructions.
        """
        new_user = form.save(commit=False)
        new_user.is_active = False
        new_user.save()
        self.send_activation_email(new_user)
        return new_user

    def get_activation_key(self, user):
        """
        Generate the activation key which will be emailed to the user.
        """
        return signing.dumps(
            obj=getattr(user, user.USERNAME_FIELD),
            salt=self.key_salt
        )

    def get_email_context(self, activation_key):
        """
        Build the template context used for the activation email.
        """
        return {
            'activation_key': activation_key,
            'expiration_days': conf.get('ACCOUNT_ACTIVATION_DAYS'),
            'site': get_current_site(self.request)
        }

    def send_activation_email(self, user):
        """
        Send the activation email. The activation key is the username,
        signed using TimestampSigner.
        """
        activation_key = self.get_activation_key(user)
        context = self.get_email_context(activation_key)
        context.update({
            'user': user
        })
        subject = render_to_string(self.email_subject_template,
                                   context)
        # Force subject to a single line to avoid header-injection
        # issues.
        subject = ''.join(subject.splitlines())
        message = render_to_string(self.email_body_template,
                                   context)
        user.email_user(subject, message, conf.get('DEFAULT_FROM_EMAIL'))
class SimpleRegistrationView(RegistrationView):
    """Registration and validation through a superuser."""

    form_class = RegistrationForm
    template_name = 'users/register.html'

    def create_inactive_user(self, form):
        """Create the inactive user account and wait for validation from superuser"""
        new_user = form.save(commit=False)
        new_user.is_active = False
        new_user.save()
        # No activation email: a superuser activates the account manually.
        return new_user

    def get(self, request, *args, **kwargs):
        """Send already-authenticated users to the home page instead."""
        if not request.user.is_anonymous:
            return HttpResponseRedirect('/')
        return super().get(request, *args, **kwargs)
class ActivationView(TemplateView):
    """
    Given a valid activation key, activate the user's
    account. Otherwise, show an error message stating the account
    couldn't be activated.
    """

    template_name = 'users/activate.html'
    success_url = 'users:registration_activation_complete'

    # Must match RegistrationView.key_salt for signatures to verify.
    key_salt = 'users.tokens.RegistrationView'

    def activate(self, *args, **kwargs):
        """Validate the key and activate the matching user.

        Returns the activated user, or ``False`` when the key is
        invalid/expired or no matching inactive user exists.
        """
        # This is safe even if, somehow, there's no activation key,
        # because unsign() will raise BadSignature rather than
        # TypeError on a value of None.
        username = self.validate_key(kwargs.get('activation_key'))
        if username is not None:
            user = self.get_user(username)
            if user is not None:
                user.is_active = True
                user.save()
                return user
        return False

    def validate_key(self, activation_key):
        """
        Verify that the activation key is valid and within the
        permitted activation time window, returning the username if
        valid or ``None`` if not.
        """
        try:
            username = signing.loads(
                activation_key,
                salt=self.key_salt,
                max_age=conf.get('ACCOUNT_ACTIVATION_DAYS') * 86400
            )
            return username
        # SignatureExpired is a subclass of BadSignature, so this will
        # catch either one.
        except signing.BadSignature:
            return None

    def get_user(self, username):
        """
        Given the verified username, look up and return the
        corresponding user account if it exists, or ``None`` if it
        doesn't.
        """
        User = get_user_model()  # noqa
        try:
            # Only inactive accounts qualify for activation.
            user = User.objects.get(**{
                User.USERNAME_FIELD: username,
                'is_active': False
            })
            return user
        except User.DoesNotExist:
            return None

    def get(self, request, *args, **kwargs):
        """The base activation logic; subclasses should leave this method
        alone and implement activate(), which is called from this method.
        """
        activated_user = self.activate(*args, **kwargs)
        if activated_user:
            users_signals.user_activated.send(
                sender=self.__class__,
                user=activated_user,
                request=request
            )
            return redirect(self.success_url)
        # Invalid key: render the template with an error state.
        return super().get(request, *args, **kwargs)
class PasswordResetView(auth_views.PasswordResetView):
    """Standard Django password reset flow using project templates."""

    template_name = 'users/password_reset.html'
    subject_template_name = 'users/password_reset_subject.txt'
    email_template_name = 'users/password_reset_body.txt'
    success_url = reverse_lazy('users:password_reset_done')
class TokenView(TemplateView):
    """Render a page showing the current user's API token."""

    template_name = 'users/token.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Create a token on first visit; reuse it on later visits.
        token, _ = Token.objects.get_or_create(user=self.request.user)
        context['token'] = token.key
        return context
class UserView(RetrieveAPIView):
    """Get user details."""

    def retrieve(self, request, *args, **kwargs):
        # Always serializes the requesting user; no pk/username lookup.
        user = request.user
        return Response(UserConfig.obj_to_dict(user))
class ActivateView(PostAPIView):
    """Activate user."""

    queryset = get_user_model().objects.filter()
    # Admin-only endpoint.
    permission_classes = (IsAuthenticated, IsAdminUser,)
    lookup_field = 'username'

    def post(self, request, *args, **kwargs):
        """Mark the target user active and record an audit event."""
        user = self.get_object()
        user.is_active = True
        user.save()
        auditor.record(event_type=USER_ACTIVATED,
                       instance=user,
                       actor_id=self.request.user.id,
                       actor_name=self.request.user.username)
        return Response(status=status.HTTP_200_OK)
class DeleteView(DestroyAPIView):
    """Delete user."""

    # NOTE(review): this is the model class itself, while ActivateView
    # uses a queryset (``.objects.filter()``); Django's get_object_or_404
    # accepts both, but consider unifying the style.
    queryset = get_user_model()
    permission_classes = (IsAuthenticated, IsAdminUser,)
    lookup_field = 'username'

    def destroy(self, request, *args, **kwargs):
        """Record an audit event, then delete the user."""
        instance = self.get_object()
        # Audit before destruction, while the instance still exists.
        auditor.record(event_type=USER_DELETED,
                       instance=instance,
                       actor_id=self.request.user.id,
                       actor_name=self.request.user.username)
        self.perform_destroy(instance)
        return Response(status=status.HTTP_204_NO_CONTENT)
class GrantSuperuserView(PostAPIView):
    """Grant a user the superuser role."""

    queryset = get_user_model()
    permission_classes = (IsAuthenticated, IsAdminUser,)
    lookup_field = 'username'

    def post(self, request, *args, **kwargs):
        """Set both staff and superuser flags and record an audit event."""
        user = self.get_object()
        user.is_staff = True
        user.is_superuser = True
        user.save()
        auditor.record(event_type=SUPERUSER_ROLE_GRANTED,
                       instance=user,
                       actor_id=self.request.user.id,
                       actor_name=self.request.user.username)
        return Response(status=status.HTTP_200_OK)
class RevokeSuperuserView(PostAPIView):
    """Revoke the superuser role from user."""

    queryset = get_user_model()
    permission_classes = (IsAuthenticated, IsAdminUser,)
    lookup_field = 'username'

    def post(self, request, *args, **kwargs):
        """Clear both staff and superuser flags and record an audit event."""
        user = self.get_object()
        user.is_staff = False
        user.is_superuser = False
        user.save()
        auditor.record(event_type=SUPERUSER_ROLE_REVOKED,
                       instance=user,
                       actor_id=self.request.user.id,
                       actor_name=self.request.user.username)
        return Response(status=status.HTTP_200_OK)
| polyaxon/api/users/views.py | 14,377 | Activate user.
Given a valid activation key, activate the user's
account. Otherwise, show an error message stating the account
couldn't be activated.
Login user and return user's token.
Delete user.
Grant a user the superuser role.
Register a new (inactive) user account, generate an activation key and email it to the user.
This is different from the model-based activation workflow in that
the activation key is the username, signed using Django's
TimestampSigner, with HMAC verification on activation.
Revoke the superuser role from user.
Registration and validation through a superuser.
Get user details.
Create the inactive user account and send an email containing
activation instructions.
Create the inactive user account and wait for validation from superuser
The base activation logic; subclasses should leave this method
alone and implement activate(), which is called from this method.
Generate the activation key which will be emailed to the user.
Build the template context used for the activation email.
Given the verified username, look up and return the
corresponding user account if it exists, or ``None`` if it
doesn't.
Send the activation email. The activation key is the username,
signed using TimestampSigner.
Verify that the activation key is valid and within the
permitted activation time window, returning the username if
valid or ``None`` if not.
Force subject to a single line to avoid header-injection issues. This is safe even if, somehow, there's no activation key, because unsign() will raise BadSignature rather than TypeError on a value of None. SignatureExpired is a subclass of BadSignature, so this will catch either one. noqa | 1,663 | en | 0.832338 |
# encoding=utf8
"""Implementations of Cosine mixture functions."""
from numpy import cos, pi
from NiaPy.benchmarks.benchmark import Benchmark
__all__ = ['CosineMixture']
class CosineMixture(Benchmark):
	r"""Implementation of the Cosine mixture function.

	Date: 2018

	Author: Klemen Berkovič

	License: MIT

	Function:
	**Cosine Mixture Function**

		:math:`f(\textbf{x}) = - 0.1 \sum_{i = 1}^D \cos (5 \pi x_i) - \sum_{i = 1}^D x_i^2`

		**Input domain:**
		The function can be defined on any input domain but it is usually
		evaluated on the hypercube :math:`x_i ∈ [-1, 1]`, for all :math:`i = 1, 2,..., D`.

		**Global maximum:**
		:math:`f(x^*) = -0.1 D`, at :math:`x^* = (0.0,...,0.0)`

	LaTeX formats:
		Inline:
			$f(\textbf{x}) = - 0.1 \sum_{i = 1}^D \cos (5 \pi x_i) - \sum_{i = 1}^D x_i^2$

		Equation:
			\begin{equation} f(\textbf{x}) = - 0.1 \sum_{i = 1}^D \cos (5 \pi x_i) - \sum_{i = 1}^D x_i^2 \end{equation}

		Domain:
			$-1 \leq x_i \leq 1$

	Reference:
		http://infinity77.net/global_optimization/test_functions_nd_C.html#go_benchmark.CosineMixture
	"""
	Name = ['CosineMixture']

	def __init__(self, Lower=-1.0, Upper=1.0):
		r"""Initialize Cosine mixture benchmark.

		Args:
			Lower (Optional[float]): Lower bound of problem.
			Upper (Optional[float]): Upper bound of problem.

		See Also:
			:func:`NiaPy.benchmarks.Benchmark.__init__`
		"""
		Benchmark.__init__(self, Lower, Upper)

	@staticmethod
	def latex_code():
		r"""Return the latex code of the problem.

		Returns:
			str: Latex code
		"""
		return r'''$f(\textbf{x}) = - 0.1 \sum_{i = 1}^D \cos (5 \pi x_i) - \sum_{i = 1}^D x_i^2$'''

	def function(self):
		r"""Return benchmark evaluation function.

		Returns:
			Callable[[int, Union[int, float, List[float], numpy.ndarray]], float]: Fitness function
		"""
		def f(D, X):
			r"""Fitness function.

			Args:
				D (int): Dimensionality of the problem.
				X (Union[int, float, List[float], numpy.ndarray]): Solution to check.

			Returns:
				float: Fitness value for the solution.
			"""
			# f(x) = -0.1 * sum_i cos(5 * pi * x_i) - sum_i x_i^2
			return -0.1 * sum(cos(5 * pi * X[i]) for i in range(D)) - sum(X[i] ** 2 for i in range(D))
		return f
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
| NiaPy/benchmarks/cosinemixture.py | 2,230 | Implementations of Cosine mixture function.
Date: 2018
Author: Klemen Berkovič
License: MIT
Function:
**Cosine Mixture Function**
:math:`f(\textbf{x}) = - 0.1 \sum_{i = 1}^D \cos (5 \pi x_i) - \sum_{i = 1}^D x_i^2`
**Input domain:**
The function can be defined on any input domain but it is usually
evaluated on the hypercube :math:`x_i ∈ [-1, 1]`, for all :math:`i = 1, 2,..., D`.
**Global maximum:**
:math:`f(x^*) = -0.1 D`, at :math:`x^* = (0.0,...,0.0)`
LaTeX formats:
Inline:
$f(\textbf{x}) = - 0.1 \sum_{i = 1}^D \cos (5 \pi x_i) - \sum_{i = 1}^D x_i^2$
Equation:
\begin{equation} f(\textbf{x}) = - 0.1 \sum_{i = 1}^D \cos (5 \pi x_i) - \sum_{i = 1}^D x_i^2 \end{equation}
Domain:
$-1 \leq x_i \leq 1$
Reference:
http://infinity77.net/global_optimization/test_functions_nd_C.html#go_benchmark.CosineMixture
Initialize of Cosine mixture benchmark.
Args:
Lower (Optional[float]): Lower bound of problem.
Upper (Optional[float]): Upper bound of problem.
See Also:
:func:`NiaPy.benchmarks.Benchmark.__init__`
Fitness function.
Args:
D (int): Dimensionality of the problem
sol (Union[int, float, List[int, float], numpy.ndarray]): Solution to check.
Returns:
float: Fitness value for the solution.
Return benchmark evaluation function.
Returns:
Callable[[int, Union[int, float, List[int, float], numpy.ndarray]], float]: Fitness function
Return the latex code of the problem.
Returns:
str: Latex code
Implementations of Cosine mixture functions.
encoding=utf8 vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3 | 1,740 | en | 0.498401 |
# Read PV metadata and timeseries data
# Based on code in https://github.com/openclimatefix/pvoutput
# E.g. https://nbviewer.jupyter.org/github/openclimatefix/pvoutput/blob/master/examples/analyse_PV_data_for_9th_Aug_2019.ipynb
import cartopy.crs as ccrs
import numpy as np
import pandas as pd
import xarray as xr
METADATA_FILENAME = "data/PV/PVOutput.org/UK_PV_metadata.csv"
PV_STATS_FILENAME = "data/PV/PVOutput.org/UK_PV_stats.csv"
TIMESERIES_FILENAME = "data/PV/PVOutput.org/UK_PV_timeseries_batch.nc"
START_DATE = "2019-08-09"
END_DATE = "2019-08-09"
def load_pv_systems(
    metadata_filename: str = METADATA_FILENAME,
    stats_filename: str = PV_STATS_FILENAME,
    timeseries_filename: str = TIMESERIES_FILENAME,
) -> xr.Dataset:
    """Load metadata about PV systems"""
    # Metadata and per-system statistics, both indexed by system_id.
    metadata = pd.read_csv(metadata_filename, index_col="system_id")
    stats = pd.read_csv(
        stats_filename,
        index_col="system_id",
        parse_dates=["actual_date_from", "actual_date_to", "record_efficiency_date"],
    )
    # Join the salient stats columns onto the metadata.
    systems = metadata.join(
        stats[["actual_date_from", "actual_date_to", "outputs"]], how="left"
    )
    # Drop systems with only a few outputs, or without a location.
    systems = systems.query("status_interval_minutes <= 60 and outputs > 100")
    systems = systems.dropna(subset=["latitude", "longitude"])
    # Keep only systems that also appear in the timeseries file.
    systems = systems.join(
        _get_system_ids_dataframe_from_timeseries(timeseries_filename), how="inner"
    )
    # Retain the salient columns and convert to xarray.
    systems = systems[["system_name", "latitude", "longitude"]]
    dataset = xr.Dataset.from_dataframe(systems)
    # Add easting/northing coordinates derived from latitude/longitude.
    return _transform_pv_systems(dataset)
def _get_system_ids_dataframe_from_timeseries(
    timeseries_filename: str = TIMESERIES_FILENAME,
) -> pd.DataFrame:
    """Get all the PV system IDs from the timeseries file"""
    dataset = xr.open_dataset(timeseries_filename)
    # Each data variable in the netCDF file is named after a system ID.
    ids = [int(name) for name in dataset.data_vars.keys()]
    return pd.DataFrame({"system_id": ids}).set_index("system_id")
def _transform_pv_systems(pv_systems: xr.Dataset) -> xr.Dataset:
    """Transform the system locations into the same coordinate system used by UKV"""
    latitudes = pv_systems["latitude"].values
    longitudes = pv_systems["longitude"].values
    # Project WGS84 lat/lon points onto the OSGB grid; the transform returns
    # an (n, 3) array, so drop the trailing z column.
    locations = ccrs.OSGB(approx=False).transform_points(
        src_crs=ccrs.Geodetic(),
        x=np.asanyarray(longitudes),
        y=np.asanyarray(latitudes),
    )[:, :-1]
    return pv_systems.assign_coords(
        {
            "easting": (["system_id"], locations[:, 0].astype("int32")),
            "northing": (["system_id"], locations[:, 1].astype("int32")),
        }
    )
# This is unused, but a useful check
def _transform_pv_systems_pyproj(pv_systems: xr.Dataset) -> xr.Dataset:
    """Transform the system locations into the same coordinate system used by UKV, using pyproj.

    Cross-check implementation for :func:`_transform_pv_systems`; unused by the
    production path.
    """
    import pyproj

    system_latitudes, system_longitudes = (
        pv_systems["latitude"].values,
        pv_systems["longitude"].values,
    )
    # EPSG:4326 is WGS84 lat/lon; EPSG:27700 is the OSGB (UKV) grid.
    transformer = pyproj.Transformer.from_crs("epsg:4326", "epsg:27700", always_xy=True)
    locs = transformer.transform(
        np.asanyarray(system_longitudes), np.asanyarray(system_latitudes)
    )
    # NOTE: a leftover debug print(locs) was removed here.
    new_coords = {
        "easting": (["system_id"], locs[0]),
        "northing": (["system_id"], locs[1]),
    }
    return pv_systems.assign_coords(new_coords)
def load_pv_timeseries(
    start_date: str,
    end_date: str,
    metadata_filename: str = METADATA_FILENAME,
    stats_filename: str = PV_STATS_FILENAME,
    timeseries_filename: str = TIMESERIES_FILENAME,
) -> xr.Dataset:
    """Load the PV timeseries as an xarray dataset, restricted to a given time range, and including location metadata.

    Returns a dataset with a "pv_yield" variable indexed by
    (system_id, datetime), plus latitude/longitude and easting/northing
    coordinates per system.
    """
    ds = xr.open_dataset(timeseries_filename)
    # Subset to given time range
    subset = ds.sel(datetime=slice(start_date, end_date))
    # Drop systems with no readings during this time
    # I couldn't see how to do this with xarray, see https://stackoverflow.com/questions/52553925/python-xarray-remove-coordinates-with-all-missing-variables
    df = subset.to_dataframe()
    df = df.dropna(axis=1, how="all")
    # Restrict to systems that are in the intersection of those in PV metadata and PV timeseries
    pv_df = load_pv_systems(
        metadata_filename, stats_filename, timeseries_filename
    ).to_dataframe()
    pv_metadata_system_ids = pv_df.index.tolist()  # indexed by system_id
    timeseries_system_ids = [int(system_id) for system_id in df.columns.tolist()]
    system_ids = list(
        set(pv_metadata_system_ids).intersection(set(timeseries_system_ids))
    )
    system_id_columns = [str(system_id) for system_id in system_ids]
    df = df[system_id_columns]
    # Reshape table into tall and narrow form - this avoids one data variable per system in xarray
    df["datetime"] = df.index
    df = pd.melt(df, id_vars=["datetime"], var_name="system_id", value_name="pv_yield")
    df = df.astype({"system_id": "int64"})
    df = df.set_index(["system_id", "datetime"])
    # Convert back to xarray
    ds = xr.Dataset.from_dataframe(df)

    # Add lat/long and easting/northing coordinates by doing a pandas lookup for each system.
    # DataFrame.lookup was deprecated in pandas 1.2 and removed in 2.0; selecting a
    # single column with a list of row labels via .loc is the documented replacement.
    def _column_for_systems(column: str):
        # One value per system_id, in system_ids order.
        return pv_df.loc[system_ids, column].to_numpy()

    new_coords = {
        "latitude": (["system_id"], _column_for_systems("latitude")),
        "longitude": (["system_id"], _column_for_systems("longitude")),
        "easting": (["system_id"], _column_for_systems("easting")),
        "northing": (["system_id"], _column_for_systems("northing")),
    }
    ds = ds.assign_coords(new_coords)
    return ds
if __name__ == "__main__":
pv_timeseries = load_pv_timeseries(START_DATE, END_DATE)
print(pv_timeseries)
pv_timeseries.to_netcdf("data/tmp/pv_timeseries.nc")
| predict_pv_yield_nwp/pv.py | 6,414 | Get all the PV system IDs from the timeseries file
Transform the system locations into the same coordinate system used by UKV
Transform the system locations into the same coordinate system used by UKV, using pyproj
Load metadata about PV systems
Load the PV timeseries as an xarray dataset, restricted to a given time range, and including location metadata.
Read PV metadata and timeseries data Based on code in https://github.com/openclimatefix/pvoutput E.g. https://nbviewer.jupyter.org/github/openclimatefix/pvoutput/blob/master/examples/analyse_PV_data_for_9th_Aug_2019.ipynb Load metadata Load stats Join Filter out systems with only a few outputs, and with no location Restrict to systems that have timeseries data Retain salient columns Convert to xarray Convert latitude/longitude to easting/northing This is unused, but a useful check Subset to given time range Drop systems with no readings during this time I couldn't see how to do this with xarray, see https://stackoverflow.com/questions/52553925/python-xarray-remove-coordinates-with-all-missing-variables Restrict to systems that are in the intersection of those in PV metadata and PV timeseries indexed by system_id Reshape table into tall and narrow form - this avoids one data variable per system in xarray Convert back to xarray Add lat/long and easting/northing coordinates by doing a pandas lookup for each system | 1,386 | en | 0.823491 |
"""
noxfile
~~~~~~~
Nox configuration script
"""
# pylint: disable=resource-leakage,3rd-party-module-not-gated
import datetime
import glob
import os
import shutil
import sys
import tempfile
# fmt: off
# Refuse direct execution: this file is only meaningful when driven by nox.
if __name__ == "__main__":
    sys.stderr.write(
        "Do not execute this file directly. Use nox instead, it will know how to handle this file\n"
    )
    sys.stderr.flush()
    exit(1)
# fmt: on
import nox # isort:skip
from nox.command import CommandFailed # isort:skip
IS_PY3 = sys.version_info > (2,)

# Be verbose when running under a CI context
CI_RUN = (
    os.environ.get("JENKINS_URL")
    or os.environ.get("CI")
    or os.environ.get("DRONE") is not None
)
PIP_INSTALL_SILENT = CI_RUN is False
SKIP_REQUIREMENTS_INSTALL = "SKIP_REQUIREMENTS_INSTALL" in os.environ
EXTRA_REQUIREMENTS_INSTALL = os.environ.get("EXTRA_REQUIREMENTS_INSTALL")

# Global Path Definitions
REPO_ROOT = os.path.abspath(os.path.dirname(__file__))
SITECUSTOMIZE_DIR = os.path.join(REPO_ROOT, "tests", "support", "coverage")
IS_DARWIN = sys.platform.lower().startswith("darwin")
IS_WINDOWS = sys.platform.lower().startswith("win")
IS_FREEBSD = sys.platform.lower().startswith("freebsd")

# Python versions to run against
_PYTHON_VERSIONS = ("3", "3.5", "3.6", "3.7", "3.8", "3.9")

# Nox options
# Reuse existing virtualenvs
nox.options.reuse_existing_virtualenvs = True
# Don't fail on missing interpreters
nox.options.error_on_missing_interpreters = False

# Change current directory to REPO_ROOT
os.chdir(REPO_ROOT)

# One timestamped log file per nox invocation.
RUNTESTS_LOGFILE = os.path.join(
    "artifacts",
    "logs",
    "runtests-{}.log".format(datetime.datetime.now().strftime("%Y%m%d%H%M%S.%f")),
)

# Prevent Python from writing bytecode
os.environ["PYTHONDONTWRITEBYTECODE"] = "1"
def find_session_runner(session, name, **kwargs):
    """Return the session runner named ``name`` whose signature contains every
    ``key=value`` pair in ``kwargs``; abort the session when none matches."""
    for s, _ in session._runner.manifest.list_all_sessions():
        if name not in s.signatures:
            continue
        for signature in s.signatures:
            for key, value in kwargs.items():
                param = "{}={!r}".format(key, value)
                if IS_PY3:
                    # Under Python2 repr unicode string are always "u" prefixed, ie, u'a string'.
                    param = param.replace("u'", "'")
                if param not in signature:
                    break
            else:
                # Every key=value pair was found in this signature.
                return s
            continue
    session.error(
        "Could not find a nox session by the name {!r} with the following keyword arguments: {!r}".format(
            name, kwargs
        )
    )
def _create_ci_directories():
for dirname in ("logs", "coverage", "xml-unittests-output"):
path = os.path.join("artifacts", dirname)
if not os.path.exists(path):
os.makedirs(path)
def _get_session_python_version_info(session):
    """Return the session interpreter's version as an int tuple, e.g. ``(3, 7, 9)``.

    The result is cached on the session runner so the subprocess probe runs
    only once per session.
    """
    try:
        version_info = session._runner._real_python_version_info
    except AttributeError:
        old_install_only_value = session._runner.global_config.install_only
        try:
            # Force install only to be false for the following chunk of code
            # For additional information as to why see:
            # https://github.com/theacodes/nox/pull/181
            session._runner.global_config.install_only = False
            session_py_version = session.run(
                "python",
                "-c",
                'import sys; sys.stdout.write("{}.{}.{}".format(*sys.version_info))',
                silent=True,
                log=False,
            )
            version_info = tuple(
                int(part) for part in session_py_version.split(".") if part.isdigit()
            )
            session._runner._real_python_version_info = version_info
        finally:
            session._runner.global_config.install_only = old_install_only_value
    return version_info
def _get_session_python_site_packages_dir(session):
    """Return the site-packages directory of the session's interpreter.

    Cached on the session runner so the subprocess probe runs only once.
    """
    try:
        site_packages_dir = session._runner._site_packages_dir
    except AttributeError:
        old_install_only_value = session._runner.global_config.install_only
        try:
            # Force install only to be false for the following chunk of code
            # For additional information as to why see:
            # https://github.com/theacodes/nox/pull/181
            session._runner.global_config.install_only = False
            site_packages_dir = session.run(
                "python",
                "-c",
                "import sys; from distutils.sysconfig import get_python_lib; sys.stdout.write(get_python_lib())",
                silent=True,
                log=False,
            )
            session._runner._site_packages_dir = site_packages_dir
        finally:
            session._runner.global_config.install_only = old_install_only_value
    return site_packages_dir
def _get_pydir(session):
    """Return the ``py<major>.<minor>`` directory name for the session's interpreter."""
    major_minor = _get_session_python_version_info(session)
    # Enforce the minimum supported interpreter before building the name.
    if major_minor < (3, 5):
        session.error("Only Python >= 3.5 is supported")
    if IS_WINDOWS and major_minor < (3, 6):
        session.error("Only Python >= 3.6 is supported on Windows")
    return "py{}.{}".format(*major_minor)
def _install_system_packages(session):
    """
    Because some python packages are provided by the distribution and cannot
    be pip installed, and because we don't want the whole system python packages
    on our virtualenvs, we copy the required system python packages into
    the virtualenv
    """
    version_info = _get_session_python_version_info(session)
    # Look under both /usr/lib/pythonX/ and /usr/lib/pythonX.Y/ dist-packages.
    py_version_keys = ["{}".format(*version_info), "{}.{}".format(*version_info)]
    session_site_packages_dir = _get_session_python_site_packages_dir(session)
    session_site_packages_dir = os.path.relpath(session_site_packages_dir, REPO_ROOT)
    for py_version in py_version_keys:
        dist_packages_path = "/usr/lib/python{}/dist-packages".format(py_version)
        if not os.path.isdir(dist_packages_path):
            continue
        # Copy every apt-related package (e.g. python-apt) into the virtualenv,
        # resolving symlinks so real files/directories get copied.
        for aptpkg in glob.glob(os.path.join(dist_packages_path, "*apt*")):
            src = os.path.realpath(aptpkg)
            dst = os.path.join(session_site_packages_dir, os.path.basename(src))
            if os.path.exists(dst):
                session.log("Not overwritting already existing %s with %s", dst, src)
                continue
            session.log("Copying %s into %s", src, dst)
            if os.path.isdir(src):
                shutil.copytree(src, dst)
            else:
                shutil.copyfile(src, dst)
def _get_pip_requirements_file(session, transport, crypto=None, requirements_type="ci"):
    """Return the best-matching static requirements file for this platform.

    Candidates are tried in order inside
    ``requirements/static/<requirements_type>/<pydir>/``:

    1. ``<transport>-<platform>.txt`` (only when ``crypto`` is None),
    2. ``<platform>.txt``,
    3. ``<platform>-crypto.txt``.

    Returns the first candidate that exists, or ``None`` when none do
    (matching the historical fall-through behaviour of the per-platform
    branches this function replaces).
    """
    assert requirements_type in ("ci", "pkg")
    pydir = _get_pydir(session)
    if IS_WINDOWS:
        platform_name = "windows"
    elif IS_DARWIN:
        platform_name = "darwin"
    elif IS_FREEBSD:
        platform_name = "freebsd"
    else:
        # On Linux, some python packages come from the distribution and must be
        # copied into the virtualenv before requirements can be resolved.
        _install_system_packages(session)
        platform_name = "linux"
    candidates = []
    if crypto is None:
        candidates.append("{}-{}.txt".format(transport, platform_name))
    candidates.append("{}.txt".format(platform_name))
    candidates.append("{}-crypto.txt".format(platform_name))
    for candidate in candidates:
        _requirements_file = os.path.join(
            "requirements", "static", requirements_type, pydir, candidate
        )
        if os.path.exists(_requirements_file):
            return _requirements_file
def _upgrade_pip_setuptools_and_wheel(session):
    """Upgrade pip/setuptools/wheel in the session's virtualenv.

    Returns False (doing nothing) when requirements installation is being
    skipped via SKIP_REQUIREMENTS_INSTALL, True otherwise.
    """
    if SKIP_REQUIREMENTS_INSTALL:
        session.log(
            "Skipping Python Requirements because SKIP_REQUIREMENTS_INSTALL was found in the environ"
        )
        return False
    session.run(
        "python",
        "-m",
        "pip",
        "install",
        "--progress-bar=off",
        "-U",
        "pip>=20.2.4,<21.2",
        "setuptools!=50.*,!=51.*,!=52.*",
        "wheel",
        silent=PIP_INSTALL_SILENT,
    )
    return True
def _install_requirements(
    session, transport, *extra_requirements, requirements_type="ci"
):
    """Install the pinned requirements for ``transport`` plus any extras."""
    if not _upgrade_pip_setuptools_and_wheel(session):
        return
    # Install requirements
    requirements_file = _get_pip_requirements_file(
        session, transport, requirements_type=requirements_type
    )
    session.install(
        "--progress-bar=off", "-r", requirements_file, silent=PIP_INSTALL_SILENT
    )
    if extra_requirements:
        session.install(
            "--progress-bar=off", *extra_requirements, silent=PIP_INSTALL_SILENT
        )
    if EXTRA_REQUIREMENTS_INSTALL:
        session.log(
            "Installing the following extra requirements because the"
            " EXTRA_REQUIREMENTS_INSTALL environment variable was set: %s",
            EXTRA_REQUIREMENTS_INSTALL,
        )
        # We pass --constraint in this step because in case any of these extra dependencies has a requirement
        # we're already using, we want to maintain the locked version
        session.install(
            "--progress-bar=off",
            "--constraint",
            requirements_file,
            *EXTRA_REQUIREMENTS_INSTALL.split(),
            silent=PIP_INSTALL_SILENT,
        )
def _run_with_coverage(session, *test_cmd, env=None):
    """Run ``test_cmd`` under coverage and always produce the XML reports.

    Sub-processes are also traced (via COVERAGE_PROCESS_START plus a
    sitecustomize dir on PYTHONPATH); separate reports for salt/ and tests/
    are written to artifacts/coverage/ even when the command fails.
    """
    if SKIP_REQUIREMENTS_INSTALL is False:
        session.install(
            "--progress-bar=off", "coverage==5.2", silent=PIP_INSTALL_SILENT
        )
    session.run("coverage", "erase")
    python_path_env_var = os.environ.get("PYTHONPATH") or None
    if python_path_env_var is None:
        python_path_env_var = SITECUSTOMIZE_DIR
    else:
        # Make sure the sitecustomize dir is first on PYTHONPATH.
        python_path_entries = python_path_env_var.split(os.pathsep)
        if SITECUSTOMIZE_DIR in python_path_entries:
            python_path_entries.remove(SITECUSTOMIZE_DIR)
        python_path_entries.insert(0, SITECUSTOMIZE_DIR)
        python_path_env_var = os.pathsep.join(python_path_entries)
    if env is None:
        env = {}
    env.update(
        {
            # The updated python path so that sitecustomize is importable
            "PYTHONPATH": python_path_env_var,
            # The full path to the .coverage data file. Makes sure we always write
            # them to the same directory
            "COVERAGE_FILE": os.path.abspath(os.path.join(REPO_ROOT, ".coverage")),
            # Instruct sub processes to also run under coverage
            "COVERAGE_PROCESS_START": os.path.join(REPO_ROOT, ".coveragerc"),
        }
    )
    try:
        session.run(*test_cmd, env=env)
    finally:
        # Always combine and generate the XML coverage report
        try:
            session.run("coverage", "combine")
        except CommandFailed:
            # Sometimes some of the coverage files are corrupt which would trigger a CommandFailed
            # exception
            pass
        # Generate report for salt code coverage
        session.run(
            "coverage",
            "xml",
            "-o",
            os.path.join("artifacts", "coverage", "salt.xml"),
            "--omit=tests/*",
            "--include=salt/*",
        )
        # Generate report for tests code coverage
        session.run(
            "coverage",
            "xml",
            "-o",
            os.path.join("artifacts", "coverage", "tests.xml"),
            "--omit=salt/*",
            "--include=tests/*",
        )
        # Move the coverage DB to artifacts/coverage in order for it to be archived by CI
        shutil.move(".coverage", os.path.join("artifacts", "coverage", ".coverage"))
def _runtests(session):
    """Abort the session: runtests.py was removed; point at the pytest session."""
    session.error(
        """\n\nruntests.py support has been removed from Salt. Please try `nox -e '{0}'` """
        """or `nox -e '{0}' -- --help` to know more about the supported CLI flags.\n"""
        "For more information, please check "
        "https://docs.saltproject.io/en/latest/topics/development/tests/index.html#running-the-tests\n..".format(
            session._runner.global_config.sessions[0].replace("runtests", "pytest")
        )
    )
# All of the sessions below are deprecated runtests.py shims: each one only
# calls _runtests(), which errors out pointing at the equivalent pytest session.
@nox.session(python=_PYTHON_VERSIONS, name="runtests-parametrized")
@nox.parametrize("coverage", [False, True])
@nox.parametrize("transport", ["zeromq", "tcp"])
@nox.parametrize("crypto", [None, "m2crypto", "pycryptodome"])
def runtests_parametrized(session, coverage, transport, crypto):
    """
    DO NOT CALL THIS NOX SESSION DIRECTLY
    """
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS)
@nox.parametrize("coverage", [False, True])
def runtests(session, coverage):
    """
    runtests.py session with zeromq transport and default crypto
    """
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-tcp")
@nox.parametrize("coverage", [False, True])
def runtests_tcp(session, coverage):
    """
    runtests.py session with TCP transport and default crypto
    """
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-zeromq")
@nox.parametrize("coverage", [False, True])
def runtests_zeromq(session, coverage):
    """
    runtests.py session with zeromq transport and default crypto
    """
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-m2crypto")
@nox.parametrize("coverage", [False, True])
def runtests_m2crypto(session, coverage):
    """
    runtests.py session with zeromq transport and m2crypto
    """
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-tcp-m2crypto")
@nox.parametrize("coverage", [False, True])
def runtests_tcp_m2crypto(session, coverage):
    """
    runtests.py session with TCP transport and m2crypto
    """
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-zeromq-m2crypto")
@nox.parametrize("coverage", [False, True])
def runtests_zeromq_m2crypto(session, coverage):
    """
    runtests.py session with zeromq transport and m2crypto
    """
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-pycryptodome")
@nox.parametrize("coverage", [False, True])
def runtests_pycryptodome(session, coverage):
    """
    runtests.py session with zeromq transport and pycryptodome
    """
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-tcp-pycryptodome")
@nox.parametrize("coverage", [False, True])
def runtests_tcp_pycryptodome(session, coverage):
    """
    runtests.py session with TCP transport and pycryptodome
    """
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-zeromq-pycryptodome")
@nox.parametrize("coverage", [False, True])
def runtests_zeromq_pycryptodome(session, coverage):
    """
    runtests.py session with zeromq transport and pycryptodome
    """
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-cloud")
@nox.parametrize("coverage", [False, True])
def runtests_cloud(session, coverage):
    """
    runtests.py cloud tests session
    """
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-tornado")
@nox.parametrize("coverage", [False, True])
def runtests_tornado(session, coverage):
    """
    runtests.py tornado tests session
    """
    _runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="pytest-parametrized")
@nox.parametrize("coverage", [False, True])
@nox.parametrize("transport", ["zeromq", "tcp"])
@nox.parametrize("crypto", [None, "m2crypto", "pycryptodome"])
def pytest_parametrized(session, coverage, transport, crypto):
    """
    DO NOT CALL THIS NOX SESSION DIRECTLY
    """
    # Install requirements
    _install_requirements(session, transport)
    if crypto:
        # Remove every crypto backend, then install the requested one pinned
        # by the crypto constraints file for this platform.
        session.run(
            "pip",
            "uninstall",
            "-y",
            "m2crypto",
            "pycrypto",
            "pycryptodome",
            "pycryptodomex",
            silent=True,
        )
        install_command = [
            "--progress-bar=off",
            "--constraint",
            _get_pip_requirements_file(session, transport, crypto=True),
        ]
        install_command.append(crypto)
        session.install(*install_command, silent=PIP_INSTALL_SILENT)
    cmd_args = [
        "--rootdir",
        REPO_ROOT,
        "--log-file={}".format(RUNTESTS_LOGFILE),
        "--log-file-level=debug",
        "--show-capture=no",
        "-ra",
        "-s",
        "--transport={}".format(transport),
    ] + session.posargs
    _pytest(session, coverage, cmd_args)
# The sessions below are thin wrappers: each one dispatches to the matching
# pytest-parametrized session with its transport/crypto combination fixed.
@nox.session(python=_PYTHON_VERSIONS)
@nox.parametrize("coverage", [False, True])
def pytest(session, coverage):
    """
    pytest session with zeromq transport and default crypto
    """
    session.notify(
        find_session_runner(
            session,
            "pytest-parametrized-{}".format(session.python),
            coverage=coverage,
            crypto=None,
            transport="zeromq",
        )
    )
@nox.session(python=_PYTHON_VERSIONS, name="pytest-tcp")
@nox.parametrize("coverage", [False, True])
def pytest_tcp(session, coverage):
    """
    pytest session with TCP transport and default crypto
    """
    session.notify(
        find_session_runner(
            session,
            "pytest-parametrized-{}".format(session.python),
            coverage=coverage,
            crypto=None,
            transport="tcp",
        )
    )
@nox.session(python=_PYTHON_VERSIONS, name="pytest-zeromq")
@nox.parametrize("coverage", [False, True])
def pytest_zeromq(session, coverage):
    """
    pytest session with zeromq transport and default crypto
    """
    session.notify(
        find_session_runner(
            session,
            "pytest-parametrized-{}".format(session.python),
            coverage=coverage,
            crypto=None,
            transport="zeromq",
        )
    )
@nox.session(python=_PYTHON_VERSIONS, name="pytest-m2crypto")
@nox.parametrize("coverage", [False, True])
def pytest_m2crypto(session, coverage):
    """
    pytest session with zeromq transport and m2crypto
    """
    session.notify(
        find_session_runner(
            session,
            "pytest-parametrized-{}".format(session.python),
            coverage=coverage,
            crypto="m2crypto",
            transport="zeromq",
        )
    )
@nox.session(python=_PYTHON_VERSIONS, name="pytest-tcp-m2crypto")
@nox.parametrize("coverage", [False, True])
def pytest_tcp_m2crypto(session, coverage):
    """
    pytest session with TCP transport and m2crypto
    """
    session.notify(
        find_session_runner(
            session,
            "pytest-parametrized-{}".format(session.python),
            coverage=coverage,
            crypto="m2crypto",
            transport="tcp",
        )
    )
@nox.session(python=_PYTHON_VERSIONS, name="pytest-zeromq-m2crypto")
@nox.parametrize("coverage", [False, True])
def pytest_zeromq_m2crypto(session, coverage):
    """
    pytest session with zeromq transport and m2crypto
    """
    session.notify(
        find_session_runner(
            session,
            "pytest-parametrized-{}".format(session.python),
            coverage=coverage,
            crypto="m2crypto",
            transport="zeromq",
        )
    )
@nox.session(python=_PYTHON_VERSIONS, name="pytest-pycryptodome")
@nox.parametrize("coverage", [False, True])
def pytest_pycryptodome(session, coverage):
    """
    pytest session with zeromq transport and pycryptodome
    """
    session.notify(
        find_session_runner(
            session,
            "pytest-parametrized-{}".format(session.python),
            coverage=coverage,
            crypto="pycryptodome",
            transport="zeromq",
        )
    )
@nox.session(python=_PYTHON_VERSIONS, name="pytest-tcp-pycryptodome")
@nox.parametrize("coverage", [False, True])
def pytest_tcp_pycryptodome(session, coverage):
    """
    pytest session with TCP transport and pycryptodome
    """
    session.notify(
        find_session_runner(
            session,
            "pytest-parametrized-{}".format(session.python),
            coverage=coverage,
            crypto="pycryptodome",
            transport="tcp",
        )
    )
@nox.session(python=_PYTHON_VERSIONS, name="pytest-zeromq-pycryptodome")
@nox.parametrize("coverage", [False, True])
def pytest_zeromq_pycryptodome(session, coverage):
    """
    pytest session with zeromq transport and pycryptodome
    """
    session.notify(
        find_session_runner(
            session,
            "pytest-parametrized-{}".format(session.python),
            coverage=coverage,
            crypto="pycryptodome",
            transport="zeromq",
        )
    )
@nox.session(python=_PYTHON_VERSIONS, name="pytest-cloud")
@nox.parametrize("coverage", [False, True])
def pytest_cloud(session, coverage):
    """
    pytest cloud tests session
    """
    # Install requirements
    if _upgrade_pip_setuptools_and_wheel(session):
        _install_requirements(session, "zeromq")
        # Cloud tests need the extra pinned cloud requirements on top.
        requirements_file = os.path.join(
            "requirements", "static", "ci", _get_pydir(session), "cloud.txt"
        )
        install_command = ["--progress-bar=off", "-r", requirements_file]
        session.install(*install_command, silent=PIP_INSTALL_SILENT)
    cmd_args = [
        "--rootdir",
        REPO_ROOT,
        "--log-file={}".format(RUNTESTS_LOGFILE),
        "--log-file-level=debug",
        "--show-capture=no",
        "-ra",
        "-s",
        "--run-expensive",
        "-k",
        "cloud",
    ] + session.posargs
    _pytest(session, coverage, cmd_args)
@nox.session(python=_PYTHON_VERSIONS, name="pytest-tornado")
@nox.parametrize("coverage", [False, True])
def pytest_tornado(session, coverage):
    """
    pytest tornado tests session
    """
    # Install requirements
    if _upgrade_pip_setuptools_and_wheel(session):
        _install_requirements(session, "zeromq")
        # Pin tornado/pyzmq to the specific versions this session targets.
        session.install(
            "--progress-bar=off", "tornado==5.0.2", silent=PIP_INSTALL_SILENT
        )
        session.install(
            "--progress-bar=off", "pyzmq==17.0.0", silent=PIP_INSTALL_SILENT
        )
    cmd_args = [
        "--rootdir",
        REPO_ROOT,
        "--log-file={}".format(RUNTESTS_LOGFILE),
        "--log-file-level=debug",
        "--show-capture=no",
        "-ra",
        "-s",
    ] + session.posargs
    _pytest(session, coverage, cmd_args)
def _pytest(session, coverage, cmd_args):
    """Invoke pytest with ``cmd_args``, optionally under coverage.

    On CI runs, the collected test list is printed first so a hanging test
    can be identified from the collection order. The re-run-failed logic
    after the except block is deliberately unreachable for now.
    """
    # Create required artifacts directories
    _create_ci_directories()
    env = {"CI_RUN": "1" if CI_RUN else "0"}
    if IS_DARWIN:
        # Don't nuke our multiprocessing efforts objc!
        # https://stackoverflow.com/questions/50168647/multiprocessing-causes-python-to-crash-and-gives-an-error-may-have-been-in-progr
        env["OBJC_DISABLE_INITIALIZE_FORK_SAFETY"] = "YES"
    if CI_RUN:
        # We'll print out the collected tests on CI runs.
        # This will show a full list of what tests are going to run, in the right order, which, in case
        # of a test suite hang, helps us pinpoint which test is hanging
        session.run(
            "python", "-m", "pytest", *(cmd_args + ["--collect-only", "-qqq"]), env=env
        )
    try:
        if coverage is True:
            _run_with_coverage(
                session,
                "python",
                "-m",
                "coverage",
                "run",
                "-m",
                "pytest",
                "--showlocals",
                *cmd_args,
                env=env
            )
        else:
            session.run("python", "-m", "pytest", *cmd_args, env=env)
    except CommandFailed:  # pylint: disable=try-except-raise
        # Not rerunning failed tests for now
        raise
    # pylint: disable=unreachable
    # Re-run failed tests
    session.log("Re-running failed tests")
    for idx, parg in enumerate(cmd_args):
        if parg.startswith("--junitxml="):
            cmd_args[idx] = parg.replace(".xml", "-rerun-failed.xml")
    cmd_args.append("--lf")
    if coverage is True:
        _run_with_coverage(
            session,
            "python",
            "-m",
            "coverage",
            "run",
            "-m",
            "pytest",
            "--showlocals",
            *cmd_args
        )
    else:
        session.run("python", "-m", "pytest", *cmd_args, env=env)
    # pylint: enable=unreachable
class Tee:
    """
    Python class to mimic linux tee behaviour
    """

    def __init__(self, first, second):
        # Primary stream (also provides the file descriptor) and mirror stream.
        self._first = first
        self._second = second

    def write(self, b):
        """Write ``b`` to both streams and return the count written.

        Flushes eagerly after each write so interleaved output from
        subprocesses stays ordered. Returning the write count honours the
        file-object protocol (the previous version computed it but dropped it).
        """
        wrote = self._first.write(b)
        self._first.flush()
        self._second.write(b)
        self._second.flush()
        return wrote

    def fileno(self):
        # Delegate to the primary stream so consumers needing a real file
        # descriptor (e.g. subprocess) can use this object directly.
        return self._first.fileno()
def _lint(session, rcfile, flags, paths, tee_output=True):
    """
    Shared pylint runner.

    Installs the lint requirements, then runs pylint with ``rcfile``/``flags``
    over ``paths``. When ``tee_output`` is True, pylint's stdout is mirrored
    to the console while also being captured so it can be written to the file
    named by the PYLINT_REPORT environment variable.
    """
    if _upgrade_pip_setuptools_and_wheel(session):
        _install_requirements(session, "zeromq")
        requirements_file = os.path.join(
            "requirements", "static", "ci", _get_pydir(session), "lint.txt"
        )
        install_command = ["--progress-bar=off", "-r", requirements_file]
        session.install(*install_command, silent=PIP_INSTALL_SILENT)
    if tee_output:
        session.run("pylint", "--version")
        # Optional report destination, only honoured when teeing output.
        pylint_report_path = os.environ.get("PYLINT_REPORT")
    cmd_args = ["pylint", "--rcfile={}".format(rcfile)] + list(flags) + list(paths)
    cmd_kwargs = {"env": {"PYTHONUNBUFFERED": "1"}}
    if tee_output:
        # Capture pylint's output in a temp file while echoing it live to the
        # real stdout, tee-style.
        stdout = tempfile.TemporaryFile(mode="w+b")
        cmd_kwargs["stdout"] = Tee(stdout, sys.__stdout__)
    # NOTE(review): lint_failed is set but never read in this function —
    # presumably left over from earlier reporting logic; confirm before removal.
    lint_failed = False
    try:
        session.run(*cmd_args, **cmd_kwargs)
    except CommandFailed:
        lint_failed = True
        raise
    finally:
        # Even when pylint fails, dump (and optionally persist) whatever
        # output was captured before re-raising.
        if tee_output:
            stdout.seek(0)
            contents = stdout.read()
            if contents:
                # The temp file holds bytes; decode for printing on Py3
                # (the Py2 branch performs the opposite conversion).
                if IS_PY3:
                    contents = contents.decode("utf-8")
                else:
                    contents = contents.encode("utf-8")
                sys.stdout.write(contents)
                sys.stdout.flush()
                if pylint_report_path:
                    # Write report
                    with open(pylint_report_path, "w") as wfh:
                        wfh.write(contents)
                    session.log("Report file written to %r", pylint_report_path)
            stdout.close()
def _lint_pre_commit(session, rcfile, flags, paths):
    """
    Variant of :func:`_lint` intended to be invoked by pre-commit.

    Validates that we are running inside a pre-commit virtualenv, re-targets
    the nox session at that already-active virtualenv (instead of creating a
    new one), and runs the lint with output teeing disabled.
    """
    if "VIRTUAL_ENV" not in os.environ:
        session.error(
            "This should be running from within a virtualenv and "
            "'VIRTUAL_ENV' was not found as an environment variable."
        )
    if "pre-commit" not in os.environ["VIRTUAL_ENV"]:
        session.error(
            "This should be running from within a pre-commit virtualenv and "
            "'VIRTUAL_ENV'({}) does not appear to be a pre-commit virtualenv.".format(
                os.environ["VIRTUAL_ENV"]
            )
        )
    from nox.virtualenv import VirtualEnv

    # Let's patch nox to make it run inside the pre-commit virtualenv
    try:
        session._runner.venv = VirtualEnv(  # pylint: disable=unexpected-keyword-arg
            os.environ["VIRTUAL_ENV"],
            interpreter=session._runner.func.python,
            reuse_existing=True,
            venv=True,
        )
    except TypeError:
        # This is still nox-py2 (its VirtualEnv has no `venv` keyword)
        session._runner.venv = VirtualEnv(
            os.environ["VIRTUAL_ENV"],
            interpreter=session._runner.func.python,
            reuse_existing=True,
        )
    _lint(session, rcfile, flags, paths, tee_output=False)
@nox.session(python="3")
def lint(session):
    """
    Run PyLint against Salt and it's test suite. Set PYLINT_REPORT to a path to capture output.
    """
    # Fan out to the two dedicated lint sessions, salt first, then tests.
    for target in ("salt", "tests"):
        session.notify("lint-{}-{}".format(target, session.python))
@nox.session(python="3", name="lint-salt")
def lint_salt(session):
    """
    Run PyLint against Salt. Set PYLINT_REPORT to a path to capture output.
    """
    # Lint the project sources by default; CLI positional args override.
    paths = session.posargs or ["setup.py", "noxfile.py", "salt/", "tasks/"]
    _lint(session, ".pylintrc", ["--disable=I"], paths)
@nox.session(python="3", name="lint-tests")
def lint_tests(session):
    """
    Run PyLint against Salt and it's test suite. Set PYLINT_REPORT to a path to capture output.
    """
    # Lint the test suite by default; CLI positional args override.
    paths = session.posargs or ["tests/"]
    _lint(session, ".pylintrc", ["--disable=I"], paths)
@nox.session(python=False, name="lint-salt-pre-commit")
def lint_salt_pre_commit(session):
    """
    Run PyLint against Salt. Set PYLINT_REPORT to a path to capture output.
    """
    # Pre-commit variant: runs inside the pre-commit virtualenv.
    paths = session.posargs or ["setup.py", "noxfile.py", "salt/"]
    _lint_pre_commit(session, ".pylintrc", ["--disable=I"], paths)
@nox.session(python=False, name="lint-tests-pre-commit")
def lint_tests_pre_commit(session):
    """
    Run PyLint against Salt and it's test suite. Set PYLINT_REPORT to a path to capture output.
    """
    # Pre-commit variant: runs inside the pre-commit virtualenv.
    paths = session.posargs or ["tests/"]
    _lint_pre_commit(session, ".pylintrc", ["--disable=I"], paths)
@nox.session(python="3")
@nox.parametrize("clean", [False, True])
@nox.parametrize("update", [False, True])
@nox.parametrize("compress", [False, True])
def docs(session, compress, update, clean):
    """
    Build Salt's Documentation
    """
    # The HTML session is only parametrized on `compress` ...
    session.notify("docs-html-{}(compress={})".format(session.python, compress))
    # ... while the manpage session takes all three flags, so resolve its
    # exact runner before notifying.
    man_runner = find_session_runner(
        session,
        "docs-man-{}".format(session.python),
        compress=compress,
        update=update,
        clean=clean,
    )
    session.notify(man_runner)
@nox.session(name="docs-html", python="3")
@nox.parametrize("clean", [False, True])
@nox.parametrize("compress", [False, True])
def docs_html(session, compress, clean):
    """
    Build Salt's HTML Documentation
    """
    # Install the docs build requirements for this interpreter.
    if _upgrade_pip_setuptools_and_wheel(session):
        requirements_file = os.path.join(
            "requirements", "static", "ci", _get_pydir(session), "docs.txt"
        )
        session.install(
            "--progress-bar=off", "-r", requirements_file, silent=PIP_INSTALL_SILENT
        )
    os.chdir("doc/")
    if clean:
        session.run("make", "clean", external=True)
    # Treat sphinx warnings as errors.
    session.run("make", "html", "SPHINXOPTS=-W", external=True)
    if compress:
        archive_cmd = ("tar", "-cJvf", "html-archive.tar.xz", "_build/html")
        session.run(*archive_cmd, external=True)
    os.chdir("..")
@nox.session(name="docs-man", python="3")
@nox.parametrize("clean", [False, True])
@nox.parametrize("update", [False, True])
@nox.parametrize("compress", [False, True])
def docs_man(session, compress, update, clean):
    """
    Build Salt's Manpages Documentation
    """
    # Install the docs build requirements for this interpreter.
    if _upgrade_pip_setuptools_and_wheel(session):
        requirements_file = os.path.join(
            "requirements", "static", "ci", _get_pydir(session), "docs.txt"
        )
        session.install(
            "--progress-bar=off", "-r", requirements_file, silent=PIP_INSTALL_SILENT
        )
    os.chdir("doc/")
    if clean:
        session.run("make", "clean", external=True)
    # Treat sphinx warnings as errors.
    session.run("make", "man", "SPHINXOPTS=-W", external=True)
    if update:
        # Refresh the checked-in manpages from the freshly-built ones.
        session.run("rm", "-rf", "man/", external=True)
        session.run("cp", "-Rp", "_build/man", "man/", external=True)
    if compress:
        archive_cmd = ("tar", "-cJvf", "man-archive.tar.xz", "_build/man")
        session.run(*archive_cmd, external=True)
    os.chdir("..")
@nox.session(name="invoke", python="3")
def invoke(session):
    """
    Run invoke tasks
    """
    if _upgrade_pip_setuptools_and_wheel(session):
        requirements_file = os.path.join(
            "requirements", "static", "ci", _get_pydir(session), "invoke.txt"
        )
        session.install(
            "--progress-bar=off", "-r", requirements_file, silent=PIP_INSTALL_SILENT
        )

    # invoke has no argparse-style nargs support, so collect every bare
    # positional argument after the task name into a single --files option.
    cmd = ["inv"]
    files = []
    for idx, posarg in enumerate(session.posargs):
        if idx == 0 or posarg.startswith("--"):
            cmd.append(posarg)
        else:
            files.append(posarg)
    if files:
        cmd.append("--files={}".format(" ".join(files)))
    session.run(*cmd)
@nox.session(name="changelog", python="3")
@nox.parametrize("draft", [False, True])
def changelog(session, draft):
    """
    Generate salt's changelog

    The target version must be passed as the first positional argument.
    """
    if _upgrade_pip_setuptools_and_wheel(session):
        requirements_file = os.path.join(
            "requirements", "static", "ci", _get_pydir(session), "changelog.txt"
        )
        install_command = ["--progress-bar=off", "-r", requirements_file]
        session.install(*install_command, silent=PIP_INSTALL_SILENT)

    # towncrier needs the version to generate the changelog for; fail with a
    # clear message instead of an IndexError when it was not passed.
    if not session.posargs:
        session.error(
            "Please pass the version to generate the changelog for, "
            "e.g. 'nox -e changelog -- 3003'"
        )
    town_cmd = ["towncrier", "--version={}".format(session.posargs[0])]
    if draft:
        town_cmd.append("--draft")
    session.run(*town_cmd)
| noxfile.py | 35,872 | Python class to mimic linux tee behaviour
Because some python packages are provided by the distribution and cannot
be pip installed, and because we don't want the whole system python packages
on our virtualenvs, we copy the required system python packages into
the virtualenv
Generate salt's changelog
Build Salt's Documentation
Build Salt's HTML Documentation
Build Salt's Manpages Documentation
Run invoke tasks
Run PyLint against Salt and it's test suite. Set PYLINT_REPORT to a path to capture output.
Run PyLint against Salt. Set PYLINT_REPORT to a path to capture output.
Run PyLint against Salt. Set PYLINT_REPORT to a path to capture output.
Run PyLint against Salt and it's test suite. Set PYLINT_REPORT to a path to capture output.
Run PyLint against Salt and it's test suite. Set PYLINT_REPORT to a path to capture output.
pytest session with zeromq transport and default crypto
pytest cloud tests session
pytest session with zeromq transport and m2crypto
DO NOT CALL THIS NOX SESSION DIRECTLY
pytest session with zeromq transport and pycryptodome
pytest session with TCP transport and default crypto
pytest session with TCP transport and m2crypto
pytest session with TCP transport and pycryptodome
pytest tornado tests session
pytest session with zeromq transport and default crypto
pytest session with zeromq transport and m2crypto
pytest session with zeromq transport and pycryptodome
runtests.py session with zeromq transport and default crypto
runtests.py cloud tests session
runtests.py session with zeromq transport and m2crypto
DO NOT CALL THIS NOX SESSION DIRECTLY
runtests.py session with zeromq transport and pycryptodome
runtests.py session with TCP transport and default crypto
runtests.py session with TCP transport and m2crypto
runtests.py session with TCP transport and pycryptodome
runtests.py tornado tests session
runtests.py session with zeromq transport and default crypto
runtests.py session with zeromq transport and m2crypto
runtests.py session with zeromq transport and pycryptodome
noxfile
~~~~~~~
Nox configuration script
pylint: disable=resource-leakage,3rd-party-module-not-gated fmt: off fmt: on isort:skip isort:skip Be verbose when runing under a CI context Global Path Definitions Python versions to run against Nox options Reuse existing virtualenvs Don't fail on missing interpreters Change current directory to REPO_ROOT Prevent Python from writing bytecode Under Python2 repr unicode string are always "u" prefixed, ie, u'a string'. Force install only to be false for the following chunk of code For additional information as to why see: https://github.com/theacodes/nox/pull/181 Force install only to be false for the following chunk of code For additional information as to why see: https://github.com/theacodes/nox/pull/181 Install requirements We pass --constraint in this step because in case any of these extra dependencies has a requirement we're already using, we want to maintain the locked version The updated python path so that sitecustomize is importable The full path to the .coverage data file. Makes sure we always write them to the same directory Instruct sub processes to also run under coverage Always combine and generate the XML coverage report Sometimes some of the coverage files are corrupt which would trigger a CommandFailed exception Generate report for salt code coverage Generate report for tests code coverage Move the coverage DB to artifacts/coverage in order for it to be archived by CI Install requirements Install requirements Install requirements Create required artifacts directories Don't nuke our multiprocessing efforts objc! https://stackoverflow.com/questions/50168647/multiprocessing-causes-python-to-crash-and-gives-an-error-may-have-been-in-progr We'll print out the collected tests on CI runs. 
This will show a full list of what tests are going to run, in the right order, which, in case of a test suite hang, helps us pinpoint which test is hanging pylint: disable=try-except-raise Not rerunning failed tests for now pylint: disable=unreachable Re-run failed tests pylint: enable=unreachable Write report Let's patch nox to make it run inside the pre-commit virtualenv pylint: disable=unexpected-keyword-arg This is still nox-py2 Unfortunately, invoke doesn't support the nargs functionality like argpase does. Let's make it behave properly | 4,344 | en | 0.807577 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import copy
import enum
import json
import logging
import math
import multiprocessing as mp
import time
from typing import Any, Dict, List, NamedTuple, Optional, Union
import torch
import torch.nn as nn
from classy_vision.dataset import ClassyDataset, build_dataset
from classy_vision.dataset.transforms.mixup import MixupTransform
from classy_vision.generic.distributed_util import (
all_reduce_mean,
barrier,
init_distributed_data_parallel_model,
is_distributed_training_run,
)
from classy_vision.generic.util import (
Timer,
copy_model_to_gpu,
load_and_broadcast_checkpoint,
master_params,
recursive_copy_to_gpu,
split_batchnorm_params,
update_classy_state,
)
from classy_vision.generic.util import get_torch_version
from classy_vision.hooks import CheckpointHook, ClassyHook, build_hooks
from classy_vision.losses import ClassyLoss, build_loss
from classy_vision.meters import ClassyMeter, build_meters
from classy_vision.models import ClassyModel, build_model
from classy_vision.optim import (
ClassyOptimizer,
build_optimizer,
build_optimizer_schedulers,
)
from classy_vision.optim.zero import ZeRO
from torch.distributed import broadcast
from . import register_task
from .classy_task import ClassyTask
# Optional-dependency probes: record availability flags so the task can raise
# clear errors later (when the feature is requested) instead of at import time.
try:
    import apex
    apex_available = True
except ImportError:
    apex_available = False
# torch.cuda.amp is only present on sufficiently recent PyTorch builds; a
# missing GradScaler is tolerated here and surfaces only if Pytorch AMP is
# actually enabled.
try:
    from torch.cuda.amp import GradScaler as TorchGradScaler
except ImportError:
    pass
# fairscale provides the sharded-DDP grad scaler; required only when
# use_sharded_ddp is enabled.
try:
    from fairscale.optim.grad_scaler import ShardedGradScaler
    fairscale_available = True
except ImportError:
    fairscale_available = False
class AmpType(enum.Enum):
    # Automatic Mixed Precision supported types
    APEX = enum.auto()  # NVIDIA apex.amp (requires apex to be installed)
    PYTORCH = enum.auto()  # native torch.cuda.amp (uses GradScaler)
class BroadcastBuffersMode(enum.Enum):
    """Controls when DistributedDataParallel synchronizes model buffers."""

    DISABLED = enum.auto()
    # Enable DistributedDataParallel's broadcast_buffers option, synchronizing
    # model buffers every forward pass.
    FORWARD_PASS = enum.auto()
    # Similar to FORWARD_PASS, but only synchronizes model buffers once
    # per epoch, between train and test phases. If your motivation for
    # synchronizing buffers is for buffers to be consistent during eval, use
    # this instead of FORWARD_PASS to reduce training overhead.
    BEFORE_EVAL = enum.auto()
class BatchNormSyncMode(enum.Enum):
    """Selects which implementation (if any) synchronizes batch-norm stats."""

    DISABLED = enum.auto()  # No Synchronized Batch Normalization
    PYTORCH = enum.auto()  # Use torch.nn.SyncBatchNorm
    APEX = enum.auto()  # Use apex.parallel.SyncBatchNorm, needs apex to be installed
class LastBatchInfo(NamedTuple):
    """Immutable snapshot of the tensors/metadata from the most recent step."""

    loss: torch.Tensor
    output: torch.Tensor
    target: torch.Tensor
    sample: Dict[str, Any]
    step_data: Dict[str, Any]
@register_task("classification_task")
class ClassificationTask(ClassyTask):
"""Basic classification training task.
This task encapsultates all of the components and steps needed to
train a classifier using a :class:`classy_vision.trainer.ClassyTrainer`.
Assumes a train / test phase per each epoch and that the datasets
have the same API as the map-style Dataset class in
`torch.utils.data.dataset <https://pytorch.org/docs/stable/data.html
#torch.utils.data.Dataset>`_ (in particular, this task makes use of
the len). If you are using an `IterableDataset <https://pytorch.org/docs/
stable/data.html#torch.utils.data.IterableDataset>`_ then a custom task
may be appropriate.
:var loss: Loss (see :class:`classy_vision.losses.ClassyLoss`) function used
for computing the loss in each forward pass
:var datasets: Mapping from a ``phase_type`` in ["train", "test']
to dataset used for training (or testing)
:var meters: List of meters (see :class:`classy_vision.meters.ClassyMeter`)
to calculate during training
:var num_epochs: Number of epochs (passes over dataset) to train
:var test_only: Used to only run the test phase
:var base_model: Model to be trained, unwrapped in DDP or DP wrappers
:var optimizer: Optimizer used in train step
:var optimizer_schedulers: Dictionary. Key is the name of the optimizer
option (e.g. lr), value is a ClassyParamScheduler
:var checkpoint: Serializable dict which represents state in training
:var phases: List of phase specific information, e.g. if phase is
train / test.
:var hooks: List of hooks to apply during training
:var train: Phase type, if true it means we are training,
false means testing
:var distributed_model: Base model, but wrapped in DDP (DistributedDataParallel)
:var phase_idx: Current phase id, first phase is 0, if task has not started
training then returns -1
:var train_phase_idx: Only counts train phases
:var num_updates: Number of total parameter updates applied to model
by the optimizer
:var data_iterator: Iterator which can be used to obtain batches
:var losses: Loss curve
:var perf_log: list of training speed measurements, to be logged
:var clip_grad_norm: maximum gradient norm (default None)
:var simulated_global_batchsize: batch size simulated via gradient accumulation
:var optimizer_period: apply optimizer after this many steps; derived from
simulated_global_batchsize, default 1.
"""
    def __init__(self):
        """Constructs a ClassificationTask"""
        super().__init__()

        # Core training components, populated via the fluent set_* methods.
        self.base_loss = None
        self.datasets = {}
        self.meters = []
        self.num_epochs = 1
        self.test_phase_period = 1
        self.train_phases_per_epoch = 0
        self.test_only = False
        self.base_model = None
        self.optimizer = None
        self.optimizer_schedulers = {}
        # Checkpoint state: either an in-memory dict or a path to load from.
        self.checkpoint_dict = None
        self.checkpoint_path = None
        # Phase bookkeeping; phase_idx/train_phase_idx are -1 before training.
        self.phases = []
        self.hooks = []
        self.train = True
        # DDP-wrapped variants of model/loss (None outside distributed runs).
        self.distributed_model = None
        self.distributed_loss = None
        self.phase_idx = -1
        self.train_phase_idx = -1
        self.num_updates = 0
        self.dataloader = None
        self.data_iterator = None
        self.losses = []
        self.broadcast_buffers_mode: BroadcastBuffersMode = (
            BroadcastBuffersMode.BEFORE_EVAL
        )
        # Mixed precision (AMP) configuration; see set_amp_args.
        self.amp_args = None
        self.amp_type = None
        self.amp_grad_scaler = None
        self.mixup_transform = None
        self.perf_log = []
        self.last_batch = None
        self.batch_norm_sync_mode = BatchNormSyncMode.DISABLED
        self.find_unused_parameters = False
        # Default to GPU whenever CUDA is available; see set_use_gpu.
        self.use_gpu = torch.cuda.is_available()
        self.dataloader_mp_context = "spawn"
        self.bn_weight_decay = False
        self._train_only = True
        # Gradient clipping / accumulation knobs; see the respective setters.
        self.clip_grad_norm = None
        self.simulated_global_batchsize = None
        self.optimizer_period = 1
        # DistributedDataParallel tuning knobs; see set_distributed_options.
        self.ddp_bucket_cap_mb = 25
        self.use_sharded_ddp = False
        self.fp16_grad_compress = False
def set_use_sharded_ddp(self, use_sharded_ddp: bool):
self.use_sharded_ddp = use_sharded_ddp
if self.use_sharded_ddp:
logging.info("Using Sharded DDP")
return self
def set_use_gpu(self, use_gpu: bool):
self.use_gpu = use_gpu
assert (
not self.use_gpu or torch.cuda.is_available()
), "CUDA required to train on GPUs"
return self
def set_clip_grad_norm(self, clip_grad_norm: Optional[float]):
"""Sets maximum gradient norm.
None means gradient clipping is disabled. Defaults to None."""
self.clip_grad_norm = clip_grad_norm
if clip_grad_norm is None:
logging.info("Disabled gradient norm clipping.")
else:
logging.info(
f"Enabled gradient norm clipping with threshold: {clip_grad_norm}"
)
return self
def set_simulated_global_batchsize(self, simulated_global_batchsize: Optional[int]):
"""Sets a simulated batch size by gradient accumulation.
Gradient accumulation adds up gradients from multiple minibatches and
steps the optimizer every N train_steps, where N is optimizer_period.
When enabled, the very last train_steps might end up not updating the
model, depending on the number of total steps. None means gradient
accumulation is disabled. Defaults to None."""
self.simulated_global_batchsize = simulated_global_batchsize
return self
def set_checkpoint(self, checkpoint_path: str):
"""Sets checkpoint on task.
Args:
checkpoint_path: The path to load the checkpoint from. Can be a file or a
directory. See :func:`load_checkpoint` for more information.
"""
self.checkpoint_path = checkpoint_path
return self
def _set_checkpoint_dict(self, checkpoint_dict: Dict[str, Any]):
"""Sets the checkpoint dict in the task. Only used for testing.
Args:
checkpoint_dict: A serializable dict representing current task state
"""
self.checkpoint_dict = checkpoint_dict
return self
def set_num_epochs(self, num_epochs: Union[int, float]):
"""Set number of epochs to be run.
Args:
num_epochs: Number of epochs to run task
"""
self.num_epochs = num_epochs
return self
def set_test_phase_period(self, test_phase_period: int):
"""Set the period of test phase.
Args:
test_phase_period: The period of test phase
"""
self.test_phase_period = test_phase_period
return self
def set_dataset(self, dataset: ClassyDataset, phase_type: str):
"""Set dataset for phase type on task
Args:
dataset: ClassyDataset for returning samples.
phase_type: str must be one of "train" or "test"
"""
assert phase_type in [
"train",
"test",
], "phase_type must be in ['train', 'test']"
self.datasets[phase_type] = dataset
if phase_type == "train":
self.train_phases_per_epoch = getattr(dataset, "phases_per_epoch", 1)
else:
self._train_only = False
return self
def set_dataloader_mp_context(self, dataloader_mp_context: Optional[str]):
"""Set the multiprocessing context used by the dataloader.
The context can be either 'spawn', 'fork', 'forkserver' or None (uses the
default context). See
https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context
for more details."""
self.dataloader_mp_context = dataloader_mp_context
return self
def set_optimizer(self, optimizer: ClassyOptimizer):
"""Set optimizer for task
Args:
optimizer: optimizer for task
"""
self.optimizer = optimizer
return self
def set_loss(self, loss: ClassyLoss):
"""Set loss function for task
Args:
loss: loss for task
"""
self.base_loss = loss
return self
def set_meters(self, meters: List["ClassyMeter"]):
"""Set meters for task
Args:
meters: list of meters to compute during training
"""
self.meters = meters
return self
    def set_distributed_options(
        self,
        broadcast_buffers_mode: BroadcastBuffersMode = BroadcastBuffersMode.BEFORE_EVAL,
        batch_norm_sync_mode: BatchNormSyncMode = BatchNormSyncMode.DISABLED,
        batch_norm_sync_group_size: int = 0,
        find_unused_parameters: bool = False,
        bucket_cap_mb: int = 25,
        fp16_grad_compress: bool = False,
    ):
        """Set distributed options.

        Args:
            broadcast_buffers_mode: Broadcast buffers mode. See
                :class:`BroadcastBuffersMode` for options.
            batch_norm_sync_mode: Batch normalization synchronization mode. See
                :class:`BatchNormSyncMode` for options.
            batch_norm_sync_group_size: Group size to use for synchronized batch norm.
                0 means that the stats are synchronized across all replicas. For
                efficient synchronization, set it to the number of GPUs in a node (
                usually 8).
            find_unused_parameters: See
                :class:`torch.nn.parallel.DistributedDataParallel` for information.
            bucket_cap_mb: See
                :class:`torch.nn.parallel.DistributedDataParallel` for information.
            fp16_grad_compress: If True, enable FP16 gradient compression
                (requires PyTorch >= 1.8).

        Raises:
            RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and apex
                is not installed.
            ValueError: If batch_norm_sync_group_size > 0 without Apex sync BN.
        """
        self.broadcast_buffers_mode = broadcast_buffers_mode

        if batch_norm_sync_group_size > 0:
            if not batch_norm_sync_mode == BatchNormSyncMode.APEX:
                # this should ideally work with PyTorch Sync BN as well, but it
                # fails while initializing DDP for some reason.
                raise ValueError(
                    "batch_norm_sync_group_size can be > 0 only when "
                    "Apex Synchronized Batch Normalization is being used."
                )
        self.batch_norm_sync_group_size = batch_norm_sync_group_size

        if batch_norm_sync_mode == BatchNormSyncMode.DISABLED:
            logging.info("Synchronized Batch Normalization is disabled")
        else:
            # Fail fast if Apex sync BN was requested but apex is missing.
            if batch_norm_sync_mode == BatchNormSyncMode.APEX and not apex_available:
                raise RuntimeError("apex is not installed")
            msg = f"Using Synchronized Batch Normalization using {batch_norm_sync_mode}"
            if self.batch_norm_sync_group_size > 0:
                msg += f" and group size {batch_norm_sync_group_size}"
            logging.info(msg)
        self.batch_norm_sync_mode = batch_norm_sync_mode

        if find_unused_parameters:
            logging.info("Enabling find_unused_parameters in DDP")

        self.find_unused_parameters = find_unused_parameters
        self.ddp_bucket_cap_mb = bucket_cap_mb

        if fp16_grad_compress:
            # FP16 grad compression relies on a DDP comm hook added in 1.8.
            if get_torch_version() < [1, 8, 0]:
                raise RuntimeError(
                    "FP16 grad compression is only supported since PyTorch 1.8"
                )
            logging.info("Enabling FP16 grad compression")
        self.fp16_grad_compress = fp16_grad_compress

        return self
def set_hooks(self, hooks: List["ClassyHook"]):
"""Set hooks for task
Args:
hooks: List of hooks to apply during training
"""
from classy_vision.hooks import ClassyHook
assert isinstance(hooks, list)
assert all(isinstance(hook, ClassyHook) for hook in hooks)
assert len({hook.name() for hook in hooks}) == len(
hooks
), "Cannot have repeated hooks of the same class"
# TODO (zyan3): we move checkpoint hook to the end of the list because some hooks
# may change the state of the model, and we want to save changed state in the checkpoint.
# This is temporary fix.
non_checkpoint_hooks = [
hook for hook in hooks if not isinstance(hook, CheckpointHook)
]
checkpoint_hooks = [hook for hook in hooks if isinstance(hook, CheckpointHook)]
hooks = non_checkpoint_hooks + checkpoint_hooks
self.hooks = hooks
return self
def set_model(self, model: ClassyModel):
"""Set model for task
Args:
model: Model to be trained
"""
self.base_model = model
return self
def set_test_only(self, test_only: bool):
"""Set test only flag
Args:
test_only: If true, only test phases will be run
"""
self.test_only = test_only
return self
def set_bn_weight_decay(self, bn_weight_decay: bool):
assert type(bn_weight_decay) == bool
self.bn_weight_decay = bn_weight_decay
return self
    def set_amp_args(self, amp_args: Optional[Dict[str, Any]]):
        """Disable / enable apex.amp and set the automatic mixed precision parameters.

        apex.amp can be utilized for mixed / half precision training.

        Args:
            amp_args: Dictionary containing arguments to be passed to
                amp.initialize. Set to None to disable amp.  To enable mixed
                precision training, pass amp_args={"opt_level": "O1"} here.

                See https://nvidia.github.io/apex/amp.html for more info.

        Raises:
            RuntimeError: If opt_level is not None and apex is not installed.

        Warning: apex needs to be installed to utilize this feature.
        """
        self.amp_args = amp_args

        if amp_args is None:
            logging.info("AMP disabled")
        else:
            # Check that the requested AMP type is known
            try:
                self.amp_type = AmpType[self.amp_args["amp_type"].upper()]
            except KeyError:
                # NOTE(review): a present-but-invalid "amp_type" value also
                # raises KeyError (from the enum lookup) and silently falls
                # back to Apex here — confirm that is intended.
                logging.info("AMP type not specified, defaulting to Apex")
                self.amp_type = AmpType.APEX

            # Check for CUDA availability, required for both Apex and Pytorch AMP
            if not torch.cuda.is_available():
                raise RuntimeError(
                    "AMP is required but CUDA is not supported, cannot enable AMP"
                )

            # Check for Apex availability
            if self.amp_type == AmpType.APEX and not apex_available:
                raise RuntimeError(
                    "Apex AMP is required but Apex is not installed, cannot enable AMP"
                )

            if self.use_sharded_ddp:
                # Sharded DDP works only with Pytorch AMP + fairscale's scaler.
                if self.amp_type == AmpType.APEX:
                    raise RuntimeError(
                        "ShardedDDP has been requested, which is incompatible with Apex AMP"
                    )
                if not fairscale_available:
                    raise RuntimeError(
                        "ShardedDDP has been requested, but fairscale is not installed in the current environment"
                    )

            # Set Torch AMP grad scaler, used to prevent gradient underflow
            elif self.amp_type == AmpType.PYTORCH:
                if self.use_sharded_ddp:
                    logging.info("Using ShardedGradScaler to manage Pytorch AMP")
                    self.amp_grad_scaler = ShardedGradScaler()
                else:
                    self.amp_grad_scaler = TorchGradScaler()

            logging.info(f"AMP enabled with args {amp_args}")
        return self
def set_mixup_transform(self, mixup_transform: Optional["MixupTransform"]):
"""Disable / enable mixup transform for data augmentation
Args::
mixup_transform: a callable object which performs mixup data augmentation
"""
self.mixup_transform = mixup_transform
if mixup_transform is None:
logging.info("mixup disabled")
else:
logging.info("mixup enabled")
return self
    def set_optimizer_schedulers(self, schedulers):
        """Set the optimizer param schedulers (fluent setter).

        Args:
            schedulers: mapping from optimizer option name (e.g. "lr") to its
                ClassyParamScheduler, as described in the class docstring.
        """
        self.optimizer_schedulers = schedulers
        return self
    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask":
        """Instantiates a ClassificationTask from a configuration.

        Args:
            config: A configuration for a ClassificationTask.
                See :func:`__init__` for parameters expected in the config.

        Returns:
            A ClassificationTask instance.
        """
        test_only = config.get("test_only", False)
        if not test_only:
            # TODO Make distinction between epochs and phases in optimizer clear
            train_phases_per_epoch = config["dataset"]["train"].get(
                "phases_per_epoch", 1
            )

            optimizer_config = config["optimizer"]
            # The optimizer schedule is expressed in train phases, so scale
            # the configured epoch count by phases-per-epoch.
            optimizer_config["num_epochs"] = (
                config["num_epochs"] * train_phases_per_epoch
            )
            optimizer = build_optimizer(optimizer_config)
            param_schedulers = build_optimizer_schedulers(optimizer_config)

        # Build whichever of the train/test datasets the config declares.
        datasets = {}
        phase_types = ["train", "test"]
        for phase_type in phase_types:
            if phase_type in config["dataset"]:
                datasets[phase_type] = build_dataset(config["dataset"][phase_type])
        loss = build_loss(config["loss"])
        amp_args = config.get("amp_args")
        meters = build_meters(config.get("meters", {}))
        model = build_model(config["model"])

        # mixup config is optional; requires at least an "alpha" key.
        mixup_transform = None
        if config.get("mixup") is not None:
            assert "alpha" in config["mixup"], "key alpha is missing in mixup dict"
            mixup_transform = MixupTransform(
                config["mixup"]["alpha"], config["mixup"].get("num_classes")
            )

        # hooks config is optional
        hooks_config = config.get("hooks")
        hooks = []
        if hooks_config is not None:
            hooks = build_hooks(hooks_config)

        # Translate the "distributed" config section into the keyword
        # arguments expected by set_distributed_options, with defaults.
        distributed_config = config.get("distributed", {})
        distributed_options = {
            "broadcast_buffers_mode": BroadcastBuffersMode[
                distributed_config.get("broadcast_buffers", "before_eval").upper()
            ],
            "batch_norm_sync_mode": BatchNormSyncMode[
                distributed_config.get("batch_norm_sync_mode", "disabled").upper()
            ],
            "batch_norm_sync_group_size": distributed_config.get(
                "batch_norm_sync_group_size", 0
            ),
            "find_unused_parameters": distributed_config.get(
                "find_unused_parameters", False
            ),
            "bucket_cap_mb": distributed_config.get("bucket_cap_mb", 25),
            "fp16_grad_compress": distributed_config.get("fp16_grad_compress", False),
        }

        # Assemble the task via the fluent setters.
        task = (
            cls()
            .set_num_epochs(config["num_epochs"])
            .set_test_phase_period(config.get("test_phase_period", 1))
            .set_loss(loss)
            .set_test_only(test_only)
            .set_model(model)
            .set_meters(meters)
            .set_amp_args(amp_args)
            .set_mixup_transform(mixup_transform)
            .set_distributed_options(**distributed_options)
            .set_hooks(hooks)
            .set_bn_weight_decay(config.get("bn_weight_decay", False))
            .set_clip_grad_norm(config.get("clip_grad_norm"))
            .set_simulated_global_batchsize(config.get("simulated_global_batchsize"))
            .set_use_sharded_ddp(config.get("use_sharded_ddp", False))
        )

        if not test_only:
            task.set_optimizer(optimizer)
            task.set_optimizer_schedulers(param_schedulers)

        use_gpu = config.get("use_gpu")
        if use_gpu is not None:
            task.set_use_gpu(use_gpu)

        for phase_type in datasets:
            task.set_dataset(datasets[phase_type], phase_type)

        # NOTE: this is a private member and only meant to be used for
        # logging/debugging purposes. See __repr__ implementation
        task._config = config

        return task
    @property
    def num_batches_per_phase(self):
        """Returns number of batches in current phase iterator"""
        return len(self.data_iterator)

    @property
    def model(self):
        """Returns model used in training (can be wrapped with DDP)"""
        # Outside distributed runs no DDP wrapper exists, so fall back to
        # the plain base model.
        return (
            self.distributed_model if is_distributed_training_run() else self.base_model
        )

    @property
    def loss(self):
        """Returns loss used in training (can be wrapped with DDP)"""
        return self.distributed_loss if self.distributed_loss else self.base_loss

    @property
    def phase_type(self):
        """Returns current phase type. String with value "train" or "test" """
        return "train" if self.train else "test"

    @property
    def eval_phase_idx(self):
        """Returns current evaluation phase"""
        # phase_idx counts all phases; subtracting the completed train phases
        # (and 1 for zero-based indexing) leaves the test-phase index.
        return self.phase_idx - self.train_phase_idx - 1
def get_total_training_phases(self):
"""
Returns the total number of "train" phases in the task
"""
num_training_phases = 0
for phase in self.phases:
if phase["train"] is True:
num_training_phases += 1
return num_training_phases
def get_total_test_phases(self):
"""
Returns the total number of "test" phases in the task
"""
num_test_phases = 0
for phase in self.phases:
if phase["train"] is False:
num_test_phases += 1
return num_test_phases
def _build_phases(self):
    """Build the list of phase dicts (``{"train": bool}``) for this task.

    - Test-only runs produce one test phase per epoch.
    - Train-only runs produce only train phases.
    - Otherwise a test phase is inserted after every ``test_phase_period``
      train phases, and the schedule always ends with a test phase.
    """
    if self.test_only:
        return [{"train": False} for _ in range(self.num_epochs)]
    num_train = math.ceil(self.train_phases_per_epoch * self.num_epochs)
    train_phases = [{"train": True} for _ in range(num_train)]
    if self._train_only:
        return train_phases
    interleaved = []
    for count, train_phase in enumerate(train_phases, start=1):
        interleaved.append(train_phase)
        if count % self.test_phase_period == 0:
            interleaved.append({"train": False})
    # guarantee the run finishes on a test phase
    if interleaved[-1]["train"]:
        interleaved.append({"train": False})
    return interleaved
def build_dataloader_from_dataset(self, dataset, **kwargs):
    """Builds a dataloader from the provided dataset.

    Args:
        dataset: A ClassyDataset (must expose an ``iterator`` factory).
        kwargs: Additional kwargs to pass during dataloader construction for
            derived classes.
    """
    return dataset.iterator(
        phase_type=self.phase_type,
        # test phases always use phase id 0 (no epoch-dependent shuffling)
        current_phase_id=self.train_phase_idx if self.train else 0,
        # pinning memory only pays off with more than one GPU present
        pin_memory=self.use_gpu and torch.cuda.device_count() > 1,
        multiprocessing_context=mp.get_context(self.dataloader_mp_context),
        **kwargs,
    )
def build_dataloaders_for_current_phase(self):
    """Create ``self.dataloader`` for the phase currently in progress.

    Deriving classes can override this to support custom behavior, such as
    building several dataloaders in parallel.
    """
    current_dataset = self.datasets[self.phase_type]
    self.dataloader = self.build_dataloader_from_dataset(current_dataset)
def prepare_optimizer(self, optimizer, model, loss=None):
    """Populate ``optimizer`` with parameter groups for ``model`` (and ``loss``).

    Batch-norm parameters get their own group so their weight decay can be
    zeroed out when ``bn_weight_decay`` is disabled.

    Args:
        optimizer: The optimizer to attach the parameter groups to.
        model: Model whose parameters are split into BN / non-BN groups.
        loss: Optional loss module with learnable parameters, split the
            same way and merged into the two groups.
    """
    bn_params, other_params = split_batchnorm_params(model)
    if loss is not None:
        bn_params_loss, params_loss = split_batchnorm_params(loss)
        bn_params = bn_params + bn_params_loss
        other_params = other_params + params_loss
    bn_schedulers = self.optimizer_schedulers.copy()
    if not self.bn_weight_decay:
        bn_schedulers["weight_decay"] = 0
    param_groups = [{"params": other_params, **self.optimizer_schedulers}]
    if len(bn_params) > 0:
        param_groups.append({"params": bn_params, **bn_schedulers})
    # Fix: honor the `optimizer` argument instead of silently using
    # self.optimizer (the only caller passes optimizer=self.optimizer, so
    # existing behavior is unchanged).
    optimizer.set_param_groups(param_groups)
def prepare(self):
    """Prepares task for training, populates all derived attributes.

    Order matters here: sync-BN conversion must precede the GPU move,
    apex.amp initialization must precede checkpoint loading (amp state is
    restored from the checkpoint), and DDP wrapping happens last.
    """
    self.phases = self._build_phases()
    self.train = False if self.test_only else self.train
    if self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH:
        self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(self.base_model)
    elif self.batch_norm_sync_mode == BatchNormSyncMode.APEX:
        # group size bounds how many replicas share BN statistics
        sync_bn_process_group = apex.parallel.create_syncbn_process_group(
            self.batch_norm_sync_group_size
        )
        self.base_model = apex.parallel.convert_syncbn_model(
            self.base_model, process_group=sync_bn_process_group
        )
    # move the model and loss to the right device
    if self.use_gpu:
        self.base_model, self.base_loss = copy_model_to_gpu(
            self.base_model, self.base_loss
        )
    else:
        self.base_loss.cpu()
        self.base_model.cpu()
    if self.optimizer is not None:
        self.prepare_optimizer(
            optimizer=self.optimizer, model=self.base_model, loss=self.base_loss
        )
    if self.amp_args is not None:
        if self.amp_type == AmpType.APEX:
            # Initialize apex.amp. This updates the model and the PyTorch optimizer (
            # if training, which is wrapped by the ClassyOptimizer in self.optimizer).
            # Please note this must happen before loading the checkpoint, cause
            # there's amp state to be restored.
            if self.optimizer is None:
                self.base_model = apex.amp.initialize(
                    self.base_model, optimizers=None, **self.amp_args
                )
            else:
                self.base_model, self.optimizer.optimizer = apex.amp.initialize(
                    self.base_model, self.optimizer.optimizer, **self.amp_args
                )
    if self.simulated_global_batchsize is not None:
        if self.simulated_global_batchsize % self.get_global_batchsize() != 0:
            raise ValueError(
                f"Global batch size ({self.get_global_batchsize()}) must divide "
                f"simulated_global_batchsize ({self.simulated_global_batchsize})"
            )
    else:
        self.simulated_global_batchsize = self.get_global_batchsize()
    # optimizer_period > 1 enables gradient accumulation
    self.optimizer_period = (
        self.simulated_global_batchsize // self.get_global_batchsize()
    )
    if self.optimizer_period > 1:
        logging.info(
            f"Using gradient accumulation with a period of {self.optimizer_period}"
        )
    if self.checkpoint_path:
        self.checkpoint_dict = load_and_broadcast_checkpoint(self.checkpoint_path)
    classy_state_dict = (
        None
        if self.checkpoint_dict is None
        else self.checkpoint_dict["classy_state_dict"]
    )
    if classy_state_dict is not None:
        state_load_success = update_classy_state(self, classy_state_dict)
        assert (
            state_load_success
        ), "Update classy state from checkpoint was unsuccessful."
    self.init_distributed_data_parallel_model()
def init_distributed_data_parallel_model(self):
    """
    Initialize
    `torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/
    docs/stable/nn.html#distributeddataparallel>`_.

    Needed for distributed training. This is where a model should be wrapped by DDP.
    No-op outside a distributed run; must only be called once per task.
    """
    if not is_distributed_training_run():
        return
    assert (
        self.distributed_model is None
    ), "init_ddp_non_elastic must only be called once"
    # FORWARD_PASS syncs buffers on every forward; other modes rely on an
    # explicit broadcast (see _broadcast_buffers)
    broadcast_buffers = (
        self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS
    )
    if self.use_sharded_ddp:
        if not isinstance(self.optimizer, ZeRO):
            raise ValueError(
                "ShardedDataParallel engine should only be used in conjunction with ZeRO optimizer"
            )
        from fairscale.nn.data_parallel import ShardedDataParallel

        # Replace the original DDP wrap by the shard-aware ShardedDDP
        self.distributed_model = ShardedDataParallel(
            module=self.base_model,
            sharded_optimizer=self.optimizer.optimizer,
            broadcast_buffers=broadcast_buffers,
        )
    else:
        # NOTE: calls the module-level helper of the same name, not this method
        self.distributed_model = init_distributed_data_parallel_model(
            self.base_model,
            broadcast_buffers=broadcast_buffers,
            find_unused_parameters=self.find_unused_parameters,
            bucket_cap_mb=self.ddp_bucket_cap_mb,
        )
        if self.fp16_grad_compress:
            from torch.distributed.algorithms import ddp_comm_hooks

            # FP16 hook is stateless and only takes a process group as the state.
            # We use the default process group so we set the state to None.
            process_group = None
            self.distributed_model.register_comm_hook(
                process_group,
                ddp_comm_hooks.default_hooks.fp16_compress_hook,
            )
    if (
        isinstance(self.base_loss, ClassyLoss)
        and self.base_loss.has_learned_parameters()
    ):
        # a loss with learnable parameters must be gradient-synced as well
        logging.info("Initializing distributed loss")
        self.distributed_loss = init_distributed_data_parallel_model(
            self.base_loss,
            broadcast_buffers=broadcast_buffers,
            find_unused_parameters=self.find_unused_parameters,
            bucket_cap_mb=self.ddp_bucket_cap_mb,
        )
@property
def where(self):
    """Proportion of training completed, as a float in the range [0, 1).

    In test-only mode this is the proportion of testing completed instead.

    Raises:
        RuntimeError: If the current phase iterator has no batches.
    """
    # num_updates counts samples, so dividing by the global batch size
    # gives the number of steps taken so far
    current_step = self.num_updates / self.get_global_batchsize()
    num_phases = (
        self.get_total_test_phases()
        if self.test_only
        else self.get_total_training_phases()
    )
    if self.num_batches_per_phase <= 0:
        raise RuntimeError("No batches to read. Is the dataset empty?")
    num_steps = num_phases * self.num_batches_per_phase
    where = current_step / num_steps
    return where
def get_classy_state(self, deep_copy: bool = False):
    """Return a serializable dict capturing the task's state.

    Args:
        deep_copy: If True, deep-copy the dict before returning so later
            task mutations do not leak into it.
    """
    optimizer_state = (
        self.optimizer.get_classy_state() if self.optimizer is not None else {}
    )
    state = {
        "train": self.train,
        "base_model": self.base_model.get_classy_state(),
        "meters": [m.get_classy_state() for m in self.meters],
        "optimizer": optimizer_state,
        "phase_idx": self.phase_idx,
        "train_phase_idx": self.train_phase_idx,
        "num_updates": self.num_updates,
        "losses": self.losses,
        "hooks": {h.name(): h.get_classy_state() for h in self.hooks},
        "loss": {},
    }
    train_dataset = self.datasets.get("train")
    if train_dataset is not None and self._is_checkpointable_dataset(train_dataset):
        state["train_dataset_iterator"] = train_dataset.get_classy_state()
    if isinstance(self.base_loss, ClassyLoss):
        state["loss"] = self.base_loss.get_classy_state()
    if self.amp_args is not None:
        # persist mixed-precision scaling state (apex or native grad scaler)
        if self.amp_type == AmpType.APEX:
            state["amp"] = apex.amp.state_dict()
        elif self.amp_grad_scaler is not None:
            state["amp"] = self.amp_grad_scaler.state_dict()
    return copy.deepcopy(state) if deep_copy else state
def set_classy_state(self, state):
    """Set task state.

    Args:
        state: Dict containing state of a task (as produced by
            :meth:`get_classy_state`).
    """
    # some settings are different in test only
    self.train = False if self.test_only else state["train"]
    if not self.test_only:
        # phase/update counters are only meaningful when resuming training
        self.phase_idx = state["phase_idx"]
        self.num_updates = state["num_updates"]
        self.train_phase_idx = state["train_phase_idx"]
        self.losses = state["losses"]
    for meter, meter_state in zip(self.meters, state["meters"]):
        meter.set_classy_state(meter_state)
    self.base_model.set_classy_state(state["base_model"])
    if self.optimizer is not None:
        self.optimizer.set_classy_state(state["optimizer"])
    if state.get("loss") and isinstance(self.base_loss, ClassyLoss):
        self.base_loss.set_classy_state(state["loss"])
    if "amp" in state:
        # restore mixed-precision scaling state (apex or native grad scaler)
        if self.amp_type == AmpType.APEX:
            apex.amp.load_state_dict(state["amp"])
        else:
            self.amp_grad_scaler.load_state_dict(state["amp"])
    for hook in self.hooks:
        # we still want to be able to run when new hooks are added or old
        # hooks are removed
        if hook.name() in state["hooks"]:
            hook.set_classy_state(state["hooks"][hook.name()])
        else:
            logging.warning(f"No state found for hook: {hook.name()}")
    if "train" in self.datasets and self._is_checkpointable_dataset(
        self.datasets["train"]
    ):
        self.datasets["train"].set_classy_state(state.get("train_dataset_iterator"))
@staticmethod
def _is_checkpointable_dataset(dataset):
    """Return True when the dataset exposes the get/set_classy_state API."""
    required = ("get_classy_state", "set_classy_state")
    return all(hasattr(dataset, attr) for attr in required)
def eval_step(self):
    """Run one evaluation step: fetch a batch, forward it without gradients,
    record the loss and update the meters."""
    self.last_batch = None
    # Process next sample
    with Timer() as timer:
        sample = next(self.data_iterator)
    assert isinstance(sample, dict) and "input" in sample and "target" in sample, (
        f"Returned sample [{sample}] is not a map with 'input' and"
        + "'target' keys"
    )
    target = sample["target"]
    if self.use_gpu:
        sample = recursive_copy_to_gpu(sample, non_blocking=True)
    # Optional Pytorch AMP context
    torch_amp_context = (
        torch.cuda.amp.autocast()
        if self.amp_type == AmpType.PYTORCH
        else contextlib.suppress()
    )
    with torch.no_grad(), torch_amp_context:
        output = self.model(sample["input"])
        local_loss = self.compute_loss(output, sample)
        loss = local_loss.detach().clone()
        self.check_inf_nan(loss)
        # weight by batch size so the phase-level average is per-sample
        self.losses.append(loss.data.cpu().item() * target.size(0))
        self.update_meters(output, sample)
    # Move some data to the task so hooks get a chance to access it
    self.last_batch = LastBatchInfo(
        loss=loss,
        output=output,
        target=target,
        sample=sample,
        step_data={"sample_fetch_time": timer.elapsed_time},
    )
def check_inf_nan(self, loss):
    """Raise FloatingPointError when the loss is +/-infinity or NaN."""
    # NaN is the only value that compares unequal to itself
    is_nan = loss != loss
    is_inf = loss == float("inf") or loss == float("-inf")
    if is_inf or is_nan:
        raise FloatingPointError(f"Loss is infinity or NaN: {loss}")
def _should_do_step(self):
    """Return True when the current train step will run the optimizer.

    Without gradient accumulation (``optimizer_period == 1``) every step
    qualifies; otherwise only the final step of each accumulation period
    does, which is when gradients get synchronized.
    """
    update_idx = self.num_updates // self.get_global_batchsize()
    return update_idx % self.optimizer_period == self.optimizer_period - 1
def train_step(self):
    """Train step to be executed in train loop: fetch a batch, forward,
    backward, and (unless accumulating gradients) step the optimizer."""
    self.last_batch = None
    # Process next sample
    with Timer() as timer:
        sample = next(self.data_iterator)
    assert isinstance(sample, dict) and "input" in sample and "target" in sample, (
        f"Returned sample [{sample}] is not a map with 'input' and"
        + "'target' keys"
    )
    # Copy sample to GPU
    target = sample["target"]
    if self.use_gpu:
        sample = recursive_copy_to_gpu(sample, non_blocking=True)
    if self.mixup_transform is not None:
        # mixup runs after the GPU copy so augmented tensors stay on-device
        sample = self.mixup_transform(sample)
    # Optional Pytorch AMP context
    torch_amp_context = (
        torch.cuda.amp.autocast()
        if self.amp_type == AmpType.PYTORCH
        else contextlib.suppress()
    )
    # only sync with DDP when we need to perform an optimizer step
    # an optimizer step can be skipped if gradient accumulation is enabled
    do_step = self._should_do_step()
    ctx_mgr_model = (
        self.distributed_model.no_sync()
        if self.distributed_model is not None and not do_step
        else contextlib.suppress()
    )
    ctx_mgr_loss = (
        self.distributed_loss.no_sync()
        if self.distributed_loss is not None and not do_step
        else contextlib.suppress()
    )
    with ctx_mgr_model, ctx_mgr_loss:
        # Forward pass
        with torch.enable_grad(), torch_amp_context:
            output = self.model(sample["input"])
            local_loss = self.compute_loss(output, sample)
            loss = local_loss.detach().clone()
            # weight by batch size so phase-level averaging is per-sample
            self.losses.append(loss.data.cpu().item() * target.size(0))
            self.update_meters(output, sample)
        # Backwards pass + optimizer step
        self.run_optimizer(local_loss)
    # num_updates counts samples, not optimizer steps
    self.num_updates += self.get_global_batchsize()
    # Move some data to the task so hooks get a chance to access it
    self.last_batch = LastBatchInfo(
        loss=loss,
        output=output,
        target=target,
        sample=sample,
        step_data={"sample_fetch_time": timer.elapsed_time},
    )
def compute_loss(self, model_output, sample):
    """Evaluate the task's (possibly DDP-wrapped) loss on one batch."""
    target = sample["target"]
    return self.loss(model_output, target)
def run_optimizer(self, loss):
    """Runs backwards pass and update the optimizer.

    Handles gradient accumulation: gradients are zeroed only at the start
    of an accumulation period and the optimizer only steps at its end.
    """
    self.check_inf_nan(loss)
    # Gradient accumulation logic. We always set optimizer_period, even
    # if gradient accumulation is disabled. Assumes all batches have the
    # same size
    update_idx = self.num_updates // self.get_global_batchsize()
    do_zero_grad = (update_idx % self.optimizer_period) == 0
    do_step = self._should_do_step()
    if do_zero_grad:
        self.optimizer.zero_grad()
    if self.amp_type == AmpType.APEX:
        # apex scales the loss to avoid fp16 underflow before backward
        with apex.amp.scale_loss(loss, self.optimizer.optimizer) as scaled_loss:
            scaled_loss.backward()
    elif self.amp_type == AmpType.PYTORCH:
        self.amp_grad_scaler.scale(loss).backward()
    else:
        loss.backward()
    if do_step:
        # Handle gradient accumulation related gradient rescaling
        if self.optimizer_period != 1:
            self._rescale_gradients(1 / self.optimizer_period)
        # Clipping must happen after grad accumulation
        if self.clip_grad_norm is not None:
            self._clip_gradients(self.clip_grad_norm)
        if self.amp_type == AmpType.PYTORCH:
            # If using mixed precision, handle underflow-related scaling
            # See https://pytorch.org/docs/stable/amp.html#gradient-scaling
            # for context
            self.amp_grad_scaler.step(self.optimizer, where=self.where)
            self.amp_grad_scaler.update()
        else:
            self.optimizer.step(where=self.where)
def _rescale_gradients(self, scale):
    """Multiply every existing parameter gradient in-place by ``scale``.

    Used to average gradients accumulated over ``optimizer_period`` steps.
    """
    for parameter in master_params(self.optimizer):
        grad = parameter.grad
        if grad is not None:
            grad.data.mul_(scale)
def _clip_gradients(self, max_norm):
    # Clip the global gradient norm of all optimized parameters to max_norm.
    nn.utils.clip_grad_norm_(master_params(self.optimizer), max_norm)
def update_meters(self, model_output, sample):
    """Feed the detached, CPU-resident output/target pair to every meter."""
    cpu_target = sample["target"].detach().cpu()
    cpu_output = model_output.detach().cpu()
    for meter in self.meters:
        meter.update(cpu_output, cpu_target, is_train=self.train)
def synchronize_losses(self):
    """Average the losses across the different replicas."""
    # Average losses across nodes; all_reduce_mean is a no-op outside a
    # distributed run
    losses_tensor = torch.tensor(self.losses)
    synchronized_losses_tensor = all_reduce_mean(losses_tensor)
    self.losses = synchronized_losses_tensor.tolist()
def advance_phase(self):
    """Move bookkeeping on to the next phase.

    Resets meters and the loss history, bumps the phase counters, rebuilds
    the dataloader and its iterator, and flips the model between train and
    eval mode for the new phase.
    """
    logging.debug("Advancing phase")
    # start the new phase with clean meters and loss history
    for meter in self.meters:
        meter.reset()
    self.losses = []
    self.phase_idx += 1
    next_phase = self.phases[self.phase_idx]
    self.train = bool(next_phase["train"])
    if self.train:
        self.train_phase_idx += 1
    # Re-build dataloader & re-create iterator anytime membership changes.
    self.build_dataloaders_for_current_phase()
    self.create_data_iterators()
    # Set up pytorch module in train vs eval mode, update optimizer.
    self._set_model_train_mode()
def done_training(self):
    """Return True once the final phase has been completed."""
    completed_phases = self.phase_idx + 1
    return completed_phases >= len(self.phases)
def create_data_iterators(self):
    """Creates data iterator(s) for the current phase."""
    # Delete iterator explicitly so that all dataloader processes
    # are cleaned up. Order matters: the old iterator must be released
    # before the new one spawns its own workers.
    del self.data_iterator
    self.data_iterator = iter(self.dataloader)
def _set_model_train_mode(self):
    """Put the model and loss into train/eval mode for the current phase.

    When buffer broadcasting is configured as BEFORE_EVAL, replicas
    additionally synchronize their buffers right before each test phase.
    """
    is_train_phase = self.phases[self.phase_idx]["train"]
    self.base_model.train(is_train_phase)
    self.base_loss.train(is_train_phase)
    should_broadcast = (
        self.broadcast_buffers_mode == BroadcastBuffersMode.BEFORE_EVAL
        and not self.train
    )
    if should_broadcast:
        self._broadcast_buffers()
def _broadcast_buffers(self):
    """Explicitly synchronize buffers across all devices."""
    # nothing to do outside a distributed run
    if self.distributed_model is None:
        return
    buffers = list(self.base_model.buffers())
    if len(buffers) > 0:
        logging.info("Synchronizing buffers before evaluation.")
        for buffer in buffers:
            # rank 0's buffer values win on every replica
            broadcast(buffer, 0, group=self.distributed_model.process_group)
# TODO: Functions below should be better abstracted into the dataloader
# abstraction
def get_batchsize_per_replica(self):
    """Return this replica's batchsize for the current dataset (e.g. per GPU)."""
    active_dataset = self.datasets[self.phase_type]
    return active_dataset.get_batchsize_per_replica()
def get_global_batchsize(self):
    """Return the global batchsize across all trainers."""
    active_dataset = self.datasets[self.phase_type]
    return active_dataset.get_global_batchsize()
def on_start(self):
    """Notify every registered hook that the run is starting."""
    for registered_hook in self.hooks:
        registered_hook.on_start(self)
def on_phase_start(self):
    """Begin a phase: record timing, advance phase state, then fire hooks.

    ``phase_start_time_total`` includes phase setup and hook work, while
    ``phase_start_time_train`` marks when actual batch processing begins.
    """
    self.phase_start_time_total = time.perf_counter()
    self.advance_phase()
    for hook in self.hooks:
        hook.on_phase_start(self)
    self.phase_start_time_train = time.perf_counter()
def on_phase_end(self):
    """End-of-phase bookkeeping: sync losses and meters across replicas,
    fire hooks, and record timing."""
    self.log_phase_end("train")
    if self.train:
        # let per-epoch schedulers observe the current training position
        self.optimizer.on_epoch(where=self.where)
    logging.debug("Syncing losses on phase end...")
    self.synchronize_losses()
    logging.debug("...losses synced")
    logging.debug("Syncing meters on phase end...")
    for meter in self.meters:
        meter.sync_state()
    logging.debug("...meters synced")
    # ensure all replicas have finished syncing before hooks run
    barrier()
    for hook in self.hooks:
        hook.on_phase_end(self)
    self.perf_log = []
    self.log_phase_end("total")
def on_end(self):
    """Notify every registered hook that the run has finished."""
    for registered_hook in self.hooks:
        registered_hook.on_end(self)
def log_phase_end(self, tag):
    """Append a training-speed record to ``perf_log``; no-op for test phases.

    Args:
        tag: "train" measures from the start of batch processing, anything
            else measures from the start of the whole phase.
    """
    if not self.train:
        return
    if tag == "train":
        start_time = self.phase_start_time_train
    else:
        start_time = self.phase_start_time_total
    phase_duration = time.perf_counter() - start_time
    images_processed = self.get_global_batchsize() * self.num_batches_per_phase
    self.perf_log.append(
        {
            "tag": tag,
            "phase_idx": self.train_phase_idx,
            "epoch_duration": phase_duration,
            "im_per_sec": images_processed / phase_duration,
        }
    )
def __repr__(self):
    """Default repr, extended with the pretty-printed config when one was set."""
    base = super().__repr__()
    if not hasattr(self, "_config"):
        return base
    pretty_config = json.dumps(self._config, indent=4)
    return f"{base} initialized with config:\n{pretty_config}"
| classy_vision/tasks/classification_task.py | 49,493 | Basic classification training task.
This task encapsulates all of the components and steps needed to
train a classifier using a :class:`classy_vision.trainer.ClassyTrainer`.
Assumes a train / test phase per each epoch and that the datasets
have the same API as the map-style Dataset class in
`torch.utils.data.dataset <https://pytorch.org/docs/stable/data.html
#torch.utils.data.Dataset>`_ (in particular, this task makes use of
the len). If you are using an `IterableDataset <https://pytorch.org/docs/
stable/data.html#torch.utils.data.IterableDataset>`_ then a custom task
may be appropriate.
:var loss: Loss (see :class:`classy_vision.losses.ClassyLoss`) function used
for computing the loss in each forward pass
:var datasets: Mapping from a ``phase_type`` in ["train", "test"]
to dataset used for training (or testing)
:var meters: List of meters (see :class:`classy_vision.meters.ClassyMeter`)
to calculate during training
:var num_epochs: Number of epochs (passes over dataset) to train
:var test_only: Used to only run the test phase
:var base_model: Model to be trained, unwrapped in DDP or DP wrappers
:var optimizer: Optimizer used in train step
:var optimizer_schedulers: Dictionary. Key is the name of the optimizer
option (e.g. lr), value is a ClassyParamScheduler
:var checkpoint: Serializable dict which represents state in training
:var phases: List of phase specific information, e.g. if phase is
train / test.
:var hooks: List of hooks to apply during training
:var train: Phase type, if true it means we are training,
false means testing
:var distributed_model: Base model, but wrapped in DDP (DistributedDataParallel)
:var phase_idx: Current phase id, first phase is 0, if task has not started
training then returns -1
:var train_phase_idx: Only counts train phases
:var num_updates: Number of total parameter updates applied to model
by the optimizer
:var data_iterator: Iterator which can be used to obtain batches
:var losses: Loss curve
:var perf_log: list of training speed measurements, to be logged
:var clip_grad_norm: maximum gradient norm (default None)
:var simulated_global_batchsize: batch size simulated via gradient accumulation
:var optimizer_period: apply optimizer after this many steps; derived from
simulated_global_batchsize, default 1.
Constructs a ClassificationTask
Explicitly synchronize buffers across all devices.
Returns list of phases from config.
These phases will look like:
{
train: is this a train or test phase?
optimizer: optimizer settings
}
- If this is a test only run, then only test phases will be
generated
- If this is a training run with both train and test datasets, then x phases =
x train phases + x test phases, interleaved. If test_phase_period > 1, test
phases are only added after test_phase_period train phases. The last phase is
always a test phase.
- If this is a training run with only a train dataset, then x phases = x train
phases.
Sets the checkpoint dict in the task. Only used for testing.
Args:
checkpoint_dict: A serializable dict representing current task state
Set train mode for model
Tells if we will be performing an optimizer step.
Returns True always if there is no gradient accumulation. With gradient
accumulation returns True only when the gradients will be synchronized and we
will be performing an optimizer step.
Performs bookkeeping / task updates between phases
Increments phase idx, resets meters, resets loss history,
resets counters, shuffles dataset, rebuilds iterators, and
sets the train / test state for phase.
Builds a dataloader from the provided dataset
Args:
dataset: A ClassyDataset
kwargs: Additional kwargs to pass during dataloader construction for
derived classes
Builds dataloader(s) for the current phase.
Deriving classes can override this method to support custom behavior, like
supporting multiple dataloaders in parallel.
Creates data iterator(s) for the current phase.
Stop condition for training
Returns current evaluation phase
Instantiates a ClassificationTask from a configuration.
Args:
config: A configuration for a ClassificationTask.
See :func:`__init__` for parameters expected in the config.
Returns:
A ClassificationTask instance.
Return local replica's batchsize for dataset (e.g. batchsize per GPU)
Returns serializable state of task
Args:
deep_copy: If true, does a deep copy of state before returning.
Return global batchsize across all trainers
Returns the total number of "test" phases in the task
Returns the total number of "train" phases in the task
Initialize
`torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/
docs/stable/nn.html#distributeddataparallel>`_.
Needed for distributed training. This is where a model should be wrapped by DDP.
Returns loss used in training (can be wrapped with DDP)
Returns model used in training (can be wrapped with DDP)
Returns number of batches in current phase iterator
Returns current phase type. String with value "train" or "test"
Prepares task for training, populates all derived attributes
Runs backwards pass and update the optimizer
Disable / enable apex.amp and set the automatic mixed precision parameters.
apex.amp can be utilized for mixed / half precision training.
Args:
amp_args: Dictionary containing arguments to be passed to
amp.initialize. Set to None to disable amp. To enable mixed
precision training, pass amp_args={"opt_level": "O1"} here.
See https://nvidia.github.io/apex/amp.html for more info.
Raises:
RuntimeError: If opt_level is not None and apex is not installed.
Warning: apex needs to be installed to utilize this feature.
Sets checkpoint on task.
Args:
checkpoint_path: The path to load the checkpoint from. Can be a file or a
directory. See :func:`load_checkpoint` for more information.
Set task state
Args:
state: Dict containing state of a task
Sets maximum gradient norm.
None means gradient clipping is disabled. Defaults to None.
Set the multiprocessing context used by the dataloader.
The context can be either 'spawn', 'fork', 'forkserver' or None (uses the
default context). See
https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context
for more details.
Set dataset for phase type on task
Args:
dataset: ClassyDataset for returning samples.
phase_type: str must be one of "train" or "test"
Set distributed options.
Args:
broadcast_buffers_mode: Broadcast buffers mode. See
:class:`BroadcastBuffersMode` for options.
batch_norm_sync_mode: Batch normalization synchronization mode. See
:class:`BatchNormSyncMode` for options.
batch_norm_sync_group_size: Group size to use for synchronized batch norm.
0 means that the stats are synchronized across all replicas. For
efficient synchronization, set it to the number of GPUs in a node (
usually 8).
find_unused_parameters: See
:class:`torch.nn.parallel.DistributedDataParallel` for information.
bucket_cap_mb: See
:class:`torch.nn.parallel.DistributedDataParallel` for information.
Raises:
RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and apex
is not installed.
Set hooks for task
Args:
hooks: List of hooks to apply during training
Set loss function for task
Args:
loss: loss for task
Set meters for task
Args:
meters: list of meters to compute during training
Disable / enable mixup transform for data augmentation
Args::
mixup_transform: a callable object which performs mixup data augmentation
Set model for task
Args:
model: Model to be trained
Set number of epochs to be run.
Args:
num_epochs: Number of epochs to run task
Set optimizer for task
Args:
optimizer: optimizer for task
Sets a simulated batch size by gradient accumulation.
Gradient accumulation adds up gradients from multiple minibatches and
steps the optimizer every N train_steps, where N is optimizer_period.
When enabled, the very last train_steps might end up not updating the
model, depending on the number of total steps. None means gradient
accumulation is disabled. Defaults to None.
Set test only flag
Args:
test_only: If true, only test phases will be run
Set the period of test phase.
Args:
test_phase_period: The period of test phase
Average the losses across the different replicas
Train step to be executed in train loop.
Returns the proportion of training that has completed. If in test
only mode, returns proportion of testing completed
Returned value is a float in the range [0, 1)
!/usr/bin/env python3 Copyright (c) Facebook, Inc. and its affiliates. This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree. Automatic Mixed Precision supported types Enable DistributedDataParallel's broadcast_buffers option, synchronizing model buffers every forward pass. Similar to FORWARD_PASS, but only synchronizes model buffers once per epoch, between train and test phases. If your motivation for synchronizing buffers is for buffers to be consistent during eval, use this instead of FORWARD_PASS to reduce training overhead. No Synchronized Batch Normalization Use torch.nn.SyncBatchNorm Use apex.parallel.SyncBatchNorm, needs apex to be installed this should ideally work with PyTorch Sync BN as well, but it fails while initializing DDP for some reason. TODO (zyan3): we move checkpoint hook to the end of the list because some hooks may change the state of the model, and we want to save changed state in the checkpoint. This is temporary fix. Check that the requested AMP type is known Check for CUDA availability, required for both Apex and Pytorch AMP Check for Apex availability Set Torch AMP grad scaler, used to prevent gradient underflow TODO Make distinction between epochs and phases in optimizer clear hooks config is optional NOTE: this is a private member and only meant to be used for logging/debugging purposes. See __repr__ implementation move the model and loss to the right device Initialize apex.amp. This updates the model and the PyTorch optimizer ( if training, which is wrapped by the ClassyOptimizer in self.optimizer). Please note this must happen before loading the checkpoint, cause there's amp state to be restored. Replace the original DDP wrap by the shard-aware ShardedDDP FP16 hook is stateless and only takes a process group as the state. We use the default process group so we set the state to None. 
some settings are different in test only we still want to be able to run when new hooks are added or old hooks are removed Process next sample Optional Pytorch AMP context Move some data to the task so hooks get a chance to access it Process next sample Copy sample to GPU Optional Pytorch AMP context only sync with DDP when we need to perform an optimizer step an optimizer step can be skipped if gradient accumulation is enabled Forward pass Backwards pass + optimizer step Move some data to the task so hooks get a chance to access it Gradient accumulation logic. We always set optimizer_period, even if gradient accumulation is disabled. Assumes all batches have the same size Handle gradient accumulation related gradient rescaling Clipping must happen after grad accumulation If using mixed precision, handle underflow-related scaling See https://pytorch.org/docs/stable/amp.htmlgradient-scaling for context Update meters Average losses across nodes Reset meters for next phase / epoch Reset loss history for next epoch Setup new phase Re-build dataloader & re-create iterator anytime membership changes. Set up pytorch module in train vs eval mode, update optimizer. Delete iterator explicitly so that all dataloader processes are cleaned up. TODO: Functions below should be better abstracted into the dataloader abstraction | 11,842 | en | 0.762314 |
"""The tests for day17."""
from days import day17
from ddt import ddt, data, unpack
import unittest
import helpers
@ddt
class MyTestCase(unittest.TestCase):  # noqa D101
    """Tests for day17: the worked example from the puzzle plus the accepted
    answers for the real input (regression tests)."""

    # ddt expands each @data entry into a separate test case; @unpack splits
    # the [test_input, expected] pair across the method's parameters.
    @data(
        [[
            'x=495, y=2..7',
            'y=7, x=495..501',
            'x=501, y=3..7',
            'x=498, y=2..4',
            'x=506, y=1..2',
            'x=498, y=10..13',
            'x=504, y=10..13',
            'y=13, x=498..504'], '57'])
    @unpack
    def test_example_a(self, test_input, expected):  # noqa D102
        """Part A on the example scan should report 57."""
        result = day17.part_a(test_input)
        self.assertEqual(result, expected)

    def test_answer_part_a(self):  # noqa D102
        """Part A on the real puzzle input (regression check)."""
        result = day17.part_a(helpers.get_file_contents('day17.txt'))
        self.assertEqual(result, '38021')

    @data(
        [[
            'x=495, y=2..7',
            'y=7, x=495..501',
            'x=501, y=3..7',
            'x=498, y=2..4',
            'x=506, y=1..2',
            'x=498, y=10..13',
            'x=504, y=10..13',
            'y=13, x=498..504'], '29'])
    @unpack
    def test_example_b(self, test_input, expected):  # noqa D102
        """Part B on the example scan should report 29."""
        result = day17.part_b(test_input)
        self.assertEqual(result, expected)

    def test_answer_part_b(self):  # noqa D102
        """Part B on the real puzzle input (regression check)."""
        result = day17.part_b(helpers.get_file_contents('day17.txt'))
        self.assertEqual(result, '32069')
| test/test_day17.py | 1,354 | The tests for day17.
noqa D101 noqa D102 noqa D102 noqa D102 noqa D102 | 72 | gu | 0.384853 |
"""
pyart.lazydict
==============
A dictionary-like class supporting lazy loading of specified keys.
.. autosummary::
:toctree: generated/
:template: dev_template.rst
LazyLoadDict
"""
try:
# Python 3
from collections.abc import MutableMapping
except ImportError:
# Python 2.7, will be removed in next release after Py-ART Impressionism.
from collections import MutableMapping
import itertools
class LazyLoadDict(MutableMapping):
    """
    A dictionary-like class supporting lazy loading of specified keys.

    Lazy keys are registered with :meth:`set_lazy` together with a callable
    that produces their value.  The callable runs on first access, after
    which the result is cached in the ordinary backing dictionary.

    Note that the ``key in d`` syntax evaluates a lazy key (it goes through
    ``__getitem__``); use ``key in d.keys()`` to test membership without
    triggering evaluation.

    The comparison methods (__cmp__, __ge__, __gt__, __le__, __lt__, __ne__),
    the view methods (viewitems, viewkeys, viewvalues) and the fromkeys
    method are not implemented.

    Parameters
    ----------
    dic : dict
        Key/value pairs stored and evaluated eagerly.  The dictionary is
        referenced, not copied, so mutations of the LazyLoadDict may change
        the original; pass ``dic.copy()`` if that is not desired.

    Examples
    --------
    >>> d = LazyLoadDict({'key1': 'value1'})
    >>> d.set_lazy('lazykey1', lambda: 999)
    >>> d['lazykey1']
    999
    """

    def __init__(self, dic):
        """Initialize with a dictionary of eagerly stored items."""
        self._dic = dic
        self._lazyload = {}

    # -- abstract methods required by MutableMapping --------------------

    def __setitem__(self, key, value):
        """Store *value* eagerly, discarding any pending lazy loader for *key*."""
        self._dic[key] = value
        self._lazyload.pop(key, None)

    def __getitem__(self, key):
        """Return the value for *key*, evaluating and caching a lazy loader if needed."""
        if key in self._lazyload:
            # Evaluate once, cache the result, and drop the loader.
            self._dic[key] = self._lazyload.pop(key)()
        return self._dic[key]

    def __delitem__(self, key):
        """Remove *key*, whether it is lazy or already evaluated."""
        try:
            del self._lazyload[key]
        except KeyError:
            del self._dic[key]

    def __iter__(self):
        """Iterate over eager and lazy keys without evaluating any loader."""
        # Copies guard against mutation while iterating.
        return itertools.chain(self._dic.copy(), self._lazyload.copy())

    def __len__(self):
        """Return the combined number of eager and lazy keys."""
        return len(self._dic) + len(self._lazyload)

    # -- additional methods mimicking dict behavior ----------------------

    def __str__(self):
        """Return a dict-like string with lazy entries shown as LazyLoad(...)."""
        joiner = ', ' if self._dic and self._lazyload else ''
        lazy_strs = ['%s: LazyLoad(%s)' % (repr(k), repr(v))
                     for k, v in self._lazyload.items()]
        return str(self._dic)[:-1] + joiner + ', '.join(lazy_strs) + '}'

    def has_key(self, key):
        """Return True if *key* is present (this evaluates a lazy key)."""
        return key in self

    def copy(self):
        """
        Return a copy of the dictionary.

        Lazy keys remain unevaluated in both the original and the copy.
        """
        duplicate = self.__class__(self._dic.copy())
        for key, loader in self._lazyload.items():
            duplicate.set_lazy(key, loader)
        return duplicate

    # -- lazy dictionary specific methods --------------------------------

    def set_lazy(self, key, value_callable):
        """Register *value_callable* to produce the value of *key* on first access."""
        self._dic.pop(key, None)
        self._lazyload[key] = value_callable
| pycwr/configure/pyart_lazydict.py | 4,347 | A dictionary-like class supporting lazy loading of specified keys.
Keys which are lazy loaded are specified using the set_lazy method.
The callable object which produces the specified key is provided as the
second argument to this method. This object gets called when the value
of the key is loaded. After this initial call the results is cached
in the traditional dictionary which is used for supplemental access to
this key.
Testing for keys in this dictionary using the "key in d" syntax will
result in the loading of a lazy key, use "key in d.keys()" to prevent
this evaluation.
The comparison methods, __cmp__, __ge__, __gt__, __le__, __lt__, __ne__,
nor the view methods, viewitems, viewkeys, viewvalues, are implemented.
    Neither is the fromkeys method.
Parameters
----------
dic : dict
Dictionary containing key, value pairs which will be stored and
evaluated traditionally. This dictionary referenced not copied into
the LazyLoadDictionary and hence changed to this dictionary may change
the original. If this behavior is not desired copy dic in the
    initialization.
Examples
--------
>>> d = LazyLoadDict({'key1': 'value1', 'key2': 'value2'})
>>> d.keys()
['key2', 'key1']
>>> lazy_func = lambda : 999
>>> d.set_lazy('lazykey1', lazy_func)
>>> d.keys()
['key2', 'key1', 'lazykey1']
>>> d['lazykey1']
999
Remove a lazy or traditional key from the dictionary.
Get the value of a key, evaluating a lazy key if needed.
    initialize.
Iterate over all lazy and traditional keys.
Return the number of traditional and lazy keys.
Set a key which will not be stored and evaluated traditionally.
Return a string representation of the object.
Return a copy of the dictionary.
Lazy keys are not evaluated in the original or copied dictionary.
True if dictionary has key, else False.
Set a lazy key to load from a callable object.
pyart.lazydict
==============
A dictionary-like class supporting lazy loading of specified keys.
.. autosummary::
:toctree: generated/
:template: dev_template.rst
LazyLoadDict
Python 3 Python 2.7, will be removed in next release after Py-ART Impressionism. abstract methods additional class to mimic dict behavior load all lazy keys into the copy lazy dictionary specific methods | 2,256 | en | 0.785612 |
from apis.creat_account.api_account_setAlias import account_setAlias
from apis.creat_account.api_create_account import create_account, create_account_100
from apis.creat_account.api_get_addresslist import get_address_list
from apis.transfer.blockmgr_sendRawTransaction import sendRawTransaction
from apis.transfer.time_of_account_1 import transation_120_account_1
from apis.transfer_inquiry.api_chain_getBalance import check_transfer_balance, transfer_balance, \
getBalance_of_all_address_list, chain_getBalance
from apis.transfer.api_chain_transaction import transaction_one, random_transaction
from apis.transfer_inquiry.trace_getRawTransaction import getRawTransaction, getTransaction, decodeTrasnaction, \
getReceiveTransactionByAd, rebuild, getSendTransactionByAddr
from apis.vote_message.account_voteCredit import voteCredit
from apis.vote_message.chain_getCreditDetails import getVoteCreditDetails
from apis.交易池中的交易状态及交易池中的交易流转过程.blockmgr_getPoolTransactions import getPoolTransactions
from apis.交易池中的交易状态及交易池中的交易流转过程.blockmgr_getTransactionCount import getTransactionCount
from apis.交易池中的交易状态及交易池中的交易流转过程.blockmgr_getTxInPool import blockmgrGetTxInPool
# Registry mapping a human-readable case name to the API callable that runs it.
# runCase() below dispatches through this table.
api_route = {
    "create_account": create_account,
    "create_account_100": create_account_100,
    "get_address_list": get_address_list,
    "account_setAlias": account_setAlias,
    "transaction_one": transaction_one,
    "random_transaction": random_transaction,
    "chain_getBalance": chain_getBalance,
    "getBalance_of_all_address_list": getBalance_of_all_address_list,
    # "creat_one_wallet_account": creat_one_wallet_account,
    "transation_120_account_1": transation_120_account_1,
    "transfer_balance": transfer_balance,
    "check_transfer_balance": check_transfer_balance,
    "getRawTransaction": getRawTransaction,
    "getTransaction": getTransaction,
    "decodeTrasnaction": decodeTrasnaction,
    "getSendTransactionByAddr": getSendTransactionByAddr,
    "getReceiveTransactionByAd": getReceiveTransactionByAd,
    "rebuild": rebuild,
    "blockmgrGetTxInPool": blockmgrGetTxInPool,
    "getPoolTransactions": getPoolTransactions,
    "getTransactionCount": getTransactionCount,
    "blockmgr_sendRawTransaction": sendRawTransaction,
    "account_voteCredit": voteCredit,
    "chain_getVoteCreditDetails": getVoteCreditDetails,
}
# Central dispatcher for all API test cases.
def runCase(case_name):
    """Look up and execute the API case registered under *case_name*.

    :param case_name: key into ``api_route``, e.g. ``"create_account"``
    :return: whatever the underlying case function returns
    :raises KeyError: if *case_name* is not registered.  Previously an
        unknown name surfaced as a confusing
        ``TypeError: 'NoneType' object is not callable``.
    """
    case_function = api_route.get(case_name)
    if case_function is None:
        raise KeyError('Unknown API case name: %r' % case_name)
    return case_function()
if __name__ == '__main__':
    # Manual smoke test of the dispatcher: run two account-creation cases.
    print(runCase("create_account_100"))
    print(runCase("create_account"))
| apis/router.py | 2,632 | :param case_name:
:return: 注意格式 xxx(case_name)()
"creat_one_wallet_account": creat_one_wallet_account, API 总函数 | 114 | zh | 0.37862 |
from django.contrib import messages
from django.http import QueryDict
from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.contrib.admin.views.decorators import staff_member_required
from django.template.defaulttags import register
from common.exporter import find_all_exporters
from common.utility import get_image_as_http_response
from common.importer import find_all_importers
from common.search_filters import SearchFilter
from common.label import get_complete_label_name
from django.urls import reverse
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
import os
from .forms import *
from .models import *
from common.user import is_annotater
def get_task_statistics(tasks, user):
    """Attach per-user progress flags to each task (mutates the tasks in place).

    ``started`` is True once *user* has at least one annotation for the task;
    ``finished`` is True when every image of the task has been annotated.
    """
    for current_task in tasks:
        annotation_count = ImageAnnotation.objects.filter(task=current_task, user=user).count()
        current_task.started = annotation_count > 0
        current_task.finished = current_task.number_of_annotated_images == current_task.total_number_of_images
def index(request):
    """Landing page: annotators see their assigned tasks, staff see all tasks."""
    if is_annotater(request.user):
        # Show only tasks assigned to this user
        tasks = Task.objects.filter(user=request.user)
        template = 'annotationweb/index_annotater.html'
    else:
        # Admin page: every task
        tasks = Task.objects.all()
        template = 'annotationweb/index_admin.html'
    get_task_statistics(tasks, request.user)
    return render(request, template, {'tasks': tasks})
@staff_member_required
def export(request, task_id):
    """Let a staff user pick an exporter for a task.

    When only a single exporter exists for the task type, skip the choice
    page and go straight to its options.
    """
    try:
        task = Task.objects.get(pk=task_id)
    except Task.DoesNotExist:
        raise Http404('Task does not exist')

    if request.method == 'POST':
        return redirect('export_options', task_id=task.id,
                        exporter_index=int(request.POST['exporter']))

    available_exporters = find_all_exporters(task.type)
    if len(available_exporters) == 1:
        # Only one exporter for this type; no need to ask
        return redirect('export_options', task_id=task.id, exporter_index=0)
    return render(request, 'annotationweb/choose_exporter.html',
                  {'exporters': available_exporters, 'task': task})
@staff_member_required
def export_options(request, task_id, exporter_index):
    """Show the chosen exporter's options form and run the export on submit."""
    try:
        task = Task.objects.get(pk=task_id)
    except Task.DoesNotExist:
        raise Http404('Task does not exist')

    exporter = find_all_exporters(task.type)[int(exporter_index)]()
    exporter.task = task

    if request.method == 'POST':
        form = exporter.get_form(data=request.POST)
        if form.is_valid():
            success, message = exporter.export(form)
            if success:
                messages.success(request, 'Export finished: ' + message)
            else:
                messages.error(request, 'Export failed: ' + message)
            return redirect('index')
        # Invalid form: fall through and re-render it with errors
    else:
        form = exporter.get_form()  # unbound form

    return render(request, 'annotationweb/export_options.html',
                  {'form': form, 'exporter_index': exporter_index, 'task': task})
@staff_member_required
def import_data(request, dataset_id):
    """Let a staff user pick an importer for a dataset."""
    try:
        dataset = Dataset.objects.get(pk=dataset_id)
    except Dataset.DoesNotExist:
        raise Http404('Dataset does not exist')

    if request.method == 'POST':
        return redirect('import_options', dataset_id=dataset.id,
                        importer_index=int(request.POST['importer']))

    return render(request, 'annotationweb/choose_importer.html',
                  {'importers': find_all_importers(), 'dataset': dataset})
@staff_member_required
def import_options(request, dataset_id, importer_index):
    """Show the chosen importer's options form and run the import on submit."""
    try:
        dataset = Dataset.objects.get(pk=dataset_id)
    except Dataset.DoesNotExist:
        raise Http404('Dataset does not exist')

    importer = find_all_importers()[int(importer_index)]()
    importer.dataset = dataset

    if request.method == 'POST':
        form = importer.get_form(data=request.POST)
        if form.is_valid():
            success, message = importer.import_data(form)
            if success:
                messages.success(request, 'Import finished: ' + message)
            else:
                messages.error(request, 'Import failed: ' + message)
            return redirect('index')
        # Invalid form: fall through and re-render it with errors
    else:
        form = importer.get_form()  # unbound form

    return render(request, 'annotationweb/import_options.html',
                  {'form': form, 'importer_index': importer_index, 'dataset': dataset})
def show_image(request, image_id, task_id):
    """Serve the middle frame of an image sequence as an HTTP image response."""
    try:
        task = Task.objects.get(pk=task_id)
        image = ImageSequence.objects.get(pk=image_id)
    except Task.DoesNotExist:
        raise Http404('Task does not exist')
    except ImageSequence.DoesNotExist:
        raise Http404('Image does not exist')

    middle_frame = int(image.nr_of_frames / 2)
    filename = image.format.replace('#', str(middle_frame))
    return get_image_as_http_response(filename, task.post_processing_method)
@staff_member_required
def new_task(request):
    """Create a new annotation task from a submitted TaskForm."""
    form = TaskForm(request.POST) if request.method == 'POST' else TaskForm()
    if form.is_bound and form.is_valid():
        form.save()
        return redirect('index')
    return render(request, 'annotationweb/new_task.html', {'form': form})
@staff_member_required
def new_label(request):
    """Create a new label from a submitted LabelForm."""
    form = LabelForm(request.POST) if request.method == 'POST' else LabelForm()
    if form.is_bound and form.is_valid():
        form.save()
        return redirect('index')
    return render(request, 'annotationweb/new_label.html', {'form': form})
@staff_member_required
def delete_task(request, task_id):
    """Confirm and delete a task.

    Fixes: ``Http404`` is now raised instead of returned (a returned
    exception instance is not a valid HttpResponse), and any POST now
    redirects so answering 'No' no longer makes the view return None.
    """
    # TODO: do cleanup after deleting task?
    try:
        task = Task.objects.get(pk=task_id)
    except Task.DoesNotExist:
        raise Http404('Task not found')

    if request.method == 'POST':
        if request.POST['choice'] == 'Yes':
            task.delete()
            messages.success(request, 'The task ' + task.name + ' was deleted.')
        return redirect('index')
    else:
        return render(request, 'annotationweb/delete_task.html', {'task': task})
@staff_member_required
def datasets(request):
    """List every dataset for staff users."""
    return render(request, 'annotationweb/datasets.html',
                  {'datasets': Dataset.objects.all()})
@staff_member_required
def new_dataset(request):
    """Create a new dataset from a submitted DatasetForm."""
    form = DatasetForm(request.POST) if request.method == 'POST' else DatasetForm()
    if form.is_bound and form.is_valid():
        form.save()
        messages.success(request, 'New dataset created')
        return redirect('datasets')
    return render(request, 'annotationweb/new_dataset.html', {'form': form})
@staff_member_required
def delete_dataset(request, dataset_id):
    """Confirm and delete a dataset.

    Fixes: ``Http404`` is now raised instead of returned, and any POST now
    redirects so answering 'No' no longer makes the view return None.
    """
    try:
        dataset = Dataset.objects.get(pk=dataset_id)
    except Dataset.DoesNotExist:
        raise Http404('Dataset not found')

    if request.method == 'POST':
        if request.POST['choice'] == 'Yes':
            dataset.delete()
            messages.success(request, 'Dataset ' + dataset.name + ' was deleted.')
        return redirect('datasets')
    else:
        return render(request, 'annotationweb/delete_dataset.html', {'dataset': dataset})
def get_start_and_total_frames(file_format):
    """Detect the first frame number and total frame count on disk.

    *file_format* is a filename template containing '#' as the frame-number
    placeholder.  Frames are probed from 0 upwards; if no file is found at
    index 0, 1 or 2, (None, 0) is returned.  Counting stops at the first
    missing frame after the start frame.
    """
    def _frame_exists(frame_nr):
        return os.path.isfile(file_format.replace('#', str(frame_nr)))

    start_frame = None
    nr_of_frames = 0
    i = 0
    while True:
        found = _frame_exists(i)
        if found:
            nr_of_frames += 1
        if start_frame is None:
            if found:
                start_frame = i
            elif i > 1:
                break  # nothing at the low indices: give up
        elif not found:
            break  # first gap after the start frame ends the sequence
        i += 1
    return start_frame, nr_of_frames
@staff_member_required
def add_image_sequence(request, subject_id):
    """Add a new image sequence to a subject, auto-detecting its frame range.

    Fix: removed a stray debug ``print`` left in the view.
    """
    try:
        subject = Subject.objects.get(pk=subject_id)
    except Subject.DoesNotExist:
        raise Http404('Subject does not exist')

    if request.method == 'POST':
        form = ImageSequenceForm(request.POST)
        if form.is_valid():
            new_image_sequence = form.save(commit=False)  # create model, don't hit DB yet
            start_frame, total_nr_of_frames = get_start_and_total_frames(new_image_sequence.format)
            if start_frame is None:
                messages.error(request, 'No data existed with the provided filename format.')
            else:
                new_image_sequence.nr_of_frames = total_nr_of_frames
                new_image_sequence.start_frame_nr = start_frame
                new_image_sequence.subject = subject
                new_image_sequence.save()  # save to DB
                messages.success(request, 'Sequence successfully added')
                return redirect('dataset_details', subject.dataset.id)
    else:
        form = ImageSequenceForm()

    return render(request, 'annotationweb/add_image_sequence.html',
                  {'form': form, 'subject': subject})
@staff_member_required
def select_key_frames(request, task_id, image_id):
    """Select which frames of an image sequence are key frames for a task.

    GET renders the selection page; POST stores the chosen frames, creating
    the ImageAnnotation on demand and deleting deselected key frames.

    Fixes: removed a stray debug ``print``; a POST with no frames selected
    now re-renders the page instead of returning None.
    """
    try:
        image_sequence = ImageSequence.objects.get(pk=image_id)
        task = Task.objects.get(pk=task_id)
    except ImageSequence.DoesNotExist:
        raise Http404('Image sequence does not exist')
    except Task.DoesNotExist:
        raise Http404('Task does not exist')

    if request.method == 'POST':
        frame_list = request.POST.getlist('frames')
        if len(frame_list) == 0:
            messages.error(request, 'You must select at least 1 frame')
        else:
            # Add annotation object if it does not exist yet
            try:
                annotation = ImageAnnotation.objects.get(image_id=image_id, task_id=task_id)
            except ImageAnnotation.DoesNotExist:
                annotation = ImageAnnotation()
                annotation.image_id = image_id
                annotation.task_id = task_id
                annotation.rejected = False
                annotation.user = request.user
                annotation.finished = False
                annotation.save()

            # Add the selected frames to the DB
            for frame_nr in frame_list:
                try:
                    key_frame = KeyFrameAnnotation.objects.get(image_annotation=annotation, frame_nr=frame_nr)
                    # Already exists, do nothing
                except KeyFrameAnnotation.DoesNotExist:
                    # Does not exist, add it
                    key_frame = KeyFrameAnnotation()
                    key_frame.image_annotation = annotation
                    key_frame.frame_nr = frame_nr
                    key_frame.save()
                    if annotation.finished:
                        # A new frame invalidates a finished annotation
                        annotation.finished = False
                        annotation.save()

            # Delete frames that were deselected
            to_delete = KeyFrameAnnotation.objects.filter(image_annotation=annotation).exclude(frame_nr__in=frame_list)
            deleted_count = len(to_delete)
            to_delete.delete()

            messages.success(request, 'The ' + str(len(frame_list)) + ' key frames were stored. ' + str(deleted_count) + ' key frames were deleted.')
            return redirect('task', task_id)

    # GET, or POST without any selected frames: (re)render the selection page
    frames = KeyFrameAnnotation.objects.filter(image_annotation__image=image_sequence, image_annotation__task=task)
    return render(request, 'annotationweb/add_key_frames.html',
                  {'image_sequence': image_sequence, 'task': task, 'frames': frames})
def show_frame(request, image_sequence_id, frame_nr, task_id):
    """Serve one specific frame of an image sequence as an HTTP image response."""
    # Get image sequence the key frame belongs to
    try:
        task = Task.objects.get(pk=task_id)
        image_sequence = ImageSequence.objects.get(pk=image_sequence_id)
    except Task.DoesNotExist:
        raise Http404('Task does not exist')
    except ImageSequence.DoesNotExist:
        raise Http404('Image sequence does not exist')
    # '#' in the stored format string is the frame-number placeholder
    filename = image_sequence.format.replace('#', str(frame_nr))
    return get_image_as_http_response(filename, task.post_processing_method)
@staff_member_required()
def dataset_details(request, dataset_id):
    """Show the detail page for a dataset.

    Fix: ``Http404`` is now raised instead of returned (a returned exception
    instance is not a valid HttpResponse).
    """
    try:
        dataset = Dataset.objects.get(pk=dataset_id)
    except Dataset.DoesNotExist:
        raise Http404('The dataset does not exist')
    return render(request, 'annotationweb/dataset_details.html', {'dataset': dataset})
@staff_member_required()
def new_subject(request, dataset_id):
    """Add a new subject to a dataset.

    Fix: ``Http404`` is now raised instead of returned.
    """
    try:
        dataset = Dataset.objects.get(pk=dataset_id)
    except Dataset.DoesNotExist:
        raise Http404('The dataset does not exist')

    if request.method == 'POST':
        form = SubjectForm(request.POST)
        if form.is_valid():
            subject = form.save(commit=False)
            subject.dataset = dataset
            subject.save()
            messages.success(request, 'Subject added')
            return redirect('dataset_details', dataset.id)
    else:
        form = SubjectForm()

    return render(request, 'annotationweb/new_subject.html',
                  {'dataset': dataset, 'form': form})
@staff_member_required()
def delete_subject(request, subject_id):
    """Confirm and delete a subject.

    Fixes: ``Http404`` is now raised instead of returned, and any POST now
    redirects so answering 'No' no longer makes the view return None.
    """
    try:
        subject = Subject.objects.get(pk=subject_id)
    except Subject.DoesNotExist:
        raise Http404('The subject does not exist')

    if request.method == 'POST':
        dataset_id = subject.dataset.id  # capture before a potential delete
        if request.POST['choice'] == 'Yes':
            subject.delete()
            messages.success(request, 'The subject ' + subject.name + ' was deleted.')
        return redirect('dataset_details', dataset_id)
    else:
        return render(request, 'annotationweb/delete_subject.html', {'subject': subject})
@staff_member_required()
def subject_details(request, subject_id):
    """Show the detail page for a subject.

    Fix: ``Http404`` is now raised instead of returned.
    """
    try:
        subject = Subject.objects.get(pk=subject_id)
    except Subject.DoesNotExist:
        raise Http404('The subject does not exist')
    return render(request, 'annotationweb/subject_details.html', {'subject': subject})
@staff_member_required()
def delete_sequence(request, sequence_id):
    """Confirm and delete an image sequence.

    Fixes: ``Http404`` is now raised instead of returned; any POST now
    redirects so answering 'No' no longer makes the view return None; the
    success message now says 'sequence' instead of 'subject'.
    """
    try:
        sequence = ImageSequence.objects.get(pk=sequence_id)
    except ImageSequence.DoesNotExist:
        raise Http404('The sequence does not exist')

    if request.method == 'POST':
        subject_id = sequence.subject.id  # capture before a potential delete
        if request.POST['choice'] == 'Yes':
            sequence.delete()
            messages.success(request, 'The sequence ' + sequence.format + ' was deleted.')
        return redirect('subject_details', subject_id)
    else:
        return render(request, 'annotationweb/delete_sequence.html', {'sequence': sequence})
def task_description(request, task_id):
    """Show a task's description page with a link to start annotating.

    Fix: ``Http404`` is now raised instead of returned.

    :raises NotImplementedError: for an unknown task type.
    """
    try:
        task = Task.objects.get(pk=task_id)
    except Task.DoesNotExist:
        raise Http404('The Task does not exist')

    if task.type == task.CLASSIFICATION:
        url = reverse('classification:label_image', args=[task_id])
    elif task.type == task.BOUNDING_BOX:
        url = reverse('boundingbox:process_image', args=[task_id])
    elif task.type == task.LANDMARK:
        url = reverse('landmark:process_image', args=[task_id])
    elif task.type == task.CARDIAC_SEGMENTATION:
        url = reverse('cardiac:segment_image', args=[task_id])
    elif task.type == task.SPLINE_SEGMENTATION:
        url = reverse('spline_segmentation:segment_image', args=[task_id])
    else:
        raise NotImplementedError()

    return render(request, 'annotationweb/task_description.html',
                  {'task': task, 'continue_url': url})
@register.simple_tag
def url_replace(request, field, value):
    """Template tag: the current query string with *field* set to *value*."""
    params = request.GET.copy()
    params[field] = value
    return params.urlencode()
@register.simple_tag
def complete_label(label):
    """Template tag: render the label's complete (hierarchical) name."""
    return get_complete_label_name(label)
@register.filter(name='times')
def times(number):
    """Template filter: return range(number) so templates can loop N times."""
    return range(number)
def reset_filters(request, task_id):
    """Clear the stored search filters for a task and reload its image list.

    Fix: ``Http404`` is now raised instead of returned.
    """
    try:
        task = Task.objects.get(pk=task_id)
    except Task.DoesNotExist:
        raise Http404('The Task does not exist')

    search_filters = SearchFilter(request, task)
    search_filters.delete()

    return redirect('task', task_id)
def task(request, task_id):
    """Image list page for a task, with persistent search filters and pagination.

    Fix: ``Http404`` is now raised instead of returned.
    """
    try:
        task = Task.objects.get(pk=task_id)
    except Task.DoesNotExist:
        raise Http404('The Task does not exist')

    search_filters = SearchFilter(request, task)

    if request.method == 'POST':
        form = search_filters.create_form(data=request.POST)
    else:
        form = search_filters.create_form()

    queryset = ImageSequence.objects.all()

    # Read the stored filter values
    sort_by = search_filters.get_value('sort_by')
    subjects_selected = search_filters.get_value('subject')
    users_selected = search_filters.get_value('user')
    image_quality = search_filters.get_value('image_quality')
    metadata = search_filters.get_value('metadata')

    if len(metadata) > 0:
        # Each metadata entry has the form 'name: value'; group values by name
        metadata_dict = {}
        for item in metadata:
            parts = item.split(': ')
            if len(parts) != 2:
                raise Exception('Error: must be 2 parts')
            name = parts[0]
            value = parts[1]
            if name in metadata_dict.keys():
                metadata_dict[name].append(value)
            else:
                metadata_dict[name] = [value]

        for name, values in metadata_dict.items():
            queryset = queryset.filter(
                imagemetadata__name=name,
                imagemetadata__value__in=values
            )

    if sort_by == ImageListForm.SORT_IMAGE_ID:
        queryset = queryset.filter(
            subject__dataset__task=task,
            subject__in=subjects_selected
        )
    elif sort_by == ImageListForm.SORT_NOT_ANNOTATED_IMAGE_ID:
        queryset = queryset.filter(
            subject__dataset__task=task,
            subject__in=subjects_selected
        ).exclude(imageannotation__task=task, imageannotation__finished=True)
    else:
        # Date-sorted views only show finished annotations
        if task.type == Task.CLASSIFICATION:
            labels_selected = search_filters.get_value('label')
            queryset = queryset.filter(
                imageannotation__image_quality__in=image_quality,
                imageannotation__task=task,
                imageannotation__finished=True,
                imageannotation__user__in=users_selected,
                imageannotation__keyframeannotation__imagelabel__in=labels_selected,
                subject__in=subjects_selected,
            )
        else:
            queryset = queryset.filter(
                imageannotation__image_quality__in=image_quality,
                imageannotation__task=task,
                imageannotation__finished=True,
                imageannotation__user__in=users_selected,
                subject__in=subjects_selected
            )

        if sort_by == ImageListForm.SORT_DATE_DESC:
            queryset = queryset.order_by('-imageannotation__date')
        else:
            queryset = queryset.order_by('imageannotation__date')

    paginator = Paginator(queryset, 12)
    page = request.GET.get('page')
    try:
        images = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        images = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        images = paginator.page(paginator.num_pages)

    for image in images:
        # Attach this task's annotation (and its key frames) when one exists
        try:
            image.annotation = ImageAnnotation.objects.get(image=image, task=task)
            image.annotation_frames = KeyFrameAnnotation.objects.filter(image_annotation=image.annotation)
        except:
            # NOTE(review): bare except kept to preserve the best-effort
            # behaviour; it silently skips images without a single annotation.
            pass

    # Remember where to return after annotating an image
    return_url = reverse('task', kwargs={'task_id': task_id})
    if page is not None:
        return_url += '?page=' + str(page)
    request.session['return_to_url'] = return_url

    return render(request, 'annotationweb/task.html',
                  {'images': images, 'task': task, 'form': form})
def get_redirection(task):
    """Return the URL pattern name of the annotation view for the task's type.

    Returns None for an unknown task type, matching the original fallthrough.
    """
    view_names = {
        Task.CLASSIFICATION: 'classification:label_image',
        Task.BOUNDING_BOX: 'boundingbox:process_image',
        Task.LANDMARK: 'landmark:process_image',
        Task.CARDIAC_SEGMENTATION: 'cardiac:segment_image',
        Task.SPLINE_SEGMENTATION: 'spline_segmentation:segment_image',
    }
    return view_names.get(task.type)
# @register.simple_tag
# def urlencode_dict(dict):
# print(dict)
# url = ''
# if len(dict) > 0:
# first = True
# for key, value_list in dict.items():
# print(value_list)
# if type(value_list) is not list:
# value_list = [value_list]
# for value in value_list:
# if first:
# url += '?'
# first = False
# else:
# url += '&'
#
# url += key + '=' + str(value)
#
# return mark_safe(url)
def annotate_next_image(request, task_id):
    """Redirect to the task-type-specific annotation view for the next image.

    Fix: ``Http404`` is now raised instead of returned.
    """
    try:
        task = Task.objects.get(pk=task_id)
    except Task.DoesNotExist:
        raise Http404('The Task does not exist')
    url = reverse(get_redirection(task), kwargs={'task_id': task.id})
    # Preserve the current query string (filters, page, ...)
    return redirect(url + '?' + request.GET.urlencode())
def annotate_image(request, task_id, image_id):
    """Redirect to the task-type-specific annotation view for a given image.

    Fix: ``Http404`` is now raised instead of returned.
    """
    try:
        task = Task.objects.get(pk=task_id)
    except Task.DoesNotExist:
        raise Http404('The Task does not exist')
    url = reverse(get_redirection(task), kwargs={'task_id': task.id, 'image_id': image_id})
    # Preserve the current query string (filters, page, ...)
    return redirect(url + '?' + request.GET.urlencode())
| annotationweb/views.py | 22,269 | Check if user has processed any Show only tasks assigned to this user Admin page Classification tasks If only 1 exporter exists for this type, use that one Get unbound form Get unbound form TODO do cleanup after deleting task? Show all datasets Find start_number and total number of frames automatically Start frame can either be 0 or 1 Create new model, but don't save to DB Save to db Add annotation object if not exists Add frames to db Add new key frames if not exists Already exists, do nothing Does not exist, add it New frame, mark annotation as unfinished Delete frames that were not added Get image sequence the key frame belongs to Image list site Get all processed images for given task If page is not an integer, deliver first page. If page is out of range (e.g. 9999), deliver last page of results. Get annotation @register.simple_tag def urlencode_dict(dict): print(dict) url = '' if len(dict) > 0: first = True for key, value_list in dict.items(): print(value_list) if type(value_list) is not list: value_list = [value_list] for value in value_list: if first: url += '?' first = False else: url += '&' url += key + '=' + str(value) return mark_safe(url) Find the task type and redirect Find the task type and redirect | 1,424 | en | 0.729083 |
import copy
import torch
from torch import nn
import numpy as np
from tokens import *
def tokenize(corpus, callback=lambda sent: sent.split()):
    """Split each sentence of *corpus* into tokens using *callback* (whitespace split by default)."""
    tokenized = []
    for sent in corpus:
        tokenized.append(callback(sent))
    return tokenized
def add_start_stop_tokens(corpus):
    """Wrap every tokenized sentence with the START and STOP special tokens."""
    return [[START_TOKEN] + sent + [STOP_TOKEN] for sent in corpus]
def padding(corpus, seq_len):
    """Pad or truncate every sentence in place to exactly *seq_len* tokens.

    Sentences are mutated (PAD_TOKEN appended or trailing tokens removed);
    the same corpus list is returned for convenience.
    """
    for sent in corpus:
        shortfall = seq_len - len(sent)
        if shortfall > 0:
            sent.extend([PAD_TOKEN] * shortfall)
        else:
            del sent[seq_len:]
    return corpus
def build_vocab(corpus):
    """Build a vocabulary (plus UNK) and its word<->index lookup tables.

    Note: the vocabulary order comes from a set and is therefore not
    deterministic across runs.
    """
    unique_tokens = set()
    for sent in corpus:
        unique_tokens.update(sent)
    vocab = list(unique_tokens) + [UNK_TOKEN]

    word2idx = {}
    idx2word = {}
    for idx, word in enumerate(vocab):
        word2idx[word] = idx
        idx2word[idx] = word
    return vocab, word2idx, idx2word
def convert_to_idx(corpus, word2idx):
    """Map every token in *corpus* to its vocabulary index.

    Fix: unknown tokens previously mapped to the literal string "<UNK>"
    instead of the UNK token's integer index, producing mixed str/int output;
    they now fall back to ``word2idx[UNK_TOKEN]`` (consistent with
    ``preprocess_utterances``).
    """
    unk_idx = word2idx[UNK_TOKEN]
    return [[word2idx.get(word, unk_idx) for word in sent] for sent in corpus]
# Output Processing
def process_output_corpus(input_seqs, preds, trues):
    """Strip special tokens from every (input, prediction, label) triple."""
    new_seqs = []
    new_preds = []
    new_trues = []
    for idx, seq in enumerate(input_seqs):
        cleaned_seq, cleaned_pred, cleaned_true = remove_special_tokens(
            seq, preds[idx], trues[idx]
        )
        new_seqs.append(cleaned_seq)
        new_preds.append(cleaned_pred)
        new_trues.append(cleaned_true)
    return new_seqs, new_preds, new_trues
def remove_special_tokens(input_seq, pred, true):
    """Drop start/stop positions and align the prediction's length to the sequence.

    The input sequence and labels lose their first and last (START/STOP)
    positions; the prediction loses its first position and is then padded
    with PAD_TOKEN or truncated so its length matches the trimmed sequence.
    """
    trimmed_seq = input_seq[1:-1]
    trimmed_true = true[1:-1]
    trimmed_pred = pred[1:]

    target_len = len(trimmed_seq)
    if len(trimmed_pred) < target_len:
        # prediction came from a truncated padding: extend with PAD
        trimmed_pred = trimmed_pred + [PAD_TOKEN] * (target_len - len(trimmed_pred))
    else:
        # prediction came from an expanded padding: cut the tail
        trimmed_pred = trimmed_pred[:target_len]
    return trimmed_seq, trimmed_pred, trimmed_true
def convert_to_token(corpus, idx2token):
    """Map every index in *corpus* back to its token string via *idx2token*."""
    converted = []
    for sent in corpus:
        converted.append([idx2token[token_idx] for token_idx in sent])
    return converted
def preprocess_utterances(utterances, utterance_dataset):
    """Tokenize, add start/stop tokens, pad and index raw utterance strings.

    Returns (indexed utterances, tokenized-but-unpadded utterances); the
    tokenized copy is taken before padding so it keeps the natural lengths.
    """
    utterances = add_start_stop_tokens(tokenize(utterances))
    tokenized_utterances = copy.deepcopy(utterances)
    utterances = padding(utterances, utterance_dataset.seq_len)

    word2idx = utterance_dataset.word2idx
    unk_idx = word2idx[UNK_TOKEN]
    indexed = [
        [word2idx.get(token, unk_idx) for token in sent]
        for sent in utterances
    ]
    return indexed, tokenized_utterances
def read_glove_vector(glove_vec):
    """Parse a GloVe text file into a {word: float64 vector} mapping.

    Each line is expected to be 'word v1 v2 ... vn'.

    Fixes: removed the unused ``words`` set that was built for nothing, and
    blank lines no longer raise IndexError.
    """
    word_to_vec_map = {}
    with open(glove_vec, "r", encoding="UTF-8") as f:
        for line in f:
            w_line = line.split()
            if not w_line:
                continue  # skip blank lines
            word_to_vec_map[w_line[0]] = np.array(w_line[1:], dtype=np.float64)
    return word_to_vec_map
# functions for creating the embedding layer
def get_one_hot_matrix(vocab):
    """Return a (len(vocab), len(vocab)) identity matrix of one-hot rows."""
    return np.eye(len(vocab))
def get_glove_matrix(glove_map, vocab):
    """Build an embedding weight matrix for *vocab* from GloVe vectors.

    Rows for words missing from *glove_map* are zero vectors for the special
    PAD/START/STOP tokens and random-normal vectors otherwise (so the output
    is non-deterministic when out-of-vocabulary words are present).
    """
    emb_dim = len(list(glove_map.values())[0])
    weights_matrix = np.zeros((len(vocab), emb_dim))
    for row, word in enumerate(vocab):
        if word in glove_map:
            weights_matrix[row] = glove_map[word]
        elif word in (PAD_TOKEN, START_TOKEN, STOP_TOKEN):
            weights_matrix[row] = np.zeros((emb_dim,))
        else:
            weights_matrix[row] = np.random.normal(scale=0.6, size=(emb_dim,))
    return weights_matrix
def create_emb_layer(weights_matrix, non_trainable=False):
    """Create an ``nn.Embedding`` layer initialized from *weights_matrix*.

    :param weights_matrix: array of shape (num_embeddings, embedding_dim)
    :param non_trainable: freeze the embedding weights when True
    :return: (embedding layer, num_embeddings, embedding_dim)
    """
    num_embeddings, embedding_dim = weights_matrix.shape
    layer = nn.Embedding(num_embeddings, embedding_dim)
    layer.load_state_dict({"weight": torch.tensor(weights_matrix)})
    if non_trainable:
        layer.weight.requires_grad = False
    return layer, num_embeddings, embedding_dim
| nlp_243/hw3/utils.py | 3,927 | Output Processing if is truncated padding if is expanded padding tokenization add special tokens padding functions for creating the embedding layer | 147 | en | 0.292231 |
import numpy as np
'''
dccol : 1-8
dcpad : 1-10
mcecol: 0,1
mcerow: 0-32
'''
#w,h = 10,8
# def det2mce(detcol,detrow,detpol):
# dccol,dcpad = det2dc(detcol,detrow,detpol)
# if dccol<0 or dcpad<0:
# return -1,-1
# mcecol,mcerow = dc2mce(dccol,dcpad)
# return mcecol,mcerow
def mce2det(mcecol, mcerow):
    """Map MCE (column, row) coordinates to detector coordinates.

    Columns and rows above 17 wrap back by 18; rows 0-17 are polarity 'A'
    and rows 18+ are polarity 'B'.

    :return: tuple (im, detcol, detrow, detpol)
    """
    detcol = mcecol if mcecol <= 17 else mcecol - 18
    if mcerow <= 17:
        detrow = mcerow
        detpol = 'A'
    else:
        detrow = mcerow - 18
        detpol = 'B'
    im = 0  # module index; original author was unsure what this is — TODO confirm
    return im, detcol, detrow, detpol
| ba150_ModuleMapping_fake.py | 568 | w,h = 10,8 def det2mce(detcol,detrow,detpol): dccol,dcpad = det2dc(detcol,detrow,detpol) if dccol<0 or dcpad<0: return -1,-1 mcecol,mcerow = dc2mce(dccol,dcpad) return mcecol,mcerowdetcol,detrow,detpol = dc2det(dccol,dcpad)not sure what this is | 251 | en | 0.274345 |
# -*- coding: utf-8 -*-
"""Tests for :mod:`docdata`."""
| tests/__init__.py | 57 | Tests for :mod:`docdata`.
-*- coding: utf-8 -*- | 49 | en | 0.400124 |
from enum import Enum
from .factory import createFromUri
import os
import requests
class Record(object):
    """
    A single test record (one executed test case) inside a Polarion test run.

    :param polarion: Polarion client object
    :param test_run: Test run instance this record belongs to
    :param polarion_record: The raw data from Polarion for this record
    :param index: The index of this record in the test run
    """

    class ResultType(Enum):
        """
        Record result enum
        """
        No = None
        PASSED = 'passed'
        FAILED = 'failed'
        BLOCKED = 'blocked'

    def __init__(self, polarion, test_run, polarion_record, index):
        self._polarion = polarion
        self._test_run = test_run
        self._polarion_record = polarion_record
        self._index = index

        self._buildWorkitemFromPolarion()

    def _buildWorkitemFromPolarion(self):
        # parse all polarion attributes to this class
        for attr, value in self._polarion_record.__dict__.items():
            for key in value:
                setattr(self, key, value[key])

        self._testcase = self._polarion_record.testCaseURI
        # Everything after '}' in the URI is the human-readable test case id.
        self._testcase_name = self._testcase.split('}')[1]
        self._defect = self._polarion_record.defectURI

    def _reloadFromPolarion(self):
        # Fetch a fresh copy of this record from Polarion and rebuild the
        # local attributes from it.
        service = self._polarion.getService('TestManagement')
        self._polarion_record = service.getTestCaseRecords(self._test_run.uri, self._testcase)[0]
        self._buildWorkitemFromPolarion()
        # self._original_polarion_test_run = copy.deepcopy(self._polarion_test_run)

    def setTestStepResult(self, step_number, result: ResultType, comment=None):
        """
        Set the result of a test step and save the record.

        :param step_number: Step number (0-based index)
        :param result: The result of the test step
        :param comment: An optional comment (may contain HTML)
        """
        if self.testStepResults is None:
            # No step results yet: query the number of test steps defined on
            # the test case and pre-create one empty result slot per step.
            service = self._polarion.getService('TestManagement')
            test_steps = service.getTestSteps(self.testCaseURI)
            number_of_steps = 0
            if test_steps.steps is not None:
                number_of_steps = len(test_steps.steps.TestStep)
            self.testStepResults = self._polarion.ArrayOfTestStepResultType()
            for _i in range(number_of_steps):
                self.testStepResults.TestStepResult.append(
                    self._polarion.TestStepResultType())

        if step_number < len(self.testStepResults.TestStepResult):
            self.testStepResults.TestStepResult[step_number].result = self._polarion.EnumOptionIdType(
                id=result.value)
            if comment is not None:
                self.testStepResults.TestStepResult[step_number].comment = self._polarion.TextType(
                    content=comment, type='text/html', contentLossy=False)

            self.save()

    def getResult(self):
        """
        Get the test result of this record

        :return: The test case result
        :rtype: ResultType
        """
        if self.result is not None:
            return self.ResultType(self.result.id)
        return self.ResultType.No

    def getComment(self):
        """
        Get a comment if available. The comment may contain HTML if edited in Polarion!

        :return: Get the comment, may contain HTML
        :rtype: string
        """
        if self.comment is not None:
            return self.comment.content
        return None

    @property
    def testcase_id(self):
        """
        The test case name including prefix
        """
        return self._testcase_name

    def getTestCaseName(self):
        """
        Get the test case name including prefix

        :return: The name
        :rtype: string
        """
        return self._testcase_name

    def setComment(self, comment):
        """
        Set the comment on this record (stored as HTML text in Polarion).
        Note: the record is not saved; call save() or setResult() afterwards.

        :param comment: Comment string, may contain HTML
        """
        self.comment = self._polarion.TextType(
            content=comment, type='text/html', contentLossy=False)

    def setResult(self, result: ResultType = ResultType.FAILED, comment=None):
        """
        Set the result of this record and save it.

        :param result: The result of this record
        :param comment: Comment string, may contain HTML
        """
        if comment is not None:
            self.setComment(comment)

        if self.result is not None:
            self.result.id = result.value
        else:
            self.result = self._polarion.EnumOptionIdType(
                id=result.value)

        self.save()

    def getExecutingUser(self):
        """
        Gets the executing user if the test was executed

        :return: The user
        :rtype: User/None
        """
        if self.executedByURI is not None:
            return createFromUri(self._polarion, None, self.executedByURI)
        return None

    def hasAttachment(self):
        """
        Checks if the Record has attachments

        :return: True/False
        :rtype: boolean
        """
        if self.attachments is not None:
            return True
        return False

    def getAttachment(self, file_name):
        """
        Get the attachment data

        :param file_name: The attachment file name
        :return: list of bytes
        :rtype: bytes[]
        :raises Exception: when the attachment is missing or the download fails
        """
        # find the file
        url = None
        for attachment in self.attachments.TestRunAttachment:
            if attachment.fileName == file_name:
                url = attachment.url

        if url is not None:
            resp = requests.get(url, auth=(self._polarion.user, self._polarion.password))
            if resp.ok:
                return resp.content
            else:
                raise Exception(f'Could not download attachment {file_name}')
        else:
            raise Exception(f'Could not find attachment with name {file_name}')

    def saveAttachmentAsFile(self, file_name, file_path):
        """
        Save an attachment to file.

        :param file_name: The attachment file name
        :param file_path: File where to save the attachment
        """
        bin = self.getAttachment(file_name)
        with open(file_path, "wb") as file:
            file.write(bin)

    def deleteAttachment(self, file_name):
        """
        Delete an attachment.

        :param file_name: The attachment file name
        """
        service = self._polarion.getService('TestManagement')
        service.deleteAttachmentFromTestRecord(self._test_run.uri, self._index, file_name)
        self._reloadFromPolarion()

    def addAttachment(self, file_path, title):
        """
        Upload an attachment

        :param file_path: Source file to upload
        :param title: The title of the attachment
        """
        service = self._polarion.getService('TestManagement')
        file_name = os.path.split(file_path)[1]
        with open(file_path, "rb") as file_content:
            service.addAttachmentToTestRecord(self._test_run.uri, self._index, file_name, title, file_content.read())
        self._reloadFromPolarion()

    def testStepHasAttachment(self, step_index):
        """
        Checks if the a test step has attachments

        :param step_index: The test step index
        :return: True/False
        :rtype: boolean
        """
        if self.testStepResults is None:
            return False
        if self.testStepResults.TestStepResult[step_index].attachments is not None:
            return True
        return False

    def getAttachmentFromTestStep(self, step_index, file_name):
        """
        Get the attachment data from a test step

        :param step_index: The test step index
        :param file_name: The attachment file name
        :return: list of bytes
        :rtype: bytes[]
        :raises Exception: when the attachment is missing or the download fails
        """
        # find the file
        url = None
        for attachment in self.testStepResults.TestStepResult[step_index].attachments.TestRunAttachment:
            if attachment.fileName == file_name:
                url = attachment.url

        if url is not None:
            resp = requests.get(url, auth=(self._polarion.user, self._polarion.password))
            if resp.ok:
                return resp.content
            else:
                raise Exception(f'Could not download attachment {file_name}')
        else:
            raise Exception(f'Could not find attachment with name {file_name}')

    def saveAttachmentFromTestStepAsFile(self, step_index, file_name, file_path):
        """
        Save an attachment to file from a test step

        :param step_index: The test step index
        :param file_name: The attachment file name
        :param file_path: File where to save the attachment
        """
        bin = self.getAttachmentFromTestStep(step_index, file_name)
        with open(file_path, "wb") as file:
            file.write(bin)

    def deleteAttachmentFromTestStep(self, step_index, file_name):
        """
        Delete an attachment from a test step

        :param step_index: The test step index
        :param file_name: The attachment file name
        """
        service = self._polarion.getService('TestManagement')
        service.deleteAttachmentFromTestStep(self._test_run.uri, self._index, step_index, file_name)
        self._reloadFromPolarion()

    def addAttachmentToTestStep(self, step_index, file_path, title):
        """
        Upload an attachment to a test step

        :param step_index: The test step index
        :param file_path: Source file to upload
        :param title: The title of the attachment
        """
        service = self._polarion.getService('TestManagement')
        file_name = os.path.split(file_path)[1]
        with open(file_path, "rb") as file_content:
            service.addAttachmentToTestStep(self._test_run.uri, self._index, step_index, file_name, title, file_content.read())
        self._reloadFromPolarion()

    def save(self):
        """
        Saves the current test record
        """
        new_item = {}
        for attr, value in self.__dict__.items():
            if not attr.startswith('_'):
                # only add if public value
                new_item[attr] = value

        service = self._polarion.getService('TestManagement')
        service.executeTest(
            self._test_run.uri, new_item)
        self._reloadFromPolarion()

    def __repr__(self):
        return f'{self._testcase_name} in {self._test_run.id} ({self.getResult()} on {self.executed})'

    def __str__(self):
        return f'{self._testcase_name} in {self._test_run.id} ({self.getResult()} on {self.executed})'
| polarion/record.py | 10,697 | Create a Polarion test record,
:param polarion: Polarion client object
:param test_run: Test run instance
:param polarion_record: The data from Polarion of this testrun
:param index: The index of this record in the test run
Record result enum
Upload an attachment
:param file_path: Source file to upload
:param title: The title of the attachment
Upload an attachment to a test step
:param step_index: The test step index
:param file_path: Source file to upload
:param title: The title of the attachment
Delete an attachment.
:param file_name: The attachment file name
Delete an attachment from a test step
:param step_index: The test step index
:param file_name: The attachment file name
Get the attachment data
:param file_name: The attachment file name
:return: list of bytes
:rtype: bytes[]
Get the attachment data from a test step
:param step_index: The test step index
:param file_name: The attachment file name
:return: list of bytes
:rtype: bytes[]
Get a comment if available. The comment may contain HTML if edited in Polarion!
:return: Get the comment, may contain HTML
:rtype: string
Gets the executing user if the test was executed
:return: The user
:rtype: User/None
Get the test result of this record
:return: The test case result
:rtype: ResultType
Get the test case name including prefix
:return: The name
:rtype: string
Checks if the Record has attachments
:return: True/False
:rtype: boolean
Saves the current test record
Save an attachment to file.
:param file_name: The attachment file name
:param file_path: File where to save the attachment
Save an attachment to file from a test step
:param step_index: The test step index
:param file_name: The attachment file name
:param file_path: File where to save the attachment
tries to get the severity enum of this workitem type
When it fails to get it, the list will be empty
:param comment: Comment string, may contain HTML
Set the result of this record and save it.
:param result: The result of this record
:param comment: Comment string, may contain HTML
"
Set the result of a test step
:param step_number: Step number
:param result: The result fo the test step
:param comment: An optional comment
Checks if the a test step has attachments
:param step_index: The test step index
:return: True/False
:rtype: boolean
The test case name including prefix
parse all polarion attributes to this class self._original_polarion_test_run = copy.deepcopy(self._polarion_test_run) get the number of test steps in find the file find the file only add if public value | 2,541 | en | 0.731466 |
#!/usr/bin/env python
#
# Init file for Shotgun event daemon
#
# chkconfig: 345 99 00
# description: Shotgun event daemon
#
### BEGIN INIT INFO
# Provides: shotgunEvent
# Required-Start: $network
# Should-Start: $remote_fs
# Required-Stop: $network
# Should-Stop: $remote_fs
# Default-Start: 2 3 4 5
# Short-Description: Shotgun event daemon
# Description: Shotgun event daemon
### END INIT INFO
"""
For an overview of shotgunEvents, please see raw documentation in the docs
folder or an html compiled version at:
http://shotgunsoftware.github.com/shotgunEvents
"""
from __future__ import print_function
__version__ = "1.0"
__version_info__ = (1, 0)
# Suppress the deprecation warning about imp until we get around to replacing it
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import imp
import datetime
import logging
import logging.handlers
import os
import pprint
import socket
import sys
import time
import traceback
from six.moves import configparser
import six.moves.cPickle as pickle
from distutils.version import StrictVersion
if sys.platform == "win32":
import win32serviceutil
import win32service
import win32event
import servicemanager
import daemonizer
import shotgun_api3 as sg
from shotgun_api3.lib.sgtimezone import SgTimezone
SG_TIMEZONE = SgTimezone()
CURRENT_PYTHON_VERSION = StrictVersion(sys.version.split()[0])
PYTHON_26 = StrictVersion("2.6")
PYTHON_27 = StrictVersion("2.7")
EMAIL_FORMAT_STRING = """Time: %(asctime)s
Logger: %(name)s
Path: %(pathname)s
Function: %(funcName)s
Line: %(lineno)d
%(message)s"""
def _setFilePathOnLogger(logger, path):
    """Point *logger* at a midnight-rotating file handler writing to *path*.

    Any previously attached TimedRotatingFileHandler is removed first so the
    logger never writes to two files at once.
    """
    # Drop the old file handler (if any) before attaching the new one.
    _removeHandlersFromLogger(logger, logging.handlers.TimedRotatingFileHandler)

    file_handler = logging.handlers.TimedRotatingFileHandler(
        path, "midnight", backupCount=10
    )
    file_handler.setFormatter(
        logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    )
    logger.addHandler(file_handler)
def _removeHandlersFromLogger(logger, handlerTypes=None):
"""
Remove all handlers or handlers of a specified type from a logger.
@param logger: The logger who's handlers should be processed.
@type logger: A logging.Logger object
@param handlerTypes: A type of handler or list/tuple of types of handlers
that should be removed from the logger. If I{None}, all handlers are
removed.
@type handlerTypes: L{None}, a logging.Handler subclass or
I{list}/I{tuple} of logging.Handler subclasses.
"""
for handler in logger.handlers:
if handlerTypes is None or isinstance(handler, handlerTypes):
logger.removeHandler(handler)
def _addMailHandlerToLogger(
    logger,
    smtpServer,
    fromAddr,
    toAddrs,
    emailSubject,
    username=None,
    password=None,
    secure=None,
):
    """
    Attach a handler to *logger* that emails ERROR-and-above log records.

    The email body format is defined by L{EMAIL_FORMAT_STRING}. If any of the
    mandatory pieces of mail configuration (server, from address, recipients,
    subject) is missing, no handler is attached.

    @param logger: The logger to configure
    @type logger: A logging.Logger instance
    @param toAddrs: The addresses to send the email to.
    @type toAddrs: A list of email addresses that will be passed on to the
        SMTPHandler.
    """
    # Guard clause: all four mandatory settings must be present.
    if not (smtpServer and fromAddr and toAddrs and emailSubject):
        return

    mailHandler = CustomSMTPHandler(
        smtpServer, fromAddr, toAddrs, emailSubject, (username, password), secure
    )
    mailHandler.setLevel(logging.ERROR)
    mailHandler.setFormatter(logging.Formatter(EMAIL_FORMAT_STRING))
    logger.addHandler(mailHandler)
class Config(configparser.SafeConfigParser):
    """
    Typed accessors around the daemon's ini-style configuration file.

    os.environ is passed as the parser defaults, so environment variables are
    available for interpolation inside the config file.
    """

    def __init__(self, path):
        configparser.SafeConfigParser.__init__(self, os.environ)
        self.read(path)

    def getShotgunURL(self):
        """Shotgun server URL ([shotgun] server)."""
        return self.get("shotgun", "server")

    def getEngineScriptName(self):
        """API script name used by the engine ([shotgun] name)."""
        return self.get("shotgun", "name")

    def getEngineScriptKey(self):
        """API script key used by the engine ([shotgun] key)."""
        return self.get("shotgun", "key")

    def getEngineProxyServer(self):
        """Optional HTTP proxy ([shotgun] proxy_server); None when unset/empty."""
        try:
            proxy_server = self.get("shotgun", "proxy_server").strip()
            if not proxy_server:
                return None
            return proxy_server
        except configparser.NoOptionError:
            return None

    def getEventIdFile(self):
        """Path of the pickle file persisting last-processed event ids."""
        return self.get("daemon", "eventIdFile")

    def getEnginePIDFile(self):
        """Path of the daemon's PID file ([daemon] pidFile)."""
        return self.get("daemon", "pidFile")

    def getPluginPaths(self):
        """List of plugin directories ([plugins] paths, comma-separated)."""
        return [s.strip() for s in self.get("plugins", "paths").split(",")]

    def getSMTPServer(self):
        """SMTP server host for error emails ([emails] server)."""
        return self.get("emails", "server")

    def getSMTPPort(self):
        """SMTP port ([emails] port); defaults to 25 when unset."""
        if self.has_option("emails", "port"):
            return self.getint("emails", "port")
        return 25

    def getFromAddr(self):
        """Sender address for error emails ([emails] from)."""
        return self.get("emails", "from")

    def getToAddrs(self):
        """Recipient addresses for error emails ([emails] to, comma-separated)."""
        return [s.strip() for s in self.get("emails", "to").split(",")]

    def getEmailSubject(self):
        """Subject line for error emails ([emails] subject)."""
        return self.get("emails", "subject")

    def getEmailUsername(self):
        """SMTP auth username ([emails] username); None when unset."""
        if self.has_option("emails", "username"):
            return self.get("emails", "username")
        return None

    def getEmailPassword(self):
        """SMTP auth password ([emails] password); None when unset."""
        if self.has_option("emails", "password"):
            return self.get("emails", "password")
        return None

    def getSecureSMTP(self):
        """Whether to use TLS for SMTP ([emails] useTLS); defaults to False."""
        if self.has_option("emails", "useTLS"):
            return self.getboolean("emails", "useTLS") or False
        return False

    def getLogMode(self):
        """Logging mode: 0 = one file for everything, 1 = file per plugin."""
        return self.getint("daemon", "logMode")

    def getLogLevel(self):
        """Numeric logging level ([daemon] logging)."""
        return self.getint("daemon", "logging")

    def getMaxEventBatchSize(self):
        """Max events fetched per query ([daemon] max_event_batch_size); default 500."""
        if self.has_option("daemon", "max_event_batch_size"):
            return self.getint("daemon", "max_event_batch_size")

        return 500

    def getLogFile(self, filename=None):
        """Resolve the full log file path, optionally overriding the file name.

        :raises ConfigError: if no logFile option exists, or logPath points at
            something that is not a directory.
        """
        if filename is None:
            if self.has_option("daemon", "logFile"):
                filename = self.get("daemon", "logFile")
            else:
                raise ConfigError("The config file has no logFile option.")

        if self.has_option("daemon", "logPath"):
            path = self.get("daemon", "logPath")

            # Create the log directory on demand.
            if not os.path.exists(path):
                os.makedirs(path)
            elif not os.path.isdir(path):
                raise ConfigError(
                    "The logPath value in the config should point to a directory."
                )

            path = os.path.join(path, filename)

        else:
            path = filename

        return path

    def getTimingLogFile(self):
        """Path of the timing log, or None when timing_log is not 'on'."""
        if (
            not self.has_option("daemon", "timing_log")
            or self.get("daemon", "timing_log") != "on"
        ):
            return None

        return self.getLogFile() + ".timing"
class Engine(object):
    """
    The engine holds the main loop of event processing.
    """

    def __init__(self, configPath):
        """
        Read the config file at *configPath* and build everything the main
        loop needs: plugin collections, the Shotgun connection and the
        engine/timing loggers.
        """
        self._continue = True
        self._eventIdData = {}

        # Read/parse the config
        self.config = Config(configPath)

        # Get config values
        self._pluginCollections = [
            PluginCollection(self, s) for s in self.config.getPluginPaths()
        ]
        self._sg = sg.Shotgun(
            self.config.getShotgunURL(),
            self.config.getEngineScriptName(),
            self.config.getEngineScriptKey(),
            http_proxy=self.config.getEngineProxyServer(),
        )
        self._max_conn_retries = self.config.getint("daemon", "max_conn_retries")
        self._conn_retry_sleep = self.config.getint("daemon", "conn_retry_sleep")
        self._fetch_interval = self.config.getint("daemon", "fetch_interval")
        self._use_session_uuid = self.config.getboolean("shotgun", "use_session_uuid")

        # Setup the loggers for the main engine
        if self.config.getLogMode() == 0:
            # Set the root logger for file output.
            rootLogger = logging.getLogger()
            rootLogger.config = self.config
            _setFilePathOnLogger(rootLogger, self.config.getLogFile())
            print(self.config.getLogFile())
            # Set the engine logger for email output.
            self.log = logging.getLogger("engine")
            self.setEmailsOnLogger(self.log, True)
        else:
            # Set the engine logger for file and email output.
            self.log = logging.getLogger("engine")
            self.log.config = self.config
            _setFilePathOnLogger(self.log, self.config.getLogFile())
            self.setEmailsOnLogger(self.log, True)

        self.log.setLevel(self.config.getLogLevel())

        # Setup the timing log file
        timing_log_filename = self.config.getTimingLogFile()
        if timing_log_filename:
            self.timing_logger = logging.getLogger("timing")
            self.timing_logger.setLevel(self.config.getLogLevel())
            _setFilePathOnLogger(self.timing_logger, timing_log_filename)
        else:
            self.timing_logger = None

        super(Engine, self).__init__()

    def setEmailsOnLogger(self, logger, emails):
        """
        Attach (or remove) an SMTP error-email handler on *logger*.

        :param logger: The logger to configure.
        :param emails: True to use the configured default recipients, False to
            disable emails, or a list/tuple of recipient addresses.
        :raises ValueError: if *emails* is of any other type.
        """
        # Configure the logger for email output
        _removeHandlersFromLogger(logger, logging.handlers.SMTPHandler)

        if emails is False:
            return

        smtpServer = self.config.getSMTPServer()
        smtpPort = self.config.getSMTPPort()
        fromAddr = self.config.getFromAddr()
        emailSubject = self.config.getEmailSubject()
        username = self.config.getEmailUsername()
        password = self.config.getEmailPassword()
        if self.config.getSecureSMTP():
            secure = (None, None)
        else:
            secure = None

        if emails is True:
            toAddrs = self.config.getToAddrs()
        elif isinstance(emails, (list, tuple)):
            toAddrs = emails
        else:
            msg = "Argument emails should be True to use the default addresses, False to not send any emails or a list of recipient addresses. Got %s."
            raise ValueError(msg % type(emails))

        _addMailHandlerToLogger(
            logger,
            (smtpServer, smtpPort),
            fromAddr,
            toAddrs,
            emailSubject,
            username,
            password,
            secure,
        )

    def start(self):
        """
        Start the processing of events.

        The last processed id is loaded up from persistent storage on disk and
        the main loop is started.
        """
        # TODO: Take value from config
        socket.setdefaulttimeout(60)

        # Notify which version of shotgun api we are using
        self.log.info("Using SG Python API version %s" % sg.__version__)

        try:
            for collection in self._pluginCollections:
                collection.load()

            self._loadEventIdData()

            self._mainLoop()
        except KeyboardInterrupt:
            self.log.warning("Keyboard interrupt. Cleaning up...")
        except Exception as err:
            msg = "Crash!!!!! Unexpected error (%s) in main loop.\n\n%s"
            self.log.critical(msg, type(err), traceback.format_exc(err))

    def _loadEventIdData(self):
        """
        Load the last processed event id from the disk

        If no event has ever been processed or if the eventIdFile has been
        deleted from disk, no id will be recoverable. In this case, we will try
        contacting Shotgun to get the latest event's id and we'll start
        processing from there.
        """
        eventIdFile = self.config.getEventIdFile()

        if eventIdFile and os.path.exists(eventIdFile):
            try:
                fh = open(eventIdFile, "rb")
                try:
                    self._eventIdData = pickle.load(fh)

                    # Provide event id info to the plugin collections. Once
                    # they've figured out what to do with it, ask them for their
                    # last processed id.
                    noStateCollections = []
                    for collection in self._pluginCollections:
                        state = self._eventIdData.get(collection.path)
                        if state:
                            collection.setState(state)
                        else:
                            noStateCollections.append(collection)

                    # If we don't have a state it means there's no match
                    # in the id file. First we'll search to see the latest id a
                    # matching plugin name has elsewhere in the id file. We do
                    # this as a fallback in case the plugins directory has been
                    # moved. If there's no match, use the latest event id
                    # in Shotgun.
                    if noStateCollections:
                        maxPluginStates = {}
                        for collection in self._eventIdData.values():
                            for pluginName, pluginState in collection.items():
                                if pluginName in maxPluginStates.keys():
                                    if pluginState[0] > maxPluginStates[pluginName][0]:
                                        maxPluginStates[pluginName] = pluginState
                                else:
                                    maxPluginStates[pluginName] = pluginState

                        lastEventId = self._getLastEventIdFromDatabase()
                        for collection in noStateCollections:
                            state = collection.getState()
                            for pluginName in state.keys():
                                if pluginName in maxPluginStates.keys():
                                    state[pluginName] = maxPluginStates[pluginName]
                                else:
                                    state[pluginName] = lastEventId
                            collection.setState(state)
                except pickle.UnpicklingError:
                    fh.close()

                    # Backwards compatibility:
                    # Reopen the file to try to read an old-style int
                    fh = open(eventIdFile, "rb")
                    line = fh.readline().strip()
                    if line.isdigit():
                        # The _loadEventIdData got an old-style id file containing a single
                        # int which is the last id properly processed.
                        lastEventId = int(line)
                        self.log.debug(
                            "Read last event id (%d) from file.", lastEventId
                        )
                        for collection in self._pluginCollections:
                            collection.setState(lastEventId)
                fh.close()
            except OSError as err:
                raise EventDaemonError(
                    "Could not load event id from file.\n\n%s"
                    % traceback.format_exc(err)
                )
        else:
            # No id file?
            # Get the event data from the database.
            lastEventId = self._getLastEventIdFromDatabase()
            if lastEventId:
                for collection in self._pluginCollections:
                    collection.setState(lastEventId)

            self._saveEventIdData()

    def _getLastEventIdFromDatabase(self):
        """
        Query Shotgun for the id of the most recent EventLogEntry.

        Retries on connection errors (see _checkConnectionAttempts) until a
        result is obtained.
        """
        conn_attempts = 0
        lastEventId = None
        while lastEventId is None:
            order = [{"column": "id", "direction": "desc"}]
            try:
                result = self._sg.find_one(
                    "EventLogEntry", filters=[], fields=["id"], order=order
                )
            except (sg.ProtocolError, sg.ResponseError, socket.error) as err:
                conn_attempts = self._checkConnectionAttempts(conn_attempts, str(err))
            except Exception as err:
                msg = "Unknown error: %s" % str(err)
                conn_attempts = self._checkConnectionAttempts(conn_attempts, msg)
            else:
                lastEventId = result["id"]
                self.log.info("Last event id (%d) from the SG database.", lastEventId)

        return lastEventId

    def _mainLoop(self):
        """
        Run the event processing loop.

        General behavior:
        - Load plugins from disk - see L{load} method.
        - Get new events from Shotgun
        - Loop through events
        - Loop through each plugin
        - Loop through each callback
        - Send the callback an event
        - Once all callbacks are done in all plugins, save the eventId
        - Go to the next event
        - Once all events are processed, wait for the defined fetch interval time and start over.

        Caveats:
        - If a plugin is deemed "inactive" (an error occured during
          registration), skip it.
        - If a callback is deemed "inactive" (an error occured during callback
          execution), skip it.
        - Each time through the loop, if the pidFile is gone, stop.
        """
        self.log.debug("Starting the event processing loop.")
        while self._continue:
            # Process events
            events = self._getNewEvents()
            for event in events:
                for collection in self._pluginCollections:
                    collection.process(event)
                self._saveEventIdData()

            # if we're lagging behind Shotgun, we received a full batch of events
            # skip the sleep() call in this case
            if len(events) < self.config.getMaxEventBatchSize():
                time.sleep(self._fetch_interval)

            # Reload plugins
            for collection in self._pluginCollections:
                collection.load()

            # Make sure that newly loaded events have proper state.
            self._loadEventIdData()

        self.log.debug("Shuting down event processing loop.")

    def stop(self):
        """Ask the main loop to exit after the current iteration."""
        self._continue = False

    def _getNewEvents(self):
        """
        Fetch new events from Shotgun.

        @return: Recent events that need to be processed by the engine.
        @rtype: I{list} of Shotgun event dictionaries.
        """
        # Start from the smallest unprocessed id over all plugin collections.
        nextEventId = None
        for newId in [
            coll.getNextUnprocessedEventId() for coll in self._pluginCollections
        ]:
            if newId is not None and (nextEventId is None or newId < nextEventId):
                nextEventId = newId

        if nextEventId is not None:
            filters = [["id", "greater_than", nextEventId - 1]]
            fields = [
                "id",
                "event_type",
                "attribute_name",
                "meta",
                "entity",
                "user",
                "project",
                "session_uuid",
                "created_at",
            ]
            order = [{"column": "id", "direction": "asc"}]

            conn_attempts = 0
            while True:
                try:
                    events = self._sg.find(
                        "EventLogEntry",
                        filters,
                        fields,
                        order,
                        limit=self.config.getMaxEventBatchSize(),
                    )
                    if events:
                        self.log.debug(
                            "Got %d events: %d to %d.",
                            len(events),
                            events[0]["id"],
                            events[-1]["id"],
                        )
                    return events
                except (sg.ProtocolError, sg.ResponseError, socket.error) as err:
                    conn_attempts = self._checkConnectionAttempts(
                        conn_attempts, str(err)
                    )
                except Exception as err:
                    msg = "Unknown error: %s" % str(err)
                    conn_attempts = self._checkConnectionAttempts(conn_attempts, msg)

        return []

    def _saveEventIdData(self):
        """
        Save an event Id to persistant storage.

        Next time the engine is started it will try to read the event id from
        this location to know at which event it should start processing.
        """
        eventIdFile = self.config.getEventIdFile()

        if eventIdFile is not None:
            for collection in self._pluginCollections:
                self._eventIdData[collection.path] = collection.getState()

            for colPath, state in self._eventIdData.items():
                if state:
                    try:
                        with open(eventIdFile, "wb") as fh:
                            # Use protocol 2 so it can also be loaded in Python 2
                            pickle.dump(self._eventIdData, fh, protocol=2)
                    except OSError as err:
                        self.log.error(
                            "Can not write event id data to %s.\n\n%s",
                            eventIdFile,
                            traceback.format_exc(err),
                        )
                    break
                else:
                    self.log.warning("No state was found. Not saving to disk.")

    def _checkConnectionAttempts(self, conn_attempts, msg):
        """
        Track consecutive failed Shotgun connection attempts.

        After _max_conn_retries consecutive failures, log an error, sleep for
        _conn_retry_sleep seconds and reset the counter; otherwise just warn.

        :return: the updated attempt counter.
        """
        conn_attempts += 1
        if conn_attempts == self._max_conn_retries:
            self.log.error(
                "Unable to connect to SG (attempt %s of %s): %s",
                conn_attempts,
                self._max_conn_retries,
                msg,
            )
            conn_attempts = 0
            time.sleep(self._conn_retry_sleep)
        else:
            self.log.warning(
                "Unable to connect to SG (attempt %s of %s): %s",
                conn_attempts,
                self._max_conn_retries,
                msg,
            )
        return conn_attempts
class PluginCollection(object):
    """
    A group of plugin files in a location on the disk.
    """

    def __init__(self, engine, path):
        if not os.path.isdir(path):
            raise ValueError("Invalid path: %s" % path)

        self._engine = engine
        self.path = path
        self._plugins = {}
        self._stateData = {}

    def setState(self, state):
        # Push persisted event-id state down to every plugin.
        if isinstance(state, int):
            # Old-style state: a single last-processed event id shared by all
            # plugins in the collection.
            for plugin in self:
                plugin.setState(state)
                self._stateData[plugin.getName()] = plugin.getState()
        else:
            self._stateData = state
            for plugin in self:
                pluginState = self._stateData.get(plugin.getName())
                if pluginState:
                    plugin.setState(pluginState)

    def getState(self):
        # Collect the current state of every plugin, keyed by plugin name.
        for plugin in self:
            self._stateData[plugin.getName()] = plugin.getState()
        return self._stateData

    def getNextUnprocessedEventId(self):
        # Smallest unprocessed event id over all active plugins, or None.
        eId = None
        for plugin in self:
            if not plugin.isActive():
                continue

            newId = plugin.getNextUnprocessedEventId()
            if newId is not None and (eId is None or newId < eId):
                eId = newId
        return eId

    def process(self, event):
        # Dispatch one event to every active plugin in name order.
        for plugin in self:
            if plugin.isActive():
                plugin.process(event)
            else:
                plugin.logger.debug("Skipping: inactive.")

    def load(self):
        """
        Load plugins from disk.

        General behavior:
        - Loop on all paths.
        - Find all valid .py plugin files.
        - Loop on all plugin files.
        - For any new plugins, load them, otherwise, refresh them.
        """
        newPlugins = {}

        for basename in os.listdir(self.path):
            if not basename.endswith(".py") or basename.startswith("."):
                continue

            if basename in self._plugins:
                newPlugins[basename] = self._plugins[basename]
            else:
                newPlugins[basename] = Plugin(
                    self._engine, os.path.join(self.path, basename)
                )

            # NOTE(review): load() is called for every plugin each pass so
            # existing plugins get refreshed when their mtime changes —
            # confirm against upstream; indentation was ambiguous here.
            newPlugins[basename].load()

        self._plugins = newPlugins

    def __iter__(self):
        # Deterministic iteration order: sorted by plugin file name.
        for basename in sorted(self._plugins.keys()):
            yield self._plugins[basename]
class Plugin(object):
"""
The plugin class represents a file on disk which contains one or more
callbacks.
"""
def __init__(self, engine, path):
"""
@param engine: The engine that instanciated this plugin.
@type engine: L{Engine}
@param path: The path of the plugin file to load.
@type path: I{str}
@raise ValueError: If the path to the plugin is not a valid file.
"""
self._engine = engine
self._path = path
if not os.path.isfile(path):
raise ValueError("The path to the plugin is not a valid file - %s." % path)
self._pluginName = os.path.splitext(os.path.split(self._path)[1])[0]
self._active = True
self._callbacks = []
self._mtime = None
self._lastEventId = None
self._backlog = {}
# Setup the plugin's logger
self.logger = logging.getLogger("plugin." + self.getName())
self.logger.config = self._engine.config
self._engine.setEmailsOnLogger(self.logger, True)
self.logger.setLevel(self._engine.config.getLogLevel())
if self._engine.config.getLogMode() == 1:
_setFilePathOnLogger(
self.logger, self._engine.config.getLogFile("plugin." + self.getName())
)
def getName(self):
return self._pluginName
def setState(self, state):
if isinstance(state, int):
self._lastEventId = state
elif isinstance(state, tuple):
self._lastEventId, self._backlog = state
else:
raise ValueError("Unknown state type: %s." % type(state))
def getState(self):
return (self._lastEventId, self._backlog)
def getNextUnprocessedEventId(self):
if self._lastEventId:
nextId = self._lastEventId + 1
else:
nextId = None
now = datetime.datetime.now()
for k in list(self._backlog):
v = self._backlog[k]
if v < now:
self.logger.warning("Timeout elapsed on backlog event id %d.", k)
del self._backlog[k]
elif nextId is None or k < nextId:
nextId = k
return nextId
def isActive(self):
"""
Is the current plugin active. Should it's callbacks be run?
@return: True if this plugin's callbacks should be run, False otherwise.
@rtype: I{bool}
"""
return self._active
def setEmails(self, *emails):
"""
Set the email addresses to whom this plugin should send errors.
@param emails: See L{LogFactory.getLogger}'s emails argument for info.
@type emails: A I{list}/I{tuple} of email addresses or I{bool}.
"""
self._engine.setEmailsOnLogger(self.logger, emails)
    def load(self):
        """
        Load/Reload the plugin and all its callbacks.

        If a plugin has never been loaded it will be loaded normally. If the
        plugin has been loaded before it will be reloaded only if the file has
        been modified on disk. In this event callbacks will all be cleared and
        reloaded.

        General behavior:
        - Try to load the source of the plugin.
        - Try to find a function called registerCallbacks in the file.
        - Try to run the registration function.

        At every step along the way, if any error occurs the whole plugin will
        be deactivated and the function will return.
        """
        # Check file mtime
        mtime = os.path.getmtime(self._path)
        if self._mtime is None:
            # First load of this plugin.
            self._engine.log.info("Loading plugin at %s" % self._path)
        elif self._mtime < mtime:
            # File changed on disk since last load.
            self._engine.log.info("Reloading plugin at %s" % self._path)
        else:
            # The mtime of file is equal or older. We don't need to do anything.
            return
        # Reset values
        self._mtime = mtime
        self._callbacks = []
        self._active = True
        try:
            # NOTE(review): imp is deprecated in Python 3; a migration to
            # importlib would be needed eventually — confirm before changing.
            plugin = imp.load_source(self._pluginName, self._path)
        except:
            # Broad catch is deliberate: any plugin error must not kill the
            # daemon, only deactivate this plugin.
            self._active = False
            self.logger.error(
                "Could not load the plugin at %s.\n\n%s",
                self._path,
                traceback.format_exc(),
            )
            return
        regFunc = getattr(plugin, "registerCallbacks", None)
        if callable(regFunc):
            try:
                # Hand the plugin a restricted facade of ourselves.
                regFunc(Registrar(self))
            except:
                self._engine.log.critical(
                    "Error running register callback function from plugin at %s.\n\n%s",
                    self._path,
                    traceback.format_exc(),
                )
                self._active = False
        else:
            self._engine.log.critical(
                "Did not find a registerCallbacks function in plugin at %s.", self._path
            )
            self._active = False
    def registerCallback(
        self,
        sgScriptName,
        sgScriptKey,
        callback,
        matchEvents=None,
        args=None,
        stopOnError=True,
    ):
        """
        Register a callback in the plugin.

        Creates a dedicated Shotgun connection using the supplied script
        credentials and appends a L{Callback} wrapper to this plugin's
        callback list.

        @param sgScriptName: Shotgun script name used for the connection.
        @param sgScriptKey: Shotgun script key used for the connection.
        @param callback: The callable invoked for each matching event.
        @param matchEvents: Optional event filter (see L{Callback}).
        @param args: Arbitrary data passed through to the callback.
        @param stopOnError: If True, an error in the callback deactivates it.
        """
        global sg
        sgConnection = sg.Shotgun(
            self._engine.config.getShotgunURL(),
            sgScriptName,
            sgScriptKey,
            http_proxy=self._engine.config.getEngineProxyServer(),
        )
        self._callbacks.append(
            Callback(
                callback,
                self,
                self._engine,
                sgConnection,
                matchEvents,
                args,
                stopOnError,
            )
        )
def process(self, event):
if event["id"] in self._backlog:
if self._process(event):
self.logger.info("Processed id %d from backlog." % event["id"])
del self._backlog[event["id"]]
self._updateLastEventId(event)
elif self._lastEventId is not None and event["id"] <= self._lastEventId:
msg = "Event %d is too old. Last event processed was (%d)."
self.logger.debug(msg, event["id"], self._lastEventId)
else:
if self._process(event):
self._updateLastEventId(event)
return self._active
    def _process(self, event):
        """
        Dispatch one event to every active callback of this plugin.

        If any callback reports failure, the whole plugin is deactivated
        and the remaining callbacks are skipped.

        @return: True if the plugin is still active afterwards.
        @rtype: I{bool}
        """
        for callback in self:
            if callback.isActive():
                if callback.canProcess(event):
                    msg = "Dispatching event %d to callback %s."
                    self.logger.debug(msg, event["id"], str(callback))
                    if not callback.process(event):
                        # A callback in the plugin failed. Deactivate the whole
                        # plugin.
                        self._active = False
                        break
            else:
                msg = "Skipping inactive callback %s in plugin."
                self.logger.debug(msg, str(callback))
        return self._active
    def _updateLastEventId(self, event):
        """
        Advance the last-processed position to this event, accounting for
        any ids that were skipped between the previous position and here.

        Skipped ids are either written off (if this event is older than the
        backlog timeout, the missing ids will never appear) or added to the
        backlog with an expiration time so they get a chance to show up in
        the event log later.
        """
        BACKLOG_TIMEOUT = (
            5  # time in minutes after which we consider a pending event won't happen
        )
        if self._lastEventId is not None and event["id"] > self._lastEventId + 1:
            # There is a gap between the previous position and this event.
            event_date = event["created_at"].replace(tzinfo=None)
            if datetime.datetime.now() > (
                event_date + datetime.timedelta(minutes=BACKLOG_TIMEOUT)
            ):
                # the event we've just processed happened more than BACKLOG_TIMEOUT minutes ago so any event
                # with a lower id should have shown up in the EventLog by now if it actually happened
                if event["id"] == self._lastEventId + 2:
                    self.logger.info(
                        "Event %d never happened - ignoring.", self._lastEventId + 1
                    )
                else:
                    self.logger.info(
                        "Events %d-%d never happened - ignoring.",
                        self._lastEventId + 1,
                        event["id"] - 1,
                    )
            else:
                # in this case, we want to add the missing events to the backlog as they could show up in the
                # EventLog within BACKLOG_TIMEOUT minutes, during which we'll keep asking for the same range
                # them to show up until they expire
                expiration = datetime.datetime.now() + datetime.timedelta(
                    minutes=BACKLOG_TIMEOUT
                )
                for skippedId in range(self._lastEventId + 1, event["id"]):
                    self.logger.info("Adding event id %d to backlog.", skippedId)
                    self._backlog[skippedId] = expiration
        self._lastEventId = event["id"]
    def __iter__(self):
        """
        A plugin is iterable and will iterate over all its L{Callback} objects.

        @rtype: I{iterator}
        """
        return self._callbacks.__iter__()
    def __str__(self):
        """
        Provide the name of the plugin when it is cast as string.

        @return: The name of the plugin.
        @rtype: I{str}
        """
        return self.getName()
class Registrar(object):
    """
    See public API docs in docs folder.

    A thin facade handed to a plugin's ``registerCallbacks`` function. It
    exposes only a whitelisted subset of the wrapped plugin's interface
    (``logger``, ``setEmails`` and ``registerCallback``) so plugin authors
    cannot reach into the plugin's internals.
    """

    def __init__(self, plugin):
        """
        Wrap a plugin so it can be passed to a user.

        @param plugin: The plugin whose whitelisted attributes to expose.
        """
        self._plugin = plugin
        self._allowed = ["logger", "setEmails", "registerCallback"]

    def getLogger(self):
        """
        Get the logger for this plugin.

        @return: The logger configured for this plugin.
        @rtype: L{logging.Logger}
        """
        # Read the logger from the wrapped plugin directly instead of going
        # through our own __getattr__ indirection (the old implementation
        # returned self.logger, which only worked by falling through to
        # __getattr__ — the "ugly protected member access" TODO).
        return self._plugin.logger

    def __getattr__(self, name):
        # Only whitelisted attributes are forwarded to the wrapped plugin.
        if name in self._allowed:
            return getattr(self._plugin, name)
        raise AttributeError(
            "type object '%s' has no attribute '%s'" % (type(self).__name__, name)
        )
class Callback(object):
    """
    A part of a plugin that can be called to process a Shotgun event.
    """

    def __init__(
        self,
        callback,
        plugin,
        engine,
        shotgun,
        matchEvents=None,
        args=None,
        stopOnError=True,
    ):
        """
        @param callback: The function to run when a Shotgun event occurs.
        @type callback: A function object.
        @param plugin: The plugin this callback belongs to; its logger's name
            is used as the prefix for this callback's logger.
        @param engine: The engine that will dispatch to this callback.
        @type engine: L{Engine}.
        @param shotgun: The Shotgun instance that will be used to communicate
            with your Shotgun server.
        @type shotgun: L{sg.Shotgun}
        @param matchEvents: The event filter to match events against before invoking callback.
        @type matchEvents: dict
        @param args: Any datastructure you would like to be passed to your
            callback function. Defaults to None.
        @type args: Any object.
        @param stopOnError: If True (default), an exception raised by the
            callback deactivates it for the rest of this run.
        @raise TypeError: If the callback is not a callable object.
        """
        if not callable(callback):
            raise TypeError(
                "The callback must be a callable object (function, method or callable class instance)."
            )
        self._name = None
        self._shotgun = shotgun
        self._callback = callback
        self._engine = engine
        self._logger = None
        self._matchEvents = matchEvents
        self._args = args
        self._stopOnError = stopOnError
        self._active = True
        # Find a name for this object
        if hasattr(callback, "__name__"):
            # Plain functions and methods: use the function name.
            self._name = callback.__name__
        elif hasattr(callback, "__class__") and hasattr(callback, "__call__"):
            # Callable instances: class name plus the instance's id, so two
            # instances of the same class get distinct logger names.
            self._name = "%s_%s" % (callback.__class__.__name__, hex(id(callback)))
        else:
            raise ValueError(
                "registerCallback should be called with a function or a callable object instance as callback argument."
            )
        # TODO: Get rid of this protected member access
        self._logger = logging.getLogger(plugin.logger.name + "." + self._name)
        self._logger.config = self._engine.config

    def canProcess(self, event):
        """
        Check the event against this callback's matchEvents filter.

        A "*" event-type key matches any event type. For a matching type,
        an attribute list of None or containing "*" matches any attribute;
        otherwise the event's attribute_name must appear in the list.

        @return: True if the event passes the filter, False otherwise.
        @rtype: I{bool}
        """
        if not self._matchEvents:
            return True
        if "*" in self._matchEvents:
            eventType = "*"
        else:
            eventType = event["event_type"]
            if eventType not in self._matchEvents:
                return False
        attributes = self._matchEvents[eventType]
        if attributes is None or "*" in attributes:
            return True
        if event["attribute_name"] and event["attribute_name"] in attributes:
            return True
        return False

    def process(self, event):
        """
        Process an event with the callback object supplied on initialization.

        If an error occurs, it will be logged appropriately and the callback
        will be deactivated.

        @param event: The Shotgun event to process.
        @type event: I{dict}
        """
        # set session_uuid for UI updates
        if self._engine._use_session_uuid:
            self._shotgun.set_session_uuid(event["session_uuid"])
        if self._engine.timing_logger:
            start_time = datetime.datetime.now(SG_TIMEZONE.local)
        try:
            self._callback(self._shotgun, self._logger, event, self._args)
            error = False
        except:
            error = True
            # Get the local variables of the frame of our plugin
            tb = sys.exc_info()[2]
            stack = []
            while tb:
                stack.append(tb.tb_frame)
                tb = tb.tb_next
            # NOTE(review): stack[1] assumes the traceback has at least two
            # frames (this one plus the plugin's) — confirm for callbacks
            # that raise immediately.
            msg = "An error occured processing an event.\n\n%s\n\nLocal variables at outer most frame in plugin:\n\n%s"
            self._logger.critical(
                msg, traceback.format_exc(), pprint.pformat(stack[1].f_locals)
            )
            if self._stopOnError:
                self._active = False
        if self._engine.timing_logger:
            # Emit a one-line timing record for this invocation.
            callback_name = self._logger.name.replace("plugin.", "")
            end_time = datetime.datetime.now(SG_TIMEZONE.local)
            duration = self._prettyTimeDeltaFormat(end_time - start_time)
            delay = self._prettyTimeDeltaFormat(start_time - event["created_at"])
            msg_format = "event_id=%d created_at=%s callback=%s start=%s end=%s duration=%s error=%s delay=%s"
            data = [
                event["id"],
                event["created_at"].isoformat(),
                callback_name,
                start_time.isoformat(),
                end_time.isoformat(),
                duration,
                str(error),
                delay,
            ]
            self._engine.timing_logger.info(msg_format, *data)
        return self._active

    def _prettyTimeDeltaFormat(self, time_delta):
        """
        Format a timedelta as ``DD:HH:MM:SS.microseconds``.

        @rtype: I{str}
        """
        days, remainder = divmod(time_delta.total_seconds(), 86400)
        hours, remainder = divmod(remainder, 3600)
        minutes, seconds = divmod(remainder, 60)
        return "%02d:%02d:%02d:%02d.%06d" % (
            days,
            hours,
            minutes,
            seconds,
            time_delta.microseconds,
        )

    def isActive(self):
        """
        Check if this callback is active, i.e. if events should be passed to it
        for processing.

        @return: True if this callback should process events, False otherwise.
        @rtype: I{bool}
        """
        return self._active

    def __str__(self):
        """
        The name of the callback.

        @return: The name of the callback
        @rtype: I{str}
        """
        return self._name
class CustomSMTPHandler(logging.handlers.SMTPHandler):
    """
    A custom SMTPHandler subclass that will adapt its subject depending on the
    error severity.
    """

    # Suffix appended to the email subject per log level.
    LEVEL_SUBJECTS = {
        logging.ERROR: "ERROR - SG event daemon.",
        logging.CRITICAL: "CRITICAL - SG event daemon.",
    }

    def __init__(
        self, smtpServer, fromAddr, toAddrs, emailSubject, credentials=None, secure=None
    ):
        args = [smtpServer, fromAddr, toAddrs, emailSubject, credentials]
        if credentials:
            # Python 2.7 implemented the secure argument
            if CURRENT_PYTHON_VERSION >= PYTHON_27:
                args.append(secure)
            else:
                # Pre-2.7: stash it ourselves; emit() reads self.secure.
                self.secure = secure
        logging.handlers.SMTPHandler.__init__(self, *args)

    def getSubject(self, record):
        """
        Return the base subject with a severity suffix for ERROR/CRITICAL.
        """
        subject = logging.handlers.SMTPHandler.getSubject(self, record)
        if record.levelno in self.LEVEL_SUBJECTS:
            return subject + " " + self.LEVEL_SUBJECTS[record.levelno]
        return subject

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        """
        # Mostly copied from Python 2.7 implementation.
        try:
            import smtplib
            from email.utils import formatdate

            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port)
            msg = self.format(record)
            msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
                self.fromaddr,
                ",".join(self.toaddrs),
                self.getSubject(record),
                formatdate(),
                msg,
            )
            if self.username:
                if self.secure is not None:
                    # STARTTLS handshake before authenticating.
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.sendmail(self.fromaddr, self.toaddrs, msg)
            smtp.close()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            # Logging must never crash the daemon; defer to logging's own
            # error handling for everything else.
            self.handleError(record)
class EventDaemonError(Exception):
    """
    Base error for the Shotgun event system.

    All daemon-specific exceptions derive from this class so callers can
    catch the whole family with one except clause.
    """
class ConfigError(EventDaemonError):
    """
    Used when an error is detected in the config file.
    """
# The pywin32 service wrapper is only defined on Windows, where the win32*
# modules are importable.
if sys.platform == "win32":
    class WindowsService(win32serviceutil.ServiceFramework):
        """
        Windows service wrapper
        """

        # Internal service name and the name shown in the Services console.
        _svc_name_ = "ShotgunEventDaemon"
        _svc_display_name_ = "Shotgun Event Handler"

        def __init__(self, args):
            win32serviceutil.ServiceFramework.__init__(self, args)
            # Event object signaled by SvcStop to unblock a waiting service.
            self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
            self._engine = Engine(_getConfigPath())

        def SvcStop(self):
            """
            Stop the Windows service.
            """
            self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
            win32event.SetEvent(self.hWaitStop)
            self._engine.stop()

        def SvcDoRun(self):
            """
            Start the Windows service.
            """
            servicemanager.LogMsg(
                servicemanager.EVENTLOG_INFORMATION_TYPE,
                servicemanager.PYS_SERVICE_STARTED,
                (self._svc_name_, ""),
            )
            self.main()

        def main(self):
            """
            Primary Windows entry point
            """
            self._engine.start()
class LinuxDaemon(daemonizer.Daemon):
    """
    Linux Daemon wrapper or wrapper used for foreground operation on Windows
    """

    def __init__(self):
        self._engine = Engine(_getConfigPath())
        super(LinuxDaemon, self).__init__(
            "shotgunEvent", self._engine.config.getEnginePIDFile()
        )

    def start(self, daemonize=True):
        """
        Start the daemon; when not daemonizing, also log to stdout.
        """
        if not daemonize:
            # Setup the stdout logger
            handler = logging.StreamHandler()
            handler.setFormatter(
                logging.Formatter("%(levelname)s:%(name)s:%(message)s")
            )
            logging.getLogger().addHandler(handler)
        super(LinuxDaemon, self).start(daemonize)

    def _run(self):
        """
        Start the engine's main loop
        """
        self._engine.start()

    def _cleanup(self):
        # Called by the daemonizer on shutdown.
        self._engine.stop()
def main():
    """
    Command line entry point.

    On Windows (except for the "foreground" action) the command line is
    handed to the pywin32 service machinery. Otherwise the first argument
    names the L{LinuxDaemon} method to run (start/stop/restart/foreground).

    @return: Process exit status: 0 on success, 2 on usage error, 3 on an
        unsupported Python version.
    """
    if CURRENT_PYTHON_VERSION <= PYTHON_26:
        print(
            "Python 2.5 and older is not supported anymore. Please use Python 2.6 or newer."
        )
        return 3
    action = None
    if len(sys.argv) > 1:
        action = sys.argv[1]
    if sys.platform == "win32" and action != "foreground":
        # Let pywin32 handle install/start/stop/etc. service commands.
        win32serviceutil.HandleCommandLine(WindowsService)
        return 0
    if action:
        daemon = LinuxDaemon()
        # Find the function to call on the daemon and call it
        func = getattr(daemon, action, None)
        if action[:1] != "_" and func is not None:
            func()
            return 0
        # An explicit but unknown (or private) action was requested.
        print("Unknown command: %s" % action)
    # Fix: when no action is given, print only the usage line instead of
    # the misleading "Unknown command: None".
    print("usage: %s start|stop|restart|foreground" % sys.argv[0])
    return 2
def _getConfigPath():
    """
    Get the path of the shotgunEventDaemon configuration file.

    Searches, in order: the directory of the running script (when known),
    /etc, and the directory containing this module. Returns the first
    existing ``shotgunEventDaemon.conf`` found.

    @raise EventDaemonError: If no config file exists in any search path.
    """
    searchDirs = ["/etc", os.path.dirname(__file__)]
    # Get the current path of the daemon script
    scriptPath = sys.argv[0]
    if scriptPath not in ("", "-c"):
        # Make absolute path and eliminate any symlinks if any, then give
        # the script's own directory highest priority.
        scriptPath = os.path.realpath(os.path.abspath(scriptPath))
        searchDirs.insert(0, os.path.dirname(scriptPath))
    # Search for a config file.
    for directory in searchDirs:
        candidate = os.path.join(directory, "shotgunEventDaemon.conf")
        if os.path.exists(candidate):
            return candidate
    # No config file was found
    raise EventDaemonError("Config path not found, searched %s" % ", ".join(searchDirs))
# Script entry point: exit with the status code returned by main().
if __name__ == "__main__":
    sys.exit(main())
| src/shotgunEventDaemon.py | 46,186 | A part of a plugin that can be called to process a Shotgun event.
Used when an error is detected in the config file.
A custom SMTPHandler subclass that will adapt its subject depending on the
error severity.
The engine holds the main loop of event processing.
Base error for the Shotgun event system.
Linux Daemon wrapper or wrapper used for foreground operation on Windows
The plugin class represents a file on disk which contains one or more
callbacks.
A group of plugin files in a location on the disk.
See public API docs in docs folder.
Windows service wrapper
Start the Windows service.
Stop the Windows service.
@param engine: The engine that instantiated this plugin.
@type engine: L{Engine}
@param path: The path of the plugin file to load.
@type path: I{str}
@raise ValueError: If the path to the plugin is not a valid file.
Wrap a plugin so it can be passed to a user.
@param callback: The function to run when a Shotgun event occurs.
@type callback: A function object.
@param engine: The engine that will dispatch to this callback.
@type engine: L{Engine}.
@param shotgun: The Shotgun instance that will be used to communicate
with your Shotgun server.
@type shotgun: L{sg.Shotgun}
@param matchEvents: The event filter to match events against before invoking callback.
@type matchEvents: dict
@param args: Any datastructure you would like to be passed to your
callback function. Defaults to None.
@type args: Any object.
@raise TypeError: If the callback is not a callable object.
A plugin is iterable and will iterate over all its L{Callback} objects.
Provide the name of the plugin when it is cast as string.
@return: The name of the plugin.
@rtype: I{str}
The name of the callback.
@return: The name of the callback
@rtype: I{str}
Configure a logger with a handler that sends emails to specified
addresses.
The format of the email is defined by L{LogFactory.EMAIL_FORMAT_STRING}.
@note: Any SMTPHandler already connected to the logger will be removed.
@param logger: The logger to configure
@type logger: A logging.Logger instance
@param toAddrs: The addresses to send the email to.
@type toAddrs: A list of email addresses that will be passed on to the
SMTPHandler.
Get the path of the shotgunEventDaemon configuration file.
Fetch new events from Shotgun.
@return: Recent events that need to be processed by the engine.
@rtype: I{list} of Shotgun event dictionaries.
Load the last processed event id from the disk
If no event has ever been processed or if the eventIdFile has been
deleted from disk, no id will be recoverable. In this case, we will try
contacting Shotgun to get the latest event's id and we'll start
processing from there.
Run the event processing loop.
General behavior:
- Load plugins from disk - see L{load} method.
- Get new events from Shotgun
- Loop through events
- Loop through each plugin
- Loop through each callback
- Send the callback an event
- Once all callbacks are done in all plugins, save the eventId
- Go to the next event
- Once all events are processed, wait for the defined fetch interval time and start over.
Caveats:
- If a plugin is deemed "inactive" (an error occurred during
registration), skip it.
- If a callback is deemed "inactive" (an error occurred during callback
execution), skip it.
- Each time through the loop, if the pidFile is gone, stop.
Remove all handlers or handlers of a specified type from a logger.
@param logger: The logger who's handlers should be processed.
@type logger: A logging.Logger object
@param handlerTypes: A type of handler or list/tuple of types of handlers
that should be removed from the logger. If I{None}, all handlers are
removed.
@type handlerTypes: L{None}, a logging.Handler subclass or
I{list}/I{tuple} of logging.Handler subclasses.
Start the engine's main loop
Save an event Id to persistant storage.
Next time the engine is started it will try to read the event id from
this location to know at which event it should start processing.
Emit a record.
Format the record and send it to the specified addressees.
Get the logger for this plugin.
@return: The logger configured for this plugin.
@rtype: L{logging.Logger}
Is the current plugin active. Should it's callbacks be run?
@return: True if this plugin's callbacks should be run, False otherwise.
@rtype: I{bool}
Check if this callback is active, i.e. if events should be passed to it
for processing.
@return: True if this callback should process events, False otherwise.
@rtype: I{bool}
Load plugins from disk.
General behavior:
- Loop on all paths.
- Find all valid .py plugin files.
- Loop on all plugin files.
- For any new plugins, load them, otherwise, refresh them.
Load/Reload the plugin and all its callbacks.
If a plugin has never been loaded it will be loaded normally. If the
plugin has been loaded before it will be reloaded only if the file has
been modified on disk. In this event callbacks will all be cleared and
reloaded.
General behavior:
- Try to load the source of the plugin.
- Try to find a function called registerCallbacks in the file.
- Try to run the registration function.
At every step along the way, if any error occurs the whole plugin will
be deactivated and the function will return.
Primary Windows entry point
Process an event with the callback object supplied on initialization.
If an error occurs, it will be logged appropriately and the callback
will be deactivated.
@param event: The Shotgun event to process.
@type event: I{dict}
Register a callback in the plugin.
Set the email addresses to whom this plugin should send errors.
@param emails: See L{LogFactory.getLogger}'s emails argument for info.
@type emails: A I{list}/I{tuple} of email addresses or I{bool}.
Start the processing of events.
The last processed id is loaded up from persistent storage on disk and
the main loop is started.
For an overview of shotgunEvents, please see raw documentation in the docs
folder or an html compiled version at:
http://shotgunsoftware.github.com/shotgunEvents
!/usr/bin/env python Init file for Shotgun event daemon chkconfig: 345 99 00 description: Shotgun event daemon BEGIN INIT INFO Provides: shotgunEvent Required-Start: $network Should-Start: $remote_fs Required-Stop: $network Should-Stop: $remote_fs Default-Start: 2 3 4 5 Short-Description: Shotgun event daemon Description: Shotgun event daemon END INIT INFO Suppress the deprecation warning about imp until we get around to replacing it Remove any previous handler. Add the file handler Read/parse the config Get config values Setup the loggers for the main engine Set the root logger for file output. Set the engine logger for email output. Set the engine logger for file and email output. Setup the timing log file Configure the logger for email output TODO: Take value from config Notify which version of shotgun api we are using Provide event id info to the plugin collections. Once they've figured out what to do with it, ask them for their last processed id. If we don't have a state it means there's no match in the id file. First we'll search to see the latest id a matching plugin name has elsewhere in the id file. We do this as a fallback in case the plugins directory has been moved. If there's no match, use the latest event id in Shotgun. Backwards compatibility: Reopen the file to try to read an old-style int The _loadEventIdData got an old-style id file containing a single int which is the last id properly processed. No id file? Get the event data from the database. Process events if we're lagging behind Shotgun, we received a full batch of events skip the sleep() call in this case Reload plugins Make sure that newly loaded events have proper state. Use protocol 2 so it can also be loaded in Python 2 Setup the plugin's logger Check file mtime The mtime of file is equal or older. We don't need to do anything. Reset values A callback in the plugin failed. Deactivate the whole plugin. 
time in minutes after which we consider a pending event won't happen the event we've just processed happened more than BACKLOG_TIMEOUT minutes ago so any event with a lower id should have shown up in the EventLog by now if it actually happened in this case, we want to add the missing events to the backlog as they could show up in the EventLog within BACKLOG_TIMEOUT minutes, during which we'll keep asking for the same range them to show up until they expire TODO: Fix this ugly protected member access Find a name for this object TODO: Get rid of this protected member access set session_uuid for UI updates Get the local variables of the frame of our plugin Python 2.7 implemented the secure argument Mostly copied from Python 2.7 implementation. Setup the stdout logger Find the function to call on the daemon and call it Get the current path of the daemon script Make absolute path and eliminate any symlinks if any. Add the script's directory to the paths we'll search for the config. Search for a config file. No config file was found | 8,961 | en | 0.839895 |
"""
Project Euler Problem 7: https://projecteuler.net/problem=7
10001st prime
By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we
can see that the 6th prime is 13.
What is the 10001st prime number?
References:
- https://en.wikipedia.org/wiki/Prime_number
"""
import itertools
import math
def prime_check(number: int) -> bool:
    """
    Determines whether a given number is prime or not

    >>> prime_check(2)
    True
    >>> prime_check(15)
    False
    >>> prime_check(29)
    True
    >>> prime_check(1)
    False
    >>> prime_check(0)
    False
    """
    if number < 2:
        # Fix: 0, 1 and negative numbers are not prime. The original
        # trial-division range was empty for these, so all() vacuously
        # reported them as prime.
        return False
    if number % 2 == 0:
        # 2 is the only even prime.
        return number == 2
    # Trial division by odd candidates up to sqrt(number).
    return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))
def prime_generator():
    """
    Generate a sequence of prime numbers

    Yields primes in increasing order, starting from 2, indefinitely.
    """
    for candidate in itertools.count(2):
        if prime_check(candidate):
            yield candidate
def solution(nth: int = 10001) -> int:
    """
    Returns the n-th prime number.

    >>> solution(6)
    13
    >>> solution(1)
    2
    >>> solution(3)
    5
    >>> solution(20)
    71
    >>> solution(50)
    229
    >>> solution(100)
    541
    """
    primes = prime_generator()
    # Discard the first nth - 1 primes, then return the next one.
    for _ in range(nth - 1):
        next(primes)
    return next(primes)
# Print the default answer (the 10001st prime) when run as a script.
if __name__ == "__main__":
    print(f"{solution() = }")
| project_euler/problem_007/sol3.py | 1,222 | Determines whether a given number is prime or not
>>> prime_check(2)
True
>>> prime_check(15)
False
>>> prime_check(29)
True
Generate a sequence of prime numbers
Returns the n-th prime number.
>>> solution(6)
13
>>> solution(1)
2
>>> solution(3)
5
>>> solution(20)
71
>>> solution(50)
229
>>> solution(100)
541
Project Euler Problem 7: https://projecteuler.net/problem=7
10001st prime
By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we
can see that the 6th prime is 13.
What is the 10001st prime number?
References:
- https://en.wikipedia.org/wiki/Prime_number | 586 | en | 0.688 |
import requests
from collections import OrderedDict
from django.utils.http import urlencode
from allauth.socialaccount.providers.core.oauth2.client import (
OAuth2Client,
OAuth2Error,
)
class WeixinOAuth2Client(OAuth2Client):
    """
    OAuth2 client for Weixin (WeChat).

    Differs from the stock client in that Weixin expects ``appid`` /
    ``secret`` parameter names and alphabetically sorted query parameters.
    """

    def get_redirect_url(self, authorization_url, extra_params):
        """Build the authorization redirect URL with sorted query params."""
        params = {
            'appid': self.consumer_key,
            'redirect_uri': self.callback_url,
            'scope': self.scope,
            'response_type': 'code',
        }
        if self.state:
            params['state'] = self.state
        params.update(extra_params)
        # Weixin requires the query parameters in alphabetical order.
        sorted_params = OrderedDict(sorted(params.items()))
        return '%s?%s' % (authorization_url, urlencode(sorted_params))

    def get_access_token(self, code):
        """Exchange an authorization code for an access token dict."""
        data = {
            'appid': self.consumer_key,
            'redirect_uri': self.callback_url,
            'grant_type': 'authorization_code',
            'secret': self.consumer_secret,
            'scope': self.scope,
            'code': code,
        }
        self._strip_empty_keys(data)
        params = None
        if self.access_token_method == 'GET':
            # GET requests carry the payload in the query string instead.
            params, data = data, None
        # TODO: Proper exception handling
        resp = requests.request(
            self.access_token_method,
            self.access_token_url,
            params=params,
            data=data,
        )
        access_token = resp.json() if resp.status_code == 200 else None
        if not access_token or 'access_token' not in access_token:
            raise OAuth2Error('Error retrieving access token: %s'
                              % resp.content)
        return access_token
| allauth/socialaccount/providers/other/weixin/client.py | 1,813 | TODO: Proper exception handling | 31 | es | 0.211752 |
# ---------------------------------------------------------------------
# Rotek.BT.get_metrics
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.sa.profiles.Generic.get_metrics import Script as GetMetricsScript, metrics
from noc.core.validators import is_float
class Script(GetMetricsScript):
    """
    Metric collection script for Rotek BT devices.

    All values are read over SNMP from the vendor subtree
    1.3.6.1.4.1.41752.5.15.1.<ifindex>.0.
    """

    name = "Rotek.BT.get_metrics"

    @metrics(["Environment | Sensor Status"], volatile=False, access="S")  # SNMP version
    def get_sensor_status(self, metrics):
        """
        Emit a 0/1 status per sensor.

        NOTE(review): value 0 appears to mean "ok" and 1 "problem", based on
        the per-ifindex checks below — confirm against NOC conventions.
        """
        for metric in metrics:
            port = metric.labels[0].rsplit("::", 1)[-1]
            if "st" in port:
                # Skip pseudo-sensors whose name contains "st".
                continue
            value = 1
            # NOTE(review): duplicate of the assignment above — redundant.
            port = metric.labels[0].rsplit("::", 1)[-1]
            status = self.snmp.get(f"1.3.6.1.4.1.41752.5.15.1.{metric.ifindex}.0")
            if status is None:
                continue
            # Per-sensor "healthy" conditions, keyed by ifindex.
            if metric.ifindex == 1 and int(status) == 0:
                value = 0
            elif metric.ifindex == 2:
                # Temperature sensor: sane if within (-55, 600).
                if is_float(status) and (-55 < float(status) < 600):
                    value = 0
            elif metric.ifindex in [4, 6] and float(status) > 0:
                value = 0
            elif metric.ifindex == 9 and int(status) != 2:
                value = 0
            self.set_metric(
                id=("Environment | Sensor Status", metric.labels),
                labels=[f"noc::sensor::{port}"],
                value=value,
            )

    @metrics(["Environment | Temperature"], volatile=False, access="S")  # SNMP version
    def get_temperature(self, metrics):
        """Emit temperature readings for sensors whose name contains "temp"."""
        for metric in metrics:
            if not metric.labels:
                continue
            port = metric.labels[0].rsplit("::", 1)[-1]
            if "temp" in port:
                value = self.snmp.get(f"1.3.6.1.4.1.41752.5.15.1.{metric.ifindex}.0")
                if value is None:
                    continue
                # Only report numeric readings.
                if is_float(value):
                    self.set_metric(
                        id=("Environment | Temperature", metric.labels),
                        labels=[f"noc::module::{port}", f"noc::sensor::{port}"],
                        value=value,
                        multi=True,
                    )

    @metrics(["Environment | Voltage"], volatile=False, access="S")  # SNMP version
    def get_voltage(self, metrics):
        """Emit battery voltage readings."""
        for metric in metrics:
            value = self.snmp.get(f"1.3.6.1.4.1.41752.5.15.1.{metric.ifindex}.0")
            if value is None:
                continue
            port = metric.labels[0].rsplit("::", 1)[-1]
            self.set_metric(
                id=("Environment | Voltage", metric.labels),
                labels=["noc::module::battery", f"noc::sensor::{port}"],
                value=value,
                multi=True,
            )

    @metrics(["Environment | Power | Input | Status"], volatile=False, access="S")  # SNMP version
    def get_power_input_status(self, metrics):
        """
        Emit mains input status.

        NOTE(review): OID .9.0 values 1/2/3 are treated as "problem" (value
        stays 1) and anything else as "ok" — confirm the enum semantics
        against the device MIB.
        """
        for metric in metrics:
            value = 1
            res = self.snmp.get("1.3.6.1.4.1.41752.5.15.1.9.0")
            port = metric.labels[0].rsplit("::", 1)[-1]
            if res not in [1, 2, 3]:
                value = 0
            self.set_metric(
                id=("Environment | Power | Input | Status", metric.labels),
                labels=[f"noc::sensor::{port}"],
                value=value,
            )
| sa/profiles/Rotek/BT/get_metrics.py | 3,526 | --------------------------------------------------------------------- Rotek.BT.get_metrics --------------------------------------------------------------------- Copyright (C) 2007-2020 The NOC Project See LICENSE for details --------------------------------------------------------------------- NOC modules SNMP version SNMP version SNMP version SNMP version | 358 | en | 0.196884 |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
# Automates creating a public GitHub repository via browser automation:
# logs in, opens the "new repository" dialog, fills the form and submits.
# NOTE(review): find_element_by_* APIs were removed in Selenium 4; this
# script assumes Selenium 3.x — verify the installed version.
chrome_path = r"C:\Users\iamdo\Downloads\chromedriver.exe" # specify your driver location
driver = webdriver.Chrome(chrome_path)
driver.get("https://github.com/login")
username = "your email" # specify your email
password = "your password" # specify your password
# Fill in and submit the login form.
usernamefield = driver.find_element_by_name("login")
usernamefield.clear()
usernamefield.send_keys(username)
pfield = driver.find_element_by_name("password")
pfield.clear()
pfield.send_keys(password)
driver.find_element_by_name("commit").click()
# Open the "create a new repository" page from the dashboard sidebar.
# NOTE(review): absolute XPaths like this are brittle against GitHub UI
# changes — expect breakage.
driver.find_element_by_xpath("""/html/body/div[5]/div/aside[1]/div[2]/div[1]/div/h2/a""").click()
reponame = driver.find_element_by_name("repository[name]")
reponame.send_keys("Test") # specify your repository name
# Public visibility, initialize with a README, and pick the MIT license.
driver.find_element_by_xpath("""//*[@id="repository_visibility_public"]""").click()
driver.find_element_by_xpath("""//*[@id="repository_auto_init"]""").click()
driver.find_element_by_xpath("""//*[@id="repo-new-license-details"]/summary""").click()
driver.find_element_by_xpath("""//*[@id="license-label-mit"]""").click()
time.sleep(1)
# Submit the form, give the page time to load, then close the browser.
driver.find_element_by_xpath("""//*[@id="new_repository"]/div[3]/button""").click()
time.sleep(4)
driver.close()
| github.py | 1,294 | specify your driver location specify your email specify your password specify your repository name | 98 | bn | 0.06914 |
from django.contrib import admin
from .models import Post
class PostAdmin(admin.ModelAdmin):
    # Columns shown in the Post change-list.
    list_display = ("pk","text", "pub_date", "author")
    # Allow searching posts by their text content.
    search_fields = ("text",)
    # Sidebar filter on publication date.
    list_filter = ("pub_date",)
    # Placeholder shown for empty values ("-пусто-" is Russian for "empty").
    empty_value_display = "-пусто-"
admin.site.register(Post, PostAdmin)
| yatube/posts/admin.py | 328 | pythonadmin.site.register(Post) | 31 | en | 0.208742 |
import asyncio
from typing import List
from app.common import SkipListing
from app.scrapers.base import BaseScraper
class MaartenScraper(BaseScraper):
    """
    Scraper for apartment listings on maartenmakelaardij.nl (Rotterdam).
    """

    MAKELAARDIJ: str = "maarten"
    BASE_URL: str = "https://www.maartenmakelaardij.nl"

    # Specific functions
    async def extract_object_urls(self, soup) -> List[str]:
        """
        Extract apartment object urls

        Collects every anchor whose href contains "woning/rotterdam-".
        Note: deduplication via set() does not preserve order.
        """
        items = soup.find_all("a")
        urls: List[str] = []
        for item in items:
            if "woning/rotterdam-" in item["href"]:
                urls.append(item["href"])
        return list(set(urls))

    async def get_page_url(self, page_num: int) -> str:
        """
        Format page url

        The whole inventory lives on a single page, so page_num is ignored.
        """
        return f"{self.BASE_URL}/aanbod/rotterdam/"

    async def get_apartment_urls(self) -> List[str]:
        """
        Fetch list of apartment urls from inventory
        """
        urls = await self.scrape_page(0)
        return urls

    def extract_features(self, soup):
        """
        Extract feature metadata from listing

        Reads the <dt>/<dd> definition list (paired by index), the address
        and price spans, and the free-text description. Dutch labels:
        Bouwjaar = construction year, Woonoppervlakte = living area,
        Aantal kamers = number of rooms.
        """
        meta_data = {
            "makelaardij": self.MAKELAARDIJ,
            "building": {},
            "unit": {"energy": {}, "tags": []},
        }
        dt = soup.find_all("dt")
        dd = soup.find_all("dd")
        # Features
        # NOTE(review): assumes dt[i] pairs with dd[i] — holds only if the
        # page's definition list is well-formed.
        for ind, key in enumerate(dt):
            if "Bouwjaar" in key.string:
                meta_data["building"]["year_constructed"] = self.find_int(
                    dd[ind].string
                )
            elif "Woonoppervlakte" in key.string:
                meta_data["unit"]["area"] = self.find_float(dd[ind].text.split(" ")[0])
            elif "Aantal kamers" in key.string:
                meta_data["unit"]["num_rooms"] = self.find_int(dd[ind].text)
            elif "verdiepingen" in key.string:
                meta_data["unit"]["num_floors"] = self.find_int(dd[ind].text)
            elif "Status" in key.string:
                meta_data["available"] = "Beschikbaar" in dd[ind].text
            elif "Buitenruimte" in key.string and "TUIN" in dd[ind].text:
                # "TUIN" marks an outdoor space that is a garden.
                meta_data["unit"]["tags"].append("garden")
        # Other fields
        meta_data["address"] = soup.find("span", {"class": "adres"}).string
        meta_data["asking_price"] = self.find_int(
            soup.find("span", {"class": "price"}).string.replace(".", "")
        )
        # Scan the description prose for ownership and energy label hints.
        description = soup.find("div", {"id": "read-more-content"}).children
        for p in description:
            p_text = str(p.text)
            if "Eigen grond" in p_text:
                # "Eigen grond" = freehold land.
                meta_data["unit"]["own_land"] = True
            elif "erfpacht" in p_text:
                # "erfpacht" = leasehold.
                meta_data["unit"]["own_land"] = False
            if "Energielabel" in p_text:
                label = p_text.split("Energielabel: ")[1][0]
                meta_data["unit"]["energy"]["label"] = label
                break
        # Bounce broken listings
        if not meta_data["unit"].get("area"):
            raise SkipListing("Unable to find area")
        return meta_data
if __name__ == "__main__":
    # asyncio.run creates a fresh event loop, runs the coroutine to completion
    # and closes the loop — preferred over the deprecated
    # get_event_loop()/run_until_complete pattern on Python 3.7+.
    scraper = MaartenScraper()
    asyncio.run(scraper.start())
| server/app/scrapers/maarten.py | 3,199 | Extract feature metadata from listing
Specific functions Features Other fields Bounce broken listings | 103 | en | 0.768303 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Address table."""
    # Address rows reference the configured user model.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Address',
            fields=[
                ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
                ('institution_name', models.CharField(max_length=200)),
                ('address_1', models.CharField(max_length=100)),
                # Second address line is optional.
                ('address_2', models.CharField(max_length=100, null=True, blank=True)),
                ('city', models.CharField(max_length=100)),
                ('postcode', models.CharField(max_length=10)),
                ('country', models.CharField(max_length=100)),
                # ForeignKey without on_delete — this migration targets Django < 2.0.
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='addresses')),
            ],
        ),
    ]
| lims/addressbook/migrations/0001_initial.py | 1,034 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'artemis.settings')
# Django settings must be imported AFTER the env var above is set.
from django.conf import settings
app = Celery('artemis')
# Pull Celery configuration from the Django settings object.
app.config_from_object('django.conf:settings', )
# Load task modules from all registered Django app configs.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
    """Dump the wrapping task's request context to stdout (smoke-test helper)."""
    print('Request: %r' % (self.request,))
| artemis/celery.py | 551 | set the default Django settings module for the 'celery' program. Load task modules from all registered Django app configs. | 122 | en | 0.362336 |
import logging
import json
import paho.mqtt.client as mqttc
from ioctlgw import version
from ioctlgw.componentstate import ComponentState
LOG = logging.getLogger(__name__)
class MqttConnector(object):
    """
    Bridge between the IO controllers managed by ``service`` and an MQTT
    broker: publishes board connection/IO/status events and relays inbound
    ``digitaloutput`` commands back to the appropriate controller.
    """

    def __init__(self, service):
        """Wire up the MQTT client callbacks and schedule periodic status publishing."""
        self.service = service
        self.config = self.service.config
        self.mqtt_config = self.config["mqtt"]
        self.mqtt = mqttc.Client()
        self.mqtt_base_topic = self.mqtt_config["topic"]
        self.mqtt.on_connect = self.mqtt_on_connect
        self.mqtt.on_disconnect = self.mqtt_on_disconnect
        self.mqtt.on_message = self.mqtt_on_message
        self.mqtt.on_subscribe = self.mqtt_on_subscribe
        # MQTT status jobs: one immediate run, then roughly every 10 seconds.
        self.service.scheduler.add_job(self.publish_status)
        self.service.scheduler.add_job(self.publish_status, 'interval', seconds=10, jitter=5)

    def start(self):
        """Connect to the broker and start paho's background network thread."""
        LOG.info("MQTT Starting")
        if "user" in self.mqtt_config and "pass" in self.mqtt_config:
            self.mqtt.username_pw_set(self.mqtt_config["user"], self.mqtt_config["pass"])
        mqtt_host = self.mqtt_config["host"]
        mqtt_port = self.mqtt_config["port"]
        LOG.info("MQTT Connecting to %s:%s", mqtt_host, mqtt_port)
        self.mqtt.connect(mqtt_host, mqtt_port, 60)
        # Subscribe to the digital-output command topics for all boards.
        topics = [
            "/boards/+/digitaloutput/+/command"
        ]
        for topic_suffix in topics:
            self.mqtt.subscribe(f"{self.mqtt_base_topic}{topic_suffix}")
        self.mqtt.loop_start()

    def mqtt_on_connect(self, client, data, flags, rc):
        LOG.info("MQTT Connected %s", rc)

    def mqtt_on_disconnect(self, client, userdata, rc):
        # NOTE(review): rc == 0 normally indicates a deliberate disconnect, yet
        # both branches log it as unexpected — confirm the intended semantics.
        if rc == 0:
            LOG.warning("Unexpected MQTT disconnection.")
        else:
            LOG.warning("Unexpected MQTT disconnection. Will auto-reconnect")

    def mqtt_on_subscribe(self, client, userdata, mid, gqos):
        LOG.info("MQTT Subscribed %s", mid)

    def mqtt_on_message(self, client, userdata, msg):
        """
        Handle an inbound message. Only topics of the form
        ``<base>/boards/<controller>/digitaloutput/<num>/command`` with an
        ``ON``/``OFF`` payload are acted upon; anything else is logged and dropped.
        """
        LOG.info("MQTT Message %s %s", msg.topic, str(msg.payload))
        if not msg.topic.startswith(self.mqtt_base_topic):
            return
        # Strip "<base>/" prefix, leaving boards/<name>/<component>/<num>/command.
        topic = msg.topic[len(self.mqtt_base_topic) + 1:]
        parts = topic.split("/")
        # Guard against malformed topics before indexing into the segments
        # (resolves the previous "check number of parts" TODO).
        if len(parts) < 4:
            LOG.warning("Message topic has too few segments: '%s'", topic)
            return
        controller_name = parts[1]
        component = parts[2]
        # Bug fix: validate the controller name BEFORE looking it up — the
        # original indexed the dict first, raising KeyError for unknown names.
        if controller_name not in self.service.controllers:
            LOG.warning("Message for unknown iocontroller '%s'", controller_name)
            return
        iocontroller = self.service.controllers[controller_name]
        if component not in ["digitaloutput"]:
            LOG.warning("Message for unknown component '%s'", component)
            return
        try:
            num = int(parts[3])
        except ValueError:
            LOG.warning("Output number is not an integer: '%s'", parts[3])
            return
        if num > iocontroller.num_digital_outputs:
            LOG.warning("Output too high for this board: %s", num)
            return
        action = msg.payload.decode('utf-8').strip().upper()
        if action not in ["OFF", "ON"]:
            LOG.warning("Unsupported action '%s'", action)
            return
        LOG.debug("Requesting %s %s %s %s %s", iocontroller, controller_name, component, num, action)
        iocontroller.request_digitaloutput(ComponentState(component="digitaloutput", num=num, status=action))

    def mqtt_publish_message(self, suffix, payload, qos=0):
        """Publish ``payload`` under ``<base topic>/<suffix>`` at the given QoS."""
        topic = "%s/%s" % (self.mqtt_base_topic, suffix)
        # Bug fix: honour the caller-supplied qos (previously hard-coded to 0).
        self.mqtt.publish(topic=topic, payload=payload, qos=qos)
        LOG.info("%s %s", topic, payload)

    def board_connection_event(self, name, event):
        """Publish a board connect/disconnect event."""
        self.mqtt_publish_message(suffix=f"boards/{name}/connection", payload=event)

    def board_io_event(self, name, state):
        """Publish an IO state change for one component of a board."""
        self.mqtt_publish_message(suffix=f"boards/{name}/{state.component}/{state.num}/status", payload=state.status)

    def board_status(self, name, raw_msg):
        # Placeholder: raw status frames from the board are currently ignored.
        pass

    def publish_status(self):
        """Publish the service version and uptime as JSON status topics."""
        status = {
            "version": version()
        }
        self.mqtt_publish_message(suffix="status", payload=json.dumps(status))
        uptime = {
            "minutes": self.service.uptime,
            "started": self.service.startup.isoformat()
        }
        self.mqtt_publish_message(suffix="uptime", payload=json.dumps(uptime))
| ioctlgw/mqttconnector.py | 4,395 | MQTT status jobs Start a background thread to maintain the MQTT connection Subscribe to interesting MQTT topics TODO: check number of parts | 139 | en | 0.697205 |
from tithiwa import *
# Demo 1: let Tithiwa create its own browser session.
tithiwabot = Tithiwa()
tithiwabot.open_session()
print("'" + tithiwabot.get_my_name() + "', '" + tithiwabot.get_my_about() + "'")
tithiwabot.quit()
# Demo 2: construct with a placeholder, then attach an externally-created
# browser before opening the session.
# NOTE(review): `browser = 3` looks like a stand-in value; the real driver is
# assigned on the next lines — confirm this is intentional.
browser = 3
#doing something else with browser
tithiwabot = Tithiwa(browser)
tithiwabot.browser = webdriver.Chrome()
tithiwabot.open_session()
print("'" + tithiwabot.get_my_name() + "', '" + tithiwabot.get_my_about() + "'")
tithiwabot.quit()
| tithiwa/temp.py | 414 | doing something else with browser | 33 | en | 0.905629 |
from flask import Flask
from flask import make_response
from flask import render_template
from flask import request
from flask import session
from blog_site.common.database import Database
from blog_site.webapp.models.blog import Blog
from blog_site.webapp.models.user import User
app = Flask(__name__)
# NOTE(review): secret key is hard-coded in source; move it to configuration
# or an environment variable before deploying.
app.secret_key = '\x1e\x14\xe6\xa0\xc5\xcc\xd9\x7f\xe5\xe8\x1cZ\xc5\xf2r\xb0W#\xed\xb6\xc8'
@app.route('/')
def home_temmplate():
    """Render the landing page.

    NOTE(review): the name misspells 'template'; renaming would change the
    Flask endpoint id, so it is left as-is.
    """
    return render_template("home.html")
@app.route('/login')
def login_template():
    """Render the login form."""
    return render_template("login.html")
@app.route('/register')
def register_template():
    """Render the registration form."""
    return render_template("register.html")
@app.before_first_request
def init_database():
    """Open the database connection before serving the first request.

    NOTE(review): before_first_request was removed in Flask 2.3 — this app
    presumably targets an older Flask; confirm before upgrading.
    """
    Database.initialize()
@app.route('/auth/login', methods=['POST'])
def login_user():
    """Authenticate the submitted credentials and render the profile page.

    On failure the session email is cleared and an error page is shown.
    Presumably User.login stores the email in the session — verify, since the
    success branch below reads session['email'].
    """
    email = request.form['email']
    password = request.form['password']
    if User.login_valid(email, password):
        User.login(email)
    else:
        session['email'] = None
        return render_template("login-error.html")
    return render_template("profile.html", email=session['email'])
@app.route('/auth/register', methods=['POST'])
def register_user():
    """Create a new account from the registration form.

    When the two password fields differ, the registration form is re-rendered
    without any visible error message (see TODO below).
    """
    email = request.form['email']
    password = request.form['password']
    confirm_password = request.form['confirm-password']
    if password == confirm_password:
        User.register(email, password)
    else:
        # mismatch passwords
        # TODO: Insert validation error
        return render_template("register.html")
    return render_template("register-success.html", email=session['email'])
@app.route('/blogs/<string:user_id>')
@app.route('/blogs')
def user_blogs(user_id=None):
    """List the blogs of the given user, or of the logged-in user.

    Falls back to the login page when no user can be resolved (the previous
    version crashed: KeyError when 'email' was absent from the session, and
    AttributeError on user.get_blogs() when no user matched).
    """
    if user_id is not None:
        user = User.get_by_id(user_id)
    elif session.get('email') is not None:
        user = User.get_by_email(session['email'])
    else:
        user = None
    if user is None:
        # No identifiable user — send the visitor to the login form.
        return render_template("login.html")
    blogs = user.get_blogs()
    return render_template("user_blogs.html", blogs=blogs, email=user.email)
# TODO: User should be authenticated first before navigating to the post
@app.route('/posts/<string:blog_id>/')
def blog_posts(blog_id):
    """Render all posts belonging to the blog identified by blog_id."""
    blog = Blog.from_mongo_in_blog_object(blog_id)
    posts = blog.get_post()
    return render_template("user_blog_posts.html", blog_title=blog.title, blog_id=blog_id, posts=posts)
@app.route('/blogs/new/', methods=['GET', 'POST'])
def create_new_blog():
    """Show the new-blog form (GET) or create a blog for the session user (POST)."""
    if request.method == 'GET':
        return render_template("new_blog.html")
    else:
        title = request.form['title']
        description = request.form['description']
        user = User.get_by_email(session['email'])
        new_blog = Blog(user.email, title, description, user._id)
        new_blog.save_to_mongo()
        # NOTE(review): blog_posts expects a blog id, but user._id is passed
        # here — looks like it should redirect to user_blogs instead; confirm.
        return make_response(blog_posts(user._id))
@app.route('/post/new/<string:blog_id>', methods=['GET', 'POST'])
def create_new_post(blog_id):
    """Show the new-post form (GET) or append a post to the blog (POST)."""
    if request.method == 'GET':
        return render_template("new_post.html", blog_id=blog_id)
    else:
        title = request.form['title']
        content = request.form['content']
        blog = Blog.from_mongo_in_blog_object(blog_id)
        blog.new_post(title, content)
        # Re-render the blog's post list after saving.
        return make_response(blog_posts(blog_id))
if __name__ == '__main__':
    # Bug fix: debug expects a bool — the string 'True' was merely truthy,
    # which works by accident and defeats explicit configuration.
    app.run(port=8660, debug=True)
| blog_site/webapp/web_app.py | 3,269 | mismatch passwords TODO: Insert validation error TODO: User should be authenticated first before navigating to the post | 119 | en | 0.575734 |
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Format strings for unique task-state identifiers:
# "<task>__r<route>" and "<task>__t<transition>".
TASK_STATE_ROUTE_FORMAT = "%s__r%s"
TASK_STATE_TRANSITION_FORMAT = "%s__t%s"
# Inbound-criteria evaluation states for a task.
INBOUND_CRITERIA_WIP = "inbound_criteria_wip"
INBOUND_CRITERIA_SATISFIED = "inbound_criteria_satisfied"
INBOUND_CRITERIA_NOT_SATISFIED = "inbound_criteria_not_satisfied"
| orquesta/constants.py | 835 | Copyright 2019 Extreme Networks, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 559 | en | 0.862032 |
import time
from grapht.graph import DictGraph, StreamGraph
__author__ = 'willmcginnis'
if __name__ == '__main__':
    # Demo 1: in-memory adjacency-list graph.
    print('DictGraph')
    g = {0: [2], 1: [2, 3], 2: [1, 3], 3: [1, 2, 4, 6], 4: [3, 5], 5: [4, 6, 7], 6: [3, 4, 5], 7: [5, 8], 8: [7], 9: [8], 10: [9], 11: [8, 9], 12: [11], 13: [12], 14: [13], 15: [1]}
    gp = DictGraph(g)
    print('Original Adjacency Matrix')
    print(gp.get_dense())
    print('Second Connections')
    print(gp.get_n_connection(n=2).toarray())
    print('Third Connections')
    print(gp.get_n_connection(n=3).toarray())
    # Demo 2: graph streamed out of a Postgres table.
    print('\n\nStream Graph')
    # NOTE: You'll need a graph in a postgres db to actually do this.
    gp2 = StreamGraph(max_dim=28000000)
    gp2.from_psql(username='postgres',
                  password='admin',
                  database='',
                  host='localhost',
                  schema='directed',
                  table='graph')
    print('Number of non-zero elements')
    edges = gp2.get_nnz()
    print(edges)
    # Time the 2nd-degree connection computation on the large graph.
    print('Calculating 2nd Degree connections for a %s edge graph' % (edges, ))
    start_time = time.time()
    temp = gp2.get_n_connection(n=2)
    elapsed = time.time() - start_time
    print('TIME: %s' % (str(elapsed), ))
    print('\nMost Connected N')
    res = gp2.most_connected_n(n=25)
    print(res)
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class AuditEvent_EntitySchema:
    """
    A record of an event made for purposes of maintaining a security log. Typical
    uses include detection of intrusion attempts and monitoring for inappropriate
    usage.
    """
    # noinspection PyDefaultArgument
    @staticmethod
    def get_schema(
        max_nesting_depth: Optional[int] = 6,
        nesting_depth: int = 0,
        nesting_list: List[str] = [],
        max_recursion_limit: Optional[int] = 2,
        include_extension: Optional[bool] = False,
        extension_fields: Optional[List[str]] = [
            "valueBoolean",
            "valueCode",
            "valueDate",
            "valueDateTime",
            "valueDecimal",
            "valueId",
            "valueInteger",
            "valuePositiveInt",
            "valueString",
            "valueTime",
            "valueUnsignedInt",
            "valueUri",
            "valueQuantity",
        ],
        extension_depth: int = 0,
        max_extension_depth: Optional[int] = 2,
    ) -> Union[StructType, DataType]:
        """
        A record of an event made for purposes of maintaining a security log. Typical
        uses include detection of intrusion attempts and monitoring for inappropriate
        usage.
        id: unique id for the element within a resource (for internal references). This
        may be any string value that does not contain spaces.
        extension: May be used to represent additional information that is not part of the basic
        definition of the element. In order to make the use of extensions safe and
        manageable, there is a strict set of governance applied to the definition and
        use of extensions. Though any implementer is allowed to define an extension,
        there is a set of requirements that SHALL be met as part of the definition of
        the extension.
        identifier: Identifies a specific instance of the entity. The reference should always be
        version specific.
        reference: Identifies a specific instance of the entity. The reference should be version
        specific.
        type: The type of the object that was involved in this audit event.
        role: Code representing the role the entity played in the event being audited.
        lifecycle: Identifier for the data life-cycle stage for the entity.
        securityLabel: Security labels for the identified entity.
        name: A name of the entity in the audit event.
        description: Text that describes the entity in more detail.
        query: The query parameters for a query-type entities.
        detail: Tagged value pairs for conveying additional information about the entity.
        """
        from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
        from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema
        from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema
        from spark_fhir_schemas.stu3.complex_types.coding import CodingSchema
        from spark_fhir_schemas.stu3.complex_types.auditevent_detail import (
            AuditEvent_DetailSchema,
        )
        # Truncate recursion/nesting to an id-only struct once the limits are hit.
        if (
            max_recursion_limit
            and nesting_list.count("AuditEvent_Entity") >= max_recursion_limit
        ) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
            return StructType([StructField("id", StringType(), True)])
        # add my name to recursion list for later
        my_nesting_list: List[str] = nesting_list + ["AuditEvent_Entity"]
        schema = StructType(
            [
                # unique id for the element within a resource (for internal references). This
                # may be any string value that does not contain spaces.
                StructField("id", StringType(), True),
                # May be used to represent additional information that is not part of the basic
                # definition of the element. In order to make the use of extensions safe and
                # manageable, there is a strict set of governance applied to the definition and
                # use of extensions. Though any implementer is allowed to define an extension,
                # there is a set of requirements that SHALL be met as part of the definition of
                # the extension.
                StructField(
                    "extension",
                    ArrayType(
                        ExtensionSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                        )
                    ),
                    True,
                ),
                # Identifies a specific instance of the entity. The reference should always be
                # version specific.
                StructField(
                    "identifier",
                    IdentifierSchema.get_schema(
                        max_nesting_depth=max_nesting_depth,
                        nesting_depth=nesting_depth + 1,
                        nesting_list=my_nesting_list,
                        max_recursion_limit=max_recursion_limit,
                        include_extension=include_extension,
                        extension_fields=extension_fields,
                        extension_depth=extension_depth + 1,
                        max_extension_depth=max_extension_depth,
                    ),
                    True,
                ),
                # Identifies a specific instance of the entity. The reference should be version
                # specific.
                StructField(
                    "reference",
                    ReferenceSchema.get_schema(
                        max_nesting_depth=max_nesting_depth,
                        nesting_depth=nesting_depth + 1,
                        nesting_list=my_nesting_list,
                        max_recursion_limit=max_recursion_limit,
                        include_extension=include_extension,
                        extension_fields=extension_fields,
                        extension_depth=extension_depth + 1,
                        max_extension_depth=max_extension_depth,
                    ),
                    True,
                ),
                # The type of the object that was involved in this audit event.
                StructField(
                    "type",
                    CodingSchema.get_schema(
                        max_nesting_depth=max_nesting_depth,
                        nesting_depth=nesting_depth + 1,
                        nesting_list=my_nesting_list,
                        max_recursion_limit=max_recursion_limit,
                        include_extension=include_extension,
                        extension_fields=extension_fields,
                        extension_depth=extension_depth + 1,
                        max_extension_depth=max_extension_depth,
                    ),
                    True,
                ),
                # Code representing the role the entity played in the event being audited.
                StructField(
                    "role",
                    CodingSchema.get_schema(
                        max_nesting_depth=max_nesting_depth,
                        nesting_depth=nesting_depth + 1,
                        nesting_list=my_nesting_list,
                        max_recursion_limit=max_recursion_limit,
                        include_extension=include_extension,
                        extension_fields=extension_fields,
                        extension_depth=extension_depth + 1,
                        max_extension_depth=max_extension_depth,
                    ),
                    True,
                ),
                # Identifier for the data life-cycle stage for the entity.
                StructField(
                    "lifecycle",
                    CodingSchema.get_schema(
                        max_nesting_depth=max_nesting_depth,
                        nesting_depth=nesting_depth + 1,
                        nesting_list=my_nesting_list,
                        max_recursion_limit=max_recursion_limit,
                        include_extension=include_extension,
                        extension_fields=extension_fields,
                        extension_depth=extension_depth + 1,
                        max_extension_depth=max_extension_depth,
                    ),
                    True,
                ),
                # Security labels for the identified entity.
                StructField(
                    "securityLabel",
                    ArrayType(
                        CodingSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                        )
                    ),
                    True,
                ),
                # A name of the entity in the audit event.
                StructField("name", StringType(), True),
                # Text that describes the entity in more detail.
                StructField("description", StringType(), True),
                # The query parameters for a query-type entities.
                StructField("query", StringType(), True),
                # Tagged value pairs for conveying additional information about the entity.
                StructField(
                    "detail",
                    ArrayType(
                        AuditEvent_DetailSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                        )
                    ),
                    True,
                ),
            ]
        )
        # When extensions are excluded, collapse the extension subtree to a string.
        if not include_extension:
            schema.fields = [
                c
                if c.name != "extension"
                else StructField("extension", StringType(), True)
                for c in schema.fields
            ]
        return schema
| spark_fhir_schemas/stu3/complex_types/auditevent_entity.py | 11,448 | A record of an event made for purposes of maintaining a security log. Typical
uses include detection of intrusion attempts and monitoring for inappropriate
usage.
A record of an event made for purposes of maintaining a security log. Typical
uses include detection of intrusion attempts and monitoring for inappropriate
usage.
id: unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
identifier: Identifies a specific instance of the entity. The reference should always be
version specific.
reference: Identifies a specific instance of the entity. The reference should be version
specific.
type: The type of the object that was involved in this audit event.
role: Code representing the role the entity played in the event being audited.
lifecycle: Identifier for the data life-cycle stage for the entity.
securityLabel: Security labels for the identified entity.
name: A name of the entity in the audit event.
description: Text that describes the entity in more detail.
query: The query parameters for a query-type entities.
detail: Tagged value pairs for conveying additional information about the entity.
This file is auto-generated by generate_schema so do not edit manually noinspection PyPep8Naming noinspection PyDefaultArgument add my name to recursion list for later unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces. May be used to represent additional information that is not part of the basic definition of the element. In order to make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Identifies a specific instance of the entity. The reference should always be version specific. Identifies a specific instance of the entity. The reference should be version specific. The type of the object that was involved in this audit event. Code representing the role the entity played in the event being audited. Identifier for the data life-cycle stage for the entity. Security labels for the identified entity. A name of the entity in the audit event. Text that describes the entity in more detail. The query parameters for a query-type entities. Tagged value pairs for conveying additional information about the entity. | 2,970 | en | 0.893216 |
# Standard library
from unittest import mock
# Third-party
import polib
from django.test import TestCase, override_settings
from django.utils.translation import override
# First-party/Local
from i18n import DEFAULT_LANGUAGE_CODE
from licenses import FREEDOM_LEVEL_MAX, FREEDOM_LEVEL_MID, FREEDOM_LEVEL_MIN
from licenses.models import LegalCode, License
from licenses.tests.factories import (
LegalCodeFactory,
LicenseFactory,
TranslationBranchFactory,
)
from licenses.tests.test_transifex import TEST_TRANSIFEX_SETTINGS
from licenses.transifex import TransifexHelper
class LegalCodeQuerySetTest(TestCase):
    """Tests for the custom LegalCode queryset filters."""
    def test_translated(self):
        # 4.0 and CC0 licenses are translatable; 3.0 (ported or not) is not.
        bylicense30ported = LicenseFactory(
            license_code="by-nc", version="3.0", jurisdiction_code="ar"
        )
        bylicense30unported = LicenseFactory(
            license_code="by-nc", version="3.0", jurisdiction_code=""
        )
        bylicense40 = LicenseFactory(
            license_code="by-nc", version="4.0", jurisdiction_code=""
        )
        cc0v1license = LicenseFactory(
            license_code="CC0", version="1.0", jurisdiction_code=""
        )
        should_be_translated = [
            LegalCodeFactory(license=bylicense40),
            LegalCodeFactory(license=cc0v1license),
        ]
        should_not_be_translated = [
            LegalCodeFactory(license=bylicense30ported),
            LegalCodeFactory(license=bylicense30unported),
        ]
        self.assertCountEqual(
            should_be_translated, list(LegalCode.objects.translated())
        )
        self.assertCountEqual(
            should_not_be_translated,
            set(LegalCode.objects.all()) - set(LegalCode.objects.translated()),
        )
    def test_valid(self):
        # "by-*" 3.0/4.0 and CC0 1.0 are valid; the "xyz" license codes are not.
        bylicense30ported = LicenseFactory(
            license_code="by-nc", version="3.0", jurisdiction_code="ar"
        )
        bylicense30unported = LicenseFactory(
            license_code="by-nc", version="3.0", jurisdiction_code=""
        )
        nonbylicense30ported = LicenseFactory(
            license_code="xyz", version="3.0", jurisdiction_code="ar"
        )
        nonbylicense30unported = LicenseFactory(
            license_code="xyz", version="3.0", jurisdiction_code=""
        )
        bylicense40 = LicenseFactory(
            license_code="by-nc", version="4.0", jurisdiction_code=""
        )
        nonbylicense40 = LicenseFactory(
            license_code="xyz", version="4.0", jurisdiction_code=""
        )
        cc0v1license = LicenseFactory(
            license_code="CC0", version="1.0", jurisdiction_code=""
        )
        noncc0v1license = LicenseFactory(
            license_code="xyz", version="1.0", jurisdiction_code=""
        )
        # Test valid()
        should_be_valid = [
            LegalCodeFactory(license=bylicense30ported),
            LegalCodeFactory(license=bylicense30unported),
            LegalCodeFactory(license=bylicense40),
            LegalCodeFactory(license=cc0v1license),
        ]
        should_not_be_valid = [
            LegalCodeFactory(license=nonbylicense30ported),
            LegalCodeFactory(license=nonbylicense30unported),
            LegalCodeFactory(license=nonbylicense40),
            LegalCodeFactory(license=noncc0v1license),
        ]
        self.assertCountEqual(should_be_valid, list(LegalCode.objects.valid()))
        self.assertCountEqual(
            should_not_be_valid,
            set(LegalCode.objects.all()) - set(LegalCode.objects.valid()),
        )
        # Test validgroups()
        # The union of all validgroups() buckets must equal valid().
        self.assertCountEqual(
            should_be_valid,
            list(LegalCode.objects.validgroups()["by4.0"])
            + list(LegalCode.objects.validgroups()["by3.0"])
            + list(LegalCode.objects.validgroups()["zero1.0"]),
        )
        self.assertCountEqual(
            should_not_be_valid,
            set(LegalCode.objects.all())
            - set(
                list(LegalCode.objects.validgroups()["by4.0"])
                + list(LegalCode.objects.validgroups()["by3.0"])
                + list(LegalCode.objects.validgroups()["zero1.0"])
            ),
        )
class LegalCodeModelTest(TestCase):
def test_str(self):
LegalCodeFactory()
legal_code = LegalCode.objects.first()
self.assertEqual(
str(legal_code),
f"LegalCode<{legal_code.language_code},"
f" {str(legal_code.license)}>",
)
def test_translation_domain(self):
data = [
# (expected, license_code, version, jurisdiction, language)
("by-sa_30", "by-sa", "3.0", "", "fr"),
("by-sa_30_xx", "by-sa", "3.0", "xx", "fr"),
]
for expected, license_code, version, jurisdiction, language in data:
with self.subTest(expected):
legalcode = LegalCodeFactory(
license__license_code=license_code,
license__version=version,
license__jurisdiction_code=jurisdiction,
language_code=language,
)
self.assertEqual(expected, legalcode.translation_domain)
@override_settings(DATA_REPOSITORY_DIR="/foo")
def test_translation_filename(self):
data = [
# (expected, license_code, version, jurisdiction, language)
(
"/foo/legalcode/de/LC_MESSAGES/by-sa_03.po",
"by-sa",
"0.3",
"",
"de",
),
(
"/foo/legalcode/de/LC_MESSAGES/by-sa_03_xx.po",
"by-sa",
"0.3",
"xx",
"de",
),
]
for expected, license_code, version, jurisdiction, language in data:
with self.subTest(expected):
license = LicenseFactory(
license_code=license_code,
version=version,
jurisdiction_code=jurisdiction,
)
self.assertEqual(
expected,
LegalCodeFactory(
license=license, language_code=language
).translation_filename(),
)
def test_plain_text_url(self):
lc = LegalCodeFactory(
license__license_code="by",
license__version="4.0",
license__jurisdiction_code="",
language_code="en",
)
lc1 = LegalCodeFactory(
license__license_code="by",
license__version="4.0",
license__jurisdiction_code="",
language_code="fr",
)
lc2 = LegalCodeFactory(
license__license_code="by",
license__version="4.0",
license__jurisdiction_code="",
language_code="ar",
)
self.assertEqual(lc.plain_text_url, f"{lc.license_url}/index.txt")
self.assertEqual(lc1.plain_text_url, f"{lc1.license_url}.txt")
self.assertEqual(lc2.plain_text_url, f"{lc2.license_url}.txt")
def test_get_pofile(self):
legalcode = LegalCodeFactory()
test_pofile = polib.POFile()
test_translation_filename = "/dev/null"
with mock.patch.object(LegalCode, "translation_filename") as mock_tf:
mock_tf.return_value = test_translation_filename
with mock.patch.object(polib, "pofile") as mock_pofile:
mock_pofile.return_value = test_pofile
result = legalcode.get_pofile()
mock_pofile.assert_called_with("", encoding="utf-8")
self.assertEqual(test_pofile, result)
@override_settings(DATA_REPOSITORY_DIR="/some/dir")
def test_get_english_pofile(self):
legalcode = LegalCodeFactory(language_code="es")
legalcode_en = LegalCodeFactory(
license=legalcode.license, language_code=DEFAULT_LANGUAGE_CODE
)
test_pofile = polib.POFile()
with mock.patch.object(
License, "get_legalcode_for_language_code"
) as mock_glfl:
mock_glfl.return_value = legalcode_en
with mock.patch.object(legalcode_en, "get_pofile") as mock_gp:
mock_gp.return_value = test_pofile
self.assertEqual(test_pofile, legalcode.get_english_pofile())
self.assertEqual(
test_pofile, legalcode_en.get_english_pofile()
)
mock_glfl.assert_called_with(DEFAULT_LANGUAGE_CODE)
mock_gp.assert_called_with()
@override_settings(DATA_REPOSITORY_DIR="/some/dir")
def test_get_translation_object(self):
# get_translation_object on the model calls the
# i18n.utils.get_translation_object.
legalcode = LegalCodeFactory(
license__version="4.0",
license__license_code="by-sa",
language_code="de",
)
with mock.patch("licenses.models.get_translation_object") as mock_djt:
legalcode.get_translation_object()
mock_djt.assert_called_with(
domain="by-sa_40", django_language_code="de"
)
def test_branch_name(self):
legalcode = LegalCodeFactory(
license__version="4.0",
license__license_code="by-sa",
language_code="de",
)
self.assertEqual("cc4-de", legalcode.branch_name())
legalcode = LegalCodeFactory(
license__version="3.5",
license__license_code="other",
language_code="de",
)
self.assertEqual("other-35-de", legalcode.branch_name())
legalcode = LegalCodeFactory(
license__version="3.5",
license__license_code="other",
language_code="de",
license__jurisdiction_code="xyz",
)
self.assertEqual("other-35-de-xyz", legalcode.branch_name())
def test_has_english(self):
license = LicenseFactory()
lc_fr = LegalCodeFactory(license=license, language_code="fr")
self.assertFalse(lc_fr.has_english())
lc_en = LegalCodeFactory(license=license, language_code="en")
self.assertTrue(lc_fr.has_english())
self.assertTrue(lc_en.has_english())
    def _test_get_deed_or_license_path(self, data):
        """Helper: verify get_file_and_links() for a batch of licenses.

        ``data`` is a list of tuples:
        (version, license_code, jurisdiction_code, language_code,
         expected_deed_path, expected_deed_symlinks,
         expected_license_path, expected_license_symlinks)
        """
        for (
            version,
            license_code,
            jurisdiction_code,
            language_code,
            expected_deed_path,
            expected_deed_symlinks,
            expected_license_path,
            expected_license_symlinks,
        ) in data:
            license = LicenseFactory(
                license_code=license_code,
                version=version,
                jurisdiction_code=jurisdiction_code,
            )
            legalcode = LegalCodeFactory(
                license=license, language_code=language_code
            )
            self.assertEqual(
                [expected_deed_path, expected_deed_symlinks],
                legalcode.get_file_and_links("deed"),
            )
            self.assertEqual(
                [expected_license_path, expected_license_symlinks],
                legalcode.get_file_and_links("legalcode"),
            )
    def test_get_deed_or_license_path_by4(self):
        """
        4.0 formula:
        /licenses/VERSION/LICENSE_deed_LANGAUGE.html
        /licenses/VERSION/LICENSE_legalcode_LANGAUGEhtml
        4.0 examples:
        /licenses/4.0/by-nc-nd_deed_en.html
        /licenses/4.0/by-nc-nd_legalcode_en.html
        /licenses/4.0/by_deed_en.html
        /licenses/4.0/by_legalcode_en.html
        /licenses/4.0/by_deed_zh-Hans.html
        /licenses/4.0/by_legalcode_zh-Hans.html
        """
        # English translations get deed/index/legalcode symlinks.
        self._test_get_deed_or_license_path(
            [
                (
                    "4.0",
                    "by-nc-nd",
                    "",
                    "en",
                    "licenses/by-nc-nd/4.0/deed.en.html",
                    ["deed.html", "index.html"],
                    "licenses/by-nc-nd/4.0/legalcode.en.html",
                    ["legalcode.html"],
                ),
                (
                    "4.0",
                    "by",
                    "",
                    "en",
                    "licenses/by/4.0/deed.en.html",
                    ["deed.html", "index.html"],
                    "licenses/by/4.0/legalcode.en.html",
                    ["legalcode.html"],
                ),
            ]
        )
        # Non-English translations get no symlinks.
        self._test_get_deed_or_license_path(
            [
                (
                    "4.0",
                    "by",
                    "",
                    "zh-Hans",
                    "licenses/by/4.0/deed.zh-Hans.html",
                    [],
                    "licenses/by/4.0/legalcode.zh-Hans.html",
                    [],
                ),
            ]
        )
    def test_get_deed_or_license_path_by3(self):
        """
        3.0 formula:
        /licenses/VERSION/JURISDICTION/LICENSE_deed_LANGAUGE.html
        /licenses/VERSION/JURISDICTION/LICENSE_legalcode_LANGAUGE.html
        3.0 examples:
        /licenses/3.0/xu/by_deed_en.html
        /licenses/3.0/xu/by_legalcode_en.html
        /licenses/3.0/am/by_deed_hy.html
        /licenses/3.0/am/by_legalcode_hy.html
        /licenses/3.0/rs/by_deed_rs-Cyrl.html
        /licenses/3.0/rs/by_legalcode_rs-Cyrl.html
        For jurisdiction, I used "xu" to mean "unported".
        See https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2#User-assigned_code_elements. # noqa: E501
        """
        # Unported
        self._test_get_deed_or_license_path(
            [
                (
                    "3.0",
                    "by",
                    "",
                    "en",
                    "licenses/by/3.0/xu/deed.en.html",
                    [
                        "../licenses/by/3.0/xu/deed.en.html",
                        "../deed.html",
                        "../index.html",
                    ],
                    "licenses/by/3.0/xu/legalcode.en.html",
                    [
                        "../licenses/by/3.0/xu/legalcode.en.html",
                        "../legalcode.html",
                    ],
                ),
            ]
        )
        # Ported with multiple languages
        self._test_get_deed_or_license_path(
            [
                (
                    "3.0",
                    "by",
                    "ca",
                    "en",
                    "licenses/by/3.0/ca/deed.en.html",
                    ["deed.html", "index.html"],
                    "licenses/by/3.0/ca/legalcode.en.html",
                    ["legalcode.html"],
                ),
            ]
        )
        # Non-English ported translation: no symlinks.
        self._test_get_deed_or_license_path(
            [
                (
                    "3.0",
                    "by-sa",
                    "ca",
                    "fr",
                    "licenses/by-sa/3.0/ca/deed.fr.html",
                    [],
                    "licenses/by-sa/3.0/ca/legalcode.fr.html",
                    [],
                ),
            ]
        )
        # Ported with single language
        self._test_get_deed_or_license_path(
            [
                (
                    "3.0",
                    "by-nc-nd",
                    "am",
                    "hy",
                    "licenses/by-nc-nd/3.0/am/deed.hy.html",
                    ["deed.html", "index.html"],
                    "licenses/by-nc-nd/3.0/am/legalcode.hy.html",
                    ["legalcode.html"],
                ),
            ]
        )
    def test_get_deed_or_license_path_cc0(self):
        """
        cc0 formula:
        /publicdomain/VERSION/LICENSE_deed_LANGAUGE.html
        /publicdomain/VERSION/LICENSE_legalcode_LANGAUGE.html
        cc0 examples:
        /publicdomain/1.0/zero_deed_en.html
        /publicdomain/1.0/zero_legalcode_en.html
        /publicdomain/1.0/zero_deed_ja.html
        /publicdomain/1.0/zero_legalcode_ja.html
        """
        # English CC0: lives under publicdomain/zero/, with symlinks.
        self._test_get_deed_or_license_path(
            [
                (
                    "1.0",
                    "CC0",
                    "",
                    "en",
                    "publicdomain/zero/1.0/deed.en.html",
                    ["deed.html", "index.html"],
                    "publicdomain/zero/1.0/legalcode.en.html",
                    ["legalcode.html"],
                ),
            ]
        )
        # Non-English CC0 translation: no symlinks.
        self._test_get_deed_or_license_path(
            [
                (
                    "1.0",
                    "CC0",
                    "",
                    "ja",
                    "publicdomain/zero/1.0/deed.ja.html",
                    [],
                    "publicdomain/zero/1.0/legalcode.ja.html",
                    [],
                ),
            ]
        )
class LicenseModelTest(TestCase):
    """Tests for the License model: code-derived flags, metadata export,
    logo selection, legalcode lookup, naming helpers and Transifex upload."""
    def test_nc(self):
        # "nc" (noncommercial) flag is derived from the license code.
        self.assertFalse(LicenseFactory(license_code="xyz").nc)
        self.assertTrue(LicenseFactory(license_code="by-nc-xyz").nc)
    def test_nd(self):
        # "nd" (no derivatives) flag is derived from the license code.
        self.assertFalse(LicenseFactory(license_code="xyz").nd)
        self.assertTrue(LicenseFactory(license_code="by-nd-xyz").nd)
    def test_sa(self):
        # "sa" (share-alike) flag is derived from the license code.
        self.assertFalse(LicenseFactory(license_code="xyz").sa)
        self.assertTrue(LicenseFactory(license_code="xyz-sa").sa)
    def test_get_metadata(self):
        """get_metadata() includes jurisdiction and per-language URLs."""
        # Ported
        license = LicenseFactory(
            **{
                "license_code": "by-nc",
                "version": "3.0",
                "title_english": "The Title",
                "jurisdiction_code": "xyz",
                "permits_derivative_works": False,
                "permits_reproduction": False,
                "permits_distribution": True,
                "permits_sharing": True,
                "requires_share_alike": True,
                "requires_notice": True,
                "requires_attribution": True,
                "requires_source_code": True,
                "prohibits_commercial_use": True,
                "prohibits_high_income_nation_use": False,
            }
        )
        LegalCodeFactory(license=license, language_code="pt")
        LegalCodeFactory(license=license, language_code="en")
        data = license.get_metadata()
        expected_data = {
            "jurisdiction": "xyz",
            "license_code": "by-nc",
            "permits_derivative_works": False,
            "permits_distribution": True,
            "permits_reproduction": False,
            "permits_sharing": True,
            "prohibits_commercial_use": True,
            "prohibits_high_income_nation_use": False,
            "requires_attribution": True,
            "requires_notice": True,
            "requires_share_alike": True,
            "requires_source_code": True,
            "title_english": "The Title",
            "translations": {
                # English is the default language: no suffix on the URLs.
                "en": {
                    "deed": "/licenses/by-nc/3.0/xyz/",
                    "license": "/licenses/by-nc/3.0/xyz/legalcode",
                    "title": "The Title",
                },
                "pt": {
                    "deed": "/licenses/by-nc/3.0/xyz/deed.pt",
                    "license": "/licenses/by-nc/3.0/xyz/legalcode.pt",
                    "title": "The Title",
                },
            },
            "version": "3.0",
        }
        for key in expected_data.keys():
            self.assertEqual(expected_data[key], data[key])
        # Unported: no "jurisdiction" key is expected in the metadata.
        license = LicenseFactory(
            **{
                "license_code": "by-nc",
                "version": "3.0",
                "title_english": "The Title",
                "jurisdiction_code": "",
                "permits_derivative_works": False,
                "permits_reproduction": False,
                "permits_distribution": True,
                "permits_sharing": True,
                "requires_share_alike": True,
                "requires_notice": True,
                "requires_attribution": True,
                "requires_source_code": True,
                "prohibits_commercial_use": True,
                "prohibits_high_income_nation_use": False,
            }
        )
        LegalCodeFactory(license=license, language_code="en")
        data = license.get_metadata()
        expected_data = {
            "license_code": "by-nc",
            "permits_derivative_works": False,
            "permits_distribution": True,
            "permits_reproduction": False,
            "permits_sharing": True,
            "prohibits_commercial_use": True,
            "prohibits_high_income_nation_use": False,
            "requires_attribution": True,
            "requires_notice": True,
            "requires_share_alike": True,
            "requires_source_code": True,
            "title_english": "The Title",
            "translations": {
                "en": {
                    "deed": "/licenses/by-nc/3.0/",
                    "license": "/licenses/by-nc/3.0/legalcode",
                    "title": "The Title",
                },
            },
            "version": "3.0",
        }
        for key in expected_data.keys():
            self.assertEqual(expected_data[key], data[key])
    def test_logos(self):
        """logos() is driven by the license flags, not just the code."""
        # Every license includes "cc-logo"
        self.assertIn("cc-logo", LicenseFactory().logos())
        self.assertEqual(
            ["cc-logo", "cc-zero"], LicenseFactory(license_code="CC0").logos()
        )
        self.assertEqual(
            ["cc-logo", "cc-by"],
            LicenseFactory(
                license_code="by",
                version="4.0",
                prohibits_commercial_use=False,
                requires_share_alike=False,
                permits_derivative_works=True,
            ).logos(),
        )
        self.assertEqual(
            ["cc-logo", "cc-by", "cc-nc"],
            LicenseFactory(
                license_code="by-nc",
                version="3.0",
                prohibits_commercial_use=True,
                requires_share_alike=False,
                permits_derivative_works=True,
            ).logos(),
        )
        self.assertEqual(
            ["cc-logo", "cc-by", "cc-nd"],
            LicenseFactory(
                license_code="by-nd",
                version="4.0",
                prohibits_commercial_use=False,
                requires_share_alike=False,
                permits_derivative_works=False,
            ).logos(),
        )
        self.assertEqual(
            ["cc-logo", "cc-by", "cc-sa"],
            LicenseFactory(
                license_code="by-sa",
                version="4.0",
                prohibits_commercial_use=False,
                requires_share_alike=True,
                permits_derivative_works=True,
            ).logos(),
        )
        self.assertEqual(
            ["cc-logo", "cc-by", "cc-nc", "cc-sa"],
            LicenseFactory(
                license_code="by-nc-sa",
                version="4.0",
                prohibits_commercial_use=True,
                requires_share_alike=True,
                permits_derivative_works=True,
            ).logos(),
        )
        self.assertEqual(
            ["cc-logo", "cc-by", "cc-nc", "cc-sa"],
            LicenseFactory(
                license_code="by-nc-sa",
                version="3.0",
                prohibits_commercial_use=True,
                requires_share_alike=True,
                permits_derivative_works=True,
            ).logos(),
        )
    def test_get_legalcode_for_language_code(self):
        """Lookup falls back to the active language, and normalizes
        "en-us"-style codes; unknown underscore codes raise DoesNotExist."""
        license = LicenseFactory()
        lc_pt = LegalCodeFactory(license=license, language_code="pt")
        lc_en = LegalCodeFactory(license=license, language_code="en")
        with override(language="pt"):
            result = license.get_legalcode_for_language_code(None)
            self.assertEqual(lc_pt.id, result.id)
        result = license.get_legalcode_for_language_code("pt")
        self.assertEqual(lc_pt.id, result.id)
        result = license.get_legalcode_for_language_code("en")
        self.assertEqual(lc_en.id, result.id)
        with self.assertRaises(LegalCode.DoesNotExist):
            license.get_legalcode_for_language_code("en_us")
        result = license.get_legalcode_for_language_code("en-us")
        self.assertEqual(lc_en.id, result.id)
    def test_resource_name(self):
        # Uppercased "CODE VERSION [JURISDICTION]".
        license = LicenseFactory(
            license_code="qwerty", version="2.7", jurisdiction_code="zys"
        )
        self.assertEqual("QWERTY 2.7 ZYS", license.resource_name)
        license = LicenseFactory(
            license_code="qwerty", version="2.7", jurisdiction_code=""
        )
        self.assertEqual("QWERTY 2.7", license.resource_name)
    def test_resource_slug(self):
        # Lowercased, underscore-joined, version dots removed.
        license = LicenseFactory(
            license_code="qwerty", version="2.7", jurisdiction_code="zys"
        )
        self.assertEqual("qwerty_27_zys", license.resource_slug)
        license = LicenseFactory(
            license_code="qwerty", version="2.7", jurisdiction_code=""
        )
        self.assertEqual("qwerty_27", license.resource_slug)
    def test_str(self):
        license = LicenseFactory(
            license_code="bx-oh", version="1.3", jurisdiction_code="any"
        )
        self.assertEqual(
            str(license),
            f"License<{license.license_code},{license.version},"
            f"{license.jurisdiction_code}>",
        )
    def test_rdf(self):
        # RDF output is a placeholder for now.
        license = LicenseFactory(
            license_code="bx-oh", version="1.3", jurisdiction_code="any"
        )
        self.assertEqual("RDF Generation Not Implemented", license.rdf())
    # def test_default_language_code(self):
    #     license = LicenseFactory(
    #         license_code="bx-oh", version="1.3", jurisdiction_code=""
    #     )
    #     self.assertEqual(
    #         DEFAULT_LANGUAGE_CODE, license.default_language_code()
    #     )
    #     license = LicenseFactory(
    #         license_code="bx-oh", version="1.3", jurisdiction_code="fr"
    #     )
    #     self.assertEqual("fr", license.default_language_code())
    #
    # def test_get_deed_url(self):
    #     # https://creativecommons.org/licenses/by-sa/4.0/
    #     # https://creativecommons.org/licenses/by-sa/4.0/deed.es
    #     # https://creativecommons.org/licenses/by/3.0/es/
    #     # https://creativecommons.org/licenses/by/3.0/es/deed.fr
    #     license = LicenseFactory(
    #         license_code="bx-oh", version="1.3", jurisdiction_code="ae"
    #     )
    #     self.assertEqual("/licenses/bx-oh/1.3/ae/", license.deed_url)
    #     license = LicenseFactory(
    #         license_code="bx-oh", version="1.3", jurisdiction_code=""
    #     )
    #     self.assertEqual("/licenses/bx-oh/1.3/", license.deed_url)
    #
    # def test_get_deed_url_for_language(self):
    #     license = LicenseFactory(
    #         license_code="bx-oh", version="1.3", jurisdiction_code="ae"
    #     )
    #     self.assertEqual(
    #         "/licenses/bx-oh/1.3/ae/deed.fr",
    #         license.get_deed_url_for_language("fr"),
    #     )
    #     license = LicenseFactory(
    #         license_code="bx-oh", version="1.3", jurisdiction_code=""
    #     )
    #     self.assertEqual(
    #         "/licenses/bx-oh/1.3/deed.es",
    #         license.get_deed_url_for_language("es"),
    #     )
    def test_sampling_plus(self):
        # sampling_plus requires the literal "sampling+" token in the code.
        self.assertTrue(
            LicenseFactory(license_code="nc-sampling+").sampling_plus
        )
        self.assertTrue(LicenseFactory(license_code="sampling+").sampling_plus)
        self.assertFalse(LicenseFactory(license_code="sampling").sampling_plus)
        self.assertFalse(LicenseFactory(license_code="MIT").sampling_plus)
        self.assertFalse(
            LicenseFactory(license_code="by-nc-nd-sa").sampling_plus
        )
    def test_level_of_freedom(self):
        data = [
            ("by", FREEDOM_LEVEL_MAX),
            ("devnations", FREEDOM_LEVEL_MIN),
            ("sampling", FREEDOM_LEVEL_MIN),
            ("sampling+", FREEDOM_LEVEL_MID),
            ("by-nc", FREEDOM_LEVEL_MID),
            ("by-nd", FREEDOM_LEVEL_MID),
            ("by-sa", FREEDOM_LEVEL_MAX),
        ]
        for license_code, expected_freedom in data:
            with self.subTest(license_code):
                license = LicenseFactory(license_code=license_code)
                self.assertEqual(expected_freedom, license.level_of_freedom)
    @override_settings(
        TRANSIFEX=TEST_TRANSIFEX_SETTINGS,
        DATA_REPOSITORY_DIR="/trans/repo",
    )
    def test_tx_upload_messages(self):
        """tx_upload_messages() uploads the English legalcode's pofile."""
        language_code = "es"
        legalcode = LegalCodeFactory(language_code=language_code)
        license = legalcode.license
        test_pofile = polib.POFile()
        with mock.patch.object(
            license, "get_legalcode_for_language_code"
        ) as mock_glflc:
            mock_glflc.return_value = legalcode
            with mock.patch.object(
                TransifexHelper, "upload_messages_to_transifex"
            ) as mock_umtt:
                with mock.patch.object(
                    LegalCode, "get_pofile"
                ) as mock_get_pofile:
                    mock_get_pofile.return_value = test_pofile
                    license.tx_upload_messages()
        mock_glflc.assert_called_with("en")
        mock_umtt.assert_called_with(legalcode=legalcode)
    def test_superseded(self):
        # A license is superseded when something replaces it.
        lic1 = LicenseFactory()
        lic2 = LicenseFactory(is_replaced_by=lic1)
        self.assertTrue(lic2.superseded)
        self.assertFalse(lic1.superseded)
class TranslationBranchModelTest(TestCase):
    """Tests for the TranslationBranch model."""
    def test_str(self):
        tc = TranslationBranchFactory(complete=False)
        expected = f"Translation branch {tc.branch_name}. In progress."
        self.assertEqual(expected, str(tc))
    def test_stats(self):
        """stats aggregates pofile entry counts across legalcodes."""
        language_code = "es"
        lc1 = LegalCodeFactory(language_code=language_code)
        tb = TranslationBranchFactory(
            language_code=language_code, legalcodes=[lc1]
        )
        class MockPofile(list):
            # 5 untranslated + 3 translated = 8 total; 3/8 -> 37%.
            def untranslated_entries(self):
                return [1, 2, 3, 4, 5]
            def translated_entries(self):
                return [1, 2, 3]
        mock_pofile = MockPofile()
        with mock.patch.object(LegalCode, "get_pofile") as mock_get_pofile:
            mock_get_pofile.return_value = mock_pofile
            stats = tb.stats
        self.assertEqual(
            {
                "percent_messages_translated": 37,
                "number_of_total_messages": 8,
                "number_of_translated_messages": 3,
                "number_of_untranslated_messages": 5,
            },
            stats,
        )
| licenses/tests/test_models.py | 30,759 | 3.0 formula:
/licenses/VERSION/JURISDICTION/LICENSE_deed_LANGAUGE.html
/licenses/VERSION/JURISDICTION/LICENSE_legalcode_LANGAUGE.html
3.0 examples:
/licenses/3.0/xu/by_deed_en.html
/licenses/3.0/xu/by_legalcode_en.html
/licenses/3.0/am/by_deed_hy.html
/licenses/3.0/am/by_legalcode_hy.html
/licenses/3.0/rs/by_deed_rs-Cyrl.html
/licenses/3.0/rs/by_legalcode_rs-Cyrl.html
For jurisdiction, I used "xu" to mean "unported".
See https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2#User-assigned_code_elements. # noqa: E501
4.0 formula:
/licenses/VERSION/LICENSE_deed_LANGAUGE.html
/licenses/VERSION/LICENSE_legalcode_LANGAUGEhtml
4.0 examples:
/licenses/4.0/by-nc-nd_deed_en.html
/licenses/4.0/by-nc-nd_legalcode_en.html
/licenses/4.0/by_deed_en.html
/licenses/4.0/by_legalcode_en.html
/licenses/4.0/by_deed_zh-Hans.html
/licenses/4.0/by_legalcode_zh-Hans.html
cc0 formula:
/publicdomain/VERSION/LICENSE_deed_LANGAUGE.html
/publicdomain/VERSION/LICENSE_legalcode_LANGAUGE.html
cc0 examples:
/publicdomain/1.0/zero_deed_en.html
/publicdomain/1.0/zero_legalcode_en.html
/publicdomain/1.0/zero_deed_ja.html
/publicdomain/1.0/zero_legalcode_ja.html
Standard library Third-party First-party/Local Test valid() Test validgroups() (expected, license_code, version, jurisdiction, language) (expected, license_code, version, jurisdiction, language) get_translation_object on the model calls the i18n.utils.get_translation_object. Unported Ported with multiple languages Ported with single language Ported Unported Every license includes "cc-logo" def test_default_language_code(self): license = LicenseFactory( license_code="bx-oh", version="1.3", jurisdiction_code="" ) self.assertEqual( DEFAULT_LANGUAGE_CODE, license.default_language_code() ) license = LicenseFactory( license_code="bx-oh", version="1.3", jurisdiction_code="fr" ) self.assertEqual("fr", license.default_language_code()) def test_get_deed_url(self): https://creativecommons.org/licenses/by-sa/4.0/ https://creativecommons.org/licenses/by-sa/4.0/deed.es https://creativecommons.org/licenses/by/3.0/es/ https://creativecommons.org/licenses/by/3.0/es/deed.fr license = LicenseFactory( license_code="bx-oh", version="1.3", jurisdiction_code="ae" ) self.assertEqual("/licenses/bx-oh/1.3/ae/", license.deed_url) license = LicenseFactory( license_code="bx-oh", version="1.3", jurisdiction_code="" ) self.assertEqual("/licenses/bx-oh/1.3/", license.deed_url) def test_get_deed_url_for_language(self): license = LicenseFactory( license_code="bx-oh", version="1.3", jurisdiction_code="ae" ) self.assertEqual( "/licenses/bx-oh/1.3/ae/deed.fr", license.get_deed_url_for_language("fr"), ) license = LicenseFactory( license_code="bx-oh", version="1.3", jurisdiction_code="" ) self.assertEqual( "/licenses/bx-oh/1.3/deed.es", license.get_deed_url_for_language("es"), ) | 3,004 | en | 0.352811 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions to support the RLTuner and NoteRNNLoader classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
# internal imports
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
import tensorflow as tf
LSTM_STATE_NAME = 'lstm'
# Number of output note classes. This is a property of the dataset.
NUM_CLASSES = 38
# Default batch size.
BATCH_SIZE = 128
# Music-related constants.
INITIAL_MIDI_VALUE = 48
NUM_SPECIAL_EVENTS = 2
MIN_NOTE = 48  # Inclusive
MAX_NOTE = 84  # Exclusive
TRANSPOSE_TO_KEY = 0  # C Major
DEFAULT_QPM = 80.0
# Music theory constants used in defining reward functions.
# Note that action 2 = midi note 48.
C_MAJOR_SCALE = [2, 4, 6, 7, 9, 11, 13, 14, 16, 18, 19, 21, 23, 25, 26]
C_MAJOR_KEY = [0, 1, 2, 4, 6, 7, 9, 11, 13, 14, 16, 18, 19, 21, 23, 25, 26, 28,
               30, 31, 33, 35, 37]
C_MAJOR_TONIC = 14
A_MINOR_TONIC = 23
# The number of half-steps in musical intervals, in order of dissonance
OCTAVE = 12
FIFTH = 7
THIRD = 4
SIXTH = 9
SECOND = 2
FOURTH = 5
SEVENTH = 11
HALFSTEP = 1
# Special intervals that have unique rewards
REST_INTERVAL = -1
HOLD_INTERVAL = -1.5
REST_INTERVAL_AFTER_THIRD_OR_FIFTH = -2
HOLD_INTERVAL_AFTER_THIRD_OR_FIFTH = -2.5
IN_KEY_THIRD = -3
IN_KEY_FIFTH = -5
# Indicate melody direction
ASCENDING = 1
DESCENDING = -1
# Indicate whether a melodic leap has been resolved or if another leap was made
LEAP_RESOLVED = 1
LEAP_DOUBLED = -1
def default_hparams():
  """Returns the HParams used to train the note RNN from the paper."""
  config = dict(
      use_dynamic_rnn=True,
      batch_size=BATCH_SIZE,
      lr=0.0002,
      l2_reg=2.5e-5,
      clip_norm=5,
      initial_learning_rate=0.5,
      decay_steps=1000,
      decay_rate=0.85,
      rnn_layer_sizes=[100],
      skip_first_n_losses=32,
      one_hot_length=NUM_CLASSES,
      exponentially_decay_learning_rate=True)
  return tf.contrib.training.HParams(**config)
def basic_rnn_hparams():
  """Returns the hparams used to train a basic_rnn.

  These are the hparams used in the .mag file found at
  https://github.com/tensorflow/magenta/tree/master/magenta/models/
  melody_rnn#pre-trained

  Returns:
    Hyperparameters of the downloadable basic_rnn pre-trained model.
  """
  # TODO(natashajaques): ability to restore basic_rnn from any .mag file.
  config = dict(
      batch_size=128,
      rnn_layer_sizes=[512, 512],
      one_hot_length=NUM_CLASSES)
  return tf.contrib.training.HParams(**config)
def default_dqn_hparams():
  """Returns the default hparams for the RLTuner DQN model."""
  config = dict(
      random_action_probability=0.1,
      store_every_nth=1,
      train_every_nth=5,
      minibatch_size=32,
      discount_rate=0.95,
      max_experience=100000,
      target_network_update_rate=0.01)
  return tf.contrib.training.HParams(**config)
def autocorrelate(signal, lag=1):
  """Computes the signal's correlation coefficient with itself.

  Args:
    signal: The signal on which to compute the autocorrelation. Can be a
      list.
    lag: The offset at which to correlate the signal with itself. E.g. if
      lag is 1, will compute the correlation between the signal and itself
      1 beat later.

  Returns:
    Correlation coefficient.
  """
  length = len(signal)
  centered = np.asarray(signal) - np.mean(signal)
  variance = np.var(signal)
  # Dot product of the signal with its lag-shifted copy, normalized by
  # length and variance.
  overlap = centered[lag:] * centered[:length - lag]
  return overlap.sum() / float(length) / variance
def linear_annealing(n, total, p_initial, p_final):
  """Linearly interpolates a probability between p_initial and p_final.

  Current probability is based on the current step, n. Used to linearly
  anneal the exploration probability of the RLTuner.

  Args:
    n: The current step.
    total: The total number of steps that will be taken (usually the length
      of the exploration period).
    p_initial: The initial probability.
    p_final: The final probability.

  Returns:
    The current probability (between p_initial and p_final).
  """
  # Past the annealing window the probability is pinned at its final value.
  if n < total:
    return p_initial - (n * (p_initial - p_final)) / (total)
  return p_final
def softmax(x):
  """Computes softmax values for each set of scores in x.

  Subtracts the max before exponentiating for numerical stability; the
  result is unchanged because softmax is shift-invariant.
  """
  exps = np.exp(x - np.max(x))
  return exps / exps.sum(axis=0)
def sample_softmax(softmax_vect):
  """Samples a note from an array of softmax probabilities.

  Tries to do this with numpy, which requires that the probabilities add to
  1.0 with extreme precision. If numpy rejects the distribution, falls back
  to a manual cumulative-sum implementation.

  Args:
    softmax_vect: An array of probabilities.

  Returns:
    The index of the note that was chosen/sampled.
  """
  try:
    sample = np.argmax(np.random.multinomial(1, pvals=softmax_vect))
    return sample
  except ValueError:
    # np.random.multinomial raises ValueError when pvals does not sum to
    # 1.0 precisely enough. Only that case should trigger the fallback;
    # the original bare `except:` also hid genuine bugs (e.g. TypeError).
    r = random.uniform(0, np.sum(softmax_vect))
    upto = 0
    for i in range(len(softmax_vect)):
      if upto + softmax_vect[i] >= r:
        return i
      upto += softmax_vect[i]
    tf.logging.warn("Error! sample softmax function shouldn't get here")
    print("Error! sample softmax function shouldn't get here")
    return len(softmax_vect) - 1
def decoder(event_list, transpose_amount):
  """Translates a sequence generated by RLTuner to MonophonicMelody form.

  Args:
    event_list: Integer list of encoded notes.
    transpose_amount: Key to transpose to.

  Returns:
    Integer list of MIDI values.
  """
  decoded = []
  for event in event_list:
    if event < NUM_SPECIAL_EVENTS:
      # Special events (note-off, no-event) map to negative values.
      decoded.append(event - NUM_SPECIAL_EVENTS)
    else:
      decoded.append(event + INITIAL_MIDI_VALUE - transpose_amount)
  return decoded
def make_onehot(int_list, one_hot_length):
  """Converts each int to a one-hot vector.

  A one-hot vector is 0 everywhere except at the index equal to the
  encoded value.

  For example: 5 as a one-hot vector is [0, 0, 0, 0, 0, 1, 0, 0, 0, ...]

  Args:
    int_list: A list of ints, each of which will get a one-hot encoding.
    one_hot_length: The length of the one-hot vector to be created.

  Returns:
    A list of one-hot encodings of the ints.
  """
  vectors = []
  for value in int_list:
    vec = [0.0] * one_hot_length
    vec[value] = 1.0
    vectors.append(vec)
  return vectors
def get_inner_scope(scope_str):
  """Takes a tensorflow scope string and finds the inner scope.

  Inner scope is one layer more internal.

  Args:
    scope_str: Tensorflow variable scope string.

  Returns:
    Scope string with outer scope stripped off.
  """
  # find() returns -1 when there is no '/', so -1 + 1 == 0 and the whole
  # string is returned unchanged in that case.
  return scope_str[scope_str.find('/') + 1:]
def trim_variable_postfixes(scope_str):
  """Trims any extra numbers added to a tensorflow scope string.

  Necessary to align variables in graph and checkpoint

  Args:
    scope_str: Tensorflow variable scope string.

  Returns:
    Scope string with extra numbers trimmed off.
  """
  # str.partition returns the whole string when ':' is absent. The previous
  # find()-based slice returned scope_str[:-1] in that case, silently
  # dropping the last character of a name with no postfix.
  return scope_str.partition(':')[0]
def get_variable_names(graph, scope):
  """Finds all the variable names in a graph that begin with a given scope.

  Args:
    graph: A tensorflow graph.
    scope: A string scope.

  Returns:
    List of variables.
  """
  with graph.as_default():
    all_names = [v.name for v in tf.global_variables()]
  return [name for name in all_names if name.startswith(scope)]
def get_next_file_name(directory, prefix, extension):
  """Finds next available filename in directory by appending numbers to prefix.

  E.g. If prefix is 'myfile', extension is 'png', and 'directory' already
  contains 'myfile.png' and 'myfile1.png', this function will return
  'myfile2.png'.

  Args:
    directory: Path to the relevant directory.
    prefix: The filename prefix to use.
    extension: String extension of the file WITHOUT the leading dot,
      e.g. 'mid'. (The dot is inserted by this function; the previous
      docstring's "'.mid'" example would have produced 'myfile..mid'.)

  Returns:
    String name of the file.
  """
  # os.path.join handles separators portably instead of hard-coding '/'.
  name = os.path.join(directory, prefix + '.' + extension)
  i = 0
  while os.path.isfile(name):
    i += 1
    name = os.path.join(directory, '%s%d.%s' % (prefix, i, extension))
  return name
def make_rnn_cell(rnn_layer_sizes, state_is_tuple=False):
  """Makes a default LSTM cell for use in the NoteRNNLoader graph.

  This model is only to be used for loading the checkpoint from the
  research paper. In general, events_rnn_graph.make_rnn_cell should be used
  instead.

  Args:
    rnn_layer_sizes: A list of integer sizes (in units) for each layer of
      the RNN.
    state_is_tuple: A boolean specifying whether to use tuple of hidden
      matrix and cell matrix as a state instead of a concatenated matrix.

  Returns:
    A tf.contrib.rnn.MultiRNNCell based on the given hyperparameters.
  """
  layers = [
      tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=state_is_tuple)
      for num_units in rnn_layer_sizes
  ]
  return tf.contrib.rnn.MultiRNNCell(layers, state_is_tuple=state_is_tuple)
def log_sum_exp(xs):
  """Computes the log sum exp value of a tensor.

  Shifts by the per-row max before exponentiating so the exp cannot
  overflow; the max is added back via tf.squeeze.
  """
  maxes = tf.reduce_max(xs, keep_dims=True)
  shifted = xs - maxes
  summed = tf.reduce_sum(tf.exp(shifted), -1)
  return tf.squeeze(maxes, [-1]) + tf.log(summed)
| magenta/models/rl_tuner/rl_tuner_ops.py | 9,922 | Gives the correlation coefficient for the signal's correlation with itself.
Args:
signal: The signal on which to compute the autocorrelation. Can be a list.
lag: The offset at which to correlate the signal with itself. E.g. if lag
is 1, will compute the correlation between the signal and itself 1 beat
later.
Returns:
Correlation coefficient.
Generates the hparams used to train a basic_rnn.
These are the hparams used in the .mag file found at
https://github.com/tensorflow/magenta/tree/master/magenta/models/
melody_rnn#pre-trained
Returns:
Hyperparameters of the downloadable basic_rnn pre-trained model.
Translates a sequence generated by RLTuner to MonophonicMelody form.
Args:
event_list: Integer list of encoded notes.
transpose_amount: Key to transpose to.
Returns:
Integer list of MIDI values.
Generates the default hparams for RLTuner DQN model.
Generates the hparams used to train note rnn used in paper.
Takes a tensorflow scope string and finds the inner scope.
Inner scope is one layer more internal.
Args:
scope_str: Tensorflow variable scope string.
Returns:
Scope string with outer scope stripped off.
Finds next available filename in directory by appending numbers to prefix.
E.g. If prefix is 'myfile', extenstion is '.png', and 'directory' already
contains 'myfile.png' and 'myfile1.png', this function will return
'myfile2.png'.
Args:
directory: Path to the relevant directory.
prefix: The filename prefix to use.
extension: String extension of the file, eg. '.mid'.
Returns:
String name of the file.
Finds all the variable names in a graph that begin with a given scope.
Args:
graph: A tensorflow graph.
scope: A string scope.
Returns:
List of variables.
Linearly interpolates a probability between p_initial and p_final.
Current probability is based on the current step, n. Used to linearly anneal
the exploration probability of the RLTuner.
Args:
n: The current step.
total: The total number of steps that will be taken (usually the length of
the exploration period).
p_initial: The initial probability.
p_final: The final probability.
Returns:
The current probability (between p_initial and p_final).
Computes the log sum exp value of a tensor.
Convert each int to a one-hot vector.
A one-hot vector is 0 everywhere except at the index equal to the
encoded value.
For example: 5 as a one-hot vector is [0, 0, 0, 0, 0, 1, 0, 0, 0, ...]
Args:
int_list: A list of ints, each of which will get a one-hot encoding.
one_hot_length: The length of the one-hot vector to be created.
Returns:
A list of one-hot encodings of the ints.
Makes a default LSTM cell for use in the NoteRNNLoader graph.
This model is only to be used for loading the checkpoint from the research
paper. In general, events_rnn_graph.make_rnn_cell should be used instead.
Args:
rnn_layer_sizes: A list of integer sizes (in units) for each layer of the
RNN.
state_is_tuple: A boolean specifying whether to use tuple of hidden matrix
and cell matrix as a state instead of a concatenated matrix.
Returns:
A tf.contrib.rnn.MultiRNNCell based on the given hyperparameters.
Samples a note from an array of softmax probabilities.
Tries to do this with numpy, which requires that the probabilities add to 1.0
with extreme precision. If this fails, uses a manual implementation.
Args:
softmax_vect: An array of probabilities.
Returns:
The index of the note that was chosen/sampled.
Compute softmax values for each sets of scores in x.
Trims any extra numbers added to a tensorflow scope string.
Necessary to align variables in graph and checkpoint
Args:
scope_str: Tensorflow variable scope string.
Returns:
Scope string with extra numbers trimmed off.
Helper functions to support the RLTuner and NoteRNNLoader classes.
Copyright 2016 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. internal imports pylint: disable=redefined-builtin Number of output note classes. This is a property of the dataset. Default batch size. Music-related constants. Inclusive Exclusive C Major Music theory constants used in defining reward functions. Note that action 2 = midi note 48. The number of half-steps in musical intervals, in order of dissonance Special intervals that have unique rewards Indicate melody direction Indicate whether a melodic leap has been resolved or if another leap was made TODO(natashajaques): ability to restore basic_rnn from any .mag file. pylint: disable=bare-except | 4,978 | en | 0.819911 |
# -*- coding: utf-8 -*-
'''SRFax (www.srfax.com) python library'''
import re
import os.path
import base64
import logging
import requests
# Default SRFax secure web-service endpoint.
URL = 'https://www.srfax.com/SRF_SecWebSvc.php'
LOGGER = logging.getLogger(__name__)
# E.164 international phone number: '+' followed by 7-15 digits.
RE_E164 = re.compile(r'^\+\d{7,15}$')  # TODO: Replace this with phonenumberslite?
# North American Numbering Plan country-code prefix ('+1').
RE_NANP = re.compile(r'^\+1')
class SRFaxError(Exception):
    '''Exception raised for SRFax API failures.

    Carries the API error code, a human-readable message, the underlying
    exception (if any) and whether the request is safe to retry.
    '''

    def __init__(self, error_code, message, cause=None, retry=False):
        # Pass all details to Exception so .args is fully populated.
        super(SRFaxError, self).__init__(error_code, message, cause, retry)
        self.error_code = error_code
        self.message = message
        self.cause = cause
        self.retry = retry
        # TODO: In Python3.4, this causes 'AttributeError: 'NoneType' object has no attribute '__context__''
        # LOGGER.exception("%s" % (self))

    def get_error_code(self):
        '''Return the SRFax error code for this failure.'''
        return self.error_code

    def get_cause(self):
        '''Return the underlying exception that caused this error, if any.'''
        return self.cause

    def get_retry(self):
        '''Return True when the failed request should be retried.'''
        return self.retry
class SRFax(object):
    '''Client for the SRFax (www.srfax.com) REST API.

    Credentials are supplied once at construction time; per-call values
    (caller id, sender email, account code) may be overridden on each
    request method.
    '''

    def __init__(self, access_id, access_pwd, caller_id=None,
                 sender_email=None, account_code=None, url=None):
        self.access_id = access_id
        self.access_pwd = access_pwd
        self.caller_id = caller_id
        self.sender_email = sender_email
        self.account_code = account_code
        self.url = url or URL

    def queue_fax(self, to_fax_number, filepath,
                  caller_id=None, sender_email=None, account_code=None):
        '''Queue fax for sending.

        `to_fax_number` is a single number or a list of numbers in E.164
        format; `filepath` is a single path or a list of up to 5 paths
        to the documents to send.
        '''
        to_fax_number = SRFax.verify_fax_numbers(to_fax_number)
        fax_type = 'BROADCAST' if len(to_fax_number) > 1 else 'SINGLE'
        to_fax_number = '|'.join(to_fax_number)

        # Python 3 only: the old basestring/NameError dance is gone.
        if isinstance(filepath, str):
            filepath = [filepath]
        if not isinstance(filepath, list):
            raise TypeError('filepath not properly defined')
        if len(filepath) > 5:
            raise Exception('More than 5 files defined in filepath')

        params = {
            'access_id': self.access_id,
            'access_pwd': self.access_pwd,
            'sCallerID': caller_id or self.caller_id,
            'sSenderEmail': sender_email or self.sender_email,
            'sFaxType': fax_type,
            'sToFaxNumber': to_fax_number,
            'sAccountCode': account_code or self.account_code or '',
        }
        SRFax.verify_parameters(params)

        for i, path in enumerate(filepath):
            basename = os.path.basename(path)
            if not isinstance(basename, str):
                # Tolerate bytes paths by decoding the name for the API.
                basename = basename.decode('utf-8')
            params['sFileName_%d' % (i + 1)] = basename

            content = SRFax.get_file_content(path)
            if not isinstance(content, str):
                content = content.decode()
            params['sFileContent_%d' % (i + 1)] = content

        return self.process_request('Queue_Fax', params)

    def get_fax_status(self, fax_id):
        '''Get the sending status of a previously queued fax.'''
        params = {
            'access_id': self.access_id,
            'access_pwd': self.access_pwd,
            'sFaxDetailsID': fax_id,
        }
        SRFax.verify_parameters(params)
        response = self.process_request('Get_FaxStatus', params)
        # A single-entry result is unwrapped for caller convenience.
        if len(response) == 1:
            response = response[0]
        return response

    def get_fax_inbox(self, period='ALL'):
        '''Get received faxes for the given period.'''
        params = {
            'access_id': self.access_id,
            'access_pwd': self.access_pwd,
            'sPeriod': period,
        }
        SRFax.verify_parameters(params)
        return self.process_request('Get_Fax_Inbox', params)

    def get_fax_outbox(self, period='ALL'):
        '''Get sent faxes for the given period.'''
        params = {
            'access_id': self.access_id,
            'access_pwd': self.access_pwd,
            'sPeriod': period,
        }
        SRFax.verify_parameters(params)
        return self.process_request('Get_Fax_Outbox', params)

    def retrieve_fax(self, fax_filename, folder, fax_id):
        '''Retrieve fax content in Base64 format.

        `folder` must be 'IN' (received) or 'OUT' (sent).
        '''
        assert folder in ['IN', 'OUT']
        params = {
            'access_id': self.access_id,
            'access_pwd': self.access_pwd,
            'sFaxFileName': fax_filename,
            'sDirection': folder,
            'sFaxDetailsID': fax_id
        }
        SRFax.verify_parameters(params)
        response = self.process_request('Retrieve_Fax', params)
        if len(response) == 1:
            response = response[0]
        return response

    def delete_fax(self, fax_filename, folder):
        '''Delete up to 5 fax files from the server.'''
        assert folder in ['IN', 'OUT']
        if isinstance(fax_filename, str):
            fax_filename = [fax_filename]
        if not isinstance(fax_filename, list):
            raise TypeError('fax_filename not properly defined')
        if len(fax_filename) > 5:
            raise Exception('More than 5 files defined in fax_filename')

        params = {
            'access_id': self.access_id,
            'access_pwd': self.access_pwd,
            'sDirection': folder,
        }
        SRFax.verify_parameters(params)

        for i, name in enumerate(fax_filename):
            params['sFileName_%d' % (i + 1)] = name

        return self.process_request('Delete_Fax', params)

    def process_request(self, method, params):
        '''POST `params` to the SRFax REST API and return the parsed result.'''
        params['action'] = method
        try:
            response = requests.post(self.url, json=params)
        except Exception as exc:
            # Network-level failures are always retryable.
            raise SRFaxError('REQUESTFAILED', 'REST request failed',
                             cause=exc, retry=True)
        return SRFax.process_response(response)

    @staticmethod
    def process_response(response):
        '''Validate an SRFax HTTP response and return its Result payload.

        Raises SRFaxError when the response is missing, not HTTP-OK,
        lacks the expected keys, or reports a non-success status.
        '''
        if response is None:
            raise SRFaxError('INVALIDRESPONSE', 'Empty response', retry=True)
        if not response.ok:
            # Non-2xx responses previously fell through and failed with an
            # unrelated TypeError (the old "What if it isn't??" TODO);
            # fail fast with a retryable error instead.
            raise SRFaxError('INVALIDRESPONSE',
                             'HTTP error %s' % (response.status_code),
                             retry=True)

        response = response.json()
        if 'Status' not in response or 'Result' not in response:
            raise SRFaxError('INVALIDRESPONSE',
                             'Status and/or Result not in response: %s'
                             % (response), retry=True)

        result = response['Result']
        # Lazy %-style args: the string is only formatted if DEBUG is on.
        LOGGER.debug('Result: %s', result)

        if response['Status'] != 'Success':
            errmsg = result
            if (isinstance(errmsg, list) and len(errmsg) == 1
                    and 'ErrorCode' in errmsg[0]):
                errmsg = errmsg[0]['ErrorCode']
            raise SRFaxError('REQUESTFAILED', errmsg)

        if result is None:
            result = True
        return result

    @staticmethod
    def verify_parameters(params):
        '''Raise TypeError if any value in `params` is None.'''
        for key, value in params.items():
            if value is None:
                raise TypeError('%s not set' % (key))

    @staticmethod
    def is_e164_number(number):
        '''Simple check if number is in E.164 format.'''
        return bool(isinstance(number, str) and RE_E164.match(number))

    @staticmethod
    def is_nanp_number(number):
        '''Simple check if number is inside North American Numbering Plan.'''
        return bool(isinstance(number, str) and RE_NANP.match(number))

    @staticmethod
    def verify_fax_numbers(to_fax_number):
        '''Verify and prepare fax numbers for use at SRFax.

        Returns a list of digit strings: NANP numbers lose only their
        '+', all other numbers are prefixed with '011' (international
        dialing prefix from North America).
        '''
        if isinstance(to_fax_number, str):
            to_fax_number = [to_fax_number]
        if not isinstance(to_fax_number, list):
            raise TypeError('to_fax_number not properly defined')

        for i, number in enumerate(to_fax_number):
            number = str(number)
            if not SRFax.is_e164_number(number):
                raise TypeError('Number not in E.164 format: %s'
                                % (number))
            if SRFax.is_nanp_number(number):
                # Strip the leading '+' only.
                to_fax_number[i] = number[1:]
            else:
                to_fax_number[i] = '011' + number[1:]

        return to_fax_number

    @staticmethod
    def get_file_content(filepath):
        '''Read a file and return its content Base64 encoded (bytes).'''
        if not os.path.exists(filepath):
            raise Exception('File does not exist: %s' % (filepath))
        if not os.path.isfile(filepath):
            raise Exception('Not a file: %s' % (filepath))

        # Context manager guarantees the handle is closed even on error.
        with open(filepath, 'rb') as fdp:
            content = fdp.read()

        if not content:
            raise Exception('Error reading file or file empty: %s'
                            % (filepath))
        return base64.b64encode(content)
| srfax/srfax.py | 9,351 | SRFax class
SRFax Exception
Delete fax files from server
Get exception cause
Get exception error code
Get fax inbox
Get fax outbox
Get fax status
Read and return file content Base64 encoded
Get retry option (should we retry the request?)
Simple check if number is in E.164 format
Simple check if number is inside North American Numbering Plan
Process SRFax SOAP request
Process SRFax SOAP response
Queue fax for sending
Retrieve fax content in Base64 format
Verify and prepare fax numbers for use at SRFax
Verify that dict values are set
SRFax (www.srfax.com) python library
-*- coding: utf-8 -*- TODO: Replace this with phonenumberslite? TODO: In Python3.4, this causes 'AttributeError: 'NoneType' object has no attribute '__context__'' LOGGER.exception("%s" % (self)) TODO: What if it isn't?? | 796 | en | 0.657561 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	'''Build the shared Corellia hospital city-building template object.'''
	result = Building()
	result.template = "object/building/player/city/shared_hospital_corellia.iff"
	result.attribute_template_id = -1
	result.stfName("building_name","hospital")
	#### BEGIN MODIFICATIONS ####
	# NOTE(review): per the file header this file is autogenerated; hand
	# edits presumably belong between these markers — confirm before editing.
	#### END MODIFICATIONS ####
	return result
from MoveGetter import MoveGetter
import chess
class CommandLineMoveGetter(MoveGetter):
    """MoveGetter that prompts a human player for a move on the command line."""

    def getMove(self, board):
        """Display the board and its legal moves, then prompt for a choice."""
        print("\n")
        print(board)
        self.printLegalMoves(board)
        return self.getMoveFromCLI(board)

    def printLegalMoves(self, board):
        """Print each legal move with its selection index, in SAN notation."""
        for index, move in enumerate(board.legal_moves):
            print(str(index) + ": ", end="")
            print(board.san(move))

    def getMoveFromCLI(self, board):
        """Prompt until a valid move index is entered; return that move.

        Non-numeric input and out-of-range indices re-prompt.
        """
        selection = -1
        while selection < 0 or selection >= len(board.legal_moves):
            try:
                selection = int(input("Select a move "))
            except ValueError:
                print("Invalid input")

        # Walk the generator again to materialize the chosen move.
        for index, move in enumerate(board.legal_moves):
            if index == selection:
                return move
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import pathlib
from qiime2 import sdk
from qiime2.plugin import model
def identity_transformer(view):
    """No-op transformer: hand back *view* untouched."""
    return view
class ModelType:
    """Knows how to validate, coerce and transform views of one view type.

    A "view type" is either a format class (on-disk representation) or a
    plain Python type; ``from_view_type`` picks the matching subclass.
    """

    @staticmethod
    def from_view_type(view_type):
        """Construct the appropriate ModelType subclass for *view_type*."""
        if issubclass(view_type, model.base.FormatBase):
            if issubclass(view_type,
                          model.SingleFileDirectoryFormatBase):
                # HACK: this is necessary because we need to be able to "act"
                # like a FileFormat when looking up transformers, but our
                # input/output coercion still needs to bridge the
                # transformation as we do not have transitivity

                # In other words we have DX and we have transformers of X
                # In a perfect world we would automatically define DX -> X and
                # let transitivity handle it, but since that doesn't exist, we
                # need to treat DX as if it were X and coerce behind the scenes

                # TODO: redo this when transformers are transitive
                return SingleFileDirectoryFormatType(view_type)

            # Normal format type
            return FormatType(view_type)
        else:
            # TODO: supporting stdlib.typing may require an alternate
            # model type as `isinstance` is a meaningless operation
            # for them so validation would need to be handled differently
            return ObjectType(view_type)

    def __init__(self, view_type):
        self._pm = sdk.PluginManager()
        self._view_type = view_type

    def make_transformation(self, other, recorder=None):
        """Return a callable mapping a view of this type to one of *other*.

        Raises if no transformer between the two view types is registered.
        """
        # TODO: do something with the recorder.
        transformer = self._get_transformer_to(other)
        if transformer is None:
            raise Exception("No transformation from %r to %r" %
                            (self._view_type, other._view_type))

        def transformation(view):
            # Coerce/validate on the way in, transform, then
            # coerce/validate on the way out.
            view = self.coerce_view(view)
            self.validate(view)

            new_view = transformer(view)

            new_view = other.coerce_view(new_view)
            other.validate(new_view)

            if transformer is not identity_transformer:
                # Transformed output is marked as not user-owned.
                other.set_user_owned(new_view, False)

            return new_view

        return transformation

    def _get_transformer_to(self, other):
        """Direct lookup first, else defer to *other*'s reverse-lookup hook."""
        transformer = self._lookup_transformer(self._view_type,
                                               other._view_type)
        if transformer is None:
            return other._get_transformer_from(self)

        return transformer

    def has_transformation(self, other):
        """ Checks to see if there exist transformers for other

        Parameters
        ----------
        other : ModelType subclass
           The object being checked for transformer

        Returns
        -------
        bool
           Does the specified transformer exist for other?
        """
        transformer = self._get_transformer_to(other)
        return transformer is not None

    def _get_transformer_from(self, other):
        # Base types have no reverse lookup; subclasses may override.
        return None

    def coerce_view(self, view):
        # Default: views are used as-is; subclasses normalize paths etc.
        return view

    def _lookup_transformer(self, from_, to_):
        """Return the registered transformer, the identity, or None."""
        if from_ == to_:
            return identity_transformer

        try:
            return self._pm.transformers[from_][to_].transformer
        except KeyError:
            return None

    def set_user_owned(self, view, value):
        # Default: ownership tracking only matters for on-disk formats.
        pass
class FormatType(ModelType):
    """Model type backed by an on-disk file/directory format."""

    def coerce_view(self, view):
        """Return a read-only format instance for *view*.

        Accepts a path (str or pathlib.Path), an existing format
        instance, or anything else (returned unchanged).
        """
        if type(view) is str or isinstance(view, pathlib.Path):
            return self._view_type(view, mode='r')

        if not isinstance(view, self._view_type):
            return view

        # Re-wrap the original path (inheriting its lifetime) so callers
        # receive a read-only handle on the same underlying data.
        return self._view_type(view.path, mode='r')

    def validate(self, view):
        """Type-check *view*, then defer to the format's own validation."""
        if not isinstance(view, self._view_type):
            raise TypeError("%r is not an instance of %r."
                            % (view, self._view_type))

        # Formats have a validate method, so defer to it
        view.validate()

    def set_user_owned(self, view, value):
        """Flag whether the view's backing path is owned by the user."""
        view.path._user_owned = value
class SingleFileDirectoryFormatType(FormatType):
    """Model type for directory formats containing exactly one file.

    Transformer lookups fall back to the wrapped single file's format so
    transformers registered on the inner format can be reused (see the
    HACK note in ``ModelType.from_view_type``).
    """

    def __init__(self, view_type):
        # Single file directory formats have only one file named `file`
        # allowing us construct a model type from the format of `file`
        self._wrapped_view_type = view_type.file.format
        super().__init__(view_type)

    def _get_transformer_to(self, other):
        """Find a transformer to *other*, trying wrapped/unwrapped combos."""
        # Legend:
        # - Dx: single directory format of x
        # - Dy: single directory format of y
        # - x: input format x
        # - y: output format y
        # - ->: implicit transformer
        # - =>: registered transformer
        # - |: or, used when multiple situation are possible

        # It looks like all permutations because it is...

        # Dx -> y | Dy via Dx => y | Dy
        transformer = self._wrap_transformer(self, other)
        if transformer is not None:
            return transformer

        # Dx -> Dy via Dx -> x => y | Dy
        transformer = self._wrap_transformer(self, other, wrap_input=True)
        if transformer is not None:
            return transformer

        if type(other) is type(self):
            # Dx -> Dy via Dx -> x => y -> Dy
            transformer = self._wrap_transformer(
                self, other, wrap_input=True, wrap_output=True)
            if transformer is not None:
                return transformer

        # Out of options, try for Dx -> Dy via Dx => y -> Dy
        return other._get_transformer_from(self)

    def _get_transformer_from(self, other):
        """Reverse-direction lookup: from *other* into this directory format."""
        # x | Dx -> Dy via x | Dx => y -> Dy
        # IMPORTANT: reverse other and self, this method is like __radd__
        return self._wrap_transformer(other, self, wrap_output=True)

    def _wrap_transformer(self, in_, out_, wrap_input=False,
                          wrap_output=False):
        """Look up a transformer, optionally bridging through `file`."""
        input = in_._wrapped_view_type if wrap_input else in_._view_type
        output = out_._wrapped_view_type if wrap_output else out_._view_type

        transformer = self._lookup_transformer(input, output)
        if transformer is None:
            return None

        if wrap_input:
            transformer = in_._wrap_input(transformer)

        if wrap_output:
            transformer = out_._wrap_output(transformer)

        return transformer

    def _wrap_input(self, transformer):
        """Adapt *transformer* to accept the directory format's inner file."""
        def wrapped(view):
            return transformer(view.file.view(self._wrapped_view_type))

        return wrapped

    def _wrap_output(self, transformer):
        """Adapt *transformer* to write its result into a new directory."""
        def wrapped(view):
            new_view = self._view_type()
            file_view = transformer(view)
            if transformer is not identity_transformer:
                self.set_user_owned(file_view, False)
            new_view.file.write_data(file_view, self._wrapped_view_type)
            return new_view

        return wrapped
class ObjectType(ModelType):
    """Model type for in-memory (non-format) views."""

    def validate(self, view):
        """Raise TypeError unless *view* is an instance of the view type."""
        if isinstance(view, self._view_type):
            return
        raise TypeError("%r is not of type %r, cannot transform further."
                        % (view, self._view_type))
| qiime2/core/transform.py | 7,580 | Checks to see if there exist transformers for other
Parameters
----------
other : ModelType subclass
The object being checked for transformer
Returns
-------
bool
Does the specified transformer exist for other?
---------------------------------------------------------------------------- Copyright (c) 2016-2017, QIIME 2 development team. Distributed under the terms of the Modified BSD License. The full license is in the file LICENSE, distributed with this software. ---------------------------------------------------------------------------- HACK: this is necessary because we need to be able to "act" like a FileFormat when looking up transformers, but our input/output coercion still needs to bridge the transformation as we do not have transitivity In other words we have DX and we have transformers of X In a perfect world we would automatically define DX -> X and let transitivity handle it, but since that doesn't exist, we need to treat DX as if it were X and coerce behind the scenes TODO: redo this when transformers are transitive Normal format type TODO: supporting stdlib.typing may require an alternate model type as `isinstance` is a meaningless operation for them so validation would need to be handled differently TODO: do something with the recorder. wrap original path (inheriting the lifetime) and return a read-only instance Formats have a validate method, so defer to it Single file directory formats have only one file named `file` allowing us construct a model type from the format of `file` Legend: - Dx: single directory format of x - Dy: single directory format of y - x: input format x - y: output format y - ->: implicit transformer - =>: registered transformer - |: or, used when multiple situation are possible It looks like all permutations because it is... Dx -> y | Dy via Dx => y | Dy Dx -> Dy via Dx -> x => y | Dy Dx -> Dy via Dx -> x => y -> Dy Out of options, try for Dx -> Dy via Dx => y -> Dy x | Dx -> Dy via x | Dx => y -> Dy IMPORTANT: reverse other and self, this method is like __radd__ | 2,046 | en | 0.824131 |
import torch
import torch.nn.functional as F
def clamp_probs(probs):
    '''
    Clamp probabilities into the open interval (eps, 1 - eps) so that
    downstream logarithms never see an exact 0 or 1.
    '''
    tiny = torch.finfo(probs.dtype).eps
    return probs.clamp(min=tiny, max=1 - tiny)


def concrete_sample(logits, temperature, shape=torch.Size([])):
    '''
    Sampling for Concrete distribution.

    See Eq. 10 of Maddison et al., 2017.
    '''
    full_shape = torch.Size(shape) + logits.shape
    uniforms = clamp_probs(
        torch.rand(full_shape, dtype=torch.float32, device=logits.device))
    # Gumbel noise via the inverse-CDF trick.
    gumbels = -torch.log(-torch.log(uniforms))
    return torch.softmax((logits + gumbels) / temperature, dim=-1)


def bernoulli_concrete_sample(logits, temperature, shape=torch.Size([])):
    '''
    Sampling for BinConcrete distribution.

    See PyTorch source code, differs from Eq. 16 of Maddison et al., 2017.
    '''
    full_shape = torch.Size(shape) + logits.shape
    uniforms = clamp_probs(
        torch.rand(full_shape, dtype=torch.float32, device=logits.device))
    logistic_noise = torch.log(uniforms) - torch.log(1 - uniforms)
    scores = F.logsigmoid(logits) - F.logsigmoid(-logits) + logistic_noise
    return torch.sigmoid(scores / temperature)
| selection/layers/utils.py | 1,150 | Sampling for BinConcrete distribution.
See PyTorch source code, differs from Eq. 16 of Maddison et al., 2017.
Sampling for Concrete distribution.
See Eq. 10 of Maddison et al., 2017. | 184 | en | 0.781941 |
import os
import go_vncdriver
import tensorflow as tf
import argparse
import json
import envs
from model import policies
import checkpoint_utils
# Roll out a trained policy from a log directory, rendering each frame.
parser = argparse.ArgumentParser(description="Run commands")
parser.add_argument('logdir', type=str, help="Log directory path")
args = parser.parse_args()
# Load the hyperparameters the training run was launched with.
with open(args.logdir + "/hparams.json") as f:
    hparams = json.load(f)
# Rebuild the environment and policy with the same hyperparameters.
env = envs.create_env(**hparams)
obs = env.reset()
policyType = policies[hparams['policy']]
policy = policyType(env.observation_space.shape, env.action_space.n, **hparams)
features = policy.get_initial_features()
sess = tf.Session()
#import ipdb; ipdb.set_trace()
# Map checkpoint variables saved under 'global/' onto this graph's root
# scope, then run the initializers to materialize the restored values.
checkpoint_utils.init_from_checkpoint(args.logdir + '/train', {'global/':'/'})
#saver = tf.train.Saver(sharded=True)
#saver.restore(sess, os.path.join(args.logdir, 'train/model.ckpt-0'))
sess.run(tf.global_variables_initializer())
# Act greedily (argmax over policy output) forever; reset on episode end.
with sess.as_default():
    while True:
        env.render()
        fetched = policy.act(obs, *features)
        action, value_, features = fetched[0], fetched[1], fetched[2:]
        obs, reward, done, info = env.step(action.argmax())
        if done:
            obs = env.reset()
#!/usr/bin/env python3
# SPDX-FileCopyrightText: © 2022 Decompollaborate
# SPDX-License-Identifier: MIT
from __future__ import annotations
import enum
@enum.unique
class FileSectionType(enum.Enum):
    """Section of an object/binary file a chunk of data belongs to."""

    Unknown = -2
    Invalid = -1

    Text = 1
    Data = 2
    Rodata = 3
    Bss = 4
    Reloc = 5

    @staticmethod
    def fromId(sectionId: int) -> FileSectionType:
        """Map a numeric section id (1-5) to its member, else Invalid."""
        try:
            section = FileSectionType(sectionId)
        except ValueError:
            return FileSectionType.Invalid
        # The sentinel values themselves are not valid external ids.
        if section in (FileSectionType.Unknown, FileSectionType.Invalid):
            return FileSectionType.Invalid
        return section

    @staticmethod
    def fromStr(x: str) -> FileSectionType:
        """Map a section name like '.text' to its member, else Invalid."""
        return {
            ".text": FileSectionType.Text,
            ".data": FileSectionType.Data,
            ".rodata": FileSectionType.Rodata,
            ".bss": FileSectionType.Bss,
            ".reloc": FileSectionType.Reloc,
        }.get(x, FileSectionType.Invalid)

    def toStr(self) -> str:
        """Return the canonical '.name' string, or '' for sentinels."""
        return {
            FileSectionType.Text: ".text",
            FileSectionType.Data: ".data",
            FileSectionType.Rodata: ".rodata",
            FileSectionType.Bss: ".bss",
            FileSectionType.Reloc: ".reloc",
        }.get(self, "")

    def toCapitalizedStr(self) -> str:
        """Return a capitalized display name, or '' for sentinels."""
        return {
            FileSectionType.Text: "Text",
            FileSectionType.Data: "Data",
            FileSectionType.Rodata: "RoData",
            FileSectionType.Bss: "Bss",
            FileSectionType.Reloc: "Reloc",
        }.get(self, "")

    def toSectionName(self) -> str:
        """Return the linker section name; Reloc maps to '.ovl'."""
        return {
            FileSectionType.Text: ".text",
            FileSectionType.Data: ".data",
            FileSectionType.Rodata: ".rodata",
            FileSectionType.Bss: ".bss",
            FileSectionType.Reloc: ".ovl",
        }.get(self, "")
# The four standard sections (no relocation section).
FileSections_ListBasic = [FileSectionType.Text, FileSectionType.Data, FileSectionType.Rodata, FileSectionType.Bss]
# All sections, including overlay relocation data.
FileSections_ListAll = [FileSectionType.Text, FileSectionType.Data, FileSectionType.Rodata, FileSectionType.Bss, FileSectionType.Reloc]
| spimdisasm/common/FileSectionType.py | 2,633 | !/usr/bin/env python3 SPDX-FileCopyrightText: © 2022 Decompollaborate SPDX-License-Identifier: MIT | 98 | de | 0.391041 |
import numpy as np
import pytest
from pytools.obj_array import make_obj_array
# {{{ test_unstructured_vertex_grid
@pytest.mark.parametrize("ambient_dim", [2, 3])
@pytest.mark.parametrize("dformat", ["xml", "hdf", "binary"])
def test_unstructured_vertex_grid(ambient_dim, dformat, npoints=64):
"""Test constructing a vertex grid with different ways to define the
points and connectivity.
"""
# {{{ set up connectivity
from pyvisfile.xdmf import NumpyDataArray, DataArray, _data_item_from_numpy
connectivity = np.arange(npoints, dtype=np.uint32)
points = np.random.rand(ambient_dim, npoints)
if dformat == "xml":
connectivity = NumpyDataArray(connectivity, name="connectivity")
points = NumpyDataArray(points.T, name="points")
elif dformat in ["hdf", "binary"]:
if dformat == "hdf":
cdata = "geometry.h5:/Grid/Connectivity"
pdata = "geometry.h5:/Grid/Points"
else:
cdata = "connectivity.out"
pdata = "points.out"
connectivity = DataArray((
_data_item_from_numpy(connectivity,
name="connectivity",
data=cdata),
))
points = DataArray((
_data_item_from_numpy(points.T,
name="points",
data=pdata),
))
else:
raise ValueError(f"unknown format: '{dformat}'")
# }}}
# {{{ set up grids
from pyvisfile.xdmf import TopologyType
from pyvisfile.xdmf import XdmfUnstructuredGrid
grid = XdmfUnstructuredGrid(
points, connectivity,
topology_type=TopologyType.Polyvertex,
name="polyvertex")
# }}}
from pyvisfile.xdmf import XdmfWriter
writer = XdmfWriter((grid,))
filename = f"test_unstructured_vertex_{dformat}_{ambient_dim}d.xmf"
writer.write_pretty(filename)
# }}}
# {{{ test_unstructured_simplex_grid
def _simplex_box_connectivity(*, npoints, nelements, nvertices):
    """Build simplex connectivity for a tensor-product grid of points.

    :arg npoints: tuple with the number of grid points along each axis.
    :arg nelements: expected total number of simplices (asserted).
    :arg nvertices: vertices per simplex (3 for triangles, 4 for tets).
    """
    # NOTE: largely copied from meshmode/mesh/generation.py::generate_box_mesh
    ambient_dim = len(npoints)

    point_indices = np.arange(np.prod(npoints)).reshape(npoints)
    connectivity = np.empty((nelements, nvertices), dtype=np.uint32)

    ielement = 0
    from itertools import product
    if ambient_dim == 1:
        raise NotImplementedError
    elif ambient_dim == 2:
        for i, j in product(range(npoints[0] - 1), repeat=ambient_dim):
            # Split each quad cell into two triangles.
            a = point_indices[i + 0, j + 0]
            b = point_indices[i + 1, j + 0]
            c = point_indices[i + 0, j + 1]
            d = point_indices[i + 1, j + 1]

            connectivity[ielement + 0, :] = (a, b, c)
            connectivity[ielement + 1, :] = (d, c, b)
            ielement += 2
    elif ambient_dim == 3:
        for i, j, k in product(range(npoints[0] - 1), repeat=ambient_dim):
            # Split each hex cell into six tetrahedra; `aXYZ` indexes the
            # cell corner at (i+X, j+Y, k+Z).
            a000 = point_indices[i, j, k]
            a001 = point_indices[i, j, k+1]
            a010 = point_indices[i, j+1, k]
            a011 = point_indices[i, j+1, k+1]

            a100 = point_indices[i+1, j, k]
            a101 = point_indices[i+1, j, k+1]
            a110 = point_indices[i+1, j+1, k]
            a111 = point_indices[i+1, j+1, k+1]

            connectivity[ielement + 0, :] = (a000, a100, a010, a001)
            connectivity[ielement + 1, :] = (a101, a100, a001, a010)
            connectivity[ielement + 2, :] = (a101, a011, a010, a001)

            connectivity[ielement + 3, :] = (a100, a010, a101, a110)
            connectivity[ielement + 4, :] = (a011, a010, a110, a101)
            connectivity[ielement + 5, :] = (a011, a111, a101, a110)

            ielement += 6
    else:
        raise NotImplementedError

    assert ielement == nelements

    from pyvisfile.xdmf import NumpyDataArray
    return NumpyDataArray(connectivity, name="connectivity")
@pytest.mark.parametrize("ambient_dim", [2, 3])
def test_unstructured_simplex_grid(ambient_dim, nelements=16):
"""Test constructing a grid with a more complicated topology."""
from pyvisfile.xdmf import TopologyType
if ambient_dim == 1:
topology_type = TopologyType.Polyline
simplices_per_quad = 1
if ambient_dim == 2:
topology_type = TopologyType.Triangle
simplices_per_quad = 2
elif ambient_dim == 3:
topology_type = TopologyType.Tetrahedron
simplices_per_quad = 6
else:
raise ValueError("unsupported dimension")
# {{{ points and connectivity
x = np.linspace(-1.0, 1.0, nelements + 1)
npoints = len(x)
points = np.empty((ambient_dim,) + (npoints,) * ambient_dim)
for idim in range(ambient_dim):
points[idim] = x.reshape((npoints,) + (1,) * (ambient_dim - 1 - idim))
from pyvisfile.xdmf import NumpyDataArray
points = NumpyDataArray(points.reshape(ambient_dim, -1).T, name="points")
from pyvisfile.xdmf import _XDMF_ELEMENT_NODE_COUNT
connectivity = _simplex_box_connectivity(
npoints=(npoints,) * ambient_dim,
nelements=simplices_per_quad * nelements**ambient_dim,
nvertices=_XDMF_ELEMENT_NODE_COUNT[topology_type]
)
# }}}
# {{{ attributes
temperature = np.sin(2.0 * np.pi * points.ary[:, 0]) \
+ np.cos(2.0 * np.pi * points.ary[:, 1])
temperature = NumpyDataArray(temperature, name="temperature")
velocity = points.ary + np.array([0, 1, 2][:ambient_dim]).reshape(1, -1)
velocity = NumpyDataArray(velocity, name="velocity")
vorticity = NumpyDataArray(make_obj_array(velocity.ary), name="vorticity")
# }}}
# {{{ write grids
from pyvisfile.xdmf import XdmfUnstructuredGrid
grid = XdmfUnstructuredGrid(
points, connectivity,
topology_type=topology_type,
name="simplex")
grid.add_attribute(temperature)
grid.add_attribute(velocity)
grid.add_attribute(vorticity)
from pyvisfile.xdmf import XdmfWriter
writer = XdmfWriter((grid,))
filename = f"test_unstructured_simplex_{ambient_dim}d.xmf"
writer.write_pretty(filename)
# }}}
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
exec(sys.argv[1])
else:
pytest.main([__file__])
# vim: fdm=marker
| test/test_xdmf.py | 6,252 | Test constructing a grid with a more complicated topology.
Test constructing a vertex grid with different ways to define the
points and connectivity.
{{{ test_unstructured_vertex_grid {{{ set up connectivity }}} {{{ set up grids }}} }}} {{{ test_unstructured_simplex_grid NOTE: largely copied from meshmode/mesh/generation.py::generate_box_mesh {{{ points and connectivity }}} {{{ attributes }}} {{{ write grids }}} vim: fdm=marker | 433 | en | 0.575792 |
import unittest
import torch
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
BertConfig,
BertForSequenceClassification,
GlueDataset,
GlueDataTrainingArguments,
Trainer,
TrainingArguments,
)
from transformers.adapters.composition import Fuse
from transformers.testing_utils import slow
class TestAdapterTrainer(unittest.TestCase):
    """Integration tests for training and resuming adapters with the Trainer."""

    def test_resume_training(self):
        """Resuming from a checkpoint must restore adapter weights exactly."""
        tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        data_args = GlueDataTrainingArguments(
            task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
        )
        train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")

        model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
        model.add_adapter("adapter")
        model.add_adapter("additional_adapter")
        model.set_active_adapters("adapter")

        training_args = TrainingArguments(
            output_dir="./examples",
            do_train=True,
            learning_rate=0.1,
            logging_steps=1,
            max_steps=1,
            save_steps=1,
            remove_unused_columns=False,
        )
        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=train_dataset,
            do_save_adapters=True,
            do_save_full_model=False,
        )

        trainer.train()
        # create second model that should resume the training of the first
        model_resume = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
        model_resume.add_adapter("adapter")
        model_resume.add_adapter("additional_adapter")
        model_resume.set_active_adapters("adapter")
        trainer_resume = Trainer(
            model=model_resume,
            args=TrainingArguments(do_train=True, max_steps=1, output_dir="./examples"),
            train_dataset=train_dataset,
            do_save_adapters=True,
            do_save_full_model=False,
        )
        trainer_resume.train(resume_from_checkpoint=True)

        self.assertEqual(model.config.adapters.adapters, model_resume.config.adapters.adapters)

        for ((k1, v1), (k2, v2)) in zip(trainer.model.state_dict().items(), trainer_resume.model.state_dict().items()):
            self.assertEqual(k1, k2)
            if "adapter" in k1:
                # Only adapter weights were saved, so only they must match.
                self.assertTrue(torch.equal(v1, v2), k1)

    def test_resume_training_with_fusion(self):
        """Same as test_resume_training, but with an adapter fusion layer."""
        tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        data_args = GlueDataTrainingArguments(
            task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
        )
        train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")

        model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
        model.add_adapter("adapter")
        model.add_adapter("additional_adapter")
        model.add_adapter_fusion(Fuse("adapter", "additional_adapter"))
        model.set_active_adapters(Fuse("adapter", "additional_adapter"))

        training_args = TrainingArguments(
            output_dir="./examples",
            do_train=True,
            learning_rate=0.1,
            logging_steps=1,
            max_steps=1,
            save_steps=1,
            remove_unused_columns=False,
        )
        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=train_dataset,
            do_save_adapters=True,
            do_save_full_model=False,
            do_save_adapter_fusion=True,
        )

        trainer.train()
        model_resume = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
        model_resume.add_adapter("adapter")
        model_resume.add_adapter("additional_adapter")
        model_resume.add_adapter_fusion(Fuse("adapter", "additional_adapter"))
        model_resume.set_active_adapters(Fuse("adapter", "additional_adapter"))
        trainer_resume = Trainer(
            model=model_resume,
            args=TrainingArguments(do_train=True, max_steps=1, output_dir="./examples"),
            train_dataset=train_dataset,
            do_save_full_model=False,
            do_save_adapters=True,
        )
        trainer_resume.train(resume_from_checkpoint=True)

        self.assertEqual(model.config.adapters.adapters, model_resume.config.adapters.adapters)

        for ((k1, v1), (k2, v2)) in zip(trainer.model.state_dict().items(), trainer_resume.model.state_dict().items()):
            self.assertEqual(k1, k2)
            if "adapter" in k1:
                self.assertTrue(torch.equal(v1, v2), k1)

    def test_auto_set_save_adapters(self):
        """Training an adapter should flip the Trainer's save flags to
        adapter-saving mode automatically."""
        model = BertForSequenceClassification(
            BertConfig(
                hidden_size=32,
                num_hidden_layers=4,
                num_attention_heads=4,
                intermediate_size=37,
            )
        )
        model.add_adapter("adapter")
        model.train_adapter("adapter")

        training_args = TrainingArguments(
            output_dir="./examples",
        )
        trainer = Trainer(
            model=model,
            args=training_args,
        )
        self.assertFalse(trainer.do_save_full_model)
        self.assertTrue(trainer.do_save_adapters)
        self.assertTrue(trainer.do_save_adapter_fusion)

    @slow
    def test_training_load_best_model_at_end_full_model(self):
        """load_best_model_at_end must leave the active adapters intact
        when the full model (not just adapters) is saved."""
        tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        data_args = GlueDataTrainingArguments(
            task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
        )
        train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")
        eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")

        model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
        model.add_adapter("adapter")
        model.train_adapter("adapter")

        training_args = TrainingArguments(
            output_dir="./examples",
            do_train=True,
            learning_rate=0.001,
            max_steps=1,
            save_steps=1,
            remove_unused_columns=False,
            load_best_model_at_end=True,
            evaluation_strategy="epoch",
            num_train_epochs=2,
        )
        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            do_save_adapters=False,
            do_save_full_model=True,
        )

        trainer.train()
        self.assertIsNotNone(trainer.model.active_adapters)
if __name__ == "__main__":
unittest.main()
| tests/test_adapter_trainer.py | 6,792 | create second model that should resume the training of the first | 64 | en | 0.945171 |
from sklearn.metrics import roc_curve, auc
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import json
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
from pylab import rcParams
# rcParams['figure.figsize'] = 20, 20
# Default figure size (inches) for every plot produced by this module.
rcParams['figure.figsize'] = 15, 15
def _evaluate_split(true_labels, pred_logits, classes, path, acc_csv, report_csv, plot_name):
    """Write report CSVs and ROC / confusion-matrix plots for one data split.

    Args:
        true_labels: Integer class labels for the split.
        pred_logits: Torch tensor of per-class scores, shape (n_samples, n_classes).
        classes: Ordered list of class names.
        path: Output directory (must end with a path separator).
        acc_csv: File name for the accuracy/averages CSV (appended to `path`).
        report_csv: File name for the per-class report CSV (appended to `path`).
        plot_name: Name passed to the plotting helpers (e.g. 'train-<name>').
    """
    pred_labels = pred_logits.argmax(dim=1)
    # Classification report as a DataFrame; the last three rows hold the
    # accuracy / macro-avg / weighted-avg summary rows.
    report = classification_report(true_labels, pred_labels, target_names=classes, output_dict=True)
    report_df = pd.DataFrame(report).transpose()
    report_df.tail(3).to_csv(path + acc_csv)
    per_class = report_df.drop(report_df.tail(3).index)
    per_class = per_class.sort_values(by=['f1-score'], ascending=False)
    per_class.to_csv(path + report_csv)
    # One-hot encode the ground truth for the per-class ROC curves.
    true_ohe = np.zeros((len(pred_logits), len(classes)))
    for idx, lbl in enumerate(true_labels):
        true_ohe[idx][lbl] = 1
    scores = pred_logits.detach().numpy()
    plot_multiclass_roc(true_ohe, scores, classes=classes, path=path, name=plot_name)
    # Confusion matrix heatmap.
    cm = confusion_matrix(true_labels, pred_labels)
    plot_confusion_matrix(cm, classes, path=path, name=plot_name)
def results(x_true, x_pred, y_true, y_pred, classes, params, path=None, name=None):
    """Persist evaluation artifacts (log, CSV reports, ROC and confusion-matrix
    plots) for the train and test splits of an experiment.

    Args:
        x_true / x_pred: Train-split labels and torch score tensor.
        y_true / y_pred: Test-split labels and torch score tensor.
        classes: Ordered list of class names.
        params: Experiment parameters; must contain 'model_type' and 'exp_name'
            when `path` or `name` is not given. Dumped to '<path>log.json'.
        path: Output directory; defaults to 'models/<model_type>/<exp_name>/'.
        name: Base plot name; defaults to '<model_type>-<exp_name>'.

    Returns:
        0 on success, 1 if `classes` is empty (nothing to evaluate).
    """
    # Default path and name independently (previously both had to be None,
    # so passing only one of them produced 'None...' file names).
    if path is None:
        path = f'models/{params["model_type"]}/{params["exp_name"]}/'
    if name is None:
        name = f'{params["model_type"]}-{params["exp_name"]}'
    if len(classes) == 0:
        return 1
    # Create output folder.
    Path(path).mkdir(parents=True, exist_ok=True)
    # Log the experiment parameters; `with` closes the handle (the original
    # left it open, so the JSON could stay unflushed).
    with open(f'{path}log.json', 'w') as log_file:
        json.dump(params, log_file, indent=4)
    # Train split (output file names kept identical to the original code).
    _evaluate_split(x_true, x_pred, classes, path,
                    'train_accuracy_report.csv', 'train_classification_report.csv',
                    'train-' + name)
    # Test split.
    _evaluate_split(y_true, y_pred, classes, path,
                    'test-accuracy_report.csv', 'test-classification_report.csv',
                    'test-' + name)
    return 0
def get_color(idx):
    """Map a rank index to a hex colour; each block of ten ranks shares one colour."""
    palette = (
        (10, '#f500dc'),
        (20, '#00f500'),
        (30, '#00e0f5'),
        (40, '#000cf5'),
        (50, '#f5e900'),
        (60, '#f58f00'),
    )
    for upper_bound, colour in palette:
        if idx < upper_bound:
            return colour
    # Everything ranked 60 or worse shares the final colour.
    return '#f50000'
def plot_multiclass_roc(y_true, y_pred, classes, path, name):
    """Plot one-vs-rest ROC curves for every class and save '<path><name>-roc.png'.

    Args:
        y_true: One-hot encoded ground truth, shape (n_samples, n_classes).
        y_pred: Per-class scores, shape (n_samples, n_classes).
        classes: Class names used in the legend.
        path: Output directory (expected to end with a path separator).
        name: Plot name embedded in the title and the output file name.
    """
    n_classes = len(classes)
    lw=1
    items = []
    labels = ['item_id', 'fpr', 'tpr', 'roc_auc']
    for i in range(n_classes):
        # One-vs-rest ROC curve and AUC for class i.
        fpr, tpr, _ = roc_curve(y_true[:, i], y_pred[:, i])
        roc_auc = auc(fpr, tpr)
        items.append((i, fpr, tpr, roc_auc))
    df = pd.DataFrame.from_records(items, columns=labels)
    # Sort by AUC so the best classes are drawn first and get the early palette colours.
    df = df.sort_values(by=['roc_auc'], ascending=False)
    for idx, (_, row) in enumerate(df.iterrows()):
        color = get_color(idx)
        plt.plot(row['fpr'], row['tpr'], lw=lw, color=color,
            label=f'{classes[row["item_id"]]} (area = {row["roc_auc"]:.2f})')
    # Diagonal chance line for reference.
    plt.plot([0, 1], [0, 1], 'k--', lw=lw)
    plt.xlim([-0.05, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title(f'Receiver operating characteristic for {name}')
    plt.legend(loc='lower right',
        fancybox=True, shadow=True, ncol=3, prop={'size': 12})
    plt.savefig(f'{path}{name}-roc.png', bbox_inches='tight')
    # Reset matplotlib's global figure state for the next plot.
    plt.clf()
    plt.close()
def plot_confusion_matrix(cm, classes, path, name, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """Render a confusion matrix as a heatmap and save it as '<path><name>-cm.png'.

    Args:
        cm: Square confusion matrix (e.g. from sklearn's confusion_matrix).
        classes: Class labels used for the axis tick marks.
        path: Output directory (expected to end with a path separator).
        name: Name embedded in the title and the output file name.
        normalize: If True, row-normalize so each true class sums to 1.
        title: Retained for backward compatibility only; the rendered title is
            always 'Confusion Matrix for <name>' (the original code set
            plt.title(title) and then unconditionally overwrote it below, so
            the parameter never affected the output).
        cmap: Matplotlib colormap for the heatmap.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.colorbar(shrink=0.75)
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=90)
    plt.yticks(tick_marks, classes)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    # Single title call; the redundant earlier plt.title(title) was dead code.
    plt.title(f'Confusion Matrix for {name}')
    plt.savefig(f'{path}{name}-cm.png', bbox_inches='tight')
    # Reset matplotlib's global figure state for the next plot.
    plt.clf()
    plt.close()
import datetime
import os
import re
import ujson
from django.conf import settings
from django.http import HttpResponse
from django.test import override_settings
from mock import MagicMock, patch
import urllib
from typing import Any, Dict, List
from zerver.lib.actions import do_create_user
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import (
HostRequestMock, queries_captured, get_user_messages
)
from zerver.lib.soft_deactivation import do_soft_deactivate_users
from zerver.lib.test_runner import slow
from zerver.models import (
get_realm, get_stream, get_user, UserProfile, UserMessage, Recipient,
flush_per_request_caches, DefaultStream, Realm,
)
from zerver.views.home import home, sent_time_in_epoch_seconds
class HomeTest(ZulipTestCase):
    """Tests for the logged-in home (/) view: rendered HTML, the page_params
    payload, 2FA handling, database query counts, and soft-deactivation
    behavior around unread counts."""
    def test_home(self) -> None:
        """Render / while logged in and verify both the HTML fragments and the
        exact set of page_params keys delivered to the client."""
        # Keep this list sorted!!!
        html_bits = [
            'Compose your message here...',
            'Exclude messages with topic',
            'Keyboard shortcuts',
            'Loading...',
            'Manage streams',
            'Narrow by topic',
            'Next message',
            'Search streams',
            'Welcome to Zulip',
            # Verify that the app styles get included
            'app-stubentry.js',
            'var page_params',
        ]
        # Keep this list sorted!!!
        expected_keys = [
            "alert_words",
            "avatar_source",
            "avatar_url",
            "avatar_url_medium",
            "bot_types",
            "can_create_streams",
            "cross_realm_bots",
            "custom_profile_field_types",
            "custom_profile_fields",
            "debug_mode",
            "default_language",
            "default_language_name",
            "delivery_email",
            "dense_mode",
            "development_environment",
            "email",
            "emojiset",
            "emojiset_choices",
            "enable_desktop_notifications",
            "enable_digest_emails",
            "enable_offline_email_notifications",
            "enable_offline_push_notifications",
            "enable_online_push_notifications",
            "enable_sounds",
            "enable_stream_desktop_notifications",
            "enable_stream_email_notifications",
            "enable_stream_push_notifications",
            "enable_stream_sounds",
            "enter_sends",
            "first_in_realm",
            "full_name",
            "furthest_read_time",
            "has_mobile_devices",
            "have_initial_messages",
            "high_contrast_mode",
            "hotspots",
            "initial_servertime",
            "is_admin",
            "is_guest",
            "jitsi_server_url",
            "language_list",
            "language_list_dbl_col",
            "last_event_id",
            "left_side_userlist",
            "login_page",
            "max_avatar_file_size",
            "max_icon_file_size",
            "max_message_id",
            "maxfilesize",
            "message_content_in_email_notifications",
            "muted_topics",
            "narrow",
            "narrow_stream",
            "needs_tutorial",
            "never_subscribed",
            "night_mode",
            "password_min_guesses",
            "password_min_length",
            "pm_content_in_desktop_notifications",
            "pointer",
            "poll_timeout",
            "presences",
            "prompt_for_invites",
            "queue_id",
            "realm_add_emoji_by_admins_only",
            "realm_allow_community_topic_editing",
            "realm_allow_edit_history",
            "realm_allow_message_deleting",
            "realm_allow_message_editing",
            "realm_authentication_methods",
            "realm_available_video_chat_providers",
            "realm_bot_creation_policy",
            "realm_bot_domain",
            "realm_bots",
            "realm_create_stream_by_admins_only",
            "realm_default_language",
            "realm_default_stream_groups",
            "realm_default_streams",
            "realm_default_twenty_four_hour_time",
            "realm_description",
            "realm_disallow_disposable_email_addresses",
            "realm_domains",
            "realm_email_auth_enabled",
            "realm_email_changes_disabled",
            "realm_embedded_bots",
            "realm_emoji",
            "realm_filters",
            "realm_google_hangouts_domain",
            "realm_icon_source",
            "realm_icon_url",
            "realm_inline_image_preview",
            "realm_inline_url_embed_preview",
            "realm_invite_by_admins_only",
            "realm_invite_required",
            "realm_is_zephyr_mirror_realm",
            "realm_mandatory_topics",
            "realm_message_content_delete_limit_seconds",
            "realm_message_content_edit_limit_seconds",
            "realm_message_retention_days",
            "realm_name",
            "realm_name_changes_disabled",
            "realm_name_in_notifications",
            "realm_non_active_users",
            "realm_notifications_stream_id",
            "realm_password_auth_enabled",
            "realm_presence_disabled",
            "realm_push_notifications_enabled",
            "realm_restricted_to_domain",
            "realm_send_welcome_emails",
            "realm_show_digest_email",
            "realm_signup_notifications_stream_id",
            "realm_uri",
            "realm_user_groups",
            "realm_users",
            "realm_video_chat_provider",
            "realm_waiting_period_threshold",
            "root_domain_uri",
            "save_stacktraces",
            "search_pills_enabled",
            "server_generation",
            "server_inline_image_preview",
            "server_inline_url_embed_preview",
            "stream_description_max_length",
            "stream_name_max_length",
            "subscriptions",
            "test_suite",
            "timezone",
            "translate_emoticons",
            "translation_data",
            "twenty_four_hour_time",
            "two_fa_enabled",
            "two_fa_enabled_user",
            "unread_msgs",
            "unsubscribed",
            "use_websockets",
            "user_id",
            "warn_no_email",
            "zulip_version",
        ]
        email = self.example_email("hamlet")
        # Verify fails if logged-out
        result = self.client_get('/')
        self.assertEqual(result.status_code, 302)
        self.login(email)
        # Create bot for realm_bots testing. Must be done before fetching home_page.
        bot_info = {
            'full_name': 'The Bot of Hamlet',
            'short_name': 'hambot',
        }
        self.client_post("/json/bots", bot_info)
        # Verify succeeds once logged-in
        flush_per_request_caches()
        with queries_captured() as queries:
            with patch('zerver.lib.cache.cache_set') as cache_mock:
                result = self._get_home_page(stream='Denmark')
        self.assert_length(queries, 41)
        self.assert_length(cache_mock.call_args_list, 7)
        html = result.content.decode('utf-8')
        for html_bit in html_bits:
            if html_bit not in html:
                raise AssertionError('%s not in result' % (html_bit,))
        page_params = self._get_page_params(result)
        actual_keys = sorted([str(k) for k in page_params.keys()])
        self.assertEqual(actual_keys, expected_keys)
        # TODO: Inspect the page_params data further.
        # print(ujson.dumps(page_params, indent=2))
        realm_bots_expected_keys = [
            'api_key',
            'avatar_url',
            'bot_type',
            'default_all_public_streams',
            'default_events_register_stream',
            'default_sending_stream',
            'email',
            'full_name',
            'is_active',
            'owner',
            'services',
            'user_id',
        ]
        realm_bots_actual_keys = sorted([str(key) for key in page_params['realm_bots'][0].keys()])
        self.assertEqual(realm_bots_actual_keys, realm_bots_expected_keys)
    def test_home_under_2fa_without_otp_device(self) -> None:
        with self.settings(TWO_FACTOR_AUTHENTICATION_ENABLED=True):
            self.login(self.example_email("iago"))
            result = self._get_home_page()
            # Should be successful because otp device is not configured.
            self.assertEqual(result.status_code, 200)
    def test_home_under_2fa_with_otp_device(self) -> None:
        with self.settings(TWO_FACTOR_AUTHENTICATION_ENABLED=True):
            user_profile = self.example_user('iago')
            self.create_default_device(user_profile)
            self.login(user_profile.email)
            result = self._get_home_page()
            # User should not log in because otp device is configured but
            # 2fa login function was not called.
            self.assertEqual(result.status_code, 302)
            self.login_2fa(user_profile)
            result = self._get_home_page()
            # Should be successful after calling 2fa login function.
            self.assertEqual(result.status_code, 200)
    def test_num_queries_for_realm_admin(self) -> None:
        # Verify number of queries for Realm admin isn't much higher than for normal users.
        self.login(self.example_email("iago"))
        flush_per_request_caches()
        with queries_captured() as queries:
            with patch('zerver.lib.cache.cache_set') as cache_mock:
                result = self._get_home_page()
                self.assertEqual(result.status_code, 200)
                self.assert_length(cache_mock.call_args_list, 6)
            self.assert_length(queries, 37)
    @slow("Creates and subscribes 10 users in a loop.  Should use bulk queries.")
    def test_num_queries_with_streams(self) -> None:
        """Ensure the home-page query count stays bounded as the number of
        subscribed/default streams grows (i.e. no per-stream N+1 queries)."""
        main_user = self.example_user('hamlet')
        other_user = self.example_user('cordelia')
        realm_id = main_user.realm_id
        self.login(main_user.email)
        # Try to make page-load do extra work for various subscribed
        # streams.
        for i in range(10):
            stream_name = 'test_stream_' + str(i)
            stream = self.make_stream(stream_name)
            DefaultStream.objects.create(
                realm_id=realm_id,
                stream_id=stream.id
            )
            for user in [main_user, other_user]:
                self.subscribe(user, stream_name)
        # Simulate hitting the page the first time to avoid some noise
        # related to initial logins.
        self._get_home_page()
        # Then for the second page load, measure the number of queries.
        flush_per_request_caches()
        with queries_captured() as queries2:
            result = self._get_home_page()
        self.assert_length(queries2, 35)
        # Do a sanity check that our new streams were in the payload.
        html = result.content.decode('utf-8')
        self.assertIn('test_stream_7', html)
    def _get_home_page(self, **kwargs: Any) -> HttpResponse:
        """Fetch / with the event-queue registration and user-event fetching
        stubbed out, so no Tornado server is needed."""
        with \
                patch('zerver.lib.events.request_event_queue', return_value=42), \
                patch('zerver.lib.events.get_user_events', return_value=[]):
            result = self.client_get('/', dict(**kwargs))
        return result
    def _get_page_params(self, result: HttpResponse) -> Dict[str, Any]:
        """Extract and parse the `var page_params = {...};` JSON blob embedded
        in the rendered home-page HTML."""
        html = result.content.decode('utf-8')
        lines = html.split('\n')
        page_params_line = [l for l in lines if re.match(r'^\s*var page_params', l)][0]
        page_params_json = page_params_line.split(' = ')[1].rstrip(';')
        page_params = ujson.loads(page_params_json)
        return page_params
    def _sanity_check(self, result: HttpResponse) -> None:
        '''
        Use this for tests that are geared toward specific edge cases, but
        which still want the home page to load properly.
        '''
        html = result.content.decode('utf-8')
        if 'Compose your message' not in html:
            raise AssertionError('Home page probably did not load.')
    def test_terms_of_service(self) -> None:
        """Users on an older ToS version should be shown the new-terms notice."""
        user = self.example_user('hamlet')
        email = user.email
        self.login(email)
        for user_tos_version in [None, '1.1', '2.0.3.4']:
            user.tos_version = user_tos_version
            user.save()
            with \
                    self.settings(TERMS_OF_SERVICE='whatever'), \
                    self.settings(TOS_VERSION='99.99'):
                result = self.client_get('/', dict(stream='Denmark'))
            html = result.content.decode('utf-8')
            self.assertIn('There are new Terms of Service', html)
    def test_terms_of_service_first_time_template(self) -> None:
        user = self.example_user('hamlet')
        email = user.email
        self.login(email)
        user.tos_version = None
        user.save()
        with \
                self.settings(FIRST_TIME_TOS_TEMPLATE='hello.html'), \
                self.settings(TOS_VERSION='99.99'):
            result = self.client_post('/accounts/accept_terms/')
            self.assertEqual(result.status_code, 200)
            self.assert_in_response("I agree to the", result)
            self.assert_in_response("most productive group chat", result)
    def test_accept_terms_of_service(self) -> None:
        """GET shows the ToS form; POSTing acceptance redirects to the home page."""
        email = self.example_email("hamlet")
        self.login(email)
        result = self.client_post('/accounts/accept_terms/')
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("I agree to the", result)
        result = self.client_post('/accounts/accept_terms/', {'terms': True})
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result['Location'], '/')
    def test_bad_narrow(self) -> None:
        """An invalid narrow stream is logged but the page still renders."""
        email = self.example_email("hamlet")
        self.login(email)
        with patch('logging.exception') as mock:
            result = self._get_home_page(stream='Invalid Stream')
        mock.assert_called_once()
        self.assertEqual(mock.call_args_list[0][0][0], "Narrow parsing exception")
        self._sanity_check(result)
    def test_bad_pointer(self) -> None:
        """An out-of-range pointer triggers a warning but not a failure."""
        user_profile = self.example_user('hamlet')
        email = user_profile.email
        user_profile.pointer = 999999
        user_profile.save()
        self.login(email)
        with patch('logging.warning') as mock:
            result = self._get_home_page()
        mock.assert_called_once_with('hamlet@zulip.com has invalid pointer 999999')
        self._sanity_check(result)
    def test_topic_narrow(self) -> None:
        email = self.example_email("hamlet")
        self.login(email)
        result = self._get_home_page(stream='Denmark', topic='lunch')
        self._sanity_check(result)
        html = result.content.decode('utf-8')
        self.assertIn('lunch', html)
    def test_notifications_stream(self) -> None:
        """realm_notifications_stream_id in page_params reflects the realm setting."""
        email = self.example_email("hamlet")
        realm = get_realm('zulip')
        realm.notifications_stream_id = get_stream('Denmark', realm).id
        realm.save()
        self.login(email)
        result = self._get_home_page()
        page_params = self._get_page_params(result)
        self.assertEqual(page_params['realm_notifications_stream_id'], get_stream('Denmark', realm).id)
    def create_bot(self, owner: UserProfile, bot_email: str, bot_name: str) -> UserProfile:
        """Create a default bot owned by `owner` (test fixture helper)."""
        user = do_create_user(
            email=bot_email,
            password='123',
            realm=owner.realm,
            full_name=bot_name,
            short_name=bot_name,
            bot_type=UserProfile.DEFAULT_BOT,
            bot_owner=owner
        )
        return user
    def create_non_active_user(self, realm: Realm, email: str, name: str) -> UserProfile:
        """Create a user and mark them inactive (cheaply, for fixtures)."""
        user = do_create_user(
            email=email,
            password='123',
            realm=realm,
            full_name=name,
            short_name=name,
        )
        # Doing a full-stack deactivation would be expensive here,
        # and we really only need to flip the flag to get a valid
        # test.
        user.is_active = False
        user.save()
        return user
    def test_signup_notifications_stream(self) -> None:
        email = self.example_email("hamlet")
        realm = get_realm('zulip')
        realm.signup_notifications_stream = get_stream('Denmark', realm)
        realm.save()
        self.login(email)
        result = self._get_home_page()
        page_params = self._get_page_params(result)
        self.assertEqual(page_params['realm_signup_notifications_stream_id'], get_stream('Denmark', realm).id)
    @slow('creating users and loading home page')
    def test_people(self) -> None:
        """Verify the three user buckets in page_params (active, non-active,
        bots) have the right members and per-record fields."""
        hamlet = self.example_user('hamlet')
        realm = get_realm('zulip')
        self.login(hamlet.email)
        for i in range(3):
            self.create_bot(
                owner=hamlet,
                bot_email='bot-%d@zulip.com' % (i,),
                bot_name='Bot %d' % (i,),
            )
        for i in range(3):
            self.create_non_active_user(
                realm=realm,
                email='defunct-%d@zulip.com' % (i,),
                name='Defunct User %d' % (i,),
            )
        result = self._get_home_page()
        page_params = self._get_page_params(result)
        '''
        We send three lists of users.  The first two below are disjoint
        lists of users, and the records we send for them have identical
        structure.
        The realm_bots bucket is somewhat redundant, since all bots will
        be in one of the first two buckets.  They do include fields, however,
        that normal users don't care about, such as default_sending_stream.
        '''
        buckets = [
            'realm_users',
            'realm_non_active_users',
            'realm_bots',
        ]
        for field in buckets:
            users = page_params[field]
            self.assertTrue(len(users) >= 3, field)
            for rec in users:
                self.assertEqual(rec['user_id'],
                                 get_user(rec['email'], realm).id)
                if field == 'realm_bots':
                    self.assertNotIn('is_bot', rec)
                    self.assertIn('is_active', rec)
                    self.assertIn('owner', rec)
                else:
                    self.assertIn('is_bot', rec)
                    self.assertNotIn('is_active', rec)
        active_emails = {p['email'] for p in page_params['realm_users']}
        non_active_emails = {p['email'] for p in page_params['realm_non_active_users']}
        bot_emails = {p['email'] for p in page_params['realm_bots']}
        self.assertIn(hamlet.email, active_emails)
        self.assertIn('defunct-1@zulip.com', non_active_emails)
        # Bots can show up in multiple buckets.
        self.assertIn('bot-2@zulip.com', bot_emails)
        self.assertIn('bot-2@zulip.com', active_emails)
        # Make sure nobody got mis-bucketed.
        self.assertNotIn(hamlet.email, non_active_emails)
        self.assertNotIn('defunct-1@zulip.com', active_emails)
        cross_bots = page_params['cross_realm_bots']
        self.assertEqual(len(cross_bots), 5)
        cross_bots.sort(key=lambda d: d['email'])
        notification_bot = self.notification_bot()
        by_email = lambda d: d['email']
        self.assertEqual(sorted(cross_bots, key=by_email), sorted([
            dict(
                user_id=get_user('new-user-bot@zulip.com', get_realm('zulip')).id,
                is_admin=False,
                email='new-user-bot@zulip.com',
                full_name='Zulip New User Bot',
                is_bot=True
            ),
            dict(
                user_id=get_user('emailgateway@zulip.com', get_realm('zulip')).id,
                is_admin=False,
                email='emailgateway@zulip.com',
                full_name='Email Gateway',
                is_bot=True
            ),
            dict(
                user_id=get_user('feedback@zulip.com', get_realm('zulip')).id,
                is_admin=False,
                email='feedback@zulip.com',
                full_name='Zulip Feedback Bot',
                is_bot=True
            ),
            dict(
                user_id=notification_bot.id,
                is_admin=False,
                email=notification_bot.email,
                full_name='Notification Bot',
                is_bot=True
            ),
            dict(
                user_id=get_user('welcome-bot@zulip.com', get_realm('zulip')).id,
                is_admin=False,
                email='welcome-bot@zulip.com',
                full_name='Welcome Bot',
                is_bot=True
            ),
        ], key=by_email))
    def test_new_stream(self) -> None:
        """Narrowing to a brand-new stream produces the expected empty-state
        page_params values."""
        user_profile = self.example_user("hamlet")
        stream_name = 'New stream'
        self.subscribe(user_profile, stream_name)
        self.login(user_profile.email)
        result = self._get_home_page(stream=stream_name)
        page_params = self._get_page_params(result)
        self.assertEqual(page_params['narrow_stream'], stream_name)
        self.assertEqual(page_params['narrow'], [dict(operator='stream', operand=stream_name)])
        self.assertEqual(page_params['pointer'], -1)
        self.assertEqual(page_params['max_message_id'], -1)
        self.assertEqual(page_params['have_initial_messages'], False)
    def test_invites_by_admins_only(self) -> None:
        """The 'Invite more users' link is hidden for non-admins when the realm
        restricts invitations to admins."""
        user_profile = self.example_user('hamlet')
        email = user_profile.email
        realm = user_profile.realm
        realm.invite_by_admins_only = True
        realm.save()
        self.login(email)
        self.assertFalse(user_profile.is_realm_admin)
        result = self._get_home_page()
        html = result.content.decode('utf-8')
        self.assertNotIn('Invite more users', html)
        user_profile.is_realm_admin = True
        user_profile.save()
        result = self._get_home_page()
        html = result.content.decode('utf-8')
        self.assertIn('Invite more users', html)
    def test_show_invites_for_guest_users(self) -> None:
        """Guest users never see the invite link, even when invites are open."""
        user_profile = self.example_user('polonius')
        email = user_profile.email
        realm = user_profile.realm
        realm.invite_by_admins_only = False
        realm.save()
        self.login(email)
        self.assertFalse(user_profile.is_realm_admin)
        self.assertFalse(get_realm('zulip').invite_by_admins_only)
        result = self._get_home_page()
        html = result.content.decode('utf-8')
        self.assertNotIn('Invite more users', html)
    def test_desktop_home(self) -> None:
        """/desktop_home redirects (permanently, then to /) as the desktop app expects."""
        email = self.example_email("hamlet")
        self.login(email)
        result = self.client_get("/desktop_home")
        self.assertEqual(result.status_code, 301)
        self.assertTrue(result["Location"].endswith("/desktop_home/"))
        result = self.client_get("/desktop_home/")
        self.assertEqual(result.status_code, 302)
        path = urllib.parse.urlparse(result['Location']).path
        self.assertEqual(path, "/")
    def test_apps_view(self) -> None:
        """/apps redirects to zulipchat.com unless ZILENCER is enabled locally."""
        result = self.client_get('/apps')
        self.assertEqual(result.status_code, 301)
        self.assertTrue(result['Location'].endswith('/apps/'))
        with self.settings(ZILENCER_ENABLED=False):
            result = self.client_get('/apps/')
        self.assertEqual(result.status_code, 301)
        self.assertTrue(result['Location'] == 'https://zulipchat.com/apps/')
        with self.settings(ZILENCER_ENABLED=True):
            result = self.client_get('/apps/')
        self.assertEqual(result.status_code, 200)
        html = result.content.decode('utf-8')
        self.assertIn('Apps for every platform.', html)
    def test_generate_204(self) -> None:
        email = self.example_email("hamlet")
        self.login(email)
        result = self.client_get("/api/v1/generate_204")
        self.assertEqual(result.status_code, 204)
    def test_message_sent_time(self) -> None:
        """sent_time_in_epoch_seconds round-trips a message's pub_date."""
        epoch_seconds = 1490472096
        pub_date = datetime.datetime.fromtimestamp(epoch_seconds)
        user_message = MagicMock()
        user_message.message.pub_date = pub_date
        self.assertEqual(sent_time_in_epoch_seconds(user_message), epoch_seconds)
    def test_handlebars_compile_error(self) -> None:
        """In development, a handlebars compile marker file yields a 500 page."""
        request = HostRequestMock()
        with self.settings(DEVELOPMENT=True, TEST_SUITE=False):
            with patch('os.path.exists', return_value=True):
                result = home(request)
        self.assertEqual(result.status_code, 500)
        self.assert_in_response('Error compiling handlebars templates.', result)
    def test_subdomain_homepage(self) -> None:
        """On the root domain (with a landing page) show marketing content;
        on a realm subdomain show the app."""
        email = self.example_email("hamlet")
        self.login(email)
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            with patch('zerver.views.home.get_subdomain', return_value=""):
                result = self._get_home_page()
            self.assertEqual(result.status_code, 200)
            self.assert_in_response('most productive group chat', result)
            with patch('zerver.views.home.get_subdomain', return_value="subdomain"):
                result = self._get_home_page()
            self._sanity_check(result)
    def send_test_message(self, content: str, sender_name: str='iago',
                          stream_name: str='Denmark', topic_name: str='foo') -> None:
        """Helper: send a stream message from `sender_name` (fixture setup)."""
        sender = self.example_email(sender_name)
        self.send_stream_message(sender, stream_name,
                                 content=content, topic_name=topic_name)
    def soft_activate_and_get_unread_count(self, stream: str='Denmark', topic: str='foo') -> int:
        """Load the home page narrowed to `stream`/`topic` (which soft-reactivates
        the user) and return the unread message count from page_params."""
        stream_narrow = self._get_home_page(stream=stream, topic=topic)
        page_params = self._get_page_params(stream_narrow)
        return page_params['unread_msgs']['count']
    def test_unread_count_user_soft_deactivation(self) -> None:
        # In this test we make sure if a soft deactivated user had unread
        # messages before deactivation they remain same way after activation.
        long_term_idle_user = self.example_user('hamlet')
        self.login(long_term_idle_user.email)
        message = 'Test Message 1'
        self.send_test_message(message)
        with queries_captured() as queries:
            self.assertEqual(self.soft_activate_and_get_unread_count(), 1)
        query_count = len(queries)
        user_msg_list = get_user_messages(long_term_idle_user)
        self.assertEqual(user_msg_list[-1].content, message)
        self.logout()
        do_soft_deactivate_users([long_term_idle_user])
        self.login(long_term_idle_user.email)
        message = 'Test Message 2'
        self.send_test_message(message)
        idle_user_msg_list = get_user_messages(long_term_idle_user)
        self.assertNotEqual(idle_user_msg_list[-1].content, message)
        with queries_captured() as queries:
            self.assertEqual(self.soft_activate_and_get_unread_count(), 2)
        # Test here for query count to be at least 5 greater than previous count
        # This will assure indirectly that add_missing_messages() was called.
        self.assertGreaterEqual(len(queries) - query_count, 5)
        idle_user_msg_list = get_user_messages(long_term_idle_user)
        self.assertEqual(idle_user_msg_list[-1].content, message)
    @slow("Loads home page data several times testing different cases")
    def test_multiple_user_soft_deactivations(self) -> None:
        """Repeatedly soft-deactivate/reactivate a user and verify unread counts
        plus the query-count signature of add_missing_messages()."""
        long_term_idle_user = self.example_user('hamlet')
        # We are sending this message to ensure that long_term_idle_user has
        # at least one UserMessage row.
        self.send_test_message('Testing', sender_name='hamlet')
        do_soft_deactivate_users([long_term_idle_user])
        message = 'Test Message 1'
        self.send_test_message(message)
        self.login(long_term_idle_user.email)
        with queries_captured() as queries:
            self.assertEqual(self.soft_activate_and_get_unread_count(), 2)
        query_count = len(queries)
        long_term_idle_user.refresh_from_db()
        self.assertFalse(long_term_idle_user.long_term_idle)
        idle_user_msg_list = get_user_messages(long_term_idle_user)
        self.assertEqual(idle_user_msg_list[-1].content, message)
        message = 'Test Message 2'
        self.send_test_message(message)
        with queries_captured() as queries:
            self.assertEqual(self.soft_activate_and_get_unread_count(), 3)
        # Test here for query count to be at least 5 less than previous count.
        # This will assure add_missing_messages() isn't repeatedly called.
        self.assertGreaterEqual(query_count - len(queries), 5)
        idle_user_msg_list = get_user_messages(long_term_idle_user)
        self.assertEqual(idle_user_msg_list[-1].content, message)
        self.logout()
        do_soft_deactivate_users([long_term_idle_user])
        message = 'Test Message 3'
        self.send_test_message(message)
        self.login(long_term_idle_user.email)
        with queries_captured() as queries:
            self.assertEqual(self.soft_activate_and_get_unread_count(), 4)
        query_count = len(queries)
        long_term_idle_user.refresh_from_db()
        self.assertFalse(long_term_idle_user.long_term_idle)
        idle_user_msg_list = get_user_messages(long_term_idle_user)
        self.assertEqual(idle_user_msg_list[-1].content, message)
        message = 'Test Message 4'
        self.send_test_message(message)
        with queries_captured() as queries:
            self.assertEqual(self.soft_activate_and_get_unread_count(), 5)
        self.assertGreaterEqual(query_count - len(queries), 5)
        idle_user_msg_list = get_user_messages(long_term_idle_user)
        self.assertEqual(idle_user_msg_list[-1].content, message)
        self.logout()
    def test_url_language(self) -> None:
        """A /de/ URL prefix does not override the user's saved default language."""
        user = self.example_user("hamlet")
        user.default_language = 'es'
        user.save()
        self.login(user.email)
        result = self._get_home_page()
        self.assertEqual(result.status_code, 200)
        with \
                patch('zerver.lib.events.request_event_queue', return_value=42), \
                patch('zerver.lib.events.get_user_events', return_value=[]):
            result = self.client_get('/de/')
        page_params = self._get_page_params(result)
        self.assertEqual(page_params['default_language'], 'es')
        # TODO: Verify that the actual language we're using in the
        # translation data is German.
    def test_translation_data(self) -> None:
        user = self.example_user("hamlet")
        user.default_language = 'es'
        user.save()
        self.login(user.email)
        result = self._get_home_page()
        self.assertEqual(result.status_code, 200)
        page_params = self._get_page_params(result)
        self.assertEqual(page_params['default_language'], 'es')
| zerver/tests/test_home.py | 31,081 | Use this for tests that are geared toward specific edge cases, but
which still want the home page to load properly.
Keep this list sorted!!! Verify that the app styles get included Keep this list sorted!!! Verify fails if logged-out Create bot for realm_bots testing. Must be done before fetching home_page. Verify succeeds once logged-in TODO: Inspect the page_params data further. print(ujson.dumps(page_params, indent=2)) Should be successful because otp device is not configured. User should not log in because otp device is configured but 2fa login function was not called. Should be successful after calling 2fa login function. Verify number of queries for Realm admin isn't much higher than for normal users. Try to make page-load do extra work for various subscribed streams. Simulate hitting the page the first time to avoid some noise related to initial logins. Then for the second page load, measure the number of queries. Do a sanity check that our new streams were in the payload. Doing a full-stack deactivation would be expensive here, and we really only need to flip the flag to get a valid test. Bots can show up in multiple buckets. Make sure nobody got mis-bucketed. In this test we make sure if a soft deactivated user had unread messages before deactivation they remain same way after activation. Test here for query count to be at least 5 greater than previous count This will assure indirectly that add_missing_messages() was called. We are sending this message to ensure that long_term_idle_user has at least one UserMessage row. Test here for query count to be at least 5 less than previous count. This will assure add_missing_messages() isn't repeatedly called. TODO: Verify that the actual language we're using in the translation data is German. | 1,774 | en | 0.92562 |
################################################
# backend.py is part of COVID.codelongandpros.repl.co
# You should have recieved a copy of the three-clause BSD license.
# If you did not, it is located at:
# https://opensource.org/licenses/BSD-3-Clause
# Made by Scott Little, with help from StackOverflow
################################################
import csv
import matplotlib.pyplot as plt
from imageio import imwrite
def get_file():
url = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv'
import requests
r = requests.get(url)
with open('cases.csv', 'wb') as f:
f.write(r.content)
# Retrieve HTTP meta-data
print(r.status_code)
print(r.headers['content-type'])
print(r.encoding)
def get_cases(stat):
x = []
y = []
d = [0]
dx = [0]
if len(stat) == 0:
return 1
dat = 0
state = stat
reader = csv.DictReader(open("cases.csv"))
for raw in reader:
if raw['state'] == state:
dat+=1
x.append(dat)
dx.append(dat)
y.append(raw['cases'])
d.append(raw['deaths'])
else:
continue
fig, axs = plt.subplots(2,figsize=(12,10))
fig.suptitle(f"COVID-19 Cases/Deaths in {stat}")
axs[0].plot(x, y)
axs[1].plot(dx, d)
axs[0].set_ylabel('Cases')
axs[1].set_ylabel("Deaths")
for axe in axs:
axe.set_xlabel("Days since 2020-01-21")
plt.savefig('static/plots/plot.png', bbox_inches='tight', dpi=400)
return 0
def overwrite():
    """Reset the plot image by writing a 100x100 all-white RGB square over it."""
    import numpy as np

    blank = np.full([100, 100, 3], 255, dtype=np.uint8)
    imwrite('static/plots/plot.png', blank)
import numpy as np
def non_max_suppression_fast(boxes, overlapThresh):
    """Greedy non-maximum suppression over scored bounding boxes (vectorized).

    Parameters
    ----------
    boxes : ndarray of shape (N, 5)
        Rows of ``[x1, y1, x2, y2, score]``.
    overlapThresh : float
        Any remaining box whose overlap ratio with an already-picked box
        exceeds this threshold is suppressed.

    Returns
    -------
    ndarray (or ``[]`` for empty input)
        The surviving boxes as floats, in descending-score pick order.
    """
    # if there are no boxes, return an empty list
    if len(boxes) == 0:
        return []
    # if the boxes are integers, convert them to floats (due to divisions)
    if boxes.dtype.kind == "i":
        boxes = boxes.astype("float")
    # initialize the list of picked indexes
    pick = []
    # grab the coordinates of the bounding boxes
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    scores = boxes[:, 4]
    # compute the area of each box, and sort the indexes by ASCENDING score
    # so the highest-scoring remaining box is always at the END of `idxs`.
    # (BUG FIX: the original sorted descending with [::-1] and then still
    # popped from the end, which processed the LOWEST scores first and kept
    # the wrong boxes.)
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    idxs = np.argsort(scores)
    # keep looking while some indexes still remain in the indexes list
    while len(idxs) > 0:
        # grab the highest-scoring remaining index and keep it
        last = len(idxs) - 1
        i = idxs[last]
        pick.append(i)
        # intersection of the picked box with every other remaining box
        xx1 = np.maximum(x1[i], x1[idxs[:last]])
        yy1 = np.maximum(y1[i], y1[idxs[:last]])
        xx2 = np.minimum(x2[i], x2[idxs[:last]])
        yy2 = np.minimum(y2[i], y2[idxs[:last]])
        # compute the width and height of the overlap
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        # overlap ratio: intersection over each candidate's own area
        overlap = (w * h) / area[idxs[:last]]
        # drop the picked box plus everything overlapping it too much
        idxs = np.delete(idxs, np.concatenate(([last],
            np.where(overlap > overlapThresh)[0])))
    # return only the boxes that were picked
    return boxes[pick].astype("float")
| coffee-maturation/src/models/non_maximum.py | 1,713 | if there are no boxes, return an empty list if the boxes are integers, convert them to floats (due to divisions) initialize the list of picked indexes grab the coordinates of the bounding boxes compute the area of the boxes and sort the boxes by their score keep looking while some indexes still remain in the indexes list grab the last index in the indexes list and add its value to the list of picked indexes find the largest coordinates for the start of the overlap area and the smallest coordinates for the end compute the width and height of the overlap compute the ratio of overlap delete all indexes from the list that have an overlap over the threshold return only the boxes that were picked | 699 | en | 0.937786 |
import time
import pyperclip
import csv
import subprocess
import serial
# Serial link to the attached microcontroller -- presumably the device that
# dispenses bubbles on a correct answer (TODO confirm hardware protocol).
ser = serial.Serial('/dev/cu.usbmodemFD131', baudrate=9600, timeout=None)
clipboard_old = pyperclip.paste()  # baseline clipboard contents at startup
musicFile = "music/yes_1.mp3"  # success jingle played via afplay
musicFile_rick = "music/rickroll.mp3"  # easter-egg track for every fifth failure
failText = "Fail. No, bubbles, for you."
rickText = "Fail. But don't worry. I'm never, gonna give you up."
# Local station logic: poll the clipboard for galaxy IDs and judge the
# copied galaxy's stored classification against this station's bar/bulge flags.
def check_status(bar=1,bulge=0):
    """Block forever, reacting each time a new galaxy ID appears on the clipboard.

    bar, bulge -- 1/0 flags describing the classification this station
    represents; a copied galaxy "matches" when its stored flags equal these.
    Side effects: writes to the serial port and plays audio via
    `afplay`/`say` (macOS-only commands).
    """
    numFails = 0
    clipboard_old = pyperclip.paste()
    while True:
        clipboard = pyperclip.paste()
        if (clipboard != clipboard_old):
            print "New ID!",clipboard
            clipboard_old = clipboard
            # Load the stored classification for this galaxy from the local
            # lookup table (CSV of id, bar, bulge flags, each 1 or 0).
            classification=read_object_classification(clipboard_old) #in the form [id,bar,bulge]
            #classification=['1ds4',1,0] #example of a barred galaxy withotu a bulge
            print "Galaxy data",classification,"Location data",bar,bulge
            # NOTE(review): write_example_file writes columns Id,bulge,bar, so
            # classification[1] would be the *bulge* column and [2] the *bar*
            # column -- these two comparisons look swapped; confirm against
            # the real data file.
            status=bar==classification[1] and bulge==classification[2]
            if status:
                print "Success :) Do the things!"
                ser.write('1\n')  # presumably: turn the bubble machine on -- TODO confirm
                return_code = subprocess.call(["afplay", musicFile])
                ser.write('0\n')  # presumably: turn it back off
                time.sleep(0.5)
                ser.write('M\n')  # second effect held on for 8 seconds -- TODO confirm meaning
                time.sleep(8)
                ser.write('N\n')
            else:
                numFails += 1
                if (numFails%5 != 0):
                    print "Fail :( No bubbles for you"
                    return_code = subprocess.call(["say", failText])
                else:
                    # Every fifth failure plays the rickroll instead.
                    print "Fail :( No bubbles for you, but here's a Rickroll anyway..."
                    return_code = subprocess.call(["say", rickText])
                    #ser.write('1\n')
                    return_code = subprocess.call(["afplay", musicFile_rick])
                    #ser.write('0\n')
            print '-------------'
        time.sleep(0.5)  # clipboard polling interval
# HTTP headers for a JSON API -- appears unused in this file; kept for reference.
headers={'Content-Type':'application/json','Accept':'application/vnd.api+json; version=1'}
def read_object_classification(clipboard_old):
    """Look up a galaxy ID in classification_data.csv.

    Returns [id, flag, flag] with the ID as a string and the two flag
    columns converted to ints.  When the ID is unknown, returns the dummy
    row ['0000000', 2, 2], whose flags (2) can never match a real 0/1
    classification.
    """
    filename="classification_data.csv"
    with open(filename) as f:
        reader=csv.reader(f,delimiter=',')
        next(reader)  # skip the header row
        for row in reader:
            if row[0]==str(clipboard_old):
                # Keep the ID as a string (preserves leading zeros and is
                # consistent with the dummy row below); convert only the
                # numeric flag columns.  The original int()-converted the
                # ID too, which callers never use numerically.
                return [row[0]] + [int(item) for item in row[1:]]
    print("Id not found. Return dummy data")
    return ['0000000',2,2]
def write_example_file():
    """Write a small sample classification_data.csv (columns: Id, bulge, bar)."""
    filename = "classification_data.csv"
    # (galaxy id, bulge flag, bar flag) triples for six demo galaxies.
    sample_rows = [
        ('1243233', 0, 1),
        ('2345473', 0, 0),
        ('2233432', 0, 0),
        ('9987679', 1, 1),
        ('3345363', 1, 0),
        ('3934322', 1, 1),
    ]
    with open(filename, 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['Id', 'bulge', 'bar'])
        for galaxy_id, bulge_flag, bar_flag in sample_rows:
            writer.writerow([galaxy_id, bulge_flag, bar_flag])
| qrcodetoclassification.py | 2,959 | local informationLoad data object for that classification Have lookup table of the form id, bar, bulge where bar&bulge are out of 1,0in the form [id,bar,bulge]classification=['1ds4',1,0] example of a barred galaxy withotu a bulgeser.write('1\n')ser.write('0\n') | 265 | en | 0.650204 |
import numpy as np
import pytest
from astropy.cosmology import default_cosmology
from skypy.linear.eisenstein_hu import power_spectrum
def test_eisenstein_hu():
    """Test the Eisenstein & Hu linear matter power spectrum, with and
    without wiggles, using the astropy default cosmology."""
    cosmology = default_cosmology.get()
    A_s = 2.1982e-09
    n_s = 0.969453
    kwmap = 0.02
    # A scalar wavenumber must produce a scalar power value, for both the
    # wiggle and no-wiggle transfer functions.
    for wiggle in (True, False):
        assert np.isscalar(
            power_spectrum(1, A_s, n_s, cosmology, kwmap, wiggle=wiggle)
        )
    # An array input must produce an array output of the same shape.
    shape = (10,)
    k_array = np.random.uniform(size=shape)
    for wiggle in (True, False):
        out = power_spectrum(k_array, A_s, n_s, cosmology, kwmap, wiggle=wiggle)
        assert out.shape == shape
    # Compare against values precomputed with CosmoSIS for the default cosmology.
    wavenumber = np.logspace(-3, 1, num=5, base=10.0)
    pk_w = power_spectrum(wavenumber, A_s, n_s, cosmology, kwmap, wiggle=True)
    pk_nw = power_spectrum(wavenumber, A_s, n_s, cosmology, kwmap, wiggle=False)
    expected_w = np.array([6.47460158e+03, 3.71610099e+04, 9.65702614e+03,
                           1.14604456e+02, 3.91399918e-01])
    expected_nw = np.array([6.47218600e+03, 3.77330704e+04, 1.00062077e+04,
                            1.13082980e+02, 3.83094714e-01])
    assert np.allclose(pk_w, expected_w)
    assert np.allclose(pk_nw, expected_nw)
    # Non-positive wavenumbers (scalar, or anywhere in an array) must raise.
    for bad_wavenumber in (0, [0, 1, -2, 3]):
        for wiggle in (True, False):
            with pytest.raises(ValueError):
                power_spectrum(bad_wavenumber, A_s, n_s, cosmology, kwmap,
                               wiggle=wiggle)
| skypy/linear/tests/test_eisenstein_hu.py | 2,794 | Test Eisenstein & Hu Linear matter power spectrum with
and without wiggles using astropy default cosmology
Test that a scalar input gives a scalar output Test that an array input gives an array output Test pk against precomputed values for default_cosmology Test for failure when wavenumber <= 0 | 297 | en | 0.579481 |
"""Contains the CLI."""
import sys
import json
import logging
import oyaml as yaml
import click
# For the profiler
import pstats
from io import StringIO
# To enable colour cross platform
import colorama
from sqlfluff.cli.formatters import (
format_rules,
format_violation,
format_linting_result_header,
format_linting_stats,
colorize,
format_dialect_warning,
format_dialects,
CallbackFormatter,
)
from sqlfluff.cli.helpers import cli_table, get_package_version
# Import from sqlfluff core.
from sqlfluff.core import (
Linter,
FluffConfig,
SQLLintError,
dialect_selector,
dialect_readout,
TimingSummary,
)
class RedWarningsFilter(logging.Filter):
    """This filter makes all warnings or above red."""

    def filter(self, record):
        """Filter any warnings (or above) to turn them red."""
        is_warning_or_worse = record.levelno >= logging.WARNING
        if is_warning_or_worse:
            record.msg = colorize(record.msg, "red") + " "
        # Always keep the record; we only recolour it.
        return True
def set_logging_level(verbosity, logger=None, stderr_output=False):
    """Set up logging for the CLI.

    We either set up global logging based on the verbosity
    or, if `logger` is specified, we only limit to a single
    sqlfluff logger. Verbosity is applied in the same way.

    Implementation: If `logger` is not specified, the handler
    is attached to the `sqlfluff` logger. If it is specified
    then it attaches the the logger in question. In addition
    if `logger` is specified, then that logger will also
    not propagate.
    """
    fluff_logger = logging.getLogger("sqlfluff")
    # Don't propagate logging
    fluff_logger.propagate = False
    # Enable colorama
    colorama.init()
    # Route log output to stderr when requested (e.g. for machine-readable stdout).
    stream = sys.stderr if stderr_output else sys.stdout
    handler = logging.StreamHandler(stream=stream)
    # NB: the unicode character at the beginning is to squash any badly
    # tamed ANSI colour statements, and return us to normality.
    handler.setFormatter(logging.Formatter("\u001b[0m%(levelname)-10s %(message)s"))
    # Set up a handler to colour warnings red.
    handler.addFilter(RedWarningsFilter())
    # Attach to the focused sub-logger when requested, otherwise globally.
    if logger:
        target_logger = logging.getLogger("sqlfluff.{0}".format(logger))
    else:
        target_logger = fluff_logger
    target_logger.addHandler(handler)
    # NB: We treat the parser logger slightly differently because it's noisier.
    # It's important that we set levels for all each time so
    # that we don't break tests by changing the granularity
    # between tests.
    parser_logger = logging.getLogger("sqlfluff.parser")
    if verbosity < 3:
        fluff_level, parser_level = logging.WARNING, logging.NOTSET
    elif verbosity == 3:
        fluff_level, parser_level = logging.INFO, logging.WARNING
    elif verbosity == 4:
        fluff_level, parser_level = logging.DEBUG, logging.INFO
    else:
        fluff_level, parser_level = logging.DEBUG, logging.DEBUG
    fluff_logger.setLevel(fluff_level)
    parser_logger.setLevel(parser_level)
def common_options(f):
    """Add common options to commands via a decorator.

    These are applied to all of the cli commands.
    """
    # Apply each decorator in the same order as chained application;
    # click registers options in application order.
    wrappers = (
        click.version_option(),
        click.option(
            "-v",
            "--verbose",
            count=True,
            help=(
                "Verbosity, how detailed should the output be. This is *stackable*, so `-vv`"
                " is more verbose than `-v`. For the most verbose option try `-vvvv` or `-vvvvv`."
            ),
        ),
        click.option(
            "-n",
            "--nocolor",
            is_flag=True,
            help="No color - if this is set then the output will be without ANSI color codes.",
        ),
    )
    for wrap in wrappers:
        f = wrap(f)
    return f
def core_options(f):
    """Add core operation options to commands via a decorator.

    These are applied to the main (but not all) cli commands like
    `parse`, `lint` and `fix`.
    """
    # NB: each click.option(...) call returns a decorator which is applied
    # to `f` immediately; defaults of None mean "not overridden" so that
    # get_config can filter them out of the FluffConfig overrides.
    f = click.option(
        "--dialect", default=None, help="The dialect of SQL to lint (default=ansi)"
    )(f)
    f = click.option(
        "--templater", default=None, help="The templater to use (default=jinja)"
    )(f)
    f = click.option(
        "--rules",
        default=None,
        # short_help='Specify a particular rule, or comma separated rules, to check',
        help=(
            "Narrow the search to only specific rules. For example "
            "specifying `--rules L001` will only search for rule `L001` (Unnecessary "
            "trailing whitespace). Multiple rules can be specified with commas e.g. "
            "`--rules L001,L002` will specify only looking for violations of rule "
            "`L001` and rule `L002`."
        ),
    )(f)
    f = click.option(
        "--exclude-rules",
        default=None,
        # short_help='Specify a particular rule, or comma separated rules to exclude',
        help=(
            "Exclude specific rules. For example "
            "specifying `--exclude-rules L001` will remove rule `L001` (Unnecessary "
            "trailing whitespace) from the set of considered rules. This could either "
            "be the whitelist, or the general set if there is no specific whitelist. "
            "Multiple rules can be specified with commas e.g. "
            "`--exclude-rules L001,L002` will exclude violations of rule "
            "`L001` and rule `L002`."
        ),
    )(f)
    f = click.option(
        "--ignore",
        default=None,
        help=(
            "Ignore particular families of errors so that they don't cause a failed "
            "run. For example `--ignore parsing` would mean that any parsing errors "
            "are ignored and don't influence the success or fail of a run. Multiple "
            "options are possible if comma separated e.g. `--ignore parsing,templating`."
        ),
    )(f)
    f = click.option(
        "--bench",
        is_flag=True,
        help="Set this flag to engage the benchmarking tool output.",
    )(f)
    f = click.option(
        "--logger",
        type=click.Choice(["parser", "linter", "rules"], case_sensitive=False),
        help="Choose to limit the logging to one of the loggers.",
    )(f)
    return f
def get_config(**kwargs):
    """Get a config object from kwargs."""
    dialect = kwargs.get("dialect")
    if dialect:
        try:
            # We're just making sure it exists at this stage - it will be fetched properly in the linter
            dialect_selector(dialect)
        except KeyError:
            click.echo("Error: Unknown dialect {0!r}".format(dialect))
            sys.exit(66)
    # Instantiate a config object (filtering out the nulls)
    overrides = {key: value for key, value in kwargs.items() if value is not None}
    return FluffConfig.from_root(overrides=overrides)
def get_linter_and_formatter(cfg, silent=False):
    """Get a linter object given a config."""
    try:
        # We're just making sure it exists at this stage - it will be fetched properly in the linter
        dialect_selector(cfg.get("dialect"))
    except KeyError:
        click.echo("Error: Unknown dialect {0!r}".format(cfg.get("dialect")))
        sys.exit(66)
    if silent:
        # Silent mode: the Linter carries no formatter, and the returned
        # formatter swallows all output.
        formatter = CallbackFormatter(callback=lambda m: None, verbosity=0)
        return Linter(config=cfg), formatter
    # Normal mode: echo through click, respecting the colour setting.
    formatter = CallbackFormatter(
        callback=lambda m: click.echo(m, color=cfg.get("color")),
        verbosity=cfg.get("verbose"),
        output_line_length=cfg.get("output_line_length"),
    )
    return Linter(config=cfg, formatter=formatter), formatter
# Top-level click group: all subcommands (version, rules, dialects, lint,
# fix, parse) attach to this entry point.
@click.group()
@click.version_option()
def cli():
    """Sqlfluff is a modular sql linter for humans."""
@cli.command()
@common_options
def version(**kwargs):
    """Show the version of sqlfluff."""
    config = get_config(**kwargs)
    if config.get("verbose") > 0:
        # Verbose mode: show the full config readout via the formatter.
        linter, formatter = get_linter_and_formatter(config)
        formatter.dispatch_config(linter)
    else:
        # Otherwise just print the bare package version.
        click.echo(get_package_version(), color=config.get("color"))
@cli.command()
@common_options
def rules(**kwargs):
    """Show the current rules in use."""
    config = get_config(**kwargs)
    linter, _ = get_linter_and_formatter(config)
    click.echo(format_rules(linter), color=config.get("color"))
@cli.command()
@common_options
def dialects(**kwargs):
    """Show the current dialects available."""
    config = get_config(**kwargs)
    click.echo(format_dialects(dialect_readout), color=config.get("color"))
@cli.command()
@common_options
@core_options
@click.option(
    "-f",
    "--format",
    "format",
    default="human",
    type=click.Choice(["human", "json", "yaml"], case_sensitive=False),
    help="What format to return the lint result in.",
)
@click.option(
    "--nofail",
    is_flag=True,
    help=(
        "If set, the exit code will always be zero, regardless of violations "
        "found. This is potentially useful during rollout."
    ),
)
@click.option(
    "--disregard-sqlfluffignores",
    is_flag=True,
    help=("Perform the operation regardless of .sqlfluffignore configurations"),
)
@click.option(
    "-p",
    "--parallel",
    type=int,
    default=1,
    help="If set to a value higher than 1, run SQLFluff in parallel, "
    "speeding up processing.",
)
@click.argument("paths", nargs=-1)
def lint(
    paths,
    parallel,
    format,
    nofail,
    disregard_sqlfluffignores,
    logger=None,
    bench=False,
    **kwargs,
):
    """Lint SQL files via passing a list of files or using stdin.

    PATH is the path to a sql file or directory to lint. This can be either a
    file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-')
    character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will
    be interpreted like passing the current working directory as a path argument.

    Linting SQL files:

        sqlfluff lint path/to/file.sql
        sqlfluff lint directory/of/sql/files

    Linting a file via stdin (note the lone '-' character):

        cat path/to/file.sql | sqlfluff lint -
        echo 'select col from tbl' | sqlfluff lint -
    """
    c = get_config(**kwargs)
    # json/yaml output must stay machine-readable, so suppress human output.
    non_human_output = format in ("json", "yaml")
    lnt, formatter = get_linter_and_formatter(c, silent=non_human_output)
    verbose = c.get("verbose")
    formatter.dispatch_config(lnt)
    # Set up logging (to stderr when stdout carries machine-readable output).
    set_logging_level(verbosity=verbose, logger=logger, stderr_output=non_human_output)
    # add stdin if specified via lone '-'
    if ("-",) == paths:
        result = lnt.lint_string_wrapped(sys.stdin.read(), fname="stdin")
    else:
        # Output the results as we go
        if verbose >= 1:
            click.echo(format_linting_result_header())
        try:
            result = lnt.lint_paths(
                paths,
                ignore_non_existent_files=False,
                ignore_files=not disregard_sqlfluffignores,
                parallel=parallel,
            )
        except IOError:
            click.echo(
                colorize(
                    "The path(s) {0!r} could not be accessed. Check it/they exist(s).".format(
                        paths
                    ),
                    "red",
                )
            )
            sys.exit(1)
        # Output the final stats
        if verbose >= 1:
            click.echo(format_linting_stats(result, verbose=verbose))
    if format == "json":
        click.echo(json.dumps(result.as_records()))
    elif format == "yaml":
        click.echo(yaml.dump(result.as_records()))
    if bench:
        click.echo("==== overall timings ====")
        timing_summary = result.timing_summary()
        for step in timing_summary:
            click.echo(f"=== {step} ===")
            click.echo(cli_table(timing_summary[step].items()))
    if not nofail:
        if not non_human_output:
            click.echo("All Finished 📜 🎉!")
        # The process exit code reflects the linting result.
        sys.exit(result.stats()["exit code"])
    else:
        # --nofail: always succeed regardless of violations.
        sys.exit(0)
def do_fixes(lnt, result, formatter=None, **kwargs):
    """Actually do the fixes."""
    click.echo("Persisting Changes...")
    persist_outcomes = result.persist_changes(formatter=formatter, **kwargs)
    # Success only when every single file persisted cleanly.
    if not all(persist_outcomes.values()):
        click.echo("Done. Some operations failed. Please check your files to confirm.")
        click.echo("Some errors cannot be fixed or there is another error blocking it.")
        return False
    click.echo("Done. Please check your files to confirm.")
    return True
@cli.command()
@common_options
@core_options
@click.option(
    "-f",
    "--force",
    is_flag=True,
    help=(
        "skip the confirmation prompt and go straight to applying "
        "fixes. **Use this with caution.**"
    ),
)
@click.option(
    "--fixed-suffix", default=None, help="An optional suffix to add to fixed files."
)
@click.option(
    "--parallel",
    type=int,
    default=1,
    help="If set to a value higher than 1, run SQLFluff in parallel, "
    "speeding up processing.",
)
@click.argument("paths", nargs=-1)
def fix(force, paths, parallel, bench=False, fixed_suffix="", logger=None, **kwargs):
    """Fix SQL files.

    PATH is the path to a sql file or directory to lint. This can be either a
    file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-')
    character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will
    be interpreted like passing the current working directory as a path argument.
    """
    # some quick checks
    fixing_stdin = ("-",) == paths
    c = get_config(**kwargs)
    lnt, formatter = get_linter_and_formatter(c, silent=fixing_stdin)
    verbose = c.get("verbose")
    formatter.dispatch_config(lnt)
    # Set up logging (to stderr when stdout carries the fixed SQL).
    set_logging_level(verbosity=verbose, logger=logger, stderr_output=fixing_stdin)
    # handle stdin case. should output formatted sql to stdout and nothing else.
    if fixing_stdin:
        stdin = sys.stdin.read()
        result = lnt.lint_string_wrapped(stdin, fname="stdin", fix=True)
        stdout = result.paths[0].files[0].fix_string()[0]
        click.echo(stdout, nl=False)
        sys.exit()
    # Lint the paths (not with the fix argument at this stage), outputting as we go.
    click.echo("==== finding fixable violations ====")
    try:
        result = lnt.lint_paths(
            paths, fix=True, ignore_non_existent_files=False, parallel=parallel
        )
    except IOError:
        click.echo(
            colorize(
                "The path(s) {0!r} could not be accessed. Check it/they exist(s).".format(
                    paths
                ),
                "red",
            )
        )
        sys.exit(1)
    # NB: We filter to linting violations here, because they're
    # the only ones which can be potentially fixed.
    if result.num_violations(types=SQLLintError, fixable=True) > 0:
        click.echo("==== fixing violations ====")
        click.echo(
            "{0} fixable linting violations found".format(
                result.num_violations(types=SQLLintError, fixable=True)
            )
        )
        if force:
            click.echo(colorize("FORCE MODE", "red") + ": Attempting fixes...")
            success = do_fixes(
                lnt,
                result,
                formatter,
                types=SQLLintError,
                fixed_file_suffix=fixed_suffix,
            )
            if not success:
                sys.exit(1)
        else:
            click.echo(
                "Are you sure you wish to attempt to fix these? [Y/n] ", nl=False
            )
            c = click.getchar().lower()
            click.echo("...")
            # A bare Enter ('\r'/'\n') counts as the default answer, Yes.
            if c in ("y", "\r", "\n"):
                click.echo("Attempting fixes...")
                success = do_fixes(
                    lnt,
                    result,
                    formatter,
                    types=SQLLintError,
                    fixed_file_suffix=fixed_suffix,
                )
                if not success:
                    sys.exit(1)
                else:
                    click.echo("All Finished 📜 🎉!")
            elif c == "n":
                click.echo("Aborting...")
            else:
                click.echo("Invalid input, please enter 'Y' or 'N'")
                click.echo("Aborting...")
    else:
        click.echo("==== no fixable linting violations found ====")
        if result.num_violations(types=SQLLintError, fixable=False) > 0:
            click.echo(
                " [{0} unfixable linting violations found]".format(
                    result.num_violations(types=SQLLintError, fixable=False)
                )
            )
        click.echo("All Finished 📜 🎉!")
    if bench:
        click.echo("==== overall timings ====")
        timing_summary = result.timing_summary()
        for step in timing_summary:
            click.echo(f"=== {step} ===")
            click.echo(cli_table(timing_summary[step].items()))
    sys.exit(0)
def quoted_presenter(dumper, data):
    """Re-presenter which always double quotes string values needing escapes."""
    needs_escaping = any(char in data for char in ("\n", "\t", "'"))
    scalar_style = '"' if needs_escaping else ""
    return dumper.represent_scalar("tag:yaml.org,2002:str", data, style=scalar_style)
@cli.command()
@common_options
@core_options
@click.argument("path", nargs=1)
@click.option(
    "--recurse", default=0, help="The depth to recursively parse to (0 for unlimited)"
)
@click.option(
    "-c",
    "--code-only",
    is_flag=True,
    help="Output only the code elements of the parse tree.",
)
@click.option(
    "-f",
    "--format",
    default="human",
    type=click.Choice(["human", "json", "yaml"], case_sensitive=False),
    help="What format to return the parse result in.",
)
@click.option(
    "--profiler", is_flag=True, help="Set this flag to engage the python profiler."
)
@click.option(
    "--nofail",
    is_flag=True,
    help=(
        "If set, the exit code will always be zero, regardless of violations "
        "found. This is potentially useful during rollout."
    ),
)
def parse(path, code_only, format, profiler, bench, nofail, logger=None, **kwargs):
    """Parse SQL files and just spit out the result.

    PATH is the path to a sql file or directory to lint. This can be either a
    file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-')
    character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will
    be interpreted like passing the current working directory as a path argument.
    """
    c = get_config(**kwargs)
    # We don't want anything else to be logged if we want json or yaml output
    non_human_output = format in ("json", "yaml")
    lnt, formatter = get_linter_and_formatter(c, silent=non_human_output)
    verbose = c.get("verbose")
    recurse = c.get("recurse")
    formatter.dispatch_config(lnt)
    # Set up logging.
    set_logging_level(verbosity=verbose, logger=logger, stderr_output=non_human_output)
    # TODO: do this better
    nv = 0  # running count of violations across all parsed files
    if profiler:
        # Set up the profiler if required
        try:
            import cProfile
        except ImportError:
            click.echo("The cProfiler is not available on your platform.")
            sys.exit(1)
        pr = cProfile.Profile()
        pr.enable()
    try:
        # handle stdin if specified via lone '-'
        if "-" == path:
            # put the parser result in a list to iterate later
            result = [
                lnt.parse_string(
                    sys.stdin.read(), "stdin", recurse=recurse, config=lnt.config
                ),
            ]
        else:
            # A single path must be specified for this command
            result = lnt.parse_path(path, recurse=recurse)
        # iterative print for human readout
        if format == "human":
            timing = TimingSummary()
            for parsed_string in result:
                timing.add(parsed_string.time_dict)
                if parsed_string.tree:
                    click.echo(parsed_string.tree.stringify(code_only=code_only))
                else:
                    # TODO: Make this prettier
                    click.echo("...Failed to Parse...")
                nv += len(parsed_string.violations)
                if parsed_string.violations:
                    click.echo("==== parsing violations ====")
                    for v in parsed_string.violations:
                        click.echo(format_violation(v))
                # Parse failures under the default (ansi) dialect may just
                # mean the wrong dialect was selected, so hint at that.
                if (
                    parsed_string.violations
                    and parsed_string.config.get("dialect") == "ansi"
                ):
                    click.echo(format_dialect_warning())
                if verbose >= 2:
                    click.echo("==== timings ====")
                    click.echo(cli_table(parsed_string.time_dict.items()))
            if verbose >= 2 or bench:
                click.echo("==== overall timings ====")
                timing_summary = timing.summary()
                for step in timing_summary:
                    click.echo(f"=== {step} ===")
                    click.echo(cli_table(timing_summary[step].items()))
        else:
            # collect result and print as single payload
            # will need to zip in the file paths
            filepaths = ["stdin"] if "-" == path else lnt.paths_from_path(path)
            result = [
                dict(
                    filepath=filepath,
                    segments=parsed.as_record(code_only=code_only, show_raw=True)
                    if parsed
                    else None,
                )
                for filepath, (parsed, _, _, _, _) in zip(filepaths, result)
            ]
            if format == "yaml":
                # For yaml dumping always dump double quoted strings if they contain tabs or newlines.
                yaml.add_representer(str, quoted_presenter)
                click.echo(yaml.dump(result))
            elif format == "json":
                click.echo(json.dumps(result))
    except IOError:
        click.echo(
            colorize(
                "The path {0!r} could not be accessed. Check it exists.".format(path),
                "red",
            )
        )
        sys.exit(1)
    if profiler:
        pr.disable()
        profiler_buffer = StringIO()
        ps = pstats.Stats(pr, stream=profiler_buffer).sort_stats("cumulative")
        ps.print_stats()
        click.echo("==== profiler stats ====")
        # Only print the first 50 lines of it
        click.echo("\n".join(profiler_buffer.getvalue().split("\n")[:50]))
    # Non-zero exit (66, as used elsewhere in this file) signals violations,
    # unless --nofail was given.
    if nv > 0 and not nofail:
        sys.exit(66)
    else:
        sys.exit(0)
# This "__main__" handler allows invoking SQLFluff using "python -m", which
# simplifies the use of cProfile, e.g.:
# python -m cProfile -s cumtime -m sqlfluff.cli.commands lint slow_file.sql
if __name__ == "__main__":
cli.main(sys.argv[1:])
| src/sqlfluff/cli/commands.py | 23,233 | This filter makes all warnings or above red.
Sqlfluff is a modular sql linter for humans.
Add common options to commands via a decorator.
These are applied to all of the cli commands.
Add core operation options to commands via a decorator.
These are applied to the main (but not all) cli commands like
`parse`, `lint` and `fix`.
Show the current dialects available.
Actually do the fixes.
Filter any warnings (or above) to turn them red.
Fix SQL files.
PATH is the path to a sql file or directory to lint. This can be either a
file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-')
character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will
be interpreted like passing the current working directory as a path argument.
Get a config object from kwargs.
Get a linter object given a config.
Lint SQL files via passing a list of files or using stdin.
PATH is the path to a sql file or directory to lint. This can be either a
file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-')
character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will
be interpreted like passing the current working directory as a path argument.
Linting SQL files:
sqlfluff lint path/to/file.sql
sqlfluff lint directory/of/sql/files
Linting a file via stdin (note the lone '-' character):
cat path/to/file.sql | sqlfluff lint -
echo 'select col from tbl' | sqlfluff lint -
Parse SQL files and just spit out the result.
PATH is the path to a sql file or directory to lint. This can be either a
file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-')
character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will
be interpreted like passing the current working directory as a path argument.
Re-presenter which always double quotes string values needing escapes.
Show the current rules in use.
Set up logging for the CLI.
We either set up global logging based on the verbosity
or, if `logger` is specified, we only limit to a single
sqlfluff logger. Verbosity is applied in the same way.
Implementation: If `logger` is not specified, the handler
is attached to the `sqlfluff` logger. If it is specified
then it attaches the the logger in question. In addition
if `logger` is specified, then that logger will also
not propagate.
Show the version of sqlfluff.
Contains the CLI.
For the profiler To enable colour cross platform Import from sqlfluff core. Don't propagate logging Enable colorama Set up the log handler to log to stdout NB: the unicode character at the beginning is to squash any badly tamed ANSI colour statements, and return us to normality. Set up a handler to colour warnings red. NB: We treat the parser logger slightly differently because it's noisier. It's important that we set levels for all each time so that we don't break tests by changing the granularity between tests. short_help='Specify a particular rule, or comma separated rules, to check', short_help='Specify a particular rule, or comma separated rules to exclude', We're just making sure it exists at this stage - it will be fetched properly in the linter Instantiate a config object (filtering out the nulls) We're just making sure it exists at this stage - it will be fetched properly in the linter Instantiate the linter and return (with an output function) Instantiate the linter and return. NB: No formatter in the Linter and a black formatter otherwise. Instantiate the linter Dispatch the detailed config from the linter. Otherwise just output the package version. Set up logging. add stdin if specified via lone '-' Output the results as we go Output the final stats If some failed then return false some quick checks Set up logging. handle stdin case. should output formatted sql to stdout and nothing else. Lint the paths (not with the fix argument at this stage), outputting as we go. NB: We filter to linting violations here, because they're the only ones which can be potentially fixed. We don't want anything else to be logged if we want json or yaml output Set up logging. 
TODO: do this better Set up the profiler if required handle stdin if specified via lone '-' put the parser result in a list to iterate later A single path must be specified for this command iterative print for human readout TODO: Make this prettier collect result and print as single payload will need to zip in the file paths For yaml dumping always dump double quoted strings if they contain tabs or newlines. Only print the first 50 lines of it This "__main__" handler allows invoking SQLFluff using "python -m", which simplifies the use of cProfile, e.g.: python -m cProfile -s cumtime -m sqlfluff.cli.commands lint slow_file.sql | 4,719 | en | 0.799271 |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Sample sorted inputs for the demo calls at the bottom of the file.
A1 = [1, 2, 4, 5, 6, 6, 8, 9]
A2 = [2, 5, 6, 7, 8, 8, 9]
def find_closest_num(A, target):
    """Return the element of the sorted list A closest to target.

    Uses binary search, tracking the best neighbour distance seen so far.
    Returns None for an empty list; for a single-element list returns that
    element; an exact match returns immediately.

    Bug fixes relative to the original:
    * min_diff_left / min_diff_right were only conditionally assigned but
      unconditionally read, raising NameError (or reusing stale values from
      a previous iteration) when mid sat at either end of the list.
    * A[mid] itself was never considered as a candidate unless it equalled
      target, e.g. find_closest_num([4, 7], 1) wrongly returned 7.
    """
    # Edge cases: empty list and single-element list.
    if len(A) == 0:
        return None
    if len(A) == 1:
        return A[0]

    min_diff = float("inf")
    closest_num = None
    low = 0
    high = len(A) - 1

    while low <= high:
        mid = (low + high) // 2

        # Distances to the neighbours of A[mid]; default to infinity at the
        # list boundaries so the comparisons below are always well defined.
        min_diff_left = float("inf")
        min_diff_right = float("inf")
        if mid + 1 < len(A):
            min_diff_right = abs(A[mid + 1] - target)
        if mid > 0:
            min_diff_left = abs(A[mid - 1] - target)

        # Track the nearest value seen so far (neighbours and A[mid] itself).
        if min_diff_left < min_diff:
            min_diff = min_diff_left
            closest_num = A[mid - 1]
        if min_diff_right < min_diff:
            min_diff = min_diff_right
            closest_num = A[mid + 1]
        if abs(A[mid] - target) < min_diff:
            min_diff = abs(A[mid] - target)
            closest_num = A[mid]

        # Standard binary-search narrowing; an exact hit is its own answer.
        if A[mid] < target:
            low = mid + 1
        elif A[mid] > target:
            high = mid - 1
        else:
            return A[mid]

    return closest_num
# Demo: closest value to 11 in A1 (prints 9) and to 4 in A2 (prints 5).
print(find_closest_num(A1, 11))
print(find_closest_num(A2, 4))
# In[ ]:
| dsa/closestnumber.py | 1,459 | !/usr/bin/env python coding: utf-8 In[1]: Edge cases for empty list of list with only one element: Ensure you do not read beyond the bounds of the list. Check if the absolute value between left and right elements are smaller than any seen prior. Move the mid-point appropriately as is done via binary search. If the element itself is the target, the closest number to it is itself. Return the number. In[ ]: | 407 | en | 0.869023 |
from threading import Thread
from time import sleep
from pytezos import pytezos
import argparse
# Registry of watched contracts: address -> {"last_data": <latest storage>}.
# Shared (without locking) between scanner threads and the input loop.
contract_dict = {}
def contract_origin_search(p, contract_hash, verbose = 0):
    """Binary-search the chain for the level where the contract was originated.

    Relies on contract.storage(level) raising for levels before origination:
    the window [start, end] narrows until storage(anchor) succeeds while
    storage(anchor - 1) fails.

    :param p: pytezos client instance
    :param contract_hash: contract address (KT1...)
    :param verbose: when truthy, print the result
    :return: (level, storage) — origination level and the storage seen there
    """
    start = 0
    end = p.shell.head.header()["level"]
    contract = p.contract(contract_hash)
    found = -1
    data = None
    while found == -1:
        anchor = int((end+start)/2)
        try:
            # Storage exists at anchor -> contract already originated by then.
            data = contract.storage(anchor)
            try:
                data = contract.storage(anchor-1)
                end=anchor
            except Exception:
                # No storage one level earlier: anchor is the origination level.
                found = anchor
        except Exception :
            # No storage at anchor: the contract was originated later.
            start = anchor
    if verbose:
        print("Ntf origin:", contract_hash, found, data, "\n")
    return found, data
def contract_all_update_search(p, contract_hash, start=-1, end=-1):
    """Scan levels in [start, end] and print every storage change.

    Bounds are clamped to [origination level, current head]; -1 means
    "unbounded".  Aborts early if the contract is removed from the global
    contract_dict (e.g. by the interactive 'remove' command).

    :return: (start, end, results) where results is a list of
        [level, storage] pairs, starting with the first known storage.
    """
    results = []
    head_level = p.shell.head.header()["level"]
    contract = p.contract(contract_hash)
    origin_level, data = contract_origin_search(p, contract_hash, verbose=1)
    start = start
    if origin_level > start or start ==-1:
        # Scan cannot begin before origination.
        start = origin_level
        results.append([origin_level, data])
    else:
        data = contract.storage(start)
        results.append([start, data])
    end = end
    if end > head_level or end ==-1:
        end = head_level
    for lvl in range(start+1, end+1):
        # Stop if the contract was unregistered while we were scanning.
        if contract_hash not in contract_dict.keys():
            break
        data = contract.storage(lvl)
        # Only record levels where the storage actually changed.
        if data != results[len(results)-1][1]:
            print("Ntf past", contract_hash, lvl, data, "\n")
            results.append([lvl, data])
        sleep(2) # TO REMOVE, added as test vector has too many updates
    return start, end, results
def contract_first_update_search(p, contract_hash, start=-1):
    """Find the first level (> start) where the contract's storage changed.

    :return: (start, [level, storage]) on success; (start, [-1, None]) when
        no update exists; (-1, [-1, None]) when start is beyond the head.
    """
    head_level = p.shell.head.header()["level"]
    contract = p.contract(contract_hash)
    origin_level, data = contract_origin_search(p, contract_hash)
    if start > head_level:
        return -1, [-1, None]
    start = start
    if origin_level > start:
        start = origin_level
    for lvl in range(start+1, head_level+1):
        new_data = contract.storage(lvl)
        # First level whose storage differs from the origination storage.
        if new_data != data:
            print("Ntf first:", contract_hash, start, lvl, new_data, "\n")
            return start, [lvl, new_data]
    return start, [-1, None]
def contract_last_update_search(p, contract_hash, end=-1):
    """Walk backwards from `end` and report the most recent storage change.

    :return: (end, [level, storage]) on success; (end, [-1, None]) when no
        change is found; (-1, [-1, None]) when `end` precedes origination.
    """
    head = p.shell.head.header()["level"]
    contract = p.contract(contract_hash)
    origin_level, _ = contract_origin_search(p, contract_hash)
    # An explicit upper bound below the origination level cannot match.
    if 0 < end < origin_level:
        return -1, [-1, None]
    if end == -1 or end > head:
        end = head
    for level in range(end, origin_level, -1):
        current = contract.storage(level)
        previous = contract.storage(level - 1)
        if current != previous:
            print("Ntf end:", contract_hash, end, level, current, "\n")
            return end, [level, current]
    return end, [-1, None]
def read_from_head(p):
    """Poll the chain head and print storage changes for every watched
    contract, until the global registry becomes empty."""
    global contract_dict
    while contract_dict:
        for c_hash in contract_dict:
            level = p.shell.head.header()["level"]
            latest = p.contract(c_hash).storage(level)
            entry = contract_dict[c_hash]
            if latest != entry["last_data"]:
                print("Ntf head:", c_hash, level, latest, "\n")
                entry["last_data"] = latest
        sleep(5) # TO REMOVE
def main():
    """CLI entry point: watch Tezos contracts for storage updates.

    Parses command-line arguments, starts background threads for the
    requested scan(s), then runs an interactive prompt where contracts can
    be added or removed until none are being watched.

    Bug fix: the command-line 'all updates' branch assigned
    ``end2 = head_level`` in both arms of the ``end <= head_level`` check,
    silently ignoring a user-supplied --end; it now mirrors the interactive
    'add' branch and uses ``end2 = end``.
    """
    global contract_dict
    # Instantiate the parser
    parser = argparse.ArgumentParser(description='Optional app description')
    parser.add_argument('-c', '--contract', type=str, help="the hash of the contract to scan")
    parser.add_argument("-net", "--network", type=str, help="the network, such as mainnet, carthagenet, dalphanet, delphinet or a RPC node uri", default="mainnet")
    parser.add_argument("-org", "--origin", help="find the level when the contract was deployed", action="store_true")
    parser.add_argument("-fst", "--first", help="find the contract's first update", action="store_true")
    parser.add_argument("-lst", "--last", help="find the contract's last update", action="store_true")
    parser.add_argument("-stt", "--start", type=int, help="index from where to start the scan", default=-1)
    parser.add_argument("-hash", "--hash", type=int, help="block hash from where to scan")
    parser.add_argument("-end", "--end", type=int, help="index until which to start the scan (from which for last update)", default=-1)
    args = parser.parse_args()

    contract_hash = args.contract
    if args.contract is None:
        print("Error: Specify contract hash", "\n")
        return

    # Set network and get head's level
    network = args.network
    p = pytezos.using(shell="mainnet")
    head_level = -1
    try:
        p = pytezos.using(shell=network)
        head_level = p.shell.head.header()["level"]
    except Exception as e:
        print("Error: Network error", e, "\n")
        return

    # Set the scan lower and upper bounds
    start = args.start
    if args.hash is not None:
        try:
            block = p.shell.chains.main.blocks[args.hash]
            start = block.header()["level"]
        except Exception as e:
            print("Error: block not found", e, "\n")
            return
    end = args.end

    # Check contract exists
    storage = None
    try:
        ci = p.contract(contract_hash)
        storage = ci.storage(head_level)
    except Exception as e:
        print("Error: contract not found", e, "\n")
        return

    # Return first update's level if asked
    if args.first == True:
        Thread(target=contract_first_update_search, args=(p, contract_hash,), kwargs={"start":start}).start()
    # Return last update's level if asked
    if args.last == True:
        Thread(target=contract_last_update_search, args=(p, contract_hash,), kwargs={"end":end}).start()
    # Return origination's level if asked
    if args.origin == True:
        Thread(target=contract_origin_search, args=(p, contract_hash,), kwargs={"verbose":1}).start()
    # Return all updates' levels if asked
    if (args.first == False and args.last == False and args.origin == False):
        if contract_hash not in contract_dict.keys():
            end2 = head_level
            if end <= head_level:
                end2 = end  # bug fix: was head_level, which ignored --end
            Thread(target=contract_all_update_search, args=(p, contract_hash,), kwargs={"start":start, "end":end2}).start()
            # Open-ended scans also follow the live chain head.
            if end == -1 or end > head_level:
                contract_dict[contract_hash]={"last_data":storage}
                Thread(target=read_from_head, args=(p,)).start()
        else:
            print("Error: contract already being scanned.", "\n")

    # Start loop to enter or remove notification requests
    while len(contract_dict) != 0:
        try:
            # Send hint and listen to input
            inputs = input("\n\nFunctions:\n add <hash> --start <start> --end <end>\n remove <hash>\n origin <hash> \n first <hash> --start <start>\n last <hash> --end <end>\n list\n\n").strip()
            inputs = inputs.split(" ")
            # Parse input and look for function
            if inputs[0].lower() in ["add", "remove", "origin", "first", "last", "list"]:
                if inputs[0].lower() == "list":
                    for key in contract_dict.keys():
                        print(key)
                    print("\n")
                else:
                    try:
                        contract_hash = inputs[1]
                        storage = p.contract(contract_hash).storage()
                        originated_level, originated_data = contract_origin_search(p, contract_hash)
                        head_level = p.shell.head.header()["level"]
                        # Check scan lower bound
                        start = -1
                        if "--start" in inputs:
                            stt = int(inputs[inputs.index("--start")+1])
                            start = stt
                            if stt < originated_level:
                                start = originated_level
                        # Check scan upper bound
                        end = -1
                        if "--end" in inputs:
                            end = int(inputs[inputs.index("--end")+1])
                        # Return first update's level if asked
                        if inputs[0] == "first":
                            Thread(target=contract_first_update_search, args=(p, contract_hash,), kwargs={"start":start}).start()
                        # Return last update's level if asked
                        if inputs[0] == "last":
                            Thread(target=contract_last_update_search, args=(p, contract_hash,), kwargs={"end":end}).start()
                        # Return origination's level if asked
                        if inputs[0] == "origin":
                            Thread(target=contract_origin_search, args=(p, contract_hash,), kwargs={"verbose":1}).start()
                        # Return all updates' levels if asked
                        if inputs[0] == "add":
                            end2 = head_level
                            if end <= head_level:
                                end2 = end
                            Thread(target=contract_all_update_search, args=(p, contract_hash,), kwargs={"start":start, "end":end2}).start()
                            if (end == -1 or end > head_level) and contract_hash not in contract_dict.keys():
                                contract_dict[contract_hash]={"last_data":storage}
                        if inputs[0] == "remove":
                            if contract_hash in contract_dict.keys():
                                del contract_dict[contract_hash]
                                print("Contract "+str(contract_hash)+" removed.\n")
                    except Exception as e:
                        print("Error: contract not found", e, "\n")
            else:
                print("Error command not recognized", inputs, "\n")
        except Exception as e:
            print(e)
    print("No more contract to scan, closing program.\n")
def test_contract_origin():
    """Regression test: known origination level of a mainnet contract.

    Bug fix: contract_origin_search expects a pytezos client, not the
    network name string — passing "mainnet" raised AttributeError on
    ``p.shell``.  Requires network access.
    """
    contract = "KT19kgnqC5VWoxktLRdRUERbyUPku9YioE8W"
    origin_lvl = 1073618
    p = pytezos.using(shell="mainnet")
    lvl, _ = contract_origin_search(p, contract)
    assert origin_lvl == lvl
def test_contract_first_update():
    """Regression test: known first-update level of a mainnet contract.

    Bug fix: contract_first_update_search expects a pytezos client, not
    the network name string.  Requires network access.
    """
    contract = "KT19kgnqC5VWoxktLRdRUERbyUPku9YioE8W"
    first_update_lvl = 1073622
    p = pytezos.using(shell="mainnet")
    start, [lvl, _] = contract_first_update_search(p, contract)
    assert first_update_lvl == lvl
# Run the CLI entry point only when executed as a script.
if __name__ == "__main__":
    main()
| tezos-sandbox/watchtower/delphinet/passive_watchtower.py | 10,872 | TO REMOVE, added as test vector has too many updates TO REMOVE Instantiate the parser Set network and get head's level Set the scan lower and upper bounds Check contract exists Return first update's level if asked Return last update's level if asked Return origination's level if asked Return all updates' levels if asked Start loop to enter or remove notification requests Send hint and listen to input Parse input and look for function Check scan lower bound Check scan upper bound Return first update's level if asked Return last update's level if asked Return origination's level if asked Return all updates' levels if asked | 628 | en | 0.765149 |
from functools import partial
from dictknife.langhelpers import as_jsonpointer as _as_jsonpointer
from dictknife.langhelpers import as_path_node as _as_path_node
from dictknife import accessing
from dictknife import naming
def _make_key(k0, k1, *, sep="/"):
    """Join two key parts with `sep`; a None second part yields just the first."""
    head = _as_jsonpointer(str(k0))
    if k1 is None:
        return head
    return "{}{}{}".format(head, sep, k1)
def unflatten(d, *, sep="/", accessor=accessing.Accessor()):
    """Rebuild a nested structure from a flattened mapping of sep-joined keys.

    NOTE(review): the default `accessor` is a single Accessor instance
    shared across all calls (created at import time) — assumed stateless;
    confirm before relying on this in concurrent use.
    """
    r = accessor.make_dict()
    for k, v in d.items():
        accessor.assign(r, [_as_path_node(x) for x in k.split(sep)], v)
    return _fix_unflatten_list(r)
def _fix_unflatten_list(d):
if hasattr(d, "keys"):
for k in list(d.keys()):
d[k] = _fix_unflatten_list(d[k])
# list ?
if "0" in d and str(len(d) - 1) in d:
r = []
for i in range(len(d)):
k = str(i)
if k not in d:
return d
r.append(d[k])
return r
return d
def flatten(d, *, sep="/"):
    """Flatten nested lists/dicts into a single mapping with sep-joined keys."""
    if isinstance(d, (list, tuple)):
        out = {}
        for i, row in enumerate(d):
            for k, v in flatten(row, sep=sep).items():
                out[_make_key(i, k, sep=sep)] = v
        return out
    elif hasattr(d, "get"):
        out = {}
        for k, child in d.items():
            for k2, v2 in flatten(child, sep=sep).items():
                out[_make_key(k, k2, sep=sep)] = v2
        return out
    elif hasattr(d, "__next__"):
        # todo: as generator
        return flatten(list(d), sep=sep)
    else:
        # Leaf value: the None key is folded away by the caller's _make_key.
        return {None: _as_jsonpointer(d) if hasattr(d, "replace") else d}
def rows(d, *, kname="name", vname="value"):
    """Turn a mapping into a list of {kname: key, vname: value} records."""
    return [{kname: key, vname: val} for key, val in d.items()]
def update_keys(d, *, key, coerce=str):  # side effect!
    """Recursively rename every dict key in-place via key(coerce(k)).

    Mutates `d` (and nested containers) and returns it for convenience.
    """
    if hasattr(d, "keys"):
        for old_key, child in list(d.items()):
            d[key(coerce(old_key))] = d.pop(old_key)
            update_keys(child, key=key, coerce=coerce)
    elif isinstance(d, (list, tuple)):
        for item in d:
            update_keys(item, key=key, coerce=coerce)
    return d
# Convenience partials: in-place key conversion using the common naming schemes.
str_dict = partial(update_keys, key=str)
normalize_dict = partial(update_keys, key=naming.normalize)
snakecase_dict = partial(update_keys, key=naming.snakecase)
camelcase_dict = partial(update_keys, key=naming.camelcase)
kebabcase_dict = partial(update_keys, key=naming.kebabcase)
pascalcase_dict = partial(update_keys, key=naming.pascalcase)
def only_num(d):
    """Keep entries whose value is numeric: int/float (bool excluded) or an
    all-digit string."""

    def _is_numeric(value):
        if isinstance(value, bool):
            return False
        if isinstance(value, (int, float)):
            return True
        return hasattr(value, "isdigit") and value.isdigit()

    return {k: v for k, v in d.items() if _is_numeric(v)}
def only_str(d):
    """Keep only the entries whose value is a str."""
    result = {}
    for key, value in d.items():
        if isinstance(value, str):
            result[key] = value
    return result
def shrink(
    d,
    *,
    max_length_of_string: int = 100,
    cont_suffix: str = "...",
    max_length_of_list: int = 3,
    with_tail: bool = False,
    mutable: bool = False,
):
    """Return a size-limited copy of *d* for display/logging purposes.

    Strings longer than `max_length_of_string` are truncated and suffixed
    with `cont_suffix`.  Lists/tuples longer than `max_length_of_list` are
    cut to their first `max_length_of_list` items; with `with_tail=True`
    the last `max_length_of_list` items are appended as well (so the result
    may contain up to twice that many).  `mutable=True` modifies nested
    containers in place via the accessor's modifier.
    """
    # todo: random select
    # todo: cont suffix for list
    from dictknife.accessing import get_modifier

    modifier = get_modifier(mutable=mutable)

    def _map(d):
        if isinstance(d, (list, tuple)):
            xs = d
            if len(xs) > max_length_of_list:
                if with_tail:
                    # Bug fix: materialize as a list before extend() — a
                    # tuple slice has no .extend(), which made the original
                    # raise AttributeError for tuples with with_tail=True.
                    xs = list(d[:max_length_of_list])
                    xs.extend(d[-max_length_of_list:])
                else:
                    xs = d[:max_length_of_list]
            return modifier.modify_list(_map, xs)
        elif hasattr(d, "keys"):
            return modifier.modify_dict(_map, d)
        elif isinstance(d, str):
            s = d
            if len(s) > max_length_of_string:
                s = s[:max_length_of_string] + cont_suffix
            return s
        else:
            return d

    return _map(d)
| dictknife/transform.py | 3,666 | list ? todo: as generator todo: peformance improvement side effect! todo: random select todo: cont suffix for list | 114 | en | 0.252054 |
# Copyright (C) 2019 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import asyncio
import base64
from distutils.version import StrictVersion
from PyQt5.QtCore import Qt, QThread, pyqtSignal
from PyQt5.QtWidgets import (QWidget, QVBoxLayout, QLabel, QProgressBar,
QHBoxLayout, QPushButton)
from electrum_dash import version
from electrum_dash import constants
from electrum_dash import ecc
from electrum_dash.i18n import _
from electrum_dash.util import make_aiohttp_session
from electrum_dash.logging import Logger
class UpdateCheck(QWidget, Logger):
    """Small window that checks for a newer PacGlobal Electrum release.

    Fetches a signed version announcement from `url` in a background
    thread and compares it against the running version.

    NOTE(review): unlike UpdateCheckThread, __init__ does not call
    Logger.__init__ — confirm whether the Logger mixin requires it.
    """
    # Location of the signed latest-version announcement.
    url = "https://raw.githubusercontent.com/akhavr/electrum-pac/master/.latest-version"
    # Page the user is sent to when a newer release exists.
    download_url = "https://github.com/PACGlobalOfficial/electrum-pac/releases"

    # Addresses whose signatures are accepted on the version announcement.
    VERSION_ANNOUNCEMENT_SIGNING_KEYS = (
        "XuKFPN7RDbrvNsPddPyUPzVqwdhvfB67cx",
    )

    def __init__(self, main_window, latest_version=None):
        """Build the dialog, start the background check and show the window."""
        self.main_window = main_window
        QWidget.__init__(self)
        self.setWindowTitle('PacGlobal Electrum - ' + _('Update Check'))
        self.content = QVBoxLayout()
        self.content.setContentsMargins(*[10]*4)
        self.heading_label = QLabel()
        self.content.addWidget(self.heading_label)
        self.detail_label = QLabel()
        self.detail_label.setTextInteractionFlags(Qt.LinksAccessibleByMouse)
        self.detail_label.setOpenExternalLinks(True)
        self.content.addWidget(self.detail_label)
        # Indeterminate progress bar shown while the check runs.
        self.pb = QProgressBar()
        self.pb.setMaximum(0)
        self.pb.setMinimum(0)
        self.content.addWidget(self.pb)
        versions = QHBoxLayout()
        versions.addWidget(QLabel(_("Current version: {}".format(version.ELECTRUM_VERSION))))
        self.latest_version_label = QLabel(_("Latest version: {}".format(" ")))
        versions.addWidget(self.latest_version_label)
        self.content.addLayout(versions)
        self.update_view(latest_version)
        # Kick off the network check in a QThread; results arrive via signals.
        self.update_check_thread = UpdateCheckThread(self.main_window)
        self.update_check_thread.checked.connect(self.on_version_retrieved)
        self.update_check_thread.failed.connect(self.on_retrieval_failed)
        self.update_check_thread.start()
        close_button = QPushButton(_("Close"))
        close_button.clicked.connect(self.close)
        self.content.addWidget(close_button)
        self.setLayout(self.content)
        self.show()

    def on_version_retrieved(self, version):
        """Slot: a verified latest version was fetched successfully."""
        self.update_view(version)

    def on_retrieval_failed(self):
        """Slot: the version check failed (network or signature error)."""
        self.heading_label.setText('<h2>' + _("Update check failed") + '</h2>')
        self.detail_label.setText(_("Sorry, but we were unable to check for updates. Please try again later."))
        self.pb.hide()

    @staticmethod
    def is_newer(latest_version):
        """Return True if `latest_version` is newer than the running version.

        Any 'rc' suffix on the running version is stripped before comparing.
        """
        v = version.ELECTRUM_VERSION
        if 'rc' in v:
            v = v[:v.index('rc')]
        return latest_version > StrictVersion(v)

    def update_view(self, latest_version=None):
        """Refresh labels: result view when a version is known, else a
        'checking...' view with the progress bar."""
        if latest_version:
            self.pb.hide()
            self.latest_version_label.setText(_("Latest version: {}".format(latest_version)))
            if self.is_newer(latest_version):
                self.heading_label.setText('<h2>' + _("There is a new update available") + '</h2>')
                url = "<a href='{u}'>{u}</a>".format(u=UpdateCheck.download_url)
                self.detail_label.setText(_("You can download the new version from {}.").format(url))
            else:
                self.heading_label.setText('<h2>' + _("Already up to date") + '</h2>')
                self.detail_label.setText(_("You are already on the latest version of PacGlobal Electrum."))
        else:
            self.heading_label.setText('<h2>' + _("Checking for updates...") + '</h2>')
            self.detail_label.setText(_("Please wait while PacGlobal Electrum checks for available updates."))
class UpdateCheckThread(QThread, Logger):
    """Background thread that downloads and verifies the version announcement.

    Emits `checked(StrictVersion)` on success or `failed()` on any error.
    """
    checked = pyqtSignal(object)
    failed = pyqtSignal()

    def __init__(self, main_window):
        QThread.__init__(self)
        Logger.__init__(self)
        self.main_window = main_window

    async def get_update_info(self):
        """Fetch the announcement, verify its signature and return the version.

        Raises if no signature from a trusted key verifies.
        """
        async with make_aiohttp_session(proxy=self.main_window.network.proxy) as session:
            async with session.get(UpdateCheck.url) as result:
                signed_version_dict = await result.json(content_type=None)
                # example signed_version_dict:
                # {
                #     "version": "3.9.9",
                #     "signatures": {
                #         "1Lqm1HphuhxKZQEawzPse8gJtgjm9kUKT4": "IA+2QG3xPRn4HAIFdpu9eeaCYC7S5wS/sDxn54LJx6BdUTBpse3ibtfq8C43M7M1VfpGkD5tsdwl5C6IfpZD/gQ="
                #     }
                # }
                version_num = signed_version_dict['version']
                sigs = signed_version_dict['signatures']
                for address, sig in sigs.items():
                    # Only signatures from the hard-coded trusted keys count.
                    if address not in UpdateCheck.VERSION_ANNOUNCEMENT_SIGNING_KEYS:
                        continue
                    sig = base64.b64decode(sig)
                    msg = version_num.encode('utf-8')
                    if ecc.verify_message_with_address(address=address, sig65=sig, message=msg,
                                                      net=constants.BitcoinMainnet):
                        self.logger.info(f"valid sig for version announcement '{version_num}' from address '{address}'")
                        break
                else:
                    raise Exception('no valid signature for version announcement')
                return StrictVersion(version_num.strip())

    def run(self):
        """QThread entry point: run the async check on the network's loop."""
        network = self.main_window.network
        if not network:
            self.failed.emit()
            return
        try:
            update_info = asyncio.run_coroutine_threadsafe(self.get_update_info(), network.asyncio_loop).result()
        except Exception as e:
            self.logger.info(f"got exception: '{repr(e)}'")
            self.failed.emit()
        else:
            self.checked.emit(update_info)
| electrum_dash/gui/qt/update_checker.py | 6,159 | Copyright (C) 2019 The Electrum developers Distributed under the MIT software license, see the accompanying file LICENCE or http://www.opensource.org/licenses/mit-license.php example signed_version_dict: { "version": "3.9.9", "signatures": { "1Lqm1HphuhxKZQEawzPse8gJtgjm9kUKT4": "IA+2QG3xPRn4HAIFdpu9eeaCYC7S5wS/sDxn54LJx6BdUTBpse3ibtfq8C43M7M1VfpGkD5tsdwl5C6IfpZD/gQ=" } } | 394 | en | 0.538937 |
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests that application of gates and state preparations
works correctly an a device.
"""
# pylint: disable=no-self-use
# pylint: disable=too-many-arguments
# pylint: disable=pointless-statement
from cmath import exp
from math import cos, sin, sqrt
import pytest
import numpy as np
import pennylane as qml
from scipy.linalg import block_diag
from flaky import flaky
# Applied to every test in this module: skip when the device under test
# does not support the operation being exercised.
pytestmark = pytest.mark.skip_unsupported

np.random.seed(42)

# ==========================================================
# Some useful global variables

# gates for which device support is tested
ops = {
    "BasisState": qml.BasisState(np.array([0]), wires=[0]),
    "CNOT": qml.CNOT(wires=[0, 1]),
    "CRX": qml.CRX(0, wires=[0, 1]),
    "CRY": qml.CRY(0, wires=[0, 1]),
    "CRZ": qml.CRZ(0, wires=[0, 1]),
    "CRot": qml.CRot(0, 0, 0, wires=[0, 1]),
    "CSWAP": qml.CSWAP(wires=[0, 1, 2]),
    "CZ": qml.CZ(wires=[0, 1]),
    "CY": qml.CY(wires=[0, 1]),
    "DiagonalQubitUnitary": qml.DiagonalQubitUnitary(np.array([1, 1]), wires=[0]),
    "Hadamard": qml.Hadamard(wires=[0]),
    "MultiRZ": qml.MultiRZ(0, wires=[0]),
    "PauliX": qml.PauliX(wires=[0]),
    "PauliY": qml.PauliY(wires=[0]),
    "PauliZ": qml.PauliZ(wires=[0]),
    "PhaseShift": qml.PhaseShift(0, wires=[0]),
    "ControlledPhaseShift": qml.ControlledPhaseShift(0, wires=[0, 1]),
    "QubitStateVector": qml.QubitStateVector(np.array([1.0, 0.0]), wires=[0]),
    "QubitUnitary": qml.QubitUnitary(np.eye(2), wires=[0]),
    "ControlledQubitUnitary": qml.ControlledQubitUnitary(np.eye(2), control_wires=[1], wires=[0]),
    "MultiControlledX": qml.MultiControlledX(control_wires=[1, 2], wires=[0]),
    "RX": qml.RX(0, wires=[0]),
    "RY": qml.RY(0, wires=[0]),
    "RZ": qml.RZ(0, wires=[0]),
    "Rot": qml.Rot(0, 0, 0, wires=[0]),
    "S": qml.S(wires=[0]),
    "SWAP": qml.SWAP(wires=[0, 1]),
    "ISWAP": qml.ISWAP(wires=[0, 1]),
    "T": qml.T(wires=[0]),
    "SX": qml.SX(wires=[0]),
    "Toffoli": qml.Toffoli(wires=[0, 1, 2]),
    "QFT": qml.QFT(wires=[0, 1, 2]),
    "IsingXX": qml.IsingXX(0, wires=[0, 1]),
    "IsingZZ": qml.IsingZZ(0, wires=[0, 1]),
    "SingleExcitation": qml.SingleExcitation(0, wires=[0, 1]),
    "SingleExcitationPlus": qml.SingleExcitationPlus(0, wires=[0, 1]),
    "SingleExcitationMinus": qml.SingleExcitationMinus(0, wires=[0, 1]),
    "DoubleExcitation": qml.DoubleExcitation(0, wires=[0, 1, 2, 3]),
    "DoubleExcitationPlus": qml.DoubleExcitationPlus(0, wires=[0, 1, 2, 3]),
    "DoubleExcitationMinus": qml.DoubleExcitationMinus(0, wires=[0, 1, 2, 3]),
    "QubitCarry": qml.QubitCarry(wires=[0, 1, 2, 3]),
    # Bug fix: the key was "QubitSum:" (stray trailing colon), which can
    # never match a device's "QubitSum" operation name, so that gate was
    # silently skipped by the parametrized support tests.
    "QubitSum": qml.QubitSum(wires=[0, 1, 2]),
}

all_ops = ops.keys()
# non-parametrized qubit gates
# (reference matrices used to compute the expected probabilities below)
I = np.identity(2)
X = np.array([[0, 1], [1, 0]])
Y = np.array([[0, -1j], [1j, 0]])
Z = np.array([[1, 0], [0, -1]])
H = np.array([[1, 1], [1, -1]]) / sqrt(2)
S = np.diag([1, 1j])
T = np.diag([1, np.exp(1j * np.pi / 4)])
SX = 0.5 * np.array([[1 + 1j, 1 - 1j], [1 - 1j, 1 + 1j]])
SWAP = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
ISWAP = np.array([[1, 0, 0, 0], [0, 0, 1j, 0], [0, 1j, 0, 0], [0, 0, 0, 1]])
CNOT = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
CZ = np.diag([1, 1, 1, -1])
CY = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, -1j], [0, 0, 1j, 0]])
# Toffoli: identity except an X on the target when both controls are 1.
toffoli = np.diag([1 for i in range(8)])
toffoli[6:8, 6:8] = np.array([[0, 1], [1, 0]])
CSWAP = block_diag(I, I, SWAP)

# parametrized qubit gates
# (each lambda returns the gate's matrix for the given angle(s))
phase_shift = lambda phi: np.array([[1, 0], [0, np.exp(1j * phi)]])
rx = lambda theta: cos(theta / 2) * I + 1j * sin(-theta / 2) * X
ry = lambda theta: cos(theta / 2) * I + 1j * sin(-theta / 2) * Y
rz = lambda theta: cos(theta / 2) * I + 1j * sin(-theta / 2) * Z
rot = lambda a, b, c: rz(c) @ (ry(b) @ rz(a))
crz = lambda theta: np.array(
    [
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, np.exp(-1j * theta / 2), 0],
        [0, 0, 0, np.exp(1j * theta / 2)],
    ]
)
cry = lambda theta: np.array(
    [
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, cos(theta / 2), -sin(theta / 2)],
        [0, 0, sin(theta / 2), cos(theta / 2)],
    ]
)
crx = lambda theta: np.array(
    [
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, cos(theta / 2), 1j * sin(-theta / 2)],
        [0, 0, 1j * sin(-theta / 2), cos(theta / 2)],
    ]
)
crot = lambda phi, theta, omega: np.array(
    [
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [
            0,
            0,
            exp(-0.5j * (phi + omega)) * cos(theta / 2),
            -exp(0.5j * (phi - omega)) * sin(theta / 2),
        ],
        [
            0,
            0,
            exp(-0.5j * (phi - omega)) * sin(theta / 2),
            exp(0.5j * (phi + omega)) * cos(theta / 2),
        ],
    ]
)
IsingXX = lambda phi: np.array(
    [
        [cos(phi / 2), 0, 0, -1j * sin(phi / 2)],
        [0, cos(phi / 2), -1j * sin(phi / 2), 0],
        [0, -1j * sin(phi / 2), cos(phi / 2), 0],
        [-1j * sin(phi / 2), 0, 0, cos(phi / 2)],
    ]
)
IsingZZ = lambda phi: np.array(
    [
        [exp(-1.0j * phi / 2), 0, 0, 0],
        [0, exp(1.0j * phi / 2), 0, 0],
        [0, 0, exp(1.0j * phi / 2), 0],
        [0, 0, 0, exp(-1.0j * phi / 2)],
    ]
)

# list of all non-parametrized single-qubit gates,
# along with the PennyLane operation name
single_qubit = [
    (qml.PauliX, X),
    (qml.PauliY, Y),
    (qml.PauliZ, Z),
    (qml.Hadamard, H),
    (qml.S, S),
    (qml.T, T),
    (qml.SX, SX),
]

# list of all parametrized single-qubit gates
# taking a single parameter
single_qubit_param = [
    (qml.PhaseShift, phase_shift),
    (qml.RX, rx),
    (qml.RY, ry),
    (qml.RZ, rz),
]
# list of all non-parametrized two-qubit gates
two_qubit = [(qml.CNOT, CNOT), (qml.SWAP, SWAP), (qml.ISWAP, ISWAP), (qml.CZ, CZ), (qml.CY, CY)]
# list of all parametrized two-qubit gates
two_qubit_param = [
    (qml.CRX, crx),
    (qml.CRY, cry),
    (qml.CRZ, crz),
    (qml.IsingXX, IsingXX),
    (qml.IsingZZ, IsingZZ),
]
two_qubit_multi_param = [(qml.CRot, crot)]
# list of all three-qubit gates
three_qubit = [(qml.Toffoli, toffoli), (qml.CSWAP, CSWAP)]
# single qubit unitary matrix
# Bug fix: np.complex was a deprecated alias for the builtin complex and
# was removed in NumPy 1.24; use complex() directly.
theta = 0.8364
phi = -0.1234
U = np.array(
    [
        [
            np.cos(theta / 2) * np.exp(complex(0, -phi / 2)),
            -np.sin(theta / 2) * np.exp(complex(0, phi / 2)),
        ],
        [
            np.sin(theta / 2) * np.exp(complex(0, -phi / 2)),
            np.cos(theta / 2) * np.exp(complex(0, phi / 2)),
        ],
    ]
)

# two qubit unitary matrix
U2 = np.array([[0, 1, 1, 1], [1, 0, 1, -1], [1, -1, 0, 1], [1, 1, -1, 0]]) / sqrt(3)

# single qubit Hermitian observable
A = np.array([[1.02789352, 1.61296440 - 0.3498192j], [1.61296440 + 0.3498192j, 1.23920938 + 0j]])
# ===============================================================
class TestSupportedGates:
    """Test that the device can implement all gates that it claims to support."""

    @pytest.mark.parametrize("operation", all_ops)
    def test_supported_gates_can_be_implemented(self, device_kwargs, operation):
        """Test that the device can implement all its supported gates."""
        device_kwargs["wires"] = 4  # maximum size of current gates
        dev = qml.device(**device_kwargs)
        assert hasattr(dev, "operations")
        if operation in dev.operations:

            @qml.qnode(dev)
            def circuit():
                ops[operation]
                return qml.expval(qml.Identity(wires=0))

            # A float/ndarray result means the op executed without raising.
            assert isinstance(circuit(), (float, np.ndarray))

    @pytest.mark.parametrize("operation", all_ops)
    def test_inverse_gates_can_be_implemented(self, device_kwargs, operation):
        """Test that the device can implement the inverse of all its supported gates.
        This test is skipped for devices that do not support inverse operations."""
        device_kwargs["wires"] = 4
        dev = qml.device(**device_kwargs)
        supports_inv = (
            "supports_inverse_operations" in dev.capabilities()
            and dev.capabilities()["supports_inverse_operations"]
        )
        if not supports_inv:
            pytest.skip("Device does not support inverse operations.")
        assert hasattr(dev, "operations")
        if operation in dev.operations:

            @qml.qnode(dev)
            def circuit():
                ops[operation].queue().inv()
                return qml.expval(qml.Identity(wires=0))

            assert isinstance(circuit(), (float, np.ndarray))
@flaky(max_runs=10)
class TestGatesQubit:
    """Test qubit-based devices' probability vector after application of gates.

    Each test prepares a random state, applies one gate, and compares the
    device's output probabilities with |mat @ state|^2 computed from the
    reference matrices defined at module level.
    """

    @pytest.mark.parametrize(
        "basis_state",
        [
            np.array([0, 0, 1, 0]),
            np.array([0, 0, 1, 0]),
            np.array([1, 0, 1, 0]),
            np.array([1, 1, 1, 1]),
        ],
    )
    def test_basis_state(self, device, basis_state, tol, skip_if):
        """Test basis state initialization."""
        n_wires = 4
        dev = device(n_wires)
        skip_if(dev, {"returns_probs": False})

        @qml.qnode(dev)
        def circuit():
            qml.BasisState(basis_state, wires=range(n_wires))
            return qml.probs(wires=range(n_wires))

        res = circuit()

        # All probability mass should sit on the prepared basis state.
        expected = np.zeros([2 ** n_wires])
        expected[np.ravel_multi_index(basis_state, [2] * n_wires)] = 1
        assert np.allclose(res, expected, atol=tol(dev.shots))

    def test_qubit_state_vector(self, device, init_state, tol, skip_if):
        """Test QubitStateVector initialisation."""
        n_wires = 1
        dev = device(n_wires)
        skip_if(dev, {"returns_probs": False})

        rnd_state = init_state(n_wires)

        @qml.qnode(dev)
        def circuit():
            qml.QubitStateVector(rnd_state, wires=range(n_wires))
            return qml.probs(range(n_wires))

        res = circuit()
        expected = np.abs(rnd_state) ** 2
        assert np.allclose(res, expected, atol=tol(dev.shots))

    @pytest.mark.parametrize("op,mat", single_qubit)
    def test_single_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):
        """Test single-qubit gates without parameters."""
        n_wires = 1
        dev = device(n_wires)
        skip_if(dev, {"returns_probs": False})

        rnd_state = init_state(n_wires)

        @qml.qnode(dev)
        def circuit():
            qml.QubitStateVector(rnd_state, wires=range(n_wires))
            op(wires=range(n_wires))
            return qml.probs(wires=range(n_wires))

        res = circuit()
        expected = np.abs(mat @ rnd_state) ** 2
        assert np.allclose(res, expected, atol=tol(dev.shots))

    @pytest.mark.parametrize("gamma", [0.5432, -0.232])
    @pytest.mark.parametrize("op,func", single_qubit_param)
    def test_single_qubit_parameters(self, device, init_state, op, func, gamma, tol, skip_if):
        """Test single qubit gates taking a single scalar argument."""
        n_wires = 1
        dev = device(n_wires)
        skip_if(dev, {"returns_probs": False})

        rnd_state = init_state(n_wires)

        @qml.qnode(dev)
        def circuit():
            qml.QubitStateVector(rnd_state, wires=range(n_wires))
            op(gamma, wires=range(n_wires))
            return qml.probs(wires=range(n_wires))

        res = circuit()
        expected = np.abs(func(gamma) @ rnd_state) ** 2
        assert np.allclose(res, expected, atol=tol(dev.shots))

    def test_rotation(self, device, init_state, tol, skip_if):
        """Test three axis rotation gate."""
        n_wires = 1
        dev = device(n_wires)
        skip_if(dev, {"returns_probs": False})

        rnd_state = init_state(n_wires)
        a = 0.542
        b = 1.3432
        c = -0.654

        @qml.qnode(dev)
        def circuit():
            qml.QubitStateVector(rnd_state, wires=range(n_wires))
            qml.Rot(a, b, c, wires=range(n_wires))
            return qml.probs(wires=range(n_wires))

        res = circuit()
        expected = np.abs(rot(a, b, c) @ rnd_state) ** 2
        assert np.allclose(res, expected, atol=tol(dev.shots))

    @pytest.mark.parametrize("op,mat", two_qubit)
    def test_two_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):
        """Test two qubit gates."""
        n_wires = 2
        dev = device(n_wires)
        skip_if(dev, {"returns_probs": False})

        rnd_state = init_state(n_wires)

        @qml.qnode(dev)
        def circuit():
            qml.QubitStateVector(rnd_state, wires=range(n_wires))
            op(wires=range(n_wires))
            return qml.probs(wires=range(n_wires))

        res = circuit()
        expected = np.abs(mat @ rnd_state) ** 2
        assert np.allclose(res, expected, atol=tol(dev.shots))

    @pytest.mark.parametrize("param", [0.5432, -0.232])
    @pytest.mark.parametrize("op,func", two_qubit_param)
    def test_two_qubit_parameters(self, device, init_state, op, func, param, tol, skip_if):
        """Test parametrized two qubit gates taking a single scalar argument."""
        n_wires = 2
        dev = device(n_wires)
        skip_if(dev, {"returns_probs": False})

        rnd_state = init_state(n_wires)

        @qml.qnode(dev)
        def circuit():
            qml.QubitStateVector(rnd_state, wires=range(n_wires))
            op(param, wires=range(n_wires))
            return qml.probs(wires=range(n_wires))

        res = circuit()
        expected = np.abs(func(param) @ rnd_state) ** 2
        assert np.allclose(res, expected, atol=tol(dev.shots))

    @pytest.mark.parametrize("mat", [U, U2])
    def test_qubit_unitary(self, device, init_state, mat, tol, skip_if):
        """Test QubitUnitary gate."""
        # Infer the wire count from the matrix dimension.
        n_wires = int(np.log2(len(mat)))
        dev = device(n_wires)

        if "QubitUnitary" not in dev.operations:
            pytest.skip("Skipped because device does not support QubitUnitary.")

        skip_if(dev, {"returns_probs": False})

        rnd_state = init_state(n_wires)

        @qml.qnode(dev)
        def circuit():
            qml.QubitStateVector(rnd_state, wires=range(n_wires))
            qml.QubitUnitary(mat, wires=list(range(n_wires)))
            return qml.probs(wires=range(n_wires))

        res = circuit()
        expected = np.abs(mat @ rnd_state) ** 2
        assert np.allclose(res, expected, atol=tol(dev.shots))

    @pytest.mark.parametrize("op, mat", three_qubit)
    def test_three_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):
        """Test three qubit gates without parameters."""
        n_wires = 3
        dev = device(n_wires)
        skip_if(dev, {"returns_probs": False})

        rnd_state = init_state(n_wires)

        @qml.qnode(dev)
        def circuit():
            qml.QubitStateVector(rnd_state, wires=range(n_wires))
            op(wires=[0, 1, 2])
            return qml.probs(wires=range(n_wires))

        res = circuit()
        expected = np.abs(mat @ rnd_state) ** 2
        assert np.allclose(res, expected, atol=tol(dev.shots))
@flaky(max_runs=10)
class TestInverseGatesQubit:
    """Test the device's probability vector after application of inverse of gates."""

    @pytest.mark.parametrize("op,mat", single_qubit)
    def test_single_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):
        """Inverse of a non-parametrized single-qubit gate."""
        n_wires = 1
        dev = device(n_wires)
        skip_if(dev, {"supports_inverse_operations": False})
        skip_if(dev, {"returns_probs": False})

        state = init_state(1)

        @qml.qnode(dev)
        def circuit():
            qml.QubitStateVector(state, wires=range(n_wires))
            op(wires=range(n_wires)).inv()
            return qml.probs(wires=range(n_wires))

        # The inverse of a unitary gate is its conjugate transpose.
        reference = np.abs(mat.conj().T @ state) ** 2
        assert np.allclose(circuit(), reference, atol=tol(dev.shots))

    @pytest.mark.parametrize("gamma", [0.5432, -0.232])
    @pytest.mark.parametrize("op,func", single_qubit_param)
    def test_single_qubit_parameters(self, device, init_state, op, func, gamma, tol, skip_if):
        """Inverse of a single-qubit gate taking one scalar parameter."""
        n_wires = 1
        dev = device(n_wires)
        skip_if(dev, {"supports_inverse_operations": False})
        skip_if(dev, {"returns_probs": False})

        state = init_state(n_wires)

        @qml.qnode(dev)
        def circuit():
            qml.QubitStateVector(state, wires=range(n_wires))
            op(gamma, wires=range(n_wires)).inv()
            return qml.probs(wires=range(n_wires))

        reference = np.abs(func(gamma).conj().T @ state) ** 2
        assert np.allclose(circuit(), reference, atol=tol(dev.shots))

    def test_rotation(self, device, init_state, tol, skip_if):
        """Inverse of the three-axis rotation gate."""
        n_wires = 1
        dev = device(n_wires)
        skip_if(dev, {"supports_inverse_operations": False})
        skip_if(dev, {"returns_probs": False})

        state = init_state(1)
        a, b, c = 0.542, 1.3432, -0.654

        @qml.qnode(dev)
        def circuit():
            qml.QubitStateVector(state, wires=range(n_wires))
            qml.Rot(a, b, c, wires=range(n_wires)).inv()
            return qml.probs(wires=range(n_wires))

        reference = np.abs(rot(a, b, c).conj().T @ state) ** 2
        assert np.allclose(circuit(), reference, atol=tol(dev.shots))

    @pytest.mark.parametrize("op,mat", two_qubit)
    def test_two_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):
        """Inverse of a non-parametrized two-qubit gate."""
        n_wires = 2
        dev = device(n_wires)
        skip_if(dev, {"supports_inverse_operations": False})
        skip_if(dev, {"returns_probs": False})

        state = init_state(n_wires)

        @qml.qnode(dev)
        def circuit():
            qml.QubitStateVector(state, wires=range(n_wires))
            op(wires=range(n_wires)).inv()
            return qml.probs(wires=range(n_wires))

        reference = np.abs(mat.conj().T @ state) ** 2
        assert np.allclose(circuit(), reference, atol=tol(dev.shots))

    @pytest.mark.parametrize("gamma", [0.5432, -0.232])
    @pytest.mark.parametrize("op,func", two_qubit_param)
    def test_two_qubit_parameters(self, device, init_state, op, func, gamma, tol, skip_if):
        """Inverse of a two-qubit gate taking one scalar parameter."""
        n_wires = 2
        dev = device(n_wires)
        skip_if(dev, {"supports_inverse_operations": False})
        skip_if(dev, {"returns_probs": False})

        state = init_state(2)

        @qml.qnode(dev)
        def circuit():
            qml.QubitStateVector(state, wires=range(n_wires))
            op(gamma, wires=range(n_wires)).inv()
            return qml.probs(wires=range(n_wires))

        reference = np.abs(func(gamma).conj().T @ state) ** 2
        assert np.allclose(circuit(), reference, atol=tol(dev.shots))

    @pytest.mark.parametrize("mat", [U, U2])
    def test_qubit_unitary(self, device, init_state, mat, tol, skip_if):
        """Inverse of an arbitrary QubitUnitary gate."""
        n_wires = int(np.log2(len(mat)))
        dev = device(n_wires)
        skip_if(dev, {"supports_inverse_operations": False})
        skip_if(dev, {"returns_probs": False})

        state = init_state(n_wires)

        @qml.qnode(dev)
        def circuit():
            qml.QubitStateVector(state, wires=range(n_wires))
            qml.QubitUnitary(mat, wires=list(range(n_wires))).inv()
            return qml.probs(wires=range(n_wires))

        reference = np.abs(mat.conj().T @ state) ** 2
        assert np.allclose(circuit(), reference, atol=tol(dev.shots))

    @pytest.mark.parametrize("op, mat", three_qubit)
    def test_three_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):
        """Inverse of a non-parametrized three-qubit gate."""
        n_wires = 3
        dev = device(n_wires)
        skip_if(dev, {"supports_inverse_operations": False})
        skip_if(dev, {"returns_probs": False})

        state = init_state(3)

        @qml.qnode(dev)
        def circuit():
            qml.QubitStateVector(state, wires=range(n_wires))
            op(wires=range(n_wires)).inv()
            return qml.probs(wires=range(n_wires))

        reference = np.abs(mat.conj().T @ state) ** 2
        assert np.allclose(circuit(), reference, atol=tol(dev.shots))
| pennylane/devices/tests/test_gates.py | 21,321 | Test qubit-based devices' probability vector after application of gates.
Test the device's probability vector after application of inverse of gates.
Test that the device can implement all gates that it claims to support.
Test basis state initialization.
Test that the device can implement the inverse of all its supported gates.
This test is skipped for devices that do not support inverse operations.
Test QubitStateVector initialisation.
Test QubitUnitary gate.
Test inverse QubitUnitary gate.
Test three axis rotation gate.
Test inverse three axis rotation gate.
Test PauliX application.
Test inverse single qubit gate application.
Test single qubit gates taking a single scalar argument.
Test inverse single qubit gates taking one scalar parameter.
Test that the device can implement all its supported gates.
Test three qubit gates without parameters.
Test inverse three qubit gates without parameters.
Test two qubit gates.
Test inverse two qubit gates.
Test parametrized two qubit gates taking a single scalar argument.
Test inverse of two qubit gates taking one parameter.
Tests that application of gates and state preparations
works correctly an a device.
Copyright 2018-2021 Xanadu Quantum Technologies Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pylint: disable=no-self-use pylint: disable=too-many-arguments pylint: disable=pointless-statement ========================================================== Some useful global variables gates for which device support is tested non-parametrized qubit gates parametrized qubit gates list of all non-parametrized single-qubit gates, along with the PennyLane operation name list of all parametrized single-qubit gates taking a single parameter list of all non-parametrized two-qubit gates list of all parametrized two-qubit gates list of all three-qubit gates single qubit unitary matrix two qubit unitary matrix single qubit Hermitian observable =============================================================== maximum size of current gates | 2,478 | en | 0.788506 |
# Generated by Django 3.0.3 on 2020-08-03 15:29
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the ``address`` field from the ``UserProfile`` model.

    Reverses migration 0010, which introduced the field.
    """

    dependencies = [
        ('ecommerce_platform', '0010_userprofile_address'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='userprofile',
            name='address',
        ),
    ]
| obsidian_traders/ecommerce_platform/migrations/0011_remove_userprofile_address.py | 363 | Generated by Django 3.0.3 on 2020-08-03 15:29 | 45 | en | 0.646847 |
import pandas as pd
import numpy as np
def optimize_feature_power(df, output_column_name=None, exponents=(2., 1., .8, .5, .25, .1, .01)):
    """ Correlate power-law scalings of the target column with each input feature

    For each exponent ``p``, computes ``corr(df[output_column_name] ** p, df[col])``
    for every other column, so you can see which power-law scaling of the target
    lines up best with each feature.

    Args:
        df (pd.DataFrame): input features plus the target column.
        output_column_name (str): name of the target column;
            defaults to the last column of ``df``.
        exponents (iterable of float): powers applied to the target column.

    >>> np.random.seed(314159)
    >>> df = pd.DataFrame()
    >>> df['output'] = np.random.randn(1000)
    >>> df['x10'] = df.output * 10
    >>> df['sq'] = df.output ** 2
    >>> df['sqrt'] = df.output ** .5
    >>> optimize_feature_power(df, output_column_name='output').round(2)
            x10    sq  sqrt
    power
    2.00  -0.08  1.00  0.83
    1.00   1.00 -0.08  0.97
    0.80   1.00  0.90  0.99
    0.50   0.97  0.83  1.00
    0.25   0.93  0.76  0.99
    0.10   0.89  0.71  0.97
    0.01   0.86  0.67  0.95

    Returns:
        DataFrame:
             columns are the input_columns from the source dataframe (df)
             rows are correlation with output for each attempted exponent used to scale the input features
    """
    if output_column_name is None:
        output_column_name = df.columns[-1]
    input_column_names = [name for name in df.columns if name != output_column_name]
    # Materialize so generators work and len()/indexing below are safe.
    exponents = list(exponents)
    results = np.zeros((len(exponents), len(input_column_names)))
    for rownum, exponent in enumerate(exponents):
        # Scale the target once per exponent instead of once per (exponent, column).
        scaled_output = df[output_column_name] ** exponent
        for colnum, column_name in enumerate(input_column_names):
            results[rownum, colnum] = scaled_output.corr(df[column_name])
    return pd.DataFrame(results, columns=input_column_names,
                        index=pd.Series(exponents, name='power'))
| src/nlpia/features.py | 1,590 | Plot the correlation coefficient for various exponential scalings of input features
>>> np.random.seed(314159)
>>> df = pd.DataFrame()
>>> df['output'] = np.random.randn(1000)
>>> df['x10'] = df.output * 10
>>> df['sq'] = df.output ** 2
>>> df['sqrt'] = df.output ** .5
>>> optimize_feature_power(df, output_column_name='output').round(2)
x10 sq sqrt
power
2.00 -0.08 1.00 0.83
1.00 1.00 -0.08 0.97
0.80 1.00 0.90 0.99
0.50 0.97 0.83 1.00
0.25 0.93 0.76 0.99
0.10 0.89 0.71 0.97
0.01 0.86 0.67 0.95
Returns:
DataFrame:
columns are the input_columns from the source dataframe (df)
rows are correlation with output for each attempted exponent used to scale the input features
results.plot(logx=True) | 751 | en | 0.442601 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regression task.
Find commit ranges where regressions were introduced."""
from builtins import range
import random
import time
from base import errors
from base import tasks
from bot import testcase_manager
from bot.tasks import setup
from bot.tasks import task_creation
from build_management import build_manager
from build_management import revisions
from datastore import data_handler
from datastore import data_types
from google_cloud_utils import big_query
from metrics import logs
from system import environment
# Number of revisions to probe just below the maximum before falling back to a
# bisect. Also reused as a cap on how many builds to try near the minimum when
# the minimum itself turns out to be a bad build.
EXTREME_REVISIONS_TO_TEST = 3
# Number of earlier revisions to spot-check when validating a computed range.
REVISIONS_TO_TEST_FOR_VALIDATION = 2
# How far back (in revisions) validation is allowed to look.
EARLIER_REVISIONS_TO_CONSIDER_FOR_VALIDATION = 10
def write_to_big_query(testcase, regression_range_start, regression_range_end):
    """Record the computed regression range in the BigQuery 'regressions' table."""
    big_query.write_range(
        table_id='regressions',
        testcase=testcase,
        range_name='regression',
        start=regression_range_start,
        end=regression_range_end)
def _save_current_regression_range_indices(testcase_id, regression_range_start,
                                           regression_range_end):
    """Persist the in-progress range so a restarted task can resume from it."""
    testcase = data_handler.get_testcase_by_id(testcase_id)
    # Defer the datastore write so both metadata updates land in one put().
    testcase.set_metadata(
        'last_regression_min', regression_range_start, update_testcase=False)
    testcase.set_metadata(
        'last_regression_max', regression_range_end, update_testcase=False)
    testcase.put()
def save_regression_range(testcase_id, regression_range_start,
                          regression_range_end):
    """Store the final regression range and kick off follow-up tasks."""
    testcase = data_handler.get_testcase_by_id(testcase_id)
    testcase.regression = '%d:%d' % (regression_range_start, regression_range_end)
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.FINISHED,
        'regressed in range %s' % testcase.regression)

    write_to_big_query(testcase, regression_range_start, regression_range_end)

    # Impacts may have to be derived from the regression range when no
    # production build is available to test against, so refresh them now.
    task_creation.create_impact_task_if_needed(testcase)

    # Blame assignment is driven by the regression range result.
    task_creation.create_blame_task_if_needed(testcase)

    # Hand off to the fine-grained bisection service when one is available.
    task_creation.request_bisection(testcase, 'regressed')
def _testcase_reproduces_in_revision(testcase,
                                     testcase_file_path,
                                     job_type,
                                     revision,
                                     should_log=True,
                                     min_revision=None,
                                     max_revision=None):
    """Return True when the testcase crashes against the build at |revision|.

    Raises:
        errors.BuildSetupError: the build at |revision| could not be set up.
        errors.BadBuildError: the build at |revision| is flagged as bad.
    """
    if should_log:
        message = 'Testing r%d' % revision
        if min_revision is not None and max_revision is not None:
            message += ' (current range %d:%d)' % (min_revision, max_revision)

        # Re-fetch to avoid clobbering concurrent testcase updates.
        testcase = data_handler.get_testcase_by_id(testcase.key.id())
        data_handler.update_testcase_comment(testcase, data_types.TaskState.WIP,
                                             message)

    build_manager.setup_build(revision)
    if not build_manager.check_app_path():
        raise errors.BuildSetupError(revision, job_type)

    if testcase_manager.check_for_bad_build(job_type, revision):
        message = 'Bad build at r%d. Skipping' % revision
        testcase = data_handler.get_testcase_by_id(testcase.key.id())
        data_handler.update_testcase_comment(testcase, data_types.TaskState.WIP,
                                             message)
        raise errors.BadBuildError(revision, job_type)

    timeout = environment.get_value('TEST_TIMEOUT', 10)
    crash_result = testcase_manager.test_for_crash_with_retries(
        testcase, testcase_file_path, timeout, http_flag=testcase.http_flag)
    return crash_result.is_crash()
def found_regression_near_extreme_revisions(testcase, testcase_file_path,
                                            job_type, revision_list, min_index,
                                            max_index):
    """Probe builds near both ends of the range before falling back to bisect.

    Returns:
        True when a regression range was found and saved, False otherwise.

    Raises:
        errors.BadBuildError: every build probed near the minimum was bad.
    """
    # Walk backwards from the newest revision, which is known to crash.
    newest_crashing_revision = revision_list[max_index]
    for offset in range(1, EXTREME_REVISIONS_TO_TEST + 1):
        index = max_index - offset
        if index < min_index:
            break

        try:
            reproduces = _testcase_reproduces_in_revision(
                testcase, testcase_file_path, job_type, revision_list[index])
        except errors.BadBuildError:
            # Unusable build; move on to the next older revision.
            continue

        if not reproduces:
            # The crash appeared between this non-crashing revision and the
            # next newer crashing one.
            save_regression_range(testcase.key.id(), revision_list[index],
                                  newest_crashing_revision)
            return True

        newest_crashing_revision = revision_list[index]

    # If the oldest runnable revision already crashes, the bug predates our
    # earliest build; report the range as 0:|min_revision|. This is also the
    # precondition check for the binary search.
    for _ in range(EXTREME_REVISIONS_TO_TEST):
        min_revision = revision_list[min_index]

        try:
            crashes_at_min = _testcase_reproduces_in_revision(
                testcase,
                testcase_file_path,
                job_type,
                min_revision,
                should_log=False)
        except errors.BadBuildError:
            # Bad build at the minimum; advance to the next revision if possible.
            if min_index + 1 >= max_index:
                break
            min_index += 1
            continue

        if crashes_at_min:
            save_regression_range(testcase.key.id(), 0, min_revision)
            return True

        return False

    # Every build we tried near the minimum was bad.
    raise errors.BadBuildError(revision_list[min_index], job_type)
def validate_regression_range(testcase, testcase_file_path, job_type,
                              revision_list, min_index):
    """Spot-check a few earlier revisions to confirm they do not crash.

    Returns False (after marking the testcase) when an earlier revision still
    reproduces the crash, meaning the computed range is unreliable.
    """
    candidates = revision_list[
        min_index - EARLIER_REVISIONS_TO_CONSIDER_FOR_VALIDATION:min_index]
    sample_size = min(len(candidates), REVISIONS_TO_TEST_FOR_VALIDATION)

    for revision in random.sample(candidates, sample_size):
        try:
            if _testcase_reproduces_in_revision(testcase, testcase_file_path,
                                                job_type, revision):
                testcase = data_handler.get_testcase_by_id(testcase.key.id())
                testcase.regression = 'NA'
                error_message = (
                    'Low confidence in regression range. Test case crashes in '
                    'revision r%d but not later revision r%d' %
                    (revision, revision_list[min_index]))
                data_handler.update_testcase_comment(
                    testcase, data_types.TaskState.ERROR, error_message)
                return False
        except errors.BadBuildError:
            # A bad build cannot invalidate the range; just skip it.
            pass

    return True
def find_regression_range(testcase_id, job_type):
    """Bisect the build history to find where the testcase started crashing."""
    deadline = tasks.get_task_completion_deadline()

    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    if testcase.regression:
        logs.log_error(
            'Regression range is already set as %s, skip.' % testcase.regression)
        return

    # Custom binaries have no revision history to bisect over.
    if build_manager.is_custom_binary():
        testcase.regression = 'NA'
        data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                             'Not applicable for custom binaries')
        return

    data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

    # Set up the testcase and its dependencies; reschedule on failure.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
    if not file_list:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                             'Failed to setup testcase')
        tasks.add_task('regression', testcase_id, job_type)
        return

    build_bucket_path = build_manager.get_primary_bucket_path()
    revision_list = build_manager.get_revisions_list(
        build_bucket_path, testcase=testcase)
    if not revision_list:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                             'Failed to fetch revision list')
        tasks.add_task('regression', testcase_id, job_type)
        return

    # The builds tested here are essentially random; keep them out of the
    # NFS cache.
    environment.set_value('CACHE_STORE', False)

    # Resume from a previous partial run when possible.
    min_revision = testcase.get_metadata('last_regression_min')
    max_revision = testcase.get_metadata('last_regression_max')
    first_run = not min_revision and not max_revision

    if not min_revision:
        min_revision = revisions.get_first_revision_in_list(revision_list)
    if not max_revision:
        max_revision = testcase.crash_revision

    min_index = revisions.find_min_revision_index(revision_list, min_revision)
    if min_index is None:
        raise errors.BuildNotFoundError(min_revision, job_type)
    max_index = revisions.find_max_revision_index(revision_list, max_revision)
    if max_index is None:
        raise errors.BuildNotFoundError(max_revision, job_type)

    # The binary search requires the max revision to actually crash; verify
    # that before doing anything expensive.
    max_revision = revision_list[max_index]
    if not _testcase_reproduces_in_revision(
            testcase, testcase_file_path, job_type, max_revision,
            should_log=False):
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Known crash revision %d did not crash' % max_revision)
        task_creation.mark_unreproducible_if_flaky(testcase, True)
        return

    # Reproducible after all: clear any stale flakiness marker from earlier runs.
    task_creation.mark_unreproducible_if_flaky(testcase, False)

    # On a fresh run, regressions very near either end of the range are common;
    # check them cheaply before committing to a full bisect.
    if first_run and found_regression_near_extreme_revisions(
            testcase, testcase_file_path, job_type, revision_list, min_index,
            max_index):
        return

    while time.time() < deadline:
        min_revision = revision_list[min_index]
        max_revision = revision_list[max_index]

        # Adjacent (or identical) endpoints: the range is as tight as it gets.
        if max_index - min_index <= 1:
            if not validate_regression_range(testcase, testcase_file_path,
                                             job_type, revision_list, min_index):
                return
            save_regression_range(testcase_id, min_revision, max_revision)
            return

        middle_index = (min_index + max_index) // 2
        middle_revision = revision_list[middle_index]
        try:
            is_crash = _testcase_reproduces_in_revision(
                testcase,
                testcase_file_path,
                job_type,
                middle_revision,
                min_revision=min_revision,
                max_revision=max_revision)
        except errors.BadBuildError:
            # Drop the bad revision and shift the max index onto the
            # shortened list.
            del revision_list[middle_index]
            max_index -= 1
            continue

        if is_crash:
            max_index = middle_index
        else:
            min_index = middle_index

        _save_current_regression_range_indices(
            testcase_id, revision_list[min_index], revision_list[max_index])

    # Deadline hit: record progress and reschedule so the next run resumes here.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.ERROR,
        'Timed out, current range r%d:r%d' %
        (revision_list[min_index], revision_list[max_index]))
    tasks.add_task('regression', testcase_id, job_type)
def execute_task(testcase_id, job_type):
    """Entry point: run the regression task, converting known errors to retries."""
    try:
        find_regression_range(testcase_id, job_type)
    except errors.BuildSetupError as error:
        # A build-setup failure is most likely a transient bot problem, so
        # reschedule the task after a delay.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Build setup failed r%d' % error.revision)
        build_fail_wait = environment.get_value('FAIL_WAIT')
        tasks.add_task(
            'regression', testcase_id, job_type, wait_time=build_fail_wait)
    except errors.BadBuildError:
        # Recoverable bad builds are handled during bisection; reaching this
        # point means the situation is unrecoverable, so give up on the range.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        testcase.regression = 'NA'
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Unable to recover from bad build')
| src/python/bot/tasks/regression_task.py | 14,930 | Save current regression range indices in case we die in middle of task.
Test to see if a test case reproduces in the specified revision.
Run regression task and handle potential errors.
Attempt to find when the testcase regressed.
Test to see if we regressed near either the min or max revision.
Saves the regression range and creates blame and impact task if needed.
Ensure that we found the correct min revision by testing earlier ones.
Write the regression range to BigQuery.
Regression task.
Find commit ranges where regressions were introduced.
Copyright 2019 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Number of revisions before the maximum to test before doing a bisect. This is also used as a cap for revisions to test near the minimum if the minimum happens to be a bad build. Number of earlier revisions to check when validating ranges. Maximum revisions to look back when validating. Force impacts update after regression range is updated. In several cases, we might not have a production build to test with, so regression range is used to decide impacts. Get blame information using the regression range result. If there is a fine grained bisection service available, request it. Test a few of the most recent revisions. If we don't crash in a recent revision, we regressed in one of the commits between the current revision and the one at the next index. Skip this revision. Test to see if we crash in the oldest revision we can run. This is a pre- condition for our binary search. If we do crash in that revision, it implies that we regressed between the first commit and our first revision, which we represent as 0:|min_revision|. If we find a bad build, potentially try another. We should have returned above. If we get here, it means we tried too many builds near the min revision, and they were all bad. This task is not applicable for custom binaries. Setup testcase and its dependencies. Don't burden NFS server with caching these random builds. Pick up where left off in a previous run if necessary. 
Make sure that the revision where we noticed the crash, still crashes at that revision. Otherwise, our binary search algorithm won't work correctly. If we've made it this far, the test case appears to be reproducible. Clear metadata from previous runs had it been marked as potentially flaky. On the first run, check to see if we regressed near either the min or max revision. If the min and max revisions are one apart (or the same, if we only have one build), this is as much as we can narrow the range. Verify that the regression range seems correct, and save it if so. Skip this revision. If we've broken out of the above loop, we timed out. We'll finish by running another regression task and picking up from this point. If we failed to setup a build, it is likely a bot error. We can retry the task in this case. Though bad builds when narrowing the range are recoverable, certain builds being marked as bad may be unrecoverable. Recoverable ones should not reach this point. | 3,495 | en | 0.919973 |
# -*- coding: utf-8 -*-
"""
@Remark: 自定义视图集
"""
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework.decorators import action
from rest_framework.viewsets import ModelViewSet
from utils.filters import DataLevelPermissionsFilter
from utils.jsonResponse import SuccessResponse,ErrorResponse
from utils.permission import CustomPermission
from django.http import Http404
from django.shortcuts import get_object_or_404 as _get_object_or_404
from django.core.exceptions import ValidationError
from utils.exception import APIException
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.filters import OrderingFilter, SearchFilter
from rest_framework.permissions import IsAuthenticated
def get_object_or_404(queryset, *filter_args, **filter_kwargs):
    """Like Django's shortcut, but also treat type/validation errors as "not found".

    Converts Http404 conditions and malformed lookup values alike into the
    project's APIException so clients get a uniform error response.
    """
    try:
        return _get_object_or_404(queryset, *filter_args, **filter_kwargs)
    except (TypeError, ValueError, ValidationError):
        raise APIException(message='该对象不存在或者无访问权限')
class CustomModelViewSet(ModelViewSet):
    """Project-wide ModelViewSet base class.

    Differences from the stock DRF ModelViewSet:
      * every action returns the project's unified response envelope
        (SuccessResponse / ErrorResponse);
      * create/update can use dedicated serializers via
        ``create_serializer_class`` / ``update_serializer_class``;
      * ``values_queryset`` may be set for leaner, values-style ORM queries;
      * DELETE accepts comma-separated primary keys
        (e.g. ``/api/admin/user/1,2,3/``) and a ``multiple_delete`` action is
        exposed for body-based bulk deletion.
    """

    values_queryset = None
    ordering_fields = '__all__'
    create_serializer_class = None
    update_serializer_class = None
    filter_fields = ()
    search_fields = ()
    # Row-level (data scope) permission filtering applied on top of the
    # regular filter backends.
    extra_filter_backends = [DataLevelPermissionsFilter]
    permission_classes = [CustomPermission, IsAuthenticated]
    filter_backends = [DjangoFilterBackend, OrderingFilter, SearchFilter]

    def filter_queryset(self, queryset):
        # Run both the standard and the extra (data-level) backends.
        backends = set(self.filter_backends) | set(self.extra_filter_backends or [])
        for backend in backends:
            queryset = backend().filter_queryset(self.request, queryset, self)
        return queryset

    def get_queryset(self):
        # Prefer the leaner values-style queryset when one is configured.
        if getattr(self, 'values_queryset', None):
            return self.values_queryset
        return super().get_queryset()

    def get_serializer_class(self):
        # Look up an action-specific serializer first, e.g.
        # ``create_serializer_class`` for the "create" action.
        action_serializer = getattr(self, f"{self.action}_serializer_class", None)
        if action_serializer:
            return action_serializer
        return super().get_serializer_class()

    def create(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data, request=request)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        self.get_success_headers(serializer.data)
        return SuccessResponse(data=serializer.data, msg="新增成功")

    def list(self, request, *args, **kwargs):
        queryset = self.filter_queryset(self.get_queryset())
        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = self.get_serializer(page, many=True, request=request)
            return self.get_paginated_response(serializer.data)
        serializer = self.get_serializer(queryset, many=True, request=request)
        return SuccessResponse(data=serializer.data, msg="获取成功")

    def retrieve(self, request, *args, **kwargs):
        serializer = self.get_serializer(self.get_object())
        return SuccessResponse(data=serializer.data, msg="获取成功")

    def update(self, request, *args, **kwargs):
        partial = kwargs.pop('partial', False)
        instance = self.get_object()
        serializer = self.get_serializer(
            instance, data=request.data, request=request, partial=partial)
        serializer.is_valid(raise_exception=True)
        self.perform_update(serializer)

        if getattr(instance, '_prefetched_objects_cache', None):
            # Prefetch caches are stale after the update; force a refresh.
            instance._prefetched_objects_cache = {}

        return SuccessResponse(data=serializer.data, msg="更新成功")

    def get_object_list(self):
        """Resolve a comma-separated lookup value into a filtered queryset."""
        queryset = self.filter_queryset(self.get_queryset())

        lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
        assert lookup_url_kwarg in self.kwargs, (
            'Expected view %s to be called with a URL keyword argument '
            'named "%s". Fix your URL conf, or set the `.lookup_field` '
            'attribute on the view correctly.' %
            (self.__class__.__name__, lookup_url_kwarg)
        )

        filter_kwargs = {
            f"{self.lookup_field}__in": self.kwargs[lookup_url_kwarg].split(',')}
        obj = queryset.filter(**filter_kwargs)
        self.check_object_permissions(self.request, obj)
        return obj

    def destroy(self, request, *args, **kwargs):
        """DELETE supporting comma-separated ids, e.g. ``/api/admin/user/1,2,3/``."""
        self.perform_destroy(self.get_object_list())
        return SuccessResponse(data=[], msg="删除成功")

    def perform_destroy(self, instance):
        instance.delete()

    # Swagger schema for the bulk-delete request body.
    keys = openapi.Schema(description='主键列表', type=openapi.TYPE_ARRAY, items=openapi.TYPE_STRING)

    @swagger_auto_schema(request_body=openapi.Schema(
        type=openapi.TYPE_OBJECT,
        required=['keys'],
        properties={'keys': keys}
    ), operation_summary='批量删除')
    @action(methods=['delete'], detail=False)
    def multiple_delete(self, request, *args, **kwargs):
        """Bulk delete by a ``keys`` list supplied in the request body."""
        keys = request.data.get('keys', None)
        if keys:
            self.get_queryset().filter(id__in=keys).delete()
            return SuccessResponse(data=[], msg="删除成功")
        return ErrorResponse(msg="未获取到keys字段")
| backend/utils/viewset.py | 6,746 | 自定义的ModelViewSet:
统一标准的返回格式;新增,查询,修改可使用不同序列化器
(1)ORM性能优化, 尽可能使用values_queryset形式
(2)create_serializer_class 新增时,使用的序列化器
(3)update_serializer_class 修改时,使用的序列化器
Same as Django's standard shortcut, but make sure to also raise 404
if the filter_kwargs don't match the required types.
@Remark: 自定义视图集
-*- coding: utf-8 -*- filter_fields = '__all__' result = self.get_paginated_response(serializer.data) print(51,result.data) return JsonResponse(code=2000,msg="获取成功", data=result.data) If 'prefetch_related' has been applied to a queryset, we need to forcibly invalidate the prefetch cache on the instance.增强drf得批量删除功能 :http请求方法:delete 如: url /api/admin/user/1,2,3/ 批量删除id 1,2,3得用户重写delete方法,让它支持批量删除 如: /api/admin/user/1,2,3/ 批量删除id 1,2,3得用户原来得单id删除方法 def destroy(self, request, *args, **kwargs): instance = self.get_object() self.perform_destroy(instance) return SuccessResponse(data=[], msg="删除成功")新的批量删除方法print(request.data) | 937 | zh | 0.35457 |
# -*- coding: utf-8 -*-
from base.log import *
import os
def get_url(trackId,trackPointId,type1,seq,imageType):
    """Build the KRS image-query URL for a track point.

    NOTE(review): ``trackId`` is accepted but never appears in the URL —
    confirm whether the endpoint actually needs it.
    """
    base = 'http://10.11.5.34:13100/krs/image/get'
    query = 'trackPointId=%s&type=%s&seq=%s&imageType=%s' % (trackPointId, type1, seq, imageType)
    return base + '?' + query
def main():
    """Demo entry point: build a sample image-query URL and print it."""
    url = get_url('123', '123', '00', '004', 'jpg')
    # print(...) is valid on both Python 2 and 3; the bare `print url`
    # statement it replaces was Python-2-only syntax.
    print(url)

if __name__ == '__main__':
    main()
| mesh_krs_imagequery.py | 365 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
# Enumerate ordered pairs (y, z) of two dice with y > z for player 1
# (y1, z1) and player 2 (y2, z2), restricted by the loop bounds to
# z2 <= z1 and y2 <= y1, and accumulate:
#   enum - the number of ordered roll combinations (out of 36 * 36),
#   prob - the same count expressed as a probability (enum / 1296).
#
# Dead code removed from the original: an unused `parity` variable, a large
# commented-out (triple-quoted) variant of the loop, and the `enum1 = 0` /
# `enum2 = 0` branches, which were unreachable because the loop bounds
# already guarantee z2 <= z1 and y2 <= y1.
enum = 0
prob = 0
for z1 in range(1, 6):
    for y1 in range(z1 + 1, 7):
        for z2 in range(1, z1 + 1):
            for y2 in range(z2 + 1, y1 + 1):
                # One ordered way to roll a tie, two ways when strictly greater.
                enum1 = 1 if z1 == z2 else 2
                p1 = enum1 / 36
                enum2 = 1 if y1 == y2 else 2
                p2 = enum2 / 36
                enum += enum1 * enum2
                prob += p1 * p2
print(enum, prob)
import datetime
import logging
import multiprocessing
import os
import re
import subprocess
import sys
import tempfile
import time
from typing import Any, Dict, List, Optional
import dateutil.parser
import pytest
import requests
from determined import experimental
from determined.common import api, yaml
from determined.common.api import authentication, certs
from tests import config as conf
from tests.cluster import utils as cluster_utils
def maybe_create_native_experiment(context_dir: str, command: List[str]) -> Optional[int]:
    """Launch a native-API experiment; return its id, or None if none was created."""
    env = {**os.environ, "DET_MASTER": conf.make_master_url()}
    proc = subprocess.Popen(
        command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd=context_dir, env=env
    )
    with proc:
        assert proc.stdout is not None
        for raw_line in proc.stdout:
            match = re.search(r"Created experiment (\d+)\n", raw_line.decode())
            if match:
                return int(match.group(1))
    return None
def create_native_experiment(context_dir: str, command: List[str]) -> int:
    """Like maybe_create_native_experiment, but fail the test when nothing was created."""
    exp_id = maybe_create_native_experiment(context_dir, command)
    if exp_id is not None:
        return exp_id
    pytest.fail(f"Failed to create experiment in {context_dir}: {command}")
def maybe_create_experiment(
    config_file: str, model_def_file: str, create_args: Optional[List[str]] = None
) -> subprocess.CompletedProcess:
    """Run ``det experiment create`` and return the CompletedProcess unchecked."""
    cmd = [
        "det",
        "-m",
        conf.make_master_url(),
        "experiment",
        "create",
        config_file,
        model_def_file,
    ]
    cmd += create_args or []
    env = {**os.environ, "DET_DEBUG": "true"}
    return subprocess.run(
        cmd, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
    )
def create_experiment(
    config_file: str, model_def_file: str, create_args: Optional[List[str]] = None
) -> int:
    """Create an experiment via the CLI and return its id, failing loudly otherwise."""
    proc = maybe_create_experiment(config_file, model_def_file, create_args)
    assert proc.returncode == 0, f"\nstdout:\n{proc.stdout} \nstderr:\n{proc.stderr}"
    match = re.search(r"Created experiment (\d+)\n", str(proc.stdout))
    assert match is not None
    return int(match.group(1))
def pause_experiment(experiment_id: int) -> None:
    """Pause the experiment through the CLI."""
    subprocess.check_call(
        ["det", "-m", conf.make_master_url(), "experiment", "pause", str(experiment_id)]
    )
def activate_experiment(experiment_id: int) -> None:
    """Activate (resume) the experiment through the CLI."""
    subprocess.check_call(
        ["det", "-m", conf.make_master_url(), "experiment", "activate", str(experiment_id)]
    )
def change_experiment_state(experiment_id: int, new_state: str) -> None:
    """PATCH the experiment to ``new_state`` via the master's REST API."""
    # TODO(DET-5678): refactor tests to not use cli singleton auth.
    certs.cli_cert = certs.default_load(conf.make_master_url())
    authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
    r = api.patch(
        conf.make_master_url(),
        "experiments/{}".format(experiment_id),
        headers={"Content-Type": "application/merge-patch+json"},
        json={"state": new_state},
    )
    # The master answers 204 No Content on a successful state change.
    assert r.status_code == requests.codes.no_content, r.text
def cancel_experiment(experiment_id: int) -> None:
    """Request cancellation, then block until the experiment reports CANCELED."""
    change_experiment_state(experiment_id, "STOPPING_CANCELED")
    # The intermediate STOPPING_CANCELED state may never be observed.
    wait_for_experiment_state(experiment_id, "CANCELED")
def cancel_experiment_v1(experiment_id: int) -> None:
    """Cancel via the v1 REST endpoint and wait for the CANCELED state."""
    master = conf.make_master_url()
    certs.cli_cert = certs.default_load(master)
    authentication.cli_auth = authentication.Authentication(master, try_reauth=True)
    response = api.post(master, "/api/v1/experiments/{}/cancel".format(experiment_id))
    response.raise_for_status()
    wait_for_experiment_state(experiment_id, "CANCELED")
def wait_for_experiment_state(
    experiment_id: int,
    target_state: str,
    max_wait_secs: int = conf.DEFAULT_MAX_WAIT_SECS,
    log_every: int = 60,
) -> None:
    """Poll once per second until the experiment reaches ``target_state``.

    Fails the surrounding pytest if the experiment lands in a *different*
    terminal state, or if ``max_wait_secs`` elapse first.  On timeout while
    waiting for COMPLETED, the experiment is canceled so it does not linger
    on the cluster.
    """
    for seconds_waited in range(max_wait_secs):
        try:
            state = experiment_state(experiment_id)
        # Ignore network errors while polling for experiment state to avoid a
        # single network flake to cause a test suite failure. If the master is
        # unreachable multiple times, this test will fail after max_wait_secs.
        except api.errors.MasterNotFoundException:
            logging.warning(
                "Network failure ignored when polling for state of "
                "experiment {}".format(experiment_id)
            )
            time.sleep(1)
            continue
        if state == target_state:
            return
        if is_terminal_state(state):
            if state != target_state:
                report_failed_experiment(experiment_id)
            pytest.fail(
                f"Experiment {experiment_id} terminated in {state} state, expected {target_state}"
            )
        # Periodic progress message so long waits are visible in test output.
        if seconds_waited > 0 and seconds_waited % log_every == 0:
            print(
                f"Waited {seconds_waited} seconds for experiment {experiment_id} "
                f"(currently {state}) to reach {target_state}"
            )
        time.sleep(1)
    else:
        # for/else: the loop has no break, so this runs exactly when the
        # timeout expired without an early return above.
        if target_state == "COMPLETED":
            cancel_experiment(experiment_id)
        report_failed_experiment(experiment_id)
        pytest.fail(
            "Experiment did not reach target state {} after {} seconds".format(
                target_state, max_wait_secs
            )
        )
def experiment_has_active_workload(experiment_id: int) -> bool:
    """True if any running task with live containers belongs to the experiment."""
    certs.cli_cert = certs.default_load(conf.make_master_url())
    authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
    tasks = api.get(conf.make_master_url(), "tasks").json()
    label = "Experiment {}".format(experiment_id)
    return any(label in task["name"] and len(task["containers"]) > 0 for task in tasks.values())
def wait_for_experiment_active_workload(
    experiment_id: int, max_ticks: int = conf.MAX_TASK_SCHEDULED_SECS
) -> None:
    """Poll (1 Hz) until the experiment has an active workload, else fail.

    Bug fix: the loop previously iterated ``range(conf.MAX_TASK_SCHEDULED_SECS)``
    directly, silently ignoring the ``max_ticks`` parameter.
    """
    for _ in range(max_ticks):
        if experiment_has_active_workload(experiment_id):
            return
        time.sleep(1)
    pytest.fail(
        f"The only trial cannot be scheduled within {max_ticks} seconds.",
    )
def wait_for_experiment_workload_progress(
    experiment_id: int, max_ticks: int = conf.MAX_TRIAL_BUILD_SECS
) -> None:
    """Poll (1 Hz) until the first trial has finished its first workload.

    "Progress" means the single trial reports more than one step.

    Bug fix: the loop previously iterated ``range(conf.MAX_TRIAL_BUILD_SECS)``
    directly, silently ignoring the ``max_ticks`` parameter.
    """
    for _ in range(max_ticks):
        trials = experiment_trials(experiment_id)
        if len(trials) > 0 and len(trials[0]["steps"]) > 1:
            return
        time.sleep(1)
    pytest.fail(
        f"Trial cannot finish first workload within {max_ticks} seconds.",
    )
def experiment_has_completed_workload(experiment_id: int) -> bool:
    """True once at least one step of any trial has reached COMPLETED."""
    certs.cli_cert = certs.default_load(conf.make_master_url())
    authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
    trials = experiment_trials(experiment_id)
    if not any(trials):
        return False
    return any(
        step["state"] == "COMPLETED" for trial in trials for step in trial["steps"]
    )
def experiment_json(experiment_id: int) -> Dict[str, Any]:
    """Fetch the experiment's full JSON metadata from the master."""
    master = conf.make_master_url()
    certs.cli_cert = certs.default_load(master)
    authentication.cli_auth = authentication.Authentication(master, try_reauth=True)
    response = api.get(master, "experiments/{}".format(experiment_id))
    assert response.status_code == requests.codes.ok, response.text
    payload: Dict[str, Any] = response.json()
    return payload
def experiment_state(experiment_id: int) -> str:
    """Return only the experiment's current state string."""
    state: str = experiment_json(experiment_id)["state"]
    return state
def experiment_trials(experiment_id: int) -> List[Dict[str, Any]]:
    """Return only the experiment's list of trial records."""
    trials: List[Dict[str, Any]] = experiment_json(experiment_id)["trials"]
    return trials
def num_experiments() -> int:
    """Return how many experiments the master currently knows about."""
    master = conf.make_master_url()
    certs.cli_cert = certs.default_load(master)
    authentication.cli_auth = authentication.Authentication(master, try_reauth=True)
    response = api.get(master, "experiments")
    assert response.status_code == requests.codes.ok, response.text
    return len(response.json())
def cancel_single(experiment_id: int, should_have_trial: bool = False) -> None:
    """Cancel an experiment and verify its (at most one) trial is CANCELED."""
    cancel_experiment(experiment_id)
    trials = experiment_trials(experiment_id)
    if not should_have_trial and not trials:
        return
    assert len(trials) == 1
    assert trials[0]["state"] == "CANCELED"
def cancel_single_v1(experiment_id: int, should_have_trial: bool = False) -> None:
    """Cancel via the v1 endpoint and verify its (at most one) trial is CANCELED."""
    cancel_experiment_v1(experiment_id)
    trials = experiment_trials(experiment_id)
    if not should_have_trial and not trials:
        return
    assert len(trials) == 1
    assert trials[0]["state"] == "CANCELED"
def is_terminal_state(state: str) -> bool:
    """True for states an experiment can never leave."""
    terminal = frozenset({"CANCELED", "COMPLETED", "ERROR"})
    return state in terminal
def trial_metrics(trial_id: int) -> Dict[str, Any]:
    """Fetch the full metrics blob for one trial."""
    master = conf.make_master_url()
    certs.cli_cert = certs.default_load(master)
    authentication.cli_auth = authentication.Authentication(master, try_reauth=True)
    response = api.get(master, "trials/{}/metrics".format(trial_id))
    assert response.status_code == requests.codes.ok, response.text
    metrics: Dict[str, Any] = response.json()
    return metrics
def get_flat_metrics(trial_id: int, metric: str) -> List:
    """Collect one named batch metric across every step of a trial, in order."""
    values = []
    for step in trial_metrics(trial_id)["steps"]:
        for batch in step["metrics"]["batch_metrics"]:
            values.append(batch[metric])
    return values
def num_trials(experiment_id: int) -> int:
    """Total number of trials in the experiment."""
    return len(experiment_trials(experiment_id))
def num_active_trials(experiment_id: int) -> int:
    """Count trials currently in the ACTIVE state."""
    return sum(t["state"] == "ACTIVE" for t in experiment_trials(experiment_id))
def num_completed_trials(experiment_id: int) -> int:
    """Count trials that reached the COMPLETED state."""
    return sum(t["state"] == "COMPLETED" for t in experiment_trials(experiment_id))
def num_error_trials(experiment_id: int) -> int:
    """Count trials that terminated in the ERROR state."""
    return sum(t["state"] == "ERROR" for t in experiment_trials(experiment_id))
def trial_logs(trial_id: int) -> List[str]:
    """Return all log messages of a trial, in order."""
    master = conf.make_master_url()
    certs.cli_cert = certs.default_load(master)
    authentication.cli_auth = authentication.Authentication(master, try_reauth=True)
    return [entry["message"] for entry in api.trial_logs(master, trial_id)]
def check_if_string_present_in_trial_logs(trial_id: int, target_string: str) -> bool:
    """True if any log line of the trial contains ``target_string``."""
    return any(target_string in line for line in trial_logs(trial_id))
def assert_equivalent_trials(A: int, B: int, validation_metrics: List[str]) -> None:
    """Assert trials A and B produced equivalent metrics, step by step.

    Batch losses must match to float tolerance (``pytest.approx``), as must
    every validation metric named in ``validation_metrics``.
    """
    full_trial_metrics1 = trial_metrics(A)
    full_trial_metrics2 = trial_metrics(B)
    assert len(full_trial_metrics1["steps"]) == len(full_trial_metrics2["steps"])
    for step1, step2 in zip(full_trial_metrics1["steps"], full_trial_metrics2["steps"]):
        metric1 = step1["metrics"]["batch_metrics"]
        metric2 = step2["metrics"]["batch_metrics"]
        for batch1, batch2 in zip(metric1, metric2):
            # Each batch record is expected to hold exactly two entries —
            # assumes "loss" plus one other key; TODO confirm.
            assert len(batch1) == len(batch2) == 2
            assert batch1["loss"] == pytest.approx(batch2["loss"])
        if step1["validation"] is not None or step2["validation"] is not None:
            # Either both trials validated at this step, or neither did.
            assert step1["validation"] is not None
            assert step2["validation"] is not None
            for metric in validation_metrics:
                val1 = step1.get("validation").get("metrics").get("validation_metrics").get(metric)
                val2 = step2.get("validation").get("metrics").get("validation_metrics").get(metric)
                assert val1 == pytest.approx(val2)
def assert_performed_initial_validation(exp_id: int) -> None:
    """Check the very first step holds a completed validation at batch 0."""
    trials = experiment_trials(exp_id)
    assert len(trials) > 0
    steps = trials[0]["steps"]
    assert len(steps) > 0
    first_validation = steps[0]["validation"]
    assert first_validation is not None
    assert first_validation["total_batches"] == 0
    assert first_validation["state"] == "COMPLETED"
def assert_performed_final_checkpoint(exp_id: int) -> None:
    """Check the last step of the first trial holds a completed checkpoint."""
    trials = experiment_trials(exp_id)
    assert len(trials) > 0
    steps = trials[0]["steps"]
    assert len(steps) > 0
    final_checkpoint = steps[-1]["checkpoint"]
    assert final_checkpoint is not None
    assert final_checkpoint["state"] == "COMPLETED"
def run_describe_cli_tests(experiment_id: int) -> None:
    """
    Runs `det experiment describe` CLI command on a finished
    experiment. Will raise an exception if `det experiment describe`
    encounters a traceback failure.
    """
    # Exercise the command both without and with the --metrics flag; each run
    # must produce the three expected CSV files.
    for extra_args in ([], ["--metrics"]):
        with tempfile.TemporaryDirectory() as tmpdir:
            subprocess.check_call(
                [
                    "det",
                    "-m",
                    conf.make_master_url(),
                    "experiment",
                    "describe",
                    str(experiment_id),
                ]
                + extra_args
                + ["--outdir", tmpdir]
            )
            for csv_name in ("experiments.csv", "workloads.csv", "trials.csv"):
                assert os.path.exists(os.path.join(tmpdir, csv_name))
def run_list_cli_tests(experiment_id: int) -> None:
    """
    Runs list-related CLI commands on a finished experiment. Will raise an
    exception if the CLI command encounters a traceback failure.
    """
    base = ["det", "-m", conf.make_master_url(), "experiment"]
    subprocess.check_call(base + ["list-trials", str(experiment_id)])
    subprocess.check_call(base + ["list-checkpoints", str(experiment_id)])
    subprocess.check_call(
        base + ["list-checkpoints", "--best", str(1), str(experiment_id)]
    )
def report_failed_experiment(experiment_id: int) -> None:
    """Print a per-state trial tally and then every trial's logs to stderr."""
    trials = experiment_trials(experiment_id)
    tally = {
        state: sum(1 for t in trials if t["state"] == state)
        for state in (
            "ACTIVE",
            "PAUSED",
            "STOPPING_COMPLETED",
            "STOPPING_CANCELED",
            "STOPPING_ERROR",
            "STOPPING_KILLED",
            "COMPLETED",
            "CANCELED",
            "ERROR",
        )
    }
    print(
        f"Experiment {experiment_id}: {len(trials)} trials, {tally['COMPLETED']} completed, "
        f"{tally['ACTIVE']} active, {tally['PAUSED']} paused, "
        f"{tally['STOPPING_COMPLETED']} stopping-completed, "
        f"{tally['STOPPING_CANCELED']} stopping-canceled, "
        f"{tally['STOPPING_ERROR']} stopping-error, "
        f"{tally['STOPPING_KILLED']} stopping-killed, "
        f"{tally['CANCELED']} canceled, {tally['ERROR']} errored",
        file=sys.stderr,
    )
    for trial in trials:
        print_trial_logs(trial["id"])
def report_failed_trial(trial_id: int, state: str) -> None:
    """Note the unexpected terminal state on stderr and dump the trial's logs."""
    print(f"Trial {trial_id} was not COMPLETED but {state}", file=sys.stderr)
    print_trial_logs(trial_id)
def print_trial_logs(trial_id: int) -> None:
    """Dump a trial's full log between banner lines on stderr."""
    err = sys.stderr
    print(f"******** Start of logs for trial {trial_id} ********", file=err)
    print("".join(trial_logs(trial_id)), file=err)
    print(f"******** End of logs for trial {trial_id} ********", file=err)
def run_basic_test(
    config_file: str,
    model_def_file: str,
    expected_trials: Optional[int],
    create_args: Optional[List[str]] = None,
    max_wait_secs: int = conf.DEFAULT_MAX_WAIT_SECS,
) -> int:
    """Create an experiment, wait for COMPLETED, and sanity-check its metadata."""
    assert os.path.isdir(model_def_file)
    exp_id = create_experiment(config_file, model_def_file, create_args)
    wait_for_experiment_state(exp_id, "COMPLETED", max_wait_secs=max_wait_secs)
    assert num_active_trials(exp_id) == 0
    verify_completed_experiment_metadata(exp_id, expected_trials)
    return exp_id
def verify_completed_experiment_metadata(
    experiment_id: int, num_expected_trials: Optional[int]
) -> None:
    """Sanity-check trial/step/checkpoint metadata of a COMPLETED experiment."""
    # If `expected_trials` is None, the expected number of trials is
    # non-deterministic.
    if num_expected_trials is not None:
        assert num_trials(experiment_id) == num_expected_trials
        assert num_completed_trials(experiment_id) == num_expected_trials
    # Check that every trial and step is COMPLETED.
    trials = experiment_trials(experiment_id)
    assert len(trials) > 0
    for trial in trials:
        if trial["state"] != "COMPLETED":
            report_failed_trial(trial["id"], trial["state"])
            pytest.fail(f"Trial {trial['id']} was not COMPLETED but {trial['state']}")
        assert len(trial["steps"]) > 0
        # Check that batches appear in increasing order.
        batch_ids = [s["total_batches"] for s in trial["steps"]]
        assert all(x <= y for x, y in zip(batch_ids, batch_ids[1:]))
        for step in trial["steps"]:
            assert step["state"] == "COMPLETED"
            if step["validation"]:
                validation = step["validation"]
                assert validation["state"] == "COMPLETED"
            if step["checkpoint"]:
                checkpoint = step["checkpoint"]
                # DELETED is acceptable: checkpoint GC may have removed it.
                assert checkpoint["state"] in {"COMPLETED", "DELETED"}
    # The last step of every trial should have a checkpoint.
    for trial in trials:
        last_step = trial["steps"][-1]
        assert last_step["checkpoint"]
    # When the experiment completes, all slots should now be free. This
    # requires terminating the experiment's last container, which might
    # take some time.
    max_secs_to_free_slots = 30
    for _ in range(max_secs_to_free_slots):
        if cluster_utils.num_free_slots() == cluster_utils.num_slots():
            break
        time.sleep(1)
    else:
        raise AssertionError("Slots failed to free after experiment {}".format(experiment_id))
    # Run a series of CLI tests on the finished experiment, to sanity check
    # that basic CLI commands don't raise errors.
    run_describe_cli_tests(experiment_id)
    run_list_cli_tests(experiment_id)
# Use Determined to run an experiment that we expect to fail.
def run_failure_test(
    config_file: str, model_def_file: str, error_str: Optional[str] = None
) -> None:
    """Create an experiment, wait for it to ERROR, and check its failed trials.

    If ``error_str`` is given, every errored trial's logs must contain it.
    """
    experiment_id = create_experiment(config_file, model_def_file)
    wait_for_experiment_state(experiment_id, "ERROR")
    # The searcher is configured with a `max_trials` of 8. Since the
    # first step of each trial results in an error, there should be no
    # completed trials.
    #
    # Most of the trials should result in ERROR, but depending on that
    # seems fragile: if we support task preemption in the future, we
    # might start a trial but cancel it before we hit the error in the
    # model definition.
    assert num_active_trials(experiment_id) == 0
    assert num_completed_trials(experiment_id) == 0
    assert num_error_trials(experiment_id) >= 1
    # For each failed trial, check for the expected error in the logs.
    trials = experiment_trials(experiment_id)
    for t in trials:
        if t["state"] != "ERROR":
            continue
        trial_id = t["id"]
        logs = trial_logs(trial_id)
        if error_str is not None:
            assert any(error_str in line for line in logs)
def get_validation_metric_from_last_step(
    experiment_id: int, trial_id: int, validation_metric_name: str
) -> float:
    """Return a named validation metric from the trial's final step.

    NOTE(review): ``trial_id`` is used as a positional index into the trials
    list, not matched against the trial's actual id — confirm callers rely on
    positional lookup.
    """
    trial = experiment_trials(experiment_id)[trial_id]
    # [-1] replaces the original's equivalent but noisier
    # trial["steps"][len(trial["steps"]) - 1].
    last_validation = trial["steps"][-1]["validation"]
    return last_validation["metrics"]["validation_metrics"][validation_metric_name]  # type: ignore
class ExperimentDurations:
    """Bundle of wall-clock durations measured for one experiment."""

    def __init__(
        self,
        experiment_duration: datetime.timedelta,
        training_duration: datetime.timedelta,
        validation_duration: datetime.timedelta,
        checkpoint_duration: datetime.timedelta,
    ):
        self.experiment_duration = experiment_duration
        self.training_duration = training_duration
        self.validation_duration = validation_duration
        self.checkpoint_duration = checkpoint_duration

    def __str__(self) -> str:
        # One "<label> duration: <timedelta>" line per component.
        parts = [
            f"experiment duration: {self.experiment_duration}",
            f"training duration: {self.training_duration}",
            f"validation duration: {self.validation_duration}",
            f"checkpoint duration: {self.checkpoint_duration}",
        ]
        return "\n".join(parts)
def get_experiment_durations(experiment_id: int, trial_idx: int) -> ExperimentDurations:
    """Compute total experiment/training/validation/checkpoint durations."""

    def span(record: Dict[str, Any]) -> datetime.timedelta:
        # Wall-clock time between a record's start_time and end_time.
        return dateutil.parser.parse(record["end_time"]) - dateutil.parser.parse(
            record["start_time"]
        )

    metadata = experiment_json(experiment_id)
    training = datetime.timedelta(seconds=0)
    validation = datetime.timedelta(seconds=0)
    checkpoint = datetime.timedelta(seconds=0)
    for step in metadata["trials"][trial_idx]["steps"]:
        training += span(step)
        if step.get("validation"):
            validation += span(step["validation"])
        if step.get("checkpoint"):
            checkpoint += span(step["checkpoint"])
    return ExperimentDurations(span(metadata), training, validation, checkpoint)
def run_basic_test_with_temp_config(
    config: Dict[Any, Any],
    model_def_path: str,
    expected_trials: Optional[int],
    create_args: Optional[List[str]] = None,
    max_wait_secs: int = conf.DEFAULT_MAX_WAIT_SECS,
) -> int:
    """Serialize ``config`` to a temp YAML file and run ``run_basic_test`` on it."""
    with tempfile.NamedTemporaryFile() as tf:
        with open(tf.name, "w") as out:
            yaml.dump(config, out)
        return run_basic_test(
            tf.name,
            model_def_path,
            expected_trials,
            create_args,
            max_wait_secs=max_wait_secs,
        )
def run_failure_test_with_temp_config(
    config: Dict[Any, Any],
    model_def_path: str,
    error_str: Optional[str] = None,
) -> None:
    """Serialize ``config`` to a temp YAML file and run ``run_failure_test`` on it."""
    with tempfile.NamedTemporaryFile() as tf:
        with open(tf.name, "w") as out:
            yaml.dump(config, out)
        run_failure_test(tf.name, model_def_path, error_str=error_str)
def shared_fs_checkpoint_config() -> Dict[str, str]:
    """Checkpoint-storage config pointing at a shared local filesystem."""
    return dict(
        type="shared_fs",
        host_path="/tmp",
        storage_path="determined-integration-checkpoints",
    )
def s3_checkpoint_config(secrets: Dict[str, str], prefix: Optional[str] = None) -> Dict[str, str]:
    """Checkpoint-storage config for the integration-test S3 bucket."""
    config = {
        "type": "s3",
        "access_key": secrets["INTEGRATIONS_S3_ACCESS_KEY"],
        "secret_key": secrets["INTEGRATIONS_S3_SECRET_KEY"],
        "bucket": secrets["INTEGRATIONS_S3_BUCKET"],
    }
    if prefix is None:
        return config
    return {**config, "prefix": prefix}
def s3_checkpoint_config_no_creds() -> Dict[str, str]:
    """Credential-free S3 checkpoint config for the public examples bucket."""
    return dict(type="s3", bucket="determined-ai-examples")
def root_user_home_bind_mount() -> Dict[str, str]:
    """Bind mount mapping /tmp on the host onto /root inside the container."""
    return dict(host_path="/tmp", container_path="/root")
def _export_and_load_model(experiment_id: int, master_url: str) -> None:
    """Load the experiment's top checkpoint (intended to run in a child process)."""
    client = experimental.Determined(master_url)
    client.get_experiment(experiment_id).top_checkpoint().load()
def export_and_load_model(experiment_id: int) -> None:
    """Export and load the model inside a freshly spawned subprocess.

    A separate process avoids module name collisions when performing
    checkpoint export of different models.
    """
    spawn_ctx = multiprocessing.get_context("spawn")
    proc = spawn_ctx.Process(
        target=_export_and_load_model,
        args=(experiment_id, conf.make_master_url()),
    )
    proc.start()
    proc.join()
    assert proc.exitcode == 0, proc.exitcode
| e2e_tests/tests/experiment/experiment.py | 25,463 | Runs `det experiment describe` CLI command on a finished
experiment. Will raise an exception if `det experiment describe`
encounters a traceback failure.
Runs list-related CLI commands on a finished experiment. Will raise an
exception if the CLI command encounters a traceback failure.
TODO(DET-5678): refactor tests to not use cli singleton auth. We may never observe the STOPPING_CANCELED state. Ignore network errors while polling for experiment state to avoid a single network flake to cause a test suite failure. If the master is unreachable multiple times, this test will fail after max_wait_secs. type: Dict[str, Any] type: str type: List[Dict[str, Any]] type: Dict[str, Any] "det experiment describe" without metrics. "det experiment describe" with metrics. If `expected_trials` is None, the expected number of trials is non-deterministic. Check that every trial and step is COMPLETED. Check that batches appear in increasing order. The last step of every trial should have a checkpoint. When the experiment completes, all slots should now be free. This requires terminating the experiment's last container, which might take some time. Run a series of CLI tests on the finished experiment, to sanity check that basic CLI commands don't raise errors. Use Determined to run an experiment that we expect to fail. The searcher is configured with a `max_trials` of 8. Since the first step of each trial results in an error, there should be no completed trials. Most of the trials should result in ERROR, but depending on that seems fragile: if we support task preemption in the future, we might start a trial but cancel it before we hit the error in the model definition. For each failed trial, check for the expected error in the logs. type: ignore We run this in a subprocess to avoid module name collisions when performing checkpoint export of different models. | 1,869 | en | 0.870579 |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=1>
# Reading outputs from E+
# <codecell>
# some initial set up
# if you have not installed epp, and only downloaded it
# you will need the following lines
import sys
# pathnameto_eppy = 'c:/eppy'
pathnameto_eppy = '../'
sys.path.append(pathnameto_eppy)
# <headingcell level=2>
# Using titletable() to get at the tables
# <markdowncell>
# So far we have been making changes to the IDF input file.
# How about looking at the outputs.
#
# Energyplus makes nice htmlout files that look like this.
# <codecell>
from eppy import ex_inits #no need to know this code, it just shows the image below
for_images = ex_inits
for_images.display_png(for_images.html_snippet1) #display the image below
# <markdowncell>
# If you look at the clipping of the html file above, you see tables with data in them. Eppy has functions that let you access of these tables and get the data from any of it's cells.
#
# Let us say you want to find the "Net Site Energy".
#
# This is in table "Site and Source Energy".
#
# The number you want is in the third row, second column and it's value is "47694.47"
#
# Let us use eppy to extract this number
# <codecell>
from eppy import readhtml # the eppy module with functions to read the html
fname = "../eppy/resources/outputfiles/V_7_2/5ZoneCAVtoVAVWarmestTempFlowTable_ABUPS.html" # the html file you want to read
# Read the whole file inside a context manager so the OS handle is closed
# promptly (the original `open(fname).read()` never closed it).  Note that
# `filehandle` holds the file's *contents*, not an open handle.
with open(fname, 'r') as html_file:
    filehandle = html_file.read()
htables = readhtml.titletable(filehandle) # reads the tables with their titles
# <markdowncell>
# If you open the python file readhtml.py and look at the function titletable, you can see the function documentation.
#
# It says the following
# <rawcell>
#     """return a list of [(title, table), .....]
#     title = previous item with a <b> tag
#     table = rows -> [[cell1, cell2, ..], [cell1, cell2, ..], ..]"""
#
# <markdowncell>
# The documentation says that it returns a list.
# Let us take a look inside this list.
# Let us look at the first item in the list.
# <codecell>
# Each item of htables is a (title, table) pair; start with the first one.
firstitem = htables[0]
print(firstitem)
# <markdowncell>
# Ughh !!! that is ugly. Hard to see what it is.
# Let us use a python module to print it pretty
# <codecell>
import pprint
pp = pprint.PrettyPrinter()
pp.pprint(firstitem)
# <markdowncell>
# Nice. that is a little clearer
# <codecell>
firstitem_title = firstitem[0]
pp.pprint(firstitem_title)
# <codecell>
firstitem_table = firstitem[1]
pp.pprint(firstitem_table)
# <markdowncell>
# How do we get to value of "Net Site Energy".
# We know it is in the third row, second column of the table.
#
# Easy.
# <codecell>
thirdrow = firstitem_table[2] # we start counting with 0. So 0, 1, 2 is third row
print(thirdrow)
# <codecell>
thirdrow_secondcolumn = thirdrow[1]
thirdrow_secondcolumn
# <markdowncell>
# the text from the html table is in unicode.
# That is why you see that weird 'u' letter.
#
# Let us convert it to a floating point number
# <codecell>
net_site_energy = float(thirdrow_secondcolumn)
net_site_energy
# <markdowncell>
# Let us have a little fun with the tables.
#
# Get the titles of all the tables
# <codecell>
alltitles = [htable[0] for htable in htables]
alltitles
# <markdowncell>
# Now let us grab the tables with the titles "Building Area" and "Site to Source Energy Conversion Factors"
# <markdowncell>
# twotables = [htable for htable in htables if htable[0] in ["Building Area", "Site to Source Energy Conversion Factors"]]
# twotables
# <markdowncell>
# Let us leave readtables for now.
#
# It gives us the basic functionality to read any of the tables in the html output file.
# <headingcell level=2>
# Using lines_table() to get at the tables
# <markdowncell>
# We have been using titletable() to get at the tables. There is a constraint using function titletable(). Titletable() assumes that there is a unique title (in HTML bold) just above the table. It is assumed that this title will adequetly describe the table. This is true in most cases and titletable() is perfectly good to use. Unfortuntely there are some tables that do not follow this rule. The snippet below shows one of them.
# <codecell>
from eppy import ex_inits #no need to know this code, it just shows the image below
for_images = ex_inits
for_images.display_png(for_images.html_snippet2) # display the image below
# <markdowncell>
# Notice that the HTML snippet shows a table with three lines above it. The first two lines have information that describe the table. We need to look at both those lines to understand what the table contains. So we need a different function that will capture all those lines before the table. The funtion lines_table() described below will do this.
# <codecell>
from eppy import readhtml # the eppy module with functions to read the html
fname = "../eppy/resources/outputfiles/V_8_1/ASHRAE30pct.PI.Final11_OfficeMedium_STD2010_Chicago-baseTable.html" # the html file you want to read
filehandle = open(fname, 'r').read() # get a file handle to the html file
ltables = readhtml.lines_table(filehandle) # reads the tables with their titles
# <markdowncell>
# The html snippet shown above is the last table in HTML file we just opened. We have used lines_table() to read the tables into the variable ltables. We can get to the last table by ltables[-1]. Let us print it and see what we have.
# <codecell>
import pprint
pp = pprint.PrettyPrinter()
pp.pprint(ltables[-1])
# <markdowncell>
# We can see that ltables has captured all the lines before the table. Let us make our code more explicit to see this
# <codecell>
last_ltable = ltables[-1]
lines_before_table = last_ltable[0]
table_itself = last_ltable[-1]
pp.pprint(lines_before_table)
# <markdowncell>
# We found this table the easy way this time, because we knew it was the last one. How do we find it if we don't know where it is in the file ? Python comes to our rescue :-) Let us assume that we want to find the table that has the following two lines before it.
#
# - Report: FANGER DURING COOLING AND ADAPTIVE COMFORT
# - For: PERIMETER_MID_ZN_4
# <codecell>
line1 = 'Report: FANGER DURING COOLING AND ADAPTIVE COMFORT'
line2 = 'For: PERIMETER_MID_ZN_4'
#
# check if those two lines are before the table
line1 in lines_before_table and line2 in lines_before_table
# <codecell>
# find all the tables where those two lines are before the table
[ltable for ltable in ltables
    if line1 in ltable[0] and line2 in ltable[0]]
# <markdowncell>
# That worked !
#
# What if you want to find the words "FANGER" and "PERIMETER_MID_ZN_4" before the table. The following code will do it.
# <codecell>
# sample code to illustrate what we are going to do
last_ltable = ltables[-1]
lines_before_table = last_ltable[0]
table_itself = last_ltable[-1]
# join lines_before_table into a paragraph of text
justtext = '\n'.join(lines_before_table)
print(justtext)
# <codecell>
"FANGER" in justtext and "PERIMETER_MID_ZN_4" in justtext
# <codecell>
# Let us combine this trick to find the table
[ltable for ltable in ltables
    if "FANGER" in '\n'.join(ltable[0]) and "PERIMETER_MID_ZN_4" in '\n'.join(ltable[0])]
# <headingcell level=2>
# Extracting data from the tables
# <markdowncell>
# The tables in the HTML page in general have text in the top header row. The first vertical row has text. The remaining cells have numbers. We can identify the numbers we need by looking at the label in the top row and the label in the first column. Let us construct a simple example and explore this.
# <codecell>
# ignore the following three lines. I am using them to construct the table below
from IPython.display import HTML
atablestring = '<TABLE cellpadding="4" style="border: 1px solid #000000; border-collapse: collapse;" border="1">\n <TR>\n <TD> </TD>\n <TD>a b</TD>\n <TD>b c</TD>\n <TD>c d</TD>\n </TR>\n <TR>\n <TD>x y</TD>\n <TD>1</TD>\n <TD>2</TD>\n <TD>3</TD>\n </TR>\n <TR>\n <TD>y z</TD>\n <TD>4</TD>\n <TD>5</TD>\n <TD>6</TD>\n </TR>\n <TR>\n <TD>z z</TD>\n <TD>7</TD>\n <TD>8</TD>\n <TD>9</TD>\n </TR>\n</TABLE>'
HTML(atablestring)
# <markdowncell>
# This table is actually in the following form:
# <codecell>
# The same table as the HTML shown above, written as a list of rows:
# the first row holds the column labels, and the first entry of every
# following row is that row's label.
atable = [
    ["", "a b", "b c", "c d"],
    ["x y", 1, 2, 3],
    ["y z", 4, 5, 6],
    ["z z", 7, 8, 9],
]
# <markdowncell>
# We can see the labels in the table. So we can look at row "x y" and column "c d". The value there is 3
# <markdowncell>
# right now we can get to it by saying atable[1][3]
# <codecell>
print(atable[1][3])
# <markdowncell>
# readhtml has some functions that will let us address the values by the labels. We use a structure from python called named tuples to do this. The only limitation is that the labels have to be letters or digits. Named tuples does not allow spaces in the labels. We could replace the space with an underscore ' _ '. So "a b" will become "a_b". So we can look for row "x_y" and column "c_d". Let us try this out.
# <codecell>
from eppy import readhtml
h_table = readhtml.named_grid_h(atable)
# <codecell>
print(h_table.x_y.c_d)
# <markdowncell>
# We can still get to the value by index
# <codecell>
print(h_table[0][2])
# <markdowncell>
# Note that we used atable[1][3], but here we used h_table[0][2]. That is because h_table does not count the rows and columns where the labels are.
# <markdowncell>
# We can also do the following:
# <codecell>
print(h_table.x_y[2])
# or
print(h_table[0].c_d)
# <markdowncell>
# Wow … that is pretty cool. What if we want to just check what the labels are ?
# <codecell>
print(h_table._fields)
# <markdowncell>
# That gives us the horizontal labels. How about the vertical labels ?
# <codecell>
h_table.x_y._fields
# <markdowncell>
# There you go !!!
# <markdowncell>
# How about if I want to use the labels differently ? Say I want to refer to the row first and then to the column. That would be saying table.c_d.x_y. We can do that by using a different function
# <codecell>
v_table = readhtml.named_grid_v(atable)
print(v_table.c_d.x_y)
# <markdowncell>
# And we can do the following
# <codecell>
print(v_table[2][0])
print(v_table.c_d[0])
print(v_table[2].x_y)
# <markdowncell>
# Let us try to get the numbers in the first column and then get their sum
# <codecell>
v_table.a_b
# <markdowncell>
# Looks like we got the right column. But not in the right format. We really need a list of numbers
# <codecell>
[cell for cell in v_table.a_b]
# <markdowncell>
# That looks like what we wanted. Now let us get the sum
# <codecell>
# (list(v_table.a_b) would do the same thing)
values_in_first_column = [cell for cell in v_table.a_b]
print(values_in_first_column)
print(sum(values_in_first_column)) # sum is a builtin function that will sum a list
# <markdowncell>
# To get the first row we use the variable h_table
# <codecell>
values_in_first_row = [cell for cell in h_table.x_y]
print(values_in_first_row)
print(sum(values_in_first_row))
# <codecell>
| docs/Outputs_Tutorial.py | 11,037 | -*- coding: utf-8 -*- <nbformat>3.0</nbformat> <headingcell level=1> Reading outputs from E+ <codecell> some initial set up if you have not installed epp, and only downloaded it you will need the following lines pathnameto_eppy = 'c:/eppy' <headingcell level=2> Using titletable() to get at the tables <markdowncell> So far we have been making changes to the IDF input file. How about looking at the outputs. Energyplus makes nice htmlout files that look like this. <codecell>no need to know this code, it just shows the image belowdisplay the image below <markdowncell> If you look at the clipping of the html file above, you see tables with data in them. Eppy has functions that let you access of these tables and get the data from any of it's cells. Let us say you want to find the "Net Site Energy". This is in table "Site and Source Energy". The number you want is in the third row, second column and it's value is "47694.47" Let us use eppy to extract this number <codecell> the eppy module with functions to read the html the html file you want to read get a file handle to the html file reads the tables with their titles <markdowncell> If you open the python file readhtml.py and look at the function titletable, you can see the function documentation. It says the following <rawcell> """return a list of [(title, table), .....] title = previous item with a <b> tag table = rows -> [[cell1, cell2, ..], [cell1, cell2, ..], ..]""" <markdowncell> The documentation says that it returns a list. Let us take a look inside this list. Let us look at the first item in the list. <codecell> <markdowncell> Ughh !!! that is ugly. Hard to see what it is. Let us use a python module to print it pretty <codecell> <markdowncell> Nice. that is a little clearer <codecell> <codecell> <markdowncell> How do we get to value of "Net Site Energy". We know it is in the third row, second column of the table. Easy. <codecell> we start counting with 0. 
So 0, 1, 2 is third row <codecell> <markdowncell> the text from the html table is in unicode. That is why you see that weird 'u' letter. Let us convert it to a floating point number <codecell> <markdowncell> Let us have a little fun with the tables. Get the titles of all the tables <codecell> <markdowncell> Now let us grab the tables with the titles "Building Area" and "Site to Source Energy Conversion Factors" <markdowncell> twotables = [htable for htable in htables if htable[0] in ["Building Area", "Site to Source Energy Conversion Factors"]] twotables <markdowncell> Let us leave readtables for now. It gives us the basic functionality to read any of the tables in the html output file. <headingcell level=2> Using lines_table() to get at the tables <markdowncell> We have been using titletable() to get at the tables. There is a constraint using function titletable(). Titletable() assumes that there is a unique title (in HTML bold) just above the table. It is assumed that this title will adequetly describe the table. This is true in most cases and titletable() is perfectly good to use. Unfortuntely there are some tables that do not follow this rule. The snippet below shows one of them. <codecell>no need to know this code, it just shows the image below display the image below <markdowncell> Notice that the HTML snippet shows a table with three lines above it. The first two lines have information that describe the table. We need to look at both those lines to understand what the table contains. So we need a different function that will capture all those lines before the table. The funtion lines_table() described below will do this. <codecell> the eppy module with functions to read the html the html file you want to read get a file handle to the html file reads the tables with their titles <markdowncell> The html snippet shown above is the last table in HTML file we just opened. We have used lines_table() to read the tables into the variable ltables. 
We can get to the last table by ltable[-1]. Let us print it and see what we have. <codecell> <markdowncell> We can see that ltables has captured all the lines before the table. Let us make our code more explicit to see this <codecell> <markdowncell> We found this table the easy way this time, because we knew it was the last one. How do we find it if we don't know where it is in the file ? Python comes to our rescue :-) Let assume that we want to find the table that has the following two lines before it. - Report: FANGER DURING COOLING AND ADAPTIVE COMFORT - For: PERIMETER_MID_ZN_4 <codecell> check if those two lines are before the table <codecell> find all the tables where those two lines are before the table <markdowncell> That worked ! What if you want to find the words "FANGER" and "PERIMETER_MID_ZN_4" before the table. The following code will do it. <codecell> sample code to illustrate what we are going to do join lines_before_table into a paragraph of text <codecell> <codecell> Let us combine the this trick to find the table <headingcell level=2> Extracting data from the tables <markdowncell> The tables in the HTML page in general have text in the top header row. The first vertical row has text. The remaining cells have numbers. We can identify the numbers we need by looking at the labelin the top row and the label in the first column. Let us construct a simple example and explore this. <codecell> ignore the following three lines. I am using them to construct the table below <markdowncell> This table is actually in the follwoing form: <codecell> <markdowncell> We can see the labels in the table. So we an look at row "x y" and column "c d". The value there is 3 <markdowncell> right now we can get to it by saying atable[1][3] <codecell> <markdowncell> readhtml has some functions that will let us address the values by the labels. We use a structure from python called named tuples to do this. The only limitation is that the labels have to be letters or digits. 
Named tuples does not allow spaces in the labels. We could replace the space with an underscore ' _ '. So "a b" will become "a_b". So we can look for row "x_y" and column "c_d". Let us try this out. <codecell> <codecell> <markdowncell> We can still get to the value by index <codecell> <markdowncell> Note that we used atable[1][3], but here we used h_table[0][2]. That is because h_table does not count the rows and columns where the labels are. <markdowncell> We can also do the following: <codecell> or <markdowncell> Wow … that is pretty cool. What if we want to just check what the labels are ? <codecell> <markdowncell> That gives us the horizontal lables. How about the vertical labels ? <codecell> <markdowncell> There you go !!! <markdowncell> How about if I want to use the labels differently ? Say I want to refer to the row first and then to the column. That woul be saying table.c_d.x_y. We can do that by using a different function <codecell> <markdowncell> And we can do the following <codecell> <markdowncell> Let us try to get the numbers in the first column and then get their sum <codecell> <markdowncell> Look like we got the right column. But not in the right format. We really need a list of numbers <codecell> <markdowncell> That looks like waht we wanted. Now let us get the sum <codecell> sum is a builtin function that will sum a list <markdowncell> To get the first row we use the variable h_table <codecell> <codecell> | 7,411 | en | 0.871186 |
# -*- coding: utf-8 -*-
#
# pysteps documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 31 01:11:37 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '1.6'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# NOTE: 'numpydoc' and 'sphinxcontrib.bibtex' are third-party extensions
# and must be installed separately from Sphinx itself.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.autosummary',
              'sphinx.ext.coverage',
              'sphinx.ext.mathjax',
              'sphinx.ext.githubpages',
              'numpydoc',
              'sphinxcontrib.bibtex']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'source/index'
# General information about the project.
project = u'pysteps'
copyright = u'2018, Seppo Pulkkinen, Daniele Nerini and Loris Foresti'
author = u'Seppo Pulkkinen, Daniele Nerini and Loris Foresti'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.2'
# The full version, including alpha/beta/rc tags.
release = u'0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): newer Sphinx releases expect a string here (e.g. 'en');
# None is accepted by the 1.x series pinned above -- confirm before upgrading.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
html_domain_indices = True
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pystepsdoc'
# -- Options for LaTeX output ---------------------------------------------
# This hack is taken from numpy (https://github.com/numpy/numpy/blob/master/doc/source/conf.py).
# The preamble below is injected verbatim into the generated .tex file;
# the \py@... macros are defined by Sphinx's LaTeX writer.
latex_preamble = r'''
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters
% header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
'''
latex_elements = {
    'papersize': 'a4paper',
    'pointsize': '10pt',
    'preamble': latex_preamble
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Do not generate per-domain indices in the PDF output.
latex_domain_indices = False
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'pysteps.tex', u'pysteps Reference',
     u'Seppo Pulkkinen, Daniele Nerini and Loris Foresti', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'pysteps', u'pysteps Reference',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'pysteps', u'pysteps Reference',
     author, 'pysteps', 'One line description of project.',
     'Miscellaneous'),
]
| pysteps/doc/conf.py | 6,134 | -*- coding: utf-8 -*- pysteps documentation build configuration file, created by sphinx-quickstart on Tue Jul 31 01:11:37 2018. This file is execfile()d with the current directory set to its containing dir. Note that not all possible configuration values are present in this autogenerated file. All configuration values have a default; values that are commented out serve to show the default. If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. -- General configuration ------------------------------------------------ If your documentation needs a minimal Sphinx version, state it here. Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. The suffix(es) of source filenames. You can specify multiple suffix as a list of string: source_suffix = ['.rst', '.md'] The master toctree document. General information about the project. The version info for the project you're documenting, acts as replacement for |version| and |release|, also used in various other places throughout the built documents. The short X.Y version. The full version, including alpha/beta/rc tags. The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages. This is also used if you do content translation via gettext catalogs. Usually you set "language" from the command line for these cases. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This patterns also effect to html_static_path and html_extra_path The name of the Pygments (syntax highlighting) style to use. 
If true, `todo` and `todoList` produce output, else they produce nothing. -- Options for HTML output ---------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes.html_theme = 'alabaster' Theme options are theme-specific and customize the look and feel of a theme further. For a list of options available for each theme, see the documentation. html_theme_options = {} Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". Custom sidebar templates, must be a dictionary that maps document names to template names. This is required for the alabaster theme refs: http://alabaster.readthedocs.io/en/latest/installation.htmlsidebars needs 'show_related': True theme option to display -- Options for HTMLHelp output ------------------------------------------ Output file base name for HTML help builder. -- Options for LaTeX output --------------------------------------------- This hack is taken from numpy (https://github.com/numpy/numpy/blob/master/doc/source/conf.py). Latex figure (float) alignment 'figure_align': 'htbp', Grouping the document tree into LaTeX files. List of tuples (source start file, target name, title, author, documentclass [howto, manual, or own class]). -- Options for manual page output --------------------------------------- One entry per manual page. List of tuples (source start file, name, description, authors, manual section). -- Options for Texinfo output ------------------------------------------- Grouping the document tree into Texinfo files. List of tuples (source start file, target name, title, author, dir menu entry, description, category) | 3,738 | en | 0.720477 |
# Automatically generated from poetry/pyproject.toml
# flake8: noqa
# -*- coding: utf-8 -*-
from setuptools import setup
# Package layout and data files, as exported by poetry.
packages = \
['c7n_trailcreator']
package_data = \
{'': ['*']}
# Pinned runtime dependencies mirrored from poetry's pyproject data.
# NOTE(review): 'click' appears twice (once in requirement-specifier form,
# once in pip form) -- presumably a quirk of the generator; the two ranges
# are compatible, so pip resolves them consistently.
install_requires = \
['argcomplete (>=1.11.1,<2.0.0)',
 'attrs (>=19.3.0,<20.0.0)',
 'boto3 (>=1.12.20,<2.0.0)',
 'botocore (>=1.15.20,<2.0.0)',
 'c7n (>=0.9.0,<0.10.0)',
 'c7n-org (>=0.5.7,<0.6.0)',
 'click (>=7.1.1,<8.0.0)',
 'click>=7.0,<8.0',
 'docutils (>=0.15.2,<0.16.0)',
 'importlib-metadata (>=1.5.0,<2.0.0)',
 'jmespath (>=0.9.5,<0.10.0)',
 'jsonschema (>=3.2.0,<4.0.0)',
 'pyrsistent (>=0.15.7,<0.16.0)',
 'python-dateutil (>=2.8.1,<3.0.0)',
 'pyyaml (>=5.3,<6.0)',
 's3transfer (>=0.3.3,<0.4.0)',
 'six (>=1.14.0,<2.0.0)',
 'tabulate (>=0.8.6,<0.9.0)',
 'urllib3 (>=1.25.8,<2.0.0)',
 'zipp (>=3.1.0,<4.0.0)']
# Console entry point: installs the `c7n-trailcreator` CLI command.
entry_points = \
{'console_scripts': ['c7n-trailcreator = c7n_trailcreator.trailcreator:cli']}
# Keyword arguments handed to setuptools.setup() at the bottom of the file.
setup_kwargs = {
    'name': 'c7n-trailcreator',
    'version': '0.1.5',
    'description': 'Cloud Custodian - Retroactive Tag Resource Creators from CloudTrail',
'long_description': '# c7n-trailcreator: Retroactive Resource Creator Tagging\n\nThis script will process cloudtrail records to create a sqlite db of\nresources and their creators, and then use that sqlitedb to tag\nthe resources with their creator\'s name.\n\nIn processing cloudtrail it can use either Athena or S3 Select. A\nconfig file of the events and resources of interest is required.\n\n## Install\n\n```shell\n$ pip install c7n_trailcreator\n\n$ c7n-trailcreator --help\n```\n\n## Config File\n\nThe config file format here is similiar to what custodian requires\nfor lambda policies on cloudtrail api events as an event selector.\n\nFirst for each resource, the custodian resource-type is required\nto be specified, and then for each event, we need to know the\nname of the service, the event name, and a jmespath expression\nto get the resource ids.\n\nHere\'s a a few examples, covering iam-user, iam-role, and and an s3 bucket.\n\n\n```json\n{\n "resources": [\n {\n "resource": "iam-role",\n "events": [\n {\n "event": "CreateRole",\n "ids": "requestParameters.roleName",\n "service": "iam.amazonaws.com"\n }\n ]\n },\n {\n "resource": "s3",\n "events": [\n {\n "ids": "requestParameters.bucketName",\n "event": "CreateBucket",\n "service": "s3.amazonaws.com"\n }\n ]\n },\n {\n "resource": "iam-user",\n "events": [\n {\n "event": "CreateUser",\n "ids": "requestParameters.userName",\n "service": "iam.amazonaws.com"\n }\n ]\n }]\n}\n```\n\n## Athena Usage\n\nTrail creators supports loading data from s3 using s3 select or from cloudtrail s3 using athena.\n\nNote you\'ll have to pre-created the athena table for cloudtrail previously per\nhttps://docs.aws.amazon.com/athena/latest/ug/cloudtrail-logs.html\n\nLet\'s use the example config file to load up data for all the roles, buckets, and users created in 2019\n\n```\nc7n-trailcreator load-athena \\\n --region us-east-1 \\\n\t--resource-map resource_map.json \\\n\t--table cloudtrail_logs_custodian_skunk_trails \\\n\t--db 
"creators.db" \\\n\t--year 2019\n```\n\nBy default we\'ll use the default s3 athena output used by the console,\nand the default db and primary workgroup, you can pass all of these in\non the cli to be more explicit.\n\nYou can also specify to just process a month with `--month 2019/11` or\nan individual day with `--day 2019/02/01`\n\n```\nINFO:c7n_trailowner:Athena query:569712dc-d1e9-4474-b86f-6579c53b5b46\nINFO:c7n_trailowner:Polling athena query progress scanned:489.24 Mb qexec:28.62s\nINFO:c7n_trailowner:Polling athena query progress scanned:1.29 Gb qexec:88.96s\nINFO:c7n_trailowner:Polling athena query progress scanned:2.17 Gb qexec:141.16s\nINFO:c7n_trailowner:processing athena result page 78 records\nINFO:c7n_trailowner:Athena Processed 78 records\n```\n\nNote you can reprocess a completed query\'s results, by passing in `--query-id` on the cli.\n\n## Tagging\n\nIt supports this across all the resources that custodian supports.\n\n```\n$ c7n-trailcreator tag \\\n\t--db creators.db \\\n\t--creator-tag Owner \\\n\t--region us-east-1\nINFO:c7n_trailowner:account:644160558196 region:us-east-1 tag 13 iam-role resources users:5 population:97 not-found:84 records:124\nINFO:c7n_trailowner:account:644160558196 region:us-east-1 tag 5 iam-user resources users:4 population:6 not-found:1 records:18\nINFO:c7n_trailowner:account:644160558196 region:us-east-1 tag 9 s3 resources users:4 population:14 not-found:5 records:20\nINFO:c7n_trailowner:auto tag summary account:644160558196 region:us-east-1\n iam-role-not-found: 84\n iam-role: 13\n iam-user-not-found: 1\n iam-user: 5\n s3-not-found: 5\n s3: 9\nINFO:c7n_trailowner:Total resources tagged: 27\n```\n\nlet\'s break down one of these log messages\n\n```\nINFO:c7n_trailowner:account:644160558196 region:us-east-1 tag 13 iam-role resources users:5 population:97 not-found:84 records:124\n```\n\n- records: the count of database create events we have for this resource type.\n- users: the number of unique users for whom we have 
create events.\n- not-found: the number of resources for whom we do not have create events, ie created before or after our trail analysis period.\n- population: the total number of resources in the account region.\n\n## Multi Account / Multi Region\n\nc7n-trailcreator supports executing across multiple accounts and regions when tagging\nusing the same file format that c7n-org uses to denote accounts. See `tag-org` subcommand.\n\n',
    'long_description_content_type': 'text/markdown',
    'author': 'Cloud Custodian Project',
    'author_email': None,
    'maintainer': None,
    'maintainer_email': None,
    'url': 'https://cloudcustodian.io',
    'packages': packages,
    'package_data': package_data,
    'install_requires': install_requires,
    'entry_points': entry_points,
    # Supported interpreter range.
    'python_requires': '>=3.6,<4.0',
}
setup(**setup_kwargs)
| tools/c7n_trailcreator/setup.py | 6,106 | Automatically generated from poetry/pyproject.toml flake8: noqa -*- coding: utf-8 -*- | 85 | en | 0.662374 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
"""A collection of ORM sqlalchemy models for SQL Lab"""
from datetime import datetime
import re
from flask import Markup
from flask_appbuilder import Model
import sqlalchemy as sqla
from sqlalchemy import (
Boolean,
Column,
DateTime,
ForeignKey,
Integer,
Numeric,
String,
Text,
)
from sqlalchemy.orm import backref, relationship
from superset import security_manager
from superset.models.helpers import AuditMixinNullable, ExtraJSONMixin
from superset.models.tags import QueryUpdater
from superset.utils.core import QueryStatus, user_label
class Query(Model, ExtraJSONMixin):
    """ORM model for a SQL Lab query.

    Now that SQL Lab supports multi-statement execution, an entry in this
    table may represent multiple SQL statements executed sequentially."""
    __tablename__ = "query"
    id = Column(Integer, primary_key=True)
    # Identifier generated by the client; unique across all queries.
    client_id = Column(String(11), unique=True, nullable=False)
    database_id = Column(Integer, ForeignKey("dbs.id"), nullable=False)
    # Store the tmp table into the DB only if the user asks for it.
    tmp_table_name = Column(String(256))
    user_id = Column(Integer, ForeignKey("ab_user.id"), nullable=True)
    # Lifecycle state; defaults to QueryStatus.PENDING.
    status = Column(String(16), default=QueryStatus.PENDING)
    tab_name = Column(String(256))
    sql_editor_id = Column(String(256))
    schema = Column(String(256))
    # SQL as submitted by the user.
    sql = Column(Text)
    # Query to retrieve the results,
    # used only in case of select_as_cta_used is true.
    select_sql = Column(Text)
    # SQL string that was actually executed (may differ from ``sql``).
    executed_sql = Column(Text)
    # Could be configured in the superset config.
    limit = Column(Integer)
    select_as_cta = Column(Boolean)
    select_as_cta_used = Column(Boolean, default=False)
    progress = Column(Integer, default=0)  # 1..100
    # # of rows in the result set or rows modified.
    rows = Column(Integer)
    error_message = Column(Text)
    # key used to store the results in the results backend
    results_key = Column(String(64), index=True)
    # Using Numeric in place of DateTime for sub-second precision
    # stored as seconds since epoch, allowing for milliseconds
    start_time = Column(Numeric(precision=20, scale=6))
    start_running_time = Column(Numeric(precision=20, scale=6))
    end_time = Column(Numeric(precision=20, scale=6))
    end_result_backend_time = Column(Numeric(precision=20, scale=6))
    tracking_url = Column(Text)
    # Auto-maintained modification timestamp (UTC via datetime.utcnow,
    # refreshed on every update through ``onupdate``).
    changed_on = Column(
        DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=True
    )
    database = relationship(
        "Database",
        foreign_keys=[database_id],
        backref=backref("queries", cascade="all, delete-orphan"),
    )
    user = relationship(security_manager.user_model, foreign_keys=[user_id])
    # Composite index on (user_id, changed_on).
    __table_args__ = (sqla.Index("ti_user_id_changed_on", user_id, changed_on),)
    def to_dict(self) -> dict:
        """Return a JSON-serializable, camelCase-keyed dict view of the query.

        Note that ``changed_on`` is exposed twice: once as the raw datetime
        (``changedOn``) and once ISO-formatted (``changed_on``).
        """
        return {
            "changedOn": self.changed_on,
            "changed_on": self.changed_on.isoformat(),
            "dbId": self.database_id,
            "db": self.database.database_name,
            "endDttm": self.end_time,
            "errorMessage": self.error_message,
            "executedSql": self.executed_sql,
            "id": self.client_id,
            "limit": self.limit,
            "progress": self.progress,
            "rows": self.rows,
            "schema": self.schema,
            "ctas": self.select_as_cta,
            "serverId": self.id,
            "sql": self.sql,
            "sqlEditorId": self.sql_editor_id,
            "startDttm": self.start_time,
            "state": self.status.lower(),
            "tab": self.tab_name,
            "tempTable": self.tmp_table_name,
            "userId": self.user_id,
            "user": user_label(self.user),
            "resultsKey": self.results_key,
            "trackingUrl": self.tracking_url,
            "extra": self.extra,
        }
    @property
    def name(self) -> str:
        """Generated name of the form ``sqllab_<tab>_<timestamp>``."""
        # Compact local timestamp, e.g. '20180731T011137' (dashes/colons
        # stripped from isoformat(), fractional seconds dropped).
        ts = datetime.now().isoformat()
        ts = ts.replace("-", "").replace(":", "").split(".")[0]
        # Tab name lowercased with spaces -> underscores; 'notab' fallback.
        tab = self.tab_name.replace(" ", "_").lower() if self.tab_name else "notab"
        # Strip any remaining non-word characters.
        tab = re.sub(r"\W+", "", tab)
        return f"sqllab_{tab}_{ts}"
    @property
    def database_name(self) -> str:
        """Name of the database this query ran against."""
        return self.database.name
    @property
    def username(self) -> str:
        """Username of the query's owner."""
        return self.user.username
class SavedQuery(Model, AuditMixinNullable, ExtraJSONMixin):
    """ORM model for a query saved by a user in SQL Lab."""
    __tablename__ = "saved_query"
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey("ab_user.id"), nullable=True)
    db_id = Column(Integer, ForeignKey("dbs.id"), nullable=True)
    schema = Column(String(128))
    # User-chosen display name for the saved query.
    label = Column(String(256))
    description = Column(Text)
    sql = Column(Text)
    user = relationship(
        security_manager.user_model,
        backref=backref("saved_queries", cascade="all, delete-orphan"),
        foreign_keys=[user_id],
    )
    database = relationship(
        "Database",
        foreign_keys=[db_id],
        backref=backref("saved_queries", cascade="all, delete-orphan"),
    )
    @property
    def pop_tab_link(self):
        """HTML link that opens this saved query in a SQL Lab tab."""
        return Markup(
            f"""
            <a href="/metrix/sqllab?savedQueryId={self.id}">
                <i class="fa fa-link"></i>
            </a>
        """
        )
    @property
    def user_email(self):
        """Email address of the owning user."""
        return self.user.email
    @property
    def sqlalchemy_uri(self):
        """SQLAlchemy URI of the associated database."""
        return self.database.sqlalchemy_uri
    def url(self):
        """URL that opens this saved query in SQL Lab."""
        return "/metrix/sqllab?savedQueryId={0}".format(self.id)
# Events for updating tags: hook the SavedQuery lifecycle so QueryUpdater
# runs after every insert/update/delete (presumably to keep tag records in
# sync -- confirm against the tags model where QueryUpdater is defined).
sqla.event.listen(SavedQuery, "after_insert", QueryUpdater.after_insert)
sqla.event.listen(SavedQuery, "after_update", QueryUpdater.after_update)
sqla.event.listen(SavedQuery, "after_delete", QueryUpdater.after_delete)
| superset/models/sql_lab.py | 6,622 | ORM model for SQL query
Now that SQL Lab support multi-statement execution, an entry in this
table may represent multiple SQL statements executed sequentially
ORM model for SQL query
Name property
A collection of ORM sqlalchemy models for SQL Lab
Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pylint: disable=C,R,W Store the tmp table into the DB only if the user asks for it. Query to retrieve the results, used only in case of select_as_cta_used is true. Could be configured in the superset config. 1..100 of rows in the result set or rows modified. key used to store the results in the results backend Using Numeric in place of DateTime for sub-second precision stored as seconds since epoch, allowing for milliseconds events for updating tags | 1,457 | en | 0.857409 |
import random
import time
def dead_state(width, height):
    """Return an all-dead board: ``width`` rows of ``height`` zero cells."""
    return [[0 for _ in range(height)] for _ in range(width)]
def random_state(width, height):
    """Return a board of ``width`` rows x ``height`` cells, each alive (1)
    with probability 0.5, dead (0) otherwise."""
    return [
        [1 if random.random() >= 0.5 else 0 for _ in range(height)]
        for _ in range(width)
    ]
def render(state):
    """Print the board to the terminal: '#' for live cells, ' ' for dead."""
    picture = "".join(
        "".join("#" if cell == 1 else " " for cell in row) + "\n"
        for row in state
    )
    print(picture)
def next_state(state):
    """Compute one Game of Life generation and return it as a new board.

    ``state`` is a list of rows of 0/1 cells and is not modified; cells
    outside the board are treated as dead.
    """
    # BUG FIX: the original used ``len(state[:][:])`` for the height, but
    # slicing a list of lists twice still yields the outer list, so both
    # dimensions collapsed to the row count and non-square boards broke.
    n_rows = len(state)
    n_cols = len(state[0]) if state else 0
    test_state = dead_state(n_rows, n_cols)
    for i in range(n_rows):
        for j in range(n_cols):
            if state[i][j] == 1:
                # Alive cell: survives with 2 or 3 live neighbours.
                test_state[i][j] = alive_cell(i, j, state)
            else:
                # Dead cell: revives with exactly 3 live neighbours.
                test_state[i][j] = dead_cell(i, j, state)
    return test_state
def alive_cell(i, j, state):
    """Next value (1 alive / 0 dead) for a currently-alive cell at (i, j).

    Conway rules: a live cell survives with exactly 2 or 3 live neighbours;
    positions outside the board count as dead.
    """
    # BUG FIX: the original computed both dimensions as ``len(state)``
    # (``state[:][:]`` is just a copy of the outer list) and then compared
    # rows against "height" and columns against "width", which miscounted
    # neighbours on any non-square board.
    n_rows = len(state)
    n_cols = len(state[0]) if state else 0
    alive = 0
    for row in range(i - 1, i + 2):
        if row < 0 or row >= n_rows:
            continue  # off the top/bottom edge
        for column in range(j - 1, j + 2):
            if column < 0 or column >= n_cols:
                continue  # off the left/right edge
            if state[row][column] == 1:
                alive += 1
    # The 3x3 scan included the cell itself (known alive); remove it.
    alive -= 1
    return 1 if alive in (2, 3) else 0
def dead_cell(i, j, state):
    """Next value (1 alive / 0 dead) for a currently-dead cell at (i, j).

    Conway rules: a dead cell becomes alive with exactly 3 live neighbours;
    positions outside the board count as dead.  The cell itself is dead, so
    including it in the 3x3 scan does not affect the count.
    """
    # BUG FIX: the original computed both dimensions as ``len(state)``
    # (``state[:][:]`` is just a copy of the outer list) and then compared
    # rows against "height" and columns against "width", which miscounted
    # neighbours on any non-square board.
    n_rows = len(state)
    n_cols = len(state[0]) if state else 0
    alive = 0
    for row in range(i - 1, i + 2):
        if row < 0 or row >= n_rows:
            continue  # off the top/bottom edge
        for column in range(j - 1, j + 2):
            if column < 0 or column >= n_cols:
                continue  # off the left/right edge
            if state[row][column] == 1:
                alive += 1
    return 1 if alive == 3 else 0
def load_board_state(location):
    """Load a board from a text file: one row per line, one '0'/'1' digit
    per cell.  Newlines are skipped; any other character must parse as int."""
    with open(location, 'r') as source:
        return [[int(ch) for ch in line if ch != '\n'] for line in source]
if __name__ == '__main__':
    # Load the seed pattern and show it, then animate generations forever
    # (one step every 0.5 s).  Loop never terminates; stop with Ctrl-C.
    loaded_board = load_board_state('./toad.txt')
    render(loaded_board)
    # flag distinguishes the first step (from loaded_board) from later steps
    # (from the previous generation).
    flag = False
    while(True):
        time.sleep(0.5)
        if flag == False:
            next_board = next_state(loaded_board)
            render(next_board)
            flag = True
        else:
            next_board = next_state(next_board)
            render(next_board)
    # init_state = random_state(25,25)
    # render(init_state)
    # count = 0
    # while(True):
    #     # Wait for 1 second
    #     time.sleep(.5)
    #     if count == 0:
    #         next_board = next_state(init_state)
    #         render(next_board)
    #         count = 1
    #     else:
    #         next_board = next_state(next_board)
    #         render(next_board)
| Projects/Game of Life/gol.py | 3,838 | check the inputs for the dead state how to get the length of the row and height from a list of lists Alive cell Dead cell break is not being utilized properly when the break hits it ends the innermost loop not just an iteration print('\t\talive',row,column) too wide too tall print('\talive',row,column) print('alive', alive) current cell stays alive current cell dies print('\t\tdead',row,column) too wide too tall print('\tdead',row,column) print('dead', alive) current cell revives current cell stays dead init_state = random_state(25,25) render(init_state) count = 0 while(True): Wait for 1 second time.sleep(.5) if count == 0: next_board = next_state(init_state) render(next_board) count = 1 else: next_board = next_state(next_board) render(next_board) | 814 | en | 0.60878 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 29 15:56:35 2019
@author: logancross
"""
from mvpa2.suite import *
from os import listdir
import time
def make_targets(subj, glm_ds_file, mask_name, runs2use, class_dict, homedir, ana_name):
start_time = time.time()
print 'Starting making targets',time.time() - start_time
onsets_folder = homedir+'DATA/brain/MODELS/RSA/'+ana_name+'/sub-'+subj+'/glm/timing/'
trial_list = []
trial_categ_list = []
chunks_list = []
for run in range(1,4):
temp_folder = onsets_folder+ana_name+'_run-0'+str(run)
csm_onsets = np.genfromtxt(temp_folder+'_CS_CSm.txt')
cs_deval_onsets = np.genfromtxt(temp_folder+'_CS_deval.txt')
cs_val_onsets = np.genfromtxt(temp_folder+'_CS_val.txt')
#get timing for all conditions and sort by this timing
timing = np.concatenate((csm_onsets[:,0], cs_deval_onsets[:,0], cs_val_onsets[:,0]))
#add a list of trial category as a sample attribute
trial_categ_unsort = [['csm' for c in range(len(csm_onsets))],['cs_deval' for c in range(len(cs_deval_onsets))],['cs_val' for c in range(len(cs_val_onsets))]]
trial_categ_unsort = [item for sublist in trial_categ_unsort for item in sublist]
#sort by trial timing and append to lists
sort_time_inds = np.argsort(timing)
all_trials = np.concatenate((csm_onsets, cs_deval_onsets, cs_val_onsets))
all_trials = all_trials[sort_time_inds,:]
trial_list.append(all_trials)
trial_categ = [trial_categ_unsort[ind] for ind in sort_time_inds]
trial_categ_list.append(trial_categ)
chunks = run*np.ones([len(all_trials)])
chunks_list.append(chunks)
#unroll lists of lists to one list
trials_allruns = np.asarray([item for sublist in trial_list for item in sublist])
trial_categ_allruns = [item for sublist in trial_categ_list for item in sublist]
chunks_allruns = np.asarray([item for sublist in chunks_list for item in sublist]).astype(int)
cs_classes = [class_dict[trial] for trial in trial_categ_allruns]
#load fmri dataset with these values as targets
fds = fmri_dataset(samples=glm_ds_file, targets=cs_classes, chunks=chunks_allruns, mask=mask_name)
print 'changes happened4'
fds.sa['trial_type'] = trial_categ_allruns
fds_subset = fds[:runs2use*60,:]
print 'Finished making targets',time.time() - start_time
#return fds_subset, trial_categ_allruns[:runs2use*60]
return fds_subset
def make_targets2(subj, glm_ds_file, mask_name, runs2use, class_dict):
start_time = time.time()
print 'Starting making targets',time.time() - start_time
onsets_folder = '/Users/logancross/Documents/EvaPavlovian/analysis/timing_files2/sub-'+subj+'/'
trial_list = []
trial_categ_list = []
chunks_list = []
for run in range(1,4):
temp_folder = onsets_folder+'GLM-02_run-0'+str(run)
csm_onsets = np.genfromtxt(temp_folder+'_CS_CSm.txt')
cs_deval_L_onsets = np.genfromtxt(temp_folder+'_CS_deval_L.txt')
cs_deval_R_onsets = np.genfromtxt(temp_folder+'_CS_deval_R.txt')
cs_val_L_onsets = np.genfromtxt(temp_folder+'_CS_val_L.txt')
cs_val_R_onsets = np.genfromtxt(temp_folder+'_CS_val_R.txt')
#get timing for all conditions and sort by this timing
timing = np.concatenate((csm_onsets[:,0], cs_deval_L_onsets[:,0], cs_deval_R_onsets[:,0], cs_val_L_onsets[:,0], cs_val_R_onsets[:,0]))
#add a list of trial category as a sample attribute
trial_categ_unsort = [['csm' for c in range(len(csm_onsets))],['cs_deval_L' for c in range(len(cs_deval_L_onsets))],['cs_deval_R' for c in range(len(cs_deval_R_onsets))],
['cs_val_L' for c in range(len(cs_val_L_onsets))], ['cs_val_R' for c in range(len(cs_val_R_onsets))]]
trial_categ_unsort = [item for sublist in trial_categ_unsort for item in sublist]
#sort by trial timing and append to lists
sort_time_inds = np.argsort(timing)
all_trials = np.concatenate((csm_onsets, cs_deval_L_onsets, cs_deval_R_onsets, cs_val_L_onsets, cs_val_R_onsets))
all_trials = all_trials[sort_time_inds,:]
trial_list.append(all_trials)
trial_categ = [trial_categ_unsort[ind] for ind in sort_time_inds]
trial_categ_list.append(trial_categ)
chunks = run*np.ones([len(all_trials)])
chunks_list.append(chunks)
#unroll lists of lists to one list
trials_allruns = np.asarray([item for sublist in trial_list for item in sublist])
trial_categ_allruns = [item for sublist in trial_categ_list for item in sublist]
chunks_allruns = np.asarray([item for sublist in chunks_list for item in sublist]).astype(int)
cs_classes = [class_dict[trial] for trial in trial_categ_allruns]
#load fmri dataset with these values as targets
fds = fmri_dataset(samples=glm_ds_file, targets=cs_classes, chunks=chunks_allruns, mask=mask_name)
fds_subset = fds[:runs2use*60,:]
print 'Finished making targets',time.time() - start_time
return fds_subset
def plot_mtx(mtx, labels, title, skip=5):
    """Plot a dissimilarity matrix on a fixed [0, 2] color scale
    (correlation-distance range), labelling every ``skip``-th tick."""
    tick_positions = range(len(mtx))[::skip]
    tick_labels = labels[::skip]
    pl.figure()
    pl.imshow(mtx, interpolation='nearest')
    pl.xticks(tick_positions, tick_labels, rotation=90)
    pl.yticks(tick_positions, tick_labels)
    pl.title(title)
    pl.clim((0, 2))
    pl.colorbar()
class CrossDecodingFilter(Node):
    """PyMVPA partition filter for cross-decoding.

    Given three target groups, yields two filtered partitionings: each trains
    on one of the first two groups (plus the third, train-only group) and
    tests on the other, by zeroing out the partition attribute of excluded
    samples.
    """
    def __init__(self, target_groups, part_attr, target_attr,
                space='filtered_partitions', **kwargs):
        # target_groups: three collections of target values; groups 0 and 1
        # are swapped between train/test, group 2 is train-only.
        # part_attr: sample attribute holding partition codes (1=train, 2=test).
        # target_attr: sample attribute holding the target labels.
        self._target_groups = target_groups
        self._target_attr = target_attr
        self._part_attr = part_attr
        Node.__init__(self, space=space, **kwargs)
    def generate(self, ds):
        """Yield two copies of ``ds`` with filtered partition attributes."""
        # binary mask for training and testing portion
        train_part = ds.sa[self._part_attr].value == 1
        test_part = ds.sa[self._part_attr].value == 2
        # binary mask for the first and second target group
        match_1st_group = [t in self._target_groups[0]
                            for t in ds.sa[self._target_attr].value]
        match_2nd_group = [t in self._target_groups[1]
                            for t in ds.sa[self._target_attr].value]
        match_3rd_group = [t in self._target_groups[2]
                            for t in ds.sa[self._target_attr].value]
        # in the first to-be-returned dataset we will blank out
        # group1 in the training set and group2 in the testing set
        #LOGAN: we will also blank out group 3 in the testing set since we only want to train on it
        # Note: setting the partition value to zero, will cause the Splitter
        # employed in the CrossValidation Measure to ignore the corresponding
        # samples
        new_part = ds.sa[self._part_attr].value.copy()
        new_part[np.logical_and(train_part, match_1st_group)] = 0
        new_part[np.logical_and(test_part, match_2nd_group)] = 0
        new_part[np.logical_and(test_part, match_3rd_group)] = 0
        ds.sa[self.get_space()] = new_part
        yield ds
        # in the second to-be-returned dataset we will blank out
        # group2 in the training set and group1 in the testing set
        new_part = ds.sa[self._part_attr].value.copy()
        new_part[np.logical_and(train_part, match_2nd_group)] = 0
        new_part[np.logical_and(test_part, match_1st_group)] = 0
        new_part[np.logical_and(test_part, match_3rd_group)] = 0
        ds.sa[self.get_space()] = new_part
        yield ds
| ANALYSIS/T0/MVPA/PYmvpa/cross_decoding/mvpa_utils_pav.py | 7,739 | !/usr/bin/env python2 -*- coding: utf-8 -*-get timing for all conditions and sort by this timingadd a list of trial category as a sample attributesort by trial timing and append to listsunroll lists of lists to one list load fmri dataset with these values as targetsreturn fds_subset, trial_categ_allruns[:runs2use*60]get timing for all conditions and sort by this timingadd a list of trial category as a sample attributesort by trial timing and append to listsunroll lists of lists to one listload fmri dataset with these values as targets little helper function to plot dissimilarity matrices if using correlation-distance, we use colorbar range of [0,2] binary mask for training and testing ortion binary mask for the first and second target group in the first to-be-returned dataset we will blank out group1 in the training set and group2 in the testing setLOGAN: we will also blank out group 3 in the testing set since we only want to train on it Note: setting the partition value to zero, will cause the Splitter employed in the CrossValidation Measure to ignore the corresponding samples in the second to-be-returned dataset we will blank out group2 in the training set and group1 in the testing set | 1,209 | en | 0.879435 |
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from profiles_api import views
# DRF router: registered ViewSets get their list/detail URLs generated
# automatically and mounted under the root path below.
router = DefaultRouter()
# NOTE(review): ``base_name`` was renamed to ``basename`` in DRF 3.9; this
# keyword must change if the project upgrades DRF.
router.register('hello-viewset', views.HelloViewSet, base_name='hello-viewset')
router.register('profile', views.UserProfileViewSet) #No base_name needed: DRF derives it from the view's queryset
router.register('feed', views.UserProfileFeedViewSet)
urlpatterns = [
    path('hello-view/', views.HelloApiView.as_view()),
    path('login/', views.UserLoginApiView.as_view()),
    # Router-generated ViewSet URLs live at the API root.
    path('', include(router.urls))
]
| profiles_api/urls.py | 552 | No base_name needed for we have a queryset in the view | 54 | en | 0.9253 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""User profiles module for Invenio."""
from __future__ import absolute_import, print_function
from . import config
from .api import current_userprofile
class InvenioUserProfiles(object):
    """Invenio-UserProfiles extension."""

    def __init__(self, app=None):
        """Extension initialization.

        :param app: optional Flask application to initialize immediately.
        """
        if app:
            self.init_app(app)

    def init_app(self, app):
        """Flask application initialization."""
        self.init_config(app)
        # Make ``current_userprofile`` available in every template context.
        app.context_processor(
            lambda: dict(current_userprofile=current_userprofile))
        app.extensions['invenio-userprofiles'] = self

    def init_config(self, app):
        """Initialize configuration defaults on the application."""
        # The two template keys get special treatment below, so they are not
        # copied verbatim from the config module.
        special_templates = (
            'USERPROFILES_BASE_TEMPLATE',
            'USERPROFILES_SETTINGS_TEMPLATE',
        )
        for name in dir(config):
            if name.startswith('USERPROFILES_') and name not in special_templates:
                app.config.setdefault(name, getattr(config, name))
        app.config.setdefault('USERPROFILES', True)
        base_template = app.config.get(
            'BASE_TEMPLATE', 'invenio_userprofiles/base.html')
        app.config.setdefault('USERPROFILES_BASE_TEMPLATE', base_template)
        settings_template = app.config.get(
            'SETTINGS_TEMPLATE', 'invenio_userprofiles/settings/base.html')
        app.config.setdefault(
            'USERPROFILES_SETTINGS_TEMPLATE', settings_template)
        if app.config['USERPROFILES_EXTEND_SECURITY_FORMS']:
            # Remember the original registration template, then replace it
            # with the profile-aware one shipped by this package.
            register_template = app.config.get(
                'SECURITY_REGISTER_USER_TEMPLATE',
                'invenio_accounts/register_user.html')
            app.config.setdefault(
                'USERPROFILES_REGISTER_USER_BASE_TEMPLATE', register_template)
            app.config['SECURITY_REGISTER_USER_TEMPLATE'] = \
                'invenio_userprofiles/register_user.html'
| invenio_userprofiles/ext.py | 2,123 | Invenio-UserProfiles extension.
Extension initialization.
Flask application initialization.
Initialize configuration.
User profiles module for Invenio.
-*- coding: utf-8 -*- This file is part of Invenio. Copyright (C) 2015-2018 CERN. Invenio is free software; you can redistribute it and/or modify it under the terms of the MIT License; see LICENSE file for more details. Register current_profile | 398 | en | 0.684374 |
from flask import Flask
from flask_mail import Mail
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from config import config_options
# Flask extension instances are created unbound here and attached to the
# application inside create_app (application-factory pattern).
bootstrap = Bootstrap()
db = SQLAlchemy()
migrate = Migrate()
login_manager = LoginManager()
# Invalidate the session if the client's identity markers change.
login_manager.session_protection = "strong"
# Endpoint anonymous users are redirected to when login is required.
login_manager.login_view = "auth.login"
mail = Mail()
def create_app(config_name):
    """Application factory: build and return a configured Flask app.

    :param config_name: key into ``config_options`` selecting the
        configuration class to load.
    """
    app = Flask(__name__)
    # Creating the app configurations
    app.config.from_object(config_options[config_name])
    # Initializing flask extensions
    bootstrap.init_app(app)
    db.init_app(app)
    migrate.init_app(app, db)
    login_manager.init_app(app)
    mail.init_app(app)
    # Registering the blueprints.  Imported here rather than at module top,
    # presumably to avoid circular imports with the extension instances
    # defined in this package -- confirm before hoisting.
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix="/authenticate")
    return app
| app/__init__.py | 1,007 | Creating the app configurations Initializing flask extensions Registering the blueprint | 87 | en | 0.606771 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'ActiveDirectoryArgs',
'ExportPolicyRuleArgs',
'VolumePropertiesExportPolicyArgs',
]
@pulumi.input_type
class ActiveDirectoryArgs:
    """Active Directory connection settings (Pulumi input type).

    NOTE: auto-generated by the Pulumi SDK Generator -- do not edit by hand;
    regenerate instead (see the file header).
    """
    def __init__(__self__, *,
                 active_directory_id: Optional[pulumi.Input[str]] = None,
                 dns: Optional[pulumi.Input[str]] = None,
                 domain: Optional[pulumi.Input[str]] = None,
                 organizational_unit: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 smb_server_name: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 username: Optional[pulumi.Input[str]] = None):
        """
        Active Directory
        :param pulumi.Input[str] active_directory_id: Id of the Active Directory
        :param pulumi.Input[str] dns: Comma separated list of DNS server IP addresses for the Active Directory domain
        :param pulumi.Input[str] domain: Name of the Active Directory domain
        :param pulumi.Input[str] organizational_unit: The Organizational Unit (OU) within the Windows Active Directory
        :param pulumi.Input[str] password: Plain text password of Active Directory domain administrator
        :param pulumi.Input[str] smb_server_name: NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes
        :param pulumi.Input[str] status: Status of the Active Directory
        :param pulumi.Input[str] username: Username of Active Directory domain administrator
        """
        if active_directory_id is not None:
            pulumi.set(__self__, "active_directory_id", active_directory_id)
        if dns is not None:
            pulumi.set(__self__, "dns", dns)
        if domain is not None:
            pulumi.set(__self__, "domain", domain)
        if organizational_unit is not None:
            pulumi.set(__self__, "organizational_unit", organizational_unit)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if smb_server_name is not None:
            pulumi.set(__self__, "smb_server_name", smb_server_name)
        if status is not None:
            pulumi.set(__self__, "status", status)
        if username is not None:
            pulumi.set(__self__, "username", username)
    @property
    @pulumi.getter(name="activeDirectoryId")
    def active_directory_id(self) -> Optional[pulumi.Input[str]]:
        """
        Id of the Active Directory
        """
        return pulumi.get(self, "active_directory_id")
    @active_directory_id.setter
    def active_directory_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "active_directory_id", value)
    @property
    @pulumi.getter
    def dns(self) -> Optional[pulumi.Input[str]]:
        """
        Comma separated list of DNS server IP addresses for the Active Directory domain
        """
        return pulumi.get(self, "dns")
    @dns.setter
    def dns(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "dns", value)
    @property
    @pulumi.getter
    def domain(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the Active Directory domain
        """
        return pulumi.get(self, "domain")
    @domain.setter
    def domain(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "domain", value)
    @property
    @pulumi.getter(name="organizationalUnit")
    def organizational_unit(self) -> Optional[pulumi.Input[str]]:
        """
        The Organizational Unit (OU) within the Windows Active Directory
        """
        return pulumi.get(self, "organizational_unit")
    @organizational_unit.setter
    def organizational_unit(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "organizational_unit", value)
    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        Plain text password of Active Directory domain administrator
        """
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter(name="smbServerName")
    def smb_server_name(self) -> Optional[pulumi.Input[str]]:
        """
        NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes
        """
        return pulumi.get(self, "smb_server_name")
    @smb_server_name.setter
    def smb_server_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "smb_server_name", value)
    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        Status of the Active Directory
        """
        return pulumi.get(self, "status")
    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)
    @property
    @pulumi.getter
    def username(self) -> Optional[pulumi.Input[str]]:
        """
        Username of Active Directory domain administrator
        """
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "username", value)
@pulumi.input_type
class ExportPolicyRuleArgs:
    """Volume export policy rule (Pulumi input type).

    NOTE: auto-generated by the Pulumi SDK Generator -- do not edit by hand;
    regenerate instead (see the file header).
    """
    def __init__(__self__, *,
                 allowed_clients: Optional[pulumi.Input[str]] = None,
                 cifs: Optional[pulumi.Input[bool]] = None,
                 nfsv3: Optional[pulumi.Input[bool]] = None,
                 nfsv4: Optional[pulumi.Input[bool]] = None,
                 rule_index: Optional[pulumi.Input[int]] = None,
                 unix_read_only: Optional[pulumi.Input[bool]] = None,
                 unix_read_write: Optional[pulumi.Input[bool]] = None):
        """
        Volume Export Policy Rule
        :param pulumi.Input[str] allowed_clients: Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names
        :param pulumi.Input[bool] cifs: Allows CIFS protocol
        :param pulumi.Input[bool] nfsv3: Allows NFSv3 protocol
        :param pulumi.Input[bool] nfsv4: Deprecated: Will use the NFSv4.1 protocol, please use swagger version 2019-07-01 or later
        :param pulumi.Input[int] rule_index: Order index
        :param pulumi.Input[bool] unix_read_only: Read only access
        :param pulumi.Input[bool] unix_read_write: Read and write access
        """
        if allowed_clients is not None:
            pulumi.set(__self__, "allowed_clients", allowed_clients)
        if cifs is not None:
            pulumi.set(__self__, "cifs", cifs)
        if nfsv3 is not None:
            pulumi.set(__self__, "nfsv3", nfsv3)
        if nfsv4 is not None:
            pulumi.set(__self__, "nfsv4", nfsv4)
        if rule_index is not None:
            pulumi.set(__self__, "rule_index", rule_index)
        if unix_read_only is not None:
            pulumi.set(__self__, "unix_read_only", unix_read_only)
        if unix_read_write is not None:
            pulumi.set(__self__, "unix_read_write", unix_read_write)
    @property
    @pulumi.getter(name="allowedClients")
    def allowed_clients(self) -> Optional[pulumi.Input[str]]:
        """
        Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names
        """
        return pulumi.get(self, "allowed_clients")
    @allowed_clients.setter
    def allowed_clients(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "allowed_clients", value)
    @property
    @pulumi.getter
    def cifs(self) -> Optional[pulumi.Input[bool]]:
        """
        Allows CIFS protocol
        """
        return pulumi.get(self, "cifs")
    @cifs.setter
    def cifs(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "cifs", value)
    @property
    @pulumi.getter
    def nfsv3(self) -> Optional[pulumi.Input[bool]]:
        """
        Allows NFSv3 protocol
        """
        return pulumi.get(self, "nfsv3")
    @nfsv3.setter
    def nfsv3(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "nfsv3", value)
    @property
    @pulumi.getter
    def nfsv4(self) -> Optional[pulumi.Input[bool]]:
        """
        Deprecated: Will use the NFSv4.1 protocol, please use swagger version 2019-07-01 or later
        """
        return pulumi.get(self, "nfsv4")
    @nfsv4.setter
    def nfsv4(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "nfsv4", value)
    @property
    @pulumi.getter(name="ruleIndex")
    def rule_index(self) -> Optional[pulumi.Input[int]]:
        """
        Order index
        """
        return pulumi.get(self, "rule_index")
    @rule_index.setter
    def rule_index(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "rule_index", value)
    @property
    @pulumi.getter(name="unixReadOnly")
    def unix_read_only(self) -> Optional[pulumi.Input[bool]]:
        """
        Read only access
        """
        return pulumi.get(self, "unix_read_only")
    @unix_read_only.setter
    def unix_read_only(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "unix_read_only", value)
    @property
    @pulumi.getter(name="unixReadWrite")
    def unix_read_write(self) -> Optional[pulumi.Input[bool]]:
        """
        Read and write access
        """
        return pulumi.get(self, "unix_read_write")
    @unix_read_write.setter
    def unix_read_write(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "unix_read_write", value)
@pulumi.input_type
class VolumePropertiesExportPolicyArgs:
    """Set of export policy rules for a volume (Pulumi input type).

    NOTE: auto-generated by the Pulumi SDK Generator -- do not edit by hand;
    regenerate instead (see the file header).
    """
    def __init__(__self__, *,
                 rules: Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]] = None):
        """
        Set of export policy rules
        :param pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]] rules: Export policy rule
        """
        if rules is not None:
            pulumi.set(__self__, "rules", rules)
    @property
    @pulumi.getter
    def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]]:
        """
        Export policy rule
        """
        return pulumi.get(self, "rules")
    @rules.setter
    def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]]):
        pulumi.set(self, "rules", value)
| sdk/python/pulumi_azure_native/netapp/v20190601/_inputs.py | 10,826 | Active Directory
:param pulumi.Input[str] active_directory_id: Id of the Active Directory
:param pulumi.Input[str] dns: Comma separated list of DNS server IP addresses for the Active Directory domain
:param pulumi.Input[str] domain: Name of the Active Directory domain
:param pulumi.Input[str] organizational_unit: The Organizational Unit (OU) within the Windows Active Directory
:param pulumi.Input[str] password: Plain text password of Active Directory domain administrator
:param pulumi.Input[str] smb_server_name: NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes
:param pulumi.Input[str] status: Status of the Active Directory
:param pulumi.Input[str] username: Username of Active Directory domain administrator
Volume Export Policy Rule
:param pulumi.Input[str] allowed_clients: Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names
:param pulumi.Input[bool] cifs: Allows CIFS protocol
:param pulumi.Input[bool] nfsv3: Allows NFSv3 protocol
:param pulumi.Input[bool] nfsv4: Deprecated: Will use the NFSv4.1 protocol, please use swagger version 2019-07-01 or later
:param pulumi.Input[int] rule_index: Order index
:param pulumi.Input[bool] unix_read_only: Read only access
:param pulumi.Input[bool] unix_read_write: Read and write access
Set of export policy rules
:param pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]] rules: Export policy rule
Id of the Active Directory
Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names
Allows CIFS protocol
Comma separated list of DNS server IP addresses for the Active Directory domain
Name of the Active Directory domain
Allows NFSv3 protocol
Deprecated: Will use the NFSv4.1 protocol, please use swagger version 2019-07-01 or later
The Organizational Unit (OU) within the Windows Active Directory
Plain text password of Active Directory domain administrator
Order index
Export policy rule
NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes
Status of the Active Directory
Read only access
Read and write access
Username of Active Directory domain administrator
coding=utf-8 *** WARNING: this file was generated by the Pulumi SDK Generator. *** *** Do not edit by hand unless you're certain you know what you are doing! *** | 2,428 | en | 0.669402 |
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2020 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.vmc.orgs.sddcs.networks.edges.firewall.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class Config(VapiInterface):
    """
    Stub for the firewall configuration service of a management or compute
    gateway (NSX Edge).
    """
    _VAPI_SERVICE_ID = 'com.vmware.vmc.orgs.sddcs.networks.edges.firewall.config'
    """
    Identifier of the service in canonical form.
    """

    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _ConfigStub)
        self._VAPI_OPERATION_IDS = {}

    def delete(self, org, sddc, edge_id):
        """
        Delete firewall configuration for a management or compute gateway (NSX
        Edge).

        :type  org: :class:`str`
        :param org: Organization identifier. (required)
        :type  sddc: :class:`str`
        :param sddc: Sddc Identifier. (required)
        :type  edge_id: :class:`str`
        :param edge_id: Edge Identifier. (required)
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad request. Request object passed is invalid.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden. Authorization header not provided.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not found. Requested object not found.
        """
        operation_args = {
            'org': org,
            'sddc': sddc,
            'edge_id': edge_id,
        }
        return self._invoke('delete', operation_args)

    def get(self, org, sddc, edge_id):
        """
        Retrieve the firewall configuration for a management or compute gateway
        (NSX Edge).

        :type  org: :class:`str`
        :param org: Organization identifier. (required)
        :type  sddc: :class:`str`
        :param sddc: Sddc Identifier. (required)
        :type  edge_id: :class:`str`
        :param edge_id: Edge Identifier. (required)
        :rtype: :class:`com.vmware.vmc.model_client.FirewallConfig`
        :return: com.vmware.vmc.model.FirewallConfig
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad request. Request object passed is invalid.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden. Authorization header not provided.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not found. Requested object not found.
        """
        operation_args = {
            'org': org,
            'sddc': sddc,
            'edge_id': edge_id,
        }
        return self._invoke('get', operation_args)

    def update(self, org, sddc, edge_id, firewall_config):
        """
        Configure firewall for a management or compute gateway (NSX Edge).

        :type  org: :class:`str`
        :param org: Organization identifier. (required)
        :type  sddc: :class:`str`
        :param sddc: Sddc Identifier. (required)
        :type  edge_id: :class:`str`
        :param edge_id: Edge Identifier. (required)
        :type  firewall_config: :class:`com.vmware.vmc.model_client.FirewallConfig`
        :param firewall_config: (required)
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad request. Request object passed is invalid.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden. Authorization header not provided.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not found. Requested object not found.
        """
        operation_args = {
            'org': org,
            'sddc': sddc,
            'edge_id': edge_id,
            'firewall_config': firewall_config,
        }
        return self._invoke('update', operation_args)
class Statistics(VapiInterface):
    """
    Stub for the per-rule firewall statistics service of a management or
    compute gateway (NSX Edge).
    """
    _VAPI_SERVICE_ID = 'com.vmware.vmc.orgs.sddcs.networks.edges.firewall.statistics'
    """
    Identifier of the service in canonical form.
    """

    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _StatisticsStub)
        self._VAPI_OPERATION_IDS = {}

    def get(self, org, sddc, edge_id, rule_id):
        """
        Retrieve statistics for a specific firewall rule for a management or
        compute gateway (NSX Edge).

        :type  org: :class:`str`
        :param org: Organization identifier. (required)
        :type  sddc: :class:`str`
        :param sddc: Sddc Identifier. (required)
        :type  edge_id: :class:`str`
        :param edge_id: Edge Identifier. (required)
        :type  rule_id: :class:`long`
        :param rule_id: Rule Identifier. (required)
        :rtype: :class:`com.vmware.vmc.model_client.FirewallRuleStats`
        :return: com.vmware.vmc.model.FirewallRuleStats
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad request. Request object passed is invalid.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden. Authorization header not provided.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not found. Requested object not found.
        """
        operation_args = {
            'org': org,
            'sddc': sddc,
            'edge_id': edge_id,
            'rule_id': rule_id,
        }
        return self._invoke('get', operation_args)
class _ConfigStub(ApiInterfaceStub):
    """Auto-generated transport stub backing :class:`Config`.

    Declares, for each operation, the input struct type, the mapping from
    canonical vAPI error ids to binding error types, and the REST route used
    to reach the service.
    """

    def __init__(self, config):
        # properties for delete operation
        delete_input_type = type.StructType('operation-input', {
            'org': type.StringType(),
            'sddc': type.StringType(),
            'edge_id': type.StringType(),
        })
        delete_error_dict = {
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        delete_input_value_validator_list = [
        ]
        delete_output_validator_list = [
        ]
        # Python names (e.g. edge_id) are mapped to URL template variables
        # (e.g. edgeId) via path_variables.
        delete_rest_metadata = OperationRestMetadata(
            http_method='DELETE',
            url_template='/vmc/api/orgs/{org}/sddcs/{sddc}/networks/4.0/edges/{edgeId}/firewall/config',
            path_variables={
                'org': 'org',
                'sddc': 'sddc',
                'edge_id': 'edgeId',
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'org': type.StringType(),
            'sddc': type.StringType(),
            'edge_id': type.StringType(),
        })
        get_error_dict = {
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
        ]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/vmc/api/orgs/{org}/sddcs/{sddc}/networks/4.0/edges/{edgeId}/firewall/config',
            path_variables={
                'org': 'org',
                'sddc': 'sddc',
                'edge_id': 'edgeId',
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # properties for update operation
        update_input_type = type.StructType('operation-input', {
            'org': type.StringType(),
            'sddc': type.StringType(),
            'edge_id': type.StringType(),
            'firewall_config': type.ReferenceType('com.vmware.vmc.model_client', 'FirewallConfig'),
        })
        update_error_dict = {
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        update_input_value_validator_list = [
        ]
        update_output_validator_list = [
        ]
        # The firewall_config parameter is sent as the HTTP request body.
        update_rest_metadata = OperationRestMetadata(
            http_method='PUT',
            url_template='/vmc/api/orgs/{org}/sddcs/{sddc}/networks/4.0/edges/{edgeId}/firewall/config',
            request_body_parameter='firewall_config',
            path_variables={
                'org': 'org',
                'sddc': 'sddc',
                'edge_id': 'edgeId',
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # Registry of operations exposed by this stub, keyed by operation name
        # (matching the names passed to Config._invoke()).
        operations = {
            'delete': {
                'input_type': delete_input_type,
                'output_type': type.VoidType(),
                'errors': delete_error_dict,
                'input_value_validator_list': delete_input_value_validator_list,
                'output_validator_list': delete_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.vmc.model_client', 'FirewallConfig'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': type.VoidType(),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'delete': delete_rest_metadata,
            'get': get_rest_metadata,
            'update': update_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.vmc.orgs.sddcs.networks.edges.firewall.config',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class _StatisticsStub(ApiInterfaceStub):
    """Auto-generated transport stub backing :class:`Statistics`.

    Declares the input struct type, error mapping and REST route for the
    single 'get' operation of the statistics service.
    """

    def __init__(self, config):
        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'org': type.StringType(),
            'sddc': type.StringType(),
            'edge_id': type.StringType(),
            'rule_id': type.IntegerType(),
        })
        get_error_dict = {
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
        ]
        # Python names (e.g. rule_id) are mapped to URL template variables
        # (e.g. ruleId) via path_variables.
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/vmc/api/orgs/{org}/sddcs/{sddc}/networks/4.0/edges/{edgeId}/firewall/statistics/{ruleId}',
            path_variables={
                'org': 'org',
                'sddc': 'sddc',
                'edge_id': 'edgeId',
                'rule_id': 'ruleId',
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # Registry of operations exposed by this stub, keyed by operation name.
        operations = {
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.vmc.model_client', 'FirewallRuleStats'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'get': get_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.vmc.orgs.sddcs.networks.edges.firewall.statistics',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class StubFactory(StubFactoryBase):
    """Factory exposing the service stubs of this package.

    Values in _attrs are either stub classes defined in this module or the
    dotted path of a child package's StubFactory, resolved lazily.
    """
    _attrs = {
        'Config': Config,
        'Statistics': Statistics,
        'config': 'com.vmware.vmc.orgs.sddcs.networks.edges.firewall.config_client.StubFactory',
    }
| com/vmware/vmc/orgs/sddcs/networks/edges/firewall_client.py | 15,013 | :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
Delete firewall configuration for a management or compute gateway (NSX
Edge).
:type org: :class:`str`
:param org: Organization identifier. (required)
:type sddc: :class:`str`
:param sddc: Sddc Identifier. (required)
:type edge_id: :class:`str`
:param edge_id: Edge Identifier. (required)
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad request. Request object passed is invalid.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden. Authorization header not provided.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not found. Requested object not found.
Retrieve the firewall configuration for a management or compute gateway
(NSX Edge).
:type org: :class:`str`
:param org: Organization identifier. (required)
:type sddc: :class:`str`
:param sddc: Sddc Identifier. (required)
:type edge_id: :class:`str`
:param edge_id: Edge Identifier. (required)
:rtype: :class:`com.vmware.vmc.model_client.FirewallConfig`
:return: com.vmware.vmc.model.FirewallConfig
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad request. Request object passed is invalid.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden. Authorization header not provided.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not found. Requested object not found.
Retrieve statistics for a specific firewall rule for a management or
compute gateway (NSX Edge).
:type org: :class:`str`
:param org: Organization identifier. (required)
:type sddc: :class:`str`
:param sddc: Sddc Identifier. (required)
:type edge_id: :class:`str`
:param edge_id: Edge Identifier. (required)
:type rule_id: :class:`long`
:param rule_id: Rule Identifier. (required)
:rtype: :class:`com.vmware.vmc.model_client.FirewallRuleStats`
:return: com.vmware.vmc.model.FirewallRuleStats
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad request. Request object passed is invalid.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden. Authorization header not provided.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not found. Requested object not found.
Configure firewall for a management or compute gateway (NSX Edge).
:type org: :class:`str`
:param org: Organization identifier. (required)
:type sddc: :class:`str`
:param sddc: Sddc Identifier. (required)
:type edge_id: :class:`str`
:param edge_id: Edge Identifier. (required)
:type firewall_config: :class:`com.vmware.vmc.model_client.FirewallConfig`
:param firewall_config: (required)
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad request. Request object passed is invalid.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden. Authorization header not provided.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not found. Requested object not found.
-*- coding: utf-8 -*---------------------------------------------------------------------------- Copyright 2020 VMware, Inc. All rights reserved. AUTO GENERATED FILE -- DO NOT MODIFY! vAPI stub file for package com.vmware.vmc.orgs.sddcs.networks.edges.firewall.--------------------------------------------------------------------------- properties for delete operation properties for get operation properties for update operation properties for get operation | 3,665 | en | 0.416006 |
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for the pages for subtopics, and related models."""
from __future__ import absolute_import
from __future__ import unicode_literals
from core import feconf
from core import python_utils
from core import utils
from core.constants import constants
from core.domain import change_domain
from core.domain import html_validation_service
from core.domain import state_domain
from core.platform import models
# Load the storage-layer topic models via the platform registry.
(topic_models,) = models.Registry.import_models([models.NAMES.topic])

# Names of the subtopic page properties that may be targeted by an
# 'update_subtopic_page_property' change command.
SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML = 'page_contents_html'
SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO = 'page_contents_audio'
SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS = 'page_written_translations'

CMD_CREATE_NEW = 'create_new'

# These take additional 'property_name' and 'new_value' parameters and,
# optionally, 'old_value'.
CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY = 'update_subtopic_page_property'
class SubtopicPageChange(change_domain.BaseChange):
    """Domain object for changes made to subtopic_page object.

    The allowed commands, together with the attributes:
        - 'create_new' (with topic_id, subtopic_id)
        - 'update_subtopic_page_property' (
            with property_name, new_value, old_value, subtopic_id).
    """

    # The allowed list of subtopic page properties which can be used in
    # update_subtopic_page_property command.
    SUBTOPIC_PAGE_PROPERTIES = (
        SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML,
        SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO,
        SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS)

    # Each dict declares one allowed command: its name, the attributes a
    # change of that kind must / may carry, and value constraints.
    # NOTE(review): presumably consumed by change_domain.BaseChange
    # validation — confirm against that class.
    ALLOWED_COMMANDS = [{
        'name': CMD_CREATE_NEW,
        'required_attribute_names': ['topic_id', 'subtopic_id'],
        'optional_attribute_names': [],
        'user_id_attribute_names': []
    }, {
        'name': CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY,
        'required_attribute_names': [
            'property_name', 'new_value', 'old_value', 'subtopic_id'],
        'optional_attribute_names': [],
        'user_id_attribute_names': [],
        'allowed_values': {'property_name': SUBTOPIC_PAGE_PROPERTIES}
    }]
class SubtopicPageContents(python_utils.OBJECT):
    """Domain object for the contents on a subtopic page."""

    def __init__(
            self, subtitled_html, recorded_voiceovers, written_translations):
        """Constructs a SubtopicPageContents domain object.

        Args:
            subtitled_html: SubtitledHtml. The html data being displayed on
                the page.
            recorded_voiceovers: RecordedVoiceovers. The recorded voiceovers for
                the subtopic page content and their translations in different
                languages.
            written_translations: WrittenTranslations. The text translations of
                the subtopic page content.
        """
        self.subtitled_html = subtitled_html
        self.recorded_voiceovers = recorded_voiceovers
        self.written_translations = written_translations

    def validate(self):
        """Validates the SubtopicPageContents object, verifying that all
        fields are of the correct type.
        """
        self.subtitled_html.validate()
        # All voiceovers and translations on the page are keyed by the single
        # content id of the subtitled html.
        # (Set literal instead of set([...]) — same behavior, idiomatic form.)
        content_ids = {self.subtitled_html.content_id}
        self.recorded_voiceovers.validate(content_ids)
        self.written_translations.validate(content_ids)

    @classmethod
    def create_default_subtopic_page_contents(cls):
        """Creates a default subtopic page contents object.

        Returns:
            SubtopicPageContents. A default object.
        """
        content_id = feconf.DEFAULT_SUBTOPIC_PAGE_CONTENT_ID
        return cls(
            state_domain.SubtitledHtml.create_default_subtitled_html(
                content_id),
            state_domain.RecordedVoiceovers.from_dict(
                {'voiceovers_mapping': {content_id: {}}}),
            state_domain.WrittenTranslations.from_dict(
                {'translations_mapping': {content_id: {}}}))

    def to_dict(self):
        """Returns a dict representing this SubtopicPageContents domain object.

        Returns:
            dict. A dict, mapping all fields of SubtopicPageContents instance.
        """
        return {
            'subtitled_html': self.subtitled_html.to_dict(),
            'recorded_voiceovers': self.recorded_voiceovers.to_dict(),
            'written_translations': self.written_translations.to_dict()
        }

    @classmethod
    def from_dict(cls, page_contents_dict):
        """Creates a subtopic page contents object from a dictionary.

        Args:
            page_contents_dict: dict. The dict representation of
                SubtopicPageContents object.

        Returns:
            SubtopicPageContents. The corresponding object.
        """
        page_contents = state_domain.SubtitledHtml.from_dict(
            page_contents_dict['subtitled_html'])
        # Validate eagerly so malformed html fails here rather than later.
        page_contents.validate()
        return cls(
            page_contents,
            state_domain.RecordedVoiceovers.from_dict(page_contents_dict[
                'recorded_voiceovers']),
            state_domain.WrittenTranslations.from_dict(page_contents_dict[
                'written_translations']))
class SubtopicPage(python_utils.OBJECT):
    """Domain object for a Subtopic page."""

    def __init__(
            self, subtopic_page_id, topic_id, page_contents,
            page_contents_schema_version, language_code, version):
        """Constructs a SubtopicPage domain object.

        Args:
            subtopic_page_id: str. The unique ID of the subtopic page.
            topic_id: str. The ID of the topic that this subtopic is a part of.
            page_contents: SubtopicPageContents. The html and audio
                translations to be surfaced to the learner.
            page_contents_schema_version: int. The schema version for the page
                contents object.
            language_code: str. The ISO 639-1 code for the language this
                subtopic page is written in.
            version: int. The current version of the subtopic.
        """
        self.id = subtopic_page_id
        self.topic_id = topic_id
        self.page_contents = page_contents
        self.page_contents_schema_version = page_contents_schema_version
        self.language_code = language_code
        self.version = version

    def to_dict(self):
        """Returns a dict representing this SubtopicPage domain object.

        Returns:
            dict. A dict, mapping all fields of SubtopicPage instance.
        """
        return {
            'id': self.id,
            'topic_id': self.topic_id,
            'page_contents': self.page_contents.to_dict(),
            'page_contents_schema_version': self.page_contents_schema_version,
            'language_code': self.language_code,
            'version': self.version
        }

    @classmethod
    def get_subtopic_page_id(cls, topic_id, subtopic_id):
        """Returns the subtopic page id from the topic_id and subtopic_id.

        Args:
            topic_id: str. The id of the topic that the subtopic is a part of.
            subtopic_id: int. The id of the subtopic.

        Returns:
            str. The subtopic_page_id calculated from the given values.
        """
        # The '<topic_id>-<subtopic_id>' format is relied upon by
        # get_subtopic_id_from_subtopic_page_id() below.
        return '%s-%s' % (topic_id, subtopic_id)

    @classmethod
    def create_default_subtopic_page(cls, subtopic_id, topic_id):
        """Creates a SubtopicPage object with default values.

        Args:
            subtopic_id: str. ID of the subtopic.
            topic_id: str. The Id of the topic to which this page is linked
                with.

        Returns:
            SubtopicPage. A subtopic object with given id, topic_id and default
            page contents field.
        """
        subtopic_page_id = cls.get_subtopic_page_id(topic_id, subtopic_id)
        # New pages start at version 0 with the latest contents schema.
        return cls(
            subtopic_page_id, topic_id,
            SubtopicPageContents.create_default_subtopic_page_contents(),
            feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION,
            constants.DEFAULT_LANGUAGE_CODE, 0)

    @classmethod
    def convert_html_fields_in_subtopic_page_contents(
            cls, subtopic_page_contents_dict, conversion_fn):
        """Applies a conversion function on all the html strings in subtopic
        page contents to migrate them to a desired state.

        Args:
            subtopic_page_contents_dict: dict. The dict representation of
                subtopic page contents.
            conversion_fn: function. The conversion function to be applied on
                the subtopic_page_contents_dict.

        Returns:
            dict. The converted subtopic_page_contents_dict.
        """
        # The html lives in two places: inside the written translations and
        # in the page's subtitled html; convert both.
        subtopic_page_contents_dict['written_translations'] = (
            state_domain.WrittenTranslations.
            convert_html_in_written_translations(
                subtopic_page_contents_dict['written_translations'],
                conversion_fn))
        subtopic_page_contents_dict['subtitled_html']['html'] = (
            conversion_fn(
                subtopic_page_contents_dict['subtitled_html']['html']))
        return subtopic_page_contents_dict

    @classmethod
    def _convert_page_contents_v1_dict_to_v2_dict(cls, page_contents_dict):
        """Converts v1 SubtopicPage Contents schema to the v2 schema.
        v2 schema introduces the new schema for Math components.

        Args:
            page_contents_dict: dict. A dict used to initialize a SubtopicPage
                domain object.

        Returns:
            dict. The converted page_contents_dict.
        """
        return cls.convert_html_fields_in_subtopic_page_contents(
            page_contents_dict,
            html_validation_service.add_math_content_to_math_rte_components)

    @classmethod
    def _convert_page_contents_v2_dict_to_v3_dict(cls, page_contents_dict):
        """Converts v2 SubtopicPage Contents schema to the v3 schema.
        v3 schema deprecates oppia-noninteractive-svgdiagram tag and converts
        existing occurrences of it to oppia-noninteractive-image tag.

        Args:
            page_contents_dict: dict. A dict used to initialize a SubtopicPage
                domain object.

        Returns:
            dict. The converted page_contents_dict.
        """
        return cls.convert_html_fields_in_subtopic_page_contents(
            page_contents_dict,
            html_validation_service.convert_svg_diagram_tags_to_image_tags)

    @classmethod
    def _convert_page_contents_v3_dict_to_v4_dict(cls, page_contents_dict):
        """Converts v3 SubtopicPage Contents schema to the v4 schema.
        v4 schema fixes HTML encoding issues.

        Args:
            page_contents_dict: dict. A dict used to initialize a SubtopicPage
                domain object.

        Returns:
            dict. The converted page_contents_dict.
        """
        return cls.convert_html_fields_in_subtopic_page_contents(
            page_contents_dict,
            html_validation_service.fix_incorrectly_encoded_chars)

    @classmethod
    def update_page_contents_from_model(
            cls, versioned_page_contents, current_version):
        """Converts the page_contents blob contained in the given
        versioned_page_contents dict from current_version to
        current_version + 1. Note that the versioned_page_contents being
        passed in is modified in-place.

        Args:
            versioned_page_contents: dict. A dict with two keys:
                - schema_version: str. The schema version for the
                    page_contents dict.
                - page_contents: dict. The dict comprising the subtopic page
                    contents.
            current_version: int. The current schema version of page_contents.
        """
        versioned_page_contents['schema_version'] = current_version + 1
        # Dispatch to the single-step migration method for this version, e.g.
        # _convert_page_contents_v1_dict_to_v2_dict; a method must exist for
        # every supported current_version.
        conversion_fn = getattr(
            cls, '_convert_page_contents_v%s_dict_to_v%s_dict' % (
                current_version, current_version + 1))
        versioned_page_contents['page_contents'] = conversion_fn(
            versioned_page_contents['page_contents'])

    def get_subtopic_id_from_subtopic_page_id(self):
        """Returns the id from the subtopic page id of the object.

        Returns:
            int. The subtopic_id of the object.
        """
        # self.id has the form '<topic_id>-<subtopic_id>' (see
        # get_subtopic_page_id); strip the topic id and the '-' separator.
        return int(self.id[len(self.topic_id) + 1:])

    def update_page_contents_html(self, new_page_contents_html):
        """The new value for the html data field.

        Args:
            new_page_contents_html: SubtitledHtml. The new html for the subtopic
                page.
        """
        self.page_contents.subtitled_html = new_page_contents_html

    def update_page_contents_audio(self, new_page_contents_audio):
        """The new value for the recorded_voiceovers data field.

        Args:
            new_page_contents_audio: RecordedVoiceovers. The new audio for
                the subtopic page.
        """
        self.page_contents.recorded_voiceovers = new_page_contents_audio

    def update_page_contents_written_translations(
            self, new_page_written_translations_dict):
        """The new value for the written_translations data field.

        Args:
            new_page_written_translations_dict: dict. The new translation for
                the subtopic page.
        """
        self.page_contents.written_translations = (
            state_domain.WrittenTranslations.from_dict(
                new_page_written_translations_dict))

    def validate(self):
        """Validates various properties of the SubtopicPage object.

        Raises:
            ValidationError. One or more attributes of the subtopic page are
                invalid.
        """
        if not isinstance(self.topic_id, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Expected topic_id to be a string, received %s' %
                self.topic_id)
        # NOTE(review): bool is a subclass of int, so True/False pass these
        # isinstance(..., int) checks — confirm this is acceptable.
        if not isinstance(self.version, int):
            raise utils.ValidationError(
                'Expected version number to be an int, received %s' %
                self.version)
        self.page_contents.validate()
        if not isinstance(self.page_contents_schema_version, int):
            raise utils.ValidationError(
                'Expected page contents schema version to be an integer, '
                'received %s' % self.page_contents_schema_version)
        # Only the latest contents schema version is valid here; older pages
        # are expected to have been migrated already.
        if (
                self.page_contents_schema_version !=
                feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION):
            raise utils.ValidationError(
                'Expected page contents schema version to be %s, received %s'
                % (
                    feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION,
                    self.page_contents_schema_version)
            )
        if not isinstance(self.language_code, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Expected language code to be a string, received %s' %
                self.language_code)
        if not any(
                self.language_code == lc['code']
                for lc in constants.SUPPORTED_CONTENT_LANGUAGES
        ):
            raise utils.ValidationError(
                'Invalid language code: %s' % self.language_code)
| core/domain/subtopic_page_domain.py | 15,893 | Domain object for a Subtopic page.
Domain object for changes made to subtopic_page object.
The allowed commands, together with the attributes:
- 'create_new' (with topic_id, subtopic_id)
- 'update_subtopic_page_property' (
with property_name, new_value, old_value, subtopic_id).
Domain object for the contents on a subtopic page.
Constructs a SubtopicPageContents domain object.
Args:
subtitled_html: SubtitledHtml. The html data being displayed on
the page.
recorded_voiceovers: RecordedVoiceovers. The recorded voiceovers for
the subtopic page content and their translations in different
languages.
written_translations: WrittenTranslations. The text translations of
the subtopic page content.
Constructs a SubtopicPage domain object.
Args:
subtopic_page_id: str. The unique ID of the subtopic page.
topic_id: str. The ID of the topic that this subtopic is a part of.
page_contents: SubtopicPageContents. The html and audio
translations to be surfaced to the learner.
page_contents_schema_version: int. The schema version for the page
contents object.
language_code: str. The ISO 639-1 code for the language this
subtopic page is written in.
version: int. The current version of the subtopic.
Converts v1 SubtopicPage Contents schema to the v2 schema.
v2 schema introduces the new schema for Math components.
Args:
page_contents_dict: dict. A dict used to initialize a SubtopicPage
domain object.
Returns:
dict. The converted page_contents_dict.
Converts v2 SubtopicPage Contents schema to the v3 schema.
v3 schema deprecates oppia-noninteractive-svgdiagram tag and converts
existing occurrences of it to oppia-noninteractive-image tag.
Args:
page_contents_dict: dict. A dict used to initialize a SubtopicPage
domain object.
Returns:
dict. The converted page_contents_dict.
Converts v3 SubtopicPage Contents schema to the v4 schema.
v4 schema fixes HTML encoding issues.
Args:
page_contents_dict: dict. A dict used to initialize a SubtopicPage
domain object.
Returns:
dict. The converted page_contents_dict.
Applies a conversion function on all the html strings in subtopic
page contents to migrate them to a desired state.
Args:
subtopic_page_contents_dict: dict. The dict representation of
subtopic page contents.
conversion_fn: function. The conversion function to be applied on
the subtopic_page_contents_dict.
Returns:
dict. The converted subtopic_page_contents_dict.
Creates a SubtopicPage object with default values.
Args:
subtopic_id: str. ID of the subtopic.
topic_id: str. The Id of the topic to which this page is linked
with.
Returns:
SubtopicPage. A subtopic object with given id, topic_id and default
page contents field.
Creates a default subtopic page contents object.
Returns:
SubtopicPageContents. A default object.
Creates a subtopic page contents object from a dictionary.
Args:
page_contents_dict: dict. The dict representation of
SubtopicPageContents object.
Returns:
SubtopicPageContents. The corresponding object.
Returns the id from the subtopic page id of the object.
Returns:
int. The subtopic_id of the object.
Returns the subtopic page id from the topic_id and subtopic_id.
Args:
topic_id: str. The id of the topic that the subtopic is a part of.
subtopic_id: int. The id of the subtopic.
Returns:
str. The subtopic_page_id calculated from the given values.
Returns a dict representing this SubtopicPageContents domain object.
Returns:
dict. A dict, mapping all fields of SubtopicPageContents instance.
Returns a dict representing this SubtopicPage domain object.
Returns:
dict. A dict, mapping all fields of SubtopicPage instance.
The new value for the recorded_voiceovers data field.
Args:
new_page_contents_audio: RecordedVoiceovers. The new audio for
the subtopic page.
Converts the page_contents blob contained in the given
versioned_page_contents dict from current_version to
current_version + 1. Note that the versioned_page_contents being
passed in is modified in-place.
Args:
versioned_page_contents: dict. A dict with two keys:
- schema_version: str. The schema version for the
page_contents dict.
- page_contents: dict. The dict comprising the subtopic page
contents.
current_version: int. The current schema version of page_contents.
The new value for the html data field.
Args:
new_page_contents_html: SubtitledHtml. The new html for the subtopic
page.
The new value for the written_translations data field.
Args:
new_page_written_translations_dict: dict. The new translation for
the subtopic page.
Validates the SubtopicPageContents object, verifying that all
fields are of the correct type.
Validates various properties of the SubtopicPage object.
Raises:
ValidationError. One or more attributes of the subtopic page are
invalid.
Domain objects for the pages for subtopics, and related models.
coding: utf-8 Copyright 2018 The Oppia Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS-IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. These take additional 'property_name' and 'new_value' parameters and, optionally, 'old_value'. The allowed list of subtopic page properties which can be used in update_subtopic_page_property command. | 5,905 | en | 0.691989 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Benchmarks for low-level eager execution primitives.
To run CPU benchmarks:
bazel run -c opt benchmarks_test -- --benchmarks=.
To run GPU benchmarks:
bazel run --config=cuda -c opt --copt="-mavx" benchmarks_test -- \
--benchmarks=.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python import keras
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import backprop # pylint: disable=unused-import
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import gradient_descent
CPU = "/device:CPU:0"
GPU = "/device:GPU:0"
def c_tfe_py_fastpath_execute(a,
                              b,
                              transpose_a=False,
                              transpose_b=False,
                              name=None):
  """Runs a MatMul through the TFE_Py_FastPathExecute C entry point.

  Mirrors the generated-op calling convention so the fast path's dispatch
  overhead can be benchmarked directly.  Eager mode only (see assert).
  """
  eager_ctx = context.context()
  assert eager_ctx.executing_eagerly(
  ), "The prototype doesn't contain C code for graph construction"
  try:
    # pylint: disable=protected-access
    return pywrap_tensorflow.TFE_Py_FastPathExecute(
        eager_ctx._handle, eager_ctx.device_name, "MatMul", name,
        eager_ctx._post_execution_callbacks, a, b, "transpose_a", transpose_a,
        "transpose_b", transpose_b)
  except core._NotOkStatusException as not_ok:
    # Append the op name (when given) to the status message, then re-raise
    # as the mapped exception type without exception chaining.
    if name is None:
      message = not_ok.message
    else:
      message = not_ok.message + " name: " + name
    six.raise_from(core._status_to_exception(not_ok.code, message), None)
class SubclassedKerasModel(keras.Model):
  """MLP built via keras.Model subclassing.

  Mirrors make_keras_model / make_sequential_keras_model so the three
  Keras build styles can be benchmarked against each other.
  """

  def __init__(self, initializer="ones"):
    super(SubclassedKerasModel, self).__init__()
    self._can_use_graph_functions = True
    # Same layer stack as the functional/sequential variants:
    # 64 -> 128 -> 256 -> 256 -> 10 units.
    layer_specs = (("layer_a", 64), ("layer_b", 128), ("layer_c", 256),
                   ("layer_d", 256), ("layer_e", 10))
    for attr_name, units in layer_specs:
      setattr(self, attr_name, keras.layers.Dense(
          units, kernel_initializer=initializer, bias_initializer="zeros"))

  def call(self, x):
    # Feed the activations through the five layers in declaration order.
    for layer in (self.layer_a, self.layer_b, self.layer_c, self.layer_d,
                  self.layer_e):
      x = layer(x)
    return x
def make_keras_model(initializer="ones"):
  """Builds the functional-API twin of SubclassedKerasModel."""
  model_input = keras.Input(shape=(10,))
  net = model_input
  # Same layer stack as the other two build styles.
  for units in (64, 128, 256, 256, 10):
    net = keras.layers.Dense(
        units, kernel_initializer=initializer, bias_initializer="zeros")(net)
  return keras.Model(inputs=model_input, outputs=net)
def make_sequential_keras_model(initializer="ones"):
  """Builds the Sequential twin of SubclassedKerasModel."""
  model = keras.models.Sequential()
  # Only the first layer needs the explicit input shape; the rest infer it.
  model.add(keras.layers.Dense(
      64, kernel_initializer=initializer, bias_initializer="zeros",
      input_shape=(10,)))
  for units in (128, 256, 256, 10):
    model.add(keras.layers.Dense(
        units, kernel_initializer=initializer, bias_initializer="zeros"))
  return model
class MicroBenchmarks(test.Benchmark):
  """Micro-benchmarks for low-level eager execution primitives.

  Covers tensor creation, elementwise multiply, matmul through several call
  paths (numpy, tf ops, generated ops, TFE_Py_Execute, defun), gradient-tape
  overheads, resource-variable reads, defun cache dispatch, and Keras model
  call/fit/evaluate/predict.  Each benchmark reports the mean wall time per
  iteration (microseconds) via `Benchmark.report_benchmark`.
  """
  def __init__(self):
    # used for multiply benchmarks
    self._m_2 = random_ops.random_uniform([2])
    # used for matmul benchmarks
    self._m_2_by_2 = random_ops.random_uniform((2, 2))
    self._m_100_by_784 = random_ops.random_uniform((100, 784))
    # Iteration counts shared by the matmul-family benchmarks below.
    self._num_iters_2_by_2 = 30000
    self._num_iters_100_by_784 = 1000
  def _run(self, func, num_iters, execution_mode=None):
    """Times `num_iters` calls of `func`; reports mean microseconds/call."""
    # call func to maybe warm up the GPU
    ctx = context.context()
    with ctx.execution_mode(execution_mode):
      func()
      if execution_mode == context.ASYNC:
        ctx.async_wait()
      start = time.time()
      for _ in xrange(num_iters):
        func()
      # In async mode, drain the pending-op queue so queued work is counted
      # inside the measured interval.
      if execution_mode == context.ASYNC:
        ctx.async_wait()
      end = time.time()
      mean_us = (end - start) * 1e6 / num_iters
      self.report_benchmark(
          iters=num_iters,
          wall_time=mean_us,
          extras={"examples_per_sec": num_iters / (end - start)})
  def benchmark_create_np_array(self):
    func = lambda: np.array([3.0])
    self._run(func, 30000)
  def _benchmark_create_tensor(self, value, dtype, device):
    """Benchmark overheads of creating a Tensor object."""
    ctx = context.context()
    handle = ctx._handle
    if device == GPU:
      # Warmup the GPU
      ops.EagerTensor(value, context=handle, device=device)
    def func():
      ops.EagerTensor(value, context=handle, device=device, dtype=dtype)
    self._run(func, 30000)
  def benchmark_create_constant(self):
    func = lambda: constant_op.constant(3.0)
    self._run(func, 30000)
  def benchmark_create_float_tensor_from_list_CPU(self):
    self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, CPU)
  def benchmark_create_float_tensor_from_np_array_CPU(self):
    self._benchmark_create_tensor(
        np.array([[3.0]], dtype=np.float32), dtypes.float32.as_datatype_enum,
        CPU)
  def benchmark_create_int32_tensor_from_list_CPU(self):
    self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, CPU)
  def benchmark_create_int32_tensor_from_np_array_CPU(self):
    self._benchmark_create_tensor(
        np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, CPU)
  def benchmark_create_float_tensor_from_list_GPU(self):
    if not context.num_gpus():
      return
    self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, GPU)
  def benchmark_create_float_tensor_from_np_array_GPU(self):
    if not context.num_gpus():
      return
    self._benchmark_create_tensor(
        np.array([[3.0]], dtype=np.float32), dtypes.float32.as_datatype_enum,
        GPU)
  def benchmark_create_int32_tensor_from_list_GPU(self):
    # int32's are kept on host memory even when executing on GPU.
    if not context.num_gpus():
      return
    self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, GPU)
  def benchmark_create_int32_tensor_from_np_array_GPU(self):
    # int32's are kept on host memory even when executing on GPU.
    if not context.num_gpus():
      return
    self._benchmark_create_tensor(
        np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, GPU)
  def _benchmark_np_multiply(self, m, num_iters):
    a = m.cpu().numpy()
    func = lambda: a * a
    self._run(func, num_iters)
  def _benchmark_tf_multiply(self, m, num_iters):
    func = lambda: m * m
    self._run(func, num_iters)
  def _benchmark_tf_multiply_op(self, m, num_iters):
    func = lambda: math_ops.multiply(m, m)
    self._run(func, num_iters)
  def benchmark_np_multiply(self):
    self._benchmark_np_multiply(self._m_2, 30000)
  def benchmark_tf_multiply_CPU(self):
    with context.device(CPU):
      m = self._m_2.cpu()
      self._benchmark_tf_multiply(m, 30000)
  def benchmark_tf_multiply_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2.gpu()
      self._benchmark_tf_multiply(m, 30000)
  def benchmark_tf_multiply_op_CPU(self):
    with context.device(CPU):
      m = self._m_2.cpu()
      self._benchmark_tf_multiply_op(m, 30000)
  def benchmark_tf_multiply_op_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2.gpu()
      self._benchmark_tf_multiply_op(m, 30000)
  def benchmark_tf_identity(self):
    m = self._m_2
    self._run(lambda: gen_array_ops.identity(m), 30000)
  def benchmark_slowpath_tf_identity(self):
    self._run(lambda: gen_array_ops.identity(1), 30000)
  def benchmark_tfe_py_execute_identity(self):
    m = self._m_2
    ctx_handle = context.context()._handle
    attrs = ("T", self._m_2.dtype.as_datatype_enum)
    inputs = [m]
    def f():
      pywrap_tensorflow.TFE_Py_Execute(ctx_handle, None, "Identity", inputs,
                                       attrs, 1)
    self._run(f, 30000)
  def benchmark_tf_gradient_function_identity(self):
    with context.device(CPU):
      m = gen_array_ops.identity(self._m_2)
      self._run(
          lambda: backprop.gradients_function(gen_array_ops.identity, [0])(m),
          30000)
  def benchmark_tf_gradient_forward_identity(self):
    with backprop.GradientTape() as tape:
      m = self._m_2
      tape.watch(m)
      self._run(lambda: gen_array_ops.identity(m), 30000)
  def benchmark_tf_gradient_tape_push_pop(self):
    def f():
      with backprop.GradientTape():
        pass
    self._run(f, 30000)
  def benchmark_tf_gradient_function_no_op(self):
    with context.device(CPU):
      m = gen_array_ops.identity(self._m_2)
      self._run(lambda: backprop.gradients_function(lambda x: x, [0])(m), 30000)
  def _benchmark_np_matmul(self, m, transpose_b, num_iters):
    a = m.cpu().numpy()
    b = a.T if transpose_b else a
    func = lambda: np.dot(a, b)
    self._run(func, num_iters)
  def _benchmark_tf_matmul(self, m, transpose_b, num_iters,
                           execution_mode=None):
    func = lambda: math_ops.matmul(m, m, transpose_b=transpose_b)
    self._run(func, num_iters, execution_mode=execution_mode)
  def _benchmark_gen_math_ops_matmul(self, m, transpose_b, num_iters):
    def func():
      gen_math_ops.mat_mul(m, m, transpose_b=transpose_b)
    self._run(func, num_iters)
  def _benchmark_tfe_py_fastpath_execute_matmul(self, m, transpose_b,
                                                num_iters):
    def func():
      c_tfe_py_fastpath_execute(m, m, transpose_b=transpose_b)
    self._run(func, num_iters)
  def _benchmark_tfe_py_execute_matmul(self, m, transpose_b, num_iters):
    inputs = [m, m]
    # pylint: disable=protected-access
    ctx_handle = context.context()._handle
    # pylint: enable=protected-access
    device = context.context().device_name
    attrs = ("transpose_a", False, "transpose_b", transpose_b, "T",
             m.dtype.as_datatype_enum)
    def func():
      pywrap_tensorflow.TFE_Py_Execute(ctx_handle, device, "MatMul", inputs,
                                       attrs, 1)
    self._run(func, num_iters)
  def _benchmark_defun_matmul(self,
                              m,
                              transpose_b,
                              num_iters,
                              execution_mode=None):
    f = function.defun(math_ops.matmul)
    func = lambda: f(m, m, transpose_b=transpose_b)
    self._run(func, num_iters, execution_mode=execution_mode)
  def _benchmark_defun_matmul_forward_backward(self,
                                               m,
                                               transpose_b,
                                               num_iters,
                                               execution_mode=None):
    f = function.defun(math_ops.matmul)
    def func():
      with backprop.GradientTape() as gt:
        gt.watch(m)
        y = f(m, m, transpose_b=transpose_b)
        _ = gt.gradient(y, m)
    self._run(func, num_iters, execution_mode=execution_mode)
  def _benchmark_read_variable(self, m, num_iters):
    self._run(m.value, num_iters)
  def _benchmark_matmul_read_variable(self, m, num_iters):
    self._benchmark_gen_math_ops_matmul(
        m, transpose_b=False, num_iters=num_iters)
  def _benchmark_matmul_read_variable_with_tape(self, m, num_iters):
    with backprop.GradientTape() as tape:
      tape.watch(m)
      self._benchmark_gen_math_ops_matmul(
          m, transpose_b=False, num_iters=num_iters)
  def _benchmark_read_variable_with_tape(self, m, num_iters):
    with backprop.GradientTape() as tape:
      tape.watch(m)
      self._run(m.value, num_iters)
  # Benchmarks for A^2, A of dimension 2 by 2.
  def benchmark_np_matmul_2_by_2(self):
    self._benchmark_np_matmul(
        self._m_2_by_2, transpose_b=False, num_iters=self._num_iters_2_by_2)
  def benchmark_tf_matmul_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_tf_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)
  def benchmark_tf_matmul_2_by_2_CPU_async(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_tf_matmul(
          m,
          transpose_b=False,
          num_iters=self._num_iters_2_by_2,
          execution_mode=context.ASYNC)
  def benchmark_gen_math_ops_matmul_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_gen_math_ops_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)
  def benchmark_tfe_py_fastpath_execute_matmul_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_tfe_py_fastpath_execute_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)
  def benchmark_tfe_py_execute_matmul_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_tfe_py_execute_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)
  def benchmark_defun_matmul_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_defun_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)
  def benchmark_defun_matmul_2_by_2_CPU_async(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_defun_matmul(
          m,
          transpose_b=False,
          num_iters=self._num_iters_2_by_2,
          execution_mode=context.ASYNC)
  def benchmark_defun_matmul_forward_backward_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_defun_matmul_forward_backward(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)
  def benchmark_defun_matmul_forward_backward_2_by_2_CPU_async(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_defun_matmul_forward_backward(
          m,
          transpose_b=False,
          num_iters=self._num_iters_2_by_2,
          execution_mode=context.ASYNC)
  def benchmark_tf_matmul_2_by_2_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2_by_2.gpu()
      self._benchmark_tf_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)
  def benchmark_tf_matmul_2_by_2_GPU_async(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2_by_2.gpu()
      self._benchmark_tf_matmul(
          m,
          transpose_b=False,
          num_iters=self._num_iters_2_by_2,
          execution_mode=context.ASYNC)
  def benchmark_gen_math_ops_matmul_2_by_2_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2_by_2.gpu()
      self._benchmark_gen_math_ops_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)
  def benchmark_tfe_py_execute_matmul_2_by_2_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2_by_2.gpu()
      self._benchmark_tfe_py_execute_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)
  def benchmark_defun_matmul_2_by_2_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2_by_2.gpu()
      self._benchmark_defun_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)
  def benchmark_defun_matmul_2_by_2_GPU_async(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2_by_2.gpu()
      self._benchmark_defun_matmul(
          m,
          transpose_b=False,
          num_iters=self._num_iters_2_by_2,
          execution_mode=context.ASYNC)
  # Benchmarks for AA.T, A of dimension 100 by 784.
  def benchmark_np_matmul_100_by_784(self):
    self._benchmark_np_matmul(
        self._m_100_by_784,
        transpose_b=True,
        num_iters=self._num_iters_100_by_784)
  def benchmark_tf_matmul_100_by_784_CPU(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_tf_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)
  def benchmark_tf_matmul_100_by_784_CPU_async(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_tf_matmul(
          m,
          transpose_b=True,
          num_iters=self._num_iters_100_by_784,
          execution_mode=context.ASYNC)
  def benchmark_gen_math_ops_matmul_100_by_784_CPU(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_gen_math_ops_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)
  def benchmark_tfe_py_fastpath_execute_matmul_100_by_784_CPU(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_tfe_py_fastpath_execute_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)
  def benchmark_tfe_py_execute_matmul_100_by_784_CPU(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_tfe_py_execute_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)
  def benchmark_defun_matmul_100_by_784_CPU(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_defun_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)
  def benchmark_tf_matmul_100_by_784_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_100_by_784.gpu()
      self._benchmark_tf_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)
  def benchmark_tf_matmul_100_by_784_GPU_async(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_100_by_784.gpu()
      self._benchmark_tf_matmul(
          m,
          transpose_b=True,
          num_iters=self._num_iters_100_by_784,
          execution_mode=context.ASYNC)
  def benchmark_gen_math_ops_matmul_100_by_784_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_100_by_784.gpu()
      self._benchmark_gen_math_ops_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)
  def benchmark_tfe_py_execute_matmul_100_by_784_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_100_by_784.gpu()
      self._benchmark_tfe_py_execute_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)
  def benchmark_defun_matmul_100_by_784_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_100_by_784.gpu()
      self._benchmark_defun_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)
  def benchmark_defun_without_signature(self):
    def func(t1, t2, t3, t4, t5, t6, t7, t8):
      del t1, t2, t3, t4, t5, t6, t7, t8
      return None
    defined = function.defun(func)
    t = constant_op.constant(0.0)
    cache_computation = lambda: defined(t, t, t, t, t, t, t, t)
    self._run(cache_computation, 30000)
  def benchmark_defun_without_signature_and_with_kwargs(self):
    def func(t1, t2, t3, t4, t5, t6, t7, t8):
      del t1, t2, t3, t4, t5, t6, t7, t8
      return None
    defined = function.defun(func)
    t = constant_op.constant(0.0)
    def cache_computation():
      return defined(t1=t, t2=t, t3=t, t4=t, t5=t, t6=t, t7=t, t8=t)
    self._run(cache_computation, 30000)
  def benchmark_defun_with_signature(self):
    def func(t1, t2, t3, t4, t5, t6, t7, t8):
      del t1, t2, t3, t4, t5, t6, t7, t8
      return None
    defined = function.defun(
        func, input_signature=[tensor_spec.TensorSpec([], dtypes.float32)] * 8)
    t = constant_op.constant(0.0)
    signature_computation = lambda: defined(t, t, t, t, t, t, t, t)
    self._run(signature_computation, 30000)
  def benchmark_defun_with_signature_and_kwargs(self):
    def func(t1, t2, t3, t4, t5, t6, t7, t8):
      del t1, t2, t3, t4, t5, t6, t7, t8
      return None
    defined = function.defun(
        func, input_signature=[tensor_spec.TensorSpec([], dtypes.float32)] * 8)
    t = constant_op.constant(0.0)
    def signature_computation():
      return defined(t1=t, t2=t, t3=t, t4=t, t5=t, t6=t, t7=t, t8=t)
    self._run(signature_computation, 30000)
  def benchmark_matmul_read_variable_op_2_by_2_CPU(self):
    with context.device(CPU):
      m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
      self._benchmark_matmul_read_variable(m, num_iters=self._num_iters_2_by_2)
  def benchmark_matmul_read_variable_op_with_tape_2_by_2_CPU(self):
    with context.device(CPU):
      m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
      self._benchmark_matmul_read_variable_with_tape(
          m, num_iters=self._num_iters_2_by_2)
  def benchmark_read_variable_op_2_by_2_CPU(self):
    with context.device(CPU):
      m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
      self._benchmark_read_variable(m, num_iters=self._num_iters_2_by_2)
  def benchmark_read_variable_op_2_by_2_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = resource_variable_ops.ResourceVariable(self._m_2_by_2.gpu())
      self._benchmark_read_variable(m, num_iters=self._num_iters_2_by_2)
  def benchmark_read_variable_op_with_tape_2_by_2_CPU(self):
    with context.device(CPU):
      m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
      self._benchmark_read_variable_with_tape(
          m, num_iters=self._num_iters_2_by_2)
  def benchmark_read_variable_op_with_tape_2_by_2_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = resource_variable_ops.ResourceVariable(self._m_2_by_2.gpu())
      self._benchmark_read_variable_with_tape(
          m, num_iters=self._num_iters_2_by_2)
  def benchmark_keras_model_subclassed(self):
    model = SubclassedKerasModel()
    data = random_ops.random_uniform((10, 10))
    func = lambda: model(data)
    # First call is more expensive (creates variables etc.), discount that.
    func()
    # The whole point of this test is to contrast subclassing with
    # the functional style of keras model building, so validate that
    # the models are equivalent.
    assert np.equal(func(), make_keras_model()(data)).all()
    self._run(func, 30000)
  def benchmark_keras_model_functional(self):
    model = make_keras_model()
    data = random_ops.random_uniform((10, 10))
    func = lambda: model(data)
    # Symmetry with benchmark_keras_model_subclassed
    func()
    assert np.equal(func(), SubclassedKerasModel()(data)).all()
    self._run(func, 30000)
  def benchmark_keras_model_sequential(self):
    model = make_sequential_keras_model()
    data = random_ops.random_uniform((10, 10))
    func = lambda: model(data)
    # Symmetry with benchmark_keras_model_functional
    func()
    assert np.equal(func(), make_keras_model()(data)).all()
    self._run(func, 30000)
  def _benchmark_keras_model_fit(self, model):
    data = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
    labels = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
    dataset = dataset_ops.Dataset.from_tensors((data, labels)).repeat()
    model.compile(
        gradient_descent.GradientDescentOptimizer(learning_rate=0.001),
        loss="mse")
    func = lambda: model.fit(dataset, epochs=1, steps_per_epoch=1000, verbose=0)
    # First call is more expensive (creates variables etc.), discount that.
    model.fit(dataset, epochs=1, steps_per_epoch=1, verbose=0)
    self._run(func, 1)
  def _benchmark_keras_model_evaluate(self, model):
    data = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
    labels = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
    dataset = dataset_ops.Dataset.from_tensors((data, labels)).repeat()
    model.compile(
        gradient_descent.GradientDescentOptimizer(learning_rate=0.001),
        loss="mse")
    func = lambda: model.evaluate(dataset, steps=1000, verbose=0)
    # First call is more expensive (creates variables etc.), discount that.
    model.evaluate(dataset, steps=1, verbose=0)
    self._run(func, 1)
  def _benchmark_keras_model_predict(self, model):
    data = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
    dataset = dataset_ops.Dataset.from_tensors(tuple([data])).repeat()
    model.compile(
        gradient_descent.GradientDescentOptimizer(learning_rate=0.001),
        loss="mse")
    func = lambda: model.predict(dataset, steps=1000, verbose=0)
    # First call is more expensive (creates variables etc.), discount that.
    model.predict(dataset, steps=1, verbose=0)
    self._run(func, 1)
  def benchmark_keras_model_subclassed_fit(self):
    model = SubclassedKerasModel(initializer="glorot_uniform")
    self._benchmark_keras_model_fit(model)
  def benchmark_keras_model_subclassed_fit_graph_mode(self):
    with context.graph_mode():
      model = SubclassedKerasModel(initializer="glorot_uniform")
      self._benchmark_keras_model_fit(model)
  def benchmark_keras_model_subclassed_fit_disable_defun(self):
    model = SubclassedKerasModel(initializer="glorot_uniform")
    model._can_use_graph_functions = False
    self._benchmark_keras_model_fit(model)
  def benchmark_keras_model_functional_fit(self):
    model = make_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_fit(model)
  def benchmark_keras_model_functional_fit_graph_mode(self):
    with context.graph_mode():
      model = make_keras_model(initializer="glorot_uniform")
      self._benchmark_keras_model_fit(model)
  def benchmark_keras_model_functional_fit_disable_defun(self):
    model = make_keras_model(initializer="glorot_uniform")
    model._can_use_graph_functions = False
    self._benchmark_keras_model_fit(model)
  def benchmark_keras_model_sequential_fit(self):
    model = make_sequential_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_fit(model)
  def benchmark_keras_model_sequential_fit_graph_mode(self):
    with context.graph_mode():
      model = make_sequential_keras_model(initializer="glorot_uniform")
      self._benchmark_keras_model_fit(model)
  def benchmark_keras_model_sequential_fit_disable_defun(self):
    model = make_sequential_keras_model(initializer="glorot_uniform")
    model._can_use_graph_functions = False
    self._benchmark_keras_model_fit(model)
  def benchmark_keras_model_subclassed_evaluate(self):
    model = SubclassedKerasModel(initializer="glorot_uniform")
    self._benchmark_keras_model_evaluate(model)
  def benchmark_keras_model_subclassed_evaluate_disable_defun(self):
    model = SubclassedKerasModel(initializer="glorot_uniform")
    model._can_use_graph_functions = False
    self._benchmark_keras_model_evaluate(model)
  def benchmark_keras_model_functional_evaluate(self):
    model = make_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_evaluate(model)
  def benchmark_keras_model_functional_evaluate_disable_defun(self):
    model = make_keras_model(initializer="glorot_uniform")
    model._can_use_graph_functions = False
    self._benchmark_keras_model_evaluate(model)
  def benchmark_keras_model_sequential_evaluate(self):
    model = make_sequential_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_evaluate(model)
  def benchmark_keras_model_sequential_evaluate_disable_defun(self):
    model = make_sequential_keras_model(initializer="glorot_uniform")
    model._can_use_graph_functions = False
    self._benchmark_keras_model_evaluate(model)
  def benchmark_keras_model_subclassed_predict(self):
    model = SubclassedKerasModel(initializer="glorot_uniform")
    self._benchmark_keras_model_predict(model)
  def benchmark_keras_model_subclassed_predict_disable_defun(self):
    model = SubclassedKerasModel(initializer="glorot_uniform")
    model._can_use_graph_functions = False
    self._benchmark_keras_model_predict(model)
  def benchmark_keras_model_functional_predict(self):
    model = make_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_predict(model)
  def benchmark_keras_model_functional_predict_disable_defun(self):
    model = make_keras_model(initializer="glorot_uniform")
    model._can_use_graph_functions = False
    self._benchmark_keras_model_predict(model)
  def benchmark_keras_model_sequential_predict(self):
    model = make_sequential_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_predict(model)
  def benchmark_keras_model_sequential_predict_disable_defun(self):
    model = make_sequential_keras_model(initializer="glorot_uniform")
    model._can_use_graph_functions = False
    self._benchmark_keras_model_predict(model)
  def benchmarkScan(self):
    elems = math_ops.range(1600)
    def scan():
      return functional_ops.scan(
          lambda a, x: a + x, elems, parallel_iterations=1)
    self._run(scan, 100)
  def benchmarkScanDefun(self):
    elems = math_ops.range(1600)
    @function.defun
    def scan():
      return functional_ops.scan(
          lambda a, x: a + x, elems, parallel_iterations=1)
    self._run(scan, 100)
if __name__ == "__main__":
  # Entry point: delegates to the TF eager test/benchmark runner; pass
  # --benchmarks=<regex> (see module docstring) to select benchmarks.
  test.main()
| tensorflow/python/eager/benchmarks_test.py | 31,277 | Benchmark overheads of creating a Tensor object.
Benchmarks for low-level eager execution primitives.
To run CPU benchmarks:
bazel run -c opt benchmarks_test -- --benchmarks=.
To run GPU benchmarks:
bazel run --config=cuda -c opt --copt="-mavx" benchmarks_test -- \
--benchmarks=.
Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== pylint: disable=redefined-builtin pylint: disable=unused-import used for multiply benchmarks used for matmul benchmarks call func to maybe warm up the GPU Warmup the GPU int32's are kept on host memory even when executing on GPU. int32's are kept on host memory even when executing on GPU. pylint: disable=protected-access pylint: enable=protected-access Benchmarks for A^2, A of dimension 2 by 2. Benchmarks for AA.T, A of dimension 100 by 784. First call is more expensive (creates variables etc.), discount that. The whole point of this test is to contrast subclassing with the functional style of keras model building, so validate that the models are equivalent. Symmetry with benchmark_keras_model_subclassed Symmetry with benchmark_keras_model_functional First call is more expensive (creates variables etc.), discount that. First call is more expensive (creates variables etc.), discount that. First call is more expensive (creates variables etc.), discount that. | 1,924 | en | 0.810139 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# =============================================================================
# title : magicblueshell.py
# description : Python tool to control Magic Blue bulbs over Bluetooth
# author : Benjamin Piouffle
# date : 23/11/2015
# usage : python magicblue.py
# python_version : 3.4
# =============================================================================
import argparse
import logging
import os
import sys
from sys import platform as _platform
import webcolors
from bluepy.btle import Scanner, DefaultDelegate
try:
from magicblue.magicbluelib import MagicBlue, Effect
from magicblue import __version__
except ImportError:
from magicbluelib import MagicBlue, Effect
from __init__ import __version__
logger = logging.getLogger(__name__)
class MagicBlueShell:
class Cmd:
def __init__(self, cmd_str, func, conn_required, help='', params=None,
aliases=None):
self.cmd_str = cmd_str
self.func = func
self.conn_required = conn_required
self.help = help
self.params = params or []
self.aliases = aliases or []
def __init__(self, bluetooth_adapter, bulb_version=7):
# List available commands and their usage. 'con_required' define if
# we need to be connected to a device for the command to run
self.available_cmds = [
MagicBlueShell.Cmd('help', self.list_commands, False,
help='Show this help'),
MagicBlueShell.Cmd('list_devices', self.cmd_list_devices, False,
help='List Bluetooth LE devices in range',
aliases=['ls']),
MagicBlueShell.Cmd('list_effects', self.cmd_list_effects, False,
help='List available effects',),
MagicBlueShell.Cmd('connect', self.cmd_connect, False,
help='Connect to light bulb',
params=['mac_address or ID']),
MagicBlueShell.Cmd('disconnect', self.cmd_disconnect, True,
help='Disconnect from current light bulb'),
MagicBlueShell.Cmd('set_color', self.cmd_set_color, True,
help="Change bulb's color",
params=['name or hexadecimal value']),
MagicBlueShell.Cmd('set_warm_light', self.cmd_set_warm_light, True,
help='Set warm light',
params=['intensity[0.0-1.0]']),
MagicBlueShell.Cmd('set_effect', self.cmd_set_effect, True,
help='Set an effect',
params=['effect_name', 'speed[1-20]']),
MagicBlueShell.Cmd('turn', self.cmd_turn, True,
help='Turn on / off the bulb',
params=['on|off']),
MagicBlueShell.Cmd('read', self.cmd_read, True,
help='Read device_info/datetime from the bulb',
params=['name|device_info|date_time']),
MagicBlueShell.Cmd('exit', self.cmd_exit, False,
help='Exit the script')
]
self.bluetooth_adapter = bluetooth_adapter
self._bulb_version = bulb_version
self._magic_blue = None
self._devices = []
self.last_scan = None
def start_interactive_mode(self):
print('Magic Blue interactive shell v{}'.format(__version__))
print('Type "help" for a list of available commands')
str_cmd = ''
while str_cmd != 'exit':
try:
str_cmd = input('> ').strip()
if str_cmd:
self.exec_cmd(str_cmd)
except (EOFError, KeyboardInterrupt): # Catch Ctrl+D / Ctrl+C
self.cmd_exit()
return
except Exception as e:
logger.error('Unexpected error with command "{}": {}'
.format(str_cmd, str(e)))
def exec_cmd(self, str_cmd):
cmd = self._get_command(str_cmd)
if cmd is not None:
if cmd.conn_required and not (self._magic_blue and
self._magic_blue.is_connected()):
logger.error('You must be connected to run this command')
elif self._check_args(str_cmd, cmd):
cmd.func(str_cmd.split()[1:])
else:
logger.error('"{}" is not a valid command.'
'Type "help" to see what you can do'
.format(str_cmd.split()[0]))
def print_usage(self, str_cmd):
cmd = self._get_command(str_cmd)
if cmd is not None:
print('Usage: {} {}'.format(cmd.cmd_str, ' '.join(cmd.params)))
else:
logger.error('Unknown command {}'.format(str_cmd))
return False
def cmd_list_devices(self, *args):
scan_time = 300
try:
self.last_scan = ScanDelegate()
scanner = Scanner().withDelegate(self.last_scan)
print('Listing Bluetooth LE devices in range for {} seconds. '
'Press CTRL+C to abort searching.'.format(scan_time))
print('{: <5} {: <30} {: <12}'.format('ID', 'Name', 'Mac address'))
print('{: <5} {: <30} {: <12}'.format('--', '----', '-----------'))
scanner.scan(scan_time)
except KeyboardInterrupt:
print('\n')
except RuntimeError as e:
logger.error('Problem with the Bluetooth adapter : {}'.format(e))
return False
def cmd_list_effects(self, *args):
for e in Effect.__members__.keys():
print(e)
    def cmd_connect(self, *args):
        """Connect to a bulb, given either a scan-result ID or a MAC address.

        args[0][0] is treated as a device index from the last
        ``list_devices`` scan when it is short (< 4 chars) and a scan has
        been done; otherwise it is used verbatim as a MAC address.
        """
        # User can enter either a mac address or the device ID from the list
        if len(args[0][0]) < 4 and self.last_scan:
            try:
                # Displayed IDs are 1-based; the device list is 0-based.
                dev_id = int(args[0][0]) - 1
                entry = self.last_scan.devices[dev_id]
                mac_address = entry.addr
                addr_type = entry.addrType
            except Exception:
                logger.error('Bad ID / MAC address : {}'.format(args[0][0]))
                return False
        else:
            addr_type = None
            mac_address = args[0][0]
        self._magic_blue = MagicBlue(mac_address,
                                     version=self._bulb_version,
                                     addr_type=addr_type)
        self._magic_blue.connect(self.bluetooth_adapter)
        logger.info('Connected')
    def cmd_disconnect(self, *args):
        """Disconnect from the current bulb and drop the handle."""
        self._magic_blue.disconnect()
        self._magic_blue = None
def cmd_turn(self, *args):
if args[0][0] == 'on':
self._magic_blue.turn_on()
else:
self._magic_blue.turn_off()
def cmd_read(self, *args):
if args[0][0] == 'name':
name = self._magic_blue.get_device_name()
logger.info('Received name: {}'.format(name))
elif args[0][0] == 'device_info':
device_info = self._magic_blue.get_device_info()
logger.info('Received device_info: {}'.format(device_info))
elif args[0][0] == 'date_time':
datetime_ = self._magic_blue.get_date_time()
logger.info('Received datetime: {}'.format(datetime_))
def cmd_set_color(self, *args):
color = args[0][0]
try:
if color.startswith('#'):
self._magic_blue.set_color(webcolors.hex_to_rgb(color))
else:
self._magic_blue.set_color(webcolors.name_to_rgb(color))
except ValueError as e:
logger.error('Invalid color value : {}'.format(str(e)))
self.print_usage('set_color')
def cmd_set_warm_light(self, *args):
try:
self._magic_blue.set_warm_light(float(args[0][0]))
except ValueError as e:
logger.error('Invalid intensity value : {}'.format(str(e)))
self.print_usage('set_color')
def cmd_set_effect(self, *args):
try:
[effect, speed] = args[0]
effect = Effect[effect]
speed = int(speed)
except KeyError as key:
logger.error('Unknown effect {}'.format(key))
except ValueError:
self.print_usage('set_effect')
else:
self._magic_blue.set_effect(effect, speed)
def list_commands(self, *args):
print(' ----------------------------')
print('| List of available commands |')
print(' ----------------------------')
print('{: <16}{: <30}{}'.format('COMMAND', 'PARAMETERS', 'DETAILS'))
print('{: <16}{: <30}{}'.format('-------', '----------', '-------'))
for command in self.available_cmds:
print('{: <16}{: <30}{}'.format(
command.cmd_str, ' '.join(command.params), command.help))
for alias in command.aliases:
print('{: <16}{: <30}{}'.format(alias, '//', '//'))
    def cmd_exit(self, *args):
        """Say goodbye; the interactive loop terminates after this command."""
        print('Bye !')
def _check_args(self, str_cmd, cmd):
expected_nb_args = len(cmd.params)
args = str_cmd.split()[1:]
if len(args) != expected_nb_args:
self.print_usage(str_cmd.split()[0])
return False
return True
def _get_command(self, str_cmd):
str_cmd = str_cmd.split()[0]
return next((item for item in self.available_cmds
if item.cmd_str == str_cmd or str_cmd in item.aliases
), None)
class ScanDelegate(DefaultDelegate):
    """bluepy scan delegate that records discovered devices and prints
    a table row for each new one."""

    def __init__(self):
        DefaultDelegate.__init__(self)
        self.devices = []  # discovery order defines the displayed 1-based ID

    def handleDiscovery(self, dev, is_new_device, is_new_data):
        """Track *dev* on first sight and print its scan-table row."""
        if not is_new_device:
            return
        self.devices.append(dev)
        raw_name = dev.getValueText(9)  # AD type 9: Complete Local Name
        display_name = raw_name.split('\x00')[0] if raw_name else "NO_NAME"
        print('{: <5} {: <30} {: <12}'.format(len(self.devices),
                                              display_name,
                                              dev.addr))
def get_params():
    """Build the command-line parser and return the parsed arguments.

    Returns an argparse.Namespace with: list_commands, command,
    mac_address, bluetooth_adapter (default 'hci0') and bulb_version
    (int, default 7).
    """
    # Bug fix: the two description fragments previously concatenated
    # without a space ("MagicBlue bulbs").
    parser = argparse.ArgumentParser(description='Python tool to control Magic '
                                                 'Blue bulbs over Bluetooth')
    parser.add_argument('-l', '--list_commands',
                        dest='list_commands',
                        help='List available commands',
                        action='store_true')
    parser.add_argument('-c', '--command',
                        dest='command',
                        help='Command to execute')
    parser.add_argument('-m', '--mac_address',
                        dest='mac_address',
                        help='Device mac address. Must be set if command given'
                             ' in -c needs you to be connected')
    parser.add_argument('-a', '--bluetooth_adapter',
                        default='hci0',
                        dest='bluetooth_adapter',
                        help='Bluetooth adapter name as listed by hciconfig')
    parser.add_argument('-b', '--bulb-version',
                        default=7,
                        dest='bulb_version',
                        type=int,
                        help='Bulb version as displayed in the official app')
    return parser.parse_args()
def main():
    """Entry point: run a single command, or start the interactive shell.

    Returns a process exit code (0 on success, 1 when not run as root).
    """
    params = get_params()

    # Raw Bluetooth access on Linux requires root privileges.
    if _platform in ("linux", "linux2") and os.geteuid() != 0:
        logger.error("Script must be run as root")
        return 1

    shell = MagicBlueShell(params.bluetooth_adapter, params.bulb_version)
    if params.list_commands:
        shell.list_commands()
    elif params.command:
        # One-shot mode: keep logging quiet, optionally connect first.
        logging.basicConfig(level=logging.WARNING)
        if params.mac_address:
            shell.cmd_connect([params.mac_address])
        shell.exec_cmd(params.command)
    else:
        logging.basicConfig(level=logging.INFO)
        shell.start_interactive_mode()
    return 0
if __name__ == '__main__':
sys.exit(main())
| magicblue/magicblueshell.py | 12,254 | !/usr/bin/env python -*- coding: UTF-8 -*- ============================================================================= title : magicblueshell.py description : Python tool to control Magic Blue bulbs over Bluetooth author : Benjamin Piouffle date : 23/11/2015 usage : python magicblue.py python_version : 3.4 ============================================================================= List available commands and their usage. 'con_required' define if we need to be connected to a device for the command to run Catch Ctrl+D / Ctrl+C Use can enter either a mac address or the device ID from the list Exit if not root | 662 | en | 0.558969 |
from pyopenproject.business.root_service import RootService
from pyopenproject.business.services.command.root.find import Find
class RootServiceImpl(RootService):
    """Concrete RootService that executes API commands over a connection."""

    def __init__(self, connection):
        """Constructor for class RootServiceImpl, from RootService.

        :param connection: The connection data
        """
        super().__init__(connection)

    def find(self):
        """Run the Find command against the API root and return its result."""
        return Find(self.connection).execute()
| pyopenproject/business/services/root_service_impl.py | 434 | Constructor for class RootServiceImpl, from RootService
:param connection: The connection data | 95 | en | 0.650209 |
import os
import shutil
from datetime import timedelta
from django.contrib.admin.sites import AdminSite
from django.core.files.uploadedfile import SimpleUploadedFile
from django.contrib.auth.models import User
from django.utils import timezone
from allauth.account.models import EmailAddress
from rest_framework.test import APITestCase, APIClient
from challenges.models import Challenge, ChallengePhase
from hosts.models import ChallengeHostTeam
from jobs.models import Submission
from jobs.admin import SubmissionAdmin
from participants.models import ParticipantTeam, Participant
class BaseAPITestClass(APITestCase):
    """Shared fixture: two users, host/participant teams, a challenge with
    one phase, and a single public submission."""

    def setUp(self):
        """Create the users, teams, challenge, phase and submission."""
        self.client = APIClient(enforce_csrf_checks=True)
        # Challenge-host-side user.
        self.user = User.objects.create(
            username="someuser",
            email="user@test.com",
            password="secret_password",
        )
        EmailAddress.objects.create(
            user=self.user, email="user@test.com", primary=True, verified=True
        )
        # Participant-side user.
        self.user1 = User.objects.create(
            username="someuser1",
            email="user1@test.com",
            password="secret_password1",
        )
        EmailAddress.objects.create(
            user=self.user1,
            email="user1@test.com",
            primary=True,
            verified=True,
        )
        self.challenge_host_team = ChallengeHostTeam.objects.create(
            team_name="Test Challenge Host Team", created_by=self.user
        )
        self.participant_team = ParticipantTeam.objects.create(
            team_name="Participant Team for Challenge", created_by=self.user1
        )
        self.participant = Participant.objects.create(
            user=self.user1,
            status=Participant.SELF,
            team=self.participant_team,
        )
        # Challenge window spans yesterday..tomorrow so it is active.
        self.challenge = Challenge.objects.create(
            title="Test Challenge",
            description="Description for test challenge",
            terms_and_conditions="Terms and conditions for test challenge",
            submission_guidelines="Submission guidelines for test challenge",
            creator=self.challenge_host_team,
            start_date=timezone.now() - timedelta(days=2),
            end_date=timezone.now() + timedelta(days=1),
            published=False,
            enable_forum=True,
            anonymous_leaderboard=False,
        )
        # Uploaded files go to a throwaway MEDIA_ROOT; removed in tearDown.
        try:
            os.makedirs("/tmp/evalai")
        except OSError:
            # Directory may already exist from a previous run.
            pass
        with self.settings(MEDIA_ROOT="/tmp/evalai"):
            self.challenge_phase = ChallengePhase.objects.create(
                name="Challenge Phase",
                description="Description for Challenge Phase",
                leaderboard_public=False,
                is_public=False,
                start_date=timezone.now() - timedelta(days=2),
                end_date=timezone.now() + timedelta(days=1),
                challenge=self.challenge,
                test_annotation=SimpleUploadedFile(
                    "test_sample_file.txt",
                    b"Dummy file content",
                    content_type="text/plain",
                ),
            )
        self.submission = Submission.objects.create(
            participant_team=self.participant_team,
            challenge_phase=self.challenge_phase,
            created_by=self.challenge_host_team.created_by,
            status="submitted",
            input_file=self.challenge_phase.test_annotation,
            method_name="Test Method",
            method_description="Test Description",
            project_url="http://testserver/",
            publication_url="http://testserver/",
            is_public=True,
        )
        self.client.force_authenticate(user=self.user)

    def tearDown(self):
        """Remove the temporary media directory created in setUp."""
        shutil.rmtree("/tmp/evalai")
class MockRequest:
    """Minimal stand-in for an HTTP request passed to admin actions."""


# Shared module-level request used by the admin-action tests below.
request = MockRequest()
class SubmissionAdminTest(BaseAPITestClass):
    """
    Test case for re-running submissions from admin
    """

    def setUp(self):
        """Extend the base fixture with a SubmissionAdmin instance."""
        super(SubmissionAdminTest, self).setUp()
        self.app_admin = SubmissionAdmin(Submission, AdminSite())

    def test_submit_job_to_worker(self):
        """Re-running a finished submission marks it 'submitted' again."""
        Submission.objects.filter(status=self.submission.status).update(
            status="finished"
        )
        queryset = Submission.objects.filter(status="finished")
        self.app_admin.submit_job_to_worker(request, queryset)
        self.assertEqual(
            Submission.objects.filter(status="submitted").count(), 1
        )

    def test_make_submission_public(self):
        """The admin action flips private submissions to public."""
        # make all submissions private before test
        Submission.objects.filter(is_public=self.submission.is_public).update(
            is_public=False
        )
        queryset = Submission.objects.filter(is_public=False)
        self.app_admin.make_submission_public(request, queryset)
        self.assertEqual(Submission.objects.filter(is_public=True).count(), 1)

    def test_make_submission_private(self):
        """The admin action flips public submissions to private."""
        # make all submissions public before test
        Submission.objects.filter(is_public=False).update(
            is_public=True
        )
        queryset = Submission.objects.filter(is_public=True)
        self.app_admin.make_submission_private(request, queryset)
        self.assertEqual(Submission.objects.filter(is_public=False).count(), 1)
| tests/unit/jobs/test_admin.py | 5,320 | Test case for re-running submissions from admin
make all submissions private before test make all submissions public before test | 130 | en | 0.895599 |
import geohash
import redis
from addok.config import config
from addok.db import DB
from addok.ds import get_document
from . import iter_pipe, keys, yielder
VALUE_SEPARATOR = '|~|'
# Memoization cache: input string -> list of processed tokens.
_CACHE = {}


def preprocess(s):
    """Run *s* through the configured processor pipe, memoizing results."""
    try:
        return _CACHE[s]
    except KeyError:
        _CACHE[s] = list(iter_pipe(s, config.PROCESSORS))
        return _CACHE[s]
def token_key_frequency(key):
    """Return how many document entries the Redis sorted set *key* holds."""
    return DB.zcard(key)
def token_frequency(token):
    """Return the document frequency of *token* in the index."""
    return token_key_frequency(keys.token_key(token))
def extract_tokens(tokens, string, boost):
    """Tokenize *string* and merge its tokens into the *tokens* dict.

    The boost is divided among the string's tokens; a token already
    present keeps its highest boost.
    """
    parts = preprocess(string)
    if not parts:
        return
    per_token = config.DEFAULT_BOOST / len(parts) * boost
    for token in parts:
        if per_token > tokens.get(token, 0):
            tokens[token] = per_token
def index_tokens(pipe, tokens, key, **kwargs):
    """Queue one ZADD per token on *pipe*, scoring *key* with its boost."""
    for token, boost in tokens.items():
        pipe.zadd(keys.token_key(token), mapping={key: boost})
def deindex_field(key, string):
    """Remove *key* from every token index derived from *string*.

    Returns the list of tokens that were removed.
    """
    tokens = list(preprocess(string))
    for token in tokens:
        deindex_token(key, token)
    return tokens
def deindex_token(key, token):
    """Remove *key* from the sorted set of documents holding *token*."""
    tkey = keys.token_key(token)
    DB.zrem(tkey, key)
def index_documents(docs):
    """Generator: index/deindex each document in *docs*, yielding it on.

    All Redis write commands are queued on one non-transactional pipeline
    and executed once, after the whole batch has been consumed.
    """
    pipe = DB.pipeline(transaction=False)
    for doc in docs:
        if not doc:
            continue
        if doc.get('_action') in ['delete', 'update']:
            # Remove the previously stored version of the document, if any.
            key = keys.document_key(doc['_id']).encode()
            known_doc = get_document(key)
            if known_doc:
                deindex_document(known_doc)
        if doc.get('_action') in ['index', 'update', None]:
            index_document(pipe, doc)
        yield doc
    try:
        pipe.execute()
    except redis.RedisError as e:
        # NOTE(review): `doc` is the last document seen, not necessarily
        # the one that caused the failure (the pipeline runs as a batch).
        msg = 'Error while importing document:\n{}\n{}'.format(doc, str(e))
        raise ValueError(msg)
def index_document(pipe, doc, **kwargs):
    """Run every configured indexer on *doc*, aborting on a ValueError
    (raised when a mandatory field is missing)."""
    key = keys.document_key(doc['_id'])
    tokens = {}
    for indexer in config.INDEXERS:
        try:
            indexer.index(pipe, key, doc, tokens, **kwargs)
        except ValueError as err:
            print(err)
            # Skip this document entirely; remaining indexers do not run.
            return
def deindex_document(doc, **kwargs):
    """Run every configured indexer's deindex step for *doc*."""
    key = keys.document_key(doc['_id'])
    tokens = []
    for indexer in config.INDEXERS:
        indexer.deindex(DB, key, doc, tokens, **kwargs)
def index_geohash(pipe, key, lat, lon):
    """Queue adding *key* to the set of the geohash cell covering (lat, lon)."""
    cell = geohash.encode(float(lat), float(lon), config.GEOHASH_PRECISION)
    pipe.sadd(keys.geohash_key(cell), key)
def deindex_geohash(key, lat, lon):
    """Remove *key* from the set of the geohash cell covering (lat, lon)."""
    cell = geohash.encode(float(lat), float(lon), config.GEOHASH_PRECISION)
    DB.srem(keys.geohash_key(cell), key)
class FieldsIndexer:
    """Indexer that turns configured document fields into weighted tokens."""

    @staticmethod
    def index(pipe, key, doc, tokens, **kwargs):
        """Collect boosted tokens from every configured field, then write
        them once via index_tokens.

        *tokens* is the shared per-document dict; housenumber fields are
        skipped here (they are handled by their own indexer).
        Raises ValueError when a non-nullable field is missing.
        """
        importance = (float(doc.get('importance', 0.0))
                      * config.IMPORTANCE_WEIGHT)
        for field in config.FIELDS:
            name = field['key']
            values = doc.get(name)
            if not values:
                if not field.get('null', True):
                    # A mandatory field is null.
                    raise ValueError('{} must not be null'.format(name))
                continue
            if name != config.HOUSENUMBERS_FIELD:
                # Boost may be a static number or a callable taking the doc.
                boost = field.get('boost', config.DEFAULT_BOOST)
                if callable(boost):
                    boost = boost(doc)
                boost = boost + importance
                if not isinstance(values, (list, tuple)):
                    values = [values]
                for value in values:
                    extract_tokens(tokens, str(value), boost=boost)
        index_tokens(pipe, tokens, key, **kwargs)

    @staticmethod
    def deindex(db, key, doc, tokens, **kwargs):
        """Remove *key* from the token index of every non-housenumber field,
        appending the removed tokens to the shared *tokens* list."""
        for field in config.FIELDS:
            name = field['key']
            if name == config.HOUSENUMBERS_FIELD:
                continue
            values = doc.get(name)
            if values:
                if not isinstance(values, (list, tuple)):
                    values = [values]
                for value in values:
                    tokens.extend(deindex_field(key, value))
class GeohashIndexer:
    """Indexer storing each document key in its geohash cell set."""

    @staticmethod
    def index(pipe, key, doc, tokens, **kwargs):
        """Add *key* to the cell covering the document's position."""
        index_geohash(pipe, key, doc['lat'], doc['lon'])

    @staticmethod
    def deindex(db, key, doc, tokens, **kwargs):
        """Remove *key* from the cell covering the document's position."""
        deindex_geohash(key, doc['lat'], doc['lon'])
class HousenumbersIndexer:
    """Indexer registering one geohash entry per housenumber position."""

    @staticmethod
    def index(pipe, key, doc, tokens, **kwargs):
        """Add *key* to the cell of every housenumber position."""
        for data in doc.get('housenumbers', {}).values():
            index_geohash(pipe, key, data['lat'], data['lon'])

    @staticmethod
    def deindex(db, key, doc, tokens, **kwargs):
        """Remove *key* from the cell of every housenumber position."""
        for data in doc.get('housenumbers', {}).values():
            deindex_geohash(key, data['lat'], data['lon'])
class FiltersIndexer:
    """Indexer maintaining one Redis set per (filter name, value) pair."""

    @staticmethod
    def index(pipe, key, doc, tokens, **kwargs):
        """Queue SADDs registering *key* under each filter value of *doc*."""
        for name in config.FILTERS:
            values = doc.get(name)
            if not values:
                continue
            if not isinstance(values, (list, tuple)):
                values = [values]
            for value in values:
                pipe.sadd(keys.filter_key(name, value), key)
        # "housenumber" is a synthetic type derived from the housenumbers
        # field rather than stored as a real document type.
        if "type" in config.FILTERS and config.HOUSENUMBERS_FIELD \
                and doc.get(config.HOUSENUMBERS_FIELD):
            pipe.sadd(keys.filter_key("type", "housenumber"), key)

    @staticmethod
    def deindex(db, key, doc, tokens, **kwargs):
        """Remove *key* from each filter set it was registered under."""
        for name in config.FILTERS:
            values = doc.get(name)
            if not values:
                continue
            if not isinstance(values, (list, tuple)):
                values = [values]
            for value in values:
                db.srem(keys.filter_key(name, value), key)
        if "type" in config.FILTERS:
            db.srem(keys.filter_key("type", "housenumber"), key)
@yielder
def prepare_housenumbers(doc):
    """Rekey the housenumbers mapping by its tokenized form.

    Search matches housenumbers against tokenized user input (see
    results.match_housenumber), so the stored keys must be tokenized the
    same way; the original value is kept under data['raw'].
    """
    if not doc:
        return
    housenumbers = doc.get(config.HOUSENUMBERS_FIELD)
    if housenumbers:
        tokenized = {}
        for number, data in housenumbers.items():
            data['raw'] = number
            # A housenumber may tokenize to several parts (eg. "dix huit").
            tokenized[''.join(preprocess(number))] = data
        doc['housenumbers'] = tokenized
    return doc
| addok/helpers/index.py | 6,537 | Do not index. A mandatory field is null. Special case for housenumber type, because it's not a real type We need to have the housenumbers tokenized in the document, to match from user query (see results.match_housenumber). Housenumber may have multiple tokens (eg.: "dix huit"). | 278 | en | 0.850044 |
import pandas as pd
import numpy as np
from pandas.util.testing import rands
# Candidate group labels: integers 0-9 and the matching single characters.
groups = np.arange(10)
str_groups = np.array(list("0123456789"))
# Fixed seed so the generated benchmark files are reproducible.
np.random.seed(1)
# One CSV per size: integer group, normal-distributed value, string group.
for size in [1e2, 1e3, 1e4, 1e5, 1e6]:
    size = int(size)
    g = np.random.choice(groups, size)
    sg = np.random.choice(str_groups, size)
    v = np.random.randn(size)
    df = pd.DataFrame({"groups": g, "values": v, "str": sg})
    df.to_csv(f"../data/{size}.csv", index=False)
print("data created")
# Join benchmark data
# https://wesmckinney.com/blog/high-performance-database-joins-with-pandas-dataframe-more-benchmarks/
# https://github.com/wesm/pandas/blob/23669822819808bbaeb6ea36a6b2ef98026884db/bench/bench_merge_sqlite.py
N = 10000
# NOTE(review): pandas.util.testing.rands is deprecated/removed in recent
# pandas releases — confirm the pinned pandas version still provides it.
indices = np.array([rands(10) for _ in range(N)], dtype="O")
indices2 = np.array([rands(10) for _ in range(N)], dtype="O")
# Left table: 80k rows reusing the first 8k key pairs ten times each;
# right table: 8k rows overlapping the left keys from index 2000 on.
key = np.tile(indices[:8000], 10)
key2 = np.tile(indices2[:8000], 10)
left = pd.DataFrame({"key": key, "key2": key2, "value": np.random.randn(80000)})
right = pd.DataFrame(
    {"key": indices[2000:], "key2": indices2[2000:], "value2": np.random.randn(8000)}
)
left.to_csv("../data/join_left_80000.csv", index=False)
right.to_csv("../data/join_right_80000.csv", index=False)
| pandas_cmp/create_data.py | 1,212 | Join benchmark data https://wesmckinney.com/blog/high-performance-database-joins-with-pandas-dataframe-more-benchmarks/ https://github.com/wesm/pandas/blob/23669822819808bbaeb6ea36a6b2ef98026884db/bench/bench_merge_sqlite.py | 224 | en | 0.530329 |
# Config
# NOTE: this file is a template — the ${...} placeholders are substituted
# by the deployment tooling before it is used as a Python module.
NODE_ID = ${NODE_ID}
# Speedtest interval in hours; set 0 to disable
SPEEDTEST = ${SPEEDTEST}
CLOUDSAFE = ${CLOUDSAFE}
ANTISSATTACK = ${ANTISSATTACK}
AUTOEXEC = ${AUTOEXEC}
MU_SUFFIX = "${MU_SUFFIX}"
MU_REGEX = "${MU_REGEX}"
SERVER_PUB_ADDR = "127.0.0.1"  # mujson_mgr need this to generate ssr link
API_INTERFACE = "${API_INTERFACE}"  # one of: glzjinmod, modwebapi
WEBAPI_URL = "${WEBAPI_URL}"
WEBAPI_TOKEN = "${WEBAPI_TOKEN}"
# mudb: local JSON user database (used with the mudbjson interface)
MUDB_FILE = 'mudb.json'
# Mysql connection settings (used with the glzjinmod interface)
MYSQL_HOST = "${MYSQL_HOST}"
MYSQL_PORT = ${MYSQL_PORT}
MYSQL_USER = "${MYSQL_USER}"
MYSQL_PASS = "${MYSQL_PASS}"
MYSQL_DB = "${MYSQL_DB}"
# TLS for the MySQL connection; disabled by default
MYSQL_SSL_ENABLE = 0
MYSQL_SSL_CA = ''
MYSQL_SSL_CERT = ''
MYSQL_SSL_KEY = ''
# API server settings
API_HOST = '127.0.0.1'
API_PORT = 80
API_PATH = '/mu/v2/'
API_TOKEN = 'abcdef'
# Seconds between API polls
API_UPDATE_TIME = 60
# Manager (ignore this)
MANAGE_PASS = 'ss233333333'
# if you want manage in other server you should set this value to global ip
MANAGE_BIND_IP = '127.0.0.1'
# make sure this port is idle
MANAGE_PORT = 23333
| apiconfig.py | 984 | Config hour,set 0 to disable mujson_mgr need this to generate ssr link glzjinmod, modwebapi mudb Mysql API Manager (ignore this) if you want manage in other server you should set this value to global ip make sure this port is idle | 230 | en | 0.683155 |
# Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from .result import ExperimentResult
from .result import CVExperimentResult
from ..metrics.rating import RatingMetric
from ..metrics.ranking import RankingMetric
from ..models.recommender import Recommender
class Experiment:
    """Evaluate a set of recommender models against a set of metrics.

    Parameters
    ----------
    eval_method: :obj:`<cornac.eval_methods.BaseMethod>`, required
        The evaluation method (e.g., RatioSplit).

    models: array of :obj:`<cornac.models.Recommender>`, required
        Recommender models to evaluate; entries that are not Recommender
        instances are silently dropped.

    metrics: array of :obj:{`<cornac.metrics.RatingMetric>`, `<cornac.metrics.RankingMetric>`}, required
        Metrics used for evaluation; entries of any other type are
        silently dropped.

    user_based: bool, optional, default: True
        For rating metrics: average per-user performance first (True), or
        average over the number of ratings (False).

    result: array of :obj:`<cornac.experiment.result.Result>`, default: None
        Per-model results, populated by run().
    """

    def __init__(self, eval_method, models, metrics, user_based=True, verbose=False):
        self.eval_method = eval_method
        self.models = self._validate_models(models)
        self.metrics = self._validate_metrics(metrics)
        self.user_based = user_based
        self.verbose = verbose
        self.result = None

    @staticmethod
    def _validate_models(input_models):
        """Keep only Recommender instances; reject non-sequence input."""
        if not hasattr(input_models, "__len__"):
            raise ValueError('models have to be an array but {}'.format(type(input_models)))
        return [model for model in input_models
                if isinstance(model, Recommender)]

    @staticmethod
    def _validate_metrics(input_metrics):
        """Keep only rating/ranking metrics; reject non-sequence input."""
        if not hasattr(input_metrics, "__len__"):
            raise ValueError('metrics have to be an array but {}'.format(type(input_metrics)))
        return [metric for metric in input_metrics
                if isinstance(metric, (RatingMetric, RankingMetric))]

    def _create_result(self):
        """Pick the result container matching the evaluation method."""
        # Imported lazily to avoid a circular import with eval_methods.
        from ..eval_methods.cross_validation import CrossValidation

        if isinstance(self.eval_method, CrossValidation):
            self.result = CVExperimentResult()
        else:
            self.result = ExperimentResult()

    def run(self):
        """Evaluate every model, collect the results, and print them."""
        self._create_result()
        for model in self.models:
            model_result = self.eval_method.evaluate(
                model=model,
                metrics=self.metrics,
                user_based=self.user_based)
            self.result.append(model_result)
        print('\n{}'.format(self.result))
| cornac/experiment/experiment.py | 3,776 | Experiment Class
Parameters
----------
eval_method: :obj:`<cornac.eval_methods.BaseMethod>`, required
The evaluation method (e.g., RatioSplit).
models: array of :obj:`<cornac.models.Recommender>`, required
A collection of recommender models to evaluate, e.g., [C2PF, HPF, PMF].
metrics: array of :obj:{`<cornac.metrics.RatingMetric>`, `<cornac.metrics.RankingMetric>`}, required
A collection of metrics to use to evaluate the recommender models, e.g., [NDCG, MRR, Recall].
user_based: bool, optional, default: True
This parameter is only useful if you are considering rating metrics. When True, first the average performance for every user is computed, then the obtained values are averaged to return the final result.
If `False`, results will be averaged over the number of ratings.
result: array of :obj:`<cornac.experiment.result.Result>`, default: None
This attribute contains the results per-model of your experiment, initially it is set to None.
Copyright 2018 The Cornac Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================ | 1,653 | en | 0.725323 |
import connexion
from openapi_server.annotator.phi_types import PhiType
from openapi_server.get_annotations import get_annotations
from openapi_server.models.error import Error # noqa: E501
from openapi_server.models.text_id_annotation_request import TextIdAnnotationRequest # noqa: E501
from openapi_server.models.text_id_annotation_response import TextIdAnnotationResponse # noqa: E501
def create_text_id_annotations(text_id_annotation_request=None):  # noqa: E501
    """Annotate IDs in a clinical note

    Return the ID annotations found in a clinical note # noqa: E501

    :param text_id_annotation_request:
    :type text_id_annotation_request: dict | bytes

    :rtype: TextIdAnnotationResponse
    """
    if not connexion.request.is_json:
        # Bug fix: a non-JSON request previously fell through to the
        # return statement with `res`/`status` unbound, raising
        # UnboundLocalError instead of responding.
        status = 400
        res = Error("Bad request", status, "Request body must be JSON")
        return res, status
    try:
        annotation_request = TextIdAnnotationRequest.from_dict(connexion.request.get_json())  # noqa: E501
        note = annotation_request.note
        annotations = get_annotations(note, phi_type=PhiType.ID)
        res = TextIdAnnotationResponse(annotations)
        status = 200
    except Exception as error:
        status = 500
        res = Error("Internal error", status, str(error))
    return res, status
| server/openapi_server/controllers/text_id_annotation_controller.py | 1,214 | Annotate IDs in a clinical note
Return the ID annotations found in a clinical note # noqa: E501
:param text_id_annotation_request:
:type text_id_annotation_request: dict | bytes
:rtype: TextIdAnnotationResponse
noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 | 270 | en | 0.457301 |
#!/usr/bin/env python3
self_description = """
gridradar2influx is a tiny daemon written to fetch data from the gridradar.net-API and
writes it to an InfluxDB instance.
"""
# import standard modules
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import configparser
import logging
import os
import signal
import time
from datetime import datetime
# import 3rd party modules
import requests
import influxdb
#import functions from files
from app_functions import *
from basic_functions import *
from influx import *
__version__ = "0.0.1"
__version_date__ = "2022-02-05"
__description__ = "gridradar2influx"
__license__ = "MIT"


# default vars
running = True  # main-loop flag; expected to be cleared by shutdown()
default_config = os.path.join(os.path.dirname(__file__), 'config.ini')
default_log_level = logging.INFO
def main():
    """Daemon entry point: connect to InfluxDB and the gridradar API,
    then poll both configured requests in an endless loop."""
    # Install handlers so SIGTERM/SIGINT can stop the loop via `running`.
    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)
    # parse command line arguments
    args = parse_args()
    # set logging
    log_level = logging.DEBUG if args.verbose is True else default_log_level
    if args.daemon:
        # omit time stamp if run in daemon mode
        logging.basicConfig(level=log_level, format='%(levelname)s: %(message)s')
    else:
        logging.basicConfig(level=log_level, format='%(asctime)s - %(levelname)s: %(message)s')
    # read config from ini file
    config = read_config(args.config_file)
    # set up influxdb handler
    influxdb_client = None
    try:
        influxdb_client = influxdb.InfluxDBClient(
            config.get('influxdb', 'host'),
            config.getint('influxdb', 'port', fallback=8086),
            config.get('influxdb', 'username'),
            config.get('influxdb', 'password'),
            config.get('influxdb', 'database'),
            config.getboolean('influxdb', 'ssl', fallback=False),
            config.getboolean('influxdb', 'verify_ssl', fallback=False)
        )
        # NOTE(review): these two locals are never used below; reading them
        # here only validates that the options exist in the config file.
        measurement_name=config.get('influxdb', 'measurement_name')
        location=config.get('influxdb', 'location')
        # test more config options and see if they are present
        #_ = config.get('influxdb', 'measurement_name')
    except configparser.Error as e:
        logging.error("Config Error: %s", str(e))
        exit(1)
    except ValueError as e:
        logging.error("Config Error: %s", str(e))
        exit(1)
    # check influx db status
    check_db_status(influxdb_client, config.get('influxdb', 'database'))
    # create authenticated gridradar-api client handler
    api_response = None
    result_dict={}  # NOTE(review): never used in this function
    request_interval = 60
    try:
        request_interval = config.getint('gridradar', 'interval', fallback=60)
        url=config.get('gridradar', 'url')
        token=config.get('gridradar', 'token')
        api_response=getdatafromapi(url,token,{}) # blank request to check, if authentification works
    except configparser.Error as e:
        logging.error("Config Error: %s", str(e))
        exit(1)
    except BaseException as e:
        logging.error("Failed to connect to gridradar-API '%s'" % str(e))
        exit(1)
    # test connection
    # NOTE(review): evaluating the bare name below cannot raise
    # RequestException; any connection error already surfaced in the
    # getdatafromapi() call above, so this handler is effectively dead code.
    try:
        api_response
    except requests.exceptions.RequestException as e:
        if "401" in str(e):
            logging.error("Failed to connect to gridradar-API '%s' using credentials. Check token!" %
                          config.get('gridradar', 'token'))
        if "404" in str(e):
            logging.error("Failed to connect to gridradar-API '%s' using credentials. Check url!" %
                          config.get('gridradar', 'url'))
        else:
            logging.error(str(e))
        exit(1)
    logging.info("Successfully connected to gridradar-API")
    # read services from config file
    ###services_to_query = get_services(config, "service")
    logging.info("Starting main loop - wait until first API-Request '%s' seconds",request_interval)
    while running:
        logging.debug("Starting gridradar-API requests")
        time.sleep(request_interval) # wait, otherwise Exception 429, 'Limitation: maximum number of requests per second exceeded']
        # Frequency request, then pace the next request with short sleeps so
        # a shutdown signal is noticed quickly.
        request=str2dict(config.get('gridradar', 'request_freq'))
        duration=grapi2influx(request,influxdb_client,config)
        # just sleep for interval seconds - last run duration
        for _ in range(0, int(((request_interval * 1000) - duration) / 100)):
            if running is False:
                break
            time.sleep(0.0965)
        # Network-time request, paced the same way.
        request=str2dict(config.get('gridradar', 'request_net_time'))
        duration=grapi2influx(request,influxdb_client,config)
        # just sleep for interval seconds - last run duration
        for _ in range(0, int(((request_interval * 1000) - duration) / 100)):
            if running is False:
                break
            time.sleep(0.0965)
if __name__ == "__main__":
main()
| gridradar2influx.py | 4,874 | !/usr/bin/env python3 import standard modules import 3rd party modulesimport functions from files default vars parse command line arguments set logging omit time stamp if run in daemon mode read config from ini file set up influxdb handler test more config options and see if they are present_ = config.get('influxdb', 'measurement_name') check influx db status create authenticated gridradar-api client handler blank request to check, if authentification works test connection read services from config fileservices_to_query = get_services(config, "service") wait, otherwise Exception 429, 'Limitation: maximum number of requests per second exceeded'] just sleep for interval seconds - last run duration just sleep for interval seconds - last run duration | 756 | en | 0.528271 |
import logging
import odoo.http
from odooku.request import WebRequestMixin
_logger = logging.getLogger(__name__)
class WebSocketRequest(WebRequestMixin, odoo.http.WebRequest):
    """Base Odoo request wrapper for websocket traffic; dispatch is abstract."""

    def __init__(self, httprequest):
        super(WebSocketRequest, self).__init__(httprequest)

    def dispatch(self):
        # Subclasses implement the protocol-specific dispatching.
        raise NotImplementedError()
class WebSocketRpcRequest(WebSocketRequest):
    """JSON-RPC 2.0 request received over a websocket connection."""

    _request_type = 'json'

    def __init__(self, httprequest, data):
        """Extract params, id and context from the decoded JSON-RPC payload."""
        super(WebSocketRpcRequest, self).__init__(httprequest)
        self.params = data.get('params', {})
        self.id = data.get('id')
        # `context` is popped so it is not forwarded as a call argument;
        # falls back to a copy of the session context.
        self.context = self.params.pop('context', dict(self.session.context))

    def dispatch(self):
        """Call the routed endpoint and wrap the outcome as a JSON-RPC
        response (errors included)."""
        try:
            result = self._call_function(**self.params)
        except Exception as exception:
            return self._handle_exception(exception)
        return self._json_response(result)

    def _json_response(self, result=None, error=None):
        """Build a JSON-RPC 2.0 response dict echoing this request's id."""
        response = {
            'jsonrpc': '2.0',
            'id': self.id
        }
        if error is not None:
            response['error'] = error
        if result is not None:
            response['result'] = result
        return response

    def _handle_exception(self, exception):
        """Called within an except block to allow converting exceptions
        to arbitrary responses. Anything returned (except None) will
        be used as response."""
        try:
            # Give the parent a chance to turn the exception into a response.
            return super(WebSocketRpcRequest, self)._handle_exception(exception)
        except Exception:
            # Expected business exceptions are not logged as server errors.
            if not isinstance(exception, (odoo.exceptions.Warning, odoo.http.SessionExpiredException, odoo.exceptions.except_orm)):
                _logger.exception("Exception during JSON request handling.")
            error = {
                'code': 200,
                'message': "Odoo Server Error",
                'data': odoo.http.serialize_exception(exception)
            }
            if isinstance(exception, odoo.http.AuthenticationError):
                error['code'] = 100
                error['message'] = "Odoo Session Invalid"
            if isinstance(exception, odoo.http.SessionExpiredException):
                error['code'] = 100
                error['message'] = "Odoo Session Expired"
            return self._json_response(error=error)
| odooku/services/websocket/requests.py | 2,339 | Called within an except block to allow converting exceptions
to arbitrary responses. Anything returned (except None) will
be used as response. | 142 | en | 0.952747 |
#!/usr/bin/env python3
from itertools import product

if __name__ == "__main__":
    # Read two space-separated integer lists from stdin.
    first = [int(tok) for tok in input().strip().split(' ')]
    second = [int(tok) for tok in input().strip().split(' ')]
    # Print every (a, b) pair of the cartesian product, space separated,
    # with a trailing space and no newline (same output as the old loop).
    print(*product(first, second), sep=' ', end=' ')
| python/itertools-product.py | 274 | !/usr/bin/env python3 | 21 | fr | 0.448822 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from . import views
urlpatterns = [
    # NOTE(review): ``django.conf.urls.url`` was deprecated in Django 2.0
    # and removed in Django 4.0 -- migrate to ``django.urls.re_path``
    # when upgrading.
    # URL pattern for the UserListView
    url(
        regex=r'^$',
        view=views.UserListView.as_view(),
        name='list'
    ),
    # URL pattern for the UserRedirectView
    url(
        regex=r'^~redirect/$',
        view=views.UserRedirectView.as_view(),
        name='redirect'
    ),
    # URL pattern for the UserDetailView
    # (``username`` accepts word characters plus . @ + -)
    url(
        regex=r'^(?P<username>[\w.@+-]+)/$',
        view=views.UserDetailView.as_view(),
        name='detail'
    ),
    # URL pattern for the UserUpdateView
    url(
        regex=r'^~update/$',
        view=views.UserUpdateView.as_view(),
        name='update'
    ),
]
| artinvestor_server/users/urls.py | 781 | -*- coding: utf-8 -*- URL pattern for the UserListView URL pattern for the UserRedirectView URL pattern for the UserDetailView URL pattern for the UserUpdateView | 161 | en | 0.549467 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/networking/requests/messages/get_inventory_message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/networking/requests/messages/get_inventory_message.proto',
package='pogoprotos.networking.requests.messages',
syntax='proto3',
serialized_pb=_b('\nCpogoprotos/networking/requests/messages/get_inventory_message.proto\x12\'pogoprotos.networking.requests.messages\"0\n\x13GetInventoryMessage\x12\x19\n\x11last_timestamp_ms\x18\x01 \x01(\x03\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_GETINVENTORYMESSAGE = _descriptor.Descriptor(
name='GetInventoryMessage',
full_name='pogoprotos.networking.requests.messages.GetInventoryMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='last_timestamp_ms', full_name='pogoprotos.networking.requests.messages.GetInventoryMessage.last_timestamp_ms', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=112,
serialized_end=160,
)
DESCRIPTOR.message_types_by_name['GetInventoryMessage'] = _GETINVENTORYMESSAGE
GetInventoryMessage = _reflection.GeneratedProtocolMessageType('GetInventoryMessage', (_message.Message,), dict(
DESCRIPTOR = _GETINVENTORYMESSAGE,
__module__ = 'pogoprotos.networking.requests.messages.get_inventory_message_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.messages.GetInventoryMessage)
))
_sym_db.RegisterMessage(GetInventoryMessage)
# @@protoc_insertion_point(module_scope)
| pgoapi/protos/pogoprotos/networking/requests/messages/get_inventory_message_pb2.py | 2,365 | Generated by the protocol buffer compiler. DO NOT EDIT! source: pogoprotos/networking/requests/messages/get_inventory_message.proto @@protoc_insertion_point(imports) @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.messages.GetInventoryMessage) @@protoc_insertion_point(module_scope) | 303 | en | 0.439898 |
#!/usr/bin/env python3
# encoding: utf-8
# Copyright 2020 Hnaynag University (Jae-Hong Lee)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import codecs
import json
import logging
import re
import random
from pathlib import Path
from tqdm import tqdm
from nltk import tokenize
from espnet.utils.cli_utils import get_commandline_args
def error_checker(keys, file_path, log_path):
    """Drop keys whose ``<key> <text>`` entry in *file_path* spilled across
    multiple physical lines.

    The file is expected to contain one ``key value`` pair per line.  When a
    line's first token is not a known key, the entry that started on an
    earlier line must have contained embedded newlines, so the key of that
    earlier entry is removed from *keys* (``buffer_key`` prevents removing
    the same key more than once when an entry spans several broken lines).

    NOTE(review): *log_path* is accepted but never used -- presumably the
    skipped keys were meant to be written there; confirm or remove it.

    Args:
        keys: list of valid keys; mutated in place.
        file_path: path of the text file to scan.
        log_path: unused (see note above).

    Returns:
        The same *keys* list, with broken entries removed.
    """
    buffer_key = None
    past_key = None
    total_key_count = len(keys)
    skip_key_count = 0
    with open(file_path, encoding="utf-8") as f:
        for line in tqdm(f.readlines()):
            sps = line.rstrip().split(maxsplit=1)
            if len(sps) == 2:
                key, value = sps
                if key in keys:
                    # Well-formed entry: remember its key.
                    past_key = key
                else:
                    # Continuation of the previous entry: discard that key
                    # (only once per key, hence the buffer_key check).
                    if buffer_key != past_key:
                        keys.remove(past_key)
                        skip_key_count += 1
                    buffer_key = past_key
            else:
                # Lines with fewer than two tokens are ignored.
                pass
    logging.info(f"Skip ratio is {skip_key_count / total_key_count}")
    return keys
def get_parser():
    """Build the command-line argument parser for the json-to-text script."""
    arg_parser = argparse.ArgumentParser(
        description="TTT json to text",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    arg_parser.add_argument("json", type=str, help="json files")
    arg_parser.add_argument("dest", type=str, help="output file path")
    arg_parser.add_argument("prep", type=int, default=False, help="flag of preprocessing")
    arg_parser.add_argument("total_offset", type=int, default=100, help="")
    arg_parser.add_argument("max_snt_len", type=int, default=150, help="")
    arg_parser.add_argument("max_para_len", type=int, default=1600, help="")
    return arg_parser
# Entry point: load the (id, paragraph, sentence) records, optionally clean
# out entries broken by embedded newlines, clip each paragraph to a random
# window around its sentence, and write train/valid text files.
if __name__ == "__main__":
    args = get_parser().parse_args()

    # logging info
    logfmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
    logging.basicConfig(level=logging.INFO, format=logfmt)
    logging.info(get_commandline_args())

    logging.info("reading %s", args.json)
    with codecs.open(args.json, "r", encoding="utf-8") as f:
        j = json.load(f)

    dest = Path(args.dest)

    # Remove the duplicated keys and load the json to the dict
    prep_j = {}
    for line in tqdm(j):
        try:
            prep_j[line['id']] = {'paragraph': line['paragraph'], 'sentence': line['sentence']}
        # NOTE(review): dict assignment silently overwrites duplicate ids, so
        # this bare except only fires when 'id'/'paragraph'/'sentence' is
        # missing -- the duplicate warning below is effectively dead code.
        # Consider an explicit ``if line['id'] in prep_j`` check instead.
        except:
            logging.warning("The key %s is duplicated with the exsisted key", line['id'])

    # Eliminate the error key with python readlines function
    # FIXME(j-ppng): These lines is fixed by python reading error cleaner.
    # However, we needs to more specific text cleaner
    if args.prep:
        keys = [k for k in prep_j.keys()]
        logging.info("writing train_origin to %s", str(dest))
        # NOTE(review): files opened with codecs.open here are never closed;
        # wrap in ``with`` (or close explicitly) to guarantee flushing.
        train_txt = codecs.open(dest / "text_orig", "w", encoding="utf-8")
        for key in tqdm(keys):
            train_txt.write(key + " " + prep_j[key]['paragraph'] + "\n")
        # Drop keys whose paragraph line got split when written to disk.
        keys = error_checker(keys,
                             dest / "text_orig",
                             dest / "error.log")
        logging.info("writing key_file to %s", str(dest))
        key_file = codecs.open(dest / "keys", "w", encoding="utf-8")
        for key in keys:
            key_file.write(key + "\n")
    else:
        # Reuse the key list produced by a previous --prep run.
        keys = []
        with open(dest / "keys", encoding="utf-8") as f:
            for key in f.readlines():
                keys.append(key.replace("\n", ""))

    new_keys = []
    total_offset = args.total_offset
    max_snt_len = args.max_snt_len
    max_para_len = args.max_para_len
    for key in tqdm(keys):
        # find and clipping preprocessing
        # On the first try, we applied these procedures to the middle of the collect_stats process.
        # However, we found that the {feat}_shape file saves the static size of the features,
        # and we can know the features shape error will occur when at the training process.
        idx = prep_j[key]['paragraph'].find(prep_j[key]['sentence'])
        offset = random.randint(0, total_offset)
        sent_len = len(prep_j[key]['sentence'])
        # calculate the offset for the clip with the centroid which sentence in the paragraph.
        prior_offset = max(idx - offset, 0)
        post_offset = idx + sent_len + (total_offset - offset)
        # clip the new paragraph area in the paragraph with the offsets.
        selected_para = prep_j[key]['paragraph'][prior_offset:post_offset]
        para_len = len(selected_para)
        if para_len < sent_len:
            raise RuntimeError(f"prior_offeset: {prior_offset}, post_offset: {post_offset}, length: {para_len}")
        prep_j[key]['paragraph'] = selected_para
        # remove key of the long sentence/paragraph
        if sent_len < max_snt_len and para_len < max_para_len:
            new_keys.append(key)
    logging.info(f"Removed key raio is {1-len(new_keys)/len(keys)}")
    keys = new_keys

    # Save the results
    logging.info("writing train.txt to %s", str(dest))
    train_txt = codecs.open(dest / "text", "w", encoding="utf-8")
    for key in tqdm(keys):
        train_txt.write(prep_j[key]['paragraph'] + "\n")

    # 90/10 split of the surviving keys into train and valid sets.
    logging.info("writing train and valid text to %s", str(dest))
    split_point = int(len(keys) * 0.9)
    datasets = {'train': keys[:split_point], 'valid': keys[split_point:]}
    for dataset in datasets.keys():
        logging.info("writing ref trn to %s", str(dest / Path(dataset)))
        input_text = codecs.open(dest / Path(dataset) / "text_input", "w", encoding="utf-8")
        output_text = codecs.open(dest / Path(dataset) / "text_output", "w", encoding="utf-8")
        for key in tqdm(datasets[dataset]):
            input_text.write(key + " " + prep_j[key]['paragraph'] + "\n")
            output_text.write(key + " " + prep_j[key]['sentence'] + "\n")
    # If want to check the error of data, just use these lines.
    # error_checker(keys,
    #               dest / Path(dataset) / "text_input",
    #               dest / Path(dataset) / "error.log")
'''define the config file for voc and resnet101os16'''
from .base_cfg import *
# modify dataset config
# NOTE(review): dict.copy() is shallow -- DATASET_CFG['train'] is still the
# same dict object as in base_cfg, so the update() calls below mutate the
# shared base config as well; confirm this is intended.
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG['train'].update(
    {
        'type': 'voc',
        'set': 'trainaug',
        'rootdir': 'data/VOCdevkit/VOC2012',
    }
)
DATASET_CFG['test'].update(
    {
        'type': 'voc',
        'rootdir': 'data/VOCdevkit/VOC2012',
    }
)
# modify dataloader config
DATALOADER_CFG = DATALOADER_CFG.copy()
# modify optimizer config
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
    {
        'max_epochs': 60,
    }
)
# modify losses config
LOSSES_CFG = LOSSES_CFG.copy()
# modify model config
# 21 classes = 20 VOC object categories + background.
MODEL_CFG = MODEL_CFG.copy()
MODEL_CFG.update(
    {
        'num_classes': 21,
        'backbone': {
            'type': 'resnet101',
            'series': 'resnet',
            'pretrained': True,
            'outstride': 16,
            'use_stem': True,
            'selected_indices': (2, 3),
        },
    }
)
# modify inference config
INFERENCE_CFG = INFERENCE_CFG.copy()
# modify common config
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
    {
        'backupdir': 'annnet_resnet101os16_voc_train',
        'logfilepath': 'annnet_resnet101os16_voc_train/train.log',
    }
)
COMMON_CFG['test'].update(
    {
        'backupdir': 'annnet_resnet101os16_voc_test',
        'logfilepath': 'annnet_resnet101os16_voc_test/test.log',
        'resultsavepath': 'annnet_resnet101os16_voc_test/annnet_resnet101os16_voc_results.pkl'
    }
)
modify dataset config modify dataloader config modify optimizer config modify losses config modify model config modify inference config modify common config | 207 | en | 0.330456 |
# Write a Python function to sum all the numbers in a list
# Sample List : [8, 2, 3, 0, 7]
# Expected Output : 20
def sum_list(numbers):
    """Return the sum of all values in *numbers*.

    Args:
        numbers: an iterable of numbers (may be empty).

    Returns:
        The arithmetic sum of the values; 0 for an empty iterable.
    """
    # Renamed the parameter from ``list`` so it no longer shadows the builtin.
    total = 0
    for value in numbers:
        total += value
    return total


# Demo values (previously bound to the name ``list``, shadowing the builtin).
values = [8, 2, 3, 0, 7]
print(sum_list(values))
from django.apps import AppConfig
class SignalsConfig(AppConfig):
    """Django app configuration for the signals app."""
    # Dotted path of the app this config belongs to.
    name = 'signals.apps.signals'
    verbose_name = 'Signals'
    def ready(self):
        """Hook run by Django once the app registry is fully populated."""
        # Import Django signals to connect receiver functions.
        import signals.apps.signals.signal_receivers  # noqa
| api/app/signals/apps/signals/config.py | 277 | Import Django signals to connect receiver functions. noqa | 57 | en | 0.836169 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""tf2onnx.onnx_opset module"""
from . import common, controlflow, generator, logical, math, misc, nn, quantize, reduction, rnn, tensor, traditionalml
| tf2onnx/onnx_opset/__init__.py | 246 | tf2onnx.onnx_opset module
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT license. | 117 | en | 0.425136 |
# -*- coding:utf-8 -*-
# Author: RubanSeven
# import cv2
import numpy as np
# from transform import get_perspective_transform, warp_perspective
from .warp_mls import WarpMLS
def distort(src, segment):
    """Randomly jitter the image corners and ``segment - 1`` evenly spaced
    vertical cut lines, then warp ``src`` with moving-least-squares.

    The jitter magnitude is derived from the segment width (cut // 3).
    """
    img_h, img_w = src.shape[:2]
    cut = img_w // segment
    thresh = cut // 3
    half_thresh = thresh * 0.5

    def jitter():
        # Random offset roughly centred on zero: [-half_thresh, thresh - half_thresh).
        return np.random.randint(thresh) - half_thresh

    # Source quad: the exact image corners.
    src_pts = [[0, 0], [img_w, 0], [img_w, img_h], [0, img_h]]
    # Destination corners: each pulled inward by a random amount.
    dst_pts = [
        [np.random.randint(thresh), np.random.randint(thresh)],
        [img_w - np.random.randint(thresh), np.random.randint(thresh)],
        [img_w - np.random.randint(thresh), img_h - np.random.randint(thresh)],
        [np.random.randint(thresh), img_h - np.random.randint(thresh)],
    ]

    # Interior cut lines: jitter both endpoints in x and y.
    for seg_idx in range(1, segment):
        x = cut * seg_idx
        src_pts.append([x, 0])
        src_pts.append([x, img_h])
        dst_pts.append([x + jitter(), jitter()])
        dst_pts.append([x + jitter(), img_h + jitter()])

    return WarpMLS(src, src_pts, dst_pts, img_w, img_h).generate()
def stretch(src, segment):
    """Horizontally shift ``segment - 1`` evenly spaced vertical cut lines
    of ``src`` by random amounts (corners stay fixed) and warp with
    moving-least-squares.
    """
    img_h, img_w = src.shape[:2]
    cut = img_w // segment
    thresh = cut * 4 // 5
    half_thresh = thresh * 0.5

    # Both quads start at the exact image corners (corners are not moved).
    src_pts = [[0, 0], [img_w, 0], [img_w, img_h], [0, img_h]]
    dst_pts = [[0, 0], [img_w, 0], [img_w, img_h], [0, img_h]]

    for seg_idx in range(1, segment):
        # One horizontal shift per cut line, applied to both endpoints.
        move = np.random.randint(thresh) - half_thresh
        x = cut * seg_idx
        src_pts.append([x, 0])
        src_pts.append([x, img_h])
        dst_pts.append([x + move, 0])
        dst_pts.append([x + move, img_h])

    return WarpMLS(src, src_pts, dst_pts, img_w, img_h).generate()
def perspective(src):
    """Apply a random vertical perspective-like warp: each corner is moved
    toward the image centre by up to half the image height, then the image
    is warped with moving-least-squares.
    """
    img_h, img_w = src.shape[:2]
    thresh = img_h // 2

    src_pts = [[0, 0], [img_w, 0], [img_w, img_h], [0, img_h]]
    # Top corners move down by a random amount, bottom corners move up.
    dst_pts = [
        [0, np.random.randint(thresh)],
        [img_w, np.random.randint(thresh)],
        [img_w, img_h - np.random.randint(thresh)],
        [0, img_h - np.random.randint(thresh)],
    ]

    return WarpMLS(src, src_pts, dst_pts, img_w, img_h).generate()
# def distort(src, segment):
# img_h, img_w = src.shape[:2]
# dst = np.zeros_like(src, dtype=np.uint8)
#
# cut = img_w // segment
# thresh = img_h // 8
#
# src_pts = list()
# # dst_pts = list()
#
# src_pts.append([-np.random.randint(thresh), -np.random.randint(thresh)])
# src_pts.append([-np.random.randint(thresh), img_h + np.random.randint(thresh)])
#
# # dst_pts.append([0, 0])
# # dst_pts.append([0, img_h])
# dst_box = np.array([[0, 0], [0, img_h], [cut, 0], [cut, img_h]], dtype=np.float32)
#
# half_thresh = thresh * 0.5
#
# for cut_idx in np.arange(1, segment, 1):
# src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
# np.random.randint(thresh) - half_thresh])
# src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
# img_h + np.random.randint(thresh) - half_thresh])
#
# # dst_pts.append([cut * i, 0])
# # dst_pts.append([cut * i, img_h])
#
# src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32)
#
# # mat = cv2.getPerspectiveTransform(src_box, dst_box)
# # print(mat)
# # dst[:, cut * (cut_idx - 1):cut * cut_idx] = cv2.warpPerspective(src, mat, (cut, img_h))
#
# mat = get_perspective_transform(dst_box, src_box)
# dst[:, cut * (cut_idx - 1):cut * cut_idx] = warp_perspective(src, mat, (cut, img_h))
# # print(mat)
#
# src_pts.append([img_w + np.random.randint(thresh) - half_thresh,
# np.random.randint(thresh) - half_thresh])
# src_pts.append([img_w + np.random.randint(thresh) - half_thresh,
# img_h + np.random.randint(thresh) - half_thresh])
# src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32)
#
# # mat = cv2.getPerspectiveTransform(src_box, dst_box)
# # dst[:, cut * (segment - 1):] = cv2.warpPerspective(src, mat, (img_w - cut * (segment - 1), img_h))
# mat = get_perspective_transform(dst_box, src_box)
# dst[:, cut * (segment - 1):] = warp_perspective(src, mat, (img_w - cut * (segment - 1), img_h))
#
# return dst
| data_loader/modules/Text_Image_Augmentation_python/augment.py | 5,110 | -*- coding:utf-8 -*- Author: RubanSeven import cv2 from transform import get_perspective_transform, warp_perspective thresh = img_h // segment // 3 thresh = img_h // 5 thresh = img_h // segment // 3 thresh = img_h // 5 def distort(src, segment): img_h, img_w = src.shape[:2] dst = np.zeros_like(src, dtype=np.uint8) cut = img_w // segment thresh = img_h // 8 src_pts = list() dst_pts = list() src_pts.append([-np.random.randint(thresh), -np.random.randint(thresh)]) src_pts.append([-np.random.randint(thresh), img_h + np.random.randint(thresh)]) dst_pts.append([0, 0]) dst_pts.append([0, img_h]) dst_box = np.array([[0, 0], [0, img_h], [cut, 0], [cut, img_h]], dtype=np.float32) half_thresh = thresh * 0.5 for cut_idx in np.arange(1, segment, 1): src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh, np.random.randint(thresh) - half_thresh]) src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh, img_h + np.random.randint(thresh) - half_thresh]) dst_pts.append([cut * i, 0]) dst_pts.append([cut * i, img_h]) src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32) mat = cv2.getPerspectiveTransform(src_box, dst_box) print(mat) dst[:, cut * (cut_idx - 1):cut * cut_idx] = cv2.warpPerspective(src, mat, (cut, img_h)) mat = get_perspective_transform(dst_box, src_box) dst[:, cut * (cut_idx - 1):cut * cut_idx] = warp_perspective(src, mat, (cut, img_h)) print(mat) src_pts.append([img_w + np.random.randint(thresh) - half_thresh, np.random.randint(thresh) - half_thresh]) src_pts.append([img_w + np.random.randint(thresh) - half_thresh, img_h + np.random.randint(thresh) - half_thresh]) src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32) mat = cv2.getPerspectiveTransform(src_box, dst_box) dst[:, cut * (segment - 1):] = cv2.warpPerspective(src, mat, (img_w - cut * (segment - 1), img_h)) mat = 
get_perspective_transform(dst_box, src_box) dst[:, cut * (segment - 1):] = warp_perspective(src, mat, (img_w - cut * (segment - 1), img_h)) return dst | 2,314 | en | 0.208871 |
"""
Examples of loading all information about an object or set of objects from the
database.
"""
from __future__ import absolute_import
from __future__ import print_function
from owmeta_core.context import Context
from owmeta_core.command import OWM
from owmeta.connection import Connection
from owmeta.neuron import Neuron
def pp_connection(conn):
    """Print a one-line summary of a Connection: pre-cell, post-cell,
    synapse type, synapse class, and connection count."""
    fields = (conn.pre_cell(), conn.post_cell(), conn.syntype(),
              conn.synclass(), conn.number())
    print(*fields)
# Walk through several example queries against the stored project database.
with OWM('../.owm').connect() as owmconn:
    ctx = Context(ident="http://openworm.org/data", conf=owmconn.conf).stored

    # Connections whose pre-synaptic cell is AVAL.
    query_object = ctx(Connection)(pre_cell=ctx(Neuron).query(name='AVAL'))
    print('STARTING WITH AVAL')
    for x in query_object.load():
        pp_connection(x)
    print()

    # Connections whose pre-synaptic cell is PVCL.
    print('STARTING WITH PVCL')
    query_object = ctx(Connection)(pre_cell=ctx(Neuron).query(name='PVCL'))
    for x in query_object.load():
        pp_connection(x)
    print()

    print('NEURONS')
    query_object = ctx(Neuron).query()
    # sometimes a neuron object with the same name is returned more than once,
    # so ``names`` acts as a seen-set and each name is printed only once.
    names = dict()
    for x in query_object.load():
        n = x.name()
        if n not in names:
            names[n] = dict()
            print(n)
    print()

    print('NEIGHBORS of PVCL')
    query_object = ctx(Neuron).query(name='PVCL')
    for x in query_object.neighbor():
        print(x.name())
    print()

    print('NEIGHBORS of AVAL with number=3 connections')
    query_object = ctx(Neuron).query(name='AVAL')
    for x in query_object.neighbor.get(number=3):
        print(x.name())
    # BUG FIX: this was a bare ``print`` -- with ``print_function`` imported
    # that expression is a no-op; call it so the blank separator line is
    # actually printed, as in every other section above.
    print()

    print('NEURONS and their RECEPTORS')
    for x in ctx(Neuron).query().load():
        # Wrap in a try-block in case there are no receptors listed
        print(x, end=' ')
        try:
            for r in x.receptor():
                print(' ', r, end=' ')
        except StopIteration:
            pass
        print()
| examples/test_bgp.py | 1,901 | Examples of loading all information about an object or set of objects from the
database.
sometimes a neuron object with the same name is returned more than once Wrap in a try-block in case there are no receptors listed | 220 | en | 0.89722 |
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from segwit import send_to_witness
from test_framework.test_framework import BitcoinTestFramework
from test_framework import blocktools
from test_framework.mininode import CTransaction
from test_framework.util import *
from test_framework.util import *
import io
import time
# Sequence number that is BIP 125 opt-in and BIP 68-compliant
BIP125_SEQUENCE_NUMBER = 0xfffffffd
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
class BumpFeeTest(BitcoinTestFramework):
    """Functional test for the ``bumpfee`` RPC (BIP 125 fee bumping)."""
    def __init__(self):
        super().__init__()
        # Two nodes starting from a fresh chain.
        self.num_nodes = 2
        self.setup_clean_chain = True
    def setup_network(self, split=False):
        """Start node0 with -walletrbf=0 and node1 with -walletrbf=1; node1's
        wallet is encrypted (and unlocked) for the locked-wallet test."""
        extra_args = [["-debug", "-prematurewitness", "-walletprematurewitness", "-walletrbf={}".format(i)]
                      for i in range(self.num_nodes)]
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
        # Encrypt wallet for test_locked_wallet_fails test
        # (encryptwallet shuts the node down, so restart and unlock it).
        self.nodes[1].encryptwallet(WALLET_PASSPHRASE)
        bitcoind_processes[1].wait()
        self.nodes[1] = start_node(1, self.options.tmpdir, extra_args[1])
        self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
        connect_nodes_bi(self.nodes, 0, 1)
        self.is_network_split = False
        self.sync_all()
    def run_test(self):
        """Fund the RBF node with 25 outputs of 0.1 and run each sub-test."""
        # node0 (walletrbf=0) is the peer; node1 (walletrbf=1) creates RBF txs.
        peer_node, rbf_node = self.nodes
        rbf_node_address = rbf_node.getnewaddress()
        # fund rbf node with 10 coins of 0.1 ltc (10,000,000 satoshis)
        print("Mining blocks...")
        peer_node.generate(110)
        self.sync_all()
        for i in range(25):
            peer_node.sendtoaddress(rbf_node_address, 0.1)
        self.sync_all()
        peer_node.generate(1)
        self.sync_all()
        assert_equal(rbf_node.getbalance(), Decimal("2.5"))
        print("Running tests")
        dest_address = peer_node.getnewaddress()
        test_small_output_fails(rbf_node, dest_address)
        test_dust_to_fee(rbf_node, dest_address)
        test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
        test_segwit_bumpfee_succeeds(rbf_node, dest_address)
        test_nonrbf_bumpfee_fails(peer_node, dest_address)
        test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
        test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
        test_settxfee(rbf_node, dest_address)
        test_rebumping(rbf_node, dest_address)
        test_rebumping_not_replaceable(rbf_node, dest_address)
        test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
        test_bumpfee_metadata(rbf_node, dest_address)
        test_locked_wallet_fails(rbf_node, dest_address)
        print("Success")
def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address):
    """Happy path: bumping an RBF tx raises the fee, evicts the original from
    both mempools, and records the replaces/replaced_by wallet links."""
    rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.090000})
    rbftx = rbf_node.gettransaction(rbfid)
    sync_mempools((rbf_node, peer_node))
    assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
    bumped_tx = rbf_node.bumpfee(rbfid)
    # The bump must pay strictly more fee than the original.
    assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0
    # check that bumped_tx propagates, original tx was evicted and has a wallet conflict
    sync_mempools((rbf_node, peer_node))
    assert bumped_tx["txid"] in rbf_node.getrawmempool()
    assert bumped_tx["txid"] in peer_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
    assert rbfid not in peer_node.getrawmempool()
    oldwtx = rbf_node.gettransaction(rbfid)
    assert len(oldwtx["walletconflicts"]) > 0
    # check wallet transaction replaces and replaced_by values
    bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
    assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
    assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
    """bumpfee must also work on a transaction spending a segwit output."""
    # Create a transaction with segwit output, then create an RBF transaction
    # which spends it, and make sure bumpfee can be called on it.
    segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.1"))
    segwit_out = rbf_node.validateaddress(rbf_node.getnewaddress())
    rbf_node.addwitnessaddress(segwit_out["address"])
    segwitid = send_to_witness(
        version=0,
        node=rbf_node,
        utxo=segwit_in,
        pubkey=segwit_out["pubkey"],
        encode_p2sh=False,
        amount=Decimal("0.09"),
        sign=True)
    # Hand-build the RBF spend (opt-in via the BIP 125 sequence number).
    rbfraw = rbf_node.createrawtransaction([{
        'txid': segwitid,
        'vout': 0,
        "sequence": BIP125_SEQUENCE_NUMBER
    }], {dest_address: Decimal("0.05"),
         get_change_address(rbf_node): Decimal("0.03")})
    rbfsigned = rbf_node.signrawtransaction(rbfraw)
    rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
    assert rbfid in rbf_node.getrawmempool()
    bumped_tx = rbf_node.bumpfee(rbfid)
    assert bumped_tx["txid"] in rbf_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
    """A transaction that did not opt in to BIP 125 must not be bumpable."""
    # peer_node runs without wallet RBF, so its transactions are final.
    txid = create_fund_sign_send(peer_node, {dest_address: 0.090000})
    assert_raises_message(JSONRPCException, "not BIP 125 replaceable",
                          peer_node.bumpfee, txid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
    """bumpfee must refuse a transaction containing inputs the wallet
    does not own."""
    # cannot bump fee unless the tx has only inputs that we own.
    # here, the rbftx has a peer_node coin and then adds a rbf_node input
    # Note that this test depends upon the RPC code checking input ownership prior to change outputs
    # (since it can't use fundrawtransaction, it lacks a proper change output)
    utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
    inputs = [{
        "txid": utxo["txid"],
        "vout": utxo["vout"],
        "address": utxo["address"],
        "sequence": BIP125_SEQUENCE_NUMBER
    } for utxo in utxos]
    output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.1")
    rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
    # Both wallets must sign: each owns one of the two inputs.
    signedtx = rbf_node.signrawtransaction(rawtx)
    signedtx = peer_node.signrawtransaction(signedtx["hex"])
    rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
    assert_raises_message(JSONRPCException, "Transaction contains inputs that don't belong to this wallet",
                          rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
    """bumpfee must refuse a transaction that already has a descendant
    spending one of its outputs."""
    # The parent pays ourselves, so output 0 can be spent without having to
    # work out which output is change.
    parent_id = create_fund_sign_send(rbf_node, {rbf_node_address: 0.050000})
    child_raw = rbf_node.createrawtransaction(
        [{"txid": parent_id, "vout": 0}], {dest_address: 0.020000})
    child_signed = rbf_node.signrawtransaction(child_raw)
    rbf_node.sendrawtransaction(child_signed["hex"])
    assert_raises_message(JSONRPCException, "Transaction has descendants in the wallet",
                          rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
    """bumpfee must refuse a totalFee that would shrink the change output
    below the allowed minimum (boundary: 2000000 ok, 2000001 fails)."""
    # cannot bump fee with a too-small output
    rbfid = spend_one_input(rbf_node,
                            Decimal("0.100000"),
                            {dest_address: 0.080000,
                             get_change_address(rbf_node): Decimal("0.010000")})
    rbf_node.bumpfee(rbfid, {"totalFee": 2000000})
    # Same setup again; one satoshi more must be rejected.
    rbfid = spend_one_input(rbf_node,
                            Decimal("0.100000"),
                            {dest_address: 0.080000,
                             get_change_address(rbf_node): Decimal("0.010000")})
    assert_raises_message(JSONRPCException, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 2000001})
def test_dust_to_fee(rbf_node, dest_address):
    """If the bump would leave a dust-sized change output, the change is
    absorbed into the fee and the output dropped."""
    # check that if output is reduced to dust, it will be converted to fee
    # the requested totalFee of 0.0199 leaves only 0.0001 of change, which is
    # dust, so the whole remainder becomes fee (0.020000) and the change
    # output disappears.
    rbfid = spend_one_input(rbf_node,
                            Decimal("0.100000"),
                            {dest_address: 0.080000,
                             get_change_address(rbf_node): Decimal("0.010000")})
    fulltx = rbf_node.getrawtransaction(rbfid, 1)
    bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 1990000})
    full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
    assert_equal(bumped_tx["fee"], Decimal("0.020000"))
    assert_equal(len(fulltx["vout"]), 2)
    assert_equal(len(full_bumped_tx["vout"]), 1)  # change output is eliminated
def test_settxfee(rbf_node, dest_address):
    """bumpfee must honour the wallet feerate configured via settxfee."""
    # Send at a low feerate, raise paytxfee 2.5x, then verify the bump paid
    # at least twice the original fee.
    rbf_node.settxfee(Decimal("0.001000"))
    txid = create_fund_sign_send(rbf_node, {dest_address: 0.090000})
    original = rbf_node.gettransaction(txid)
    rbf_node.settxfee(Decimal("0.002500"))
    bumped = rbf_node.bumpfee(txid)
    assert bumped["fee"] > 2 * abs(original["fee"])
    rbf_node.settxfee(Decimal("0.00000000"))  # unset paytxfee
def test_rebumping(rbf_node, dest_address):
    """A tx can only be bumped once, but the bump itself can be bumped."""
    rbf_node.settxfee(Decimal("0.001000"))
    txid = create_fund_sign_send(rbf_node, {dest_address: 0.090000})
    first_bump = rbf_node.bumpfee(txid, {"totalFee": 100000})
    # Bumping the already-replaced original must fail ...
    assert_raises_message(JSONRPCException, "already bumped",
                          rbf_node.bumpfee, txid, {"totalFee": 200000})
    # ... while bumping the replacement succeeds.
    rbf_node.bumpfee(first_bump["txid"], {"totalFee": 200000})
def test_rebumping_not_replaceable(rbf_node, dest_address):
    """A bump created with replaceable=False must itself refuse to be bumped."""
    txid = create_fund_sign_send(rbf_node, {dest_address: 0.090000})
    final_bump = rbf_node.bumpfee(txid, {"totalFee": 100000, "replaceable": False})
    assert_raises_message(JSONRPCException, "Transaction is not BIP 125 replaceable",
                          rbf_node.bumpfee, final_bump["txid"], {"totalFee": 200000})
def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
    """Outputs of a bumped tx — and of its unconfirmed replacement — must not be spendable."""
    # check that unconfirmed outputs from bumped transactions are not spendable
    rbfid = create_fund_sign_send(rbf_node, {rbf_node_address: 0.090000})
    rbftx = rbf_node.gettransaction(rbfid)["hex"]
    assert rbfid in rbf_node.getrawmempool()
    bumpid = rbf_node.bumpfee(rbfid)["txid"]
    # the bump replaces the original in the mempool
    assert bumpid in rbf_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
    # check that outputs from the bump transaction are not spendable
    # due to the replaces_txid check in CWallet::AvailableCoins
    assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
    # submit a block with the rbf tx to clear the bump tx out of the mempool,
    # then call abandon to make sure the wallet doesn't attempt to resubmit the
    # bump tx, then invalidate the block so the rbf tx will be put back in the
    # mempool. this makes it possible to check whether the rbf tx outputs are
    # spendable before the rbf tx is confirmed.
    block = submit_block_with_tx(rbf_node, rbftx)
    rbf_node.abandontransaction(bumpid)
    rbf_node.invalidateblock(block.hash)
    assert bumpid not in rbf_node.getrawmempool()
    assert rbfid in rbf_node.getrawmempool()
    # check that outputs from the rbf tx are not spendable before the
    # transaction is confirmed, due to the replaced_by_txid check in
    # CWallet::AvailableCoins
    assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
    # check that the main output from the rbf tx is spendable after confirmed
    rbf_node.generate(1)
    assert_equal(
        sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
            if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
def test_bumpfee_metadata(rbf_node, dest_address):
    """Wallet metadata ('comment' and 'to') must be carried over to the bumped transaction."""
    rbfid = rbf_node.sendtoaddress(dest_address, 0.090000, "comment value", "to value")
    bumped_tx = rbf_node.bumpfee(rbfid)
    bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
    assert_equal(bumped_wtx["comment"], "comment value")
    assert_equal(bumped_wtx["to"], "to value")
def test_locked_wallet_fails(rbf_node, dest_address):
    """bumpfee on a locked wallet must fail with a passphrase-required error."""
    rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.090000})
    rbf_node.walletlock()
    assert_raises_message(JSONRPCException, "Please enter the wallet passphrase with walletpassphrase first.",
                          rbf_node.bumpfee, rbfid)
def create_fund_sign_send(node, outputs):
    """Build, fund, sign and broadcast a transaction paying `outputs`; return its txid."""
    unfunded = node.createrawtransaction([], outputs)
    funded = node.fundrawtransaction(unfunded)
    signed = node.signrawtransaction(funded["hex"])
    return node.sendrawtransaction(signed["hex"])
def spend_one_input(node, input_amount, outputs):
    """Spend exactly one wallet UTXO of value `input_amount` to `outputs`.

    The chosen input is given a BIP 125 opt-in sequence number so the
    resulting transaction is replaceable (bumpable).  Returns the txid.
    Raises StopIteration if no UTXO of the requested amount exists.
    """
    # Renamed the local from `input` to `tx_input`: the original shadowed the
    # `input` builtin.
    utxo = next(u for u in node.listunspent() if u["amount"] == input_amount)
    tx_input = dict(sequence=BIP125_SEQUENCE_NUMBER, **utxo)
    rawtx = node.createrawtransaction([tx_input], outputs)
    signedtx = node.signrawtransaction(rawtx)
    txid = node.sendrawtransaction(signedtx["hex"])
    return txid
def get_change_address(node):
    """Get a wallet change address.

    There is no wallet RPC to access unused change addresses, so this creates a
    dummy transaction, calls fundrawtransaction to add an input and change
    output, then returns the change address."""
    dest_address = node.getnewaddress()
    dest_amount = Decimal("0.012345")
    rawtx = node.createrawtransaction([], {dest_address: dest_amount})
    fundtx = node.fundrawtransaction(rawtx)
    info = node.decoderawtransaction(fundtx["hex"])
    # The change output is the one whose value differs from the amount requested.
    return next(address for out in info["vout"]
                if out["value"] != dest_amount for address in out["scriptPubKey"]["addresses"])
def submit_block_with_tx(node, tx):
    """Mine and submit a block containing the coinbase plus raw tx hex `tx`; return the block."""
    ctx = CTransaction()
    ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
    tip = node.getbestblockhash()
    height = node.getblockcount() + 1
    # mediantime + 1 keeps the new block's timestamp valid relative to the tip
    block_time = node.getblockheader(tip)["mediantime"] + 1
    block = blocktools.create_block(int(tip, 16), blocktools.create_coinbase(height), block_time)
    block.vtx.append(ctx)
    block.rehash()
    # merkle root must be recomputed after appending the extra transaction
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()
    error = node.submitblock(bytes_to_hex_str(block.serialize(True)))
    if error is not None:
        raise Exception(error)
    return block
if __name__ == "__main__":
    # Run the functional test when executed as a script.
    BumpFeeTest().main()
| qa/rpc-tests/bumpfee.py | 14,594 | Get a wallet change address.
There is no wallet RPC to access unused change addresses, so this creates a
dummy transaction, calls fundrawtransaction to give add an input and change
output, then returns the change address.
!/usr/bin/env python3 Copyright (c) 2016 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. Sequence number that is BIP 125 opt-in and BIP 68-compliant Encrypt wallet for test_locked_wallet_fails test fund rbf node with 10 coins of 0.1 ltc (10,000,000 satoshis) check that bumped_tx propogates, original tx was evicted and has a wallet conflict check wallet transaction replaces and replaced_by values RECte a transaction with segwit output, then create an RBF transaction which spends it, and make sure bumpfee can be called on it. cannot replace a non RBF transaction (from node which did not enable RBF) cannot bump fee unless the tx has only inputs that we own. here, the rbftx has a peer_node coin and then adds a rbf_node input Note that this test depends upon the RPC code checking input ownership prior to change outputs (since it can't use fundrawtransaction, it lacks a proper change output) cannot bump fee if the transaction has a descendant parent is send-to-self, so we don't have to check which output is change when creating the child tx cannot bump fee with a too-small output check that if output is reduced to dust, it will be converted to fee the bumped tx sets fee=9900, but it converts to 10,000change output is eliminated check that bumpfee reacts correctly to the use of settxfee (paytxfee) increase feerate by 2.5x, test that fee increased at least 2x unset paytxfee check that re-bumping the original tx fails, but bumping the bumper succeeds check that re-bumping a non-replaceable bump tx fails check that unconfirmed outputs from bumped transactions are not spendable check that outputs from the bump transaction are not spendable due to the replaces_txid check in CWallet::AvailableCoins submit a block with the rbf tx to clear the bump tx out of the mempool, then call abandon to make sure the wallet doesn't attempt to resubmit the bump tx, then invalidate 
the block so the rbf tx will be put back in the mempool. this makes it possible to check whether the rbf tx outputs are spendable before the rbf tx is confirmed. check that outputs from the rbf tx are not spendable before the transaction is confirmed, due to the replaced_by_txid check in CWallet::AvailableCoins check that the main output from the rbf tx is spendable after confirmed | 2,607 | en | 0.873345 |
#!/usr/bin/env python3
def main():
    """Read pattern, genome and the mismatch budget from stdin; print match positions."""
    pattern = input().upper()
    genome = input().upper()
    max_mismatches = int(input())
    # Space-separated positions (trailing space kept, matching the original output).
    for position in approximate_occurrences(genome, pattern, max_mismatches):
        print(position, end=' ')
    print()
# For each nucleotide, the three alternative bases substituted when generating
# single-mismatch neighbours (e.g. an 'A' may be replaced by C, T or G).
LIST_A = ['C', 'T', 'G']
LIST_C = ['A', 'T', 'G']
LIST_T = ['C', 'A', 'G']
LIST_G = ['C', 'T', 'A']
def _generate_immediate_neighbours(pattern: str) -> list:
"""
Generate immediate (different by one mismatch) neighbours of the given genome pattern
:param pattern: a pattern to examine
:return: neighbourhood, NOT including the given pattern
"""
generated = []
for i in range(len(pattern)):
if pattern[i] == 'A':
generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_A])
elif pattern[i] == 'C':
generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_C])
elif pattern[i] == 'T':
generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_T])
elif pattern[i] == 'G':
generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_G])
return generated
def generate_neighbours(pattern: str, mismatches: int) -> set:
    """
    Generate neighbours for the given pattern (genome string).

    Breadth-first expansion: each round substitutes one more position in every
    pattern discovered during the previous round.

    :param pattern: genome pattern
    :param mismatches: number of mismatches to generate neighbours
    :return: a set of patterns in the neighbourhood, including the 'pattern' itself
    """
    neighbourhood = {pattern}
    frontier = [pattern]
    for _ in range(mismatches):
        discovered = []
        for candidate in frontier:
            for neighbour in _generate_immediate_neighbours(candidate):
                if neighbour not in neighbourhood:
                    neighbourhood.add(neighbour)
                    discovered.append(neighbour)
        frontier = discovered
    return neighbourhood
def approximate_occurrences(genome: str, pattern: str, mismatches: int) -> list:
    """Return the sorted start indices where `pattern` occurs in `genome`
    with at most `mismatches` substitutions."""
    occurrences = set()
    for neighbour in generate_neighbours(pattern, mismatches):
        # Collect every (possibly overlapping) occurrence of this neighbour.
        position = genome.find(neighbour)
        while position != -1:
            occurrences.add(position)
            position = genome.find(neighbour, position + 1)
    return sorted(occurrences)
if __name__ == '__main__':
    # Script entry point.
    main()
| hw1/approximate_occurrences.py | 2,572 | Generate immediate (different by one mismatch) neighbours of the given genome pattern
:param pattern: a pattern to examine
:return: neighbourhood, NOT including the given pattern
Generate neighbours for the given pattern (genome string)
:param pattern: genome pattern
:param mismatches: number of mismatches to generate neighbours
:return: a set of patterns in the neighbourhood, including the 'pattern' itself
!/usr/bin/env python3 | 433 | en | 0.809012 |
# -*- coding: utf-8 -*-
'''
Runs MultiprocessTest with all warnings including traceback...
'''
#
# https://stackoverflow.com/questions/22373927/get-traceback-of-warnings
import traceback
import warnings
import sys
from . import multiprocess
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
    """Drop-in ``warnings.showwarning`` hook that prints the current stack
    before the formatted warning, so each warning can be traced to its origin."""
    stream = sys.stderr if not hasattr(file, 'write') else file
    traceback.print_stack(file=stream)
    stream.write(warnings.formatwarning(message, category, filename, lineno, line))
def main(test_group=None):
    """Run the multiprocess test suite with every warning shown and traced.

    :param test_group: optional test group forwarded to ``multiprocess.main``.
    """
    warnings.showwarning = warn_with_traceback  # route warnings through our hook
    warnings.simplefilter("always")  # show every warning, never deduplicate
    multiprocess.main(test_group)
if __name__ == '__main__':
    # Script entry point.
    main()
| dh_testers/warningMultiprocess.py | 688 | Runs MultiprocessTest with all warnings including traceback...
-*- coding: utf-8 -*- https://stackoverflow.com/questions/22373927/get-traceback-of-warnings | 157 | en | 0.760746 |
# Application metadata.
version = '0.1.1'
title = 'Cloud::Auth'

# API versioning: all routes are mounted under this prefix.
api_version = 'v1'
api_prefix = '/api/' + api_version

# SHA-256 digests used as auth tokens, generated like so:
# $ echo -n 'Once upon a time...' | openssl.exe dgst -sha256
# (stdin)= 7cc6caf901b894033626981cd102021727aa59c2548d79e59382649b2c6f50f2
# NOTE(review): tokens are hard-coded in source control — consider loading
# them from the environment or a secrets store.
ADMIN_TOKEN = 'd7981fb00d6f071e1a8b454c47b378d815b53541621e22dc4b3dbf5a6b9c8b1d'
USER_TOKEN = '4d07df1ebd8e23eb48dbcfdde93452d1392c9b890ef3a3b82dc05ff9f5ff8d19'
| src/app/conf.py | 395 | $ echo -n 'Once upon a time...' | openssl.exe dgst -sha256 (stdin)= 7cc6caf901b894033626981cd102021727aa59c2548d79e59382649b2c6f50f2 | 132 | en | 0.188881 |
from __future__ import division
from __future__ import print_function
from __future__ import with_statement
from replacers import *
import pandas as pd
import nltk
import subprocess
def findFreqWord(fuzzyDF):
    """Cross-reference classified tweets with popular tags and persist matches.

    For every tweet in `fuzzyDF` (column 0 = tweet text, column 1 = sentiment
    label) each whitespace-separated word is tested for containment in each
    popular tag read from SubmittedCSV/fuzzyptag.csv.  Matches whose label is
    one of the six recognised sentiment classes are collected, written to
    SubmittedCSV/fuzzyfreq.csv and opened in LibreOffice when available.

    :param fuzzyDF: DataFrame of classified tweets.
    :return: DataFrame with columns Tweets / Classified / FreqWord.
    """
    # The six labels previously handled by six copy-pasted elif branches
    # that differed only in the string written to 'Classified'.
    recognised_labels = {
        'Highly Positive', 'Highly Negative',
        'Moderately Positive', 'Moderately Negative',
        'Positive', 'Negative',
    }
    f1 = fuzzyDF
    f3 = pd.read_csv("SubmittedCSV/fuzzyptag.csv")
    pop_list = list(f3.iloc[:, 0])
    # Collect rows and build the DataFrame once at the end: per-row
    # DataFrame.append is quadratic and removed in pandas >= 2.0.
    matches = []
    for row_index in range(len(f1)):
        tweet = f1.iloc[row_index, 0]
        label = f1.iloc[row_index, 1]
        splitted_sentence = tweet.split()
        print(splitted_sentence)
        found = False
        for tag in pop_list:
            print("Popular tags:", pop_list)
            for word in splitted_sentence:
                if word in tag and label in recognised_labels:
                    matches.append({'Tweets': tweet, 'Classified': label, 'FreqWord': tag})
                    found = True
                else:
                    print("Unmatched")
        if not found:
            print("NO")
    f2 = pd.DataFrame(matches, columns=['Tweets', 'Classified', 'FreqWord'])
    f2.to_csv("SubmittedCSV/fuzzyfreq.csv", index=False)
    try:
        subprocess.call(['libreoffice', '--calc', 'SubmittedCSV/fuzzyfreq.csv'])
    except OSError:
        print("Works with DEBIAN OS & LIBREOFFICE 5 only \n Use MS Excel or equivalent Software to open : "
              "SubmittedCSV/fuzzyfreq.csv")
    return f2
def pivotTable():
    """Placeholder for a future pivot-table report; intentionally unimplemented."""
    pass
# ---------------------------------- SUBMITTED LOGIC - TEST CASE
# ---------------------------------- #01 UNIT TESTING FAILED ##10, 11, 27, 30
# ---------------------------------- #02 LOGICAL GLITCH
# ---------------------------------- #03 COMPLIANCE MISUSE
# ---------------------------------- #04 MEMDUMP DETECTED
# ---------------------------------- #05 UNUSED OBJECTS, MEMORY BLOCK 0x0008
# for hosts_row in f1:
# row = 1
# found = False
# # t1=nltk.word_tokenize(hosts_row[0])
# t1 = hosts_row.split()
# print("t1=", t1)
# for master_row in pop_list:
# print("popular tags=", pop_list)
# for word in t1:
#
# if word == master_row[0] and hosts_row[1] == "Highly Positive":
# # >>> master_row[0] # Logical glitch, value uncompilable
# # 'b'
# f2.write(str(hosts_row[1]) + "," + word) # Will always look for 1st element of string
# # >>> hosts_row
# # ' neville rooney end ever tons trophy drought httpcocryingeyesjebfkdp,Positive\r\n'
# # >>> hosts_row[1]
# # 'n'
# found = True
# row = row + 1
#
# elif word == master_row[0] and hosts_row[1] == "Highly Negative":
# f2.write(str(hosts_row[1]) + "," + str(master_row[0]))
# found = True
# row = row + 1
# elif word == master_row[0] and hosts_row[1] == "Moderately Positive":
# f2.write(str(hosts_row[1]) + "," + str(master_row[0]))
# found = True
# row = row + 1
# elif word == master_row[0] and hosts_row[1] == "Moderately Negative":
# f2.write(str(hosts_row[1]) + "," + str(master_row[0]))
# found = True
# row = row + 1
# elif word == master_row[0] and hosts_row[1] == "Positive":
# f2.write(str(hosts_row[1]) + "," + str(master_row[0]))
# # >>> master_row[0]
# # 'business'
# # >>> hosts_row[1]
# # 'n'
# found = True
# row = row + 1
# elif word == master_row[0] and hosts_row[1] == "Negative":
# f2.write(str(hosts_row[1]) + "," + str(master_row[0]))
# found = True
# row = row + 1
#
# # print count
# if not found:
# print("no")
#
# print(count)
# f1.close()
# f2.close()
| freqWordSelection.py | 5,767 | pd.read_csv("SubmittedCSV/fuzzy.csv") ---------------------------------- SUBMITTED LOGIC - TEST CASE ---------------------------------- 01 UNIT TESTING FAILED 10, 11, 27, 30 ---------------------------------- 02 LOGICAL GLITCH ---------------------------------- 03 COMPLIANCE MISUSE ---------------------------------- 04 MEMDUMP DETECTED ---------------------------------- 05 UNUSED OBJECTS, MEMORY BLOCK 0x0008 for hosts_row in f1: row = 1 found = False t1=nltk.word_tokenize(hosts_row[0]) t1 = hosts_row.split() print("t1=", t1) for master_row in pop_list: print("popular tags=", pop_list) for word in t1: if word == master_row[0] and hosts_row[1] == "Highly Positive": >>> master_row[0] Logical glitch, value uncompilable 'b' f2.write(str(hosts_row[1]) + "," + word) Will always look for 1st element of string >>> hosts_row ' neville rooney end ever tons trophy drought httpcocryingeyesjebfkdp,Positive\r\n' >>> hosts_row[1] 'n' found = True row = row + 1 elif word == master_row[0] and hosts_row[1] == "Highly Negative": f2.write(str(hosts_row[1]) + "," + str(master_row[0])) found = True row = row + 1 elif word == master_row[0] and hosts_row[1] == "Moderately Positive": f2.write(str(hosts_row[1]) + "," + str(master_row[0])) found = True row = row + 1 elif word == master_row[0] and hosts_row[1] == "Moderately Negative": f2.write(str(hosts_row[1]) + "," + str(master_row[0])) found = True row = row + 1 elif word == master_row[0] and hosts_row[1] == "Positive": f2.write(str(hosts_row[1]) + "," + str(master_row[0])) >>> master_row[0] 'business' >>> hosts_row[1] 'n' found = True row = row + 1 elif word == master_row[0] and hosts_row[1] == "Negative": f2.write(str(hosts_row[1]) + "," + str(master_row[0])) found = True row = row + 1 print count if not found: print("no") print(count) f1.close() f2.close() | 2,500 | en | 0.692841 |
# -*- coding: utf8 -*-
# Copyright 2019 JSALT2019 Distant Supervision Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
from distsup import utils
from distsup.configuration import config_utils
def get_val(dictionary, key, dict_name):
    """Return ``dictionary[key]``, raising a KeyError naming `dict_name` when absent."""
    if key in dictionary:
        return dictionary[key]
    raise KeyError('%s has no %s key specified' % (dict_name, key))
class ConfigInstantiator(object):
    """Lazily builds and caches the objects described by a configuration dict.

    Each key of `objects_config` maps to an options dict; on first access the
    options are handed to ``utils.construct_from_kwargs`` and the resulting
    object is memoised in ``self.cache``.
    """

    def __init__(self, objects_config, default_class_dict={},
                 default_modules_dict={}, name='', **kwargs):
        super(ConfigInstantiator, self).__init__(**kwargs)
        self.objects_config = objects_config
        self.default_class_dict = default_class_dict      # fallback class per key
        self.default_modules_dict = default_modules_dict  # fallback module per key
        self.cache = {}   # key -> already-instantiated object
        self.name = name  # used in error messages

    def keys(self):
        """Expose the configured keys, mapping-style."""
        return self.objects_config.keys()

    def _getitem(self, key, additional_parameters=None):
        """Instantiate (at most once) and return the object configured under `key`."""
        if key in self.cache:
            return self.cache[key]
        # Copy so filling in the default class does not mutate the config.
        opts = dict(get_val(self.objects_config, key, self.name))
        if 'class_name' not in opts:
            opts['class_name'] = self.default_class_dict[key]
        instance = utils.construct_from_kwargs(
            opts, self.default_modules_dict.get(key), additional_parameters)
        self.cache[key] = instance
        return instance

    def __getitem__(self, key):
        return self._getitem(key)
class DatasetConfigInstantiator(ConfigInstantiator):
    """Instantiator for the `Datasets` config section.

    The previous ``_getitem`` override here was byte-for-byte identical to
    ``ConfigInstantiator._getitem``, so it has been removed and the base
    implementation is inherited.  The subclass is kept as a distinct type
    (and future extension point) for dataset-specific construction logic.
    """
    pass
class _ConstantDict(object):
def __init__(self, v, **kwargs):
super(_ConstantDict, self).__init__(**kwargs)
self.v = v
def __getitem__(self, k):
return self.v
def get(self, k, v=None):
return self.v
class Configuration(ConfigInstantiator):
    """
    Class responsible for instantiating object that are defined in config file.

    The class tries to be smart about the following modules:
    - Trainer will by default instantiate an 'distsup.trainer.Trainer'
    - all items on the Data key will instantiate a 'distsup.data.Data'
    - It will configure the Model key according to Dataset specification

    Args:
        config_path (str): Path pointing to the config file.
        modify_dict (dict): Optional dictionary representing config
            modifications.
        store_path (str): Optional path to store linked config.
    """
    # Fallback class names used when a config entry omits 'class_name'.
    default_class_dict = {
        'Trainer': 'Trainer',
    }
    # Fallback modules in which those classes are looked up.
    default_modules_dict = {
        'Trainer': 'distsup.trainer',
        'Datasets': 'distsup.data',
        'Model': 'models',
    }

    def __init__(self, config_path, modify_dict={}, store_path=None, **kwargs):
        # Parse the config file and apply the requested modifications.
        config = config_utils.ConfigParser(config_path).get_config(modify_dict)
        if store_path is not None:
            # Persist the fully-resolved (linked) config for reproducibility.
            config_utils.ConfigLinker(config).save_linked_config(store_path)
        super(Configuration, self).__init__(
            objects_config=config,
            default_class_dict=Configuration.default_class_dict,
            default_modules_dict=Configuration.default_modules_dict,
            name=config_path,
            **kwargs)
        if 'Datasets' in self.objects_config:
            # Each dataset entry gets its own lazy instantiator; _ConstantDict
            # makes every dataset key default to the 'distsup.data' module.
            self.cache['Datasets'] = DatasetConfigInstantiator(
                self.objects_config['Datasets'],
                default_modules_dict=_ConstantDict(
                    Configuration.default_modules_dict['Datasets']),
                name='Config.Datasets')

    def __getitem__(self, key):
        if key == 'Model':
            # The model is constructed with the training dataloader as an
            # extra parameter.  NOTE(review): assumes a 'train' dataset is
            # always configured — confirm against the config schema.
            model_param = {'dataloader': self['Datasets']['train']}
            return self._getitem('Model', additional_parameters=model_param)
        else:
            return super(Configuration, self).__getitem__(key)
class Globals(object):
    """Global configuration objects."""
    # True when a CUDA device is available at import time.
    cuda = torch.cuda.is_available()
    # Cluster identifier; empty by default — set elsewhere (TODO confirm usage).
    cluster = ''
    # Experiment bookkeeping; populated by the training harness.
    exp_tag = None
    save_dir = None
    exp_uuid = None
    exp_config_fpath = None
    # Track training progress. The trainer/loader will fill in proper values.
    epoch = -1
    current_iteration = -1
| distsup/configuration/__init__.py | 5,245 | Class responsible for instantiating object that are defined in config file.
The class tries to be smart about the following modules:
- Trainer will by default instantiate an 'distsup.trainer.Trainer'
- all items on the Data key will instantiate a 'distsup.data.Data'
- It will configure the Model key according to Dataset specification
Args:
config_path (str): Path pointing to the config file.
modify_dict (dict): Optional dictionary representing config
modifications.
store_path (str): Optional path to store linked config.
Global configuration objects.
-*- coding: utf8 -*- Copyright 2019 JSALT2019 Distant Supervision Team Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. make a copy since we may change the dict in the end make a copy since we may change the dict in the end Track training progress. The trainer/loader will fill in proper values. | 1,368 | en | 0.791199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.