max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
api/calc.py | Isocyanide/bot-1 | 0 | 6615051 | import wolframalpha
from configuration import config
def calc(update, context):
"""Calculate anything using wolframalpha"""
message = update.message
query = ' '.join(context.args)
if not query:
text = "*Usage:* `/calc {QUERY}`\n"\
"*Example:* `/calc 1 cherry to grams`"
else:
client = wolframalpha.Client(config["WOLFRAM_APP_ID"])
result = client.query(query)
if result.success == 'true':
text = next(result.results).text
else:
text = "Invalid query"
message.reply_text(text=text)
| import wolframalpha
from configuration import config
def calc(update, context):
"""Calculate anything using wolframalpha"""
message = update.message
query = ' '.join(context.args)
if not query:
text = "*Usage:* `/calc {QUERY}`\n"\
"*Example:* `/calc 1 cherry to grams`"
else:
client = wolframalpha.Client(config["WOLFRAM_APP_ID"])
result = client.query(query)
if result.success == 'true':
text = next(result.results).text
else:
text = "Invalid query"
message.reply_text(text=text)
| en | 0.522299 | Calculate anything using wolframalpha | 2.739864 | 3 |
otus_python_homeworks/hw_3/api.py | Nikolay-Lysenko/otus-python-2018-02 | 0 | 6615052 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script starts a web service which can process POST requests
of a particular structure (see homework description for details).
To study all possible options of configuring a service, execute this
from a terminal: `python api.py -h`.
To launch a service with default settings, execute:
`python api.py`.
After server is started, you can send requests to it. A sample commands
that can be executed from a terminal are provided at the `README.md`
file of the current directory.
"""
import os
import argparse
import json
import datetime
import logging
import hashlib
import uuid
from abc import ABCMeta, abstractmethod
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from weakref import WeakKeyDictionary
from scoring import get_score, get_interests
from store import InMemoryStorage
SALT = "Otus"
ADMIN_LOGIN = "admin"
ADMIN_SALT = "42"
OK = 200
BAD_REQUEST = 400
FORBIDDEN = 403
NOT_FOUND = 404
INVALID_REQUEST = 422
INTERNAL_ERROR = 500
ERRORS = {
BAD_REQUEST: "Bad Request",
FORBIDDEN: "Forbidden",
NOT_FOUND: "Not Found",
INVALID_REQUEST: "Invalid Request",
INTERNAL_ERROR: "Internal Server Error",
}
UNKNOWN = 0
MALE = 1
FEMALE = 2
GENDERS = {
UNKNOWN: "unknown",
MALE: "male",
FEMALE: "female",
}
# -----------------------------------------------------------------------------
# Descriptors for fields validation.
class BaseField(object):
"""
Abstract descriptor representing a field of POST request.
:param required:
if it is `True`, field value must be passed explicitly
:param nullable:
if it is `True`, filed value can be empty, default is `False`
"""
__metaclass__ = ABCMeta
def __init__(self, required, nullable=False):
self.data = WeakKeyDictionary()
self.required = required
self.nullable = nullable
def __get__(self, instance, owner):
value = self.data.get(instance)
return value
@abstractmethod
def __set__(self, instance, value):
# Check that a value is in accordance with instance settings.
if value is None and self.required:
raise ValueError("Required fields must be passed explicitly.")
if not value and not self.nullable:
raise ValueError("Empty value in a non-nullable field: %s" % value)
class CharField(BaseField):
"""A descriptor that prohibits non-char values."""
def __set__(self, instance, value):
super(CharField, self).__set__(instance, value)
if value and not isinstance(value, (str, unicode)):
raise ValueError("Non-char values are not allowed: %s" % value)
self.data[instance] = value
class ArgumentsField(BaseField):
"""A descriptor that validates nested requests."""
def __set__(self, instance, value):
super(ArgumentsField, self).__set__(instance, value)
if value and not isinstance(value, dict):
raise ValueError("Non-dict values are not allowed: %s" % value)
self.data[instance] = value
class EmailField(CharField):
"""A descriptor that validates email address."""
def __set__(self, instance, value):
super(EmailField, self).__set__(instance, value)
if value:
split_value = value.split('@')
email_is_invalid = (
len(split_value) != 2
or len(split_value[0]) == 0
or len(split_value[1]) == 0
or '.' not in split_value[1]
)
if email_is_invalid:
raise ValueError("It does not look like an email: %s" % value)
self.data[instance] = value
class PhoneField(BaseField):
"""A descriptor that validates phone number."""
def __set__(self, instance, value):
super(PhoneField, self).__set__(instance, value)
if value:
if not isinstance(value, (str, unicode, int)):
raise ValueError("Value is neither char nor int: %s" % value)
try:
value = int(value)
except ValueError:
raise ValueError("Non-digits are in phone value: %s" % value)
value = str(value)
phone_validity = (
len(value) == 11
and value.startswith('7')
)
if not phone_validity:
raise ValueError("It does not look like a phone: %s" % value)
self.data[instance] = value
class DateField(CharField):
"""
A descriptor that prohibits non-date values
and forces 'DD.MM.YYYY' format.
"""
def __set__(self, instance, value):
super(DateField, self).__set__(instance, value)
if value:
try:
datetime.datetime.strptime(value, '%d.%m.%Y')
except ValueError:
raise ValueError("Date %s not in format 'DD.MM.YYYY'" % value)
self.data[instance] = value
class BirthDayField(DateField):
"""A descriptor that validates birthdays."""
def __set__(self, instance, value):
super(BirthDayField, self).__set__(instance, value)
if value:
today = datetime.date.today()
value_as_date = (
datetime.datetime.strptime(value, '%d.%m.%Y').date()
)
days_per_year = 365.25
max_age = 70
if (today - value_as_date).days / days_per_year > max_age:
raise ValueError('Sorry, too distant birthday: %s' % value)
self.data[instance] = value
class GenderField(BaseField):
"""A descriptor that validates gender codes."""
def __set__(self, instance, value):
super(GenderField, self).__set__(instance, value)
if value and value not in GENDERS.keys():
raise ValueError("Invalid gender code: %s" % value)
self.data[instance] = value
class ClientIDsField(BaseField):
"""A descriptor that validates sequences of client IDs."""
def __set__(self, instance, value):
super(ClientIDsField, self).__set__(instance, value)
if value:
if not isinstance(value, list):
raise ValueError("IDs must be stored in array: %s" % value)
value_types = set([type(x) for x in value])
if value_types != {int}:
bad_types = [x for x in value_types if x != int]
raise ValueError("Non-integer types in IDs: %s" % bad_types)
self.data[instance] = value
# -----------------------------------------------------------------------------
# Classes for requests validation.
class ClientsInterestsRequest(object):
"""
A class representing nested request to interests API.
"""
client_ids = ClientIDsField(required=True)
date = DateField(required=False, nullable=True)
def __init__(self, client_ids=None, date=None):
self.client_ids = client_ids
self.date = date
class OnlineScoreRequest(object):
"""
A class representing nested request to scoring API.
"""
first_name = CharField(required=False, nullable=True)
last_name = CharField(required=False, nullable=True)
email = EmailField(required=False, nullable=True)
phone = PhoneField(required=False, nullable=True)
birthday = BirthDayField(required=False, nullable=True)
gender = GenderField(required=False, nullable=True)
def __init__(
self,
first_name=None, last_name=None, email=None, phone=None,
birthday=None, gender=None
):
pairwise_validity = (
(first_name and last_name)
or (email and phone)
or (birthday and gender)
)
if not pairwise_validity:
raise ValueError("No pairs where both values are non-empty.")
self.first_name = first_name
self.last_name = last_name
self.email = email
self.phone = phone
self.birthday = birthday
self.gender = gender
class MethodRequest(object):
"""
A class representing top-level POST request to any of the two APIs.
"""
account = CharField(required=False, nullable=True)
login = CharField(required=True, nullable=True)
token = CharField(required=True, nullable=True)
arguments = ArgumentsField(required=True, nullable=True)
method = CharField(required=True, nullable=False)
@property
def is_admin(self):
return self.login == ADMIN_LOGIN
def __init__(
self,
account=None, login=None, token=None, arguments=None, method=None
):
self.account = account
self.login = login
self.token = token
self.arguments = arguments
self.method = method
# -----------------------------------------------------------------------------
# Functions with business logic.
def check_auth(request):
# type: (MethodRequest) -> type(None)
"""Authenticate request by token."""
if request.is_admin:
digest = hashlib.sha512(
datetime.datetime.now().strftime("%Y%m%d%H") + ADMIN_SALT
).hexdigest()
else:
digest = hashlib.sha512(
request.account + request.login + SALT
).hexdigest()
if digest == request.token:
return True
return False
def clients_interests_handler(request, context, storage):
# type: (MethodRequest, dict, InMemoryStorage) -> (str, int)
"""Handle request for clients interests."""
response, code = {}, OK
try:
nested_request = ClientsInterestsRequest(**request.arguments)
except TypeError:
msg = 'Extra arguments in request: '
logging.exception('%s :' % msg)
response, code = msg, INVALID_REQUEST
return response, code
except ValueError as e:
logging.exception('Validation error: ')
response, code = str(e), BAD_REQUEST
return response, code
try:
for client_id in nested_request.client_ids:
response[client_id] = get_interests(storage, client_id)
except (RuntimeError, KeyError):
msg = 'Can not read from storage'
logging.exception('%s: ' % msg)
response, code = msg, INTERNAL_ERROR
return response, code
context['nclients'] = len(nested_request.client_ids)
return response, code
def online_score_handler(request, context, storage):
# type: (MethodRequest, dict, InMemoryStorage) -> (str, int)
"""Handle request for scores."""
response, code = {}, OK
try:
nested_request = OnlineScoreRequest(**request.arguments)
except TypeError:
msg = 'Extra arguments in request: '
logging.exception('%s :' % msg)
response, code = msg, INVALID_REQUEST
return response, code
except ValueError as e:
logging.exception('Validation error: ')
response, code = str(e), BAD_REQUEST
return response, code
fields = [
'first_name', 'last_name', 'email', 'phone', 'birthday', 'gender'
]
context['has'] = [
field for field in fields if nested_request.__getattribute__(field)
]
score = 42 if request.is_admin else get_score(storage, **request.arguments)
response = {'score': score}
return response, code
def method_handler(request, context, storage):
# type: (dict, dict, InMemoryStorage) -> (str, int)
"""Redirect arbitrary request to corresponding handler."""
try:
request = MethodRequest(**request['body'])
except (KeyError, TypeError, ValueError) as e:
logging.exception('Can not validate POST request: ')
response, code = str(e), INVALID_REQUEST
return response, code
if not check_auth(request):
response, code = ERRORS[FORBIDDEN], FORBIDDEN
return response, code
handlers = {
'clients_interests': clients_interests_handler,
'online_score': online_score_handler
}
try:
response, code = handlers[request.method](request, context, storage)
except KeyError:
logging.exception('Unrecognized method: ')
response = 'Unrecognized method: %s' % request.method
code = INVALID_REQUEST
return response, code
# -----------------------------------------------------------------------------
# Web server.
class MainHTTPHandler(BaseHTTPRequestHandler):
router = {
"method": method_handler
}
storage = InMemoryStorage()
@staticmethod
def get_request_id(headers):
return headers.get('HTTP_X_REQUEST_ID', uuid.uuid4().hex)
def do_POST(self):
response, code = {}, OK
context = {"request_id": self.get_request_id(self.headers)}
request = None
try:
data_string = self.rfile.read(int(self.headers['Content-Length']))
request = json.loads(data_string)
except:
logging.exception('Can not parse JSON: ')
code = INVALID_REQUEST
if request:
path = self.path.strip("/")
msg = "%s: %s %s" % (self.path, data_string, context["request_id"])
logging.info(msg)
if path in self.router:
try:
response, code = self.router[path](
{"body": request, "headers": self.headers},
context, self.storage
)
except Exception, e:
logging.exception("Unexpected error: %s" % e)
code = INTERNAL_ERROR
else:
code = NOT_FOUND
self.send_response(code)
self.send_header("Content-Type", "application/json")
self.end_headers()
if code not in ERRORS:
r = {
"response": response,
"code": code
}
else:
r = {
"error": response or ERRORS.get(code, "Unknown Error"),
"code": code
}
context.update(r)
logging.info(context)
self.wfile.write(json.dumps(r))
return
# -----------------------------------------------------------------------------
# User interaction tools.
def parse_cli_args():
# type: () -> argparse.Namespace
"""
Parse arguments passed via Command Line Interface (CLI).
:return:
namespace with arguments
"""
parser = argparse.ArgumentParser(description='Requests to API')
parser.add_argument(
'-p', '--port', type=int, default=8080,
help='port that is listened by the server'
)
parser.add_argument(
'-l', '--logging_file', type=str, default=None,
help='full path to file where logs of script execution will be stored,'
'by default stdout is used instead of a file'
)
cli_args = parser.parse_args()
return cli_args
def set_logging(logging_filename):
# type: (Optional[str]) -> type(None)
"""
Set logging according to homework specification.
:param logging_filename:
name of file where logs are written
or `None` if stdout should be used
:return:
None
"""
if logging_filename is not None:
logging_dir = os.path.dirname(logging_filename)
if not os.path.isdir(logging_dir):
os.makedirs(logging_dir)
msg_format = '[%(asctime)s] %(levelname).1s %(message)s'
datetime_fmt = '%Y.%m.%d %H:%M:%S'
logging.basicConfig(
filename=logging_filename,
format=msg_format,
datefmt=datetime_fmt,
level=logging.INFO
)
logging.info("Logging is set.")
def main():
cli_args = parse_cli_args()
set_logging(cli_args.logging_file)
server = HTTPServer(("localhost", cli_args.port), MainHTTPHandler)
logging.info("Starting server at %s" % cli_args.port)
try:
server.serve_forever()
except KeyboardInterrupt:
pass
except:
logging.exception('Unhandled exception: ')
server.server_close()
if __name__ == "__main__":
main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script starts a web service which can process POST requests
of a particular structure (see homework description for details).
To study all possible options of configuring a service, execute this
from a terminal: `python api.py -h`.
To launch a service with default settings, execute:
`python api.py`.
After server is started, you can send requests to it. A sample commands
that can be executed from a terminal are provided at the `README.md`
file of the current directory.
"""
import os
import argparse
import json
import datetime
import logging
import hashlib
import uuid
from abc import ABCMeta, abstractmethod
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from weakref import WeakKeyDictionary
from scoring import get_score, get_interests
from store import InMemoryStorage
SALT = "Otus"
ADMIN_LOGIN = "admin"
ADMIN_SALT = "42"
OK = 200
BAD_REQUEST = 400
FORBIDDEN = 403
NOT_FOUND = 404
INVALID_REQUEST = 422
INTERNAL_ERROR = 500
ERRORS = {
BAD_REQUEST: "Bad Request",
FORBIDDEN: "Forbidden",
NOT_FOUND: "Not Found",
INVALID_REQUEST: "Invalid Request",
INTERNAL_ERROR: "Internal Server Error",
}
UNKNOWN = 0
MALE = 1
FEMALE = 2
GENDERS = {
UNKNOWN: "unknown",
MALE: "male",
FEMALE: "female",
}
# -----------------------------------------------------------------------------
# Descriptors for fields validation.
class BaseField(object):
"""
Abstract descriptor representing a field of POST request.
:param required:
if it is `True`, field value must be passed explicitly
:param nullable:
if it is `True`, filed value can be empty, default is `False`
"""
__metaclass__ = ABCMeta
def __init__(self, required, nullable=False):
self.data = WeakKeyDictionary()
self.required = required
self.nullable = nullable
def __get__(self, instance, owner):
value = self.data.get(instance)
return value
@abstractmethod
def __set__(self, instance, value):
# Check that a value is in accordance with instance settings.
if value is None and self.required:
raise ValueError("Required fields must be passed explicitly.")
if not value and not self.nullable:
raise ValueError("Empty value in a non-nullable field: %s" % value)
class CharField(BaseField):
"""A descriptor that prohibits non-char values."""
def __set__(self, instance, value):
super(CharField, self).__set__(instance, value)
if value and not isinstance(value, (str, unicode)):
raise ValueError("Non-char values are not allowed: %s" % value)
self.data[instance] = value
class ArgumentsField(BaseField):
"""A descriptor that validates nested requests."""
def __set__(self, instance, value):
super(ArgumentsField, self).__set__(instance, value)
if value and not isinstance(value, dict):
raise ValueError("Non-dict values are not allowed: %s" % value)
self.data[instance] = value
class EmailField(CharField):
"""A descriptor that validates email address."""
def __set__(self, instance, value):
super(EmailField, self).__set__(instance, value)
if value:
split_value = value.split('@')
email_is_invalid = (
len(split_value) != 2
or len(split_value[0]) == 0
or len(split_value[1]) == 0
or '.' not in split_value[1]
)
if email_is_invalid:
raise ValueError("It does not look like an email: %s" % value)
self.data[instance] = value
class PhoneField(BaseField):
"""A descriptor that validates phone number."""
def __set__(self, instance, value):
super(PhoneField, self).__set__(instance, value)
if value:
if not isinstance(value, (str, unicode, int)):
raise ValueError("Value is neither char nor int: %s" % value)
try:
value = int(value)
except ValueError:
raise ValueError("Non-digits are in phone value: %s" % value)
value = str(value)
phone_validity = (
len(value) == 11
and value.startswith('7')
)
if not phone_validity:
raise ValueError("It does not look like a phone: %s" % value)
self.data[instance] = value
class DateField(CharField):
"""
A descriptor that prohibits non-date values
and forces 'DD.MM.YYYY' format.
"""
def __set__(self, instance, value):
super(DateField, self).__set__(instance, value)
if value:
try:
datetime.datetime.strptime(value, '%d.%m.%Y')
except ValueError:
raise ValueError("Date %s not in format 'DD.MM.YYYY'" % value)
self.data[instance] = value
class BirthDayField(DateField):
"""A descriptor that validates birthdays."""
def __set__(self, instance, value):
super(BirthDayField, self).__set__(instance, value)
if value:
today = datetime.date.today()
value_as_date = (
datetime.datetime.strptime(value, '%d.%m.%Y').date()
)
days_per_year = 365.25
max_age = 70
if (today - value_as_date).days / days_per_year > max_age:
raise ValueError('Sorry, too distant birthday: %s' % value)
self.data[instance] = value
class GenderField(BaseField):
"""A descriptor that validates gender codes."""
def __set__(self, instance, value):
super(GenderField, self).__set__(instance, value)
if value and value not in GENDERS.keys():
raise ValueError("Invalid gender code: %s" % value)
self.data[instance] = value
class ClientIDsField(BaseField):
"""A descriptor that validates sequences of client IDs."""
def __set__(self, instance, value):
super(ClientIDsField, self).__set__(instance, value)
if value:
if not isinstance(value, list):
raise ValueError("IDs must be stored in array: %s" % value)
value_types = set([type(x) for x in value])
if value_types != {int}:
bad_types = [x for x in value_types if x != int]
raise ValueError("Non-integer types in IDs: %s" % bad_types)
self.data[instance] = value
# -----------------------------------------------------------------------------
# Classes for requests validation.
class ClientsInterestsRequest(object):
"""
A class representing nested request to interests API.
"""
client_ids = ClientIDsField(required=True)
date = DateField(required=False, nullable=True)
def __init__(self, client_ids=None, date=None):
self.client_ids = client_ids
self.date = date
class OnlineScoreRequest(object):
"""
A class representing nested request to scoring API.
"""
first_name = CharField(required=False, nullable=True)
last_name = CharField(required=False, nullable=True)
email = EmailField(required=False, nullable=True)
phone = PhoneField(required=False, nullable=True)
birthday = BirthDayField(required=False, nullable=True)
gender = GenderField(required=False, nullable=True)
def __init__(
self,
first_name=None, last_name=None, email=None, phone=None,
birthday=None, gender=None
):
pairwise_validity = (
(first_name and last_name)
or (email and phone)
or (birthday and gender)
)
if not pairwise_validity:
raise ValueError("No pairs where both values are non-empty.")
self.first_name = first_name
self.last_name = last_name
self.email = email
self.phone = phone
self.birthday = birthday
self.gender = gender
class MethodRequest(object):
"""
A class representing top-level POST request to any of the two APIs.
"""
account = CharField(required=False, nullable=True)
login = CharField(required=True, nullable=True)
token = CharField(required=True, nullable=True)
arguments = ArgumentsField(required=True, nullable=True)
method = CharField(required=True, nullable=False)
@property
def is_admin(self):
return self.login == ADMIN_LOGIN
def __init__(
self,
account=None, login=None, token=None, arguments=None, method=None
):
self.account = account
self.login = login
self.token = token
self.arguments = arguments
self.method = method
# -----------------------------------------------------------------------------
# Functions with business logic.
def check_auth(request):
# type: (MethodRequest) -> type(None)
"""Authenticate request by token."""
if request.is_admin:
digest = hashlib.sha512(
datetime.datetime.now().strftime("%Y%m%d%H") + ADMIN_SALT
).hexdigest()
else:
digest = hashlib.sha512(
request.account + request.login + SALT
).hexdigest()
if digest == request.token:
return True
return False
def clients_interests_handler(request, context, storage):
# type: (MethodRequest, dict, InMemoryStorage) -> (str, int)
"""Handle request for clients interests."""
response, code = {}, OK
try:
nested_request = ClientsInterestsRequest(**request.arguments)
except TypeError:
msg = 'Extra arguments in request: '
logging.exception('%s :' % msg)
response, code = msg, INVALID_REQUEST
return response, code
except ValueError as e:
logging.exception('Validation error: ')
response, code = str(e), BAD_REQUEST
return response, code
try:
for client_id in nested_request.client_ids:
response[client_id] = get_interests(storage, client_id)
except (RuntimeError, KeyError):
msg = 'Can not read from storage'
logging.exception('%s: ' % msg)
response, code = msg, INTERNAL_ERROR
return response, code
context['nclients'] = len(nested_request.client_ids)
return response, code
def online_score_handler(request, context, storage):
# type: (MethodRequest, dict, InMemoryStorage) -> (str, int)
"""Handle request for scores."""
response, code = {}, OK
try:
nested_request = OnlineScoreRequest(**request.arguments)
except TypeError:
msg = 'Extra arguments in request: '
logging.exception('%s :' % msg)
response, code = msg, INVALID_REQUEST
return response, code
except ValueError as e:
logging.exception('Validation error: ')
response, code = str(e), BAD_REQUEST
return response, code
fields = [
'first_name', 'last_name', 'email', 'phone', 'birthday', 'gender'
]
context['has'] = [
field for field in fields if nested_request.__getattribute__(field)
]
score = 42 if request.is_admin else get_score(storage, **request.arguments)
response = {'score': score}
return response, code
def method_handler(request, context, storage):
# type: (dict, dict, InMemoryStorage) -> (str, int)
"""Redirect arbitrary request to corresponding handler."""
try:
request = MethodRequest(**request['body'])
except (KeyError, TypeError, ValueError) as e:
logging.exception('Can not validate POST request: ')
response, code = str(e), INVALID_REQUEST
return response, code
if not check_auth(request):
response, code = ERRORS[FORBIDDEN], FORBIDDEN
return response, code
handlers = {
'clients_interests': clients_interests_handler,
'online_score': online_score_handler
}
try:
response, code = handlers[request.method](request, context, storage)
except KeyError:
logging.exception('Unrecognized method: ')
response = 'Unrecognized method: %s' % request.method
code = INVALID_REQUEST
return response, code
# -----------------------------------------------------------------------------
# Web server.
class MainHTTPHandler(BaseHTTPRequestHandler):
router = {
"method": method_handler
}
storage = InMemoryStorage()
@staticmethod
def get_request_id(headers):
return headers.get('HTTP_X_REQUEST_ID', uuid.uuid4().hex)
def do_POST(self):
response, code = {}, OK
context = {"request_id": self.get_request_id(self.headers)}
request = None
try:
data_string = self.rfile.read(int(self.headers['Content-Length']))
request = json.loads(data_string)
except:
logging.exception('Can not parse JSON: ')
code = INVALID_REQUEST
if request:
path = self.path.strip("/")
msg = "%s: %s %s" % (self.path, data_string, context["request_id"])
logging.info(msg)
if path in self.router:
try:
response, code = self.router[path](
{"body": request, "headers": self.headers},
context, self.storage
)
except Exception, e:
logging.exception("Unexpected error: %s" % e)
code = INTERNAL_ERROR
else:
code = NOT_FOUND
self.send_response(code)
self.send_header("Content-Type", "application/json")
self.end_headers()
if code not in ERRORS:
r = {
"response": response,
"code": code
}
else:
r = {
"error": response or ERRORS.get(code, "Unknown Error"),
"code": code
}
context.update(r)
logging.info(context)
self.wfile.write(json.dumps(r))
return
# -----------------------------------------------------------------------------
# User interaction tools.
def parse_cli_args():
# type: () -> argparse.Namespace
"""
Parse arguments passed via Command Line Interface (CLI).
:return:
namespace with arguments
"""
parser = argparse.ArgumentParser(description='Requests to API')
parser.add_argument(
'-p', '--port', type=int, default=8080,
help='port that is listened by the server'
)
parser.add_argument(
'-l', '--logging_file', type=str, default=None,
help='full path to file where logs of script execution will be stored,'
'by default stdout is used instead of a file'
)
cli_args = parser.parse_args()
return cli_args
def set_logging(logging_filename):
# type: (Optional[str]) -> type(None)
"""
Set logging according to homework specification.
:param logging_filename:
name of file where logs are written
or `None` if stdout should be used
:return:
None
"""
if logging_filename is not None:
logging_dir = os.path.dirname(logging_filename)
if not os.path.isdir(logging_dir):
os.makedirs(logging_dir)
msg_format = '[%(asctime)s] %(levelname).1s %(message)s'
datetime_fmt = '%Y.%m.%d %H:%M:%S'
logging.basicConfig(
filename=logging_filename,
format=msg_format,
datefmt=datetime_fmt,
level=logging.INFO
)
logging.info("Logging is set.")
def main():
cli_args = parse_cli_args()
set_logging(cli_args.logging_file)
server = HTTPServer(("localhost", cli_args.port), MainHTTPHandler)
logging.info("Starting server at %s" % cli_args.port)
try:
server.serve_forever()
except KeyboardInterrupt:
pass
except:
logging.exception('Unhandled exception: ')
server.server_close()
if __name__ == "__main__":
main()
| en | 0.692112 | #!/usr/bin/env python # -*- coding: utf-8 -*- This script starts a web service which can process POST requests of a particular structure (see homework description for details). To study all possible options of configuring a service, execute this from a terminal: `python api.py -h`. To launch a service with default settings, execute: `python api.py`. After server is started, you can send requests to it. A sample commands that can be executed from a terminal are provided at the `README.md` file of the current directory. # ----------------------------------------------------------------------------- # Descriptors for fields validation. Abstract descriptor representing a field of POST request. :param required: if it is `True`, field value must be passed explicitly :param nullable: if it is `True`, filed value can be empty, default is `False` # Check that a value is in accordance with instance settings. A descriptor that prohibits non-char values. A descriptor that validates nested requests. A descriptor that validates email address. A descriptor that validates phone number. A descriptor that prohibits non-date values and forces 'DD.MM.YYYY' format. A descriptor that validates birthdays. A descriptor that validates gender codes. A descriptor that validates sequences of client IDs. # ----------------------------------------------------------------------------- # Classes for requests validation. A class representing nested request to interests API. A class representing nested request to scoring API. A class representing top-level POST request to any of the two APIs. # ----------------------------------------------------------------------------- # Functions with business logic. # type: (MethodRequest) -> type(None) Authenticate request by token. # type: (MethodRequest, dict, InMemoryStorage) -> (str, int) Handle request for clients interests. # type: (MethodRequest, dict, InMemoryStorage) -> (str, int) Handle request for scores. 
# type: (dict, dict, InMemoryStorage) -> (str, int) Redirect arbitrary request to corresponding handler. # ----------------------------------------------------------------------------- # Web server. # ----------------------------------------------------------------------------- # User interaction tools. # type: () -> argparse.Namespace Parse arguments passed via Command Line Interface (CLI). :return: namespace with arguments # type: (Optional[str]) -> type(None) Set logging according to homework specification. :param logging_filename: name of file where logs are written or `None` if stdout should be used :return: None | 3.454912 | 3 |
checker/shell_commands.py | arcturus5340/julia | 13 | 6615053 | precompile = {
'c': 'gcc -o {0}.exec {0} 2> {0}.error',
'cpp11': 'g++ -static -lm -s -x c++ -O2 -std=c++11 -o {0}.exec {0} 2> {0}.error',
'cpp14': 'g++ -static -lm -s -x c++ -O2 -std=c++14 -o {0}.exec {0} 2> {0}.error',
'java': 'javac Main.java 2> {0}.error',
'NASM': 'nasm -f elf {0} -o {0}.o 2> {0}.error; ld -melf_i386 {0}.o -o {0}.exec 2> {0}.error',
'pabc': 'mono ./pascal/pabcnetc.exe {0} 2> {0}.error',
}
run = {
'c': '{0}.exec 2> {0}.error',
'cpp11': '{0}.exec 2> {0}.error',
'cpp14': '{0}.exec 2> {0}.error',
'java': 'java Main Main.class 2> {0}.error',
'nasm': '{0}.exec 2> {0}.error',
'pabc': 'mono {0}.exe 2> {0}.error',
'php': 'php {0} 2> {0}.error',
'python2': 'python2 {0} 2> {0}.error',
'python3': 'python3 {0} 2> {0}.error',
} | precompile = {
'c': 'gcc -o {0}.exec {0} 2> {0}.error',
'cpp11': 'g++ -static -lm -s -x c++ -O2 -std=c++11 -o {0}.exec {0} 2> {0}.error',
'cpp14': 'g++ -static -lm -s -x c++ -O2 -std=c++14 -o {0}.exec {0} 2> {0}.error',
'java': 'javac Main.java 2> {0}.error',
'NASM': 'nasm -f elf {0} -o {0}.o 2> {0}.error; ld -melf_i386 {0}.o -o {0}.exec 2> {0}.error',
'pabc': 'mono ./pascal/pabcnetc.exe {0} 2> {0}.error',
}
run = {
'c': '{0}.exec 2> {0}.error',
'cpp11': '{0}.exec 2> {0}.error',
'cpp14': '{0}.exec 2> {0}.error',
'java': 'java Main Main.class 2> {0}.error',
'nasm': '{0}.exec 2> {0}.error',
'pabc': 'mono {0}.exe 2> {0}.error',
'php': 'php {0} 2> {0}.error',
'python2': 'python2 {0} 2> {0}.error',
'python3': 'python3 {0} 2> {0}.error',
} | none | 1 | 1.799586 | 2 | |
models.py | gve-sw/gve_devnet_meraki_wireless_visibility_dashboard | 0 | 6615054 | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
#DB Models
class APStatus(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), unique=False, nullable=False)
mac = db.Column(db.String(20), unique=False, nullable=False)
start_time = db.Column(db.DateTime, unique=False, nullable=False)
end_time = db.Column(db.DateTime, unique=False, nullable=False)
class System(db.Model):
id = db.Column(db.Integer, primary_key=True)
start = db.Column(db.DateTime, unique=True, nullable=False)
class APClient(db.Model):
mac = db.Column(db.String(20), primary_key=True)
name = db.Column(db.String(50), unique=False, nullable=False)
count = db.Column(db.Integer, unique=False, nullable=False)
alert = db.Column(db.Boolean, unique=False, nullable=False)
class APBandwidth(db.Model):
mac = db.Column(db.String(20), primary_key=True)
name = db.Column(db.String(50), unique=False, nullable=False)
bandwidth = db.Column(db.Integer, unique=False, nullable=False)
alert = db.Column(db.Boolean, unique=False, nullable=False)
class Client(db.Model):
mac = db.Column(db.String(20), primary_key=True)
name = db.Column(db.String(50), unique=False, nullable=False)
client_id = db.Column(db.String(20), unique=True, nullable=True)
ip = db.Column(db.String(20), unique=False, nullable=True)
ap = db.Column(db.String(50), unique=False, nullable=True)
ssid = db.Column(db.String(20), unique=False, nullable=True)
snr = db.Column(db.Integer, unique=False, nullable=True)
rssi = db.Column(db.Integer, unique=False, nullable=True)
vip = db.Column(db.Boolean, unique=False, nullable=False)
alert = db.Column(db.Boolean, unique=False, nullable=False) | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
#DB Models
class APStatus(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), unique=False, nullable=False)
mac = db.Column(db.String(20), unique=False, nullable=False)
start_time = db.Column(db.DateTime, unique=False, nullable=False)
end_time = db.Column(db.DateTime, unique=False, nullable=False)
class System(db.Model):
id = db.Column(db.Integer, primary_key=True)
start = db.Column(db.DateTime, unique=True, nullable=False)
class APClient(db.Model):
mac = db.Column(db.String(20), primary_key=True)
name = db.Column(db.String(50), unique=False, nullable=False)
count = db.Column(db.Integer, unique=False, nullable=False)
alert = db.Column(db.Boolean, unique=False, nullable=False)
class APBandwidth(db.Model):
mac = db.Column(db.String(20), primary_key=True)
name = db.Column(db.String(50), unique=False, nullable=False)
bandwidth = db.Column(db.Integer, unique=False, nullable=False)
alert = db.Column(db.Boolean, unique=False, nullable=False)
class Client(db.Model):
mac = db.Column(db.String(20), primary_key=True)
name = db.Column(db.String(50), unique=False, nullable=False)
client_id = db.Column(db.String(20), unique=True, nullable=True)
ip = db.Column(db.String(20), unique=False, nullable=True)
ap = db.Column(db.String(50), unique=False, nullable=True)
ssid = db.Column(db.String(20), unique=False, nullable=True)
snr = db.Column(db.Integer, unique=False, nullable=True)
rssi = db.Column(db.Integer, unique=False, nullable=True)
vip = db.Column(db.Boolean, unique=False, nullable=False)
alert = db.Column(db.Boolean, unique=False, nullable=False) | en | 0.373863 | #DB Models | 2.314644 | 2 |
test_project/tests/fixtures/orders.py | wishmaestro/drf-fat-models | 0 | 6615055 | import pytest
from apps.orders.models import Order
@pytest.fixture
def create_orders(request, create_customers):
request.cls.order1 = Order.objects.create(
description="Description 1",
customer=request.cls.customer1
)
request.cls.order2 = Order.objects.create(
description="Description 2",
customer=request.cls.customer2
)
| import pytest
from apps.orders.models import Order
@pytest.fixture
def create_orders(request, create_customers):
request.cls.order1 = Order.objects.create(
description="Description 1",
customer=request.cls.customer1
)
request.cls.order2 = Order.objects.create(
description="Description 2",
customer=request.cls.customer2
)
| none | 1 | 2.280666 | 2 | |
examples/io_pubsub.py | pvanallen/esp32-getstarted | 71 | 6615056 | <filename>examples/io_pubsub.py
#
# example ESP8266 or ESP32 Huzzah mqtt publish/subscribe with io.adafruit.com
# <NAME>
#
# for more info see: https://github.com/micropython/micropython-lib/tree/master/umqtt.simple
# https://github.com/micropython/micropython-lib/blob/master/umqtt.simple/example_sub.py
#
import network
import time
import machine
from umqtt.simple import MQTTClient
pin = machine.Pin(13, machine.Pin.OUT) # LED on the board
def sub_cb(topic, msg):
value = float(str(msg,'utf-8'))
print("subscribed value = {}".format(value))
if value > 4:
pin.value(1)
else:
pin.value(0)
# configuration from io.adafruit.com
#
ADAFRUIT_IO_USERNAME = "<enter your Adafruit Username here>" # can be found by clicking on "MY KEY" when viewing your account on io.adafruit.com
ADAFRUIT_IO_KEY = "<enter your Adafruit IO Key here>" # can be found by clicking on "MY KEY" when viewing your account on io.adafruit.com
# only one program with the same MqttClient Name can access the Adarfuit service at a time
myMqttClient = "phils_client1" # replace with your own client name unique to you and this code instance
adafruitFeed = ADAFRUIT_IO_USERNAME + "/feeds/test" # replace "test" with your feed name
adafruitIoUrl = "io.adafruit.com"
#
# connect ESP to Adafruit IO using MQTT
def connect_mqtt():
c = MQTTClient(myMqttClient, adafruitIoUrl, 0, ADAFRUIT_IO_USERNAME, ADAFRUIT_IO_KEY, keepalive=10000)
c.set_callback(sub_cb)
c.connect()
c.subscribe(bytes(adafruitFeed,'utf-8'))
return c
#c = MQTTClient(myMqttClient, adafruitIoUrl, 0, adafruitUsername, adafruitAioKey)
c = connect_mqtt()
for i in range(15):
print(i)
c.publish(adafruitFeed, str(i))
time.sleep(2)
c.check_msg()
c.disconnect()
| <filename>examples/io_pubsub.py
#
# example ESP8266 or ESP32 Huzzah mqtt publish/subscribe with io.adafruit.com
# <NAME>
#
# for more info see: https://github.com/micropython/micropython-lib/tree/master/umqtt.simple
# https://github.com/micropython/micropython-lib/blob/master/umqtt.simple/example_sub.py
#
import network
import time
import machine
from umqtt.simple import MQTTClient
pin = machine.Pin(13, machine.Pin.OUT) # LED on the board
def sub_cb(topic, msg):
value = float(str(msg,'utf-8'))
print("subscribed value = {}".format(value))
if value > 4:
pin.value(1)
else:
pin.value(0)
# configuration from io.adafruit.com
#
ADAFRUIT_IO_USERNAME = "<enter your Adafruit Username here>" # can be found by clicking on "MY KEY" when viewing your account on io.adafruit.com
ADAFRUIT_IO_KEY = "<enter your Adafruit IO Key here>" # can be found by clicking on "MY KEY" when viewing your account on io.adafruit.com
# only one program with the same MqttClient Name can access the Adarfuit service at a time
myMqttClient = "phils_client1" # replace with your own client name unique to you and this code instance
adafruitFeed = ADAFRUIT_IO_USERNAME + "/feeds/test" # replace "test" with your feed name
adafruitIoUrl = "io.adafruit.com"
#
# connect ESP to Adafruit IO using MQTT
def connect_mqtt():
c = MQTTClient(myMqttClient, adafruitIoUrl, 0, ADAFRUIT_IO_USERNAME, ADAFRUIT_IO_KEY, keepalive=10000)
c.set_callback(sub_cb)
c.connect()
c.subscribe(bytes(adafruitFeed,'utf-8'))
return c
#c = MQTTClient(myMqttClient, adafruitIoUrl, 0, adafruitUsername, adafruitAioKey)
c = connect_mqtt()
for i in range(15):
print(i)
c.publish(adafruitFeed, str(i))
time.sleep(2)
c.check_msg()
c.disconnect()
| en | 0.70996 | # # example ESP8266 or ESP32 Huzzah mqtt publish/subscribe with io.adafruit.com # <NAME> # # for more info see: https://github.com/micropython/micropython-lib/tree/master/umqtt.simple # https://github.com/micropython/micropython-lib/blob/master/umqtt.simple/example_sub.py # # LED on the board # configuration from io.adafruit.com # # can be found by clicking on "MY KEY" when viewing your account on io.adafruit.com # can be found by clicking on "MY KEY" when viewing your account on io.adafruit.com # only one program with the same MqttClient Name can access the Adarfuit service at a time # replace with your own client name unique to you and this code instance # replace "test" with your feed name # # connect ESP to Adafruit IO using MQTT #c = MQTTClient(myMqttClient, adafruitIoUrl, 0, adafruitUsername, adafruitAioKey) | 3.114513 | 3 |
omniglot/sense.py | DennisMerkus/omniglot | 0 | 6615057 | from typing import Dict, List, Optional, Set
from omnilingual import LanguageCode
from pydantic import BaseModel
class SourceWord(BaseModel):
language: LanguageCode
word: Optional[str]
full: bool
tags: Set[str] = set()
class Sense(BaseModel):
definitions: Dict[LanguageCode, List[str]]
tags: Set[str] = set()
information: List[str] = []
references: List[str] = []
antonyms: List[str] = []
synonyms: List[str] = []
source_language_words: List[SourceWord] = []
def to_bson(self):
return {
"definitions": {
language.value: definitions
for language, definitions in self.definitions.items()
},
"tags": list(self.tags),
"information": self.information,
"references": self.references,
"antonyms": self.antonyms,
"synonyms": self.synonyms,
"source_language_words": self.source_language_words,
}
| from typing import Dict, List, Optional, Set
from omnilingual import LanguageCode
from pydantic import BaseModel
class SourceWord(BaseModel):
language: LanguageCode
word: Optional[str]
full: bool
tags: Set[str] = set()
class Sense(BaseModel):
definitions: Dict[LanguageCode, List[str]]
tags: Set[str] = set()
information: List[str] = []
references: List[str] = []
antonyms: List[str] = []
synonyms: List[str] = []
source_language_words: List[SourceWord] = []
def to_bson(self):
return {
"definitions": {
language.value: definitions
for language, definitions in self.definitions.items()
},
"tags": list(self.tags),
"information": self.information,
"references": self.references,
"antonyms": self.antonyms,
"synonyms": self.synonyms,
"source_language_words": self.source_language_words,
}
| none | 1 | 2.668131 | 3 | |
vocoders/__init__.py | BenAAndrew/Voice-API | 2 | 6615058 | <reponame>BenAAndrew/Voice-API<gh_stars>1-10
from vocoders.hifigan import Hifigan # noqa
| from vocoders.hifigan import Hifigan # noqa | none | 1 | 1.028868 | 1 | |
TangoPi/input/keyboard.py | sierra-m/TangoBot | 0 | 6615059 | <gh_stars>0
"""
MIT License
Copyright (c) 2020 <NAME> and <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import tkinter
from tango import TangoBot
from util.enums import Direction
from util.scheduler import Scheduler
class KeyboardControl:
def __init__(self, window: tkinter.Tk):
self.root = window
self.bot = TangoBot()
self.failsafe_timer = Scheduler(7, lambda: self.stop_event(None))
self.root.after(200, self.update_timer)
self.velocity = 0 # driving
self.rotational = 0 # steering
self.head_swivel = 0
self.head_tilt = 0
self.waist_turn = 0
self.root.bind('<Up>', self.drive_event)
self.root.bind('<Down>', self.drive_event)
self.root.bind('<Left>', self.steer_event)
self.root.bind('<Right>', self.steer_event)
self.root.bind('<space>', self.stop_event)
self.root.bind('q', self.head_swivel_event)
self.root.bind('e', self.head_swivel_event)
self.root.bind('r', self.head_tilt_event)
self.root.bind('f', self.head_tilt_event)
self.root.bind('a', self.waist_turn_event)
self.root.bind('s', self.waist_turn_event)
self.root.bind('d', self.waist_turn_event)
self.root.bind('p', self.reset_event)
def update_timer(self):
self.failsafe_timer.update()
self.root.after(200, self.update_timer)
# Seven speeds total
def drive_event(self, event):
if event.keysym == 'Up':
self.velocity += 0.3
elif event.keysym == 'Down':
self.velocity -= 0.3
if self.velocity > 0.9:
self.velocity = 0.9
elif self.velocity < -0.9:
self.velocity = -0.9
self.failsafe_timer.reset()
self.bot.drive(self.velocity)
def steer_event(self, event):
if event.keysym == 'Left':
self.rotational += 0.5
elif event.keysym == 'Right':
self.rotational -= 0.5
if self.rotational > 0.5:
self.rotational = 0.5
elif self.rotational < -0.5:
self.rotational = -0.5
direction = Direction.RIGHT if self.rotational > 0 else Direction.LEFT
self.failsafe_timer.reset()
self.bot.steer(direction, abs(self.rotational))
def stop_event(self, event):
self.velocity = 0
self.rotational = 0
self.bot.drive(self.velocity)
self.bot.steer(Direction.LEFT, 0)
def reset_event(self, event):
self.head_swivel = 0
self.head_tilt = 0
self.waist_turn = 0
self.bot.swivel_head(self.head_swivel)
self.bot.tilt_head(self.head_tilt)
self.bot.turn_waist(self.waist_turn)
self.stop_event(event)
# Five degrees of freedom
def head_swivel_event(self, event):
if event.char == 'q':
self.head_swivel += 0.5
elif event.char == 'e':
self.head_swivel -= 0.5
if self.head_swivel > 1:
self.head_swivel = 1
elif self.head_swivel < -1:
self.head_swivel = -1
self.bot.swivel_head(self.head_swivel)
def head_tilt_event(self, event):
if event.char == 'r':
self.head_tilt += 0.5
elif event.char == 'f':
self.head_tilt -= 0.5
if self.head_tilt > 1:
self.head_tilt = 1
elif self.head_tilt < -1:
self.head_tilt = -1
self.bot.tilt_head(self.head_tilt)
def waist_turn_event(self, event):
if event.char == 'a':
self.waist_turn = 0.5
elif event.char == 's':
self.waist_turn = 0
elif event.char == 'd':
self.waist_turn = -0.5
self.bot.turn_waist(self.waist_turn)
| """
MIT License
Copyright (c) 2020 <NAME> and <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import tkinter
from tango import TangoBot
from util.enums import Direction
from util.scheduler import Scheduler
class KeyboardControl:
def __init__(self, window: tkinter.Tk):
self.root = window
self.bot = TangoBot()
self.failsafe_timer = Scheduler(7, lambda: self.stop_event(None))
self.root.after(200, self.update_timer)
self.velocity = 0 # driving
self.rotational = 0 # steering
self.head_swivel = 0
self.head_tilt = 0
self.waist_turn = 0
self.root.bind('<Up>', self.drive_event)
self.root.bind('<Down>', self.drive_event)
self.root.bind('<Left>', self.steer_event)
self.root.bind('<Right>', self.steer_event)
self.root.bind('<space>', self.stop_event)
self.root.bind('q', self.head_swivel_event)
self.root.bind('e', self.head_swivel_event)
self.root.bind('r', self.head_tilt_event)
self.root.bind('f', self.head_tilt_event)
self.root.bind('a', self.waist_turn_event)
self.root.bind('s', self.waist_turn_event)
self.root.bind('d', self.waist_turn_event)
self.root.bind('p', self.reset_event)
def update_timer(self):
self.failsafe_timer.update()
self.root.after(200, self.update_timer)
# Seven speeds total
def drive_event(self, event):
if event.keysym == 'Up':
self.velocity += 0.3
elif event.keysym == 'Down':
self.velocity -= 0.3
if self.velocity > 0.9:
self.velocity = 0.9
elif self.velocity < -0.9:
self.velocity = -0.9
self.failsafe_timer.reset()
self.bot.drive(self.velocity)
def steer_event(self, event):
if event.keysym == 'Left':
self.rotational += 0.5
elif event.keysym == 'Right':
self.rotational -= 0.5
if self.rotational > 0.5:
self.rotational = 0.5
elif self.rotational < -0.5:
self.rotational = -0.5
direction = Direction.RIGHT if self.rotational > 0 else Direction.LEFT
self.failsafe_timer.reset()
self.bot.steer(direction, abs(self.rotational))
def stop_event(self, event):
self.velocity = 0
self.rotational = 0
self.bot.drive(self.velocity)
self.bot.steer(Direction.LEFT, 0)
def reset_event(self, event):
self.head_swivel = 0
self.head_tilt = 0
self.waist_turn = 0
self.bot.swivel_head(self.head_swivel)
self.bot.tilt_head(self.head_tilt)
self.bot.turn_waist(self.waist_turn)
self.stop_event(event)
# Five degrees of freedom
def head_swivel_event(self, event):
if event.char == 'q':
self.head_swivel += 0.5
elif event.char == 'e':
self.head_swivel -= 0.5
if self.head_swivel > 1:
self.head_swivel = 1
elif self.head_swivel < -1:
self.head_swivel = -1
self.bot.swivel_head(self.head_swivel)
def head_tilt_event(self, event):
if event.char == 'r':
self.head_tilt += 0.5
elif event.char == 'f':
self.head_tilt -= 0.5
if self.head_tilt > 1:
self.head_tilt = 1
elif self.head_tilt < -1:
self.head_tilt = -1
self.bot.tilt_head(self.head_tilt)
def waist_turn_event(self, event):
if event.char == 'a':
self.waist_turn = 0.5
elif event.char == 's':
self.waist_turn = 0
elif event.char == 'd':
self.waist_turn = -0.5
self.bot.turn_waist(self.waist_turn) | en | 0.772206 | MIT License Copyright (c) 2020 <NAME> and <NAME> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # driving # steering # Seven speeds total # Five degrees of freedom | 2.220959 | 2 |
autumn/settings/constants.py | jtrauer/AuTuMN | 0 | 6615060 | from datetime import datetime
COVID_BASE_DATETIME = datetime(2019, 12, 31)
| from datetime import datetime
COVID_BASE_DATETIME = datetime(2019, 12, 31)
| none | 1 | 1.58399 | 2 | |
ip_bot/screens/__init__.py | DavisDmitry/ip-civsoc-bot | 2 | 6615061 | <reponame>DavisDmitry/ip-civsoc-bot
from aiogram import types
from ip_bot.screens import join
from ip_bot.screens.base import BaseTextScreen
from ip_bot.screens.remove_reply_keyboard import RemoveReplyKeyboard
from ip_bot.screens.start import Start
__all__ = ['BaseTextScreen', 'RemoveReplyKeyboard', 'Start',
'join', 'ContactSet',
'MessageForwarded', 'MessageFromUser', 'MessageFromChat',
'AboutCivsoc', 'AboutFraction']
class ContactSet(BaseTextScreen):
"""
The first message when trying to contact is with an explanation
of what to do.
"""
def __init__(self, recipient: str):
self.text = 'Отправь сообщение и я перешлю его {}.'.format(recipient)
self._create_reply_markup()
def _create_reply_markup(self):
self.reply_markup = types.InlineKeyboardMarkup()
self.reply_markup.add(
types.InlineKeyboardButton('<< Назад', callback_data='start')
)
class MessageForwarded(BaseTextScreen):
def __init__(self, recipient: str):
self.text = 'Ваше сообщение отправлено {}.'.format(recipient)
class MessageFromUser(BaseTextScreen):
"""A message from a user for a contact chat."""
def __init__(self, user: types.User, text: str):
if user.username:
name = '@' + user.username
else:
name = user.first_name
self.text = 'От <a href="tg://user?id={}">{}</a>\n\n{}'.format(user.id,
name,
text)
class MessageFromChat(BaseTextScreen):
def __init__(self, sender: str, text: str):
self.text = (
'#{}\n\n{}\n\n'
'<i>Вы можете снова отправить сообщение, ответив на это.</i>')\
.format(sender, text)
self._create_reply_markup()
def _create_reply_markup(self):
self.reply_markup = types.InlineKeyboardMarkup()
self.reply_markup.add(
types.InlineKeyboardButton('<< В главное меню',
callback_data='start_new')
)
class AboutCivsoc(BaseTextScreen):
"""Section about civsoc."""
def __init__(self):
self.text = ('Мы — граждане Российской Федерации, с целью '
'восстановления народного суверенитета и предотвращения '
'узурпации власти, объявляем о формировании '
'всероссийского движения "Гражданское Общество".')
self._create_reply_markup()
def _create_reply_markup(self):
self.reply_markup = types.InlineKeyboardMarkup()
self.reply_markup.add(
types.InlineKeyboardButton('Манифест',
url='https://civsoc.net/our-manifest/'),
types.InlineKeyboardButton(
'Устав', url='https://civsoc.net/ustav-dvizheniya/'
)
)
self.reply_markup.add(
types.InlineKeyboardButton('<< Назад', callback_data='start')
)
class AboutFraction(BaseTextScreen):
"""Section about fraction."""
def __init__(self):
self.text = ('Мы считаем свободу распространения и получения '
'информации главной ценностью сети Интернет.')
self._create_reply_markup()
def _create_reply_markup(self):
self.reply_markup = types.InlineKeyboardMarkup()
self.reply_markup.add(
types.InlineKeyboardButton(
'Манифест и устав',
url=('https://civsoc.net/frakciya/'
'frakciya-zashchity-interneta/')
)
)
self.reply_markup.row(
types.InlineKeyboardButton('<< Назад', callback_data='start')
)
| from aiogram import types
from ip_bot.screens import join
from ip_bot.screens.base import BaseTextScreen
from ip_bot.screens.remove_reply_keyboard import RemoveReplyKeyboard
from ip_bot.screens.start import Start
__all__ = ['BaseTextScreen', 'RemoveReplyKeyboard', 'Start',
'join', 'ContactSet',
'MessageForwarded', 'MessageFromUser', 'MessageFromChat',
'AboutCivsoc', 'AboutFraction']
class ContactSet(BaseTextScreen):
"""
The first message when trying to contact is with an explanation
of what to do.
"""
def __init__(self, recipient: str):
self.text = 'Отправь сообщение и я перешлю его {}.'.format(recipient)
self._create_reply_markup()
def _create_reply_markup(self):
self.reply_markup = types.InlineKeyboardMarkup()
self.reply_markup.add(
types.InlineKeyboardButton('<< Назад', callback_data='start')
)
class MessageForwarded(BaseTextScreen):
def __init__(self, recipient: str):
self.text = 'Ваше сообщение отправлено {}.'.format(recipient)
class MessageFromUser(BaseTextScreen):
"""A message from a user for a contact chat."""
def __init__(self, user: types.User, text: str):
if user.username:
name = '@' + user.username
else:
name = user.first_name
self.text = 'От <a href="tg://user?id={}">{}</a>\n\n{}'.format(user.id,
name,
text)
class MessageFromChat(BaseTextScreen):
def __init__(self, sender: str, text: str):
self.text = (
'#{}\n\n{}\n\n'
'<i>Вы можете снова отправить сообщение, ответив на это.</i>')\
.format(sender, text)
self._create_reply_markup()
def _create_reply_markup(self):
self.reply_markup = types.InlineKeyboardMarkup()
self.reply_markup.add(
types.InlineKeyboardButton('<< В главное меню',
callback_data='start_new')
)
class AboutCivsoc(BaseTextScreen):
"""Section about civsoc."""
def __init__(self):
self.text = ('Мы — граждане Российской Федерации, с целью '
'восстановления народного суверенитета и предотвращения '
'узурпации власти, объявляем о формировании '
'всероссийского движения "Гражданское Общество".')
self._create_reply_markup()
def _create_reply_markup(self):
self.reply_markup = types.InlineKeyboardMarkup()
self.reply_markup.add(
types.InlineKeyboardButton('Манифест',
url='https://civsoc.net/our-manifest/'),
types.InlineKeyboardButton(
'Устав', url='https://civsoc.net/ustav-dvizheniya/'
)
)
self.reply_markup.add(
types.InlineKeyboardButton('<< Назад', callback_data='start')
)
class AboutFraction(BaseTextScreen):
"""Section about fraction."""
def __init__(self):
self.text = ('Мы считаем свободу распространения и получения '
'информации главной ценностью сети Интернет.')
self._create_reply_markup()
def _create_reply_markup(self):
self.reply_markup = types.InlineKeyboardMarkup()
self.reply_markup.add(
types.InlineKeyboardButton(
'Манифест и устав',
url=('https://civsoc.net/frakciya/'
'frakciya-zashchity-interneta/')
)
)
self.reply_markup.row(
types.InlineKeyboardButton('<< Назад', callback_data='start')
) | en | 0.911085 | The first message when trying to contact is with an explanation of what to do. A message from a user for a contact chat. Section about civsoc. Section about fraction. | 2.500432 | 3 |
src/google_io.py | amcumber/mtgCardDatabase | 0 | 6615062 | import gspread
import oauth2client
| import gspread
import oauth2client
| none | 1 | 1.019186 | 1 | |
ref/networking/tcpipserver.py | skrymets/python-core-and-advanced | 2 | 6615063 | import socket
host='localhost'
port=4000
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind((host,port))
print("Server listening on port:",port)
s.listen(1)
c,addr = s.accept()
print("Connection from:",str(addr))
c.send(b"Hello, how are you")
c.send("bye".encode())
c.close() | import socket
host='localhost'
port=4000
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind((host,port))
print("Server listening on port:",port)
s.listen(1)
c,addr = s.accept()
print("Connection from:",str(addr))
c.send(b"Hello, how are you")
c.send("bye".encode())
c.close() | none | 1 | 2.957773 | 3 | |
tests/hwsim/test_wnm.py | PleXone2019/hostap | 5 | 6615064 | #!/usr/bin/python
#
# WNM tests
# Copyright (c) 2013, <NAME> <<EMAIL>>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import time
import logging
logger = logging.getLogger()
import hostapd
def test_wnm_bss_transition_mgmt(dev, apdev):
"""WNM BSS Transition Management"""
params = { "ssid": "test-wnm",
"time_advertisement": "2",
"time_zone": "EST5",
"wnm_sleep_mode": "1",
"bss_transition": "1" }
hostapd.add_ap(apdev[0]['ifname'], params)
dev[0].connect("test-wnm", key_mgmt="NONE", scan_freq="2412")
dev[0].request("WNM_BSS_QUERY 0")
def test_wnm_disassoc_imminent(dev, apdev):
"""WNM Disassociation Imminent"""
params = { "ssid": "test-wnm",
"time_advertisement": "2",
"time_zone": "EST5",
"wnm_sleep_mode": "1",
"bss_transition": "1" }
hostapd.add_ap(apdev[0]['ifname'], params)
hapd = hostapd.Hostapd(apdev[0]['ifname'])
dev[0].connect("test-wnm", key_mgmt="NONE", scan_freq="2412")
addr = dev[0].p2p_interface_addr()
hapd.request("DISASSOC_IMMINENT " + addr + " 10")
ev = dev[0].wait_event(["WNM: Disassociation Imminent"])
if ev is None:
raise Exception("Timeout while waiting for disassociation imminent")
if "Disassociation Timer 10" not in ev:
raise Exception("Unexpected disassociation imminent contents")
ev = dev[0].wait_event(["CTRL-EVENT-SCAN-RESULTS"])
if ev is None:
raise Exception("Timeout while waiting for re-connection scan")
def test_wnm_ess_disassoc_imminent(dev, apdev):
"""WNM ESS Disassociation Imminent"""
params = { "ssid": "test-wnm",
"time_advertisement": "2",
"time_zone": "EST5",
"wnm_sleep_mode": "1",
"bss_transition": "1" }
hostapd.add_ap(apdev[0]['ifname'], params)
hapd = hostapd.Hostapd(apdev[0]['ifname'])
dev[0].connect("test-wnm", key_mgmt="NONE", scan_freq="2412")
addr = dev[0].p2p_interface_addr()
hapd.request("ESS_DISASSOC " + addr + " 10 http://example.com/session-info")
ev = dev[0].wait_event(["ESS-DISASSOC-IMMINENT"])
if ev is None:
raise Exception("Timeout while waiting for ESS disassociation imminent")
if "0 1024 http://example.com/session-info" not in ev:
raise Exception("Unexpected ESS disassociation imminent message contents")
ev = dev[0].wait_event(["CTRL-EVENT-SCAN-RESULTS"])
if ev is None:
raise Exception("Timeout while waiting for re-connection scan")
def test_wnm_ess_disassoc_imminent_pmf(dev, apdev):
"""WNM ESS Disassociation Imminent"""
params = hostapd.wpa2_params("test-wnm-rsn", "12345678")
params["wpa_key_mgmt"] = "WPA-PSK-SHA256";
params["ieee80211w"] = "2";
params["bss_transition"] = "1"
hostapd.add_ap(apdev[0]['ifname'], params)
hapd = hostapd.Hostapd(apdev[0]['ifname'])
dev[0].connect("test-wnm-rsn", psk="12345678", ieee80211w="2",
key_mgmt="WPA-PSK-SHA256", proto="WPA2", scan_freq="2412")
addr = dev[0].p2p_interface_addr()
hapd.request("ESS_DISASSOC " + addr + " 10 http://example.com/session-info")
ev = dev[0].wait_event(["ESS-DISASSOC-IMMINENT"])
if ev is None:
raise Exception("Timeout while waiting for ESS disassociation imminent")
if "1 1024 http://example.com/session-info" not in ev:
raise Exception("Unexpected ESS disassociation imminent message contents")
ev = dev[0].wait_event(["CTRL-EVENT-SCAN-RESULTS"])
if ev is None:
raise Exception("Timeout while waiting for re-connection scan")
def check_wnm_sleep_mode_enter_exit(hapd, dev):
addr = dev.p2p_interface_addr()
sta = hapd.get_sta(addr)
if "[WNM_SLEEP_MODE]" in sta['flags']:
raise Exception("Station unexpectedly in WNM-Sleep Mode")
logger.info("Going to WNM Sleep Mode")
dev.request("WNM_SLEEP enter")
time.sleep(0.5)
sta = hapd.get_sta(addr)
if "[WNM_SLEEP_MODE]" not in sta['flags']:
raise Exception("Station failed to enter WNM-Sleep Mode")
logger.info("Waking up from WNM Sleep Mode")
dev.request("WNM_SLEEP exit")
time.sleep(0.5)
sta = hapd.get_sta(addr)
if "[WNM_SLEEP_MODE]" in sta['flags']:
raise Exception("Station failed to exit WNM-Sleep Mode")
def test_wnm_sleep_mode_open(dev, apdev):
"""WNM Sleep Mode - open"""
params = { "ssid": "test-wnm",
"time_advertisement": "2",
"time_zone": "EST5",
"wnm_sleep_mode": "1",
"bss_transition": "1" }
hostapd.add_ap(apdev[0]['ifname'], params)
hapd = hostapd.Hostapd(apdev[0]['ifname'])
dev[0].connect("test-wnm", key_mgmt="NONE", scan_freq="2412")
check_wnm_sleep_mode_enter_exit(hapd, dev[0])
def test_wnm_sleep_mode_rsn(dev, apdev):
"""WNM Sleep Mode - RSN"""
params = hostapd.wpa2_params("test-wnm-rsn", "12345678")
params["time_advertisement"] = "2"
params["time_zone"] = "EST5"
params["wnm_sleep_mode"] = "1"
params["bss_transition"] = "1"
hostapd.add_ap(apdev[0]['ifname'], params)
hapd = hostapd.Hostapd(apdev[0]['ifname'])
dev[0].connect("test-wnm-rsn", psk="12345678", scan_freq="2412")
check_wnm_sleep_mode_enter_exit(hapd, dev[0])
def test_wnm_sleep_mode_rsn_pmf(dev, apdev):
"""WNM Sleep Mode - RSN with PMF"""
params = hostapd.wpa2_params("test-wnm-rsn", "12345678")
params["wpa_key_mgmt"] = "WPA-PSK-SHA256";
params["ieee80211w"] = "2";
params["time_advertisement"] = "2"
params["time_zone"] = "EST5"
params["wnm_sleep_mode"] = "1"
params["bss_transition"] = "1"
hostapd.add_ap(apdev[0]['ifname'], params)
hapd = hostapd.Hostapd(apdev[0]['ifname'])
dev[0].connect("test-wnm-rsn", psk="12345678", ieee80211w="2",
key_mgmt="WPA-PSK-SHA256", proto="WPA2", scan_freq="2412")
check_wnm_sleep_mode_enter_exit(hapd, dev[0])
| #!/usr/bin/python
#
# WNM tests
# Copyright (c) 2013, <NAME> <<EMAIL>>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import time
import logging
logger = logging.getLogger()
import hostapd
def test_wnm_bss_transition_mgmt(dev, apdev):
    """WNM BSS Transition Management"""
    # AP advertising time, WNM-Sleep Mode and BSS Transition Management.
    params = {}
    params["ssid"] = "test-wnm"
    params["time_advertisement"] = "2"
    params["time_zone"] = "EST5"
    params["wnm_sleep_mode"] = "1"
    params["bss_transition"] = "1"
    hostapd.add_ap(apdev[0]['ifname'], params)
    wpas = dev[0]
    wpas.connect("test-wnm", key_mgmt="NONE", scan_freq="2412")
    # Trigger a BSS Transition Management Query from the station side.
    wpas.request("WNM_BSS_QUERY 0")
def test_wnm_disassoc_imminent(dev, apdev):
    """WNM Disassociation Imminent"""
    params = {}
    params["ssid"] = "test-wnm"
    params["time_advertisement"] = "2"
    params["time_zone"] = "EST5"
    params["wnm_sleep_mode"] = "1"
    params["bss_transition"] = "1"
    hostapd.add_ap(apdev[0]['ifname'], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    wpas = dev[0]
    wpas.connect("test-wnm", key_mgmt="NONE", scan_freq="2412")
    # Ask the AP to send a BSS TM Request with Disassociation Imminent and a
    # disassociation timer of 10.
    addr = wpas.p2p_interface_addr()
    hapd.request("DISASSOC_IMMINENT " + addr + " 10")
    ev = wpas.wait_event(["WNM: Disassociation Imminent"])
    if ev is None:
        raise Exception("Timeout while waiting for disassociation imminent")
    if "Disassociation Timer 10" not in ev:
        raise Exception("Unexpected disassociation imminent contents")
    # The station is expected to start scanning for a new BSS.
    ev = wpas.wait_event(["CTRL-EVENT-SCAN-RESULTS"])
    if ev is None:
        raise Exception("Timeout while waiting for re-connection scan")
def test_wnm_ess_disassoc_imminent(dev, apdev):
    """WNM ESS Disassociation Imminent"""
    params = {}
    params["ssid"] = "test-wnm"
    params["time_advertisement"] = "2"
    params["time_zone"] = "EST5"
    params["wnm_sleep_mode"] = "1"
    params["bss_transition"] = "1"
    hostapd.add_ap(apdev[0]['ifname'], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    wpas = dev[0]
    wpas.connect("test-wnm", key_mgmt="NONE", scan_freq="2412")
    addr = wpas.p2p_interface_addr()
    hapd.request("ESS_DISASSOC " + addr + " 10 http://example.com/session-info")
    ev = wpas.wait_event(["ESS-DISASSOC-IMMINENT"])
    if ev is None:
        raise Exception("Timeout while waiting for ESS disassociation imminent")
    # Leading "0" here vs "1" in the PMF variant of this test — presumably the
    # protection status of the frame; "1024" matches both tests. TODO confirm
    # against the wpa_supplicant event format.
    if "0 1024 http://example.com/session-info" not in ev:
        raise Exception("Unexpected ESS disassociation imminent message contents")
    ev = wpas.wait_event(["CTRL-EVENT-SCAN-RESULTS"])
    if ev is None:
        raise Exception("Timeout while waiting for re-connection scan")
def test_wnm_ess_disassoc_imminent_pmf(dev, apdev):
    """WNM ESS Disassociation Imminent"""
    # PMF-required variant: the ESS Disassociation Imminent frame is sent on
    # an RSN association with ieee80211w=2.
    params = hostapd.wpa2_params("test-wnm-rsn", "12345678")
    params["wpa_key_mgmt"] = "WPA-PSK-SHA256"
    params["ieee80211w"] = "2"
    params["bss_transition"] = "1"
    hostapd.add_ap(apdev[0]['ifname'], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    dev[0].connect("test-wnm-rsn", psk="12345678", ieee80211w="2",
                   key_mgmt="WPA-PSK-SHA256", proto="WPA2", scan_freq="2412")
    addr = dev[0].p2p_interface_addr()
    hapd.request("ESS_DISASSOC " + addr + " 10 http://example.com/session-info")
    ev = dev[0].wait_event(["ESS-DISASSOC-IMMINENT"])
    if ev is None:
        raise Exception("Timeout while waiting for ESS disassociation imminent")
    # Leading "1" here vs "0" in the non-PMF variant — presumably the
    # protection status of the frame. TODO confirm against the event format.
    if "1 1024 http://example.com/session-info" not in ev:
        raise Exception("Unexpected ESS disassociation imminent message contents")
    ev = dev[0].wait_event(["CTRL-EVENT-SCAN-RESULTS"])
    if ev is None:
        raise Exception("Timeout while waiting for re-connection scan")
def _sta_in_wnm_sleep(hapd, addr):
    """Return True if the AP currently flags station *addr* as in WNM-Sleep Mode."""
    return "[WNM_SLEEP_MODE]" in hapd.get_sta(addr)['flags']


def check_wnm_sleep_mode_enter_exit(hapd, dev):
    """Drive the station into and back out of WNM-Sleep Mode.

    Verifies the AP-side station flags before entering, after entering, and
    after exiting; raises Exception on any unexpected state.
    """
    addr = dev.p2p_interface_addr()
    if _sta_in_wnm_sleep(hapd, addr):
        raise Exception("Station unexpectedly in WNM-Sleep Mode")
    logger.info("Going to WNM Sleep Mode")
    dev.request("WNM_SLEEP enter")
    time.sleep(0.5)  # allow the WNM-Sleep exchange to complete
    if not _sta_in_wnm_sleep(hapd, addr):
        raise Exception("Station failed to enter WNM-Sleep Mode")
    logger.info("Waking up from WNM Sleep Mode")
    dev.request("WNM_SLEEP exit")
    time.sleep(0.5)
    if _sta_in_wnm_sleep(hapd, addr):
        raise Exception("Station failed to exit WNM-Sleep Mode")
def test_wnm_sleep_mode_open(dev, apdev):
    """WNM Sleep Mode - open"""
    # Open AP advertising time, WNM-Sleep Mode and BSS Transition Management.
    params = {}
    params["ssid"] = "test-wnm"
    params["time_advertisement"] = "2"
    params["time_zone"] = "EST5"
    params["wnm_sleep_mode"] = "1"
    params["bss_transition"] = "1"
    ifname = apdev[0]['ifname']
    hostapd.add_ap(ifname, params)
    hapd = hostapd.Hostapd(ifname)
    dev[0].connect("test-wnm", key_mgmt="NONE", scan_freq="2412")
    check_wnm_sleep_mode_enter_exit(hapd, dev[0])
def test_wnm_sleep_mode_rsn(dev, apdev):
    """WNM Sleep Mode - RSN"""
    params = hostapd.wpa2_params("test-wnm-rsn", "12345678")
    # Advertise time, WNM-Sleep Mode and BSS Transition Management.
    params.update({
        "time_advertisement": "2",
        "time_zone": "EST5",
        "wnm_sleep_mode": "1",
        "bss_transition": "1",
    })
    hostapd.add_ap(apdev[0]['ifname'], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    dev[0].connect("test-wnm-rsn", psk="12345678", scan_freq="2412")
    check_wnm_sleep_mode_enter_exit(hapd, dev[0])
def test_wnm_sleep_mode_rsn_pmf(dev, apdev):
    """WNM Sleep Mode - RSN with PMF"""
    # Require PMF (ieee80211w=2) so the WNM-Sleep Mode exchange runs with
    # protected management frames.
    params = hostapd.wpa2_params("test-wnm-rsn", "12345678")
    params["wpa_key_mgmt"] = "WPA-PSK-SHA256"
    params["ieee80211w"] = "2"
    params["time_advertisement"] = "2"
    params["time_zone"] = "EST5"
    params["wnm_sleep_mode"] = "1"
    params["bss_transition"] = "1"
    hostapd.add_ap(apdev[0]['ifname'], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    dev[0].connect("test-wnm-rsn", psk="12345678", ieee80211w="2",
                   key_mgmt="WPA-PSK-SHA256", proto="WPA2", scan_freq="2412")
    check_wnm_sleep_mode_enter_exit(hapd, dev[0])
| en | 0.728189 | #!/usr/bin/python # # WNM tests # Copyright (c) 2013, <NAME> <<EMAIL>> # # This software may be distributed under the terms of the BSD license. # See README for more details. WNM BSS Transition Management WNM Disassociation Imminent WNM ESS Disassociation Imminent WNM ESS Disassociation Imminent WNM Sleep Mode - open WNM Sleep Mode - RSN WNM Sleep Mode - RSN with PMF | 2.046032 | 2 |
Homework #2/homework #2.py | Vinciwayne/python-is-easy | 0 | 6615065 | """
homework #2
FUNCTIONS
"""
"""
Lollipop is the first single from American rapper Lil Wayne's sixth studio album, Tha Carter III.
The track posthumously features American singer Static Major and is produced by Deezle and Jim Jonsin.
It heavily utilizes the Auto-Tune vocal effect. The song was released digitally on March 13, 2008.
"""
print()
print()


def artist():
    """Print the recording artists of the song."""
    print('<NAME>, static major')


def year_released():
    """Print the year the single was released."""
    print(2008)


def genre():
    """Print the musical genres of the track."""
    print('Electropop, R&B, dirty rap')


def trueorfalase():
    """Extra credit: print the result of a boolean comparison."""
    print(100 < 40)


artist()
year_released()
genre()
trueorfalase()

print()
print()
"""
EZEBUIRO
UCHECHUKWU
VINCENT
""" | """
homework #2
FUNCTIONS
"""
"""
Lollipop is the first single from American rapper Lil Wayne's sixth studio album, Tha Carter III.
The track posthumously features American singer Static Major and is produced by Deezle and Jim Jonsin.
It heavily utilizes the Auto-Tune vocal effect. The song was released digitally on March 13, 2008.
"""
print()
print()


def artist():
    """Print the recording artists of the song."""
    print('<NAME>, static major')


def year_released():
    """Print the year the single was released."""
    print(2008)


def genre():
    """Print the musical genres of the track."""
    print('Electropop, R&B, dirty rap')


def trueorfalase():
    """Extra credit: print the result of a boolean comparison."""
    print(100 < 40)


artist()
year_released()
genre()
trueorfalase()

print()
print()
"""
EZEBUIRO
UCHECHUKWU
VINCENT
""" | en | 0.949341 | homework #2 FUNCTIONS Lollipop is the first single from American rapper <NAME>'s sixth studio album, <NAME> III. The track posthumously features American singer Static Major and is produced by Deezle and <NAME>. It heavily utilizes the Auto-Tune vocal effect. The song was released digitally on March 13, 2008. # artist names # year released # the type of music # extra credit EZEBUIRO UCHECHUKWU VINCENT | 3.24206 | 3 |
alipay/aop/api/domain/EsignResult.py | antopen/alipay-sdk-python-all | 213 | 6615066 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class EsignResult(object):
    """Result payload of an e-sign (electronic signature) query.

    The original generated code repeated identical getter/setter and
    dict-conversion boilerplate for each of the eleven fields.  Here the
    fields are declared once in FIELDS and the accessors and converters are
    derived from that list.  The public interface is unchanged: one
    read/write property per field, to_alipay_dict() and from_alipay_dict().
    """

    # Field names double as both the property names and the dict keys used
    # by to_alipay_dict() / from_alipay_dict().
    FIELDS = (
        'agreement_url',
        'apply_dutiable_mode_enum',
        'contractor_code',
        'contractor_name',
        'employer_code',
        'identification_in_belonging_employer',
        'pay_salary_mode_enum',
        'sign_time',
        'status',
        'tax_optimization_mode',
        'termination_time',
    )

    def __init__(self):
        # Every field starts unset (None), stored in a '_<name>' slot.
        for name in self.FIELDS:
            setattr(self, '_' + name, None)

    def to_alipay_dict(self):
        """Serialize the truthy fields to a plain dict.

        Nested values that expose to_alipay_dict() are serialized
        recursively, matching the original generated behavior.
        """
        params = dict()
        for name in self.FIELDS:
            value = getattr(self, name)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[name] = value.to_alipay_dict()
                else:
                    params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an EsignResult from a dict; return None for a falsy input."""
        if not d:
            return None
        o = EsignResult()
        for name in EsignResult.FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o


def _make_esign_property(name):
    """Create a read/write property backed by the '_<name>' attribute."""
    attr = '_' + name

    def getter(self):
        return getattr(self, attr)

    def setter(self, value):
        setattr(self, attr, value)

    return property(getter, setter)


# Install one property per field on the class.
for _field in EsignResult.FIELDS:
    setattr(EsignResult, _field, _make_esign_property(_field))
del _field
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class EsignResult(object):
    """Result payload of an e-sign (electronic signature) query.

    The original generated code repeated identical getter/setter and
    dict-conversion boilerplate for each of the eleven fields.  Here the
    fields are declared once in FIELDS and the accessors and converters are
    derived from that list.  The public interface is unchanged: one
    read/write property per field, to_alipay_dict() and from_alipay_dict().
    """

    # Field names double as both the property names and the dict keys used
    # by to_alipay_dict() / from_alipay_dict().
    FIELDS = (
        'agreement_url',
        'apply_dutiable_mode_enum',
        'contractor_code',
        'contractor_name',
        'employer_code',
        'identification_in_belonging_employer',
        'pay_salary_mode_enum',
        'sign_time',
        'status',
        'tax_optimization_mode',
        'termination_time',
    )

    def __init__(self):
        # Every field starts unset (None), stored in a '_<name>' slot.
        for name in self.FIELDS:
            setattr(self, '_' + name, None)

    def to_alipay_dict(self):
        """Serialize the truthy fields to a plain dict.

        Nested values that expose to_alipay_dict() are serialized
        recursively, matching the original generated behavior.
        """
        params = dict()
        for name in self.FIELDS:
            value = getattr(self, name)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[name] = value.to_alipay_dict()
                else:
                    params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an EsignResult from a dict; return None for a falsy input."""
        if not d:
            return None
        o = EsignResult()
        for name in EsignResult.FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o


def _make_esign_property(name):
    """Create a read/write property backed by the '_<name>' attribute."""
    attr = '_' + name

    def getter(self):
        return getattr(self, attr)

    def setter(self, value):
        setattr(self, attr, value)

    return property(getter, setter)


# Install one property per field on the class.
for _field in EsignResult.FIELDS:
    setattr(EsignResult, _field, _make_esign_property(_field))
del _field
sentiment/__init__.py | kianho/sentiment-playground | 0 | 6615067 | <gh_stars>0
#!/usr/bin/env python
# encoding: utf-8
"""
Date:
Mon Feb 23 16:42:36 AEDT 2015
Author:
<NAME> <<EMAIL>>
Description:
...
"""
from sentiment import *
| #!/usr/bin/env python
# encoding: utf-8
"""
Date:
Mon Feb 23 16:42:36 AEDT 2015
Author:
<NAME> <<EMAIL>>
Description:
...
"""
from sentiment import * | en | 0.453329 | #!/usr/bin/env python # encoding: utf-8 Date: Mon Feb 23 16:42:36 AEDT 2015 Author: <NAME> <<EMAIL>> Description: ... | 1.028905 | 1 |
Assignment1/linear_networks.py | seanstappas/ecse-543-assignment1 | 0 | 6615068 | from __future__ import division
import csv
import time
from matrices import Matrix
from choleski import choleski_solve
def solve_linear_network(A, Y, J, E, half_bandwidth=None):
    """
    Solve the linear resistive network described by the given matrices.

    :param A: the incidence matrix
    :param Y: the admittance matrix
    :param J: the current source matrix
    :param E: the voltage source matrix
    :param half_bandwidth: optional half-bandwidth for banded Choleski
    :return: the solved voltage matrix
    """
    # Reduce the branch equations to nodal form: (A Y A^T) v = A (J - Y E).
    reduced_matrix = A * Y * A.transpose()
    rhs = A * (J - Y * E)
    return choleski_solve(reduced_matrix, rhs, half_bandwidth=half_bandwidth)
def solve_linear_network_runtime(A, Y, J, E, half_bandwidth=None):
    """
    Solve the linear resistive network described by the given matrices.

    :param A: the incidence matrix
    :param Y: the admittance matrix
    :param J: the current source matrix
    :param E: the voltage source matrix
    :param half_bandwidth: optional half-bandwidth for banded Choleski
    :return: the solved voltage matrix and the runtime of the Choleski program (in ms)
    """
    A_new = A * Y * A.transpose()
    b = A * (J - Y * E)
    # time.clock() was deprecated in Python 3.3 and removed in 3.8; prefer the
    # high-resolution perf_counter when available, falling back to time.clock
    # to keep Python 2 compatibility (this file uses __future__ imports).
    timer = getattr(time, 'perf_counter', None) or time.clock
    t = timer()
    x = choleski_solve(A_new, b, half_bandwidth=half_bandwidth)
    runtime = (timer() - t) * 1000
    return x, runtime
def csv_to_network_branch_matrices(filename):
    """
    Converts a CSV file to Y, J, E network matrices.

    :param filename: the name of the CSV file
    :return: the Y, J, E network matrices
    """
    # Each CSV row describes one branch: source current J_k, branch
    # resistance R_k and source voltage E_k.
    with open(filename, 'r') as csv_file:
        records = [(float(row[0]), float(row[1]), float(row[2]))
                   for row in csv.reader(csv_file)]
    Y = Matrix.diagonal([1 / r_k for _, r_k, _ in records])
    J = Matrix.column_vector([j_k for j_k, _, _ in records])
    E = Matrix.column_vector([e_k for _, _, e_k in records])
    return Y, J, E
def create_network_matrices_mesh(rows, cols, branch_resistance, test_current):
    """
    Create the network matrices (A, Y, J, E) describing a rows x cols
    resistive mesh with a test current source attached.

    :param rows: the number of rows in the mesh
    :param cols: the number of columns in the mesh
    :param branch_resistance: the resistance in each branch
    :param test_current: the test current to apply
    :return: the network matrices (A, Y, J, E)
    """
    horizontal_branches = (cols - 1) * rows
    vertical_branches = (rows - 1) * cols
    total_branches = horizontal_branches + vertical_branches + 1  # +1: test-source branch
    total_nodes = rows * cols - 1  # one node is left out of the node set
    A = create_incidence_matrix_mesh(cols, total_branches, horizontal_branches,
                                     total_nodes, vertical_branches)
    Y, J, E = create_network_branch_matrices_mesh(total_branches,
                                                  branch_resistance,
                                                  test_current)
    return A, Y, J, E
def create_incidence_matrix_mesh(cols, num_branches, num_horizontal_branches, num_nodes, num_vertical_branches):
    """
    Create the incidence matrix given by the resistive mesh with the given number of columns, number of branches,
    number of horizontal branches, number of nodes, and number of vertical branches.

    :param cols: the number of columns in the mesh
    :param num_branches: the number of branches in the mesh
    :param num_horizontal_branches: the number of horizontal branches in the mesh
    :param num_nodes: the number of nodes in the mesh
    :param num_vertical_branches: the number of vertical branches in the mesh
    :return: the incidence matrix (A)
    """
    # Rows index mesh nodes (one node is omitted from the node set — the
    # caller passes num_nodes = rows*cols - 1; presumably the reference node,
    # TODO confirm); columns index branches, with +1/-1 marking endpoints.
    A = Matrix.empty(num_nodes, num_branches)
    node_offset = -1
    # Horizontal branches: branch b connects node (b + node_offset) to its
    # right-hand neighbour. node_offset grows by one per mesh row because a
    # row has cols-1 horizontal branches but cols nodes.
    for branch in range(num_horizontal_branches):
        if branch == num_horizontal_branches - cols + 1:
            # Branch index (cols-1)*(rows-1) + ... : special-cased branch for
            # which only the single +1 entry is written (its other endpoint is
            # the omitted node).
            A[branch + node_offset + 1][branch] = 1
        else:
            if branch % (cols - 1) == 0:
                node_offset += 1
            node_number = branch + node_offset
            A[node_number][branch] = -1
            A[node_number + 1][branch] = 1
    # Vertical branches are numbered after all horizontal ones.
    branch_offset = num_horizontal_branches
    node_offset = cols  # a vertical branch joins node n to node n + cols
    for branch in range(num_vertical_branches):
        if branch == num_vertical_branches - cols:
            # From here on the lower endpoints shift by one because the
            # omitted node is skipped; this branch only gets its +1 entry.
            node_offset -= 1
            A[branch][branch + branch_offset] = 1
        else:
            A[branch][branch + branch_offset] = 1
            A[branch + node_offset][branch + branch_offset] = -1
    # The extra (last) branch is the test source; it touches node cols-1
    # (node 0 in the degenerate 2-branch case).
    if num_branches == 2:
        A[0][1] = -1
    else:
        A[cols - 1][num_branches - 1] = -1
    return A
def create_network_branch_matrices_mesh(num_branches, branch_resistance, test_current):
    """
    Create the Y, J, E network branch matrices of the resistive mesh given by
    the provided number of branches, branch resistance and test current.

    :param num_branches: the number of branches in the mesh
    :param branch_resistance: the resistance of each branch in the mesh
    :param test_current: the test current to apply to the mesh
    :return: the Y, J, E network branch matrices
    """
    last = num_branches - 1
    # All regular branches share the same admittance; the final branch is the
    # test source (zero admittance).
    admittances = [1 / branch_resistance] * last + [0]
    # Negative test current here because we assume current is coming OUT of
    # the test current node.
    currents = [0] * last + [-test_current]
    voltages = [0] * num_branches
    Y = Matrix.diagonal(admittances)
    J = Matrix.column_vector(currents)
    E = Matrix.column_vector(voltages)
    return Y, J, E
def find_mesh_resistance(N, branch_resistance, half_bandwidth=None):
    """
    Find the equivalent resistance of an Nx2N resistive mesh with the given
    branch resistance and optional half-bandwidth.

    :param N: the size of the mesh (Nx2N)
    :param branch_resistance: the resistance of each branch of the mesh
    :param half_bandwidth: the half-bandwidth to be used for banded Choleski decomposition (or None to use non-banded)
    :return: the equivalent resistance of the mesh and the Choleski runtime
    """
    injected_current = 0.01
    A, Y, J, E = create_network_matrices_mesh(N, 2 * N, branch_resistance,
                                              injected_current)
    voltages, runtime_ms = solve_linear_network_runtime(
        A, Y, J, E, half_bandwidth=half_bandwidth)
    # The test-source node voltage; node 0 in the degenerate 1x2 case.
    if N > 1:
        probe_node = 2 * N - 1
    else:
        probe_node = 0
    measured_voltage = voltages[probe_node][0]
    return measured_voltage / injected_current, runtime_ms
| from __future__ import division
import csv
import time
from matrices import Matrix
from choleski import choleski_solve
def solve_linear_network(A, Y, J, E, half_bandwidth=None):
    """
    Solve the linear resistive network described by the given matrices.

    :param A: the incidence matrix
    :param Y: the admittance matrix
    :param J: the current source matrix
    :param E: the voltage source matrix
    :param half_bandwidth: optional half-bandwidth for banded Choleski
    :return: the solved voltage matrix
    """
    # Reduce the branch equations to nodal form: (A Y A^T) v = A (J - Y E).
    reduced_matrix = A * Y * A.transpose()
    rhs = A * (J - Y * E)
    return choleski_solve(reduced_matrix, rhs, half_bandwidth=half_bandwidth)
def solve_linear_network_runtime(A, Y, J, E, half_bandwidth=None):
    """
    Solve the linear resistive network described by the given matrices.

    :param A: the incidence matrix
    :param Y: the admittance matrix
    :param J: the current source matrix
    :param E: the voltage source matrix
    :param half_bandwidth: optional half-bandwidth for banded Choleski
    :return: the solved voltage matrix and the runtime of the Choleski program (in ms)
    """
    A_new = A * Y * A.transpose()
    b = A * (J - Y * E)
    # time.clock() was deprecated in Python 3.3 and removed in 3.8; prefer the
    # high-resolution perf_counter when available, falling back to time.clock
    # to keep Python 2 compatibility (this file uses __future__ imports).
    timer = getattr(time, 'perf_counter', None) or time.clock
    t = timer()
    x = choleski_solve(A_new, b, half_bandwidth=half_bandwidth)
    runtime = (timer() - t) * 1000
    return x, runtime
def csv_to_network_branch_matrices(filename):
    """
    Converts a CSV file to Y, J, E network matrices.

    :param filename: the name of the CSV file
    :return: the Y, J, E network matrices
    """
    # Each CSV row describes one branch: source current J_k, branch
    # resistance R_k and source voltage E_k.
    with open(filename, 'r') as csv_file:
        records = [(float(row[0]), float(row[1]), float(row[2]))
                   for row in csv.reader(csv_file)]
    Y = Matrix.diagonal([1 / r_k for _, r_k, _ in records])
    J = Matrix.column_vector([j_k for j_k, _, _ in records])
    E = Matrix.column_vector([e_k for _, _, e_k in records])
    return Y, J, E
def create_network_matrices_mesh(rows, cols, branch_resistance, test_current):
    """
    Create the network matrices (A, Y, J, E) describing a rows x cols
    resistive mesh with a test current source attached.

    :param rows: the number of rows in the mesh
    :param cols: the number of columns in the mesh
    :param branch_resistance: the resistance in each branch
    :param test_current: the test current to apply
    :return: the network matrices (A, Y, J, E)
    """
    horizontal_branches = (cols - 1) * rows
    vertical_branches = (rows - 1) * cols
    total_branches = horizontal_branches + vertical_branches + 1  # +1: test-source branch
    total_nodes = rows * cols - 1  # one node is left out of the node set
    A = create_incidence_matrix_mesh(cols, total_branches, horizontal_branches,
                                     total_nodes, vertical_branches)
    Y, J, E = create_network_branch_matrices_mesh(total_branches,
                                                  branch_resistance,
                                                  test_current)
    return A, Y, J, E
def create_incidence_matrix_mesh(cols, num_branches, num_horizontal_branches, num_nodes, num_vertical_branches):
    """
    Create the incidence matrix given by the resistive mesh with the given number of columns, number of branches,
    number of horizontal branches, number of nodes, and number of vertical branches.

    :param cols: the number of columns in the mesh
    :param num_branches: the number of branches in the mesh
    :param num_horizontal_branches: the number of horizontal branches in the mesh
    :param num_nodes: the number of nodes in the mesh
    :param num_vertical_branches: the number of vertical branches in the mesh
    :return: the incidence matrix (A)
    """
    # Rows index mesh nodes (one node is omitted from the node set — the
    # caller passes num_nodes = rows*cols - 1; presumably the reference node,
    # TODO confirm); columns index branches, with +1/-1 marking endpoints.
    A = Matrix.empty(num_nodes, num_branches)
    node_offset = -1
    # Horizontal branches: branch b connects node (b + node_offset) to its
    # right-hand neighbour. node_offset grows by one per mesh row because a
    # row has cols-1 horizontal branches but cols nodes.
    for branch in range(num_horizontal_branches):
        if branch == num_horizontal_branches - cols + 1:
            # Special-cased branch for which only the single +1 entry is
            # written (its other endpoint is the omitted node).
            A[branch + node_offset + 1][branch] = 1
        else:
            if branch % (cols - 1) == 0:
                node_offset += 1
            node_number = branch + node_offset
            A[node_number][branch] = -1
            A[node_number + 1][branch] = 1
    # Vertical branches are numbered after all horizontal ones.
    branch_offset = num_horizontal_branches
    node_offset = cols  # a vertical branch joins node n to node n + cols
    for branch in range(num_vertical_branches):
        if branch == num_vertical_branches - cols:
            # From here on the lower endpoints shift by one because the
            # omitted node is skipped; this branch only gets its +1 entry.
            node_offset -= 1
            A[branch][branch + branch_offset] = 1
        else:
            A[branch][branch + branch_offset] = 1
            A[branch + node_offset][branch + branch_offset] = -1
    # The extra (last) branch is the test source; it touches node cols-1
    # (node 0 in the degenerate 2-branch case).
    if num_branches == 2:
        A[0][1] = -1
    else:
        A[cols - 1][num_branches - 1] = -1
    return A
def create_network_branch_matrices_mesh(num_branches, branch_resistance, test_current):
    """
    Create the Y, J, E network branch matrices of the resistive mesh given by
    the provided number of branches, branch resistance and test current.

    :param num_branches: the number of branches in the mesh
    :param branch_resistance: the resistance of each branch in the mesh
    :param test_current: the test current to apply to the mesh
    :return: the Y, J, E network branch matrices
    """
    last = num_branches - 1
    # All regular branches share the same admittance; the final branch is the
    # test source (zero admittance).
    admittances = [1 / branch_resistance] * last + [0]
    # Negative test current here because we assume current is coming OUT of
    # the test current node.
    currents = [0] * last + [-test_current]
    voltages = [0] * num_branches
    Y = Matrix.diagonal(admittances)
    J = Matrix.column_vector(currents)
    E = Matrix.column_vector(voltages)
    return Y, J, E
def find_mesh_resistance(N, branch_resistance, half_bandwidth=None):
    """
    Find the equivalent resistance of an Nx2N resistive mesh with the given
    branch resistance and optional half-bandwidth.

    :param N: the size of the mesh (Nx2N)
    :param branch_resistance: the resistance of each branch of the mesh
    :param half_bandwidth: the half-bandwidth to be used for banded Choleski decomposition (or None to use non-banded)
    :return: the equivalent resistance of the mesh and the Choleski runtime
    """
    injected_current = 0.01
    A, Y, J, E = create_network_matrices_mesh(N, 2 * N, branch_resistance,
                                              injected_current)
    voltages, runtime_ms = solve_linear_network_runtime(
        A, Y, J, E, half_bandwidth=half_bandwidth)
    # The test-source node voltage; node 0 in the degenerate 1x2 case.
    if N > 1:
        probe_node = 2 * N - 1
    else:
        probe_node = 0
    measured_voltage = voltages[probe_node][0]
    return measured_voltage / injected_current, runtime_ms
| en | 0.778668 | Solve the linear resistive network described by the given matrices. :param A: the incidence matrix :param Y: the admittance matrix :param J: the current source matrix :param E: the voltage source matrix :param half_bandwidth: :return: the solved voltage matrix Solve the linear resistive network described by the given matrices. :param A: the incidence matrix :param Y: the admittance matrix :param J: the current source matrix :param E: the voltage source matrix :param half_bandwidth: :return: the solved voltage matrix and the runtime of the Choleski program (in ms) Converts a CSV file to Y, J, E network matrices. :param filename: the name of the CSV file :return: the Y, J, E network matrices Create the network matrices needed (A, Y, J, E) to solve the resitive mesh network with the given rows, columns, branch resistance and test current. :param rows: the number of rows in the mesh :param cols: the number of columns in the mesh :param branch_resistance: the resistance in each branch :param test_current: the test current to apply :return: the network matrices (A, Y, J, E) Create the incidence matrix given by the resistive mesh with the given number of columns, number of branches, number of horizontal branches, number of nodes, and number of vertical branches. :param cols: the number of columns in the mesh :param num_branches: the number of branches in the mesh :param num_horizontal_branches: the number of horizontal branches in the mesh :param num_nodes: the number of nodes in the mesh :param num_vertical_branches: the number of vertical branches in the mesh :return: the incidence matrix (A) Create the Y, J, E network branch matrices of the resistive mesh given by the provided number of branches, branch resistance and test current. 
:param num_branches: the number of branches in the mesh :param branch_resistance: the resistance of each branch in the mesh :param test_current: the test current to apply to the mesh :return: the Y, J, E network branch matrices # Negative test current here because we assume current is coming OUT of the test current node. Find the equivalent resistance of an Nx2N resistive mesh with the given branch resistance and optional half-bandwidth :param N: the size of the mesh (Nx2N) :param branch_resistance: the resistance of each branch of the mesh :param half_bandwidth: the half-bandwidth to be used for banded Choleski decomposition (or None to use non-banded) :return: the equivalent resistance of the mesh | 3.392772 | 3 |
py_getEthnicity_packageEthnicolr_v1.py | santoshbs/python-code-snippets | 1 | 6615069 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 24 2022;
@author: santoshbs;
@purpose: Infer ethnicity based on the last name of a person;
@package-used: https://pypi.org/project/ethnicolr/;
"""
import pandas as pd
from ethnicolr import census_ln
f= './persons.csv' #read the input data file
df= pd.read_csv(f, low_memory= False)
df.columns
df_sub= df[df['person_country_code'] == 'US'] #filter as necessary
df_sub= df_sub[~df_sub['name_last'].isnull()]
df_sub['name_last'].head()
df_sub= df_sub['name_last']
df_sub= pd.DataFrame(df_sub)
df_sub= df_sub.drop_duplicates()
p= census_ln(df_sub, 'name_last') #obtain ethnicity information
p= p.rename(columns= {
'pctwhite': 'ethnicity_inferred_percent_white',
'pctblack': 'ethnicity_inferred_percent_black',
'pctapi': 'ethnicity_inferred_percent_asianPacificIslander',
'pctaian': 'ethnicity_inferred_percent_americanIndianAlaskanNative',
'pct2prace': 'ethnicity_inferred_percent_twoOrMoreRaces',
'pcthispanic': 'ethnicity_inferred_percent_hispanic',
}) #rename columns for readability
p.columns
p= p.drop_duplicates(subset=['name_last'])
p.head()
df_ethnicity= df.merge(p, on= 'name_last', how= 'left') #merge with
f= './inferred_ETHNICITY_usingPYEthnicolrAPI_v1.csv'
df_ethnicity.to_csv(f, index= False)
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 24 2022;
@author: santoshbs;
@purpose: Infer ethnicity based on the last name of a person;
@package-used: https://pypi.org/project/ethnicolr/;
"""
import pandas as pd
from ethnicolr import census_ln
f= './persons.csv' #read the input data file
df= pd.read_csv(f, low_memory= False)
df.columns
df_sub= df[df['person_country_code'] == 'US'] #filter as necessary
df_sub= df_sub[~df_sub['name_last'].isnull()]
df_sub['name_last'].head()
df_sub= df_sub['name_last']
df_sub= pd.DataFrame(df_sub)
df_sub= df_sub.drop_duplicates()
p= census_ln(df_sub, 'name_last') #obtain ethnicity information
p= p.rename(columns= {
'pctwhite': 'ethnicity_inferred_percent_white',
'pctblack': 'ethnicity_inferred_percent_black',
'pctapi': 'ethnicity_inferred_percent_asianPacificIslander',
'pctaian': 'ethnicity_inferred_percent_americanIndianAlaskanNative',
'pct2prace': 'ethnicity_inferred_percent_twoOrMoreRaces',
'pcthispanic': 'ethnicity_inferred_percent_hispanic',
}) #rename columns for readability
p.columns
p= p.drop_duplicates(subset=['name_last'])
p.head()
df_ethnicity= df.merge(p, on= 'name_last', how= 'left') #merge with
f= './inferred_ETHNICITY_usingPYEthnicolrAPI_v1.csv'
df_ethnicity.to_csv(f, index= False)
| en | 0.680964 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Mon Jan 24 2022; @author: santoshbs; @purpose: Infer ethnicity based on the last name of a person; @package-used: https://pypi.org/project/ethnicolr/; #read the input data file #filter as necessary #obtain ethnicity information #rename columns for readability #merge with | 3.246702 | 3 |
utils/__init__.py | naivete5656/BFP | 8 | 6615070 | from .for_vis import Visdom
from .load import *
from .utils import local_maxima, gaus_filter, optimum, gather_path
from .load_for_CMP import *
from .load_image import visuarize_img, load_image
from .cmp_library import * | from .for_vis import Visdom
from .load import *
from .utils import local_maxima, gaus_filter, optimum, gather_path
from .load_for_CMP import *
from .load_image import visuarize_img, load_image
from .cmp_library import * | none | 1 | 1.114721 | 1 | |
LeetCode/MinRotate.py | Jaidev810/Competitive-Questions | 1 | 6615071 | def minrotate(arr):
i = 0
j = len(arr)-1
while i < j :
mid = (i+j)//2
if arr[mid] < arr[mid-1]:
return arr[mid]
if arr[mid+1] < arr[mid]:
return arr[mid+1]
elif arr[mid] > arr[0]:
i = mid+1
else:
j = mid-1
return arr[0]
arr = [4, 5, 6, 7, 0, 1, 2, 3]
element = minrotate(arr)
print(element) | def minrotate(arr):
i = 0
j = len(arr)-1
while i < j :
mid = (i+j)//2
if arr[mid] < arr[mid-1]:
return arr[mid]
if arr[mid+1] < arr[mid]:
return arr[mid+1]
elif arr[mid] > arr[0]:
i = mid+1
else:
j = mid-1
return arr[0]
arr = [4, 5, 6, 7, 0, 1, 2, 3]
element = minrotate(arr)
print(element) | none | 1 | 3.487135 | 3 | |
app/models/user.py | Kyooooma/view-oj-backend | 6 | 6615072 | <reponame>Kyooooma/view-oj-backend
from flask_login import UserMixin
from sqlalchemy import Boolean, Column, Date, Integer, String, cast, func
from app import login_manager
from app.libs.error_code import AuthFailed
from app.models.base import Base, db
class User(UserMixin, Base):
__tablename__ = 'user'
fields = ['username', 'nickname', 'group', 'permission', 'status', 'is_freshman', 'custom_color']
username = Column(String(100), primary_key=True)
nickname = Column(String(100), nullable=False)
password = Column(String(100), nullable=False)
group = Column(String(100))
permission = Column(Integer, nullable=False, default=0)
status = Column(Integer, nullable=False, default=0)
rating = Column(Integer, nullable=False, default=0)
codeforces_rating = Column(Integer, nullable=False, default=0)
contest_num = Column(Integer, nullable=False, default=0)
last_cf_date = Column(Date)
custom_color_ = Column('custom_color', String(10000))
is_freshman = Column(Boolean, default=False, nullable=False)
@property
def id(self):
return self.username
@property
def oj_username(self):
from app.models.oj import OJ
from app.models.oj_username import OJUsername
res = OJUsername.search(username=self.username, page_size=-1)['data']
r = list()
for i in OJ.search(status=1, page_size=-1)['data']:
oj_username = None
last_success_time = None
for j in res:
if j.oj_id == i.id:
oj_username = j.oj_username
last_success_time = j.last_success_time
break
r.append({
'oj': i,
'oj_username': oj_username,
'last_success_time': last_success_time
})
return r
@property
def problem_distributed(self):
from app.models.accept_problem import AcceptProblem
from app.models.oj import OJ
from app.models.problem import Problem
res = []
oj_list = OJ.search(page_size=-1)['data']
for i in oj_list:
res.append({
'oj': i,
'num': AcceptProblem.query.filter(
AcceptProblem.username == self.username,
AcceptProblem.problem_id.in_(db.session.query(Problem.id).filter_by(oj_id=i.id).subquery())
).count()
})
return res
@property
def rating_trend(self):
from app.models.accept_problem import AcceptProblem
return [{
'date': i[0],
'add_rating': int(i[1])
} for i in
db.session.query(cast(AcceptProblem.create_time, Date), func.sum(AcceptProblem.add_rating)).filter(
AcceptProblem.username == self.username
).group_by(cast(AcceptProblem.create_time, Date)).order_by(cast(AcceptProblem.create_time, Date)).all()]
@property
def cf_rating_trend(self):
from app.models.codeforces_rounds import CodeforcesRounds
return db.session.query(CodeforcesRounds). \
filter(CodeforcesRounds.username == self.username). \
order_by(cast(CodeforcesRounds.create_time, Date)).all()
@property
def cf_statistics(self):
from datetime import datetime, timedelta
from app.models.codeforces_rounds import CodeforcesRounds
end_date = datetime.now()
start_date = end_date - timedelta(days=7)
res = db.session.query(CodeforcesRounds). \
filter(CodeforcesRounds.username == self.username). \
filter(CodeforcesRounds.create_time <= end_date, CodeforcesRounds.create_time > start_date). \
order_by(cast(CodeforcesRounds.create_time, Date)).all()
cnt = len(res)
rating_change = sum([i.rating_change for i in res])
return {
'count': cnt,
'rating_change': rating_change,
'last_rating': self.codeforces_rating
}
@property
def custom_color(self):
if self.custom_color_ is None:
return None
return eval(self.custom_color_)
def check_password(self, password):
return self.password == password
@staticmethod
@login_manager.user_loader
def load_user(id_):
return User.get_by_id(id_)
@staticmethod
@login_manager.unauthorized_handler
def unauthorized_handler():
return AuthFailed()
| from flask_login import UserMixin
from sqlalchemy import Boolean, Column, Date, Integer, String, cast, func
from app import login_manager
from app.libs.error_code import AuthFailed
from app.models.base import Base, db
class User(UserMixin, Base):
__tablename__ = 'user'
fields = ['username', 'nickname', 'group', 'permission', 'status', 'is_freshman', 'custom_color']
username = Column(String(100), primary_key=True)
nickname = Column(String(100), nullable=False)
password = Column(String(100), nullable=False)
group = Column(String(100))
permission = Column(Integer, nullable=False, default=0)
status = Column(Integer, nullable=False, default=0)
rating = Column(Integer, nullable=False, default=0)
codeforces_rating = Column(Integer, nullable=False, default=0)
contest_num = Column(Integer, nullable=False, default=0)
last_cf_date = Column(Date)
custom_color_ = Column('custom_color', String(10000))
is_freshman = Column(Boolean, default=False, nullable=False)
@property
def id(self):
return self.username
@property
def oj_username(self):
from app.models.oj import OJ
from app.models.oj_username import OJUsername
res = OJUsername.search(username=self.username, page_size=-1)['data']
r = list()
for i in OJ.search(status=1, page_size=-1)['data']:
oj_username = None
last_success_time = None
for j in res:
if j.oj_id == i.id:
oj_username = j.oj_username
last_success_time = j.last_success_time
break
r.append({
'oj': i,
'oj_username': oj_username,
'last_success_time': last_success_time
})
return r
@property
def problem_distributed(self):
from app.models.accept_problem import AcceptProblem
from app.models.oj import OJ
from app.models.problem import Problem
res = []
oj_list = OJ.search(page_size=-1)['data']
for i in oj_list:
res.append({
'oj': i,
'num': AcceptProblem.query.filter(
AcceptProblem.username == self.username,
AcceptProblem.problem_id.in_(db.session.query(Problem.id).filter_by(oj_id=i.id).subquery())
).count()
})
return res
@property
def rating_trend(self):
from app.models.accept_problem import AcceptProblem
return [{
'date': i[0],
'add_rating': int(i[1])
} for i in
db.session.query(cast(AcceptProblem.create_time, Date), func.sum(AcceptProblem.add_rating)).filter(
AcceptProblem.username == self.username
).group_by(cast(AcceptProblem.create_time, Date)).order_by(cast(AcceptProblem.create_time, Date)).all()]
@property
def cf_rating_trend(self):
from app.models.codeforces_rounds import CodeforcesRounds
return db.session.query(CodeforcesRounds). \
filter(CodeforcesRounds.username == self.username). \
order_by(cast(CodeforcesRounds.create_time, Date)).all()
@property
def cf_statistics(self):
from datetime import datetime, timedelta
from app.models.codeforces_rounds import CodeforcesRounds
end_date = datetime.now()
start_date = end_date - timedelta(days=7)
res = db.session.query(CodeforcesRounds). \
filter(CodeforcesRounds.username == self.username). \
filter(CodeforcesRounds.create_time <= end_date, CodeforcesRounds.create_time > start_date). \
order_by(cast(CodeforcesRounds.create_time, Date)).all()
cnt = len(res)
rating_change = sum([i.rating_change for i in res])
return {
'count': cnt,
'rating_change': rating_change,
'last_rating': self.codeforces_rating
}
@property
def custom_color(self):
if self.custom_color_ is None:
return None
return eval(self.custom_color_)
def check_password(self, password):
return self.password == password
@staticmethod
@login_manager.user_loader
def load_user(id_):
return User.get_by_id(id_)
@staticmethod
@login_manager.unauthorized_handler
def unauthorized_handler():
return AuthFailed() | none | 1 | 2.318384 | 2 | |
plaso/classifier/classify.py | cvandeplas/plaso | 3 | 6615073 | <reponame>cvandeplas/plaso
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a small classify test program."""
import argparse
import glob
import logging
from plaso.classifier import classifier
from plaso.classifier import scanner
from plaso.classifier import test_lib
def Main():
args_parser = argparse.ArgumentParser(
decription='Classify test program.')
args_parser.add_argument(
'-t', '--type', type='choice', metavar='TYPE', action='store',
dest='scanner_type', choices=['scan-tree', 'scan_tree'],
default='scan-tree', help='The scanner type')
args_parser.add_argument(
'-v', '--verbose', action='store_true', dest='verbose', default=False,
help='Print verbose output')
args_parser.add_argument(
'filenames', nargs='+', action='store', metavar='FILENAMES',
default=None, help='The input filename(s) to classify.')
options = args_parser.parse_args()
if options.verbose:
logging.basicConfig(level=logging.DEBUG)
files_to_classify = []
for input_glob in options.filenames:
files_to_classify += glob.glob(input_glob)
store = test_lib.CreateSpecificationStore()
if options.scanner_type not in ['scan-tree', 'scan_tree']:
print u'Unsupported scanner type defaulting to: scan-tree'
scan = scanner.Scanner(store)
classify = classifier.Classifier(scan)
for input_filename in files_to_classify:
classifications = classify.ClassifyFile(input_filename)
print u'File: {0:s}'.format(input_filename)
if not classifications:
print u'No classifications found.'
else:
print u'Classifications:'
for classification in classifications:
print u'\tformat: {0:s}'.format(classification.identifier)
print u''
if __name__ == '__main__':
Main()
| #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a small classify test program."""
import argparse
import glob
import logging
from plaso.classifier import classifier
from plaso.classifier import scanner
from plaso.classifier import test_lib
def Main():
args_parser = argparse.ArgumentParser(
decription='Classify test program.')
args_parser.add_argument(
'-t', '--type', type='choice', metavar='TYPE', action='store',
dest='scanner_type', choices=['scan-tree', 'scan_tree'],
default='scan-tree', help='The scanner type')
args_parser.add_argument(
'-v', '--verbose', action='store_true', dest='verbose', default=False,
help='Print verbose output')
args_parser.add_argument(
'filenames', nargs='+', action='store', metavar='FILENAMES',
default=None, help='The input filename(s) to classify.')
options = args_parser.parse_args()
if options.verbose:
logging.basicConfig(level=logging.DEBUG)
files_to_classify = []
for input_glob in options.filenames:
files_to_classify += glob.glob(input_glob)
store = test_lib.CreateSpecificationStore()
if options.scanner_type not in ['scan-tree', 'scan_tree']:
print u'Unsupported scanner type defaulting to: scan-tree'
scan = scanner.Scanner(store)
classify = classifier.Classifier(scan)
for input_filename in files_to_classify:
classifications = classify.ClassifyFile(input_filename)
print u'File: {0:s}'.format(input_filename)
if not classifications:
print u'No classifications found.'
else:
print u'Classifications:'
for classification in classifications:
print u'\tformat: {0:s}'.format(classification.identifier)
print u''
if __name__ == '__main__':
Main() | en | 0.838531 | #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2013 The Plaso Project Authors. # Please see the AUTHORS file for details on individual authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. This file contains a small classify test program. | 2.439439 | 2 |
structured_tables/app.py | CivicKnowledge/structured_tables | 0 | 6615074 | """ Flask application to parse STF Files
"""
from six import string_types
from bottle import error, hook, get, request, response # , redirect, put, post
from bottle import HTTPResponse, install # , static_file, url
from bottle import run # , debug # @UnresolvedImport
from decorator import decorator # @UnresolvedImport
import logging
import string
logging.basicConfig(level=logging.DEBUG)
class NotFound(Exception):
pass
class InternalError(Exception):
pass
class NotAuthorized(Exception):
pass
class TooManyRequests(Exception):
pass
def capture_return_exception(e):
import sys
import traceback
# (exc_type, exc_value, exc_traceback) = sys.exc_info() # @UnusedVariable
tb_list = traceback.format_list(traceback.extract_tb(sys.exc_info()[2]))
return {'exception': {
'class': e.__class__.__name__,
'args': e.args,
'trace': "\n".join(tb_list)
}}
def _CaptureException(f, *args, **kwargs):
"""Decorator implementation for capturing exceptions."""
try:
r = f(*args, **kwargs)
except HTTPResponse:
raise # redirect() uses exceptions
except Exception as e:
r = capture_return_exception(e)
if hasattr(e, 'code'):
response.status = e.code
return r
def CaptureException(f, *args, **kwargs):
"""Decorator to capture exceptions and convert them to a dict that can be
returned as JSON."""
return decorator(_CaptureException, f) # Preserves signature
class AllJSONPlugin(object):
"""A copy of the bottle JSONPlugin, but this one tries to convert all
objects to json."""
from json import dumps as json_dumps
name = 'json'
remote = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, context):
dumps = self.json_dumps
if not dumps:
return callback
def wrapper(*a, **ka):
rv = callback(*a, **ka)
if isinstance(rv, HTTPResponse):
return rv
if isinstance(rv, string_types):
return rv
# Attempt to serialize, raises exception on failure
try:
json_response = dumps(rv)
except Exception as e:
r = capture_return_exception(e)
json_response = dumps(r)
# Set content type only if serialization succesful
response.content_type = 'application/json'
return json_response
return wrapper
install(AllJSONPlugin())
@error(404)
@CaptureException
def error404(error):
raise NotFound("For url: {}".format(repr(request.url)))
@error(500)
def error500(error):
raise InternalError("For Url: {}".format(repr(request.url)))
@hook('after_request')
def enable_cors():
response.headers['Access-Control-Allow-Origin'] = '*'
@get('/')
def get_root():
return ['Nothing Here']
def _run(host, port, reloader=False, **kwargs):
logging.info('Listening on {} {}'.format(host, port))
return run(host=host, port=port, reloader=reloader, server='paste')
if __name__ == '__main__':
import argparse
import os
from uuid import uuid4
# Env vars set by docker when a link is made.
docker_host = os.getenv('REDIS_PORT_6379_TCP_ADDR')
docker_port = os.getenv('REDIS_PORT_6379_TCP_PORT', 6379)
unregistered_key = os.getenv('UNREGISTERED_KEY', None)
registered_key = os.getenv('REGISTERED_KEY', None)
authoritative_key = os.getenv('AUTHORITATIVE_KEY', None)
numbers_host = os.getenv('NUMBERS_HOST', '0.0.0.0')
d = {
'reloader': False,
'host': numbers_host,
'port': 80,
'redis': {
'host': docker_host,
'port': docker_port
},
'unregistered_key': unregistered_key,
'registered_key': registered_key,
'authoritative_key': authoritative_key,
}
parser = argparse.ArgumentParser(prog='python -mambry.server.numbers',
description='Run an Ambry numbers server')
parser.add_argument('-H', '--server-host', default=None, help="Server host. ")
parser.add_argument('-p', '--server-port', default=None, help="Server port.")
parser.add_argument('-R', '--redis-host', default=docker_host, help="Redis host.")
parser.add_argument('-r', '--redis-port', default=docker_port, help="Redis port.")
parser.add_argument('-d', '--debug', default=False, action='store_true')
parser.add_argument('-u', '--unregistered-key', default=None, help="access_key value for unregistered access")
parser.add_argument('-g', '--registered-key', default=None, help="access_key value for registered access")
parser.add_argument('-a', '--authoritative-key', default=None, help="access_key value for authoritative access")
parser.add_argument('-U', '--gen-unregistered-key', default=False, action='store_true', help="Generate an unregistered keys")
parser.add_argument('-G', '--gen-registered-key', default=False, action='store_true', help="Generate a registered key")
parser.add_argument('-A', '--gen-authoritative-key', default=False, action='store_true', help="Generate an authoritative key")
args = parser.parse_args()
if args.server_port:
d['port'] = args.server_port
if args.server_host:
d['host'] = args.server_host
if args.redis_port:
d['redis']['port'] = args.redis_port
if args.redis_host:
d['redis']['host'] = args.redis_host
if args.unregistered_key:
d['unregistered_key'] = args.unregistered_key
elif args.gen_unregistered_key:
d['unregistered_key'] = str(uuid4())
if args.registered_key:
d['registered_key'] = args.registered_key
elif args.gen_registered_key:
d['registered_key'] = str(uuid4())
if args.authoritative_key:
d['authoritative_key'] = args.authoritative_key
elif args.gen_authoritative_key:
d['authoritative_key'] = str(uuid4())
if args.debug:
d['reloader'] = args.debug
_run(**d)
| """ Flask application to parse STF Files
"""
from six import string_types
from bottle import error, hook, get, request, response # , redirect, put, post
from bottle import HTTPResponse, install # , static_file, url
from bottle import run # , debug # @UnresolvedImport
from decorator import decorator # @UnresolvedImport
import logging
import string
logging.basicConfig(level=logging.DEBUG)
class NotFound(Exception):
pass
class InternalError(Exception):
pass
class NotAuthorized(Exception):
pass
class TooManyRequests(Exception):
pass
def capture_return_exception(e):
import sys
import traceback
# (exc_type, exc_value, exc_traceback) = sys.exc_info() # @UnusedVariable
tb_list = traceback.format_list(traceback.extract_tb(sys.exc_info()[2]))
return {'exception': {
'class': e.__class__.__name__,
'args': e.args,
'trace': "\n".join(tb_list)
}}
def _CaptureException(f, *args, **kwargs):
"""Decorator implementation for capturing exceptions."""
try:
r = f(*args, **kwargs)
except HTTPResponse:
raise # redirect() uses exceptions
except Exception as e:
r = capture_return_exception(e)
if hasattr(e, 'code'):
response.status = e.code
return r
def CaptureException(f, *args, **kwargs):
"""Decorator to capture exceptions and convert them to a dict that can be
returned as JSON."""
return decorator(_CaptureException, f) # Preserves signature
class AllJSONPlugin(object):
"""A copy of the bottle JSONPlugin, but this one tries to convert all
objects to json."""
from json import dumps as json_dumps
name = 'json'
remote = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, context):
dumps = self.json_dumps
if not dumps:
return callback
def wrapper(*a, **ka):
rv = callback(*a, **ka)
if isinstance(rv, HTTPResponse):
return rv
if isinstance(rv, string_types):
return rv
# Attempt to serialize, raises exception on failure
try:
json_response = dumps(rv)
except Exception as e:
r = capture_return_exception(e)
json_response = dumps(r)
# Set content type only if serialization succesful
response.content_type = 'application/json'
return json_response
return wrapper
install(AllJSONPlugin())
@error(404)
@CaptureException
def error404(error):
raise NotFound("For url: {}".format(repr(request.url)))
@error(500)
def error500(error):
raise InternalError("For Url: {}".format(repr(request.url)))
@hook('after_request')
def enable_cors():
response.headers['Access-Control-Allow-Origin'] = '*'
@get('/')
def get_root():
return ['Nothing Here']
def _run(host, port, reloader=False, **kwargs):
logging.info('Listening on {} {}'.format(host, port))
return run(host=host, port=port, reloader=reloader, server='paste')
if __name__ == '__main__':
import argparse
import os
from uuid import uuid4
# Env vars set by docker when a link is made.
docker_host = os.getenv('REDIS_PORT_6379_TCP_ADDR')
docker_port = os.getenv('REDIS_PORT_6379_TCP_PORT', 6379)
unregistered_key = os.getenv('UNREGISTERED_KEY', None)
registered_key = os.getenv('REGISTERED_KEY', None)
authoritative_key = os.getenv('AUTHORITATIVE_KEY', None)
numbers_host = os.getenv('NUMBERS_HOST', '0.0.0.0')
d = {
'reloader': False,
'host': numbers_host,
'port': 80,
'redis': {
'host': docker_host,
'port': docker_port
},
'unregistered_key': unregistered_key,
'registered_key': registered_key,
'authoritative_key': authoritative_key,
}
parser = argparse.ArgumentParser(prog='python -mambry.server.numbers',
description='Run an Ambry numbers server')
parser.add_argument('-H', '--server-host', default=None, help="Server host. ")
parser.add_argument('-p', '--server-port', default=None, help="Server port.")
parser.add_argument('-R', '--redis-host', default=docker_host, help="Redis host.")
parser.add_argument('-r', '--redis-port', default=docker_port, help="Redis port.")
parser.add_argument('-d', '--debug', default=False, action='store_true')
parser.add_argument('-u', '--unregistered-key', default=None, help="access_key value for unregistered access")
parser.add_argument('-g', '--registered-key', default=None, help="access_key value for registered access")
parser.add_argument('-a', '--authoritative-key', default=None, help="access_key value for authoritative access")
parser.add_argument('-U', '--gen-unregistered-key', default=False, action='store_true', help="Generate an unregistered keys")
parser.add_argument('-G', '--gen-registered-key', default=False, action='store_true', help="Generate a registered key")
parser.add_argument('-A', '--gen-authoritative-key', default=False, action='store_true', help="Generate an authoritative key")
args = parser.parse_args()
if args.server_port:
d['port'] = args.server_port
if args.server_host:
d['host'] = args.server_host
if args.redis_port:
d['redis']['port'] = args.redis_port
if args.redis_host:
d['redis']['host'] = args.redis_host
if args.unregistered_key:
d['unregistered_key'] = args.unregistered_key
elif args.gen_unregistered_key:
d['unregistered_key'] = str(uuid4())
if args.registered_key:
d['registered_key'] = args.registered_key
elif args.gen_registered_key:
d['registered_key'] = str(uuid4())
if args.authoritative_key:
d['authoritative_key'] = args.authoritative_key
elif args.gen_authoritative_key:
d['authoritative_key'] = str(uuid4())
if args.debug:
d['reloader'] = args.debug
_run(**d)
| en | 0.778213 | Flask application to parse STF Files # , redirect, put, post # , static_file, url # , debug # @UnresolvedImport # @UnresolvedImport # (exc_type, exc_value, exc_traceback) = sys.exc_info() # @UnusedVariable Decorator implementation for capturing exceptions. # redirect() uses exceptions Decorator to capture exceptions and convert them to a dict that can be returned as JSON. # Preserves signature A copy of the bottle JSONPlugin, but this one tries to convert all objects to json. # Attempt to serialize, raises exception on failure # Set content type only if serialization succesful # Env vars set by docker when a link is made. | 2.420157 | 2 |
src/ai_ga.py | lclpsoz/corrida-ga | 0 | 6615075 | <filename>src/ai_ga.py
import random
import time
import os
import json
import subprocess
import numpy as np
from copy import deepcopy
from datetime import datetime
class AIGA(object):
def __init__(self, config, ai_info):
self.population_size = config['ai']['population_size']
self.config = config
if (((not 'train' in self.config['ai']) or self.config['ai']['train']) and self.population_size%2):
print("Population size must be even!")
exit(0)
self.evaluated = 0
self.features = [None for x in range(self.population_size)]
self.fitness = None
self.population = None
if 'save' in config['ai']:
self.must_save = config['ai']['save']
else:
self.must_save = False
# One for acc/break and the other for turns
self.gene_amnt = 2
self.gene_size = self.config['car']['number_of_visions'] + 1
self.EPS = config['EPS']
if ai_info:
self.set_ai_info(ai_info)
else:
self.population = self.random_population(self.population_size)
self.generation = 1
self.num_generations = config['ai']['num_of_generations']
self.verbose = config['verbose']
self.t_gen_start = time.time()
self.fps = config['fps']
if 'max_frames' in config['ai']:
self.max_frames = config['ai']['max_frames']
else:
self.max_frames = config["circuit_" + config['track']]['max_frames']
if not 'mutation_type' in config['ai'] or \
config['ai']['mutation_type'] == 'simple':
self.mutation = self.mutation_simple
else:
self.mutation = self.mutation_gradient
self.mutation_chance = config['ai']['mutation_chance']
self.mutation_factor = config['ai']['mutation_factor']
self.pop_size_elitism = int(round(config['ai']["proportion_elitism"] * self.population_size))
self.pop_size_crossover = int(round(config['ai']["proportion_crossover"] * self.population_size))
if self.pop_size_crossover%2:
self.pop_size_crossover -= 1
self.pop_size_new = self.population_size - self.pop_size_crossover - self.pop_size_elitism
try:
label_last_commit = \
subprocess.check_output(["git", "describe", "--always"]).strip()
if isinstance(label_last_commit, bytes):
label_last_commit = label_last_commit.decode('utf-8')
except:
label_last_commit = "_git-not_found"
self.identifier = \
"ga_" + \
self.config["track"] + \
datetime.now().strftime("__%Y-%d-%m_%H-%M-%S") + \
"__git-" + label_last_commit
if self.must_save:
self.save()
def set_evaluation(self, car_id : int, features : dict):
"""Set features of a car with car_id based on received features."""
if self.features[car_id] == None:
self.features[car_id] = features
self.evaluated+=1
def population_evaluated(self):
"""Returns if the whole population was evaluated."""
return self.evaluated == self.population_size
def random_population(self, n):
"""Generate n random individuals."""
ret = []
for i in range(n):
ret.append([[random.uniform(-1, 1) for j in range(self.gene_size)]
for i in range(self.gene_amnt)])
return ret
def calc_movement(self, car_id, vision, speed):
"""Based on car with car_id AI, it's vision and speed at the moment,
returns movement list."""
mov = []
indv = self.population[car_id]
for i in range(self.gene_amnt):
gene = indv[i]
total = (gene[0]*speed)/self.config['car']['number_of_visions']
for j in range(1, self.gene_size):
total += gene[j]*vision[j-1]
mov.append(total)
return mov
def calc_fitness(self):
"""Calculate fitness of the population based on features."""
self.fitness = []
for i in range(self.population_size):
feat = self.features[i]
if feat['perc_of_sectors'] < 1.0-self.EPS:
self.fitness.append(100*feat['perc_of_sectors'] +
(self.max_frames - feat['amount_frames'])/(2*self.max_frames))
else:
self.fitness.append(100*feat['perc_of_sectors'] +
(self.max_frames - feat['amount_frames']))
def clamp(self, x, mini, maxi):
"""Apply clamp to x."""
if x > maxi:
return maxi
elif x < mini:
return mini
return x
def mutation_simple(self, indv):
"""Apply mutation to indv in place. Work by adding a random value in the
interval [-self.mutation_factor, self.mutation_factor] to each position
and aplying clamp so each value is in the range [-1, 1]."""
for j in range(self.gene_amnt):
for k in range(self.gene_size):
if random.random() < self.mutation_chance:
indv[j][k] += random.uniform(-self.mutation_factor, self.mutation_factor)
self.clamp(indv[j][k], -1, 1)
    def mutation_gradient(self, indv):
        """Mutate `indv` in place with a gradient-shaped mutation.

        Two kinds of mutation are applied per gene: the speed weight (index 0)
        is always perturbed and clamped, equivalently to ``mutation_simple``;
        the vision weights are mutated by picking three anchor points (left,
        center, right), applying a random delta at the anchor and spreading it
        to the neighbours with the magnitude halved at each step.
        """
        for i in range(self.gene_amnt):
            # Speed weight: unconditional perturbation, clamped to [-1, 1].
            indv[i][0] += random.uniform(-self.mutation_factor, self.mutation_factor)
            indv[i][0] = self.clamp(indv[i][0], -1, 1)
            # Left anchor: delta halves as it spreads rightwards.
            left = 1
            if random.random() < self.mutation_chance:
                div = 1
                mut = random.uniform(-self.mutation_factor, self.mutation_factor)
                for j in range (left, self.gene_size):
                    indv[i][j] += mut/div
                    indv[i][j] = self.clamp(indv[i][j], -1, 1)
                    div *= 2
            # Center anchor: delta spreads symmetrically to both sides.
            # NOTE(review): for an even `number_of_visions` the index
            # `center + j` can reach `self.gene_size`, which would raise an
            # IndexError -- presumably the config always uses an odd count;
            # confirm.
            center = self.config['car']['number_of_visions']//2 + 1
            if random.random() < self.mutation_chance:
                div = 1
                mut = random.uniform(-self.mutation_factor, self.mutation_factor)
                for j in range (center):
                    indv[i][center+j] += mut/div
                    indv[i][center+j] = self.clamp(indv[i][center+j], -1, 1)
                    if j:
                        indv[i][center-j] += mut/div
                        indv[i][center-j] = self.clamp(indv[i][center-j], -1, 1)
                    div *= 2
            # Right anchor: delta halves as it spreads leftwards.
            # NOTE(review): this loop stops at `left + 1`, so index 1 is never
            # touched by the right anchor -- verify the asymmetry is intended.
            right = self.config['car']['number_of_visions']
            if random.random() < self.mutation_chance:
                div = 1
                mut = random.uniform(-self.mutation_factor, self.mutation_factor)
                for j in range (right, left, -1):
                    indv[i][j] += mut/div
                    indv[i][j] = self.clamp(indv[i][j], -1, 1)
                    div *= 2
    def crossover(self, parent_1, parent_2):
        """Return two offspring produced by crossing over the two parents.

        A randomly sized, roughly centered slice of each gene's vision weights
        (and, with 50% probability, the speed weight) is inherited from the
        second argument of ``apply``; the complement comes from the first.
        Mutation is applied to each offspring before it is returned.
        """
        # Proportion from each parent
        # How wide the slice taken from the second parent is, around the middle.
        proportion_vision = random.randint(0, self.gene_size//2)
        mid_left = (self.gene_size-2)//2 - proportion_vision
        mid_right = (self.gene_size-1)//2 + proportion_vision
        # 0 -> the speed weight (index 0) also comes from the second parent.
        dominant_speed = random.randint(0, 1)
        def apply(p_1, p_2):
            """Apply crossover: copy p_1, overwrite the middle slice (and
            possibly the speed weight) with p_2's values, then mutate."""
            indv = deepcopy(p_1)
            for i in range(self.gene_amnt):
                if not dominant_speed:
                    indv[i][0] = p_2[i][0]
                for k in range(mid_left+1, mid_right):
                    indv[i][k] = p_2[i][k]
            # self.mutation is either mutation_simple or mutation_gradient,
            # selected in __init__ from the config.
            self.mutation(indv)
            return indv
        # The two children use opposite parent roles, so crossover is symmetric.
        return [apply(parent_1, parent_2),
                apply(parent_2, parent_1)]
def save(self):
"""Save data about the AI in specific folder."""
folder_path = os.path.join("ga", self.identifier)
if not os.path.exists("ga"):
os.makedirs("ga")
if not os.path.exists(folder_path):
os.makedirs(folder_path)
file_path = os.path.join(
folder_path,
"config.json"
)
if self.generation == 0 or not os.path.exists(file_path):
json.dump(self.config, open(file_path, 'w'))
else:
ai_info = {
'population' : self.population,
'generation' : self.generation,
'features' : self.features,
'fitness' : self.fitness
}
file_path = os.path.join(
folder_path,
"gen_" + str(self.generation) + ".json"
)
json.dump(ai_info, open(file_path, 'w'))
def load_generation(self, ga_folder_path : str, generation : int):
"""Loads specified generation from folder ga_folder_path."""
load(os.path.join(ga_folder_path, "gen_" + str(generation)))
def load(self, file_path):
"""Loads generation saved on json in file_path."""
self.load(json.load(open(file_path, 'r')))
    def set_ai_info(self, ai_info):
        """Initialize generation/population state from a loaded snapshot dict.

        Expects keys 'generation' and 'population'.  Features and fitness are
        reset; they belong to the snapshot's already-finished evaluation and
        must be recomputed for the next run.
        """
        self.generation = ai_info['generation']
        self.features = [None for i in range(self.population_size)]
        self.fitness = None
        if len(ai_info['population']) >= self.population_size:
            # Keep only the tail of the saved population.  NOTE(review):
            # next_generation() stores the population sorted by ascending
            # fitness before saving, so the tail is presumably the fittest
            # slice -- confirm against the save order.
            self.population = ai_info['population'][-self.population_size:]
        else:
            # Snapshot smaller than the configured size: pad with randoms.
            sz_new = self.population_size - len(ai_info['population'])
            self.population = ai_info['population'] + self.random_population(sz_new)
        # NOTE(review): legacy-format migration -- individuals with 4 genes
        # are reduced to 2 by keeping genes 0 and 2; assumes old snapshots
        # used a 4-gene layout where genes 0/2 map to the current acc/turn.
        if len(self.population[0]) == 4:
            for i in range(len(self.population)):
                self.population[i][1] = self.population[i][2]
                self.population[i] = self.population[i][:2]
    def next_generation(self):
        """Advance the GA by one generation.

        Returns False (without evolving) once the configured number of
        generations has been reached; otherwise computes fitness, optionally
        logs/saves, breeds the next population (elitism + crossover + fresh
        randoms) when training is enabled, resets the per-generation state
        and returns True.
        """
        # Termination: only log the final state and signal the caller to stop.
        if self.generation == self.num_generations:
            if self.verbose > 0:
                print("Generation %d. Previous in %.2f s" % (self.generation, time.time() - self.t_gen_start))
            if self.verbose > 1:
                print("Last generation:")
                for i in range(len(self.population)):
                    print(self.population[i], self.features[i])
            if self.verbose > 0:
                print("")
            return False
        self.calc_fitness()
        # Sort ascending by fitness (genome as a deterministic tie-breaker),
        # so the fittest individuals end up at the tail of the list.
        sorted_by_fitness = list(zip(self.fitness, self.features, self.population))
        sorted_by_fitness.sort(key=lambda x : (x[0], x[2]))
        if self.verbose > 0:
            print("Generation %d. Evaluated in %.2f s" % (
                self.generation,
                time.time() - self.t_gen_start)
            )
            # Summary statistics for the top 5% (at least one individual).
            qnt_top_5p = max(1, int(self.population_size*0.05))
            top_5p = [(x,y) for x, y, _ in sorted_by_fitness][-qnt_top_5p:]
            to_prt = [( x[0],
                        x[1]['perc_of_sectors'],
                        x[1]['amount_frames']) for x in top_5p][::-1]
            print("\tTOP 5% fitness:", ["%.2f, (%.2f, %.2f)" % x for x in to_prt])
            print("\tBest fitness: %.2f" % max(self.fitness))
            print("\tAvr fitness: %.2f" % (sum(self.fitness)/self.population_size))
            print("\tWorst fitness: %.2f" % min(self.fitness))
        # Breeding only happens in training mode (default when unspecified).
        if (not 'train' in self.config['ai']) or self.config['ai']['train']:
            # Elitism: copy the best individuals unchanged (reversed so the
            # very best comes first).
            pop_elitism = deepcopy([x for _,_,x in sorted_by_fitness][-self.pop_size_elitism:])[::-1]
            pop_crossover = []
            for i in range(0, self.pop_size_crossover, 2):
                # Fitness-weighted parent selection; deepcopy so crossover's
                # in-place mutation cannot corrupt the current population.
                parent_1, parent_2 = map(
                    deepcopy,
                    random.choices(self.population, self.fitness, k=2)
                )
                pop_crossover.extend(self.crossover(parent_1, parent_2))
            pop_new = self.random_population(self.pop_size_new)
            # Re-align fitness/features/population before saving so the
            # snapshot stores them in the same (sorted) order.
            self.fitness = [x for x,_,_ in sorted_by_fitness]
            self.features = [x for _,x,_ in sorted_by_fitness]
            self.population = [x for _,_,x in sorted_by_fitness]
            if self.must_save:
                self.save()
            if self.verbose > 1:
                for i in range(self.population_size):
                    print(self.population[i], self.features[i], self.fitness[i])
            if self.verbose > 0:
                print("")
            self.population = pop_elitism + pop_crossover + pop_new
        # Reset per-generation bookkeeping for the next evaluation round.
        self.generation += 1
        self.fitness = None
        self.features = [None for i in range(self.population_size)]
        self.evaluated = 0
        self.t_gen_start = time.time()
        return True
import random
import time
import os
import json
import subprocess
import numpy as np
from copy import deepcopy
from datetime import datetime
class AIGA(object):
def __init__(self, config, ai_info):
self.population_size = config['ai']['population_size']
self.config = config
if (((not 'train' in self.config['ai']) or self.config['ai']['train']) and self.population_size%2):
print("Population size must be even!")
exit(0)
self.evaluated = 0
self.features = [None for x in range(self.population_size)]
self.fitness = None
self.population = None
if 'save' in config['ai']:
self.must_save = config['ai']['save']
else:
self.must_save = False
# One for acc/break and the other for turns
self.gene_amnt = 2
self.gene_size = self.config['car']['number_of_visions'] + 1
self.EPS = config['EPS']
if ai_info:
self.set_ai_info(ai_info)
else:
self.population = self.random_population(self.population_size)
self.generation = 1
self.num_generations = config['ai']['num_of_generations']
self.verbose = config['verbose']
self.t_gen_start = time.time()
self.fps = config['fps']
if 'max_frames' in config['ai']:
self.max_frames = config['ai']['max_frames']
else:
self.max_frames = config["circuit_" + config['track']]['max_frames']
if not 'mutation_type' in config['ai'] or \
config['ai']['mutation_type'] == 'simple':
self.mutation = self.mutation_simple
else:
self.mutation = self.mutation_gradient
self.mutation_chance = config['ai']['mutation_chance']
self.mutation_factor = config['ai']['mutation_factor']
self.pop_size_elitism = int(round(config['ai']["proportion_elitism"] * self.population_size))
self.pop_size_crossover = int(round(config['ai']["proportion_crossover"] * self.population_size))
if self.pop_size_crossover%2:
self.pop_size_crossover -= 1
self.pop_size_new = self.population_size - self.pop_size_crossover - self.pop_size_elitism
try:
label_last_commit = \
subprocess.check_output(["git", "describe", "--always"]).strip()
if isinstance(label_last_commit, bytes):
label_last_commit = label_last_commit.decode('utf-8')
except:
label_last_commit = "_git-not_found"
self.identifier = \
"ga_" + \
self.config["track"] + \
datetime.now().strftime("__%Y-%d-%m_%H-%M-%S") + \
"__git-" + label_last_commit
if self.must_save:
self.save()
def set_evaluation(self, car_id : int, features : dict):
"""Set features of a car with car_id based on received features."""
if self.features[car_id] == None:
self.features[car_id] = features
self.evaluated+=1
def population_evaluated(self):
"""Returns if the whole population was evaluated."""
return self.evaluated == self.population_size
def random_population(self, n):
"""Generate n random individuals."""
ret = []
for i in range(n):
ret.append([[random.uniform(-1, 1) for j in range(self.gene_size)]
for i in range(self.gene_amnt)])
return ret
def calc_movement(self, car_id, vision, speed):
"""Based on car with car_id AI, it's vision and speed at the moment,
returns movement list."""
mov = []
indv = self.population[car_id]
for i in range(self.gene_amnt):
gene = indv[i]
total = (gene[0]*speed)/self.config['car']['number_of_visions']
for j in range(1, self.gene_size):
total += gene[j]*vision[j-1]
mov.append(total)
return mov
def calc_fitness(self):
"""Calculate fitness of the population based on features."""
self.fitness = []
for i in range(self.population_size):
feat = self.features[i]
if feat['perc_of_sectors'] < 1.0-self.EPS:
self.fitness.append(100*feat['perc_of_sectors'] +
(self.max_frames - feat['amount_frames'])/(2*self.max_frames))
else:
self.fitness.append(100*feat['perc_of_sectors'] +
(self.max_frames - feat['amount_frames']))
def clamp(self, x, mini, maxi):
"""Apply clamp to x."""
if x > maxi:
return maxi
elif x < mini:
return mini
return x
def mutation_simple(self, indv):
"""Apply mutation to indv in place. Work by adding a random value in the
interval [-self.mutation_factor, self.mutation_factor] to each position
and aplying clamp so each value is in the range [-1, 1]."""
for j in range(self.gene_amnt):
for k in range(self.gene_size):
if random.random() < self.mutation_chance:
indv[j][k] += random.uniform(-self.mutation_factor, self.mutation_factor)
self.clamp(indv[j][k], -1, 1)
def mutation_gradient(self, indv):
"""Apply mutation to indv in place. There's two type of mutation in this
function, the first is applied to speed, and it's equivalent to
mutation_simple, the second is for the rest of the gene, that is
responsible for vision, and works by setting three points, left, center
and right and applying a random value to this position that degredes
as it is spread to all it neighbours."""
for i in range(self.gene_amnt):
indv[i][0] += random.uniform(-self.mutation_factor, self.mutation_factor)
indv[i][0] = self.clamp(indv[i][0], -1, 1)
left = 1
if random.random() < self.mutation_chance:
div = 1
mut = random.uniform(-self.mutation_factor, self.mutation_factor)
for j in range (left, self.gene_size):
indv[i][j] += mut/div
indv[i][j] = self.clamp(indv[i][j], -1, 1)
div *= 2
center = self.config['car']['number_of_visions']//2 + 1
if random.random() < self.mutation_chance:
div = 1
mut = random.uniform(-self.mutation_factor, self.mutation_factor)
for j in range (center):
indv[i][center+j] += mut/div
indv[i][center+j] = self.clamp(indv[i][center+j], -1, 1)
if j:
indv[i][center-j] += mut/div
indv[i][center-j] = self.clamp(indv[i][center-j], -1, 1)
div *= 2
right = self.config['car']['number_of_visions']
if random.random() < self.mutation_chance:
div = 1
mut = random.uniform(-self.mutation_factor, self.mutation_factor)
for j in range (right, left, -1):
indv[i][j] += mut/div
indv[i][j] = self.clamp(indv[i][j], -1, 1)
div *= 2
def crossover(self, parent_1, parent_2):
"""Returns two individuals, result of the crossover."""
# Proportion from each parent
proportion_vision = random.randint(0, self.gene_size//2)
mid_left = (self.gene_size-2)//2 - proportion_vision
mid_right = (self.gene_size-1)//2 + proportion_vision
dominant_speed = random.randint(0, 1)
def apply(p_1, p_2):
"""Apply crossover."""
indv = deepcopy(p_1)
for i in range(self.gene_amnt):
if not dominant_speed:
indv[i][0] = p_2[i][0]
for k in range(mid_left+1, mid_right):
indv[i][k] = p_2[i][k]
self.mutation(indv)
return indv
return [apply(parent_1, parent_2),
apply(parent_2, parent_1)]
def save(self):
"""Save data about the AI in specific folder."""
folder_path = os.path.join("ga", self.identifier)
if not os.path.exists("ga"):
os.makedirs("ga")
if not os.path.exists(folder_path):
os.makedirs(folder_path)
file_path = os.path.join(
folder_path,
"config.json"
)
if self.generation == 0 or not os.path.exists(file_path):
json.dump(self.config, open(file_path, 'w'))
else:
ai_info = {
'population' : self.population,
'generation' : self.generation,
'features' : self.features,
'fitness' : self.fitness
}
file_path = os.path.join(
folder_path,
"gen_" + str(self.generation) + ".json"
)
json.dump(ai_info, open(file_path, 'w'))
def load_generation(self, ga_folder_path : str, generation : int):
"""Loads specified generation from folder ga_folder_path."""
load(os.path.join(ga_folder_path, "gen_" + str(generation)))
def load(self, file_path):
"""Loads generation saved on json in file_path."""
self.load(json.load(open(file_path, 'r')))
def set_ai_info(self, ai_info):
"""Sets attributes of class based on ai_info."""
self.generation = ai_info['generation']
self.features = [None for i in range(self.population_size)]
self.fitness = None
if len(ai_info['population']) >= self.population_size:
self.population = ai_info['population'][-self.population_size:]
else:
sz_new = self.population_size - len(ai_info['population'])
self.population = ai_info['population'] + self.random_population(sz_new)
if len(self.population[0]) == 4:
for i in range(len(self.population)):
self.population[i][1] = self.population[i][2]
self.population[i] = self.population[i][:2]
def next_generation(self):
"""If the number of generation was achieved, returns False, else,
generates next generation."""
if self.generation == self.num_generations:
if self.verbose > 0:
print("Generation %d. Previous in %.2f s" % (self.generation, time.time() - self.t_gen_start))
if self.verbose > 1:
print("Last generation:")
for i in range(len(self.population)):
print(self.population[i], self.features[i])
if self.verbose > 0:
print("")
return False
self.calc_fitness()
sorted_by_fitness = list(zip(self.fitness, self.features, self.population))
sorted_by_fitness.sort(key=lambda x : (x[0], x[2]))
if self.verbose > 0:
print("Generation %d. Evaluated in %.2f s" % (
self.generation,
time.time() - self.t_gen_start)
)
qnt_top_5p = max(1, int(self.population_size*0.05))
top_5p = [(x,y) for x, y, _ in sorted_by_fitness][-qnt_top_5p:]
to_prt = [( x[0],
x[1]['perc_of_sectors'],
x[1]['amount_frames']) for x in top_5p][::-1]
print("\tTOP 5% fitness:", ["%.2f, (%.2f, %.2f)" % x for x in to_prt])
print("\tBest fitness: %.2f" % max(self.fitness))
print("\tAvr fitness: %.2f" % (sum(self.fitness)/self.population_size))
print("\tWorst fitness: %.2f" % min(self.fitness))
if (not 'train' in self.config['ai']) or self.config['ai']['train']:
pop_elitism = deepcopy([x for _,_,x in sorted_by_fitness][-self.pop_size_elitism:])[::-1]
pop_crossover = []
for i in range(0, self.pop_size_crossover, 2):
parent_1, parent_2 = map(
deepcopy,
random.choices(self.population, self.fitness, k=2)
)
pop_crossover.extend(self.crossover(parent_1, parent_2))
pop_new = self.random_population(self.pop_size_new)
self.fitness = [x for x,_,_ in sorted_by_fitness]
self.features = [x for _,x,_ in sorted_by_fitness]
self.population = [x for _,_,x in sorted_by_fitness]
if self.must_save:
self.save()
if self.verbose > 1:
for i in range(self.population_size):
print(self.population[i], self.features[i], self.fitness[i])
if self.verbose > 0:
print("")
self.population = pop_elitism + pop_crossover + pop_new
self.generation += 1
self.fitness = None
self.features = [None for i in range(self.population_size)]
self.evaluated = 0
self.t_gen_start = time.time()
return True | en | 0.923327 | # One for acc/break and the other for turns Set features of a car with car_id based on received features. Returns if the whole population was evaluated. Generate n random individuals. Based on car with car_id AI, it's vision and speed at the moment, returns movement list. Calculate fitness of the population based on features. Apply clamp to x. Apply mutation to indv in place. Work by adding a random value in the interval [-self.mutation_factor, self.mutation_factor] to each position and aplying clamp so each value is in the range [-1, 1]. Apply mutation to indv in place. There's two type of mutation in this function, the first is applied to speed, and it's equivalent to mutation_simple, the second is for the rest of the gene, that is responsible for vision, and works by setting three points, left, center and right and applying a random value to this position that degredes as it is spread to all it neighbours. Returns two individuals, result of the crossover. # Proportion from each parent Apply crossover. Save data about the AI in specific folder. Loads specified generation from folder ga_folder_path. Loads generation saved on json in file_path. Sets attributes of class based on ai_info. If the number of generation was achieved, returns False, else, generates next generation. | 2.623527 | 3 |
hathor/transaction/resources/block_at_height.py | mbnunes/hathor-core | 51 | 6615076 | <reponame>mbnunes/hathor-core
# Copyright 2021 Hathor Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typing import TYPE_CHECKING
from twisted.web import resource
from hathor.api_util import get_missing_params_msg, parse_get_arguments, set_cors
from hathor.cli.openapi_files.register import register_resource
if TYPE_CHECKING:
from twisted.web.http import Request
from hathor.manager import HathorManager
@register_resource
class BlockAtHeightResource(resource.Resource):
    """ Implements a web server API to return the block at specific height.

    You must run with option `--status <PORT>`.
    """
    # Leaf resource: twisted must not traverse any further path segments.
    isLeaf = True

    def __init__(self, manager: 'HathorManager'):
        # Important to have the manager so we can know the tx_storage
        self.manager = manager

    def render_GET(self, request: 'Request') -> bytes:
        """ Get request /block_at_height/ that returns a block at height in parameter

        'height': int, the height of block to get

        :rtype: string (json)
        """
        request.setHeader(b'content-type', b'application/json; charset=utf-8')
        set_cors(request, 'GET')

        # Height parameter is required; reply with the standard
        # missing-parameters payload otherwise.
        parsed = parse_get_arguments(request.args, ['height'])
        if not parsed['success']:
            return get_missing_params_msg(parsed['missing'])

        args = parsed['args']

        # Height parameter must be an integer
        try:
            height = int(args['height'])
        except ValueError:
            return json.dumps({
                'success': False,
                'message': 'Invalid \'height\' parameter, expected an integer'
            }).encode('utf-8')

        # Get hash of the block with the height
        block_hash = self.manager.tx_storage.get_from_block_height_index(height)

        # If there is no block in the index with this height, block_hash will be None
        if block_hash is None:
            return json.dumps({
                'success': False,
                'message': 'No block with height {}.'.format(height)
            }).encode('utf-8')

        # Serialize the full block (see the openapi example below for the
        # resulting shape).
        block = self.manager.tx_storage.get_transaction(block_hash)
        data = {'success': True, 'block': block.to_json_extended()}
        return json.dumps(data, indent=4).encode('utf-8')
BlockAtHeightResource.openapi = {
'/block_at_height': {
'x-visibility': 'public',
'x-rate-limit': {
'global': [
{
'rate': '50r/s',
'burst': 100,
'delay': 50
}
],
'per-ip': [
{
'rate': '3r/s',
'burst': 10,
'delay': 3
}
]
},
'get': {
'tags': ['block'],
'operationId': 'block',
'summary': 'Get block at height',
'description': 'Returns the block at specific height in the best chain.',
'parameters': [
{
'name': 'height',
'in': 'query',
'description': 'Height of the block to get',
'required': True,
'schema': {
'type': 'int'
}
},
],
'responses': {
'200': {
'description': 'Success',
'content': {
'application/json': {
'examples': {
'success': {
'summary': 'Success block height 1',
'value': {
'success': True,
'block': {
'tx_id': ('080c8086376ab7105d17df1127a68ede'
'df54029a21b5d98841448cc23b5123ff'),
'version': 0,
'weight': 1.0,
'timestamp': 1616094323,
'is_voided': False,
'inputs': [],
'outputs': [
{
'value': 6400,
'token_data': 0,
'script': 'dqkU4yipgEZjbphR/M3gUGjsbyb1s76IrA==',
'decoded': {
'type': 'P2PKH',
'address': 'HTEEV9FJeqBCYLUvkEHsWAAi6UGs9yxJKj',
'timelock': None
},
'token': '00',
'spent_by': None
}
],
'parents': [
'339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792',
'16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952',
'33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'
],
'height': 1
}
}
},
'error': {
'summary': 'Block not found',
'value': {
'success': False,
'message': 'Does not have a block with height 100.'
}
},
}
}
}
}
}
}
}
}
| # Copyright 2021 Hathor Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typing import TYPE_CHECKING
from twisted.web import resource
from hathor.api_util import get_missing_params_msg, parse_get_arguments, set_cors
from hathor.cli.openapi_files.register import register_resource
if TYPE_CHECKING:
from twisted.web.http import Request
from hathor.manager import HathorManager
@register_resource
class BlockAtHeightResource(resource.Resource):
""" Implements a web server API to return the block at specific height.
You must run with option `--status <PORT>`.
"""
isLeaf = True
def __init__(self, manager: 'HathorManager'):
# Important to have the manager so we can know the tx_storage
self.manager = manager
def render_GET(self, request: 'Request') -> bytes:
""" Get request /block_at_height/ that returns a block at height in parameter
'height': int, the height of block to get
:rtype: string (json)
"""
request.setHeader(b'content-type', b'application/json; charset=utf-8')
set_cors(request, 'GET')
# Height parameter is required
parsed = parse_get_arguments(request.args, ['height'])
if not parsed['success']:
return get_missing_params_msg(parsed['missing'])
args = parsed['args']
# Height parameter must be an integer
try:
height = int(args['height'])
except ValueError:
return json.dumps({
'success': False,
'message': 'Invalid \'height\' parameter, expected an integer'
}).encode('utf-8')
# Get hash of the block with the height
block_hash = self.manager.tx_storage.get_from_block_height_index(height)
# If there is no block in the index with this height, block_hash will be None
if block_hash is None:
return json.dumps({
'success': False,
'message': 'No block with height {}.'.format(height)
}).encode('utf-8')
block = self.manager.tx_storage.get_transaction(block_hash)
data = {'success': True, 'block': block.to_json_extended()}
return json.dumps(data, indent=4).encode('utf-8')
BlockAtHeightResource.openapi = {
'/block_at_height': {
'x-visibility': 'public',
'x-rate-limit': {
'global': [
{
'rate': '50r/s',
'burst': 100,
'delay': 50
}
],
'per-ip': [
{
'rate': '3r/s',
'burst': 10,
'delay': 3
}
]
},
'get': {
'tags': ['block'],
'operationId': 'block',
'summary': 'Get block at height',
'description': 'Returns the block at specific height in the best chain.',
'parameters': [
{
'name': 'height',
'in': 'query',
'description': 'Height of the block to get',
'required': True,
'schema': {
'type': 'int'
}
},
],
'responses': {
'200': {
'description': 'Success',
'content': {
'application/json': {
'examples': {
'success': {
'summary': 'Success block height 1',
'value': {
'success': True,
'block': {
'tx_id': ('080c8086376ab7105d17df1127a68ede'
'df54029a21b5d98841448cc23b5123ff'),
'version': 0,
'weight': 1.0,
'timestamp': 1616094323,
'is_voided': False,
'inputs': [],
'outputs': [
{
'value': 6400,
'token_data': 0,
'script': 'dqkU4yipgEZjbphR/M3gUGjsbyb1s76IrA==',
'decoded': {
'type': 'P2PKH',
'address': 'HTEEV9FJeqBCYLUvkEHsWAAi6UGs9yxJKj',
'timelock': None
},
'token': '00',
'spent_by': None
}
],
'parents': [
'339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792',
'16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952',
'33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'
],
'height': 1
}
}
},
'error': {
'summary': 'Block not found',
'value': {
'success': False,
'message': 'Does not have a block with height 100.'
}
},
}
}
}
}
}
}
}
} | en | 0.77727 | # Copyright 2021 Hathor Labs # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Implements a web server API to return the block at specific height. You must run with option `--status <PORT>`. # Important to have the manager so we can know the tx_storage Get request /block_at_height/ that returns a block at height in parameter 'height': int, the height of block to get :rtype: string (json) # Height parameter is required # Height parameter must be an integer # Get hash of the block with the height # If there is no block in the index with this height, block_hash will be None | 2.314047 | 2 |
how_much.py | belsimpel/code-event | 0 | 6615077 | # How much
# This was the most tricky one. We will explain how you could have solved this.
#
# You could for example bruteforce all the available options to see whether or not
# the option would fall within the allowed range (with for example: https://docs.python.org/3/library/itertools.html)
# However doing so would take hours/days (more time than there was available for the hackathon)
# It would be possible to validate your script on a smaller amount (which you could prove by hand and then
# use to validate that your program works), however then you wouldn't be able to generate the answer (due to time
# constraints) with the bruteforce method.
#
# The option that we opted for is a Tree based structure, this because we can smartly traverse the tree / add nodes
# to prevent excessive calculations / duplicate data. Another benefit of the tree structure is that we prevent a huge
# dataset. There are some comments around the code to hopefully clarify some bits, but if things are unclear,
# it helps if you draw the tree on paper (with smaller values, for example: 10L, and cups of sizes 5, 3 & 2,
# this should result in 8 unique possible combinations).
#
# Below we have provided our example on how you could have solved this puzzle
#
# If you get a recursion depth error, you could either increase the recursion depth
# (see: https://stackoverflow.com/questions/5061582/setting-stacksize-in-a-python-script/16248113#16248113)
# Or rewrite the code to be iterative.
#
# Note that increasing the recursion depth can be dangerous (mainly due to Python crashing, see below for more details),
# but the standard limit is a little bit conservative.
# (a copy pasta from the SO link above, works fine for example (on my machine)).
#
# The recursion depth is set as a guard to prevent infinite recursions from causing an overflow of the C stack
# and thus crashing Python (this is the 'dangerous' part)
# see: https://docs.python.org/3/library/sys.html#sys.setrecursionlimit
multiplication_factor = 100
# Change False to True if you like to see the valid combinations (will slow the execution of this script down
# by quite a lot since it's IO)
print_combinations = False
class Tree(object):
    """A node of the cup-combination search tree.

    `lower_bound`/`upper_bound` are the remaining capacity (relative to the
    allowed range) after pouring every cup on the path from the root to this
    node; `cup` is the cup poured at this node (None for the root) and
    `parent` links back towards the root.
    """

    def __init__(self, lower_bound, upper_bound, cup=None, parent=None):
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        self.cup = cup
        self.parent = parent

    def get_cups(self):
        """
        Return the cups chosen on the path from this node up to the root.

        Walks the parent chain iteratively (instead of recursing) and stops
        at the root, which carries no cup.

        :return: A list containing all the set cups up until (including) this node.
        """
        cups = []
        node = self
        while node is not None and node.cup is not None:
            cups.append(node.cup)
            node = node.parent
        return cups
def get_possible_sizes():
    """Return the available container sizes scaled to integers.

    The sizes in liters are multiplied by `multiplication_factor` so that all
    later bookkeeping is done with integers, avoiding floating point issues.
    (The target amount gets scaled by the same factor, so the math is
    unaffected.)
    """
    liter_sizes = (50, 35, .33, .3)
    return [int(size * multiplication_factor) for size in liter_sizes]
def calculate_possible_combinations(cups, lower_bound, upper_bound):
    """Build the search tree and print how many valid cup combinations exist."""
    search_root = Tree(lower_bound, upper_bound)
    total = add_nodes(search_root, cups)
    print("A total of {0} combinations are possible.".format(total))
def add_nodes(tree, cups):
    """Recursively expand `tree` by trying every cup that still fits.

    Returns the number of distinct cup combinations whose total ends up
    inside the allowed range (upper bound between 0 and 1 liter remaining,
    everything scaled by `multiplication_factor`).  Only non-increasing cup
    sequences are explored, so each multiset of cups is visited exactly once.
    """
    possible_combinations = 0
    for cup in cups:
        if cup <= tree.upper_bound:
            child = Tree((tree.lower_bound - cup), (tree.upper_bound - cup), cup, tree)
            # Because we were allowed to have an additional of 1 liter (so valid combinations are between
            # our target and our target + 1) the upper bound value of our child node is a valid leaf node
            # if it's between the 0 and 1 (respecting our multiplication factor)
            # If this is the case, we got a valid combination
            if 0 <= child.upper_bound <= (1 * multiplication_factor):
                possible_combinations += 1
                # NOTE(review): a child that already lies inside the valid
                # range is counted but never expanded further; if the smallest
                # cup still fits (child.upper_bound >= smallest cup size), the
                # longer combinations on this path are never visited --
                # confirm this is the intended counting.
                if print_combinations:
                    print(child.get_cups())
            else:
                # We only need to add nodes for all the cups that are equal to or lower
                # then the current cup, thus we will create a new list (`new_cups`) that
                # only contains values equal or lower to the current cup.
                # This because (5, 3, 2) is the same as (3, 5, 2) in the case
                # of storage. Thus we only need to check paths with decreasing numbers.
                new_cups = [c for c in cups if c <= cup]
                possible_combinations += add_nodes(child, new_cups)
    return possible_combinations
if __name__ == "__main__":
    # Entry point: read the target amount (whole liters) and report how many
    # cup combinations land within [target, target + 1] liters.
    user_input = input("How much beer do you need to order?")
    # See the PyDoc in get_possible_sizes() for the reasoning behind the `* 100`
    amount_of_beer_to_order = int(user_input) * multiplication_factor
    cups = get_possible_sizes()
    lower_bound = amount_of_beer_to_order
    # One extra liter of overshoot is allowed, hence target + 1 * factor.
    upper_bound = amount_of_beer_to_order + (1 * multiplication_factor)
    calculate_possible_combinations(cups, lower_bound, upper_bound)
| # How much
# This was the most tricky one. We will explain how you could have solved this.
#
# You could for example bruteforce all the available options to see whether or not
# the option would fall within the allowed range (with for example: https://docs.python.org/3/library/itertools.html)
# However doing so would take hours/days (more time than there was available for the hackathon)
# It would be possible to validate your script on a smaller amount (which you could prove by hand and then
# use to validate that your program works), however then you wouldn't be able to generate the answer (due to time
# constraints) with the bruteforce method.
#
# The option that we opted for is a Tree based structure, this because we can smartly traverse the tree / add nodes
# to prevent excessive calculations / duplicate data. Another benefit of the tree structure is that we prevent a huge
# dataset. There are some comments around the code to hopefully clarify some bits, but if things are unclear,
# it helps if you draw the tree on paper (with smaller values, for example: 10L, and cups of sizes 5, 3 & 2,
# this should result in 8 unique possible combinations).
#
# Below we have provided our example on how you could have solved this puzzle
#
# If you get a recursion depth error, you could either increase the recursion depth
# (see: https://stackoverflow.com/questions/5061582/setting-stacksize-in-a-python-script/16248113#16248113)
# Or rewrite the code to be iterative.
#
# Note that increasing the recursion depth can be dangerous (mainly due to Python crashing, see below for more details),
# but the standard limit is a little bit conservative.
# (a copy pasta from the SO link above, works fine for example (on my machine)).
#
# The recursion depth is set as a guard to prevent infinite recursions from causing an overflow of the C stack
# and thus crashing Python (this is the 'dangerous' part)
# see: https://docs.python.org/3/library/sys.html#sys.setrecursionlimit
multiplication_factor = 100
# Change False to True if you like to see the valid combinations (will slow the execution of this script down
# by quite a lot since it's IO)
print_combinations = False
class Tree(object):
    """Node in the search tree of cup combinations.

    Each node stores the remaining lower/upper bound after pouring ``cup``
    litres, plus a link back to its parent so the full pour sequence can be
    reconstructed by walking up to the root.
    """

    def __init__(self, lower_bound, upper_bound, cup=None, parent=None):
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        self.cup = cup
        self.parent = parent

    def get_cups(self):
        """Return every cup chosen on the path from this node to the root.

        :return: list of cup sizes, most recent first; empty for the root.
        """
        # Walk the parent chain iteratively; the root carries no cup and
        # therefore terminates the walk.
        trail = []
        node = self
        while node is not None and node.cup is not None:
            trail.append(node.cup)
            node = node.parent
        return trail
def get_possible_sizes(factor=None):
    """Return the available cup sizes scaled to integers.

    All sizes are multiplied by *factor* (defaults to the module-level
    ``multiplication_factor``) so subsequent bound arithmetic can be done
    with exact integers instead of floats.

    :param factor: optional scale factor; falls back to the module constant.
    :return: list of integer cup sizes.
    """
    if factor is None:
        factor = multiplication_factor
    return [int(x * factor) for x in [50, 35, .33, .3]]
def calculate_possible_combinations(cups, lower_bound, upper_bound):
    """Count and print the number of valid cup combinations.

    Builds the search tree rooted at the full (lower, upper) interval and
    lets add_nodes() do the recursive exploration.
    """
    total = add_nodes(Tree(lower_bound, upper_bound), cups)
    print("A total of {0} combinations are possible.".format(total))
def add_nodes(tree, cups):
    """Recursively expand *tree* with one child per usable cup.

    A child is a leaf (valid combination) when its remaining upper bound
    lands inside [0, 1 litre] (respecting the multiplication factor);
    otherwise the child is expanded further with cups no larger than the
    current one, so only decreasing sequences are explored and permutations
    of the same multiset are counted once.

    :return: number of valid combinations found below *tree*.
    """
    total = 0
    for cup in cups:
        # A cup bigger than the remaining upper bound can never fit.
        if cup > tree.upper_bound:
            continue
        child = Tree(tree.lower_bound - cup, tree.upper_bound - cup, cup, tree)
        if 0 <= child.upper_bound <= (1 * multiplication_factor):
            # Remaining slack is within the allowed extra litre: valid leaf.
            total += 1
            if print_combinations:
                print(child.get_cups())
        else:
            # Only recurse with cups <= the current one (order-independent
            # counting: (5, 3, 2) and (3, 5, 2) are the same combination).
            total += add_nodes(child, [c for c in cups if c <= cup])
    return total
if __name__ == "__main__":
    user_input = input("How much beer do you need to order?")
    # Scale the requested litres by the shared multiplication factor (see
    # get_possible_sizes()) so the bound arithmetic stays in exact integers.
    amount_of_beer_to_order = int(user_input) * multiplication_factor
    cups = get_possible_sizes()
    # Valid totals fall between the requested amount and one extra litre.
    lower_bound = amount_of_beer_to_order
    upper_bound = amount_of_beer_to_order + (1 * multiplication_factor)
    calculate_possible_combinations(cups, lower_bound, upper_bound)
| en | 0.939477 | # How much # This was the most tricky one. We will explain how you could have solved this. # # You could for example bruteforce all the available options to see whether or not # the option would fall within the allowed range (with for example: https://docs.python.org/3/library/itertools.html) # However doing so would take hours/days (more time then there was available for the hackathon) # It would be possible to validate your script on a smaller amount (which you could prove by hand and then # use to validate that your program works), however then you wouldn't be able to generate the answer (due to time # constraints) with the bruteforce method. # # The option that we opted for is a Tree based structure, this because we can smartly traverse the tree / add nodes # to prevent excessive calculations / duplicate data. Another benefit of the tree structure is that we prevent a huge # dataset. There are some comments around the code to hopefully clarify some bits, but if things are unclear, # it helps if you draw the tree on paper (with smaller values, for example: 10L, and cups of sizes 5, 3 & 2, # this should result in 8 unique possible combinations). # # Below we have provided our example on how you could have solved this puzzle # # If you get a recursion depth error, you could either increase the recursion depth # (see: https://stackoverflow.com/questions/5061582/setting-stacksize-in-a-python-script/16248113#16248113) # Or rewrite the code to be iterative. # # Note that increasing the recursion depth can be dangerous (mainly due to Python crashing, see below for more details), # but the standard limit is a little bit conservative. # (a copy pasta from the SO link above, works fine for example (on my machine)). 
# # The recursion depth is set as a guard to prevent infinite recursions from causing an overflow of the C stack # and thus crashing Python (this is the 'dangerous' part) # see: https://docs.python.org/3/library/sys.html#sys.setrecursionlimit # Change False to True if you like to see the valid combinations (will slow the execution of this script down # by quite a lot since it's IO) This method returns all the cups from the current node respecting all the cups that were set in it's parents (so it will return a list containing all the cups that were set up until (including) this node). :return: A list containing all the set cups up until (including) this node. # We need to multiply our sizes by 100 to prevent floating point issues whenever # we are doing the calculations. # This means that we need to multiply our input by 100 as well (and the +1 liter becomes # +100 liter, but that doesn't matter since everything got multiplied by 100) # Because we were allowed to have an additional of 1 liter (so valid combinations are between # our target and our target + 1) the upper bound value of our child node is a valid leaf node # if it's between the 0 and 1 (respecting our multiplication factor) # If this is the case, we got a valid combination # We only need to add nodes for all the cups that are equal to or lower # then the current cup, thus we will create a new list (`new_cups`) that # only contains values equal or lower to the current cup. # This because (5, 3, 2) is the same as (3, 5, 2) in the case # of storage. Thus we only need to check paths with decreasing numbers. # See the PyDoc in get_possible_sizes() for the reasoning behind the `* 100` | 3.674757 | 4 |
libg3n/modules/java/java_config_parser.py | jhkloss/libg3n | 0 | 6615078 | <reponame>jhkloss/libg3n
import libg3n
from libg3n.model.libg3n_class import Libg3nClass
from libg3n.model.libg3n_config_parser_g3n import Libg3nConfigParserG3n
from libg3n.model.libg3n_function import Libg3nFunction
from libg3n.modules.java.java_function import JavaFunction
from libg3n.modules.java.java_class import JavaClass
from libg3n.modules.java.java_property import JavaProperty
class JavaConfigParser(Libg3nConfigParserG3n):
    """Config parser that turns tokenized g3n config entries into Java model
    elements (functions, classes and their properties)."""

    # Default initial value for a property of each basic Java type.
    PROPERTY_TYPE_CONSTANTS = {
        'String': '',
        'char': '',
        'int': 0,
        'short': 0,
        'long': 0,
        'byte': 0,
        'float': 0.0,
        'double': 0.0,
        'boolean': True,
        'void': None
    }

    def process_function(self, function_element) -> Libg3nFunction:
        """Build a JavaFunction from one function token sequence."""
        libg3n.logger.debug('Parse Java function from token: ' + str(function_element))
        # Token layout assumed: [keyword, id, sep, type, value] — TODO confirm
        # against the g3n grammar.
        ident = function_element[1]
        declared_type = function_element[3]
        value = function_element[4]
        return JavaFunction(ident, self._parse_function_type(declared_type), value)

    def process_class(self, class_element) -> Libg3nClass:
        """Build a JavaClass, including its properties, from a class token
        sequence."""
        libg3n.logger.debug('Parse Java class from token: ' + str(class_element))
        result = JavaClass()
        result.name = class_element[1]
        # An optional symbol after the name introduces a meta class.
        if class_element[2] in self.SYMBOLS:
            result.meta_class = class_element[3]
        for index, token in enumerate(class_element):
            if token != self.PROPERTY_KEYWORD:
                continue
            # Property layout assumed: keyword, name, sep, type.
            prop = JavaProperty()
            prop.name = class_element[index + 1]
            prop.type = class_element[index + 3]
            prop.value = self.PROPERTY_TYPE_CONSTANTS[prop.type]
            result.add_property(prop)
        return result
| import libg3n
from libg3n.model.libg3n_class import Libg3nClass
from libg3n.model.libg3n_config_parser_g3n import Libg3nConfigParserG3n
from libg3n.model.libg3n_function import Libg3nFunction
from libg3n.modules.java.java_function import JavaFunction
from libg3n.modules.java.java_class import JavaClass
from libg3n.modules.java.java_property import JavaProperty
class JavaConfigParser(Libg3nConfigParserG3n):
    """Config parser that turns tokenized g3n config entries into Java model
    elements (functions, classes and their properties)."""

    # Default initial value for a property of each basic Java type.
    PROPERTY_TYPE_CONSTANTS = {
        'String': '',
        'char': '',
        'int': 0,
        'short': 0,
        'long': 0,
        'byte': 0,
        'float': 0.0,
        'double': 0.0,
        'boolean': True,
        'void': None
    }

    def process_function(self, function_element) -> Libg3nFunction:
        """Build a JavaFunction from one function token sequence."""
        libg3n.logger.debug('Parse Java function from token: ' + str(function_element))
        id = function_element[1]
        type = function_element[3]
        value = function_element[4]
        function_type = self._parse_function_type(type)
        return JavaFunction(id, function_type, value)

    def process_class(self, class_element) -> Libg3nClass:
        """Build a JavaClass, including its properties, from a class token
        sequence."""
        libg3n.logger.debug('Parse Java class from token: ' + str(class_element))
        new_class = JavaClass()
        new_class.name = class_element[1]
        # An optional symbol after the name introduces a meta class.
        if class_element[2] in self.SYMBOLS:
            new_class.meta_class = class_element[3]
        for i, token in enumerate(class_element):
            if token == self.PROPERTY_KEYWORD:
                # Property layout assumed: keyword, name, sep, type.
                new_property = JavaProperty()
                new_property.name = class_element[i + 1]
                new_property.type = class_element[i + 3]
                new_property.value = self.PROPERTY_TYPE_CONSTANTS[new_property.type]
                new_class.add_property(new_property)
        # BUG FIX: the final `return new_class` line was fused with stray
        # non-Python residue, breaking the syntax; restored clean return.
        return new_class
module1.py | vainolo/learning-python | 0 | 6615079 | print "Initializing"
def f1():
    """Print a greeting from f1."""
    # print() call form is valid in both Python 2 and Python 3.
    print("Hello from f1")
def f2():
    """Print a greeting from f2."""
    # print() call form is valid in both Python 2 and Python 3; the stray
    # residue fused onto this line has been removed.
    print("Hello from f2")
def f1():
    """Print a greeting from f1."""
    # print() call form is valid in both Python 2 and Python 3.
    print("Hello from f1")
def f2():
    """Print a greeting from f2."""
    # print() call form is valid in both Python 2 and Python 3; the stray
    # residue fused onto this line has been removed.
    print("Hello from f2")
api/api/viewsets/__init__.py | sergioguzman27/test_starter | 0 | 6615080 | <filename>api/api/viewsets/__init__.py
from .test import TestViewset | <filename>api/api/viewsets/__init__.py
from .test import TestViewset | none | 1 | 1.170906 | 1 | |
webgraze.py | kroncrv/webgraze | 1 | 6615081 | <reponame>kroncrv/webgraze
#!/usr/bin/env python3
from argparse import ArgumentParser
from webgraze.utils import combine, reparse
from webgraze.scrapergetter import get_scraper, get_scraper_by_name
import logging
import sys
logger = logging.getLogger(__name__)
class Webgraze:
    """Command line front end: parses the arguments and dispatches to the
    combine, reparse or scrape actions."""

    def __init__(self):
        self.parser = None
        self.get_parser()

    def get_parser(self):
        """Build the argument parser and parse the command line.

        Side effects: stores the parser in ``self.parser`` and the parsed
        arguments in ``self.args``.
        """
        parser = ArgumentParser()
        parser.add_argument("input", nargs = "?", help = "URL or directory")
        parser.add_argument("-c", "--combine", action = "store_true",
            help = "Combine all JSON files in a directory"
        )
        parser.add_argument("-if", "--input-file", action = "store_true",
            help = "Input is not an URL, but a file"
        )
        parser.add_argument("-o", "--output", type = str,
            help = "Output file or directory"
        )
        parser.add_argument("-ow", "--overwrite", action = "store_true",
            help = "Overwrite existing files"
        )
        parser.add_argument("-p", "--paged", action = "store_true",
            help = "URL is pageable"
        )
        parser.add_argument("-ps", "--page-suffix", type = str,
            help = "Add a suffix to output page JSON files",
            default = None
        )
        parser.add_argument("--pretty", action = "store_true",
            help = "Output pretty indented JSON"
        )
        parser.add_argument("-rp", "--reparse", action = "store_true",
            help = "Reparse input file"
        )
        parser.add_argument("--scraper", help = "Force a scraper", type = str)
        parser.add_argument("-v", "--verbose", action = "store_true")
        self.parser = parser
        self.args = self.parser.parse_args()

    def is_verbose(self):
        """Return True when --verbose was passed."""
        # store_true already yields a bool; comparing with `== True` was
        # redundant and unidiomatic.
        return self.args.verbose

    def print_help(self):
        """Print usage information and exit successfully."""
        self.parser.print_help()
        sys.exit(0)

    def run(self):
        """Execute the action selected on the command line."""
        args = self.args

        # No arguments at all (or no input): show the help text and stop.
        if len(sys.argv) == 1 or not args.input:
            self.print_help()

        if args.verbose:
            logging.basicConfig(level=logging.DEBUG)
            logging.debug("Debug logging is ON")
        else:
            logging.basicConfig(level=logging.INFO)

        logging.debug(args)

        if args.combine:
            logging.debug("Combining data")
            combine(args.input, args.output)
        elif args.reparse:
            reparse(args.input,
                output = args.output,
                pretty = args.pretty,
                overwrite = args.overwrite,
                paged = args.paged
            )
        else:
            logging.info(f"Trying to scrape <{args.input}>")
            logging.debug(f"Finding a scraper for <{args.input}>")
            if args.scraper:
                logging.debug(f"A scraper is being forced: {args.scraper}")
                Scraper = get_scraper_by_name(args.scraper)
            else:
                Scraper = get_scraper(args.input)

            logging.info(f"Scraping with {Scraper.NAME}")
            scraper = Scraper(args.input,
                paged = args.paged,
                input_file = args.input_file,
                page_suffix = args.page_suffix,
                overwrite = args.overwrite
            )
            scraper.scrape(output = args.output, pretty = args.pretty)
if __name__ == "__main__":
    grazer = Webgraze()
    try:
        grazer.run()
    except Exception as e:
        if grazer.is_verbose():
            # Bare `raise` re-raises with the original traceback intact
            # (raise(e) would reset the traceback origin to this line).
            raise
        else:
            # NotImplementedError doesn't have a string representation
            if str(e) == "":
                sys.exit(repr(e))
            else:
                sys.exit(e)
from argparse import ArgumentParser
from webgraze.utils import combine, reparse
from webgraze.scrapergetter import get_scraper, get_scraper_by_name
import logging
import sys
logger = logging.getLogger(__name__)
class Webgraze:
    """Command line front end: parses the arguments and dispatches to the
    combine, reparse or scrape actions."""

    def __init__(self):
        self.parser = None
        self.get_parser()

    def get_parser(self):
        """Build the argument parser and parse the command line.

        Side effects: stores the parser in ``self.parser`` and the parsed
        arguments in ``self.args``.
        """
        parser = ArgumentParser()
        parser.add_argument("input", nargs = "?", help = "URL or directory")
        parser.add_argument("-c", "--combine", action = "store_true",
            help = "Combine all JSON files in a directory"
        )
        parser.add_argument("-if", "--input-file", action = "store_true",
            help = "Input is not an URL, but a file"
        )
        parser.add_argument("-o", "--output", type = str,
            help = "Output file or directory"
        )
        parser.add_argument("-ow", "--overwrite", action = "store_true",
            help = "Overwrite existing files"
        )
        parser.add_argument("-p", "--paged", action = "store_true",
            help = "URL is pageable"
        )
        parser.add_argument("-ps", "--page-suffix", type = str,
            help = "Add a suffix to output page JSON files",
            default = None
        )
        parser.add_argument("--pretty", action = "store_true",
            help = "Output pretty indented JSON"
        )
        parser.add_argument("-rp", "--reparse", action = "store_true",
            help = "Reparse input file"
        )
        parser.add_argument("--scraper", help = "Force a scraper", type = str)
        parser.add_argument("-v", "--verbose", action = "store_true")
        self.parser = parser
        self.args = self.parser.parse_args()

    def is_verbose(self):
        """Return True when --verbose was passed."""
        # store_true already yields a bool; comparing with `== True` was
        # redundant and unidiomatic.
        return self.args.verbose

    def print_help(self):
        """Print usage information and exit successfully."""
        self.parser.print_help()
        sys.exit(0)

    def run(self):
        """Execute the action selected on the command line."""
        args = self.args

        # No arguments at all (or no input): show the help text and stop.
        if len(sys.argv) == 1 or not args.input:
            self.print_help()

        if args.verbose:
            logging.basicConfig(level=logging.DEBUG)
            logging.debug("Debug logging is ON")
        else:
            logging.basicConfig(level=logging.INFO)

        logging.debug(args)

        if args.combine:
            logging.debug("Combining data")
            combine(args.input, args.output)
        elif args.reparse:
            reparse(args.input,
                output = args.output,
                pretty = args.pretty,
                overwrite = args.overwrite,
                paged = args.paged
            )
        else:
            logging.info(f"Trying to scrape <{args.input}>")
            logging.debug(f"Finding a scraper for <{args.input}>")
            if args.scraper:
                logging.debug(f"A scraper is being forced: {args.scraper}")
                Scraper = get_scraper_by_name(args.scraper)
            else:
                Scraper = get_scraper(args.input)

            logging.info(f"Scraping with {Scraper.NAME}")
            scraper = Scraper(args.input,
                paged = args.paged,
                input_file = args.input_file,
                page_suffix = args.page_suffix,
                overwrite = args.overwrite
            )
            scraper.scrape(output = args.output, pretty = args.pretty)
if __name__ == "__main__":
    grazer = Webgraze()
    try:
        grazer.run()
    except Exception as e:
        if grazer.is_verbose():
            # Bare `raise` re-raises with the original traceback intact
            # (raise(e) would reset the traceback origin to this line).
            raise
        else:
            # NotImplementedError doesn't have a string representation
            if str(e) == "":
                sys.exit(repr(e))
            else:
                sys.exit(e)
phase_cells/evaluate_life_cycle.py | shenghh2015/segmentation_models | 0 | 6615082 | <reponame>shenghh2015/segmentation_models<filename>phase_cells/evaluate_life_cycle.py
import os
import cv2
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import argparse
import segmentation_models as sm
from segmentation_models import Unet, Linknet, PSPNet, FPN
sm.set_framework('tf.keras')
import glob
def generate_folder(folder):
    """Create *folder* (including missing parents) if it does not exist."""
    if not os.path.exists(folder):
        # os.makedirs is portable and avoids spawning a shell; the original
        # used os.system('mkdir -p ...'), which fails on Windows and would
        # pass the path through the shell unquoted.
        os.makedirs(folder, exist_ok=True)
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
model_root_folder ='./models';
model_name = 'livedead-net-Unet-bone-efficientnetb3-pre-True-epoch-200-batch-6-lr-0.0005';
DATA_DIR = './data/live_dead'; result_folder = './results/{}'.format(os.basename(DATA_DIR))
generate_folder(result_folder);print(result_folder)
CLASSES = ['live', 'inter', 'dead']
x_train_dir = os.path.join(DATA_DIR, 'train_images')
y_train_dir = os.path.join(DATA_DIR, 'train_masks')
x_valid_dir = os.path.join(DATA_DIR, 'val_images')
y_valid_dir = os.path.join(DATA_DIR, 'val_masks')
x_test_dir = os.path.join(DATA_DIR, 'test_images')
y_test_dir = os.path.join(DATA_DIR, 'test_masks')
# classes for data loading and preprocessing
class Dataset:
    """Live/dead cell dataset. Reads images, applies augmentation and
    preprocessing transformations.

    Args:
        images_dir (str): path to images folder
        masks_dir (str): path to segmentation masks folder (mask files are
            expected to share the image file names)
        classes (list): names of classes to extract from the segmentation mask
        augmentation (albumentations.Compose): data transformation pipeline
            (e.g. flip, scale, etc.)
        preprocessing (albumentations.Compose): data preprocessing
            (e.g. normalization, shape manipulation, etc.)
    """

    # Pixel value -> class name mapping used in the mask images.
    CLASSES = ['bk', 'live', 'inter', 'dead']

    def __init__(
            self,
            images_dir,
            masks_dir,
            classes=None,
            augmentation=None,
            preprocessing=None,
    ):
        # Sort the directory listing so sample order is deterministic;
        # os.listdir order is arbitrary and platform dependent.
        self.ids = sorted(os.listdir(images_dir))
        self.images_fps = [os.path.join(images_dir, image_id) for image_id in self.ids]
        self.masks_fps = [os.path.join(masks_dir, image_id) for image_id in self.ids]

        # convert str names to class values on masks
        self.class_values = [self.CLASSES.index(cls.lower()) for cls in classes]

        self.augmentation = augmentation
        self.preprocessing = preprocessing

    def __getitem__(self, i):
        # read image (as RGB) and its single-channel mask
        image = cv2.imread(self.images_fps[i])
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        mask = cv2.imread(self.masks_fps[i], 0)

        # extract one binary channel per requested class
        masks = [(mask == v) for v in self.class_values]
        mask = np.stack(masks, axis=-1).astype('float')

        # add a background channel if the mask is not binary
        if mask.shape[-1] != 1:
            background = 1 - mask.sum(axis=-1, keepdims=True)
            mask = np.concatenate((mask, background), axis=-1)

        # apply augmentations
        if self.augmentation:
            sample = self.augmentation(image=image, mask=mask)
            image, mask = sample['image'], sample['mask']

        # apply preprocessing
        if self.preprocessing:
            sample = self.preprocessing(image=image, mask=mask)
            image, mask = sample['image'], sample['mask']

        return image, mask

    def __len__(self):
        return len(self.ids)
class Dataloder(tf.keras.utils.Sequence):
    """Load data from a dataset and form batches.

    Args:
        dataset: instance of Dataset class for image loading and preprocessing.
        batch_size: integer number of samples per batch.
        shuffle: Boolean, if `True` shuffle sample order each epoch.
    """

    def __init__(self, dataset, batch_size=1, shuffle=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.indexes = np.arange(len(dataset))

        self.on_epoch_end()

    def __getitem__(self, i):
        # BUG FIX: index through the (possibly shuffled) self.indexes table.
        # The original read self.dataset[j] directly, so shuffle=True
        # silently had no effect on the sample order.
        start = i * self.batch_size
        stop = (i + 1) * self.batch_size
        data = [self.dataset[self.indexes[j]] for j in range(start, stop)]

        # transpose the list of (image, mask) pairs into two stacked arrays
        batch = [np.stack(samples, axis=0) for samples in zip(*data)]

        return (batch[0], batch[1])

    def __len__(self):
        """Denotes the number of batches per epoch."""
        return len(self.indexes) // self.batch_size

    def on_epoch_end(self):
        """Callback function to shuffle indexes each epoch."""
        if self.shuffle:
            self.indexes = np.random.permutation(self.indexes)
import albumentations as A
def round_clip_0_1(x, **kwargs):
    """Round values to the nearest integer, then clamp them into [0, 1]."""
    return np.clip(np.round(x), 0, 1)
def get_validation_augmentation():
    """Pad each image to 832x832 so its sides are divisible by 32."""
    return A.Compose([A.PadIfNeeded(832, 832)])
def get_preprocessing(preprocessing_fn):
    """Wrap a backbone-specific normalisation function in an albumentations
    pipeline.

    Args:
        preprocessing_fn (callable): data normalization function
            (can be specific for each pretrained neural network)
    Return:
        transform: albumentations.Compose
    """
    return A.Compose([A.Lambda(image=preprocessing_fn)])
## prediction with the best model
model_folder = os.path.join(model_root_folder, model_name)
best_weight = model_folder + '/best_model.h5'
if os.path.exists(best_weight):
    # parse the model folder name to recover the network type and backbone
    splits = model_name.split('-')
    for v in range(len(splits)):
        if splits[v] == 'net':
            net_type = splits[v + 1]
        elif splits[v] == 'bone':
            backbone = splits[v + 1]
    print('network: {}, backbone: {}'.format(net_type, backbone))
    # dataset settings: backbone-specific input normalisation
    preprocess_input = sm.get_preprocessing(backbone)
    # load test data
    test_dataset = Dataset(
        x_test_dir,
        y_test_dir,
        classes=CLASSES,
        augmentation=get_validation_augmentation(),
        preprocessing=get_preprocessing(preprocess_input),
    )
    test_dataloader = Dataloder(test_dataset, batch_size=1, shuffle=False)
    # create model (foreground classes + background channel)
    n_classes = 1 if len(CLASSES) == 1 else (len(CLASSES) + 1)
    activation = 'sigmoid' if n_classes == 1 else 'softmax'
    net_func = globals()[net_type]
    model = net_func(backbone, classes=n_classes, activation=activation)
    # load best weights
    model.load_weights(model_folder + '/best_model.h5')
    # optimizer and combined loss: class-weighted Dice + focal loss
    optim = tf.keras.optimizers.Adam(0.001)
    dice_loss = sm.losses.DiceLoss(class_weights=np.array([1, 1, 1, 0.5]))
    focal_loss = sm.losses.BinaryFocalLoss() if n_classes == 1 else sm.losses.CategoricalFocalLoss()
    total_loss = dice_loss + (1 * focal_loss)
    metrics = [sm.metrics.IOUScore(threshold=0.5), sm.metrics.FScore(threshold=0.5)]
    # compile keras model with defined optimizer, loss and metrics
    model.compile(optim, total_loss, metrics)
    scores = model.evaluate_generator(test_dataloader)
    print("Loss: {:.5}".format(scores[0]))
    for metric, value in zip(metrics, scores[1:]):
        print("mean {}: {:.5}".format(metric.__name__, value))

    ## evaluate the model on testing data
    images = []
    gt_masks = []
    for i in range(len(test_dataset)):
        image, gt_mask = test_dataset[i]
        images.append(image)
        gt_masks.append(gt_mask)
    images = np.stack(images)
    gt_masks = np.stack(gt_masks)
    pr_masks = model.predict(test_dataloader)

    ## calculate IoU and Dice
    def iou_calculate(y_true, y_pred):
        """Return (mean IoU, per-class IoU, mean Dice, per-class Dice).

        y_true: one-hot ground truth of shape (N, H, W, C).
        y_pred: per-pixel class probabilities of the same shape; argmax is
        taken over the channel axis before scoring.
        """
        num_classes = y_pred.shape[-1]
        # one-hot encoding of the predicted class map
        y_pred = np.array([np.argmax(y_pred, axis=-1) == i for i in range(num_classes)]).transpose(1, 2, 3, 0)
        print(y_pred.shape)
        axes = (1, 2)  # W,H axes of each image
        intersection = np.sum(np.logical_and(y_pred, y_true), axis=axes)
        union = np.sum(np.logical_or(y_pred, y_true), axis=axes)
        mask_sum = np.sum(np.abs(y_true), axis=axes) + np.sum(np.abs(y_pred), axis=axes)
        smooth = .00001  # avoids 0/0 for classes absent from both masks
        iou_per_image_class = (intersection + smooth) / (union + smooth)
        dice_per_image_class = (2 * intersection + smooth) / (mask_sum + smooth)
        mean_iou_over_images = np.mean(iou_per_image_class, axis=0)
        mean_iou_over_images_class = np.mean(mean_iou_over_images)
        dice_class = np.mean(dice_per_image_class, axis=0)
        mean_dice = np.mean(dice_per_image_class)
        return mean_iou_over_images_class, mean_iou_over_images, mean_dice, dice_class

    result_file = result_folder + '/' + model_name + '/IoU_Dice.txt'
    # make sure the per-model result folder exists before writing into it
    generate_folder(result_folder + '/' + model_name)
    iou_score, iou_cls, dice_score, dice_class = iou_calculate(gt_masks, pr_masks)
    with open(result_file, 'w+') as f:
        # BUG FIX: the original called f.write('IoU:{}{}{}{}{}', format()),
        # which raises TypeError (write takes one argument; a comma was used
        # instead of calling .format on the string). Write formatted scores.
        f.write('IoU: {}\nIoU per class: {}\nDice: {}\nDice per class: {}\n'.format(
            iou_score, iou_cls, dice_score, dice_class))
    print(iou_cls)
    print(iou_score)
    print(dice_class)
    print(dice_score)
| import os
import cv2
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import argparse
import segmentation_models as sm
from segmentation_models import Unet, Linknet, PSPNet, FPN
sm.set_framework('tf.keras')
import glob
def generate_folder(folder):
    """Create *folder* (including missing parents) if it does not exist."""
    if not os.path.exists(folder):
        # os.makedirs is portable and avoids spawning a shell; the original
        # used os.system('mkdir -p ...'), which fails on Windows and would
        # pass the path through the shell unquoted.
        os.makedirs(folder, exist_ok=True)
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
model_root_folder ='./models';
model_name = 'livedead-net-Unet-bone-efficientnetb3-pre-True-epoch-200-batch-6-lr-0.0005';
DATA_DIR = './data/live_dead'; result_folder = './results/{}'.format(os.basename(DATA_DIR))
generate_folder(result_folder);print(result_folder)
CLASSES = ['live', 'inter', 'dead']
x_train_dir = os.path.join(DATA_DIR, 'train_images')
y_train_dir = os.path.join(DATA_DIR, 'train_masks')
x_valid_dir = os.path.join(DATA_DIR, 'val_images')
y_valid_dir = os.path.join(DATA_DIR, 'val_masks')
x_test_dir = os.path.join(DATA_DIR, 'test_images')
y_test_dir = os.path.join(DATA_DIR, 'test_masks')
# classes for data loading and preprocessing
class Dataset:
    """Live/dead cell dataset. Reads images, applies augmentation and
    preprocessing transformations.

    Args:
        images_dir (str): path to images folder
        masks_dir (str): path to segmentation masks folder (mask files are
            expected to share the image file names)
        classes (list): names of classes to extract from the segmentation mask
        augmentation (albumentations.Compose): data transformation pipeline
            (e.g. flip, scale, etc.)
        preprocessing (albumentations.Compose): data preprocessing
            (e.g. normalization, shape manipulation, etc.)
    """

    # Pixel value -> class name mapping used in the mask images.
    CLASSES = ['bk', 'live', 'inter', 'dead']

    def __init__(
            self,
            images_dir,
            masks_dir,
            classes=None,
            augmentation=None,
            preprocessing=None,
    ):
        # Sort the directory listing so sample order is deterministic;
        # os.listdir order is arbitrary and platform dependent.
        self.ids = sorted(os.listdir(images_dir))
        self.images_fps = [os.path.join(images_dir, image_id) for image_id in self.ids]
        self.masks_fps = [os.path.join(masks_dir, image_id) for image_id in self.ids]

        # convert str names to class values on masks
        self.class_values = [self.CLASSES.index(cls.lower()) for cls in classes]

        self.augmentation = augmentation
        self.preprocessing = preprocessing

    def __getitem__(self, i):
        # read image (as RGB) and its single-channel mask
        image = cv2.imread(self.images_fps[i])
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        mask = cv2.imread(self.masks_fps[i], 0)

        # extract one binary channel per requested class
        masks = [(mask == v) for v in self.class_values]
        mask = np.stack(masks, axis=-1).astype('float')

        # add a background channel if the mask is not binary
        if mask.shape[-1] != 1:
            background = 1 - mask.sum(axis=-1, keepdims=True)
            mask = np.concatenate((mask, background), axis=-1)

        # apply augmentations
        if self.augmentation:
            sample = self.augmentation(image=image, mask=mask)
            image, mask = sample['image'], sample['mask']

        # apply preprocessing
        if self.preprocessing:
            sample = self.preprocessing(image=image, mask=mask)
            image, mask = sample['image'], sample['mask']

        return image, mask

    def __len__(self):
        return len(self.ids)
class Dataloder(tf.keras.utils.Sequence):
    """Load data from a dataset and form batches.

    Args:
        dataset: instance of Dataset class for image loading and preprocessing.
        batch_size: integer number of samples per batch.
        shuffle: Boolean, if `True` shuffle sample order each epoch.
    """

    def __init__(self, dataset, batch_size=1, shuffle=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.indexes = np.arange(len(dataset))

        self.on_epoch_end()

    def __getitem__(self, i):
        # BUG FIX: index through the (possibly shuffled) self.indexes table.
        # The original read self.dataset[j] directly, so shuffle=True
        # silently had no effect on the sample order.
        start = i * self.batch_size
        stop = (i + 1) * self.batch_size
        data = [self.dataset[self.indexes[j]] for j in range(start, stop)]

        # transpose the list of (image, mask) pairs into two stacked arrays
        batch = [np.stack(samples, axis=0) for samples in zip(*data)]

        return (batch[0], batch[1])

    def __len__(self):
        """Denotes the number of batches per epoch."""
        return len(self.indexes) // self.batch_size

    def on_epoch_end(self):
        """Callback function to shuffle indexes each epoch."""
        if self.shuffle:
            self.indexes = np.random.permutation(self.indexes)
import albumentations as A
def round_clip_0_1(x, **kwargs):
    """Round values to the nearest integer, then clamp them into [0, 1]."""
    return np.clip(np.round(x), 0, 1)
def get_validation_augmentation():
    """Pad each image to 832x832 so its sides are divisible by 32."""
    return A.Compose([A.PadIfNeeded(832, 832)])
def get_preprocessing(preprocessing_fn):
    """Wrap a backbone-specific normalisation function in an albumentations
    pipeline.

    Args:
        preprocessing_fn (callable): data normalization function
            (can be specific for each pretrained neural network)
    Return:
        transform: albumentations.Compose
    """
    return A.Compose([A.Lambda(image=preprocessing_fn)])
## prediction with the best model
model_folder = os.path.join(model_root_folder, model_name)
best_weight = model_folder + '/best_model.h5'
if os.path.exists(best_weight):
    # parse the model folder name to recover the network type and backbone
    splits = model_name.split('-')
    for v in range(len(splits)):
        if splits[v] == 'net':
            net_type = splits[v + 1]
        elif splits[v] == 'bone':
            backbone = splits[v + 1]
    print('network: {}, backbone: {}'.format(net_type, backbone))
    # dataset settings: backbone-specific input normalisation
    preprocess_input = sm.get_preprocessing(backbone)
    # load test data
    test_dataset = Dataset(
        x_test_dir,
        y_test_dir,
        classes=CLASSES,
        augmentation=get_validation_augmentation(),
        preprocessing=get_preprocessing(preprocess_input),
    )
    test_dataloader = Dataloder(test_dataset, batch_size=1, shuffle=False)
    # create model (foreground classes + background channel)
    n_classes = 1 if len(CLASSES) == 1 else (len(CLASSES) + 1)
    activation = 'sigmoid' if n_classes == 1 else 'softmax'
    net_func = globals()[net_type]
    model = net_func(backbone, classes=n_classes, activation=activation)
    # load best weights
    model.load_weights(model_folder + '/best_model.h5')
    # optimizer and combined loss: class-weighted Dice + focal loss
    optim = tf.keras.optimizers.Adam(0.001)
    dice_loss = sm.losses.DiceLoss(class_weights=np.array([1, 1, 1, 0.5]))
    focal_loss = sm.losses.BinaryFocalLoss() if n_classes == 1 else sm.losses.CategoricalFocalLoss()
    total_loss = dice_loss + (1 * focal_loss)
    metrics = [sm.metrics.IOUScore(threshold=0.5), sm.metrics.FScore(threshold=0.5)]
    # compile keras model with defined optimizer, loss and metrics
    model.compile(optim, total_loss, metrics)
    scores = model.evaluate_generator(test_dataloader)
    print("Loss: {:.5}".format(scores[0]))
    for metric, value in zip(metrics, scores[1:]):
        print("mean {}: {:.5}".format(metric.__name__, value))

    ## evaluate the model on testing data
    images = []
    gt_masks = []
    for i in range(len(test_dataset)):
        image, gt_mask = test_dataset[i]
        images.append(image)
        gt_masks.append(gt_mask)
    images = np.stack(images)
    gt_masks = np.stack(gt_masks)
    pr_masks = model.predict(test_dataloader)

    ## calculate IoU and Dice
    def iou_calculate(y_true, y_pred):
        """Return (mean IoU, per-class IoU, mean Dice, per-class Dice).

        y_true: one-hot ground truth of shape (N, H, W, C).
        y_pred: per-pixel class probabilities of the same shape; argmax is
        taken over the channel axis before scoring.
        """
        num_classes = y_pred.shape[-1]
        # one-hot encoding of the predicted class map
        y_pred = np.array([np.argmax(y_pred, axis=-1) == i for i in range(num_classes)]).transpose(1, 2, 3, 0)
        print(y_pred.shape)
        axes = (1, 2)  # W,H axes of each image
        intersection = np.sum(np.logical_and(y_pred, y_true), axis=axes)
        union = np.sum(np.logical_or(y_pred, y_true), axis=axes)
        mask_sum = np.sum(np.abs(y_true), axis=axes) + np.sum(np.abs(y_pred), axis=axes)
        smooth = .00001  # avoids 0/0 for classes absent from both masks
        iou_per_image_class = (intersection + smooth) / (union + smooth)
        dice_per_image_class = (2 * intersection + smooth) / (mask_sum + smooth)
        mean_iou_over_images = np.mean(iou_per_image_class, axis=0)
        mean_iou_over_images_class = np.mean(mean_iou_over_images)
        dice_class = np.mean(dice_per_image_class, axis=0)
        mean_dice = np.mean(dice_per_image_class)
        return mean_iou_over_images_class, mean_iou_over_images, mean_dice, dice_class

    result_file = result_folder + '/' + model_name + '/IoU_Dice.txt'
    # make sure the per-model result folder exists before writing into it
    generate_folder(result_folder + '/' + model_name)
    iou_score, iou_cls, dice_score, dice_class = iou_calculate(gt_masks, pr_masks)
    with open(result_file, 'w+') as f:
        # BUG FIX: the original called f.write('IoU:{}{}{}{}{}', format()),
        # which raises TypeError (write takes one argument; a comma was used
        # instead of calling .format on the string). Write formatted scores.
        f.write('IoU: {}\nIoU per class: {}\nDice: {}\nDice per class: {}\n'.format(
            iou_score, iou_cls, dice_score, dice_class))
    print(iou_cls)
    print(iou_score)
    print(dice_class)
    print(dice_score)
## evaluate the model on testing data ## calculate IoU and Dice # one hot encoding of predictions # W,H axes of each image # intersection = np.sum(np.abs(y_pred * y_true), axis=axes) # union = mask_sum - intersection # write iou | 2.359081 | 2 |
tests/utils/test_remove_folder.py | peddamat/home-assistant-supervisor-test | 597 | 6615083 | """test json."""
from pathlib import Path
import shutil
import pytest
from supervisor.utils import remove_folder
@pytest.mark.asyncio
async def test_remove_all(tmp_path):
    """Test removing a folder together with all of its contents."""
    # Copy the tar fixture tree into a scratch location that we may delete.
    source = Path(__file__).parents[1].joinpath("fixtures/tar_data")
    target = tmp_path / "orig"
    shutil.copytree(source, target, symlinks=True)
    assert target.exists()

    await remove_folder(target)

    assert not target.exists()
@pytest.mark.asyncio
async def test_remove_content(tmp_path):
    """Test removing only the contents of a folder, keeping the folder."""
    source = Path(__file__).parents[1].joinpath("fixtures/tar_data")
    target = tmp_path / "orig"
    shutil.copytree(source, target, symlinks=True)

    # Representative entries: a sub-folder, a regular file and a hidden file.
    sub_folder = target / "test1"
    readme = target / "README.md"
    hidden = target / ".hidden"
    hidden.touch()
    for entry in (sub_folder, readme, hidden):
        assert entry.exists()

    await remove_folder(target, content_only=True)

    for entry in (sub_folder, readme, hidden):
        assert not entry.exists()
| """test json."""
from pathlib import Path
import shutil
import pytest
from supervisor.utils import remove_folder
@pytest.mark.asyncio
async def test_remove_all(tmp_path):
"""Test remove folder."""
# Prepair test folder
temp_orig = tmp_path.joinpath("orig")
fixture_data = Path(__file__).parents[1].joinpath("fixtures/tar_data")
shutil.copytree(fixture_data, temp_orig, symlinks=True)
assert temp_orig.exists()
await remove_folder(temp_orig)
assert not temp_orig.exists()
@pytest.mark.asyncio
async def test_remove_content(tmp_path):
"""Test remove content of folder."""
# Prepair test folder
temp_orig = tmp_path.joinpath("orig")
fixture_data = Path(__file__).parents[1].joinpath("fixtures/tar_data")
shutil.copytree(fixture_data, temp_orig, symlinks=True)
test_folder = Path(temp_orig, "test1")
test_file = Path(temp_orig, "README.md")
test_hidden = Path(temp_orig, ".hidden")
test_hidden.touch()
assert test_folder.exists()
assert test_file.exists()
assert test_hidden.exists()
await remove_folder(temp_orig, content_only=True)
assert not test_folder.exists()
assert not test_file.exists()
assert not test_hidden.exists()
| en | 0.483395 | test json. Test remove folder. # Prepair test folder Test remove content of folder. # Prepair test folder | 2.209951 | 2 |
flask_app/book/__init__.py | Jarrettluo/flask-restful-quick-start | 6 | 6615084 | # encoding: utf-8
"""
@version: 1.0
@author: Jarrett
@file: __init__.py
@time: 2021/11/10 15:50
"""
from flask import Blueprint
book_blueprint = Blueprint("book_blueprint", __name__, url_prefix="/book")
from flask_app.book import views | # encoding: utf-8
"""
@version: 1.0
@author: Jarrett
@file: __init__.py
@time: 2021/11/10 15:50
"""
from flask import Blueprint
book_blueprint = Blueprint("book_blueprint", __name__, url_prefix="/book")
from flask_app.book import views | en | 0.533482 | # encoding: utf-8 @version: 1.0 @author: Jarrett @file: __init__.py @time: 2021/11/10 15:50 | 1.529641 | 2 |
vet_website/vet_website/doctype/vetasset/vetasset.py | rezazrna/vet_website | 0 | 6615085 | <gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2020, bikbuk and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
import pytz
from frappe.model.document import Document
from dateutil.relativedelta import relativedelta
from datetime import datetime
from vet_website.vet_website.doctype.vetjournalentry.vetjournalentry import new_journal_entry
class VetAsset(Document):
    """Frappe DocType controller for a fixed asset; no custom hooks."""
    pass
@frappe.whitelist()
def get_asset_list(filters=None):
    """Return one page of VetAsset records plus the total match count.

    Args:
        filters: JSON string with optional keys ``sort``, ``filters``
            (list of frappe filter terms), ``currentpage`` and ``search``
            (free text matched against several asset fields).

    Returns:
        dict with ``asset`` (a page of asset rows, each annotated with its
        ``first_depreciation_date``) and ``datalength`` (total number of
        matches), or ``{'error': ...}`` on permission failure.
    """
    default_sort = "creation desc"
    asset_filters = []
    asset_or_filters = []
    filter_json = False
    page = 1

    if filters:
        try:
            filter_json = json.loads(filters)
        except (ValueError, TypeError):
            # Malformed JSON: fall back to an unfiltered first page instead
            # of silently swallowing unrelated exceptions (was a bare except).
            filter_json = False

    if filter_json:
        sort = filter_json.get('sort', False)
        filters_json = filter_json.get('filters', False)
        currentpage = filter_json.get('currentpage', False)
        search = filter_json.get('search', False)
        if currentpage:
            page = currentpage
        if filters_json:
            for fj in filters_json:
                asset_filters.append(fj)
        if search:
            # Free-text search matches any of these fields (OR-combined).
            search_fields = (
                'asset_name', 'period', 'method', 'book_value',
                'original_value', 'residual_value', 'duration', 'status',
            )
            for field in search_fields:
                asset_or_filters.append({field: ['like', '%' + search + '%']})
        if sort:
            default_sort = sort

    try:
        # Page size is fixed at 10 rows.
        asset = frappe.get_list(
            "VetAsset",
            or_filters=asset_or_filters,
            filters=asset_filters,
            fields=["*"],
            order_by=default_sort,
            start=(page - 1) * 10,
            page_length=10,
        )
        datalength = len(frappe.get_all(
            "VetAsset",
            or_filters=asset_or_filters,
            filters=asset_filters,
            as_list=True,
        ))
        for a in asset:
            a['first_depreciation_date'] = frappe.get_value(
                'VetDepreciationList', {'parent': a['name']}, 'depreciation_date')
        return {'asset': asset, 'datalength': datalength}
    except PermissionError as e:
        return {'error': e}
@frappe.whitelist()
def delete_asset(data):
    """Delete the VetAsset documents whose names are listed in *data*.

    Args:
        data: JSON-encoded list of VetAsset document names.

    Returns:
        {'success': True} on completion, or {'error': ...} when *data* is
        not valid JSON.
    """
    try:
        data_json = json.loads(data)
    except (ValueError, TypeError):
        # Narrowed from a bare except: only JSON-decoding failures produce
        # the error response ("Gagal menghapus asset" = "failed to delete").
        return {'error': "Gagal menghapus asset"}

    for d in data_json:
        frappe.delete_doc('VetAsset', d)
    # Commit once after all deletions.
    frappe.db.commit()

    return {'success': True}
@frappe.whitelist()
def get_asset(name=None):
    """Fetch one VetAsset with its depreciation schedule plus all COA rows.

    Args:
        name: VetAsset document name; when None/False an empty asset dict
            is returned (used by the "new asset" form).

    Returns:
        {'asset': ..., 'coaAll': ...} or {'error': ...} on permission
        failure.
    """
    try:
        # Identity comparisons replace the original `== False or == None`;
        # behavior is unchanged ('' still goes through the lookup path).
        if name is None or name is False:
            asset = {}
        else:
            asset_search = frappe.get_list("VetAsset", filters={'name': name}, fields=['*'])
            asset = asset_search[0]
            depreciation_list = frappe.get_list(
                'VetDepreciationList', filters={'parent': asset['name']}, fields=['*'])
            asset['depreciation_list'] = depreciation_list
        coaAll = frappe.get_list('VetCoa', fields=['*'])
        res = {'asset': asset, 'coaAll': coaAll}
        return res
    except PermissionError as e:
        return {'error': e}
@frappe.whitelist()
def submit_asset(data):
    """Create or update a VetAsset and (re)build its depreciation schedule.

    Args:
        data: JSON string of asset fields. When it contains ``name`` an
            existing VetAsset is updated; otherwise a new one is created
            and an acquisition journal entry is posted.

    Returns:
        {'asset': asset_doc} on success, or {'error': ...} on permission
        failure.
    """
    try:
        tz = pytz.timezone("Asia/Jakarta")
        data_json = json.loads(data)
        if data_json.get('name'):
            # Existing asset: apply the incoming field values.
            asset = frappe.get_doc("VetAsset", data_json['name'])
            asset.update(data_json)
            asset.save()
        else:
            # New asset: insert it, then post the acquisition entry
            # (credit the payment account, debit the fixed-asset account).
            asset = frappe.new_doc("VetAsset")
            asset.update(data_json)
            asset.insert()
            journal_entry = {
                'date': data_json.get('acquistion_date'),
                'journal': '8137c6f684',  # hard-coded journal document name
                'period': data_json.get('period'),
                'reference': asset.name,
                'journal_items': [
                    {'account': asset.payment_account, 'credit': data_json.get('original_value')},
                    {'account': asset.fixed_asset_account, 'debit': data_json.get('original_value')},
                ]
            }
            new_journal_entry(json.dumps(journal_entry))

        acquistion_date = datetime.strptime(data_json.get('acquistion_date'), "%Y-%m-%d")
        # Depreciation is computed per month; a duration given in years is
        # converted to months.
        if data_json.get('duration_type') == 'Year':
            duration = float(data_json.get('duration')) * 12
        else:
            duration = float(data_json.get('duration'))
        # Straight-line depreciation: (original - residual) / months.
        original_value = float(data_json.get('original_value')) - float(data_json.get('residual_value'))
        depreciation_value = original_value / duration
        # Months elapsed since acquisition determine the current book value.
        r = relativedelta(datetime.now(tz).today(), acquistion_date)
        book_value = float(data_json.get('original_value')) - (float(r.months + (12 * r.years)) * depreciation_value)

        # Rebuild the schedule for a new asset, or when the recomputed book
        # value differs from the stored one.
        if not data_json.get('name') or (data_json.get('name') and float(asset.book_value) != float(book_value)):
            if (data_json.get('name')):
                # Existing asset: drop the old schedule and every journal
                # entry except the acquisition-date entry before rebuilding.
                frappe.db.delete('VetDepreciationList', {
                    'parent': asset.name
                })
                list_journal = frappe.get_list(
                    'VetJournalEntry',
                    filters={'reference': asset.name, 'date': ['!=', data_json.get('acquistion_date')]},
                    fields=['name'])
                journal = [i['name'] for i in list_journal]
                frappe.db.delete('VetJournalItem', {
                    'parent': ['in', journal]
                })
                frappe.db.delete('VetJournalEntry', {
                    'name': ['in', journal]
                })
            asset.book_value = book_value
            asset.save()
            asset.reload()
            # One depreciation row plus one journal entry per elapsed month,
            # capped at the total duration.
            i = 1
            while acquistion_date + relativedelta(months=i) <= datetime.now(tz).today() and i <= duration:
                new_depreciation = frappe.new_doc('VetDepreciationList')
                new_depreciation.update({
                    'reference': data_json.get('asset_name') + ' ' + '(' + str(i) + '/' + str(int(duration)) + ')',
                    'depreciation_date': acquistion_date + relativedelta(months=i),
                    'depreciation_value': depreciation_value,
                    'cumulative_depreciation': depreciation_value * i,
                    'parent': asset.name,
                    'parenttype': 'VetAsset',
                    'parentfield': 'depreciation_list'
                })
                asset.depreciation_list.append(new_depreciation)
                asset.save()
                journal_entry_despreciation = {
                    'date': (acquistion_date + relativedelta(months=i)).strftime('%Y-%m-%d'),
                    'journal': '8137c6f684',  # hard-coded journal document name
                    'period': data_json.get('period'),
                    'reference': asset.name,
                    'journal_items': [
                        {'account': asset.depreciation_account, 'credit': depreciation_value},
                        {'account': asset.expense_account, 'debit': depreciation_value},
                    ]
                }
                new_journal_entry(json.dumps(journal_entry_despreciation))
                i = i + 1
        return {'asset': asset}
    except PermissionError as e:
        return {'error': e}
@frappe.whitelist()
def sell_asset(data):
    """Mark a VetAsset as sold/disposed and post the disposal journal entry.

    Args:
        data: JSON string with ``name`` (asset document name), ``amount``
            (sale proceeds), ``cash_account`` and ``lost_account``
            (gain/loss account).

    Returns:
        {'asset': asset_doc} on success, or {'error': ...} on permission
        failure.
    """
    tz = pytz.timezone("Asia/Jakarta")
    try:
        data_json = json.loads(data)
        asset = frappe.get_doc('VetAsset', data_json.get('name'))
        asset.status = 'Sell/Dispose'
        # Record the disposal as a final row of the depreciation schedule.
        new_depreciation = frappe.new_doc('VetDepreciationList')
        new_depreciation.update({
            'reference': 'Sell/Dispose',
            'depreciation_date': datetime.now(tz).today(),
            'depreciation_value': float(asset.book_value) - float(data_json.get('amount')),
            'cumulative_depreciation': data_json.get('amount'),
            'parent': asset.name,
            'parenttype': 'VetAsset',
            'parentfield': 'depreciation_list'
        })
        asset.depreciation_list.append(new_depreciation)
        # Single save persists both the status change and the appended row
        # (the original called asset.save() twice in a row; the second call
        # was redundant).
        asset.save()
        frappe.db.commit()
        # Disposal entry: clear accumulated depreciation and the asset's
        # cost, book the cash received, and balance with a gain/loss line.
        ji_list = [
            {'account': asset.depreciation_account, 'debit': (float(asset.original_value) - float(asset.book_value))},
            {'account': asset.fixed_asset_account, 'credit': float(asset.original_value)},
            {'account': data_json.get('cash_account'), 'debit': float(data_json.get('amount'))},
        ]
        if float(data_json.get('amount')) - float(asset.book_value) != 0:
            if float(data_json.get('amount')) - float(asset.book_value) > 0:
                # Sold above book value: credit the gain.
                ji_list.append(
                    {'account': data_json.get('lost_account'), 'credit': float(data_json.get('amount')) - float(asset.book_value)},
                )
            else:
                # Sold below book value: debit the loss.
                ji_list.append(
                    {'account': data_json.get('lost_account'), 'debit': float(asset.book_value) - float(data_json.get('amount'))},
                )
        journal_entry = {
            'date': datetime.now(tz).strftime('%Y-%m-%d'),
            'journal': '8137c6f684',  # hard-coded journal document name
            'period': asset.period.strftime('%m/%Y'),
            'reference': asset.name,
            'journal_items': ji_list
        }
        new_journal_entry(json.dumps(journal_entry))
        return {'asset': asset}
    except PermissionError as e:
        return {'error': e}
# Copyright (c) 2020, bikbuk and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
import pytz
from frappe.model.document import Document
from dateutil.relativedelta import relativedelta
from datetime import datetime
from vet_website.vet_website.doctype.vetjournalentry.vetjournalentry import new_journal_entry
class VetAsset(Document):
pass
@frappe.whitelist()
def get_asset_list(filters=None):
default_sort = "creation desc"
asset_filters = []
asset_or_filters = []
filter_json = False
page = 1
if filters:
try:
filter_json = json.loads(filters)
except:
filter_json = False
if filter_json:
sort = filter_json.get('sort', False)
filters_json = filter_json.get('filters', False)
currentpage = filter_json.get('currentpage', False)
search = filter_json.get('search', False)
if currentpage:
page = currentpage
if filters_json:
for fj in filters_json:
asset_filters.append(fj)
if search:
asset_or_filters.append({'asset_name': ['like', '%'+search+'%']})
asset_or_filters.append({'period': ['like', '%'+search+'%']})
asset_or_filters.append({'method': ['like', '%'+search+'%']})
asset_or_filters.append({'book_value': ['like', '%'+search+'%']})
asset_or_filters.append({'original_value': ['like', '%'+search+'%']})
asset_or_filters.append({'residual_value': ['like', '%'+search+'%']})
asset_or_filters.append({'duration': ['like', '%'+search+'%']})
asset_or_filters.append({'status': ['like', '%'+search+'%']})
if sort:
default_sort = sort
try:
asset = frappe.get_list("VetAsset", or_filters=asset_or_filters, filters=asset_filters, fields=["*"], order_by=default_sort, start=(page - 1) * 10, page_length= 10)
datalength = len(frappe.get_all("VetAsset", or_filters=asset_or_filters, filters=asset_filters, as_list=True))
for a in asset:
a['first_depreciation_date'] = frappe.get_value('VetDepreciationList', {'parent': a['name']}, 'depreciation_date')
return {'asset': asset, 'datalength': datalength}
except PermissionError as e:
return {'error': e}
@frappe.whitelist()
def delete_asset(data):
try:
data_json = json.loads(data)
except:
return {'error': "Gagal menghapus asset"}
for d in data_json:
frappe.delete_doc('VetAsset', d)
frappe.db.commit()
return {'success': True}
@frappe.whitelist()
def get_asset(name=None):
try:
if name == False or name == None:
asset = {}
else:
asset_search = frappe.get_list("VetAsset", filters={'name': name}, fields=['*'])
asset = asset_search[0]
depreciation_list = frappe.get_list('VetDepreciationList', filters={'parent': asset['name']}, fields=['*'])
asset['depreciation_list'] = depreciation_list
coaAll = frappe.get_list('VetCoa', fields=['*'])
res = {'asset': asset, 'coaAll': coaAll}
return res
except PermissionError as e:
return {'error': e}
@frappe.whitelist()
def submit_asset(data):
try:
tz = pytz.timezone("Asia/Jakarta")
data_json = json.loads(data)
if data_json.get('name') :
asset = frappe.get_doc("VetAsset", data_json['name'])
asset.update(data_json)
asset.save()
else :
asset = frappe.new_doc("VetAsset")
asset.update(data_json)
asset.insert()
journal_entry = {
'date': data_json.get('acquistion_date'),
'journal': '8137c6f684',
'period': data_json.get('period'),
'reference': asset.name,
'journal_items': [
{'account': asset.payment_account, 'credit': data_json.get('original_value')},
{'account': asset.fixed_asset_account, 'debit': data_json.get('original_value')},
]
}
new_journal_entry(json.dumps(journal_entry))
acquistion_date = datetime.strptime(data_json.get('acquistion_date'), "%Y-%m-%d")
if data_json.get('duration_type') == 'Year' :
duration = float(data_json.get('duration')) * 12
else :
duration = float(data_json.get('duration'))
original_value = float(data_json.get('original_value')) - float(data_json.get('residual_value'))
depreciation_value = original_value / duration
r = relativedelta(datetime.now(tz).today(), acquistion_date)
book_value = float(data_json.get('original_value')) - (float(r.months + (12 * r.years)) * depreciation_value)
if not data_json.get('name') or (data_json.get('name') and float(asset.book_value) != float(book_value)):
if (data_json.get('name')):
frappe.db.delete('VetDepreciationList', {
'parent': asset.name
})
list_journal = frappe.get_list('VetJournalEntry', filters={'reference': asset.name, 'date': ['!=', data_json.get('acquistion_date')]}, fields=['name'])
journal = [i['name'] for i in list_journal]
frappe.db.delete('VetJournalItem', {
'parent': ['in', journal]
})
frappe.db.delete('VetJournalEntry', {
'name': ['in', journal]
})
asset.book_value = book_value
asset.save()
asset.reload()
i = 1
while acquistion_date + relativedelta(months=i) <= datetime.now(tz).today() and i <= duration:
new_depreciation = frappe.new_doc('VetDepreciationList')
new_depreciation.update({
'reference': data_json.get('asset_name') + ' ' + '(' + str(i) + '/' + str(int(duration)) + ')',
'depreciation_date': acquistion_date + relativedelta(months=i),
'depreciation_value': depreciation_value,
'cumulative_depreciation': depreciation_value * i,
'parent': asset.name,
'parenttype': 'VetAsset',
'parentfield': 'depreciation_list'
})
asset.depreciation_list.append(new_depreciation)
asset.save()
journal_entry_despreciation = {
'date': (acquistion_date + relativedelta(months=i)).strftime('%Y-%m-%d'),
'journal': '8137c6f684',
'period': data_json.get('period'),
'reference': asset.name,
'journal_items': [
{'account': asset.depreciation_account, 'credit': depreciation_value},
{'account': asset.expense_account, 'debit': depreciation_value},
]
}
new_journal_entry(json.dumps(journal_entry_despreciation))
i = i+1
return {'asset': asset}
except PermissionError as e:
return {'error': e}
@frappe.whitelist()
def sell_asset(data):
tz = pytz.timezone("Asia/Jakarta")
try:
data_json = json.loads(data)
asset = frappe.get_doc('VetAsset', data_json.get('name'))
asset.status = 'Sell/Dispose'
new_depreciation = frappe.new_doc('VetDepreciationList')
new_depreciation.update({
'reference': 'Sell/Dispose',
'depreciation_date': datetime.now(tz).today(),
'depreciation_value': float(asset.book_value) - float(data_json.get('amount')),
'cumulative_depreciation': data_json.get('amount'),
'parent': asset.name,
'parenttype': 'VetAsset',
'parentfield': 'depreciation_list'
})
asset.depreciation_list.append(new_depreciation)
asset.save()
asset.save()
frappe.db.commit()
ji_list = [
{'account': asset.depreciation_account, 'debit': (float(asset.original_value) - float(asset.book_value))},
{'account': asset.fixed_asset_account, 'credit': float(asset.original_value)},
{'account': data_json.get('cash_account'), 'debit': float(data_json.get('amount'))},
]
if float(data_json.get('amount')) - float(asset.book_value) != 0 :
if float(data_json.get('amount')) - float(asset.book_value) > 0 :
ji_list.append(
{'account': data_json.get('lost_account'), 'credit': float(data_json.get('amount')) - float(asset.book_value)},
)
else :
ji_list.append(
{'account': data_json.get('lost_account'), 'debit': float(asset.book_value) - float(data_json.get('amount'))},
)
journal_entry = {
'date': datetime.now(tz).strftime('%Y-%m-%d'),
'journal': '8137c6f684',
'period': asset.period.strftime('%m/%Y'),
'reference': asset.name,
'journal_items': ji_list
}
new_journal_entry(json.dumps(journal_entry))
return {'asset': asset}
except PermissionError as e:
return {'error': e} | en | 0.780704 | # -*- coding: utf-8 -*- # Copyright (c) 2020, bikbuk and contributors # For license information, please see license.txt | 1.852313 | 2 |
aggregit/rate_limit.py | MichaelCurrin/aggre-git | 0 | 6615086 | <reponame>MichaelCurrin/aggre-git
#!/usr/bin/env python
"""
Rate limit report.
Get rate limiting status and reset time for the configured GitHub API token.
Check in the browser using your browser user rather than a token.
https://developer.github.com/v3/rate_limit/
"Note: Accessing this endpoint does not count against your REST API rate
limit."
"""
import datetime
import time
from lib.connection import CONN
def main():
    """
    Main command-line function.

    Prints the rate-limit reset time and then polls the remaining request
    quota forever (interrupt with Ctrl+C to stop).
    """
    # Unix timestamp at which the GitHub rate-limit window resets.
    reset_time = datetime.datetime.fromtimestamp(CONN.rate_limiting_resettime)
    wait = reset_time - datetime.datetime.now()
    print(f"Reset time: {reset_time.time()}")
    print(f"Wait time : {wait}")
    print()

    # NOTE(review): the connection object appears to cache rate-limit
    # values from the last API response, so these figures may not refresh
    # between iterations unless other requests are made — confirm.
    while True:
        remaining, total = CONN.rate_limiting
        percent = remaining / total
        print(f"Remaining : {remaining:,d} / {total:,d} ({percent:3.2%})")
        print("Waiting...")
        time.sleep(5)


if __name__ == "__main__":
    main()
| #!/usr/bin/env python
"""
Rate limit report.
Get rate limiting status and reset time for the configured GitHub API token.
Check in the browser using your browser user rather than a token.
https://developer.github.com/v3/rate_limit/
"Note: Accessing this endpoint does not count against your REST API rate
limit."
"""
import datetime
import time
from lib.connection import CONN
def main():
"""
Main command-line function.
"""
reset_time = datetime.datetime.fromtimestamp(CONN.rate_limiting_resettime)
wait = reset_time - datetime.datetime.now()
print(f"Reset time: {reset_time.time()}")
print(f"Wait time : {wait}")
print()
while True:
remaining, total = CONN.rate_limiting
percent = remaining / total
print(f"Remaining : {remaining:,d} / {total:,d} ({percent:3.2%})")
print("Waiting...")
time.sleep(5)
if __name__ == "__main__":
main() | en | 0.680981 | #!/usr/bin/env python Rate limit report. Get rate limiting status and reset time for the configured GitHub API token. Check in the browser using your browser user rather than a token. https://developer.github.com/v3/rate_limit/ "Note: Accessing this endpoint does not count against your REST API rate limit." Main command-line function. | 2.904785 | 3 |
OSM-DataWrangleSQL-LasVegas/data.py | alexmc1510/data-analyst-nanodegree | 0 | 6615087 | import csv
import codecs
import re
import xml.etree.cElementTree as ET
from unittest import TestCase
import cerberus
import schema
OSM_PATH = "data\las-vegas_sample.osm"
NODES_PATH = "nodes.csv"
NODE_TAGS_PATH = "nodes_tags.csv"
WAYS_PATH = "ways.csv"
WAY_NODES_PATH = "ways_nodes.csv"
WAY_TAGS_PATH = "ways_tags.csv"
LOWER_COLON = re.compile(r'^([a-z]|_)+:([a-z]|_)+')
PROBLEMCHARS = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
SCHEMA = schema.schema
NODE_FIELDS = ['id', 'lat', 'lon', 'user', 'uid', 'version', 'changeset', 'timestamp']
NODE_TAGS_FIELDS = ['id', 'key', 'value', 'type']
WAY_FIELDS = ['id', 'user', 'uid', 'version', 'changeset', 'timestamp']
WAY_TAGS_FIELDS = ['id', 'key', 'value', 'type']
WAY_NODES_FIELDS = ['id', 'node_id', 'position']
def _shape_tag(element_id, child, problem_chars, default_tag_type):
    """Shape one <tag> child into a row dict, or return None for bad keys."""
    key = child.attrib['k']
    # Fix: use search() so a problematic character anywhere in the key
    # rejects the tag (match() only inspected the first character).
    if problem_chars.search(key):
        return None
    if LOWER_COLON.match(key):
        # "addr:street" -> type "addr", key "street".
        tag_type, tag_key = key.split(':', 1)
    else:
        tag_type, tag_key = default_tag_type, key
    return {
        'id': element_id,
        'key': tag_key,
        'type': tag_type,
        'value': child.attrib['v'],
    }


def shape_element(element, node_attr_fields=NODE_FIELDS, way_attr_fields=WAY_FIELDS,
                  problem_chars=PROBLEMCHARS, default_tag_type='regular'):
    """Clean and shape a node or way XML element into the CSV row dicts.

    Args:
        element: an ElementTree element with tag 'node' or 'way'.
        node_attr_fields / way_attr_fields: attribute names kept for the
            node/way row (now actually honored; the original ignored these
            parameters in favor of the module globals).
        problem_chars: compiled regex of characters that invalidate a tag key.
        default_tag_type: type assigned to tags without a colon namespace.

    Returns:
        {'node': ..., 'node_tags': [...]} for nodes,
        {'way': ..., 'way_nodes': [...], 'way_tags': [...]} for ways,
        None for any other tag.
    """
    tags = []
    if element.tag == 'node':
        node_attribs = {k: v for k, v in element.attrib.items() if k in node_attr_fields}
        for child in element:
            tag = _shape_tag(element.attrib['id'], child, problem_chars, default_tag_type)
            if tag is not None:
                tags.append(tag)
        return {'node': node_attribs, 'node_tags': tags}
    elif element.tag == 'way':
        way_attribs = {k: v for k, v in element.attrib.items() if k in way_attr_fields}
        way_nodes = []
        position = 0  # order of <nd> references within the way
        for child in element:
            if child.tag == 'tag':
                tag = _shape_tag(element.attrib['id'], child, problem_chars, default_tag_type)
                if tag is not None:
                    tags.append(tag)
            elif child.tag == 'nd':
                way_nodes.append({
                    'id': element.attrib['id'],
                    'node_id': child.attrib['ref'],
                    'position': position,
                })
                position += 1
        return {'way': way_attribs, 'way_nodes': way_nodes, 'way_tags': tags}
# ================================================== #
# Helper Functions #
# ================================================== #
def get_element(osm_file, tags=('node', 'way', 'relation')):
    """Yield element if it is the right type of tag"""
    # Incremental parse so the whole OSM file is never held in memory.
    context = ET.iterparse(osm_file, events=('start', 'end'))
    _, root = next(context)
    for event, elem in context:
        if event == 'end' and elem.tag in tags:
            yield elem
            # Drop already-processed children from the tree to bound memory.
            root.clear()
def validate_element(element, validator, schema=SCHEMA):
    """Raise ValidationError if element does not match schema"""
    if validator.validate(element, schema) is not True:
        # Report only the first failing field (Python 2 dict API: iteritems).
        field, errors = next(validator.errors.iteritems())
        message_string = "\nElement of type '{0}' has the following errors:\n{1}"
        # Each error value may be a single message or a list of messages.
        error_strings = (
            "{0}: {1}".format(k, v if isinstance(v, str) else ", ".join(v))
            for k, v in errors.iteritems()
        )
        raise cerberus.ValidationError(
            message_string.format(field, "\n".join(error_strings))
        )
class UnicodeDictWriter(csv.DictWriter, object):
    """Extend csv.DictWriter to handle Unicode input"""

    def writerow(self, row):
        # Encode unicode values as UTF-8 byte strings: the Python 2 csv
        # module cannot write unicode objects directly.
        super(UnicodeDictWriter, self).writerow({
            k: (v.encode('utf-8') if isinstance(v, unicode) else v) for k, v in row.iteritems()
        })

    def writerows(self, rows):
        # Write each row through writerow so encoding is applied uniformly.
        for row in rows:
            self.writerow(row)
# ================================================== #
# Main Function #
# ================================================== #
def process_map(file_in, validate):
    """Iteratively process each XML element and write to csv(s)

    Args:
        file_in: path to the OSM XML file to process.
        validate: when True, each shaped element is checked against SCHEMA
            (noticeably slower).
    """
    # Open all five output files together so any failure closes everything.
    with codecs.open(NODES_PATH, 'w') as nodes_file, \
         codecs.open(NODE_TAGS_PATH, 'w') as nodes_tags_file, \
         codecs.open(WAYS_PATH, 'w') as ways_file, \
         codecs.open(WAY_NODES_PATH, 'w') as way_nodes_file, \
         codecs.open(WAY_TAGS_PATH, 'w') as way_tags_file:

        # One UTF-8-aware writer per output table.
        nodes_writer = UnicodeDictWriter(nodes_file, NODE_FIELDS)
        node_tags_writer = UnicodeDictWriter(nodes_tags_file, NODE_TAGS_FIELDS)
        ways_writer = UnicodeDictWriter(ways_file, WAY_FIELDS)
        way_nodes_writer = UnicodeDictWriter(way_nodes_file, WAY_NODES_FIELDS)
        way_tags_writer = UnicodeDictWriter(way_tags_file, WAY_TAGS_FIELDS)

        nodes_writer.writeheader()
        node_tags_writer.writeheader()
        ways_writer.writeheader()
        way_nodes_writer.writeheader()
        way_tags_writer.writeheader()

        validator = cerberus.Validator()

        # Stream nodes and ways, shape each one, optionally validate, and
        # route the rows to the matching CSV writers.
        for element in get_element(file_in, tags=('node', 'way')):
            el = shape_element(element)
            if el:
                if validate is True:
                    validate_element(el, validator)
                if element.tag == 'node':
                    nodes_writer.writerow(el['node'])
                    node_tags_writer.writerows(el['node_tags'])
                elif element.tag == 'way':
                    ways_writer.writerow(el['way'])
                    way_nodes_writer.writerows(el['way_nodes'])
                    way_tags_writer.writerows(el['way_tags'])
    print "DONE!!"  # Python 2 print statement
if __name__ == '__main__':
process_map(OSM_PATH, validate=True)
| import csv
import codecs
import re
import xml.etree.cElementTree as ET
from unittest import TestCase
import cerberus
import schema
OSM_PATH = "data\las-vegas_sample.osm"
NODES_PATH = "nodes.csv"
NODE_TAGS_PATH = "nodes_tags.csv"
WAYS_PATH = "ways.csv"
WAY_NODES_PATH = "ways_nodes.csv"
WAY_TAGS_PATH = "ways_tags.csv"
LOWER_COLON = re.compile(r'^([a-z]|_)+:([a-z]|_)+')
PROBLEMCHARS = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
SCHEMA = schema.schema
NODE_FIELDS = ['id', 'lat', 'lon', 'user', 'uid', 'version', 'changeset', 'timestamp']
NODE_TAGS_FIELDS = ['id', 'key', 'value', 'type']
WAY_FIELDS = ['id', 'user', 'uid', 'version', 'changeset', 'timestamp']
WAY_TAGS_FIELDS = ['id', 'key', 'value', 'type']
WAY_NODES_FIELDS = ['id', 'node_id', 'position']
def shape_element(element, node_attr_fields=NODE_FIELDS, way_attr_fields=WAY_FIELDS,
problem_chars=PROBLEMCHARS, default_tag_type='regular'):
"""Clean and shape node or way XML element to Python dict"""
node_attribs = {}
way_attribs = {}
way_nodes = []
tags = []
if element.tag == 'node':
for attrib in element.attrib:
if attrib in NODE_FIELDS:
node_attribs[attrib] = element.attrib[attrib]
for child in element:
node_tag = {}
if LOWER_COLON.match(child.attrib['k']):
node_tag['type'] = child.attrib['k'].split(':',1)[0]
node_tag['key'] = child.attrib['k'].split(':',1)[1]
node_tag['id'] = element.attrib['id']
node_tag['value'] = child.attrib['v']
tags.append(node_tag)
elif PROBLEMCHARS.match(child.attrib['k']):
continue
else:
node_tag['type'] = 'regular'
node_tag['key'] = child.attrib['k']
node_tag['id'] = element.attrib['id']
node_tag['value'] = child.attrib['v']
tags.append(node_tag)
return {'node': node_attribs, 'node_tags': tags}
elif element.tag == 'way':
for attrib in element.attrib:
if attrib in WAY_FIELDS:
way_attribs[attrib] = element.attrib[attrib]
position = 0
for child in element:
way_tag = {}
way_node = {}
if child.tag == 'tag':
if LOWER_COLON.match(child.attrib['k']):
way_tag['type'] = child.attrib['k'].split(':',1)[0]
way_tag['key'] = child.attrib['k'].split(':',1)[1]
way_tag['id'] = element.attrib['id']
way_tag['value'] = child.attrib['v']
tags.append(way_tag)
elif PROBLEMCHARS.match(child.attrib['k']):
continue
else:
way_tag['type'] = 'regular'
way_tag['key'] = child.attrib['k']
way_tag['id'] = element.attrib['id']
way_tag['value'] = child.attrib['v']
tags.append(way_tag)
elif child.tag == 'nd':
way_node['id'] = element.attrib['id']
way_node['node_id'] = child.attrib['ref']
way_node['position'] = position
position += 1
way_nodes.append(way_node)
return {'way': way_attribs, 'way_nodes': way_nodes, 'way_tags': tags}
# ================================================== #
# Helper Functions #
# ================================================== #
def get_element(osm_file, tags=('node', 'way', 'relation')):
"""Yield element if it is the right type of tag"""
context = ET.iterparse(osm_file, events=('start', 'end'))
_, root = next(context)
for event, elem in context:
if event == 'end' and elem.tag in tags:
yield elem
root.clear()
def validate_element(element, validator, schema=SCHEMA):
"""Raise ValidationError if element does not match schema"""
if validator.validate(element, schema) is not True:
field, errors = next(validator.errors.iteritems())
message_string = "\nElement of type '{0}' has the following errors:\n{1}"
error_strings = (
"{0}: {1}".format(k, v if isinstance(v, str) else ", ".join(v))
for k, v in errors.iteritems()
)
raise cerberus.ValidationError(
message_string.format(field, "\n".join(error_strings))
)
class UnicodeDictWriter(csv.DictWriter, object):
"""Extend csv.DictWriter to handle Unicode input"""
def writerow(self, row):
super(UnicodeDictWriter, self).writerow({
k: (v.encode('utf-8') if isinstance(v, unicode) else v) for k, v in row.iteritems()
})
def writerows(self, rows):
for row in rows:
self.writerow(row)
# ================================================== #
# Main Function #
# ================================================== #
def process_map(file_in, validate):
"""Iteratively process each XML element and write to csv(s)"""
with codecs.open(NODES_PATH, 'w') as nodes_file, \
codecs.open(NODE_TAGS_PATH, 'w') as nodes_tags_file, \
codecs.open(WAYS_PATH, 'w') as ways_file, \
codecs.open(WAY_NODES_PATH, 'w') as way_nodes_file, \
codecs.open(WAY_TAGS_PATH, 'w') as way_tags_file:
nodes_writer = UnicodeDictWriter(nodes_file, NODE_FIELDS)
node_tags_writer = UnicodeDictWriter(nodes_tags_file, NODE_TAGS_FIELDS)
ways_writer = UnicodeDictWriter(ways_file, WAY_FIELDS)
way_nodes_writer = UnicodeDictWriter(way_nodes_file, WAY_NODES_FIELDS)
way_tags_writer = UnicodeDictWriter(way_tags_file, WAY_TAGS_FIELDS)
nodes_writer.writeheader()
node_tags_writer.writeheader()
ways_writer.writeheader()
way_nodes_writer.writeheader()
way_tags_writer.writeheader()
validator = cerberus.Validator()
for element in get_element(file_in, tags=('node', 'way')):
el = shape_element(element)
if el:
if validate is True:
validate_element(el, validator)
if element.tag == 'node':
nodes_writer.writerow(el['node'])
node_tags_writer.writerows(el['node_tags'])
elif element.tag == 'way':
ways_writer.writerow(el['way'])
way_nodes_writer.writerows(el['way_nodes'])
way_tags_writer.writerows(el['way_tags'])
print "DONE!!"
if __name__ == '__main__':
process_map(OSM_PATH, validate=True)
| en | 0.47043 | #$@\,\. \t\r\n]') Clean and shape node or way XML element to Python dict # ================================================== # # Helper Functions # # ================================================== # Yield element if it is the right type of tag Raise ValidationError if element does not match schema Extend csv.DictWriter to handle Unicode input # ================================================== # # Main Function # # ================================================== # Iteratively process each XML element and write to csv(s) | 2.377339 | 2 |
Day05_20190906/seq2seq/sentences_to_id.py | Magicboomliu/liuzihua_PKU_intern | 2 | 6615088 | <filename>Day05_20190906/seq2seq/sentences_to_id.py
__author__ = "<NAME>"
# encoding: utf-8
"""
@Author:Che_Hongshu
@Modify:2018.1.8

Convert a corpus file into word-id sequences using a vocabulary file.
"""
import codecs
import os

DATA_TYPE = "english"  # run once with "chinese" and once with "english" to produce both vocab outputs

train_dataset_path = 'D:/BaiduYunDownload/python_exe/dataset/en-zh'

if DATA_TYPE == "chinese":  # Chinese half of the parallel corpus
    RAW_DATA = os.path.join(train_dataset_path, "train.txt.zh")
    VOCAB = "vocab_dicts/zh.vocab"
    OUTPUT_DATA = "preprocessed_data/train.zh"
elif DATA_TYPE == "english":  # English half of the parallel corpus
    RAW_DATA = os.path.join(train_dataset_path, "train.txt.en")
    VOCAB = "vocab_dicts/en.vocab"
    OUTPUT_DATA = "preprocessed_data/train.en"

with codecs.open(VOCAB, 'r', 'utf-8') as f_vocab:  # open the vocabulary file for reading
    vocab = [w.strip() for w in f_vocab.readlines()]  # load every vocabulary word into a list
# Pair each word with its line number and build the word -> id dictionary.
word_to_id = {k: v for (k, v) in zip(vocab, range(len(vocab)))}


# Return the word's id if it is in the vocabulary, otherwise the id of '<unk>'.
def get_id(word):
    return word_to_id[word] if word in word_to_id else word_to_id['<unk>']


# Open the input corpus and the output file.
fin = codecs.open(RAW_DATA, 'r', 'utf-8')
fout = codecs.open(OUTPUT_DATA, 'w', 'utf-8')
for line in fin:
    # Split the line into a word list and append an end-of-sentence marker.
    words = line.strip().split() + ["<eos>"]
    # Replace every word in the line with its id, joined by spaces.
    out_line = ' '.join([str(get_id(w)) for w in words]) + '\n'
    fout.write(out_line)
# Close the files.
fin.close()
fout.close()
| <filename>Day05_20190906/seq2seq/sentences_to_id.py
__author__ = "<NAME>"
#encoding="utf-8"
"""
@Author:Che_Hongshu
@Modify:2018.1.8
"""
import codecs
import os
DATA_TYPE = "english" # 将DATA_TYPE先后设置为chinese,english得到中英文VOCAB文件
train_dataset_path='D:/BaiduYunDownload/python_exe/dataset/en-zh'
if DATA_TYPE == "chinese": # 翻译语料的中文部分
RAW_DATA = os.path.join(train_dataset_path,"train.txt.zh")
VOCAB = "vocab_dicts/zh.vocab"
OUTPUT_DATA = "preprocessed_data/train.zh"
elif DATA_TYPE == "english": # 翻译语料的英文部分
RAW_DATA = os.path.join(train_dataset_path,"train.txt.en")
VOCAB = "vocab_dicts/en.vocab"
OUTPUT_DATA = "preprocessed_data/train.en"
with codecs.open(VOCAB, 'r', 'utf-8') as f_vocab: #打开文件进入读操作
vocab = [w.strip() for w in f_vocab.readlines()] # 先把所有词转换成list
# 把每个词和所在行数对应起来并且zip打包成(“词”,行数)格式转换成dict格式
#建立字典
word_to_id = {k: v for (k, v) in zip(vocab, range(len(vocab)))}
# 返回id 如果在词汇表文件中则返回对应的id即可,如果没有则返回'<unk>'
def get_id(word):
return word_to_id[word] if word in word_to_id else word_to_id['<unk>']
# 打开文件
fin = codecs.open(RAW_DATA, 'r', 'utf-8')
fout = codecs.open(OUTPUT_DATA, 'w', 'utf-8')
for line in fin:
# 每一行的单词变成sring list格式,每一句话后面加上一个结束符号
words = line.strip().split() + ["<eos>"]
# 这一行中的每个单词取出对应的id之后用空格相连接
out_line = ' '.join([str(get_id(w)) for w in words]) + '\n'
fout.write(out_line)
# 关闭文件
fin.close()
fout.close()
| zh | 0.955245 | #encoding="utf-8" @Author:Che_Hongshu @Modify:2018.1.8 # 将DATA_TYPE先后设置为chinese,english得到中英文VOCAB文件 # 翻译语料的中文部分 # 翻译语料的英文部分 #打开文件进入读操作 # 先把所有词转换成list # 把每个词和所在行数对应起来并且zip打包成(“词”,行数)格式转换成dict格式 #建立字典 # 返回id 如果在词汇表文件中则返回对应的id即可,如果没有则返回'<unk>' # 打开文件 # 每一行的单词变成sring list格式,每一句话后面加上一个结束符号 # 这一行中的每个单词取出对应的id之后用空格相连接 # 关闭文件 | 2.719347 | 3 |
dist-packages/defer/utils.py | Jianwei-Wang/python2.7_lib | 0 | 6615089 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Utils for using deferreds with D-Bus."""
# Copyright (C) 2008-2010 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__author__ = "<NAME> <<EMAIL>>"
__all__ = ("dbus_deferred_method", "deferable")
from functools import wraps
import inspect
import dbus
from . import defer, Deferred, DeferredException
def dbus_deferred_method(*args, **kwargs):
"""Export the decorated method on the D-Bus and handle a maybe
returned Deferred.
This decorator can be applied to methods in the same way as the
@dbus.service.method method, but it correctly handles the case where
the method returns a Deferred.
This decorator was kindly taken from James Henstridge blog post and
adopted:
http://blogs.gnome.org/jamesh/2009/07/06/watching-iview-with-rygel/
"""
def decorator(function):
function = dbus.service.method(*args, **kwargs)(function)
@wraps(function)
def wrapper(*args, **kwargs):
def ignore_none_callback(*cb_args):
# The deferred method at least returns an tuple containing
# only None. Ignore this case.
if cb_args == (None,):
dbus_callback()
else:
dbus_callback(*cb_args)
dbus_callback = kwargs.pop('_dbus_callback')
dbus_errback = kwargs.pop('_dbus_errback')
deferred = defer(function, *args, **kwargs)
deferred.add_callback(ignore_none_callback)
deferred.add_errback(lambda error: dbus_errback(error.value))
# The @wraps decorator has copied over the attributes added by
# the @dbus.service.method decorator, but we need to manually
# set the async callback attributes.
wrapper._dbus_async_callbacks = ('_dbus_callback', '_dbus_errback')
return wrapper
return decorator
def deferable(func):
"""Add a defer attribute to the decorated function and return a Deferred
object. The callback of the Deferred will be passed as reply_handler
argument and the errback as the error_handler argument to the decorated
function.
This decorator allows to easily make use of Deferreds in a DBus client.
"""
@wraps(func)
def _deferable(*args, **kwargs):
def on_error(error, deferred):
# Make sure that we return a deferred exception
if isinstance(error, DeferredException):
deferred.errback(error)
else:
deferred.errback(DeferredException(error))
try:
# Check if the defer argument was specified
to_defer = kwargs.pop("defer")
except KeyError:
# Check if this function was called from an inline_callbacks
# decorated method
stack = inspect.stack()
try:
to_defer = stack[2][3] == "_inline_callbacks"
except IndexError:
to_defer = False
if to_defer:
deferred = Deferred()
kwargs["reply_handler"] = deferred.callback
kwargs["error_handler"] = lambda err: on_error(err, deferred)
func(*args, **kwargs)
return deferred
return func(*args, **kwargs)
return _deferable
# vim:tw=4:sw=4:et
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Utils for using deferreds with D-Bus."""
# Copyright (C) 2008-2010 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__author__ = "<NAME> <<EMAIL>>"
__all__ = ("dbus_deferred_method", "deferable")
from functools import wraps
import inspect
import dbus
from . import defer, Deferred, DeferredException
def dbus_deferred_method(*args, **kwargs):
"""Export the decorated method on the D-Bus and handle a maybe
returned Deferred.
This decorator can be applied to methods in the same way as the
@dbus.service.method method, but it correctly handles the case where
the method returns a Deferred.
This decorator was kindly taken from James Henstridge blog post and
adopted:
http://blogs.gnome.org/jamesh/2009/07/06/watching-iview-with-rygel/
"""
def decorator(function):
function = dbus.service.method(*args, **kwargs)(function)
@wraps(function)
def wrapper(*args, **kwargs):
def ignore_none_callback(*cb_args):
# The deferred method at least returns an tuple containing
# only None. Ignore this case.
if cb_args == (None,):
dbus_callback()
else:
dbus_callback(*cb_args)
dbus_callback = kwargs.pop('_dbus_callback')
dbus_errback = kwargs.pop('_dbus_errback')
deferred = defer(function, *args, **kwargs)
deferred.add_callback(ignore_none_callback)
deferred.add_errback(lambda error: dbus_errback(error.value))
# The @wraps decorator has copied over the attributes added by
# the @dbus.service.method decorator, but we need to manually
# set the async callback attributes.
wrapper._dbus_async_callbacks = ('_dbus_callback', '_dbus_errback')
return wrapper
return decorator
def deferable(func):
"""Add a defer attribute to the decorated function and return a Deferred
object. The callback of the Deferred will be passed as reply_handler
argument and the errback as the error_handler argument to the decorated
function.
This decorator allows to easily make use of Deferreds in a DBus client.
"""
@wraps(func)
def _deferable(*args, **kwargs):
def on_error(error, deferred):
# Make sure that we return a deferred exception
if isinstance(error, DeferredException):
deferred.errback(error)
else:
deferred.errback(DeferredException(error))
try:
# Check if the defer argument was specified
to_defer = kwargs.pop("defer")
except KeyError:
# Check if this function was called from an inline_callbacks
# decorated method
stack = inspect.stack()
try:
to_defer = stack[2][3] == "_inline_callbacks"
except IndexError:
to_defer = False
if to_defer:
deferred = Deferred()
kwargs["reply_handler"] = deferred.callback
kwargs["error_handler"] = lambda err: on_error(err, deferred)
func(*args, **kwargs)
return deferred
return func(*args, **kwargs)
return _deferable
# vim:tw=4:sw=4:et
| en | 0.822243 | #!/usr/bin/env python # -*- coding: utf-8 -*- Utils for using deferreds with D-Bus. # Copyright (C) 2008-2010 <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. Export the decorated method on the D-Bus and handle a maybe returned Deferred. This decorator can be applied to methods in the same way as the @dbus.service.method method, but it correctly handles the case where the method returns a Deferred. This decorator was kindly taken from James Henstridge blog post and adopted: http://blogs.gnome.org/jamesh/2009/07/06/watching-iview-with-rygel/ # The deferred method at least returns an tuple containing # only None. Ignore this case. # The @wraps decorator has copied over the attributes added by # the @dbus.service.method decorator, but we need to manually # set the async callback attributes. Add a defer attribute to the decorated function and return a Deferred object. 
The callback of the Deferred will be passed as reply_handler argument and the errback as the error_handler argument to the decorated function. This decorator allows to easily make use of Deferreds in a DBus client. # Make sure that we return a deferred exception # Check if the defer argument was specified # Check if this function was called from an inline_callbacks # decorated method # vim:tw=4:sw=4:et | 1.999394 | 2 |
mab/grid/common.py | maartenbreddels/mab | 1 | 6615090 | <reponame>maartenbreddels/mab
class Domain(object):
def __init__(self, domain=None):
self.domain = domain
def scale_uniform_point(self, point):
if self.domain is None:
return point
else:
newpoint = []
for domain1d, coordinate in zip(self.domain, point):
x1, x2 = domain1d
new_coordinate = coordinate * (x2-x1) + x1
newpoint.append(new_coordinate)
return tuple(newpoint)
def scale_uniform_length(self, length, dimension):
if self.domain is None:
return length
else:
domain1d = self.domain[dimension]
x1, x2 = domain1d
new_length = length * (x2-x1)
return new_length
def scale_to_uniform_length(self, length, dimension):
if self.domain is None:
return length
else:
domain1d = self.domain[dimension]
x1, x2 = domain1d
new_length = length / (x2-x1)
return new_length
def scale_to_uniform_point(self, point):
if self.domain is None:
return point
else:
newpoint = []
for domain1d, coordinate in zip(self.domain, point):
x1, x2 = domain1d
new_coordinate = (coordinate - x1)/(x2-x1)
newpoint.append(new_coordinate)
return tuple(newpoint)
| class Domain(object):
def __init__(self, domain=None):
self.domain = domain
def scale_uniform_point(self, point):
if self.domain is None:
return point
else:
newpoint = []
for domain1d, coordinate in zip(self.domain, point):
x1, x2 = domain1d
new_coordinate = coordinate * (x2-x1) + x1
newpoint.append(new_coordinate)
return tuple(newpoint)
def scale_uniform_length(self, length, dimension):
if self.domain is None:
return length
else:
domain1d = self.domain[dimension]
x1, x2 = domain1d
new_length = length * (x2-x1)
return new_length
def scale_to_uniform_length(self, length, dimension):
if self.domain is None:
return length
else:
domain1d = self.domain[dimension]
x1, x2 = domain1d
new_length = length / (x2-x1)
return new_length
def scale_to_uniform_point(self, point):
if self.domain is None:
return point
else:
newpoint = []
for domain1d, coordinate in zip(self.domain, point):
x1, x2 = domain1d
new_coordinate = (coordinate - x1)/(x2-x1)
newpoint.append(new_coordinate)
return tuple(newpoint) | none | 1 | 3.354587 | 3 | |
grortir/externals/pyswarm/__init__.py | wojtekPi/grortir | 0 | 6615091 | <reponame>wojtekPi/grortir<filename>grortir/externals/pyswarm/__init__.py
"""Package with pyswarm.
https://github.com/tisimst/pyswarm
"""
| """Package with pyswarm.
https://github.com/tisimst/pyswarm
""" | en | 0.731923 | Package with pyswarm. https://github.com/tisimst/pyswarm | 1.009377 | 1 |
tests/test_queue_lengths.py | woltapp/celery-farmer | 10 | 6615092 | from typing import Iterator
from unittest.mock import Mock, patch
from celery import Celery
from pytest import fixture
from celery_farmer.queue_lengths import QueueLengths
from tests import fixtures
from tests.helpers import wait_until_success
@fixture
def _queue_lengths(celery_app: Celery, statsd_mock: Mock
) -> Iterator[QueueLengths]:
queue_lengths = QueueLengths(
celery_app,
statsd_client=statsd_mock,
poll_time=0.1,
)
queue_lengths.start()
yield queue_lengths
queue_lengths.stop()
@fixture
def _active_queues() -> Iterator[Mock]:
with patch('celery.app.control.Inspect.active_queues') as active_queues:
active_queues.return_value = fixtures.active_queues_response
yield active_queues
def test_tracks_counts_of_events(_active_queues: Mock,
_queue_lengths: QueueLengths,
statsd_mock: Mock) -> None:
def assert_mock_gauge_called() -> None:
assert statsd_mock.gauge.called
wait_until_success(assert_mock_gauge_called)
def test_sends_heartbeats(_active_queues: Mock, _queue_lengths: QueueLengths,
statsd_mock: Mock) -> None:
def assert_mock_incr_called() -> None:
assert statsd_mock.incr.called
wait_until_success(assert_mock_incr_called)
assert statsd_mock.incr.call_args[0][0] == 'heartbeats.queue_lengths'
| from typing import Iterator
from unittest.mock import Mock, patch
from celery import Celery
from pytest import fixture
from celery_farmer.queue_lengths import QueueLengths
from tests import fixtures
from tests.helpers import wait_until_success
@fixture
def _queue_lengths(celery_app: Celery, statsd_mock: Mock
) -> Iterator[QueueLengths]:
queue_lengths = QueueLengths(
celery_app,
statsd_client=statsd_mock,
poll_time=0.1,
)
queue_lengths.start()
yield queue_lengths
queue_lengths.stop()
@fixture
def _active_queues() -> Iterator[Mock]:
with patch('celery.app.control.Inspect.active_queues') as active_queues:
active_queues.return_value = fixtures.active_queues_response
yield active_queues
def test_tracks_counts_of_events(_active_queues: Mock,
_queue_lengths: QueueLengths,
statsd_mock: Mock) -> None:
def assert_mock_gauge_called() -> None:
assert statsd_mock.gauge.called
wait_until_success(assert_mock_gauge_called)
def test_sends_heartbeats(_active_queues: Mock, _queue_lengths: QueueLengths,
statsd_mock: Mock) -> None:
def assert_mock_incr_called() -> None:
assert statsd_mock.incr.called
wait_until_success(assert_mock_incr_called)
assert statsd_mock.incr.call_args[0][0] == 'heartbeats.queue_lengths'
| none | 1 | 2.169858 | 2 | |
vec/vec.py | ex00/spacy-ru | 0 | 6615093 | <filename>vec/vec.py
import os
import gensim
from spacy import util
from spacy._ml import flatten, PrecomputableAffine
from spacy.pipeline.pipes import (
TextCategorizer,
build_simple_cnn_text_classifier,
build_bow_text_classifier,
build_text_classifier,
EntityRecognizer,
)
from spacy.syntax._parser_model import ParserModel
from thinc.api import chain
from thinc.neural import Model
from thinc.v2v import Affine
from .vec_utils import my_tok_to_vec
VECTORS = None
class MyTextCategorizer(TextCategorizer):
@classmethod
def Model(cls, nr_class=1, **cfg):
print("Config:", cfg)
embed_size = cfg.get("embed_size", 2000)
token_vector_width = cfg.get("token_vector_width", 96)
if cfg.get("architecture") == "simple_cnn":
tok2vec = get_t2v(token_vector_width, embed_size, **cfg)
return build_simple_cnn_text_classifier(tok2vec, nr_class, **cfg)
elif cfg.get("architecture") == "bow":
return build_bow_text_classifier(nr_class, **cfg)
else:
return build_text_classifier(nr_class, **cfg)
class MyNER(EntityRecognizer):
@classmethod
def Model(cls, nr_class, **cfg):
depth = util.env_opt("parser_hidden_depth", cfg.get("hidden_depth", 1))
subword_features = util.env_opt(
"subword_features", cfg.get("subword_features", True)
)
conv_depth = util.env_opt("conv_depth", cfg.get("conv_depth", 4))
conv_window = util.env_opt("conv_window", cfg.get("conv_depth", 1))
t2v_pieces = util.env_opt("cnn_maxout_pieces", cfg.get("cnn_maxout_pieces", 3))
bilstm_depth = util.env_opt("bilstm_depth", cfg.get("bilstm_depth", 0))
self_attn_depth = util.env_opt("self_attn_depth", cfg.get("self_attn_depth", 0))
assert depth == 1
parser_maxout_pieces = util.env_opt(
"parser_maxout_pieces", cfg.get("maxout_pieces", 2)
)
token_vector_width = util.env_opt(
"token_vector_width", cfg.get("token_vector_width", 96)
)
hidden_width = util.env_opt("hidden_width", cfg.get("hidden_width", 64))
embed_size = util.env_opt("embed_size", cfg.get("embed_size", 2000))
tok2vec = get_t2v(
token_vector_width,
embed_size,
conv_depth=conv_depth,
conv_window=conv_window,
cnn_maxout_pieces=t2v_pieces,
subword_features=subword_features,
bilstm_depth=bilstm_depth,
)
tok2vec = chain(tok2vec, flatten)
tok2vec.nO = token_vector_width
lower = PrecomputableAffine(
hidden_width,
nF=cls.nr_feature,
nI=token_vector_width,
nP=parser_maxout_pieces,
)
lower.nP = parser_maxout_pieces
with Model.use_device("cpu"):
upper = Affine(nr_class, hidden_width, drop_factor=0.0)
upper.W *= 0
cfg = {
"nr_class": nr_class,
"hidden_depth": depth,
"token_vector_width": token_vector_width,
"hidden_width": hidden_width,
"maxout_pieces": parser_maxout_pieces,
"pretrained_vectors": None,
"bilstm_depth": bilstm_depth,
"self_attn_depth": self_attn_depth,
"conv_depth": conv_depth,
"conv_window": conv_window,
"embed_size": embed_size,
"cnn_maxout_pieces": t2v_pieces,
}
return ParserModel(tok2vec, lower, upper), cfg
def get_ft_vec():
global VECTORS
if VECTORS is None:
fdir = os.path.dirname(os.path.dirname(__file__)) + "/data/vec/"
VECTORS = gensim.models.KeyedVectors.load(fdir + "vectors.bin")
return VECTORS
def get_t2v(token_vector_width, embed_size, **cfg):
vectors = get_ft_vec()
t2v = my_tok_to_vec(token_vector_width, embed_size, vectors)
return t2v
if __name__ == "__main__":
import spacy
nlp = spacy.blank("ru")
doc = nlp("Привет вам, мужики!")
doc2 = nlp("Зелёный день шагает по планете...")
docs = [doc, doc2]
t2v = get_t2v(96, 4000)
r = t2v(docs)
print(r)
textcat = MyTextCategorizer(
nlp.vocab, **{"exclusive_classes": True, "architecture": "simple_cnn"}
)
nlp.add_pipe(textcat, name="textcat")
for c in [
"69-я параллель",
"Бизнес",
"Бывший СССР",
"Дом",
"Из жизни",
"Интернет и СМИ",
"Крым",
"Культпросвет ",
"Культура",
"Мир",
"Наука и техника",
"Путешествия",
"Россия",
"Силовые структуры",
"Спорт",
"Ценности",
"Экономика",
]:
textcat.add_label(c)
CFG = {"device": 0, "cpu_count": 4}
nlp.begin_training(**CFG)
df = list(zip(docs, [1, 2]))
# docs_iter = tqdm((nlp.tokenizer(x[0]) for x in df), total=len(df))
# r = list(nlp.pipe(docs_iter))
# print(r)
r = textcat(doc)
| <filename>vec/vec.py
import os
import gensim
from spacy import util
from spacy._ml import flatten, PrecomputableAffine
from spacy.pipeline.pipes import (
TextCategorizer,
build_simple_cnn_text_classifier,
build_bow_text_classifier,
build_text_classifier,
EntityRecognizer,
)
from spacy.syntax._parser_model import ParserModel
from thinc.api import chain
from thinc.neural import Model
from thinc.v2v import Affine
from .vec_utils import my_tok_to_vec
VECTORS = None
class MyTextCategorizer(TextCategorizer):
@classmethod
def Model(cls, nr_class=1, **cfg):
print("Config:", cfg)
embed_size = cfg.get("embed_size", 2000)
token_vector_width = cfg.get("token_vector_width", 96)
if cfg.get("architecture") == "simple_cnn":
tok2vec = get_t2v(token_vector_width, embed_size, **cfg)
return build_simple_cnn_text_classifier(tok2vec, nr_class, **cfg)
elif cfg.get("architecture") == "bow":
return build_bow_text_classifier(nr_class, **cfg)
else:
return build_text_classifier(nr_class, **cfg)
class MyNER(EntityRecognizer):
@classmethod
def Model(cls, nr_class, **cfg):
depth = util.env_opt("parser_hidden_depth", cfg.get("hidden_depth", 1))
subword_features = util.env_opt(
"subword_features", cfg.get("subword_features", True)
)
conv_depth = util.env_opt("conv_depth", cfg.get("conv_depth", 4))
conv_window = util.env_opt("conv_window", cfg.get("conv_depth", 1))
t2v_pieces = util.env_opt("cnn_maxout_pieces", cfg.get("cnn_maxout_pieces", 3))
bilstm_depth = util.env_opt("bilstm_depth", cfg.get("bilstm_depth", 0))
self_attn_depth = util.env_opt("self_attn_depth", cfg.get("self_attn_depth", 0))
assert depth == 1
parser_maxout_pieces = util.env_opt(
"parser_maxout_pieces", cfg.get("maxout_pieces", 2)
)
token_vector_width = util.env_opt(
"token_vector_width", cfg.get("token_vector_width", 96)
)
hidden_width = util.env_opt("hidden_width", cfg.get("hidden_width", 64))
embed_size = util.env_opt("embed_size", cfg.get("embed_size", 2000))
tok2vec = get_t2v(
token_vector_width,
embed_size,
conv_depth=conv_depth,
conv_window=conv_window,
cnn_maxout_pieces=t2v_pieces,
subword_features=subword_features,
bilstm_depth=bilstm_depth,
)
tok2vec = chain(tok2vec, flatten)
tok2vec.nO = token_vector_width
lower = PrecomputableAffine(
hidden_width,
nF=cls.nr_feature,
nI=token_vector_width,
nP=parser_maxout_pieces,
)
lower.nP = parser_maxout_pieces
with Model.use_device("cpu"):
upper = Affine(nr_class, hidden_width, drop_factor=0.0)
upper.W *= 0
cfg = {
"nr_class": nr_class,
"hidden_depth": depth,
"token_vector_width": token_vector_width,
"hidden_width": hidden_width,
"maxout_pieces": parser_maxout_pieces,
"pretrained_vectors": None,
"bilstm_depth": bilstm_depth,
"self_attn_depth": self_attn_depth,
"conv_depth": conv_depth,
"conv_window": conv_window,
"embed_size": embed_size,
"cnn_maxout_pieces": t2v_pieces,
}
return ParserModel(tok2vec, lower, upper), cfg
def get_ft_vec():
global VECTORS
if VECTORS is None:
fdir = os.path.dirname(os.path.dirname(__file__)) + "/data/vec/"
VECTORS = gensim.models.KeyedVectors.load(fdir + "vectors.bin")
return VECTORS
def get_t2v(token_vector_width, embed_size, **cfg):
vectors = get_ft_vec()
t2v = my_tok_to_vec(token_vector_width, embed_size, vectors)
return t2v
if __name__ == "__main__":
import spacy
nlp = spacy.blank("ru")
doc = nlp("Привет вам, мужики!")
doc2 = nlp("Зелёный день шагает по планете...")
docs = [doc, doc2]
t2v = get_t2v(96, 4000)
r = t2v(docs)
print(r)
textcat = MyTextCategorizer(
nlp.vocab, **{"exclusive_classes": True, "architecture": "simple_cnn"}
)
nlp.add_pipe(textcat, name="textcat")
for c in [
"69-я параллель",
"Бизнес",
"Бывший СССР",
"Дом",
"Из жизни",
"Интернет и СМИ",
"Крым",
"Культпросвет ",
"Культура",
"Мир",
"Наука и техника",
"Путешествия",
"Россия",
"Силовые структуры",
"Спорт",
"Ценности",
"Экономика",
]:
textcat.add_label(c)
CFG = {"device": 0, "cpu_count": 4}
nlp.begin_training(**CFG)
df = list(zip(docs, [1, 2]))
# docs_iter = tqdm((nlp.tokenizer(x[0]) for x in df), total=len(df))
# r = list(nlp.pipe(docs_iter))
# print(r)
r = textcat(doc)
| en | 0.378272 | # docs_iter = tqdm((nlp.tokenizer(x[0]) for x in df), total=len(df)) # r = list(nlp.pipe(docs_iter)) # print(r) | 2.589032 | 3 |
tests/tests/tests.py | YulioTech/django-authtools | 0 | 6615094 | import datetime
from unittest import skipIf, skipUnless
from django.core import mail
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.encoding import force_str
from django.utils.translation import gettext as _
from django.forms.fields import Field
from django.conf import settings
from django.contrib.auth import get_user_model
from authtools.admin import BASE_FIELDS
from authtools.forms import (
UserCreationForm,
UserChangeForm,
CaseInsensitiveUsernameFieldCreationForm,
CaseInsensitiveEmailUserCreationForm,
)
User = get_user_model()
def skipIfNotCustomUser(test_func):
return skipIf(settings.AUTH_USER_MODEL == 'auth.User', 'Built-in User model in use')(test_func)
def skipIfCustomUser(test_func):
"""
Copied from django.contrib.auth.tests.utils, This is deprecated in the future, but we still
need it for some of our tests.
"""
return skipIf(settings.AUTH_USER_MODEL != 'auth.User', 'Custom user model in use')(test_func)
class UserCreationFormTest(TestCase):
def setUp(self):
# in built-in UserManager, the order of arguments is:
# username, email, password
# in authtools UserManager, the order of arguments is:
# USERNAME_FIELD, password
User.objects.create_user('<EMAIL>', password='<PASSWORD>')
self.username = User.USERNAME_FIELD
def test_user_already_exists(self):
# The benefit of the custom validation message is only available if the
# messages are translated. We won't be able to translate all the
# strings if we don't know what the username will be ahead of time.
data = {
self.username: '<EMAIL>',
'password1': '<PASSWORD>',
'password2': '<PASSWORD>',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form[self.username].errors, [
force_str(form.error_messages['duplicate_username']) % {'username': self.username}])
def test_password_verification(self):
# The verification password is incorrect.
data = {
self.username: 'jsmith',
'password1': '<PASSWORD>',
'password2': '<PASSWORD>',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["password2"].errors,
[force_str(form.error_messages['password_mismatch'])])
def test_both_passwords(self):
# One (or both) passwords weren't given
data = {self.username: 'jsmith'}
form = UserCreationForm(data)
required_error = [force_str(Field.default_error_messages['required'])]
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, required_error)
data['password2'] = '<PASSWORD>'
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, [])
def test_success(self):
# The success case.
data = {
self.username: '<EMAIL>',
'password1': '<PASSWORD>',
'password2': '<PASSWORD>',
}
if settings.AUTH_USER_MODEL == 'authtools.User':
data['name'] = '<NAME>'
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
u = form.save()
self.assertEqual(getattr(u, self.username), '<EMAIL>')
self.assertTrue(u.check_password('<PASSWORD>'))
self.assertEqual(u, User._default_manager.get_by_natural_key('<EMAIL>'))
def test_generated_fields_list(self):
if settings.AUTH_USER_MODEL == 'auth.User':
fields = ('username', 'email', 'password1', 'password2')
elif settings.AUTH_USER_MODEL == 'authtools.User':
fields = ('email', 'name', 'password1', '<PASSWORD>')
elif settings.AUTH_USER_MODEL == 'tests.User':
fields = ('email', 'full_name', 'preferred_name', 'password1', '<PASSWORD>')
else:
assert False, "I don't know your user model"
form = UserCreationForm()
self.assertSequenceEqual(list(form.fields.keys()), fields)
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class UserChangeFormTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
password='<PASSWORD>',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='testclient',
first_name='Test', last_name='Client', email='<EMAIL>', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
# cls.u3 = User.objects.create(
# password='<PASSWORD>',
# last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='staff',
# first_name='Staff', last_name='Member', email='<EMAIL>', is_staff=True, is_active=True,
# date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
# )
cls.u4 = User.objects.create(
password='', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
username='empty_password', first_name='Empty', last_name='Password', email='<EMAIL>',
is_staff=False, is_active=True, date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u5 = User.objects.create(
password='$', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
username='unmanageable_password', first_name='Unmanageable', last_name='Password',
email='<EMAIL>', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u6 = User.objects.create(
password='<PASSWORD>', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
username='unknown_password', first_name='Unknown', last_name='Password',
email='<EMAIL>', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
def test_bug_14242(self):
# A regression test, introduce by adding an optimization for the
# UserChangeForm.
class MyUserForm(UserChangeForm):
def __init__(self, *args, **kwargs):
super(MyUserForm, self).__init__(*args, **kwargs)
self.fields['groups'].help_text = 'These groups give users different permissions'
class Meta(UserChangeForm.Meta):
fields = ('groups',)
# Just check we can create it
MyUserForm({})
def test_unsuable_password(self):
user = User.objects.get(username='empty_password')
user.set_unusable_password()
user.save()
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_empty_password(self):
user = User.objects.get(username='empty_password')
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_unmanageable_password(self):
user = User.objects.get(username='unmanageable_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."),
form.as_table())
def test_bug_17944_unknown_password_algorithm(self):
user = User.objects.get(username='unknown_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."),
form.as_table())
    def test_bug_19133(self):
        """Bug #19133: the change form never echoes a posted password value back;
        the read-only password field always keeps its initial value."""
        # Use the form to construct the POST data
        user = User.objects.get(username='testclient')
        form_for_data = UserChangeForm(instance=user)
        post_data = form_for_data.initial
        # The password field should be readonly, so anything
        # posted here should be ignored; the form will be
        # valid, and give back the 'initial' value for the
        # password field.
        post_data['password'] = '<PASSWORD>'
        form = UserChangeForm(instance=user, data=post_data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['password'], '<PASSWORD>')
    def test_bug_19349_bound_password_field(self):
        """Bug #19349: a *bound* form still exposes the initial password value
        so the read-only widget can render it."""
        user = User.objects.get(username='testclient')
        form = UserChangeForm(data={}, instance=user)
        # When rendering the bound password field,
        # ReadOnlyPasswordHashWidget needs the initial
        # value to render correctly
        self.assertEqual(form.initial['password'], form['password'].value())
    def test_better_readonly_password_widget(self):
        """The read-only password widget masks the hash with asterisks."""
        user = User.objects.get(username='testclient')
        form = UserChangeForm(instance=user)
        self.assertIn(_('*************'), form.as_table())
class UserAdminTest(TestCase):
    """The admin fieldsets generated for the active user model are correct."""

    def test_generated_fieldsets(self):
        # Expected field tuple per supported AUTH_USER_MODEL setting.
        expected_by_model = {
            'auth.User': ('username', 'email', 'password'),
            'authtools.User': ('email', 'name', 'password'),
            'tests.User': ('email', 'full_name', 'preferred_name', 'password'),
        }
        assert settings.AUTH_USER_MODEL in expected_by_model, "I don't know your user model"
        fields = expected_by_model[settings.AUTH_USER_MODEL]
        self.assertSequenceEqual(BASE_FIELDS[1]['fields'], fields)
class UserManagerTest(TestCase):
    """Exercise the manager's create_user / create_superuser helpers."""

    def test_create_user(self):
        """create_user stores the username field, hashes the password, and
        yields an active, non-staff, non-superuser account."""
        credentials = {
            User.USERNAME_FIELD: '<EMAIL>',
            'password': '<PASSWORD>',
        }
        user = User._default_manager.create_user(**credentials)
        self.assertEqual(getattr(user, User.USERNAME_FIELD), '<EMAIL>')
        self.assertTrue(user.check_password('<PASSWORD>'))
        self.assertEqual(user, User._default_manager.get_by_natural_key('<EMAIL>'))
        self.assertTrue(user.is_active)
        self.assertFalse(user.is_staff)
        self.assertFalse(user.is_superuser)

    @skipIfNotCustomUser
    def test_create_superuser(self):
        """create_superuser grants both the staff and superuser flags."""
        credentials = {
            User.USERNAME_FIELD: '<EMAIL>',
            'password': '<PASSWORD>',
        }
        user = User._default_manager.create_superuser(**credentials)
        self.assertTrue(user.is_staff)
        self.assertTrue(user.is_superuser)
class UserModelTest(TestCase):
    """Model-level checks: default ordering and email_user()."""

    @skipUnless(settings.AUTH_USER_MODEL == 'authtools.User',
                "only check authuser's ordering")
    def test_default_ordering(self):
        self.assertSequenceEqual(['name', 'email'], User._meta.ordering)

    def test_send_mail(self):
        """email_user() sends exactly one message carrying the given fields."""
        recipient = User(email='<EMAIL>')
        recipient.email_user(subject="Subject here",
                             message="This is a message", from_email="<EMAIL>")
        # Exactly one message must land in the test outbox.
        self.assertEqual(len(mail.outbox), 1)
        sent = mail.outbox[0]
        self.assertEqual(sent.subject, "Subject here")
        self.assertEqual(sent.body, "This is a message")
        self.assertEqual(sent.from_email, "<EMAIL>")
        self.assertEqual(sent.to, [recipient.email])
@override_settings(AUTHENTICATION_BACKENDS=['authtools.backends.CaseInsensitiveUsernameFieldModelBackend'])
class CaseInsensitiveTest(TestCase):
    """Login via the case-insensitive backend accepts any username casing."""
    form_class = CaseInsensitiveUsernameFieldCreationForm

    def get_form_data(self, data):
        """Merge *data* onto the extra fields the active user model requires."""
        per_model_extras = {
            'auth.User': {},
            'authtools.User': {
                'name': 'Test Name',
            },
            'tests.User': {
                'full_name': '<NAME>',
                'preferred_name': 'Frank',
            },
        }
        form_data = per_model_extras[settings.AUTH_USER_MODEL]
        form_data.update(data)
        return form_data

    def test_case_insensitive_login_works(self):
        password = '<PASSWORD>'
        creation_form = self.form_class(self.get_form_data({
            User.USERNAME_FIELD: '<EMAIL>',
            'password1': password,
            'password2': password,
        }))
        self.assertTrue(creation_form.is_valid(), creation_form.errors)
        creation_form.save()
        # Both casings of the username must authenticate.
        for username in ('<EMAIL>', '<EMAIL>'):
            self.assertTrue(self.client.login(
                username=username,
                password=password,
            ))
@override_settings(AUTHENTICATION_BACKENDS=['authtools.backends.CaseInsensitiveEmailModelBackend'])
class CaseInsensitiveAliasTest(TestCase):
    """Test that the aliases still work as well"""
    # Legacy alias of CaseInsensitiveUsernameFieldCreationForm; kept for
    # backwards compatibility.
    form_class = CaseInsensitiveEmailUserCreationForm
| import datetime
from unittest import skipIf, skipUnless
from django.core import mail
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.encoding import force_str
from django.utils.translation import gettext as _
from django.forms.fields import Field
from django.conf import settings
from django.contrib.auth import get_user_model
from authtools.admin import BASE_FIELDS
from authtools.forms import (
UserCreationForm,
UserChangeForm,
CaseInsensitiveUsernameFieldCreationForm,
CaseInsensitiveEmailUserCreationForm,
)
User = get_user_model()
def skipIfNotCustomUser(test_func):
    """Skip *test_func* when the stock ``auth.User`` model is active."""
    decorator = skipIf(settings.AUTH_USER_MODEL == 'auth.User', 'Built-in User model in use')
    return decorator(test_func)
def skipIfCustomUser(test_func):
    """Skip *test_func* when a custom user model is active.

    Copied from django.contrib.auth.tests.utils; deprecated upstream, but still
    needed by some of these tests.
    """
    decorator = skipIf(settings.AUTH_USER_MODEL != 'auth.User', 'Custom user model in use')
    return decorator(test_func)
class UserCreationFormTest(TestCase):
    """Validation and field-generation checks for UserCreationForm."""
    def setUp(self):
        # in built-in UserManager, the order of arguments is:
        # username, email, password
        # in authtools UserManager, the order of arguments is:
        # USERNAME_FIELD, password
        User.objects.create_user('<EMAIL>', password='<PASSWORD>')
        self.username = User.USERNAME_FIELD
    def test_user_already_exists(self):
        # The benefit of the custom validation message is only available if the
        # messages are translated. We won't be able to translate all the
        # strings if we don't know what the username will be ahead of time.
        data = {
            self.username: '<EMAIL>',
            'password1': '<PASSWORD>',
            'password2': '<PASSWORD>',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form[self.username].errors, [
            force_str(form.error_messages['duplicate_username']) % {'username': self.username}])
    def test_password_verification(self):
        # The verification password is incorrect.
        data = {
            self.username: 'jsmith',
            'password1': '<PASSWORD>',
            'password2': '<PASSWORD>',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["password2"].errors,
                         [force_str(form.error_messages['password_mismatch'])])
    def test_both_passwords(self):
        # One (or both) passwords weren't given
        data = {self.username: 'jsmith'}
        form = UserCreationForm(data)
        required_error = [force_str(Field.default_error_messages['required'])]
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors, required_error)
        self.assertEqual(form['password2'].errors, required_error)
        data['password2'] = '<PASSWORD>'
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors, required_error)
        self.assertEqual(form['password2'].errors, [])
    def test_success(self):
        # The success case.
        data = {
            self.username: '<EMAIL>',
            'password1': '<PASSWORD>',
            'password2': '<PASSWORD>',
        }
        if settings.AUTH_USER_MODEL == 'authtools.User':
            data['name'] = '<NAME>'
        form = UserCreationForm(data)
        self.assertTrue(form.is_valid())
        u = form.save()
        self.assertEqual(getattr(u, self.username), '<EMAIL>')
        self.assertTrue(u.check_password('<PASSWORD>'))
        self.assertEqual(u, User._default_manager.get_by_natural_key('<EMAIL>'))
    def test_generated_fields_list(self):
        # The generated form fields depend on the active user model.
        if settings.AUTH_USER_MODEL == 'auth.User':
            fields = ('username', 'email', 'password1', 'password2')
        elif settings.AUTH_USER_MODEL == 'authtools.User':
            fields = ('email', 'name', 'password1', '<PASSWORD>')
        elif settings.AUTH_USER_MODEL == 'tests.User':
            fields = ('email', 'full_name', 'preferred_name', 'password1', '<PASSWORD>')
        else:
            assert False, "I don't know your user model"
        form = UserCreationForm()
        self.assertSequenceEqual(list(form.fields.keys()), fields)
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class UserChangeFormTest(TestCase):
    """Rendering/validation checks for UserChangeForm, mainly around how the
    read-only password hash is displayed for various stored password states."""
    @classmethod
    def setUpTestData(cls):
        # Fixture users covering the password-display edge cases tested below.
        cls.u1 = User.objects.create(
            password='<PASSWORD>',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='testclient',
            first_name='Test', last_name='Client', email='<EMAIL>', is_staff=False, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        # cls.u3 = User.objects.create(
        #     password='<PASSWORD>',
        #     last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='staff',
        #     first_name='Staff', last_name='Member', email='<EMAIL>', is_staff=True, is_active=True,
        #     date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        # )
        cls.u4 = User.objects.create(
            password='', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
            username='empty_password', first_name='Empty', last_name='Password', email='<EMAIL>',
            is_staff=False, is_active=True, date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        cls.u5 = User.objects.create(
            password='$', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
            username='unmanageable_password', first_name='Unmanageable', last_name='Password',
            email='<EMAIL>', is_staff=False, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        cls.u6 = User.objects.create(
            password='<PASSWORD>', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
            username='unknown_password', first_name='Unknown', last_name='Password',
            email='<EMAIL>', is_staff=False, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
    def test_bug_14242(self):
        """Regression (#14242): restricted-``fields`` subclass still instantiable."""
        # A regression test, introduce by adding an optimization for the
        # UserChangeForm.
        class MyUserForm(UserChangeForm):
            def __init__(self, *args, **kwargs):
                super(MyUserForm, self).__init__(*args, **kwargs)
                self.fields['groups'].help_text = 'These groups give users different permissions'
            class Meta(UserChangeForm.Meta):
                fields = ('groups',)
        # Just check we can create it
        MyUserForm({})
    def test_unsuable_password(self):
        """Unusable password renders "No password set."."""
        user = User.objects.get(username='empty_password')
        user.set_unusable_password()
        user.save()
        form = UserChangeForm(instance=user)
        self.assertIn(_("No password set."), form.as_table())
    def test_bug_17944_empty_password(self):
        """Bug #17944: empty password string renders "No password set."."""
        user = User.objects.get(username='empty_password')
        form = UserChangeForm(instance=user)
        self.assertIn(_("No password set."), form.as_table())
    def test_bug_17944_unmanageable_password(self):
        """Bug #17944: malformed hash renders the invalid-format message."""
        user = User.objects.get(username='unmanageable_password')
        form = UserChangeForm(instance=user)
        self.assertIn(_("Invalid password format or unknown hashing algorithm."),
            form.as_table())
    def test_bug_17944_unknown_password_algorithm(self):
        """Bug #17944: unknown hash algorithm renders the invalid-format message."""
        user = User.objects.get(username='unknown_password')
        form = UserChangeForm(instance=user)
        self.assertIn(_("Invalid password format or unknown hashing algorithm."),
            form.as_table())
    def test_bug_19133(self):
        "The change form does not return the password value"
        # Use the form to construct the POST data
        user = User.objects.get(username='testclient')
        form_for_data = UserChangeForm(instance=user)
        post_data = form_for_data.initial
        # The password field should be readonly, so anything
        # posted here should be ignored; the form will be
        # valid, and give back the 'initial' value for the
        # password field.
        post_data['password'] = '<PASSWORD>'
        form = UserChangeForm(instance=user, data=post_data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['password'], '<PASSWORD>')
    def test_bug_19349_bound_password_field(self):
        """Bug #19349: bound form still exposes the initial password value."""
        user = User.objects.get(username='testclient')
        form = UserChangeForm(data={}, instance=user)
        # When rendering the bound password field,
        # ReadOnlyPasswordHashWidget needs the initial
        # value to render correctly
        self.assertEqual(form.initial['password'], form['password'].value())
    def test_better_readonly_password_widget(self):
        """The read-only password widget masks the hash with asterisks."""
        user = User.objects.get(username='testclient')
        form = UserChangeForm(instance=user)
        self.assertIn(_('*************'), form.as_table())
class UserAdminTest(TestCase):
    """The admin fieldsets generated for the active user model are correct."""
    def test_generated_fieldsets(self):
        # Expected admin fields depend on which AUTH_USER_MODEL is configured.
        if settings.AUTH_USER_MODEL == 'auth.User':
            fields = ('username', 'email', 'password')
        elif settings.AUTH_USER_MODEL == 'authtools.User':
            fields = ('email', 'name', 'password')
        elif settings.AUTH_USER_MODEL == 'tests.User':
            fields = ('email', 'full_name', 'preferred_name', 'password')
        else:
            assert False, "I don't know your user model"
        self.assertSequenceEqual(BASE_FIELDS[1]['fields'], fields)
class UserManagerTest(TestCase):
    """Manager helpers: create_user / create_superuser flag defaults."""
    def test_create_user(self):
        u = User._default_manager.create_user(**{
            User.USERNAME_FIELD: '<EMAIL>',
            'password': '<PASSWORD>',
        })
        self.assertEqual(getattr(u, User.USERNAME_FIELD), '<EMAIL>')
        self.assertTrue(u.check_password('<PASSWORD>'))
        self.assertEqual(u, User._default_manager.get_by_natural_key('<EMAIL>'))
        # Default flags: active, but neither staff nor superuser.
        self.assertTrue(u.is_active)
        self.assertFalse(u.is_staff)
        self.assertFalse(u.is_superuser)
    @skipIfNotCustomUser
    def test_create_superuser(self):
        u = User._default_manager.create_superuser(**{
            User.USERNAME_FIELD: '<EMAIL>',
            'password': '<PASSWORD>',
        })
        # Superusers get both elevated flags.
        self.assertTrue(u.is_staff)
        self.assertTrue(u.is_superuser)
class UserModelTest(TestCase):
    """Model-level checks: default ordering and email_user()."""
    @skipUnless(settings.AUTH_USER_MODEL == 'authtools.User',
                "only check authuser's ordering")
    def test_default_ordering(self):
        self.assertSequenceEqual(['name', 'email'], User._meta.ordering)
    def test_send_mail(self):
        abstract_user = User(email='<EMAIL>')
        abstract_user.email_user(subject="Subject here",
                                 message="This is a message", from_email="<EMAIL>")
        # Test that one message has been sent.
        self.assertEqual(len(mail.outbox), 1)
        # Verify that test email contains the correct attributes:
        message = mail.outbox[0]
        self.assertEqual(message.subject, "Subject here")
        self.assertEqual(message.body, "This is a message")
        self.assertEqual(message.from_email, "<EMAIL>")
        self.assertEqual(message.to, [abstract_user.email])
@override_settings(AUTHENTICATION_BACKENDS=['authtools.backends.CaseInsensitiveUsernameFieldModelBackend'])
class CaseInsensitiveTest(TestCase):
    """Login via the case-insensitive backend accepts any username casing."""
    form_class = CaseInsensitiveUsernameFieldCreationForm
    def get_form_data(self, data):
        # Per-model extra fields required to make the creation form valid.
        base_data = {
            'auth.User': {},
            'authtools.User': {
                'name': 'Test Name',
            },
            'tests.User': {
                'full_name': '<NAME>',
                'preferred_name': 'Frank',
            }
        }
        defaults = base_data[settings.AUTH_USER_MODEL]
        defaults.update(data)
        return defaults
    def test_case_insensitive_login_works(self):
        password = '<PASSWORD>'
        form = self.form_class(self.get_form_data({
            User.USERNAME_FIELD: '<EMAIL>',
            'password1': password,
            'password2': password,
        }))
        self.assertTrue(form.is_valid(), form.errors)
        form.save()
        # Both casings of the username must authenticate.
        self.assertTrue(self.client.login(
            username='<EMAIL>',
            password=password,
        ))
        self.assertTrue(self.client.login(
            username='<EMAIL>',
            password=password,
        ))
@override_settings(AUTHENTICATION_BACKENDS=['authtools.backends.CaseInsensitiveEmailModelBackend'])
class CaseInsensitiveAliasTest(TestCase):
    """Test that the aliases still work as well"""
    # Legacy alias of CaseInsensitiveUsernameFieldCreationForm.
    form_class = CaseInsensitiveEmailUserCreationForm
| en | 0.777804 | Copied from django.contrib.auth.tests.utils, This is deprecated in the future, but we still need it for some of our tests. # in built-in UserManager, the order of arguments is: # username, email, password # in authtools UserManager, the order of arguments is: # USERNAME_FIELD, password # The benefit of the custom validation message is only available if the # messages are translated. We won't be able to translate all the # strings if we don't know what the username will be ahead of time. # The verification password is incorrect. # One (or both) passwords weren't given # The success case. # cls.u3 = User.objects.create( # password='<PASSWORD>', # last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='staff', # first_name='Staff', last_name='Member', email='<EMAIL>', is_staff=True, is_active=True, # date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31) # ) # A regression test, introduce by adding an optimization for the # UserChangeForm. # Just check we can create it # Use the form to construct the POST data # The password field should be readonly, so anything # posted here should be ignored; the form will be # valid, and give back the 'initial' value for the # password field. # When rendering the bound password field, # ReadOnlyPasswordHashWidget needs the initial # value to render correctly # Test that one message has been sent. # Verify that test email contains the correct attributes: Test that the aliases still work as well | 2.332253 | 2 |
tests/resources/functions/python/main.py | muradahmed25/appwrite | 1 | 6615095 | <reponame>muradahmed25/appwrite<gh_stars>1-10
import json
# 'req' variable has:
# 'headers' - object with request headers
# 'payload' - object with request body data
# 'env' - object with environment variables
# 'res' variable has:
# 'send(text, status)' - function to return text response. Status code defaults to 200
# 'json(obj, status)' - function to return JSON response. Status code defaults to 200
#
# If an error is thrown, a response with code 500 will be returned.
def main(request, response):
    """Appwrite cloud-function entry point: echo selected environment
    variables back to the caller as a JSON payload."""
    # Every key below is read from request.env and mirrored verbatim.
    echoed_keys = (
        'APPWRITE_FUNCTION_ID',
        'APPWRITE_FUNCTION_NAME',
        'APPWRITE_FUNCTION_DEPLOYMENT',
        'APPWRITE_FUNCTION_TRIGGER',
        'APPWRITE_FUNCTION_RUNTIME_NAME',
        'APPWRITE_FUNCTION_RUNTIME_VERSION',
        'APPWRITE_FUNCTION_EVENT',
        'APPWRITE_FUNCTION_EVENT_DATA',
        'APPWRITE_FUNCTION_DATA',
        'APPWRITE_FUNCTION_USER_ID',
        'APPWRITE_FUNCTION_JWT',
        'APPWRITE_FUNCTION_PROJECT_ID',
        'CUSTOM_VARIABLE',
    )
    payload = {key: request.env[key] for key in echoed_keys}
    return response.json(payload)
# 'req' variable has:
# 'headers' - object with request headers
# 'payload' - object with request body data
# 'env' - object with environment variables
# 'res' variable has:
# 'send(text, status)' - function to return text response. Status code defaults to 200
# 'json(obj, status)' - function to return JSON response. Status code defaults to 200
#
# If an error is thrown, a response with code 500 will be returned.
def main(request, response):
    """Appwrite cloud-function entry point: echo selected environment
    variables back to the caller as a JSON payload."""
    return response.json({
        'APPWRITE_FUNCTION_ID' : request.env['APPWRITE_FUNCTION_ID'],
        'APPWRITE_FUNCTION_NAME' : request.env['APPWRITE_FUNCTION_NAME'],
        'APPWRITE_FUNCTION_DEPLOYMENT' : request.env['APPWRITE_FUNCTION_DEPLOYMENT'],
        'APPWRITE_FUNCTION_TRIGGER' : request.env['APPWRITE_FUNCTION_TRIGGER'],
        'APPWRITE_FUNCTION_RUNTIME_NAME' : request.env['APPWRITE_FUNCTION_RUNTIME_NAME'],
        'APPWRITE_FUNCTION_RUNTIME_VERSION' : request.env['APPWRITE_FUNCTION_RUNTIME_VERSION'],
        'APPWRITE_FUNCTION_EVENT' : request.env['APPWRITE_FUNCTION_EVENT'],
        'APPWRITE_FUNCTION_EVENT_DATA' : request.env['APPWRITE_FUNCTION_EVENT_DATA'],
        'APPWRITE_FUNCTION_DATA' : request.env['APPWRITE_FUNCTION_DATA'],
        'APPWRITE_FUNCTION_USER_ID' : request.env['APPWRITE_FUNCTION_USER_ID'],
        'APPWRITE_FUNCTION_JWT' : request.env['APPWRITE_FUNCTION_JWT'],
        'APPWRITE_FUNCTION_PROJECT_ID' : request.env['APPWRITE_FUNCTION_PROJECT_ID'],
        'CUSTOM_VARIABLE' : request.env['CUSTOM_VARIABLE'],
    })
users/migrations/0008_remove_user_phone.py | LukasBaecker/be-stadtlabor | 1 | 6615096 | <filename>users/migrations/0008_remove_user_phone.py<gh_stars>1-10
# Generated by Django 3.1 on 2022-01-23 11:56
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the ``phone`` field from the custom ``User`` model."""
    # Must be applied after the previous users-app migration.
    dependencies = [
        ('users', '0007_auto_20211231_2258'),
    ]
    operations = [
        # NOTE(review): removing the column discards any stored phone numbers.
        migrations.RemoveField(
            model_name='user',
            name='phone',
        ),
    ]
| <filename>users/migrations/0008_remove_user_phone.py<gh_stars>1-10
# Generated by Django 3.1 on 2022-01-23 11:56
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the ``phone`` field from the custom ``User`` model."""
    # Must be applied after the previous users-app migration.
    dependencies = [
        ('users', '0007_auto_20211231_2258'),
    ]
    operations = [
        # NOTE(review): removing the column discards any stored phone numbers.
        migrations.RemoveField(
            model_name='user',
            name='phone',
        ),
    ]
| en | 0.875882 | # Generated by Django 3.1 on 2022-01-23 11:56 | 1.538464 | 2 |
tests/test_pydict2json.py | sisi100/pydict2json | 0 | 6615097 | import json
from collections import OrderedDict
from copy import copy
from decimal import Decimal
from typing import Tuple
import pytest
from pydict2json import pydict2json
def clear_text(text: str) -> str:
    """Remove all line breaks and spaces from *text*.

    Used to normalise JSON/dict renderings before comparison so that
    formatting differences do not affect equality checks.
    """
    # splitlines() drops every kind of line boundary; spaces are stripped after.
    return "".join(text.splitlines()).replace(" ", "")
def dummy_data() -> Tuple[str, str]:
    """Build one test case for ``pydict2json.text_to_json``.

    Returns:
        A pair ``(python_dict_text, json_text)``: the ``str()`` rendering of a
        dict containing an ``OrderedDict`` and a ``Decimal``, and the expected
        plain-JSON equivalent (whitespace removed).

    Note: the original annotation ``-> str`` was wrong; the function returns a
    two-tuple of strings.
    """
    # Expected JSON: plain types only (the Decimal rendered as a string).
    expected = {
        "hoge": 1234,
        "hoge_ordered_dict": {"key1": 1, "key2": "a"},
        "hoge_decimal": "348559.097765",
    }
    json_text = clear_text(json.dumps(expected))
    # Input text: the same data, but with the non-JSON types Python prints.
    source = copy(expected)
    source.update(
        {
            "hoge_ordered_dict": OrderedDict(expected["hoge_ordered_dict"]),
            "hoge_decimal": Decimal(expected["hoge_decimal"]),
        }
    )
    python_dict_text = str(source)
    return python_dict_text, json_text
@pytest.mark.parametrize("python_dict_text, json_text", [dummy_data()])
def test_success(python_dict_text: str, json_text: str):
    # text_to_json must turn the Python-dict rendering into the expected JSON
    # (comparison is whitespace-insensitive via clear_text).
    assert json_text == clear_text(pydict2json.text_to_json(python_dict_text))
| import json
from collections import OrderedDict
from copy import copy
from decimal import Decimal
from typing import Tuple
import pytest
from pydict2json import pydict2json
def clear_text(text: str) -> str:
    """Remove line breaks and spaces (original: 改行と空白を削除する)."""
    lines = text.splitlines()
    return "".join(lines).replace(" ", "")
def dummy_data() -> Tuple[str, str]:
    """Build ``(python_dict_text, expected_json_text)`` for the success test.

    The return annotation is corrected from ``-> str``: the function returns a
    two-tuple of strings.
    """
    # Expected value (original comment: 期待値)
    dummy_1 = {
        "hoge": 1234,
        "hoge_ordered_dict": {"key1": 1, "key2": "a"},
        "hoge_decimal": "348559.097765",
    }
    json_text = clear_text(json.dumps(dummy_1))
    # Text data for the test (original comment: テスト用のテキストデータ)
    dummy_2 = copy(dummy_1)
    dummy_2.update(
        {
            "hoge_ordered_dict": OrderedDict(dummy_1["hoge_ordered_dict"]),
            "hoge_decimal": Decimal(dummy_1["hoge_decimal"]),
        }
    )
    python_dict_text = dummy_2.__str__()
    return python_dict_text, json_text
@pytest.mark.parametrize("python_dict_text, json_text", [dummy_data()])
def test_success(python_dict_text: str, json_text: str):
    # text_to_json must turn the Python-dict rendering into the expected JSON
    # (comparison is whitespace-insensitive via clear_text).
    assert json_text == clear_text(pydict2json.text_to_json(python_dict_text))
| ja | 1.000041 | 改行と空白を削除する # 期待値 # テスト用のテキストデータ | 2.592551 | 3 |
exams/2014/6_golden_rule.py | JoaoCostaIFG/MNUM | 1 | 6615098 | #!/usr/bin/env python3
from math import sin, sqrt
def f(x):
    """Objective function: f(x) = x + (x - 2)^2 / (sin(x) + 4)."""
    quadratic_term = (x - 2) ** 2
    return x + quadratic_term / (sin(x) + 4)
B = (sqrt(5) - 1) / 2  # golden-ratio conjugate, ~0.618
A = B**2  # ~0.382; the complementary golden-section factor
# Search bracket [x1, x2] and the two interior probe points (precomputed from
# the commented formulas below: A*(x2-x1)+x1 = -0.04508..., B*(x2-x1)+x1 = 0.54508...).
x1 = -1
x2 = 1.5
x3 = -0.045080
x4 = 0.545085
# x3 = A * (x2 - x1) + x1
# x4 = B * (x2 - x1) + x1
print("x1:", x1, "x2:", x2, "x3:", x3, "x4:", x4)
print("f(x1)", f(x1), "f(x2)", f(x2), "f(x3)", f(x3), "f(x4)", f(x4), "\n")
# Two iterations of golden-section search for the minimum of f.
for i in range(2):
    if (f(x3) < f(x4)):
        # Minimum lies in [x1, x4]: shrink the bracket from the right.
        x2 = x4
        x4 = x3
        x3 = x1 + A * (x2 - x1)
    else:
        # Minimum lies in [x3, x2]: shrink the bracket from the left.
        x1 = x3
        x3 = x4
        x4 = x1 + B * (x2 - x1)
    print("x1:", x1, "x2:", x2, "x3:", x3, "x4:", x4)
    print("f(x1)", f(x1), "f(x2)", f(x2), "f(x3)", f(x3), "f(x4)", f(x4), "\n")
| #!/usr/bin/env python3
from math import sin, sqrt
def f(x):
    """Objective function: f(x) = x + (x - 2)**2 / (sin(x) + 4)."""
    return x + ((x - 2)**2) / (sin(x) + 4)
B = (sqrt(5) - 1) / 2  # golden-ratio conjugate, ~0.618
A = B**2  # ~0.382; the complementary golden-section factor
# Search bracket [x1, x2] and the two interior probe points.
x1 = -1
x2 = 1.5
x3 = -0.045080
x4 = 0.545085
# x3 = A * (x2 - x1) + x1
# x4 = B * (x2 - x1) + x1
print("x1:", x1, "x2:", x2, "x3:", x3, "x4:", x4)
print("f(x1)", f(x1), "f(x2)", f(x2), "f(x3)", f(x3), "f(x4)", f(x4), "\n")
# Two iterations of golden-section search for the minimum of f.
for i in range(2):
    if (f(x3) < f(x4)):
        # Minimum lies in [x1, x4]: shrink the bracket from the right.
        x2 = x4
        x4 = x3
        x3 = x1 + A * (x2 - x1)
    else:
        # Minimum lies in [x3, x2]: shrink the bracket from the left.
        x1 = x3
        x3 = x4
        x4 = x1 + B * (x2 - x1)
    print("x1:", x1, "x2:", x2, "x3:", x3, "x4:", x4)
    print("f(x1)", f(x1), "f(x2)", f(x2), "f(x3)", f(x3), "f(x4)", f(x4), "\n")
| en | 0.321388 | #!/usr/bin/env python3 # x3 = A * (x2 - x1) + x1 # x4 = B * (x2 - x1) + x1 | 3.589438 | 4 |
backend/backend/serializers.py | val-ilyukh/DRF-poll-app | 0 | 6615099 | from rest_framework import serializers
from rest_framework.serializers import ValidationError
from .models import *
import datetime
class PollSerializer(serializers.ModelSerializer):
    '''
    Serializer for the Poll model.
    The poll start date may not be earlier than the current date.
    The poll finish date may not be earlier than the start date.
    '''
    class Meta:
        model = Poll
        fields = '__all__'
        read_only_fields = ['id',]
    def validate(self, data):
        '''
        Validate the start and finish dates.
        '''
        if data['startDate'] > data['finishDate']:
            raise serializers.ValidationError("finish must occur after start")
        today = datetime.date.today()
        if data['startDate'] < today:
            raise serializers.ValidationError("Unable to start polls backdating")
        return data
    def get_fields(self):
        '''
        If an existing instance is loaded, make the start date read-only.
        '''
        fields = super(PollSerializer, self).get_fields()
        if self.instance and getattr(self.instance, 'startDate', None):
            fields['startDate'].read_only = True
        return fields
class QuestionSerializer(serializers.ModelSerializer):
    """Serializer for a poll question; the type must be one of three kinds."""
    class Meta:
        model = Question
        exclude = ['poll',]
    def validate(self, data):
        # Reject anything outside the supported question kinds.
        allowed_types = ('TEXT', 'CHOICE', 'MULTIPLE_CHOICE')
        if data['type'] not in allowed_types:
            raise ValidationError('Invalid question type')
        return data
class OptionSerializer(serializers.ModelSerializer):
    '''
    Serializer for an answer option.
    '''
    class Meta:
        model = Option
        exclude = ['question',]
class UserOptionSerializer(serializers.Serializer):
    '''
    Helper serializer for presenting answer options to a user.
    Not backed by a model class.
    '''
    index = serializers.IntegerField()
    text = serializers.CharField(max_length=100)
class UserResponseSerializer(serializers.ModelSerializer):
    '''
    A completed poll (user response).
    '''
    class Meta:
        model = UserResponse
        fields = '__all__'
| from rest_framework import serializers
from rest_framework.serializers import ValidationError
from .models import *
import datetime
class PollSerializer(serializers.ModelSerializer):
    '''
    Serializer for the Poll model.
    The poll start date may not be earlier than the current date.
    The poll finish date may not be earlier than the start date.
    '''
    class Meta:
        model = Poll
        fields = '__all__'
        read_only_fields = ['id',]
    def validate(self, data):
        '''
        Validate the start and finish dates.
        '''
        if data['startDate'] > data['finishDate']:
            raise serializers.ValidationError("finish must occur after start")
        today = datetime.date.today()
        if data['startDate'] < today:
            raise serializers.ValidationError("Unable to start polls backdating")
        return data
    def get_fields(self):
        '''
        If an existing instance is loaded, make the start date read-only.
        '''
        fields = super(PollSerializer, self).get_fields()
        if self.instance and getattr(self.instance, 'startDate', None):
            fields['startDate'].read_only = True
        return fields
class QuestionSerializer(serializers.ModelSerializer):
    '''
    Serializer for a poll question.
    The question must be one of three types.
    '''
    class Meta:
        model = Question
        exclude = ['poll',]
    def validate(self, data):
        # Reject anything outside the supported question kinds.
        if not data['type'] in ['TEXT', 'CHOICE', 'MULTIPLE_CHOICE']:
            raise ValidationError('Invalid question type')
        return data
class OptionSerializer(serializers.ModelSerializer):
    '''
    Serializer for an answer option.
    '''
    class Meta:
        model = Option
        exclude = ['question',]
class UserOptionSerializer(serializers.Serializer):
    '''
    Helper serializer for presenting answer options to a user.
    Not backed by a model class.
    '''
    index = serializers.IntegerField()
    text = serializers.CharField(max_length=100)
class UserResponseSerializer(serializers.ModelSerializer):
    '''
    A completed poll (user response).
    '''
    class Meta:
        model = UserResponse
        fields = '__all__'
| ru | 0.996901 | Сериализатор модели опроса Дата начала опроса не может быть меньше, чем текущая дата Дата окончания опроса не может быть меньше, чем дата начала опроса проверяем дату начала и дату окончания Если объект загружен, устанавливаем свойство read_only для даты начала опроса Сериализатор вопроса Вопрос должен быть одного из 3х типов Сериализатор варианта ответа Вспомогательный сериализатор для формирования ваариантов ответа для пользователя Не базируется на классе Заполненный опрос | 2.522727 | 3 |
simple/lstm.py | cvanoort/differentiable-plasticity | 0 | 6615100 | # Memorization of two 50-bit binary patterns per episode, with LSTMs. Takes a very long time to learn the task, and even then imperfectly. 2050 neurons (fewer neurons = worse performance).
#
#
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from numpy import random
import torch.nn.functional as F
from torch import optim
import random
import sys
import pickle as pickle
import pdb
import time
# Uber-only (comment out if not at Uber)
import OpusHdfsCopy
from OpusHdfsCopy import transferFileToHdfsDir, checkHdfs
# Parsing command-line arguments
# Default hyperparameters; any CLI flag supplied overrides the default below.
params = {}; params['rngseed'] = 0
parser = argparse.ArgumentParser()
parser.add_argument("--rngseed", type=int, help="random seed", default=0)
parser.add_argument("--nbiter", type=int, help="number of episodes", default=2000)
parser.add_argument("--clamp", type=int, help="whether inputs are clamping (1) or not (0)", default=1)
parser.add_argument("--nbaddneurons", type=int, help="number of additional neurons", default=0)
parser.add_argument("--lr", type=float, help="learning rate of Adam optimizer", default=3e-4)
parser.add_argument("--patternsize", type=int, help="size of the binary patterns", default=1000)
parser.add_argument("--nbpatterns", type=int, help="number of patterns to memorize", default=5)
parser.add_argument("--nbprescycles", type=int, help="number of presentation cycles", default=2)
parser.add_argument("--prestime", type=int, help="number of time steps for each pattern presentation", default=6)
parser.add_argument("--interpresdelay", type=int, help="number of time steps between each pattern presentation (with zero input)", default=4)
parser.add_argument("--type", help="network type ('plastic' or 'nonplastic')", default='plastic')
# Merge only the CLI args that were actually provided into params.
# NOTE(review): `argvars[k] != None` should idiomatically be `is not None`.
args = parser.parse_args(); argvars = vars(args); argdict = { k : argvars[k] for k in argvars if argvars[k] != None }
params.update(argdict)
# Derived experiment constants (see argparse help strings above).
PATTERNSIZE = params['patternsize']
NBHIDDENNEUR = PATTERNSIZE + params['nbaddneurons'] + 1 # NbNeur = Pattern Size + additional neurons + 1 "bias", fixed-output neuron (bias neuron not needed for this task, but included for completeness)
ETA = .01 # The "learning rate" of plastic connections; not used for LSTMs
ADAMLEARNINGRATE = params['lr']
PROBADEGRADE = .5 # Proportion of bits to zero out in the target pattern at test time
CLAMPING = params['clamp']
NBPATTERNS = params['nbpatterns'] # The number of patterns to learn in each episode
NBPRESCYCLES = params['nbprescycles'] # Number of times each pattern is to be presented
PRESTIME = params['prestime'] # Number of time steps for each presentation
PRESTIMETEST = PRESTIME # Same thing but for the final test pattern
INTERPRESDELAY = params['interpresdelay'] # Duration of zero-input interval between presentations
NBSTEPS = NBPRESCYCLES * ((PRESTIME + INTERPRESDELAY) * NBPATTERNS) + PRESTIMETEST # Total number of steps per episode
RNGSEED = params['rngseed']
# Historical alternative configuration, kept for reference.
#PATTERNSIZE = 50
#
## Note: For LSTM, there are PATTERNSIZE input and output neurons, and NBHIDDENNEUR neurons in the hidden recurrent layer
##NBNEUR = PATTERNSIZE # NbNeur = Pattern Size + 1 "bias", fixed-output neuron (bias neuron not needed for this task, but included for completeness)
#NBHIDDENNEUR = 2000 # 1000 takes longer
#
##ETA = .01 # The "learning rate" of plastic connections. Not used for LSTMs.
#ADAMLEARNINGRATE = 3e-5 # 1e-4 # 3e-5 works better in the long run. 1e-4 OK. 3e-4 fails.
#RNGSEED = 0
#
#PROBADEGRADE = .5 # Proportion of bits to zero out in the target pattern at test time
#NBPATTERNS = 2 # The number of patterns to learn in each episode
#NBPRESCYCLES = 1 # Number of times each pattern is to be presented
#PRESTIME = 3 # Number of time steps for each presentation
#PRESTIMETEST = 3 # Same thing but for the final test pattern
#INTERPRESDELAY = 1 # Duration of zero-input interval between presentations
#NBSTEPS = NBPRESCYCLES * ((PRESTIME + INTERPRESDELAY) * NBPATTERNS) + PRESTIMETEST # Total number of steps per episode
#ttype = torch.FloatTensor;
# All tensors are allocated on the GPU; requires a CUDA-capable device.
ttype = torch.cuda.FloatTensor;
# Generate the full list of inputs for an episode. The inputs are returned as a PyTorch tensor of shape NbSteps x 1 x NbNeur
def generateInputsAndTarget():
#inputT = np.zeros((NBSTEPS, 1, NBNEUR)) #inputTensor, initially in numpy format...
inputT = np.zeros((NBSTEPS, 1, PATTERNSIZE)) #inputTensor, initially in numpy format...
# Create the random patterns to be memorized in an episode
seedp = np.ones(PATTERNSIZE); seedp[:PATTERNSIZE//2] = -1
patterns=[]
for nump in range(NBPATTERNS):
p = np.random.permutation(seedp)
patterns.append(p)
# Now 'patterns' contains the NBPATTERNS patterns to be memorized in this episode - in numpy format
# Choosing the test pattern, partially zero'ed out, that the network will have to complete
testpattern = random.choice(patterns).copy()
#testpattern = patterns[1].copy()
preservedbits = np.ones(PATTERNSIZE); preservedbits[:int(PROBADEGRADE * PATTERNSIZE)] = 0; np.random.shuffle(preservedbits)
degradedtestpattern = testpattern * preservedbits
# Inserting the inputs in the input tensor at the proper places
for nc in range(NBPRESCYCLES):
np.random.shuffle(patterns)
for ii in range(NBPATTERNS):
for nn in range(PRESTIME):
numi =nc * (NBPATTERNS * (PRESTIME+INTERPRESDELAY)) + ii * (PRESTIME+INTERPRESDELAY) + nn
inputT[numi][0][:PATTERNSIZE] = patterns[ii][:]
# Inserting the degraded pattern
for nn in range(PRESTIMETEST):
inputT[-PRESTIMETEST + nn][0][:PATTERNSIZE] = degradedtestpattern[:]
for nn in range(NBSTEPS):
#inputT[nn][0][-1] = 1.0 # Bias neuron.
inputT[nn] *= 100.0 # Strengthen inputs
inputT = torch.from_numpy(inputT).type(ttype) # Convert from numpy to Tensor
target = torch.from_numpy(testpattern).type(ttype)
return inputT, target
class NETWORK(nn.Module):
def __init__(self):
super(NETWORK, self).__init__()
self.lstm = torch.nn.LSTM(PATTERNSIZE, NBHIDDENNEUR).cuda() #input size, hidden size
self.hidden = self.initialZeroState() # Note that the "hidden state" is a tuple (hidden state, cells state)
def forward(self, inputs,):
# Run the network over entire sequence of inputs
self.hidden = self.initialZeroState()
if CLAMPING:
# This code allows us to make the inputs on the LSTM "clamping",
# i.e. neurons that receive an input have their output clamped at
# this value, to make it similar to the RNN architectures.
#
# Note that you get worse results if you don't use it ! ("CLAMPING = 0" above) (clamping automatically reduces chance error to ~.25, since all input bits are always correct)
#
#self.lstm.weight_hh_l0.data.fill_(0)
#self.lstm.weight_ih_l0.data.fill_(0)
self.lstm.bias_hh_l0.data.fill_(0)
#self.lstm.bias_ih_l0.data.fill_(0)
for ii in range(PATTERNSIZE):
self.lstm.weight_ih_l0.data[2*NBHIDDENNEUR + ii].fill_(0)
self.lstm.weight_ih_l0.data[2*NBHIDDENNEUR + ii][ii] = 10.0 # Trick to make inputs clamping on the cells, for fair comparison (need to also set input gates...)
self.lstm.bias_ih_l0.data[0*NBHIDDENNEUR+ ii]= 10.0 # bias to input gate
self.lstm.bias_ih_l0.data[1*NBHIDDENNEUR+ ii]= -1000.0 # bias to forget gate (actually a persistence gate? - sigmoid, so to set it to 0, put a massive negative bias)
self.lstm.bias_ih_l0.data[2*NBHIDDENNEUR+ ii]= 0 # bias to cell gate
self.lstm.bias_ih_l0.data[3*NBHIDDENNEUR+ ii]= 10.0 # bias to output gate; sigmoid
lstm_out, self.hidden = self.lstm(inputs, self.hidden)
#o = self.h2o(lstm_out) #.view(NBSTEPS, -1))
#outputz = F.tanh(o)
outputz = lstm_out
return outputz
#yout = F.tanh( yin.mm(self.w + torch.mul(self.alpha, hebb)) + input )
#hebb = (1 - ETA) * hebb + ETA * torch.bmm(yin.unsqueeze(2), yout.unsqueeze(1))[0] # bmm used to implement outer product with the help of unsqueeze (i.e. added empty dimensions)
#return yout, hebb
def initialZeroState(self):
return (Variable(torch.zeros(1, 1, NBHIDDENNEUR).type(ttype)),
Variable(torch.zeros(1, 1, NBHIDDENNEUR).type(ttype)))
if len(sys.argv) == 2:
RNGSEED = int(sys.argv[1])
print("Setting RNGSEED to "+str(RNGSEED))
np.set_printoptions(precision=3)
np.random.seed(RNGSEED); random.seed(RNGSEED); torch.manual_seed(RNGSEED)
net = NETWORK()
optimizer = torch.optim.Adam(net.parameters(), lr=ADAMLEARNINGRATE)
total_loss = 0.0; all_losses = []
print_every = 100
save_every = 1000
nowtime = time.time()
for numiter in range(params['nbiter']):
optimizer.zero_grad()
net.hidden = net.initialZeroState()
# Generate the inputs and target pattern for this episode
inputs, target = generateInputsAndTarget()
# Run the episode!
y = net(Variable(inputs, requires_grad=False))[-1][0]
# Compute loss for this episode (last step only)
loss = (y[:PATTERNSIZE] - Variable(target, requires_grad=False)).pow(2).sum()
#pdb.set_trace()
# Apply backpropagation to adapt basic weights and plasticity coefficients
loss.backward()
optimizer.step()
# That's it for the actual algorithm.
# Print statistics, save files
#lossnum = loss.data[0]
yo = y.data.cpu().numpy()[:PATTERNSIZE]
to = target.cpu().numpy()
z = (np.sign(yo) != np.sign(to))
lossnum = np.mean(z)
total_loss += lossnum
if (numiter+1) % print_every == 0:
print((numiter, "===="))
print(target.cpu().numpy()[:10]) # Target pattern to be reconstructed
print(inputs.cpu().numpy()[-1][0][:10]) # Last input contains the degraded pattern fed to the network at test time
print(y.data.cpu().numpy()[:10]) # Final output of the network
previoustime = nowtime
nowtime = time.time()
print("Time spent on last", print_every, "iters: ", nowtime - previoustime)
total_loss /= print_every
all_losses.append(total_loss)
print("Mean loss over last", print_every, "iters:", total_loss)
print("")
if (numiter+1) % save_every == 0:
fname = 'loss_binary_lstm_nbiter_'+str(params['nbiter'])+'_nbhneur_'+str(NBHIDDENNEUR)+'_clamp_'+str(CLAMPING)+'_lr_'+str(ADAMLEARNINGRATE)+'_prestime_'+str(PRESTIME)+'_ipd_'+str(INTERPRESDELAY)+'_rngseed_'+str(RNGSEED)+'.txt'
with open(fname, 'w') as fo:
for item in all_losses:
fo.write("%s\n" % item)
# Uber-only (comment out if not at Uber)
if checkHdfs():
print("Transfering to HDFS...")
transferFileToHdfsDir(fname, '/ailabs/tmiconi/simple/')
total_loss = 0
| # Memorization of two 50-bit binary patterns per episode, with LSTMs. Takes a very long time to learn the task, and even then imperfectly. 2050 neurons (fewer neurons = worse performance).
#
#
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from numpy import random
import torch.nn.functional as F
from torch import optim
import random
import sys
import pickle as pickle
import pdb
import time
# Uber-only (comment out if not at Uber)
import OpusHdfsCopy
from OpusHdfsCopy import transferFileToHdfsDir, checkHdfs
# Parsing command-line arguments
params = {}; params['rngseed'] = 0
parser = argparse.ArgumentParser()
parser.add_argument("--rngseed", type=int, help="random seed", default=0)
parser.add_argument("--nbiter", type=int, help="number of episodes", default=2000)
parser.add_argument("--clamp", type=int, help="whether inputs are clamping (1) or not (0)", default=1)
parser.add_argument("--nbaddneurons", type=int, help="number of additional neurons", default=0)
parser.add_argument("--lr", type=float, help="learning rate of Adam optimizer", default=3e-4)
parser.add_argument("--patternsize", type=int, help="size of the binary patterns", default=1000)
parser.add_argument("--nbpatterns", type=int, help="number of patterns to memorize", default=5)
parser.add_argument("--nbprescycles", type=int, help="number of presentation cycles", default=2)
parser.add_argument("--prestime", type=int, help="number of time steps for each pattern presentation", default=6)
parser.add_argument("--interpresdelay", type=int, help="number of time steps between each pattern presentation (with zero input)", default=4)
parser.add_argument("--type", help="network type ('plastic' or 'nonplastic')", default='plastic')
args = parser.parse_args(); argvars = vars(args); argdict = { k : argvars[k] for k in argvars if argvars[k] != None }
params.update(argdict)
PATTERNSIZE = params['patternsize']
NBHIDDENNEUR = PATTERNSIZE + params['nbaddneurons'] + 1 # NbNeur = Pattern Size + additional neurons + 1 "bias", fixed-output neuron (bias neuron not needed for this task, but included for completeness)
ETA = .01 # The "learning rate" of plastic connections; not used for LSTMs
ADAMLEARNINGRATE = params['lr']
PROBADEGRADE = .5 # Proportion of bits to zero out in the target pattern at test time
CLAMPING = params['clamp']
NBPATTERNS = params['nbpatterns'] # The number of patterns to learn in each episode
NBPRESCYCLES = params['nbprescycles'] # Number of times each pattern is to be presented
PRESTIME = params['prestime'] # Number of time steps for each presentation
PRESTIMETEST = PRESTIME # Same thing but for the final test pattern
INTERPRESDELAY = params['interpresdelay'] # Duration of zero-input interval between presentations
NBSTEPS = NBPRESCYCLES * ((PRESTIME + INTERPRESDELAY) * NBPATTERNS) + PRESTIMETEST # Total number of steps per episode
RNGSEED = params['rngseed']
#PATTERNSIZE = 50
#
## Note: For LSTM, there are PATTERNSIZE input and output neurons, and NBHIDDENNEUR neurons in the hidden recurrent layer
##NBNEUR = PATTERNSIZE # NbNeur = Pattern Size + 1 "bias", fixed-output neuron (bias neuron not needed for this task, but included for completeness)
#NBHIDDENNEUR = 2000 # 1000 takes longer
#
##ETA = .01 # The "learning rate" of plastic connections. Not used for LSTMs.
#ADAMLEARNINGRATE = 3e-5 # 1e-4 # 3e-5 works better in the long run. 1e-4 OK. 3e-4 fails.
#RNGSEED = 0
#
#PROBADEGRADE = .5 # Proportion of bits to zero out in the target pattern at test time
#NBPATTERNS = 2 # The number of patterns to learn in each episode
#NBPRESCYCLES = 1 # Number of times each pattern is to be presented
#PRESTIME = 3 # Number of time steps for each presentation
#PRESTIMETEST = 3 # Same thing but for the final test pattern
#INTERPRESDELAY = 1 # Duration of zero-input interval between presentations
#NBSTEPS = NBPRESCYCLES * ((PRESTIME + INTERPRESDELAY) * NBPATTERNS) + PRESTIMETEST # Total number of steps per episode
#ttype = torch.FloatTensor;
ttype = torch.cuda.FloatTensor;
# Generate the full list of inputs for an episode. The inputs are returned as a PyTorch tensor of shape NbSteps x 1 x NbNeur
def generateInputsAndTarget():
#inputT = np.zeros((NBSTEPS, 1, NBNEUR)) #inputTensor, initially in numpy format...
inputT = np.zeros((NBSTEPS, 1, PATTERNSIZE)) #inputTensor, initially in numpy format...
# Create the random patterns to be memorized in an episode
seedp = np.ones(PATTERNSIZE); seedp[:PATTERNSIZE//2] = -1
patterns=[]
for nump in range(NBPATTERNS):
p = np.random.permutation(seedp)
patterns.append(p)
# Now 'patterns' contains the NBPATTERNS patterns to be memorized in this episode - in numpy format
# Choosing the test pattern, partially zero'ed out, that the network will have to complete
testpattern = random.choice(patterns).copy()
#testpattern = patterns[1].copy()
preservedbits = np.ones(PATTERNSIZE); preservedbits[:int(PROBADEGRADE * PATTERNSIZE)] = 0; np.random.shuffle(preservedbits)
degradedtestpattern = testpattern * preservedbits
# Inserting the inputs in the input tensor at the proper places
for nc in range(NBPRESCYCLES):
np.random.shuffle(patterns)
for ii in range(NBPATTERNS):
for nn in range(PRESTIME):
numi =nc * (NBPATTERNS * (PRESTIME+INTERPRESDELAY)) + ii * (PRESTIME+INTERPRESDELAY) + nn
inputT[numi][0][:PATTERNSIZE] = patterns[ii][:]
# Inserting the degraded pattern
for nn in range(PRESTIMETEST):
inputT[-PRESTIMETEST + nn][0][:PATTERNSIZE] = degradedtestpattern[:]
for nn in range(NBSTEPS):
#inputT[nn][0][-1] = 1.0 # Bias neuron.
inputT[nn] *= 100.0 # Strengthen inputs
inputT = torch.from_numpy(inputT).type(ttype) # Convert from numpy to Tensor
target = torch.from_numpy(testpattern).type(ttype)
return inputT, target
class NETWORK(nn.Module):
def __init__(self):
super(NETWORK, self).__init__()
self.lstm = torch.nn.LSTM(PATTERNSIZE, NBHIDDENNEUR).cuda() #input size, hidden size
self.hidden = self.initialZeroState() # Note that the "hidden state" is a tuple (hidden state, cells state)
def forward(self, inputs,):
# Run the network over entire sequence of inputs
self.hidden = self.initialZeroState()
if CLAMPING:
# This code allows us to make the inputs on the LSTM "clamping",
# i.e. neurons that receive an input have their output clamped at
# this value, to make it similar to the RNN architectures.
#
# Note that you get worse results if you don't use it ! ("CLAMPING = 0" above) (clamping automatically reduces chance error to ~.25, since all input bits are always correct)
#
#self.lstm.weight_hh_l0.data.fill_(0)
#self.lstm.weight_ih_l0.data.fill_(0)
self.lstm.bias_hh_l0.data.fill_(0)
#self.lstm.bias_ih_l0.data.fill_(0)
for ii in range(PATTERNSIZE):
self.lstm.weight_ih_l0.data[2*NBHIDDENNEUR + ii].fill_(0)
self.lstm.weight_ih_l0.data[2*NBHIDDENNEUR + ii][ii] = 10.0 # Trick to make inputs clamping on the cells, for fair comparison (need to also set input gates...)
self.lstm.bias_ih_l0.data[0*NBHIDDENNEUR+ ii]= 10.0 # bias to input gate
self.lstm.bias_ih_l0.data[1*NBHIDDENNEUR+ ii]= -1000.0 # bias to forget gate (actually a persistence gate? - sigmoid, so to set it to 0, put a massive negative bias)
self.lstm.bias_ih_l0.data[2*NBHIDDENNEUR+ ii]= 0 # bias to cell gate
self.lstm.bias_ih_l0.data[3*NBHIDDENNEUR+ ii]= 10.0 # bias to output gate; sigmoid
lstm_out, self.hidden = self.lstm(inputs, self.hidden)
#o = self.h2o(lstm_out) #.view(NBSTEPS, -1))
#outputz = F.tanh(o)
outputz = lstm_out
return outputz
#yout = F.tanh( yin.mm(self.w + torch.mul(self.alpha, hebb)) + input )
#hebb = (1 - ETA) * hebb + ETA * torch.bmm(yin.unsqueeze(2), yout.unsqueeze(1))[0] # bmm used to implement outer product with the help of unsqueeze (i.e. added empty dimensions)
#return yout, hebb
def initialZeroState(self):
return (Variable(torch.zeros(1, 1, NBHIDDENNEUR).type(ttype)),
Variable(torch.zeros(1, 1, NBHIDDENNEUR).type(ttype)))
if len(sys.argv) == 2:
RNGSEED = int(sys.argv[1])
print("Setting RNGSEED to "+str(RNGSEED))
np.set_printoptions(precision=3)
np.random.seed(RNGSEED); random.seed(RNGSEED); torch.manual_seed(RNGSEED)
net = NETWORK()
optimizer = torch.optim.Adam(net.parameters(), lr=ADAMLEARNINGRATE)
total_loss = 0.0; all_losses = []
print_every = 100
save_every = 1000
nowtime = time.time()
for numiter in range(params['nbiter']):
optimizer.zero_grad()
net.hidden = net.initialZeroState()
# Generate the inputs and target pattern for this episode
inputs, target = generateInputsAndTarget()
# Run the episode!
y = net(Variable(inputs, requires_grad=False))[-1][0]
# Compute loss for this episode (last step only)
loss = (y[:PATTERNSIZE] - Variable(target, requires_grad=False)).pow(2).sum()
#pdb.set_trace()
# Apply backpropagation to adapt basic weights and plasticity coefficients
loss.backward()
optimizer.step()
# That's it for the actual algorithm.
# Print statistics, save files
#lossnum = loss.data[0]
yo = y.data.cpu().numpy()[:PATTERNSIZE]
to = target.cpu().numpy()
z = (np.sign(yo) != np.sign(to))
lossnum = np.mean(z)
total_loss += lossnum
if (numiter+1) % print_every == 0:
print((numiter, "===="))
print(target.cpu().numpy()[:10]) # Target pattern to be reconstructed
print(inputs.cpu().numpy()[-1][0][:10]) # Last input contains the degraded pattern fed to the network at test time
print(y.data.cpu().numpy()[:10]) # Final output of the network
previoustime = nowtime
nowtime = time.time()
print("Time spent on last", print_every, "iters: ", nowtime - previoustime)
total_loss /= print_every
all_losses.append(total_loss)
print("Mean loss over last", print_every, "iters:", total_loss)
print("")
if (numiter+1) % save_every == 0:
fname = 'loss_binary_lstm_nbiter_'+str(params['nbiter'])+'_nbhneur_'+str(NBHIDDENNEUR)+'_clamp_'+str(CLAMPING)+'_lr_'+str(ADAMLEARNINGRATE)+'_prestime_'+str(PRESTIME)+'_ipd_'+str(INTERPRESDELAY)+'_rngseed_'+str(RNGSEED)+'.txt'
with open(fname, 'w') as fo:
for item in all_losses:
fo.write("%s\n" % item)
# Uber-only (comment out if not at Uber)
if checkHdfs():
print("Transfering to HDFS...")
transferFileToHdfsDir(fname, '/ailabs/tmiconi/simple/')
total_loss = 0
| en | 0.782191 | # Memorization of two 50-bit binary patterns per episode, with LSTMs. Takes a very long time to learn the task, and even then imperfectly. 2050 neurons (fewer neurons = worse performance). # # # Copyright (c) 2018 Uber Technologies, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Uber-only (comment out if not at Uber) # Parsing command-line arguments # NbNeur = Pattern Size + additional neurons + 1 "bias", fixed-output neuron (bias neuron not needed for this task, but included for completeness) # The "learning rate" of plastic connections; not used for LSTMs # Proportion of bits to zero out in the target pattern at test time # The number of patterns to learn in each episode # Number of times each pattern is to be presented # Number of time steps for each presentation # Same thing but for the final test pattern # Duration of zero-input interval between presentations # Total number of steps per episode #PATTERNSIZE = 50 # ## Note: For LSTM, there are PATTERNSIZE input and output neurons, and NBHIDDENNEUR neurons in the hidden recurrent layer ##NBNEUR = PATTERNSIZE # NbNeur = Pattern Size + 1 "bias", fixed-output neuron (bias neuron not needed for this task, but included for completeness) #NBHIDDENNEUR = 2000 # 1000 takes longer # ##ETA = .01 # The "learning rate" of plastic connections. Not used for LSTMs. #ADAMLEARNINGRATE = 3e-5 # 1e-4 # 3e-5 works better in the long run. 1e-4 OK. 3e-4 fails. 
#RNGSEED = 0 # #PROBADEGRADE = .5 # Proportion of bits to zero out in the target pattern at test time #NBPATTERNS = 2 # The number of patterns to learn in each episode #NBPRESCYCLES = 1 # Number of times each pattern is to be presented #PRESTIME = 3 # Number of time steps for each presentation #PRESTIMETEST = 3 # Same thing but for the final test pattern #INTERPRESDELAY = 1 # Duration of zero-input interval between presentations #NBSTEPS = NBPRESCYCLES * ((PRESTIME + INTERPRESDELAY) * NBPATTERNS) + PRESTIMETEST # Total number of steps per episode #ttype = torch.FloatTensor; # Generate the full list of inputs for an episode. The inputs are returned as a PyTorch tensor of shape NbSteps x 1 x NbNeur #inputT = np.zeros((NBSTEPS, 1, NBNEUR)) #inputTensor, initially in numpy format... #inputTensor, initially in numpy format... # Create the random patterns to be memorized in an episode # Now 'patterns' contains the NBPATTERNS patterns to be memorized in this episode - in numpy format # Choosing the test pattern, partially zero'ed out, that the network will have to complete #testpattern = patterns[1].copy() # Inserting the inputs in the input tensor at the proper places # Inserting the degraded pattern #inputT[nn][0][-1] = 1.0 # Bias neuron. # Strengthen inputs # Convert from numpy to Tensor #input size, hidden size # Note that the "hidden state" is a tuple (hidden state, cells state) # Run the network over entire sequence of inputs # This code allows us to make the inputs on the LSTM "clamping", # i.e. neurons that receive an input have their output clamped at # this value, to make it similar to the RNN architectures. # # Note that you get worse results if you don't use it ! 
("CLAMPING = 0" above) (clamping automatically reduces chance error to ~.25, since all input bits are always correct) # #self.lstm.weight_hh_l0.data.fill_(0) #self.lstm.weight_ih_l0.data.fill_(0) #self.lstm.bias_ih_l0.data.fill_(0) # Trick to make inputs clamping on the cells, for fair comparison (need to also set input gates...) # bias to input gate # bias to forget gate (actually a persistence gate? - sigmoid, so to set it to 0, put a massive negative bias) # bias to cell gate # bias to output gate; sigmoid #o = self.h2o(lstm_out) #.view(NBSTEPS, -1)) #outputz = F.tanh(o) #yout = F.tanh( yin.mm(self.w + torch.mul(self.alpha, hebb)) + input ) #hebb = (1 - ETA) * hebb + ETA * torch.bmm(yin.unsqueeze(2), yout.unsqueeze(1))[0] # bmm used to implement outer product with the help of unsqueeze (i.e. added empty dimensions) #return yout, hebb # Generate the inputs and target pattern for this episode # Run the episode! # Compute loss for this episode (last step only) #pdb.set_trace() # Apply backpropagation to adapt basic weights and plasticity coefficients # That's it for the actual algorithm. # Print statistics, save files #lossnum = loss.data[0] # Target pattern to be reconstructed # Last input contains the degraded pattern fed to the network at test time # Final output of the network # Uber-only (comment out if not at Uber) | 2.317297 | 2 |
PyYaMusic/radioDaemon.py | AlexRoar/YaMusic-Python | 2 | 6615101 | <reponame>AlexRoar/YaMusic-Python<filename>PyYaMusic/radioDaemon.py
# Copyright (c) 2019.
# Designed and codded with love by <NAME>
#
#
from PyYaMusic.radio import Radio
r = Radio('Dremov11112')
r.start() | # Copyright (c) 2019.
# Designed and codded with love by <NAME>
#
#
from PyYaMusic.radio import Radio
r = Radio('Dremov11112')
r.start() | en | 0.966504 | # Copyright (c) 2019. # Designed and codded with love by <NAME> # # | 1.243707 | 1 |
python/testData/resolve/AugmentedAfterAugmented.py | jnthn/intellij-community | 2 | 6615102 | foo = 1
foo += 1
while True:
foo += 2
# <ref>
| foo = 1
foo += 1
while True:
foo += 2
# <ref>
| none | 1 | 2.665828 | 3 | |
digsby/src/msn/oim.py | ifwe/digsby | 35 | 6615103 | from __future__ import with_statement
import re, sys, itertools
from util import threaded, traceguard, CallCounter, fmt_to_dict
from util.xml_tag import tag, post_xml
from uuid import UUID
from datetime import datetime
from email import message_from_string
from email.header import Header, decode_header
from base64 import b64decode
from common import pref
from logging import getLogger
log = getLogger('msn.oim')
import msn
import uuid
import util
from util.auxencodings import fuzzydecode
from util.Events import EventMixin, event
SOAP_NS = "http://schemas.xmlsoap.org/soap/envelope/"
POST_URL = "https://rsi.hotmail.com/rsi/rsi.asmx"
NS = "http://www.hotmail.msn.com/ws/2004/09/oim/rsi"
GET = NS + "/GetMessage"
DEL = NS + "/DeleteMessages"
META = NS + '/GetMetadata'
import msn.SOAP.services as SOAPServices
def soap(ns):
env = tag((('soap',SOAP_NS), 'Envelope'), xmlns=ns)
env += ('soap','Header'),
env += ('soap','Body'),
return env
def make_header(s):
return str(Header(s, 'utf-8'))
class OIMMessages(list):
def __init__(self, acct, t):
log.info('OIMMessages created')
self.acct = acct
list.__init__(self)
if t is None:
# SOAPRequest the XML data and store it in messages
messages = None
else:
messages = t.M
self.msginit(messages)
def msginit(self, meta):
if meta is None:
return self.request_meta()
if not meta:
return
if type(meta) is tag:
messages = [meta]
else:
messages = meta
del self[:]
for message in messages:
with traceguard:
self.append(OIM(self.acct, message))
self.get_messages(pref('msn.oim.markasread',False), True)
def get_messages(self, markread=False, delete=True):
for oim in self:
callback = lambda _oim=oim: (self.received(_oim), log.info_s('Received: %r', oim))
oim.get_message(markread=markread, success=callback, error=self.get_message_error)
if False: #delete:
self.delete_messages()
def get_message_error(self, e):
log.info("Error getting message %r", e)
try:
log.info('\t%r', e.body.getvalue())
except:
pass
def received(self, oim):
log.info('Received OIM (%d/%d): %r', len(filter(None, [x.received for x in self])), len(self), oim)
if all(x.received for x in self):
log.info('Received all OIMs. Telling acct')
self.acct.received_oims(list(sorted(self)))
def delete_messages(self):
rsi = self.acct.getService(SOAPServices.AppIDs.RSIService)
rsi.DeleteMessages(message_ids = [oim.id for oim in self],
success = self.delete_success)
def delete_success(self, response):
fault = response._findOne('Fault')
if response._find('DeleteMessagesResponse'):
for oim in self:
oim.deleted = True
log.info('OIMs deleted from server')
elif fault:
log.info('OIMs were not deleted. Fault occurred: %r', fault._to_xml(pretty=False))
def request_meta(self):
rsi = self.acct.getService(SOAPServices.AppIDs.RSIService)
rsi.GetMetadata(success = lambda response: self.msginit(response.MD.M))
class OIM(object):
time_re = re.compile('(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})\.(\d{3})(Z?)')
def __init__(self, acct, m_tag):
'''
http://msnpiki.msnfanatic.com/index.php/MSNP13:Offline_IM
* T: Unknown, but has so far only been set to 11.
* S: Unknown, but has so far only been set to 6.
* RT: The date/time stamp for when the message was received by the server.
This stamp can be used to sort the message in the proper order,
although you are recommended to use a different method instead
which will be explained later.
* RS: Unknown, but most likely is set to 1 if the message has been read
before ("Read Set").
* SZ: The size of the message, including headers
* E: The e-mail address of the sender
* I: This is the ID of the message, which should be used later on to retrieve
the message. Note that the ID is a GUID in the form
XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX. It was previously (the change
was first noticed in March 2007) in the format of
"MSGunix-timestamp.millseconds" (for example MSG1132093467.11) and
the Message ID format could change again anytime.
* F: Unknown, but has so far only been observed as either a GUID with a
single 9 at the end, or as ".!!OIM" (in case you are already online
when receiving the notification).
* N: This field contains the friendlyname of the person, wrapped in a special
encoding. This encoding is defined in RFC 2047, but to get you started
there is a quick overview of the format below (see #Field_encoding).
You are recommended however to implement a fully able e-mail parser to
handle OIMs!
o Note! When this field is found in a non-initial notification it will
contain a space in the data field. You must filter this space (trim
the string) in order to correctly decode this field!
* SU: Unknown, has only been observed to contain one space.
Example:
<M>
<T>11</T>
<S>6</S>
<RT>2007-05-14T15:52:53.377Z</RT>
<RS>0</RS>
<SZ>950</SZ>
<E><EMAIL></E>
<I>08CBD8BE-9972-433C-A9DA-84A0A725ABFA</I>
<F>00000000-0000-0000-0000-000000000009</F>
<N>=?utf-8?B?QWFyb24=?=</N>
</M>
'''
self.acct = acct
self.size = int(str(m_tag.SZ))
self.email = str(m_tag.E)
self.name = u''
for val, encoding in decode_header(m_tag.N.text.strip()):
self.name += fuzzydecode(val, encoding)
try:
self.time = self.parse_time(str(m_tag.RT))
except Exception:
self.time = None
self.id = UUID(str(m_tag.I))
self.msg = ''
self.deleted = False
self._had_error = False
self.received = False
self.runid = None
self.seqnum = 0
log.info_s('%r created', self)
@util.callbacks.callsback
def get_message(self, markread=False, callback=None):
rsi = self.acct.getService(SOAPServices.AppIDs.RSIService)
rsi.GetMessage(client = self.acct, message_id = self.id, markread = markread,
success = lambda resp: (self.parse_msg(resp), callback.success()),
error = callback.error)
def parse_msg(self, response):
log.debug('Parsing this OIM: %r',response._to_xml(pretty=False))
self._content = (response.Body.GetMessageResponse.GetMessageResult._cdata.strip()).encode('utf-8')
fault = response._findOne('Fault')
if fault:
log.error('Error retrieving message (%r): %r', self, fault._to_xml(pretty=False))
self._had_error = True
return
self._msgobj = message_from_string(self._content)
oim_proxy = self._msgobj.get('X-OIMProxy', None)
if oim_proxy == 'MOSMS': # MObile SMS
self._parse_mobile_oim()
payload = self._msgobj
# rfc822 messages have a pretty bad API. We call get_payload(0) on it (to get the first part of a multipart message
# as long as it continues to work. When we get a TypeError, it's because it tried call something on a string (the
# real content) instead of a list (which is what there is when the message is_multipart()). By the end of this loop
# payload will be the our rfc822 object that has the *real* message as it's payload.
while True:
try:
payload = payload.get_payload(0)
except TypeError:
break
msgtext = payload.get_payload()
charset = payload.get_content_charset() or ''
msgtext = msgtext.strip()
msgtext = msgtext.decode('base64')
msgtext = msgtext.decode('fuzzy %s' % charset)
self.msg = msgtext
self.received = True
self.runid = self._msgobj.get('X-OIM-Run-Id', None)
if self.runid is not None:
self.runid = uuid.UUID(self.runid)
try:
self.seqnum = int(self._msgobj.get('X-OIM-Sequence-Num', '0'))
except ValueError:
self.seqnum = 0
newtime = self._msgobj.get('X-OriginalArrivalTime', self.time)
if isinstance(newtime, basestring):
try:
timestr = newtime.split(' (UTC) ')[0]
# ex: '27 Feb 2008 23:20:21.0425'...cut off the last 2 digits since datetime doesnt support that resolution
timestr = timestr[:-2]
dt = datetime.strptime(timestr, '%d %b %Y %H:%M:%S.%f')
self.time = dt
except Exception:
import traceback;traceback.print_exc()
log.error('Error parsing time: %r', newtime)
log.debug('\t\tMessage successfully parsed')
return self.msg
def _parse_mobile_oim(self):
self.name = self.email = (self._msgobj.get('From', self.name)).strip('<>')
def parse_time(self, timestr):
yr, mo, da, hr, mi, se, ms, tz = self.time_re.search(timestr).groups()
args = map(int, (yr,mo,da,hr,mi,se,ms))
args[-1] = args[-1] * 1000
if not tz:
log.warning_s('no time zone for %r', self)
return datetime(*args)
def __repr__(self):
return '<OfflineIM from %r (%s) sent at %s%s>' % (self.name, self.email, self.time,
': %r'%(self.msg) if self.msg else '')
def __cmp__(self, other):
try:
return cmp((self.time, self.runid, self.seqnum), (other.time, other.runid, other.seqnum))
except Exception:
return -1
class OIMExceptions:
AuthFailed = 'AuthenticationFailed'
class OfflineSBAdapter(EventMixin):
    '''
    Adapter that presents the MSN Offline IM (OIM) store web service with
    the same event interface as a live switchboard conversation, so the UI
    can "send" messages to an offline buddy transparently.

    Messages are posted as SOAP envelopes to POST_URL; on an
    AuthenticationFailed fault the adapter re-authenticates (lockkey and/or
    tweener ticket) and retries the send.
    '''
    events = EventMixin.events | set ((
        'on_buddy_join',
        'on_buddy_leave',
        'on_buddy_timeout',
        'on_conn_success',
        'on_authenticate',
        'disconnect',
        'contact_alias',
        'needs_auth',
        'recv_error',
        'recv_text_msg',
        'send_text_msg',
        'typing_info',
        'recv_action',
        'recv_p2p_msg',
        'transport_error',
        ))
    POST_URL = "https://ows.messenger.msn.com/OimWS/oim.asmx"
    #SOAP_ACT = "http://messenger.msn.com/ws/2004/09/oim/Store"
    SOAP_ACT = 'http://messenger.live.com/ws/2006/09/oim/Store2'
    OIM_NS = ('oim',"http://messenger.msn.com/ws/2004/09/oim/")
    WSRM_NS = ('wsrm',"http://schemas.xmlsoap.org/ws/2003/03/rm")
    WSUTIL_NS = ('wsutil',"http://schemas.xmlsoap.org/ws/2002/07/utility")
    # Don't switch this to util.net.user_agent()
    USER_AGENT= 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Messenger (BETA) 8.0.0328)'
    CLIENT_STR = '8.1.0178'
    def __init__(self, client, buddy):
        '''client: the MSN protocol/client object; buddy: the offline recipient.'''
        EventMixin.__init__(self)
        self.lockkey = ''
        self.buildver = 'Digsby %r' % sys.REVISION
        # run_id groups all messages sent in this "session" (X-OIM-Run-Id).
        self.run_id = str(uuid.uuid4()).upper()
        # msgnum is the per-run sequence number; bumped after each ack'd send.
        self.msgnum = 1
        self.client = client
        self.buddy = buddy
        self.version = self.client.version
        self._closed = False
        self._connected = False
    @property
    def appid(self):
        return self.client.appid
    @property
    def appcode(self):
        return self.client.appcode
    def get_token(self):
        # Passport ticket used in the SOAP 'Ticket' header.
        #return self.client.get_token('messenger.msn.com')
        return self.client.get_token('messengersecure.live.com')
    def set_token(self, newtoken):
        #return self.client.set_token('messenger.msn.com', newtoken)
        return self.client.set_token('messengersecure.live.com', newtoken)
    token = property(get_token, set_token)
    @property
    def self_buddy(self):
        return self.client.self_buddy
    def invite(self, name):
        # Mimic a switchboard invite; the buddy "joins" immediately since
        # there is no real conversation to set up.
        self.on_buddy_join(name)
    @event
    def on_buddy_join(self, name):
        "the buddy named 'name' has joined"
    def connected(self):
        return self._connected
    @util.callbacks.callsback
    def connect(self, callback):
        # No network setup needed -- the OIM service is stateless HTTP,
        # so just report success and fire the conversation event.
        log.info('Connecting OfflineMessageSender')
        log.info('OfflineSBAdapter "connected"')
        self.event('on_conn_success', self)
        self._connected = True
        callback.success()
    @util.callbacks.callsback
    def send_message(self, fmsg, callback=None):
        '''
        Post one offline message (fmsg, a formatted message object) to the
        OIM store. Retries once after re-authentication on an auth fault.
        '''
        text = fmsg.format_as('plaintext')
        log.info('OfflineSBAdapter send_message: %r', text)
        # Build the SOAP envelope: From/To/Ticket headers plus a WS-RM
        # sequence header carrying the per-run message number.
        env = soap(self.OIM_NS[1])
        env.Header += tag('From',
                          memberName=self.self_buddy.name,
                          #friendlyName=make_header(self.self_buddy.remote_alias),
                          proxy='MSNMSGR',
                          msnpVer=self.version,
                          buildVer=self.CLIENT_STR)
        env.Header += tag('To',memberName=self.buddy.name)
        env.Header += tag('Ticket',
                          passport=self.token.encode('xml'),
                          appid=self.appid,
                          lockkey = self.lockkey,
                          )
        env.Header += (tag((self.WSRM_NS, 'Sequence'))
                       (tag((self.WSUTIL_NS, 'Identifier'), 'http://messenger.msn.com'),
                        tag('MessageNumber',self.msgnum))
                       )
        env.Body += tag('MessageType','text')
        env.Body += tag('Content',self._build_message(text))
        self.event('send_text_msg', text)
        def post_success(result):
            # Inspect the SOAP response: a Fault means failure (possibly
            # recoverable via re-auth); a SequenceAcknowledgment means sent.
            log.info('Post result: %r', result._to_xml(pretty=False))
            fault = result._findOne("Fault")
            if fault:
                if (OIMExceptions.AuthFailed in fault.faultcode._cdata.strip()):
                    # try authentication again...
                    self.authenticate(fault,
                                      success=lambda: self.send_message(fmsg, callback=callback),
                                      error  =lambda e,*a,**k: (callback.error(e), log.info('Error from authenticate: %r, %r', a,k))
                                      )
                else:
                    log.info('Sending message failed: %r', result._to_xml(pretty=False))
                    callback.error(result)
            elif result.Header.SequenceAcknowledgment:
                log.info('Got SequenceAcknowledgment')
                self.msgnum += 1
                callback.success()
            else:
                log.info('Unknown response from posting OIM: %r', result._to_xml(pretty=False))
        def post_error(exception):
            log.info('Post exception: %r, %r, %r', type(exception), (exception._to_xml(pretty=False) if hasattr(exception, '_to_xml') else ''), vars(exception))
            callback.error(exception)
        self.post(env, success=post_success, error=post_error)
    @util.callbacks.callsback
    def authenticate(self, fault, callback=None):
        '''
        Re-authenticate in response to a SOAP auth fault. The fault may
        demand a new lockkey (challenge/response) and/or a new tweener
        (passport) ticket; callback.success fires only after both are done.
        '''
        lockcode = fault.detail.LockKeyChallenge._cdata.strip()
        twnchal = fault.detail.TweenerChallenge._cdata.strip()
        if not (lockcode or twnchal):
            #assert lockcode or twnchal, (lockcode, twnchal, t._to_xml())
            callback.error(fault)
            return
        log.info('OIM LockKey=%r, TweenerChallenge=%r', lockcode, twnchal)
        if twnchal:
            self.token = ''
        if lockcode:
            self.lockkey = ''
        # Don't do this 'til we have both lockkey and tweener ticket
        success = util.CallCounter(2, callback.success)
        if lockcode:
            log.info('Making lockkey from LockKeyChallenge')
            self.lockkey = self.client.ns._challenge_response(lockcode, self.appcode)
            success()
            #env.Header.Ticket['lockkey'] = self.lockkey
        else:
            # knock the callcounter down one anyway
            success()
        if twnchal:
            log.info('Requesting tweener authentication with TweenerChallenge')
            def set_ticket(tck):
                log.info('Got tweener ticket. Setting it on protocol and calling success()')
                self.token = tck.decode('xml')
                success()
            import mail.passport
            mail.passport.do_tweener_auth_3(self.client.username, self.client.password,
                                            (twnchal,), success = set_ticket, error=callback.error)
        else:
            # knock the callcounter down one anyway. this will definitely call callback.success if we get here.
            success()
    @util.callbacks.callsback
    def post(self, env, callback=None):
        # HTTP POST of the SOAP envelope to the OIM store endpoint.
        post_xml(self.POST_URL, env,
                 callback=callback,
                 Accept='*/*',
                 SOAPAction=self.SOAP_ACT,
                 ContentType='text/xml; charset=utf-8',
                 **{'User-Agent':self.USER_AGENT})
    def _build_message(self, msg):
        # Wrap the plaintext in the MIME envelope the OIM service expects:
        # base64-encoded UTF-8 body plus run-id/sequence headers that the
        # receiver uses to order messages (see OIM.parse_msg).
        return '\r\n'.join([
            'MIME-Version: 1.0',
            'Content-Type: text/plain; charset=UTF-8',
            'Content-Transfer-Encoding: base64',
            'X-OIM-Message-Type: OfflineMessage',
            'X-OIM-Run-Id: {%s}' % self.run_id,
            'X-OIM-Sequence-Num: %d' % self.msgnum,
            '',
            msg.encode('utf-8').encode('base64'),
            ])
    def leave(self):
        # Tear down the pseudo-conversation; no network action required.
        self._closed = True
        self._connected = False
# ------------------------------------------------------------------------------
| from __future__ import with_statement
import re, sys, itertools
from util import threaded, traceguard, CallCounter, fmt_to_dict
from util.xml_tag import tag, post_xml
from uuid import UUID
from datetime import datetime
from email import message_from_string
from email.header import Header, decode_header
from base64 import b64decode
from common import pref
from logging import getLogger
log = getLogger('msn.oim')
import msn
import uuid
import util
from util.auxencodings import fuzzydecode
from util.Events import EventMixin, event
SOAP_NS = "http://schemas.xmlsoap.org/soap/envelope/"
POST_URL = "https://rsi.hotmail.com/rsi/rsi.asmx"
NS = "http://www.hotmail.msn.com/ws/2004/09/oim/rsi"
GET = NS + "/GetMessage"
DEL = NS + "/DeleteMessages"
META = NS + '/GetMetadata'
import msn.SOAP.services as SOAPServices
def soap(ns):
    # Build a bare SOAP envelope with empty Header and Body children in the
    # default namespace *ns*. Appending a (prefix, name) tuple to a tag
    # creates an empty child element with that namespace prefix.
    env = tag((('soap',SOAP_NS), 'Envelope'), xmlns=ns)
    env += ('soap','Header'),
    env += ('soap','Body'),
    return env
def make_header(s):
    '''Encode *s* as an RFC 2047 (UTF-8) message-header string.'''
    header = Header(s, 'utf-8')
    return str(header)
class OIMMessages(list):
    '''
    The collection of pending offline messages for an account.

    Built from the <MD> metadata delivered in the MSN notification payload
    (or fetched from the RSI service when absent); each <M> entry becomes an
    OIM. Messages are then fetched individually and, once all have arrived,
    handed to the account sorted in conversational order.
    '''
    def __init__(self, acct, t):
        '''acct: owning MSN account; t: parsed metadata tag, or None to fetch it.'''
        log.info('OIMMessages created')
        self.acct = acct
        list.__init__(self)
        if t is None:
            # SOAPRequest the XML data and store it in messages
            messages = None
        else:
            messages = t.M
        self.msginit(messages)
    def msginit(self, meta):
        '''Populate self from metadata; None triggers a GetMetadata request.'''
        if meta is None:
            return self.request_meta()
        if not meta:
            # Empty metadata: nothing pending.
            return
        # A single <M> element parses as a lone tag rather than a list.
        if type(meta) is tag:
            messages = [meta]
        else:
            messages = meta
        del self[:]
        for message in messages:
            with traceguard:
                self.append(OIM(self.acct, message))
        self.get_messages(pref('msn.oim.markasread',False), True)
    def get_messages(self, markread=False, delete=True):
        '''Fetch the body of every OIM; self.received fires per message.'''
        for oim in self:
            # Default-arg binding captures the current oim for the callback.
            callback = lambda _oim=oim: (self.received(_oim), log.info_s('Received: %r', oim))
            oim.get_message(markread=markread, success=callback, error=self.get_message_error)
        # Server-side deletion is currently disabled.
        if False: #delete:
            self.delete_messages()
    def get_message_error(self, e):
        '''Log a failed GetMessage; best effort, never raises.'''
        log.info("Error getting message %r", e)
        try:
            log.info('\t%r', e.body.getvalue())
        except:
            pass
    def received(self, oim):
        '''Called as each OIM arrives; notify the account once all are in.'''
        log.info('Received OIM (%d/%d): %r', len(filter(None, [x.received for x in self])), len(self), oim)
        if all(x.received for x in self):
            log.info('Received all OIMs. Telling acct')
            self.acct.received_oims(list(sorted(self)))
    def delete_messages(self):
        '''Ask the RSI service to delete all of these messages server-side.'''
        rsi = self.acct.getService(SOAPServices.AppIDs.RSIService)
        rsi.DeleteMessages(message_ids = [oim.id for oim in self],
                           success = self.delete_success)
    def delete_success(self, response):
        '''Mark OIMs deleted if the SOAP response confirms; log faults.'''
        fault = response._findOne('Fault')
        if response._find('DeleteMessagesResponse'):
            for oim in self:
                oim.deleted = True
            log.info('OIMs deleted from server')
        elif fault:
            log.info('OIMs were not deleted. Fault occurred: %r', fault._to_xml(pretty=False))
    def request_meta(self):
        '''Fetch OIM metadata from the RSI service and re-run msginit.'''
        rsi = self.acct.getService(SOAPServices.AppIDs.RSIService)
        rsi.GetMetadata(success = lambda response: self.msginit(response.MD.M))
class OIM(object):
    '''A single Offline Instant Message, built from one <M> metadata entry
    and later filled in by fetching its MIME body from the RSI service.'''
    # Matches RT timestamps like '2007-05-14T15:52:53.377Z' (millisecond precision).
    time_re = re.compile('(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})\.(\d{3})(Z?)')
    def __init__(self, acct, m_tag):
        '''
        http://msnpiki.msnfanatic.com/index.php/MSNP13:Offline_IM
        * T: Unknown, but has so far only been set to 11.
        * S: Unknown, but has so far only been set to 6.
        * RT: The date/time stamp for when the message was received by the server.
             This stamp can be used to sort the message in the proper order,
             although you are recommended to use a different method instead
             which will be explained later.
        * RS: Unknown, but most likely is set to 1 if the message has been read
             before ("Read Set").
        * SZ: The size of the message, including headers
        * E: The e-mail address of the sender
        * I: This is the ID of the message, which should be used later on to retrieve
             the message. Note that the ID is a GUID in the form
             XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX. It was previously (the change
             was first noticed in March 2007) in the format of
             "MSGunix-timestamp.millseconds" (for example MSG1132093467.11) and
             the Message ID format could change again anytime.
        * F: Unknown, but has so far only been observed as either a GUID with a
             single 9 at the end, or as ".!!OIM" (in case you are already online
             when receiving the notification).
        * N: This field contains the friendlyname of the person, wrapped in a special
             encoding. This encoding is defined in RFC 2047, but to get you started
             there is a quick overview of the format below (see #Field_encoding).
             You are recommended however to implement a fully able e-mail parser to
             handle OIMs!
             o Note! When this field is found in a non-initial notification it will
               contain a space in the data field. You must filter this space (trim
               the string) in order to correctly decode this field!
        * SU: Unknown, has only been observed to contain one space.

        Example:
        <M>
            <T>11</T>
            <S>6</S>
            <RT>2007-05-14T15:52:53.377Z</RT>
            <RS>0</RS>
            <SZ>950</SZ>
            <E><EMAIL></E>
            <I>08CBD8BE-9972-433C-A9DA-84A0A725ABFA</I>
            <F>00000000-0000-0000-0000-000000000009</F>
            <N>=?utf-8?B?QWFyb24=?=</N>
        </M>
        '''
        self.acct = acct
        # SZ: message size in bytes, including headers.
        self.size = int(str(m_tag.SZ))
        # E: sender's email address.
        self.email = str(m_tag.E)
        # N: RFC 2047-encoded friendly name; may consist of several encoded words.
        self.name = u''
        for val, encoding in decode_header(m_tag.N.text.strip()):
            self.name += fuzzydecode(val, encoding)
        try:
            self.time = self.parse_time(str(m_tag.RT))
        except Exception:
            self.time = None
        # I: message GUID, used later to fetch/delete the message.
        self.id = UUID(str(m_tag.I))
        self.msg = ''
        self.deleted = False
        self._had_error = False
        self.received = False
        # Filled in by parse_msg from the MIME headers.
        self.runid = None
        self.seqnum = 0
        log.info_s('%r created', self)
    @util.callbacks.callsback
    def get_message(self, markread=False, callback=None):
        '''Fetch this message's MIME body via the RSI service, then parse it.'''
        rsi = self.acct.getService(SOAPServices.AppIDs.RSIService)
        rsi.GetMessage(client = self.acct, message_id = self.id, markread = markread,
                       success = lambda resp: (self.parse_msg(resp), callback.success()),
                       error = callback.error)
    def parse_msg(self, response):
        '''
        Extract the message text plus run-id/sequence/arrival-time headers
        from the GetMessage SOAP response. On success, sets self.msg and
        self.received; on a SOAP fault, sets self._had_error instead.
        '''
        log.debug('Parsing this OIM: %r',response._to_xml(pretty=False))
        self._content = (response.Body.GetMessageResponse.GetMessageResult._cdata.strip()).encode('utf-8')
        fault = response._findOne('Fault')
        if fault:
            log.error('Error retrieving message (%r): %r', self, fault._to_xml(pretty=False))
            self._had_error = True
            return
        self._msgobj = message_from_string(self._content)
        oim_proxy = self._msgobj.get('X-OIMProxy', None)
        if oim_proxy == 'MOSMS': # MObile SMS
            self._parse_mobile_oim()
        payload = self._msgobj
        # rfc822 messages have a pretty bad API. We call get_payload(0) on it (to get the first part of a multipart message
        # as long as it continues to work. When we get a TypeError, it's because it tried call something on a string (the
        # real content) instead of a list (which is what there is when the message is_multipart()). By the end of this loop
        # payload will be the our rfc822 object that has the *real* message as it's payload.
        while True:
            try:
                payload = payload.get_payload(0)
            except TypeError:
                break
        # Body is base64-encoded text in the declared charset; 'fuzzy'
        # decoding tolerates mis-declared or missing charsets.
        msgtext = payload.get_payload()
        charset = payload.get_content_charset() or ''
        msgtext = msgtext.strip()
        msgtext = msgtext.decode('base64')
        msgtext = msgtext.decode('fuzzy %s' % charset)
        self.msg = msgtext
        self.received = True
        # Run-id and sequence number let us order messages from one session.
        self.runid = self._msgobj.get('X-OIM-Run-Id', None)
        if self.runid is not None:
            self.runid = uuid.UUID(self.runid)
        try:
            self.seqnum = int(self._msgobj.get('X-OIM-Sequence-Num', '0'))
        except ValueError:
            self.seqnum = 0
        # Prefer the precise original arrival time header when present.
        newtime = self._msgobj.get('X-OriginalArrivalTime', self.time)
        if isinstance(newtime, basestring):
            try:
                timestr = newtime.split(' (UTC) ')[0]
                # ex: '27 Feb 2008 23:20:21.0425'...cut off the last 2 digits since datetime doesnt support that resolution
                timestr = timestr[:-2]
                dt = datetime.strptime(timestr, '%d %b %Y %H:%M:%S.%f')
                self.time = dt
            except Exception:
                import traceback;traceback.print_exc()
                log.error('Error parsing time: %r', newtime)
        log.debug('\t\tMessage successfully parsed')
        return self.msg
    def _parse_mobile_oim(self):
        # Mobile (SMS) OIMs carry the sender only in the RFC822 'From' header,
        # formatted as '<address>'; use it for both display name and email.
        self.name = self.email = (self._msgobj.get('From', self.name)).strip('<>')
    def parse_time(self, timestr):
        # Parse an RT timestamp into a naive datetime; the regex captures
        # milliseconds, which datetime() wants as microseconds.
        yr, mo, da, hr, mi, se, ms, tz = self.time_re.search(timestr).groups()
        args = map(int, (yr,mo,da,hr,mi,se,ms))
        args[-1] = args[-1] * 1000
        if not tz:
            # Server timestamps are expected to carry the 'Z' (UTC) suffix.
            log.warning_s('no time zone for %r', self)
        return datetime(*args)
    def __repr__(self):
        # The message text is appended only once it has been fetched.
        return '<OfflineIM from %r (%s) sent at %s%s>' % (self.name, self.email, self.time,
                                                          ': %r'%(self.msg) if self.msg else '')
    def __cmp__(self, other):
        # Order OIMs by (receive time, sending session id, sequence number)
        # so sorted() yields them in conversational order. (Python 2 only.)
        try:
            return cmp((self.time, self.runid, self.seqnum), (other.time, other.runid, other.seqnum))
        except Exception:
            # 'other' may not be an OIM, or fields may be None; sort self first.
            return -1
class OIMExceptions:
    # SOAP fault code fragment returned by the OIM store service when the
    # passport ticket / lockkey is rejected (see OfflineSBAdapter.authenticate).
    AuthFailed = 'AuthenticationFailed'
class OfflineSBAdapter(EventMixin):
    '''
    Adapter that presents the MSN Offline IM (OIM) store web service with
    the same event interface as a live switchboard conversation, so the UI
    can "send" messages to an offline buddy transparently.

    Messages are posted as SOAP envelopes to POST_URL; on an
    AuthenticationFailed fault the adapter re-authenticates (lockkey and/or
    tweener ticket) and retries the send.
    '''
    events = EventMixin.events | set ((
        'on_buddy_join',
        'on_buddy_leave',
        'on_buddy_timeout',
        'on_conn_success',
        'on_authenticate',
        'disconnect',
        'contact_alias',
        'needs_auth',
        'recv_error',
        'recv_text_msg',
        'send_text_msg',
        'typing_info',
        'recv_action',
        'recv_p2p_msg',
        'transport_error',
        ))
    POST_URL = "https://ows.messenger.msn.com/OimWS/oim.asmx"
    #SOAP_ACT = "http://messenger.msn.com/ws/2004/09/oim/Store"
    SOAP_ACT = 'http://messenger.live.com/ws/2006/09/oim/Store2'
    OIM_NS = ('oim',"http://messenger.msn.com/ws/2004/09/oim/")
    WSRM_NS = ('wsrm',"http://schemas.xmlsoap.org/ws/2003/03/rm")
    WSUTIL_NS = ('wsutil',"http://schemas.xmlsoap.org/ws/2002/07/utility")
    # Don't switch this to util.net.user_agent()
    USER_AGENT= 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Messenger (BETA) 8.0.0328)'
    CLIENT_STR = '8.1.0178'
    def __init__(self, client, buddy):
        '''client: the MSN protocol/client object; buddy: the offline recipient.'''
        EventMixin.__init__(self)
        self.lockkey = ''
        self.buildver = 'Digsby %r' % sys.REVISION
        # run_id groups all messages sent in this "session" (X-OIM-Run-Id).
        self.run_id = str(uuid.uuid4()).upper()
        # msgnum is the per-run sequence number; bumped after each ack'd send.
        self.msgnum = 1
        self.client = client
        self.buddy = buddy
        self.version = self.client.version
        self._closed = False
        self._connected = False
    @property
    def appid(self):
        return self.client.appid
    @property
    def appcode(self):
        return self.client.appcode
    def get_token(self):
        # Passport ticket used in the SOAP 'Ticket' header.
        #return self.client.get_token('messenger.msn.com')
        return self.client.get_token('messengersecure.live.com')
    def set_token(self, newtoken):
        #return self.client.set_token('messenger.msn.com', newtoken)
        return self.client.set_token('messengersecure.live.com', newtoken)
    token = property(get_token, set_token)
    @property
    def self_buddy(self):
        return self.client.self_buddy
    def invite(self, name):
        # Mimic a switchboard invite; the buddy "joins" immediately since
        # there is no real conversation to set up.
        self.on_buddy_join(name)
    @event
    def on_buddy_join(self, name):
        "the buddy named 'name' has joined"
    def connected(self):
        return self._connected
    @util.callbacks.callsback
    def connect(self, callback):
        # No network setup needed -- the OIM service is stateless HTTP,
        # so just report success and fire the conversation event.
        log.info('Connecting OfflineMessageSender')
        log.info('OfflineSBAdapter "connected"')
        self.event('on_conn_success', self)
        self._connected = True
        callback.success()
    @util.callbacks.callsback
    def send_message(self, fmsg, callback=None):
        '''
        Post one offline message (fmsg, a formatted message object) to the
        OIM store. Retries once after re-authentication on an auth fault.
        '''
        text = fmsg.format_as('plaintext')
        log.info('OfflineSBAdapter send_message: %r', text)
        # Build the SOAP envelope: From/To/Ticket headers plus a WS-RM
        # sequence header carrying the per-run message number.
        env = soap(self.OIM_NS[1])
        env.Header += tag('From',
                          memberName=self.self_buddy.name,
                          #friendlyName=make_header(self.self_buddy.remote_alias),
                          proxy='MSNMSGR',
                          msnpVer=self.version,
                          buildVer=self.CLIENT_STR)
        env.Header += tag('To',memberName=self.buddy.name)
        env.Header += tag('Ticket',
                          passport=self.token.encode('xml'),
                          appid=self.appid,
                          lockkey = self.lockkey,
                          )
        env.Header += (tag((self.WSRM_NS, 'Sequence'))
                       (tag((self.WSUTIL_NS, 'Identifier'), 'http://messenger.msn.com'),
                        tag('MessageNumber',self.msgnum))
                       )
        env.Body += tag('MessageType','text')
        env.Body += tag('Content',self._build_message(text))
        self.event('send_text_msg', text)
        def post_success(result):
            # Inspect the SOAP response: a Fault means failure (possibly
            # recoverable via re-auth); a SequenceAcknowledgment means sent.
            log.info('Post result: %r', result._to_xml(pretty=False))
            fault = result._findOne("Fault")
            if fault:
                if (OIMExceptions.AuthFailed in fault.faultcode._cdata.strip()):
                    # try authentication again...
                    self.authenticate(fault,
                                      success=lambda: self.send_message(fmsg, callback=callback),
                                      error  =lambda e,*a,**k: (callback.error(e), log.info('Error from authenticate: %r, %r', a,k))
                                      )
                else:
                    log.info('Sending message failed: %r', result._to_xml(pretty=False))
                    callback.error(result)
            elif result.Header.SequenceAcknowledgment:
                log.info('Got SequenceAcknowledgment')
                self.msgnum += 1
                callback.success()
            else:
                log.info('Unknown response from posting OIM: %r', result._to_xml(pretty=False))
        def post_error(exception):
            log.info('Post exception: %r, %r, %r', type(exception), (exception._to_xml(pretty=False) if hasattr(exception, '_to_xml') else ''), vars(exception))
            callback.error(exception)
        self.post(env, success=post_success, error=post_error)
    @util.callbacks.callsback
    def authenticate(self, fault, callback=None):
        '''
        Re-authenticate in response to a SOAP auth fault. The fault may
        demand a new lockkey (challenge/response) and/or a new tweener
        (passport) ticket; callback.success fires only after both are done.
        '''
        lockcode = fault.detail.LockKeyChallenge._cdata.strip()
        twnchal = fault.detail.TweenerChallenge._cdata.strip()
        if not (lockcode or twnchal):
            #assert lockcode or twnchal, (lockcode, twnchal, t._to_xml())
            callback.error(fault)
            return
        log.info('OIM LockKey=%r, TweenerChallenge=%r', lockcode, twnchal)
        if twnchal:
            self.token = ''
        if lockcode:
            self.lockkey = ''
        # Don't do this 'til we have both lockkey and tweener ticket
        success = util.CallCounter(2, callback.success)
        if lockcode:
            log.info('Making lockkey from LockKeyChallenge')
            self.lockkey = self.client.ns._challenge_response(lockcode, self.appcode)
            success()
            #env.Header.Ticket['lockkey'] = self.lockkey
        else:
            # knock the callcounter down one anyway
            success()
        if twnchal:
            log.info('Requesting tweener authentication with TweenerChallenge')
            def set_ticket(tck):
                log.info('Got tweener ticket. Setting it on protocol and calling success()')
                self.token = tck.decode('xml')
                success()
            import mail.passport
            mail.passport.do_tweener_auth_3(self.client.username, self.client.password,
                                            (twnchal,), success = set_ticket, error=callback.error)
        else:
            # knock the callcounter down one anyway. this will definitely call callback.success if we get here.
            success()
    @util.callbacks.callsback
    def post(self, env, callback=None):
        # HTTP POST of the SOAP envelope to the OIM store endpoint.
        post_xml(self.POST_URL, env,
                 callback=callback,
                 Accept='*/*',
                 SOAPAction=self.SOAP_ACT,
                 ContentType='text/xml; charset=utf-8',
                 **{'User-Agent':self.USER_AGENT})
    def _build_message(self, msg):
        # Wrap the plaintext in the MIME envelope the OIM service expects:
        # base64-encoded UTF-8 body plus run-id/sequence headers that the
        # receiver uses to order messages (see OIM.parse_msg).
        return '\r\n'.join([
            'MIME-Version: 1.0',
            'Content-Type: text/plain; charset=UTF-8',
            'Content-Transfer-Encoding: base64',
            'X-OIM-Message-Type: OfflineMessage',
            'X-OIM-Run-Id: {%s}' % self.run_id,
            'X-OIM-Sequence-Num: %d' % self.msgnum,
            '',
            msg.encode('utf-8').encode('base64'),
            ])
    def leave(self):
        # Tear down the pseudo-conversation; no network action required.
        self._closed = True
        self._connected = False
# ------------------------------------------------------------------------------
| en | 0.861311 | # SOAPRequest the XML data and store it in messages #delete: http://msnpiki.msnfanatic.com/index.php/MSNP13:Offline_IM * T: Unknown, but has so far only been set to 11. * S: Unknown, but has so far only been set to 6. * RT: The date/time stamp for when the message was received by the server. This stamp can be used to sort the message in the proper order, although you are recommended to use a different method instead which will be explained later. * RS: Unknown, but most likely is set to 1 if the message has been read before ("Read Set"). * SZ: The size of the message, including headers * E: The e-mail address of the sender * I: This is the ID of the message, which should be used later on to retrieve the message. Note that the ID is a GUID in the form XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX. It was previously (the change was first noticed in March 2007) in the format of "MSGunix-timestamp.millseconds" (for example MSG1132093467.11) and the Message ID format could change again anytime. * F: Unknown, but has so far only been observed as either a GUID with a single 9 at the end, or as ".!!OIM" (in case you are already online when receiving the notification). * N: This field contains the friendlyname of the person, wrapped in a special encoding. This encoding is defined in RFC 2047, but to get you started there is a quick overview of the format below (see #Field_encoding). You are recommended however to implement a fully able e-mail parser to handle OIMs! o Note! When this field is found in a non-initial notification it will contain a space in the data field. You must filter this space (trim the string) in order to correctly decode this field! * SU: Unknown, has only been observed to contain one space. 
Example: <M> <T>11</T> <S>6</S> <RT>2007-05-14T15:52:53.377Z</RT> <RS>0</RS> <SZ>950</SZ> <E><EMAIL></E> <I>08CBD8BE-9972-433C-A9DA-84A0A725ABFA</I> <F>00000000-0000-0000-0000-000000000009</F> <N>=?utf-8?B?QWFyb24=?=</N> </M> # MObile SMS # rfc822 messages have a pretty bad API. We call get_payload(0) on it (to get the first part of a multipart message # as long as it continues to work. When we get a TypeError, it's because it tried call something on a string (the # real content) instead of a list (which is what there is when the message is_multipart()). By the end of this loop # payload will be the our rfc822 object that has the *real* message as it's payload. # ex: '27 Feb 2008 23:20:21.0425'...cut off the last 2 digits since datetime doesnt support that resolution #SOAP_ACT = "http://messenger.msn.com/ws/2004/09/oim/Store" # Don't switch this to util.net.user_agent() #return self.client.get_token('messenger.msn.com') #return self.client.set_token('messenger.msn.com', newtoken) #friendlyName=make_header(self.self_buddy.remote_alias), # try authentication again... #assert lockcode or twnchal, (lockcode, twnchal, t._to_xml()) # Don't do this 'til we have both lockkey and tweener ticket #env.Header.Ticket['lockkey'] = self.lockkey # knock the callcounter down one anyway # knock the callcounter down one anyway. this will definitely call callback.success if we get here. # ------------------------------------------------------------------------------ | 2.094401 | 2 |
packages-python/cactus_validator_socketio/validator-python/validator_socketio_module/IndyConnector.py | ty-lazar/cactus | 1 | 6615104 | from abc import ABCMeta, abstractmethod
import json
import time
from indy import ledger
import asyncio
from .AbstractConnector import AbstractConnector
class IndyConnector(AbstractConnector):
    """Validator connector that relays requests to a Hyperledger Indy ledger.

    Most request types are currently logging-only stubs; only
    ``execSyncFunction`` with the ``indy_ledger_submit_request`` command is
    implemented, submitting a prepared request to the Indy pool ledger.
    """
    def __init__(self, socketio, sessionid, indy_dic):
        # indy_dic is expected to contain at least 'pool_handle', an open
        # Indy pool ledger handle -- TODO confirm against the caller.
        self.moduleName = "IndyConnector"
        self.indy_dic = indy_dic
        print(f"##{self.moduleName}.__init__")
    def getValidatorInformation(self, validatorURL):
        """Get the validator information including version, name, ID, and other information"""
        # Stub: logs the call only.
        print(f"##{self.moduleName}.getValidatorInformation()")
    def sendSignedTransaction(self, signedTransaction):
        """Request a verifier to execute a ledger operation"""
        # Stub: logs the call only.
        print(f"##{self.moduleName}.sendSignedTransaction()")
    def getBalance(self, address):
        """Get balance of an account for native token on a leder"""
        # Stub: logs the call only.
        print(f"##{self.moduleName}.getBalance()")
    def execSyncFunction(self, address, funcName, args):
        """Execute a synchronous function held by a smart contract.

        Only the 'indy_ledger_submit_request' command is supported; any
        other command is logged and reported back as unknown.
        """
        print(f"##{self.moduleName}.execSyncFunction()")
        command = args['method']['command']
        if command == 'indy_ledger_submit_request':
            return self.load_schema_or_credential_definition(args['args'])
        print(f"##{self.moduleName} unknown command : {command}")
        return "unknown command."
    def load_schema_or_credential_definition(self, args):
        """Submit a prepared ledger request and return the parsed JSON response."""
        print(f"##{self.moduleName}.load_schema_or_credential_definition()")
        pool_handle = self.indy_dic['pool_handle']
        # Retry until the ledger reports data for the request (i.e. the
        # schema / credential definition has been applied).
        responseStr = self.run_coroutine_ensure_previous_request_applied(
            pool_handle, args,
            lambda response: response['result']['data'] is not None)
        # NOTE(review): if every retry fails, responseStr is None and
        # json.loads() raises TypeError -- callers should be prepared.
        response = json.loads(responseStr)
        return response
    def startMonitor(self, clientId, cb):
        """Request a validator to start monitoring ledger"""
        # Stub: logs the call only.
        print(f"##{self.moduleName}.startMonitor()")
    def stopMonitor(self, clientId):
        """Request a validator to stop monitoring ledger"""
        # Stub: logs the call only.
        print(f"##{self.moduleName}.stopMonitor()")
    def cb(self, callbackData):
        """Callback function to call when receiving data from Ledger"""
        # Stub: logs the call only.
        print(f"##{self.moduleName}.cb()")
    def nop(self):
        """Nop function for testing"""
        print(f"##{self.moduleName}.nop()")
    async def ensure_previous_request_applied(self, pool_handle, checker_request, checker):
        """Submit checker_request up to 3 times until checker() accepts the response.

        Returns the accepted response serialized as JSON, or None if no
        attempt succeeded.
        """
        for _ in range(3):
            response = json.loads(await ledger.submit_request(pool_handle, checker_request))
            try:
                if checker(response):
                    return json.dumps(response)
            except TypeError:
                # checker may index into missing/None fields; treat that as
                # "not applied yet" and retry.
                pass
            # Bug fix: use asyncio.sleep() so waiting between retries does
            # not block the running event loop (time.sleep() would stall it).
            await asyncio.sleep(5)
    def run_coroutine_ensure_previous_request_applied(self, pool_handle, checker_request, checker, loop=None):
        """Synchronously run ensure_previous_request_applied on *loop*
        (defaults to the current event loop) and return its result."""
        if loop is None:
            loop = asyncio.get_event_loop()
        results = loop.run_until_complete(self.ensure_previous_request_applied(pool_handle, checker_request, checker))
        return results
| from abc import ABCMeta, abstractmethod
import json
import time
from indy import ledger
import asyncio
from .AbstractConnector import AbstractConnector
class IndyConnector(AbstractConnector):
    """Validator connector that relays requests to a Hyperledger Indy ledger.

    Most request types are currently logging-only stubs; only
    ``execSyncFunction`` with the ``indy_ledger_submit_request`` command is
    implemented, submitting a prepared request to the Indy pool ledger.
    """
    def __init__(self, socketio, sessionid, indy_dic):
        # indy_dic is expected to contain at least 'pool_handle', an open
        # Indy pool ledger handle -- TODO confirm against the caller.
        self.moduleName = "IndyConnector"
        self.indy_dic = indy_dic
        print(f"##{self.moduleName}.__init__")
    def getValidatorInformation(self, validatorURL):
        """Get the validator information including version, name, ID, and other information"""
        # Stub: logs the call only.
        print(f"##{self.moduleName}.getValidatorInformation()")
    def sendSignedTransaction(self, signedTransaction):
        """Request a verifier to execute a ledger operation"""
        # Stub: logs the call only.
        print(f"##{self.moduleName}.sendSignedTransaction()")
    def getBalance(self, address):
        """Get balance of an account for native token on a leder"""
        # Stub: logs the call only.
        print(f"##{self.moduleName}.getBalance()")
    def execSyncFunction(self, address, funcName, args):
        """Execute a synchronous function held by a smart contract.

        Only the 'indy_ledger_submit_request' command is supported; any
        other command is logged and reported back as unknown.
        """
        print(f"##{self.moduleName}.execSyncFunction()")
        command = args['method']['command']
        if command == 'indy_ledger_submit_request':
            return self.load_schema_or_credential_definition(args['args'])
        print(f"##{self.moduleName} unknown command : {command}")
        return "unknown command."
    def load_schema_or_credential_definition(self, args):
        """Submit a prepared ledger request and return the parsed JSON response."""
        print(f"##{self.moduleName}.load_schema_or_credential_definition()")
        pool_handle = self.indy_dic['pool_handle']
        # Retry until the ledger reports data for the request (i.e. the
        # schema / credential definition has been applied).
        responseStr = self.run_coroutine_ensure_previous_request_applied(
            pool_handle, args,
            lambda response: response['result']['data'] is not None)
        # NOTE(review): if every retry fails, responseStr is None and
        # json.loads() raises TypeError -- callers should be prepared.
        response = json.loads(responseStr)
        return response
    def startMonitor(self, clientId, cb):
        """Request a validator to start monitoring ledger"""
        # Stub: logs the call only.
        print(f"##{self.moduleName}.startMonitor()")
    def stopMonitor(self, clientId):
        """Request a validator to stop monitoring ledger"""
        # Stub: logs the call only.
        print(f"##{self.moduleName}.stopMonitor()")
    def cb(self, callbackData):
        """Callback function to call when receiving data from Ledger"""
        # Stub: logs the call only.
        print(f"##{self.moduleName}.cb()")
    def nop(self):
        """Nop function for testing"""
        print(f"##{self.moduleName}.nop()")
    async def ensure_previous_request_applied(self, pool_handle, checker_request, checker):
        """Submit checker_request up to 3 times until checker() accepts the response.

        Returns the accepted response serialized as JSON, or None if no
        attempt succeeded.
        """
        for _ in range(3):
            response = json.loads(await ledger.submit_request(pool_handle, checker_request))
            try:
                if checker(response):
                    return json.dumps(response)
            except TypeError:
                # checker may index into missing/None fields; treat that as
                # "not applied yet" and retry.
                pass
            # Bug fix: use asyncio.sleep() so waiting between retries does
            # not block the running event loop (time.sleep() would stall it).
            await asyncio.sleep(5)
    def run_coroutine_ensure_previous_request_applied(self, pool_handle, checker_request, checker, loop=None):
        """Synchronously run ensure_previous_request_applied on *loop*
        (defaults to the current event loop) and return its result."""
        if loop is None:
            loop = asyncio.get_event_loop()
        results = loop.run_until_complete(self.ensure_previous_request_applied(pool_handle, checker_request, checker))
        return results
| en | 0.41864 | #{self.moduleName}.__init__") Get the validator information including version, name, ID, and other information #{self.moduleName}.getValidatorInformation()") Request a verifier to execute a ledger operation #{self.moduleName}.sendSignedTransaction()") Get balance of an account for native token on a leder #{self.moduleName}.getBalance()") Execute a synchronous function held by a smart contract #{self.moduleName}.execSyncFunction()") #{self.moduleName} unknown command : {command}") Execute a synchronous function held by a smart contract #{self.moduleName}.load_schema_or_credential_definition()") Request a validator to start monitoring ledger #{self.moduleName}.startMonitor()") Request a validator to stop monitoring ledger #{self.moduleName}.stopMonitor()") Callback function to call when receiving data from Ledger #{self.moduleName}.cb()") Nop function for testing #{self.moduleName}.nop()") | 2.573013 | 3 |
sentinel5dl/__main__.py | emissions-api/sentinel5dl | 7 | 6615105 | # -*- coding: utf-8 -*-
'''
Sentinel-5p Downloader
~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2019, The Emissions API Developers
:url: https://emissions-api.org
:license: MIT
'''
import argparse
import dateutil.parser
import certifi
import logging
import multiprocessing
import textwrap
import sentinel5dl
from sentinel5dl import search, download
PRODUCTS = (
'L1B_IR_SIR',
'L1B_IR_UVN',
'L1B_RA_BD1',
'L1B_RA_BD2',
'L1B_RA_BD3',
'L1B_RA_BD4',
'L1B_RA_BD5',
'L1B_RA_BD6',
'L1B_RA_BD7',
'L1B_RA_BD8',
'L2__AER_AI',
'L2__AER_LH',
'L2__CH4___',
'L2__CLOUD_',
'L2__CO____',
'L2__HCHO__',
'L2__NO2___',
'L2__NP_BD3',
'L2__NP_BD6',
'L2__NP_BD7',
'L2__O3_TCL',
'L2__O3____',
'L2__SO2___',
)
PRODUCTS_STR = textwrap.fill(', '.join(PRODUCTS),
subsequent_indent=' ',
initial_indent=' ')
PROCESSING_LEVELS = (
'L1B',
'L2'
)
PROCESSING_MODES = (
'Offline',
'Near real time',
'Reprocessing'
)
def is_polygon(polygon):
    '''Validate a comma-separated coordinate list and wrap it as WKT.

    :param polygon: Coordinate string of the form ``lon1 lat1, lon2 lat2, ...``
    :raises ValueError: if fewer than four points are given, a point is not a
        pair of numbers, or the first and last points differ (open polygon).
    :return: The input wrapped as a ``POLYGON((...))`` WKT string.
    '''
    points = [part.strip() for part in polygon.split(',')]
    # A closed triangle needs four points (first == last).
    if len(points) < 4:
        raise ValueError('Polygon must be at least a triangle')
    # Every point must be exactly two numeric coordinates.
    for point in points:
        coords = [float(component) for component in point.split()]
        if len(coords) != 2:
            raise ValueError('Polygon values must be pairs of numbers')
    # First and last point must coincide for a closed ring.
    if points[0] != points[-1]:
        raise ValueError('Polygon is not closed')
    return f'POLYGON(({polygon}))'
def main():
    '''Command-line entry point: parse arguments, search for matching
    Sentinel-5P products and download them in parallel.'''
    # Configure logging in the library
    logging.basicConfig()
    logger = logging.getLogger(sentinel5dl.__name__)
    logger.setLevel(logging.INFO)
    parser = argparse.ArgumentParser(
        description='Search for and download Sentinel-5P data files',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=f'AVAILABLE PRODUCTS\n{PRODUCTS_STR}'
    )
    # type= can use a callable, use that for most of this
    parser.add_argument(
        '--polygon',
        type=is_polygon,
        help='''Polygon defining an area by a set of coordinates.
                Example: 30.1 10.0, 40.0 40.1, 20 40, 10 20, 30.1 10.0'''
    )
    parser.add_argument(
        '--product',
        choices=PRODUCTS,
        metavar='PRODUCT',
        default='L2__CO____',
        help='Type of product to search for'
    )
    parser.add_argument(
        '--level',
        choices=PROCESSING_LEVELS,
        default='L2',
        help='Data processing level'
    )
    parser.add_argument(
        '--mode',
        choices=PROCESSING_MODES,
        help='Data processing mode'
    )
    parser.add_argument(
        '--begin-ts',
        default='2019-09-01T00:00:00.000Z',
        type=dateutil.parser.parse,
        help='''Timestamp specifying the earliest sensing date.
                Example: 2019-09-01T00:00:00.000Z'''
    )
    parser.add_argument(
        '--end-ts',
        default='2019-09-17T23:59:59.999Z',
        type=dateutil.parser.parse,
        help='''Timestamp specifying the latest sensing date.
                Example: 2019-09-17T23:59:59.999Z'''
    )
    parser.add_argument(
        '--use-certifi',
        action='store_true',
        help='''If a Certificate Authority (CA) bundle is not already supplied
                by your operating system, certifi provides an easy way of
                providing a cabundle.'''
    )
    parser.add_argument(
        '--worker',
        type=int,
        default=1,
        help='Number of parallel downloads',
    )
    parser.add_argument(
        'download_dir',
        metavar='download-dir',
        help='Download directory'
    )
    args = parser.parse_args()
    # Provide a Certificate Authority (CA) bundle
    if args.use_certifi:
        sentinel5dl.ca_info = certifi.where()
    # Search for Sentinel-5 products
    result = search(
        polygon=args.polygon,
        begin_ts=args.begin_ts,
        end_ts=args.end_ts,
        product=args.product,
        processing_level=args.level,
        processing_mode=args.mode
    )
    # Download found products to the download directory with number of workers
    with multiprocessing.Pool(args.worker) as p:
        # starmap unpacks each mapped item into download((product,), dir),
        # i.e. one single-product batch per worker task.
        p.starmap(download, map(
            lambda product: ((product,), args.download_dir),
            result.get('products')))
# Run the CLI when executed as a script / via `python -m`.
if __name__ == '__main__':
    main()
| # -*- coding: utf-8 -*-
'''
Sentinel-5p Downloader
~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2019, The Emissions API Developers
:url: https://emissions-api.org
:license: MIT
'''
import argparse
import dateutil.parser
import certifi
import logging
import multiprocessing
import textwrap
import sentinel5dl
from sentinel5dl import search, download
PRODUCTS = (
'L1B_IR_SIR',
'L1B_IR_UVN',
'L1B_RA_BD1',
'L1B_RA_BD2',
'L1B_RA_BD3',
'L1B_RA_BD4',
'L1B_RA_BD5',
'L1B_RA_BD6',
'L1B_RA_BD7',
'L1B_RA_BD8',
'L2__AER_AI',
'L2__AER_LH',
'L2__CH4___',
'L2__CLOUD_',
'L2__CO____',
'L2__HCHO__',
'L2__NO2___',
'L2__NP_BD3',
'L2__NP_BD6',
'L2__NP_BD7',
'L2__O3_TCL',
'L2__O3____',
'L2__SO2___',
)
PRODUCTS_STR = textwrap.fill(', '.join(PRODUCTS),
subsequent_indent=' ',
initial_indent=' ')
PROCESSING_LEVELS = (
'L1B',
'L2'
)
PROCESSING_MODES = (
'Offline',
'Near real time',
'Reprocessing'
)
def is_polygon(polygon):
'''Validate if the supplied polygon string is in the necessary format to be
used as part of a WKT polygon string.
:param polygon: Polygon string in the form of lon1 lat1, lon2 lat2, ...
:return: WKT polygon
'''
values = [value.strip() for value in polygon.split(',')]
# Polygon must be at least a triangle
if len(values) < 4:
raise ValueError('Polygon must be at least a triangle')
# Check if we got float pairs
for value in values:
if len([float(x) for x in value.split()]) != 2:
raise ValueError('Polygon values must be pairs of numbers')
# Check if we got a closed polygon
if values[0] != values[-1]:
raise ValueError('Polygon is not closed')
return f'POLYGON(({polygon}))'
def main():
# Configure logging in the library
logging.basicConfig()
logger = logging.getLogger(sentinel5dl.__name__)
logger.setLevel(logging.INFO)
parser = argparse.ArgumentParser(
description='Search for and download Sentinel-5P data files',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=f'AVAILABLE PRODUCTS\n{PRODUCTS_STR}'
)
# type= can use a callable, use that for most of this
parser.add_argument(
'--polygon',
type=is_polygon,
help='''Polygon defining an area by a set of coordinates.
Example: 30.1 10.0, 40.0 40.1, 20 40, 10 20, 30.1 10.0'''
)
parser.add_argument(
'--product',
choices=PRODUCTS,
metavar='PRODUCT',
default='L2__CO____',
help='Type of product to search for'
)
parser.add_argument(
'--level',
choices=PROCESSING_LEVELS,
default='L2',
help='Data processing level'
)
parser.add_argument(
'--mode',
choices=PROCESSING_MODES,
help='Data processing mode'
)
parser.add_argument(
'--begin-ts',
default='2019-09-01T00:00:00.000Z',
type=dateutil.parser.parse,
help='''Timestamp specifying the earliest sensing date.
Example: 2019-09-01T00:00:00.000Z'''
)
parser.add_argument(
'--end-ts',
default='2019-09-17T23:59:59.999Z',
type=dateutil.parser.parse,
help='''Timestamp specifying the latest sensing date.
Example: 2019-09-17T23:59:59.999Z'''
)
parser.add_argument(
'--use-certifi',
action='store_true',
help='''If a Certificate Authority (CA) bundle is not already supplied
by your operating system, certifi provides an easy way of
providing a cabundle.'''
)
parser.add_argument(
'--worker',
type=int,
default=1,
help='Number of parallel downloads',
)
parser.add_argument(
'download_dir',
metavar='download-dir',
help='Download directory'
)
args = parser.parse_args()
# Provide a Certificate Authority (CA) bundle
if args.use_certifi:
sentinel5dl.ca_info = certifi.where()
# Search for Sentinel-5 products
result = search(
polygon=args.polygon,
begin_ts=args.begin_ts,
end_ts=args.end_ts,
product=args.product,
processing_level=args.level,
processing_mode=args.mode
)
# Download found products to the download directory with number of workers
with multiprocessing.Pool(args.worker) as p:
p.starmap(download, map(
lambda product: ((product,), args.download_dir),
result.get('products')))
if __name__ == '__main__':
main()
| en | 0.718048 | # -*- coding: utf-8 -*- Sentinel-5p Downloader ~~~~~~~~~~~~~~~~~~~~~~ :copyright: 2019, The Emissions API Developers :url: https://emissions-api.org :license: MIT Validate if the supplied polygon string is in the necessary format to be used as part of a WKT polygon string. :param polygon: Polygon string in the form of lon1 lat1, lon2 lat2, ... :return: WKT polygon # Polygon must be at least a triangle # Check if we got float pairs # Check if we got a closed polygon # Configure logging in the library # type= can use a callable, use that for most of this Polygon defining an area by a set of coordinates. Example: 30.1 10.0, 40.0 40.1, 20 40, 10 20, 30.1 10.0 Timestamp specifying the earliest sensing date. Example: 2019-09-01T00:00:00.000Z Timestamp specifying the latest sensing date. Example: 2019-09-17T23:59:59.999Z If a Certificate Authority (CA) bundle is not already supplied by your operating system, certifi provides an easy way of providing a cabundle. # Provide a Certificate Authority (CA) bundle # Search for Sentinel-5 products # Download found products to the download directory with number of workers | 2.270593 | 2 |
modules/user_time.py | dngfx/MagicBot | 1 | 6615106 | <gh_stars>1-10
# --depends-on commands
# --depends-on location
import datetime
import enum
import pytz
from src import ModuleManager, utils
NOLOCATION_USER = "%s doesn't have a location set"
NOLOCATION_NAME = "Unknown location '%s'"
class LocationType(enum.Enum):
USER = 1
NAME = 2
class Module(ModuleManager.BaseModule):
_name = "Time"
def _find_setting(self, event):
query = None
target_user = None
if event["args"]:
query = event["args"]
if len(event["args_split"]) == 1 and event["server"].has_user_id(
event["args_split"][0]
):
target_user = event["server"].get_user(event["args_split"][0])
else:
target_user = event["user"]
if target_user:
location = target_user.get_setting("location", None)
if location:
return (LocationType.USER, target_user.nickname, location["timezone"])
if query:
location = self.exports.get_one("get-location")(query)
if location:
return (LocationType.NAME, location["name"], location["timezone"])
else:
return LocationType.NAME, event["args"], None
def _timezoned(self, dt, timezone):
dt = dt.astimezone(pytz.timezone(timezone))
utc_offset = (dt.utcoffset().total_seconds() / 60) / 60
tz = "UTC"
if not utc_offset == 0.0:
if utc_offset > 0:
tz += "+"
tz += "%g" % utc_offset
return "%s %s" % (utils.datetime.format.datetime_human(dt), tz)
@utils.hook("received.command.time")
@utils.kwarg("help", "Get the time for you or someone else")
@utils.kwarg("usage", "[nickname]")
@utils.kwarg("require_setting", "location")
@utils.kwarg("require_setting_unless", "1")
def time(self, event):
type, name, timezone = self._find_setting(event)
if not timezone == None:
human = self._timezoned(datetime.datetime.now(), timezone)
out = None
if type == LocationType.USER:
out = "Time for %s: %s" % (name, human)
else:
out = "It is %s in %s" % (human, name)
event["stdout"].write(out)
else:
out = None
if type == LocationType.USER:
out = NOLOCATION_USER
else:
out = NOLOCATION_NAME
event["stderr"].write(out % name)
@utils.export("time-localise")
def time_localise(self, user, dt):
location = user.get_setting("location", None)
timezone = "UTC"
if not location == None:
timezone = location["timezone"]
return self._timezoned(dt, timezone)
| # --depends-on commands
# --depends-on location
import datetime
import enum
import pytz
from src import ModuleManager, utils
NOLOCATION_USER = "%s doesn't have a location set"
NOLOCATION_NAME = "Unknown location '%s'"
class LocationType(enum.Enum):
USER = 1
NAME = 2
class Module(ModuleManager.BaseModule):
_name = "Time"
def _find_setting(self, event):
query = None
target_user = None
if event["args"]:
query = event["args"]
if len(event["args_split"]) == 1 and event["server"].has_user_id(
event["args_split"][0]
):
target_user = event["server"].get_user(event["args_split"][0])
else:
target_user = event["user"]
if target_user:
location = target_user.get_setting("location", None)
if location:
return (LocationType.USER, target_user.nickname, location["timezone"])
if query:
location = self.exports.get_one("get-location")(query)
if location:
return (LocationType.NAME, location["name"], location["timezone"])
else:
return LocationType.NAME, event["args"], None
def _timezoned(self, dt, timezone):
dt = dt.astimezone(pytz.timezone(timezone))
utc_offset = (dt.utcoffset().total_seconds() / 60) / 60
tz = "UTC"
if not utc_offset == 0.0:
if utc_offset > 0:
tz += "+"
tz += "%g" % utc_offset
return "%s %s" % (utils.datetime.format.datetime_human(dt), tz)
@utils.hook("received.command.time")
@utils.kwarg("help", "Get the time for you or someone else")
@utils.kwarg("usage", "[nickname]")
@utils.kwarg("require_setting", "location")
@utils.kwarg("require_setting_unless", "1")
def time(self, event):
type, name, timezone = self._find_setting(event)
if not timezone == None:
human = self._timezoned(datetime.datetime.now(), timezone)
out = None
if type == LocationType.USER:
out = "Time for %s: %s" % (name, human)
else:
out = "It is %s in %s" % (human, name)
event["stdout"].write(out)
else:
out = None
if type == LocationType.USER:
out = NOLOCATION_USER
else:
out = NOLOCATION_NAME
event["stderr"].write(out % name)
@utils.export("time-localise")
def time_localise(self, user, dt):
location = user.get_setting("location", None)
timezone = "UTC"
if not location == None:
timezone = location["timezone"]
return self._timezoned(dt, timezone) | en | 0.47962 | # --depends-on commands # --depends-on location | 2.526079 | 3 |
tools/format.py | NewLunarFire/deno | 0 | 6615107 | <filename>tools/format.py
#!/usr/bin/env python
import os
from third_party import third_party_path, fix_symlinks, google_env, clang_format_path
from util import root_path, run, find_exts
fix_symlinks()
prettier = os.path.join(third_party_path, "node_modules", "prettier",
"bin-prettier.js")
tools_path = os.path.join(root_path, "tools")
rustfmt_config = os.path.join(tools_path, "rustfmt.toml")
os.chdir(root_path)
run([clang_format_path, "-i", "-style", "Google"] +
find_exts("libdeno", ".cc", ".h"))
for fn in ["BUILD.gn", ".gn"] + find_exts("build_extra", ".gn", ".gni"):
run(["third_party/depot_tools/gn", "format", fn], env=google_env())
# TODO(ry) Install yapf in third_party.
run(["yapf", "-i"] + find_exts("tools/", ".py") +
find_exts("build_extra", ".py"))
run(["node", prettier, "--write"] + find_exts("js/", ".js", ".ts") +
find_exts("tests/", ".js", ".ts") +
["rollup.config.js", "tsconfig.json", "tslint.json"])
# Requires rustfmt 0.8.2 (flags were different in previous versions)
run(["rustfmt", "--config-path", rustfmt_config] + find_exts("src/", ".rs"))
| <filename>tools/format.py
#!/usr/bin/env python
import os
from third_party import third_party_path, fix_symlinks, google_env, clang_format_path
from util import root_path, run, find_exts
fix_symlinks()
prettier = os.path.join(third_party_path, "node_modules", "prettier",
"bin-prettier.js")
tools_path = os.path.join(root_path, "tools")
rustfmt_config = os.path.join(tools_path, "rustfmt.toml")
os.chdir(root_path)
run([clang_format_path, "-i", "-style", "Google"] +
find_exts("libdeno", ".cc", ".h"))
for fn in ["BUILD.gn", ".gn"] + find_exts("build_extra", ".gn", ".gni"):
run(["third_party/depot_tools/gn", "format", fn], env=google_env())
# TODO(ry) Install yapf in third_party.
run(["yapf", "-i"] + find_exts("tools/", ".py") +
find_exts("build_extra", ".py"))
run(["node", prettier, "--write"] + find_exts("js/", ".js", ".ts") +
find_exts("tests/", ".js", ".ts") +
["rollup.config.js", "tsconfig.json", "tslint.json"])
# Requires rustfmt 0.8.2 (flags were different in previous versions)
run(["rustfmt", "--config-path", rustfmt_config] + find_exts("src/", ".rs"))
| en | 0.873081 | #!/usr/bin/env python # TODO(ry) Install yapf in third_party. # Requires rustfmt 0.8.2 (flags were different in previous versions) | 1.961294 | 2 |
ansible-tests/validations/library/test_network_environment.py | rthallisey/clapper | 13 | 6615108 | #!/usr/bin/env python
import unittest
import validate_network_environment as validation
class TestNicConfigs(unittest.TestCase):
def test_non_dict(self):
errors = validation.check_nic_configs("controller.yaml", None)
self.assertEqual(len(errors), 1)
self.assertEqual('The nic_data parameter must be a dictionary.',
errors[0])
def _test_resources_invalid(self, nic_data):
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertEqual("The nic_data must contain the 'resources' key and it"
" must be a dictionary.", errors[0])
def test_resources_dict(self):
self._test_resources_invalid({})
self._test_resources_invalid({'resources': None})
def test_resource_not_dict(self):
nic_data = {'resources': {'foo': None}}
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertEqual("'foo' is not a valid resource.", errors[0])
def _test_resource_properties_invalid(self, resource):
nic_data = {'resources': {'foo': resource}}
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertEqual("The 'foo' resource must contain 'properties'.",
errors[0])
def test_resource_properties_not_dict(self):
self._test_resource_properties_invalid({})
self._test_resource_properties_invalid({'properties': None})
def test_resource_config_not_dict(self):
nic_data = {'resources': {'foo': {'properties': {'config': None}}}}
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertEqual("The 'config' property of 'foo' must be"
" a dictionary.", errors[0])
def test_resource_os_net_config_not_dict(self):
nic_data = {
'resources': {
'foo': {
'properties': {
'config': {'os_net_config': None}
}
}
}
}
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertEqual("The 'os_net_config' section of 'foo' must be"
" a dictionary.", errors[0])
def nic_data(self, bridges):
return {
'resources': {
'foo': {
'properties': {
'config': {
'os_net_config': {
'network_config': bridges,
}
}
}
}
}
}
def test_network_config_not_list(self):
nic_data = self.nic_data(None)
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertEqual("The 'network_config' section of 'foo' must be"
" a list.", errors[0])
def test_bridge_has_type(self):
nic_data = self.nic_data([{
'name': 'storage',
'members': [],
}])
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertIn('must have a type', errors[0])
def test_bridge_has_name(self):
nic_data = self.nic_data([{
'type': 'ovs_bridge',
'members': [],
}])
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertIn('must have a name', errors[0])
def test_ovs_bridge_has_members(self):
nic_data = self.nic_data([{
'name': 'storage',
'type': 'ovs_bridge',
'members': None,
}])
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertIn("must contain a 'members' list", errors[0])
def test_ovs_bridge_members_dict(self):
nic_data = self.nic_data([{
'name': 'storage',
'type': 'ovs_bridge',
'members': [None],
}])
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertIn("must be a dictionary.", errors[0])
def test_bonds_have_type(self):
nic_data = self.nic_data([{
'type': 'ovs_bridge',
'name': 'storage',
'members': [{}],
}])
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertIn("must have a type.", errors[0])
def test_more_than_one_bond(self):
nic_data = self.nic_data([{
'type': 'ovs_bridge',
'name': 'storage',
'members': [
{'type': 'ovs_bond'},
{'type': 'ovs_bond'},
],
}])
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertIn('Invalid bonding: There are 2 bonds for bridge storage',
errors[0])
def test_multiple_interfaces_without_bond(self):
nic_data = self.nic_data([{
'type': 'ovs_bridge',
'name': 'storage',
'members': [
{'type': 'interface'},
{'type': 'interface'},
],
}])
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertIn('Invalid interface: When not using a bond, there can'
' only be 1 interface for bridge storage', errors[0])
def test_one_interface_without_bond(self):
nic_data = self.nic_data([{
'type': 'ovs_bridge',
'name': 'storage',
'members': [
{'type': 'interface'},
],
}])
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual([], errors)
def test_one_bond_no_interfaces(self):
nic_data = self.nic_data([{
'type': 'ovs_bridge',
'name': 'storage',
'members': [
{'type': 'ovs_bond'},
],
}])
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual([], errors)
def test_one_bond_multiple_interfaces(self):
nic_data = self.nic_data([{
'type': 'ovs_bridge',
'name': 'storage',
'members': [
{'type': 'ovs_bond'},
{'type': 'interface'},
{'type': 'interface'},
],
}])
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual([], errors)
class TestCheckCidrOverlap(unittest.TestCase):
def test_empty(self):
errors = validation.check_cidr_overlap([])
self.assertEqual([], errors)
def test_none(self):
errors = validation.check_cidr_overlap(None)
self.assertEqual(len(errors), 1)
self.assertEqual("The argument must be iterable.", errors[0])
def test_network_none(self):
errors = validation.check_cidr_overlap([None])
self.assertEqual(len(errors), 1)
self.assertEqual("Invalid network: None", errors[0])
def test_single_network(self):
errors = validation.check_cidr_overlap(['172.16.0.0/24'])
self.assertEqual([], errors)
def test_non_overlapping_networks(self):
networks = ['172.16.0.0/24', '172.17.0.0/24']
errors = validation.check_cidr_overlap(networks)
self.assertEqual([], errors)
def test_identical_networks(self):
networks = ['172.16.0.0/24', '172.16.0.0/24']
errors = validation.check_cidr_overlap(networks)
self.assertEqual(len(errors), 1)
self.assertEqual('Networks 172.16.0.0/24 and 172.16.0.0/24 overlap.',
errors[0])
def test_first_cidr_is_subset_of_second(self):
networks = ['172.16.10.0/24', '172.16.0.0/16']
errors = validation.check_cidr_overlap(networks)
self.assertEqual(len(errors), 1)
self.assertEqual('Networks 172.16.10.0/24 and 172.16.0.0/16 overlap.',
errors[0])
def test_second_cidr_is_subset_of_first(self):
networks = ['172.16.0.0/16', '172.16.10.0/24']
errors = validation.check_cidr_overlap(networks)
self.assertEqual(len(errors), 1)
self.assertEqual('Networks 172.16.0.0/16 and 172.16.10.0/24 overlap.',
errors[0])
def test_multiple_overlapping_networks(self):
networks = ['172.16.0.0/16', '172.16.10.0/24',
'172.16.11.0/23', '172.17.0.0/24']
errors = validation.check_cidr_overlap(networks)
self.assertEqual(len(errors), 3)
self.assertEqual('Networks 172.16.0.0/16 and 172.16.10.0/24 overlap.',
errors[0])
self.assertEqual('Networks 172.16.0.0/16 and 172.16.11.0/23 overlap.',
errors[1])
self.assertEqual('Networks 172.16.10.0/24 and 172.16.11.0/23 overlap.',
errors[2])
class TestCheckAllocationPoolsPairing(unittest.TestCase):
def test_empty(self):
errors = validation.check_allocation_pools_pairing({}, {})
self.assertEqual([], errors)
def test_non_dict(self):
errors = validation.check_allocation_pools_pairing(None, {})
self.assertEqual(len(errors), 1)
self.assertEqual('The first argument must be a dictionary.', errors[0])
errors = validation.check_allocation_pools_pairing({}, None)
self.assertEqual(len(errors), 1)
self.assertEqual('The second argument must be a dictionary.',
errors[0])
def test_pool_range_not_list(self):
pools = {'TestPools': None}
errors = validation.check_allocation_pools_pairing({}, pools)
self.assertEqual(len(errors), 1)
self.assertEqual('The IP ranges in TestPools must form a list.',
errors[0])
def _test_pool_invalid_range(self, addr_range):
filedata = {'TestNetCidr': '172.18.0.0/24'}
pools = {'TestAllocationPools': [addr_range]}
errors = validation.check_allocation_pools_pairing(filedata, pools)
self.assertEqual(len(errors), 1)
self.assertEqual('Invalid format of the IP range in'
' TestAllocationPools: {}'.format(addr_range),
errors[0])
def test_pool_invalid_range(self):
broken_ranges = [None,
{},
{'start': 'foo', 'end': 'bar'},
{'start': '10.0.0.1', 'end': '10.0.0.0'},
]
for addr_range in broken_ranges:
self._test_pool_invalid_range(addr_range)
def test_pool_with_correct_range(self):
filedata = {
'StorageNetCidr': '172.18.0.0/24',
}
pools = {
'StorageAllocationPools': [
{'start': '172.18.0.10', 'end': '172.18.0.200'}
]
}
errors = validation.check_allocation_pools_pairing(filedata, pools)
self.assertEqual([], errors)
def test_pool_without_cidr(self):
filedata = {}
pools = {
'StorageAllocationPools': [
{'start': '172.18.0.10', 'end': '172.18.0.200'}
]
}
errors = validation.check_allocation_pools_pairing(filedata, pools)
self.assertEqual(len(errors), 1)
self.assertEqual('The StorageNetCidr CIDR is not specified for'
' StorageAllocationPools.', errors[0])
def test_pool_with_invalid_cidr(self):
filedata = {
'StorageNetCidr': 'breakit',
}
pools = {
'StorageAllocationPools': [
{'start': '172.18.0.10', 'end': '172.18.0.200'}
]
}
errors = validation.check_allocation_pools_pairing(filedata, pools)
self.assertEqual(len(errors), 1)
self.assertEqual('Invalid IP network: breakit', errors[0])
def test_pool_outside_cidr(self):
filedata = {
'StorageNetCidr': '172.18.0.0/25',
}
pools = {
'StorageAllocationPools': [
{'start': '172.18.0.10', 'end': '172.18.0.200'}
]
}
errors = validation.check_allocation_pools_pairing(filedata, pools)
self.assertEqual(len(errors), 1)
self.assertIn('outside of subnet StorageNetCidr', errors[0])
def test_multiple_ranges_and_pools(self):
filedata = {
'StorageNetCidr': '172.18.0.0/24',
'TenantNetCidr': '172.16.0.0/24',
}
pools = {
'StorageAllocationPools': [
{'start': '172.18.0.10', 'end': '172.18.0.20'},
{'start': '172.18.0.100', 'end': '172.18.0.200'},
],
'TenantAllocationPools': [
{'start': '172.16.0.20', 'end': '172.16.0.30'},
{'start': '172.16.0.70', 'end': '172.16.0.80'},
],
}
errors = validation.check_allocation_pools_pairing(filedata, pools)
self.assertEqual([], errors)
class TestStaticIpPoolCollision(unittest.TestCase):
def test_empty(self):
errors = validation.check_static_ip_pool_collision({}, {})
self.assertEqual([], errors)
def test_non_dict(self):
errors = validation.check_static_ip_pool_collision(None, {})
self.assertEqual(len(errors), 1)
self.assertEqual('The static IPs input must be a dictionary.',
errors[0])
errors = validation.check_static_ip_pool_collision({}, None)
self.assertEqual(len(errors), 1)
self.assertEqual('The Pools input must be a dictionary.',
errors[0])
def test_pool_range_not_list(self):
pools = {'TestPools': None}
errors = validation.check_static_ip_pool_collision({}, pools)
self.assertEqual(len(errors), 1)
self.assertEqual('The IP ranges in TestPools must form a list.',
errors[0])
def _test_pool_invalid_range(self, addr_range):
static_ips = {}
pools = {'TestAllocationPools': [addr_range]}
errors = validation.check_static_ip_pool_collision(static_ips, pools)
self.assertEqual(len(errors), 1)
self.assertEqual('Invalid format of the IP range in'
' TestAllocationPools: {}'.format(addr_range),
errors[0])
def test_pool_invalid_range(self):
broken_ranges = [None,
{},
{'start': 'foo', 'end': 'bar'},
{'start': '10.0.0.1', 'end': '10.0.0.0'},
]
for addr_range in broken_ranges:
self._test_pool_invalid_range(addr_range)
def test_pool_with_correct_range(self):
static_ips = {}
pools = {
'StorageAllocationPools': [
{'start': '172.18.0.10', 'end': '172.18.0.200'}
]
}
errors = validation.check_static_ip_pool_collision(static_ips, pools)
self.assertEqual([], errors)
def test_static_ip_service_not_dict(self):
static_ips = {'ComputeIPs': None}
errors = validation.check_static_ip_pool_collision(static_ips, {})
self.assertEqual(len(errors), 1)
self.assertEqual('The ComputeIPs must be a dictionary.', errors[0])
def test_static_ips_not_lists(self):
static_ips = {
'ComputeIPs': {
'internal_api': None
}
}
errors = validation.check_static_ip_pool_collision(static_ips, {})
self.assertEqual(len(errors), 1)
self.assertEqual('The ComputeIPs->internal_api must be an array.',
errors[0])
def test_static_ips_not_parseable(self):
static_ips = {
'ComputeIPs': {
'internal_api': ['nonsense', None, '270.0.0.1'],
}
}
pools = {}
errors = validation.check_static_ip_pool_collision(static_ips, pools)
self.assertEqual(len(errors), 3)
self.assertIn('nonsense is not a valid IP address', errors[0])
self.assertIn('None is not a valid IP address', errors[1])
self.assertIn('270.0.0.1 is not a valid IP address', errors[2])
def test_static_ip_collide_with_pool(self):
static_ips = {
'ControllerIps': {
'internal_api': ['10.35.191.150', '10.35.191.60']
}
}
pools = {
'InternalApiAllocationPools': [
{'start': '10.35.191.150', 'end': '10.35.191.240'}
]
}
errors = validation.check_static_ip_pool_collision(static_ips, pools)
self.assertEqual(len(errors), 1)
self.assertEqual('IP address 10.35.191.150 from '
'ControllerIps[internal_api] is in the '
'InternalApiAllocationPools pool.', errors[0])
def test_static_ip_no_collisions(self):
static_ips = {
'ControllerIps': {
'internal_api': ['10.35.191.50', '10.35.191.60'],
'storage': ['192.168.100.20', '192.168.100.30'],
},
'ComputeIps': {
'internal_api': ['10.35.191.100', '10.35.191.110'],
'storage': ['192.168.100.45', '192.168.100.46']
}
}
pools = {
'InternalApiAllocationPools': [
{'start': '10.35.191.150', 'end': '10.35.191.240'}
]
}
errors = validation.check_static_ip_pool_collision(static_ips, pools)
self.assertEqual([], errors)
class TestVlanIds(unittest.TestCase):
def test_empty(self):
errors = validation.check_vlan_ids({})
self.assertEqual([], errors)
def test_non_dict(self):
errors = validation.check_vlan_ids(None)
self.assertEqual(len(errors), 1)
errors = validation.check_vlan_ids(42)
self.assertEqual(len(errors), 1)
errors = validation.check_vlan_ids("Ceci n'est pas un dict.")
self.assertEqual(len(errors), 1)
def test_id_collision(self):
vlans = {
'TenantNetworkVlanID': 204,
'StorageMgmtNetworkVlanID': 203,
'StorageNetworkVlanID': 202,
'ExternalNetworkVlanID': 100,
'InternalApiNetworkVlanID': 202,
}
errors = validation.check_vlan_ids(vlans)
self.assertEqual(len(errors), 1)
self.assertEqual('Vlan ID 202 (InternalApiNetworkVlanID) already'
' exists in StorageNetworkVlanID', errors[0])
def test_id_no_collisions(self):
vlans = {
'TenantNetworkVlanID': 204,
'StorageMgmtNetworkVlanID': 203,
'StorageNetworkVlanID': 202,
'ExternalNetworkVlanID': 100,
'InternalApiNetworkVlanID': 201,
}
errors = validation.check_vlan_ids(vlans)
self.assertEqual([], errors)
class TestStaticIpInCidr(unittest.TestCase):
def test_empty(self):
errors = validation.check_static_ip_in_cidr({}, {})
self.assertEqual([], errors)
def test_non_dict(self):
errors = validation.check_static_ip_in_cidr(None, {})
self.assertEqual(len(errors), 1)
self.assertEqual('The networks argument must be a dictionary.',
errors[0])
errors = validation.check_static_ip_in_cidr({}, None)
self.assertEqual(len(errors), 1)
self.assertEqual('The static_ips argument must be a dictionary.',
errors[0])
def test_invalid_cidr(self):
errors = validation.check_static_ip_in_cidr(
{'StorageNetCidr': 'breakit'}, {})
self.assertEqual(len(errors), 1)
self.assertEqual("Network 'StorageNetCidr' has an invalid CIDR:"
" 'breakit'", errors[0])
def test_service_not_a_dict(self):
static_ips = {'ControllerIps': None}
errors = validation.check_static_ip_in_cidr({}, static_ips)
self.assertEqual(len(errors), 1)
self.assertEqual('The ControllerIps must be a dictionary.', errors[0])
def test_static_ips_not_a_list(self):
networks = {
'InternalApiNetCidr': '10.35.191.0/24',
}
static_ips = {
'ControllerIps': {
'internal_api': None,
}
}
errors = validation.check_static_ip_in_cidr(networks, static_ips)
self.assertEqual(len(errors), 1)
self.assertEqual('The ControllerIps->internal_api must be a list.',
errors[0])
def test_missing_cidr(self):
static_ips = {
'ControllerIps': {
'storage': ['192.168.100.120']
}
}
errors = validation.check_static_ip_in_cidr({}, static_ips)
self.assertEqual(len(errors), 1)
self.assertEqual("Service 'storage' does not have a corresponding"
" range: 'StorageNetCidr'.", errors[0])
def test_address_not_within_cidr(self):
networks = {
'StorageNetCidr': '192.168.100.0/24',
}
static_ips = {
'ControllerIps': {
'storage': ['192.168.100.120', '192.168.101.0']
}
}
errors = validation.check_static_ip_in_cidr(networks, static_ips)
self.assertEqual(len(errors), 1)
self.assertEqual('The IP address 192.168.101.0 is outside of the'
' StorageNetCidr range: 192.168.100.0/24', errors[0])
def test_addresses_within_cidr(self):
networks = {
'StorageNetCidr': '192.168.100.0/24',
'InternalApiNetCidr': '10.35.191.0/24',
}
static_ips = {
'ControllerIps': {
'storage': ['192.168.100.1', '192.168.100.2', '192.168.100.3'],
'internal_api': ['10.35.191.60', '10.35.191.70']
},
'ComputeIps': {
'storage': ['192.168.100.125', '192.168.100.135'],
'internal_api': ['10.35.191.100', '10.35.191.110'],
}
}
errors = validation.check_static_ip_in_cidr(networks, static_ips)
self.assertEqual([], errors)
class TestDuplicateStaticIps(unittest.TestCase):
def test_empty(self):
errors = validation.duplicate_static_ips({})
self.assertEqual([], errors)
def test_not_a_dict(self):
errors = validation.duplicate_static_ips(None)
self.assertEqual(len(errors), 1)
self.assertEqual('The static_ips argument must be a dictionary.',
errors[0])
def test_service_not_a_dict(self):
static_ips = {
'ControllerIps': None,
}
errors = validation.duplicate_static_ips(static_ips)
self.assertEqual(len(errors), 1)
self.assertEqual('The ControllerIps must be a dictionary.',
errors[0])
def test_static_ips_not_a_list(self):
static_ips = {
'ControllerIps': {
'internal_api': None,
}
}
errors = validation.duplicate_static_ips(static_ips)
self.assertEqual(len(errors), 1)
self.assertEqual('The ControllerIps->internal_api must be a list.',
errors[0])
def test_duplicate_ips_within_service(self):
static_ips = {
'ControllerIps': {
'internal_api': ['10.35.191.60', '10.35.191.60']
},
}
errors = validation.duplicate_static_ips(static_ips)
self.assertEqual(len(errors), 1)
self.assertIn('The 10.35.191.60 IP address was entered multiple times',
errors[0])
def test_duplicate_ips_across_services(self):
static_ips = {
'ControllerIps': {
'internal_api': ['10.35.191.60', '10.35.191.70'],
'storage': ['192.168.100.1', '10.35.191.60', '192.168.100.3'],
},
}
errors = validation.duplicate_static_ips(static_ips)
self.assertEqual(len(errors), 1)
self.assertIn('The 10.35.191.60 IP address was entered multiple times',
errors[0])
def test_duplicate_ips_across_roles(self):
static_ips = {
'ControllerIps': {
'storage': ['192.168.100.1', '192.168.100.2', '192.168.100.3'],
'internal_api': ['10.35.191.60', '10.35.191.70']
},
'ComputeIps': {
'storage': ['192.168.100.125', '192.168.100.135'],
'internal_api': ['10.35.191.60', '10.35.191.110'],
}
}
errors = validation.duplicate_static_ips(static_ips)
self.assertEqual(len(errors), 1)
self.assertIn('The 10.35.191.60 IP address was entered multiple times',
errors[0])
def test_no_duplicate_ips(self):
static_ips = {
'ControllerIps': {
'storage': ['192.168.100.1', '192.168.100.2', '192.168.100.3'],
'internal_api': ['10.35.191.60', '10.35.191.70']
},
'ComputeIps': {
'storage': ['192.168.100.125', '192.168.100.135'],
'internal_api': ['10.35.191.100', '10.35.191.110'],
}
}
errors = validation.duplicate_static_ips(static_ips)
self.assertEqual([], errors)
if __name__ == '__main__':
unittest.main()
| #!/usr/bin/env python
import unittest
import validate_network_environment as validation
class TestNicConfigs(unittest.TestCase):
def test_non_dict(self):
errors = validation.check_nic_configs("controller.yaml", None)
self.assertEqual(len(errors), 1)
self.assertEqual('The nic_data parameter must be a dictionary.',
errors[0])
def _test_resources_invalid(self, nic_data):
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertEqual("The nic_data must contain the 'resources' key and it"
" must be a dictionary.", errors[0])
def test_resources_dict(self):
self._test_resources_invalid({})
self._test_resources_invalid({'resources': None})
def test_resource_not_dict(self):
nic_data = {'resources': {'foo': None}}
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertEqual("'foo' is not a valid resource.", errors[0])
def _test_resource_properties_invalid(self, resource):
nic_data = {'resources': {'foo': resource}}
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertEqual("The 'foo' resource must contain 'properties'.",
errors[0])
def test_resource_properties_not_dict(self):
self._test_resource_properties_invalid({})
self._test_resource_properties_invalid({'properties': None})
def test_resource_config_not_dict(self):
nic_data = {'resources': {'foo': {'properties': {'config': None}}}}
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertEqual("The 'config' property of 'foo' must be"
" a dictionary.", errors[0])
def test_resource_os_net_config_not_dict(self):
nic_data = {
'resources': {
'foo': {
'properties': {
'config': {'os_net_config': None}
}
}
}
}
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertEqual("The 'os_net_config' section of 'foo' must be"
" a dictionary.", errors[0])
def nic_data(self, bridges):
return {
'resources': {
'foo': {
'properties': {
'config': {
'os_net_config': {
'network_config': bridges,
}
}
}
}
}
}
def test_network_config_not_list(self):
nic_data = self.nic_data(None)
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertEqual("The 'network_config' section of 'foo' must be"
" a list.", errors[0])
def test_bridge_has_type(self):
nic_data = self.nic_data([{
'name': 'storage',
'members': [],
}])
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertIn('must have a type', errors[0])
def test_bridge_has_name(self):
nic_data = self.nic_data([{
'type': 'ovs_bridge',
'members': [],
}])
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertIn('must have a name', errors[0])
def test_ovs_bridge_has_members(self):
nic_data = self.nic_data([{
'name': 'storage',
'type': 'ovs_bridge',
'members': None,
}])
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertIn("must contain a 'members' list", errors[0])
def test_ovs_bridge_members_dict(self):
nic_data = self.nic_data([{
'name': 'storage',
'type': 'ovs_bridge',
'members': [None],
}])
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertIn("must be a dictionary.", errors[0])
def test_bonds_have_type(self):
nic_data = self.nic_data([{
'type': 'ovs_bridge',
'name': 'storage',
'members': [{}],
}])
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertIn("must have a type.", errors[0])
def test_more_than_one_bond(self):
nic_data = self.nic_data([{
'type': 'ovs_bridge',
'name': 'storage',
'members': [
{'type': 'ovs_bond'},
{'type': 'ovs_bond'},
],
}])
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertIn('Invalid bonding: There are 2 bonds for bridge storage',
errors[0])
def test_multiple_interfaces_without_bond(self):
nic_data = self.nic_data([{
'type': 'ovs_bridge',
'name': 'storage',
'members': [
{'type': 'interface'},
{'type': 'interface'},
],
}])
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual(len(errors), 1)
self.assertIn('Invalid interface: When not using a bond, there can'
' only be 1 interface for bridge storage', errors[0])
def test_one_interface_without_bond(self):
nic_data = self.nic_data([{
'type': 'ovs_bridge',
'name': 'storage',
'members': [
{'type': 'interface'},
],
}])
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual([], errors)
def test_one_bond_no_interfaces(self):
nic_data = self.nic_data([{
'type': 'ovs_bridge',
'name': 'storage',
'members': [
{'type': 'ovs_bond'},
],
}])
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual([], errors)
def test_one_bond_multiple_interfaces(self):
nic_data = self.nic_data([{
'type': 'ovs_bridge',
'name': 'storage',
'members': [
{'type': 'ovs_bond'},
{'type': 'interface'},
{'type': 'interface'},
],
}])
errors = validation.check_nic_configs("controller.yaml", nic_data)
self.assertEqual([], errors)
class TestCheckCidrOverlap(unittest.TestCase):
def test_empty(self):
errors = validation.check_cidr_overlap([])
self.assertEqual([], errors)
def test_none(self):
errors = validation.check_cidr_overlap(None)
self.assertEqual(len(errors), 1)
self.assertEqual("The argument must be iterable.", errors[0])
def test_network_none(self):
errors = validation.check_cidr_overlap([None])
self.assertEqual(len(errors), 1)
self.assertEqual("Invalid network: None", errors[0])
def test_single_network(self):
errors = validation.check_cidr_overlap(['172.16.0.0/24'])
self.assertEqual([], errors)
def test_non_overlapping_networks(self):
networks = ['172.16.0.0/24', '172.17.0.0/24']
errors = validation.check_cidr_overlap(networks)
self.assertEqual([], errors)
def test_identical_networks(self):
networks = ['172.16.0.0/24', '172.16.0.0/24']
errors = validation.check_cidr_overlap(networks)
self.assertEqual(len(errors), 1)
self.assertEqual('Networks 172.16.0.0/24 and 172.16.0.0/24 overlap.',
errors[0])
def test_first_cidr_is_subset_of_second(self):
networks = ['172.16.10.0/24', '172.16.0.0/16']
errors = validation.check_cidr_overlap(networks)
self.assertEqual(len(errors), 1)
self.assertEqual('Networks 172.16.10.0/24 and 172.16.0.0/16 overlap.',
errors[0])
def test_second_cidr_is_subset_of_first(self):
networks = ['172.16.0.0/16', '172.16.10.0/24']
errors = validation.check_cidr_overlap(networks)
self.assertEqual(len(errors), 1)
self.assertEqual('Networks 172.16.0.0/16 and 172.16.10.0/24 overlap.',
errors[0])
def test_multiple_overlapping_networks(self):
networks = ['172.16.0.0/16', '172.16.10.0/24',
'172.16.11.0/23', '172.17.0.0/24']
errors = validation.check_cidr_overlap(networks)
self.assertEqual(len(errors), 3)
self.assertEqual('Networks 172.16.0.0/16 and 172.16.10.0/24 overlap.',
errors[0])
self.assertEqual('Networks 172.16.0.0/16 and 172.16.11.0/23 overlap.',
errors[1])
self.assertEqual('Networks 172.16.10.0/24 and 172.16.11.0/23 overlap.',
errors[2])
class TestCheckAllocationPoolsPairing(unittest.TestCase):
def test_empty(self):
errors = validation.check_allocation_pools_pairing({}, {})
self.assertEqual([], errors)
def test_non_dict(self):
errors = validation.check_allocation_pools_pairing(None, {})
self.assertEqual(len(errors), 1)
self.assertEqual('The first argument must be a dictionary.', errors[0])
errors = validation.check_allocation_pools_pairing({}, None)
self.assertEqual(len(errors), 1)
self.assertEqual('The second argument must be a dictionary.',
errors[0])
def test_pool_range_not_list(self):
pools = {'TestPools': None}
errors = validation.check_allocation_pools_pairing({}, pools)
self.assertEqual(len(errors), 1)
self.assertEqual('The IP ranges in TestPools must form a list.',
errors[0])
def _test_pool_invalid_range(self, addr_range):
filedata = {'TestNetCidr': '172.18.0.0/24'}
pools = {'TestAllocationPools': [addr_range]}
errors = validation.check_allocation_pools_pairing(filedata, pools)
self.assertEqual(len(errors), 1)
self.assertEqual('Invalid format of the IP range in'
' TestAllocationPools: {}'.format(addr_range),
errors[0])
def test_pool_invalid_range(self):
broken_ranges = [None,
{},
{'start': 'foo', 'end': 'bar'},
{'start': '10.0.0.1', 'end': '10.0.0.0'},
]
for addr_range in broken_ranges:
self._test_pool_invalid_range(addr_range)
def test_pool_with_correct_range(self):
filedata = {
'StorageNetCidr': '172.18.0.0/24',
}
pools = {
'StorageAllocationPools': [
{'start': '172.18.0.10', 'end': '172.18.0.200'}
]
}
errors = validation.check_allocation_pools_pairing(filedata, pools)
self.assertEqual([], errors)
def test_pool_without_cidr(self):
filedata = {}
pools = {
'StorageAllocationPools': [
{'start': '172.18.0.10', 'end': '172.18.0.200'}
]
}
errors = validation.check_allocation_pools_pairing(filedata, pools)
self.assertEqual(len(errors), 1)
self.assertEqual('The StorageNetCidr CIDR is not specified for'
' StorageAllocationPools.', errors[0])
def test_pool_with_invalid_cidr(self):
filedata = {
'StorageNetCidr': 'breakit',
}
pools = {
'StorageAllocationPools': [
{'start': '172.18.0.10', 'end': '172.18.0.200'}
]
}
errors = validation.check_allocation_pools_pairing(filedata, pools)
self.assertEqual(len(errors), 1)
self.assertEqual('Invalid IP network: breakit', errors[0])
def test_pool_outside_cidr(self):
filedata = {
'StorageNetCidr': '172.18.0.0/25',
}
pools = {
'StorageAllocationPools': [
{'start': '172.18.0.10', 'end': '172.18.0.200'}
]
}
errors = validation.check_allocation_pools_pairing(filedata, pools)
self.assertEqual(len(errors), 1)
self.assertIn('outside of subnet StorageNetCidr', errors[0])
def test_multiple_ranges_and_pools(self):
filedata = {
'StorageNetCidr': '172.18.0.0/24',
'TenantNetCidr': '172.16.0.0/24',
}
pools = {
'StorageAllocationPools': [
{'start': '172.18.0.10', 'end': '172.18.0.20'},
{'start': '172.18.0.100', 'end': '172.18.0.200'},
],
'TenantAllocationPools': [
{'start': '172.16.0.20', 'end': '172.16.0.30'},
{'start': '172.16.0.70', 'end': '172.16.0.80'},
],
}
errors = validation.check_allocation_pools_pairing(filedata, pools)
self.assertEqual([], errors)
class TestStaticIpPoolCollision(unittest.TestCase):
def test_empty(self):
errors = validation.check_static_ip_pool_collision({}, {})
self.assertEqual([], errors)
def test_non_dict(self):
errors = validation.check_static_ip_pool_collision(None, {})
self.assertEqual(len(errors), 1)
self.assertEqual('The static IPs input must be a dictionary.',
errors[0])
errors = validation.check_static_ip_pool_collision({}, None)
self.assertEqual(len(errors), 1)
self.assertEqual('The Pools input must be a dictionary.',
errors[0])
def test_pool_range_not_list(self):
pools = {'TestPools': None}
errors = validation.check_static_ip_pool_collision({}, pools)
self.assertEqual(len(errors), 1)
self.assertEqual('The IP ranges in TestPools must form a list.',
errors[0])
def _test_pool_invalid_range(self, addr_range):
static_ips = {}
pools = {'TestAllocationPools': [addr_range]}
errors = validation.check_static_ip_pool_collision(static_ips, pools)
self.assertEqual(len(errors), 1)
self.assertEqual('Invalid format of the IP range in'
' TestAllocationPools: {}'.format(addr_range),
errors[0])
def test_pool_invalid_range(self):
broken_ranges = [None,
{},
{'start': 'foo', 'end': 'bar'},
{'start': '10.0.0.1', 'end': '10.0.0.0'},
]
for addr_range in broken_ranges:
self._test_pool_invalid_range(addr_range)
def test_pool_with_correct_range(self):
static_ips = {}
pools = {
'StorageAllocationPools': [
{'start': '172.18.0.10', 'end': '172.18.0.200'}
]
}
errors = validation.check_static_ip_pool_collision(static_ips, pools)
self.assertEqual([], errors)
def test_static_ip_service_not_dict(self):
static_ips = {'ComputeIPs': None}
errors = validation.check_static_ip_pool_collision(static_ips, {})
self.assertEqual(len(errors), 1)
self.assertEqual('The ComputeIPs must be a dictionary.', errors[0])
def test_static_ips_not_lists(self):
static_ips = {
'ComputeIPs': {
'internal_api': None
}
}
errors = validation.check_static_ip_pool_collision(static_ips, {})
self.assertEqual(len(errors), 1)
self.assertEqual('The ComputeIPs->internal_api must be an array.',
errors[0])
def test_static_ips_not_parseable(self):
static_ips = {
'ComputeIPs': {
'internal_api': ['nonsense', None, '270.0.0.1'],
}
}
pools = {}
errors = validation.check_static_ip_pool_collision(static_ips, pools)
self.assertEqual(len(errors), 3)
self.assertIn('nonsense is not a valid IP address', errors[0])
self.assertIn('None is not a valid IP address', errors[1])
self.assertIn('270.0.0.1 is not a valid IP address', errors[2])
def test_static_ip_collide_with_pool(self):
static_ips = {
'ControllerIps': {
'internal_api': ['10.35.191.150', '10.35.191.60']
}
}
pools = {
'InternalApiAllocationPools': [
{'start': '10.35.191.150', 'end': '10.35.191.240'}
]
}
errors = validation.check_static_ip_pool_collision(static_ips, pools)
self.assertEqual(len(errors), 1)
self.assertEqual('IP address 10.35.191.150 from '
'ControllerIps[internal_api] is in the '
'InternalApiAllocationPools pool.', errors[0])
def test_static_ip_no_collisions(self):
static_ips = {
'ControllerIps': {
'internal_api': ['10.35.191.50', '10.35.191.60'],
'storage': ['192.168.100.20', '192.168.100.30'],
},
'ComputeIps': {
'internal_api': ['10.35.191.100', '10.35.191.110'],
'storage': ['192.168.100.45', '192.168.100.46']
}
}
pools = {
'InternalApiAllocationPools': [
{'start': '10.35.191.150', 'end': '10.35.191.240'}
]
}
errors = validation.check_static_ip_pool_collision(static_ips, pools)
self.assertEqual([], errors)
class TestVlanIds(unittest.TestCase):
def test_empty(self):
errors = validation.check_vlan_ids({})
self.assertEqual([], errors)
def test_non_dict(self):
errors = validation.check_vlan_ids(None)
self.assertEqual(len(errors), 1)
errors = validation.check_vlan_ids(42)
self.assertEqual(len(errors), 1)
errors = validation.check_vlan_ids("Ceci n'est pas un dict.")
self.assertEqual(len(errors), 1)
def test_id_collision(self):
vlans = {
'TenantNetworkVlanID': 204,
'StorageMgmtNetworkVlanID': 203,
'StorageNetworkVlanID': 202,
'ExternalNetworkVlanID': 100,
'InternalApiNetworkVlanID': 202,
}
errors = validation.check_vlan_ids(vlans)
self.assertEqual(len(errors), 1)
self.assertEqual('Vlan ID 202 (InternalApiNetworkVlanID) already'
' exists in StorageNetworkVlanID', errors[0])
def test_id_no_collisions(self):
vlans = {
'TenantNetworkVlanID': 204,
'StorageMgmtNetworkVlanID': 203,
'StorageNetworkVlanID': 202,
'ExternalNetworkVlanID': 100,
'InternalApiNetworkVlanID': 201,
}
errors = validation.check_vlan_ids(vlans)
self.assertEqual([], errors)
class TestStaticIpInCidr(unittest.TestCase):
def test_empty(self):
errors = validation.check_static_ip_in_cidr({}, {})
self.assertEqual([], errors)
def test_non_dict(self):
errors = validation.check_static_ip_in_cidr(None, {})
self.assertEqual(len(errors), 1)
self.assertEqual('The networks argument must be a dictionary.',
errors[0])
errors = validation.check_static_ip_in_cidr({}, None)
self.assertEqual(len(errors), 1)
self.assertEqual('The static_ips argument must be a dictionary.',
errors[0])
def test_invalid_cidr(self):
errors = validation.check_static_ip_in_cidr(
{'StorageNetCidr': 'breakit'}, {})
self.assertEqual(len(errors), 1)
self.assertEqual("Network 'StorageNetCidr' has an invalid CIDR:"
" 'breakit'", errors[0])
def test_service_not_a_dict(self):
static_ips = {'ControllerIps': None}
errors = validation.check_static_ip_in_cidr({}, static_ips)
self.assertEqual(len(errors), 1)
self.assertEqual('The ControllerIps must be a dictionary.', errors[0])
def test_static_ips_not_a_list(self):
networks = {
'InternalApiNetCidr': '10.35.191.0/24',
}
static_ips = {
'ControllerIps': {
'internal_api': None,
}
}
errors = validation.check_static_ip_in_cidr(networks, static_ips)
self.assertEqual(len(errors), 1)
self.assertEqual('The ControllerIps->internal_api must be a list.',
errors[0])
def test_missing_cidr(self):
static_ips = {
'ControllerIps': {
'storage': ['192.168.100.120']
}
}
errors = validation.check_static_ip_in_cidr({}, static_ips)
self.assertEqual(len(errors), 1)
self.assertEqual("Service 'storage' does not have a corresponding"
" range: 'StorageNetCidr'.", errors[0])
def test_address_not_within_cidr(self):
networks = {
'StorageNetCidr': '192.168.100.0/24',
}
static_ips = {
'ControllerIps': {
'storage': ['192.168.100.120', '192.168.101.0']
}
}
errors = validation.check_static_ip_in_cidr(networks, static_ips)
self.assertEqual(len(errors), 1)
self.assertEqual('The IP address 192.168.101.0 is outside of the'
' StorageNetCidr range: 192.168.100.0/24', errors[0])
def test_addresses_within_cidr(self):
networks = {
'StorageNetCidr': '192.168.100.0/24',
'InternalApiNetCidr': '10.35.191.0/24',
}
static_ips = {
'ControllerIps': {
'storage': ['192.168.100.1', '192.168.100.2', '192.168.100.3'],
'internal_api': ['10.35.191.60', '10.35.191.70']
},
'ComputeIps': {
'storage': ['192.168.100.125', '192.168.100.135'],
'internal_api': ['10.35.191.100', '10.35.191.110'],
}
}
errors = validation.check_static_ip_in_cidr(networks, static_ips)
self.assertEqual([], errors)
class TestDuplicateStaticIps(unittest.TestCase):
def test_empty(self):
errors = validation.duplicate_static_ips({})
self.assertEqual([], errors)
def test_not_a_dict(self):
errors = validation.duplicate_static_ips(None)
self.assertEqual(len(errors), 1)
self.assertEqual('The static_ips argument must be a dictionary.',
errors[0])
def test_service_not_a_dict(self):
static_ips = {
'ControllerIps': None,
}
errors = validation.duplicate_static_ips(static_ips)
self.assertEqual(len(errors), 1)
self.assertEqual('The ControllerIps must be a dictionary.',
errors[0])
def test_static_ips_not_a_list(self):
static_ips = {
'ControllerIps': {
'internal_api': None,
}
}
errors = validation.duplicate_static_ips(static_ips)
self.assertEqual(len(errors), 1)
self.assertEqual('The ControllerIps->internal_api must be a list.',
errors[0])
def test_duplicate_ips_within_service(self):
static_ips = {
'ControllerIps': {
'internal_api': ['10.35.191.60', '10.35.191.60']
},
}
errors = validation.duplicate_static_ips(static_ips)
self.assertEqual(len(errors), 1)
self.assertIn('The 10.35.191.60 IP address was entered multiple times',
errors[0])
def test_duplicate_ips_across_services(self):
static_ips = {
'ControllerIps': {
'internal_api': ['10.35.191.60', '10.35.191.70'],
'storage': ['192.168.100.1', '10.35.191.60', '192.168.100.3'],
},
}
errors = validation.duplicate_static_ips(static_ips)
self.assertEqual(len(errors), 1)
self.assertIn('The 10.35.191.60 IP address was entered multiple times',
errors[0])
def test_duplicate_ips_across_roles(self):
static_ips = {
'ControllerIps': {
'storage': ['192.168.100.1', '192.168.100.2', '192.168.100.3'],
'internal_api': ['10.35.191.60', '10.35.191.70']
},
'ComputeIps': {
'storage': ['192.168.100.125', '192.168.100.135'],
'internal_api': ['10.35.191.60', '10.35.191.110'],
}
}
errors = validation.duplicate_static_ips(static_ips)
self.assertEqual(len(errors), 1)
self.assertIn('The 10.35.191.60 IP address was entered multiple times',
errors[0])
def test_no_duplicate_ips(self):
static_ips = {
'ControllerIps': {
'storage': ['192.168.100.1', '192.168.100.2', '192.168.100.3'],
'internal_api': ['10.35.191.60', '10.35.191.70']
},
'ComputeIps': {
'storage': ['192.168.100.125', '192.168.100.135'],
'internal_api': ['10.35.191.100', '10.35.191.110'],
}
}
errors = validation.duplicate_static_ips(static_ips)
self.assertEqual([], errors)
if __name__ == '__main__':
unittest.main()
| ru | 0.26433 | #!/usr/bin/env python | 2.871921 | 3 |
condor_tuxedo_pipeline.py | danforthcenter/htcondor-tools | 3 | 6615109 | <gh_stars>1-10
#!/usr/bin/env python
# submit jobs to queue on behalf of pacbio software
import os, sys
import argparse
import subprocess
def hisat_stringtie_script( args ):
script = ["#!/bin/bash\n"]
## CREATE HISAT LINE
hisat_line = "hisat2 --dta-cufflinks -p {threads} --max-intronlen {maxintron} -x {idxfile}".format(threads=args.threads, maxintron=args.max_intronlen, idxfile=args.hisat_index)
# add reads files to hisat_line
if args.single:
hisat_line += " -U $2"
else:
hisat_line += " -1 $2 -2 $3"
# add samtools sort line
hisat_line += " | samtools sort -@{threads} -T $1.HISAT2tmp - -o $1.HISAT2.bam\n".format(threads=args.threads)
## CREATE STRINGTIE LINE
stringtie_line = "stringtie $1.HISAT2.bam -G {gff} -p {threads} -o $1.stringtie.gtf\n".format(gff=args.gffref, threads=args.threads)
## FINISH SCRIPT
script.append( hisat_line )
script.append( stringtie_line )
return script
# Build job file for hisat/stringtie
def hisat_stringtie_jobfile( args, hisat_script_filename ):
jobfile = ["universe = vanilla\n",
"getenv = true\n",
"accounting_group = {}\n".format(args.condor_group),
"logdir = {}\n".format(args.logdir),
"initialdir = {}\n".format(args.initialdir),
"executable = /bin/bash\n"
]
config_fields = "outprefix,condition"
# check if reads are single end or paired end
if args.single:
cmdstr = hisat_script_filename + " $(outprefix) $(reads)"
config_fields+= ",reads"
else:
cmdstr = hisat_script_filename + " $(outprefix) $(reads1) $(reads2)"
config_fields += ",reads1,reads2"
jobfile.append( "arguments = \"{}\"\n".format(cmdstr) )
jobfile.append( "log = $(logdir)/$(outprefix).log\n" )
jobfile.append( "output = $(logdir)/$(outprefix).out\n" )
jobfile.append( "error = $(logdir)/$(outprefix).error\n" )
jobfile.append( "request_cpus = {}\n".format( args.threads ) )
jobfile.append( "request_memory = {}\n".format( args.threads * args.memory ) )
jobfile.append( "queue {cfields} from {cfile}\n".format( cfields=config_fields, cfile=args.configfile ) )
return jobfile
# build cuffmerge/cuffdiff script
def cuffmerge_cuffdiff_script( args ):
script = ["#!/bin/bash\n"]
assembly_list = ""
bamfiles = ""
bamlist = {}
labels = set()
# build necessary strings for script
with open(args.configfile, 'r') as fh:
for line in fh:
line = line.split()
assembly_list += line[0] + ".stringtie.gtf "
if not line[1] in bamlist:
bamlist[line[1]] = []
bamlist[line[1]].append(line[0] + ".HISAT2.bam")
labels.add( line[1] )
# build bam string
for l in labels:
bamfiles += ",".join(bamlist[l]) + " "
script.append("ls {} > assembly_list.txt\n".format(assembly_list) )
script.append("cuffmerge -p {threads} -g {gff} assembly_list.txt\n".format(threads=args.threads, gff=args.gffref) )
script.append("cuffdiff -p {threads} {multiread} -o {expprefix}.cuffdiff merged_asm/merged.gtf --max-bundle-frags 50000000 {bamfiles} -L {labels}\n".format(
threads=args.threads, multiread=args.multiread, expprefix=args.expprefix, bamfiles=bamfiles, labels=",".join(sorted(labels)) ) )
return script
# build job file using pacbio's args
def cuffmerge_cuffdiff_jobfile( args, cuffmerge_cuffdiff_script_filename ):
jobfile = ["universe = vanilla\n",
"getenv = true\n",
"accounting_group = {}\n".format(args.condor_group),
"logdir = {}\n".format(args.logdir),
"initialdir = {}\n".format(args.initialdir),
"executable = /bin/bash\n"
]
jobfile.append( "arguments = \"{}\"\n".format(cuffmerge_cuffdiff_script_filename) )
jobfile.append( "log = $(logdir)/{}.log\n".format(args.expprefix) )
jobfile.append( "output = $(logdir)/{}.out\n".format(args.expprefix) )
jobfile.append( "error = $(logdir)/{}.error\n".format(args.expprefix) )
jobfile.append( "request_cpus = {}\n".format( args.threads ) )
jobfile.append( "request_memory = {}\n".format( args.threads * args.memory ) )
jobfile.append( "queue\n" )
return jobfile
# build dagman file for managing DAG
def dagman_file(args, hisat_jobfile_filename, cuffmerge_cuffdiff_jobfile_filename):
dagfile = ["JOB hisat {}\n".format(hisat_jobfile_filename),
"JOB cuff {}\n".format(cuffmerge_cuffdiff_jobfile_filename),
"PARENT hisat CHILD cuff\n"
]
return dagfile
# write job/script file
def write_file( jobfile, filename ):
with open( filename, 'w' ) as fh:
for line in jobfile:
fh.write( line )
################
##### MAIN #####
################
def main( prog_name, argv ):
# ARG PROCESSING
parser = argparse.ArgumentParser( prog=prog_name, description= 'designed to build and run a tuxedo suite pipeline',
formatter_class=argparse.ArgumentDefaultsHelpFormatter )
parser.add_argument('-g','--gff-ref', dest='gffref', metavar='GFFREF', required=True, type=str, help='gff reference file to use in cuffdiff and cuffmerge')
parser.add_argument('-f','--fasta-ref', dest='faref', metavar='FAREF', required=True, type=str, help='reference genome file to use')
parser.add_argument('-c','--configfile', dest='configfile', metavar='CONFIG', required=True, type=str, help='file containing all replicate information with each line in the form "outprefix condition reads[1 reads2]"')
parser.add_argument('-x','--hisat-index', dest='hisat_index', metavar='HISAT_INDEX', required=True, type=str, help='prefix of HISAT2 index set in hisat2-build command')
parser.add_argument('-l','--log-directory', dest='logdir', metavar='LOGDIR', required=True, type=str, help='directory to write log files to')
parser.add_argument('-i','--initial-directory', dest='initialdir', metavar='INITIALDIR', required=True, type=str, help='initial directory to run these commands in')
parser.add_argument('-e','--experiment-prefix', dest='expprefix', metavar='EXPPREFIX', default="tuxedo_run", type=str, help='prefix given to cuffdiff run (this output goes in the initial directory)')
parser.add_argument('--condor-group', dest='condor_group', metavar='CONDOR_GROUP', default="$ENV(CONDOR_GROUP)", type=str, help=argparse.SUPPRESS)
parser.add_argument('-p','--threads', dest='threads', metavar='NPROC', default=10, type=int, help='NPROC for jobs')
parser.add_argument('--max-intronlen', dest='max_intronlen', metavar='MAX_INTRONLEN', default=50000, type=int, help='maximum intron length to use in HISAT2')
parser.add_argument('-m','--memory', dest='memory', metavar='MEM', default=3000, type=int, help='megabytes to allocate in jobfile per thread')
parser.add_argument('--single', dest='single', default=False, action='store_true', help='flag to indicate single end reads are used')
parser.add_argument('-u','--multi-read-correct', dest='multiread', default='', const='--multi-read-correct', action='store_const', help='flag to indicate use of multiple mapped reads in cuffdiff')
args = parser.parse_args(argv)
## INITIALIZE
hisat_script_filename = args.initialdir + "/hisat_stringtie_script.sh"
hisat_jobfile_filename = args.initialdir + "/hisat_stringtie_script.job"
cuffmerge_cuffdiff_script_filename = args.initialdir + "/cuffmerge_cuffdiff_script.sh"
cuffmerge_cuffdiff_jobfile_filename = args.initialdir + "/cuffmerge_cuffdiff_script.job"
dag_filename = args.initialdir + "/" + args.expprefix + ".dag"
#########################
## CREATE FILES ######
#################
## CREATE HISAT/STRINGTIE SCRIPT
hisat_stringtie_script_string = hisat_stringtie_script( args )
## CREATE HISAT/STRINGTIE JOBFILE
hisat_stringtie_jobfile_string = hisat_stringtie_jobfile( args, hisat_script_filename )
## CREATE CUFFMERGE/CUFFDIFF SCRIPT
cuffmerge_cuffdiff_script_string = cuffmerge_cuffdiff_script( args )
## CREATE CUFFMERGE/CUFFDIFF JOBFILE
cuffmerge_cuffdiff_jobfile_string = cuffmerge_cuffdiff_jobfile( args, cuffmerge_cuffdiff_script_filename )
## CREATE DAGMAN FILE
dagman_string = dagman_file(args, hisat_jobfile_filename, cuffmerge_cuffdiff_jobfile_filename)
########################
## WRITE FILES ######
################
## WRITE SCRIPTS AND JOBFILES
write_file( hisat_stringtie_script_string, hisat_script_filename )
write_file( hisat_stringtie_jobfile_string, hisat_jobfile_filename )
write_file( cuffmerge_cuffdiff_script_string, cuffmerge_cuffdiff_script_filename )
write_file( cuffmerge_cuffdiff_jobfile_string, cuffmerge_cuffdiff_jobfile_filename )
## WRITE DAGMAN FILE
write_file( dagman_string, dag_filename )
### ~~~~~~~~~~ ###
## SUBMIT JOBFILE TO QUEUE
queue_id = subprocess.check_output( ["condor_submit_dag", dag_filename] )
if __name__ =='__main__':
main(sys.argv[0], sys.argv[1:])
| #!/usr/bin/env python
# submit jobs to queue on behalf of pacbio software
import os, sys
import argparse
import subprocess
def hisat_stringtie_script( args ):
script = ["#!/bin/bash\n"]
## CREATE HISAT LINE
hisat_line = "hisat2 --dta-cufflinks -p {threads} --max-intronlen {maxintron} -x {idxfile}".format(threads=args.threads, maxintron=args.max_intronlen, idxfile=args.hisat_index)
# add reads files to hisat_line
if args.single:
hisat_line += " -U $2"
else:
hisat_line += " -1 $2 -2 $3"
# add samtools sort line
hisat_line += " | samtools sort -@{threads} -T $1.HISAT2tmp - -o $1.HISAT2.bam\n".format(threads=args.threads)
## CREATE STRINGTIE LINE
stringtie_line = "stringtie $1.HISAT2.bam -G {gff} -p {threads} -o $1.stringtie.gtf\n".format(gff=args.gffref, threads=args.threads)
## FINISH SCRIPT
script.append( hisat_line )
script.append( stringtie_line )
return script
# Build job file for hisat/stringtie
def hisat_stringtie_jobfile( args, hisat_script_filename ):
jobfile = ["universe = vanilla\n",
"getenv = true\n",
"accounting_group = {}\n".format(args.condor_group),
"logdir = {}\n".format(args.logdir),
"initialdir = {}\n".format(args.initialdir),
"executable = /bin/bash\n"
]
config_fields = "outprefix,condition"
# check if reads are single end or paired end
if args.single:
cmdstr = hisat_script_filename + " $(outprefix) $(reads)"
config_fields+= ",reads"
else:
cmdstr = hisat_script_filename + " $(outprefix) $(reads1) $(reads2)"
config_fields += ",reads1,reads2"
jobfile.append( "arguments = \"{}\"\n".format(cmdstr) )
jobfile.append( "log = $(logdir)/$(outprefix).log\n" )
jobfile.append( "output = $(logdir)/$(outprefix).out\n" )
jobfile.append( "error = $(logdir)/$(outprefix).error\n" )
jobfile.append( "request_cpus = {}\n".format( args.threads ) )
jobfile.append( "request_memory = {}\n".format( args.threads * args.memory ) )
jobfile.append( "queue {cfields} from {cfile}\n".format( cfields=config_fields, cfile=args.configfile ) )
return jobfile
# build cuffmerge/cuffdiff script
def cuffmerge_cuffdiff_script( args ):
script = ["#!/bin/bash\n"]
assembly_list = ""
bamfiles = ""
bamlist = {}
labels = set()
# build necessary strings for script
with open(args.configfile, 'r') as fh:
for line in fh:
line = line.split()
assembly_list += line[0] + ".stringtie.gtf "
if not line[1] in bamlist:
bamlist[line[1]] = []
bamlist[line[1]].append(line[0] + ".HISAT2.bam")
labels.add( line[1] )
# build bam string
for l in labels:
bamfiles += ",".join(bamlist[l]) + " "
script.append("ls {} > assembly_list.txt\n".format(assembly_list) )
script.append("cuffmerge -p {threads} -g {gff} assembly_list.txt\n".format(threads=args.threads, gff=args.gffref) )
script.append("cuffdiff -p {threads} {multiread} -o {expprefix}.cuffdiff merged_asm/merged.gtf --max-bundle-frags 50000000 {bamfiles} -L {labels}\n".format(
threads=args.threads, multiread=args.multiread, expprefix=args.expprefix, bamfiles=bamfiles, labels=",".join(sorted(labels)) ) )
return script
# build job file using pacbio's args
def cuffmerge_cuffdiff_jobfile( args, cuffmerge_cuffdiff_script_filename ):
jobfile = ["universe = vanilla\n",
"getenv = true\n",
"accounting_group = {}\n".format(args.condor_group),
"logdir = {}\n".format(args.logdir),
"initialdir = {}\n".format(args.initialdir),
"executable = /bin/bash\n"
]
jobfile.append( "arguments = \"{}\"\n".format(cuffmerge_cuffdiff_script_filename) )
jobfile.append( "log = $(logdir)/{}.log\n".format(args.expprefix) )
jobfile.append( "output = $(logdir)/{}.out\n".format(args.expprefix) )
jobfile.append( "error = $(logdir)/{}.error\n".format(args.expprefix) )
jobfile.append( "request_cpus = {}\n".format( args.threads ) )
jobfile.append( "request_memory = {}\n".format( args.threads * args.memory ) )
jobfile.append( "queue\n" )
return jobfile
# build dagman file for managing DAG
def dagman_file(args, hisat_jobfile_filename, cuffmerge_cuffdiff_jobfile_filename):
dagfile = ["JOB hisat {}\n".format(hisat_jobfile_filename),
"JOB cuff {}\n".format(cuffmerge_cuffdiff_jobfile_filename),
"PARENT hisat CHILD cuff\n"
]
return dagfile
# write job/script file
def write_file( jobfile, filename ):
with open( filename, 'w' ) as fh:
for line in jobfile:
fh.write( line )
################
##### MAIN #####
################
def main( prog_name, argv ):
    """Build all script/job/DAG files for a tuxedo-suite (HISAT2 + StringTie +
    cuffmerge/cuffdiff) pipeline run and submit the DAG to HTCondor.

    prog_name: program name shown in argparse usage (typically sys.argv[0])
    argv: argument list to parse (typically sys.argv[1:])
    """
    # ARG PROCESSING
    parser = argparse.ArgumentParser( prog=prog_name, description= 'designed to build and run a tuxedo suite pipeline',
                    formatter_class=argparse.ArgumentDefaultsHelpFormatter )
    parser.add_argument('-g','--gff-ref', dest='gffref', metavar='GFFREF', required=True, type=str, help='gff reference file to use in cuffdiff and cuffmerge')
    parser.add_argument('-f','--fasta-ref', dest='faref', metavar='FAREF', required=True, type=str, help='reference genome file to use')
    parser.add_argument('-c','--configfile', dest='configfile', metavar='CONFIG', required=True, type=str, help='file containing all replicate information with each line in the form "outprefix condition reads[1 reads2]"')
    parser.add_argument('-x','--hisat-index', dest='hisat_index', metavar='HISAT_INDEX', required=True, type=str, help='prefix of HISAT2 index set in hisat2-build command')
    parser.add_argument('-l','--log-directory', dest='logdir', metavar='LOGDIR', required=True, type=str, help='directory to write log files to')
    parser.add_argument('-i','--initial-directory', dest='initialdir', metavar='INITIALDIR', required=True, type=str, help='initial directory to run these commands in')
    parser.add_argument('-e','--experiment-prefix', dest='expprefix', metavar='EXPPREFIX', default="tuxedo_run", type=str, help='prefix given to cuffdiff run (this output goes in the initial directory)')
    parser.add_argument('--condor-group', dest='condor_group', metavar='CONDOR_GROUP', default="$ENV(CONDOR_GROUP)", type=str, help=argparse.SUPPRESS)
    parser.add_argument('-p','--threads', dest='threads', metavar='NPROC', default=10, type=int, help='NPROC for jobs')
    parser.add_argument('--max-intronlen', dest='max_intronlen', metavar='MAX_INTRONLEN', default=50000, type=int, help='maximum intron length to use in HISAT2')
    parser.add_argument('-m','--memory', dest='memory', metavar='MEM', default=3000, type=int, help='megabytes to allocate in jobfile per thread')
    parser.add_argument('--single', dest='single', default=False, action='store_true', help='flag to indicate single end reads are used')
    parser.add_argument('-u','--multi-read-correct', dest='multiread', default='', const='--multi-read-correct', action='store_const', help='flag to indicate use of multiple mapped reads in cuffdiff')
    args = parser.parse_args(argv)
    ## INITIALIZE
    # NOTE(review): paths are built by string concatenation and assume
    # args.initialdir has no trailing slash — consider os.path.join.
    hisat_script_filename = args.initialdir + "/hisat_stringtie_script.sh"
    hisat_jobfile_filename = args.initialdir + "/hisat_stringtie_script.job"
    cuffmerge_cuffdiff_script_filename = args.initialdir + "/cuffmerge_cuffdiff_script.sh"
    cuffmerge_cuffdiff_jobfile_filename = args.initialdir + "/cuffmerge_cuffdiff_script.job"
    dag_filename = args.initialdir + "/" + args.expprefix + ".dag"
    #########################
    ## CREATE FILES    ######
    #################
    ## CREATE HISAT/STRINGTIE SCRIPT
    hisat_stringtie_script_string = hisat_stringtie_script( args )
    ## CREATE HISAT/STRINGTIE JOBFILE
    hisat_stringtie_jobfile_string = hisat_stringtie_jobfile( args, hisat_script_filename )
    ## CREATE CUFFMERGE/CUFFDIFF SCRIPT
    cuffmerge_cuffdiff_script_string = cuffmerge_cuffdiff_script( args )
    ## CREATE CUFFMERGE/CUFFDIFF JOBFILE
    cuffmerge_cuffdiff_jobfile_string = cuffmerge_cuffdiff_jobfile( args, cuffmerge_cuffdiff_script_filename )
    ## CREATE DAGMAN FILE
    dagman_string = dagman_file(args, hisat_jobfile_filename, cuffmerge_cuffdiff_jobfile_filename)
    ########################
    ## WRITE FILES    ######
    ################
    ## WRITE SCRIPTS AND JOBFILES
    write_file( hisat_stringtie_script_string, hisat_script_filename )
    write_file( hisat_stringtie_jobfile_string, hisat_jobfile_filename )
    write_file( cuffmerge_cuffdiff_script_string, cuffmerge_cuffdiff_script_filename )
    write_file( cuffmerge_cuffdiff_jobfile_string, cuffmerge_cuffdiff_jobfile_filename )
    ## WRITE DAGMAN FILE
    write_file( dagman_string, dag_filename )
    ### ~~~~~~~~~~ ###
    ## SUBMIT JOBFILE TO QUEUE
    # NOTE(review): queue_id (condor_submit_dag stdout) is captured but never
    # used — consider logging it or dropping the assignment.
    queue_id = subprocess.check_output( ["condor_submit_dag", dag_filename] )
if __name__ =='__main__':
    main(sys.argv[0], sys.argv[1:])
Lognormal Bias/bias_plane.py | alfredholmes/UK-Company-Data | 1 | 6615110 | import analysis, numpy as np, csv
from scipy.optimize import minimize
import matplotlib.pyplot as plt
def main():
    """Sample the lognormal-bias surfaces and persist fitted plane coefficients."""
    X, Y, Z, W = analysis.generate_bias((-1, 1), (1, 2), 100)
    mean_params = fit_plane(X, Y, Z)  # plane through the bias means
    sd_params = fit_plane(X, Y, W)    # plane through the bias standard deviations
    with open('plane_params.csv', 'w') as csvfile:
        csv.writer(csvfile).writerows((mean_params, sd_params))
    # Uncomment for a visual sanity check of the mean-surface fit:
    #fig = plt.figure()
    #ax = fig.add_subplot(111, projection='3d')
    #ax.plot_surface(X, Y, Z, alpha=0.5)
    #ax.plot_surface(X, Y, plane(X, Y, mean_params), alpha=0.5)
    #plt.show()
def plane(X, Y, params):
    """Evaluate z at (X, Y) on the plane parameterized by (a, b, c, d),
    i.e. z = -(a*x + b*y - d) / c."""
    a, b, c, d = params
    residual = a * X + b * Y - d
    return residual / -c
def fit_plane(X, Y, Z):
    """Fit the plane z = -(a*x + b*y - d)/c to the surface Z by least squares.

    The objective below is the same squared-residual sum computed by the
    module-level `error` function; returns the optimized (a, b, c, d).
    """
    def objective(params):
        a, b, c, d = params
        return (((a * X + b * Y - d) / c + Z) ** 2).sum()
    return minimize(objective, (1, 1, 1, 1)).x
def error(params, X, Y, Z):
    """Sum of squared residuals between the plane (a, b, c, d) and the surface -Z.

    Zero exactly when plane(X, Y, params) reproduces Z everywhere.
    """
    a, b, c, d = params
    residuals = (a * X + b * Y - d) / c + Z
    return (residuals ** 2).sum()
if __name__ == '__main__':
main() | import analysis, numpy as np, csv
from scipy.optimize import minimize
import matplotlib.pyplot as plt
def main():
X, Y, Z, W = analysis.generate_bias((-1, 1), (1, 2), 100)
mean_params = fit_plane(X, Y, Z)
sd_params = fit_plane(X, Y, W)
with open('plane_params.csv', 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(mean_params)
writer.writerow(sd_params)
#fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
#ax.plot_surface(X, Y, Z, alpha=0.5)
#ax.plot_surface(X, Y, plane(X, Y, mean_params), alpha=0.5)
#plt.show()
def plane(X, Y, params):
a, b, c, d = params
return -(a * X + b * Y - d) / c
def fit_plane(X, Y, Z):
return minimize(error, (1, 1, 1, 1), args=(X, Y, Z)).x
def error(params, X, Y, Z):
a, b, c, d = params
return (((a * X + b * Y - d) / c + Z) ** 2).sum()
if __name__ == '__main__':
main() | en | 0.24686 | #fig = plt.figure() #ax = fig.add_subplot(111, projection='3d') #ax.plot_surface(X, Y, Z, alpha=0.5) #ax.plot_surface(X, Y, plane(X, Y, mean_params), alpha=0.5) #plt.show() | 2.856724 | 3 |
integrationtest/vm/multihosts/ha/test_concur_expunge_ha_vms.py | bgerxx/woodpecker | 0 | 6615111 | <filename>integrationtest/vm/multihosts/ha/test_concur_expunge_ha_vms.py
'''
New Integration Test for concurrent expunge vm on ceph
@author: SyZhao
'''
import apibinding.inventory as inventory
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.ha_operations as ha_ops
import time
import os
import threading
import random
vm = None
vms = []
ts = []
vm_num = 20
exec_info = []
delay_policy1 = None
test_obj_dict = test_state.TestStateDict()
def create_vm_wrapper(i, vm_creation_option):
global vms, exec_info
try:
vm = test_vm_header.ZstackTestVm()
vm_creation_option.set_name("vm-%s" %(i))
vm.set_creation_option(vm_creation_option)
vm.create()
ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
vms.append(vm)
except:
exec_info.append("vm-%s" %(i))
def destroy_vm_wrapper(i, vm_uuid):
global exec_info
try:
vm_ops.destroy_vm(vm_uuid)
except:
exec_info.append("vm-%s" %(i))
def expunge_vm_wrapper(i, vm_uuid):
global vms, exec_info
try:
vm_ops.expunge_vm(vm_uuid)
except:
exec_info.append("vm-%s" %(i))
def check_exception(ops_string):
global exec_info
if exec_info:
issue_vms_string = ' '.join(exec_info)
test_util.test_fail("%s is failed to be %s." %(issue_vms_string, ops_string))
def test():
global vms, exec_info, delete_policy1
ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)[0]
if ps.type != inventory.CEPH_PRIMARY_STORAGE_TYPE:
test_util.test_skip('this test is for moniter expunge vm on ceph, not found ceph, skip test.')
delete_policy1 = test_lib.lib_set_delete_policy('vm', 'Delay')
image_name = os.environ.get('imageName_s')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
l3_name = os.environ.get('l3NoVlanNetworkName1')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
cpuNum = 1
memorySize = 268435456
name = 'vm-offering-allocator-strategy'
new_offering_option = test_util.InstanceOfferingOption()
new_offering_option.set_cpuNum(cpuNum)
new_offering_option.set_memorySize(memorySize)
new_offering_option.set_name(name)
new_offering = vm_ops.create_instance_offering(new_offering_option)
test_obj_dict.add_instance_offering(new_offering)
instance_offering_uuid = new_offering.uuid
each_vm_cpu_consume = cpuNum
vm_creation_option = test_util.VmOption()
vm_creation_option.set_l3_uuids([l3_net_uuid])
vm_creation_option.set_image_uuid(image_uuid)
vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
#trigger vm create
exec_info = []
ts = []
for i in range(vm_num):
t = threading.Thread(target=create_vm_wrapper, args=(i, vm_creation_option))
ts.append(t)
t.start()
for t in ts:
t.join()
check_exception("created")
#trigger vm destroy
exec_info = []
ts = []
for i,vm in zip(range(vm_num),vms):
t = threading.Thread(target=destroy_vm_wrapper, args=(i, vm.vm.uuid))
ts.append(t)
t.start()
for t in ts:
t.join()
check_exception("destroyed")
#trigger vm expunge
exec_info = []
ts = []
for i,vm in zip(range(vm_num),vms):
t = threading.Thread(target=expunge_vm_wrapper, args=(i, vm.vm.uuid))
ts.append(t)
t.start()
for t in ts:
t.join()
check_exception("expunged")
test_lib.lib_set_delete_policy('vm', delete_policy1)
test_util.test_pass('Create VM Test Success')
def error_cleanup():
global vms
global test_obj_dict
test_lib.lib_error_cleanup(test_obj_dict)
def env_recover():
global delete_policy1
test_lib.lib_set_delete_policy('vm', delete_policy1)
| <filename>integrationtest/vm/multihosts/ha/test_concur_expunge_ha_vms.py
'''
New Integration Test for concurrent expunge vm on ceph
@author: SyZhao
'''
import apibinding.inventory as inventory
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.ha_operations as ha_ops
import time
import os
import threading
import random
vm = None
vms = []
ts = []
vm_num = 20
exec_info = []
delay_policy1 = None
test_obj_dict = test_state.TestStateDict()
def create_vm_wrapper(i, vm_creation_option):
global vms, exec_info
try:
vm = test_vm_header.ZstackTestVm()
vm_creation_option.set_name("vm-%s" %(i))
vm.set_creation_option(vm_creation_option)
vm.create()
ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
vms.append(vm)
except:
exec_info.append("vm-%s" %(i))
def destroy_vm_wrapper(i, vm_uuid):
global exec_info
try:
vm_ops.destroy_vm(vm_uuid)
except:
exec_info.append("vm-%s" %(i))
def expunge_vm_wrapper(i, vm_uuid):
global vms, exec_info
try:
vm_ops.expunge_vm(vm_uuid)
except:
exec_info.append("vm-%s" %(i))
def check_exception(ops_string):
global exec_info
if exec_info:
issue_vms_string = ' '.join(exec_info)
test_util.test_fail("%s is failed to be %s." %(issue_vms_string, ops_string))
def test():
global vms, exec_info, delete_policy1
ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)[0]
if ps.type != inventory.CEPH_PRIMARY_STORAGE_TYPE:
test_util.test_skip('this test is for moniter expunge vm on ceph, not found ceph, skip test.')
delete_policy1 = test_lib.lib_set_delete_policy('vm', 'Delay')
image_name = os.environ.get('imageName_s')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
l3_name = os.environ.get('l3NoVlanNetworkName1')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
cpuNum = 1
memorySize = 268435456
name = 'vm-offering-allocator-strategy'
new_offering_option = test_util.InstanceOfferingOption()
new_offering_option.set_cpuNum(cpuNum)
new_offering_option.set_memorySize(memorySize)
new_offering_option.set_name(name)
new_offering = vm_ops.create_instance_offering(new_offering_option)
test_obj_dict.add_instance_offering(new_offering)
instance_offering_uuid = new_offering.uuid
each_vm_cpu_consume = cpuNum
vm_creation_option = test_util.VmOption()
vm_creation_option.set_l3_uuids([l3_net_uuid])
vm_creation_option.set_image_uuid(image_uuid)
vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
#trigger vm create
exec_info = []
ts = []
for i in range(vm_num):
t = threading.Thread(target=create_vm_wrapper, args=(i, vm_creation_option))
ts.append(t)
t.start()
for t in ts:
t.join()
check_exception("created")
#trigger vm destroy
exec_info = []
ts = []
for i,vm in zip(range(vm_num),vms):
t = threading.Thread(target=destroy_vm_wrapper, args=(i, vm.vm.uuid))
ts.append(t)
t.start()
for t in ts:
t.join()
check_exception("destroyed")
#trigger vm expunge
exec_info = []
ts = []
for i,vm in zip(range(vm_num),vms):
t = threading.Thread(target=expunge_vm_wrapper, args=(i, vm.vm.uuid))
ts.append(t)
t.start()
for t in ts:
t.join()
check_exception("expunged")
test_lib.lib_set_delete_policy('vm', delete_policy1)
test_util.test_pass('Create VM Test Success')
def error_cleanup():
global vms
global test_obj_dict
test_lib.lib_error_cleanup(test_obj_dict)
def env_recover():
global delete_policy1
test_lib.lib_set_delete_policy('vm', delete_policy1)
| en | 0.43469 | New Integration Test for concurrent expunge vm on ceph
@author: SyZhao #trigger vm create #trigger vm destroy #trigger vm expunge | 1.846309 | 2 |
src/linkedin/learner/utils/singleton.py | linkedin/lambda-learner | 34 | 6615112 | from typing import Dict
class Singleton(type):
    """Metaclass that guarantees at most one instance per class.

    Usage:
        class MySingletonClass(metaclass=Singleton):
            pass
    """

    # Shared cache mapping each class object to its single instance.
    _instances: Dict[object, object] = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            # First construction for this class: build and memoize it.
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]
| from typing import Dict
class Singleton(type):
"""A Metaclass for building singleton classes.
Usage:
class MySingletonClass(metaclass=Singleton):
pass
"""
_instances: Dict[object, object] = {}
def __call__(klass, *args, **kwargs):
if klass not in klass._instances:
klass._instances[klass] = super(Singleton, klass).__call__(*args, **kwargs)
return klass._instances[klass]
| en | 0.437858 | A Metaclass for building singleton classes. Usage: class MySingletonClass(metaclass=Singleton): pass | 3.481744 | 3 |
p03.2x/word_balance.py | LukeBriggsDev/GCSE-Code-Tasks | 0 | 6615113 | <filename>p03.2x/word_balance.py
"""
Problem:
The function 'balance' divides a piece of text into two equal halves.
For example: 'turtle' -> 'tur' and 'tle'
If words have an odd number of characters, it ignores the middle one:
For example: 'turtles' -> 'tur' and 'les'
    Each letter is given a score, A=1, B=2 and so on, and a total score
found for each half.
If the first half has a greater score, it should print "Start"
If the second half has a greater score, it should print "End"
If they are the same, it should print "Balanced"
Tests:
>>> balance("turtle")
Start
>>> balance("elephant")
End
>>> balance("gorilla")
Start
>>> balance("horse")
End
>>> balance("kayak")
Balanced
"""
import doctest
import math
def run_tests():
    """Execute the doctests embedded in this module's docstring, verbosely."""
    doctest.testmod(verbose=True)
def balance(word):
    """Print which half of *word* has the larger letter score.

    Each letter scores its 1-based alphabet position (A/a=1 ... Z/z=26);
    for odd-length words the middle character is ignored.  Prints "Start"
    if the first half outscores the second, "End" if the second wins, and
    "Balanced" on a tie.
    """
    # Fix: score case-insensitively.  The original subtracted ord('a') from
    # every character, so uppercase input produced negative garbage scores.
    letters = word.lower()
    first_half = letters[:len(letters) // 2]
    second_half = letters[math.ceil(len(letters) / 2):]
    first_score = sum(ord(ch) - ord('a') + 1 for ch in first_half)
    second_score = sum(ord(ch) - ord('a') + 1 for ch in second_half)
    if first_score > second_score:
        print("Start")
    elif first_score < second_score:
        print("End")
    else:
        print("Balanced")
if __name__ == "__main__":
run_tests() | <filename>p03.2x/word_balance.py
"""
Problem:
The function 'balance' divides a piece of text into two equal halves.
For example: 'turtle' -> 'tur' and 'tle'
If words have an odd number of characters, it ignores the middle one:
For example: 'turtles' -> 'tur' and 'les'
Each letter is given a scor
e, A=1, B=2 and so on, and a total score
found for each half.
If the first half has a greater score, it should print "Start"
If the second half has a greater score, it should print "End"
If they are the same, it should print "Balanced"
Tests:
>>> balance("turtle")
Start
>>> balance("elephant")
End
>>> balance("gorilla")
Start
>>> balance("horse")
End
>>> balance("kayak")
Balanced
"""
import doctest
import math
def run_tests():
doctest.testmod(verbose=True)
def balance(word):
first_half = word[:len(word)//2]
second_half = word[math.ceil(len(word)/2):]
first_score = sum([ord(chr) - ord('a') + 1 for chr in first_half])
second_score = sum([ord(chr) - ord('a') + 1 for chr in second_half])
print("Start" if first_score > second_score else "End" if first_score < second_score else "Balanced")
if __name__ == "__main__":
run_tests() | en | 0.803988 | Problem: The function 'balance' divides a piece of text into two equal halves. For example: 'turtle' -> 'tur' and 'tle' If words have an odd number of characters, it ignores the middle one: For example: 'turtles' -> 'tur' and 'les' Each letter is given a scor e, A=1, B=2 and so on, and a total score found for each half. If the first half has a greater score, it should print "Start" If the second half has a greater score, it should print "End" If they are the same, it should print "Balanced" Tests: >>> balance("turtle") Start >>> balance("elephant") End >>> balance("gorilla") Start >>> balance("horse") End >>> balance("kayak") Balanced | 4.460511 | 4 |
compiled/construct/docstrings.py | smarek/ci_targets | 4 | 6615114 | from construct import *
from construct.lib import *
# NOTE(review): this file appears machine-generated (path "compiled/construct/")
# from a serialization test spec — regenerate rather than hand-edit if possible.
# Empty subtype struct (no fields).
docstrings__complex_subtype = Struct(
)
# Top-level struct: 'one' is a byte at the current position, 'two' re-reads the
# byte at absolute offset 0 via Pointer, 'three' is the computed constant 66.
docstrings = Struct(
    'one' / Int8ub,
    'two' / Pointer(0, Int8ub),
    'three' / Computed(lambda this: 66),
)
# Schema entry point expected by the test harness.
_schema = docstrings
| from construct import *
from construct.lib import *
docstrings__complex_subtype = Struct(
)
docstrings = Struct(
'one' / Int8ub,
'two' / Pointer(0, Int8ub),
'three' / Computed(lambda this: 66),
)
_schema = docstrings
| none | 1 | 1.990531 | 2 | |
app/utils/constants.py | knightops/knightops | 2 | 6615115 | SUCCESS = '90000' #成功
PARAM_FAIL = '90001' # 请求参数缺失或格式不正确
ERR_MSG = {
SUCCESS: '成功',
PARAM_FAIL: '请求参数缺失或格式不正确'
} | SUCCESS = '90000' #成功
PARAM_FAIL = '90001' # 请求参数缺失或格式不正确
ERR_MSG = {
SUCCESS: '成功',
PARAM_FAIL: '请求参数缺失或格式不正确'
} | zh | 0.992504 | #成功 # 请求参数缺失或格式不正确 | 1.078124 | 1 |
run/sphinx.py | adabru/speech | 0 | 6615116 | #!/usr/bin/python
# local
from unix_socket import UnixSocket
from logger import logger
# Outbound channels: key injection (currently unused — see sendKeyCode, where
# the send is commented out) and the GUI status display.
sock_keyboard = UnixSocket("/tmp/evdev_keypress.sock", 100)
sock_gui = UnixSocket("/tmp/speech_gui.sock", 100)
# Mutable module-level engine state.
modifiers = set()  # names of currently active modifiers ("shift", "ctrl", ...)
hold_modifiers = False  # when True, modifiers persist across key sends
pause_engine = False  # when True, recognized words are ignored (except CONTINUE)
confirm_function = None  # pending action awaiting a YES/NO confirmation
# direct communicators
def sendGuiState(key):
    """Push the current engine/modifier state plus *key* to the GUI socket.

    Wire format: six '0'/'1' flags (pause, hold, shift, ctrl, alt, win)
    immediately followed by the key/label text.
    """
    flags = (
        pause_engine,
        hold_modifiers,
        "shift" in modifiers,
        "ctrl" in modifiers,
        "alt" in modifiers,
        "win" in modifiers,
    )
    bits = "".join("1" if flag else "0" for flag in flags)
    sock_gui.try_send(f"{bits}{key}")
# keys and modifiers
def sendKeyCode(keyboardCode):
    """Report a key press (with active modifiers) and clear one-shot modifiers."""
    global hold_modifiers
    prefix = ""
    for mod in modifiers:
        prefix = f"{prefix}{mod}+"
    # Actual key injection is currently disabled:
    # sock_keyboard.try_send(f"{prefix}{keyboardCode}")
    # logger.info(f"hold_modifiers: {hold_modifiers}")
    if not hold_modifiers:
        # One-shot modifiers: clear them without a separate GUI "release" event.
        releaseModifiers(False)
    sendGuiState(keyboardCode)
def activateModifier(modifier):
    """Add *modifier* to the active set and notify the GUI."""
    modifiers.add(modifier)
    logger.info(f"mods: {'+'.join(modifiers)}")
    sendGuiState(modifier)
def holdModifiers():
    """Make the active modifiers persist across subsequent key sends."""
    global hold_modifiers
    hold_modifiers = True
    logger.info(f"hold modifiers - mods: {'+'.join(modifiers)}")
    sendGuiState("hold")
def releaseModifiers(send=True):
    """Clear all modifiers and the hold flag; optionally notify the GUI."""
    global hold_modifiers
    modifiers.clear()
    hold_modifiers = False
    if send:
        sendGuiState("release")
#
def pauseEngine():
    """Stop acting on recognized words until CONTINUE is heard."""
    global pause_engine
    pause_engine = True
    logger.info("pause engine")
    sendGuiState("pause")
def continueEngine():
    """Resume acting on recognized words."""
    global pause_engine
    pause_engine = False
    logger.info("continue engine")
    sendGuiState("continue")
#
def noop(phone="noop"):
    """Deliberately do nothing (placeholder command handler)."""
    ()
def getPermissionFor(f, infoText):
    """Stash *f* as a pending action and switch to the YES/NO confirmation LM."""
    global confirm_function
    confirm_function = f
    sendGuiState(infoText)
    switchLm("confirmation")
#
def poweroff():
    """Placeholder for system shutdown (real command intentionally disabled)."""
    noop()
    # cmdCommand = "shutdown -h now"
    # process = subprocess.Popen(cmdCommand.split(), stdout=subprocess.PIPE)
# Spoken word -> key name understood by the key-injection layer.
# Entries commented out below were evidently disabled by the author.
keyCodes = {
    "A": "a",
    "B": "b",
    "C": "c",
    "D": "d",
    "E": "e",
    "F": "f",
    "G": "g",
    "J": "j",
    "H": "h",
    "I": "i",
    "K": "k",
    "L": "l",
    "M": "m",
    "N": "n",
    "O": "o",
    "P": "p",
    "Q": "q",
    "R": "r",
    "S": "s",
    "T": "t",
    "U": "u",
    "V": "v",
    "W": "w",
    "X": "x",
    "Y": "y",
    "Z": "z",
    "ZERO": "0",
    "NULL": "0",
    "1": "1",
    "2": "2",
    "3": "3",
    "4": "4",
    "5": "5",
    "6": "6",
    "7": "7",
    "8": "8",
    "9": "9",
    "ROUND_LEFT": "(",
    "ROUND_RIGHT": ")",
    "TAG_LEFT": "<",
    "TAG_RIGHT": ">",
    "SQUARE_LEFT": "[",
    "SQUARE_RIGHT": "]",
    "ESCAPE": "escape",
    "UP": "up",
    "DOWN": "down",
    "LEFT": "left",
    "RIGHT": "right",
    "PAGE_UP": "pgup",
    "PAGE_DOWN": "pgdown",
    "START": "home",
    "END": "end",
    "SPACE": "space",
    "ENTER": "enter",
    "TAB": "tab",
    "DELETE": "del",
    "BACKSPACE": "backspace",
    # "SAY <TEXT>": Text("%(text)s",
    # "F ONE": "f1",
    # "F TWO": "f2",
    # "F THREE": "f3",
    # "F FOUR": "f4",
    # "F FIVE": "f5",
    # "F SIX": "f6",
    # "F SEVEN": "f7",
    # "F EIGHT": "f8",
    # "F NINE": "f9",
    # "F TEN": "f10",
    # "F ELEVEN": "f11",
    # "F TWELVE": "f12",
    # "DEGREE": "°",
    "CARET": "^",
    "CURLY_LEFT": "{",
    "CURLY_RIGHT": "}",
    "QUESTION": "?",
    "EXCLAMATION": "!",
    "BACK_TICK": "`",
    "PIPE": "|",
    "MINUS": "minus",
    "DOT": ".",
    "COMMA": "comma",
    "BACKSLASH": "\\",
    "UNDERSCORE": "_",
    "STAR": "*",
    "COLON": "colon",
    "SEMICOLON": ";",
    "AT": "@",
    "DOUBLE_QUOTE": '"',
    "SINGLE_QUOTE": "'",
    "HASH": "hash",
    "DOLLAR": "$",
    "EURO": "euro",
    "PERCENT": "percent",
    "SLASH": "slash",
    "AMPERSAND": "&",
    "EQUAL": "=",
    "PLUS": "+",
    "COMPOSE": "menu",
}
# Dispatch table: spoken word -> zero-arg handler.
mapping = {}
# keyValue=keyValue binds the current loop value as a default argument,
# avoiding Python's late-binding-closure pitfall.
for (key, keyValue) in keyCodes.items():
    mapping[key] = lambda keyValue=keyValue: sendKeyCode(keyValue)
mapping["WIN"] = lambda: activateModifier("win")
mapping["ALT"] = lambda: activateModifier("alt")
mapping["CONTROL"] = lambda: activateModifier("ctrl")
mapping["SHIFT"] = lambda: activateModifier("shift")
# Clipboard commands are not implemented yet.
mapping["CUT"] = lambda: ()
mapping["COPY"] = lambda: ()
mapping["PASTE"] = lambda: ()
mapping["HOLD"] = lambda: holdModifiers()
mapping["RELEASE"] = lambda: releaseModifiers()
mapping["PAUSE"] = lambda: pauseEngine()
mapping["CONTINUE"] = lambda: continueEngine()
#
mapping["POWEROFF"] = lambda: getPermissionFor(poweroff, "poweroff system? [Yes|No]")
# noop
mapping["YES"] = lambda: noop()
mapping["NO"] = lambda: noop()
import os, sys
# https://github.com/bambocher/pocketsphinx-python
from pocketsphinx import LiveSpeech
# Acoustic/language model selection; alternatives kept for experimentation.
# modelName = 'cmusphinx-voxforge-de-5.2'
# modelName = 'an4_sphere/an4'
modelName = "own"
# modelName = 'confirmation'
modelPath = f"/home/adabru/repo/speech_commands/sphinx/{modelName}"
# Name of the language model currently in effect (switched by switchLm).
current_lm = "own"
# NOTE(review): `global` at module scope is a no-op — this line can be removed.
global speech
# With a file argument, decode that audio file instead of the live microphone.
if len(sys.argv) > 1:
    import os
    from pocketsphinx import AudioFile
    config = {
        "verbose": False,
        "audio_file": sys.argv[1],
        "buffer_size": 2048,
        "no_search": False,
        "full_utt": False,
        "hmm": os.path.join(modelPath, f"model_parameters/{modelName}.ci_cont"),
        "lm": os.path.join(modelPath, f"etc/{modelName}.lm"),
        "dic": os.path.join(modelPath, f"etc/{modelName}.dic"),
    }
    speech = AudioFile(**config)
def getSpeech(lm_name):
    """Build a pocketsphinx LiveSpeech recognizer using language model *lm_name*.

    The acoustic model and dictionary are fixed by `modelName`; only the .lm
    file varies per call.  The penalty values below are hand-tuned; disabled
    options are kept as comments for future experiments.
    """
    # https://github.com/cmusphinx/pocketsphinx/blob/master/include/cmdln_macro.h
    # https://github.com/cmusphinx/sphinxbase/blob/master/include/sphinxbase/fe.h
    return LiveSpeech(
        verbose=False,
        sampling_rate=16000,
        buffer_size=2048,
        no_search=False,
        full_utt=False,
        pip=1000.0,  # Phone insertion penalty
        wip=1000.0,  # Word insertion penalty
        nwpen=1000.0,  # New word transition penalty
        # pbeam=1e-10,  # Beam width applied to phone transitions
        # fwdtree=False,  # Run forward lexicon-tree search (1st pass)
        # fwdflat=False,  # Run forward flat-lexicon search over word lattice (2nd pass)
        # logfn="/home/adabru/repo/speech_commands/sphinx/own/logdir/test.txt",
        # senlogdir="/home/adabru/repo/speech_commands/sphinx/own/logdir",
        # backtrace=True,
        hmm=os.path.join(modelPath, f"model_parameters/{modelName}.ci_cont"),
        lm=os.path.join(modelPath, f"etc/{lm_name}.lm"),
        dic=os.path.join(modelPath, f"etc/{modelName}.dic"),
    )
def switchLm(lm_name):
    """Select the language model to use from the next recognizer restart on."""
    global current_lm
    current_lm = lm_name
def getSegment():
    """Yield recognized word segments forever, restarting the recognizer
    whenever `current_lm` changes mid-stream (e.g. after a confirmation
    prompt switches the language model)."""
    while True:
        last_lm = current_lm
        for phrase in getSpeech(current_lm):
            segments = phrase.seg()
            # Log the full hypothesis with per-word probabilities.
            findings = ""
            for s in segments:
                findings += f"{s.word} {s.prob} "
            logger.debug(findings)
            for s in segments:
                yield s
            # A consumer may have called switchLm() while handling a segment;
            # tear down this recognizer and rebuild with the new LM.
            if last_lm != current_lm:
                break
logger.debug("entering loop")
for s in getSegment():
if s.word == "<s>" or s.word == "</s>" or s.word == "<sil>":
continue
if not pause_engine or s.word == "CONTINUE":
if confirm_function:
if s.word == "YES":
confirm_function()
confirm_function = None
switchLm("keyboard")
sendGuiState(s.word)
elif s.word == "NO":
confirm_function = None
switchLm("keyboard")
sendGuiState(s.word)
# else:
# mapping[s.word]()
| #!/usr/bin/python
# local
from unix_socket import UnixSocket
from logger import logger
sock_keyboard = UnixSocket("/tmp/evdev_keypress.sock", 100)
sock_gui = UnixSocket("/tmp/speech_gui.sock", 100)
modifiers = set()
hold_modifiers = False
pause_engine = False
confirm_function = None
# direct communicators
def sendGuiState(key):
msg = "%s%s%s%s%s%s%s" % (
"1" if pause_engine else "0",
"1" if hold_modifiers else "0",
"1" if "shift" in modifiers else "0",
"1" if "ctrl" in modifiers else "0",
"1" if "alt" in modifiers else "0",
"1" if "win" in modifiers else "0",
key,
)
sock_gui.try_send(msg)
# keys and modifiers
def sendKeyCode(keyboardCode):
global hold_modifiers
mods = ""
for mod in modifiers:
mods += f"{mod}+"
# sock_keyboard.try_send(f"{mods}{keyboardCode}")
# logger.info(f"hold_modifiers: {hold_modifiers}")
if not hold_modifiers:
releaseModifiers(False)
sendGuiState(keyboardCode)
def activateModifier(modifier):
modifiers.add(modifier)
logger.info(f"mods: {'+'.join(modifiers)}")
sendGuiState(modifier)
def holdModifiers():
global hold_modifiers
hold_modifiers = True
logger.info(f"hold modifiers - mods: {'+'.join(modifiers)}")
sendGuiState("hold")
def releaseModifiers(send=True):
global hold_modifiers
modifiers.clear()
hold_modifiers = False
if send:
sendGuiState("release")
#
def pauseEngine():
global pause_engine
pause_engine = True
logger.info("pause engine")
sendGuiState("pause")
def continueEngine():
global pause_engine
pause_engine = False
logger.info("continue engine")
sendGuiState("continue")
#
def noop(phone="noop"):
()
def getPermissionFor(f, infoText):
global confirm_function
confirm_function = f
sendGuiState(infoText)
switchLm("confirmation")
#
def poweroff():
noop()
# cmdCommand = "shutdown -h now"
# process = subprocess.Popen(cmdCommand.split(), stdout=subprocess.PIPE)
keyCodes = {
"A": "a",
"B": "b",
"C": "c",
"D": "d",
"E": "e",
"F": "f",
"G": "g",
"J": "j",
"H": "h",
"I": "i",
"K": "k",
"L": "l",
"M": "m",
"N": "n",
"O": "o",
"P": "p",
"Q": "q",
"R": "r",
"S": "s",
"T": "t",
"U": "u",
"V": "v",
"W": "w",
"X": "x",
"Y": "y",
"Z": "z",
"ZERO": "0",
"NULL": "0",
"1": "1",
"2": "2",
"3": "3",
"4": "4",
"5": "5",
"6": "6",
"7": "7",
"8": "8",
"9": "9",
"ROUND_LEFT": "(",
"ROUND_RIGHT": ")",
"TAG_LEFT": "<",
"TAG_RIGHT": ">",
"SQUARE_LEFT": "[",
"SQUARE_RIGHT": "]",
"ESCAPE": "escape",
"UP": "up",
"DOWN": "down",
"LEFT": "left",
"RIGHT": "right",
"PAGE_UP": "pgup",
"PAGE_DOWN": "pgdown",
"START": "home",
"END": "end",
"SPACE": "space",
"ENTER": "enter",
"TAB": "tab",
"DELETE": "del",
"BACKSPACE": "backspace",
# "SAY <TEXT>": Text("%(text)s",
# "F ONE": "f1",
# "F TWO": "f2",
# "F THREE": "f3",
# "F FOUR": "f4",
# "F FIVE": "f5",
# "F SIX": "f6",
# "F SEVEN": "f7",
# "F EIGHT": "f8",
# "F NINE": "f9",
# "F TEN": "f10",
# "F ELEVEN": "f11",
# "F TWELVE": "f12",
# "DEGREE": "°",
"CARET": "^",
"CURLY_LEFT": "{",
"CURLY_RIGHT": "}",
"QUESTION": "?",
"EXCLAMATION": "!",
"BACK_TICK": "`",
"PIPE": "|",
"MINUS": "minus",
"DOT": ".",
"COMMA": "comma",
"BACKSLASH": "\\",
"UNDERSCORE": "_",
"STAR": "*",
"COLON": "colon",
"SEMICOLON": ";",
"AT": "@",
"DOUBLE_QUOTE": '"',
"SINGLE_QUOTE": "'",
"HASH": "hash",
"DOLLAR": "$",
"EURO": "euro",
"PERCENT": "percent",
"SLASH": "slash",
"AMPERSAND": "&",
"EQUAL": "=",
"PLUS": "+",
"COMPOSE": "menu",
}
mapping = {}
for (key, keyValue) in keyCodes.items():
mapping[key] = lambda keyValue=keyValue: sendKeyCode(keyValue)
mapping["WIN"] = lambda: activateModifier("win")
mapping["ALT"] = lambda: activateModifier("alt")
mapping["CONTROL"] = lambda: activateModifier("ctrl")
mapping["SHIFT"] = lambda: activateModifier("shift")
mapping["CUT"] = lambda: ()
mapping["COPY"] = lambda: ()
mapping["PASTE"] = lambda: ()
mapping["HOLD"] = lambda: holdModifiers()
mapping["RELEASE"] = lambda: releaseModifiers()
mapping["PAUSE"] = lambda: pauseEngine()
mapping["CONTINUE"] = lambda: continueEngine()
#
mapping["POWEROFF"] = lambda: getPermissionFor(poweroff, "poweroff system? [Yes|No]")
# noop
mapping["YES"] = lambda: noop()
mapping["NO"] = lambda: noop()
import os, sys
# https://github.com/bambocher/pocketsphinx-python
from pocketsphinx import LiveSpeech
# modelName = 'cmusphinx-voxforge-de-5.2'
# modelName = 'an4_sphere/an4'
modelName = "own"
# modelName = 'confirmation'
modelPath = f"/home/adabru/repo/speech_commands/sphinx/{modelName}"
current_lm = "own"
global speech
if len(sys.argv) > 1:
import os
from pocketsphinx import AudioFile
config = {
"verbose": False,
"audio_file": sys.argv[1],
"buffer_size": 2048,
"no_search": False,
"full_utt": False,
"hmm": os.path.join(modelPath, f"model_parameters/{modelName}.ci_cont"),
"lm": os.path.join(modelPath, f"etc/{modelName}.lm"),
"dic": os.path.join(modelPath, f"etc/{modelName}.dic"),
}
speech = AudioFile(**config)
def getSpeech(lm_name):
# https://github.com/cmusphinx/pocketsphinx/blob/master/include/cmdln_macro.h
# https://github.com/cmusphinx/sphinxbase/blob/master/include/sphinxbase/fe.h
return LiveSpeech(
verbose=False,
sampling_rate=16000,
buffer_size=2048,
no_search=False,
full_utt=False,
pip=1000.0, # Phone insertion penalty
wip=1000.0, # Word insertion penalty
nwpen=1000.0, # New word transition penalty
# pbeam=1e-10, # Beam width applied to phone transitions
# fwdtree=False, # Run forward lexicon-tree search (1st pass)
# fwdflat=False, # Run forward flat-lexicon search over word lattice (2nd pass)
# logfn="/home/adabru/repo/speech_commands/sphinx/own/logdir/test.txt",
# senlogdir="/home/adabru/repo/speech_commands/sphinx/own/logdir",
# backtrace=True,
hmm=os.path.join(modelPath, f"model_parameters/{modelName}.ci_cont"),
lm=os.path.join(modelPath, f"etc/{lm_name}.lm"),
dic=os.path.join(modelPath, f"etc/{modelName}.dic"),
)
def switchLm(lm_name):
global current_lm
current_lm = lm_name
def getSegment():
while True:
last_lm = current_lm
for phrase in getSpeech(current_lm):
segments = phrase.seg()
findings = ""
for s in segments:
findings += f"{s.word} {s.prob} "
logger.debug(findings)
for s in segments:
yield s
if last_lm != current_lm:
break
logger.debug("entering loop")
for s in getSegment():
if s.word == "<s>" or s.word == "</s>" or s.word == "<sil>":
continue
if not pause_engine or s.word == "CONTINUE":
if confirm_function:
if s.word == "YES":
confirm_function()
confirm_function = None
switchLm("keyboard")
sendGuiState(s.word)
elif s.word == "NO":
confirm_function = None
switchLm("keyboard")
sendGuiState(s.word)
# else:
# mapping[s.word]()
| en | 0.559499 | #!/usr/bin/python # local # direct communicators # keys and modifiers # sock_keyboard.try_send(f"{mods}{keyboardCode}") # logger.info(f"hold_modifiers: {hold_modifiers}") # # # # cmdCommand = "shutdown -h now" # process = subprocess.Popen(cmdCommand.split(), stdout=subprocess.PIPE) # "SAY <TEXT>": Text("%(text)s", # "F ONE": "f1", # "F TWO": "f2", # "F THREE": "f3", # "F FOUR": "f4", # "F FIVE": "f5", # "F SIX": "f6", # "F SEVEN": "f7", # "F EIGHT": "f8", # "F NINE": "f9", # "F TEN": "f10", # "F ELEVEN": "f11", # "F TWELVE": "f12", # "DEGREE": "°", # # noop # https://github.com/bambocher/pocketsphinx-python # modelName = 'cmusphinx-voxforge-de-5.2' # modelName = 'an4_sphere/an4' # modelName = 'confirmation' # https://github.com/cmusphinx/pocketsphinx/blob/master/include/cmdln_macro.h # https://github.com/cmusphinx/sphinxbase/blob/master/include/sphinxbase/fe.h # Phone insertion penalty # Word insertion penalty # New word transition penalty # pbeam=1e-10, # Beam width applied to phone transitions # fwdtree=False, # Run forward lexicon-tree search (1st pass) # fwdflat=False, # Run forward flat-lexicon search over word lattice (2nd pass) # logfn="/home/adabru/repo/speech_commands/sphinx/own/logdir/test.txt", # senlogdir="/home/adabru/repo/speech_commands/sphinx/own/logdir", # backtrace=True, # else: # mapping[s.word]() | 2.641114 | 3 |
utils.py | marshuang80/DeepDream | 0 | 6615117 | <reponame>marshuang80/DeepDream<gh_stars>0
import torch
import torch.nn as nn
class L2Loss(nn.Module):
"""Loss function for DeepDream is defined as the L2 norm of a predfined layer"""
def __init__(self):
super(L2Loss, self).__init__()
def forward(self, x):
return torch.norm(x, p='fro')
class Normalize(nn.Module):
"""Normalize each image with the mean and std of the VGG network
Images have to be normalized with this mean and std to be compatible
with the learned parameters of the pretrained network
Extends:
nn.Module
"""
def __init__(self, device):
super(Normalize, self).__init__()
# vgg mean and std
self.mean = torch.tensor([0.485, 0.456, 0.406]).view(-1, 1, 1).float().to(device)
self.std = torch.tensor([0.229, 0.224, 0.225]).view(-1, 1, 1).float().to(device)
def forward(self, img):
return (img - self.mean) / self.std
def build_model(pretrained, optim_layer, device):
"""Build the DeepDream model by sequentially adding pretrained model layers
Parameters
----------
pretrained: torchvisions.models
pretrained CNN model
optim_layer: int
index of the CNN layer to optimize for deepdream
device: torch.device
GPU (with number) or CPU
"""
model = nn.Sequential()
count = 0
# normalize image with vgg mean and std
model.add_module('norm', Normalize(device))
# rebuild model until 'optim_layer'
for i, l in enumerate(pretrained.children()):
model.add_module('{}'.format(i), l)
# count CNN layers
if isinstance(l, nn.Conv2d):
if count == optim_layer:
print("break")
break
count += 1
return model
def process_tensor(input_img, device):
"""convert numpy array to tensor and send to device
Parameters
----------
input_img: np.array
numpy array containing image
device: torch.device
GPU (with number) or CPU
"""
input_img = torch.tensor(input_img).unsqueeze(0)
input_img = input_img.type(torch.FloatTensor)
input_img = input_img.to(device)
input_img.requires_grad = True
return input_img
| import torch
import torch.nn as nn
class L2Loss(nn.Module):
"""Loss function for DeepDream is defined as the L2 norm of a predfined layer"""
def __init__(self):
super(L2Loss, self).__init__()
def forward(self, x):
return torch.norm(x, p='fro')
class Normalize(nn.Module):
"""Normalize each image with the mean and std of the VGG network
Images have to be normalized with this mean and std to be compatible
with the learned parameters of the pretrained network
Extends:
nn.Module
"""
def __init__(self, device):
super(Normalize, self).__init__()
# vgg mean and std
self.mean = torch.tensor([0.485, 0.456, 0.406]).view(-1, 1, 1).float().to(device)
self.std = torch.tensor([0.229, 0.224, 0.225]).view(-1, 1, 1).float().to(device)
def forward(self, img):
return (img - self.mean) / self.std
def build_model(pretrained, optim_layer, device):
"""Build the DeepDream model by sequentially adding pretrained model layers
Parameters
----------
pretrained: torchvisions.models
pretrained CNN model
optim_layer: int
index of the CNN layer to optimize for deepdream
device: torch.device
GPU (with number) or CPU
"""
model = nn.Sequential()
count = 0
# normalize image with vgg mean and std
model.add_module('norm', Normalize(device))
# rebuild model until 'optim_layer'
for i, l in enumerate(pretrained.children()):
model.add_module('{}'.format(i), l)
# count CNN layers
if isinstance(l, nn.Conv2d):
if count == optim_layer:
print("break")
break
count += 1
return model
def process_tensor(input_img, device):
"""convert numpy array to tensor and send to device
Parameters
----------
input_img: np.array
numpy array containing image
device: torch.device
GPU (with number) or CPU
"""
input_img = torch.tensor(input_img).unsqueeze(0)
input_img = input_img.type(torch.FloatTensor)
input_img = input_img.to(device)
input_img.requires_grad = True
return input_img | en | 0.779275 | Loss function for DeepDream is defined as the L2 norm of a predfined layer Normalize each image with the mean and std of the VGG network Images have to be normalized with this mean and std to be compatible with the learned parameters of the pretrained network Extends: nn.Module # vgg mean and std Build the DeepDream model by sequentially adding pretrained model layers Parameters ---------- pretrained: torchvisions.models pretrained CNN model optim_layer: int index of the CNN layer to optimize for deepdream device: torch.device GPU (with number) or CPU # normalize image with vgg mean and std # rebuild model until 'optim_layer' # count CNN layers convert numpy array to tensor and send to device Parameters ---------- input_img: np.array numpy array containing image device: torch.device GPU (with number) or CPU | 3.077854 | 3 |
oldnumba/tests/builtins/test_builtin_pow.py | meawoppl/numba | 1 | 6615118 | <gh_stars>1-10
"""
>>> pow3(2,3,5)
3
>>> pow3(3,3,5)
2
>>> pow3_const()
3
>>> pow2(2,3)
8
>>> pow2(3,3)
27
>>> pow2(3.0,3)
27.0
>>> pow2(3,3.0)
27.0
>>> pow2(3.0,3.0)
27.0
>>> pow2(1.5, 2)
2.25
>>> pow2(1.5, 1.5) == pow(1.5, 1.5)
True
>>> pow_op(3,3)
27
>>> pow_op(3.0,3)
27.0
>>> pow_op(3,3.0)
27.0
>>> pow_op(3.0,3.0)
27.0
>>> pow_op(1.5, 2)
2.25
>>> pow_op(1.5, 1.5) == pow(1.5, 1.5)
True
>>> pow2_const()
8
>>> c1, c2 = 1.2 + 4.1j, 0.6 + 0.5j
>>> allclose(pow2(c1, c2), pow(c1, c2))
True
>>> d1, d2 = 4.2, 5.1
>>> allclose(pow2(d1, d2), pow(d1, d2))
True
"""
from numba import autojit
from numpy import allclose
@autojit
def pow3(a,b,c):
return pow(a,b,c)
@autojit
def pow3_const():
return pow(2,3,5)
@autojit(nopython=True)
def pow2(a,b):
return pow(a,b)
@autojit(nopython=True)
def pow_op(a,b):
return a**b
@autojit(nopython=True)
def pow2_const():
return pow(2,3)
if __name__ == '__main__':
import numba
numba.testing.testmod() | """
>>> pow3(2,3,5)
3
>>> pow3(3,3,5)
2
>>> pow3_const()
3
>>> pow2(2,3)
8
>>> pow2(3,3)
27
>>> pow2(3.0,3)
27.0
>>> pow2(3,3.0)
27.0
>>> pow2(3.0,3.0)
27.0
>>> pow2(1.5, 2)
2.25
>>> pow2(1.5, 1.5) == pow(1.5, 1.5)
True
>>> pow_op(3,3)
27
>>> pow_op(3.0,3)
27.0
>>> pow_op(3,3.0)
27.0
>>> pow_op(3.0,3.0)
27.0
>>> pow_op(1.5, 2)
2.25
>>> pow_op(1.5, 1.5) == pow(1.5, 1.5)
True
>>> pow2_const()
8
>>> c1, c2 = 1.2 + 4.1j, 0.6 + 0.5j
>>> allclose(pow2(c1, c2), pow(c1, c2))
True
>>> d1, d2 = 4.2, 5.1
>>> allclose(pow2(d1, d2), pow(d1, d2))
True
"""
from numba import autojit
from numpy import allclose
@autojit
def pow3(a,b,c):
return pow(a,b,c)
@autojit
def pow3_const():
return pow(2,3,5)
@autojit(nopython=True)
def pow2(a,b):
return pow(a,b)
@autojit(nopython=True)
def pow_op(a,b):
return a**b
@autojit(nopython=True)
def pow2_const():
return pow(2,3)
if __name__ == '__main__':
import numba
numba.testing.testmod() | en | 0.273357 | >>> pow3(2,3,5) 3 >>> pow3(3,3,5) 2 >>> pow3_const() 3 >>> pow2(2,3) 8 >>> pow2(3,3) 27 >>> pow2(3.0,3) 27.0 >>> pow2(3,3.0) 27.0 >>> pow2(3.0,3.0) 27.0 >>> pow2(1.5, 2) 2.25 >>> pow2(1.5, 1.5) == pow(1.5, 1.5) True >>> pow_op(3,3) 27 >>> pow_op(3.0,3) 27.0 >>> pow_op(3,3.0) 27.0 >>> pow_op(3.0,3.0) 27.0 >>> pow_op(1.5, 2) 2.25 >>> pow_op(1.5, 1.5) == pow(1.5, 1.5) True >>> pow2_const() 8 >>> c1, c2 = 1.2 + 4.1j, 0.6 + 0.5j >>> allclose(pow2(c1, c2), pow(c1, c2)) True >>> d1, d2 = 4.2, 5.1 >>> allclose(pow2(d1, d2), pow(d1, d2)) True | 3.291166 | 3 |
main.py | the-sink/tinyserial | 0 | 6615119 | import serial
import serial.tools.list_ports
import tkinter as tk
import tkinter.ttk as ttk
from tkinter.simpledialog import askinteger,askstring
import threading
import pygubu
import configparser
import time
import sys
import os
# Set up configparser and read settings (if it exists)
config = configparser.ConfigParser()
config.read('settings.ini')
config.sections()
settings = {'theme': 'clam'}
for setting in settings.keys():
if config.has_option('DEFAULT', setting):
settings[setting] = config['DEFAULT'][setting]
else:
config['DEFAULT'][setting] = settings[setting]
# Declare serial terminal class
class SerialTerminal:
def __init__(self):
# list of common baud rates
self.available_baud_rates = [300, 600, 1200, 2400, 4800, 9600, 14400, 19200, 28800, 31250, 57600, 115200]
# generate ui using pygubu
builder = pygubu.Builder()
builder.add_from_file('serial_terminal.ui')
# get required ui elements
self.main_window = builder.get_object('main_window')
self.menu_bar = builder.get_object('menu_bar')
self.file_menu = builder.get_object('file_menu')
self.serial_menu = builder.get_object('serial_menu')
self.window_menu = builder.get_object('window_menu')
self.baud_rate_menu = builder.get_object('baud_rate')
self.input = builder.get_object('input')
self.output = builder.get_object('output')
self.scroll_bar = builder.get_object('output_scroll')
self.send_button = builder.get_object('send_button')
self.serial_port_list = builder.get_object('serial_port')
# set up ui theme
self.style = ttk.Style(self.main_window)
self.style.theme_use(settings['theme'])
# build required variables
self.baud_rate = tk.IntVar(self.main_window, 9600)
self.serial_port = tk.StringVar(self.main_window)
self.clear_output_on_send = tk.BooleanVar(self.main_window, True)
self.autoscroll = tk.BooleanVar(self.main_window, True)
self.theme = tk.StringVar(self.main_window, self.style.theme_use())
# initialize empty serial connection
self.serial = serial.Serial()
self.connected = False
# build runtime ui elements
self.file_menu.add_command(label='Save output to file...', command=self.save_to_file)
self.file_menu.add_separator()
self.file_menu.add_command(label='Quit', command=self.shutdown)
self.window_menu.add_checkbutton(label='Clear Input on Send', var=self.clear_output_on_send)
self.window_menu.add_checkbutton(label='Autoscroll', var=self.autoscroll)
self.window_menu.add_separator()
self.window_menu.add_command(label='Clear Output', command=self.clear_output_text)
self.baud_rate_menu.add_command(label='Custom', command=self.ask_custom_baud_rate)
self.baud_rate_menu.add_separator()
for speed in self.available_baud_rates:
self.baud_rate_menu.add_radiobutton(label=str(speed), var=self.baud_rate, value=speed)
self.update_port_list()
for theme_name in self.style.theme_names():
builder.get_object('theme_menu').add_radiobutton(label=theme_name, var=self.theme, value=theme_name, command=self.change_theme)
self.serial_menu.add_command(label='Connect', command=self.connect_button)
# hook callbacks defined using pygubu to class functions
builder.connect_callbacks(self)
# link text box and scrollbar
self.output['yscrollcommand'] = self.scroll_bar.set
self.scroll_bar.command = self.output.yview
# set window icon and hook close button to shutdown function
self.main_window.iconphoto(True, tk.PhotoImage(file='resources/window-icon.png'))
self.main_window.protocol("WM_DELETE_WINDOW", self.shutdown)
# displays a prompt to input a custom baud rate
def ask_custom_baud_rate(self):
prompt = askinteger('Custom', 'Input a custom baud rate:')
if prompt:
self.baud_rate.set(prompt)
# updates list of available ports in the Serial menu
def update_port_list(self):
self.serial_port_list.delete(0, tk.END)
self.serial_port_list.add_command(label='Refresh', command=self.update_port_list)
self.serial_port_list.add_separator()
for port in serial.tools.list_ports.comports():
self.serial_port_list.add_radiobutton(label=port.description, var=self.serial_port, value=port.device)
# writes text to the output box
def write_to_output(self, stri):
self.output.config(state=tk.NORMAL)
self.output.insert(tk.END, stri)
self.output.config(state=tk.DISABLED)
if self.autoscroll.get():
self.output.see(tk.END)
# reads data from pySerial (should be run in a thread)
def read_serial(self):
while self.connected:
try:
if (self.serial.inWaiting() > 0):
data_str = self.serial.read(self.serial.inWaiting()).decode('utf8')
self.write_to_output(data_str)
except serial.serialutil.SerialException:
self.connected = False
self.serial_connection_change()
break
time.sleep(0.01)
# writes data to the connected serial port
def write_serial(self):
if self.connected:
self.serial.write(str.encode(self.input.get()))
if len(self.output.get('1.0', tk.END)) > 1:
self.write_to_output('\n')
self.write_to_output('> ' + self.input.get() + '\n')
if self.clear_output_on_send.get():
self.input.delete(0, tk.END)
# enables or disables the serial connection
def serial_connection_change(self):
if self.connected:
self.serial = serial.Serial(self.serial_port.get(), self.baud_rate.get())
thread = threading.Thread(target=self.read_serial)
thread.start()
else:
self.serial.close()
try:
self.menu_bar.entryconfigure(3, label=self.connected and 'Connected' or 'Disconnected')
self.serial_menu.entryconfigure(3, label=self.connected and 'Disconnect' or 'Connect')
self.send_button['state'] = self.connected and tk.NORMAL or tk.DISABLED
except RuntimeError: # occurs when terminal is being closed
pass
# handles the connect button in the serial menu
def connect_button(self):
if len(self.serial_port.get()) > 0:
self.connected = not self.connected
self.serial_connection_change()
# clears all text in the output box
def clear_output_text(self):
self.output.config(state=tk.NORMAL)
self.output.delete('1.0', tk.END)
self.output.config(state=tk.DISABLED)
# changes the ui theme and saves it
def change_theme(self):
selected_theme = self.theme.get()
self.style.theme_use(selected_theme)
settings['theme'] = selected_theme
self.save_settings()
def save_to_file(self):
prompt = askstring('Save to file', 'File name to save to:')
if prompt and len(prompt) > 0:
if not os.path.exists('output'):
os.makedirs('output')
file = open(f'output/{prompt}.txt',"w+")
file.write(self.output.get('1.0', tk.END))
file.close()
# saves settings to settings.ini
def save_settings(self):
for setting in settings.keys():
config['DEFAULT'][setting] = settings[setting]
with open('settings.ini', 'w') as conf:
config.write(conf)
# executes neccesary tasks before stopping the script
def shutdown(self):
self.connected = False
self.serial.close()
self.save_settings()
sys.exit()
def run(self):
self.main_window.mainloop()
if __name__ == '__main__':
app = SerialTerminal()
app.run()
| import serial
import serial.tools.list_ports
import tkinter as tk
import tkinter.ttk as ttk
from tkinter.simpledialog import askinteger,askstring
import threading
import pygubu
import configparser
import time
import sys
import os
# Set up configparser and read settings (if it exists)
config = configparser.ConfigParser()
config.read('settings.ini')
config.sections()
settings = {'theme': 'clam'}
for setting in settings.keys():
if config.has_option('DEFAULT', setting):
settings[setting] = config['DEFAULT'][setting]
else:
config['DEFAULT'][setting] = settings[setting]
# Declare serial terminal class
class SerialTerminal:
def __init__(self):
# list of common baud rates
self.available_baud_rates = [300, 600, 1200, 2400, 4800, 9600, 14400, 19200, 28800, 31250, 57600, 115200]
# generate ui using pygubu
builder = pygubu.Builder()
builder.add_from_file('serial_terminal.ui')
# get required ui elements
self.main_window = builder.get_object('main_window')
self.menu_bar = builder.get_object('menu_bar')
self.file_menu = builder.get_object('file_menu')
self.serial_menu = builder.get_object('serial_menu')
self.window_menu = builder.get_object('window_menu')
self.baud_rate_menu = builder.get_object('baud_rate')
self.input = builder.get_object('input')
self.output = builder.get_object('output')
self.scroll_bar = builder.get_object('output_scroll')
self.send_button = builder.get_object('send_button')
self.serial_port_list = builder.get_object('serial_port')
# set up ui theme
self.style = ttk.Style(self.main_window)
self.style.theme_use(settings['theme'])
# build required variables
self.baud_rate = tk.IntVar(self.main_window, 9600)
self.serial_port = tk.StringVar(self.main_window)
self.clear_output_on_send = tk.BooleanVar(self.main_window, True)
self.autoscroll = tk.BooleanVar(self.main_window, True)
self.theme = tk.StringVar(self.main_window, self.style.theme_use())
# initialize empty serial connection
self.serial = serial.Serial()
self.connected = False
# build runtime ui elements
self.file_menu.add_command(label='Save output to file...', command=self.save_to_file)
self.file_menu.add_separator()
self.file_menu.add_command(label='Quit', command=self.shutdown)
self.window_menu.add_checkbutton(label='Clear Input on Send', var=self.clear_output_on_send)
self.window_menu.add_checkbutton(label='Autoscroll', var=self.autoscroll)
self.window_menu.add_separator()
self.window_menu.add_command(label='Clear Output', command=self.clear_output_text)
self.baud_rate_menu.add_command(label='Custom', command=self.ask_custom_baud_rate)
self.baud_rate_menu.add_separator()
for speed in self.available_baud_rates:
self.baud_rate_menu.add_radiobutton(label=str(speed), var=self.baud_rate, value=speed)
self.update_port_list()
for theme_name in self.style.theme_names():
builder.get_object('theme_menu').add_radiobutton(label=theme_name, var=self.theme, value=theme_name, command=self.change_theme)
self.serial_menu.add_command(label='Connect', command=self.connect_button)
# hook callbacks defined using pygubu to class functions
builder.connect_callbacks(self)
# link text box and scrollbar
self.output['yscrollcommand'] = self.scroll_bar.set
self.scroll_bar.command = self.output.yview
# set window icon and hook close button to shutdown function
self.main_window.iconphoto(True, tk.PhotoImage(file='resources/window-icon.png'))
self.main_window.protocol("WM_DELETE_WINDOW", self.shutdown)
# displays a prompt to input a custom baud rate
def ask_custom_baud_rate(self):
prompt = askinteger('Custom', 'Input a custom baud rate:')
if prompt:
self.baud_rate.set(prompt)
# updates list of available ports in the Serial menu
def update_port_list(self):
self.serial_port_list.delete(0, tk.END)
self.serial_port_list.add_command(label='Refresh', command=self.update_port_list)
self.serial_port_list.add_separator()
for port in serial.tools.list_ports.comports():
self.serial_port_list.add_radiobutton(label=port.description, var=self.serial_port, value=port.device)
# writes text to the output box
def write_to_output(self, stri):
self.output.config(state=tk.NORMAL)
self.output.insert(tk.END, stri)
self.output.config(state=tk.DISABLED)
if self.autoscroll.get():
self.output.see(tk.END)
# reads data from pySerial (should be run in a thread)
def read_serial(self):
while self.connected:
try:
if (self.serial.inWaiting() > 0):
data_str = self.serial.read(self.serial.inWaiting()).decode('utf8')
self.write_to_output(data_str)
except serial.serialutil.SerialException:
self.connected = False
self.serial_connection_change()
break
time.sleep(0.01)
# writes data to the connected serial port
def write_serial(self):
if self.connected:
self.serial.write(str.encode(self.input.get()))
if len(self.output.get('1.0', tk.END)) > 1:
self.write_to_output('\n')
self.write_to_output('> ' + self.input.get() + '\n')
if self.clear_output_on_send.get():
self.input.delete(0, tk.END)
# enables or disables the serial connection
def serial_connection_change(self):
if self.connected:
self.serial = serial.Serial(self.serial_port.get(), self.baud_rate.get())
thread = threading.Thread(target=self.read_serial)
thread.start()
else:
self.serial.close()
try:
self.menu_bar.entryconfigure(3, label=self.connected and 'Connected' or 'Disconnected')
self.serial_menu.entryconfigure(3, label=self.connected and 'Disconnect' or 'Connect')
self.send_button['state'] = self.connected and tk.NORMAL or tk.DISABLED
except RuntimeError: # occurs when terminal is being closed
pass
# handles the connect button in the serial menu
def connect_button(self):
if len(self.serial_port.get()) > 0:
self.connected = not self.connected
self.serial_connection_change()
# clears all text in the output box
def clear_output_text(self):
self.output.config(state=tk.NORMAL)
self.output.delete('1.0', tk.END)
self.output.config(state=tk.DISABLED)
# changes the ui theme and saves it
def change_theme(self):
selected_theme = self.theme.get()
self.style.theme_use(selected_theme)
settings['theme'] = selected_theme
self.save_settings()
def save_to_file(self):
prompt = askstring('Save to file', 'File name to save to:')
if prompt and len(prompt) > 0:
if not os.path.exists('output'):
os.makedirs('output')
file = open(f'output/{prompt}.txt',"w+")
file.write(self.output.get('1.0', tk.END))
file.close()
# saves settings to settings.ini
def save_settings(self):
for setting in settings.keys():
config['DEFAULT'][setting] = settings[setting]
with open('settings.ini', 'w') as conf:
config.write(conf)
# executes neccesary tasks before stopping the script
def shutdown(self):
self.connected = False
self.serial.close()
self.save_settings()
sys.exit()
def run(self):
self.main_window.mainloop()
if __name__ == '__main__':
app = SerialTerminal()
app.run()
| en | 0.694394 | # Set up configparser and read settings (if it exists) # Declare serial terminal class # list of common baud rates # generate ui using pygubu # get required ui elements # set up ui theme # build required variables # initialize empty serial connection # build runtime ui elements # hook callbacks defined using pygubu to class functions # link text box and scrollbar # set window icon and hook close button to shutdown function # displays a prompt to input a custom baud rate # updates list of available ports in the Serial menu # writes text to the output box # reads data from pySerial (should be run in a thread) # writes data to the connected serial port # enables or disables the serial connection # occurs when terminal is being closed # handles the connect button in the serial menu # clears all text in the output box # changes the ui theme and saves it # saves settings to settings.ini # executes neccesary tasks before stopping the script | 2.784094 | 3 |
src/py/predictive_analysis/experiments/linear_popularity/proposal_plot_tags/report_writer.py | FredRanieri/jellyfish | 0 | 6615120 | <gh_stars>0
"""
This module defines functions that may be used to write reports based on the result
of the predictive analsysis pipe-line.
User applications should use the factory method 'get_writer' in order to access
the available report writers.
"""
import math
from tabulate import tabulate
import matplotlib.pyplot as plt
import numpy as np
from year_ap_predictive.library_types import (
GroupDataSet,
GroupModels,
GroupScores
)
from year_ap_predictive.predictive_framework import (
to_numpy_2d_matrix
)
def get_chart_dimensions(x: int):
return (math.ceil(x / math.ceil(math.sqrt(x))), math.ceil(math.sqrt(x)))
def plot_and_show(grouped_data: GroupDataSet, models: GroupModels, scores: GroupScores, early: int, late: int) -> None:
"""
Plots the real data and the learned curve. Assumes both feature and value vector are univariate.
"""
n_groups = len(models.keys())
n_rows, m_cols = get_chart_dimensions(n_groups)
_, ax = plt.subplots(n_rows, m_cols, sharex='col', figsize=(20, 10))
for i, p in enumerate(models.items()):
row_index = math.floor(i / m_cols)
col_index = i % m_cols
group_key, model = p
X = to_numpy_2d_matrix(grouped_data[group_key].featureMatrix).reshape(-1, 1)
Y = to_numpy_2d_matrix(grouped_data[group_key].valueMatrix).reshape(-1, 1)
test_x = np.linspace(X.min(), X.max()).reshape(-1, 1)
test_y = model.predict(test_x)
X = np.power(10, X)
Y = np.power(10, Y)
test_x = np.power(10, test_x)
test_y = np.power(10, test_y)
ax[row_index][col_index].loglog(X, Y, 'bo', label='sample data points')
ax[row_index][col_index].loglog(test_x, test_y, 'r-', label='learned line')
ax[row_index][col_index].loglog(test_x, test_x, 'k--', label='y=x')
ax[row_index][col_index].legend(loc="upper left")
ax[row_index][col_index].grid(True)
ax[row_index][col_index].set_title(group_key)
ax[row_index][col_index].set_xlabel('Log of View Count at %dth day' % (early))
ax[row_index][col_index].set_ylabel('Log of View Count at %dth day' % (late))
plt.show()
def show_coeffs(grouped_data: GroupDataSet, models: GroupModels, scores: GroupScores, early: int, late: int) -> None:
coeffs = [[k, "%.2f" % v.intercept_[0], "%.2f" % v.coef_[0][0]] for k, v in models.items()]
print("Coefficients of the linear regression per group")
print(tabulate(coeffs, ["group", "intercept", "rate"], tablefmt="fancy_grid"))
| """
This module defines functions that may be used to write reports based on the result
of the predictive analsysis pipe-line.
User applications should use the factory method 'get_writer' in order to access
the available report writers.
"""
import math
from tabulate import tabulate
import matplotlib.pyplot as plt
import numpy as np
from year_ap_predictive.library_types import (
GroupDataSet,
GroupModels,
GroupScores
)
from year_ap_predictive.predictive_framework import (
to_numpy_2d_matrix
)
def get_chart_dimensions(x: int):
return (math.ceil(x / math.ceil(math.sqrt(x))), math.ceil(math.sqrt(x)))
def plot_and_show(grouped_data: GroupDataSet, models: GroupModels, scores: GroupScores, early: int, late: int) -> None:
"""
Plots the real data and the learned curve. Assumes both feature and value vector are univariate.
"""
n_groups = len(models.keys())
n_rows, m_cols = get_chart_dimensions(n_groups)
_, ax = plt.subplots(n_rows, m_cols, sharex='col', figsize=(20, 10))
for i, p in enumerate(models.items()):
row_index = math.floor(i / m_cols)
col_index = i % m_cols
group_key, model = p
X = to_numpy_2d_matrix(grouped_data[group_key].featureMatrix).reshape(-1, 1)
Y = to_numpy_2d_matrix(grouped_data[group_key].valueMatrix).reshape(-1, 1)
test_x = np.linspace(X.min(), X.max()).reshape(-1, 1)
test_y = model.predict(test_x)
X = np.power(10, X)
Y = np.power(10, Y)
test_x = np.power(10, test_x)
test_y = np.power(10, test_y)
ax[row_index][col_index].loglog(X, Y, 'bo', label='sample data points')
ax[row_index][col_index].loglog(test_x, test_y, 'r-', label='learned line')
ax[row_index][col_index].loglog(test_x, test_x, 'k--', label='y=x')
ax[row_index][col_index].legend(loc="upper left")
ax[row_index][col_index].grid(True)
ax[row_index][col_index].set_title(group_key)
ax[row_index][col_index].set_xlabel('Log of View Count at %dth day' % (early))
ax[row_index][col_index].set_ylabel('Log of View Count at %dth day' % (late))
plt.show()
def show_coeffs(grouped_data: GroupDataSet, models: GroupModels, scores: GroupScores, early: int, late: int) -> None:
coeffs = [[k, "%.2f" % v.intercept_[0], "%.2f" % v.coef_[0][0]] for k, v in models.items()]
print("Coefficients of the linear regression per group")
print(tabulate(coeffs, ["group", "intercept", "rate"], tablefmt="fancy_grid")) | en | 0.880719 | This module defines functions that may be used to write reports based on the result of the predictive analsysis pipe-line. User applications should use the factory method 'get_writer' in order to access the available report writers. Plots the real data and the learned curve. Assumes both feature and value vector are univariate. | 2.803192 | 3 |
others/nikkei2019-2-qual/nikkei2019-2-qualb.py | c-yan/atcoder | 1 | 6615121 | <reponame>c-yan/atcoder
N = int(input())
D = list(map(int, input().split()))
m = 998244353
if D[0] != 0:
print(0)
exit()
c = {}
for i in D:
c.setdefault(i, 0)
c[i] += 1
if c[0] != 1:
print(0)
exit()
result = 1
for i in range(1, max(D) + 1):
if i not in c:
print(0)
exit()
result *= pow(c[i - 1], c[i], m)
result %= m
print(result)
| N = int(input())
D = list(map(int, input().split()))
m = 998244353
if D[0] != 0:
print(0)
exit()
c = {}
for i in D:
c.setdefault(i, 0)
c[i] += 1
if c[0] != 1:
print(0)
exit()
result = 1
for i in range(1, max(D) + 1):
if i not in c:
print(0)
exit()
result *= pow(c[i - 1], c[i], m)
result %= m
print(result) | none | 1 | 2.580519 | 3 | |
bin.py | mazzini22/conversor-binary-decimal | 0 | 6615122 | print ('welcome to my humble conversor binary to decimal\n')
print ('type the digits in binary (right to left)\n')
b1 =int( input ())
if b1== 1:
b1 = 1
else:
b1 = 0
b2=int( input ())
if b2==1:
b2 = 2
else:
b2 = 0
b3=int( input ())
if b3==1:
b3 = 4
else:
b3 = 0
b4=int( input ())
if b4==1:
b4 = 8
else:
b4 = 0
b5=int( input ())
if b5==1:
b5 = 16
else:
b5 = 0
b6=int( input ())
if b6==1:
b6 = 32
else:
b7 = 0
b7=int( input ())
if b7==1:
b7 = 64
else:
b7 = 0
print('the decimal number is: ',b1+b2+b3+b4+b5+b6+b7) | print ('welcome to my humble conversor binary to decimal\n')
print ('type the digits in binary (right to left)\n')
b1 =int( input ())
if b1== 1:
b1 = 1
else:
b1 = 0
b2=int( input ())
if b2==1:
b2 = 2
else:
b2 = 0
b3=int( input ())
if b3==1:
b3 = 4
else:
b3 = 0
b4=int( input ())
if b4==1:
b4 = 8
else:
b4 = 0
b5=int( input ())
if b5==1:
b5 = 16
else:
b5 = 0
b6=int( input ())
if b6==1:
b6 = 32
else:
b7 = 0
b7=int( input ())
if b7==1:
b7 = 64
else:
b7 = 0
print('the decimal number is: ',b1+b2+b3+b4+b5+b6+b7) | none | 1 | 3.909814 | 4 | |
test/functional/pageobjectmodel/pageobject/photoviewpage.py | jiteshmohite/Python-Page-Object-Model-Framework | 6 | 6615123 | <reponame>jiteshmohite/Python-Page-Object-Model-Framework
from test.functional.pageobjectmodel.locators.photo_viewer_locator import PhotoViewerLocator
from test.functional.pageobjectmodel.pageobject import *
"""
This class contains all common method which we will require while executing photo viewer test
"""
class PhotoViewer(BasePage):
def __init__(self, driver):
super().__init__(driver)
self.driver = driver
def zoom_photo(self):
self.zoom_view(PhotoViewerLocator.PHOTO_VIEW_ID)
| from test.functional.pageobjectmodel.locators.photo_viewer_locator import PhotoViewerLocator
from test.functional.pageobjectmodel.pageobject import *
"""
This class contains all common method which we will require while executing photo viewer test
"""
class PhotoViewer(BasePage):
def __init__(self, driver):
super().__init__(driver)
self.driver = driver
def zoom_photo(self):
self.zoom_view(PhotoViewerLocator.PHOTO_VIEW_ID) | en | 0.930418 | This class contains all common method which we will require while executing photo viewer test | 2.182445 | 2 |
bigchaindb/pipelines/stale.py | ldmberman/bigchaindb | 0 | 6615124 | """This module monitors for stale transactions.
It reassigns transactions which have been assigned a node but
remain in the backlog past a certain amount of time.
"""
import logging
from multipipes import Pipeline, Node
from bigchaindb import Bigchain
from time import sleep
logger = logging.getLogger(__name__)
class StaleTransactionMonitor:
"""This class encapsulates the logic for re-assigning stale transactions.
Note:
Methods of this class will be executed in different processes.
"""
def __init__(self, timeout=5, backlog_reassign_delay=None):
"""Initialize StaleTransaction monitor
Args:
timeout: how often to check for stale tx (in sec)
backlog_reassign_delay: How stale a transaction should
be before reassignment (in sec). If supplied, overrides
the Bigchain default value.
"""
self.bigchain = Bigchain(backlog_reassign_delay=backlog_reassign_delay)
self.timeout = timeout
def check_transactions(self):
"""Poll backlog for stale transactions
Returns:
txs (list): txs to be re assigned
"""
sleep(self.timeout)
for tx in self.bigchain.get_stale_transactions():
yield tx
def reassign_transactions(self, tx):
"""Put tx back in backlog with new assignee
Returns:
transaction
"""
# NOTE: Maybe this is to verbose?
logger.info('Reassigning transaction with id %s', tx['id'])
self.bigchain.reassign_transaction(tx)
return tx
def create_pipeline(timeout=5, backlog_reassign_delay=5):
"""Create and return the pipeline of operations to be distributed
on different processes.
"""
stm = StaleTransactionMonitor(timeout=timeout,
backlog_reassign_delay=backlog_reassign_delay)
monitor_pipeline = Pipeline([
Node(stm.check_transactions),
Node(stm.reassign_transactions)
])
return monitor_pipeline
def start(timeout=5, backlog_reassign_delay=None):
"""Create, start, and return the block pipeline."""
pipeline = create_pipeline(timeout=timeout,
backlog_reassign_delay=backlog_reassign_delay)
pipeline.start()
return pipeline
| """This module monitors for stale transactions.
It reassigns transactions which have been assigned a node but
remain in the backlog past a certain amount of time.
"""
import logging
from multipipes import Pipeline, Node
from bigchaindb import Bigchain
from time import sleep
logger = logging.getLogger(__name__)
class StaleTransactionMonitor:
"""This class encapsulates the logic for re-assigning stale transactions.
Note:
Methods of this class will be executed in different processes.
"""
def __init__(self, timeout=5, backlog_reassign_delay=None):
"""Initialize StaleTransaction monitor
Args:
timeout: how often to check for stale tx (in sec)
backlog_reassign_delay: How stale a transaction should
be before reassignment (in sec). If supplied, overrides
the Bigchain default value.
"""
self.bigchain = Bigchain(backlog_reassign_delay=backlog_reassign_delay)
self.timeout = timeout
def check_transactions(self):
"""Poll backlog for stale transactions
Returns:
txs (list): txs to be re assigned
"""
sleep(self.timeout)
for tx in self.bigchain.get_stale_transactions():
yield tx
def reassign_transactions(self, tx):
"""Put tx back in backlog with new assignee
Returns:
transaction
"""
# NOTE: Maybe this is to verbose?
logger.info('Reassigning transaction with id %s', tx['id'])
self.bigchain.reassign_transaction(tx)
return tx
def create_pipeline(timeout=5, backlog_reassign_delay=5):
"""Create and return the pipeline of operations to be distributed
on different processes.
"""
stm = StaleTransactionMonitor(timeout=timeout,
backlog_reassign_delay=backlog_reassign_delay)
monitor_pipeline = Pipeline([
Node(stm.check_transactions),
Node(stm.reassign_transactions)
])
return monitor_pipeline
def start(timeout=5, backlog_reassign_delay=None):
"""Create, start, and return the block pipeline."""
pipeline = create_pipeline(timeout=timeout,
backlog_reassign_delay=backlog_reassign_delay)
pipeline.start()
return pipeline
| en | 0.833813 | This module monitors for stale transactions. It reassigns transactions which have been assigned a node but remain in the backlog past a certain amount of time. This class encapsulates the logic for re-assigning stale transactions. Note: Methods of this class will be executed in different processes. Initialize StaleTransaction monitor Args: timeout: how often to check for stale tx (in sec) backlog_reassign_delay: How stale a transaction should be before reassignment (in sec). If supplied, overrides the Bigchain default value. Poll backlog for stale transactions Returns: txs (list): txs to be re assigned Put tx back in backlog with new assignee Returns: transaction # NOTE: Maybe this is to verbose? Create and return the pipeline of operations to be distributed on different processes. Create, start, and return the block pipeline. | 2.926371 | 3 |
msph/clients/framework.py | CultCornholio/solenya | 11 | 6615125 | from functools import wraps
from aiohttp import ClientSession
from asyncio import get_event_loop
from types import SimpleNamespace
class ClientError(Exception):
pass
class Resource(SimpleNamespace):
def __init__(self, uri:str=str(), params:dict=dict(), headers:dict=dict(),
data:dict=dict(), json:dict=dict(), func_kwargs=dict()) -> None:
self.uri = uri
self.params = params
self.headers = headers
self.data = data
self.json = json
self.func_kwargs = func_kwargs
class Response(SimpleNamespace):
def __init__(self, status, json, resource) -> None:
self.json = json
self.status = status
self.resource = resource
class Client(object):
def __init__(self, base_url, base_headers = None):
self.base_url = base_url
if base_headers:
self.base_headers = base_headers
else:
self.base_headers = {}
self.loop = get_event_loop()
self.aio = False
async def do_get_request(self, resource, raise_on_status_code):
async with ClientSession(loop = self.loop) as session:
data_or_json = {}
if resource.data:
data_or_json['data'] = resource.data
if resource.json:
data_or_json['json'] = resource.json
async with session.get(
url = self.base_url + resource.uri,
headers={**self.base_headers, **resource.headers},
params=resource.params,
**data_or_json
) as r:
json_ = await r.json()
if r.status != 200 and raise_on_status_code:
raise ClientError(f'Server did not return 200. Status code: {r.status}.')
return Response(r.status, json_, resource)
async def handler(self, func, *args, raise_on_status_code, **kwargs):
resource = func(*args, **kwargs)
resource.func_kwargs = kwargs
r = await self.do_get_request(resource, raise_on_status_code)
return r
def endpoint(self, func):
@wraps(func)
def wrapper(*args, raise_on_status_code = True, **kwargs):
return self.handler(
func, *args,
raise_on_status_code = raise_on_status_code,
**kwargs) if self.aio \
else self.loop.run_until_complete(
self.handler(
func, *args,
raise_on_status_code = raise_on_status_code,
**kwargs)
)
return wrapper
| from functools import wraps
from aiohttp import ClientSession
from asyncio import get_event_loop
from types import SimpleNamespace
class ClientError(Exception):
pass
class Resource(SimpleNamespace):
def __init__(self, uri:str=str(), params:dict=dict(), headers:dict=dict(),
data:dict=dict(), json:dict=dict(), func_kwargs=dict()) -> None:
self.uri = uri
self.params = params
self.headers = headers
self.data = data
self.json = json
self.func_kwargs = func_kwargs
class Response(SimpleNamespace):
def __init__(self, status, json, resource) -> None:
self.json = json
self.status = status
self.resource = resource
class Client(object):
def __init__(self, base_url, base_headers = None):
self.base_url = base_url
if base_headers:
self.base_headers = base_headers
else:
self.base_headers = {}
self.loop = get_event_loop()
self.aio = False
async def do_get_request(self, resource, raise_on_status_code):
async with ClientSession(loop = self.loop) as session:
data_or_json = {}
if resource.data:
data_or_json['data'] = resource.data
if resource.json:
data_or_json['json'] = resource.json
async with session.get(
url = self.base_url + resource.uri,
headers={**self.base_headers, **resource.headers},
params=resource.params,
**data_or_json
) as r:
json_ = await r.json()
if r.status != 200 and raise_on_status_code:
raise ClientError(f'Server did not return 200. Status code: {r.status}.')
return Response(r.status, json_, resource)
async def handler(self, func, *args, raise_on_status_code, **kwargs):
resource = func(*args, **kwargs)
resource.func_kwargs = kwargs
r = await self.do_get_request(resource, raise_on_status_code)
return r
def endpoint(self, func):
@wraps(func)
def wrapper(*args, raise_on_status_code = True, **kwargs):
return self.handler(
func, *args,
raise_on_status_code = raise_on_status_code,
**kwargs) if self.aio \
else self.loop.run_until_complete(
self.handler(
func, *args,
raise_on_status_code = raise_on_status_code,
**kwargs)
)
return wrapper
| none | 1 | 2.520551 | 3 | |
log/connect.py | deep2696/DBMS_Website | 0 | 6615126 | import _mysql
db = _mysql.connect(host='localhost', user='root', passwd='<PASSWORD>', db='dbms_database') | import _mysql
db = _mysql.connect(host='localhost', user='root', passwd='<PASSWORD>', db='dbms_database') | none | 1 | 1.68858 | 2 | |
src/utils/__init__.py | charset-and-forget/importer | 0 | 6615127 | def chunks(n, source):
source = iter(source)
while True:
bunch = []
for i in range(n):
try:
bunch.append(next(source))
except StopIteration:
if bunch:
yield bunch
return
yield bunch
| def chunks(n, source):
source = iter(source)
while True:
bunch = []
for i in range(n):
try:
bunch.append(next(source))
except StopIteration:
if bunch:
yield bunch
return
yield bunch
| none | 1 | 3.255349 | 3 | |
exh/utils/__init__.py | KenyC/Exh | 7 | 6615128 | import numpy as np
from IPython.display import Math, display, HTML
import itertools
def getAssignment(n):
"""Returns all possible assignment of values to n independent boolean variables"""
iterator = [np.array(2**(n-i-1)*(2**(i) * [False] + 2**(i) * [True]), dtype = "bool") for i in range(n)]
return np.transpose(np.stack(iterator))
def entails(a, b):
return np.all(np.logical_or(np.logical_not(a), b))
def remove_doubles(fs):
"""Returns a list of elements from iterable fs, without double values"""
toReturn = []
for f in fs:
if all(f != g for g in toReturn):
toReturn.append(f)
return toReturn
def get(array, index_tuple):
"""Get value from multi-dimensional array "array" at indices specified by tuple "index_tuple" """
to_return = array
for index in index_tuple:
to_return = to_return[index]
return to_return
def add_functions_as_methods(fs):
def decorator(Class):
for f in fs:
setattr(Class, f.__name__, f)
return Class
return decorator
def jprint(*args):
"""Replacement for print in IPython"""
display(HTML(" ".join(list(map(str, args)))))
def automatic_var_names():
"""Generator of default variable names"""
typical_names = ["x{}", "y{}", "z{}"]
for x in itertools.chain([""], itertools.count()):
for var in typical_names:
yield var.format(x)
| import numpy as np
from IPython.display import Math, display, HTML
import itertools
def getAssignment(n):
"""Returns all possible assignment of values to n independent boolean variables"""
iterator = [np.array(2**(n-i-1)*(2**(i) * [False] + 2**(i) * [True]), dtype = "bool") for i in range(n)]
return np.transpose(np.stack(iterator))
def entails(a, b):
return np.all(np.logical_or(np.logical_not(a), b))
def remove_doubles(fs):
"""Returns a list of elements from iterable fs, without double values"""
toReturn = []
for f in fs:
if all(f != g for g in toReturn):
toReturn.append(f)
return toReturn
def get(array, index_tuple):
"""Get value from multi-dimensional array "array" at indices specified by tuple "index_tuple" """
to_return = array
for index in index_tuple:
to_return = to_return[index]
return to_return
def add_functions_as_methods(fs):
def decorator(Class):
for f in fs:
setattr(Class, f.__name__, f)
return Class
return decorator
def jprint(*args):
"""Replacement for print in IPython"""
display(HTML(" ".join(list(map(str, args)))))
def automatic_var_names():
"""Generator of default variable names"""
typical_names = ["x{}", "y{}", "z{}"]
for x in itertools.chain([""], itertools.count()):
for var in typical_names:
yield var.format(x)
| en | 0.627729 | Returns all possible assignment of values to n independent boolean variables Returns a list of elements from iterable fs, without double values Get value from multi-dimensional array "array" at indices specified by tuple "index_tuple" Replacement for print in IPython Generator of default variable names | 3.206882 | 3 |
all/078-Inverte-Palavras-Maiores-Que-X.py | brenodt/Desafio-365-dias-programando | 0 | 6615129 | def spin_words(sentence):
words = sentence.split(" ") # Creates List containing each word
lens = [len(word) for word in words] # Aux. List w/ all lenghts
# For each word in list
for index in range(len(words)):
if lens[index] >= 5: # For words w/ 5 or more chars
word = words[index] # saves pointer for current word
words[index] = word[::-1] # Replaces the words in list reversed
return " ".join(words) # Joins all words in list with spaces between them
def inverte_palavras(texto, valor):
# De dentro para fora:
# 1: Separa as palavras em um iterável usando .split(" ")
# 2: Usa compreensão de lista para iterar cada palavra
# 3: Usa operador ternário para avaliar a condição e retornar dois valores possíveis;
# 'Palavra invertida' se o tamanho for maior que valor, senão retorna 'Palavra'
# 4: Usa fatiamento para inverter a str. [inicio:fim:passo] => Passo -1 inverte o texto
# 5: Usa " ".join() para criar um texto único, contendo espaço entre as palavras
return " ".join([palavra[::-1] if len(palavra) >= valor else palavra for palavra in texto.split(" ")])
| def spin_words(sentence):
words = sentence.split(" ") # Creates List containing each word
lens = [len(word) for word in words] # Aux. List w/ all lenghts
# For each word in list
for index in range(len(words)):
if lens[index] >= 5: # For words w/ 5 or more chars
word = words[index] # saves pointer for current word
words[index] = word[::-1] # Replaces the words in list reversed
return " ".join(words) # Joins all words in list with spaces between them
def inverte_palavras(texto, valor):
# De dentro para fora:
# 1: Separa as palavras em um iterável usando .split(" ")
# 2: Usa compreensão de lista para iterar cada palavra
# 3: Usa operador ternário para avaliar a condição e retornar dois valores possíveis;
# 'Palavra invertida' se o tamanho for maior que valor, senão retorna 'Palavra'
# 4: Usa fatiamento para inverter a str. [inicio:fim:passo] => Passo -1 inverte o texto
# 5: Usa " ".join() para criar um texto único, contendo espaço entre as palavras
return " ".join([palavra[::-1] if len(palavra) >= valor else palavra for palavra in texto.split(" ")])
| pt | 0.846262 | # Creates List containing each word # Aux. List w/ all lenghts # For each word in list # For words w/ 5 or more chars # saves pointer for current word # Replaces the words in list reversed # Joins all words in list with spaces between them # De dentro para fora: # 1: Separa as palavras em um iterável usando .split(" ") # 2: Usa compreensão de lista para iterar cada palavra # 3: Usa operador ternário para avaliar a condição e retornar dois valores possíveis; # 'Palavra invertida' se o tamanho for maior que valor, senão retorna 'Palavra' # 4: Usa fatiamento para inverter a str. [inicio:fim:passo] => Passo -1 inverte o texto # 5: Usa " ".join() para criar um texto único, contendo espaço entre as palavras | 3.873212 | 4 |
Docking_Schrodinger_Glide_templates/Glide_SP_core_templates/Glide_SP_core_ensemble_template/grids/grid_in.py | bctaylor/bccgc4 | 1 | 6615130 | <filename>Docking_Schrodinger_Glide_templates/Glide_SP_core_templates/Glide_SP_core_ensemble_template/grids/grid_in.py
#code to write ten input files for grid generation with optional constraints
#the reason why this has to be done with all core constraint Glide docking is because the xglide.py script doesn't accept constraints
#therefore we must generate individual grids for each receptor and dock to each individually, then use the glide_ensemble_merge command to merge together all the files and parse out the minimum score pose out of the 10 centroids for the final line-up
import glob
method = 'TICA' #clustering method
dock = 'SP'
path = '/scratch/jegan/GLIDE_'+dock+'_core_docking/'+method+'_docking/grids/'
for num in range(10):
with open(path+'/'+method+'_grid_'+str(num)+'.in','w') as grid_in: #individual input grid files for each centroid
center = 'GRID_CENTER 5.4472, -0.7566, 13.6435\n' #predetermined center of mass of the cocrystal ligand
grid_in.write(center)
filename = 'GRIDFILE '+path+method+'_grid_'+str(num)+'.zip\n'
grid_in.write(filename)
inbox = 'INNERBOX 10, 10, 10\n'
grid_in.write(inbox)
outbox = 'OUTERBOX 42, 42, 42\n'
grid_in.write(outbox)
com_constraint = 'POSIT_CONSTRAINTS "com 7.580000 -2.110000 9.290000 6.000000"\n' #an optional positional constraint that is at a certain point and restrained within a 6 angstrom sphere. However, we didn't end up using this in our results as preliminary docking was uneffective
grid_in.write(com_constraint)
receptor = 'RECEP_FILE /home/jegan/final_centroids/mae_receptors/holo/'+method+'/'+method+'_'+str(num)+'.maegz\n' #the receptor file in maestro format
grid_in.write(receptor)
| <filename>Docking_Schrodinger_Glide_templates/Glide_SP_core_templates/Glide_SP_core_ensemble_template/grids/grid_in.py
#code to write ten input files for grid generation with optional constraints
#the reason why this has to be done with all core constraint Glide docking is because the xglide.py script doesn't accept constraints
#therefore we must generate individual grids for each receptor and dock to each individually, then use the glide_ensemble_merge command to merge together all the files and parse out the minimum score pose out of the 10 centroids for the final line-up
import glob
method = 'TICA' #clustering method
dock = 'SP'
path = '/scratch/jegan/GLIDE_'+dock+'_core_docking/'+method+'_docking/grids/'
for num in range(10):
with open(path+'/'+method+'_grid_'+str(num)+'.in','w') as grid_in: #individual input grid files for each centroid
center = 'GRID_CENTER 5.4472, -0.7566, 13.6435\n' #predetermined center of mass of the cocrystal ligand
grid_in.write(center)
filename = 'GRIDFILE '+path+method+'_grid_'+str(num)+'.zip\n'
grid_in.write(filename)
inbox = 'INNERBOX 10, 10, 10\n'
grid_in.write(inbox)
outbox = 'OUTERBOX 42, 42, 42\n'
grid_in.write(outbox)
com_constraint = 'POSIT_CONSTRAINTS "com 7.580000 -2.110000 9.290000 6.000000"\n' #an optional positional constraint that is at a certain point and restrained within a 6 angstrom sphere. However, we didn't end up using this in our results as preliminary docking was uneffective
grid_in.write(com_constraint)
receptor = 'RECEP_FILE /home/jegan/final_centroids/mae_receptors/holo/'+method+'/'+method+'_'+str(num)+'.maegz\n' #the receptor file in maestro format
grid_in.write(receptor)
| en | 0.882319 | #code to write ten input files for grid generation with optional constraints #the reason why this has to be done with all core constraint Glide docking is because the xglide.py script doesn't accept constraints #therefore we must generate individual grids for each receptor and dock to each individually, then use the glide_ensemble_merge command to merge together all the files and parse out the minimum score pose out of the 10 centroids for the final line-up #clustering method #individual input grid files for each centroid #predetermined center of mass of the cocrystal ligand #an optional positional constraint that is at a certain point and restrained within a 6 angstrom sphere. However, we didn't end up using this in our results as preliminary docking was uneffective #the receptor file in maestro format | 2.055119 | 2 |
Chapter-07/collections/ansible_collections/community/aws/plugins/modules/aws_msk_config.py | PacktPublishing/Ansible-for-Real-life-Automation | 7 | 6615131 | #!/usr/bin/python
# Copyright: (c) 2021, <NAME> (@oukooveu)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
---
module: aws_msk_config
short_description: Manage Amazon MSK cluster configurations.
version_added: "2.0.0"
description:
- Create, delete and modify Amazon MSK (Managed Streaming for Apache Kafka) cluster configurations.
author:
- <NAME> (@oukooveu)
options:
state:
description: Create (C(present)) or delete (C(absent)) cluster configuration.
choices: ['present', 'absent']
default: 'present'
type: str
name:
description: The name of the configuration.
required: true
type: str
description:
description: The description of the configuration.
type: str
config:
description: Contents of the server.properties file.
type: dict
aliases: ['configuration']
kafka_versions:
description:
- The versions of Apache Kafka with which you can use this MSK configuration.
- Required when I(state=present).
type: list
elements: str
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
"""
EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- aws_msk_config:
name: kafka-cluster-configuration
state: present
kafka_versions:
- 2.6.0
- 2.6.1
config:
auto.create.topics.enable: false
num.partitions: 1
default.replication.factor: 3
zookeeper.session.timeout.ms: 18000
- aws_msk_config:
name: kafka-cluster-configuration
state: absent
"""
RETURN = r"""
# These are examples of possible return values, and in general should use other names for return values.
arn:
description: The Amazon Resource Name (ARN) of the configuration.
type: str
returned: I(state=present)
sample: "arn:aws:kafka:<region>:<account>:configuration/<name>/<resource-id>"
revision:
description: The revision number.
type: int
returned: I(state=present)
sample: 1
server_properties:
description: Contents of the server.properties file.
type: str
returned: I(state=present)
sample: "default.replication.factor=3\nnum.io.threads=8\nzookeeper.session.timeout.ms=18000"
response:
description: The response from actual API call.
type: dict
returned: always
sample: {}
"""
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
camel_dict_to_snake_dict,
AWSRetry,
)
def dict_to_prop(d):
    """Render a dict as Kafka ``server.properties`` text, one ``key=value`` per line."""
    if not d:
        return ""
    lines = ["{0}={1}".format(key, value) for key, value in d.items()]
    return "\n".join(lines)
def prop_to_dict(p):
    """Parse ``server.properties`` content (bytes) into a dict.

    Inverse of :func:`dict_to_prop`. Splits each line on the *first* ``=``
    only, so property values that themselves contain ``=`` are preserved
    intact (the previous ``split("=")`` kept only the first value segment).
    Blank lines and lines without a separator are skipped instead of raising
    IndexError.
    """
    if len(p) == 0:
        return {}
    r_dict = {}
    for line in p.decode().split("\n"):
        # Tolerate empty/trailing lines and malformed entries.
        if "=" not in line:
            continue
        key, _, value = line.partition("=")
        r_dict[key.strip()] = value.strip()
    return r_dict
@AWSRetry.jittered_backoff(retries=5, delay=5)
def get_configurations_with_backoff(client):
    """List every MSK configuration, following pagination to the full result."""
    return client.get_paginator("list_configurations").paginate().build_full_result()
def find_active_config(client, module):
    """Return the single ACTIVE configuration matching the module's ``name``.

    Returns None when no active configuration with that name exists. Fails
    the module when the API lookup errors or when the name matches more than
    one active configuration.
    """
    name = module.params["name"]

    try:
        all_configs = get_configurations_with_backoff(client)["Configurations"]
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="failed to obtain kafka configurations")

    active_configs = [
        item
        for item in all_configs
        if item["Name"] == name and item["State"] == "ACTIVE"
    ]

    if not active_configs:
        return None
    if len(active_configs) > 1:
        # Bug fix: fail_json_aws() requires an exception as its first
        # positional argument; calling it with only msg= raised TypeError.
        # There is no exception here, so use plain fail_json instead.
        module.fail_json(
            msg="found more than one active config with name '{0}'".format(name)
        )
    return active_configs[0]
def get_configuration_revision(client, module, arn, revision):
    """Describe one revision of an MSK configuration; fail the module on API errors."""
    describe_args = dict(Arn=arn, Revision=revision, aws_retry=True)
    try:
        return client.describe_configuration_revision(**describe_args)
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, "failed to describe kafka configuration revision")
def is_configuration_changed(module, current):
    """Return True when the desired config differs from the current revision.

    Compares both the server properties (desired values stringified, since the
    API returns them as text) and the description against the live revision.
    """
    desired_props = {}
    for key, value in module.params.get("config").items():
        desired_props[str(key)] = str(value)

    props_match = prop_to_dict(current.get("ServerProperties", "")) == desired_props
    desc_match = current.get("Description", "") == module.params.get("description")
    return not (props_match and desc_match)
def create_config(client, module):
    """create new or update existing configuration

    Returns a ``(changed, response)`` tuple. When no active configuration
    with the requested name exists, a new one is created; otherwise a new
    revision is pushed only if the description or server properties differ
    from the latest revision. In check mode no API mutation is made.
    """
    config = find_active_config(client, module)

    # create new configuration
    if not config:
        if module.check_mode:
            # Would create; report changed without calling the API.
            return True, {}

        try:
            response = client.create_configuration(
                Name=module.params.get("name"),
                Description=module.params.get("description"),
                KafkaVersions=module.params.get("kafka_versions"),
                # The API expects server.properties as raw bytes.
                ServerProperties=dict_to_prop(module.params.get("config")).encode(),
                aws_retry=True
            )
        except (
            botocore.exceptions.BotoCoreError,
            botocore.exceptions.ClientError,
        ) as e:
            module.fail_json_aws(e, "failed to create kafka configuration")

    # update existing configuration (creates new revision)
    else:
        # it's required because 'config' doesn't contain 'ServerProperties'
        response = get_configuration_revision(client, module, arn=config["Arn"], revision=config["LatestRevision"]["Revision"])

        if not is_configuration_changed(module, response):
            # Latest revision already matches the desired state: idempotent no-op.
            return False, response

        if module.check_mode:
            return True, {}

        try:
            response = client.update_configuration(
                Arn=config["Arn"],
                Description=module.params.get("description"),
                ServerProperties=dict_to_prop(module.params.get("config")).encode(),
                aws_retry=True
            )
        except (
            botocore.exceptions.BotoCoreError,
            botocore.exceptions.ClientError,
        ) as e:
            module.fail_json_aws(e, "failed to update kafka configuration")

    # Both create and update responses expose the new latest revision; fetch
    # its full description (including ServerProperties) for the module result.
    arn = response["Arn"]
    revision = response["LatestRevision"]["Revision"]

    result = get_configuration_revision(client, module, arn=arn, revision=revision)

    return True, result
def delete_config(client, module):
    """Delete the named configuration if it exists; return ``(changed, response)``."""
    config = find_active_config(client, module)

    if module.check_mode:
        # Report what would happen without touching the API.
        return (True, config) if config else (False, {})

    if not config:
        return False, {}

    try:
        response = client.delete_configuration(Arn=config["Arn"], aws_retry=True)
    except (
        botocore.exceptions.BotoCoreError,
        botocore.exceptions.ClientError,
    ) as e:
        module.fail_json_aws(e, "failed to delete the kafka configuration")
    return True, response
def main():
    """Module entry point: parse arguments, apply the requested state, exit JSON."""
    module_args = dict(
        name=dict(type="str", required=True),
        description=dict(type="str", default=""),
        state=dict(choices=["present", "absent"], default="present"),
        config=dict(type="dict", aliases=["configuration"], default={}),
        kafka_versions=dict(type="list", elements="str"),
    )

    module = AnsibleAWSModule(argument_spec=module_args, supports_check_mode=True)

    client = module.client("kafka", retry_decorator=AWSRetry.jittered_backoff())

    if module.params["state"] == "present":
        changed, response = create_config(client, module)

    elif module.params["state"] == "absent":
        changed, response = delete_config(client, module)

    # return some useless staff in check mode if configuration doesn't exists
    # can be useful when these options are referenced by other modules during check mode run
    if module.check_mode and not response.get("Arn"):
        arn = "arn:aws:kafka:region:account:configuration/name/id"
        revision = 1
        server_properties = ""
    else:
        arn = response.get("Arn")
        revision = response.get("Revision")
        server_properties = response.get("ServerProperties", "")

    module.exit_json(
        changed=changed,
        arn=arn,
        revision=revision,
        server_properties=server_properties,
        response=camel_dict_to_snake_dict(response),
    )


if __name__ == "__main__":
    main()
| #!/usr/bin/python
# Copyright: (c) 2021, <NAME> (@oukooveu)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
---
module: aws_msk_config
short_description: Manage Amazon MSK cluster configurations.
version_added: "2.0.0"
description:
- Create, delete and modify Amazon MSK (Managed Streaming for Apache Kafka) cluster configurations.
author:
- <NAME> (@oukooveu)
options:
state:
description: Create (C(present)) or delete (C(absent)) cluster configuration.
choices: ['present', 'absent']
default: 'present'
type: str
name:
description: The name of the configuration.
required: true
type: str
description:
description: The description of the configuration.
type: str
config:
description: Contents of the server.properties file.
type: dict
aliases: ['configuration']
kafka_versions:
description:
- The versions of Apache Kafka with which you can use this MSK configuration.
- Required when I(state=present).
type: list
elements: str
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
"""
EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- aws_msk_config:
name: kafka-cluster-configuration
state: present
kafka_versions:
- 2.6.0
- 2.6.1
config:
auto.create.topics.enable: false
num.partitions: 1
default.replication.factor: 3
zookeeper.session.timeout.ms: 18000
- aws_msk_config:
name: kafka-cluster-configuration
state: absent
"""
RETURN = r"""
# These are examples of possible return values, and in general should use other names for return values.
arn:
description: The Amazon Resource Name (ARN) of the configuration.
type: str
returned: I(state=present)
sample: "arn:aws:kafka:<region>:<account>:configuration/<name>/<resource-id>"
revision:
description: The revision number.
type: int
returned: I(state=present)
sample: 1
server_properties:
description: Contents of the server.properties file.
type: str
returned: I(state=present)
sample: "default.replication.factor=3\nnum.io.threads=8\nzookeeper.session.timeout.ms=18000"
response:
description: The response from actual API call.
type: dict
returned: always
sample: {}
"""
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
camel_dict_to_snake_dict,
AWSRetry,
)
def dict_to_prop(d):
"""convert dictionary to multi-line properties"""
if len(d) == 0:
return ""
return "\n".join("{0}={1}".format(k, v) for k, v in d.items())
def prop_to_dict(p):
"""convert properties to dictionary"""
if len(p) == 0:
return {}
r_dict = {}
for s in p.decode().split("\n"):
kv = s.split("=")
r_dict[kv[0].strip()] = kv[1].strip()
return r_dict
# python >= 2.7 is required:
# return {
# k.strip(): v.strip() for k, v in (i.split("=") for i in p.decode().split("\n"))
# }
@AWSRetry.jittered_backoff(retries=5, delay=5)
def get_configurations_with_backoff(client):
paginator = client.get_paginator("list_configurations")
return paginator.paginate().build_full_result()
def find_active_config(client, module):
"""
looking for configuration by name
"""
name = module.params["name"]
try:
all_configs = get_configurations_with_backoff(client)["Configurations"]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="failed to obtain kafka configurations")
active_configs = list(
item
for item in all_configs
if item["Name"] == name and item["State"] == "ACTIVE"
)
if active_configs:
if len(active_configs) == 1:
return active_configs[0]
else:
module.fail_json_aws(
msg="found more than one active config with name '{0}'".format(name)
)
return None
def get_configuration_revision(client, module, arn, revision):
try:
return client.describe_configuration_revision(Arn=arn, Revision=revision, aws_retry=True)
except (
botocore.exceptions.BotoCoreError,
botocore.exceptions.ClientError,
) as e:
module.fail_json_aws(e, "failed to describe kafka configuration revision")
def is_configuration_changed(module, current):
"""
compare configuration's description and properties
python 2.7+ version:
prop_module = {str(k): str(v) for k, v in module.params.get("config").items()}
"""
prop_module = {}
for k, v in module.params.get("config").items():
prop_module[str(k)] = str(v)
if prop_to_dict(current.get("ServerProperties", "")) == prop_module:
if current.get("Description", "") == module.params.get("description"):
return False
return True
def create_config(client, module):
"""create new or update existing configuration"""
config = find_active_config(client, module)
# create new configuration
if not config:
if module.check_mode:
return True, {}
try:
response = client.create_configuration(
Name=module.params.get("name"),
Description=module.params.get("description"),
KafkaVersions=module.params.get("kafka_versions"),
ServerProperties=dict_to_prop(module.params.get("config")).encode(),
aws_retry=True
)
except (
botocore.exceptions.BotoCoreError,
botocore.exceptions.ClientError,
) as e:
module.fail_json_aws(e, "failed to create kafka configuration")
# update existing configuration (creates new revision)
else:
# it's required because 'config' doesn't contain 'ServerProperties'
response = get_configuration_revision(client, module, arn=config["Arn"], revision=config["LatestRevision"]["Revision"])
if not is_configuration_changed(module, response):
return False, response
if module.check_mode:
return True, {}
try:
response = client.update_configuration(
Arn=config["Arn"],
Description=module.params.get("description"),
ServerProperties=dict_to_prop(module.params.get("config")).encode(),
aws_retry=True
)
except (
botocore.exceptions.BotoCoreError,
botocore.exceptions.ClientError,
) as e:
module.fail_json_aws(e, "failed to update kafka configuration")
arn = response["Arn"]
revision = response["LatestRevision"]["Revision"]
result = get_configuration_revision(client, module, arn=arn, revision=revision)
return True, result
def delete_config(client, module):
"""delete configuration"""
config = find_active_config(client, module)
if module.check_mode:
if config:
return True, config
else:
return False, {}
if config:
try:
response = client.delete_configuration(Arn=config["Arn"], aws_retry=True)
except (
botocore.exceptions.BotoCoreError,
botocore.exceptions.ClientError,
) as e:
module.fail_json_aws(e, "failed to delete the kafka configuration")
return True, response
return False, {}
def main():
    """Module entry point: ensure the MSK configuration is present or absent."""
    module_args = dict(
        name=dict(type="str", required=True),
        description=dict(type="str", default=""),
        state=dict(choices=["present", "absent"], default="present"),
        config=dict(type="dict", aliases=["configuration"], default={}),
        kafka_versions=dict(type="list", elements="str"),
    )
    module = AnsibleAWSModule(argument_spec=module_args, supports_check_mode=True)
    # Retry transient/throttled Kafka API calls with jittered backoff.
    client = module.client("kafka", retry_decorator=AWSRetry.jittered_backoff())
    if module.params["state"] == "present":
        changed, response = create_config(client, module)
    elif module.params["state"] == "absent":
        changed, response = delete_config(client, module)
    # Return placeholder values in check mode when the configuration doesn't
    # exist yet; useful when other modules reference these fields during a
    # check-mode run.
    if module.check_mode and not response.get("Arn"):
        arn = "arn:aws:kafka:region:account:configuration/name/id"
        revision = 1
        server_properties = ""
    else:
        arn = response.get("Arn")
        revision = response.get("Revision")
        server_properties = response.get("ServerProperties", "")
    module.exit_json(
        changed=changed,
        arn=arn,
        revision=revision,
        server_properties=server_properties,
        response=camel_dict_to_snake_dict(response),
    )
| en | 0.604718 | #!/usr/bin/python # Copyright: (c) 2021, <NAME> (@oukooveu) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) --- module: aws_msk_config short_description: Manage Amazon MSK cluster configurations. version_added: "2.0.0" description: - Create, delete and modify Amazon MSK (Managed Streaming for Apache Kafka) cluster configurations. author: - <NAME> (@oukooveu) options: state: description: Create (C(present)) or delete (C(absent)) cluster configuration. choices: ['present', 'absent'] default: 'present' type: str name: description: The name of the configuration. required: true type: str description: description: The description of the configuration. type: str config: description: Contents of the server.properties file. type: dict aliases: ['configuration'] kafka_versions: description: - The versions of Apache Kafka with which you can use this MSK configuration. - Required when I(state=present). type: list elements: str extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 # Note: These examples do not set authentication details, see the AWS Guide for details. - aws_msk_config: name: kafka-cluster-configuration state: present kafka_versions: - 2.6.0 - 2.6.1 config: auto.create.topics.enable: false num.partitions: 1 default.replication.factor: 3 zookeeper.session.timeout.ms: 18000 - aws_msk_config: name: kafka-cluster-configuration state: absent # These are examples of possible return values, and in general should use other names for return values. arn: description: The Amazon Resource Name (ARN) of the configuration. type: str returned: I(state=present) sample: "arn:aws:kafka:<region>:<account>:configuration/<name>/<resource-id>" revision: description: The revision number. type: int returned: I(state=present) sample: 1 server_properties: description: Contents of the server.properties file. 
type: str returned: I(state=present) sample: "default.replication.factor=3\nnum.io.threads=8\nzookeeper.session.timeout.ms=18000" response: description: The response from actual API call. type: dict returned: always sample: {} # handled by AnsibleAWSModule convert dictionary to multi-line properties convert properties to dictionary # python >= 2.7 is required: # return { # k.strip(): v.strip() for k, v in (i.split("=") for i in p.decode().split("\n")) # } looking for configuration by name compare configuration's description and properties python 2.7+ version: prop_module = {str(k): str(v) for k, v in module.params.get("config").items()} create new or update existing configuration # create new configuration # update existing configuration (creates new revision) # it's required because 'config' doesn't contain 'ServerProperties' delete configuration # return some useless staff in check mode if configuration doesn't exists # can be useful when these options are referenced by other modules during check mode run | 1.9155 | 2 |
setup.py | filannim/Temporal-Footprint | 2 | 6615132 | <gh_stars>1-10
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2014 <NAME>
#
# gnTEAM, School of Computer Science, University of Manchester.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU General Public License.
#
# author: <NAME>
# email: <EMAIL>
#
# For details, see www.cs.man.ac.uk/~filannim/
import distutils.core
import os
import temporal_footprint
def read(*filenames, **kwargs):
    """Read one or more text files and join their contents.

    Keyword args:
        encoding: text encoding used for every file (default ``'utf-8'``).
        sep: separator placed between file contents (default ``'\n'``).

    Returns the concatenated text.
    """
    encoding = kwargs.get('encoding', 'utf-8')
    sep = kwargs.get('sep', '\n')
    buf = []
    for filename in filenames:
        # BUG FIX: the original called io.open() but 'io' was never imported,
        # raising NameError at import time (read() runs at module top level).
        # The builtin open() accepts 'encoding' and is equivalent here.
        with open(filename, encoding=encoding) as f:
            buf.append(f.read())
    return sep.join(buf)
long_description = read('README.md', 'LICENSE')
# This class is required in order to allow python setup.py test to work
# correctly. The code has been copied by the official py.test website.
class PyTest(TestCommand):
    """``setup.py test`` command that delegates to py.test.

    NOTE(review): ``TestCommand`` (setuptools.command.test.TestCommand) and
    ``sys`` are referenced here but never imported anywhere in this file, so
    defining/running this command would raise NameError — confirm and add the
    missing imports.
    """

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []      # no extra command-line args for pytest
        self.test_suite = True   # tell setuptools a test suite exists

    def run_tests(self):
        # Imported here rather than at module level so plain installs don't
        # require pytest.
        import pytest
        errcode = pytest.main(self.test_args)
        sys.exit(errcode)
# Importing setuptools adds some features like "setup.py develop", but
# it's optional so swallow the error if it's not there.
# Importing setuptools adds some features like "setup.py develop", but
# it's optional so swallow the error if it's not there.
try:
    import setuptools
except ImportError:
    pass

# Current folder
# NOTE(review): computed but never used anywhere in this file.
current_folder = os.path.abspath(os.path.dirname(__file__))
# Package metadata; version is single-sourced from temporal_footprint.__version__.
distutils.core.setup(
    name="temporal_footprint",
    description="Temporal footprint extractor from Wikipedia pages.",
    long_description=long_description,
    version=temporal_footprint.__version__,
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/filannim/Temporal-Footprint",
    download_url="https://github.com/filannim/Temporal-Footprint",
    setup_requires = [],
    packages = ['temporal_footprint'],
    include_package_data = True,  # honour MANIFEST.in data files
    install_requires = ['matplotlib', 'numpy', 'scipy'],
    classifiers=[],  # TODO: add trove classifiers for PyPI metadata
)
#!/usr/bin/env python
#
# Copyright 2014 <NAME>
#
# gnTEAM, School of Computer Science, University of Manchester.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU General Public License.
#
# author: <NAME>
# email: <EMAIL>
#
# For details, see www.cs.man.ac.uk/~filannim/
import distutils.core
import os
import temporal_footprint
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
long_description = read('README.md', 'LICENSE')
# This class is required in order to allow python setup.py test to work
# correctly. The code has been copied by the official py.test website.
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
# Importing setuptools adds some features like "setup.py develop", but
# it's optional so swallow the error if it's not there.
try:
import setuptools
except ImportError:
pass
# Current folder
current_folder = os.path.abspath(os.path.dirname(__file__))
distutils.core.setup(
name="temporal_footprint",
description="Temporal footprint extractor from Wikipedia pages.",
long_description=long_description,
version=temporal_footprint.__version__,
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/filannim/Temporal-Footprint",
download_url="https://github.com/filannim/Temporal-Footprint",
setup_requires = [],
packages = ['temporal_footprint'],
include_package_data = True,
install_requires = ['matplotlib', 'numpy', 'scipy'],
classifiers=[],
) | en | 0.84078 | # -*- coding: utf-8 -*- #!/usr/bin/env python # # Copyright 2014 <NAME> # # gnTEAM, School of Computer Science, University of Manchester. # All rights reserved. This program and the accompanying materials # are made available under the terms of the GNU General Public License. # # author: <NAME> # email: <EMAIL> # # For details, see www.cs.man.ac.uk/~filannim/ # This class is required in order to allow python setup.py test to work # correctly. The code has been copied by the official py.test website. # Importing setuptools adds some features like "setup.py develop", but # it's optional so swallow the error if it's not there. # Current folder | 1.824444 | 2 |
12/main.py | maskarb/adventofcode-2021 | 0 | 6615133 | <filename>12/main.py<gh_stars>0
import os
import sys
import time
from functools import cached_property
def get_input(filename):
    """Return the file's lines as a list, trailing newlines stripped."""
    with open(filename, 'r') as handle:
        contents = handle.read()
    return contents.splitlines()
class Node:
    """A cave-graph vertex; a 'big' cave (uppercase label) may be revisited."""

    def __init__(self, val):
        self.name = val.lower()   # lowercased label, used for ordering/repr
        self.val = val            # original label; equality/hash identity
        self.edges = []           # neighbouring Node objects

    @cached_property
    def big(self):
        # An all-uppercase label marks a big cave.
        return self.val == self.val.upper()

    def __eq__(self, other) -> bool:
        return self.val == other.val

    def __lt__(self, other) -> bool:
        return self.name < other.name

    def __hash__(self) -> int:
        # Deterministic hash: decimal ordinals of the label's characters,
        # concatenated into one integer.
        digits = "".join(str(ord(ch)) for ch in self.val)
        return int(digits)

    def __repr__(self):
        return f"Node {self.name}"
class Graph:
    """Undirected cave graph for AoC 2021 day 12 path counting."""

    def __init__(self, nodes=None):
        # BUG FIX: the original used a mutable default (nodes={}), so every
        # Graph() in the same process shared one node dict -- the script
        # builds two Graphs, and the second silently inherited the first's
        # nodes (with stale edge lists appended on top).
        self.nodes = {} if nodes is None else nodes

    def add_node(self, val):
        """Register a node by label; no-op if it already exists."""
        if self.nodes.get(val):
            return
        new_node = Node(val)
        self.nodes[new_node.val] = new_node

    def add_edge(self, node1, node2):
        """Connect two nodes bidirectionally."""
        node1.edges.append(node2)
        node2.edges.append(node1)

    def __repr__(self):
        return "".join((f"{key}: {value.edges}\n") for key, value in self.nodes.items())

    def any_double_small(self, path):
        """True if any node occurs twice in *path* (callers pass small caves only)."""
        return len(path) != len(set(path))

    def all_paths(self, node1, node2, path=None, level=0):
        """Part 1: all paths node1 -> node2 visiting small caves at most once."""
        # 'path + [node1]' builds a fresh list each call, so the shared
        # default is never mutated; still, avoid a mutable default argument.
        path = ([] if path is None else path) + [node1]
        if node1 == node2:
            return [path]
        paths = []
        for node in self.nodes[node1.val].edges:
            # Big caves may repeat; small caves may not.
            if node not in path or node.big:
                subpaths = self.all_paths(node, node2, path, level=level + 1)
                for subpath in subpaths:
                    paths.append(subpath)
        return paths

    def all_paths_2(self, node1, node2, path=None):
        """Part 2: like all_paths, but one small cave (never 'start') may be
        visited twice."""
        path = ([] if path is None else path) + [node1]
        if node1 == node2:
            return [path]
        paths = []
        for node in self.nodes[node1.val].edges:
            smalls_in_path = [n for n in path if not n.big]
            # Precedence: A or B or (C and D) -- revisit a small cave only if
            # no small cave has been doubled yet and it isn't 'start'.
            if node not in path or node.big or not self.any_double_small(smalls_in_path) and node.name != 'start':
                subpaths = self.all_paths_2(node, node2, path)
                for subpath in subpaths:
                    paths.append(subpath)
        return paths
def main_p1(lines):
    """Build the cave graph from 'A-b' input lines and print answers for BOTH
    puzzle parts.

    NOTE(review): despite the name, this also computes part 2; main_p2 below
    is never invoked from the __main__ dispatch.
    """
    g = Graph()
    for line in lines:
        nodes = line.split('-')  # each line names one edge: '<node>-<node>'
        for n in nodes:
            g.add_node(n)
        g.add_edge(g.nodes[nodes[0]], g.nodes[nodes[1]])
    print(g)
    paths = g.all_paths(g.nodes['start'], g.nodes['end'])
    print(f'result p1: {len(paths)}')
    paths = g.all_paths_2(g.nodes['start'], g.nodes['end'])
    print(f'result p2: {len(paths)}')
def main_p2(lines):
    """Build the cave graph and print the part-2 answer only.

    NOTE(review): dead code -- the __main__ block always calls main_p1,
    which already prints the part-2 result.
    """
    g = Graph()
    for line in lines:
        nodes = line.split('-')  # each line names one edge: '<node>-<node>'
        for n in nodes:
            g.add_node(n)
        g.add_edge(g.nodes[nodes[0]], g.nodes[nodes[1]])
    print(g)
    paths = g.all_paths_2(g.nodes['start'], g.nodes['end'])
    print(f'result: {len(paths)}')
if __name__ == '__main__':
    # Require a part selector: 'p1' or 'p2'.
    if len(sys.argv) < 2 or sys.argv[1] not in ('p1', 'p2'):
        print('you done did it wrong')
        exit()
    # Input file lives beside this script (sys.argv[0] is e.g. '12/main.py').
    filename = os.path.join(sys.argv[0].split('/', 1)[0], 'input.txt')
    lines = get_input(filename)
    part = sys.argv[1:]
    # NOTE(review): both 'p1' and 'p2' run main_p1 (which prints both
    # answers); main_p2 is never reached.
    if 'p1' in part or 'p2' in part:
        start = time.perf_counter()
        main_p1(lines)
        end = time.perf_counter()
        print(end - start)
| <filename>12/main.py<gh_stars>0
import os
import sys
import time
from functools import cached_property
def get_input(filename):
with open(filename, 'r') as f:
return f.read().splitlines()
class Node:
def __init__(self, val):
self.name = val.lower()
self.val = val
self.edges = []
@cached_property
def big(self):
return self.val == self.val.upper()
def __eq__(self, other) -> bool:
return self.val == other.val
def __lt__(self, other) -> bool:
return self.name < other.name
def __hash__(self) -> int:
concated_int = ''.join(str(ord(c)) for c in self.val)
return int(concated_int)
def __repr__(self):
return f"Node {self.name}"
class Graph:
def __init__(self, nodes={}):
self.nodes = nodes
def add_node(self, val):
if self.nodes.get(val):
return
new_node = Node(val)
self.nodes[new_node.val] = new_node
def add_edge(self, node1, node2):
node1.edges.append(node2)
node2.edges.append(node1)
def __repr__(self):
return "".join((f"{key}: {value.edges}\n") for key, value in self.nodes.items())
def any_double_small(self, path):
return len(path) != len(set(path))
def all_paths(self, node1, node2, path=[], level=0):
path = path + [node1]
if node1 == node2:
return [path]
paths = []
for node in self.nodes[node1.val].edges:
if node not in path or node.big:
subpaths = self.all_paths(node, node2, path, level=level+1)
for subpath in subpaths:
paths.append(subpath)
return paths
def all_paths_2(self, node1, node2, path=[]):
path = path + [node1]
if node1 == node2:
return [path]
paths = []
for node in self.nodes[node1.val].edges:
smalls_in_path = [node for node in path if not node.big]
if node not in path or node.big or not self.any_double_small(smalls_in_path) and node.name != 'start':
subpaths = self.all_paths_2(node, node2, path)
for subpath in subpaths:
paths.append(subpath)
return paths
def main_p1(lines):
g = Graph()
for line in lines:
nodes = line.split('-')
for n in nodes:
g.add_node(n)
g.add_edge(g.nodes[nodes[0]], g.nodes[nodes[1]])
print(g)
paths = g.all_paths(g.nodes['start'], g.nodes['end'])
print(f'result p1: {len(paths)}')
paths = g.all_paths_2(g.nodes['start'], g.nodes['end'])
print(f'result p2: {len(paths)}')
def main_p2(lines):
g = Graph()
for line in lines:
nodes = line.split('-')
for n in nodes:
g.add_node(n)
g.add_edge(g.nodes[nodes[0]], g.nodes[nodes[1]])
print(g)
paths = g.all_paths_2(g.nodes['start'], g.nodes['end'])
print(f'result: {len(paths)}')
if __name__ == '__main__':
if len(sys.argv) < 2 or sys.argv[1] not in ('p1', 'p2'):
print('you done did it wrong')
exit()
filename = os.path.join(sys.argv[0].split('/', 1)[0], 'input.txt')
lines = get_input(filename)
part = sys.argv[1:]
if 'p1' in part or 'p2' in part:
start = time.perf_counter()
main_p1(lines)
end = time.perf_counter()
print(end - start)
| none | 1 | 3.273399 | 3 | |
users/urls.py | broadsinatlanta/higashi-hiroshima-tours- | 0 | 6615134 | from django.contrib import admin
from django.contrib.auth import views as av
from django.urls import path
from users import views as uv
# Routes for the users app: landing page, registration/profile views, and the
# stock django.contrib.auth class-based views re-skinned with app templates.
urlpatterns = [
    path('', uv.index, name='user-home'),
    # User views
    path('register/', uv.register, name='user-register'),
    path('team/', uv.TeamView.as_view(), name='team'),
    path('profile/<str:username>/', uv.user_profile, name='user-about'),
    path('profile/', uv.profile, name='user-profile'),
    # Authentication views (URL names follow Django's password-reset
    # conventions so the built-in reset emails resolve correctly).
    path('login/', av.LoginView.as_view(template_name='users/login.html'), name='user-login'),
    path('logout/', av.LogoutView.as_view(template_name='users/logout.html'), name='user-logout'),
    path('password-reset/', av.PasswordResetView.as_view(template_name='users/password_reset.html'), name='password_reset'),
    path('password-reset/done/', av.PasswordResetDoneView.as_view(template_name='users/password_reset_done.html'), name='password_reset_done'),
    path('password-reset-confirm/<uidb64>/<token>/', av.PasswordResetConfirmView.as_view(template_name='users/password_reset_confirm.html'), name='password_reset_confirm'),
    # NOTE(review): this route has no trailing slash, unlike every other
    # pattern above -- confirm that is intentional.
    path('password-reset-complete', av.PasswordResetCompleteView.as_view(template_name='users/password_reset_complete.html'), name='password_reset_complete'),
]
from django.contrib.auth import views as av
from django.urls import path
from users import views as uv
urlpatterns = [
path('', uv.index, name='user-home'),
# User views
path('register/', uv.register, name='user-register'),
path('team/', uv.TeamView.as_view(), name='team'),
path('profile/<str:username>/', uv.user_profile, name='user-about'),
path('profile/', uv.profile, name='user-profile'),
# Authentication views
path('login/', av.LoginView.as_view(template_name='users/login.html'), name='user-login'),
path('logout/', av.LogoutView.as_view(template_name='users/logout.html'), name='user-logout'),
path('password-reset/', av.PasswordResetView.as_view(template_name='users/password_reset.html'), name='password_reset'),
path('password-reset/done/', av.PasswordResetDoneView.as_view(template_name='users/password_reset_done.html'), name='password_reset_done'),
path('password-reset-confirm/<uidb64>/<token>/', av.PasswordResetConfirmView.as_view(template_name='users/password_reset_confirm.html'), name='password_reset_confirm'),
path('password-reset-complete', av.PasswordResetCompleteView.as_view(template_name='users/password_reset_complete.html'), name='password_reset_complete'),
] | en | 0.800463 | # User views # Authentication views | 1.855067 | 2 |
xtalx/xtalx.py | phasesensors/xtalx_python | 0 | 6615135 | # Copyright (c) 2020-2021 by Phase Advanced Sensor Systems Corp.
import threading
import errno
import usb
import usb.util
import btype
# Bit definitions for the 'flags' word carried in each frequency packet.
# Their semantics are documented in detail on the Measurement class below.
FC_FLAGS_VALID = (1 << 15)            # flags field contains valid information
FC_FLAG_NO_TEMP_PRESSURE = (1 << 4)   # no temp-compensated pressure available
FC_FLAG_PRESSURE_FAILED = (1 << 3)    # pressure crystal stopped ticking
FC_FLAG_TEMP_FAILED = (1 << 2)        # temperature crystal stopped ticking
FC_FLAG_PRESSURE_UPDATE = (1 << 1)    # packet includes a new pressure reading
FC_FLAG_TEMP_UPDATE = (1 << 0)        # packet includes a new temperature reading
class FrequencyPacket24(btype.Struct):
    '''
    Firmware revisions 1.0.6 and earlier return a 24-byte packet if the sensor
    doesn't have enough data to perform a temperature-compensated pressure
    measurement yet or if the sensor doesn't have a calibration applied in
    flash.
    '''
    ref_freq = btype.uint32_t()             # reference crystal frequency
    pressure_edges = btype.uint32_t()       # pressure-crystal ticks counted
    pressure_ref_clocks = btype.uint32_t()  # ref clocks elapsed during count
    temp_edges = btype.uint32_t()           # temperature-crystal ticks counted
    temp_ref_clocks = btype.uint32_t()      # ref clocks elapsed during count
    flags = btype.uint16_t()                # FC_FLAG_* bitmask
    seq_num = btype.uint8_t()               # packet sequence number
    rsrv = btype.uint8_t()                  # reserved
    _EXPECTED_SIZE = 24
class FrequencyPacket40(btype.Struct):
    '''
    Firmware revisions 1.0.6 and earlier return a 40-byte packet if the sensor
    has enough data to perform a temperature-compensated pressure measurement.
    '''
    ref_freq = btype.uint32_t()             # reference crystal frequency
    pressure_edges = btype.uint32_t()       # pressure-crystal ticks counted
    pressure_ref_clocks = btype.uint32_t()  # ref clocks elapsed during count
    temp_edges = btype.uint32_t()           # temperature-crystal ticks counted
    temp_ref_clocks = btype.uint32_t()      # ref clocks elapsed during count
    flags = btype.uint16_t()                # FC_FLAG_* bitmask
    seq_num = btype.uint8_t()               # packet sequence number
    rsrv = btype.uint8_t()                  # reserved
    pressure_psi = btype.float64_t()        # temp-compensated pressure (PSI)
    temp_c = btype.float64_t()              # temperature (deg C)
    _EXPECTED_SIZE = 40
class FrequencyPacket56(btype.Struct):
    '''
    Firmware revisions 1.0.7 and higher always return a 56-byte packet that
    contains flags indicating the validity of things like the temperature-
    compensated pressure measurement.  These firmware versions also return the
    MCU temperature as a control.
    '''
    ref_freq = btype.uint32_t()             # reference crystal frequency
    pressure_edges = btype.uint32_t()       # pressure-crystal ticks counted
    pressure_ref_clocks = btype.uint32_t()  # ref clocks elapsed during count
    temp_edges = btype.uint32_t()           # temperature-crystal ticks counted
    temp_ref_clocks = btype.uint32_t()      # ref clocks elapsed during count
    flags = btype.uint16_t()                # FC_FLAG_* bitmask (always valid)
    seq_num = btype.uint8_t()               # packet sequence number
    rsrv = btype.uint8_t()                  # reserved
    pressure_psi = btype.float64_t()        # temp-compensated pressure (PSI)
    temp_c = btype.float64_t()              # temperature (deg C)
    mcu_temp_c = btype.float64_t()          # MCU junction temperature (deg C)
    rsrv2 = btype.Array(btype.uint8_t(), 8)  # reserved / padding to 56 bytes
    _EXPECTED_SIZE = 56
class Measurement:
    '''
    Object encapsulating the results of an XtalX sensor measurement.  The
    following fields are defined:

        sensor - Reference to the XtalX that generated the Measurement.
        ref_freq - Frequency of the sensor's reference crystal.
        pressure_edges - Number of pressure crystal ticks used to generate the
            Measurement.
        pressure_ref_clocks - Number of reference clock ticks that elapsed
            while counting pressure_edges pressure crystal ticks.
        pressure_freq - Measured pressure crystal frequency.
        temp_edges - Number of temperature crystal ticks used to generate the
            Measurement.
        temp_ref_clocks - Number of temperature crystal ticks that elapsed
            while counting temp_edges temperature crystal ticks.
        temp_freq - Measured temperature crystal frequency.
        mcu_temp_c - Microcontroller's internal junction temperature.
        pressure_psi - Temperature-compensated pressure measured in PSI.
        temp_c - Temperature measured in degrees Celsius.
        flags - A set of validity and error flags.

    If the sensor is uncalibrated or has not sampled enough data to generate
    a temperature-compensated pressure measurement then some or all of
    temp_freq, pressure_freq, pressure_psi and temp_c may be None.

    The flags field is a bitmask which may include any of the following bits;
    it may be None if the firmware version predates the introduction of status
    flags:

        FC_FLAGS_VALID - The flags field contains valid information (always set
            or flags will be None).
        FC_FLAG_NO_TEMP_PRESSURE - Will be set if pressure_psi and temp_c could
            not be generated; the sensor may be uncalibrated or may not have
            generated both temperature and pressure crystal readings yet.
        FC_FLAG_PRESSURE_FAILED - Will be set if 0.5 seconds elapse without a
            pressure crystal measurement completing; this indicates that a
            sensor failure has caused the pressure crystal to stop ticking.
        FC_FLAG_TEMP_FAILED - Will be set if 0.5 seconds elapse without a
            temperature crystal measurement completing; this indicates that a
            sensor failure has caused the temperature crystal to stop ticking.
        FC_FLAG_PRESSURE_UPDATE - Indicates that the current Measurement
            incorporates a new reading from the pressure crystal; it may still
            be incorporating the previous reading from the temperature crystal.
        FC_FLAG_TEMP_UPDATE - Indicates that the current Measurement
            incorporates a new reading from the temperature crystal; it may
            still be incorporating the previous reading from the pressure
            crystal.

    Note that since the temperature and pressure crystals tick asynchronously
    with respect to one another, a measurement on one crystal is likely to
    complete while a measurement on the other crystal is still pending and so
    typically only one of FC_FLAG_PRESSURE_UPDATE or FC_FLAG_TEMP_UPDATE will
    be set.
    '''
    def __init__(self, sensor, ref_freq, pressure_edges, pressure_ref_clocks,
                 temp_edges, temp_ref_clocks, mcu_temp_c, pressure_psi,
                 temp_c, flags):
        self.sensor              = sensor
        self.ref_freq            = ref_freq
        self.pressure_edges      = pressure_edges
        self.pressure_ref_clocks = pressure_ref_clocks
        self.temp_edges          = temp_edges
        self.temp_ref_clocks     = temp_ref_clocks
        self.mcu_temp_c          = mcu_temp_c
        self.pressure_psi        = pressure_psi
        self.temp_c              = temp_c
        self.flags               = flags
        # Derive crystal frequencies from edge/ref-clock counts.  With 3 or
        # fewer reference clocks the ratio is left as None (presumably too few
        # clocks to form a meaningful frequency -- TODO confirm threshold).
        if temp_ref_clocks > 3:
            self.temp_freq = ref_freq * temp_edges / temp_ref_clocks
        else:
            self.temp_freq = None
        if pressure_ref_clocks > 3:
            self.pressure_freq = ref_freq * pressure_edges / pressure_ref_clocks
        else:
            self.pressure_freq = None

    @staticmethod
    def _from_packet(sensor, packet):
        """Decode a raw USB packet into a Measurement, dispatching on the
        sensor's firmware version (bcdDevice < 0x0107 uses the legacy 24- or
        40-byte packets; newer firmware always sends 56-byte packets)."""
        mt, p, t = None, None, None
        if sensor.usb_dev.bcdDevice < 0x0107:
            if len(packet) == 24:
                # Legacy short packet: no compensated pressure/temperature.
                fp = FrequencyPacket24.unpack(packet)
            else:
                fp = FrequencyPacket40.unpack(packet)
                p  = fp.pressure_psi
                t  = fp.temp_c
        else:
            fp = FrequencyPacket56.unpack(packet)
            mt = fp.mcu_temp_c
            # Modern firmware always sets FC_FLAGS_VALID.
            assert fp.flags and (fp.flags & FC_FLAGS_VALID)
            if (fp.flags & FC_FLAG_NO_TEMP_PRESSURE) == 0:
                p = fp.pressure_psi
                t = fp.temp_c

        # Legacy packets may carry an uninitialized flags word; expose None.
        flags = fp.flags if fp.flags & FC_FLAGS_VALID else None
        return Measurement(sensor, fp.ref_freq, fp.pressure_edges,
                           fp.pressure_ref_clocks, fp.temp_edges,
                           fp.temp_ref_clocks, mt, p, t, flags)

    def tostring(self, verbose=False):
        """Format the measurement for display; verbose includes raw counts.

        NOTE(review): the verbose branch applies %f to pressure_freq and
        temp_freq, which raises TypeError when either is None (ref clocks
        <= 3) -- confirm callers only use verbose on complete measurements.
        """
        s = '%s: ' % self.sensor
        if verbose:
            s += ('C %u pe %u prc %u pf %f te %u trc %u tf %f p %s t %s '
                  'mt %s' % (self.ref_freq, self.pressure_edges,
                             self.pressure_ref_clocks, self.pressure_freq,
                             self.temp_edges, self.temp_ref_clocks,
                             self.temp_freq, self.pressure_psi, self.temp_c,
                             self.mcu_temp_c))
        else:
            if self.pressure_psi is None:
                p = 'n/a'
            else:
                p = '%f' % self.pressure_psi
            if self.temp_c is None:
                t = 'n/a'
            else:
                t = '%f' % self.temp_c
            s += '%s PSI, %s C' % (p, t)
        return s
class XtalX:
    '''
    Given a USB device handle acquired via find() or find_one(), creates an
    XtalX object that can be used to communicate with a sensor.
    '''
    def __init__(self, usb_dev):
        self.usb_dev     = usb_dev
        self.lock        = threading.RLock()   # guards all USB access
        self._halt_yield = True                # signals generator loops to stop
        self.thread      = None                # async read thread, if started
        try:
            self.serial_num = usb_dev.serial_number
            self.git_sha1   = usb.util.get_string(usb_dev, 6)
            self.fw_version = usb_dev.bcdDevice
        except ValueError as e:
            # Reading string descriptors without privileges raises this
            # specific ValueError on Linux.
            if str(e) == 'The device has no langid':
                raise Exception(
                    'Device has no langid, ensure running as root!') from e
            # NOTE(review): any other ValueError is silently swallowed here,
            # leaving serial_num/git_sha1/fw_version unset -- confirm intended.

        # Firmware >= 1.0.3 exposes string descriptor 15 (the report id);
        # older units get None.
        if self.usb_dev.bcdDevice >= 0x0103:
            try:
                self.report_id = usb.util.get_string(usb_dev, 15)
            except ValueError:
                self.report_id = None
        else:
            self.report_id = None

        # Stable topological identifier, e.g. '1:2.3'.
        self.usb_path = '%s:%s' % (
            usb_dev.bus, '.'.join('%u' % n for n in usb_dev.port_numbers))

    def __str__(self):
        return 'XtalX(%s)' % self.serial_num

    def _set_configuration(self, bConfigurationValue):
        """Select the given USB configuration, disposing held resources first;
        no-op if it is already the active configuration."""
        with self.lock:
            cfg = None
            try:
                cfg = self.usb_dev.get_active_configuration()
            except usb.core.USBError as e:
                # 'Configuration not set' just means no config is active yet.
                if e.strerror != 'Configuration not set':
                    raise
            if cfg is None or cfg.bConfigurationValue != bConfigurationValue:
                usb.util.dispose_resources(self.usb_dev)
                self.usb_dev.set_configuration(bConfigurationValue)

    def _set_measurement_config(self):
        # Configuration 2 is the measurement configuration (per the method
        # name -- TODO confirm against firmware documentation).
        self._set_configuration(2)

    def read_measurement(self):
        '''
        Synchronously read a single measurement from the sensor, blocking if no
        measurement is currently available.
        '''
        with self.lock:
            # IN endpoint 0x81, up to 64 bytes per packet.
            p = self.usb_dev.read(0x81, 64)
        return Measurement._from_packet(self, p)

    def _yield_measurements(self, do_reset):
        """Generator core: optionally reset the device, then yield
        measurements until _halt_yield is set; read timeouts are retried."""
        with self.lock:
            if do_reset:
                self.usb_dev.reset()
            self._set_measurement_config()
            while not self._halt_yield:
                try:
                    yield self.read_measurement()
                except usb.core.USBError as e:
                    if e.errno != errno.ETIMEDOUT:
                        raise
                    continue

    def yield_measurements(self, do_reset=True):
        '''
        Yields Measurement objects synchronously in the current thread,
        blocking while waiting for new measurements to be acquired.
        '''
        with self.lock:
            self._halt_yield = False
            yield from self._yield_measurements(do_reset)

    def halt_yield(self):
        '''
        Halts an ongoing yield_measurements() call, causing it to eventually
        terminate the generator loop.
        '''
        self._halt_yield = True

    def _read_measurements_async(self, handler, do_reset):
        """Thread body for read_measurements(): pump measurements to handler."""
        with self.lock:
            for m in self._yield_measurements(do_reset):
                handler(m)

    def read_measurements(self, handler, do_reset=True):
        '''
        Reads measurements asynchronously in a separate thread, calling the
        handler as measurements become available.  The handler should take a
        single Measurement object as an argument.
        '''
        with self.lock:
            assert self.thread is None
            self._halt_yield = False
            self.thread = threading.Thread(target=self._read_measurements_async,
                                           args=(handler, do_reset),
                                           daemon=False)
            self.thread.start()

    def join_read(self):
        '''
        Blocks the current thread until the asynchronous read thread completes.
        Typically this blocks indefinitely until some error occurs, however the
        read thread will also exit if someone sets the _halt_yield field to
        True (see XtalX.halt_read()).
        '''
        self.thread.join()

    def halt_read(self):
        '''
        Halts any asynchronous measurement thread and waits for it to finish
        cleanly.
        '''
        self._halt_yield = True
        self.join_read()
def find(**kwargs):
    '''
    Returns a list of USB device handles for all XtalX sensors.  **kwargs can
    be any keyword argument accepted by usb.core.find(); typically you will
    leave it empty.
    '''
    # find_all=True makes usb.core.find return an iterator; materialize it.
    matches = usb.core.find(find_all=True, idVendor=0x0483, idProduct=0xA34E,
                            product='XtalX', **kwargs)
    return [dev for dev in matches]
def find_one(**kwargs):
    '''
    Returns a single USB device handle for an XtalX sensor if only a single
    sensor is attached.  If multiple sensors are found, an exception is raised.
    **kwargs can be any keyword argument accepted by usb.core.find(); typically
    you will leave it empty.
    '''
    usb_devs = find(**kwargs)
    # The zero- and many-device cases are mutually exclusive, so the check
    # order doesn't matter; reject both before returning the lone match.
    if not usb_devs:
        raise Exception('No matching devices.')
    if len(usb_devs) > 1:
        raise Exception('Multiple matching devices: %s' %
                        ', '.join(ud.serial_number for ud in usb_devs))
    return usb_devs[0]
| # Copyright (c) 2020-2021 by Phase Advanced Sensor Systems Corp.
import threading
import errno
import usb
import usb.util
import btype
FC_FLAGS_VALID = (1 << 15)
FC_FLAG_NO_TEMP_PRESSURE = (1 << 4)
FC_FLAG_PRESSURE_FAILED = (1 << 3)
FC_FLAG_TEMP_FAILED = (1 << 2)
FC_FLAG_PRESSURE_UPDATE = (1 << 1)
FC_FLAG_TEMP_UPDATE = (1 << 0)
class FrequencyPacket24(btype.Struct):
'''
Firmware revisions 1.0.6 and earlier return a 24-byte packet if the sensor
doesn't have enough data to perform a temperature-compensated pressure
measurement yet or if the sensor doesn't have a calibration applied in
flash.
'''
ref_freq = btype.uint32_t()
pressure_edges = btype.uint32_t()
pressure_ref_clocks = btype.uint32_t()
temp_edges = btype.uint32_t()
temp_ref_clocks = btype.uint32_t()
flags = btype.uint16_t()
seq_num = btype.uint8_t()
rsrv = btype.uint8_t()
_EXPECTED_SIZE = 24
class FrequencyPacket40(btype.Struct):
'''
Firmware revisions 1.0.6 and earlier return a 40-byte packet if the sensor
has enough data to perform a temperature-compensated pressure measurement.
'''
ref_freq = btype.uint32_t()
pressure_edges = btype.uint32_t()
pressure_ref_clocks = btype.uint32_t()
temp_edges = btype.uint32_t()
temp_ref_clocks = btype.uint32_t()
flags = btype.uint16_t()
seq_num = btype.uint8_t()
rsrv = btype.uint8_t()
pressure_psi = btype.float64_t()
temp_c = btype.float64_t()
_EXPECTED_SIZE = 40
class FrequencyPacket56(btype.Struct):
'''
Firmware revisions 1.0.7 and higher always return a 56-byte packet that
contains flags indicating the validity of things like the temperature-
compensated pressure measurement. These firmware versions also return the
MCU temperature as a control.
'''
ref_freq = btype.uint32_t()
pressure_edges = btype.uint32_t()
pressure_ref_clocks = btype.uint32_t()
temp_edges = btype.uint32_t()
temp_ref_clocks = btype.uint32_t()
flags = btype.uint16_t()
seq_num = btype.uint8_t()
rsrv = btype.uint8_t()
pressure_psi = btype.float64_t()
temp_c = btype.float64_t()
mcu_temp_c = btype.float64_t()
rsrv2 = btype.Array(btype.uint8_t(), 8)
_EXPECTED_SIZE = 56
class Measurement:
'''
Object encapsulating the results of an XtalX sensor measurement. The
following fields are defined:
sensor - Reference to the XtalX that generated the Measurement.
ref_freq - Frequency of the sensor's reference crystal.
pressure_edges - Number of pressure crystal ticks used to generate the
Measurement.
pressure_ref_clocks - Number of reference clock ticks that elapsed
while counting pressure_edges pressure crystal ticks.
pressure_freq - Measured pressure crystal frequency.
temp_edges - Number of temperature crystal ticks used to generate the
Measurement.
temp_ref_clocks - Number of temperature crystal ticks that elapsed
while counting temp_edges temperature crystal ticks.
temp_freq - Measured temperature crystal frequency.
mcu_temp_c - Microcontroller's internal junction temperature.
pressure_psi - Temperature-compensated pressure measured in PSI.
temp_c - Temperature measured in degrees Celsius.
flags - A set of validity and error flags.
If the sensor is uncalibrated or has not sampled enough data to generate
a temperature-compensated pressure measurement then some or all of
temp_freq, pressure_freq, pressure_psi and temp_c may be None.
The flags field is a bitmask which may include any of the following bits;
it may be None if the firmware version predates the introduction of status
flags:
FC_FLAGS_VALID - The flags field contains valid information (always set
or flags will be None).
FC_FLAG_NO_TEMP_PRESSURE - Will be set if pressure_psi and temp_c could
not be generated; the sensor may be uncalibrated or may not have
generated both temperature and pressure crystal readings yet.
FC_FLAG_PRESSURE_FAILED - Will be set if 0.5 seconds elapse without a
pressure crystal measurement completing; this indicates that a
sensor failure has caused the pressure crystal to stop ticking.
FC_FLAG_TEMP_FAILED - Will be set if 0.5 seconds elapse without a
temperature crystal measurement completing; this indicates that a
sensor failure has caused the temperature crystal to stop ticking.
FC_FLAG_PRESSURE_UPDATE - Indicates that the current Measurement
incorporates a new reading from the pressure crystal; it may still
be incorporating the previous reading from the temperature crystal.
FC_FLAG_TEMP_UPDATE - Indicates that the current Measurement
incorporates a new reading from the temperature crystal; it may
still be incorporating the previous reading from the pressure
crystal.
Note that since the temperature and pressure crystals tick asynchronously
with respect to one another, a measurement on one crystal is likely to
complete while a measurement on the other crystal is still pending and so
typically only one of FC_FLAG_PRESSURE_UPDATE or FC_FLAG_TEMP_UPDATE will
be set.
'''
def __init__(self, sensor, ref_freq, pressure_edges, pressure_ref_clocks,
temp_edges, temp_ref_clocks, mcu_temp_c, pressure_psi,
temp_c, flags):
self.sensor = sensor
self.ref_freq = ref_freq
self.pressure_edges = pressure_edges
self.pressure_ref_clocks = pressure_ref_clocks
self.temp_edges = temp_edges
self.temp_ref_clocks = temp_ref_clocks
self.mcu_temp_c = mcu_temp_c
self.pressure_psi = pressure_psi
self.temp_c = temp_c
self.flags = flags
if temp_ref_clocks > 3:
self.temp_freq = ref_freq * temp_edges / temp_ref_clocks
else:
self.temp_freq = None
if pressure_ref_clocks > 3:
self.pressure_freq = ref_freq * pressure_edges / pressure_ref_clocks
else:
self.pressure_freq = None
    @staticmethod
    def _from_packet(sensor, packet):
        # Decode a raw USB packet into a Measurement, handling the three
        # packet layouts used across firmware revisions (see the
        # FrequencyPacket* classes above).
        mt, p, t = None, None, None
        if sensor.usb_dev.bcdDevice < 0x0107:
            # Pre-1.0.7 firmware: a 24-byte packet means no temperature-
            # compensated values are available; 40 bytes includes them.
            if len(packet) == 24:
                fp = FrequencyPacket24.unpack(packet)
            else:
                fp = FrequencyPacket40.unpack(packet)
            p = fp.pressure_psi
            t = fp.temp_c
        else:
            # 1.0.7+ firmware: fixed 56-byte packet carrying validity flags
            # and the MCU junction temperature as a control value.
            fp = FrequencyPacket56.unpack(packet)
            mt = fp.mcu_temp_c
            assert fp.flags and (fp.flags & FC_FLAGS_VALID)
            if (fp.flags & FC_FLAG_NO_TEMP_PRESSURE) == 0:
                p = fp.pressure_psi
                t = fp.temp_c
        # NOTE(review): for pre-1.0.7 packets this assumes fp.flags is an
        # int (e.g. 0), not None -- confirm FrequencyPacket24/40 set it.
        flags = fp.flags if fp.flags & FC_FLAGS_VALID else None
        return Measurement(sensor, fp.ref_freq, fp.pressure_edges,
                           fp.pressure_ref_clocks, fp.temp_edges,
                           fp.temp_ref_clocks, mt, p, t, flags)
def tostring(self, verbose=False):
s = '%s: ' % self.sensor
if verbose:
s += ('C %u pe %u prc %u pf %f te %u trc %u tf %f p %s t %s '
'mt %s' % (self.ref_freq, self.pressure_edges,
self.pressure_ref_clocks, self.pressure_freq,
self.temp_edges, self.temp_ref_clocks,
self.temp_freq, self.pressure_psi, self.temp_c,
self.mcu_temp_c))
else:
if self.pressure_psi is None:
p = 'n/a'
else:
p = '%f' % self.pressure_psi
if self.temp_c is None:
t = 'n/a'
else:
t = '%f' % self.temp_c
s += '%s PSI, %s C' % (p, t)
return s
class XtalX:
    '''
    Given a USB device handle acquired via find() or find_one(), creates an
    XtalX object that can be used to communicate with a sensor.
    '''
    def __init__(self, usb_dev):
        # Cache identifying descriptors up front; requires permission to
        # talk to the device (hence the "running as root" hint below).
        self.usb_dev = usb_dev
        self.lock = threading.RLock()
        self._halt_yield = True
        self.thread = None
        try:
            self.serial_num = usb_dev.serial_number
            self.git_sha1 = usb.util.get_string(usb_dev, 6)
            self.fw_version = usb_dev.bcdDevice
        except ValueError as e:
            if str(e) == 'The device has no langid':
                raise Exception(
                    'Device has no langid, ensure running as root!') from e
            # NOTE(review): any other ValueError is silently swallowed,
            # leaving serial_num/git_sha1/fw_version unset -- confirm intent.
        # String descriptor 15 (report id) only exists on firmware >= 1.0.3.
        if self.usb_dev.bcdDevice >= 0x0103:
            try:
                self.report_id = usb.util.get_string(usb_dev, 15)
            except ValueError:
                self.report_id = None
        else:
            self.report_id = None
        # e.g. "1:4.2" -- bus number plus port chain, stable for a given
        # physical port across replugs.
        self.usb_path = '%s:%s' % (
            usb_dev.bus, '.'.join('%u' % n for n in usb_dev.port_numbers))
    def __str__(self):
        # Identify the sensor by serial number in log/debug output.
        return 'XtalX(%s)' % self.serial_num
    def _set_configuration(self, bConfigurationValue):
        # Select the requested USB configuration, but only if it is not
        # already active; switching disposes cached resources first.
        with self.lock:
            cfg = None
            try:
                cfg = self.usb_dev.get_active_configuration()
            except usb.core.USBError as e:
                # "Configuration not set" just means no config is active yet.
                if e.strerror != 'Configuration not set':
                    raise
            if cfg is None or cfg.bConfigurationValue != bConfigurationValue:
                usb.util.dispose_resources(self.usb_dev)
                self.usb_dev.set_configuration(bConfigurationValue)
    def _set_measurement_config(self):
        # Configuration 2 is the measurement-streaming configuration.
        self._set_configuration(2)
    def read_measurement(self):
        '''
        Synchronously read a single measurement from the sensor, blocking if no
        measurement is currently available.
        '''
        # Endpoint 0x81 (IN); packets are at most 64 bytes.
        with self.lock:
            p = self.usb_dev.read(0x81, 64)
            return Measurement._from_packet(self, p)
    def _yield_measurements(self, do_reset):
        # Generator backing both the sync and async read paths.  USB read
        # timeouts are expected while waiting for data and simply retried;
        # any other USB error propagates.
        with self.lock:
            if do_reset:
                self.usb_dev.reset()
            self._set_measurement_config()
            while not self._halt_yield:
                try:
                    yield self.read_measurement()
                except usb.core.USBError as e:
                    if e.errno != errno.ETIMEDOUT:
                        raise
                    continue
    def yield_measurements(self, do_reset=True):
        '''
        Yields Measurement objects synchronously in the current thread,
        blocking while waiting for new measurements to be acquired.
        '''
        with self.lock:
            self._halt_yield = False
            yield from self._yield_measurements(do_reset)
    def halt_yield(self):
        '''
        Halts an ongoing yield_measurements() call, causing it to eventually
        terminate the generator loop.
        '''
        self._halt_yield = True
    def _read_measurements_async(self, handler, do_reset):
        # Thread target for read_measurements(): pump measurements into the
        # caller-supplied handler until halted.
        with self.lock:
            for m in self._yield_measurements(do_reset):
                handler(m)
    def read_measurements(self, handler, do_reset=True):
        '''
        Reads measurements asynchronously in a separate thread, calling the
        handler as measurements become available.  The handler should take a
        single Measurement object as an argument.
        '''
        with self.lock:
            # Only one background read thread may exist at a time.
            assert self.thread is None
            self._halt_yield = False
            self.thread = threading.Thread(target=self._read_measurements_async,
                                           args=(handler, do_reset),
                                           daemon=False)
            self.thread.start()
    def join_read(self):
        '''
        Blocks the current thread until the asynchronous read thread completes.
        Typically this blocks indefinitely until some error occurs, however the
        read thread will also exit if someone sets the _halt_yield field to
        True (see XtalX.halt_read()).
        '''
        self.thread.join()
    def halt_read(self):
        '''
        Halts any asynchronous measurement thread and waits for it to finish
        cleanly.
        '''
        self._halt_yield = True
        self.join_read()
def find(**kwargs):
    '''
    Returns a list of USB device handles for all XtalX sensors.  **kwargs can
    be any keyword argument accepted by usb.core.find(); typically you will
    leave it empty.
    '''
    # Match on the XtalX vendor/product IDs and product string.
    matches = usb.core.find(find_all=True, idVendor=0x0483, idProduct=0xA34E,
                            product='XtalX', **kwargs)
    return list(matches)
def find_one(**kwargs):
    '''
    Returns a single USB device handle for an XtalX sensor if exactly one
    sensor is attached.  Raises if none or more than one are found.
    **kwargs can be any keyword argument accepted by usb.core.find();
    typically you will leave it empty.
    '''
    devices = find(**kwargs)
    if len(devices) > 1:
        raise Exception('Multiple matching devices: %s' %
                        ', '.join(dev.serial_number for dev in devices))
    if not devices:
        raise Exception('No matching devices.')
    return devices[0]
| en | 0.83409 | # Copyright (c) 2020-2021 by Phase Advanced Sensor Systems Corp. Firmware revisions 1.0.6 and earlier return a 24-byte packet if the sensor doesn't have enough data to perform a temperature-compensated pressure measurement yet or if the sensor doesn't have a calibration applied in flash. Firmware revisions 1.0.6 and earlier return a 40-byte packet if the sensor has enough data to perform a temperature-compensated pressure measurement. Firmware revisions 1.0.7 and higher always return a 56-byte packet that contains flags indicating the validity of things like the temperature- compensated pressure measurement. These firmware versions also return the MCU temperature as a control. Object encapsulating the results of an XtalX sensor measurement. The following fields are defined: sensor - Reference to the XtalX that generated the Measurement. ref_freq - Frequency of the sensor's reference crystal. pressure_edges - Number of pressure crystal ticks used to generate the Measurement. pressure_ref_clocks - Number of reference clock ticks that elapsed while counting pressure_edges pressure crystal ticks. pressure_freq - Measured pressure crystal frequency. temp_edges - Number of temperature crystal ticks used to generate the Measurement. temp_ref_clocks - Number of temperature crystal ticks that elapsed while counting temp_edges temperature crystal ticks. temp_freq - Measured temperature crystal frequency. mcu_temp_c - Microcontroller's internal junction temperature. pressure_psi - Temperature-compensated pressure measured in PSI. temp_c - Temperature measured in degrees Celsius. flags - A set of validity and error flags. If the sensor is uncalibrated or has not sampled enough data to generate a temperature-compensated pressure measurement then some or all of temp_freq, pressure_freq, pressure_psi and temp_c may be None. 
The flags field is a bitmask which may include any of the following bits; it may be None if the firmware version predates the introduction of status flags: FC_FLAGS_VALID - The flags field contains valid information (always set or flags will be None). FC_FLAG_NO_TEMP_PRESSURE - Will be set if pressure_psi and temp_c could not be generated; the sensor may be uncalibrated or may not have generated both temperature and pressure crystal readings yet. FC_FLAG_PRESSURE_FAILED - Will be set if 0.5 seconds elapse without a pressure crystal measurement completing; this indicates that a sensor failure has caused the pressure crystal to stop ticking. FC_FLAG_TEMP_FAILED - Will be set if 0.5 seconds elapse without a temperature crystal measurement completing; this indicates that a sensor failure has caused the temperature crystal to stop ticking. FC_FLAG_PRESSURE_UPDATE - Indicates that the current Measurement incorporates a new reading from the pressure crystal; it may still be incorporating the previous reading from the temperature crystal. FC_FLAG_TEMP_UPDATE - Indicates that the current Measurement incorporates a new reading from the temperature crystal; it may still be incorporating the previous reading from the pressure crystal. Note that since the temperature and pressure crystals tick asynchronously with respect to one another, a measurement on one crystal is likely to complete while a measurement on the other crystal is still pending and so typically only one of FC_FLAG_PRESSURE_UPDATE or FC_FLAG_TEMP_UPDATE will be set. Given a USB device handle acquired via find() or find_one(), creates an XtalX object that can be used to communicate with a sensor. Synchronously read a single measurement from the sensor, blocking if no measurement is currently available. Yields Measurement objects synchronously in the current thread, blocking while waiting for new measurements to be acquired. 
Halts an ongoing yield_measurements() call, causing it to eventually terminate the generator loop. Reads measurements asynchronously in a separate thread, calling the handler as measurements become available. The handler should take a single Measurement object as an argument. Blocks the current thread until the asynchronous read thread completes. Typically this blocks indefinitely until some error occurs, however the read thread will also exit if someone sets the _halt_yield field to True (see XtalX.halt_read()). Halts any asynchronous measurement thread and waits for it to finish cleanly. Returns a list of USB device handles for all XtalX sensors. **kwargs can be any keyword argument accepted by usb.core.find(); typically you will leave it empty. Returns a single USB device handle for an XtalX sensor if only a single sensor is attached. If multiple sensors are found, an exception is raised. **kwargs can be any keyword argument accepted by usb.core.find(); typically you will leave it empty. | 2.197054 | 2 |
soluciones/fortaleza_de_contrasenia/fortaleza_de_contrasenia.py | estefaniamiguel/blockly | 0 | 6615136 | from consola import leer_caracter
from consola import leer_entrada_completa
from consola import obtener_caracter
from consola import avanzar_caracter
from consola import hay_mas_caracteres
from consola import imprimir
from consola import cambiar_color_texto
longitud = None
def Analizar_fortaleza():
    """Classify the password strength by its length and print a colour tag.

    Longer than 8 chars -> green, shorter than 5 -> red, otherwise yellow.
    """
    global longitud
    Longitud_de_contraseña()
    if longitud > 8:
        cambiar_color_texto("#33cc00")
        imprimir('VERDE')
    elif longitud < 5:
        cambiar_color_texto("#ff4040")
        imprimir('ROJO')
    else:
        cambiar_color_texto("#ffff00")
        imprimir('AMARILLO')
def Longitud_de_contraseña():
    """Walk the console input and count its characters into ``longitud``."""
    global longitud
    longitud = 0
    while hay_mas_caracteres():
        avanzar_caracter()
        longitud += 1
Analizar_fortaleza()
| from consola import leer_caracter
from consola import leer_entrada_completa
from consola import obtener_caracter
from consola import avanzar_caracter
from consola import hay_mas_caracteres
from consola import imprimir
from consola import cambiar_color_texto
longitud = None
def Analizar_fortaleza():
global longitud
Longitud_de_contraseña()
if 8 < longitud:
cambiar_color_texto("#33cc00")
imprimir('VERDE')
else:
if 5 > longitud:
cambiar_color_texto("#ff4040")
imprimir('ROJO')
else:
cambiar_color_texto("#ffff00")
imprimir('AMARILLO')
def Longitud_de_contraseña():
global longitud
longitud = 0
while hay_mas_caracteres():
longitud = longitud + 1
avanzar_caracter()
Analizar_fortaleza()
| none | 1 | 2.817864 | 3 | |
tests/network/nanovault/data/ws/pocketable.py | Matoking/siliqua | 8 | 6615137 | <gh_stars>1-10
from tests.network.nanovault.conftest import WebSocketReplay
# Canned WebSocket traffic: a single "newTransaction" replay describing a
# pocketable (receivable) send block.  Account/link values are redacted as
# "<KEY>" in this copy of the file.
DATA = [
    WebSocketReplay(
        ["<KEY>"],
        {
            "event": "newTransaction",
            "data": {
                "account": "<KEY>",
                # 1 Nano in raw units (10^30 raw).
                "amount": "1000000000000000000000000000000",
                "is_send": "true",
                "subtype": "send",
                "hash": "82CDDC385108D25E520B9CB2C7CB539CDF2FD5C9C3D7F4992AB6E18D0135B85F",
                "block": {
                    "account": "<KEY>",
                    "balance": "0",
                    "link": "5114AB75C910A20726BFD3E8A3B9335B1738F36D87F4D246EE5A2B91AEB0D8CC",
                    "link_as_account": "<KEY>",
                    "previous": "4AF3568F9ADDC65302FEDBBF2BAD60FD2175D7E671DDA980D55AEA5D343D8BEA",
                    "representative": "nano_1awsn43we17c1oshdru4azeqjz9wii41dy8npubm4rg11so7dx3jtqgoeahy",
                    "signature": "AB85B448F40F482AC24006F7A3A00D25211B2017CE498CE40728435A41124E4E678675C8D994D4FC4596607499C23470A9188DE4A011253F54F8ABC00457CD0B",
                    "type": "state",
                    "work": "9d86cf7e0bb936a9"
                }
            }
        }
    )
]
| from tests.network.nanovault.conftest import WebSocketReplay
DATA = [
WebSocketReplay(
["<KEY>"],
{
"event": "newTransaction",
"data": {
"account": "<KEY>",
"amount": "1000000000000000000000000000000",
"is_send": "true",
"subtype": "send",
"hash": "82CDDC385108D25E520B9CB2C7CB539CDF2FD5C9C3D7F4992AB6E18D0135B85F",
"block": {
"account": "<KEY>",
"balance": "0",
"link": "5114AB75C910A20726BFD3E8A3B9335B1738F36D87F4D246EE5A2B91AEB0D8CC",
"link_as_account": "<KEY>",
"previous": "4AF3568F9ADDC65302FEDBBF2BAD60FD2175D7E671DDA980D55AEA5D343D8BEA",
"representative": "nano_1awsn43we17c1oshdru4azeqjz9wii41dy8npubm4rg11so7dx3jtqgoeahy",
"signature": "AB85B448F40F482AC24006F7A3A00D25211B2017CE498CE40728435A41124E4E678675C8D994D4FC4596607499C23470A9188DE4A011253F54F8ABC00457CD0B",
"type": "state",
"work": "9d86cf7e0bb936a9"
}
}
}
)
] | none | 1 | 1.48014 | 1 | |
tasks/message_tasks.py | FAF21/spoofcheckselftest | 29 | 6615138 | <gh_stars>10-100
# -*- coding: utf-8 -*-
"""
@author: lunarca
Copyright 2017
"""
import os
import requests
import logging
from tasks import selftest_task_queue
from emailprotectionslib import spf as spflib
from emailprotectionslib import dmarc as dmarclib
@selftest_task_queue.task
def email_spoofing_analysis(domain):
    """Analyse a domain's SPF/DMARC posture and judge spoofability.

    Looks up the SPF and DMARC records for *domain*.  If the domain itself
    has no DMARC record, falls back to its organizational domain's record
    (subdomain case).  A domain is considered vulnerable unless DMARC is
    strong and either SPF is strong or an org-level record covers it.

    Returns a dict with opcode "test" and a nested message describing the
    SPF/DMARC/org-record findings (shape consumed by the front end).
    """
    spf_record = spflib.SpfRecord.from_domain(domain)
    dmarc_record = dmarclib.DmarcRecord.from_domain(domain)

    spf_existence = spf_record.record is not None
    spf_strong = spf_record.is_record_strong() if spf_existence else False

    dmarc_existence = dmarc_record.record is not None
    dmarc_policy = ""
    dmarc_strong = False
    dmarc_aggregate_reports = False
    dmarc_forensic_reports = False

    org_domain = None
    org_record_record = None
    org_sp = None
    org_policy = None
    org_aggregate_reports = None
    org_forensic_reports = None
    is_subdomain = False

    if dmarc_record is not None:
        dmarc_strong = dmarc_record.is_record_strong()
        dmarc_policy = dmarc_record.policy
        # rua/ruf tags indicate aggregate/forensic DMARC reporting is set up.
        dmarc_aggregate_reports = dmarc_record.rua is not None and dmarc_record.rua != ""
        dmarc_forensic_reports = dmarc_record.ruf is not None and dmarc_record.ruf != ""

        if not dmarc_existence:
            # No DMARC record on the domain itself -- check whether the
            # organizational (parent) domain publishes one that applies.
            try:
                org_domain = dmarc_record.get_org_domain()
                org_record = dmarc_record.get_org_record()
                org_record_record = org_record.record
                org_sp = org_record.subdomain_policy
                org_policy = org_record.policy
                # BUG FIX: these previously tested dmarc_record.rua/.ruf for
                # existence instead of org_record's own tags, so the org-level
                # reporting flags were wrong whenever the subdomain had no
                # DMARC record of its own.
                org_aggregate_reports = org_record.rua is not None and org_record.rua != ""
                org_forensic_reports = org_record.ruf is not None and org_record.ruf != ""
                is_subdomain = True
            except dmarclib.OrgDomainException:
                # No applicable organizational record; leave all org fields
                # as None so the response reports their absence.
                org_domain = None
                org_record_record = None
                org_sp = None
                org_policy = None
                org_aggregate_reports = None
                org_forensic_reports = None

    # Spoofable unless DMARC is strong and either SPF is strong or an
    # org-level DMARC record covers the (sub)domain.
    domain_vulnerable = not (dmarc_strong and (spf_strong or org_domain is not None))

    output = {
        'opcode': "test",
        'message': {
            'vulnerable': domain_vulnerable,
            'isSubdomain': is_subdomain,
            'spf': {
                'existence': spf_existence,
                'strongConfiguration': spf_strong,
                'record': spf_record.record if spf_existence else None,
            },
            'dmarc': {
                'existence': dmarc_existence,
                'policy': dmarc_policy,
                'aggregateReports': dmarc_aggregate_reports,
                'forensicReports': dmarc_forensic_reports,
                'record': dmarc_record.record if dmarc_existence else None,
                'orgRecord': {
                    'existence': org_record_record is not None,
                    'domain': org_domain,
                    'record': org_record_record,
                    'sp': org_sp,
                    'policy': org_policy,
                    'rua': org_aggregate_reports,
                    'ruf': org_forensic_reports,
                },
            },
        },
    }

    return output
@selftest_task_queue.task
def check_recaptcha_solution(user_solution, ip_address):
    """Verify a reCAPTCHA response token against Google's siteverify API.

    :param user_solution: the g-recaptcha-response token from the client.
    :param ip_address: the client's IP address, forwarded for verification.
    :returns: True if Google accepted the solution, False otherwise.
    """
    payload = {
        "secret": os.environ["RECAPTCHA_SECRET_KEY"],
        "response": user_solution,
        "remoteip": ip_address,
    }
    # Timeout so a network stall cannot hang the worker indefinitely.
    recaptcha_response = requests.post(
        "https://www.google.com/recaptcha/api/siteverify",
        data=payload,
        timeout=10,
    )
    logging.debug("[RECAPTCHA Response] %s", recaptcha_response.text)
    # Fail closed: a malformed reply without "success" counts as a failure
    # instead of raising KeyError.
    return recaptcha_response.json().get("success", False)
| # -*- coding: utf-8 -*-
"""
@author: lunarca
Copyright 2017
"""
import os
import requests
import logging
from tasks import selftest_task_queue
from emailprotectionslib import spf as spflib
from emailprotectionslib import dmarc as dmarclib
@selftest_task_queue.task
def email_spoofing_analysis(domain):
spf_record = spflib.SpfRecord.from_domain(domain)
dmarc_record = dmarclib.DmarcRecord.from_domain(domain)
spf_existence = spf_record.record is not None
spf_strong = spf_record.is_record_strong() if spf_existence else False
dmarc_existence = dmarc_record.record is not None
dmarc_policy = ""
dmarc_strong = False
dmarc_aggregate_reports = False
dmarc_forensic_reports = False
org_domain = None
org_record_record = None
org_sp = None
org_policy = None
org_aggregate_reports = None
org_forensic_reports = None
is_subdomain = False
if dmarc_record is not None:
dmarc_strong = dmarc_record.is_record_strong()
dmarc_policy = dmarc_record.policy
dmarc_aggregate_reports = dmarc_record.rua is not None and dmarc_record.rua != ""
dmarc_forensic_reports = dmarc_record.ruf is not None and dmarc_record.ruf != ""
if not dmarc_existence:
try:
org_domain = dmarc_record.get_org_domain()
org_record = dmarc_record.get_org_record()
org_record_record = org_record.record
org_sp = org_record.subdomain_policy
org_policy = org_record.policy
org_aggregate_reports = dmarc_record.rua is not None and org_record.rua != ""
org_forensic_reports = dmarc_record.ruf is not None and org_record.ruf != ""
is_subdomain = True
except dmarclib.OrgDomainException:
org_domain = None
org_record_record = None
org_sp = None
org_policy = None
org_aggregate_reports = None
org_forensic_reports = None
domain_vulnerable = not (dmarc_strong and (spf_strong or org_domain is not None))
output = {
'opcode': "test",
'message': {
'vulnerable': domain_vulnerable,
'isSubdomain': is_subdomain,
'spf': {
'existence': spf_existence,
'strongConfiguration': spf_strong,
'record': spf_record.record if spf_existence else None,
},
'dmarc': {
'existence': dmarc_existence,
'policy': dmarc_policy,
'aggregateReports': dmarc_aggregate_reports,
'forensicReports': dmarc_forensic_reports,
'record': dmarc_record.record if dmarc_existence else None,
'orgRecord': {
'existence': org_record_record is not None,
'domain': org_domain,
'record': org_record_record,
'sp': org_sp,
'policy': org_policy,
'rua': org_aggregate_reports,
'ruf': org_forensic_reports,
},
},
},
}
return output
@selftest_task_queue.task
def check_recaptcha_solution(user_solution, ip_address):
payload = {
"secret": os.environ["RECAPTCHA_SECRET_KEY"],
"response": user_solution,
"remoteip": ip_address,
}
recaptcha_response = requests.post("https://www.google.com/recaptcha/api/siteverify", data=payload)
logging.debug("[RECAPTCHA Response] " + recaptcha_response.text)
return recaptcha_response.json()["success"] | en | 0.655861 | # -*- coding: utf-8 -*- @author: lunarca Copyright 2017 | 2.293262 | 2 |
juriscraper/oral_args/united_states/federal_appellate/ca9.py | albertisfu/juriscraper | 0 | 6615139 | <gh_stars>0
"""Scraper for Ninth Circuit of Appeals
CourtID: ca9
Court Short Name: ca9
"""
from juriscraper.lib.html_utils import get_row_column_text
from juriscraper.OralArgumentSiteLinear import OralArgumentSiteLinear
class Site(OralArgumentSiteLinear):
    """Linear scraper collecting CA9 oral-argument audio metadata."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.court_id = self.__module__
        self.url = "https://www.ca9.uscourts.gov/media/"
    def _process_html(self):
        # One table row per argument; row 0 is the header and the last two
        # rows are skipped as non-data (footer) rows -- hence rows[1:-2].
        path = "//table[@id='search-results-table']//tr"
        rows = self.html.xpath(path)
        for row in rows[1:-2]:
            parts = row.xpath(".//td[6]/a/@href")[0].split("/")
            # All components needed to build the media-file URL are present
            # in this HTML element: the href path encodes the date
            # (year/month/day) and the docket number, which names the .mp3.
            year = parts[-3][1:5]
            month = parts[-3][5:7]
            day = parts[-3][7:]
            docket_number = parts[-2]
            # Build the URL for the media file from those pieces directly;
            # no need to traverse another page.
            url = f"https://cdn.ca9.uscourts.gov/datastore/media/{year}/{month}/{day}/{docket_number}.mp3"
            self.cases.append(
                {
                    "date": get_row_column_text(row, 5),
                    "docket": get_row_column_text(row, 2),
                    "judge": get_row_column_text(row, 3),
                    "name": get_row_column_text(row, 1),
                    "url": url,
                }
            )
| """Scraper for Ninth Circuit of Appeals
CourtID: ca9
Court Short Name: ca9
"""
from juriscraper.lib.html_utils import get_row_column_text
from juriscraper.OralArgumentSiteLinear import OralArgumentSiteLinear
class Site(OralArgumentSiteLinear):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.court_id = self.__module__
self.url = "https://www.ca9.uscourts.gov/media/"
def _process_html(self):
path = "//table[@id='search-results-table']//tr"
rows = self.html.xpath(path)
for row in rows[1:-2]:
parts = row.xpath(".//td[6]/a/@href")[0].split("/")
# the components needed to build the URL for the media file are
# are all availble in this HTML element. The path consisters of
# year, month, day, and the docket number.mp3
year = parts[-3][1:5]
month = parts[-3][5:7]
day = parts[-3][7:]
docket_number = parts[-2]
# Build URL for media file with the information we have
# No need to traverse another page.
url = f"https://cdn.ca9.uscourts.gov/datastore/media/{year}/{month}/{day}/{docket_number}.mp3"
self.cases.append(
{
"date": get_row_column_text(row, 5),
"docket": get_row_column_text(row, 2),
"judge": get_row_column_text(row, 3),
"name": get_row_column_text(row, 1),
"url": url,
}
) | en | 0.849597 | Scraper for Ninth Circuit of Appeals CourtID: ca9 Court Short Name: ca9 # the components needed to build the URL for the media file are # are all availble in this HTML element. The path consisters of # year, month, day, and the docket number.mp3 # Build URL for media file with the information we have # No need to traverse another page. | 2.836418 | 3 |
tests/fixtures.py | Neoteroi/BlackSheep-SQLAlchemy | 4 | 6615140 | import asyncio
from multiprocessing import Process
from time import sleep
import pytest
import uvicorn
from blacksheep.client import ClientSession
from blacksheep.client.pool import ClientConnectionPools
from tests.utils import get_sleep_time
from .app import app
@pytest.fixture(scope="session")
def event_loop():
"""Create an instance of the default event loop for all test cases."""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
@pytest.fixture(scope="module")
def client_session(server_host, server_port, event_loop):
# It is important to pass the instance of ClientConnectionPools,
# to ensure that the connections are reused and closed
session = ClientSession(
loop=event_loop,
base_url=f"http://{server_host}:{server_port}",
pools=ClientConnectionPools(event_loop),
)
yield session
asyncio.run(session.close())
@pytest.fixture(scope="module")
def server_host():
return "127.0.0.1"
@pytest.fixture(scope="module")
def server_port():
return 44555
@pytest.fixture(scope="module")
def connection_string():
return "sqlite:///example.db"
def start_server():
    # Child-process entry point; host/port are hard-coded and must agree
    # with the server_host/server_port fixtures above.
    uvicorn.run(app, host="127.0.0.1", port=44555, log_level="debug")
@pytest.fixture(scope="module", autouse=True)
def server(server_host, server_port):
    """Run the app in a subprocess for the duration of each test module."""
    server_process = Process(target=start_server)
    server_process.start()
    # NOTE(review): sleep-based startup wait is racy; presumably
    # get_sleep_time() is tuned per environment -- confirm, or poll the port.
    sleep(get_sleep_time())
    yield 1
    # Give in-flight requests a moment to finish before killing the process.
    sleep(1.2)
    server_process.terminate()
| import asyncio
from multiprocessing import Process
from time import sleep
import pytest
import uvicorn
from blacksheep.client import ClientSession
from blacksheep.client.pool import ClientConnectionPools
from tests.utils import get_sleep_time
from .app import app
@pytest.fixture(scope="session")
def event_loop():
"""Create an instance of the default event loop for all test cases."""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
@pytest.fixture(scope="module")
def client_session(server_host, server_port, event_loop):
# It is important to pass the instance of ClientConnectionPools,
# to ensure that the connections are reused and closed
session = ClientSession(
loop=event_loop,
base_url=f"http://{server_host}:{server_port}",
pools=ClientConnectionPools(event_loop),
)
yield session
asyncio.run(session.close())
@pytest.fixture(scope="module")
def server_host():
return "127.0.0.1"
@pytest.fixture(scope="module")
def server_port():
return 44555
@pytest.fixture(scope="module")
def connection_string():
return "sqlite:///example.db"
def start_server():
uvicorn.run(app, host="127.0.0.1", port=44555, log_level="debug")
@pytest.fixture(scope="module", autouse=True)
def server(server_host, server_port):
server_process = Process(target=start_server)
server_process.start()
sleep(get_sleep_time())
yield 1
sleep(1.2)
server_process.terminate()
| en | 0.914307 | Create an instance of the default event loop for all test cases. # It is important to pass the instance of ClientConnectionPools, # to ensure that the connections are reused and closed | 2.392442 | 2 |
adminsec/ldap.py | bihealth/hpc-access | 0 | 6615141 | <reponame>bihealth/hpc-access<gh_stars>0
import ldap3
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
class LdapConnector:
    """Connect to the two LDAPs and provide some search functions."""
    # Bound ldap3.Connection objects, populated by connect().
    connection1 = None
    connection2 = None
    def __init__(self, test_mode=False, test_setup_server1=None, test_setup_server2=None):
        # test_mode switches ldap3 to MOCK_SYNC; the test_setup_server*
        # callables seed the mock connections before binding.
        self.test_mode = test_mode
        self.test_setup_server1 = test_setup_server1
        self.test_setup_server2 = test_setup_server2
    def connect(self):
        # Open LDAP connections and bind.  Returns True on success; raises
        # ConnectionError if a configured server fails to bind.
        test_mode = {}
        if self.test_mode:
            test_mode["client_strategy"] = ldap3.MOCK_SYNC
        if settings.ENABLE_LDAP:
            server1 = ldap3.Server(settings.AUTH_LDAP_SERVER_URI)
            # NOTE(review): the password setting name below appears redacted
            # in this copy of the file ("<PASSWORD>") -- restore the real
            # settings attribute name.
            self.connection1 = ldap3.Connection(
                server1,
                user=settings.AUTH_LDAP_BIND_DN,
                password=settings.AUTH_<PASSWORD>,
                **test_mode
            )
            if self.test_mode:
                self.test_setup_server1(self.connection1)
            if not self.connection1.bind():
                raise ConnectionError("Could not connect to LDAP")
        if settings.ENABLE_LDAP_SECONDARY:
            server2 = ldap3.Server(settings.AUTH_LDAP2_SERVER_URI)
            # NOTE(review): redacted setting name here as well.
            self.connection2 = ldap3.Connection(
                server2,
                user=settings.AUTH_LDAP2_BIND_DN,
                password=settings.AUTH_<PASSWORD>2_BIND_PASSWORD,
                **test_mode
            )
            if self.test_mode:
                self.test_setup_server2(self.connection2)
            if not self.connection2.bind():
                raise ConnectionError("Could not connect to LDAP2")
        return True
    def get_ldap_username_domain_by_mail(self, mail):
        """Load user information from a given email.

        Picks the LDAP server whose institute email-domain list contains the
        mail's domain, searches for a person with that mail attribute, and
        returns a (sAMAccountName, username_domain) tuple.  Raises if the
        mail's domain matches neither server, the matching LDAP is disabled,
        or the search does not yield exactly one user.
        """
        email_domains = []
        email_domains2 = []
        if settings.ENABLE_LDAP:
            email_domains = settings.INSTITUTE_EMAIL_DOMAINS.split(",")
        if settings.ENABLE_LDAP_SECONDARY:
            email_domains2 = settings.INSTITUTE2_EMAIL_DOMAINS.split(",")
        # NOTE(review): mail.split("@")[1] raises IndexError for a string
        # without "@" -- presumably callers validate the address upstream.
        if mail.split("@")[1].lower() in email_domains:
            connection = self.connection1
            if not connection:
                raise ImproperlyConfigured("LDAP not activated but required for request.")
            search_base = settings.AUTH_LDAP_USER_SEARCH_BASE
            domain = settings.AUTH_LDAP_USERNAME_DOMAIN
        elif mail.split("@")[1].lower() in email_domains2:
            connection = self.connection2
            if not connection:
                raise ImproperlyConfigured("LDAP2 not activated but required for request.")
            search_base = settings.AUTH_LDAP2_USER_SEARCH_BASE
            domain = settings.AUTH_LDAP2_USERNAME_DOMAIN
        else:
            raise ImproperlyConfigured("Email not valid")
        search_params = {
            "search_base": search_base,
            "search_filter": "(&(objectclass=person)(mail={}))".format(mail),
            "attributes": ["sAMAccountName"],
        }
        if not connection.search(**search_params):
            raise Exception("No user found")
        if not len(connection.entries) == 1:
            raise Exception("Less or more than one user found")
        if "sAMAccountName" not in connection.entries[0]:
            raise Exception("Username attribute (sAMAccountName) not found!")
        return connection.entries[0]["sAMAccountName"][0], domain
| import ldap3
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
class LdapConnector:
"""Connect to the two LDAPs and provide some search functions."""
connection1 = None
connection2 = None
def __init__(self, test_mode=False, test_setup_server1=None, test_setup_server2=None):
self.test_mode = test_mode
self.test_setup_server1 = test_setup_server1
self.test_setup_server2 = test_setup_server2
def connect(self):
# Open LDAP connections and bind
test_mode = {}
if self.test_mode:
test_mode["client_strategy"] = ldap3.MOCK_SYNC
if settings.ENABLE_LDAP:
server1 = ldap3.Server(settings.AUTH_LDAP_SERVER_URI)
self.connection1 = ldap3.Connection(
server1,
user=settings.AUTH_LDAP_BIND_DN,
password=settings.AUTH_<PASSWORD>,
**test_mode
)
if self.test_mode:
self.test_setup_server1(self.connection1)
if not self.connection1.bind():
raise ConnectionError("Could not connect to LDAP")
if settings.ENABLE_LDAP_SECONDARY:
server2 = ldap3.Server(settings.AUTH_LDAP2_SERVER_URI)
self.connection2 = ldap3.Connection(
server2,
user=settings.AUTH_LDAP2_BIND_DN,
password=settings.AUTH_<PASSWORD>2_BIND_PASSWORD,
**test_mode
)
if self.test_mode:
self.test_setup_server2(self.connection2)
if not self.connection2.bind():
raise ConnectionError("Could not connect to LDAP2")
return True
def get_ldap_username_domain_by_mail(self, mail):
"""Load user information from a given email."""
email_domains = []
email_domains2 = []
if settings.ENABLE_LDAP:
email_domains = settings.INSTITUTE_EMAIL_DOMAINS.split(",")
if settings.ENABLE_LDAP_SECONDARY:
email_domains2 = settings.INSTITUTE2_EMAIL_DOMAINS.split(",")
if mail.split("@")[1].lower() in email_domains:
connection = self.connection1
if not connection:
raise ImproperlyConfigured("LDAP not activated but required for request.")
search_base = settings.AUTH_LDAP_USER_SEARCH_BASE
domain = settings.AUTH_LDAP_USERNAME_DOMAIN
elif mail.split("@")[1].lower() in email_domains2:
connection = self.connection2
if not connection:
raise ImproperlyConfigured("LDAP2 not activated but required for request.")
search_base = settings.AUTH_LDAP2_USER_SEARCH_BASE
domain = settings.AUTH_LDAP2_USERNAME_DOMAIN
else:
raise ImproperlyConfigured("Email not valid")
search_params = {
"search_base": search_base,
"search_filter": "(&(objectclass=person)(mail={}))".format(mail),
"attributes": ["sAMAccountName"],
}
if not connection.search(**search_params):
raise Exception("No user found")
if not len(connection.entries) == 1:
raise Exception("Less or more than one user found")
if "sAMAccountName" not in connection.entries[0]:
raise Exception("Username attribute (sAMAccountName) not found!")
return connection.entries[0]["sAMAccountName"][0], domain | en | 0.864524 | Connect to the two LDAPs and provide some search functions. # Open LDAP connections and bind Load user information from a given email. | 2.308379 | 2 |
pyFiles/logicWindows.py | vmay23/StreamD | 0 | 6615142 | from os import link
import PySimpleGUI as sg
from trace import *
from windows import *
from myFunctions import *
from youtube import *
def MainLogic():
g_path = '' #store the path
g_youtube_filter = {} #dictionary
win_main, win_openFolder, win_openFile = MainWindow(), None, None
while True:
window, events, values = sg.read_all_windows()
#progress_bar = win_main.find_element('progress_bar') #drawWindows.py
#window[key].Update(value)
# ----------------------------------------------------
# =========== MAIN File WINDOW ================
# ----------------------------------------------------
#options events
#events_and_values = Check_Events_and_Values(events, values)
#print(events_and_values)
if window == win_main:
g_youtube_filter = Check_Events_and_Values(events, values)
Log('\nevents_and_values\n', g_youtube_filter)
#---------------------#
#--- type of link ---#
#---------------------#
# if 'Link_Type' == '-PODCAST-' or 'Link_Type' == '-SONG-' or 'Link_Type' == '-AUDIOBOOK-':
# Video_Quality = Min
# Video_Output = Defv
# Video_WideScr = Off
# GreyedOut()
#---------------------#
#---------------------#
#-- checkbox check ---#
#---------------------#
# if '-ONLY_AUDIO-' == Enabled:
# Video_Quality = Min
# Video_Output = Defv
# Video_WideScr = Off
# GreyedOut()
#
#
# if 'SPLIT_TOO' == Enabled:
# SplitAudio()
#---------------------#
#------------
# CLOSE event
if window == win_main and events == sg.WIN_CLOSED:
Log(None, "Linha 27 - if window == win_mainWindow and events == sg.WIN_CLOSED: ")
break
#------------
# START event
if window == win_main and events == 'START':
Log(None, "Linha 32 - if window == win_mainWindow and events == 'START': ")
if g_youtube_filter['link'] is not "":
win_openFolder = OpenFolder()
win_main.hide()
# ----------------------------------------------------
# ----------------------------------------------------
# =========== OPEN Folder WINDOW ==============
# ----------------------------------------------------
#------------
# close event
if window == win_openFolder and events == sg.WINDOW_CLOSED:
Log(None, "Linha 51 - if window == win_openFolder and events == sg.WINDOW_CLOSED: ")
win_main.un_hide()
win_openFolder.close()
#-------------
# cancel event
if window == win_openFolder and events == 'Cancel':
Log(None,"Linha 58 - if window == win_openFolder and events == 'Cancel': ")
win_main.un_hide()
win_openFolder.close()
#---------
# ok event
if window == win_openFolder and events == 'OK':
Log(None, "Linha 65 - if window == win_openFolder and events == 'OK': ")
#fullData = values['_FILES_'].split(';')
g_path = values['_FILES_']
g_path = str(g_path)
Log('Path ', g_path)
win_openFolder.close()
win_main.un_hide()
#-------------------------------------------------------------#
#------------ IF EVERYTHING IS IN PLACE... -----------------#
if g_youtube_filter['link'] is not "" and g_path is not "":
#-------------------------------------------#
#===== INVOKE THE YOUTUBE A.P.I ============#
YouTubeDownload(g_path, g_youtube_filter)
#___________________________________________#
#-------------------------------------------------------------# | from os import link
import PySimpleGUI as sg
from trace import *
from windows import *
from myFunctions import *
from youtube import *
def MainLogic():
g_path = '' #store the path
g_youtube_filter = {} #dictionary
win_main, win_openFolder, win_openFile = MainWindow(), None, None
while True:
window, events, values = sg.read_all_windows()
#progress_bar = win_main.find_element('progress_bar') #drawWindows.py
#window[key].Update(value)
# ----------------------------------------------------
# =========== MAIN File WINDOW ================
# ----------------------------------------------------
#options events
#events_and_values = Check_Events_and_Values(events, values)
#print(events_and_values)
if window == win_main:
g_youtube_filter = Check_Events_and_Values(events, values)
Log('\nevents_and_values\n', g_youtube_filter)
#---------------------#
#--- type of link ---#
#---------------------#
# if 'Link_Type' == '-PODCAST-' or 'Link_Type' == '-SONG-' or 'Link_Type' == '-AUDIOBOOK-':
# Video_Quality = Min
# Video_Output = Defv
# Video_WideScr = Off
# GreyedOut()
#---------------------#
#---------------------#
#-- checkbox check ---#
#---------------------#
# if '-ONLY_AUDIO-' == Enabled:
# Video_Quality = Min
# Video_Output = Defv
# Video_WideScr = Off
# GreyedOut()
#
#
# if 'SPLIT_TOO' == Enabled:
# SplitAudio()
#---------------------#
#------------
# CLOSE event
if window == win_main and events == sg.WIN_CLOSED:
Log(None, "Linha 27 - if window == win_mainWindow and events == sg.WIN_CLOSED: ")
break
#------------
# START event
if window == win_main and events == 'START':
Log(None, "Linha 32 - if window == win_mainWindow and events == 'START': ")
if g_youtube_filter['link'] is not "":
win_openFolder = OpenFolder()
win_main.hide()
# ----------------------------------------------------
# ----------------------------------------------------
# =========== OPEN Folder WINDOW ==============
# ----------------------------------------------------
#------------
# close event
if window == win_openFolder and events == sg.WINDOW_CLOSED:
Log(None, "Linha 51 - if window == win_openFolder and events == sg.WINDOW_CLOSED: ")
win_main.un_hide()
win_openFolder.close()
#-------------
# cancel event
if window == win_openFolder and events == 'Cancel':
Log(None,"Linha 58 - if window == win_openFolder and events == 'Cancel': ")
win_main.un_hide()
win_openFolder.close()
#---------
# ok event
if window == win_openFolder and events == 'OK':
Log(None, "Linha 65 - if window == win_openFolder and events == 'OK': ")
#fullData = values['_FILES_'].split(';')
g_path = values['_FILES_']
g_path = str(g_path)
Log('Path ', g_path)
win_openFolder.close()
win_main.un_hide()
#-------------------------------------------------------------#
#------------ IF EVERYTHING IS IN PLACE... -----------------#
if g_youtube_filter['link'] is not "" and g_path is not "":
#-------------------------------------------#
#===== INVOKE THE YOUTUBE A.P.I ============#
YouTubeDownload(g_path, g_youtube_filter)
#___________________________________________#
#-------------------------------------------------------------# | en | 0.137311 | #store the path #dictionary #progress_bar = win_main.find_element('progress_bar') #drawWindows.py #window[key].Update(value) # ---------------------------------------------------- # =========== MAIN File WINDOW ================ # ---------------------------------------------------- #options events #events_and_values = Check_Events_and_Values(events, values) #print(events_and_values) #---------------------# #--- type of link ---# #---------------------# # if 'Link_Type' == '-PODCAST-' or 'Link_Type' == '-SONG-' or 'Link_Type' == '-AUDIOBOOK-': # Video_Quality = Min # Video_Output = Defv # Video_WideScr = Off # GreyedOut() #---------------------# #---------------------# #-- checkbox check ---# #---------------------# # if '-ONLY_AUDIO-' == Enabled: # Video_Quality = Min # Video_Output = Defv # Video_WideScr = Off # GreyedOut() # # # if 'SPLIT_TOO' == Enabled: # SplitAudio() #---------------------# #------------ # CLOSE event #------------ # START event # ---------------------------------------------------- # ---------------------------------------------------- # =========== OPEN Folder WINDOW ============== # ---------------------------------------------------- #------------ # close event #------------- # cancel event #--------- # ok event #fullData = values['_FILES_'].split(';') #-------------------------------------------------------------# #------------ IF EVERYTHING IS IN PLACE... -----------------# #-------------------------------------------# #===== INVOKE THE YOUTUBE A.P.I ============# #___________________________________________# #-------------------------------------------------------------# | 2.508482 | 3 |
similarity.py | Katsuya-Ishiyama/paper_recommend | 0 | 6615143 | <reponame>Katsuya-Ishiyama/paper_recommend<gh_stars>0
# -*- coding: utf-8 -*-
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
def calculate_similarity_of_interest(interest, descriptions):
corpus = interest + descriptions
vectorizer = TfidfVectorizer(ngram_range=(1, 3),
stop_words='english')
tfidf_matrix = vectorizer.fit_transform(corpus).toarray()
interest_tfidf_matrix = tfidf_matrix[0, :]
descriptions_tfidf_matrix = tfidf_matrix[1:, :]
similarity = cosine_similarity(X=interest_tfidf_matrix.reshape(1, -1),
Y=descriptions_tfidf_matrix)
return similarity[0].tolist()
| # -*- coding: utf-8 -*-
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
def calculate_similarity_of_interest(interest, descriptions):
corpus = interest + descriptions
vectorizer = TfidfVectorizer(ngram_range=(1, 3),
stop_words='english')
tfidf_matrix = vectorizer.fit_transform(corpus).toarray()
interest_tfidf_matrix = tfidf_matrix[0, :]
descriptions_tfidf_matrix = tfidf_matrix[1:, :]
similarity = cosine_similarity(X=interest_tfidf_matrix.reshape(1, -1),
Y=descriptions_tfidf_matrix)
return similarity[0].tolist() | en | 0.769321 | # -*- coding: utf-8 -*- | 3.289504 | 3 |
spider/pipelines.py | justlaputa/boc-currency | 0 | 6615144 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import logging
from google.cloud import bigquery
class BigQueryPipeline(object):
def __init__(self):
self.rows = []
def open_spider(self, spider):
self.bq_client = bigquery.Client()
dataset_ref = self.bq_client.dataset('exchange_rate')
table_ref = dataset_ref.table('boc')
try:
self.table = self.bq_client.get_table(table_ref)
except Exception:
logging.error('bigquery table not found')
raise
def process_item(self, item, spider):
self.rows.append(dict(item))
return item
def close_spider(self, spider):
if len(self.rows) <= 0:
logging.info('no record to send, we are done')
return
logging.info('sending %d records to bigquery', len(self.rows))
errors = self.bq_client.insert_rows(self.table, self.rows)
if len(errors) > 0:
logging.warning('errors while inserting records to bigquery: {}'.format(errors))
| # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import logging
from google.cloud import bigquery
class BigQueryPipeline(object):
def __init__(self):
self.rows = []
def open_spider(self, spider):
self.bq_client = bigquery.Client()
dataset_ref = self.bq_client.dataset('exchange_rate')
table_ref = dataset_ref.table('boc')
try:
self.table = self.bq_client.get_table(table_ref)
except Exception:
logging.error('bigquery table not found')
raise
def process_item(self, item, spider):
self.rows.append(dict(item))
return item
def close_spider(self, spider):
if len(self.rows) <= 0:
logging.info('no record to send, we are done')
return
logging.info('sending %d records to bigquery', len(self.rows))
errors = self.bq_client.insert_rows(self.table, self.rows)
if len(errors) > 0:
logging.warning('errors while inserting records to bigquery: {}'.format(errors))
| en | 0.663433 | # -*- coding: utf-8 -*- # Define your item pipelines here # # Don't forget to add your pipeline to the ITEM_PIPELINES setting # See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html | 2.473211 | 2 |
run_layout.py | aspdac-submission-pcb-layout/PCB-Layout-Framework | 6 | 6615145 | <filename>run_layout.py<gh_stars>1-10
"""
Run PCB Layout Job.
This script is a template for using this place and route framework. This script
removes all routing in the kicad_pcb file given, then places and routes the
design.
Usage:
run_layout.py [options] KICAD_PCB
Options:
--skip_placement Don't run autoplacement.
-p PITER Placement iterations [default: 2000].
-m MOVES Moves per placement iteration [default: 25].
-r RITERS Ripup and reroute iterations [default: 20].
-e ENLARGE Enlarge board boundary for routing [default: 0].
-l CHANGEW Layer change weight for routing [default: 1000].
"""
import time
from docopt import docopt
from ucsdpcb import PcbPlacer
from ucsdpcb import PcbRouter
from ucsdpcb import PcbDB
def main(arguments):
print(arguments)
print('Loading database...')
db = PcbDB.kicadPcbDataBase(arguments['KICAD_PCB'])
db.printNodes()
db.removeRoutedSegmentsAndVias() # start without any previous routing
if not arguments['--skip_placement']:
placement_start_time = time.time()
placer = PcbPlacer.GridBasedPlacer(db)
placer.set_num_iterations(arguments['-p'])
placer.set_iterations_moves(arguments['-m'])
placer.set_rtree(True)
placer.set_two_sided(False)
placer.set_base_lam(0.1329209929630061)
placer.set_lamtemp_update(0.8853585421875213)
placer.set_lambda_schedule(0.9753893051539414)
print('Placing...')
placer.test_placer_flow()
placement_end_time = time.time()
db.printKiCad()
router_start_time = time.time()
router = PcbRouter.GridBasedRouter(db)
router.set_num_iterations(arguments['-r'])
router.set_enlarge_boundary(arguments['-e'])
router.set_layer_change_weight(arguments['-l'])
print('Routing...')
router.initialization() # must be the last call to router before route()
router.route()
router_end_time = time.time()
db.printKiCad()
if not arguments['--skip_placement']:
print(
'Placement finished in ' +
str(placement_end_time - placement_start_time) +
' seconds (' +
str(arguments['KICAD_PCB']) +
')'
)
print(
'Routing finished in ' +
str(router_end_time - router_start_time) +
' seconds (' +
str(arguments['KICAD_PCB']) +
', ' +
str(arguments['-r']) +
' iteration)'
)
if __name__ == '__main__':
arguments = docopt(__doc__, version='0.1')
arguments['-p'] = int(arguments['-p'])
arguments['-m'] = int(arguments['-m'])
arguments['-r'] = int(arguments['-r'])
arguments['-e'] = int(arguments['-e'])
arguments['-l'] = int(arguments['-l'])
main(arguments)
| <filename>run_layout.py<gh_stars>1-10
"""
Run PCB Layout Job.
This script is a template for using this place and route framework. This script
removes all routing in the kicad_pcb file given, then places and routes the
design.
Usage:
run_layout.py [options] KICAD_PCB
Options:
--skip_placement Don't run autoplacement.
-p PITER Placement iterations [default: 2000].
-m MOVES Moves per placement iteration [default: 25].
-r RITERS Ripup and reroute iterations [default: 20].
-e ENLARGE Enlarge board boundary for routing [default: 0].
-l CHANGEW Layer change weight for routing [default: 1000].
"""
import time
from docopt import docopt
from ucsdpcb import PcbPlacer
from ucsdpcb import PcbRouter
from ucsdpcb import PcbDB
def main(arguments):
print(arguments)
print('Loading database...')
db = PcbDB.kicadPcbDataBase(arguments['KICAD_PCB'])
db.printNodes()
db.removeRoutedSegmentsAndVias() # start without any previous routing
if not arguments['--skip_placement']:
placement_start_time = time.time()
placer = PcbPlacer.GridBasedPlacer(db)
placer.set_num_iterations(arguments['-p'])
placer.set_iterations_moves(arguments['-m'])
placer.set_rtree(True)
placer.set_two_sided(False)
placer.set_base_lam(0.1329209929630061)
placer.set_lamtemp_update(0.8853585421875213)
placer.set_lambda_schedule(0.9753893051539414)
print('Placing...')
placer.test_placer_flow()
placement_end_time = time.time()
db.printKiCad()
router_start_time = time.time()
router = PcbRouter.GridBasedRouter(db)
router.set_num_iterations(arguments['-r'])
router.set_enlarge_boundary(arguments['-e'])
router.set_layer_change_weight(arguments['-l'])
print('Routing...')
router.initialization() # must be the last call to router before route()
router.route()
router_end_time = time.time()
db.printKiCad()
if not arguments['--skip_placement']:
print(
'Placement finished in ' +
str(placement_end_time - placement_start_time) +
' seconds (' +
str(arguments['KICAD_PCB']) +
')'
)
print(
'Routing finished in ' +
str(router_end_time - router_start_time) +
' seconds (' +
str(arguments['KICAD_PCB']) +
', ' +
str(arguments['-r']) +
' iteration)'
)
if __name__ == '__main__':
arguments = docopt(__doc__, version='0.1')
arguments['-p'] = int(arguments['-p'])
arguments['-m'] = int(arguments['-m'])
arguments['-r'] = int(arguments['-r'])
arguments['-e'] = int(arguments['-e'])
arguments['-l'] = int(arguments['-l'])
main(arguments)
| en | 0.611156 | Run PCB Layout Job. This script is a template for using this place and route framework. This script removes all routing in the kicad_pcb file given, then places and routes the design. Usage: run_layout.py [options] KICAD_PCB Options: --skip_placement Don't run autoplacement. -p PITER Placement iterations [default: 2000]. -m MOVES Moves per placement iteration [default: 25]. -r RITERS Ripup and reroute iterations [default: 20]. -e ENLARGE Enlarge board boundary for routing [default: 0]. -l CHANGEW Layer change weight for routing [default: 1000]. # start without any previous routing # must be the last call to router before route() | 2.605852 | 3 |
tests.py | mrahtz/easy_tf_log | 115 | 6615146 | <reponame>mrahtz/easy_tf_log<gh_stars>100-1000
import importlib
import os
import os.path as osp
import queue
import tempfile
import time
import unittest
from multiprocessing import Queue, Process
import numpy as np
import tensorflow as tf
import easy_tf_log
if tf.__version__ >= '2':
import tensorflow.compat.v1.train as tf_train
# Needed for creation of a TensorFlow 1 `summary` op (which behave
# differently from a TensorFlow 2 `summary` op), and a TensorFlow 1
# `FileWriter` (TensorFlow 2 does has `tf.summary.create_file_writer, but
# the object it returns seems to be slightly different - it doesn't have the
# `add_summary` method.)
import tensorflow.compat.v1.summary as tf1_summary
# FileWriter is not compatible with eager execution.
tf.compat.v1.disable_eager_execution()
else:
import tensorflow.train as tf_train
import tensorflow.summary as tf1_summary
class TestEasyTFLog(unittest.TestCase):
def setUp(self):
importlib.reload(easy_tf_log)
print(self._testMethodName)
def test_no_setup(self):
"""
Test that if tflog() is used without any extra setup, a directory
'logs' is created in the current directory containing the event file.
"""
with tempfile.TemporaryDirectory() as temp_dir:
os.chdir(temp_dir)
easy_tf_log.tflog('var', 0)
self.assertEqual(os.listdir(), ['logs'])
self.assertIn('events.out.tfevents', os.listdir('logs')[0])
def test_set_dir(self):
"""
Confirm that set_dir works.
"""
with tempfile.TemporaryDirectory() as temp_dir:
os.chdir(temp_dir)
easy_tf_log.set_dir('logs2')
easy_tf_log.tflog('var', 0)
self.assertEqual(os.listdir(), ['logs2'])
self.assertIn('events.out.tfevents', os.listdir('logs2')[0])
def test_set_writer(self):
"""
Check that when using an EventFileWriter from a FileWriter,
the resulting events file contains events from both the FileWriter
and easy_tf_log.
"""
with tempfile.TemporaryDirectory() as temp_dir:
os.chdir(temp_dir)
writer = tf1_summary.FileWriter('logs')
var = tf.Variable(0.0)
summary_op = tf1_summary.scalar('tf_var', var)
if tf.__version__ >= '2':
sess = tf.compat.v1.Session()
else:
sess = tf.Session()
sess.run(var.initializer)
summary = sess.run(summary_op)
writer.add_summary(summary)
easy_tf_log.set_writer(writer.event_writer)
easy_tf_log.tflog('easy-tf-log_var', 0)
self.assertEqual(os.listdir(), ['logs'])
event_filename = osp.join('logs', os.listdir('logs')[0])
self.assertIn('events.out.tfevents', event_filename)
tags = set()
for event in tf_train.summary_iterator(event_filename):
for value in event.summary.value:
tags.add(value.tag)
self.assertIn('tf_var', tags)
self.assertIn('easy-tf-log_var', tags)
def test_full(self):
"""
Log a few values and check that the event file contain the expected
values.
"""
with tempfile.TemporaryDirectory() as temp_dir:
os.chdir(temp_dir)
for i in range(10):
easy_tf_log.tflog('foo', i)
for i in range(10):
easy_tf_log.tflog('bar', i)
event_filename = osp.join('logs', os.listdir('logs')[0])
event_n = 0
for event in tf_train.summary_iterator(event_filename):
if event_n == 0: # metadata
event_n += 1
continue
if event_n <= 10:
self.assertEqual(event.step, event_n - 1)
self.assertEqual(event.summary.value[0].tag, "foo")
self.assertEqual(event.summary.value[0].simple_value,
float(event_n - 1))
if event_n > 10 and event_n <= 20:
self.assertEqual(event.step, event_n - 10 - 1)
self.assertEqual(event.summary.value[0].tag, "bar")
self.assertEqual(event.summary.value[0].simple_value,
float(event_n - 10 - 1))
event_n += 1
def test_explicit_step(self):
"""
Log a few values explicitly setting the step number.
"""
with tempfile.TemporaryDirectory() as temp_dir:
os.chdir(temp_dir)
for i in range(5):
easy_tf_log.tflog('foo', i, step=(10 * i))
# These ones should continue from where the previous ones left off
for i in range(5):
easy_tf_log.tflog('foo', i)
event_filename = osp.join('logs', os.listdir('logs')[0])
event_n = 0
for event in tf_train.summary_iterator(event_filename):
if event_n == 0: # metadata
event_n += 1
continue
if event_n <= 5:
self.assertEqual(event.step, 10 * (event_n - 1))
if event_n > 5 and event_n <= 10:
self.assertEqual(event.step, 40 + (event_n - 5))
event_n += 1
def test_fork(self):
with tempfile.TemporaryDirectory() as temp_dir:
easy_tf_log.set_dir(temp_dir)
def f(queue):
easy_tf_log.tflog('foo', 0)
queue.put(True)
q = Queue()
Process(target=f, args=[q], daemon=True).start()
try:
q.get(timeout=1.0)
except queue.Empty:
self.fail("Process did not return")
def test_measure_rate(self):
with tempfile.TemporaryDirectory() as temp_dir:
logger = easy_tf_log.Logger(log_dir=temp_dir)
logger.measure_rate('foo', 0)
time.sleep(1)
logger.measure_rate('foo', 10)
time.sleep(1)
logger.measure_rate('foo', 25)
event_filename = list(os.scandir(temp_dir))[0].path
event_n = 0
rates = []
for event in tf_train.summary_iterator(event_filename):
if event_n == 0: # metadata
event_n += 1
continue
rates.append(event.summary.value[0].simple_value)
event_n += 1
np.testing.assert_array_almost_equal(rates, [10., 15.], decimal=1)
if __name__ == '__main__':
unittest.main()
| import importlib
import os
import os.path as osp
import queue
import tempfile
import time
import unittest
from multiprocessing import Queue, Process
import numpy as np
import tensorflow as tf
import easy_tf_log
if tf.__version__ >= '2':
import tensorflow.compat.v1.train as tf_train
# Needed for creation of a TensorFlow 1 `summary` op (which behave
# differently from a TensorFlow 2 `summary` op), and a TensorFlow 1
# `FileWriter` (TensorFlow 2 does has `tf.summary.create_file_writer, but
# the object it returns seems to be slightly different - it doesn't have the
# `add_summary` method.)
import tensorflow.compat.v1.summary as tf1_summary
# FileWriter is not compatible with eager execution.
tf.compat.v1.disable_eager_execution()
else:
import tensorflow.train as tf_train
import tensorflow.summary as tf1_summary
class TestEasyTFLog(unittest.TestCase):
def setUp(self):
importlib.reload(easy_tf_log)
print(self._testMethodName)
def test_no_setup(self):
"""
Test that if tflog() is used without any extra setup, a directory
'logs' is created in the current directory containing the event file.
"""
with tempfile.TemporaryDirectory() as temp_dir:
os.chdir(temp_dir)
easy_tf_log.tflog('var', 0)
self.assertEqual(os.listdir(), ['logs'])
self.assertIn('events.out.tfevents', os.listdir('logs')[0])
def test_set_dir(self):
"""
Confirm that set_dir works.
"""
with tempfile.TemporaryDirectory() as temp_dir:
os.chdir(temp_dir)
easy_tf_log.set_dir('logs2')
easy_tf_log.tflog('var', 0)
self.assertEqual(os.listdir(), ['logs2'])
self.assertIn('events.out.tfevents', os.listdir('logs2')[0])
def test_set_writer(self):
"""
Check that when using an EventFileWriter from a FileWriter,
the resulting events file contains events from both the FileWriter
and easy_tf_log.
"""
with tempfile.TemporaryDirectory() as temp_dir:
os.chdir(temp_dir)
writer = tf1_summary.FileWriter('logs')
var = tf.Variable(0.0)
summary_op = tf1_summary.scalar('tf_var', var)
if tf.__version__ >= '2':
sess = tf.compat.v1.Session()
else:
sess = tf.Session()
sess.run(var.initializer)
summary = sess.run(summary_op)
writer.add_summary(summary)
easy_tf_log.set_writer(writer.event_writer)
easy_tf_log.tflog('easy-tf-log_var', 0)
self.assertEqual(os.listdir(), ['logs'])
event_filename = osp.join('logs', os.listdir('logs')[0])
self.assertIn('events.out.tfevents', event_filename)
tags = set()
for event in tf_train.summary_iterator(event_filename):
for value in event.summary.value:
tags.add(value.tag)
self.assertIn('tf_var', tags)
self.assertIn('easy-tf-log_var', tags)
def test_full(self):
"""
Log a few values and check that the event file contain the expected
values.
"""
with tempfile.TemporaryDirectory() as temp_dir:
os.chdir(temp_dir)
for i in range(10):
easy_tf_log.tflog('foo', i)
for i in range(10):
easy_tf_log.tflog('bar', i)
event_filename = osp.join('logs', os.listdir('logs')[0])
event_n = 0
for event in tf_train.summary_iterator(event_filename):
if event_n == 0: # metadata
event_n += 1
continue
if event_n <= 10:
self.assertEqual(event.step, event_n - 1)
self.assertEqual(event.summary.value[0].tag, "foo")
self.assertEqual(event.summary.value[0].simple_value,
float(event_n - 1))
if event_n > 10 and event_n <= 20:
self.assertEqual(event.step, event_n - 10 - 1)
self.assertEqual(event.summary.value[0].tag, "bar")
self.assertEqual(event.summary.value[0].simple_value,
float(event_n - 10 - 1))
event_n += 1
def test_explicit_step(self):
"""
Log a few values explicitly setting the step number.
"""
with tempfile.TemporaryDirectory() as temp_dir:
os.chdir(temp_dir)
for i in range(5):
easy_tf_log.tflog('foo', i, step=(10 * i))
# These ones should continue from where the previous ones left off
for i in range(5):
easy_tf_log.tflog('foo', i)
event_filename = osp.join('logs', os.listdir('logs')[0])
event_n = 0
for event in tf_train.summary_iterator(event_filename):
if event_n == 0: # metadata
event_n += 1
continue
if event_n <= 5:
self.assertEqual(event.step, 10 * (event_n - 1))
if event_n > 5 and event_n <= 10:
self.assertEqual(event.step, 40 + (event_n - 5))
event_n += 1
def test_fork(self):
with tempfile.TemporaryDirectory() as temp_dir:
easy_tf_log.set_dir(temp_dir)
def f(queue):
easy_tf_log.tflog('foo', 0)
queue.put(True)
q = Queue()
Process(target=f, args=[q], daemon=True).start()
try:
q.get(timeout=1.0)
except queue.Empty:
self.fail("Process did not return")
def test_measure_rate(self):
with tempfile.TemporaryDirectory() as temp_dir:
logger = easy_tf_log.Logger(log_dir=temp_dir)
logger.measure_rate('foo', 0)
time.sleep(1)
logger.measure_rate('foo', 10)
time.sleep(1)
logger.measure_rate('foo', 25)
event_filename = list(os.scandir(temp_dir))[0].path
event_n = 0
rates = []
for event in tf_train.summary_iterator(event_filename):
if event_n == 0: # metadata
event_n += 1
continue
rates.append(event.summary.value[0].simple_value)
event_n += 1
np.testing.assert_array_almost_equal(rates, [10., 15.], decimal=1)
if __name__ == '__main__':
unittest.main() | en | 0.889612 | # Needed for creation of a TensorFlow 1 `summary` op (which behave # differently from a TensorFlow 2 `summary` op), and a TensorFlow 1 # `FileWriter` (TensorFlow 2 does has `tf.summary.create_file_writer, but # the object it returns seems to be slightly different - it doesn't have the # `add_summary` method.) # FileWriter is not compatible with eager execution. Test that if tflog() is used without any extra setup, a directory 'logs' is created in the current directory containing the event file. Confirm that set_dir works. Check that when using an EventFileWriter from a FileWriter, the resulting events file contains events from both the FileWriter and easy_tf_log. Log a few values and check that the event file contain the expected values. # metadata Log a few values explicitly setting the step number. # These ones should continue from where the previous ones left off # metadata # metadata | 2.355169 | 2 |
webPage/generator/unitTests/filerw_test.py | CyberDani/personal-roadmap | 0 | 6615147 | import os
import sys
import unittest
sys.path.append('..')
from modules import filerw
from modules import htmlBuilder
class FileReadWriterTests(unittest.TestCase):
def test_fileExists_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
with self.assertRaises(Exception):
filerw.fileExists(file)
with self.assertRaises(Exception):
filerw.fileExists("")
with self.assertRaises(Exception):
filerw.fileExists()
with self.assertRaises(Exception):
filerw.fileExists(None)
with self.assertRaises(Exception):
filerw.fileExists(23)
with self.assertRaises(Exception):
filerw.fileExists(False)
def test_fileExists_example(self):
file = open("./unitTests/temp/testFile.txt", "w")
file.close()
self.assertTrue(filerw.fileExists("./unitTests/temp/testFile.txt"))
os.remove("./unitTests/temp/testFile.txt")
self.assertFalse(filerw.fileExists("./unitTests/temp/testFile.txt"))
def test_getLinesByFilePathWithEndingNewLine_1line(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("HEY")
file.close()
linesFromFile = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(linesFromFile), 1)
self.assertEqual(linesFromFile[0], "HEY")
def test_getLinesByFilePathWithEndingNewLine_1line_1emptyLine(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("HEY\n")
file.close()
linesFromFile = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(linesFromFile), 1)
self.assertEqual(linesFromFile[0], "HEY\n")
def test_getLinesByFilePathWithEndingNewLine_2lines(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("hello dear\n")
file.write("this is the tester\n")
file.close()
linesFromFile = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(linesFromFile), 2)
self.assertEqual(linesFromFile[0], "hello dear\n")
self.assertEqual(linesFromFile[1], "this is the tester\n")
def test_getLinesByFilePath_1line(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("HEY")
file.close()
linesFromFile = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
self.assertEqual(len(linesFromFile), 1)
self.assertEqual(linesFromFile[0], "HEY")
def test_getLinesByFilePath_1line_1emptyLine(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("HEY\n")
file.close()
linesFromFile = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
self.assertEqual(len(linesFromFile), 1)
self.assertEqual(linesFromFile[0], "HEY")
def test_getLinesByFilePath_2lines(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("hello dear\n")
file.write("this is the tester\n")
file.close()
linesFromFile = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
self.assertEqual(len(linesFromFile), 2)
self.assertEqual(linesFromFile[0], "hello dear")
self.assertEqual(linesFromFile[1], "this is the tester")
def test_getLinesWithEndingNewLine_1line(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("HEY")
file.close()
file = open("./unitTests/temp/test.txt", "r")
linesFromFile = filerw.getLinesWithEndingNewLine(file)
self.assertEqual(len(linesFromFile), 1)
self.assertEqual(linesFromFile[0], "HEY")
def test_getLinesWithEndingNewLine_1line_1emptyLine(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("HEY\n")
file.close()
file = open("./unitTests/temp/test.txt", "r")
linesFromFile = filerw.getLinesWithEndingNewLine(file)
self.assertEqual(len(linesFromFile), 1)
self.assertEqual(linesFromFile[0], "HEY\n")
def test_getLinesWithEndingNewLine_2lines(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("hello dear\n")
file.write("this is the tester\n")
file.close()
file = open("./unitTests/temp/test.txt", "r")
linesFromFile = filerw.getLinesWithEndingNewLine(file)
self.assertEqual(len(linesFromFile), 2)
self.assertEqual(linesFromFile[0], "hello dear\n")
self.assertEqual(linesFromFile[1], "this is the tester\n")
def test_getLines_1line(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("HEY")
file.close()
file = open("./unitTests/temp/test.txt", "r")
linesFromFile = filerw.getLines(file)
self.assertEqual(len(linesFromFile), 1)
self.assertEqual(linesFromFile[0], "HEY")
def test_getLines_1line_1emptyLine(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("HEY\n")
file.close()
file = open("./unitTests/temp/test.txt", "r")
linesFromFile = filerw.getLines(file)
self.assertEqual(len(linesFromFile), 1)
self.assertEqual(linesFromFile[0], "HEY")
def test_getLines_2lines(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("hello dear\n")
file.write("this is the tester\n")
file.close()
file = open("./unitTests/temp/test.txt", "r")
linesFromFile = filerw.getLines(file)
self.assertEqual(len(linesFromFile), 2)
self.assertEqual(linesFromFile[0], "hello dear")
self.assertEqual(linesFromFile[1], "this is the tester")
def test_writeLinesPrefixedToFile_nonSense(self):
    """writeLinesPrefixedToFile must raise for non-list lines, non-string prefix and non-file targets."""
    # Original left the handle open; the with-block guarantees it is closed.
    with open("./unitTests/temp/test.txt", "w") as file:
        with self.assertRaises(Exception):
            filerw.writeLinesPrefixedToFile(file, "prefix", "asd")
        with self.assertRaises(Exception):
            filerw.writeLinesPrefixedToFile(file, "prefix", None)
        with self.assertRaises(Exception):
            filerw.writeLinesPrefixedToFile(file, 1, ["asd"])
        with self.assertRaises(Exception):
            filerw.writeLinesPrefixedToFile(file, ["prefix"], ["asd"])
        with self.assertRaises(Exception):
            filerw.writeLinesPrefixedToFile("./unitTests/temp/test.txt", "prefix", ["asd"])
        with self.assertRaises(Exception):
            filerw.writeLinesPrefixedToFile(None, "prefix", ["asd"])
def test_writeLinesPrefixedToFile_emptyList(self):
readLines = self.helper_writeLinesPrefixedToFile("== prefix ==", [])
self.assertEqual(len(readLines), 0)
def test_writeLinesPrefixedToFile_oneEmptyString(self):
readLines = self.helper_writeLinesPrefixedToFile("== prefix ==", [""])
self.assertEqual(len(readLines), 1)
# empty line
self.assertEqual(readLines[0], "")
def test_writeLinesPrefixedToFile_twoEmptyStrings(self):
readLines = self.helper_writeLinesPrefixedToFile("== prefix ==", ["", ""])
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "")
self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFile_oneNewLine(self):
readLines = self.helper_writeLinesPrefixedToFile("[-]", ["\n"])
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "")
def test_writeLinesPrefixedToFile_twoNewLines(self):
readLines = self.helper_writeLinesPrefixedToFile("-=-", ["\n", "\n"])
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "")
self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFile_NewLineAndEmptyString(self):
readLines = self.helper_writeLinesPrefixedToFile("line: ", ["\n", ""])
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "")
self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFile_emptyStringAndNewLine(self):
readLines = self.helper_writeLinesPrefixedToFile("text: ", ["", "\n"])
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "")
self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFile_oneString(self):
readLines = self.helper_writeLinesPrefixedToFile("Greetings: ", ["hey"])
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "Greetings: hey")
def test_writeLinesPrefixedToFile_twoStrings(self):
readLines = self.helper_writeLinesPrefixedToFile("[text] ", ["hey", "Joe"])
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "[text] hey")
self.assertEqual(readLines[1], "[text] Joe")
def test_writeLinesPrefixedToFile_threeStrings(self):
readLines = self.helper_writeLinesPrefixedToFile("", ["hey", "magnificent", "Joe"])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "hey")
self.assertEqual(readLines[1], "magnificent")
self.assertEqual(readLines[2], "Joe")
def test_writeLinesPrefixedToFile_oneStringEndingWithNewLine(self):
readLines = self.helper_writeLinesPrefixedToFile(".", ["hey\n"])
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], ".hey")
self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFile_twoStringsEndingWithNewLine(self):
readLines = self.helper_writeLinesPrefixedToFile("# ", ["hey\n", "Joe\n"])
self.assertEqual(len(readLines), 4)
self.assertEqual(readLines[0], "# hey")
self.assertEqual(readLines[1], "")
self.assertEqual(readLines[2], "# Joe")
self.assertEqual(readLines[3], "")
def test_writeLinesPrefixedToFile_stringsAndNewLine(self):
readLines = self.helper_writeLinesPrefixedToFile(">", ["hey\n", "Joe\n", "\n"])
self.assertEqual(len(readLines), 5)
self.assertEqual(readLines[0], ">hey")
self.assertEqual(readLines[1], "")
self.assertEqual(readLines[2], ">Joe")
self.assertEqual(readLines[3], "")
self.assertEqual(readLines[4], "")
def test_writeLinesPrefixedToFile_stringsAndNewLineAndEmptyString(self):
readLines = self.helper_writeLinesPrefixedToFile("\t\t", ["hey\n", "Joe\n", "\n", ""])
self.assertEqual(len(readLines), 6)
self.assertEqual(readLines[0], "\t\they")
self.assertEqual(readLines[1], "")
self.assertEqual(readLines[2], "\t\tJoe")
self.assertEqual(readLines[3], "")
self.assertEqual(readLines[4], "")
self.assertEqual(readLines[5], "")
def helper_writeLinesPrefixedToFile(self, prefix, lines):
    """Write *lines* with *prefix* to the temp file, then read the file back line by line."""
    filePath = "./unitTests/temp/test.txt"
    with open(filePath, "w") as file:
        filerw.writeLinesPrefixedToFile(file, prefix, lines)
    return filerw.getLinesByFilePath(filePath)
def test_writeLinesPrefixedToFileThenAppendNewLine_nonSense(self):
    """writeLinesPrefixedToFileThenAppendNewLine must raise for invalid lines, prefix or target."""
    # Original left the handle open; the with-block guarantees it is closed.
    with open("./unitTests/temp/test.txt", "w") as file:
        with self.assertRaises(Exception):
            filerw.writeLinesPrefixedToFileThenAppendNewLine(file, "prefix", "asd")
        with self.assertRaises(Exception):
            filerw.writeLinesPrefixedToFileThenAppendNewLine(file, "prefix", None)
        with self.assertRaises(Exception):
            filerw.writeLinesPrefixedToFileThenAppendNewLine(file, 1, ["asd"])
        with self.assertRaises(Exception):
            filerw.writeLinesPrefixedToFileThenAppendNewLine(file, ["prefix"], ["asd"])
        with self.assertRaises(Exception):
            filerw.writeLinesPrefixedToFileThenAppendNewLine("./unitTests/temp/test.txt", "prefix", ["asd"])
        with self.assertRaises(Exception):
            filerw.writeLinesPrefixedToFileThenAppendNewLine(None, "prefix", ["asd"])
def test_writeLinesPrefixedToFileThenAppendNewLine_emptyList(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("== prefix ==", [])
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "") # empty line
def test_writeLinesPrefixedToFileThenAppendNewLine_oneEmptyString(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("== prefix ==", [""])
self.assertEqual(len(readLines), 2)
# empty lines
self.assertEqual(readLines[0], "")
self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_twoEmptyStrings(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("== prefix ==", ["", ""])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "")
self.assertEqual(readLines[1], "")
self.assertEqual(readLines[2], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_oneNewLine(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("[-]", ["\n"])
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "")
self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_twoNewLines(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("-=-", ["\n", "\n"])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "")
self.assertEqual(readLines[1], "")
self.assertEqual(readLines[2], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_NewLineAndEmptyString(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("line: ", ["\n", ""])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "")
self.assertEqual(readLines[1], "")
self.assertEqual(readLines[2], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_emptyStringAndNewLine(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("text: ", ["", "\n"])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "")
self.assertEqual(readLines[1], "")
self.assertEqual(readLines[2], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_oneString(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("Greetings: ", ["hey"])
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "Greetings: hey")
self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_twoStrings(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("[text] ", ["hey", "Joe"])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "[text] hey")
self.assertEqual(readLines[1], "[text] Joe")
self.assertEqual(readLines[2], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_threeStrings(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("", ["hey", "magnificent", "Joe"])
self.assertEqual(len(readLines), 4)
self.assertEqual(readLines[0], "hey")
self.assertEqual(readLines[1], "magnificent")
self.assertEqual(readLines[2], "Joe")
self.assertEqual(readLines[3], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_oneStringEndingWithNewLine(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine(".", ["hey\n"])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], ".hey")
self.assertEqual(readLines[1], "")
self.assertEqual(readLines[2], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_twoStringsEndingWithNewLine(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("# ", ["hey\n", "Joe\n"])
self.assertEqual(len(readLines), 5)
self.assertEqual(readLines[0], "# hey")
self.assertEqual(readLines[1], "")
self.assertEqual(readLines[2], "# Joe")
self.assertEqual(readLines[3], "")
self.assertEqual(readLines[4], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_stringsAndNewLine(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine(">", ["hey\n", "Joe\n", "\n"])
self.assertEqual(len(readLines), 6)
self.assertEqual(readLines[0], ">hey")
self.assertEqual(readLines[1], "")
self.assertEqual(readLines[2], ">Joe")
self.assertEqual(readLines[3], "")
self.assertEqual(readLines[4], "")
self.assertEqual(readLines[5], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_stringsAndNewLineAndEmptyString(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("\t\t", ["hey\n", "Joe\n", "\n", ""])
self.assertEqual(len(readLines), 7)
self.assertEqual(readLines[0], "\t\they")
self.assertEqual(readLines[1], "")
self.assertEqual(readLines[2], "\t\tJoe")
self.assertEqual(readLines[3], "")
self.assertEqual(readLines[4], "")
self.assertEqual(readLines[5], "")
self.assertEqual(readLines[6], "")
def helper_writeLinesPrefixedToFileThenAppendNewLine(self, prefix, lines):
    """Write *lines* with *prefix* plus a trailing newline to the temp file, then read it back."""
    filePath = "./unitTests/temp/test.txt"
    with open(filePath, "w") as file:
        filerw.writeLinesPrefixedToFileThenAppendNewLine(file, prefix, lines)
    return filerw.getLinesByFilePath(filePath)
def test_writeStringsPrefixedToFileThenAppendNewLine_nonSense(self):
    """writeStringsPrefixedToFileThenAppendNewLine must raise for invalid lines, prefix or target."""
    # Original left the handle open; the with-block guarantees it is closed.
    with open("./unitTests/temp/test.txt", "w") as file:
        with self.assertRaises(Exception):
            filerw.writeStringsPrefixedToFileThenAppendNewLine(file, "prefix", "asd")
        with self.assertRaises(Exception):
            filerw.writeStringsPrefixedToFileThenAppendNewLine(file, "prefix", None)
        with self.assertRaises(Exception):
            filerw.writeStringsPrefixedToFileThenAppendNewLine(file, 1, ["asd"])
        with self.assertRaises(Exception):
            filerw.writeStringsPrefixedToFileThenAppendNewLine(file, ["prefix"], ["asd"])
        with self.assertRaises(Exception):
            filerw.writeStringsPrefixedToFileThenAppendNewLine("./unitTests/temp/test.txt", "prefix", ["asd"])
        with self.assertRaises(Exception):
            filerw.writeStringsPrefixedToFileThenAppendNewLine(None, "prefix", ["asd"])
def test_writeStringsPrefixedToFileThenAppendNewLine_emptyList(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(1, [])
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_oneEmptyString(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(2, [""])
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "\n")
self.assertEqual(readLines[1], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_twoEmptyStrings(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(3, ["", ""])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "\n")
self.assertEqual(readLines[1], "\n")
self.assertEqual(readLines[2], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_oneNewLine(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(3, ["\n"])
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "\n")
self.assertEqual(readLines[1], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_twoNewLines(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(5, ["\n", "\n"])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "\n")
self.assertEqual(readLines[1], "\n")
self.assertEqual(readLines[2], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_NewLineAndEmptyString(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(3, ["\n", ""])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "\n")
self.assertEqual(readLines[1], "\n")
self.assertEqual(readLines[2], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_emptyStringAndNewLine(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(3, ["", "\n"])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "\n")
self.assertEqual(readLines[1], "\n")
self.assertEqual(readLines[2], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_oneString(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(2, ["hey"])
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "\t\they\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_twoStrings(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(1, ["hey", "Joe"])
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "\they\tJoe\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_threeStrings(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(1, ["hey", "magnificent", "Joe"])
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "\they\tmagnificent\tJoe\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_oneStringEndingWithNewLine(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(3, ["hey\n"])
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "\t\t\they\n")
self.assertEqual(readLines[1], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_twoStringsEndingWithNewLine(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(4, ["hey\n", "Joe\n"])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "\t\t\t\they\n")
self.assertEqual(readLines[1], "\t\t\t\tJoe\n")
self.assertEqual(readLines[2], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_stringsAndNewLine(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(4, ["hey\n", "Joe\n", "\n"])
self.assertEqual(len(readLines), 4)
self.assertEqual(readLines[0], "\t\t\t\they\n")
self.assertEqual(readLines[1], "\t\t\t\tJoe\n")
self.assertEqual(readLines[2], "\n")
self.assertEqual(readLines[3], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_stringsAndNewLineAndEmptyString(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(4, ["hey\n", "Joe\n", "\n", ""])
self.assertEqual(len(readLines), 5)
self.assertEqual(readLines[0], "\t\t\t\they\n")
self.assertEqual(readLines[1], "\t\t\t\tJoe\n")
self.assertEqual(readLines[2], "\n")
self.assertEqual(readLines[3], "\n")
self.assertEqual(readLines[4], "\n")
def helper_writeStringsIndentedToFileThenAppendNewLine(self, indent, lines):
    """Write *lines* prefixed with *indent* escaped tabs to the temp file, then read it back keeping newlines."""
    filePath = "./unitTests/temp/test.txt"
    escapedTabs = htmlBuilder.getEscapedTabs(indent)
    with open(filePath, "w") as file:
        filerw.writeStringsPrefixedToFileThenAppendNewLine(file, escapedTabs, lines)
    return filerw.getLinesByFilePathWithEndingNewLine(filePath)
def test_writeLinesToFileThenAppendNewLine_nonSense(self):
    """writeLinesToFileThenAppendNewLine must raise for non-list lines and for a path instead of a file."""
    # Original left the handle open; the with-block guarantees it is closed.
    with open("./unitTests/temp/test.txt", "w") as file:
        with self.assertRaises(Exception):
            filerw.writeLinesToFileThenAppendNewLine(file, "asd")
        with self.assertRaises(Exception):
            filerw.writeLinesToFileThenAppendNewLine(file, 1)
        with self.assertRaises(Exception):
            filerw.writeLinesToFileThenAppendNewLine(file, None)
        with self.assertRaises(Exception):
            filerw.writeLinesToFileThenAppendNewLine("text.txt", ["firstLine"])
def test_writeLinesToFileThenAppendNewLine_noLine(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFileThenAppendNewLine(file, [])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 0)
def test_writeLinesToFileThenAppendNewLine_emptyLine(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFileThenAppendNewLine(file, [""])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "\n")
def test_writeLinesToFileThenAppendNewLine_1line(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFileThenAppendNewLine(file, ["this is me"])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "this is me\n")
def test_writeLinesToFileThenAppendNewLine_1lineEndingWithNewline(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFileThenAppendNewLine(file, ["this is me\n"])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "this is me\n")
self.assertEqual(readLines[1], "\n")
def test_writeLinesToFileThenAppendNewLine_2lines(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFileThenAppendNewLine(file, ["this is me:", "\t<NAME>, VIP executor"])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "this is me:\n")
self.assertEqual(readLines[1], "\t<NAME>, VIP executor\n")
def test_writeLinesToFileThenAppendNewLine_3lines(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFileThenAppendNewLine(file, ["this is me:", "\t<NAME>, VIP executor", "tel: 0875432123"])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "this is me:\n")
self.assertEqual(readLines[1], "\t<NAME>, VIP executor\n")
self.assertEqual(readLines[2], "tel: 0875432123\n")
def test_writeLinesToFileByFilePathThenAppendNewLine_nonSense(self):
    """writeLinesToFileByFilePathThenAppendNewLine must raise for non-list lines and for a file object instead of a path."""
    # Original left the handle open; the with-block guarantees it is closed.
    with open("./unitTests/temp/test.txt", "w") as file:
        with self.assertRaises(Exception):
            filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", "asd")
        with self.assertRaises(Exception):
            filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", 1)
        with self.assertRaises(Exception):
            filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", None)
        with self.assertRaises(Exception):
            filerw.writeLinesToFileByFilePathThenAppendNewLine(file, ["firstLine"])
def test_writeLinesToFileByFilePathThenAppendNewLine_Noline(self):
filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", [])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 0)
def test_writeLinesToFileByFilePathThenAppendNewLine_emptyLine(self):
filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", [""])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "\n")
def test_writeLinesToFileByFilePathThenAppendNewLine_emptyLine_afterSomethingElse(self):
filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt",
["first", "second", "third", "fourth"])
filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", [""])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "\n")
def test_writeLinesToFileByFilePathThenAppendNewLine_1line(self):
filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", ["this is me"])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "this is me\n")
def test_writeLinesToFileByFilePathThenAppendNewLine_1lineEndingWithNewline(self):
filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", ["this is me\n"])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "this is me\n")
self.assertEqual(readLines[1], "\n")
def test_writeLinesToFileByFilePathThenAppendNewLine_2lines(self):
filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt",
["this is me:", "\t<NAME>, VIP executor"])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "this is me:\n")
self.assertEqual(readLines[1], "\t<NAME>, VIP executor\n")
def test_writeLinesToFileByFilePathThenAppendNewLine_3lines(self):
    """Three lines written by path get newline terminators plus one trailing empty line.

    Renamed from ``test_wwrite...`` — typo in the original name broke the
    naming convention shared by the sibling tests (still matched by the
    ``test_`` discovery prefix, so the test keeps running).
    """
    filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt",
                                                       ["this is me:", "\t<NAME>, VIP executor", "tel: 0875432123"])
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 3)
    self.assertEqual(readLines[0], "this is me:\n")
    self.assertEqual(readLines[1], "\t<NAME>, VIP executor\n")
    self.assertEqual(readLines[2], "tel: 0875432123\n")
def test_writeLinesToFile_nonSense(self):
    """writeLinesToFile must raise for non-list lines and for a path instead of a file."""
    # Original left the handle open; the with-block guarantees it is closed.
    with open("./unitTests/temp/test.txt", "w") as file:
        with self.assertRaises(Exception):
            filerw.writeLinesToFile(file, "asd")
        with self.assertRaises(Exception):
            filerw.writeLinesToFile(file, 1)
        with self.assertRaises(Exception):
            filerw.writeLinesToFile(file, None)
        with self.assertRaises(Exception):
            filerw.writeLinesToFile("text.txt", ["firstLine"])
def test_writeLinesToFile_Noline(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFile(file, [])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 0)
def test_writeLinesToFile_emptyLine(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFile(file, [""])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 0)
def test_writeLinesToFile_1line(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFile(file, ["this is me"])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "this is me")
def test_writeLinesToFile_1lineEndingWithNewline(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFile(file, ["this is me\n"])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "this is me\n")
def test_writeLinesToFile_2lines(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFile(file, ["this is me:", "\t<NAME>, VIP executor"])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "this is me:\n")
self.assertEqual(readLines[1], "\t<NAME>, VIP executor")
def test_writeLinesToFile_3lines(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFile(file, ["this is me:", "\t<NAME>, VIP executor", "tel: 0875432123"])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "this is me:\n")
self.assertEqual(readLines[1], "\t<NAME>, VIP executor\n")
self.assertEqual(readLines[2], "tel: 0875432123")
def test_writeLinesToFileByFilePath_nonSense(self):
    """writeLinesToFileByFilePath must raise for non-list lines and for a file object instead of a path."""
    # Original left the handle open; the with-block guarantees it is closed.
    with open("./unitTests/temp/test.txt", "w") as file:
        with self.assertRaises(Exception):
            filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", "asd")
        with self.assertRaises(Exception):
            filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", 1)
        with self.assertRaises(Exception):
            filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", None)
        with self.assertRaises(Exception):
            filerw.writeLinesToFileByFilePath(file, ["firstLine"])
def test_writeLinesToFileByFilePath_noLine(self):
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", [])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 0)
def test_writeLinesToFileByFilePath_noLine_afterSomeLines(self):
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", ["hey", "little", "man"])
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", [])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 0)
def test_writeLinesToFileByFilePath_emptyLine(self):
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", [""])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 0)
def test_writeLinesToFileByFilePath_1line(self):
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", ["this is me"])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "this is me")
def test_writeLinesToFileByFilePath_1lineEndingWithNewline(self):
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", ["this is me\n"])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "this is me\n")
def test_writeLinesToFileByFilePath_2lines(self):
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt",
["this is me:", "\t<NAME>, VIP executor"])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "this is me:\n")
self.assertEqual(readLines[1], "\t<NAME>, VIP executor")
def test_writeLinesToFileByFilePath_3lines(self):
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt",
["this is me:", "\t<NAME>, VIP executor", "tel: 0875432123"])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "this is me:\n")
self.assertEqual(readLines[1], "\t<NAME>, VIP executor\n")
self.assertEqual(readLines[2], "tel: 0875432123")
def test_rTrimNewLines_nonSense(self):
    """rTrimNewLines must raise when called without a list of strings."""
    with self.assertRaises(Exception):
        filerw.rTrimNewLines()
    invalidArguments = ["hello", None, "hey\n", False, ["one", None, "three"]]
    for invalidArgument in invalidArguments:
        with self.assertRaises(Exception):
            filerw.rTrimNewLines(invalidArgument)
def test_rTrimNewLines_emptyList(self):
    """An empty list trims to an empty list."""
    self.assertEqual(filerw.rTrimNewLines([]), [])
def test_rTrimNewLines_oneElement(self):
result = filerw.rTrimNewLines(["Hello!"])
self.assertEqual(len(result), 1)
self.assertEqual(result[0], "Hello!")
result = filerw.rTrimNewLines(["\n\tHello!"])
self.assertEqual(len(result), 1)
self.assertEqual(result[0], "\n\tHello!")
result = filerw.rTrimNewLines(["\n\tHello!\n"])
self.assertEqual(len(result), 1)
self.assertEqual(result[0], "\n\tHello!")
result = filerw.rTrimNewLines(["Hello\n\n"])
self.assertEqual(len(result), 1)
self.assertEqual(result[0], "Hello")
result = filerw.rTrimNewLines(["Hello\n\n\n\n\n\n\n"])
self.assertEqual(len(result), 1)
self.assertEqual(result[0], "Hello")
def test_rTrimNewLines_twoElements(self):
result = filerw.rTrimNewLines(["Hello", "hey\n"])
self.assertEqual(len(result), 2)
self.assertEqual(result[0], "Hello")
self.assertEqual(result[1], "hey")
result = filerw.rTrimNewLines(["hey\n", "Hello\n"])
self.assertEqual(len(result), 2)
self.assertEqual(result[1], "Hello")
self.assertEqual(result[0], "hey")
result = filerw.rTrimNewLines(["Hello", "hey"])
self.assertEqual(len(result), 2)
self.assertEqual(result[0], "Hello")
self.assertEqual(result[1], "hey")
result = filerw.rTrimNewLines(["Hello", "\n\n"])
self.assertEqual(len(result), 2)
self.assertEqual(result[0], "Hello")
self.assertEqual(result[1], "")
def test_rTrimNewLines_threeElements(self):
    """Every element loses its trailing newlines; order and count are kept."""
    trimmed = filerw.rTrimNewLines(["Hello\n", "hey", "hi\n\n"])
    self.assertEqual(trimmed, ["Hello", "hey", "hi"])
import os
import sys
import unittest
sys.path.append('..')
from modules import filerw
from modules import htmlBuilder
class FileReadWriterTests(unittest.TestCase):
def test_fileExists_nonSense(self):
    """fileExists must raise for anything that is not a non-empty path string."""
    # The open file object doubles as an invalid (non-string) argument.
    # Original left the handle open; the with-block guarantees it is closed.
    with open("./unitTests/temp/test.txt", "w") as file:
        with self.assertRaises(Exception):
            filerw.fileExists(file)
        with self.assertRaises(Exception):
            filerw.fileExists("")
        with self.assertRaises(Exception):
            filerw.fileExists()
        with self.assertRaises(Exception):
            filerw.fileExists(None)
        with self.assertRaises(Exception):
            filerw.fileExists(23)
        with self.assertRaises(Exception):
            filerw.fileExists(False)
def test_fileExists_example(self):
    """fileExists follows the file: True after creation, False after removal."""
    filePath = "./unitTests/temp/testFile.txt"
    open(filePath, "w").close()
    self.assertTrue(filerw.fileExists(filePath))
    os.remove(filePath)
    self.assertFalse(filerw.fileExists(filePath))
def test_getLinesByFilePathWithEndingNewLine_1line(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("HEY")
file.close()
linesFromFile = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(linesFromFile), 1)
self.assertEqual(linesFromFile[0], "HEY")
def test_getLinesByFilePathWithEndingNewLine_1line_1emptyLine(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("HEY\n")
file.close()
linesFromFile = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(linesFromFile), 1)
self.assertEqual(linesFromFile[0], "HEY\n")
def test_getLinesByFilePathWithEndingNewLine_2lines(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("hello dear\n")
file.write("this is the tester\n")
file.close()
linesFromFile = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(linesFromFile), 2)
self.assertEqual(linesFromFile[0], "hello dear\n")
self.assertEqual(linesFromFile[1], "this is the tester\n")
def test_getLinesByFilePath_1line(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("HEY")
file.close()
linesFromFile = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
self.assertEqual(len(linesFromFile), 1)
self.assertEqual(linesFromFile[0], "HEY")
def test_getLinesByFilePath_1line_1emptyLine(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("HEY\n")
file.close()
linesFromFile = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
self.assertEqual(len(linesFromFile), 1)
self.assertEqual(linesFromFile[0], "HEY")
def test_getLinesByFilePath_2lines(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("hello dear\n")
file.write("this is the tester\n")
file.close()
linesFromFile = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
self.assertEqual(len(linesFromFile), 2)
self.assertEqual(linesFromFile[0], "hello dear")
self.assertEqual(linesFromFile[1], "this is the tester")
# --- filerw.getLinesWithEndingNewLine / filerw.getLines: same contracts as the
# ByFilePath variants, but the caller passes an already-open file object.
# NOTE(review): the read handles opened below are never closed explicitly —
# the tests rely on garbage collection; consider a with-statement.
def test_getLinesWithEndingNewLine_1line(self):
    file = open("./unitTests/temp/test.txt", "w")
    file.write("HEY")
    file.close()
    file = open("./unitTests/temp/test.txt", "r")
    linesFromFile = filerw.getLinesWithEndingNewLine(file)
    self.assertEqual(len(linesFromFile), 1)
    self.assertEqual(linesFromFile[0], "HEY")
def test_getLinesWithEndingNewLine_1line_1emptyLine(self):
    file = open("./unitTests/temp/test.txt", "w")
    file.write("HEY\n")
    file.close()
    file = open("./unitTests/temp/test.txt", "r")
    linesFromFile = filerw.getLinesWithEndingNewLine(file)
    self.assertEqual(len(linesFromFile), 1)
    self.assertEqual(linesFromFile[0], "HEY\n")
def test_getLinesWithEndingNewLine_2lines(self):
    file = open("./unitTests/temp/test.txt", "w")
    file.write("hello dear\n")
    file.write("this is the tester\n")
    file.close()
    file = open("./unitTests/temp/test.txt", "r")
    linesFromFile = filerw.getLinesWithEndingNewLine(file)
    self.assertEqual(len(linesFromFile), 2)
    self.assertEqual(linesFromFile[0], "hello dear\n")
    self.assertEqual(linesFromFile[1], "this is the tester\n")
def test_getLines_1line(self):
    file = open("./unitTests/temp/test.txt", "w")
    file.write("HEY")
    file.close()
    file = open("./unitTests/temp/test.txt", "r")
    linesFromFile = filerw.getLines(file)
    self.assertEqual(len(linesFromFile), 1)
    self.assertEqual(linesFromFile[0], "HEY")
def test_getLines_1line_1emptyLine(self):
    # trailing newline is stripped by getLines
    file = open("./unitTests/temp/test.txt", "w")
    file.write("HEY\n")
    file.close()
    file = open("./unitTests/temp/test.txt", "r")
    linesFromFile = filerw.getLines(file)
    self.assertEqual(len(linesFromFile), 1)
    self.assertEqual(linesFromFile[0], "HEY")
def test_getLines_2lines(self):
    file = open("./unitTests/temp/test.txt", "w")
    file.write("hello dear\n")
    file.write("this is the tester\n")
    file.close()
    file = open("./unitTests/temp/test.txt", "r")
    linesFromFile = filerw.getLines(file)
    self.assertEqual(len(linesFromFile), 2)
    self.assertEqual(linesFromFile[0], "hello dear")
    self.assertEqual(linesFromFile[1], "this is the tester")
# --- filerw.writeLinesPrefixedToFile: each non-empty line is written with the
# given prefix prepended; embedded "\n" characters split the output into lines.
def test_writeLinesPrefixedToFile_nonSense(self):
    # invalid argument types/combinations must raise
    file = open("./unitTests/temp/test.txt", "w")
    with self.assertRaises(Exception):
        filerw.writeLinesPrefixedToFile(file, "prefix", "asd")
    with self.assertRaises(Exception):
        filerw.writeLinesPrefixedToFile(file, "prefix", None)
    with self.assertRaises(Exception):
        filerw.writeLinesPrefixedToFile(file, 1, ["asd"])
    with self.assertRaises(Exception):
        filerw.writeLinesPrefixedToFile(file, ["prefix"], ["asd"])
    with self.assertRaises(Exception):
        filerw.writeLinesPrefixedToFile("./unitTests/temp/test.txt", "prefix", ["asd"])
    with self.assertRaises(Exception):
        filerw.writeLinesPrefixedToFile(None, "prefix", ["asd"])
def test_writeLinesPrefixedToFile_emptyList(self):
    readLines = self.helper_writeLinesPrefixedToFile("== prefix ==", [])
    self.assertEqual(len(readLines), 0)
def test_writeLinesPrefixedToFile_oneEmptyString(self):
    # an empty string produces an empty line with no prefix
    readLines = self.helper_writeLinesPrefixedToFile("== prefix ==", [""])
    self.assertEqual(len(readLines), 1)
    # empty line
    self.assertEqual(readLines[0], "")
def test_writeLinesPrefixedToFile_twoEmptyStrings(self):
    readLines = self.helper_writeLinesPrefixedToFile("== prefix ==", ["", ""])
    self.assertEqual(len(readLines), 2)
    self.assertEqual(readLines[0], "")
    self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFile_oneNewLine(self):
    # a bare "\n" also yields an unprefixed empty line
    readLines = self.helper_writeLinesPrefixedToFile("[-]", ["\n"])
    self.assertEqual(len(readLines), 1)
    self.assertEqual(readLines[0], "")
def test_writeLinesPrefixedToFile_twoNewLines(self):
    readLines = self.helper_writeLinesPrefixedToFile("-=-", ["\n", "\n"])
    self.assertEqual(len(readLines), 2)
    self.assertEqual(readLines[0], "")
    self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFile_NewLineAndEmptyString(self):
    readLines = self.helper_writeLinesPrefixedToFile("line: ", ["\n", ""])
    self.assertEqual(len(readLines), 2)
    self.assertEqual(readLines[0], "")
    self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFile_emptyStringAndNewLine(self):
    readLines = self.helper_writeLinesPrefixedToFile("text: ", ["", "\n"])
    self.assertEqual(len(readLines), 2)
    self.assertEqual(readLines[0], "")
    self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFile_oneString(self):
    readLines = self.helper_writeLinesPrefixedToFile("Greetings: ", ["hey"])
    self.assertEqual(len(readLines), 1)
    self.assertEqual(readLines[0], "Greetings: hey")
def test_writeLinesPrefixedToFile_twoStrings(self):
    readLines = self.helper_writeLinesPrefixedToFile("[text] ", ["hey", "Joe"])
    self.assertEqual(len(readLines), 2)
    self.assertEqual(readLines[0], "[text] hey")
    self.assertEqual(readLines[1], "[text] Joe")
def test_writeLinesPrefixedToFile_threeStrings(self):
    # an empty prefix leaves the lines unchanged
    readLines = self.helper_writeLinesPrefixedToFile("", ["hey", "magnificent", "Joe"])
    self.assertEqual(len(readLines), 3)
    self.assertEqual(readLines[0], "hey")
    self.assertEqual(readLines[1], "magnificent")
    self.assertEqual(readLines[2], "Joe")
def test_writeLinesPrefixedToFile_oneStringEndingWithNewLine(self):
    # the trailing "\n" inside the string creates a second, empty line
    readLines = self.helper_writeLinesPrefixedToFile(".", ["hey\n"])
    self.assertEqual(len(readLines), 2)
    self.assertEqual(readLines[0], ".hey")
    self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFile_twoStringsEndingWithNewLine(self):
    readLines = self.helper_writeLinesPrefixedToFile("# ", ["hey\n", "Joe\n"])
    self.assertEqual(len(readLines), 4)
    self.assertEqual(readLines[0], "# hey")
    self.assertEqual(readLines[1], "")
    self.assertEqual(readLines[2], "# Joe")
    self.assertEqual(readLines[3], "")
def test_writeLinesPrefixedToFile_stringsAndNewLine(self):
    readLines = self.helper_writeLinesPrefixedToFile(">", ["hey\n", "Joe\n", "\n"])
    self.assertEqual(len(readLines), 5)
    self.assertEqual(readLines[0], ">hey")
    self.assertEqual(readLines[1], "")
    self.assertEqual(readLines[2], ">Joe")
    self.assertEqual(readLines[3], "")
    self.assertEqual(readLines[4], "")
def test_writeLinesPrefixedToFile_stringsAndNewLineAndEmptyString(self):
    readLines = self.helper_writeLinesPrefixedToFile("\t\t", ["hey\n", "Joe\n", "\n", ""])
    self.assertEqual(len(readLines), 6)
    self.assertEqual(readLines[0], "\t\they")
    self.assertEqual(readLines[1], "")
    self.assertEqual(readLines[2], "\t\tJoe")
    self.assertEqual(readLines[3], "")
    self.assertEqual(readLines[4], "")
    self.assertEqual(readLines[5], "")
def helper_writeLinesPrefixedToFile(self, prefix, lines):
    """Write *lines* prefixed with *prefix* into the temp file, then read it back
    (trailing newlines stripped) and return the resulting line list."""
    tempFilePath = "./unitTests/temp/test.txt"
    with open(tempFilePath, "w") as outputFile:
        filerw.writeLinesPrefixedToFile(outputFile, prefix, lines)
    return filerw.getLinesByFilePath(tempFilePath)
# --- filerw.writeLinesPrefixedToFileThenAppendNewLine: same as the plain
# prefixed writer, but one extra empty line is appended at the end.
def test_writeLinesPrefixedToFileThenAppendNewLine_nonSense(self):
    # invalid argument types/combinations must raise
    file = open("./unitTests/temp/test.txt", "w")
    with self.assertRaises(Exception):
        filerw.writeLinesPrefixedToFileThenAppendNewLine(file, "prefix", "asd")
    with self.assertRaises(Exception):
        filerw.writeLinesPrefixedToFileThenAppendNewLine(file, "prefix", None)
    with self.assertRaises(Exception):
        filerw.writeLinesPrefixedToFileThenAppendNewLine(file, 1, ["asd"])
    with self.assertRaises(Exception):
        filerw.writeLinesPrefixedToFileThenAppendNewLine(file, ["prefix"], ["asd"])
    with self.assertRaises(Exception):
        filerw.writeLinesPrefixedToFileThenAppendNewLine("./unitTests/temp/test.txt", "prefix", ["asd"])
    with self.assertRaises(Exception):
        filerw.writeLinesPrefixedToFileThenAppendNewLine(None, "prefix", ["asd"])
def test_writeLinesPrefixedToFileThenAppendNewLine_emptyList(self):
    # even an empty input produces the one appended newline
    readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("== prefix ==", [])
    self.assertEqual(len(readLines), 1)
    self.assertEqual(readLines[0], "")  # empty line
def test_writeLinesPrefixedToFileThenAppendNewLine_oneEmptyString(self):
    readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("== prefix ==", [""])
    self.assertEqual(len(readLines), 2)
    # empty lines
    self.assertEqual(readLines[0], "")
    self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_twoEmptyStrings(self):
    readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("== prefix ==", ["", ""])
    self.assertEqual(len(readLines), 3)
    self.assertEqual(readLines[0], "")
    self.assertEqual(readLines[1], "")
    self.assertEqual(readLines[2], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_oneNewLine(self):
    readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("[-]", ["\n"])
    self.assertEqual(len(readLines), 2)
    self.assertEqual(readLines[0], "")
    self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_twoNewLines(self):
    readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("-=-", ["\n", "\n"])
    self.assertEqual(len(readLines), 3)
    self.assertEqual(readLines[0], "")
    self.assertEqual(readLines[1], "")
    self.assertEqual(readLines[2], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_NewLineAndEmptyString(self):
    readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("line: ", ["\n", ""])
    self.assertEqual(len(readLines), 3)
    self.assertEqual(readLines[0], "")
    self.assertEqual(readLines[1], "")
    self.assertEqual(readLines[2], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_emptyStringAndNewLine(self):
    readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("text: ", ["", "\n"])
    self.assertEqual(len(readLines), 3)
    self.assertEqual(readLines[0], "")
    self.assertEqual(readLines[1], "")
    self.assertEqual(readLines[2], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_oneString(self):
    readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("Greetings: ", ["hey"])
    self.assertEqual(len(readLines), 2)
    self.assertEqual(readLines[0], "Greetings: hey")
    self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_twoStrings(self):
    readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("[text] ", ["hey", "Joe"])
    self.assertEqual(len(readLines), 3)
    self.assertEqual(readLines[0], "[text] hey")
    self.assertEqual(readLines[1], "[text] Joe")
    self.assertEqual(readLines[2], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_threeStrings(self):
    readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("", ["hey", "magnificent", "Joe"])
    self.assertEqual(len(readLines), 4)
    self.assertEqual(readLines[0], "hey")
    self.assertEqual(readLines[1], "magnificent")
    self.assertEqual(readLines[2], "Joe")
    self.assertEqual(readLines[3], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_oneStringEndingWithNewLine(self):
    readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine(".", ["hey\n"])
    self.assertEqual(len(readLines), 3)
    self.assertEqual(readLines[0], ".hey")
    self.assertEqual(readLines[1], "")
    self.assertEqual(readLines[2], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_twoStringsEndingWithNewLine(self):
    readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("# ", ["hey\n", "Joe\n"])
    self.assertEqual(len(readLines), 5)
    self.assertEqual(readLines[0], "# hey")
    self.assertEqual(readLines[1], "")
    self.assertEqual(readLines[2], "# Joe")
    self.assertEqual(readLines[3], "")
    self.assertEqual(readLines[4], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_stringsAndNewLine(self):
    readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine(">", ["hey\n", "Joe\n", "\n"])
    self.assertEqual(len(readLines), 6)
    self.assertEqual(readLines[0], ">hey")
    self.assertEqual(readLines[1], "")
    self.assertEqual(readLines[2], ">Joe")
    self.assertEqual(readLines[3], "")
    self.assertEqual(readLines[4], "")
    self.assertEqual(readLines[5], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_stringsAndNewLineAndEmptyString(self):
    readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("\t\t", ["hey\n", "Joe\n", "\n", ""])
    self.assertEqual(len(readLines), 7)
    self.assertEqual(readLines[0], "\t\they")
    self.assertEqual(readLines[1], "")
    self.assertEqual(readLines[2], "\t\tJoe")
    self.assertEqual(readLines[3], "")
    self.assertEqual(readLines[4], "")
    self.assertEqual(readLines[5], "")
    self.assertEqual(readLines[6], "")
def helper_writeLinesPrefixedToFileThenAppendNewLine(self, prefix, lines):
    """Write *lines* prefixed with *prefix* (plus a final appended newline) into
    the temp file, then return the file's content as stripped lines."""
    tempFilePath = "./unitTests/temp/test.txt"
    with open(tempFilePath, "w") as outputFile:
        filerw.writeLinesPrefixedToFileThenAppendNewLine(outputFile, prefix, lines)
    return filerw.getLinesByFilePath(tempFilePath)
# --- filerw.writeStringsPrefixedToFileThenAppendNewLine: strings are written
# onto ONE line, each preceded by the prefix (here: escaped tabs), then a final
# newline is appended.  Expectations below keep the trailing "\n" because the
# helper reads back with getLinesByFilePathWithEndingNewLine.
def test_writeStringsPrefixedToFileThenAppendNewLine_nonSense(self):
    # invalid argument types/combinations must raise
    file = open("./unitTests/temp/test.txt", "w")
    with self.assertRaises(Exception):
        filerw.writeStringsPrefixedToFileThenAppendNewLine(file, "prefix", "asd")
    with self.assertRaises(Exception):
        filerw.writeStringsPrefixedToFileThenAppendNewLine(file, "prefix", None)
    with self.assertRaises(Exception):
        filerw.writeStringsPrefixedToFileThenAppendNewLine(file, 1, ["asd"])
    with self.assertRaises(Exception):
        filerw.writeStringsPrefixedToFileThenAppendNewLine(file, ["prefix"], ["asd"])
    with self.assertRaises(Exception):
        filerw.writeStringsPrefixedToFileThenAppendNewLine("./unitTests/temp/test.txt", "prefix", ["asd"])
    with self.assertRaises(Exception):
        filerw.writeStringsPrefixedToFileThenAppendNewLine(None, "prefix", ["asd"])
def test_writeStringsPrefixedToFileThenAppendNewLine_emptyList(self):
    readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(1, [])
    self.assertEqual(len(readLines), 1)
    self.assertEqual(readLines[0], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_oneEmptyString(self):
    # empty strings contribute no visible text, only the newlines remain
    readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(2, [""])
    self.assertEqual(len(readLines), 2)
    self.assertEqual(readLines[0], "\n")
    self.assertEqual(readLines[1], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_twoEmptyStrings(self):
    readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(3, ["", ""])
    self.assertEqual(len(readLines), 3)
    self.assertEqual(readLines[0], "\n")
    self.assertEqual(readLines[1], "\n")
    self.assertEqual(readLines[2], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_oneNewLine(self):
    readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(3, ["\n"])
    self.assertEqual(len(readLines), 2)
    self.assertEqual(readLines[0], "\n")
    self.assertEqual(readLines[1], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_twoNewLines(self):
    readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(5, ["\n", "\n"])
    self.assertEqual(len(readLines), 3)
    self.assertEqual(readLines[0], "\n")
    self.assertEqual(readLines[1], "\n")
    self.assertEqual(readLines[2], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_NewLineAndEmptyString(self):
    readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(3, ["\n", ""])
    self.assertEqual(len(readLines), 3)
    self.assertEqual(readLines[0], "\n")
    self.assertEqual(readLines[1], "\n")
    self.assertEqual(readLines[2], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_emptyStringAndNewLine(self):
    readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(3, ["", "\n"])
    self.assertEqual(len(readLines), 3)
    self.assertEqual(readLines[0], "\n")
    self.assertEqual(readLines[1], "\n")
    self.assertEqual(readLines[2], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_oneString(self):
    # indent 2 -> two escaped tabs before the string, all on a single line
    readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(2, ["hey"])
    self.assertEqual(len(readLines), 1)
    self.assertEqual(readLines[0], "\t\they\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_twoStrings(self):
    # both strings land on the same line, each preceded by the tab prefix
    readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(1, ["hey", "Joe"])
    self.assertEqual(len(readLines), 1)
    self.assertEqual(readLines[0], "\they\tJoe\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_threeStrings(self):
    readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(1, ["hey", "magnificent", "Joe"])
    self.assertEqual(len(readLines), 1)
    self.assertEqual(readLines[0], "\they\tmagnificent\tJoe\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_oneStringEndingWithNewLine(self):
    readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(3, ["hey\n"])
    self.assertEqual(len(readLines), 2)
    self.assertEqual(readLines[0], "\t\t\they\n")
    self.assertEqual(readLines[1], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_twoStringsEndingWithNewLine(self):
    readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(4, ["hey\n", "Joe\n"])
    self.assertEqual(len(readLines), 3)
    self.assertEqual(readLines[0], "\t\t\t\they\n")
    self.assertEqual(readLines[1], "\t\t\t\tJoe\n")
    self.assertEqual(readLines[2], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_stringsAndNewLine(self):
    readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(4, ["hey\n", "Joe\n", "\n"])
    self.assertEqual(len(readLines), 4)
    self.assertEqual(readLines[0], "\t\t\t\they\n")
    self.assertEqual(readLines[1], "\t\t\t\tJoe\n")
    self.assertEqual(readLines[2], "\n")
    self.assertEqual(readLines[3], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_stringsAndNewLineAndEmptyString(self):
    readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(4, ["hey\n", "Joe\n", "\n", ""])
    self.assertEqual(len(readLines), 5)
    self.assertEqual(readLines[0], "\t\t\t\they\n")
    self.assertEqual(readLines[1], "\t\t\t\tJoe\n")
    self.assertEqual(readLines[2], "\n")
    self.assertEqual(readLines[3], "\n")
    self.assertEqual(readLines[4], "\n")
def helper_writeStringsIndentedToFileThenAppendNewLine(self, indent, lines):
    """Write *lines* to the temp file, each string preceded by *indent* escaped
    tabs, then return the file content with trailing newlines preserved."""
    tempFilePath = "./unitTests/temp/test.txt"
    tabPrefix = htmlBuilder.getEscapedTabs(indent)
    with open(tempFilePath, "w") as outputFile:
        filerw.writeStringsPrefixedToFileThenAppendNewLine(outputFile, tabPrefix, lines)
    return filerw.getLinesByFilePathWithEndingNewLine(tempFilePath)
# --- filerw.writeLinesToFileThenAppendNewLine: every list element is written
# as its own line; a newline is appended after the last element too.
def test_writeLinesToFileThenAppendNewLine_nonSense(self):
    # invalid argument types must raise
    file = open("./unitTests/temp/test.txt", "w")
    with self.assertRaises(Exception):
        filerw.writeLinesToFileThenAppendNewLine(file, "asd")
    with self.assertRaises(Exception):
        filerw.writeLinesToFileThenAppendNewLine(file, 1)
    with self.assertRaises(Exception):
        filerw.writeLinesToFileThenAppendNewLine(file, None)
    with self.assertRaises(Exception):
        filerw.writeLinesToFileThenAppendNewLine("text.txt", ["firstLine"])
def test_writeLinesToFileThenAppendNewLine_noLine(self):
    # an empty list writes nothing at all
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, [])
    file.close()
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 0)
def test_writeLinesToFileThenAppendNewLine_emptyLine(self):
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, [""])
    file.close()
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 1)
    self.assertEqual(readLines[0], "\n")
def test_writeLinesToFileThenAppendNewLine_1line(self):
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["this is me"])
    file.close()
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 1)
    self.assertEqual(readLines[0], "this is me\n")
def test_writeLinesToFileThenAppendNewLine_1lineEndingWithNewline(self):
    # an explicit "\n" in the input doubles up with the appended one
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["this is me\n"])
    file.close()
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 2)
    self.assertEqual(readLines[0], "this is me\n")
    self.assertEqual(readLines[1], "\n")
def test_writeLinesToFileThenAppendNewLine_2lines(self):
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["this is me:", "\t<NAME>, VIP executor"])
    file.close()
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 2)
    self.assertEqual(readLines[0], "this is me:\n")
    self.assertEqual(readLines[1], "\t<NAME>, VIP executor\n")
def test_writeLinesToFileThenAppendNewLine_3lines(self):
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["this is me:", "\t<NAME>, VIP executor", "tel: 0875432123"])
    file.close()
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 3)
    self.assertEqual(readLines[0], "this is me:\n")
    self.assertEqual(readLines[1], "\t<NAME>, VIP executor\n")
    self.assertEqual(readLines[2], "tel: 0875432123\n")
# --- filerw.writeLinesToFileByFilePathThenAppendNewLine: like the file-object
# variant, but takes a path and overwrites (truncates) the target file.
def test_writeLinesToFileByFilePathThenAppendNewLine_nonSense(self):
    # invalid argument types must raise; a file object is not accepted as path
    file = open("./unitTests/temp/test.txt", "w")
    with self.assertRaises(Exception):
        filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", "asd")
    with self.assertRaises(Exception):
        filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", 1)
    with self.assertRaises(Exception):
        filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", None)
    with self.assertRaises(Exception):
        filerw.writeLinesToFileByFilePathThenAppendNewLine(file, ["firstLine"])
def test_writeLinesToFileByFilePathThenAppendNewLine_Noline(self):
    filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", [])
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 0)
def test_writeLinesToFileByFilePathThenAppendNewLine_emptyLine(self):
    filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", [""])
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 1)
    self.assertEqual(readLines[0], "\n")
def test_writeLinesToFileByFilePathThenAppendNewLine_emptyLine_afterSomethingElse(self):
    # a second call must truncate the file, discarding the earlier content
    filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt",
                                                       ["first", "second", "third", "fourth"])
    filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", [""])
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 1)
    self.assertEqual(readLines[0], "\n")
def test_writeLinesToFileByFilePathThenAppendNewLine_1line(self):
    filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", ["this is me"])
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 1)
    self.assertEqual(readLines[0], "this is me\n")
def test_writeLinesToFileByFilePathThenAppendNewLine_1lineEndingWithNewline(self):
    filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", ["this is me\n"])
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 2)
    self.assertEqual(readLines[0], "this is me\n")
    self.assertEqual(readLines[1], "\n")
def test_writeLinesToFileByFilePathThenAppendNewLine_2lines(self):
    filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt",
                                                       ["this is me:", "\t<NAME>, VIP executor"])
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 2)
    self.assertEqual(readLines[0], "this is me:\n")
    self.assertEqual(readLines[1], "\t<NAME>, VIP executor\n")
def test_writeLinesToFileByFilePathThenAppendNewLine_3lines(self):
    # Fixed typo in the method name ("wwrite" -> "write") so it matches the
    # naming convention of the rest of this test family; unittest discovers it
    # by the "test_" prefix either way, so nothing else changes.
    filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt",
                                                       ["this is me:", "\t<NAME>, VIP executor", "tel: 0875432123"])
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 3)
    self.assertEqual(readLines[0], "this is me:\n")
    self.assertEqual(readLines[1], "\t<NAME>, VIP executor\n")
    self.assertEqual(readLines[2], "tel: 0875432123\n")
# --- filerw.writeLinesToFile: elements are newline-separated but NO newline is
# written after the last element (contrast with the ThenAppendNewLine variant).
def test_writeLinesToFile_nonSense(self):
    # invalid argument types must raise
    file = open("./unitTests/temp/test.txt", "w")
    with self.assertRaises(Exception):
        filerw.writeLinesToFile(file, "asd")
    with self.assertRaises(Exception):
        filerw.writeLinesToFile(file, 1)
    with self.assertRaises(Exception):
        filerw.writeLinesToFile(file, None)
    with self.assertRaises(Exception):
        filerw.writeLinesToFile("text.txt", ["firstLine"])
def test_writeLinesToFile_Noline(self):
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFile(file, [])
    file.close()
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 0)
def test_writeLinesToFile_emptyLine(self):
    # a single empty string writes nothing (no separator after the last element)
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFile(file, [""])
    file.close()
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 0)
def test_writeLinesToFile_1line(self):
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFile(file, ["this is me"])
    file.close()
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 1)
    self.assertEqual(readLines[0], "this is me")
def test_writeLinesToFile_1lineEndingWithNewline(self):
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFile(file, ["this is me\n"])
    file.close()
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 1)
    self.assertEqual(readLines[0], "this is me\n")
def test_writeLinesToFile_2lines(self):
    # the last line has no trailing newline
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFile(file, ["this is me:", "\t<NAME>, VIP executor"])
    file.close()
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 2)
    self.assertEqual(readLines[0], "this is me:\n")
    self.assertEqual(readLines[1], "\t<NAME>, VIP executor")
def test_writeLinesToFile_3lines(self):
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFile(file, ["this is me:", "\t<NAME>, VIP executor", "tel: 0875432123"])
    file.close()
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 3)
    self.assertEqual(readLines[0], "this is me:\n")
    self.assertEqual(readLines[1], "\t<NAME>, VIP executor\n")
    self.assertEqual(readLines[2], "tel: 0875432123")
# --- filerw.writeLinesToFileByFilePath: path-based variant of writeLinesToFile;
# overwrites (truncates) the target, no newline after the last element.
def test_writeLinesToFileByFilePath_nonSense(self):
    # invalid argument types must raise; a file object is not accepted as path
    file = open("./unitTests/temp/test.txt", "w")
    with self.assertRaises(Exception):
        filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", "asd")
    with self.assertRaises(Exception):
        filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", 1)
    with self.assertRaises(Exception):
        filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", None)
    with self.assertRaises(Exception):
        filerw.writeLinesToFileByFilePath(file, ["firstLine"])
def test_writeLinesToFileByFilePath_noLine(self):
    filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", [])
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 0)
def test_writeLinesToFileByFilePath_noLine_afterSomeLines(self):
    # a later call with an empty list must truncate the previous content
    filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", ["hey", "little", "man"])
    filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", [])
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 0)
def test_writeLinesToFileByFilePath_emptyLine(self):
    filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", [""])
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 0)
def test_writeLinesToFileByFilePath_1line(self):
    filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", ["this is me"])
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 1)
    self.assertEqual(readLines[0], "this is me")
def test_writeLinesToFileByFilePath_1lineEndingWithNewline(self):
    filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", ["this is me\n"])
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 1)
    self.assertEqual(readLines[0], "this is me\n")
def test_writeLinesToFileByFilePath_2lines(self):
    filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt",
                                      ["this is me:", "\t<NAME>, VIP executor"])
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 2)
    self.assertEqual(readLines[0], "this is me:\n")
    self.assertEqual(readLines[1], "\t<NAME>, VIP executor")
def test_writeLinesToFileByFilePath_3lines(self):
    filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt",
                                      ["this is me:", "\t<NAME>, VIP executor", "tel: 0875432123"])
    readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
    self.assertEqual(len(readLines), 3)
    self.assertEqual(readLines[0], "this is me:\n")
    self.assertEqual(readLines[1], "\t<NAME>, VIP executor\n")
    self.assertEqual(readLines[2], "tel: 0875432123")
# --- filerw.rTrimNewLines: takes a list of strings and strips trailing "\n"
# characters (only from the right); leading/embedded newlines are preserved.
def test_rTrimNewLines_nonSense(self):
    # non-list inputs (including a bare string) and lists containing None must raise
    with self.assertRaises(Exception):
        filerw.rTrimNewLines()
    with self.assertRaises(Exception):
        filerw.rTrimNewLines("hello")
    with self.assertRaises(Exception):
        filerw.rTrimNewLines(None)
    with self.assertRaises(Exception):
        filerw.rTrimNewLines("hey\n")
    with self.assertRaises(Exception):
        filerw.rTrimNewLines(False)
    with self.assertRaises(Exception):
        filerw.rTrimNewLines(["one", None, "three"])
def test_rTrimNewLines_emptyList(self):
    result = filerw.rTrimNewLines([])
    self.assertEqual(len(result), 0)
def test_rTrimNewLines_oneElement(self):
    result = filerw.rTrimNewLines(["Hello!"])
    self.assertEqual(len(result), 1)
    self.assertEqual(result[0], "Hello!")
    # leading whitespace/newlines are untouched
    result = filerw.rTrimNewLines(["\n\tHello!"])
    self.assertEqual(len(result), 1)
    self.assertEqual(result[0], "\n\tHello!")
    result = filerw.rTrimNewLines(["\n\tHello!\n"])
    self.assertEqual(len(result), 1)
    self.assertEqual(result[0], "\n\tHello!")
    # all trailing newlines are removed, not just one
    result = filerw.rTrimNewLines(["Hello\n\n"])
    self.assertEqual(len(result), 1)
    self.assertEqual(result[0], "Hello")
    result = filerw.rTrimNewLines(["Hello\n\n\n\n\n\n\n"])
    self.assertEqual(len(result), 1)
    self.assertEqual(result[0], "Hello")
def test_rTrimNewLines_twoElements(self):
    result = filerw.rTrimNewLines(["Hello", "hey\n"])
    self.assertEqual(len(result), 2)
    self.assertEqual(result[0], "Hello")
    self.assertEqual(result[1], "hey")
    result = filerw.rTrimNewLines(["hey\n", "Hello\n"])
    self.assertEqual(len(result), 2)
    self.assertEqual(result[1], "Hello")
    self.assertEqual(result[0], "hey")
    result = filerw.rTrimNewLines(["Hello", "hey"])
    self.assertEqual(len(result), 2)
    self.assertEqual(result[0], "Hello")
    self.assertEqual(result[1], "hey")
    # a string of only newlines collapses to the empty string
    result = filerw.rTrimNewLines(["Hello", "\n\n"])
    self.assertEqual(len(result), 2)
    self.assertEqual(result[0], "Hello")
    self.assertEqual(result[1], "")
def test_rTrimNewLines_threeElements(self):
    result = filerw.rTrimNewLines(["Hello\n", "hey", "hi\n\n"])
    self.assertEqual(len(result), 3)
    self.assertEqual(result[0], "Hello")
    self.assertEqual(result[1], "hey")
    self.assertEqual(result[2], "hi")
| en | 0.481027 | # empty line # empty line # empty lines | 2.880117 | 3 |
src/experiments/early_stopper.py | eugval/Motion_Prediction | 0 | 6615148 |
class SmoothedEarlyStopper(object):
    """Early stopper driven by an exponentially-weighted moving average (EWMA)
    of a validation metric.

    ``checkpoint()`` is called once per epoch with the current validation
    metric and returns True when the model should be saved; ``continue_training()``
    tells the caller when to break out of the training loop.
    """

    def __init__(self, patience, weight_factor=0.3, seek_decrease=False, after_stop_training=0, verbose=1):
        '''
        :param patience: number of consecutive non-improving epochs that triggers early stopping.
        :param weight_factor: EWMA weight of the newest metric value (higher = less smoothing).
        :param seek_decrease: if True, a *lower* metric counts as an improvement (e.g. a loss).
        :param after_stop_training: number of epochs to keep training after stopping,
               just for illustration purposes, without saving the model.
        :param verbose: verbosity of print messages.
        '''
        self.training_rounds = 0  # kept for external inspection; not updated here
        self.moving_average = 'start'  # sentinel until the first metric arrives
        self.weight_factor = weight_factor
        self.stop = False
        self.decreasing_performance = 0  # consecutive non-improving epochs
        self.after_stop_training = after_stop_training
        self.patience = patience
        self.seek_decrease = seek_decrease
        self.verbose = verbose

    def checkpoint(self, performance_metric):
        '''
        Checks whether to trigger early stopping and whether to save the model.
        :param performance_metric: the metric used to monitor early stopping.
        :return: True if the model is to be saved, False otherwise.
        '''
        if self.moving_average == 'start':
            # first observation seeds the moving average (so the first epoch always saves)
            self.moving_average = performance_metric
        # compare against the EWMA *before* folding the new observation in
        improved = not self.stop and self.performed_better(performance_metric)
        # fold the newest observation into the EWMA (done on every checkpoint)
        self.moving_average = (self.weight_factor * performance_metric
                               + (1 - self.weight_factor) * self.moving_average)
        if improved:
            if self.verbose > 0:
                print("Performance improving on the val set, saving the model!")
            self.decreasing_performance = 0
            return True
        # record the non-improving epoch
        self.decreasing_performance += 1
        if self.decreasing_performance >= self.patience:
            print("Early stopping activated : {} consecutive epochs without improvement!".format(
                self.decreasing_performance))
            print("Model saved yields a val set performance of : {}".format(self.moving_average))
            self.stop = True
            return False
        if self.verbose > 0:
            print("Performance is not improving on the val set, taking note and continuing!")
        return False

    def continue_training(self):
        '''
        Checks whether training is to be continued or not.
        :return: False if breaking out of the training loop, True otherwise.
        '''
        if not self.stop:
            return True
        if self.after_stop_training <= 0:
            return False
        # grace epochs after stopping: keep training but never save
        self.after_stop_training -= 1
        return True

    def performed_better(self, performance_metric):
        '''Return True when the metric is at least as good as the moving average,
        respecting the improvement direction selected by ``seek_decrease``.'''
        if self.verbose:
            print('moving average performance : {}'.format(self.moving_average))
            print('current performance : {}'.format(performance_metric))
        if self.seek_decrease:
            return performance_metric <= self.moving_average
        return performance_metric >= self.moving_average
class EarlyStopper(object):
    """Plain early stopper that tracks the best metric value seen so far.

    Call ``checkpoint`` once per epoch with the validation metric; it returns
    whether the model should be saved.  After ``patience`` consecutive
    non-improving epochs, early stopping is triggered and
    ``continue_training`` counts down ``after_stop_training`` extra
    (non-saving) epochs before asking the loop to terminate.
    """

    def __init__(self, patience, seek_decrease =False, after_stop_training = 0, verbose = 1):
        '''
        :param patience: number of consecutive epochs with non-improving
            performance needed to trigger early stopping.
        :param seek_decrease: if True, a lower metric is better (e.g. a loss).
        :param after_stop_training: number of epochs to keep training after
            stopping, for illustration purposes, without saving the model.
        :param verbose: verbosity of print messages.
        '''
        self.patience = patience
        self.decreasing_performance = 0
        self.last_saved_val = 0.0  # best (sign-adjusted) metric saved so far
        self.verbose = verbose
        self.seek_decrease = seek_decrease
        self.stop = False
        self.after_stop_training = after_stop_training

    def checkpoint(self, performance_metric):
        '''
        Decide whether the current epoch improves on the best saved one.

        :param performance_metric: the metric used to monitor early stopping.
        :return: True if the model is to be saved, False otherwise.
        '''
        # Internally a larger value is always "better"; flip the sign when
        # the caller is minimising the metric.
        if self.seek_decrease:
            performance_metric = -performance_metric
        if not self.stop and self.performed_better(performance_metric):
            if self.verbose > 0:
                print("Performance increasing in the val set, saving the model!")
            self.last_saved_val = performance_metric
            self.decreasing_performance = 0
            return True
        self.decreasing_performance += 1
        if self.decreasing_performance >= self.patience:
            # Patience exhausted: flag the stop and report the saved score.
            print("Early stopping activated : {} consecutive epochs where performance is decreasing!".format(
                self.decreasing_performance))
            print("Model saved yields a val set accuracy of : {}".format(self.last_saved_val))
            self.stop = True
        elif self.verbose > 0:
            print("Performance is decreasing on the val set, taking note and continuing!")
        return False

    def continue_training(self):
        '''
        Tell the caller whether the training loop should keep going.

        :return: False if breaking out of the training loop, True otherwise.
        '''
        if not self.stop:
            return True
        if self.after_stop_training <= 0:
            return False
        self.after_stop_training -= 1
        return True

    def performed_better(self, performance_metric):
        '''Return True when the metric beats (or ties) the best saved value.'''
        # A fresh stopper (0.0) always accepts the first non-zero metric,
        # even a negative one produced by the seek_decrease sign flip.
        untouched = self.last_saved_val == 0.0 and performance_metric != 0
        return untouched or performance_metric >= self.last_saved_val
|
class SmoothedEarlyStopper(object):
    """Early stopper that smooths the monitored metric with an exponential
    moving average (EMA) before comparing epochs.

    Call ``checkpoint`` once per epoch with the validation metric; it returns
    whether the model should be saved.  Once the metric has failed to beat the
    EMA for ``patience`` consecutive epochs, early stopping is triggered and
    ``continue_training`` counts down ``after_stop_training`` extra
    (non-saving) epochs before asking the loop to terminate.
    """

    def __init__(self, patience, weight_factor = 0.3, seek_decrease = False, after_stop_training = 0, verbose = 1):
        '''
        :param patience: number of consecutive epochs with non-improving
            performance needed to trigger early stopping.
        :param weight_factor: EMA weight given to the newest metric value
            (the running average keeps ``1 - weight_factor``).
        :param seek_decrease: if True, a *lower* metric is better (e.g. a
            loss); otherwise a higher metric is better (e.g. accuracy).
        :param after_stop_training: number of epochs to keep training after
            stopping, for illustration purposes, without saving the model.
        :param verbose: verbosity of print messages.
        '''
        # NOTE(review): training_rounds is never read in this class; kept
        # only so the attribute surface stays backward-compatible.
        self.training_rounds = 0
        self.moving_average = 'start'  # sentinel until the first metric arrives
        self.weight_factor = weight_factor
        self.stop = False
        self.decreasing_performance = 0
        self.after_stop_training = after_stop_training
        self.patience = patience
        self.seek_decrease = seek_decrease
        self.verbose = verbose

    def checkpoint(self, performance_metric):
        '''
        Fold ``performance_metric`` into the EMA and decide whether to save.

        :param performance_metric: the metric used to monitor early stopping.
        :return: True if the model is to be saved, False otherwise.
        '''
        if self.moving_average == 'start':
            # First epoch: seed the EMA so performed_better() compares equal.
            self.moving_average = performance_metric
        # Compare against the EMA *before* folding the new value in, then
        # update the average exactly once (the update line used to be
        # duplicated in both branches).
        improved = not self.stop and self.performed_better(performance_metric)
        self.moving_average = (self.weight_factor * performance_metric
                               + (1 - self.weight_factor) * self.moving_average)
        if improved:
            if self.verbose > 0:
                print("Performance increasing in the val set, saving the model!")
            self.decreasing_performance = 0
            return True
        # Record one more epoch of non-improving performance.
        self.decreasing_performance += 1
        if self.decreasing_performance >= self.patience:
            print("Early stopping activated : {} consecutive epochs where performance is decreasing!".format(
                self.decreasing_performance))
            print("Model saved yields a val set accuracy of : {}".format(self.moving_average))
            self.stop = True
        elif self.verbose > 0:
            print("Performance is decreasing on the val set, taking note and continuing!")
        return False

    def continue_training(self):
        '''
        Tell the caller whether the training loop should keep going.

        :return: False if breaking out of the training loop, True otherwise.
        '''
        if not self.stop:
            return True
        if self.after_stop_training <= 0:
            return False
        # Burn one of the post-stop illustration epochs.
        self.after_stop_training -= 1
        return True

    def performed_better(self, performance_metric):
        '''Return True when the new metric is at least as good as the EMA.'''
        if self.verbose:
            print('moving average performance : {}'.format(self.moving_average))
            print('current performance : {}'.format(performance_metric))
        if self.seek_decrease:
            return performance_metric <= self.moving_average
        return performance_metric >= self.moving_average
class EarlyStopper(object):
    """Plain early stopper that tracks the best metric value seen so far.

    Call ``checkpoint`` once per epoch with the validation metric; it returns
    whether the model should be saved.  After ``patience`` consecutive
    non-improving epochs, early stopping is triggered and
    ``continue_training`` counts down ``after_stop_training`` extra
    (non-saving) epochs before asking the loop to terminate.
    """

    def __init__(self, patience, seek_decrease =False, after_stop_training = 0, verbose = 1):
        '''
        :param patience: number of consecutive epochs with non-improving
            performance needed to trigger early stopping.
        :param seek_decrease: if True, a lower metric is better (e.g. a loss).
        :param after_stop_training: number of epochs to keep training after
            stopping, for illustration purposes, without saving the model.
        :param verbose: verbosity of print messages.
        '''
        self.patience = patience
        self.decreasing_performance = 0
        self.last_saved_val = 0.0  # best (sign-adjusted) metric saved so far
        self.verbose = verbose
        self.seek_decrease = seek_decrease
        self.stop = False
        self.after_stop_training = after_stop_training

    def checkpoint(self, performance_metric):
        '''
        Decide whether the current epoch improves on the best saved one.

        :param performance_metric: the metric used to monitor early stopping.
        :return: True if the model is to be saved, False otherwise.
        '''
        # Internally a larger value is always "better"; flip the sign when
        # the caller is minimising the metric.
        if self.seek_decrease:
            performance_metric = -performance_metric
        if not self.stop and self.performed_better(performance_metric):
            if self.verbose > 0:
                print("Performance increasing in the val set, saving the model!")
            self.last_saved_val = performance_metric
            self.decreasing_performance = 0
            return True
        self.decreasing_performance += 1
        if self.decreasing_performance >= self.patience:
            # Patience exhausted: flag the stop and report the saved score.
            print("Early stopping activated : {} consecutive epochs where performance is decreasing!".format(
                self.decreasing_performance))
            print("Model saved yields a val set accuracy of : {}".format(self.last_saved_val))
            self.stop = True
        elif self.verbose > 0:
            print("Performance is decreasing on the val set, taking note and continuing!")
        return False

    def continue_training(self):
        '''
        Tell the caller whether the training loop should keep going.

        :return: False if breaking out of the training loop, True otherwise.
        '''
        if not self.stop:
            return True
        if self.after_stop_training <= 0:
            return False
        self.after_stop_training -= 1
        return True

    def performed_better(self, performance_metric):
        '''Return True when the metric beats (or ties) the best saved value.'''
        # A fresh stopper (0.0) always accepts the first non-zero metric,
        # even a negative one produced by the seek_decrease sign flip.
        untouched = self.last_saved_val == 0.0 and performance_metric != 0
        return untouched or performance_metric >= self.last_saved_val
| en | 0.874414 | Early Stopper class, checkpoints the model at each epoch and triggers stop training if val set performance has been decreasing over a number of epochs :param patience: number of epochs where performance needs to be decreasing to trigger early stopping. :param after_stop_training: number of epochskeep training for a number of epochs after stopping, just for illustration purposes without saving the model. :param verbose: verbosity of print messages Checks whether to trigger early stopping and whether to save the model. :param performance_metric: the metric used to monitor early stopping :return: True if the model is to be saved, false otherwise. #If early stopping is not triggered and the performance is increasing, save the model #record decreasing performance #If performance has been decreasing more than the patience trigger early stopping #Else just continue training but don't save the model Checks whether training is to be contunued or not :return: False if breaking out of training loop, true otherwise Early Stopper class, checkpoints the model at each epoch and triggers stop training if val set performance has been decreasing over a number of epochs :param patience: number of epochs where performance needs to be decreasing to trigger early stopping. :param after_stop_training: number of epochskeep training for a number of epochs after stopping, just for illustration purposes without saving the model. :param verbose: verbosity of print mesages Checks whether to trigger early stopping and whether to save the model. :param performance_metric: the metric used to monitor early stopping :return: True if the model is to be saved, false otherwise. 
#If early stopping is not triggered and the performance is increasing, save the model #record decreasing performance #If performance has been decreasing more than the patience trigger early stopping #Else just continue training but don't save the model Checks whether training is to be contunued or not :return: False if breaking out of training loop, true otherwise | 3.178532 | 3 |
predict.py | swang8/p2_image_classifier | 0 | 6615149 | import argparse
from pathlib import Path
import json
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from PIL import Image
import numpy as np
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets,transforms
from torchvision import models
from collections import OrderedDict
def load_checkpoint(filepath, args):
    """Rebuild a trained classifier from a checkpoint file.

    :param filepath: path to a checkpoint saved by the training script;
        expected keys: 'model_name', 'class_to_id', 'classifier_input',
        'classifier_output', 'classifier_hidden', 'state_dict'.
    :param args: parsed CLI arguments; only ``args.gpu`` is used, to decide
        whether tensors may be loaded onto the GPU.
    :return: the reconstructed model with its classifier weights loaded.
    :raises ValueError: if the checkpoint names an unsupported architecture.
    """
    if torch.cuda.is_available() and args.gpu:
        checkpoint = torch.load(filepath)
    else:
        # Remap GPU-saved tensors onto the CPU when no GPU is requested/available.
        checkpoint = torch.load(filepath, map_location='cpu')
    model_name = checkpoint['model_name']
    if model_name == 'vgg13':
        model = models.vgg13(pretrained=True)
    elif model_name == 'alexnet':
        model = models.alexnet(pretrained=True)
    else:
        # Previously this fell through with model = None and crashed below
        # with an opaque AttributeError; fail with a clear message instead.
        raise ValueError("Unsupported model architecture in checkpoint: {!r}".format(model_name))
    model.class_to_idx = checkpoint['class_to_id']
    model.classifier = build_classifier(checkpoint['classifier_input'],
                                        checkpoint['classifier_output'],
                                        checkpoint['classifier_hidden'])
    model.load_state_dict(checkpoint['state_dict'])
    return model
def build_classifier(input_size, output_size, hidden_layers):
    """Build a feed-forward classifier head.

    Layout: Linear + ReLU for every hidden layer, a final Linear down to
    ``output_size``, and a LogSoftmax over the class dimension.

    :param input_size: number of input features.
    :param output_size: number of output classes.
    :param hidden_layers: sequence of hidden-layer sizes (at least one).
    :return: an ``nn.Sequential`` classifier.
    """
    # Chain the sizes: input -> hidden_1 -> ... -> hidden_n.  The original
    # loop indexed hidden_layers[i+1] inside the i == 0 branch and therefore
    # raised IndexError whenever exactly one hidden layer was given; for two
    # or more hidden layers this produces the identical module sequence.
    sizes = [input_size] + list(hidden_layers)
    layers = []
    for in_size, out_size in zip(sizes[:-1], sizes[1:]):
        layers.append(nn.Linear(in_size, out_size))
        layers.append(nn.ReLU())
    layers.append(nn.Linear(sizes[-1], output_size))
    layers.append(nn.LogSoftmax(dim=1))
    return nn.Sequential(*layers)
def process_image(image):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns a Numpy array of shape (3, 224, 224).

    :param image: path to the image file.
    '''
    from PIL import Image
    im = Image.open(image)
    # Force 3 channels so the normalization below always sees (H, W, 3);
    # grayscale or RGBA inputs would otherwise break the channel transpose.
    im = im.convert('RGB')
    original_size = im.size
    # Resize so the shorter side is 256 px, keeping the aspect ratio.
    # Cast to int: PIL size tuples are pixel counts, not floats.
    new_size = 256
    if original_size[0] < original_size[1]:
        size = (new_size, int(original_size[1] * new_size / original_size[0]))
    else:
        size = (int(original_size[0] * new_size / original_size[1]), new_size)
    im.thumbnail(size)
    # Crop the central 224 x 224 region.
    width = 224
    height = 224
    left = (size[0] - width) / 2
    top = (size[1] - height) / 2
    right = (size[0] + width) / 2
    bottom = (size[1] + height) / 2
    im = im.crop((left, top, right, bottom))
    # Scale channel values to [0, 1], then normalize with the ImageNet
    # statistics the pretrained networks were trained with.
    np_img = np.array(im) / 255
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    np_img = (np_img - mean) / std
    # Reorder from (H, W, C) to PyTorch's expected (C, H, W).
    np_img = np_img.transpose(2, 0, 1)
    return np_img
def predict(image_path, model, topk=5):
    ''' Predict the class (or classes) of an image using a trained deep learning model.

    :param image_path: path to the image file to classify.
    :param model: trained model exposing a ``class_to_idx`` mapping.
    :param topk: number of most-likely classes to return.
    :return: (probabilities, class labels) for the top ``topk`` predictions,
        both ordered from most to least likely.
    '''
    model.eval()
    model.to('cpu')
    # Invert class->index into index->class to translate network outputs.
    idx_to_class = {v: k for k, v in model.class_to_idx.items()}
    # from_numpy + unsqueeze avoids wrapping the array in a Python list,
    # which torch.FloatTensor copies element by element.
    np_img = torch.from_numpy(process_image(image_path)).float().unsqueeze(0)
    # Inference only: no_grad avoids building the autograd graph.
    with torch.no_grad():
        output = model(np_img)
    probs = torch.exp(output).data.numpy()[0]
    # The model emits log-probabilities (LogSoftmax); exp() recovers
    # probabilities, argsort picks the topk largest in descending order.
    topk_index = np.argsort(probs)[-topk:][::-1]
    topk_class = [idx_to_class[x] for x in topk_index]
    topk_probs = probs[topk_index]
    return topk_probs, topk_class
if __name__ == "__main__":
    usage = '''
    Basic usage:
        python predict.py /path/to/image checkpoint
    Options:
        Return top K most likely classes:
            python predict.py input checkpoint --top_k 3
        Use a mapping of categories to real names:
            python predict.py input checkpoint --category_names cat_to_name.json
        Use GPU for inference:
            python predict.py input checkpoint --gpu
    '''

    def _str2bool(value):
        # argparse's type=bool treats any non-empty string (even "False")
        # as True, so parse the text explicitly instead.
        return str(value).lower() in ('true', '1', 'yes')

    parser = argparse.ArgumentParser(description=usage)
    parser.add_argument("img_path", help="The full path to the image")
    parser.add_argument("checkpoint", help="The full path to the checkpoint file")
    parser.add_argument("--category_names", help="A json file used for mapping of categories to real names")
    # Accept both spellings: the documented --top_k and the legacy --top_K
    # (argparse options are case-sensitive, so the usage text used to fail).
    parser.add_argument("--top_k", "--top_K", dest="top_k", type=int,
                        help='show top K most likely classes', default=1)
    # nargs='?' lets the documented bare "--gpu" work (const=True) while
    # still accepting the legacy "--gpu True"/"--gpu False" forms correctly.
    parser.add_argument("--gpu", nargs='?', const=True, default=False,
                        type=_str2bool, help='Use GPU or Not')
    args = parser.parse_args()

    model = load_checkpoint(args.checkpoint, args)
    topk_probs, topk_class = predict(args.img_path, model, args.top_k)
    print()
    print("Predicted class: ", topk_class)
    if args.category_names is not None:
        with open(args.category_names, 'r') as f:
            cat_to_name = json.load(f)
        topk_class_names = [cat_to_name[i] for i in topk_class]
        print("Predicted class names: ", topk_class_names)
    print("Probability: ", topk_probs)
| import argparse
from pathlib import Path
import json
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from PIL import Image
import numpy as np
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets,transforms
from torchvision import models
from collections import OrderedDict
def load_checkpoint(filepath, args):
    """Rebuild a trained classifier from a checkpoint file.

    :param filepath: path to a checkpoint saved by the training script;
        expected keys: 'model_name', 'class_to_id', 'classifier_input',
        'classifier_output', 'classifier_hidden', 'state_dict'.
    :param args: parsed CLI arguments; only ``args.gpu`` is used, to decide
        whether tensors may be loaded onto the GPU.
    :return: the reconstructed model with its classifier weights loaded.
    :raises ValueError: if the checkpoint names an unsupported architecture.
    """
    if torch.cuda.is_available() and args.gpu:
        checkpoint = torch.load(filepath)
    else:
        # Remap GPU-saved tensors onto the CPU when no GPU is requested/available.
        checkpoint = torch.load(filepath, map_location='cpu')
    model_name = checkpoint['model_name']
    if model_name == 'vgg13':
        model = models.vgg13(pretrained=True)
    elif model_name == 'alexnet':
        model = models.alexnet(pretrained=True)
    else:
        # Previously this fell through with model = None and crashed below
        # with an opaque AttributeError; fail with a clear message instead.
        raise ValueError("Unsupported model architecture in checkpoint: {!r}".format(model_name))
    model.class_to_idx = checkpoint['class_to_id']
    model.classifier = build_classifier(checkpoint['classifier_input'],
                                        checkpoint['classifier_output'],
                                        checkpoint['classifier_hidden'])
    model.load_state_dict(checkpoint['state_dict'])
    return model
def build_classifier(input_size, output_size, hidden_layers):
    """Build a feed-forward classifier head.

    Layout: Linear + ReLU for every hidden layer, a final Linear down to
    ``output_size``, and a LogSoftmax over the class dimension.

    :param input_size: number of input features.
    :param output_size: number of output classes.
    :param hidden_layers: sequence of hidden-layer sizes (at least one).
    :return: an ``nn.Sequential`` classifier.
    """
    # Chain the sizes: input -> hidden_1 -> ... -> hidden_n.  The original
    # loop indexed hidden_layers[i+1] inside the i == 0 branch and therefore
    # raised IndexError whenever exactly one hidden layer was given; for two
    # or more hidden layers this produces the identical module sequence.
    sizes = [input_size] + list(hidden_layers)
    layers = []
    for in_size, out_size in zip(sizes[:-1], sizes[1:]):
        layers.append(nn.Linear(in_size, out_size))
        layers.append(nn.ReLU())
    layers.append(nn.Linear(sizes[-1], output_size))
    layers.append(nn.LogSoftmax(dim=1))
    return nn.Sequential(*layers)
def process_image(image):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns a Numpy array of shape (3, 224, 224).

    :param image: path to the image file.
    '''
    from PIL import Image
    im = Image.open(image)
    # Force 3 channels so the normalization below always sees (H, W, 3);
    # grayscale or RGBA inputs would otherwise break the channel transpose.
    im = im.convert('RGB')
    original_size = im.size
    # Resize so the shorter side is 256 px, keeping the aspect ratio.
    # Cast to int: PIL size tuples are pixel counts, not floats.
    new_size = 256
    if original_size[0] < original_size[1]:
        size = (new_size, int(original_size[1] * new_size / original_size[0]))
    else:
        size = (int(original_size[0] * new_size / original_size[1]), new_size)
    im.thumbnail(size)
    # Crop the central 224 x 224 region.
    width = 224
    height = 224
    left = (size[0] - width) / 2
    top = (size[1] - height) / 2
    right = (size[0] + width) / 2
    bottom = (size[1] + height) / 2
    im = im.crop((left, top, right, bottom))
    # Scale channel values to [0, 1], then normalize with the ImageNet
    # statistics the pretrained networks were trained with.
    np_img = np.array(im) / 255
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    np_img = (np_img - mean) / std
    # Reorder from (H, W, C) to PyTorch's expected (C, H, W).
    np_img = np_img.transpose(2, 0, 1)
    return np_img
def predict(image_path, model, topk=5):
    ''' Predict the class (or classes) of an image using a trained deep learning model.

    :param image_path: path to the image file to classify.
    :param model: trained model exposing a ``class_to_idx`` mapping.
    :param topk: number of most-likely classes to return.
    :return: (probabilities, class labels) for the top ``topk`` predictions,
        both ordered from most to least likely.
    '''
    model.eval()
    model.to('cpu')
    # Invert class->index into index->class to translate network outputs.
    idx_to_class = {v: k for k, v in model.class_to_idx.items()}
    # from_numpy + unsqueeze avoids wrapping the array in a Python list,
    # which torch.FloatTensor copies element by element.
    np_img = torch.from_numpy(process_image(image_path)).float().unsqueeze(0)
    # Inference only: no_grad avoids building the autograd graph.
    with torch.no_grad():
        output = model(np_img)
    probs = torch.exp(output).data.numpy()[0]
    # The model emits log-probabilities (LogSoftmax); exp() recovers
    # probabilities, argsort picks the topk largest in descending order.
    topk_index = np.argsort(probs)[-topk:][::-1]
    topk_class = [idx_to_class[x] for x in topk_index]
    topk_probs = probs[topk_index]
    return topk_probs, topk_class
if __name__ == "__main__":
    usage = '''
    Basic usage:
        python predict.py /path/to/image checkpoint
    Options:
        Return top K most likely classes:
            python predict.py input checkpoint --top_k 3
        Use a mapping of categories to real names:
            python predict.py input checkpoint --category_names cat_to_name.json
        Use GPU for inference:
            python predict.py input checkpoint --gpu
    '''

    def _str2bool(value):
        # argparse's type=bool treats any non-empty string (even "False")
        # as True, so parse the text explicitly instead.
        return str(value).lower() in ('true', '1', 'yes')

    parser = argparse.ArgumentParser(description=usage)
    parser.add_argument("img_path", help="The full path to the image")
    parser.add_argument("checkpoint", help="The full path to the checkpoint file")
    parser.add_argument("--category_names", help="A json file used for mapping of categories to real names")
    # Accept both spellings: the documented --top_k and the legacy --top_K
    # (argparse options are case-sensitive, so the usage text used to fail).
    parser.add_argument("--top_k", "--top_K", dest="top_k", type=int,
                        help='show top K most likely classes', default=1)
    # nargs='?' lets the documented bare "--gpu" work (const=True) while
    # still accepting the legacy "--gpu True"/"--gpu False" forms correctly.
    parser.add_argument("--gpu", nargs='?', const=True, default=False,
                        type=_str2bool, help='Use GPU or Not')
    args = parser.parse_args()

    model = load_checkpoint(args.checkpoint, args)
    topk_probs, topk_class = predict(args.img_path, model, args.top_k)
    print()
    print("Predicted class: ", topk_class)
    if args.category_names is not None:
        with open(args.category_names, 'r') as f:
            cat_to_name = json.load(f)
        topk_class_names = [cat_to_name[i] for i in topk_class]
        print("Predicted class names: ", topk_class_names)
    print("Probability: ", topk_probs)
| en | 0.511221 | #print(layers) Scales, crops, and normalizes a PIL image for a PyTorch model, returns an Numpy array # re-size, keep the aspect ratio #print(im.size) # crop center of 224 #plt.imshow(im) # to np # convert values #print(np_img.shape) Predict the class (or classes) of an image using a trained deep learning model. # TODO: Implement the code to predict the class from an image file #print(idx_to_class) Basic usage: python predict.py /path/to/image checkpoint Options: Return top K most likely classes: python predict.py input checkpoint --top_k 3 Use a mapping of categories to real names: python predict.py input checkpoint --category_names cat_to_name.json Use GPU for inference: python predict.py input checkpoint --gpu | 2.355001 | 2 |
examples/plotting/cos_fitting.py | heprom/pymicro | 30 | 6615150 | <filename>examples/plotting/cos_fitting.py
#!/usr/bin/env python
import numpy as np
import os
from matplotlib import pyplot as plt
from pymicro.xray.fitting import fit
'''Basic curve fitting example with a cosine function. The data is
also fitted with a custom function slightly different than the default
Cosine, which lead to the same result.'''
x = np.linspace(-3.0, 3.0, 31)
# generate some noisy data: a cosine plus Gaussian noise
# (the fixed seed keeps the example reproducible)
np.random.seed(13)
y = np.cos(4 * x / np.pi - 0.5) + 0.05 * np.random.randn(len(x))
# custom fit function: a shifted sine with adjustable period and amplitude
def C(x, p):
    """Evaluate sin(pi * (x - p[0]) / (2 * p[1])) * p[2] for fit parameters *p*."""
    shift, half_period, amplitude = p[0].value, p[1].value, p[2].value
    return amplitude * np.sin(np.pi * (x - shift) / (2 * half_period))
# perform the fit twice: once with the custom expression, once with the
# built-in 'Cosine' model; both should converge to the same curve
F = fit(y, x, expression=C, nb_params=3)
C = fit(y, x, expression='Cosine')

plt.plot(x, y, 'bo', label='data points')
plt.plot(x, C(x), 'k-', label='cosine fit')
plt.plot(x, F(x), 'r--', label='custom fit')
plt.xlim(-3, 3)
plt.grid()
plt.legend(numpoints=1)

# save the figure next to this script, plus a 20% thumbnail
image_name = os.path.splitext(__file__)[0] + '.png'
print('writing %s' % image_name)  # fixed typo in the progress message
plt.savefig(image_name, format='png')

from matplotlib import image
image.thumbnail(image_name, 'thumb_' + image_name, 0.2)
| <filename>examples/plotting/cos_fitting.py
#!/usr/bin/env python
import numpy as np
import os
from matplotlib import pyplot as plt
from pymicro.xray.fitting import fit
'''Basic curve fitting example with a cosine function. The data is
also fitted with a custom function slightly different than the default
Cosine, which lead to the same result.'''
x = np.linspace(-3.0, 3.0, 31)
# generate some noisy data: a cosine plus Gaussian noise
# (the fixed seed keeps the example reproducible)
np.random.seed(13)
y = np.cos(4 * x / np.pi - 0.5) + 0.05 * np.random.randn(len(x))
# custom fit function: a shifted sine with adjustable period and amplitude
def C(x, p):
    """Evaluate sin(pi * (x - p[0]) / (2 * p[1])) * p[2] for fit parameters *p*."""
    shift, half_period, amplitude = p[0].value, p[1].value, p[2].value
    return amplitude * np.sin(np.pi * (x - shift) / (2 * half_period))
# perform the fit twice: once with the custom expression, once with the
# built-in 'Cosine' model; both should converge to the same curve
F = fit(y, x, expression=C, nb_params=3)
C = fit(y, x, expression='Cosine')

plt.plot(x, y, 'bo', label='data points')
plt.plot(x, C(x), 'k-', label='cosine fit')
plt.plot(x, F(x), 'r--', label='custom fit')
plt.xlim(-3, 3)
plt.grid()
plt.legend(numpoints=1)

# save the figure next to this script, plus a 20% thumbnail
image_name = os.path.splitext(__file__)[0] + '.png'
print('writing %s' % image_name)  # fixed typo in the progress message
plt.savefig(image_name, format='png')

from matplotlib import image
image.thumbnail(image_name, 'thumb_' + image_name, 0.2)
| en | 0.817089 | #!/usr/bin/env python Basic curve fitting example with a cosine function. The data is also fitted with a custom function slightly different than the default Cosine, which lead to the same result. # generate some noisy data # custom function # perform fitting | 3.571894 | 4 |