max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
onelearn/utils.py | onelearn/onelearn | 16 | 6621051 | <gh_stars>10-100
# Authors: <NAME> <<EMAIL>>
# License: BSD 3 clause
import os
from math import log, exp
import numpy as np
from numpy.random import uniform
from numba import njit
from numba.types import float32, uint32
def get_type(class_):
    """Return the numba type of a class if numba.jit decorators are
    enabled, and None otherwise.

    This helps to get correct coverage of the code: when jitting is
    disabled the class carries no ``class_type`` attribute.

    Parameters
    ----------
    class_ : `object`
        A class

    Returns
    -------
    output : `object`
        A numba instance type, or None
    """
    jit_type = getattr(class_, "class_type", None)
    if jit_type is None:
        return None
    return jit_type.instance_type
@njit
def resize_array(arr, keep, size, fill=0):
    """Resize the given array along the first axis only, preserving the same
    dtype and second axis size (if it's two-dimensional)

    Parameters
    ----------
    arr : `np.array`
        Input array

    keep : `int`
        Keep the first `keep` elements (according to the first axis)

    size : `int`
        Target size of the first axis of the new array (a second axis, if
        any, keeps its original size)

    fill : {`None`, 0, 1}, default=0
        Controls the values in the resized array before putting back the
        first elements
        * If None, the array is not filled (np.empty)
        * If 1 the array is filled with ones
        * If 0 (or any other value) the array is filled with zeros

    Returns
    -------
    output : `np.array`
        New array of shape (size,) or (size, arr.shape[1]) with `keep` first
        elements preserved (along first axis)
    """
    if arr.ndim == 1:
        if fill is None:
            new = np.empty((size,), dtype=arr.dtype)
        elif fill == 1:
            new = np.ones((size,), dtype=arr.dtype)
        else:
            new = np.zeros((size,), dtype=arr.dtype)
        # Copy the preserved prefix into the freshly allocated array.
        new[:keep] = arr[:keep]
        return new
    elif arr.ndim == 2:
        _, n_cols = arr.shape
        if fill is None:
            new = np.empty((size, n_cols), dtype=arr.dtype)
        elif fill == 1:
            new = np.ones((size, n_cols), dtype=arr.dtype)
        else:
            new = np.zeros((size, n_cols), dtype=arr.dtype)
        new[:keep] = arr[:keep]
        return new
    else:
        raise ValueError("resize_array can resize only 1D and 2D arrays")
# Sadly there is no function to sample for a discrete distribution in numba
@njit(uint32(float32[::1]))
def sample_discrete(distribution):
    """Samples according to the given discrete distribution.

    Parameters
    ----------
    distribution : `np.array`, shape=(size,), dtype='float32'
        The discrete distribution we want to sample from. This must contain
        non-negative entries that sum to one.

    Returns
    -------
    output : `uint32`
        Output sampled in {0, 1, ..., distribution.size - 1} according to
        the given distribution

    Notes
    -----
    It is useless to np.cumsum and np.searchsorted here, since we want a single
    sample for this distribution and since it changes at each call. So nothing
    is better here than simple O(n).

    Warning
    -------
    No test is performed here for efficiency: distribution must contain non-
    negative values that sum to one.
    """
    U = uniform(0.0, 1.0)
    cumsum = 0.0
    size = distribution.size
    for j in range(size):
        cumsum += distribution[j]
        if U <= cumsum:
            return j
    # Floating-point round-off may leave cumsum slightly below U even for a
    # normalized distribution: fall back to the last index.
    return size - 1
@njit(float32(float32, float32))
def log_sum_2_exp(a, b):
    """Computation of log( (e^a + e^b) / 2) in an overflow-proof way

    Parameters
    ----------
    a : `float32`
        First number

    b : `float32`
        Second number

    Returns
    -------
    output : `float32`
        Value of log( (e^a + e^b) / 2) for the given a and b
    """
    # TODO: if |a - b| > 50 skip
    # TODO: try several log and exp implementations
    # Factor out the larger of the two exponents so that exp() is always
    # called on a non-positive argument and cannot overflow.
    if a > b:
        return a + log((1 + exp(b - a)) / 2)
    else:
        return b + log((1 + exp(a - b)) / 2)
| # Authors: <NAME> <<EMAIL>>
# License: BSD 3 clause
import os
from math import log, exp
import numpy as np
from numpy.random import uniform
from numba import njit
from numba.types import float32, uint32
def get_type(class_):
"""Gives the numba type of an object is numba.jit decorators are enabled and None
otherwise. This helps to get correct coverage of the code
Parameters
----------
class_ : `object`
A class
Returns
-------
output : `object`
A numba type of None
"""
class_type = getattr(class_, "class_type", None)
if class_type is None:
return class_type
else:
return class_type.instance_type
@njit
def resize_array(arr, keep, size, fill=0):
"""Resize the given array along the first axis only, preserving the same
dtype and second axis size (if it's two-dimensional)
Parameters
----------
arr : `np.array`
Input array
keep : `int`
Keep the first `keep` elements (according to the first axis)
size : `int`
Target size of the first axis of new array (
fill : {`None`, 0, 1}, default=0
Controls the values in the resized array before putting back the first elements
* If None, the array is not filled
* If 1 the array is filled with ones
* If 0 the array is filled with zeros
Returns
-------
output : `np.array`
New array of shape (size,) or (size, arr.shape[1]) with `keep` first
elements preserved (along first axis)
"""
if arr.ndim == 1:
if fill is None:
new = np.empty((size,), dtype=arr.dtype)
elif fill == 1:
new = np.ones((size,), dtype=arr.dtype)
else:
new = np.zeros((size,), dtype=arr.dtype)
new[:keep] = arr[:keep]
return new
elif arr.ndim == 2:
_, n_cols = arr.shape
if fill is None:
new = np.empty((size, n_cols), dtype=arr.dtype)
elif fill == 1:
new = np.ones((size, n_cols), dtype=arr.dtype)
else:
new = np.zeros((size, n_cols), dtype=arr.dtype)
new[:keep] = arr[:keep]
return new
else:
raise ValueError("resize_array can resize only 1D and 2D arrays")
# Sadly there is no function to sample for a discrete distribution in numba
@njit(uint32(float32[::1]))
def sample_discrete(distribution):
"""Samples according to the given discrete distribution.
Parameters
----------
distribution : `np.array', shape=(size,), dtype='float32'
The discrete distribution we want to sample from. This must contain
non-negative entries that sum to one.
Returns
-------
output : `uint32`
Output sampled in {0, 1, 2, distribution.size} according to the given
distribution
Notes
-----
It is useless to np.cumsum and np.searchsorted here, since we want a single
sample for this distribution and since it changes at each call. So nothing
is better here than simple O(n).
Warning
-------
No test is performed here for efficiency: distribution must contain non-
negative values that sum to one.
"""
# Notes
U = uniform(0.0, 1.0)
cumsum = 0.0
size = distribution.size
for j in range(size):
cumsum += distribution[j]
if U <= cumsum:
return j
return size - 1
@njit(float32(float32, float32))
def log_sum_2_exp(a, b):
"""Computation of log( (e^a + e^b) / 2) in an overflow-proof way
Parameters
----------
a : `float32`
First number
b : `float32`
Second number
Returns
-------
output : `float32`
Value of log( (e^a + e^b) / 2) for the given a and b
"""
# TODO: if |a - b| > 50 skip
# TODO: try several log and exp implementations
if a > b:
return a + log((1 + exp(b - a)) / 2)
else:
return b + log((1 + exp(a - b)) / 2) | en | 0.740215 | # Authors: <NAME> <<EMAIL>> # License: BSD 3 clause Gives the numba type of an object is numba.jit decorators are enabled and None otherwise. This helps to get correct coverage of the code Parameters ---------- class_ : `object` A class Returns ------- output : `object` A numba type of None Resize the given array along the first axis only, preserving the same dtype and second axis size (if it's two-dimensional) Parameters ---------- arr : `np.array` Input array keep : `int` Keep the first `keep` elements (according to the first axis) size : `int` Target size of the first axis of new array ( fill : {`None`, 0, 1}, default=0 Controls the values in the resized array before putting back the first elements * If None, the array is not filled * If 1 the array is filled with ones * If 0 the array is filled with zeros Returns ------- output : `np.array` New array of shape (size,) or (size, arr.shape[1]) with `keep` first elements preserved (along first axis) # Sadly there is no function to sample for a discrete distribution in numba Samples according to the given discrete distribution. Parameters ---------- distribution : `np.array', shape=(size,), dtype='float32' The discrete distribution we want to sample from. This must contain non-negative entries that sum to one. Returns ------- output : `uint32` Output sampled in {0, 1, 2, distribution.size} according to the given distribution Notes ----- It is useless to np.cumsum and np.searchsorted here, since we want a single sample for this distribution and since it changes at each call. So nothing is better here than simple O(n). Warning ------- No test is performed here for efficiency: distribution must contain non- negative values that sum to one. 
# Notes Computation of log( (e^a + e^b) / 2) in an overflow-proof way Parameters ---------- a : `float32` First number b : `float32` Second number Returns ------- output : `float32` Value of log( (e^a + e^b) / 2) for the given a and b # TODO: if |a - b| > 50 skip # TODO: try several log and exp implementations | 3.085034 | 3 |
pyfos/pyfos_util.py | ValeJM/PyFos | 0 | 6621052 | # Copyright 2017 Brocade Communications Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may also obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`pyfos_util` - PyFOS module to provide utility functions.
*********************************************************************************************************
The :mod:`pyfos_util` provides a utility functions.
"""
import http.client as httplib
import json
import xmltodict
from requests.utils import quote
VF_ID = "?vf-id="
HTTP = "http://"
HTTPS = "https://"
GET = "GET "
POST = "POST "
PATCH = "PATCH "
PUT = "PUT "
DELETE = "DELETE "
isErrorRequest = 0
class test():
    """State of a single named test case: its overall pass/fail result and
    the request/response results recorded while it ran.

    NOTE(review): the lowercase class name is unconventional (PEP 8) but is
    kept because callers throughout this module use it.
    """
    def __init__(self, title, description):
        # Test case identification.
        self.title = title
        self.description = description
        # Overall result; flipped to False when any request fails.
        self.overall_passed = True
        self.overall_result_description = None
        # List of `result` objects, one per HTTP request issued.
        self.requests = []
class result():
    "Stores requests and responses"
    def __init__(self, passed, request, response):
        # Whether this individual request succeeded.
        self.passed = passed
        # Raw request string and raw response text, for failure reports.
        self.request = request
        self.response = response
current_test = None
executed_tests = []
current_request = None
class json_encoder(json.JSONEncoder):
    """JSON encoder that serializes any object exposing a reprJSON()
    method by calling it; everything else falls back to the default."""
    def default(self, obj):
        if hasattr(obj, 'reprJSON'):
            return obj.reprJSON()
        else:
            return json.JSONEncoder.default(self, obj)
def parse_page(page):
    """Parse an XML response body into a nested dict via xmltodict."""
    # print (page)
    ret_elements = xmltodict.parse(page)
    return ret_elements
def set_response_parse(response):
    """Parse the HTTP response of a mutating request (POST/PATCH/PUT/DELETE)
    into a status dictionary.

    For 4xx responses the XML error body from the switch is parsed and
    returned; for every other status class a synthesized dictionary with the
    status code and reason phrase is returned.
    """
    page = response.read()
    # Record the raw response for the test-result bookkeeping.
    test_parse_response(response, page)
    if response.status >= 100 and response.status < 200:
        ret_error = {"info-code": response.status,
                     "info-message": response.reason,
                     "info-type": "Informational"}
    elif response.status >= 200 and response.status < 300:
        ret_error = {"success-code": response.status,
                     "success-message": response.reason,
                     "success-type": "Success"}
    elif response.status >= 300 and response.status < 400:
        ret_error = {"redirection-code": response.status,
                     "redirection-message": response.reason,
                     "redirection-type": "Redirection"}
    elif response.status >= 400 and response.status < 500:
        # if response.status == 404:
        #     ret_error = {"server-error-code": response.status,
        #                  "server-error-message": response.reason,
        #                  "server-error-type": "Server error",
        #                  "error-message": "No such URI"}
        #     return ret_error
        # page = response.read()
        # print page
        # Client errors carry a parseable XML error body.
        ret_error = parse_page(page)
    else:
        ret_error = {"server-error-code": response.status,
                     "server-error-message": response.reason,
                     "server-error-type": "Server error"}
    return ret_error
def get_response_parse(response):
    """Parse the HTTP response of a GET request.

    On 2xx the XML body is parsed and its "Response" element returned; on
    4xx the parsed error body is returned; for other status classes a
    synthesized dictionary with the status code and reason is returned.
    """
    page = response.read()
    # print page
    # Record the raw response for the test-result bookkeeping.
    test_parse_response(response, page)
    if response.status >= 100 and response.status < 200:
        ret_error = {"info-code": response.status,
                     "info-message": response.reason,
                     "info-type": "Informational"}
        return ret_error
    elif response.status >= 200 and response.status < 300:
        # print page
        ret_elements = parse_page(page)
        # Only the payload under the top-level "Response" element is useful.
        return ret_elements["Response"]
    elif response.status >= 300 and response.status < 400:
        ret_error = {"redirection-code": response.status,
                     "redirection-message": response.reason,
                     "redirection-type": "Redirection"}
        return ret_error
    elif response.status >= 400 and response.status < 500:
        # page = response.read()
        # print page
        ret_error = parse_page(page)
    else:
        ret_error = {"server-error-code": response.status,
                     "server-error-message": response.reason,
                     "server-error-type": "Server error"}
    return ret_error
def test_title_set(title, description):
    """Start a new test case with the given title and description.

    If a test case is already in progress it is first archived into
    `executed_tests`; then `current_test` is replaced with a fresh `test`
    object. A "Starting test case" banner is printed either way.
    """
    global current_test
    # Archive the previous test case (the original duplicated the print and
    # assignment in both branches; only the append is conditional).
    if current_test is not None:
        executed_tests.append(current_test)
    print("Starting test case", title, ":", description)
    current_test = test(title, description)
def test_explicit_result_passed(description):
    """Explicitly mark the current test case as passed, with a reason."""
    global current_test
    if current_test is None:
        print("current_test is set to None")
    else:
        current_test.overall_passed = True
        current_test.overall_result_description = description
def test_explicit_result_failed(description):
    """Explicitly mark the current test case as failed, with a reason."""
    global current_test
    if current_test is None:
        print("current_test is set to None")
    else:
        current_test.overall_passed = False
        current_test.overall_result_description = description
def test_add_to_failed_requests(request, resp):
    """Record a failed request/response pair on the current test case and
    mark the whole test case as failed."""
    global current_test
    # if any is failing, mark the overall to be false
    if current_test:
        current_test.overall_passed = False
        current_test.requests.append(result(False, request, resp))
def test_add_to_succeeded_requests(request, resp):
    """Record a successful request/response pair on the current test case."""
    global current_test
    # leave the overall mark as is
    if current_test:
        current_test.requests.append(result(True, request, resp))
def test_negative_test_set(isErrReq):
    """Toggle negative-test mode: when nonzero, HTTP error responses are
    expected and therefore counted as successes."""
    global isErrorRequest
    isErrorRequest = isErrReq
def test_parse_response(response, page):
    """Classify a raw HTTP response as a failed or succeeded request for
    the test bookkeeping.

    A status >= 400 counts as a failure unless negative-test mode is on
    (isErrorRequest != 0), in which case errors are the expected outcome.
    """
    # Print switch response as is if isDebug is set
    # and set the failed error if response has error
    resp = ("\nResponse:\n" + str(response.status) +
            " " + response.reason + "\n" + str(page))
    if isErrorRequest == 0 and response.status >= 400:
        test_add_to_failed_requests(current_request, resp)
    else:
        test_add_to_succeeded_requests(current_request, resp)
def response_print(response):
    """Print dictionary into JSON format

    Objects with a reprJSON() method are serialized via json_encoder.

    :param response: dictionary of information to be printed
    """
    print(json.dumps(response, cls=json_encoder, sort_keys=True, indent=4))
def test_results_print():
    """Print a summary of all executed test cases and, when there are
    failures, the details of each failing test's requests/responses."""
    global current_test
    global executed_tests
    # make sure to pick up the last test
    if current_test is not None:
        executed_tests.append(current_test)
    total = len(executed_tests)
    failed = 0
    failed_requests = 0
    passed = 0
    passed_requests = 0
    # Tally per-test and per-request pass/fail counts.
    for test in executed_tests:
        if test.overall_passed is False:
            failed += 1
        else:
            passed += 1
        for request in test.requests:
            if request.passed is True:
                passed_requests += 1
            else:
                failed_requests += 1
    print("\nTEST RESULTS SUMMARY:")
    print("=====================\n")
    print("Passed test cases:\t\t", passed)
    print("Failed test cases:\t\t", failed)
    print("Total test cases:\t\t", total)
    print("Successful requests:\t\t", passed_requests)
    print("Failed requests:\t\t", failed_requests)
    print("Total requests:\t\t\t", passed_requests + failed_requests)
    if failed == 0:
        print("\nTest cases completed successfully.\n")
    else:
        # Detail section: one entry per failed test case.
        print("\nFailed tests:")
        print("=========================\n")
        count = 0
        for test in executed_tests:
            if test.overall_passed is False:
                print("Error #", count, ":", test.title)
                print("Test description:", test.description)
                if test.overall_result_description is not None:
                    print("Test result description:",
                          test.overall_result_description)
                if len(test.requests) > 0:
                    print("=========")
                    for request in test.requests:
                        print(request.request)
                        print(request.response)
                print(" ")
                count += 1
def http_connection(session):
    """Create an HTTP or HTTPS connection object for the session's switch.

    The scheme is chosen from the session's "ishttps" flag ("1" selects
    HTTPS); no connection is actually opened yet.
    """
    ip_addr = session.get("ip_addr")
    use_https = session.get("ishttps") == "1"
    conn_cls = httplib.HTTPSConnection if use_https else httplib.HTTPConnection
    return conn_cls(ip_addr)
def vfidstr_get(session):
    """Return the "?vf-id=<n>" query-string suffix for the session's
    virtual-fabric id, or an empty string when no VF is selected (-1)."""
    vfid = session.get("vfid")
    return "" if vfid == -1 else VF_ID + str(vfid)
def debug(session, http_cmd, cmd, content):
    """Build the full request string, print it when session debugging is
    enabled, and record it in the module-level `current_request` so the
    response can later be paired with its request.

    Parameters
    ----------
    session : dict
        Session attributes; "debug", "ishttps" and "ip_addr" are read.
    http_cmd : str
        HTTP verb prefix, e.g. "GET ".
    cmd : str
        URI path (with any vf-id query string already appended).
    content : str
        Request body; empty string when there is none.
    """
    # Renamed local from `debug` to avoid shadowing this function's name.
    is_debug = session.get("debug")
    # Original duplicated the request build and print in both branches;
    # only the scheme and the content suffix differ.
    scheme = HTTPS if session.get("ishttps") == "1" else HTTP
    request = http_cmd + scheme + session.get("ip_addr") + cmd
    if content != "":
        request += " - CONTENT -> " + content
    if is_debug:
        print(request)
    # Track the responses
    global current_request
    current_request = request
def get_request(session, cmd, content):
    """Issue an HTTP GET for `cmd` on the session's switch and return the
    parsed response dictionary."""
    credential = session.get("credential")
    vfidstr = vfidstr_get(session)
    conn = http_connection(session)
    debug(session, GET, cmd + vfidstr, content)
    conn.request("GET", cmd + vfidstr, content, credential)
    resp = conn.getresponse()
    return get_response_parse(resp)
def put_request(session, cmd, content):
    """Issue a PUT by delegating to patch_request().

    NOTE(review): PUT is deliberately mapped onto PATCH here; the real PUT
    implementation is preserved in put_request_orig().
    """
    return patch_request(session, cmd, content)
def put_request_orig(session, cmd, content):
    """Issue a true HTTP PUT for `cmd` and return the parsed status
    dictionary (kept for reference; put_request() uses PATCH instead)."""
    credential = session.get("credential")
    vfidstr = vfidstr_get(session)
    conn = http_connection(session)
    debug(session, PUT, cmd + vfidstr, content)
    conn.request("PUT", cmd + vfidstr, content, credential)
    resp = conn.getresponse()
    return set_response_parse(resp)
def patch_request(session, cmd, content):
    """Issue an HTTP PATCH for `cmd` and return the parsed status
    dictionary."""
    credential = session.get("credential")
    vfidstr = vfidstr_get(session)
    conn = http_connection(session)
    debug(session, PATCH, cmd + vfidstr, content)
    conn.request("PATCH", cmd + vfidstr, content, credential)
    resp = conn.getresponse()
    return set_response_parse(resp)
def post_request(session, cmd, content):
    """Issue an HTTP POST for `cmd` and return the parsed status
    dictionary."""
    credential = session.get("credential")
    vfidstr = vfidstr_get(session)
    conn = http_connection(session)
    debug(session, POST, cmd + vfidstr, content)
    conn.request("POST", cmd + vfidstr, content, credential)
    resp = conn.getresponse()
    return set_response_parse(resp)
def delete_request(session, cmd, content):
    """Issue an HTTP DELETE for `cmd` and return the parsed status
    dictionary."""
    credential = session.get("credential")
    vfidstr = vfidstr_get(session)
    conn = http_connection(session)
    debug(session, DELETE, cmd + vfidstr, content)
    conn.request("DELETE", cmd + vfidstr, content, credential)
    resp = conn.getresponse()
    return set_response_parse(resp)
def encode_slotport(name):
    """URL-encode a slot/port name (e.g. "1/2"): safe='' forces the "/"
    separator (and every other reserved character) to be percent-encoded."""
    return quote(name, safe='')
def get_from_list(resp, key, index):
    """Return element `index` of the list stored under `key` in `resp`,
    or None when `resp` is not a dict or does not contain the key."""
    if not isinstance(resp, dict):
        return None
    if key not in resp:
        return None
    return resp[key][index]
def get_from_dict(resp, key):
    """Return `resp[key]`, or None when `resp` is not a dict or does not
    contain the key."""
    if isinstance(resp, dict):
        return resp.get(key)
    return None
def is_success_resp(resp):
    """Return True when `resp` is a success response dictionary, i.e. a
    dict whose 'success-type' entry equals 'Success'."""
    # Idiom: return the boolean expression directly instead of
    # `if ...: return True / else: return False`.
    return isinstance(resp, dict) and resp.get('success-type') == 'Success'
def is_failed_resp(resp):
    """Return True when `resp` is an error response dictionary, i.e. a
    dict containing either 'errors' or 'server-error-code'."""
    # Idiom: collapse the duplicated `return True` branches into a single
    # boolean expression.
    return isinstance(resp, dict) and (
        'errors' in resp or 'server-error-code' in resp)
| # Copyright 2017 Brocade Communications Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may also obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`pyfos_util` - PyFOS module to provide utility functions.
*********************************************************************************************************
The :mod:`pyfos_util` provides a utility functions.
"""
import http.client as httplib
import json
import xmltodict
from requests.utils import quote
VF_ID = "?vf-id="
HTTP = "http://"
HTTPS = "https://"
GET = "GET "
POST = "POST "
PATCH = "PATCH "
PUT = "PUT "
DELETE = "DELETE "
isErrorRequest = 0
class test():
def __init__(self, title, description):
self.title = title
self.description = description
self.overall_passed = True
self.overall_result_description = None
self.requests = []
class result():
"Stores requests and responses"
def __init__(self, passed, request, response):
self.passed = passed
self.request = request
self.response = response
current_test = None
executed_tests = []
current_request = None
class json_encoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, 'reprJSON'):
return obj.reprJSON()
else:
return json.JSONEncoder.default(self, obj)
def parse_page(page):
# print (page)
ret_elements = xmltodict.parse(page)
return ret_elements
def set_response_parse(response):
page = response.read()
test_parse_response(response, page)
if response.status >= 100 and response.status < 200:
ret_error = {"info-code": response.status,
"info-message": response.reason,
"info-type": "Informational"}
elif response.status >= 200 and response.status < 300:
ret_error = {"success-code": response.status,
"success-message": response.reason,
"success-type": "Success"}
elif response.status >= 300 and response.status < 400:
ret_error = {"redirection-code": response.status,
"redirection-message": response.reason,
"redirection-type": "Redirection"}
elif response.status >= 400 and response.status < 500:
# if response.status == 404:
# ret_error = {"server-error-code": response.status,
# "server-error-message": response.reason,
# "server-error-type": "Server error",
# "error-message": "No such URI"}
# return ret_error
# page = response.read()
# print page
ret_error = parse_page(page)
else:
ret_error = {"server-error-code": response.status,
"server-error-message": response.reason,
"server-error-type": "Server error"}
return ret_error
def get_response_parse(response):
page = response.read()
# print page
test_parse_response(response, page)
if response.status >= 100 and response.status < 200:
ret_error = {"info-code": response.status,
"info-message": response.reason,
"info-type": "Informational"}
return ret_error
elif response.status >= 200 and response.status < 300:
# print page
ret_elements = parse_page(page)
return ret_elements["Response"]
elif response.status >= 300 and response.status < 400:
ret_error = {"redirection-code": response.status,
"redirection-message": response.reason,
"redirection-type": "Redirection"}
return ret_error
elif response.status >= 400 and response.status < 500:
# page = response.read()
# print page
ret_error = parse_page(page)
else:
ret_error = {"server-error-code": response.status,
"server-error-message": response.reason,
"server-error-type": "Server error"}
return ret_error
def test_title_set(title, description):
global current_test
if current_test is None:
print("Starting test case", title, ":", description)
current_test = test(title, description)
else:
executed_tests.append(current_test)
print("Starting test case", title, ":", description)
current_test = test(title, description)
def test_explicit_result_passed(description):
global current_test
if current_test is None:
print("current_test is set to None")
else:
current_test.overall_passed = True
current_test.overall_result_description = description
def test_explicit_result_failed(description):
global current_test
if current_test is None:
print("current_test is set to None")
else:
current_test.overall_passed = False
current_test.overall_result_description = description
def test_add_to_failed_requests(request, resp):
global current_test
# if any is failing, mark the overall to be false
if current_test:
current_test.overall_passed = False
current_test.requests.append(result(False, request, resp))
def test_add_to_succeeded_requests(request, resp):
global current_test
# leave the overall mark as is
if current_test:
current_test.requests.append(result(True, request, resp))
def test_negative_test_set(isErrReq):
global isErrorRequest
isErrorRequest = isErrReq
def test_parse_response(response, page):
# Print switch response as is if isDebug is set
# and set the failed error if response has error
resp = ("\nResponse:\n" + str(response.status) +
" " + response.reason + "\n" + str(page))
if isErrorRequest == 0 and response.status >= 400:
test_add_to_failed_requests(current_request, resp)
else:
test_add_to_succeeded_requests(current_request, resp)
def response_print(response):
"""Print dictionary into JSON format
:param response: dictionary of information to be printed
"""
print(json.dumps(response, cls=json_encoder, sort_keys=True, indent=4))
def test_results_print():
global current_test
global executed_tests
# make sure to pick up the last test
if current_test is not None:
executed_tests.append(current_test)
total = len(executed_tests)
failed = 0
failed_requests = 0
passed = 0
passed_requests = 0
for test in executed_tests:
if test.overall_passed is False:
failed += 1
else:
passed += 1
for request in test.requests:
if request.passed is True:
passed_requests += 1
else:
failed_requests += 1
print("\nTEST RESULTS SUMMARY:")
print("=====================\n")
print("Passed test cases:\t\t", passed)
print("Failed test cases:\t\t", failed)
print("Total test cases:\t\t", total)
print("Successful requests:\t\t", passed_requests)
print("Failed requests:\t\t", failed_requests)
print("Total requests:\t\t\t", passed_requests + failed_requests)
if failed == 0:
print("\nTest cases completed successfully.\n")
else:
print("\nFailed tests:")
print("=========================\n")
count = 0
for test in executed_tests:
if test.overall_passed is False:
print("Error #", count, ":", test.title)
print("Test description:", test.description)
if test.overall_result_description is not None:
print("Test result description:",
test.overall_result_description)
if len(test.requests) > 0:
print("=========")
for request in test.requests:
print(request.request)
print(request.response)
print(" ")
count += 1
def http_connection(session):
ip_addr = session.get("ip_addr")
isHttps = session.get("ishttps")
if isHttps == "1":
conn = httplib.HTTPSConnection(ip_addr)
else:
conn = httplib.HTTPConnection(ip_addr)
return conn
def vfidstr_get(session):
vfid = session.get("vfid")
if vfid == -1:
return ""
else:
return VF_ID + str(vfid)
def debug(session, http_cmd, cmd, content):
debug = session.get("debug")
isHttps = session.get("ishttps")
if isHttps == "1":
http_cmd += HTTPS
else:
http_cmd += HTTP
if content == "":
request = http_cmd + session.get("ip_addr") + cmd
if debug:
print(request)
else:
request = (http_cmd + session.get("ip_addr") +
cmd + " - CONTENT -> " + content)
if debug:
print(request)
# Track the responses
global current_request
current_request = request
def get_request(session, cmd, content):
credential = session.get("credential")
vfidstr = vfidstr_get(session)
conn = http_connection(session)
debug(session, GET, cmd + vfidstr, content)
conn.request("GET", cmd + vfidstr, content, credential)
resp = conn.getresponse()
return get_response_parse(resp)
def put_request(session, cmd, content):
return patch_request(session, cmd, content)
def put_request_orig(session, cmd, content):
credential = session.get("credential")
vfidstr = vfidstr_get(session)
conn = http_connection(session)
debug(session, PUT, cmd + vfidstr, content)
conn.request("PUT", cmd + vfidstr, content, credential)
resp = conn.getresponse()
return set_response_parse(resp)
def patch_request(session, cmd, content):
credential = session.get("credential")
vfidstr = vfidstr_get(session)
conn = http_connection(session)
debug(session, PATCH, cmd + vfidstr, content)
conn.request("PATCH", cmd + vfidstr, content, credential)
resp = conn.getresponse()
return set_response_parse(resp)
def post_request(session, cmd, content):
credential = session.get("credential")
vfidstr = vfidstr_get(session)
conn = http_connection(session)
debug(session, POST, cmd + vfidstr, content)
conn.request("POST", cmd + vfidstr, content, credential)
resp = conn.getresponse()
return set_response_parse(resp)
def delete_request(session, cmd, content):
credential = session.get("credential")
vfidstr = vfidstr_get(session)
conn = http_connection(session)
debug(session, DELETE, cmd + vfidstr, content)
conn.request("DELETE", cmd + vfidstr, content, credential)
resp = conn.getresponse()
return set_response_parse(resp)
def encode_slotport(name):
return quote(name, safe='')
def get_from_list(resp, key, index):
if isinstance(resp, dict):
if key in resp:
return resp[key][index]
else:
return None
else:
return None
def get_from_dict(resp, key):
if isinstance(resp, dict):
if key in resp:
return resp[key]
else:
return None
else:
return None
def is_success_resp(resp):
if (isinstance(resp, dict) and 'success-type' in resp and
resp['success-type'] == 'Success'):
return True
else:
return False
def is_failed_resp(resp):
if isinstance(resp, dict) and 'errors' in resp:
return True
elif isinstance(resp, dict) and 'server-error-code' in resp:
return True
else:
return False
| en | 0.769175 | # Copyright 2017 Brocade Communications Systems, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may also obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. :mod:`pyfos_util` - PyFOS module to provide utility functions. ********************************************************************************************************* The :mod:`pyfos_util` provides a utility functions. # print (page) # if response.status == 404: # ret_error = {"server-error-code": response.status, # "server-error-message": response.reason, # "server-error-type": "Server error", # "error-message": "No such URI"} # return ret_error # page = response.read() # print page # print page # print page # page = response.read() # print page # if any is failing, mark the overall to be false # leave the overall mark as is # Print switch response as is if isDebug is set # and set the failed error if response has error Print dictionary into JSON format :param response: dictionary of information to be printed # make sure to pick up the last test #", count, ":", test.title) # Track the responses | 2.034297 | 2 |
utility/test_gen_resource_tests.py | Ed-Fi-Exchange-OSS/Suite-3-Performance-Testing | 0 | 6621053 | <reponame>Ed-Fi-Exchange-OSS/Suite-3-Performance-Testing<filename>utility/test_gen_resource_tests.py
# SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
from pytest import mark, fixture
import json
import generate_lib as Generator
class Test_parse_args(object):
    """Unit tests for generate_lib.parse_args command-line parsing."""

    def test_lowercase(self):
        # Short lowercase flags are recognized.
        argv = ['-m', 'a', '-n', 'b', '-r', 'c']
        metadataUrl, namespace, resource = Generator.parse_args(argv, lambda: None)
        assert metadataUrl == 'a'
        assert namespace == 'b'
        assert resource == 'c'

    def test_uppercase(self):
        # Uppercase short flags are NOT recognized: all results stay falsy.
        argv = ['-M', 'a', '-N', 'b', '-R', 'c']
        metadataUrl, namespace, resource = Generator.parse_args(argv, lambda: None)
        assert not metadataUrl
        assert not namespace
        assert not resource

    def test_long_names(self):
        # Long-form flags are recognized.
        argv = ['--metadataUrl', 'a', '--namespace', 'b', '--resource', 'c']
        metadataUrl, namespace, resource = Generator.parse_args(argv, lambda: None)
        assert metadataUrl == 'a'
        assert namespace == 'b'
        assert resource == 'c'

    def test_mix(self):
        # Short and long forms may be mixed freely.
        argv = ['--metadataUrl', 'a', '-n', 'b', '--resource', 'c']
        metadataUrl, namespace, resource = Generator.parse_args(argv, lambda: None)
        assert metadataUrl == 'a'
        assert namespace == 'b'
        assert resource == 'c'
class Test_conversions(object):
@mark.parametrize('input,expected', [
('camelCase', 'camel_case'),
('nocamel', 'nocamel'),
('PascalCase','pascal_case'),
('kebab-case','kebab-case')
])
def test_convert_camel_to_snake(self, input, expected):
assert Generator._convert_camel_to_snake(input) == expected
@mark.parametrize('input,expected', [
('noDash', 'noDash'),
('camel-case', 'camelCase'),
('Camel-Case', 'camelCase'),
('three-part-word', 'threePartWord'),
('--', '')
])
def test_convert_kebab_to_camel(self, input, expected):
assert Generator._convert_kebab_to_camel(input) == expected
class Test_build_reference_dictionary(object):
# A simple scenario
def test_accountCodeReference(self):
o = Generator._build_reference_dictionary(SAMPLE_DATA, 'edFi_accountCodeReference')
expected = {
'accountClassificationDescriptor': "build_descriptor('AccountClassification', 'PLACEHOLDER')",
'accountCodeNumber': "'PLACEHOLDER'",
'educationOrganizationId': 0,
'fiscalYear': 'current_year()'
}
assert o == expected
# More complex - has a reference
def test_accountAccountCode(self):
o = Generator._build_reference_dictionary(SAMPLE_DATA, 'edFi_accountAccountCode')
expected = {
'accountCodeReference': {
'accountClassificationDescriptor': "build_descriptor('AccountClassification', 'PLACEHOLDER')",
'accountCodeNumber': "'PLACEHOLDER'",
'educationOrganizationId': 0,
'fiscalYear': 'current_year()'
}
}
assert o == expected
# Multiple references, and an array
def test_account(self):
o = Generator._build_reference_dictionary(SAMPLE_DATA, 'edFi_account')
expected = {
'accountCodes': [{
'accountCodeReference': {
'accountClassificationDescriptor': "build_descriptor('AccountClassification', 'PLACEHOLDER')",
'accountCodeNumber': "'PLACEHOLDER'",
'educationOrganizationId': 0,
'fiscalYear': 'current_year()'
}
}],
'accountIdentifier': 'UniqueIdAttribute()',
'educationOrganizationReference': {
'educationOrganizationId': 0
},
'fiscalYear': 'current_year()',
'id': 'UniqueIdAttribute()'
}
assert o == expected
class Test_flatten_dictionary(object):
def test_fully_loaded_dictionary(self):
input = {
'accountCodes': [{
'accountCodeReference': {
'accountClassificationDescriptor': "build_descriptor('AccountClassification', 'PLACEHOLDER')",
'accountCodeNumber': "'PLACEHOLDER'",
'educationOrganizationId': 0,
'fiscalYear': 'current_year()'
}
}],
'accountIdentifier': 'UniqueIdAttribute()',
'educationOrganizationReference': {
'educationOrganizationId': 0
},
'fiscalYear': 'current_year()',
'id': 'UniqueIdAttribute()'
}
expected = """ id = UniqueIdAttribute()
accountCodes = [{\'accountCodeReference\': {\'accountCodeNumber\': \'PLACEHOLDER\', \'educationOrganizationId\': 0, \'accountClassificationDescriptor\': build_descriptor(\'AccountClassification\', \'PLACEHOLDER\'), \'fiscalYear\': current_year()}}]
educationOrganizationReference = {\'educationOrganizationId\': 0}
accountIdentifier = UniqueIdAttribute()
fiscalYear = current_year()
"""
actual = Generator._flatten_dictionary(input)
assert actual == expected
SAMPLE_DATA = {
"swagger": "2.0",
"basePath": "/data/v3",
"consumes": [
"application/json"
],
"definitions": {
"edFi_account": {
"properties": {
"id": {
"description": "",
"type": "string"
},
"accountCodes": {
"description": "An unordered collection of accountAccountCodes. The set of account codes defined for the education accounting system organized by account code type (e.g., fund, function, object) that map to the account.",
"items": {
"$ref": "#/definitions/edFi_accountAccountCode"
},
"type": "array"
},
"accountIdentifier": {
"description": "The alphanumeric string that identifies the account.",
"x-Ed-Fi-isIdentity": True,
"maxLength": 50,
"type": "string"
},
"fiscalYear": {
"description": "The financial accounting year.",
"format": "int32",
"x-Ed-Fi-isIdentity": True,
"type": "integer"
},
"educationOrganizationReference": {
"$ref": "#/definitions/edFi_educationOrganizationReference"
},
"accountName": {
"description": "A descriptive name for the account.",
"maxLength": 100,
"type": "string"
},
"aggregateHashValue": {
"description": "",
"format": "int64",
"type": "integer"
},
"_etag": {
"description": "A unique system-generated value that identifies the version of the resource.",
"type": "string"
}
},
"required": [
"accountIdentifier",
"fiscalYear",
"id",
"accountCodes",
"educationOrganizationReference"
],
"type": "object"
},
"edFi_accountAccountCode": {
"properties": {
"accountCodeReference": {
"$ref": "#/definitions/edFi_accountCodeReference"
}
},
"required": [
"accountCodeReference"
],
"type": "object"
},
"edFi_accountCodeReference": {
"properties": {
"accountClassificationDescriptor": {
"description": "The type of account code associated with the account.",
"maxLength": 306,
"type": "string"
},
"accountCodeNumber": {
"description": "An account code defined for the education accounting system by the education organization.",
"maxLength": 50,
"type": "string"
},
"educationOrganizationId": {
"description": "The identifier assigned to an education organization.",
"format": "int32",
"type": "integer"
},
"fiscalYear": {
"description": "The financial accounting year.",
"format": "int32",
"type": "integer"
},
"link": {
"$ref": "#/definitions/link"
}
},
"required": [
"accountClassificationDescriptor",
"accountCodeNumber",
"educationOrganizationId",
"fiscalYear"
],
"type": "object"
},
"edFi_educationOrganizationReference": {
"properties": {
"educationOrganizationId": {
"description": "The identifier assigned to an education organization.",
"format": "int32",
"type": "integer"
},
"link": {
"$ref": "#/definitions/link"
}
},
"required": [
"educationOrganizationId"
],
"type": "object"
},
"grandBend_applicant": {
"properties": {
"id": {
"description": "",
"type": "string"
},
"applicantIdentifier": {
"description": "A unique alphanumeric code assigned to an applicant.",
"x-Ed-Fi-isIdentity": True,
"maxLength": 32,
"type": "string"
},
"educationOrganizationReference": {
"$ref": "#/definitions/edFi_educationOrganizationReference"
},
"addresses": {
"description": "An unordered collection of applicantAddresses. The set of elements that describes an address, including the street address, city, state, and ZIP code.",
"items": {
"$ref": "#/definitions/grandBend_applicantAddress"
},
"type": "array"
},
"birthDate": {
"description": "The month, day, and year on which an individual was born.",
"format": "date-time",
"type": "string"
},
"citizenshipStatusDescriptor": {
"description": "An indicator of whether or not the person is a U.S. citizen.",
"maxLength": 306,
"type": "string"
},
"firstName": {
"description": "A name given to an individual at birth, baptism, or during another naming ceremony, or through legal change.",
"maxLength": 75,
"type": "string"
},
"generationCodeSuffix": {
"description": "An appendage, if any, used to denote an individual's generation in his family (e.g., Jr., Sr., III).",
"maxLength": 10,
"type": "string"
},
"highestCompletedLevelOfEducationDescriptor": {
"description": "The extent of formal instruction an individual has received (e.g., the highest grade in school completed or its equivalent or the highest degree received).",
"maxLength": 306,
"type": "string"
},
"highlyQualifiedAcademicSubjectDescriptor": {
"description": "An applicant subject in which a teacher applicant is classified as highly qualified.",
"maxLength": 306,
"type": "string"
},
"highlyQualifiedTeacher": {
"description": "An indication of whether a teacher applicant is classified as highly qualified for his/her prospective assignment according to state definition. This attribute indicates the teacher is highly qualified for ALL Sections to be taught.",
"type": "boolean"
},
"hispanicLatinoEthnicity": {
"description": "An indication that the individual traces his or her origin or descent to Mexico, Puerto Rico, Cuba, Central, and South America, and other Spanish cultures, regardless of race. The term, \"Spanish origin,\" can be used in addition to \"Hispanic or Latino.\"",
"type": "boolean"
},
"lastSurname": {
"description": "The name borne in common by members of a family.",
"maxLength": 75,
"type": "string"
},
"loginId": {
"description": "The login ID for the user; used for security access control interface.",
"maxLength": 60,
"type": "string"
},
"maidenName": {
"description": "The person's maiden name.",
"maxLength": 75,
"type": "string"
},
"middleName": {
"description": "A secondary name given to an individual at birth, baptism, or during another naming ceremony.",
"maxLength": 75,
"type": "string"
},
"personalTitlePrefix": {
"description": "A prefix used to denote the title, degree, position, or seniority of the person.",
"maxLength": 30,
"type": "string"
},
"sexDescriptor": {
"description": "A person's gender.",
"maxLength": 306,
"type": "string"
},
"yearsOfPriorProfessionalExperience": {
"description": "The total number of years that an individual has previously held a similar professional position in one or more education institutions.",
"format": "double",
"type": "number"
},
"yearsOfPriorTeachingExperience": {
"description": "The total number of years that an individual has previously held a teaching position in one or more education institutions.",
"format": "double",
"type": "number"
},
"_etag": {
"description": "A unique system-generated value that identifies the version of the resource.",
"type": "string"
}
},
"required": [
"applicantIdentifier",
"firstName",
"id",
"lastSurname",
"educationOrganizationReference"
],
"type": "object"
}
}
} | # SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
from pytest import mark, fixture
import json
import generate_lib as Generator
class Test_parse_args(object):
def test_lowercase(self):
argv = ['-m', 'a', '-n', 'b', '-r', 'c']
metadataUrl, namespace, resource = Generator.parse_args(argv, lambda: None)
assert metadataUrl == 'a'
assert namespace == 'b'
assert resource == 'c'
def test_uppercase(self):
argv = ['-M', 'a', '-N', 'b', '-R', 'c']
metadataUrl, namespace, resource = Generator.parse_args(argv, lambda: None)
assert not metadataUrl
assert not namespace
assert not resource
def test_long_names(self):
argv = ['--metadataUrl', 'a', '--namespace', 'b', '--resource', 'c']
metadataUrl, namespace, resource = Generator.parse_args(argv, lambda: None)
assert metadataUrl == 'a'
assert namespace == 'b'
assert resource == 'c'
def test_mix(self):
argv = ['--metadataUrl', 'a', '-n', 'b', '--resource', 'c']
metadataUrl, namespace, resource = Generator.parse_args(argv, lambda: None)
assert metadataUrl == 'a'
assert namespace == 'b'
assert resource == 'c'
class Test_conversions(object):
@mark.parametrize('input,expected', [
('camelCase', 'camel_case'),
('nocamel', 'nocamel'),
('PascalCase','pascal_case'),
('kebab-case','kebab-case')
])
def test_convert_camel_to_snake(self, input, expected):
assert Generator._convert_camel_to_snake(input) == expected
@mark.parametrize('input,expected', [
('noDash', 'noDash'),
('camel-case', 'camelCase'),
('Camel-Case', 'camelCase'),
('three-part-word', 'threePartWord'),
('--', '')
])
def test_convert_kebab_to_camel(self, input, expected):
assert Generator._convert_kebab_to_camel(input) == expected
class Test_build_reference_dictionary(object):
# A simple scenario
def test_accountCodeReference(self):
o = Generator._build_reference_dictionary(SAMPLE_DATA, 'edFi_accountCodeReference')
expected = {
'accountClassificationDescriptor': "build_descriptor('AccountClassification', 'PLACEHOLDER')",
'accountCodeNumber': "'PLACEHOLDER'",
'educationOrganizationId': 0,
'fiscalYear': 'current_year()'
}
assert o == expected
# More complex - has a reference
def test_accountAccountCode(self):
o = Generator._build_reference_dictionary(SAMPLE_DATA, 'edFi_accountAccountCode')
expected = {
'accountCodeReference': {
'accountClassificationDescriptor': "build_descriptor('AccountClassification', 'PLACEHOLDER')",
'accountCodeNumber': "'PLACEHOLDER'",
'educationOrganizationId': 0,
'fiscalYear': 'current_year()'
}
}
assert o == expected
# Multiple references, and an array
def test_account(self):
o = Generator._build_reference_dictionary(SAMPLE_DATA, 'edFi_account')
expected = {
'accountCodes': [{
'accountCodeReference': {
'accountClassificationDescriptor': "build_descriptor('AccountClassification', 'PLACEHOLDER')",
'accountCodeNumber': "'PLACEHOLDER'",
'educationOrganizationId': 0,
'fiscalYear': 'current_year()'
}
}],
'accountIdentifier': 'UniqueIdAttribute()',
'educationOrganizationReference': {
'educationOrganizationId': 0
},
'fiscalYear': 'current_year()',
'id': 'UniqueIdAttribute()'
}
assert o == expected
class Test_flatten_dictionary(object):
def test_fully_loaded_dictionary(self):
input = {
'accountCodes': [{
'accountCodeReference': {
'accountClassificationDescriptor': "build_descriptor('AccountClassification', 'PLACEHOLDER')",
'accountCodeNumber': "'PLACEHOLDER'",
'educationOrganizationId': 0,
'fiscalYear': 'current_year()'
}
}],
'accountIdentifier': 'UniqueIdAttribute()',
'educationOrganizationReference': {
'educationOrganizationId': 0
},
'fiscalYear': 'current_year()',
'id': 'UniqueIdAttribute()'
}
expected = """ id = UniqueIdAttribute()
accountCodes = [{\'accountCodeReference\': {\'accountCodeNumber\': \'PLACEHOLDER\', \'educationOrganizationId\': 0, \'accountClassificationDescriptor\': build_descriptor(\'AccountClassification\', \'PLACEHOLDER\'), \'fiscalYear\': current_year()}}]
educationOrganizationReference = {\'educationOrganizationId\': 0}
accountIdentifier = UniqueIdAttribute()
fiscalYear = current_year()
"""
actual = Generator._flatten_dictionary(input)
assert actual == expected
SAMPLE_DATA = {
"swagger": "2.0",
"basePath": "/data/v3",
"consumes": [
"application/json"
],
"definitions": {
"edFi_account": {
"properties": {
"id": {
"description": "",
"type": "string"
},
"accountCodes": {
"description": "An unordered collection of accountAccountCodes. The set of account codes defined for the education accounting system organized by account code type (e.g., fund, function, object) that map to the account.",
"items": {
"$ref": "#/definitions/edFi_accountAccountCode"
},
"type": "array"
},
"accountIdentifier": {
"description": "The alphanumeric string that identifies the account.",
"x-Ed-Fi-isIdentity": True,
"maxLength": 50,
"type": "string"
},
"fiscalYear": {
"description": "The financial accounting year.",
"format": "int32",
"x-Ed-Fi-isIdentity": True,
"type": "integer"
},
"educationOrganizationReference": {
"$ref": "#/definitions/edFi_educationOrganizationReference"
},
"accountName": {
"description": "A descriptive name for the account.",
"maxLength": 100,
"type": "string"
},
"aggregateHashValue": {
"description": "",
"format": "int64",
"type": "integer"
},
"_etag": {
"description": "A unique system-generated value that identifies the version of the resource.",
"type": "string"
}
},
"required": [
"accountIdentifier",
"fiscalYear",
"id",
"accountCodes",
"educationOrganizationReference"
],
"type": "object"
},
"edFi_accountAccountCode": {
"properties": {
"accountCodeReference": {
"$ref": "#/definitions/edFi_accountCodeReference"
}
},
"required": [
"accountCodeReference"
],
"type": "object"
},
"edFi_accountCodeReference": {
"properties": {
"accountClassificationDescriptor": {
"description": "The type of account code associated with the account.",
"maxLength": 306,
"type": "string"
},
"accountCodeNumber": {
"description": "An account code defined for the education accounting system by the education organization.",
"maxLength": 50,
"type": "string"
},
"educationOrganizationId": {
"description": "The identifier assigned to an education organization.",
"format": "int32",
"type": "integer"
},
"fiscalYear": {
"description": "The financial accounting year.",
"format": "int32",
"type": "integer"
},
"link": {
"$ref": "#/definitions/link"
}
},
"required": [
"accountClassificationDescriptor",
"accountCodeNumber",
"educationOrganizationId",
"fiscalYear"
],
"type": "object"
},
"edFi_educationOrganizationReference": {
"properties": {
"educationOrganizationId": {
"description": "The identifier assigned to an education organization.",
"format": "int32",
"type": "integer"
},
"link": {
"$ref": "#/definitions/link"
}
},
"required": [
"educationOrganizationId"
],
"type": "object"
},
"grandBend_applicant": {
"properties": {
"id": {
"description": "",
"type": "string"
},
"applicantIdentifier": {
"description": "A unique alphanumeric code assigned to an applicant.",
"x-Ed-Fi-isIdentity": True,
"maxLength": 32,
"type": "string"
},
"educationOrganizationReference": {
"$ref": "#/definitions/edFi_educationOrganizationReference"
},
"addresses": {
"description": "An unordered collection of applicantAddresses. The set of elements that describes an address, including the street address, city, state, and ZIP code.",
"items": {
"$ref": "#/definitions/grandBend_applicantAddress"
},
"type": "array"
},
"birthDate": {
"description": "The month, day, and year on which an individual was born.",
"format": "date-time",
"type": "string"
},
"citizenshipStatusDescriptor": {
"description": "An indicator of whether or not the person is a U.S. citizen.",
"maxLength": 306,
"type": "string"
},
"firstName": {
"description": "A name given to an individual at birth, baptism, or during another naming ceremony, or through legal change.",
"maxLength": 75,
"type": "string"
},
"generationCodeSuffix": {
"description": "An appendage, if any, used to denote an individual's generation in his family (e.g., Jr., Sr., III).",
"maxLength": 10,
"type": "string"
},
"highestCompletedLevelOfEducationDescriptor": {
"description": "The extent of formal instruction an individual has received (e.g., the highest grade in school completed or its equivalent or the highest degree received).",
"maxLength": 306,
"type": "string"
},
"highlyQualifiedAcademicSubjectDescriptor": {
"description": "An applicant subject in which a teacher applicant is classified as highly qualified.",
"maxLength": 306,
"type": "string"
},
"highlyQualifiedTeacher": {
"description": "An indication of whether a teacher applicant is classified as highly qualified for his/her prospective assignment according to state definition. This attribute indicates the teacher is highly qualified for ALL Sections to be taught.",
"type": "boolean"
},
"hispanicLatinoEthnicity": {
"description": "An indication that the individual traces his or her origin or descent to Mexico, Puerto Rico, Cuba, Central, and South America, and other Spanish cultures, regardless of race. The term, \"Spanish origin,\" can be used in addition to \"Hispanic or Latino.\"",
"type": "boolean"
},
"lastSurname": {
"description": "The name borne in common by members of a family.",
"maxLength": 75,
"type": "string"
},
"loginId": {
"description": "The login ID for the user; used for security access control interface.",
"maxLength": 60,
"type": "string"
},
"maidenName": {
"description": "The person's maiden name.",
"maxLength": 75,
"type": "string"
},
"middleName": {
"description": "A secondary name given to an individual at birth, baptism, or during another naming ceremony.",
"maxLength": 75,
"type": "string"
},
"personalTitlePrefix": {
"description": "A prefix used to denote the title, degree, position, or seniority of the person.",
"maxLength": 30,
"type": "string"
},
"sexDescriptor": {
"description": "A person's gender.",
"maxLength": 306,
"type": "string"
},
"yearsOfPriorProfessionalExperience": {
"description": "The total number of years that an individual has previously held a similar professional position in one or more education institutions.",
"format": "double",
"type": "number"
},
"yearsOfPriorTeachingExperience": {
"description": "The total number of years that an individual has previously held a teaching position in one or more education institutions.",
"format": "double",
"type": "number"
},
"_etag": {
"description": "A unique system-generated value that identifies the version of the resource.",
"type": "string"
}
},
"required": [
"applicantIdentifier",
"firstName",
"id",
"lastSurname",
"educationOrganizationReference"
],
"type": "object"
}
}
} | en | 0.600877 | # SPDX-License-Identifier: Apache-2.0 # Licensed to the Ed-Fi Alliance under one or more agreements. # The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0. # See the LICENSE and NOTICES files in the project root for more information. # A simple scenario # More complex - has a reference # Multiple references, and an array id = UniqueIdAttribute() accountCodes = [{\'accountCodeReference\': {\'accountCodeNumber\': \'PLACEHOLDER\', \'educationOrganizationId\': 0, \'accountClassificationDescriptor\': build_descriptor(\'AccountClassification\', \'PLACEHOLDER\'), \'fiscalYear\': current_year()}}] educationOrganizationReference = {\'educationOrganizationId\': 0} accountIdentifier = UniqueIdAttribute() fiscalYear = current_year() | 2.244612 | 2 |
catd/__init__.py | dqwert/catd | 0 | 6621054 | from catd import util
from .WordNet import WordNet
from .Doc import Doc
from .WordNode import WordNode
import logging
__version__ = '0.5.0'
| from catd import util
from .WordNet import WordNet
from .Doc import Doc
from .WordNode import WordNode
import logging
__version__ = '0.5.0'
| none | 1 | 1.17816 | 1 | |
pox/openflow/discovery.py | korrigans84/pox_network | 416 | 6621055 | # Copyright 2011-2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is loosely based on the discovery component in NOX.
"""
This module discovers the connectivity between OpenFlow switches by sending
out LLDP packets. To be notified of this information, listen to LinkEvents
on core.openflow_discovery.
It's possible that some of this should be abstracted out into a generic
Discovery module, or a Discovery superclass.
"""
from pox.lib.revent import *
from pox.lib.recoco import Timer
from pox.lib.util import dpid_to_str, str_to_bool
from pox.core import core
import pox.openflow.libopenflow_01 as of
import pox.lib.packet as pkt
import struct
import time
from collections import namedtuple
from random import shuffle, random
log = core.getLogger()  # Module-level logger for this component
class LLDPSender (object):
  """
  Sends out discovery packets

  Maintains one pre-packed LLDP ofp_packet_out per (switch, port) pair and
  trickles them out via a recurring timer so that every known port gets
  probed once per send cycle.
  """

  # One queued probe: the datapath to send through, the port number being
  # probed, and the fully serialized ofp_packet_out to send.
  SendItem = namedtuple("LLDPSenderItem", ('dpid','port_num','packet'))

  #NOTE: This class keeps the packets to send in a flat list, which makes
  #      adding/removing them on switch join/leave or (especially) port
  #      status changes relatively expensive.  Could easily be improved.

  # Maximum times to run the timer per second
  _sends_per_sec = 15

  def __init__ (self, send_cycle_time, ttl = 120):
    """
    Initialize an LLDP packet sender

    send_cycle_time is the time (in seconds) that this sender will take to
    send every discovery packet.  Thus, it should be the link timeout
    interval at most.

    ttl is the time (in seconds) for which a receiving LLDP agent should
    consider the rest of the data to be valid.  We don't use this, but
    other LLDP agents might.  Can't be 0 (this means revoke).
    """
    # Packets remaining to be sent in this cycle
    self._this_cycle = []
    # Packets we've already sent in this cycle
    self._next_cycle = []
    # Packets to send in a batch (may be fractional; see _timer_handler)
    self._send_chunk_size = 1
    self._timer = None
    self._ttl = ttl
    self._send_cycle_time = send_cycle_time
    # Wires up the _handle_openflow_* methods below as event handlers
    core.listen_to_dependencies(self)

  def _handle_openflow_PortStatus (self, event):
    """
    Track changes to switch ports

    Added ports get a probe queued; deleted ports have theirs removed;
    modified ports that are not down get an immediate (re)probe.
    """
    if event.added:
      self.add_port(event.dpid, event.port, event.ofp.desc.hw_addr)
    elif event.deleted:
      self.del_port(event.dpid, event.port)
    elif event.modified:
      if event.ofp.desc.config & of.OFPPC_PORT_DOWN == 0:
        # It's not down, so... try sending a discovery now
        self.add_port(event.dpid, event.port, event.ofp.desc.hw_addr, False)

  def _handle_openflow_ConnectionUp (self, event):
    # Forget any stale state for this DPID, then queue a probe for every
    # port the switch advertised; the timer is (re)set once at the end.
    self.del_switch(event.dpid, set_timer = False)

    ports = [(p.port_no, p.hw_addr) for p in event.ofp.ports]

    for port_num, port_addr in ports:
      self.add_port(event.dpid, port_num, port_addr, set_timer = False)

    self._set_timer()

  def _handle_openflow_ConnectionDown (self, event):
    # Switch is gone -- stop probing its ports
    self.del_switch(event.dpid)

  def del_switch (self, dpid, set_timer = True):
    """
    Remove all queued probes for the given switch
    """
    self._this_cycle = [p for p in self._this_cycle if p.dpid != dpid]
    self._next_cycle = [p for p in self._next_cycle if p.dpid != dpid]
    if set_timer: self._set_timer()

  def del_port (self, dpid, port_num, set_timer = True):
    """
    Remove the queued probe (if any) for a single switch port

    Special/virtual OpenFlow port numbers (> OFPP_MAX) are ignored.
    """
    if port_num > of.OFPP_MAX: return
    self._this_cycle = [p for p in self._this_cycle
                        if p.dpid != dpid or p.port_num != port_num]
    self._next_cycle = [p for p in self._next_cycle
                        if p.dpid != dpid or p.port_num != port_num]
    if set_timer: self._set_timer()

  def add_port (self, dpid, port_num, port_addr, set_timer = True):
    """
    Queue (or re-queue) a discovery probe for a switch port

    Also fires one probe out immediately so new links show up quickly.
    Special/virtual OpenFlow port numbers (> OFPP_MAX) are ignored.
    """
    if port_num > of.OFPP_MAX: return
    self.del_port(dpid, port_num, set_timer = False)  # Avoid duplicates
    packet = self.create_packet_out(dpid, port_num, port_addr)
    self._next_cycle.insert(0, LLDPSender.SendItem(dpid, port_num, packet))
    if set_timer: self._set_timer()
    core.openflow.sendToDPID(dpid, packet) # Send one immediately

  def _set_timer (self):
    """
    (Re)start the send timer based on the current number of queued probes

    Picks an interval so that every probe goes out once per
    _send_cycle_time; if that would exceed _sends_per_sec timer firings,
    fires at the maximum rate and sends (possibly fractional) batches
    per firing instead.
    """
    if self._timer: self._timer.cancel()
    self._timer = None
    num_packets = len(self._this_cycle) + len(self._next_cycle)
    if num_packets == 0: return  # Nothing to send; leave the timer off
    self._send_chunk_size = 1 # One at a time
    interval = self._send_cycle_time / float(num_packets)
    if interval < 1.0 / self._sends_per_sec:
      # Would require too many sends per sec -- send more than one at once
      interval = 1.0 / self._sends_per_sec
      chunk = float(num_packets) / self._send_cycle_time / self._sends_per_sec
      self._send_chunk_size = chunk

    self._timer = Timer(interval,
                        self._timer_handler, recurring=True)

  def _timer_handler (self):
    """
    Called by a timer to actually send packets.

    Picks the first packet off this cycle's list, sends it, and then puts
    it on the next-cycle list.  When this cycle's list is empty, starts
    the next cycle.
    """
    # _send_chunk_size can be fractional; send the integer part and use
    # the remainder as the probability of sending one extra packet, so
    # the average rate matches the target.
    num = int(self._send_chunk_size)
    fpart = self._send_chunk_size - num
    if random() < fpart: num += 1

    for _ in range(num):
      if len(self._this_cycle) == 0:
        # Cycle finished -- swap in the already-sent list and go again
        self._this_cycle = self._next_cycle
        self._next_cycle = []
        #shuffle(self._this_cycle)
      item = self._this_cycle.pop(0)
      self._next_cycle.append(item)
      core.openflow.sendToDPID(item.dpid, item.packet)

  def create_packet_out (self, dpid, port_num, port_addr):
    """
    Create an ofp_packet_out containing a discovery packet

    Returns the packed (serialized) message bytes.
    """
    eth = self._create_discovery_packet(dpid, port_num, port_addr, self._ttl)
    po = of.ofp_packet_out(action = of.ofp_action_output(port=port_num))
    po.data = eth.pack()
    return po.pack()

  @staticmethod
  def _create_discovery_packet (dpid, port_num, port_addr, ttl):
    """
    Build discovery packet

    Encodes the sending switch's DPID (as "dpid:<hex>") in both the
    chassis ID TLV and a system description TLV so receivers can recover
    it (see the parsing in Discovery._handle_openflow_PacketIn).
    """
    chassis_id = pkt.chassis_id(subtype=pkt.chassis_id.SUB_LOCAL)
    chassis_id.id = ('dpid:' + hex(int(dpid))[2:]).encode()
    # Maybe this should be a MAC.  But a MAC of what?  Local port, maybe?

    port_id = pkt.port_id(subtype=pkt.port_id.SUB_PORT, id=str(port_num))

    ttl = pkt.ttl(ttl = ttl)

    sysdesc = pkt.system_description()
    sysdesc.payload = ('dpid:' + hex(int(dpid))[2:]).encode()

    # TLVs appended in order: chassis ID, port ID, TTL, system
    # description, end marker
    discovery_packet = pkt.lldp()
    discovery_packet.tlvs.append(chassis_id)
    discovery_packet.tlvs.append(port_id)
    discovery_packet.tlvs.append(ttl)
    discovery_packet.tlvs.append(sysdesc)
    discovery_packet.tlvs.append(pkt.end_tlv())

    eth = pkt.ethernet(type=pkt.ethernet.LLDP_TYPE)
    eth.src = port_addr
    eth.dst = pkt.ETHERNET.NDP_MULTICAST
    eth.payload = discovery_packet

    return eth
class LinkEvent (Event):
  """
  Raised (on core.openflow_discovery) when a link is detected or lost
  """
  def __init__ (self, add, link, event = None):
    self.link = link        # The Link this event concerns
    self.added = add        # True -> link came up
    self.removed = not add  # True -> link went away
    self.event = event # PacketIn which caused this, if any

  def port_for_dpid (self, dpid):
    """
    Get this link's port number on switch dpid (None if not an endpoint)
    """
    link = self.link
    if dpid == link.dpid1:
      return link.port1
    elif dpid == link.dpid2:
      return link.port2
    return None
class Link (namedtuple("LinkBase",("dpid1","port1","dpid2","port2"))):
  """
  A link between two switch ports: (dpid1, port1) -> (dpid2, port2)
  """
  @property
  def uni (self):
    """
    Returns a "unidirectional" version of this link

    The unidirectional versions of symmetric keys will be equal
    """
    low, high = sorted(self.end)
    return Link(low[0], low[1], high[0], high[1])

  @property
  def flipped (self):
    """
    This link with its two endpoints swapped
    """
    (d1, p1), (d2, p2) = self.end
    return Link(d2, p2, d1, p1)

  @property
  def end (self):
    """
    Both endpoints as ((dpid1, port1), (dpid2, port2))
    """
    return ((self.dpid1, self.port1), (self.dpid2, self.port2))

  def __str__ (self):
    return "%s.%s -> %s.%s" % (dpid_to_str(self[0]),self[1],
                               dpid_to_str(self[2]),self[3])

  def __repr__ (self):
    return ("Link(dpid1=%s,port1=%s, dpid2=%s,port2=%s)"
            % (self.dpid1, self.port1, self.dpid2, self.port2))
class Discovery (EventMixin):
  """
  Component that attempts to discover network topology.

  Sends out specially-crafted LLDP packets, and monitors their arrival.
  Raises LinkEvents (as core.openflow_discovery) as links are detected
  or time out.
  """

  _flow_priority = 65000     # Priority of LLDP-catching flow (if any)
  _link_timeout = 10         # How long until we consider a link dead
  _timeout_check_period = 5  # How often to check for timeouts

  # Events this component raises
  _eventMixin_events = set([
    LinkEvent,
  ])

  _core_name = "openflow_discovery" # we want to be core.openflow_discovery

  # Expose the Link class on the component for convenience
  Link = Link
  def __init__ (self, install_flow = True, explicit_drop = True,
                link_timeout = None, eat_early_packets = False):
    """
    Set up topology discovery

    install_flow: if True, install a flow entry on each connecting switch
      that sends LLDP traffic to the controller
    explicit_drop: if True, explicitly tell switches to drop the buffered
      LLDP packets we have consumed
    link_timeout: seconds of silence before a link is considered dead;
      falsy values keep the class default (_link_timeout)
    eat_early_packets: if True, halt non-LLDP PacketIns from
      recently-connected switches until discovery has had a full send
      cycle to run
    """
    self._eat_early_packets = eat_early_packets
    self._explicit_drop = explicit_drop
    self._install_flow = install_flow
    if link_timeout: self._link_timeout = link_timeout

    self.adjacency = {} # From Link to time.time() stamp of last sighting
    self._sender = LLDPSender(self.send_cycle_time)

    # Listen with a high priority (mostly so we get PacketIns early)
    core.listen_to_dependencies(self,
        listen_args={'openflow':{'priority':0xffffffff}})

    # Periodically sweep for links that have timed out
    Timer(self._timeout_check_period, self._expire_links, recurring=True)
@property
def send_cycle_time (self):
return self._link_timeout / 2.0
def install_flow (self, con_or_dpid, priority = None):
if priority is None:
priority = self._flow_priority
if isinstance(con_or_dpid, int):
con = core.openflow.connections.get(con_or_dpid)
if con is None:
log.warn("Can't install flow for %s", dpid_to_str(con_or_dpid))
return False
else:
con = con_or_dpid
match = of.ofp_match(dl_type = pkt.ethernet.LLDP_TYPE,
dl_dst = pkt.ETHERNET.NDP_MULTICAST)
msg = of.ofp_flow_mod()
msg.priority = priority
msg.match = match
msg.actions.append(of.ofp_action_output(port = of.OFPP_CONTROLLER))
con.send(msg)
return True
def _handle_openflow_ConnectionUp (self, event):
if self._install_flow:
# Make sure we get appropriate traffic
log.debug("Installing flow for %s", dpid_to_str(event.dpid))
self.install_flow(event.connection)
def _handle_openflow_ConnectionDown (self, event):
# Delete all links on this switch
self._delete_links([link for link in self.adjacency
if link.dpid1 == event.dpid
or link.dpid2 == event.dpid])
def _expire_links (self):
"""
Remove apparently dead links
"""
now = time.time()
expired = [link for link,timestamp in self.adjacency.items()
if timestamp + self._link_timeout < now]
if expired:
for link in expired:
log.info('link timeout: %s', link)
self._delete_links(expired)
def _handle_openflow_PacketIn (self, event):
"""
Receive and process LLDP packets
"""
packet = event.parsed
if (packet.effective_ethertype != pkt.ethernet.LLDP_TYPE
or packet.dst != pkt.ETHERNET.NDP_MULTICAST):
if not self._eat_early_packets: return
if not event.connection.connect_time: return
enable_time = time.time() - self.send_cycle_time - 1
if event.connection.connect_time > enable_time:
return EventHalt
return
if self._explicit_drop:
if event.ofp.buffer_id is not None:
log.debug("Dropping LLDP packet %i", event.ofp.buffer_id)
msg = of.ofp_packet_out()
msg.buffer_id = event.ofp.buffer_id
msg.in_port = event.port
event.connection.send(msg)
lldph = packet.find(pkt.lldp)
if lldph is None or not lldph.parsed:
log.error("LLDP packet could not be parsed")
return EventHalt
if len(lldph.tlvs) < 3:
log.error("LLDP packet without required three TLVs")
return EventHalt
if lldph.tlvs[0].tlv_type != pkt.lldp.CHASSIS_ID_TLV:
log.error("LLDP packet TLV 1 not CHASSIS_ID")
return EventHalt
if lldph.tlvs[1].tlv_type != pkt.lldp.PORT_ID_TLV:
log.error("LLDP packet TLV 2 not PORT_ID")
return EventHalt
if lldph.tlvs[2].tlv_type != pkt.lldp.TTL_TLV:
log.error("LLDP packet TLV 3 not TTL")
return EventHalt
def lookInSysDesc ():
r = None
for t in lldph.tlvs[3:]:
if t.tlv_type == pkt.lldp.SYSTEM_DESC_TLV:
# This is our favored way...
for line in t.payload.decode().split('\n'):
if line.startswith('dpid:'):
try:
return int(line[5:], 16)
except:
pass
if len(t.payload) == 8:
# Maybe it's a FlowVisor LLDP...
# Do these still exist?
try:
return struct.unpack("!Q", t.payload)[0]
except:
pass
return None
originatorDPID = lookInSysDesc()
if originatorDPID == None:
# We'll look in the CHASSIS ID
if lldph.tlvs[0].subtype == pkt.chassis_id.SUB_LOCAL:
if lldph.tlvs[0].id.startswith(b'dpid:'):
# This is how NOX does it at the time of writing
try:
originatorDPID = int(lldph.tlvs[0].id[5:], 16)
except:
pass
if originatorDPID == None:
if lldph.tlvs[0].subtype == pkt.chassis_id.SUB_MAC:
# Last ditch effort -- we'll hope the DPID was small enough
# to fit into an ethernet address
if len(lldph.tlvs[0].id) == 6:
try:
s = lldph.tlvs[0].id
originatorDPID = struct.unpack("!Q",'\x00\x00' + s)[0]
except:
pass
if originatorDPID == None:
log.warning("Couldn't find a DPID in the LLDP packet")
return EventHalt
if originatorDPID not in core.openflow.connections:
log.info('Received LLDP packet from unknown switch')
return EventHalt
# Get port number from port TLV
if lldph.tlvs[1].subtype != pkt.port_id.SUB_PORT:
log.warning("Thought we found a DPID, but packet didn't have a port")
return EventHalt
originatorPort = None
if lldph.tlvs[1].id.isdigit():
# We expect it to be a decimal value
originatorPort = int(lldph.tlvs[1].id)
elif len(lldph.tlvs[1].id) == 2:
# Maybe it's a 16 bit port number...
try:
originatorPort = struct.unpack("!H", lldph.tlvs[1].id)[0]
except:
pass
if originatorPort is None:
log.warning("Thought we found a DPID, but port number didn't " +
"make sense")
return EventHalt
if (event.dpid, event.port) == (originatorDPID, originatorPort):
log.warning("Port received its own LLDP packet; ignoring")
return EventHalt
link = Discovery.Link(originatorDPID, originatorPort, event.dpid,
event.port)
if link not in self.adjacency:
self.adjacency[link] = time.time()
log.info('link detected: %s', link)
self.raiseEventNoErrors(LinkEvent, True, link, event)
else:
# Just update timestamp
self.adjacency[link] = time.time()
return EventHalt # Probably nobody else needs this event
def _delete_links (self, links):
for link in links:
self.raiseEventNoErrors(LinkEvent, False, link)
for link in links:
self.adjacency.pop(link, None)
def is_edge_port (self, dpid, port):
"""
Return True if given port does not connect to another switch
"""
for link in self.adjacency:
if link.dpid1 == dpid and link.port1 == port:
return False
if link.dpid2 == dpid and link.port2 == port:
return False
return True
class DiscoveryGraph (object):
  """
  Keeps (and optionally exports) a NetworkX graph of the topology

  A nice feature of this is that you can have it export the graph to a
  GraphViz dot file, which you can then look at.  It's a bit easier than
  setting up Gephi or POXDesk if all you want is something quick.  I
  then a little bash script to create an image file from the dot.  If
  you use an image viewer which automatically refreshes when the file
  changes (e.g., Gnome Image Viewer), you have a low-budget topology
  graph viewer.  I export the graph by running the POX component:

    openflow.discovery:graph --export=foo.dot

  And here's the script I use to generate the image:

    touch foo.dot foo.dot.prev
    while true; do
      if [[ $(cmp foo.dot foo.dot.prev) ]]; then
        cp foo.dot foo.dot.prev
        dot -Tpng foo.dot -o foo.png
      fi
      sleep 2
    done
  """
  use_names = True # Label nodes with the local port's name when possible

  def __init__ (self, auto_export_file=None, use_names=None,
                auto_export_interval=2.0):
    self.auto_export_file = auto_export_file
    self.auto_export_interval = auto_export_interval
    if use_names is not None: self.use_names = use_names
    self._export_pending = False

    import networkx as NX
    self.g = NX.MultiDiGraph()

    core.listen_to_dependencies(self)

    # Locate a dot writer; its home differs across NetworkX versions.
    self._write_dot = None
    if hasattr(NX, 'write_dot'):
      self._write_dot = NX.write_dot
    else:
      try:
        self._write_dot = NX.drawing.nx_pydot.write_dot
      except ImportError:
        self._write_dot = NX.drawing.nx_agraph.write_dot

    self._auto_export_interval()

  def _auto_export_interval (self):
    """Periodic export trigger; reschedules itself."""
    if self.auto_export_interval:
      core.call_delayed(self.auto_export_interval,
                        self._auto_export_interval)
      self._do_auto_export()

  def _handle_openflow_discovery_LinkEvent (self, event):
    # Each link is keyed by its ((dpid,port),(dpid,port)) endpoints.
    # Dead links are only marked, not removed, so they still render.
    l = event.link
    k = (l.end[0],l.end[1])
    if event.added:
      self.g.add_edge(l.dpid1, l.dpid2, key=k)
      self.g.edges[l.dpid1,l.dpid2,k]['dead'] = False
    elif event.removed:
      self.g.edges[l.dpid1,l.dpid2,k]['dead'] = True
      #self.g.remove_edge(l.dpid1, l.dpid2, key=k)
    self._do_auto_export()

  def _handle_openflow_PortStatus (self, event):
    self._do_auto_export()

  def _do_auto_export (self):
    """Schedule a debounced export (at most one pending at a time)."""
    if not self.auto_export_file: return
    if self._export_pending: return
    self._export_pending = True
    def do_export ():
      self._export_pending = False
      if not self.auto_export_file: return
      self.export_dot(self.auto_export_file)
    core.call_delayed(0.25, do_export)

  def label_nodes (self):
    """Attach a human-readable 'label' attribute to every node."""
    for n,d in self.g.nodes(data=True):
      c = core.openflow.connections.get(n)
      name = dpid_to_str(n)
      if self.use_names:
        if c and of.OFPP_LOCAL in c.ports:
          name = c.ports[of.OFPP_LOCAL].name
          if name.startswith("ovs"):
            if "_" in name and name[3:].split("_",1)[0].isdigit():
              name = name.split("_", 1)[-1]
      # BUGFIX: Graph.node was removed in NetworkX 2.4; use the .nodes
      # view (consistent with the .edges view used above).
      self.g.nodes[n]['label'] = name

  def export_dot (self, filename):
    """Color-code edges by port status and write a GraphViz dot file."""
    if self._write_dot is None:
      log.error("Can't export graph. NetworkX has no dot writing.")
      log.error("You probably need to install something.")
      return
    self.label_nodes()
    for u,v,k,d in self.g.edges(data=True, keys=True):
      (d1,p1),(d2,p2) = k
      assert d1 == u
      con1 = core.openflow.connections.get(d1)
      c = ''
      if d.get('dead') is True: c += 'gray'
      elif not con1: c += "gray"
      elif p1 not in con1.ports: c += "gray" # Shouldn't happen!
      elif con1.ports[p1].config & of.OFPPC_PORT_DOWN: c += "red"
      elif con1.ports[p1].config & of.OFPPC_NO_FWD: c += "brown"
      elif con1.ports[p1].config & of.OFPPC_NO_FLOOD: c += "blue"
      else: c += "green"
      d['color'] = c
      d['taillabel'] = str(p1)
      d['style'] = 'dashed' if d.get('dead') else 'solid'
    #log.debug("Exporting discovery graph to %s", filename)
    self._write_dot(self.g, filename)
def graph (export = None, dpids_only = False, interval = "2.0"):
  """
  Keep (and optionally export) a graph of the topology

  If you pass --export=<filename>, it will periodically save a GraphViz
  dot file containing the graph.  Normally the graph will label switches
  using their names when possible (based on the name of their "local"
  interface).  If you pass --dpids_only, it will just use DPIDs instead.
  """
  # Component entry point: registers a singleton DiscoveryGraph on core.
  core.registerNew(DiscoveryGraph, export, use_names = not dpids_only,
                   auto_export_interval = float(interval))
def launch (no_flow = False, explicit_drop = True, link_timeout = None,
            eat_early_packets = False):
  """Command-line entry point: normalize options and register Discovery."""
  opts = dict(
    explicit_drop = str_to_bool(explicit_drop),
    eat_early_packets = str_to_bool(eat_early_packets),
    install_flow = not str_to_bool(no_flow),
    link_timeout = int(link_timeout) if link_timeout else link_timeout,
  )
  core.registerNew(Discovery, **opts)
# Copyright 2011-2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is loosely based on the discovery component in NOX.
"""
This module discovers the connectivity between OpenFlow switches by sending
out LLDP packets. To be notified of this information, listen to LinkEvents
on core.openflow_discovery.
It's possible that some of this should be abstracted out into a generic
Discovery module, or a Discovery superclass.
"""
from pox.lib.revent import *
from pox.lib.recoco import Timer
from pox.lib.util import dpid_to_str, str_to_bool
from pox.core import core
import pox.openflow.libopenflow_01 as of
import pox.lib.packet as pkt
import struct
import time
from collections import namedtuple
from random import shuffle, random
log = core.getLogger()
class LLDPSender (object):
  """
  Sends out discovery packets

  Keeps one pending LLDP packet per (switch, port) pair and paces them so
  the whole set is transmitted once per send cycle.
  """
  SendItem = namedtuple("LLDPSenderItem", ('dpid','port_num','packet'))
  #NOTE: This class keeps the packets to send in a flat list, which makes
  #      adding/removing them on switch join/leave or (especially) port
  #      status changes relatively expensive. Could easily be improved.
  # Maximum times to run the timer per second
  _sends_per_sec = 15
  def __init__ (self, send_cycle_time, ttl = 120):
    """
    Initialize an LLDP packet sender

    send_cycle_time is the time (in seconds) that this sender will take to
    send every discovery packet.  Thus, it should be the link timeout
    interval at most.

    ttl is the time (in seconds) for which a receiving LLDP agent should
    consider the rest of the data to be valid.  We don't use this, but
    other LLDP agents might.  Can't be 0 (this means revoke).
    """
    # Packets remaining to be sent in this cycle
    self._this_cycle = []
    # Packets we've already sent in this cycle
    self._next_cycle = []
    # Packets to send in a batch
    self._send_chunk_size = 1
    self._timer = None
    self._ttl = ttl
    self._send_cycle_time = send_cycle_time
    core.listen_to_dependencies(self)
  def _handle_openflow_PortStatus (self, event):
    """
    Track changes to switch ports
    """
    if event.added:
      self.add_port(event.dpid, event.port, event.ofp.desc.hw_addr)
    elif event.deleted:
      self.del_port(event.dpid, event.port)
    elif event.modified:
      if event.ofp.desc.config & of.OFPPC_PORT_DOWN == 0:
        # It's not down, so... try sending a discovery now
        self.add_port(event.dpid, event.port, event.ofp.desc.hw_addr, False)
  def _handle_openflow_ConnectionUp (self, event):
    # Rebuild the send queue for every port of a (re)connecting switch
    self.del_switch(event.dpid, set_timer = False)
    ports = [(p.port_no, p.hw_addr) for p in event.ofp.ports]
    for port_num, port_addr in ports:
      self.add_port(event.dpid, port_num, port_addr, set_timer = False)
    self._set_timer()
  def _handle_openflow_ConnectionDown (self, event):
    self.del_switch(event.dpid)
  def del_switch (self, dpid, set_timer = True):
    # Forget every queued packet belonging to this switch
    self._this_cycle = [p for p in self._this_cycle if p.dpid != dpid]
    self._next_cycle = [p for p in self._next_cycle if p.dpid != dpid]
    if set_timer: self._set_timer()
  def del_port (self, dpid, port_num, set_timer = True):
    # Only physical ports (<= OFPP_MAX) are ever queued
    if port_num > of.OFPP_MAX: return
    self._this_cycle = [p for p in self._this_cycle
                        if p.dpid != dpid or p.port_num != port_num]
    self._next_cycle = [p for p in self._next_cycle
                        if p.dpid != dpid or p.port_num != port_num]
    if set_timer: self._set_timer()
  def add_port (self, dpid, port_num, port_addr, set_timer = True):
    # Replaces any previously queued packet for this (dpid, port)
    if port_num > of.OFPP_MAX: return
    self.del_port(dpid, port_num, set_timer = False)
    packet = self.create_packet_out(dpid, port_num, port_addr)
    self._next_cycle.insert(0, LLDPSender.SendItem(dpid, port_num, packet))
    if set_timer: self._set_timer()
    core.openflow.sendToDPID(dpid, packet) # Send one immediately
  def _set_timer (self):
    # (Re)start the pacing timer so all queued packets go out once per
    # cycle without firing the timer more than _sends_per_sec times/sec.
    if self._timer: self._timer.cancel()
    self._timer = None
    num_packets = len(self._this_cycle) + len(self._next_cycle)
    if num_packets == 0: return
    self._send_chunk_size = 1 # One at a time
    interval = self._send_cycle_time / float(num_packets)
    if interval < 1.0 / self._sends_per_sec:
      # Would require too many sends per sec -- send more than one at once
      interval = 1.0 / self._sends_per_sec
      # packets per firing = packets/sec needed divided by firings/sec
      chunk = float(num_packets) / self._send_cycle_time / self._sends_per_sec
      self._send_chunk_size = chunk
    self._timer = Timer(interval,
                        self._timer_handler, recurring=True)
  def _timer_handler (self):
    """
    Called by a timer to actually send packets.

    Picks the first packet off this cycle's list, sends it, and then puts
    it on the next-cycle list.  When this cycle's list is empty, starts
    the next cycle.
    """
    # _send_chunk_size may be fractional; dither randomly so the average
    # send rate works out correctly over many firings.
    num = int(self._send_chunk_size)
    fpart = self._send_chunk_size - num
    if random() < fpart: num += 1
    for _ in range(num):
      if len(self._this_cycle) == 0:
        self._this_cycle = self._next_cycle
        self._next_cycle = []
        #shuffle(self._this_cycle)
      item = self._this_cycle.pop(0)
      self._next_cycle.append(item)
      core.openflow.sendToDPID(item.dpid, item.packet)
  def create_packet_out (self, dpid, port_num, port_addr):
    """
    Create an ofp_packet_out containing a discovery packet
    """
    eth = self._create_discovery_packet(dpid, port_num, port_addr, self._ttl)
    po = of.ofp_packet_out(action = of.ofp_action_output(port=port_num))
    po.data = eth.pack()
    return po.pack()
  @staticmethod
  def _create_discovery_packet (dpid, port_num, port_addr, ttl):
    """
    Build discovery packet

    Returns an ethernet frame carrying an LLDP payload whose chassis id
    and system description both encode the sending switch's DPID.
    """
    chassis_id = pkt.chassis_id(subtype=pkt.chassis_id.SUB_LOCAL)
    chassis_id.id = ('dpid:' + hex(int(dpid))[2:]).encode()
    # Maybe this should be a MAC. But a MAC of what? Local port, maybe?
    port_id = pkt.port_id(subtype=pkt.port_id.SUB_PORT, id=str(port_num))
    ttl = pkt.ttl(ttl = ttl)
    sysdesc = pkt.system_description()
    sysdesc.payload = ('dpid:' + hex(int(dpid))[2:]).encode()
    discovery_packet = pkt.lldp()
    discovery_packet.tlvs.append(chassis_id)
    discovery_packet.tlvs.append(port_id)
    discovery_packet.tlvs.append(ttl)
    discovery_packet.tlvs.append(sysdesc)
    discovery_packet.tlvs.append(pkt.end_tlv())
    eth = pkt.ethernet(type=pkt.ethernet.LLDP_TYPE)
    eth.src = port_addr
    eth.dst = pkt.ETHERNET.NDP_MULTICAST
    eth.payload = discovery_packet
    return eth
class LinkEvent (Event):
  """
  Link up/down event

  Raised with either the added or the removed flag set whenever a link
  appears or disappears.
  """
  def __init__ (self, add, link, event = None):
    self.link = link
    self.added = add
    self.removed = not add
    self.event = event # PacketIn which caused this, if any

  def port_for_dpid (self, dpid):
    """Return this link's port number on switch dpid (None if unrelated)."""
    l = self.link
    if dpid == l.dpid1:
      return l.port1
    if dpid == l.dpid2:
      return l.port2
    return None
class Link (namedtuple("LinkBase",("dpid1","port1","dpid2","port2"))):
  """A directed link between two (dpid, port) endpoints."""

  @property
  def uni (self):
    """
    Returns a "unidirectional" version of this link

    The unidirectional versions of symmetric links will be equal
    """
    first, second = sorted(self.end)
    return Link(first[0], first[1], second[0], second[1])

  @property
  def flipped (self):
    """This link with its two endpoints swapped."""
    near, far = self.end
    return Link(far[0], far[1], near[0], near[1])

  @property
  def end (self):
    """The link as an endpoint pair: ((dpid1, port1), (dpid2, port2))."""
    return ((self.dpid1, self.port1), (self.dpid2, self.port2))

  def __str__ (self):
    return "%s.%s -> %s.%s" % (dpid_to_str(self.dpid1), self.port1,
                               dpid_to_str(self.dpid2), self.port2)

  def __repr__ (self):
    return ("Link(dpid1=%s,port1=%s, dpid2=%s,port2=%s)"
            % (self.dpid1, self.port1, self.dpid2, self.port2))
class Discovery (EventMixin):
  """
  Component that attempts to discover network topology.

  Sends out specially-crafted LLDP packets, and monitors their arrival.
  """
  _flow_priority = 65000     # Priority of LLDP-catching flow (if any)
  _link_timeout = 10         # How long until we consider a link dead
  _timeout_check_period = 5  # How often to check for timeouts

  _eventMixin_events = set([
    LinkEvent,
  ])

  _core_name = "openflow_discovery" # we want to be core.openflow_discovery

  Link = Link

  def __init__ (self, install_flow = True, explicit_drop = True,
                link_timeout = None, eat_early_packets = False):
    """
    Set up LLDP-based link discovery; see launch() for option meanings.
    """
    self._eat_early_packets = eat_early_packets
    self._explicit_drop = explicit_drop
    self._install_flow = install_flow
    if link_timeout: self._link_timeout = link_timeout
    self.adjacency = {} # From Link to time.time() stamp
    self._sender = LLDPSender(self.send_cycle_time)
    # Listen with a high priority (mostly so we get PacketIns early)
    core.listen_to_dependencies(self,
        listen_args={'openflow':{'priority':0xffffffff}})
    Timer(self._timeout_check_period, self._expire_links, recurring=True)

  @property
  def send_cycle_time (self):
    """Seconds the sender gets to cover every port: half the link timeout."""
    return self._link_timeout / 2.0

  def install_flow (self, con_or_dpid, priority = None):
    """
    Install the LLDP-catching flow on a switch.

    con_or_dpid may be a Connection or a DPID.  Returns True on success,
    False when the DPID has no current connection.
    """
    if priority is None:
      priority = self._flow_priority
    if isinstance(con_or_dpid, int):
      con = core.openflow.connections.get(con_or_dpid)
      if con is None:
        # log.warn is a deprecated alias; use the canonical warning()
        log.warning("Can't install flow for %s", dpid_to_str(con_or_dpid))
        return False
    else:
      con = con_or_dpid

    match = of.ofp_match(dl_type = pkt.ethernet.LLDP_TYPE,
                         dl_dst = pkt.ETHERNET.NDP_MULTICAST)
    msg = of.ofp_flow_mod()
    msg.priority = priority
    msg.match = match
    msg.actions.append(of.ofp_action_output(port = of.OFPP_CONTROLLER))
    con.send(msg)
    return True

  def _handle_openflow_ConnectionUp (self, event):
    if self._install_flow:
      # Make sure we get appropriate traffic
      log.debug("Installing flow for %s", dpid_to_str(event.dpid))
      self.install_flow(event.connection)

  def _handle_openflow_ConnectionDown (self, event):
    # Delete all links on this switch
    self._delete_links([link for link in self.adjacency
                        if link.dpid1 == event.dpid
                        or link.dpid2 == event.dpid])

  def _expire_links (self):
    """
    Remove apparently dead links
    """
    now = time.time()
    expired = [link for link,timestamp in self.adjacency.items()
               if timestamp + self._link_timeout < now]
    if expired:
      for link in expired:
        log.info('link timeout: %s', link)
      self._delete_links(expired)

  def _handle_openflow_PacketIn (self, event):
    """
    Receive and process LLDP packets

    Parses a discovery LLDP packet, works out which switch/port pair sent
    it, and creates or refreshes the corresponding adjacency entry.
    """
    packet = event.parsed

    if (packet.effective_ethertype != pkt.ethernet.LLDP_TYPE
        or packet.dst != pkt.ETHERNET.NDP_MULTICAST):
      # Not one of our LLDP packets; optionally eat early PacketIns
      if not self._eat_early_packets: return
      if not event.connection.connect_time: return
      enable_time = time.time() - self.send_cycle_time - 1
      if event.connection.connect_time > enable_time:
        return EventHalt
      return

    if self._explicit_drop:
      if event.ofp.buffer_id is not None:
        log.debug("Dropping LLDP packet %i", event.ofp.buffer_id)
        msg = of.ofp_packet_out()
        msg.buffer_id = event.ofp.buffer_id
        msg.in_port = event.port
        event.connection.send(msg)

    lldph = packet.find(pkt.lldp)
    if lldph is None or not lldph.parsed:
      log.error("LLDP packet could not be parsed")
      return EventHalt
    if len(lldph.tlvs) < 3:
      log.error("LLDP packet without required three TLVs")
      return EventHalt
    if lldph.tlvs[0].tlv_type != pkt.lldp.CHASSIS_ID_TLV:
      log.error("LLDP packet TLV 1 not CHASSIS_ID")
      return EventHalt
    if lldph.tlvs[1].tlv_type != pkt.lldp.PORT_ID_TLV:
      log.error("LLDP packet TLV 2 not PORT_ID")
      return EventHalt
    if lldph.tlvs[2].tlv_type != pkt.lldp.TTL_TLV:
      log.error("LLDP packet TLV 3 not TTL")
      return EventHalt

    def lookInSysDesc ():
      # Try to pull a DPID out of the first System Description TLV
      for t in lldph.tlvs[3:]:
        if t.tlv_type == pkt.lldp.SYSTEM_DESC_TLV:
          # This is our favored way...
          for line in t.payload.decode().split('\n'):
            if line.startswith('dpid:'):
              try:
                return int(line[5:], 16)
              except ValueError:
                pass
          if len(t.payload) == 8:
            # Maybe it's a FlowVisor LLDP...
            # Do these still exist?
            try:
              return struct.unpack("!Q", t.payload)[0]
            except struct.error:
              pass
          return None

    originatorDPID = lookInSysDesc()

    if originatorDPID is None:
      # We'll look in the CHASSIS ID
      if lldph.tlvs[0].subtype == pkt.chassis_id.SUB_LOCAL:
        if lldph.tlvs[0].id.startswith(b'dpid:'):
          # This is how NOX does it at the time of writing
          try:
            originatorDPID = int(lldph.tlvs[0].id[5:], 16)
          except ValueError:
            pass

    if originatorDPID is None:
      if lldph.tlvs[0].subtype == pkt.chassis_id.SUB_MAC:
        # Last ditch effort -- we'll hope the DPID was small enough
        # to fit into an ethernet address
        if len(lldph.tlvs[0].id) == 6:
          try:
            s = lldph.tlvs[0].id
            # BUGFIX: pad with *bytes* -- 'str + bytes' raised TypeError
            # on Python 3 and silently disabled this fallback.
            originatorDPID = struct.unpack("!Q", b'\x00\x00' + s)[0]
          except struct.error:
            pass

    if originatorDPID is None:
      log.warning("Couldn't find a DPID in the LLDP packet")
      return EventHalt

    if originatorDPID not in core.openflow.connections:
      log.info('Received LLDP packet from unknown switch')
      return EventHalt

    # Get port number from port TLV
    if lldph.tlvs[1].subtype != pkt.port_id.SUB_PORT:
      log.warning("Thought we found a DPID, but packet didn't have a port")
      return EventHalt

    originatorPort = None
    if lldph.tlvs[1].id.isdigit():
      # We expect it to be a decimal value
      originatorPort = int(lldph.tlvs[1].id)
    elif len(lldph.tlvs[1].id) == 2:
      # Maybe it's a 16 bit port number...
      try:
        originatorPort = struct.unpack("!H", lldph.tlvs[1].id)[0]
      except struct.error:
        pass

    if originatorPort is None:
      log.warning("Thought we found a DPID, but port number didn't " +
                  "make sense")
      return EventHalt

    if (event.dpid, event.port) == (originatorDPID, originatorPort):
      log.warning("Port received its own LLDP packet; ignoring")
      return EventHalt

    link = Discovery.Link(originatorDPID, originatorPort, event.dpid,
                          event.port)

    if link not in self.adjacency:
      self.adjacency[link] = time.time()
      log.info('link detected: %s', link)
      self.raiseEventNoErrors(LinkEvent, True, link, event)
    else:
      # Just update timestamp
      self.adjacency[link] = time.time()

    return EventHalt # Probably nobody else needs this event

  def _delete_links (self, links):
    """Raise removal LinkEvents for links, then drop them from adjacency."""
    for link in links:
      self.raiseEventNoErrors(LinkEvent, False, link)
    for link in links:
      self.adjacency.pop(link, None)

  def is_edge_port (self, dpid, port):
    """
    Return True if given port does not connect to another switch
    """
    for link in self.adjacency:
      if link.dpid1 == dpid and link.port1 == port:
        return False
      if link.dpid2 == dpid and link.port2 == port:
        return False
    return True
class DiscoveryGraph (object):
  """
  Keeps (and optionally exports) a NetworkX graph of the topology

  A nice feature of this is that you can have it export the graph to a
  GraphViz dot file, which you can then look at.  It's a bit easier than
  setting up Gephi or POXDesk if all you want is something quick.  I
  then a little bash script to create an image file from the dot.  If
  you use an image viewer which automatically refreshes when the file
  changes (e.g., Gnome Image Viewer), you have a low-budget topology
  graph viewer.  I export the graph by running the POX component:

    openflow.discovery:graph --export=foo.dot

  And here's the script I use to generate the image:

    touch foo.dot foo.dot.prev
    while true; do
      if [[ $(cmp foo.dot foo.dot.prev) ]]; then
        cp foo.dot foo.dot.prev
        dot -Tpng foo.dot -o foo.png
      fi
      sleep 2
    done
  """
  use_names = True # Label nodes with the local port's name when possible

  def __init__ (self, auto_export_file=None, use_names=None,
                auto_export_interval=2.0):
    self.auto_export_file = auto_export_file
    self.auto_export_interval = auto_export_interval
    if use_names is not None: self.use_names = use_names
    self._export_pending = False

    import networkx as NX
    self.g = NX.MultiDiGraph()

    core.listen_to_dependencies(self)

    # Locate a dot writer; its home differs across NetworkX versions.
    self._write_dot = None
    if hasattr(NX, 'write_dot'):
      self._write_dot = NX.write_dot
    else:
      try:
        self._write_dot = NX.drawing.nx_pydot.write_dot
      except ImportError:
        self._write_dot = NX.drawing.nx_agraph.write_dot

    self._auto_export_interval()

  def _auto_export_interval (self):
    """Periodic export trigger; reschedules itself."""
    if self.auto_export_interval:
      core.call_delayed(self.auto_export_interval,
                        self._auto_export_interval)
      self._do_auto_export()

  def _handle_openflow_discovery_LinkEvent (self, event):
    # Links are keyed by their ((dpid,port),(dpid,port)) endpoints.
    # Dead links are only marked, not removed, so they still render.
    l = event.link
    k = (l.end[0],l.end[1])
    if event.added:
      self.g.add_edge(l.dpid1, l.dpid2, key=k)
      self.g.edges[l.dpid1,l.dpid2,k]['dead'] = False
    elif event.removed:
      self.g.edges[l.dpid1,l.dpid2,k]['dead'] = True
      #self.g.remove_edge(l.dpid1, l.dpid2, key=k)
    self._do_auto_export()

  def _handle_openflow_PortStatus (self, event):
    self._do_auto_export()

  def _do_auto_export (self):
    """Schedule a debounced export (at most one pending at a time)."""
    if not self.auto_export_file: return
    if self._export_pending: return
    self._export_pending = True
    def do_export ():
      self._export_pending = False
      if not self.auto_export_file: return
      self.export_dot(self.auto_export_file)
    core.call_delayed(0.25, do_export)

  def label_nodes (self):
    """Attach a human-readable 'label' attribute to every node."""
    for n,d in self.g.nodes(data=True):
      c = core.openflow.connections.get(n)
      name = dpid_to_str(n)
      if self.use_names:
        if c and of.OFPP_LOCAL in c.ports:
          name = c.ports[of.OFPP_LOCAL].name
          if name.startswith("ovs"):
            if "_" in name and name[3:].split("_",1)[0].isdigit():
              name = name.split("_", 1)[-1]
      # BUGFIX: Graph.node was removed in NetworkX 2.4; use the .nodes
      # view (consistent with the .edges view used above).
      self.g.nodes[n]['label'] = name

  def export_dot (self, filename):
    """Color-code edges by port status and write a GraphViz dot file."""
    if self._write_dot is None:
      log.error("Can't export graph. NetworkX has no dot writing.")
      log.error("You probably need to install something.")
      return
    self.label_nodes()
    for u,v,k,d in self.g.edges(data=True, keys=True):
      (d1,p1),(d2,p2) = k
      assert d1 == u
      con1 = core.openflow.connections.get(d1)
      c = ''
      if d.get('dead') is True: c += 'gray'
      elif not con1: c += "gray"
      elif p1 not in con1.ports: c += "gray" # Shouldn't happen!
      elif con1.ports[p1].config & of.OFPPC_PORT_DOWN: c += "red"
      elif con1.ports[p1].config & of.OFPPC_NO_FWD: c += "brown"
      elif con1.ports[p1].config & of.OFPPC_NO_FLOOD: c += "blue"
      else: c += "green"
      d['color'] = c
      d['taillabel'] = str(p1)
      d['style'] = 'dashed' if d.get('dead') else 'solid'
    #log.debug("Exporting discovery graph to %s", filename)
    self._write_dot(self.g, filename)
def graph (export = None, dpids_only = False, interval = "2.0"):
  """
  Keep (and optionally export) a graph of the topology

  If you pass --export=<filename>, it will periodically save a GraphViz
  dot file containing the graph.  Normally the graph will label switches
  using their names when possible (based on the name of their "local"
  interface).  If you pass --dpids_only, it will just use DPIDs instead.
  """
  # Component entry point: registers a singleton DiscoveryGraph on core.
  core.registerNew(DiscoveryGraph, export, use_names = not dpids_only,
                   auto_export_interval = float(interval))
def launch (no_flow = False, explicit_drop = True, link_timeout = None,
            eat_early_packets = False):
  """Command-line entry point: normalize options and register Discovery."""
  opts = dict(
    explicit_drop = str_to_bool(explicit_drop),
    eat_early_packets = str_to_bool(eat_early_packets),
    install_flow = not str_to_bool(no_flow),
    link_timeout = int(link_timeout) if link_timeout else link_timeout,
  )
  core.registerNew(Discovery, **opts)
| en | 0.878922 | # Copyright 2011-2013 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This file is loosely based on the discovery component in NOX. This module discovers the connectivity between OpenFlow switches by sending out LLDP packets. To be notified of this information, listen to LinkEvents on core.openflow_discovery. It's possible that some of this should be abstracted out into a generic Discovery module, or a Discovery superclass. Sends out discovery packets #NOTE: This class keeps the packets to send in a flat list, which makes # adding/removing them on switch join/leave or (especially) port # status changes relatively expensive. Could easily be improved. # Maximum times to run the timer per second Initialize an LLDP packet sender send_cycle_time is the time (in seconds) that this sender will take to send every discovery packet. Thus, it should be the link timeout interval at most. ttl is the time (in seconds) for which a receiving LLDP agent should consider the rest of the data to be valid. We don't use this, but other LLDP agents might. Can't be 0 (this means revoke). # Packets remaining to be sent in this cycle # Packets we've already sent in this cycle # Packets to send in a batch Track changes to switch ports # It's not down, so... try sending a discovery now # Send one immediately # One at a time # Would require too many sends per sec -- send more than one at once Called by a timer to actually send packets. 
Picks the first packet off this cycle's list, sends it, and then puts it on the next-cycle list. When this cycle's list is empty, starts the next cycle. #shuffle(self._this_cycle) Create an ofp_packet_out containing a discovery packet Build discovery packet # Maybe this should be a MAC. But a MAC of what? Local port, maybe? Link up/down event # PacketIn which caused this, if any Returns a "unidirectional" version of this link The unidirectional versions of symmetric keys will be equal Component that attempts to discover network toplogy. Sends out specially-crafted LLDP packets, and monitors their arrival. # Priority of LLDP-catching flow (if any) # How long until we consider a link dead # How often to check for timeouts # we want to be core.openflow_discovery # From Link to time.time() stamp # Listen with a high priority (mostly so we get PacketIns early) # Make sure we get appropriate traffic # Delete all links on this switch Remove apparently dead links Receive and process LLDP packets # This is our favored way... # Maybe it's a FlowVisor LLDP... # Do these still exist? # We'll look in the CHASSIS ID # This is how NOX does it at the time of writing # Last ditch effort -- we'll hope the DPID was small enough # to fit into an ethernet address # Get port number from port TLV # We expect it to be a decimal value # Maybe it's a 16 bit port number... # Just update timestamp # Probably nobody else needs this event Return True if given port does not connect to another switch Keeps (and optionally exports) a NetworkX graph of the topology A nice feature of this is that you can have it export the graph to a GraphViz dot file, which you can then look at. It's a bit easier than setting up Gephi or POXDesk if all you want is something quick. I then a little bash script to create an image file from the dot. If you use an image viewer which automatically refreshes when the file changes (e.g., Gnome Image Viewer), you have a low-budget topology graph viewer. 
I export the graph by running the POX component: openflow.discovery:graph --export=foo.dot And here's the script I use to generate the image: touch foo.dot foo.dot.prev while true; do if [[ $(cmp foo.dot foo.dot.prev) ]]; then cp foo.dot foo.dot.prev dot -Tpng foo.dot -o foo.png fi sleep 2 done #self.g.remove_edge(l.dpid1, l.dpid2, key=k) # Shouldn't happen! #log.debug("Exporting discovery graph to %s", filename) Keep (and optionally export) a graph of the topology If you pass --export=<filename>, it will periodically save a GraphViz dot file containing the graph. Normally the graph will label switches using their names when possible (based on the name of their "local" interface). If you pass --dpids_only, it will just use DPIDs instead. | 1.883736 | 2 |
full_scrapper.py | CarloGauss33/scrap-diputados | 2 | 6621056 | <filename>full_scrapper.py
import sys
import simple_scrap
from urllib.request import urlopen
import urllib
from bs4 import BeautifulSoup
import json
import os
def get_last_id():
    """Return the id of the most recent votation listed on camara.cl."""
    html = urlopen("https://www.camara.cl/legislacion/sala_sesiones/votaciones.aspx")
    page = BeautifulSoup(html, 'html.parser')
    # The results panel holds one anchor per votation, newest first.
    panel = page.find(id="ContentPlaceHolder1_ContentPlaceHolder1_PaginaContent_pnlVotaciones")
    newest_link = panel.find_all("a")[0]
    # The votation id is the numeric query parameter of the newest link.
    return int(newest_link.get("href").split("=")[1])
def full_scrap(start_id, wanted_results, filepath, verbose=False):
    """Scrape `wanted_results` votations starting at id `start_id` and
    walking downwards, appending the collected records to the JSON file
    at `filepath`.

    Work is split into chunks of at most 30 ids.  When `verbose` is true a
    simple text progress bar is drawn on stdout.  Returns True.
    """
    new_data = []
    if verbose:
        # Draw the empty progress bar, then move the cursor back inside it.
        toolbar_width = wanted_results
        sys.stdout.write("[%s]" % (" " * toolbar_width))
        sys.stdout.flush()
        sys.stdout.write("\b" * (toolbar_width + 1))
    if wanted_results > 30:
        times = wanted_results // 30
        for i in range(0, times):
            # BUGFIX: the recursive calls previously omitted `verbose`,
            # which raised TypeError while it had no default value.
            full_scrap(start_id - 30 * i, 30, filepath, verbose)
        # BUGFIX: the remainder must continue *below* the ids already
        # scraped (start_id - 30*times); the original used `+`, which
        # scraped ids above start_id.
        full_scrap(start_id - 30 * times, wanted_results % 30, filepath, verbose)
        return True
    n_of_projects = wanted_results
    last_project = start_id
    for project_n in range(last_project, last_project - n_of_projects, -1):
        try:
            link = f"https://www.camara.cl/legislacion/sala_sesiones/votacion_detalle.aspx?prmIdVotacion={project_n}"
            status, project_results, meta = simple_scrap.scrap_web(link, project_n)
            if status:
                meta.update({"votaciones": project_results})
                new_data.append(meta)
        except urllib.error.HTTPError:
            # Missing/unpublished votation pages are simply skipped.
            pass
        if verbose:
            sys.stdout.write("-")
            sys.stdout.flush()
    if verbose:
        sys.stdout.write("]\n")
    # Merge with any data already on disk so repeated runs append.
    old_data = []
    if os.path.isfile(filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            old_data = json.load(f)
    with open(filepath, 'w+', encoding='utf-8') as outfile:
        json.dump(old_data + new_data, outfile, indent=4, ensure_ascii=False)
    return True
if __name__ == "__main__":
    # BUGFIX: `full_scrap` takes a `verbose` argument; omitting it raised
    # TypeError.  Run quietly by default.
    full_scrap(get_last_id(), 10, "./results/data.json", False)
| <filename>full_scrapper.py
import sys
import simple_scrap
from urllib.request import urlopen
import urllib
from bs4 import BeautifulSoup
import json
import os
def get_last_id():
pure_html = urlopen("https://www.camara.cl/legislacion/sala_sesiones/votaciones.aspx")
soup_html = BeautifulSoup(pure_html, 'html.parser')
#opening the results table
tbody = soup_html.find(id="ContentPlaceHolder1_ContentPlaceHolder1_PaginaContent_pnlVotaciones")
#getting the last votation
vot_lin = tbody.find_all("a")
last_vot = vot_lin[0].get("href").split("=")[1]
return int(last_vot)
def full_scrap(start_id ,wanted_results, filepath, verbose):
new_data = []
if verbose:
toolbar_width = wanted_results
sys.stdout.write("[%s]" % (" " * toolbar_width))
sys.stdout.flush()
sys.stdout.write("\b" * (toolbar_width+1))
if wanted_results > 30:
times = wanted_results//30
for i in range(0, times):
full_scrap(start_id - 30*i, 30, filepath)
full_scrap(start_id +30*times, wanted_results%30, filepath)
return True
n_of_projects = wanted_results
last_project = start_id
for project_n in range(last_project, last_project - n_of_projects, -1):
try:
link = f"https://www.camara.cl/legislacion/sala_sesiones/votacion_detalle.aspx?prmIdVotacion={project_n}"
status, project_results, meta = simple_scrap.scrap_web(link, project_n)
project_dict = dict()
if status:
meta.update({"votaciones": project_results})
new_data.append(meta)
except urllib.error.HTTPError:
pass
if verbose:
sys.stdout.write("-")
sys.stdout.flush()
if verbose:
sys.stdout.write("]\n")
old_data = []
if os.path.isfile(filepath):
with open(filepath, "r", encoding="utf-8") as f:
old_data = json.load(f)
with open(filepath, 'w+', encoding='utf-8') as outfile:
json.dump(old_data + new_data, outfile, indent=4, ensure_ascii=False)
return True
if __name__ == "__main__":
full_scrap(get_last_id(), 10, "./results/data.json")
| en | 0.436505 | #opening the results table #getting the last votation | 2.847281 | 3 |
DeblurGAN-tf/00-access/data.py | NALLEIN/Ascend | 0 | 6621057 | <reponame>NALLEIN/Ascend
import random
import pathlib
import tensorflow as tf
def preprocess_image(image, ext):
    """
    Decode an encoded image and normalize pixel values to [-1, 1].

    image: raw encoded file contents (tensor of bytes).
    ext: file extension; selects the PNG or JPEG decoder.
    Returns a 3-channel float32 tensor rescaled from [0, 1] to [-1, 1].
    """
    assert ext in ['.png', '.jpg', '.jpeg', '.JPEG']
    if ext == '.png':
        image = tf.image.decode_png(image, channels=3)
    else:
        image = tf.image.decode_jpeg(image, channels=3)
    # convert_image_dtype also rescales integer pixel data into [0, 1].
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    # Shift/scale [0, 1] -> [-1, 1].
    image -= 0.5
    image /= 0.5
    return image
def load_and_preprocess_image(image_path, ext):
    """Read the file at `image_path` and return it normalized to [-1, 1]."""
    image = tf.read_file(image_path)
    return preprocess_image(image, ext)
def get_sorted_image_path(path, ext):
    """Return the paths of all files directly under `path` matching
    extension `ext` (e.g. ".png"), as a sorted list of strings."""
    root = pathlib.Path(path)
    matches = root.glob("*" + ext)
    return sorted(str(p) for p in matches)
def get_dataset(lr_path, hr_path, ext):
    """Build a tf.data.Dataset of paired (LR, HR) images.

    LR and HR files are matched by sorted filename order, shuffled as
    *pairs* (so correspondence is preserved), then decoded lazily.
    Returns (dataset, number_of_pairs).
    """
    lr_sorted_paths = get_sorted_image_path(lr_path, ext)
    hr_sorted_paths = get_sorted_image_path(hr_path, ext)
    # Zip copies of both lists so each LR path stays with its HR partner
    # through the shuffle.
    lr_hr_sorted_paths = list(zip(lr_sorted_paths[:], hr_sorted_paths[:]))
    random.shuffle(lr_hr_sorted_paths)
    lr_sorted_paths, hr_sorted_paths = zip(*lr_hr_sorted_paths)
    ds = tf.data.Dataset.from_tensor_slices((list(lr_sorted_paths), list(hr_sorted_paths)))
    def load_and_preprocess_lr_hr_images(lr_path, hr_path, ext=ext):
        # `ext` is bound as a default so the mapped fn keeps the
        # two-argument signature tf.data expects.
        return load_and_preprocess_image(lr_path, ext), load_and_preprocess_image(hr_path, ext)
    lr_hr_ds = ds.map(load_and_preprocess_lr_hr_images, num_parallel_calls=1)
    return lr_hr_ds, len(lr_sorted_paths)
def load_train_dataset(lr_path, hr_path, ext, batch_size):
    """Return (one-shot iterator over batched, endlessly repeated
    (LR, HR) pairs, dataset size)."""
    lr_hr_ds, n_data = get_dataset(lr_path, hr_path, ext)
    lr_hr_ds = lr_hr_ds.batch(batch_size)
    lr_hr_ds = lr_hr_ds.repeat()
    lr_hr_ds = lr_hr_ds.make_one_shot_iterator()
    return lr_hr_ds, n_data
def load_test_dataset(lr_path, hr_path, ext, batch_size):
    """Return (batched, repeated evaluation dataset, dataset size).

    NOTE(review): unlike load_train_dataset this returns the dataset
    itself rather than a one-shot iterator -- presumably intentional;
    confirm against the evaluation loop.
    """
    val_lr_hr_ds, val_n_data = get_dataset(lr_path, hr_path, ext)
    val_lr_hr_ds = val_lr_hr_ds.batch(batch_size)
    val_lr_hr_ds = val_lr_hr_ds.repeat()
    return val_lr_hr_ds, val_n_data
| import random
import pathlib
import tensorflow as tf
def preprocess_image(image, ext):
"""
Normalize image to [-1, 1]
"""
assert ext in ['.png', '.jpg', '.jpeg', '.JPEG']
if ext == '.png':
image = tf.image.decode_png(image, channels=3)
else:
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image -= 0.5
image /= 0.5
return image
def load_and_preprocess_image(image_path, ext):
image = tf.read_file(image_path)
return preprocess_image(image, ext)
def get_sorted_image_path(path, ext):
ext_regex = "*" + ext
data_root = pathlib.Path(path)
image_paths = list(data_root.glob(ext_regex))
image_paths = sorted([str(path) for path in image_paths])
return image_paths
def get_dataset(lr_path, hr_path, ext):
lr_sorted_paths = get_sorted_image_path(lr_path, ext)
hr_sorted_paths = get_sorted_image_path(hr_path, ext)
lr_hr_sorted_paths = list(zip(lr_sorted_paths[:], hr_sorted_paths[:]))
random.shuffle(lr_hr_sorted_paths)
lr_sorted_paths, hr_sorted_paths = zip(*lr_hr_sorted_paths)
ds = tf.data.Dataset.from_tensor_slices((list(lr_sorted_paths), list(hr_sorted_paths)))
def load_and_preprocess_lr_hr_images(lr_path, hr_path, ext=ext):
return load_and_preprocess_image(lr_path, ext), load_and_preprocess_image(hr_path, ext)
lr_hr_ds = ds.map(load_and_preprocess_lr_hr_images, num_parallel_calls=1)
return lr_hr_ds, len(lr_sorted_paths)
def load_train_dataset(lr_path, hr_path, ext, batch_size):
lr_hr_ds, n_data = get_dataset(lr_path, hr_path, ext)
lr_hr_ds = lr_hr_ds.batch(batch_size)
lr_hr_ds = lr_hr_ds.repeat()
lr_hr_ds = lr_hr_ds.make_one_shot_iterator()
return lr_hr_ds, n_data
def load_test_dataset(lr_path, hr_path, ext, batch_size):
val_lr_hr_ds, val_n_data = get_dataset(lr_path, hr_path, ext)
val_lr_hr_ds = val_lr_hr_ds.batch(batch_size)
val_lr_hr_ds = val_lr_hr_ds.repeat()
return val_lr_hr_ds, val_n_data | en | 0.866019 | Normalize image to [-1, 1] | 2.439815 | 2 |
troposphere_mate/applicationinsights.py | tsuttsu305/troposphere_mate-project | 0 | 6621058 | <filename>troposphere_mate/applicationinsights.py
# -*- coding: utf-8 -*-
"""
This code is auto generated from troposphere_mate.code_generator.__init__.py scripts.
"""
import sys
if sys.version_info.major >= 3 and sys.version_info.minor >= 5: # pragma: no cover
from typing import Union, List, Any
import troposphere.applicationinsights
from troposphere.applicationinsights import (
Alarm as _Alarm,
AlarmMetric as _AlarmMetric,
ComponentConfiguration as _ComponentConfiguration,
ComponentMonitoringSetting as _ComponentMonitoringSetting,
ConfigurationDetails as _ConfigurationDetails,
CustomComponent as _CustomComponent,
Log as _Log,
LogPattern as _LogPattern,
LogPatternSet as _LogPatternSet,
SubComponentConfigurationDetails as _SubComponentConfigurationDetails,
SubComponentTypeConfiguration as _SubComponentTypeConfiguration,
Tags as _Tags,
WindowsEvent as _WindowsEvent,
)
from troposphere import Template, AWSHelperFn
from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin
from troposphere_mate.core.sentiel import REQUIRED, NOTHING
class Alarm(troposphere.applicationinsights.Alarm, Mixin):
    # Auto-generated wrapper (see file header).  Exposes every troposphere
    # property as an explicit keyword with REQUIRED/NOTHING sentinels and
    # runs them through preprocess_init_kwargs before delegating to the
    # base class.  The same pattern repeats for every class in this module.
    def __init__(self,
                 title=None,
                 AlarmName=REQUIRED,  # type: Union[str, AWSHelperFn]
                 Severity=NOTHING,  # type: Union[str, AWSHelperFn]
                 **kwargs):
        processed_kwargs = preprocess_init_kwargs(
            title=title,
            AlarmName=AlarmName,
            Severity=Severity,
            **kwargs
        )
        super(Alarm, self).__init__(**processed_kwargs)
class AlarmMetric(troposphere.applicationinsights.AlarmMetric, Mixin):
def __init__(self,
title=None,
AlarmMetricName=REQUIRED, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
AlarmMetricName=AlarmMetricName,
**kwargs
)
super(AlarmMetric, self).__init__(**processed_kwargs)
class Log(troposphere.applicationinsights.Log, Mixin):
def __init__(self,
title=None,
LogType=REQUIRED, # type: Union[str, AWSHelperFn]
Encoding=NOTHING, # type: Union[str, AWSHelperFn]
LogGroupName=NOTHING, # type: Union[str, AWSHelperFn]
LogPath=NOTHING, # type: Union[str, AWSHelperFn]
PatternSet=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
LogType=LogType,
Encoding=Encoding,
LogGroupName=LogGroupName,
LogPath=LogPath,
PatternSet=PatternSet,
**kwargs
)
super(Log, self).__init__(**processed_kwargs)
class WindowsEvent(troposphere.applicationinsights.WindowsEvent, Mixin):
def __init__(self,
title=None,
EventLevels=REQUIRED, # type: List[Union[str, AWSHelperFn]]
EventName=REQUIRED, # type: Union[str, AWSHelperFn]
LogGroupName=REQUIRED, # type: Union[str, AWSHelperFn]
PatternSet=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
EventLevels=EventLevels,
EventName=EventName,
LogGroupName=LogGroupName,
PatternSet=PatternSet,
**kwargs
)
super(WindowsEvent, self).__init__(**processed_kwargs)
class ConfigurationDetails(troposphere.applicationinsights.ConfigurationDetails, Mixin):
def __init__(self,
title=None,
AlarmMetrics=NOTHING, # type: List[_AlarmMetric]
Alarms=NOTHING, # type: List[_Alarm]
Logs=NOTHING, # type: List[_Log]
WindowsEvents=NOTHING, # type: List[_WindowsEvent]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
AlarmMetrics=AlarmMetrics,
Alarms=Alarms,
Logs=Logs,
WindowsEvents=WindowsEvents,
**kwargs
)
super(ConfigurationDetails, self).__init__(**processed_kwargs)
class SubComponentConfigurationDetails(troposphere.applicationinsights.SubComponentConfigurationDetails, Mixin):
def __init__(self,
title=None,
AlarmMetrics=NOTHING, # type: List[_AlarmMetric]
Logs=NOTHING, # type: List[_Log]
WindowsEvents=NOTHING, # type: List[_WindowsEvent]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
AlarmMetrics=AlarmMetrics,
Logs=Logs,
WindowsEvents=WindowsEvents,
**kwargs
)
super(SubComponentConfigurationDetails, self).__init__(**processed_kwargs)
class SubComponentTypeConfiguration(troposphere.applicationinsights.SubComponentTypeConfiguration, Mixin):
def __init__(self,
title=None,
SubComponentConfigurationDetails=REQUIRED, # type: _SubComponentConfigurationDetails
SubComponentType=REQUIRED, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
SubComponentConfigurationDetails=SubComponentConfigurationDetails,
SubComponentType=SubComponentType,
**kwargs
)
super(SubComponentTypeConfiguration, self).__init__(**processed_kwargs)
class ComponentConfiguration(troposphere.applicationinsights.ComponentConfiguration, Mixin):
def __init__(self,
title=None,
ConfigurationDetails=NOTHING, # type: _ConfigurationDetails
SubComponentTypeConfigurations=NOTHING, # type: List[_SubComponentTypeConfiguration]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
ConfigurationDetails=ConfigurationDetails,
SubComponentTypeConfigurations=SubComponentTypeConfigurations,
**kwargs
)
super(ComponentConfiguration, self).__init__(**processed_kwargs)
class ComponentMonitoringSetting(troposphere.applicationinsights.ComponentMonitoringSetting, Mixin):
def __init__(self,
title=None,
ComponentARN=NOTHING, # type: Union[str, AWSHelperFn]
ComponentConfigurationMode=NOTHING, # type: Union[str, AWSHelperFn]
ComponentName=NOTHING, # type: Union[str, AWSHelperFn]
CustomComponentConfiguration=NOTHING, # type: _ComponentConfiguration
DefaultOverwriteComponentConfiguration=NOTHING, # type: _ComponentConfiguration
Tier=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
ComponentARN=ComponentARN,
ComponentConfigurationMode=ComponentConfigurationMode,
ComponentName=ComponentName,
CustomComponentConfiguration=CustomComponentConfiguration,
DefaultOverwriteComponentConfiguration=DefaultOverwriteComponentConfiguration,
Tier=Tier,
**kwargs
)
super(ComponentMonitoringSetting, self).__init__(**processed_kwargs)
class CustomComponent(troposphere.applicationinsights.CustomComponent, Mixin):
def __init__(self,
title=None,
ComponentName=REQUIRED, # type: Union[str, AWSHelperFn]
ResourceList=REQUIRED, # type: List[Union[str, AWSHelperFn]]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
ComponentName=ComponentName,
ResourceList=ResourceList,
**kwargs
)
super(CustomComponent, self).__init__(**processed_kwargs)
class LogPattern(troposphere.applicationinsights.LogPattern, Mixin):
def __init__(self,
title=None,
Pattern=REQUIRED, # type: Union[str, AWSHelperFn]
PatternName=REQUIRED, # type: Union[str, AWSHelperFn]
Rank=REQUIRED, # type: int
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
Pattern=Pattern,
PatternName=PatternName,
Rank=Rank,
**kwargs
)
super(LogPattern, self).__init__(**processed_kwargs)
class LogPatternSet(troposphere.applicationinsights.LogPatternSet, Mixin):
def __init__(self,
title=None,
LogPatterns=REQUIRED, # type: List[_LogPattern]
PatternSetName=REQUIRED, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
LogPatterns=LogPatterns,
PatternSetName=PatternSetName,
**kwargs
)
super(LogPatternSet, self).__init__(**processed_kwargs)
class Application(troposphere.applicationinsights.Application, Mixin):
    # Auto-generated wrapper for AWS::ApplicationInsights::Application.
    # Keyword defaults use the REQUIRED/NOTHING sentinels; arguments are
    # normalized by preprocess_init_kwargs (presumably dropping NOTHING
    # values -- confirm in troposphere_mate.core.mate) before delegation.
    def __init__(self,
                 title,  # type: str
                 template=None,  # type: Template
                 validation=True,  # type: bool
                 ResourceGroupName=REQUIRED,  # type: Union[str, AWSHelperFn]
                 AutoConfigurationEnabled=NOTHING,  # type: bool
                 CWEMonitorEnabled=NOTHING,  # type: bool
                 ComponentMonitoringSettings=NOTHING,  # type: List[_ComponentMonitoringSetting]
                 CustomComponents=NOTHING,  # type: List[_CustomComponent]
                 LogPatternSets=NOTHING,  # type: List[_LogPatternSet]
                 OpsCenterEnabled=NOTHING,  # type: bool
                 OpsItemSNSTopicArn=NOTHING,  # type: Union[str, AWSHelperFn]
                 Tags=NOTHING,  # type: _Tags
                 **kwargs):
        processed_kwargs = preprocess_init_kwargs(
            title=title,
            template=template,
            validation=validation,
            ResourceGroupName=ResourceGroupName,
            AutoConfigurationEnabled=AutoConfigurationEnabled,
            CWEMonitorEnabled=CWEMonitorEnabled,
            ComponentMonitoringSettings=ComponentMonitoringSettings,
            CustomComponents=CustomComponents,
            LogPatternSets=LogPatternSets,
            OpsCenterEnabled=OpsCenterEnabled,
            OpsItemSNSTopicArn=OpsItemSNSTopicArn,
            Tags=Tags,
            **kwargs
        )
        super(Application, self).__init__(**processed_kwargs)
| <filename>troposphere_mate/applicationinsights.py
# -*- coding: utf-8 -*-
"""
This code is auto generated from troposphere_mate.code_generator.__init__.py scripts.
"""
import sys
if sys.version_info.major >= 3 and sys.version_info.minor >= 5: # pragma: no cover
from typing import Union, List, Any
import troposphere.applicationinsights
from troposphere.applicationinsights import (
Alarm as _Alarm,
AlarmMetric as _AlarmMetric,
ComponentConfiguration as _ComponentConfiguration,
ComponentMonitoringSetting as _ComponentMonitoringSetting,
ConfigurationDetails as _ConfigurationDetails,
CustomComponent as _CustomComponent,
Log as _Log,
LogPattern as _LogPattern,
LogPatternSet as _LogPatternSet,
SubComponentConfigurationDetails as _SubComponentConfigurationDetails,
SubComponentTypeConfiguration as _SubComponentTypeConfiguration,
Tags as _Tags,
WindowsEvent as _WindowsEvent,
)
from troposphere import Template, AWSHelperFn
from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin
from troposphere_mate.core.sentiel import REQUIRED, NOTHING
class Alarm(troposphere.applicationinsights.Alarm, Mixin):
def __init__(self,
title=None,
AlarmName=REQUIRED, # type: Union[str, AWSHelperFn]
Severity=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
AlarmName=AlarmName,
Severity=Severity,
**kwargs
)
super(Alarm, self).__init__(**processed_kwargs)
class AlarmMetric(troposphere.applicationinsights.AlarmMetric, Mixin):
def __init__(self,
title=None,
AlarmMetricName=REQUIRED, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
AlarmMetricName=AlarmMetricName,
**kwargs
)
super(AlarmMetric, self).__init__(**processed_kwargs)
class Log(troposphere.applicationinsights.Log, Mixin):
def __init__(self,
title=None,
LogType=REQUIRED, # type: Union[str, AWSHelperFn]
Encoding=NOTHING, # type: Union[str, AWSHelperFn]
LogGroupName=NOTHING, # type: Union[str, AWSHelperFn]
LogPath=NOTHING, # type: Union[str, AWSHelperFn]
PatternSet=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
LogType=LogType,
Encoding=Encoding,
LogGroupName=LogGroupName,
LogPath=LogPath,
PatternSet=PatternSet,
**kwargs
)
super(Log, self).__init__(**processed_kwargs)
class WindowsEvent(troposphere.applicationinsights.WindowsEvent, Mixin):
def __init__(self,
title=None,
EventLevels=REQUIRED, # type: List[Union[str, AWSHelperFn]]
EventName=REQUIRED, # type: Union[str, AWSHelperFn]
LogGroupName=REQUIRED, # type: Union[str, AWSHelperFn]
PatternSet=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
EventLevels=EventLevels,
EventName=EventName,
LogGroupName=LogGroupName,
PatternSet=PatternSet,
**kwargs
)
super(WindowsEvent, self).__init__(**processed_kwargs)
class ConfigurationDetails(troposphere.applicationinsights.ConfigurationDetails, Mixin):
def __init__(self,
title=None,
AlarmMetrics=NOTHING, # type: List[_AlarmMetric]
Alarms=NOTHING, # type: List[_Alarm]
Logs=NOTHING, # type: List[_Log]
WindowsEvents=NOTHING, # type: List[_WindowsEvent]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
AlarmMetrics=AlarmMetrics,
Alarms=Alarms,
Logs=Logs,
WindowsEvents=WindowsEvents,
**kwargs
)
super(ConfigurationDetails, self).__init__(**processed_kwargs)
class SubComponentConfigurationDetails(troposphere.applicationinsights.SubComponentConfigurationDetails, Mixin):
def __init__(self,
title=None,
AlarmMetrics=NOTHING, # type: List[_AlarmMetric]
Logs=NOTHING, # type: List[_Log]
WindowsEvents=NOTHING, # type: List[_WindowsEvent]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
AlarmMetrics=AlarmMetrics,
Logs=Logs,
WindowsEvents=WindowsEvents,
**kwargs
)
super(SubComponentConfigurationDetails, self).__init__(**processed_kwargs)
class SubComponentTypeConfiguration(troposphere.applicationinsights.SubComponentTypeConfiguration, Mixin):
def __init__(self,
title=None,
SubComponentConfigurationDetails=REQUIRED, # type: _SubComponentConfigurationDetails
SubComponentType=REQUIRED, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
SubComponentConfigurationDetails=SubComponentConfigurationDetails,
SubComponentType=SubComponentType,
**kwargs
)
super(SubComponentTypeConfiguration, self).__init__(**processed_kwargs)
class ComponentConfiguration(troposphere.applicationinsights.ComponentConfiguration, Mixin):
def __init__(self,
title=None,
ConfigurationDetails=NOTHING, # type: _ConfigurationDetails
SubComponentTypeConfigurations=NOTHING, # type: List[_SubComponentTypeConfiguration]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
ConfigurationDetails=ConfigurationDetails,
SubComponentTypeConfigurations=SubComponentTypeConfigurations,
**kwargs
)
super(ComponentConfiguration, self).__init__(**processed_kwargs)
class ComponentMonitoringSetting(troposphere.applicationinsights.ComponentMonitoringSetting, Mixin):
def __init__(self,
title=None,
ComponentARN=NOTHING, # type: Union[str, AWSHelperFn]
ComponentConfigurationMode=NOTHING, # type: Union[str, AWSHelperFn]
ComponentName=NOTHING, # type: Union[str, AWSHelperFn]
CustomComponentConfiguration=NOTHING, # type: _ComponentConfiguration
DefaultOverwriteComponentConfiguration=NOTHING, # type: _ComponentConfiguration
Tier=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
ComponentARN=ComponentARN,
ComponentConfigurationMode=ComponentConfigurationMode,
ComponentName=ComponentName,
CustomComponentConfiguration=CustomComponentConfiguration,
DefaultOverwriteComponentConfiguration=DefaultOverwriteComponentConfiguration,
Tier=Tier,
**kwargs
)
super(ComponentMonitoringSetting, self).__init__(**processed_kwargs)
class CustomComponent(troposphere.applicationinsights.CustomComponent, Mixin):
def __init__(self,
title=None,
ComponentName=REQUIRED, # type: Union[str, AWSHelperFn]
ResourceList=REQUIRED, # type: List[Union[str, AWSHelperFn]]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
ComponentName=ComponentName,
ResourceList=ResourceList,
**kwargs
)
super(CustomComponent, self).__init__(**processed_kwargs)
class LogPattern(troposphere.applicationinsights.LogPattern, Mixin):
def __init__(self,
title=None,
Pattern=REQUIRED, # type: Union[str, AWSHelperFn]
PatternName=REQUIRED, # type: Union[str, AWSHelperFn]
Rank=REQUIRED, # type: int
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
Pattern=Pattern,
PatternName=PatternName,
Rank=Rank,
**kwargs
)
super(LogPattern, self).__init__(**processed_kwargs)
class LogPatternSet(troposphere.applicationinsights.LogPatternSet, Mixin):
def __init__(self,
title=None,
LogPatterns=REQUIRED, # type: List[_LogPattern]
PatternSetName=REQUIRED, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
LogPatterns=LogPatterns,
PatternSetName=PatternSetName,
**kwargs
)
super(LogPatternSet, self).__init__(**processed_kwargs)
class Application(troposphere.applicationinsights.Application, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
ResourceGroupName=REQUIRED, # type: Union[str, AWSHelperFn]
AutoConfigurationEnabled=NOTHING, # type: bool
CWEMonitorEnabled=NOTHING, # type: bool
ComponentMonitoringSettings=NOTHING, # type: List[_ComponentMonitoringSetting]
CustomComponents=NOTHING, # type: List[_CustomComponent]
LogPatternSets=NOTHING, # type: List[_LogPatternSet]
OpsCenterEnabled=NOTHING, # type: bool
OpsItemSNSTopicArn=NOTHING, # type: Union[str, AWSHelperFn]
Tags=NOTHING, # type: _Tags
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
ResourceGroupName=ResourceGroupName,
AutoConfigurationEnabled=AutoConfigurationEnabled,
CWEMonitorEnabled=CWEMonitorEnabled,
ComponentMonitoringSettings=ComponentMonitoringSettings,
CustomComponents=CustomComponents,
LogPatternSets=LogPatternSets,
OpsCenterEnabled=OpsCenterEnabled,
OpsItemSNSTopicArn=OpsItemSNSTopicArn,
Tags=Tags,
**kwargs
)
super(Application, self).__init__(**processed_kwargs)
| en | 0.327073 | # -*- coding: utf-8 -*- This code is auto generated from troposphere_mate.code_generator.__init__.py scripts. # pragma: no cover # type: Union[str, AWSHelperFn] # type: Union[str, AWSHelperFn] # type: Union[str, AWSHelperFn] # type: Union[str, AWSHelperFn] # type: Union[str, AWSHelperFn] # type: Union[str, AWSHelperFn] # type: Union[str, AWSHelperFn] # type: Union[str, AWSHelperFn] # type: List[Union[str, AWSHelperFn]] # type: Union[str, AWSHelperFn] # type: Union[str, AWSHelperFn] # type: Union[str, AWSHelperFn] # type: List[_AlarmMetric] # type: List[_Alarm] # type: List[_Log] # type: List[_WindowsEvent] # type: List[_AlarmMetric] # type: List[_Log] # type: List[_WindowsEvent] # type: _SubComponentConfigurationDetails # type: Union[str, AWSHelperFn] # type: _ConfigurationDetails # type: List[_SubComponentTypeConfiguration] # type: Union[str, AWSHelperFn] # type: Union[str, AWSHelperFn] # type: Union[str, AWSHelperFn] # type: _ComponentConfiguration # type: _ComponentConfiguration # type: Union[str, AWSHelperFn] # type: Union[str, AWSHelperFn] # type: List[Union[str, AWSHelperFn]] # type: Union[str, AWSHelperFn] # type: Union[str, AWSHelperFn] # type: int # type: List[_LogPattern] # type: Union[str, AWSHelperFn] # type: str # type: Template # type: bool # type: Union[str, AWSHelperFn] # type: bool # type: bool # type: List[_ComponentMonitoringSetting] # type: List[_CustomComponent] # type: List[_LogPatternSet] # type: bool # type: Union[str, AWSHelperFn] # type: _Tags | 1.776218 | 2 |
utility/bf2mugi.py | hvze/Mugi | 1 | 6621059 | <reponame>hvze/Mugi<filename>utility/bf2mugi.py
import sys

# Brainfuck operator -> Mugi letter mapping; all other characters are dropped.
BF_TO_MUGI = {'>': 'r', '<': 'l', '+': 'p', '-': 'm', '.': 'o'}

res = ''
for ch in " ".join(sys.argv[1:]):
    mapped = BF_TO_MUGI.get(ch)
    if mapped is not None:
        res += mapped
print(res)
res = ''
for (i, c) in enumerate(" ".join(sys.argv[1:])):
for (x, y) in [('>', 'r'), ('<', 'l'), ('+', 'p'), ('-', 'm'), ('.', 'o')]:
if x == c:
res += y
print(res) | none | 1 | 3.452928 | 3 | |
webapp/app.py | rohitnayak/movie-review-sentiment-analysis | 0 | 6621060 | from flask import Flask, render_template, request, send_file
from wtforms import Form, TextAreaField, validators
import pickle, sqlite3, os, numpy as np
from vectorizer import vect
import uuid, json
import facerecognition.recognize as REC
import base64
import io
import logging, logging.config
import sys
# Route all log records at INFO and above to stdout.
LOGGING = {
    'version': 1,
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'stream': sys.stdout,
        }
    },
    'root': {
        'handlers': ['console'],
        'level': 'INFO'
    }
}
logging.config.dictConfig(LOGGING)
cur_dir = os.path.dirname(__file__)
# Unpickle the pre-trained sentiment classifier shipped with the app.
clf = pickle.load(open(os.path.join(cur_dir,
                  'pickled_objects', 'classifier.pkl'), 'rb'))
def classify(document):
    """Classify a review text.

    Returns (label, probability) where label is 'negative' or 'positive'
    and probability is the classifier's confidence in that label.
    """
    label = {0: 'negative', 1: 'positive'}
    X = vect.transform([document])
    y = clf.predict(X)[0]
    p = clf.predict_proba(X).max()
    return label[y], p
def train(document, y):
    """Online-update the classifier with one labeled example (y in {0, 1})."""
    X = vect.transform([document])
    clf.partial_fit(X, [y])
def sqlite_entry(document, y):
    """Persist a (review, sentiment) pair with a timestamp in reviews.sqlite.

    BUGFIX: the connection is now closed even when the INSERT raises,
    so a failed write no longer leaks the handle / keeps the db locked.
    """
    conn = sqlite3.connect('reviews.sqlite')
    try:
        c = conn.cursor()
        c.execute("INSERT INTO review_db (review, sentiment, date)"
                  " VALUES (?, ?, DATETIME('now'))", (document, y))
        conn.commit()
    finally:
        conn.close()
app = Flask(__name__)

class ReviewForm(Form):
    # Free-text review field; at least 15 characters required.
    review = TextAreaField('', [validators.DataRequired(),
                                validators.length(min=15)])
@app.route('/')
def index():
    """Landing page: empty review submission form."""
    form = ReviewForm(request.form)
    return render_template('review.html', form=form)
@app.route('/results', methods=['POST'])
def results():
    """Validate the submitted review and show the model's prediction."""
    form = ReviewForm(request.form)
    if request.method == 'POST' and form.validate():
        review = request.form['review']
        y, p = classify(review)
        return render_template('results.html',
                               content=review,
                               prediction=y,
                               probability=round(p*100, 2))
    # Validation failed: redisplay the form (with its errors).
    return render_template('review.html', form=form)
@app.route('/feedback', methods=['POST'])
def feedback():
    """Record user feedback and update the model incrementally.

    When the user marks the prediction 'Incorrect', the label is flipped
    before the example is fed to partial_fit and stored in SQLite.
    """
    feedback = request.form['feedback_button']
    review = request.form['review']
    prediction = request.form['prediction']
    inv_label = {'negative': 0, 'positive': 1}
    y = inv_label[prediction]
    if feedback == 'Incorrect':
        # Flip 0 <-> 1: the model's guess was wrong.
        y = int(not(y))
    train(review, y)
    sqlite_entry(review, y)
    return render_template('feedback.html')
@app.route('/recognize', methods=['GET', 'POST'])
def recognize():
    """Run face recognition on an uploaded image.

    POST: saves the upload under a UUID name, runs the recognizer, and
    responds with the base64-encoded annotated image.  GET: shows the
    upload form.  (BUGFIX: a GET previously fell through and returned
    None, which made Flask raise.)
    """
    if request.method != 'POST':
        return render_template('recognize_submit.html')
    file = request.files['file']
    # Keep the original extension; default to .jpg when there is none.
    extension = os.path.splitext(file.filename)[1] or ".jpg"
    f_name = str(uuid.uuid4()) + extension
    f_path = os.path.join("./uploaded_images/", f_name)
    file.save(f_path)
    f_outpath = os.path.join("./rec_images/", f_name)
    logging.info("fPath is %s, Recfile is %s", f_path, f_outpath)
    REC.recognize(f_path, f_outpath)
    # BUGFIX: read via a context manager -- the handle was left open.
    with open(f_outpath, "rb") as rec_file:
        data = rec_file.read()
    # NOTE(review): the payload is base64 text although the mimetype says
    # image/jpg -- presumably the JS client decodes it; confirm before
    # changing this.
    data = base64.b64encode(data)
    return send_file(io.BytesIO(data), mimetype='image/jpg')
@app.route('/recognize_submit')
def recognize_submit():
    """Render the face-recognition upload page."""
    return render_template('recognize_submit.html')
@app.route('/jpeg_camera/<path:path>')
def send_js(path):
    """Serve static assets from the jpeg_camera directory.

    SECURITY/PORTABILITY FIX: the original concatenated the raw request
    path onto 'jpeg_camera\\', which permits path traversal and only
    works with Windows separators.  send_from_directory rejects paths
    that escape the directory and is platform-independent.
    """
    from flask import send_from_directory  # local import keeps this edit self-contained
    return send_from_directory('jpeg_camera', path)
if __name__ == "__main__":
    # Development server only; use a proper WSGI server in production.
    app.run(debug=True)
| from flask import Flask, render_template, request, send_file
from wtforms import Form, TextAreaField, validators
import pickle, sqlite3, os, numpy as np
from vectorizer import vect
import uuid, json
import facerecognition.recognize as REC
import base64
import io
import logging, logging.config
import sys
LOGGING = {
'version': 1,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'stream': sys.stdout,
}
},
'root': {
'handlers': ['console'],
'level': 'INFO'
}
}
logging.config.dictConfig(LOGGING)
cur_dir=os.path.dirname(__file__)
clf=pickle.load(open(os.path.join(cur_dir,
'pickled_objects', 'classifier.pkl'), 'rb'))
def classify(document):
label={0:'negative', 1:'positive'}
X=vect.transform([document])
y=clf.predict(X)[0]
p=clf.predict_proba(X).max()
return label[y], p
def train(document, y):
X=vect.transform([document])
clf.partial_fit(X, [y])
def sqlite_entry(document, y):
conn=sqlite3.connect('reviews.sqlite')
c=conn.cursor()
c.execute("INSERT INTO review_db (review, sentiment, date)"\
" VALUES (?, ?, DATETIME('now'))", (document, y))
conn.commit()
conn.close()
app=Flask(__name__)
class ReviewForm(Form):
review=TextAreaField('', [validators.DataRequired(),
validators.length(min=15)])
@app.route('/')
def index():
form=ReviewForm(request.form)
return render_template('review.html', form=form)
@app.route('/results', methods=['POST'])
def results():
form=ReviewForm(request.form)
if request.method == 'POST' and form.validate():
review=request.form['review']
y, p=classify(review)
return render_template('results.html',
content=review,
prediction=y,
probability=round(p*100,2))
return render_template('review.html', form=form)
@app.route('/feedback', methods=['POST'])
def feedback():
feedback=request.form['feedback_button']
review=request.form['review']
prediction=request.form['prediction']
inv_label={'negative': 0, 'positive': 1}
y=inv_label[prediction]
if feedback=='Incorrect':
y=int(not(y))
train(review, y)
sqlite_entry(review, y)
return render_template('feedback.html')
@app.route('/recognize', methods=['GET', 'POST'])
def recognize():
if request.method == 'POST':
logging.info('Hello1 '+ str(request.files))
file = request.files['file']
logging.info('Hello2')
extension = os.path.splitext(file.filename)[1]
if not extension:
extension = ".jpg"
logging.info('Hello3')
f_name = str(uuid.uuid4()) + extension
logging.info('Hello4')
f_path = os.path.join("./uploaded_images/", f_name)
logging.info('Hello5')
file.save(f_path)
logging.info('Hello6')
f_outpath = os.path.join("./rec_images/", f_name)
logging.info("fPath is " + f_path + ", Recfile is " + f_outpath)
REC.recognize(f_path, f_outpath)
logging.info('Hello7')
data = open(f_outpath, "rb").read()
data = base64.b64encode(data)
return send_file(io.BytesIO(data), mimetype='image/jpg')
@app.route('/recognize_submit')
def recognize_submit():
return render_template('recognize_submit.html')
@app.route('/jpeg_camera/<path:path>')
def send_js(path):
return send_file('jpeg_camera\\'+ path)
if __name__ == "__main__":
app.run(debug=True)
| none | 1 | 2.369567 | 2 | |
packages/core/minos-microservice-aggregate/minos/aggregate/transactions/repositories/__init__.py | minos-framework/minos-python | 247 | 6621061 | <filename>packages/core/minos-microservice-aggregate/minos/aggregate/transactions/repositories/__init__.py
from .abc import (
TransactionRepository,
)
from .database import (
DatabaseTransactionRepository,
TransactionDatabaseOperationFactory,
)
from .memory import (
InMemoryTransactionRepository,
)
| <filename>packages/core/minos-microservice-aggregate/minos/aggregate/transactions/repositories/__init__.py
from .abc import (
TransactionRepository,
)
from .database import (
DatabaseTransactionRepository,
TransactionDatabaseOperationFactory,
)
from .memory import (
InMemoryTransactionRepository,
)
| none | 1 | 1.454719 | 1 | |
Day 3/part1.py | jonomango/advent-of-code-2021 | 0 | 6621062 | import math
import copy
from functools import reduce
arr = [0 for i in range(12)]
with open("input.txt", "r") as file:
# iterate over every line in a file
for line in file.read().strip().split("\n"):
for i in range(12):
if line[i] == '1':
arr[i] += 1
else:
arr[i] -= 1
g = ""
e = ""
for i in range(12):
if (arr[i] > 0):
g += "1"
e += "0"
else:
g += "0"
e += "1"
print(int(g, 2) * int(e, 2)) | import math
import copy
from functools import reduce
arr = [0 for i in range(12)]
with open("input.txt", "r") as file:
# iterate over every line in a file
for line in file.read().strip().split("\n"):
for i in range(12):
if line[i] == '1':
arr[i] += 1
else:
arr[i] -= 1
g = ""
e = ""
for i in range(12):
if (arr[i] > 0):
g += "1"
e += "0"
else:
g += "0"
e += "1"
print(int(g, 2) * int(e, 2)) | en | 0.934466 | # iterate over every line in a file | 3.181499 | 3 |
paradigmes/live_coding_2019_10_14/fonctionnel02.py | yostane/python-paradigmes-et-structures-de-donnees | 0 | 6621063 | <filename>paradigmes/live_coding_2019_10_14/fonctionnel02.py
l = [1, 2, 3, 4, 5, 6, 7]
def est_pair(x): # retourne true si x est pair
return x % 2 == 0
liste_filtree = filter(est_pair, l)
print(list(liste_filtree)) # je dois repréciser que c une liste
print(list(filter(lambda y: y >= 5, l)))
print(list(map(lambda x: x * 2, l))) # le double de chaque élément
liste_resultat = map(lambda x: x * 2, filter(est_pair, filter(lambda x: x >= 5, l)))
liste_resultat = map(lambda x: x * 2, filter(lambda x: x >= 5 and x % 2 == 0, l))
print(list(liste_resultat)) # le double des éléments >= 5
liste_resultat = [x * 2 for x in l if x >= 5 and x % 2 == 0] # compréhension de liste
print(liste_resultat)
pokemons = ["pikachu", "salameche", "rondoudou"]
resultat = map(lambda mot: len(mot), pokemons)
print(list(resultat)) # on peut trasformer vers un autre type avec map
print([len(x) for x in pokemons])
import functools
total = functools.reduce(lambda x, y: x + y, l, 0)
print(total)
print(functools.reduce(lambda x, y: x * y, l, 1))
y
| <filename>paradigmes/live_coding_2019_10_14/fonctionnel02.py
l = [1, 2, 3, 4, 5, 6, 7]
def est_pair(x): # retourne true si x est pair
return x % 2 == 0
liste_filtree = filter(est_pair, l)
print(list(liste_filtree)) # je dois repréciser que c une liste
print(list(filter(lambda y: y >= 5, l)))
print(list(map(lambda x: x * 2, l))) # le double de chaque élément
liste_resultat = map(lambda x: x * 2, filter(est_pair, filter(lambda x: x >= 5, l)))
liste_resultat = map(lambda x: x * 2, filter(lambda x: x >= 5 and x % 2 == 0, l))
print(list(liste_resultat)) # le double des éléments >= 5
liste_resultat = [x * 2 for x in l if x >= 5 and x % 2 == 0] # compréhension de liste
print(liste_resultat)
pokemons = ["pikachu", "salameche", "rondoudou"]
resultat = map(lambda mot: len(mot), pokemons)
print(list(resultat)) # on peut trasformer vers un autre type avec map
print([len(x) for x in pokemons])
import functools
total = functools.reduce(lambda x, y: x + y, l, 0)
print(total)
print(functools.reduce(lambda x, y: x * y, l, 1))
y
| fr | 0.979998 | # retourne true si x est pair # je dois repréciser que c une liste # le double de chaque élément # le double des éléments >= 5 # compréhension de liste # on peut trasformer vers un autre type avec map | 3.555641 | 4 |
appengine/networkx/algorithms/tests/test_swap.py | CSE512-15S/a3-haynesb-Pending | 12 | 6621064 | <reponame>CSE512-15S/a3-haynesb-Pending
#!/usr/bin/env python
from nose.tools import *
from networkx import *
def test_double_edge_swap():
graph = barabasi_albert_graph(200,1)
degreeStart = sorted(graph.degree().values())
G = connected_double_edge_swap(graph, 40)
assert_true(is_connected(graph))
degseq = sorted(graph.degree().values())
assert_true(degreeStart == degseq)
G = double_edge_swap(graph, 40)
degseq2 = sorted(graph.degree().values())
assert_true(degreeStart == degseq2)
def test_degree_seq_c4():
G = cycle_graph(4)
degree_start = sorted(G.degree().values())
G = double_edge_swap(G,1,100)
degseq = sorted(G.degree().values())
assert_true(degree_start == degseq)
| #!/usr/bin/env python
from nose.tools import *
from networkx import *
def test_double_edge_swap():
graph = barabasi_albert_graph(200,1)
degreeStart = sorted(graph.degree().values())
G = connected_double_edge_swap(graph, 40)
assert_true(is_connected(graph))
degseq = sorted(graph.degree().values())
assert_true(degreeStart == degseq)
G = double_edge_swap(graph, 40)
degseq2 = sorted(graph.degree().values())
assert_true(degreeStart == degseq2)
def test_degree_seq_c4():
G = cycle_graph(4)
degree_start = sorted(G.degree().values())
G = double_edge_swap(G,1,100)
degseq = sorted(G.degree().values())
assert_true(degree_start == degseq) | ru | 0.26433 | #!/usr/bin/env python | 2.639343 | 3 |
newt/magnetometer.py | acrerd/newt | 0 | 6621065 | import requests
import datetime
import pandas
import numpy as np
from instruments import Instrument
from . import config
class Magnetometer(Instrument):
"""
Represent the magnetometer.
"""
root_url = config.get("magnetometer", "url")
def __init__(self):
pass
def _determine_files(self, start, end):
start = datetime.datetime.strptime(start, "%Y-%m-%d %H:%M")
end = datetime.datetime.strptime(end, "%Y-%m-%d %H:%M")
delta = end - start # as timedelta
files = []
for i in range(delta.days + 1):
day = start + datetime.timedelta(days=i)
files.append("{}".format(day.strftime("%Y-%m-%d")))
return files
def _download_files(self, start, end):
files = self._determine_files(start, end)
data = []
for file in files:
download = requests.get("{}/{}.txt".format(self.root_url, file))
download = np.fromstring(download.text, sep='\t').reshape(-1, 5)
download = np.array(download, dtype=object)
#download = self._apply_reductions(download)
times = np.array([datetime.datetime.strptime(file, "%Y-%m-%d") + datetime.timedelta(seconds=x/1000) for x in download[:,0]])
download[:,0] = times
data.append(download)
data = np.vstack(data)
return data
def _apply_reductions(self, data):
#data[["x", "y", "z"]] += np.median(data[["x", "y", "z"]], axis=0)
ftot = np.sum( data[["x", "y", "z"]], axis=0)
#da = (data.index[-1] - data.index[0]).days
ampcal = 1.1234615277 # 49891 / (ftot)
data[["x", "y", "z"]] * (ampcal)
return data
def _apply_rotation(self, data):
B_c = [16636.48633, -7215.19984936, 46478.2547421] # the field in the current axes (from a median of a lot of data)
u_c = B_c/np.linalg.norm(B_c)
B_t = [17264, -812, 46802] # the field in the target axes (from the reference website)
u_t = B_t/np.linalg.norm(B_t)
cross = np.cross(u_c, u_t)
u = -cross/np.linalg.norm(cross)
theta = np.arccos(np.dot(u_c, u_t))
x = u[0]
y = u[1]
z = u[2]
c = np.cos(theta)
s = np.sin(theta)
# rotation matrix by theta around u
R = np.array([ [ c+x*x*(1-c), x*y*(1-c)-z*s, x*z*(1-c)+y*s],
[y*x*(1-c)+z*s, c+y*y*(1-c), y*z*(1-c)-x*s],
[z*x*(1-c)-y*s, z*y*(1-c)+x*s, c+z*z*(1-c)] ])
data[["x", "y", "z"]] = np.matmul(np.array(data[["x", "y", "z"]]).T, R)
return data
def get_data(self, start, end):
data = self._download_files(start, end)
dataframe = pandas.DataFrame(data[:,1:5], columns=("y", "x", "z", "temperature"), index=data[:,0])
dataframe['H'] = (dataframe.x**2 + dataframe.y**2)**0.5
dataframe['D'] = np.rad2deg(np.arctan2(np.array(dataframe.y.values, dtype=np.float64), np.array(dataframe.x.values, dtype=np.float64)))
dataframe = dataframe.loc[slice(start, end)]
dataframe = self._apply_reductions(dataframe)
return dataframe
| import requests
import datetime
import pandas
import numpy as np
from instruments import Instrument
from . import config
class Magnetometer(Instrument):
"""
Represent the magnetometer.
"""
root_url = config.get("magnetometer", "url")
def __init__(self):
pass
def _determine_files(self, start, end):
start = datetime.datetime.strptime(start, "%Y-%m-%d %H:%M")
end = datetime.datetime.strptime(end, "%Y-%m-%d %H:%M")
delta = end - start # as timedelta
files = []
for i in range(delta.days + 1):
day = start + datetime.timedelta(days=i)
files.append("{}".format(day.strftime("%Y-%m-%d")))
return files
def _download_files(self, start, end):
files = self._determine_files(start, end)
data = []
for file in files:
download = requests.get("{}/{}.txt".format(self.root_url, file))
download = np.fromstring(download.text, sep='\t').reshape(-1, 5)
download = np.array(download, dtype=object)
#download = self._apply_reductions(download)
times = np.array([datetime.datetime.strptime(file, "%Y-%m-%d") + datetime.timedelta(seconds=x/1000) for x in download[:,0]])
download[:,0] = times
data.append(download)
data = np.vstack(data)
return data
def _apply_reductions(self, data):
#data[["x", "y", "z"]] += np.median(data[["x", "y", "z"]], axis=0)
ftot = np.sum( data[["x", "y", "z"]], axis=0)
#da = (data.index[-1] - data.index[0]).days
ampcal = 1.1234615277 # 49891 / (ftot)
data[["x", "y", "z"]] * (ampcal)
return data
def _apply_rotation(self, data):
B_c = [16636.48633, -7215.19984936, 46478.2547421] # the field in the current axes (from a median of a lot of data)
u_c = B_c/np.linalg.norm(B_c)
B_t = [17264, -812, 46802] # the field in the target axes (from the reference website)
u_t = B_t/np.linalg.norm(B_t)
cross = np.cross(u_c, u_t)
u = -cross/np.linalg.norm(cross)
theta = np.arccos(np.dot(u_c, u_t))
x = u[0]
y = u[1]
z = u[2]
c = np.cos(theta)
s = np.sin(theta)
# rotation matrix by theta around u
R = np.array([ [ c+x*x*(1-c), x*y*(1-c)-z*s, x*z*(1-c)+y*s],
[y*x*(1-c)+z*s, c+y*y*(1-c), y*z*(1-c)-x*s],
[z*x*(1-c)-y*s, z*y*(1-c)+x*s, c+z*z*(1-c)] ])
data[["x", "y", "z"]] = np.matmul(np.array(data[["x", "y", "z"]]).T, R)
return data
def get_data(self, start, end):
data = self._download_files(start, end)
dataframe = pandas.DataFrame(data[:,1:5], columns=("y", "x", "z", "temperature"), index=data[:,0])
dataframe['H'] = (dataframe.x**2 + dataframe.y**2)**0.5
dataframe['D'] = np.rad2deg(np.arctan2(np.array(dataframe.y.values, dtype=np.float64), np.array(dataframe.x.values, dtype=np.float64)))
dataframe = dataframe.loc[slice(start, end)]
dataframe = self._apply_reductions(dataframe)
return dataframe
| en | 0.746251 | Represent the magnetometer. # as timedelta #download = self._apply_reductions(download) #data[["x", "y", "z"]] += np.median(data[["x", "y", "z"]], axis=0) #da = (data.index[-1] - data.index[0]).days # 49891 / (ftot) # the field in the current axes (from a median of a lot of data) # the field in the target axes (from the reference website) # rotation matrix by theta around u | 3.039838 | 3 |
fetchers/FetcherController.py | OneStarSolution/prometeo | 0 | 6621066 | <filename>fetchers/FetcherController.py<gh_stars>0
from pymongo import UpdateOne
from db.PrometeoDB import PrometeoDB
class FetcherController:
def save(self, documents):
document_dicts = []
for document in documents:
if not isinstance(document, dict):
document = document.to_dict()
document['_id'] = document.get('ID')
del document['ID']
document_dicts.append(document)
with PrometeoDB() as db:
businesses = db.get_businesses()
operations = [
UpdateOne({"_id": doc.get("_id")}, {"$set": doc}, upsert=True) for doc in document_dicts
]
result = businesses.bulk_write(operations)
print(result)
| <filename>fetchers/FetcherController.py<gh_stars>0
from pymongo import UpdateOne
from db.PrometeoDB import PrometeoDB
class FetcherController:
def save(self, documents):
document_dicts = []
for document in documents:
if not isinstance(document, dict):
document = document.to_dict()
document['_id'] = document.get('ID')
del document['ID']
document_dicts.append(document)
with PrometeoDB() as db:
businesses = db.get_businesses()
operations = [
UpdateOne({"_id": doc.get("_id")}, {"$set": doc}, upsert=True) for doc in document_dicts
]
result = businesses.bulk_write(operations)
print(result)
| none | 1 | 2.67094 | 3 | |
Chapter11/clients/log-fluent.py | nontster/LoggingInActionWithFluentd | 11 | 6621067 | <reponame>nontster/LoggingInActionWithFluentd
#This implementation makes use of the Fluentd implementation directly without the use of
# the Python logging framework
import datetime, time
from fluent import handler, sender
fluentSender = sender.FluentSender('test', host='localhost', port=18090)
# using the Fluentd Handler means that msgpack will be used and therefore the source plugin in Fluentd is a forward plugin.
now = datetime.datetime.now().strftime("%d-%m-%Y %H-%M-%S")
fluentSender.emit_with_time('', int(time.time()), {'from': 'log-fluent', 'at': now})
| #This implementation makes use of the Fluentd implementation directly without the use of
# the Python logging framework
import datetime, time
from fluent import handler, sender
fluentSender = sender.FluentSender('test', host='localhost', port=18090)
# using the Fluentd Handler means that msgpack will be used and therefore the source plugin in Fluentd is a forward plugin.
now = datetime.datetime.now().strftime("%d-%m-%Y %H-%M-%S")
fluentSender.emit_with_time('', int(time.time()), {'from': 'log-fluent', 'at': now}) | en | 0.788355 | #This implementation makes use of the Fluentd implementation directly without the use of # the Python logging framework # using the Fluentd Handler means that msgpack will be used and therefore the source plugin in Fluentd is a forward plugin. | 2.408674 | 2 |
neural_exploration/visualize/views.py | brookefitzgerald/neural_exploration | 0 | 6621068 | <filename>neural_exploration/visualize/views.py
from django.apps import apps
from django.shortcuts import render
from rest_framework import renderers, viewsets, status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .serializers import DataSerializer, FirstBinSerializer, SecondBinSerializer, ThirdBinSerializer
def SpikeDataView(request):
context = {
"host": request.META.get('HTTP_HOST')
}
return render(request, "visualize/spike.html", context)
@api_view(['GET'])
def data_list(request):
if request.method == 'GET':
data = (
apps
.get_model("visualize", "Site")
.objects
.all()
)
serializer = DataSerializer(data, many=True)
json_data = renderers.JSONRenderer().render(serializer.data)
return Response(json_data)
@api_view(['GET'])
def data_detail(request, pk):
""" get data from specific site """
try:
data = (apps
.get_model("visualize", "Site")
.objects
.get(pk=pk))
except data.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = DataSerializer(data)
json_data = renderers.JSONRenderer().render(serializer.data)
return Response(json_data)
def get_queryset_of_bin_size(i):
bin_size_dict = {
"1": "bin_150_50",
"2": "bin_100_30",
"3": "bin_50_15",
}
try:
return (apps
.get_model("visualize", "BinnedData")
.objects
.all()
.only(bin_size_dict[i], bin_size_dict[i]+"_extents", "labels"))
except:
return Response(status=status.HTTP_404_NOT_FOUND)
@api_view(['GET'])
def anova_from_bin_list(request, i):
try:
data = ([bin.compute_ANOVA(i) for bin in apps
.get_model("visualize", "BinnedData")
.objects
.all()])
except:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
json_data = renderers.JSONRenderer().render(data)
return Response(json_data)
@api_view(['GET'])
def binned_filter_detail(request, i, pk):
""" Get specific data with specific bin size"""
bin_serializer_dict = {
"1": FirstBinSerializer,
"2": SecondBinSerializer,
"3": ThirdBinSerializer}
try:
data = (get_queryset_of_bin_size(i)
.get(pk=pk))
except:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = bin_serializer_dict[i](data)
json_data = renderers.JSONRenderer().render(serializer.data)
return Response(json_data)
@api_view(['GET'])
def binned_filter_list(request, i):
""" Get all data with specific bin size"""
bin_serializer_dict = {
"1": FirstBinSerializer,
"2": SecondBinSerializer,
"3": ThirdBinSerializer}
if request.method == 'GET':
data = get_queryset_of_bin_size(i)
serializer = bin_serializer_dict[i](data, many=True)
json_data = renderers.JSONRenderer().render(serializer.data)
return Response(json_data)
| <filename>neural_exploration/visualize/views.py
from django.apps import apps
from django.shortcuts import render
from rest_framework import renderers, viewsets, status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .serializers import DataSerializer, FirstBinSerializer, SecondBinSerializer, ThirdBinSerializer
def SpikeDataView(request):
context = {
"host": request.META.get('HTTP_HOST')
}
return render(request, "visualize/spike.html", context)
@api_view(['GET'])
def data_list(request):
if request.method == 'GET':
data = (
apps
.get_model("visualize", "Site")
.objects
.all()
)
serializer = DataSerializer(data, many=True)
json_data = renderers.JSONRenderer().render(serializer.data)
return Response(json_data)
@api_view(['GET'])
def data_detail(request, pk):
""" get data from specific site """
try:
data = (apps
.get_model("visualize", "Site")
.objects
.get(pk=pk))
except data.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = DataSerializer(data)
json_data = renderers.JSONRenderer().render(serializer.data)
return Response(json_data)
def get_queryset_of_bin_size(i):
bin_size_dict = {
"1": "bin_150_50",
"2": "bin_100_30",
"3": "bin_50_15",
}
try:
return (apps
.get_model("visualize", "BinnedData")
.objects
.all()
.only(bin_size_dict[i], bin_size_dict[i]+"_extents", "labels"))
except:
return Response(status=status.HTTP_404_NOT_FOUND)
@api_view(['GET'])
def anova_from_bin_list(request, i):
try:
data = ([bin.compute_ANOVA(i) for bin in apps
.get_model("visualize", "BinnedData")
.objects
.all()])
except:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
json_data = renderers.JSONRenderer().render(data)
return Response(json_data)
@api_view(['GET'])
def binned_filter_detail(request, i, pk):
""" Get specific data with specific bin size"""
bin_serializer_dict = {
"1": FirstBinSerializer,
"2": SecondBinSerializer,
"3": ThirdBinSerializer}
try:
data = (get_queryset_of_bin_size(i)
.get(pk=pk))
except:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = bin_serializer_dict[i](data)
json_data = renderers.JSONRenderer().render(serializer.data)
return Response(json_data)
@api_view(['GET'])
def binned_filter_list(request, i):
""" Get all data with specific bin size"""
bin_serializer_dict = {
"1": FirstBinSerializer,
"2": SecondBinSerializer,
"3": ThirdBinSerializer}
if request.method == 'GET':
data = get_queryset_of_bin_size(i)
serializer = bin_serializer_dict[i](data, many=True)
json_data = renderers.JSONRenderer().render(serializer.data)
return Response(json_data)
| en | 0.774586 | get data from specific site Get specific data with specific bin size Get all data with specific bin size | 2.219834 | 2 |
tests/conftest.py | mitrofun/skeletonWSGI | 0 | 6621069 | <gh_stars>0
import os
import sys
import pytest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
sys.path.insert(0, os.path.join(ROOT_DIR, 'core'))
@pytest.fixture
def root_path():
return os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
@pytest.fixture
def template(root_path):
return os.path.join(root_path, 'fixtures', 'template.html')
| import os
import sys
import pytest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
sys.path.insert(0, os.path.join(ROOT_DIR, 'core'))
@pytest.fixture
def root_path():
return os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
@pytest.fixture
def template(root_path):
return os.path.join(root_path, 'fixtures', 'template.html') | none | 1 | 2.153128 | 2 | |
app/auth/views.py | Jeffiy/zblog | 3 | 6621070 | <reponame>Jeffiy/zblog
#!/usr/bin/env python
# encoding:utf-8
from flask import render_template, redirect, request, url_for, flash
from flask.ext.login import login_user, login_required, logout_user, current_user
from app.auth import auth
from app.models import db, User
from app.auth.forms import LoginForm, ChangePwdForm
__author__ = 'zhangmm'
@auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('用户名或密码不正确.')
return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
flash('已退出.')
return redirect(url_for('main.index'))
@auth.before_app_request
def before_app_request():
if current_user.is_authenticated:
current_user.ping()
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
form = ChangePwdForm()
if form.validate_on_submit():
if current_user.verify_password(form.old_password.data):
current_user.password = form.password.data
db.session.add(current_user)
logout_user()
flash('密码已更改, 请重新登录.')
return redirect(url_for('auth.login'))
else:
flash('你的久密码不正确,请重新输入.')
return render_template('auth/change_password.html', form=form)
| #!/usr/bin/env python
# encoding:utf-8
from flask import render_template, redirect, request, url_for, flash
from flask.ext.login import login_user, login_required, logout_user, current_user
from app.auth import auth
from app.models import db, User
from app.auth.forms import LoginForm, ChangePwdForm
__author__ = 'zhangmm'
@auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('用户名或密码不正确.')
return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
flash('已退出.')
return redirect(url_for('main.index'))
@auth.before_app_request
def before_app_request():
if current_user.is_authenticated:
current_user.ping()
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
form = ChangePwdForm()
if form.validate_on_submit():
if current_user.verify_password(form.old_password.data):
current_user.password = form.password.data
db.session.add(current_user)
logout_user()
flash('密码已更改, 请重新登录.')
return redirect(url_for('auth.login'))
else:
flash('你的久密码不正确,请重新输入.')
return render_template('auth/change_password.html', form=form) | en | 0.333197 | #!/usr/bin/env python # encoding:utf-8 | 2.5352 | 3 |
tests/test_socfaker_file.py | priamai/soc-faker | 122 | 6621071 | <reponame>priamai/soc-faker
def test_socfaker_file_name(socfaker_fixture):
assert socfaker_fixture.file.name
def test_socfaker_file_extension(socfaker_fixture):
assert socfaker_fixture.file.extension
def test_socfaker_file_size(socfaker_fixture):
assert socfaker_fixture.file.size
def test_socfaker_file_timestamp(socfaker_fixture):
assert socfaker_fixture.file.timestamp
def test_socfaker_file_accessed_timestamp(socfaker_fixture):
assert socfaker_fixture.file.accessed_timestamp
def test_socfaker_file_hashes(socfaker_fixture):
assert socfaker_fixture.file.hashes
def test_socfaker_file_md5(socfaker_fixture):
assert socfaker_fixture.file.md5
def test_socfaker_file_sha1(socfaker_fixture):
assert socfaker_fixture.file.sha1
def test_socfaker_file_sha256(socfaker_fixture):
assert socfaker_fixture.file.sha256
def test_socfaker_file_full_path(socfaker_fixture):
assert socfaker_fixture.file.full_path
def test_socfaker_file_signed(socfaker_fixture):
assert socfaker_fixture.file.signed
def test_socfaker_file_signature(socfaker_fixture):
assert socfaker_fixture.file.signature
def test_socfaker_file_signature_status(socfaker_fixture):
assert socfaker_fixture.file.signature_status
def test_socfaker_file_directory(socfaker_fixture):
assert socfaker_fixture.file.directory
def test_socfaker_file_drive_letter(socfaker_fixture):
assert socfaker_fixture.file.drive_letter
def test_socfaker_file_gid(socfaker_fixture):
assert socfaker_fixture.file.gid
def test_socfaker_file_type(socfaker_fixture):
assert socfaker_fixture.file.type
def test_socfaker_file_mime_type(socfaker_fixture):
assert socfaker_fixture.file.mime_type
def test_socfaker_file_attributes(socfaker_fixture):
assert socfaker_fixture.file.attributes
def test_socfaker_file_version(socfaker_fixture):
assert socfaker_fixture.file.version
def test_socfaker_file_build_version(socfaker_fixture):
assert socfaker_fixture.file.build_version
def test_socfaker_file_checksum(socfaker_fixture):
assert socfaker_fixture.file.checksum
def test_socfaker_file_install_scope(socfaker_fixture):
assert socfaker_fixture.file.install_scope | def test_socfaker_file_name(socfaker_fixture):
assert socfaker_fixture.file.name
def test_socfaker_file_extension(socfaker_fixture):
assert socfaker_fixture.file.extension
def test_socfaker_file_size(socfaker_fixture):
assert socfaker_fixture.file.size
def test_socfaker_file_timestamp(socfaker_fixture):
assert socfaker_fixture.file.timestamp
def test_socfaker_file_accessed_timestamp(socfaker_fixture):
assert socfaker_fixture.file.accessed_timestamp
def test_socfaker_file_hashes(socfaker_fixture):
assert socfaker_fixture.file.hashes
def test_socfaker_file_md5(socfaker_fixture):
assert socfaker_fixture.file.md5
def test_socfaker_file_sha1(socfaker_fixture):
assert socfaker_fixture.file.sha1
def test_socfaker_file_sha256(socfaker_fixture):
assert socfaker_fixture.file.sha256
def test_socfaker_file_full_path(socfaker_fixture):
assert socfaker_fixture.file.full_path
def test_socfaker_file_signed(socfaker_fixture):
assert socfaker_fixture.file.signed
def test_socfaker_file_signature(socfaker_fixture):
assert socfaker_fixture.file.signature
def test_socfaker_file_signature_status(socfaker_fixture):
assert socfaker_fixture.file.signature_status
def test_socfaker_file_directory(socfaker_fixture):
assert socfaker_fixture.file.directory
def test_socfaker_file_drive_letter(socfaker_fixture):
assert socfaker_fixture.file.drive_letter
def test_socfaker_file_gid(socfaker_fixture):
assert socfaker_fixture.file.gid
def test_socfaker_file_type(socfaker_fixture):
assert socfaker_fixture.file.type
def test_socfaker_file_mime_type(socfaker_fixture):
assert socfaker_fixture.file.mime_type
def test_socfaker_file_attributes(socfaker_fixture):
assert socfaker_fixture.file.attributes
def test_socfaker_file_version(socfaker_fixture):
assert socfaker_fixture.file.version
def test_socfaker_file_build_version(socfaker_fixture):
assert socfaker_fixture.file.build_version
def test_socfaker_file_checksum(socfaker_fixture):
assert socfaker_fixture.file.checksum
def test_socfaker_file_install_scope(socfaker_fixture):
assert socfaker_fixture.file.install_scope | none | 1 | 1.761338 | 2 | |
hmc/algorithm/io/lib_data_geo_ascii.py | c-hydro/hmc | 0 | 6621072 | <reponame>c-hydro/hmc
"""
Class Features
Name: lib_data_geo_ascii
Author(s): <NAME> (<EMAIL>)
Date: '20200401'
Version: '3.0.0'
"""
#######################################################################################
# Libraries
import logging
import rasterio
import os
import numpy as np
from rasterio.crs import CRS
from collections import OrderedDict
from decimal import Decimal
from hmc.algorithm.io.lib_data_io_generic import create_darray_2d
from hmc.algorithm.utils.lib_utils_system import create_folder
from hmc.algorithm.utils.lib_utils_list import pad_or_truncate_list
from hmc.algorithm.utils.lib_utils_string import parse_row2string
from hmc.algorithm.default.lib_default_args import logger_name
from hmc.algorithm.default.lib_default_args import proj_epsg as proj_epsg_default
# Logging
log_stream = logging.getLogger(logger_name)
# Debug
import matplotlib.pylab as plt
#######################################################################################
# -------------------------------------------------------------------------------------
# Method to write file point section(s)
def write_data_point_section(file_name, file_data, file_cols_expected=10):
if isinstance(file_data, dict):
file_keys = list(file_data.keys())
if file_keys.__len__() >= 1:
for file_key in file_keys:
file_fields = file_data[file_key]
if isinstance(file_fields, dict):
cols = file_fields.__len__()
break
else:
log_stream.error(' ===> Fields obj is not in a dictionary format.')
raise NotImplementedError('Case not implemented yet')
else:
cols = None
log_stream.warning(' ===> Section list is equal to zero. No file section will be dumped.')
else:
log_stream.error(' ===> Section data obj is not in a dictionary format.')
raise NotImplementedError('Case not implemented yet')
# cols = file_data.__len__() --> previous
if cols != file_cols_expected:
log_stream.error(' ===> File sections columns ' + str(cols) + ' found != columns expected ' +
str(file_cols_expected))
raise IOError('File datasets are in a wrong format')
if cols is not None:
file_obj = []
for key in file_keys:
if isinstance(file_data, dict):
row = list(file_data[key].values())
else:
log_stream.error(' ===> Section data obj is not in a dictionary format.')
raise NotImplementedError('Case not implemented yet')
file_obj.append(row)
file_folder = os.path.split(file_name)[0]
create_folder(file_folder)
with open(file_name, "w", encoding='utf-8') as file:
for file_row in file_obj:
string_row = ' '.join(str(item) for item in file_row)
string_row = string_row + '\n'
file.write(string_row)
else:
log_stream.warning(' ===> Section data is None type. The file section will be undefined.')
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read file point section(s)
def read_data_point_section(file_name, section_cols_expected=8):
file_handle = open(file_name, 'r')
file_lines = file_handle.readlines()
file_handle.close()
if file_lines.__len__() >= 1:
string_parts = None
point_frame = OrderedDict()
for row_id, row_data in enumerate(file_lines):
# Read line by line
section_row = row_data.strip()
# Clean unnecessary string delimiter ""
section_row = section_row.replace('"', '')
# Split string to cell(s)
section_cols = section_row.split()
if section_row != '':
if string_parts is None:
string_parts = section_cols.__len__()
if section_cols.__len__() > string_parts:
log_stream.error(' ===> Parse section filename failed for filename ' + os.path.split(file_name)[1])
raise IOError(' ===> Section file in wrong format: [fields: "'
+ section_row + '" at line ' + str(row_id + 1) + ']')
if section_cols.__len__() < section_cols_expected:
section_cols = pad_or_truncate_list(section_cols, section_cols_expected)
section_idx_ji = [int(section_cols[0]), int(section_cols[1])]
section_domain = section_cols[2]
section_name = section_cols[3]
if isinstance(section_cols[4], (float, int)):
section_code = int(section_cols[4])
elif isinstance(section_cols[4], str):
section_code = section_cols[4]
else:
log_stream.error(
' ===> Parse section filename failed in filtering "section code" value "' + section_cols[4] +
'". Value types allowed are float, int and string')
raise IOError('Case not implemented yet')
section_drained_area = float(section_cols[5])
section_discharge_thr_alert = float(section_cols[6])
section_discharge_thr_alarm = float(section_cols[7])
section_id = int(row_id)
section_key = ':'.join([section_domain, section_name])
point_frame[section_key] = {}
point_frame[section_key]['section_id'] = section_id
point_frame[section_key]['section_name'] = section_name
point_frame[section_key]['section_domain'] = section_domain
point_frame[section_key]['section_idx_ji'] = section_idx_ji
point_frame[section_key]['section_code'] = section_code
point_frame[section_key]['section_drained_area'] = section_drained_area
point_frame[section_key]['section_discharge_thr_alert'] = section_discharge_thr_alert
point_frame[section_key]['section_discharge_thr_alarm'] = section_discharge_thr_alarm
else:
log_stream.error(' ===> Parse section filename failed for filename ' + os.path.split(file_name)[1])
raise IOError(' ===> Section file in empty format: [fields: "'
+ section_row + '" at line ' + str(row_id + 1) + ']')
else:
log_stream.warning(' ===> File info for sections was found; sections are equal to zero. Datasets is None')
log_stream.warning(' ===> Filename ' + os.path.split(file_name)[1])
point_frame = None
return point_frame
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read file point dam(s)
def read_data_point_dam(file_name, line_delimiter='#'):
    """Read an HMC dam point-information file.

    The file is a strictly positional ASCII file: the first two rows hold the
    number of dams and the total number of plants; each dam section is a fixed
    sequence of rows (header, name, j-i indexes, plant count, lake code,
    volumes, discharge, level, height, linear coefficient, storage curve),
    followed by one fixed five-row sequence per plant. Every row may carry a
    trailing inline comment introduced by ``line_delimiter``.

    :param file_name: path of the dam information file
    :param line_delimiter: character separating a value from its inline
        comment (default ``'#'``)
    :return: OrderedDict keyed by ``"dam_name:plant_name"`` (or ``dam_name``
        when the plant name is empty) with dam and plant attributes, or None
        when the file declares zero dams
    """
    file_handle = open(file_name, 'r')
    file_lines = file_handle.readlines()
    file_handle.close()
    # row_id is a cursor walking the file; the format is strictly positional,
    # so every read below advances it by exactly one row.
    row_id = 0
    dam_n = int(file_lines[row_id].split(line_delimiter)[0])
    row_id += 1
    # Total plant count over all dams: read to keep the cursor aligned,
    # not used afterwards.
    plant_n = int(file_lines[row_id].split(line_delimiter)[0])
    if dam_n > 0:
        point_frame = OrderedDict()
        for dam_id in range(0, dam_n):
            row_id += 1
            # Section header/separator row (value discarded)
            _ = parse_row2string(file_lines[row_id], line_delimiter)
            row_id += 1
            dam_name = parse_row2string(file_lines[row_id], line_delimiter)
            row_id += 1
            dam_idx_ji = list(map(int, parse_row2string(file_lines[row_id], line_delimiter).split()))
            row_id += 1
            dam_plant_n = int(parse_row2string(file_lines[row_id], line_delimiter))
            row_id += 1
            dam_cell_lake_code = int(parse_row2string(file_lines[row_id], line_delimiter))
            row_id += 1
            dam_volume_max = float(parse_row2string(file_lines[row_id], line_delimiter))
            row_id += 1
            dam_volume_init = float(parse_row2string(file_lines[row_id], line_delimiter))
            row_id += 1
            dam_discharge_max = float(parse_row2string(file_lines[row_id], line_delimiter))
            row_id += 1
            dam_level_max = float(parse_row2string(file_lines[row_id], line_delimiter))
            row_id += 1
            dam_h_max = float(parse_row2string(file_lines[row_id], line_delimiter))
            row_id += 1
            dam_lin_coeff = float(parse_row2string(file_lines[row_id], line_delimiter))
            row_id += 1
            dam_storage_curve = parse_row2string(file_lines[row_id], line_delimiter)
            # NOTE(review): frame entries are created only inside the plant
            # loop, so a dam declaring zero plants is never added to the
            # output -- confirm this is the intended behaviour.
            for plant_id in range(0, int(dam_plant_n)):
                row_id += 1
                plant_name = parse_row2string(file_lines[row_id], line_delimiter)
                row_id += 1
                plant_idx_ji = list(map(int, parse_row2string(file_lines[row_id], line_delimiter).split()))
                row_id += 1
                plant_tc = int(parse_row2string(file_lines[row_id], line_delimiter))
                row_id += 1
                plant_discharge_max = float(parse_row2string(file_lines[row_id], line_delimiter))
                row_id += 1
                plant_discharge_flag = int(parse_row2string(file_lines[row_id], line_delimiter))
                # Key is "dam:plant" when the plant is named, plain dam name otherwise
                if plant_name != '':
                    dam_key = ':'.join([dam_name, plant_name])
                else:
                    dam_key = dam_name
                point_frame[dam_key] = {}
                point_frame[dam_key]['dam_name'] = dam_name
                point_frame[dam_key]['dam_idx_ji'] = dam_idx_ji
                point_frame[dam_key]['dam_plant_n'] = dam_plant_n
                point_frame[dam_key]['dam_lake_code'] = dam_cell_lake_code
                point_frame[dam_key]['dam_volume_max'] = dam_volume_max
                point_frame[dam_key]['dam_volume_init'] = dam_volume_init
                point_frame[dam_key]['dam_discharge_max'] = dam_discharge_max
                point_frame[dam_key]['dam_level_max'] = dam_level_max
                point_frame[dam_key]['dam_h_max'] = dam_h_max
                point_frame[dam_key]['dam_lin_coeff'] = dam_lin_coeff
                point_frame[dam_key]['dam_storage_curve'] = dam_storage_curve
                point_frame[dam_key]['plant_name'] = plant_name
                point_frame[dam_key]['plant_idx_ji'] = plant_idx_ji
                point_frame[dam_key]['plant_tc'] = plant_tc
                point_frame[dam_key]['plant_discharge_max'] = plant_discharge_max
                point_frame[dam_key]['plant_discharge_flag'] = plant_discharge_flag
    else:
        log_stream.warning(' ===> File info for dams was found; dams are equal to zero. Datasets is None')
        log_stream.warning(' ===> Filename ' + os.path.split(file_name)[1])
        point_frame = None
    return point_frame
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read file point intake(s)
def read_data_point_intake(file_name, line_delimiter='#'):
    """Read an HMC intake point-information file.

    Strictly positional ASCII format: the first two rows hold the total
    catch count and the release count; each release section is a fixed
    sequence of rows (header, name, j-i indexes, catch count), followed by a
    fixed six-row sequence per catch. Rows may carry an inline comment
    introduced by ``line_delimiter``.

    :param file_name: path of the intake information file
    :param line_delimiter: character separating a value from its inline
        comment (default ``'#'``)
    :return: OrderedDict keyed by ``"release_name:catch_name"`` with release
        and catch attributes, or None when the file declares zero releases
    """
    file_handle = open(file_name, 'r')
    file_lines = file_handle.readlines()
    file_handle.close()
    # row_id is the positional cursor over the file rows
    row_id = 0
    # Total catch count: read to keep the cursor aligned, not used afterwards
    catch_n = int(file_lines[row_id].split(line_delimiter)[0])
    row_id += 1
    release_n = int(file_lines[row_id].split(line_delimiter)[0])
    if release_n > 0:
        point_frame = OrderedDict()
        for release_id in range(0, release_n):
            row_id += 1
            # Section header/separator row (value discarded)
            _ = parse_row2string(file_lines[row_id], line_delimiter)
            row_id += 1
            release_name = parse_row2string(file_lines[row_id], line_delimiter)
            row_id += 1
            release_idx_ji = list(map(int, parse_row2string(file_lines[row_id], line_delimiter).split()))
            row_id += 1
            release_catch_n = int(parse_row2string(file_lines[row_id], line_delimiter))
            # NOTE(review): frame entries are created only inside the catch
            # loop, so a release declaring zero catches is never added to the
            # output -- confirm this is the intended behaviour.
            for catch_id in range(0, int(release_catch_n)):
                row_id += 1
                catch_name = parse_row2string(file_lines[row_id], line_delimiter)
                row_id += 1
                catch_tc = int(parse_row2string(file_lines[row_id], line_delimiter))
                row_id += 1
                catch_idx_ji = list(map(int, parse_row2string(file_lines[row_id], line_delimiter).split()))
                row_id += 1
                catch_discharge_max = float(parse_row2string(file_lines[row_id], line_delimiter))
                row_id += 1
                catch_discharge_min = float(parse_row2string(file_lines[row_id], line_delimiter))
                row_id += 1
                catch_discharge_weight = float(parse_row2string(file_lines[row_id], line_delimiter))
                release_key = ':'.join([release_name, catch_name])
                point_frame[release_key] = {}
                point_frame[release_key]['release_name'] = release_name
                point_frame[release_key]['release_idx_ji'] = release_idx_ji
                point_frame[release_key]['release_catch_n'] = release_catch_n
                point_frame[release_key]['catch_name'] = catch_name
                point_frame[release_key]['catch_idx_ji'] = catch_idx_ji
                point_frame[release_key]['catch_tc'] = catch_tc
                point_frame[release_key]['catch_discharge_max'] = catch_discharge_max
                point_frame[release_key]['catch_discharge_min'] = catch_discharge_min
                point_frame[release_key]['catch_discharge_weight'] = catch_discharge_weight
    else:
        log_stream.warning(' ===> File info for intakes was found; intakes are equal to zero. Datasets is None')
        log_stream.warning(' ===> Filename ' + os.path.split(file_name)[1])
        point_frame = None
    return point_frame
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read file point joint(s)
def read_data_point_joint(file_name, line_delimiter='#'):
    """Read a joint point-information file (only the empty case is supported).

    :param file_name: path of the joint information file
    :param line_delimiter: character separating a value from its inline comment
    :return: None when the file declares zero joints
    :raises NotImplementedError: when the file declares one or more joints
    """
    with open(file_name, 'r') as file_handle:
        file_lines = file_handle.readlines()
    # The first row carries the joint count, optionally followed by an
    # inline comment introduced by the delimiter character.
    joint_n = int(file_lines[0].split(line_delimiter)[0])
    if joint_n > 0:
        log_stream.error(' ===> File info for joints was found; function to read joints is not implemented')
        raise NotImplementedError(' ===> Method is not implemented yet')
    log_stream.warning(' ===> File info for joints was found; joints are equal to zero. Datasets is None')
    log_stream.warning(' ===> Filename ' + os.path.split(file_name)[1])
    return None
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read file point lake(s)
def read_data_point_lake(file_name, line_delimiter='#'):
    """Read an HMC lake point-information file.

    Strictly positional ASCII format: the first row holds the lake count;
    each lake section is a fixed seven-row sequence (header, name, j-i
    indexes, cell code, minimum volume, initial volume, draining constant).
    Rows may carry an inline comment introduced by ``line_delimiter``.

    :param file_name: path of the lake information file
    :param line_delimiter: character separating a value from its inline comment
    :return: OrderedDict keyed by lake name with lake attributes, or None
        when the file declares zero lakes
    """
    with open(file_name, 'r') as file_handle:
        file_lines = file_handle.readlines()
    lake_n = int(file_lines[0].split(line_delimiter)[0])
    if lake_n <= 0:
        log_stream.warning(' ===> File info for lakes was found; lakes are equal to zero. Datasets is None')
        log_stream.warning(' ===> Filename ' + os.path.split(file_name)[1])
        return None
    # Walk the remaining rows sequentially; each "next" consumes one row
    line_iter = iter(file_lines[1:])
    point_frame = OrderedDict()
    for _ in range(lake_n):
        # Section header/separator row (value discarded)
        parse_row2string(next(line_iter), line_delimiter)
        lake_name = parse_row2string(next(line_iter), line_delimiter)
        lake_idx_ji = [int(part) for part in parse_row2string(next(line_iter), line_delimiter).split()]
        lake_cell_code = int(parse_row2string(next(line_iter), line_delimiter))
        lake_volume_min = float(parse_row2string(next(line_iter), line_delimiter))
        lake_volume_init = float(parse_row2string(next(line_iter), line_delimiter))
        lake_const_draining = float(parse_row2string(next(line_iter), line_delimiter))
        point_frame[lake_name] = {
            'lake_name': lake_name,
            'lake_idx_ji': lake_idx_ji,
            'lake_cell_code': lake_cell_code,
            'lake_volume_min': lake_volume_min,
            'lake_volume_init': lake_volume_init,
            'lake_constant_draining': lake_const_draining,
        }
    return point_frame
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to write an empty data point file
def write_data_point_undefined(file_path, element_n=2, element_init=0):
    """Write a placeholder data point file when none exists.

    Creates ``file_path`` (and its parent folder) containing ``element_n``
    lines, each holding ``element_init``. An existing file is left untouched.

    :param file_path: destination path of the placeholder file
    :param element_n: number of lines to write (default 2)
    :param element_init: value written on every line (default 0)
    """
    if os.path.exists(file_path):
        # Never overwrite an existing datasets file
        return
    folder_name, file_name = os.path.split(file_path)
    create_folder(folder_name)
    # Context manager guarantees the handle is closed even on write errors
    # (the original left the file open if a write raised)
    with open(file_path, 'w') as file_handle:
        file_handle.writelines(str(element_init) + '\n' for _ in range(element_n))
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to write an ascii grid file
def write_data_grid(file_name, file_data, file_ancillary=None):
    """Write a 2D data array to an ESRI ascii grid file (AAIGrid driver).

    :param file_name: destination path of the ascii grid file
    :param file_data: 2D numpy array of values to dump
    :param file_ancillary: dict of geographical metadata; must provide
        'bb_left', 'bb_bottom', 'res_lon', 'res_lat' and 'transform';
        optional keys: 'no_data' (default -9999), 'epsg' (default project
        projection) and 'decimal_precision' (default inferred from the first
        cell of file_data)
    :raises IOError: when mandatory metadata is missing or the epsg value has
        an unsupported type
    """
    def _get_mandatory(key):
        # Mandatory geographical metadata: fail loudly when missing
        if key in list(file_ancillary.keys()):
            return file_ancillary[key]
        log_stream.error(' ===> Geographical info "' + key + '" for writing ascii grid file is undefined.')
        raise IOError('Geographical info is mandatory. Check your static datasets.')
    bb_left = _get_mandatory('bb_left')
    bb_bottom = _get_mandatory('bb_bottom')
    res_lon = _get_mandatory('res_lon')
    res_lat = _get_mandatory('res_lat')
    transform = _get_mandatory('transform')
    no_data = file_ancillary['no_data'] if 'no_data' in file_ancillary else -9999
    # BUG FIX: the optional projection key was previously tested as 'espg'
    # (typo), so a provided 'epsg' entry was silently ignored (default used)
    # while an actual 'espg' entry raised a KeyError on the lookup.
    epsg = file_ancillary['epsg'] if 'epsg' in file_ancillary else proj_epsg_default
    if 'decimal_precision' in file_ancillary:
        decimal_precision = int(file_ancillary['decimal_precision'])
    else:
        # Infer the precision from the first cell value of the datasets
        decimal_num = Decimal(str(file_data[0][0]))
        decimal_precision = abs(decimal_num.as_tuple().exponent)
    if isinstance(epsg, int):
        crs = CRS.from_epsg(epsg)
    elif isinstance(epsg, str):
        crs = CRS.from_string(epsg)
    else:
        log_stream.error(' ===> Geographical info "epsg" defined by using an unsupported format.')
        raise IOError('Geographical EPSG must be in string format "EPSG:4326" or integer format "4326".')
    dset_meta = dict(driver='AAIGrid', height=file_data.shape[0], width=file_data.shape[1], crs=crs,
                     count=1, dtype=str(file_data.dtype), transform=transform, nodata=no_data,
                     decimal_precision=decimal_precision)
    with rasterio.open(file_name, 'w', **dset_meta) as dset_handle:
        dset_handle.write(file_data, 1)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read an ascii grid file
def read_data_grid(file_name, output_format='data_array', output_dtype='float32'):
    """Read an ascii grid (raster) file into a data array or a dictionary.

    :param file_name: path of the ascii grid file (opened through rasterio)
    :param output_format: 'data_array' returns a 2D array object built by
        create_darray_2d; 'dictionary' returns values, coordinates and
        geographical metadata
    :param output_dtype: declared output type; not referenced in this body --
        TODO confirm whether a dtype conversion is expected elsewhere
    :return: parsed object, or None when the file cannot be opened
    :raises NotImplementedError: for an unknown output_format
    """
    try:
        dset = rasterio.open(file_name)
        bounds = dset.bounds
        res = dset.res
        transform = dset.transform
        data = dset.read()
        # Fall back to the default projection when the file carries no CRS
        if dset.crs is None:
            crs = CRS.from_string(proj_epsg_default)
        else:
            crs = dset.crs
        # First band only
        values = data[0, :, :]
        decimal_round = 7
        # Cell-center coordinates derived from the cell-edge bounding box
        center_right = bounds.right - (res[0] / 2)
        center_left = bounds.left + (res[0] / 2)
        center_top = bounds.top - (res[1] / 2)
        center_bottom = bounds.bottom + (res[1] / 2)
        lon = np.arange(center_left, center_right + np.abs(res[0] / 2), np.abs(res[0]), float)
        lat = np.arange(center_bottom, center_top + np.abs(res[0] / 2), np.abs(res[1]), float)
        lons, lats = np.meshgrid(lon, lat)
        # Consistency checks: the rebuilt coordinate grid must match the
        # cell-center extents up to the rounding precision
        min_lon_round = round(np.min(lons), decimal_round)
        max_lon_round = round(np.max(lons), decimal_round)
        min_lat_round = round(np.min(lats), decimal_round)
        max_lat_round = round(np.max(lats), decimal_round)
        center_right_round = round(center_right, decimal_round)
        center_left_round = round(center_left, decimal_round)
        center_bottom_round = round(center_bottom, decimal_round)
        center_top_round = round(center_top, decimal_round)
        assert min_lon_round == center_left_round
        assert max_lon_round == center_right_round
        assert min_lat_round == center_bottom_round
        assert max_lat_round == center_top_round
        # Flip latitudes to follow the north-up raster row order
        lats = np.flipud(lats)
        if output_format == 'data_array':
            data_obj = create_darray_2d(values, lons, lats,
                                        coord_name_x='west_east', coord_name_y='south_north',
                                        dim_name_x='west_east', dim_name_y='south_north')
        elif output_format == 'dictionary':
            data_obj = {'values': values, 'longitude': lons, 'latitude': lats,
                        'transform': transform, 'crs': crs,
                        'bbox': [bounds.left, bounds.bottom, bounds.right, bounds.top],
                        'bb_left': bounds.left, 'bb_right': bounds.right,
                        'bb_top': bounds.top, 'bb_bottom': bounds.bottom,
                        'res_lon': res[0], 'res_lat': res[1]}
        else:
            log_stream.error(' ===> File static "' + file_name + '" output format not allowed')
            raise NotImplementedError('Case not implemented yet')
    except IOError as io_error:
        # Unreadable file: warn and return None instead of propagating
        data_obj = None
        log_stream.warning(' ===> File static in ascii grid was not correctly open with error "' + str(io_error) + '"')
        log_stream.warning(' ===> Filename "' + os.path.split(file_name)[1] + '"')
    return data_obj
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to load an ascii vector file
def read_data_vector(file_name):
    """Load an ascii vector file holding one float value per line.

    :param file_name: path of the ascii vector file
    :return: list of floats, one per file row
    """
    with open(file_name, 'r') as file_handle:
        raw_rows = file_handle.readlines()
    # Strip the trailing newline of each row before the numeric conversion
    return [float(raw_row.strip('\n')) for raw_row in raw_rows]
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read a raster ascii file
def read_data_raster(filename_reference):
    """Read a raster ascii file and return values plus geographical metadata.

    Same grid-rebuild logic as read_data_grid, but always returns the
    dictionary form and does not handle open errors or CRS defaults.

    :param filename_reference: path of the raster file (opened through rasterio)
    :return: dict with 'values', 'longitude', 'latitude', 'transform',
        'bbox', edge bounds and resolutions
    """
    dset = rasterio.open(filename_reference)
    bounds = dset.bounds
    res = dset.res
    transform = dset.transform
    data = dset.read()
    # First band only
    values = data[0, :, :]
    decimal_round = 7
    # Cell-center coordinates derived from the cell-edge bounding box
    center_right = bounds.right - (res[0] / 2)
    center_left = bounds.left + (res[0] / 2)
    center_top = bounds.top - (res[1] / 2)
    center_bottom = bounds.bottom + (res[1] / 2)
    lon = np.arange(center_left, center_right + np.abs(res[0] / 2), np.abs(res[0]), float)
    lat = np.arange(center_bottom, center_top + np.abs(res[0] / 2), np.abs(res[1]), float)
    lons, lats = np.meshgrid(lon, lat)
    # Consistency checks: rebuilt grid must match the cell-center extents
    min_lon_round = round(np.min(lons), decimal_round)
    max_lon_round = round(np.max(lons), decimal_round)
    min_lat_round = round(np.min(lats), decimal_round)
    max_lat_round = round(np.max(lats), decimal_round)
    center_right_round = round(center_right, decimal_round)
    center_left_round = round(center_left, decimal_round)
    center_bottom_round = round(center_bottom, decimal_round)
    center_top_round = round(center_top, decimal_round)
    assert min_lon_round == center_left_round
    assert max_lon_round == center_right_round
    assert min_lat_round == center_bottom_round
    assert max_lat_round == center_top_round
    # Flip latitudes to follow the north-up raster row order
    lats = np.flipud(lats)
    obj = {'values': values, 'longitude': lons, 'latitude': lats,
           'transform': transform, 'bbox': [bounds.left, bounds.bottom, bounds.right, bounds.top],
           'bb_left': bounds.left, 'bb_right': bounds.right,
           'bb_top': bounds.top, 'bb_bottom': bounds.bottom,
           'res_lon': res[0], 'res_lat': res[1]}
    return obj
# -------------------------------------------------------------------------------------
| """
Class Features
Name: lib_data_geo_ascii
Author(s): <NAME> (<EMAIL>)
Date: '20200401'
Version: '3.0.0'
"""
#######################################################################################
# Libraries
import logging
import rasterio
import os
import numpy as np
from rasterio.crs import CRS
from collections import OrderedDict
from decimal import Decimal
from hmc.algorithm.io.lib_data_io_generic import create_darray_2d
from hmc.algorithm.utils.lib_utils_system import create_folder
from hmc.algorithm.utils.lib_utils_list import pad_or_truncate_list
from hmc.algorithm.utils.lib_utils_string import parse_row2string
from hmc.algorithm.default.lib_default_args import logger_name
from hmc.algorithm.default.lib_default_args import proj_epsg as proj_epsg_default
# Logging
log_stream = logging.getLogger(logger_name)
# Debug
import matplotlib.pylab as plt
#######################################################################################
# -------------------------------------------------------------------------------------
# Method to write file point section(s)
def write_data_point_section(file_name, file_data, file_cols_expected=10):
    """Write section point information to an ascii file, one section per row.

    Each section's field values are joined with single spaces and written as
    one row, in the dictionary's key order.

    :param file_name: destination path of the section file
    :param file_data: dict of {section_key: {field_name: value, ...}}
    :param file_cols_expected: number of fields each section must provide
    :raises NotImplementedError: when file_data (or a section entry) is not a dict
    :raises IOError: when a section does not carry the expected field count
    """
    if not isinstance(file_data, dict):
        log_stream.error(' ===> Section data obj is not in a dictionary format.')
        raise NotImplementedError('Case not implemented yet')
    file_keys = list(file_data.keys())
    cols = None
    if file_keys.__len__() >= 1:
        # Field count is taken from the first (dict-valued) section entry
        for file_key in file_keys:
            file_fields = file_data[file_key]
            if isinstance(file_fields, dict):
                cols = file_fields.__len__()
                break
            else:
                log_stream.error(' ===> Fields obj is not in a dictionary format.')
                raise NotImplementedError('Case not implemented yet')
    else:
        log_stream.warning(' ===> Section list is equal to zero. No file section will be dumped.')
    if cols is None:
        # BUG FIX: previously the column check also ran when no section was
        # available (cols is None) and wrongly raised an IOError; an empty
        # datasets object now only produces the warnings and dumps nothing.
        log_stream.warning(' ===> Section data is None type. The file section will be undefined.')
        return
    if cols != file_cols_expected:
        log_stream.error(' ===> File sections columns ' + str(cols) + ' found != columns expected ' +
                         str(file_cols_expected))
        raise IOError('File datasets are in a wrong format')
    file_obj = []
    for key in file_keys:
        file_obj.append(list(file_data[key].values()))
    file_folder = os.path.split(file_name)[0]
    create_folder(file_folder)
    with open(file_name, "w", encoding='utf-8') as file:
        for file_row in file_obj:
            string_row = ' '.join(str(item) for item in file_row)
            file.write(string_row + '\n')
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read file point section(s)
def read_data_point_section(file_name, section_cols_expected=8):
    """Read a section point-information file, one section per row.

    Each non-empty row carries whitespace-separated fields in the fixed order:
    j index, i index, domain, name, code, drained area, alert discharge
    threshold, alarm discharge threshold. Rows shorter than
    ``section_cols_expected`` are padded/truncated to that length; a row
    longer than the first row's field count is treated as a format error.

    :param file_name: path of the section file
    :param section_cols_expected: expected number of fields per row (default 8)
    :return: OrderedDict keyed by ``"domain:name"`` with section attributes,
        or None when the file has no rows at all
    :raises IOError: on malformed or empty rows
    """
    file_handle = open(file_name, 'r')
    file_lines = file_handle.readlines()
    file_handle.close()
    if file_lines.__len__() >= 1:
        # Field count of the first non-empty row; later rows must not exceed it
        string_parts = None
        point_frame = OrderedDict()
        for row_id, row_data in enumerate(file_lines):
            # Read line by line
            section_row = row_data.strip()
            # Clean unnecessary string delimiter ""
            section_row = section_row.replace('"', '')
            # Split string to cell(s)
            section_cols = section_row.split()
            if section_row != '':
                if string_parts is None:
                    string_parts = section_cols.__len__()
                if section_cols.__len__() > string_parts:
                    log_stream.error(' ===> Parse section filename failed for filename ' + os.path.split(file_name)[1])
                    raise IOError(' ===> Section file in wrong format: [fields: "'
                                  + section_row + '" at line ' + str(row_id + 1) + ']')
                # Pad short rows up to the expected column count
                if section_cols.__len__() < section_cols_expected:
                    section_cols = pad_or_truncate_list(section_cols, section_cols_expected)
                section_idx_ji = [int(section_cols[0]), int(section_cols[1])]
                section_domain = section_cols[2]
                section_name = section_cols[3]
                # Section code: keep numeric codes as int, otherwise as string
                if isinstance(section_cols[4], (float, int)):
                    section_code = int(section_cols[4])
                elif isinstance(section_cols[4], str):
                    section_code = section_cols[4]
                else:
                    log_stream.error(
                        ' ===> Parse section filename failed in filtering "section code" value "' + section_cols[4] +
                        '". Value types allowed are float, int and string')
                    raise IOError('Case not implemented yet')
                section_drained_area = float(section_cols[5])
                section_discharge_thr_alert = float(section_cols[6])
                section_discharge_thr_alarm = float(section_cols[7])
                # Section id is the zero-based row index in the file
                section_id = int(row_id)
                section_key = ':'.join([section_domain, section_name])
                point_frame[section_key] = {}
                point_frame[section_key]['section_id'] = section_id
                point_frame[section_key]['section_name'] = section_name
                point_frame[section_key]['section_domain'] = section_domain
                point_frame[section_key]['section_idx_ji'] = section_idx_ji
                point_frame[section_key]['section_code'] = section_code
                point_frame[section_key]['section_drained_area'] = section_drained_area
                point_frame[section_key]['section_discharge_thr_alert'] = section_discharge_thr_alert
                point_frame[section_key]['section_discharge_thr_alarm'] = section_discharge_thr_alarm
            else:
                # Empty rows are not tolerated inside the section file
                log_stream.error(' ===> Parse section filename failed for filename ' + os.path.split(file_name)[1])
                raise IOError(' ===> Section file in empty format: [fields: "'
                              + section_row + '" at line ' + str(row_id + 1) + ']')
    else:
        log_stream.warning(' ===> File info for sections was found; sections are equal to zero. Datasets is None')
        log_stream.warning(' ===> Filename ' + os.path.split(file_name)[1])
        point_frame = None
    return point_frame
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read file point dam(s)
def read_data_point_dam(file_name, line_delimiter='#'):
    """Read an HMC dam point-information file.

    The file is a strictly positional ASCII file: the first two rows hold the
    number of dams and the total number of plants; each dam section is a fixed
    sequence of rows (header, name, j-i indexes, plant count, lake code,
    volumes, discharge, level, height, linear coefficient, storage curve),
    followed by one fixed five-row sequence per plant. Every row may carry a
    trailing inline comment introduced by ``line_delimiter``.

    :param file_name: path of the dam information file
    :param line_delimiter: character separating a value from its inline
        comment (default ``'#'``)
    :return: OrderedDict keyed by ``"dam_name:plant_name"`` (or ``dam_name``
        when the plant name is empty) with dam and plant attributes, or None
        when the file declares zero dams
    """
    file_handle = open(file_name, 'r')
    file_lines = file_handle.readlines()
    file_handle.close()
    # row_id is a cursor walking the file; the format is strictly positional,
    # so every read below advances it by exactly one row.
    row_id = 0
    dam_n = int(file_lines[row_id].split(line_delimiter)[0])
    row_id += 1
    # Total plant count over all dams: read to keep the cursor aligned,
    # not used afterwards.
    plant_n = int(file_lines[row_id].split(line_delimiter)[0])
    if dam_n > 0:
        point_frame = OrderedDict()
        for dam_id in range(0, dam_n):
            row_id += 1
            # Section header/separator row (value discarded)
            _ = parse_row2string(file_lines[row_id], line_delimiter)
            row_id += 1
            dam_name = parse_row2string(file_lines[row_id], line_delimiter)
            row_id += 1
            dam_idx_ji = list(map(int, parse_row2string(file_lines[row_id], line_delimiter).split()))
            row_id += 1
            dam_plant_n = int(parse_row2string(file_lines[row_id], line_delimiter))
            row_id += 1
            dam_cell_lake_code = int(parse_row2string(file_lines[row_id], line_delimiter))
            row_id += 1
            dam_volume_max = float(parse_row2string(file_lines[row_id], line_delimiter))
            row_id += 1
            dam_volume_init = float(parse_row2string(file_lines[row_id], line_delimiter))
            row_id += 1
            dam_discharge_max = float(parse_row2string(file_lines[row_id], line_delimiter))
            row_id += 1
            dam_level_max = float(parse_row2string(file_lines[row_id], line_delimiter))
            row_id += 1
            dam_h_max = float(parse_row2string(file_lines[row_id], line_delimiter))
            row_id += 1
            dam_lin_coeff = float(parse_row2string(file_lines[row_id], line_delimiter))
            row_id += 1
            dam_storage_curve = parse_row2string(file_lines[row_id], line_delimiter)
            # NOTE(review): frame entries are created only inside the plant
            # loop, so a dam declaring zero plants is never added to the
            # output -- confirm this is the intended behaviour.
            for plant_id in range(0, int(dam_plant_n)):
                row_id += 1
                plant_name = parse_row2string(file_lines[row_id], line_delimiter)
                row_id += 1
                plant_idx_ji = list(map(int, parse_row2string(file_lines[row_id], line_delimiter).split()))
                row_id += 1
                plant_tc = int(parse_row2string(file_lines[row_id], line_delimiter))
                row_id += 1
                plant_discharge_max = float(parse_row2string(file_lines[row_id], line_delimiter))
                row_id += 1
                plant_discharge_flag = int(parse_row2string(file_lines[row_id], line_delimiter))
                # Key is "dam:plant" when the plant is named, plain dam name otherwise
                if plant_name != '':
                    dam_key = ':'.join([dam_name, plant_name])
                else:
                    dam_key = dam_name
                point_frame[dam_key] = {}
                point_frame[dam_key]['dam_name'] = dam_name
                point_frame[dam_key]['dam_idx_ji'] = dam_idx_ji
                point_frame[dam_key]['dam_plant_n'] = dam_plant_n
                point_frame[dam_key]['dam_lake_code'] = dam_cell_lake_code
                point_frame[dam_key]['dam_volume_max'] = dam_volume_max
                point_frame[dam_key]['dam_volume_init'] = dam_volume_init
                point_frame[dam_key]['dam_discharge_max'] = dam_discharge_max
                point_frame[dam_key]['dam_level_max'] = dam_level_max
                point_frame[dam_key]['dam_h_max'] = dam_h_max
                point_frame[dam_key]['dam_lin_coeff'] = dam_lin_coeff
                point_frame[dam_key]['dam_storage_curve'] = dam_storage_curve
                point_frame[dam_key]['plant_name'] = plant_name
                point_frame[dam_key]['plant_idx_ji'] = plant_idx_ji
                point_frame[dam_key]['plant_tc'] = plant_tc
                point_frame[dam_key]['plant_discharge_max'] = plant_discharge_max
                point_frame[dam_key]['plant_discharge_flag'] = plant_discharge_flag
    else:
        log_stream.warning(' ===> File info for dams was found; dams are equal to zero. Datasets is None')
        log_stream.warning(' ===> Filename ' + os.path.split(file_name)[1])
        point_frame = None
    return point_frame
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read file point intake(s)
def read_data_point_intake(file_name, line_delimiter='#'):
    """Read an HMC intake point-information file.

    Strictly positional ASCII format: the first two rows hold the total
    catch count and the release count; each release section is a fixed
    sequence of rows (header, name, j-i indexes, catch count), followed by a
    fixed six-row sequence per catch. Rows may carry an inline comment
    introduced by ``line_delimiter``.

    :param file_name: path of the intake information file
    :param line_delimiter: character separating a value from its inline
        comment (default ``'#'``)
    :return: OrderedDict keyed by ``"release_name:catch_name"`` with release
        and catch attributes, or None when the file declares zero releases
    """
    file_handle = open(file_name, 'r')
    file_lines = file_handle.readlines()
    file_handle.close()
    # row_id is the positional cursor over the file rows
    row_id = 0
    # Total catch count: read to keep the cursor aligned, not used afterwards
    catch_n = int(file_lines[row_id].split(line_delimiter)[0])
    row_id += 1
    release_n = int(file_lines[row_id].split(line_delimiter)[0])
    if release_n > 0:
        point_frame = OrderedDict()
        for release_id in range(0, release_n):
            row_id += 1
            # Section header/separator row (value discarded)
            _ = parse_row2string(file_lines[row_id], line_delimiter)
            row_id += 1
            release_name = parse_row2string(file_lines[row_id], line_delimiter)
            row_id += 1
            release_idx_ji = list(map(int, parse_row2string(file_lines[row_id], line_delimiter).split()))
            row_id += 1
            release_catch_n = int(parse_row2string(file_lines[row_id], line_delimiter))
            # NOTE(review): frame entries are created only inside the catch
            # loop, so a release declaring zero catches is never added to the
            # output -- confirm this is the intended behaviour.
            for catch_id in range(0, int(release_catch_n)):
                row_id += 1
                catch_name = parse_row2string(file_lines[row_id], line_delimiter)
                row_id += 1
                catch_tc = int(parse_row2string(file_lines[row_id], line_delimiter))
                row_id += 1
                catch_idx_ji = list(map(int, parse_row2string(file_lines[row_id], line_delimiter).split()))
                row_id += 1
                catch_discharge_max = float(parse_row2string(file_lines[row_id], line_delimiter))
                row_id += 1
                catch_discharge_min = float(parse_row2string(file_lines[row_id], line_delimiter))
                row_id += 1
                catch_discharge_weight = float(parse_row2string(file_lines[row_id], line_delimiter))
                release_key = ':'.join([release_name, catch_name])
                point_frame[release_key] = {}
                point_frame[release_key]['release_name'] = release_name
                point_frame[release_key]['release_idx_ji'] = release_idx_ji
                point_frame[release_key]['release_catch_n'] = release_catch_n
                point_frame[release_key]['catch_name'] = catch_name
                point_frame[release_key]['catch_idx_ji'] = catch_idx_ji
                point_frame[release_key]['catch_tc'] = catch_tc
                point_frame[release_key]['catch_discharge_max'] = catch_discharge_max
                point_frame[release_key]['catch_discharge_min'] = catch_discharge_min
                point_frame[release_key]['catch_discharge_weight'] = catch_discharge_weight
    else:
        log_stream.warning(' ===> File info for intakes was found; intakes are equal to zero. Datasets is None')
        log_stream.warning(' ===> Filename ' + os.path.split(file_name)[1])
        point_frame = None
    return point_frame
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read file point joint(s)
def read_data_point_joint(file_name, line_delimiter='#'):
    """Read a joint point-information file (only the empty case is supported).

    :param file_name: path of the joint information file
    :param line_delimiter: character separating a value from its inline comment
    :return: None when the file declares zero joints
    :raises NotImplementedError: when the file declares one or more joints
    """
    with open(file_name, 'r') as file_handle:
        file_lines = file_handle.readlines()
    # The first row carries the joint count, optionally followed by an
    # inline comment introduced by the delimiter character.
    joint_n = int(file_lines[0].split(line_delimiter)[0])
    if joint_n > 0:
        log_stream.error(' ===> File info for joints was found; function to read joints is not implemented')
        raise NotImplementedError(' ===> Method is not implemented yet')
    log_stream.warning(' ===> File info for joints was found; joints are equal to zero. Datasets is None')
    log_stream.warning(' ===> Filename ' + os.path.split(file_name)[1])
    return None
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read file point lake(s)
def read_data_point_lake(file_name, line_delimiter='#'):
    """Read an HMC lake point-information file.

    Strictly positional ASCII format: the first row holds the lake count;
    each lake section is a fixed seven-row sequence (header, name, j-i
    indexes, cell code, minimum volume, initial volume, draining constant).
    Rows may carry an inline comment introduced by ``line_delimiter``.

    :param file_name: path of the lake information file
    :param line_delimiter: character separating a value from its inline comment
    :return: OrderedDict keyed by lake name with lake attributes, or None
        when the file declares zero lakes
    """
    with open(file_name, 'r') as file_handle:
        file_lines = file_handle.readlines()
    lake_n = int(file_lines[0].split(line_delimiter)[0])
    if lake_n <= 0:
        log_stream.warning(' ===> File info for lakes was found; lakes are equal to zero. Datasets is None')
        log_stream.warning(' ===> Filename ' + os.path.split(file_name)[1])
        return None
    # Walk the remaining rows sequentially; each "next" consumes one row
    line_iter = iter(file_lines[1:])
    point_frame = OrderedDict()
    for _ in range(lake_n):
        # Section header/separator row (value discarded)
        parse_row2string(next(line_iter), line_delimiter)
        lake_name = parse_row2string(next(line_iter), line_delimiter)
        lake_idx_ji = [int(part) for part in parse_row2string(next(line_iter), line_delimiter).split()]
        lake_cell_code = int(parse_row2string(next(line_iter), line_delimiter))
        lake_volume_min = float(parse_row2string(next(line_iter), line_delimiter))
        lake_volume_init = float(parse_row2string(next(line_iter), line_delimiter))
        lake_const_draining = float(parse_row2string(next(line_iter), line_delimiter))
        point_frame[lake_name] = {
            'lake_name': lake_name,
            'lake_idx_ji': lake_idx_ji,
            'lake_cell_code': lake_cell_code,
            'lake_volume_min': lake_volume_min,
            'lake_volume_init': lake_volume_init,
            'lake_constant_draining': lake_const_draining,
        }
    return point_frame
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to write an empty data point file
def write_data_point_undefined(file_path, element_n=2, element_init=0):
    """Write a placeholder data point file when none exists.

    Creates ``file_path`` (and its parent folder) containing ``element_n``
    lines, each holding ``element_init``. An existing file is left untouched.

    :param file_path: destination path of the placeholder file
    :param element_n: number of lines to write (default 2)
    :param element_init: value written on every line (default 0)
    """
    if os.path.exists(file_path):
        # Never overwrite an existing datasets file
        return
    folder_name, file_name = os.path.split(file_path)
    create_folder(folder_name)
    # Context manager guarantees the handle is closed even on write errors
    # (the original left the file open if a write raised)
    with open(file_path, 'w') as file_handle:
        file_handle.writelines(str(element_init) + '\n' for _ in range(element_n))
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to write an ascii grid file
def write_data_grid(file_name, file_data, file_ancillary=None):
    """Write a 2D data array to an ESRI ascii grid file (AAIGrid driver).

    :param file_name: destination path of the ascii grid file
    :param file_data: 2D numpy array of values to dump
    :param file_ancillary: dict of geographical metadata; must provide
        'bb_left', 'bb_bottom', 'res_lon', 'res_lat' and 'transform';
        optional keys: 'no_data' (default -9999), 'epsg' (default project
        projection) and 'decimal_precision' (default inferred from the first
        cell of file_data)
    :raises IOError: when mandatory metadata is missing or the epsg value has
        an unsupported type
    """
    def _get_mandatory(key):
        # Mandatory geographical metadata: fail loudly when missing
        if key in list(file_ancillary.keys()):
            return file_ancillary[key]
        log_stream.error(' ===> Geographical info "' + key + '" for writing ascii grid file is undefined.')
        raise IOError('Geographical info is mandatory. Check your static datasets.')
    bb_left = _get_mandatory('bb_left')
    bb_bottom = _get_mandatory('bb_bottom')
    res_lon = _get_mandatory('res_lon')
    res_lat = _get_mandatory('res_lat')
    transform = _get_mandatory('transform')
    no_data = file_ancillary['no_data'] if 'no_data' in file_ancillary else -9999
    # BUG FIX: the optional projection key was previously tested as 'espg'
    # (typo), so a provided 'epsg' entry was silently ignored (default used)
    # while an actual 'espg' entry raised a KeyError on the lookup.
    epsg = file_ancillary['epsg'] if 'epsg' in file_ancillary else proj_epsg_default
    if 'decimal_precision' in file_ancillary:
        decimal_precision = int(file_ancillary['decimal_precision'])
    else:
        # Infer the precision from the first cell value of the datasets
        decimal_num = Decimal(str(file_data[0][0]))
        decimal_precision = abs(decimal_num.as_tuple().exponent)
    if isinstance(epsg, int):
        crs = CRS.from_epsg(epsg)
    elif isinstance(epsg, str):
        crs = CRS.from_string(epsg)
    else:
        log_stream.error(' ===> Geographical info "epsg" defined by using an unsupported format.')
        raise IOError('Geographical EPSG must be in string format "EPSG:4326" or integer format "4326".')
    dset_meta = dict(driver='AAIGrid', height=file_data.shape[0], width=file_data.shape[1], crs=crs,
                     count=1, dtype=str(file_data.dtype), transform=transform, nodata=no_data,
                     decimal_precision=decimal_precision)
    with rasterio.open(file_name, 'w', **dset_meta) as dset_handle:
        dset_handle.write(file_data, 1)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read an ascii grid file
def read_data_grid(file_name, output_format='data_array', output_dtype='float32'):
    """Read an ASCII grid file and return its values plus geographical info.

    Parameters
    ----------
    file_name : str
        Path of the raster file (any format readable by rasterio).
    output_format : str, optional
        'data_array' (default) wraps the values in a 2D DataArray via
        create_darray_2d; 'dictionary' returns a plain dict with values,
        coordinates, transform, crs, bounding box and resolutions.
    output_dtype : str, optional
        Currently unused; kept for backward compatibility with callers.

    Returns
    -------
    data_obj : xarray.DataArray or dict or None
        None when the file cannot be opened (a warning is logged).
    """
    try:
        dset = rasterio.open(file_name)
        bounds = dset.bounds
        res = dset.res
        transform = dset.transform
        data = dset.read()
        # Fall back to the module default projection when the file has no CRS
        if dset.crs is None:
            crs = CRS.from_string(proj_epsg_default)
        else:
            crs = dset.crs
        values = data[0, :, :]
        decimal_round = 7

        # Convert the cell-edge bounds to cell-center coordinates
        center_right = bounds.right - (res[0] / 2)
        center_left = bounds.left + (res[0] / 2)
        center_top = bounds.top - (res[1] / 2)
        center_bottom = bounds.bottom + (res[1] / 2)

        # The half-resolution margin on the stop value makes np.arange include
        # the last cell center despite floating-point rounding.
        lon = np.arange(center_left, center_right + np.abs(res[0] / 2), np.abs(res[0]), float)
        # Bug fix: the latitude margin must use the latitude resolution
        # (res[1]); the previous code used res[0], which is only correct
        # when cells are square.
        lat = np.arange(center_bottom, center_top + np.abs(res[1] / 2), np.abs(res[1]), float)
        lons, lats = np.meshgrid(lon, lat)

        # Sanity check: the reconstructed grid must span exactly the raster extent
        min_lon_round = round(np.min(lons), decimal_round)
        max_lon_round = round(np.max(lons), decimal_round)
        min_lat_round = round(np.min(lats), decimal_round)
        max_lat_round = round(np.max(lats), decimal_round)
        center_right_round = round(center_right, decimal_round)
        center_left_round = round(center_left, decimal_round)
        center_bottom_round = round(center_bottom, decimal_round)
        center_top_round = round(center_top, decimal_round)
        assert min_lon_round == center_left_round
        assert max_lon_round == center_right_round
        assert min_lat_round == center_bottom_round
        assert max_lat_round == center_top_round

        # Flip so latitude decreases with the row index, matching raster order
        lats = np.flipud(lats)

        if output_format == 'data_array':
            data_obj = create_darray_2d(values, lons, lats,
                                        coord_name_x='west_east', coord_name_y='south_north',
                                        dim_name_x='west_east', dim_name_y='south_north')
        elif output_format == 'dictionary':
            data_obj = {'values': values, 'longitude': lons, 'latitude': lats,
                        'transform': transform, 'crs': crs,
                        'bbox': [bounds.left, bounds.bottom, bounds.right, bounds.top],
                        'bb_left': bounds.left, 'bb_right': bounds.right,
                        'bb_top': bounds.top, 'bb_bottom': bounds.bottom,
                        'res_lon': res[0], 'res_lat': res[1]}
        else:
            log_stream.error(' ===> File static "' + file_name + '" output format not allowed')
            raise NotImplementedError('Case not implemented yet')
    except IOError as io_error:
        # rasterio raises subclasses of OSError/IOError on unreadable files;
        # return None and log instead of propagating
        data_obj = None
        log_stream.warning(' ===> File static in ascii grid was not correctly open with error "' + str(io_error) + '"')
        log_stream.warning(' ===> Filename "' + os.path.split(file_name)[1] + '"')
    return data_obj
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to load an ascii vector file
def read_data_vector(file_name):
    """Read an ASCII file containing one numeric value per line.

    Parameters
    ----------
    file_name : str
        Path of the text file.

    Returns
    -------
    list of float
        Values in file order; blank lines are skipped.
    """
    # The context manager guarantees the handle is closed even on error;
    # float() already tolerates surrounding whitespace/newlines, and empty
    # (e.g. trailing) lines are skipped instead of crashing the parse.
    with open(file_name, 'r') as file_handle:
        vector_frame = [float(line) for line in file_handle if line.strip()]
    return vector_frame
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read a raster ascii file
def read_data_raster(filename_reference):
    """Read a raster file and return values plus geographical metadata as a dict.

    Unlike read_data_grid, errors are not caught here and the CRS is not
    included in the returned dictionary.

    Parameters
    ----------
    filename_reference : str
        Path of the raster file (any format readable by rasterio).

    Returns
    -------
    dict
        Keys: values, longitude, latitude, transform, bbox, bb_left,
        bb_right, bb_top, bb_bottom, res_lon, res_lat.
    """
    dset = rasterio.open(filename_reference)
    bounds = dset.bounds
    res = dset.res
    transform = dset.transform
    data = dset.read()
    values = data[0, :, :]
    decimal_round = 7

    # Convert the cell-edge bounds to cell-center coordinates
    center_right = bounds.right - (res[0] / 2)
    center_left = bounds.left + (res[0] / 2)
    center_top = bounds.top - (res[1] / 2)
    center_bottom = bounds.bottom + (res[1] / 2)

    # Half-resolution margin on the stop value so np.arange includes the
    # last cell center despite floating-point rounding.
    lon = np.arange(center_left, center_right + np.abs(res[0] / 2), np.abs(res[0]), float)
    # Bug fix: use the latitude resolution (res[1]) for the stop margin; the
    # previous code used res[0], which is only correct for square cells.
    lat = np.arange(center_bottom, center_top + np.abs(res[1] / 2), np.abs(res[1]), float)
    lons, lats = np.meshgrid(lon, lat)

    # Sanity check: the reconstructed grid must span exactly the raster extent
    min_lon_round = round(np.min(lons), decimal_round)
    max_lon_round = round(np.max(lons), decimal_round)
    min_lat_round = round(np.min(lats), decimal_round)
    max_lat_round = round(np.max(lats), decimal_round)
    center_right_round = round(center_right, decimal_round)
    center_left_round = round(center_left, decimal_round)
    center_bottom_round = round(center_bottom, decimal_round)
    center_top_round = round(center_top, decimal_round)
    assert min_lon_round == center_left_round
    assert max_lon_round == center_right_round
    assert min_lat_round == center_bottom_round
    assert max_lat_round == center_top_round

    # Flip so latitude decreases with the row index, matching raster order
    lats = np.flipud(lats)
    obj = {'values': values, 'longitude': lons, 'latitude': lats,
           'transform': transform, 'bbox': [bounds.left, bounds.bottom, bounds.right, bounds.top],
           'bb_left': bounds.left, 'bb_right': bounds.right,
           'bb_top': bounds.top, 'bb_bottom': bounds.bottom,
           'res_lon': res[0], 'res_lat': res[1]}
    return obj
# ------------------------------------------------------------------------------------- | en | 0.257533 | Class Features Name: lib_data_geo_ascii Author(s): <NAME> (<EMAIL>) Date: '20200401' Version: '3.0.0' ####################################################################################### # Libraries # Logging # Debug ####################################################################################### # ------------------------------------------------------------------------------------- # Method to write file point section(s) # cols = file_data.__len__() --> previous # ------------------------------------------------------------------------------------- # ------------------------------------------------------------------------------------- # Method to read file point section(s) # Read line by line # Clean unnecessary string delimiter "" # Split string to cell(s) # ------------------------------------------------------------------------------------- # ------------------------------------------------------------------------------------- # Method to read file point dam(s) # ------------------------------------------------------------------------------------- # ------------------------------------------------------------------------------------- # Method to read file point intake(s) # ------------------------------------------------------------------------------------- # ------------------------------------------------------------------------------------- # Method to read file point joint(s) # ------------------------------------------------------------------------------------- # ------------------------------------------------------------------------------------- # Method to read file point lake(s) # ------------------------------------------------------------------------------------- # ------------------------------------------------------------------------------------- # Method to write an empty data point file # 
------------------------------------------------------------------------------------- # ------------------------------------------------------------------------------------- # Method to write an ascii grid file # ------------------------------------------------------------------------------------- # ------------------------------------------------------------------------------------- # Method to read an ascii grid file # ------------------------------------------------------------------------------------- # ------------------------------------------------------------------------------------- # Method to load an ascii vector file # ------------------------------------------------------------------------------------- # ------------------------------------------------------------------------------------- # Method to read a raster ascii file # ------------------------------------------------------------------------------------- | 2.166425 | 2 |
images/forms.py | cebanauskes/ida_images | 0 | 6621073 | import requests
from urllib.request import urlopen
from urllib.error import HTTPError
from django import forms
from django.core.exceptions import ValidationError
from .models import Image
from .utils import valid_url_mimetype
class ImageForm(forms.ModelForm):
    """Form for adding an image, based on the Image model.

    Exactly one of the two fields (``url`` or ``image``) must be filled in.
    """

    class Meta:
        model = Image
        fields = ('url', 'image',)

    def clean(self):
        """Ensure exactly one of the two fields is filled in."""
        cleaned_data = self.cleaned_data
        url = cleaned_data.get('url')
        image = cleaned_data.get('image')
        # Reject both-filled and both-empty submissions (XOR check)
        if bool(url) == bool(image):
            raise ValidationError(
                'Только одно поле должно быть заполнено '
                'Либо проверьте правильность введенной ссылки')
        return cleaned_data

    def clean_url(self):
        """Validate that the URL is reachable and points to an image file.

        An empty URL is accepted here; clean() enforces that the other
        field is filled in instead.
        """
        url = self.cleaned_data.get('url')
        if not url:
            return url
        # stream=True avoids downloading the whole body just to read the
        # status code; a network failure becomes a form error instead of
        # an unhandled ConnectionError (HTTP 500 for the user).
        try:
            response = requests.get(url, stream=True, timeout=10)
        except requests.RequestException:
            raise ValidationError('Ссылка не работает, попробуйте другую')
        if response.status_code != 200:
            raise ValidationError('Ссылка не работает, попробуйте другую')
        if not valid_url_mimetype(url):
            raise ValidationError('Неправильное расширение файла (ожидается изображение)',)
        return url
class ResizeForm(forms.Form):
    """Form for changing the size of an image."""

    width = forms.IntegerField(
        max_value=10000, min_value=1, label='Ширина', required=False)
    height = forms.IntegerField(
        max_value=10000, min_value=1, label='Высота', required=False)

    def clean(self):
        """Require that at least one of the two dimensions is provided."""
        data = self.cleaned_data
        if data.get('width') is None and data.get('height') is None:
            raise ValidationError('Заполните хотя бы одно поле')
        return data
| import requests
from urllib.request import urlopen
from urllib.error import HTTPError
from django import forms
from django.core.exceptions import ValidationError
from .models import Image
from .utils import valid_url_mimetype
class ImageForm(forms.ModelForm):
    """Form for adding an image, based on the Image model."""
    class Meta:
        model = Image
        fields = ('url', 'image',)
    def clean(self):
        """Check that exactly one of the two fields (url or image) is filled in."""
        cleaned_data = self.cleaned_data
        url = cleaned_data.get('url')
        image = cleaned_data.get('image')
        # Reject both-filled and both-empty submissions
        if (url and image) or (not url and not image):
            raise ValidationError(
                'Только одно поле должно быть заполнено '
                'Либо проверьте правильность введенной ссылки')
        return cleaned_data
    def clean_url(self):
        """Check that the link works and that it points to an image file."""
        url = self.cleaned_data.get('url')
        # An empty URL is allowed here; clean() enforces that the other
        # field is filled in instead.
        if url == None or url == '':
            return url
        # NOTE(review): requests.get downloads the whole body just to read
        # the status code, and a network failure raises ConnectionError
        # instead of a form error — consider stream=True and a try/except.
        response = requests.get(url)
        if response.status_code != 200:
            raise ValidationError('Ссылка не работает, попробуйте другую')
        if not valid_url_mimetype(url):
            raise ValidationError('Неправильное расширение файла (ожидается изображение)',)
        return url
class ResizeForm(forms.Form):
    """Form for changing the size of an image."""
    width = forms.IntegerField(
        max_value=10000, min_value=1, label='Ширина', required=False)
    height = forms.IntegerField(
        max_value=10000, min_value=1, label='Высота', required=False)
    def clean(self):
        """Check that at least one of the two dimensions is provided."""
        cleaned_data = self.cleaned_data
        width = cleaned_data.get('width')
        height = cleaned_data.get('height')
        if width is None and height is None:
            raise ValidationError('Заполните хотя бы одно поле')
        return cleaned_data
| ru | 0.997711 | Форма для добавления иизображения на основе модели Image Проверяет, чтобы только одно поле было заполнено Проверяет, работоспособна ли ссылка и работоспособна ли ссылка Форма для изменения размера изображения Проверяет, чтобы хотя бы одно поле было заполнено | 2.873875 | 3 |
gogorat.py | talesmarra/GoGoRat | 2 | 6621074 | <gh_stars>1-10
#IA created to be integrated as a player in the PyRat Game.
#Winner of the competition between AI's in the first semester of 2019.
#This AI combines a Supervised Approach with a CGT approach in order to obtain maximum performance,
#not only against the greedy algorithm but also against other types of AI.
# The supervised learning model was trained using games of the greedy algorithm against itself,
#but also against a reinforcement learning algorithm trained against the greedy, in order to increase robustness.
#Please cite as you use this code.
import numpy as np
import random as rd
import pickle
import time
import joblib
MOVE_DOWN = 'D'
MOVE_LEFT = 'L'
MOVE_RIGHT = 'R'
MOVE_UP = 'U'
MIN_CHEESE_GT = 12
global model
# new location after a move
def move(location, move):
    """Return the new (x, y) location after applying one move character.

    Returns None for an unrecognised move, matching the original behavior.
    """
    deltas = {
        MOVE_UP: (0, 1),
        MOVE_DOWN: (0, -1),
        MOVE_LEFT: (-1, 0),
        MOVE_RIGHT: (1, 0),
    }
    if move in deltas:
        dx, dy = deltas[move]
        return (location[0] + dx, location[1] + dy)
#opponnent stategy
def distance(la, lb):
    """Manhattan (L1) distance between two (x, y) locations."""
    return abs(la[0] - lb[0]) + abs(la[1] - lb[1])
#simulates opponent
def turn_of_opponent(opponentLocation, piecesOfCheese):
    """Greedy opponent policy: one step toward the closest piece of cheese.

    Ties are broken by list order (first closest wins). With an empty cheese
    list the sentinel target (-1, -1) is used, matching the original code.
    """
    closest_poc = (-1, -1)
    if piecesOfCheese:
        # min() keeps the first element among equals, same as the strict
        # "<" comparison in the original loop
        closest_poc = min(piecesOfCheese,
                          key=lambda poc: distance(poc, opponentLocation))
    ax, ay = opponentLocation
    bx, by = closest_poc
    # Horizontal movement has priority over vertical movement
    if bx > ax:
        return MOVE_RIGHT
    if bx < ax:
        return MOVE_LEFT
    if by > ay:
        return MOVE_UP
    if by < ay:
        return MOVE_DOWN
# With this template, we integrate two ML approaches to acquire robustness against the greedy algorithm
#but also against other AI's
TEAM_NAME = "GoGoRat"
# We do not need preprocessing, so we let this function empty
def preprocessing(mazeMap, mazeWidth, mazeHeight, playerLocation, opponentLocation, piecesOfCheese, timeAllowed):
    """Load the pre-trained supervised classifier before the game starts.

    The model is stored in the module-level global ``model`` and used later
    by turn_supervised(); the .pkl file must ship alongside this script.
    """
    global model
    # Here we load the previously trained model
    model = joblib.load('trained_classifier_go.pkl')
# We use a recursive function that goes through the trees of possible plays
# It takes as arguments a given situation, and return a best target piece of cheese for the player, such that aiming to grab this piece of cheese will eventually lead to a maximum score. It also returns the corresponding score
def best_target(playerLocation, opponentLocation, playerScore, opponentScore, piecesOfCheese):
    """Exhaustive game-tree search over which cheese to aim for next.

    Recursively simulates the game (both players greedy) for every possible
    target cheese and returns (best_target, best_final_player_score). The
    sentinel target (-1, -1) is returned when the game is already decided.
    """
    # The game is over when no cheese is left or one player already holds
    # a strict majority of all pieces.
    totalPieces = len(piecesOfCheese) + playerScore + opponentScore
    if (playerScore > totalPieces / 2
            or opponentScore > totalPieces / 2
            or not piecesOfCheese):
        return (-1, -1), playerScore

    best_choice = (-1, -1)
    best_score = -1
    for candidate in piecesOfCheese:
        # Simulate until `candidate` is eaten, then recurse on the resulting
        # state; a copy keeps the cheese list of this level intact.
        next_state = simulate_game_until_target(
            candidate, playerLocation, opponentLocation,
            playerScore, opponentScore, piecesOfCheese.copy())
        _, outcome = best_target(*next_state)
        if outcome > best_score:
            best_score = outcome
            best_choice = candidate
    return best_choice, best_score
# Move the agent on the labyrinth using function move from aux and the different directions
# It suffices to move in the direction of the target.
# You should only run function move once and you can't move diagonally.
## Without loss of generality, we can suppose it gets there moving vertically first then horizontally
def updatePlayerLocation(target, playerLocation):
    """Advance the player one step toward target: vertical first, then horizontal."""
    px, py = playerLocation
    tx, ty = target
    if py != ty:
        step = MOVE_DOWN if ty < py else MOVE_UP
    elif tx < px:
        step = MOVE_LEFT
    else:
        step = MOVE_RIGHT
    return move(playerLocation, step)
#CHECK IF EITHER/BOTH PLAYERS ARE ON THE SAME SQUARE OF A CHEESE.
#If that is the case you have to remove the cheese from the piecesOfCheese list and
#add points to the score. The players get 1 point if they are alone on the square with a cheese.
#If both players are in the same square and there is a cheese on the square each player gets 0.5 points.
def checkEatCheese(playerLocation, opponentLocation, playerScore, opponentScore, piecesOfCheese):
    """Award points for any cheese sitting under either player.

    A shared square with cheese gives 0.5 point to each player; a cheese
    under a single player gives that player 1 point. Eaten cheeses are
    removed from `piecesOfCheese` in place.

    Returns the updated (playerScore, opponentScore) tuple.
    """
    if playerLocation == opponentLocation and playerLocation in piecesOfCheese:
        # Both players arrive on the same cheese: split the point
        piecesOfCheese.remove(playerLocation)
        playerScore += 0.5
        opponentScore += 0.5
    else:
        if playerLocation in piecesOfCheese:
            piecesOfCheese.remove(playerLocation)
            playerScore += 1
        if opponentLocation in piecesOfCheese:
            piecesOfCheese.remove(opponentLocation)
            opponentScore += 1
    return playerScore, opponentScore
#In this function we simulate what will happen until we reach the target
#You should use the two functions defined before
def simulate_game_until_target(target, playerLocation, opponentLocation, playerScore, opponentScore, piecesOfCheese):
    """Play both greedy policies forward until `target` has been eaten.

    The player heads straight for `target`; the opponent follows its own
    greedy policy. Mutates `piecesOfCheese` and returns the full resulting
    state tuple.
    """
    while target in piecesOfCheese:
        # Player moves one step toward the target...
        playerLocation = updatePlayerLocation(target, playerLocation)
        # ...and the opponent simultaneously moves toward its closest cheese
        opponent_step = turn_of_opponent(opponentLocation, piecesOfCheese)
        opponentLocation = move(opponentLocation, opponent_step)
        # Resolve any cheese under either player
        playerScore, opponentScore = checkEatCheese(
            playerLocation, opponentLocation, playerScore, opponentScore, piecesOfCheese)
    return playerLocation, opponentLocation, playerScore, opponentScore, piecesOfCheese
# During our turn we continue going to the next target, unless the piece of cheese it originally contained has been taken
# In such case, we compute the new best target to go to
current_target = (-1,-1)
def turn(mazeMap, mazeWidth, mazeHeight, playerLocation, opponentLocation, playerScore, opponentScore, piecesOfCheese, timeAllowed):
    """Hybrid policy: supervised model early game, exhaustive search endgame.

    While many cheeses remain the game tree is too large, so the trained
    classifier picks the move; once fewer than MIN_CHEESE_GT pieces are
    left, the exact search of best_target() takes over.
    """
    global current_target
    if len(piecesOfCheese) >= MIN_CHEESE_GT:
        return turn_supervised(mazeMap, mazeWidth, mazeHeight, playerLocation,
                               opponentLocation, playerScore, opponentScore,
                               piecesOfCheese, timeAllowed)
    # Endgame: only recompute the target once the current one has been eaten
    if current_target not in piecesOfCheese:
        current_target, _ = best_target(playerLocation, opponentLocation,
                                        playerScore, opponentScore, piecesOfCheese)
    px, py = playerLocation
    tx, ty = current_target
    # Step toward the target, vertical axis first
    if ty > py:
        return MOVE_UP
    if ty < py:
        return MOVE_DOWN
    if tx > px:
        return MOVE_RIGHT
    return MOVE_LEFT
def convert_input_2(player, maze, opponent, mazeHeight, mazeWidth, piecesOfCheese):
    """Build a single-layer canvas of cheese positions centered on the player.

    The canvas is (2*mazeHeight-1, 2*mazeWidth-1, 1) so any cheese on the
    maze fits regardless of where the player stands; cells holding a cheese
    are set to 1, everything else stays 0. `maze` and `opponent` are unused
    (kept for interface compatibility).
    """
    canvas = np.zeros((2 * mazeHeight - 1, 2 * mazeWidth - 1, 1))
    px, py = player
    # The player sits at the canvas center (offset_x, offset_y)
    offset_x, offset_y = mazeWidth - 1, mazeHeight - 1
    for cx, cy in piecesOfCheese:
        canvas[cy + offset_y - py, cx + offset_x - px, 0] = 1
    return canvas
def turn_supervised(mazeMap, mazeWidth, mazeHeight, playerLocation, opponentLocation, playerScore, opponentScore, piecesOfCheese, timeAllowed):
    """Pick the next move with the classifier loaded by preprocessing().

    The board is rendered as a player-centered canvas, flattened, and fed to
    the model; the predicted class index (0-3) selects L/R/U/D.
    """
    global model,input_tm1, action, score
    # Render the current board state as a player-centered canvas
    input_t = convert_input_2(playerLocation, mazeMap, opponentLocation, mazeHeight, mazeWidth, piecesOfCheese)
    # The classifier expects a single flattened sample: shape (1, n_features)
    output = model.predict(input_t.reshape(1,-1))
    action = output[0]
    # Map the predicted class index to a PyRat move character
    return [MOVE_LEFT, MOVE_RIGHT, MOVE_UP, MOVE_DOWN][action]
def postprocessing (mazeMap, mazeWidth, mazeHeight, playerLocation, opponentLocation, playerScore, opponentScore, piecesOfCheese, timeAllowed):
    """Nothing to clean up after the game; required by the PyRat interface."""
    pass
| #IA created to be integrated as a player in the PyRat Game.
#Winner of the competition between AI's in the first semester of 2019.
#This AI combines a Supervised Approach with a CGT approach in order to obtain maximum performance,
#not only against the greedy algorithm but also against other types of AI.
# The supervised learning model was trained using games of the greedy algorithm against itself,
#but also against a reinforcement learning algorithm trained against the greedy, in order to increase robustness.
#Please cite as you use this code.
import numpy as np
import random as rd
import pickle
import time
import joblib
MOVE_DOWN = 'D'
MOVE_LEFT = 'L'
MOVE_RIGHT = 'R'
MOVE_UP = 'U'
MIN_CHEESE_GT = 12
global model
# new location after a move
def move(location, move):
if move == MOVE_UP:
return (location[0], location[1] + 1)
if move == MOVE_DOWN:
return (location[0], location[1] - 1)
if move == MOVE_LEFT:
return (location[0] - 1, location[1])
if move == MOVE_RIGHT:
return (location[0] + 1, location[1])
#opponnent stategy
def distance(la, lb):
ax,ay = la
bx,by = lb
return abs(bx - ax) + abs(by - ay)
#simulates opponent
def turn_of_opponent(opponentLocation, piecesOfCheese):
closest_poc = (-1,-1)
best_distance = -1
for poc in piecesOfCheese:
if distance(poc, opponentLocation) < best_distance or best_distance == -1:
best_distance = distance(poc, opponentLocation)
closest_poc = poc
ax, ay = opponentLocation
bx, by = closest_poc
if bx > ax:
return MOVE_RIGHT
if bx < ax:
return MOVE_LEFT
if by > ay:
return MOVE_UP
if by < ay:
return MOVE_DOWN
pass
# With this template, we integrate two ML approaches to acquire robustness against the greedy algorithm
#but also against other AI's
TEAM_NAME = "GoGoRat"
# We do not need preprocessing, so we let this function empty
def preprocessing(mazeMap, mazeWidth, mazeHeight, playerLocation, opponentLocation, piecesOfCheese, timeAllowed):
global model
### Here we load the previously trained model
model = joblib.load('trained_classifier_go.pkl')
# We use a recursive function that goes through the trees of possible plays
# It takes as arguments a given situation, and return a best target piece of cheese for the player, such that aiming to grab this piece of cheese will eventually lead to a maximum score. It also returns the corresponding score
def best_target(playerLocation, opponentLocation, playerScore, opponentScore, piecesOfCheese):
# First we should check how many pieces of cheese each player has to see if the play is over. It is the case if no pieces of cheese are left, or if playerScore or opponentScore is more than half the total number playerScore + opponentScore + piecesOfCheese
totalPieces = len(piecesOfCheese) + playerScore + opponentScore
if playerScore > totalPieces / 2 or opponentScore > totalPieces / 2 or len(piecesOfCheese) == 0:
return (-1,-1), playerScore
# If the play is not over, then the player can aim for any of the remaining pieces of cheese
# So we will simulate the game to each of the pieces, which will then by recurrence test all
# the possible trees.
best_score_so_far = -1
best_target_so_far = (-1,-1)
for target in piecesOfCheese:
end_state = simulate_game_until_target(
target,playerLocation,opponentLocation,
playerScore,opponentScore,piecesOfCheese.copy())
_, score = best_target(*end_state)
if score > best_score_so_far:
best_score_so_far = score
best_target_so_far = target
return best_target_so_far, best_score_so_far
# Move the agent on the labyrinth using function move from aux and the different directions
# It suffices to move in the direction of the target.
# You should only run function move once and you can't move diagonally.
## Without loss of generality, we can suppose it gets there moving vertically first then horizontally
def updatePlayerLocation(target,playerLocation):
if playerLocation[1] != target[1]:
if target[1] < playerLocation[1]:
playerLocation = move(playerLocation, MOVE_DOWN)
else:
playerLocation = move(playerLocation, MOVE_UP)
elif target[0] < playerLocation[0]:
playerLocation = move(playerLocation, MOVE_LEFT)
else:
playerLocation = move(playerLocation, MOVE_RIGHT)
return playerLocation
#CHECK IF EITHER/BOTH PLAYERS ARE ON THE SAME SQUARE OF A CHEESE.
#If that is the case you have to remove the cheese from the piecesOfCheese list and
#add points to the score. The players get 1 point if they are alone on the square with a cheese.
#If both players are in the same square and there is a cheese on the square each player gets 0.5 points.
def checkEatCheese(playerLocation,opponentLocation,playerScore,opponentScore,piecesOfCheese):
if playerLocation in piecesOfCheese and playerLocation == opponentLocation:
playerScore = playerScore + 0.5
opponentScore = opponentScore + 0.5
piecesOfCheese.remove(playerLocation)
else:
if playerLocation in piecesOfCheese:
playerScore = playerScore + 1
piecesOfCheese.remove(playerLocation)
if opponentLocation in piecesOfCheese:
opponentScore = opponentScore + 1
piecesOfCheese.remove(opponentLocation)
return playerScore,opponentScore
#In this function we simulate what will happen until we reach the target
#You should use the two functions defined before
def simulate_game_until_target(target,playerLocation,opponentLocation,playerScore,opponentScore,piecesOfCheese):
#While the target cheese has not yet been eaten by either player
#We simulate how the game will evolve until that happens
while target in piecesOfCheese:
#Update playerLocation (position of your player) using updatePlayerLocation
playerLocation = updatePlayerLocation(target,playerLocation)
#Every time that we move the opponent also moves. update the position of the opponent using turn_of_opponent and move
opponentLocation = move(opponentLocation, turn_of_opponent(opponentLocation, piecesOfCheese))
#Finally use the function checkEatCheese to see if any of the players is in the same square of a cheese.
playerScore, opponentScore = checkEatCheese(
playerLocation,opponentLocation,playerScore,opponentScore,piecesOfCheese)
return playerLocation,opponentLocation,playerScore,opponentScore,piecesOfCheese
# During our turn we continue going to the next target, unless the piece of cheese it originally contained has been taken
# In such case, we compute the new best target to go to
current_target = (-1,-1)
def turn(mazeMap, mazeWidth, mazeHeight, playerLocation, opponentLocation, playerScore, opponentScore, piecesOfCheese, timeAllowed):
global current_target
if(len(piecesOfCheese)<MIN_CHEESE_GT):
if current_target not in piecesOfCheese:
current_target, score = best_target(playerLocation, opponentLocation, playerScore, opponentScore, piecesOfCheese)
if current_target[1] > playerLocation[1]:
return MOVE_UP
if current_target[1] < playerLocation[1]:
return MOVE_DOWN
if current_target[0] > playerLocation[0]:
return MOVE_RIGHT
return MOVE_LEFT
else:
return turn_supervised(mazeMap, mazeWidth, mazeHeight, playerLocation, opponentLocation, playerScore, opponentScore, piecesOfCheese, timeAllowed)
def convert_input_2(player, maze, opponent, mazeHeight, mazeWidth, piecesOfCheese):
# We will consider twice the size of the maze to simplify the creation of the canvas
# The canvas is initialized as a numpy tensor with 3 modes (meaning it is indexed using three integers), the third one corresponding to "layers" of the canvas.
# Here, we just use one layer, but you can defined other ones to put more information on the play (e.g. the location of the opponent could be put in a second layer)
im_size = (2*mazeHeight-1,2*mazeWidth-1,1)
# We initialize a canvas with only zeros
canvas = np.zeros(im_size)
(x,y) = player
# fill in the first layer of the canvas with the value 1 at the location of the cheeses, relative to the position of the player (i.e. the canvas is centered on the player location)
center_x, center_y = mazeWidth-1, mazeHeight-1
for (x_cheese,y_cheese) in piecesOfCheese:
canvas[y_cheese+center_y-y,x_cheese+center_x-x,0] = 1
return canvas
def turn_supervised(mazeMap, mazeWidth, mazeHeight, playerLocation, opponentLocation, playerScore, opponentScore, piecesOfCheese, timeAllowed):
global model,input_tm1, action, score
# Transform the input into the canvas using convert_input
input_t = convert_input_2(playerLocation, mazeMap, opponentLocation, mazeHeight, mazeWidth, piecesOfCheese)
# Predict the next action using the trained model
output = model.predict(input_t.reshape(1,-1))
action = output[0]
# Return the action to perform
return [MOVE_LEFT, MOVE_RIGHT, MOVE_UP, MOVE_DOWN][action]
def postprocessing (mazeMap, mazeWidth, mazeHeight, playerLocation, opponentLocation, playerScore, opponentScore, piecesOfCheese, timeAllowed):
pass | en | 0.932487 | #IA created to be integrated as a player in the PyRat Game. #Winner of the competition between AI's in the first semester of 2019. #This AI combines a Supervised Approach with a CGT approach in order to obtain maximum performance, #not only against the greedy algorithm but also against other types of AI. # The supervised learning model was trained using games of the greedy algorithm against itself, #but also against a reinforcement learning algorithm trained against the greedy, in order to increase robustness. #Please cite as you use this code. # new location after a move #opponnent stategy #simulates opponent # With this template, we integrate two ML approaches to acquire robustness against the greedy algorithm #but also against other AI's # We do not need preprocessing, so we let this function empty ### Here we load the previously trained model # We use a recursive function that goes through the trees of possible plays # It takes as arguments a given situation, and return a best target piece of cheese for the player, such that aiming to grab this piece of cheese will eventually lead to a maximum score. It also returns the corresponding score # First we should check how many pieces of cheese each player has to see if the play is over. It is the case if no pieces of cheese are left, or if playerScore or opponentScore is more than half the total number playerScore + opponentScore + piecesOfCheese # If the play is not over, then the player can aim for any of the remaining pieces of cheese # So we will simulate the game to each of the pieces, which will then by recurrence test all # the possible trees. # Move the agent on the labyrinth using function move from aux and the different directions # It suffices to move in the direction of the target. # You should only run function move once and you can't move diagonally. 
## Without loss of generality, we can suppose it gets there moving vertically first then horizontally #CHECK IF EITHER/BOTH PLAYERS ARE ON THE SAME SQUARE OF A CHEESE. #If that is the case you have to remove the cheese from the piecesOfCheese list and #add points to the score. The players get 1 point if they are alone on the square with a cheese. #If both players are in the same square and there is a cheese on the square each player gets 0.5 points. #In this function we simulate what will happen until we reach the target #You should use the two functions defined before #While the target cheese has not yet been eaten by either player #We simulate how the game will evolve until that happens #Update playerLocation (position of your player) using updatePlayerLocation #Every time that we move the opponent also moves. update the position of the opponent using turn_of_opponent and move #Finally use the function checkEatCheese to see if any of the players is in the same square of a cheese. # During our turn we continue going to the next target, unless the piece of cheese it originally contained has been taken # In such case, we compute the new best target to go to # We will consider twice the size of the maze to simplify the creation of the canvas # The canvas is initialized as a numpy tensor with 3 modes (meaning it is indexed using three integers), the third one corresponding to "layers" of the canvas. # Here, we just use one layer, but you can defined other ones to put more information on the play (e.g. the location of the opponent could be put in a second layer) # We initialize a canvas with only zeros # fill in the first layer of the canvas with the value 1 at the location of the cheeses, relative to the position of the player (i.e. the canvas is centered on the player location) # Transform the input into the canvas using convert_input # Predict the next action using the trained model # Return the action to perform | 3.239888 | 3 |
tests/resources/test_numbers.py | vaibhav-plivo/plivo-python | 42 | 6621075 | # -*- coding: utf-8 -*-
import plivo
from tests.base import PlivoResourceTestCase
from tests.decorators import with_response
number_id = '123'
class NumberTest(PlivoResourceTestCase):
    """Tests for the rented-number resource (/Number endpoints)."""

    @with_response(200)
    def test_list(self):
        """Listing numbers builds the expected query string and is iterable."""
        numbers = self.client.numbers.list(
            number_startswith=24,
            services=['voice', 'sms'],
            alias='SomeAlias',
            type='local')
        # ListResponseObject.__iter__ must yield every mocked record.
        self.assertEqual(len(list(numbers)), 3)
        # Verifying the endpoint hit (filters serialized into the query string).
        self.assertUrlEqual(
            'https://api.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/Number/?number_startswith=24&alias=SomeAlias&limit=20&offset=0&services=voice%2Csms&type=local',
            self.client.current_request.url)
        # Verifying the method used
        self.assertEqual('GET', self.client.current_request.method)

    def test_numbers_list_all_invalid_params(self):
        """A limit above the API maximum raises a client-side ValidationError."""
        with self.assertRaises(plivo.exceptions.ValidationError):
            self.client.numbers.list(limit=100)

    @with_response(200)
    def test_get(self):
        """Fetching a single number hits GET /Number/<id>."""
        number = self.client.numbers.get(number_id)
        self.assertResponseMatches(number)
        self.assertUrlEqual(self.client.current_request.url,
                            self.get_url('Number', number_id))
        self.assertEqual(self.client.current_request.method, 'GET')

    @with_response(202)
    def test_update(self):
        """Updating a number hits POST /Number/<id>."""
        self.client.numbers.update(number_id, alias='Test')
        self.assertUrlEqual(self.client.current_request.url,
                            self.get_url('Number', number_id))
        self.assertEqual(self.client.current_request.method, 'POST')

    @with_response(204)
    def test_numbers_delete(self):
        """Deleting a number hits DELETE /Number/<id>."""
        self.client.numbers.delete(number_id)
        self.assertUrlEqual(self.client.current_request.url,
                            self.get_url('Number', number_id))
        self.assertEqual(self.client.current_request.method, 'DELETE')

    @with_response(202)
    def test_create(self):
        """Adding a number hits POST /Number/."""
        self.client.numbers.create('1231231231', 'carrier', 'region')
        self.assertUrlEqual(self.client.current_request.url,
                            self.get_url('Number'))
        self.assertEqual(self.client.current_request.method, 'POST')
class PhoneNumberTest(PlivoResourceTestCase):
    """Tests for the purchasable phone-number search/buy endpoints."""
    @with_response(200)
    def test_list(self):
        """Searching numbers hits GET /PhoneNumber with country and type filters."""
        self.client.numbers.search('GB', type='tollfree')
        self.assertUrlEqual(self.client.current_request.url,
                            self.get_url(
                                'PhoneNumber',
                                type='tollfree',
                                country_iso='GB'))
        self.assertEqual(self.client.current_request.method, 'GET')
    @with_response(202)
    def test_create(self):
        """Buying a number hits POST /PhoneNumber/<number>."""
        self.client.numbers.buy(number_id, app_id='test')
        self.assertUrlEqual(self.client.current_request.url,
                            self.get_url('PhoneNumber', number_id))
        self.assertEqual(self.client.current_request.method, 'POST')
| # -*- coding: utf-8 -*-
import plivo
from tests.base import PlivoResourceTestCase
from tests.decorators import with_response
number_id = '123'
class NumberTest(PlivoResourceTestCase):
@with_response(200)
def test_list(self):
numbers = self.client.numbers.list(
number_startswith=24,
services=['voice', 'sms'],
alias='SomeAlias',
type='local')
# Test if ListResponseObject's __iter__ is working correctly
self.assertEqual(len(list(numbers)), 3)
print(self.client.current_request.url)
# Verifying the endpoint hit
self.assertUrlEqual(
'https://api.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/Number/?number_startswith=24&alias=SomeAlias&limit=20&offset=0&services=voice%2Csms&type=local',
self.client.current_request.url)
# Verifying the method used
self.assertEqual('GET', self.client.current_request.method)
def test_numbers_list_all_invalid_params(self):
with self.assertRaises(plivo.exceptions.ValidationError):
numbers = self.client.numbers.list(limit=100)
@with_response(200)
def test_get(self):
number = self.client.numbers.get(number_id)
self.assertResponseMatches(number)
self.assertUrlEqual(self.client.current_request.url,
self.get_url('Number', number_id))
self.assertEqual(self.client.current_request.method, 'GET')
@with_response(202)
def test_update(self):
self.client.numbers.update(number_id, alias='Test')
self.assertUrlEqual(self.client.current_request.url,
self.get_url('Number', number_id))
self.assertEqual(self.client.current_request.method, 'POST')
@with_response(204)
def test_numbers_delete(self):
self.client.numbers.delete(number_id)
self.assertUrlEqual(self.client.current_request.url,
self.get_url('Number', number_id))
self.assertEqual(self.client.current_request.method, 'DELETE')
@with_response(202)
def test_create(self):
self.client.numbers.create('1231231231', 'carrier', 'region')
self.assertUrlEqual(self.client.current_request.url,
self.get_url('Number', ))
self.assertEqual(self.client.current_request.method, 'POST')
class PhoneNumberTest(PlivoResourceTestCase):
@with_response(200)
def test_list(self):
self.client.numbers.search('GB', type='tollfree')
self.assertUrlEqual(self.client.current_request.url,
self.get_url(
'PhoneNumber',
type='tollfree',
country_iso='GB'))
self.assertEqual(self.client.current_request.method, 'GET')
@with_response(202)
def test_create(self):
self.client.numbers.buy(number_id, app_id='test')
self.assertUrlEqual(self.client.current_request.url,
self.get_url('PhoneNumber', number_id))
self.assertEqual(self.client.current_request.method, 'POST')
| en | 0.707933 | # -*- coding: utf-8 -*- # Test if ListResponseObject's __iter__ is working correctly # Verifying the endpoint hit # Verifying the method used | 2.627016 | 3 |
_all.py | un-pogaz/MC-Decompil-Generated-data | 0 | 6621076 | <gh_stars>0
import sys

print('--==| Minecraft: Build all Generated data |==--')
print()
print('It can be a lot of files, are you sure to do it?')
# Accept 'y', 'Y', 'yes', ... (case-insensitive); abort on anything else.
# The original check (`input()[:1] == 'y'`) rejected an uppercase 'Y'.
if not input().strip().lower().startswith('y'):
    sys.exit()

# Import kept after the confirmation prompt, preserving the original ordering
# (the builder module is only loaded once the user has confirmed).
from builder.generated_data_builder import args, build_generated_data, version_manifest

# Shared argument namespace configured once, then reused for every version.
args.manifest_json = None
args.overwrite = False
args.output = None
args.quiet = True
args.zip = True

# Build generated data for every version known to the manifest.
for version in version_manifest["paths"]:
    args.version = version
    print(args)
    build_generated_data(args)
print('All => Done') | import sys
print('--==| Minecraft: Build all Generated data |==--')
print()
print('It can be a lot of files, are you sure to do it?')
if not input()[:1] == 'y': sys.exit()
from builder.generated_data_builder import args, build_generated_data, version_manifest
args.manifest_json = None
args.overwrite = False
args.output = None
args.quiet = True
args.zip = True
for version in version_manifest["paths"]:
args.version = version
print(args)
build_generated_data(args)
print('All => Done') | none | 1 | 2.49279 | 2 | |
examples/python/usm_memory_operation.py | vlad-perevezentsev/dpctl | 0 | 6621077 | import dpctl
# Example: moving data between host memory, USM-device and USM-shared buffers.
import dpctl.memory as dpmem
import numpy as np
# 32-byte USM allocations: host-accessible shared memory and device-only memory.
ms = dpmem.MemoryUSMShared(32)
md = dpmem.MemoryUSMDevice(32)
# Random host data to round-trip through the USM buffers.
host_buf = np.random.randint(0, 42, dtype=np.uint8, size=32)
# copy host byte-like object to USM-device buffer
md.copy_from_host(host_buf)
# copy USM-device buffer to USM-shared buffer in parallel (using sycl::queue::memcpy)
ms.copy_from_device(md)
# build numpy array reusing host-accessible USM-shared memory
X = np.ndarray((len(ms),), buffer=ms, dtype=np.uint8)
# Display Python object NumPy ndarray is viewing into
print("numpy.ndarray.base: ", X.base)
print("")
# Print content of the view
print("View..........: ", X)
# Print content of the original host buffer
print("host_buf......: ", host_buf)
# use copy_to_host to retrieve memory of USM-device memory
print("copy_to_host(): ", md.copy_to_host())
| import dpctl
import dpctl.memory as dpmem
import numpy as np
ms = dpmem.MemoryUSMShared(32)
md = dpmem.MemoryUSMDevice(32)
host_buf = np.random.randint(0, 42, dtype=np.uint8, size=32)
# copy host byte-like object to USM-device buffer
md.copy_from_host(host_buf)
# copy USM-device buffer to USM-shared buffer in parallel (using sycl::queue::memcpy)
ms.copy_from_device(md)
# build numpy array reusing host-accessible USM-shared memory
X = np.ndarray((len(ms),), buffer=ms, dtype=np.uint8)
# Display Python object NumPy ndarray is viewing into
print("numpy.ndarray.base: ", X.base)
print("")
# Print content of the view
print("View..........: ", X)
# Print content of the original host buffer
print("host_buf......: ", host_buf)
# use copy_to_host to retrieve memory of USM-device memory
print("copy_to_host(): ", md.copy_to_host())
| en | 0.648832 | # copy host byte-like object to USM-device buffer # copy USM-device buffer to USM-shared buffer in parallel (using sycl::queue::memcpy) # build numpy array reusing host-accessible USM-shared memory # Display Python object NumPy ndarray is viewing into # Print content of the view # Print content of the original host buffer # use copy_to_host to retrieve memory of USM-device memory | 2.699753 | 3 |
networks/generator.py | andgitisaac/MNIST_GAN | 0 | 6621078 | <filename>networks/generator.py
import torch
import torch.nn as nn
import torch.nn.functional as F
class Generator(nn.Module):
    """DCGAN-style generator producing 28x28 single-channel images.

    For the conditional variants ("CGAN"/"ACGAN") the input depth grows by
    ``numClasses`` — presumably a one-hot label is concatenated to the noise
    by the caller (not shown here).
    """

    def __init__(self, GANType, zDim, numClasses=10):
        super(Generator, self).__init__()
        # Conditional GANs receive the class encoding alongside the noise.
        if GANType == "CGAN" or GANType == "ACGAN":
            zDim = zDim + numClasses
        # Upsampling path: 1x1 -> 7x7 (k7,s1,p0) -> 14x14 -> 28x28 (k4,s2,p1).
        self.conv1 = nn.ConvTranspose2d(zDim, 64, 7, 1, 0, bias=False)
        self.conv2 = nn.ConvTranspose2d(64, 32, 4, 2, 1, bias=False)
        self.conv3 = nn.ConvTranspose2d(32, 1, 4, 2, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.bn2 = nn.BatchNorm2d(32)

    def forward(self, x):
        """Map a latent batch of shape (N, zDim, 1, 1) to tanh-scaled images."""
        hidden = F.relu(self.bn1(self.conv1(x)))
        hidden = F.relu(self.bn2(self.conv2(hidden)))
        return torch.tanh(self.conv3(hidden))
| <filename>networks/generator.py
import torch
import torch.nn as nn
import torch.nn.functional as F
class Generator(nn.Module):
def __init__(self, GANType, zDim, numClasses=10):
super(Generator, self).__init__()
if GANType in ["CGAN", "ACGAN"]:
zDim += numClasses
self.conv1 = nn.ConvTranspose2d(zDim, 64, 7, 1, 0, bias=False)
self.conv2 = nn.ConvTranspose2d(64, 32, 4, 2, 1, bias=False)
self.conv3 = nn.ConvTranspose2d(32, 1, 4, 2, 1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(32)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = F.relu(x)
x = self.conv3(x)
x = torch.tanh(x)
return x
| none | 1 | 2.725573 | 3 | |
lib/tsdesktop/bottman/views/dockman_test.py | tsadm/desktop | 0 | 6621079 | <filename>lib/tsdesktop/bottman/views/dockman_test.py
from tsdesktop.testing import TSDesktopTest
from .dockman import view
from tsdesktop import dockman
from bottle import HTTPResponse, HTTPError
images = [{}]
containers = [{'Status': None}]
class Views(TSDesktopTest):
    """Tests for the dockman view: rendering, image pulls, service
    start/stop, and docker-daemon error propagation."""
    # Mock docker client, recreated for every test by setUp().
    cli = None
    def setUp(self):
        self.cli = dockman._mockClient()
    def test_dockman(self):
        """Default render includes the per-service modal container."""
        r = view()
        self.assertLinesContains(r,
                '<div id="dockman_mysqld" class="w3-modal">')
    def test_dockmanActionInvalid(self):
        """An unknown action name yields HTTP 400."""
        r = view('mysqld', 'invalid')
        self.assertResponse(r, 400)
    def test_dockmanPingFail(self):
        """A failing docker ping surfaces as an HTTPError."""
        self.cli.pingFail = True
        with self.assertRaises(HTTPError) as cm:
            r = view()
        self.assertResponseError(cm.exception)
    def test_pullImage(self):
        """A successful image pull redirects back to /dockman."""
        with self.assertRaises(HTTPResponse) as cm:
            view('mysqld', 'pull-image')
        self.assertRedirect(cm.exception, location='/dockman')
    def test_pullImageInvalid(self):
        """Pulling an image for an unknown service yields HTTP 400."""
        r = view('faked', 'pull-image')
        self.assertResponse(r, 400)
    def test_pullImageError(self):
        """A docker error payload during pull is reported as an error response."""
        self.cli.mock(images, '{"error": "fake error"}')
        r = view('mysqld', 'pull-image')
        self.assertResponseError(r)
    def test_serviceStart(self):
        """Starting a stopped service redirects back to /dockman."""
        self.cli.mock([{'Status': ''}])
        with self.assertRaises(HTTPResponse) as cm:
            view('mysqld', 'start')
        self.assertRedirect(cm.exception, location='/dockman')
    def test_serviceStartError(self):
        """A docker error payload during start yields HTTP 400."""
        self.cli.mock(containers)
        self.cli.mock('{"error": "service start fake error"}')
        r = view('mysqld', 'start')
        self.assertResponse(r, 400)
    def test_serviceStop(self):
        """Stopping a running container redirects back to /dockman."""
        self.cli.mock([{
            'Status': 'Up since...',
            'Names': ['/tsdesktop-mysqld'],
        }])
        with self.assertRaises(HTTPResponse) as cm:
            r = view('mysqld', 'stop')
        self.assertRedirect(cm.exception, location='/dockman')
    def test_serviceStopError(self):
        """A docker error payload during stop yields HTTP 400."""
        self.cli.mock(containers)
        self.cli.mock('{"error": "service stop fake error"}')
        r = view('mysqld', 'stop')
        self.assertResponse(r, 400)
| <filename>lib/tsdesktop/bottman/views/dockman_test.py
from tsdesktop.testing import TSDesktopTest
from .dockman import view
from tsdesktop import dockman
from bottle import HTTPResponse, HTTPError
images = [{}]
containers = [{'Status': None}]
class Views(TSDesktopTest):
cli = None
def setUp(self):
self.cli = dockman._mockClient()
def test_dockman(self):
r = view()
self.assertLinesContains(r,
'<div id="dockman_mysqld" class="w3-modal">')
def test_dockmanActionInvalid(self):
r = view('mysqld', 'invalid')
self.assertResponse(r, 400)
def test_dockmanPingFail(self):
self.cli.pingFail = True
with self.assertRaises(HTTPError) as cm:
r = view()
self.assertResponseError(cm.exception)
def test_pullImage(self):
with self.assertRaises(HTTPResponse) as cm:
view('mysqld', 'pull-image')
self.assertRedirect(cm.exception, location='/dockman')
def test_pullImageInvalid(self):
r = view('faked', 'pull-image')
self.assertResponse(r, 400)
def test_pullImageError(self):
self.cli.mock(images, '{"error": "fake error"}')
r = view('mysqld', 'pull-image')
self.assertResponseError(r)
def test_serviceStart(self):
self.cli.mock([{'Status': ''}])
with self.assertRaises(HTTPResponse) as cm:
view('mysqld', 'start')
self.assertRedirect(cm.exception, location='/dockman')
def test_serviceStartError(self):
self.cli.mock(containers)
self.cli.mock('{"error": "service start fake error"}')
r = view('mysqld', 'start')
self.assertResponse(r, 400)
def test_serviceStop(self):
self.cli.mock([{
'Status': 'Up since...',
'Names': ['/tsdesktop-mysqld'],
}])
with self.assertRaises(HTTPResponse) as cm:
r = view('mysqld', 'stop')
self.assertRedirect(cm.exception, location='/dockman')
def test_serviceStopError(self):
self.cli.mock(containers)
self.cli.mock('{"error": "service stop fake error"}')
r = view('mysqld', 'stop')
self.assertResponse(r, 400)
| none | 1 | 2.464234 | 2 | |
pip_services3_datadog/clients/DataDogMetricsClient.py | pip-services3-python/pip-services3-datadog-python | 0 | 6621080 | # -*- coding: utf-8 -*-
import datetime
from typing import Any, List, Optional

from pip_services3_commons.config import ConfigParams
from pip_services3_commons.convert import StringConverter
from pip_services3_commons.errors import ConfigException
from pip_services3_commons.refer import IReferences
from pip_services3_components.auth import CredentialResolver
from pip_services3_rpc.clients import RestClient

from pip_services3_datadog.clients.DataDogMetric import DataDogMetric
from pip_services3_datadog.clients.DataDogMetricPoint import DataDogMetricPoint
class DataDogMetricsClient(RestClient):
    """REST client for the DataDog metrics API (``api/v1/series``).

    Resolves a DataDog access key through the credential resolver and sends
    it in the ``DD-API-KEY`` header on every request.
    """

    __default_config: ConfigParams = ConfigParams.from_tuples(
        "connection.protocol", "https",
        "connection.host", "api.datadoghq.com",
        "connection.port", 443,
        "credential.internal_network", "true"
    )

    def __init__(self, config: ConfigParams = None):
        """Create the client, optionally applying configuration immediately."""
        super().__init__()
        self.__credential_resolver: CredentialResolver = CredentialResolver()
        if config:
            self.configure(config)
        self._base_route = 'api/v1'

    def configure(self, config: ConfigParams):
        """Apply the given configuration on top of the built-in defaults."""
        config = self.__default_config.override(config)
        super().configure(config)
        self.__credential_resolver.configure(config)

    def set_references(self, references: IReferences):
        """Set dependency references for this component and its resolver."""
        super().set_references(references)
        self.__credential_resolver.set_references(references)

    def open(self, correlation_id: Optional[str]):
        """Open the client after resolving the DataDog access key.

        Raises:
            ConfigException: if no access key is found in the credentials.
        """
        credential = self.__credential_resolver.lookup(correlation_id)
        if credential is None or credential.get_access_key() is None:
            raise ConfigException(
                correlation_id,
                "NO_ACCESS_KEY",
                "Missing access key in credentials"
            )
        self._headers = self._headers or {}
        self._headers['DD-API-KEY'] = credential.get_access_key()
        super().open(correlation_id)

    def __convert_tags(self, tags: dict) -> Optional[str]:
        """Render a tag dict as DataDog's comma-separated "key:value" string."""
        if tags is None:
            return None
        return ','.join(key + ':' + str(value) for key, value in tags.items())

    def __convert_points(self, points: List[DataDogMetricPoint]) -> List[List[str]]:
        """Convert metric points to ``[unix-timestamp, value]`` string pairs."""
        results = []
        for point in points:
            # Points without an explicit timestamp default to "now".
            time = point.time or datetime.datetime.now()
            results.append([
                str(time.timestamp()),
                StringConverter.to_string(point.value)
            ])
        return results

    def __convert_metric(self, metric: DataDogMetric) -> dict:
        """Convert a DataDogMetric into a series-entry dict for the API."""
        # Copy the tags so the caller's metric object is never mutated.
        tags = dict(metric.tags) if metric.tags else None
        if metric.service:
            # The service name is transported as a regular "service" tag.
            tags = tags or {}
            tags['service'] = metric.service
        result = {
            'metric': metric.metric,
            'type': metric.type or 'gauge',
            'points': self.__convert_points(metric.points)
        }
        if tags:
            result['tags'] = self.__convert_tags(tags)
        # BUGFIX: was "if metric.tags", which set (possibly None) host only
        # when tags were present and dropped it otherwise.
        if metric.host:
            result['host'] = metric.host
        if metric.interval:
            result['interval'] = metric.interval
        return result

    def __convert_metrics(self, metrics: List[DataDogMetric]) -> dict:
        """Wrap the converted metrics into the request payload."""
        return {'series': [self.__convert_metric(m) for m in metrics]}

    def send_metrics(self, correlation_id: Optional[str], metrics: List[DataDogMetric]) -> Any:
        """POST the given metrics to the DataDog "series" endpoint.

        NOTE: instrumentation is deliberately disabled here — instrumenting
        the metric sender would emit more metrics, which would be sent again,
        recursing forever (per the original author's comment).
        """
        data = self.__convert_metrics(metrics)
        return self._call('post', 'series', None, None, data)
| # -*- coding: utf-8 -*-
import datetime
from typing import Optional, List
from pip_services3_commons.config import ConfigParams
from pip_services3_commons.convert import StringConverter
from pip_services3_commons.errors import ConfigException
from pip_services3_commons.refer import IReferences
from pip_services3_components.auth import CredentialResolver
from pip_services3_rpc.clients import RestClient
from pip_services3_datadog.clients.DataDogMetric import DataDogMetric
from pip_services3_datadog.clients.DataDogMetricPoint import DataDogMetricPoint
class DataDogMetricsClient(RestClient):
__default_config: ConfigParams = ConfigParams.from_tuples(
"connection.protocol", "https",
"connection.host", "api.datadoghq.com",
"connection.port", 443,
"credential.internal_network", "true"
)
def __init__(self, config: ConfigParams = None):
super().__init__()
self.__credential_resolver: CredentialResolver = CredentialResolver()
if config:
self.configure(config)
self._base_route = 'api/v1'
def configure(self, config: ConfigParams):
config = self.__default_config.override(config)
super().configure(config)
self.__credential_resolver.configure(config)
def set_references(self, references: IReferences):
super().set_references(references)
self.__credential_resolver.set_references(references)
def open(self, correlation_id: Optional[str]):
credential = self.__credential_resolver.lookup(correlation_id)
if credential is None or credential.get_access_key() is None:
raise ConfigException(
correlation_id,
"NO_ACCESS_KEY",
"Missing access key in credentials"
)
self._headers = self._headers or {}
self._headers['DD-API-KEY'] = credential.get_access_key()
super().open(correlation_id)
def __convert_tags(self, tags: dict) -> Optional[str]:
if tags is None:
return
builder = ''
for key in tags:
if builder != '':
builder += ','
builder += key + ':' + tags[key]
return builder
def __convert_points(self, points: List[DataDogMetricPoint]) -> List[List[str]]:
results = []
for point in points:
time = point.time or datetime.datetime.now()
results.append([
str(time.timestamp()),
StringConverter.to_string(point.value)
])
return results
def __convert_metric(self, metric: DataDogMetric) -> dict:
tags = metric.tags
if metric.service:
tags = tags or {}
tags['service'] = metric.service
result = {
'metric': metric.metric,
'type': metric.type or 'gauge',
'points': self.__convert_points(metric.points)
}
if tags:
result['tags'] = self.__convert_tags(tags)
if metric.tags:
result['host'] = metric.host
if metric.interval:
result['interval'] = metric.interval
return result
def __convert_metrics(self, metrics: List[DataDogMetric]) -> dict:
series = list(map(lambda m: self.__convert_metric(m), metrics))
return {
'series': series
}
def send_metrics(self, correlation_id: Optional[str], metrics: List[DataDogMetric]) -> Any:
data = self.__convert_metrics(metrics)
# Commented instrumentation because otherwise it will never stop sending logs...
# timing = self._instrument(correlation_id, 'datadog.send_metrics')
try:
return self._call('post', 'series', None, None, data)
finally:
# timing.end_timing()
pass
| en | 0.656638 | # -*- coding: utf-8 -*- # Commented instrumentation because otherwise it will never stop sending logs... # timing = self._instrument(correlation_id, 'datadog.send_metrics') # timing.end_timing() | 1.971296 | 2 |
devincachu/inscricao/tests/test_model_configuracao.py | devincachu/devincachu-2013 | 3 | 6621081 | # -*- coding: utf-8 -*-
# Copyright 2013 <NAME> authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import unittest
from django.db import models as django_models
from .. import models
class ConfiguracaoTestCase(unittest.TestCase):
    """Schema/metadata tests for the ``Configuracao`` model.

    NOTE(review): this file is Python 2 (uses the ``unicode`` builtin) and
    the legacy Django ``_meta.get_all_field_names`` / ``get_field_by_name``
    API.
    """
    @classmethod
    def setUpClass(cls):
        # Field names are resolved once and reused by the assertIn tests below.
        cls.field_names = models.Configuracao._meta.get_all_field_names()
    def test_deve_ter_campo_com_valor_da_inscricao(self):
        """The model must expose a registration-price field."""
        self.assertIn("valor_inscricao", self.field_names)
    def test_valor_da_inscricao_deve_ser_do_tipo_FloatField(self):
        """The price field must be a FloatField."""
        field = models.Configuracao._meta.get_field_by_name(
            "valor_inscricao")[0]
        self.assertIsInstance(field, django_models.FloatField)
    def test_valor_da_inscricao_deve_ter_verbose_name_com_special_chars(self):
        """The price field keeps its accented verbose name."""
        field = models.Configuracao._meta.get_field_by_name(
            "valor_inscricao")[0]
        self.assertEqual(u"Valor da inscrição", field.verbose_name)
    def test_deve_ter_campo_informando_se_a_inscricao_esta_aberta(self):
        """The model must expose a registration-status field."""
        self.assertIn("status", self.field_names)
    def test_status_deve_ser_do_tipo_CharField(self):
        """The status field must be a CharField."""
        field = models.Configuracao._meta.get_field_by_name("status")[0]
        self.assertIsInstance(field, django_models.CharField)
    def test_status_deve_ter_no_maximo_10_caracteres(self):
        """The status field is limited to 10 characters."""
        field = models.Configuracao._meta.get_field_by_name("status")[0]
        self.assertEqual(10, field.max_length)
    def test_status_deve_ser_fechadas_abertas_ou_encerradas(self):
        """Status choices are exactly fechadas/abertas/encerradas."""
        esperado = (
            (u"fechadas", u"Fechadas (inscrições ainda não abriram)"),
            (u"abertas", u"Inscrições abertas"),
            (u"encerradas", u"Inscrições encerradas"),
        )
        field = models.Configuracao._meta.get_field_by_name("status")[0]
        self.assertEqual(esperado, field.choices)
    def test__unicode__deve_retornar_informando_que_eh_model_de_config(self):
        """__unicode__ identifies the singleton configuration record."""
        configuracao = models.Configuracao.objects.get()
        esperado = u"Configuração das inscrições do Dev in Cachu 2012"
        self.assertEqual(esperado, unicode(configuracao))
    def test_verbose_name_deve_ter_acento_e_cedilha(self):
        """verbose_name carries the accented Portuguese label."""
        self.assertEqual(u"Configuração das inscrições",
                         models.Configuracao._meta.verbose_name)
    def test_verbose_name_plural_deve_ser_igual_verbose_name(self):
        """Plural verbose name mirrors the singular one."""
        self.assertEqual(models.Configuracao._meta.verbose_name,
                         models.Configuracao._meta.verbose_name_plural)
| # -*- coding: utf-8 -*-
# Copyright 2013 <NAME> authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import unittest
from django.db import models as django_models
from .. import models
class ConfiguracaoTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.field_names = models.Configuracao._meta.get_all_field_names()
def test_deve_ter_campo_com_valor_da_inscricao(self):
self.assertIn("valor_inscricao", self.field_names)
def test_valor_da_inscricao_deve_ser_do_tipo_FloatField(self):
field = models.Configuracao._meta.get_field_by_name(
"valor_inscricao")[0]
self.assertIsInstance(field, django_models.FloatField)
def test_valor_da_inscricao_deve_ter_verbose_name_com_special_chars(self):
field = models.Configuracao._meta.get_field_by_name(
"valor_inscricao")[0]
self.assertEqual(u"Valor da inscrição", field.verbose_name)
def test_deve_ter_campo_informando_se_a_inscricao_esta_aberta(self):
self.assertIn("status", self.field_names)
def test_status_deve_ser_do_tipo_CharField(self):
field = models.Configuracao._meta.get_field_by_name("status")[0]
self.assertIsInstance(field, django_models.CharField)
def test_status_deve_ter_no_maximo_10_caracteres(self):
field = models.Configuracao._meta.get_field_by_name("status")[0]
self.assertEqual(10, field.max_length)
def test_status_deve_ser_fechadas_abertas_ou_encerradas(self):
esperado = (
(u"fechadas", u"Fechadas (inscrições ainda não abriram)"),
(u"abertas", u"Inscrições abertas"),
(u"encerradas", u"Inscrições encerradas"),
)
field = models.Configuracao._meta.get_field_by_name("status")[0]
self.assertEqual(esperado, field.choices)
def test__unicode__deve_retornar_informando_que_eh_model_de_config(self):
configuracao = models.Configuracao.objects.get()
esperado = u"Configuração das inscrições do Dev in Cachu 2012"
self.assertEqual(esperado, unicode(configuracao))
def test_verbose_name_deve_ter_acento_e_cedilha(self):
self.assertEqual(u"Configuração das inscrições",
models.Configuracao._meta.verbose_name)
def test_verbose_name_plural_deve_ser_igual_verbose_name(self):
self.assertEqual(models.Configuracao._meta.verbose_name,
models.Configuracao._meta.verbose_name_plural)
| en | 0.90995 | # -*- coding: utf-8 -*- # Copyright 2013 <NAME> authors. All rights reserved. # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. | 2.431134 | 2 |
test/test_cards.py | volfpeter/markyp-bootstrap4 | 21 | 6621082 | <filename>test/test_cards.py
from markyp_bootstrap4.cards import *
def test_title():
    """Every title factory wraps its content in the card-title class."""
    factories = (title.h1, title.h2, title.h3, title.h4, title.h5, title.h6, title.p)
    tags = ("h1", "h2", "h3", "h4", "h5", "h6", "p")
    # Plain variant: only the base class is present.
    for make, tag in zip(factories, tags):
        assert make("Text").markup == \
            '<{0} class="card-title">Text</{0}>'.format(tag)
    # Extra class and attribute are merged after the base class.
    for make, tag in zip(factories, tags):
        assert make("Text", class_="my-title", attr="attr-value").markup == \
            '<{0} attr="attr-value" class="card-title my-title">Text</{0}>'.format(tag)
def test_subtitle():
    """Every subtitle factory applies the card-subtitle utility classes."""
    factories = (subtitle.h1, subtitle.h2, subtitle.h3, subtitle.h4,
                 subtitle.h5, subtitle.h6, subtitle.p)
    tags = ("h1", "h2", "h3", "h4", "h5", "h6", "p")
    # Plain variant: only the base classes are present.
    for make, tag in zip(factories, tags):
        assert make("Text").markup == \
            '<{0} class="card-subtitle text-muted mb-2">Text</{0}>'.format(tag)
    # Extra class and attribute are merged after the base classes.
    for make, tag in zip(factories, tags):
        assert make("Text", class_="my-title", attr="attr-value").markup == \
            '<{0} attr="attr-value" class="card-subtitle text-muted mb-2 my-title">Text</{0}>'.format(tag)
def test_text():
    """Every text factory wraps its content in the card-text class."""
    factories = (text.h1, text.h2, text.h3, text.h4, text.h5, text.h6, text.p)
    tags = ("h1", "h2", "h3", "h4", "h5", "h6", "p")
    # Plain variant: only the base class is present.
    for make, tag in zip(factories, tags):
        assert make("Text").markup == \
            '<{0} class="card-text">Text</{0}>'.format(tag)
    # Extra class and attribute are merged after the base class.
    for make, tag in zip(factories, tags):
        assert make("Text", class_="my-title", attr="attr-value").markup == \
            '<{0} attr="attr-value" class="card-text my-title">Text</{0}>'.format(tag)
def test_header():
    """Every header factory wraps its content in the card-header class."""
    factories = (header.h1, header.h2, header.h3, header.h4, header.h5, header.h6, header.p)
    tags = ("h1", "h2", "h3", "h4", "h5", "h6", "p")
    # Plain variant: only the base class is present.
    for make, tag in zip(factories, tags):
        assert make("Text").markup == \
            '<{0} class="card-header">Text</{0}>'.format(tag)
    # Extra class and attribute are merged after the base class.
    for make, tag in zip(factories, tags):
        assert make("Text", class_="my-title", attr="attr-value").markup == \
            '<{0} attr="attr-value" class="card-header my-title">Text</{0}>'.format(tag)
def test_footer():
    """Every footer factory wraps its content in the card-footer class."""
    factories = (footer.h1, footer.h2, footer.h3, footer.h4, footer.h5, footer.h6, footer.p)
    tags = ("h1", "h2", "h3", "h4", "h5", "h6", "p")
    # Plain variant: only the base class is present.
    for make, tag in zip(factories, tags):
        assert make("Text").markup == \
            '<{0} class="card-footer">Text</{0}>'.format(tag)
    # Extra class and attribute are merged after the base class.
    for make, tag in zip(factories, tags):
        assert make("Text", class_="my-title", attr="attr-value").markup == \
            '<{0} attr="attr-value" class="card-footer my-title">Text</{0}>'.format(tag)
def test_TextAlign():
assert TextAlign.LEFT == "text-left"
assert TextAlign.CENTER == "text-center"
assert TextAlign.RIGHT == "text-right"
def test_Image():
assert Image.top(src="https://via.placeholder.com/150").markup ==\
'<img src="https://via.placeholder.com/150" class="card-img-top">'
assert Image.bottom(src="https://via.placeholder.com/150").markup ==\
'<img src="https://via.placeholder.com/150" class="card-img-bottom">'
assert Image.top(src="https://via.placeholder.com/150", class_="my-card-img", attr="attr-value").markup ==\
'<img src="https://via.placeholder.com/150" attr="attr-value" class="card-img-top my-card-img">'
assert Image.bottom(src="https://via.placeholder.com/150", class_="my-card-img", attr="attr-value").markup ==\
'<img src="https://via.placeholder.com/150" attr="attr-value" class="card-img-bottom my-card-img">'
def test_card():
assert card().markup == '<div class="card"></div>'
assert card("Content").markup == '<div class="card">\nContent\n</div>'
assert card("Content", class_="my-card", attr="attr-value").markup ==\
'<div attr="attr-value" class="card my-card">\nContent\n</div>'
def test_body():
assert body().markup == '<div class="card-body"></div>'
assert body("Content").markup == '<div class="card-body">\nContent\n</div>'
assert body("Content", class_="my-body", attr="value").markup ==\
'<div attr="value" class="card-body my-body">\nContent\n</div>'
def test_footer_div():
assert footer_div().markup == '<div class="card-footer"></div>'
assert footer_div("Text").markup == '<div class="card-footer">\nText\n</div>'
assert footer_div("Text", class_="my-footer", attr="value").markup ==\
'<div attr="value" class="card-footer my-footer">\nText\n</div>'
def test_header_div():
assert header_div().markup == '<div class="card-header"></div>'
assert header_div("Text").markup == '<div class="card-header">\nText\n</div>'
assert header_div("Text", class_="my-header", attr="value").markup ==\
'<div attr="value" class="card-header my-header">\nText\n</div>'
def test_link():
assert link().markup == '<a class="card-link"></a>'
assert link("Content").markup == '<a class="card-link">Content</a>'
assert link("Content", class_="my-link", attr="value").markup ==\
'<a attr="value" class="card-link my-link">Content</a>'
| <filename>test/test_cards.py
from markyp_bootstrap4.cards import *
def test_title():
assert title.h1("Text").markup ==\
'<h1 class="card-title">Text</h1>'
assert title.h2("Text").markup ==\
'<h2 class="card-title">Text</h2>'
assert title.h3("Text").markup ==\
'<h3 class="card-title">Text</h3>'
assert title.h4("Text").markup ==\
'<h4 class="card-title">Text</h4>'
assert title.h5("Text").markup ==\
'<h5 class="card-title">Text</h5>'
assert title.h6("Text").markup ==\
'<h6 class="card-title">Text</h6>'
assert title.p("Text").markup ==\
'<p class="card-title">Text</p>'
assert title.h1("Text", class_="my-title", attr="attr-value").markup ==\
'<h1 attr="attr-value" class="card-title my-title">Text</h1>'
assert title.h2("Text", class_="my-title", attr="attr-value").markup ==\
'<h2 attr="attr-value" class="card-title my-title">Text</h2>'
assert title.h3("Text", class_="my-title", attr="attr-value").markup ==\
'<h3 attr="attr-value" class="card-title my-title">Text</h3>'
assert title.h4("Text", class_="my-title", attr="attr-value").markup ==\
'<h4 attr="attr-value" class="card-title my-title">Text</h4>'
assert title.h5("Text", class_="my-title", attr="attr-value").markup ==\
'<h5 attr="attr-value" class="card-title my-title">Text</h5>'
assert title.h6("Text", class_="my-title", attr="attr-value").markup ==\
'<h6 attr="attr-value" class="card-title my-title">Text</h6>'
assert title.p("Text", class_="my-title", attr="attr-value").markup ==\
'<p attr="attr-value" class="card-title my-title">Text</p>'
def test_subtitle():
assert subtitle.h1("Text").markup ==\
'<h1 class="card-subtitle text-muted mb-2">Text</h1>'
assert subtitle.h2("Text").markup ==\
'<h2 class="card-subtitle text-muted mb-2">Text</h2>'
assert subtitle.h3("Text").markup ==\
'<h3 class="card-subtitle text-muted mb-2">Text</h3>'
assert subtitle.h4("Text").markup ==\
'<h4 class="card-subtitle text-muted mb-2">Text</h4>'
assert subtitle.h5("Text").markup ==\
'<h5 class="card-subtitle text-muted mb-2">Text</h5>'
assert subtitle.h6("Text").markup ==\
'<h6 class="card-subtitle text-muted mb-2">Text</h6>'
assert subtitle.p("Text").markup ==\
'<p class="card-subtitle text-muted mb-2">Text</p>'
assert subtitle.h1("Text", class_="my-title", attr="attr-value").markup ==\
'<h1 attr="attr-value" class="card-subtitle text-muted mb-2 my-title">Text</h1>'
assert subtitle.h2("Text", class_="my-title", attr="attr-value").markup ==\
'<h2 attr="attr-value" class="card-subtitle text-muted mb-2 my-title">Text</h2>'
assert subtitle.h3("Text", class_="my-title", attr="attr-value").markup ==\
'<h3 attr="attr-value" class="card-subtitle text-muted mb-2 my-title">Text</h3>'
assert subtitle.h4("Text", class_="my-title", attr="attr-value").markup ==\
'<h4 attr="attr-value" class="card-subtitle text-muted mb-2 my-title">Text</h4>'
assert subtitle.h5("Text", class_="my-title", attr="attr-value").markup ==\
'<h5 attr="attr-value" class="card-subtitle text-muted mb-2 my-title">Text</h5>'
assert subtitle.h6("Text", class_="my-title", attr="attr-value").markup ==\
'<h6 attr="attr-value" class="card-subtitle text-muted mb-2 my-title">Text</h6>'
assert subtitle.p("Text", class_="my-title", attr="attr-value").markup ==\
'<p attr="attr-value" class="card-subtitle text-muted mb-2 my-title">Text</p>'
def test_text():
assert text.h1("Text").markup ==\
'<h1 class="card-text">Text</h1>'
assert text.h2("Text").markup ==\
'<h2 class="card-text">Text</h2>'
assert text.h3("Text").markup ==\
'<h3 class="card-text">Text</h3>'
assert text.h4("Text").markup ==\
'<h4 class="card-text">Text</h4>'
assert text.h5("Text").markup ==\
'<h5 class="card-text">Text</h5>'
assert text.h6("Text").markup ==\
'<h6 class="card-text">Text</h6>'
assert text.p("Text").markup ==\
'<p class="card-text">Text</p>'
assert text.h1("Text", class_="my-title", attr="attr-value").markup ==\
'<h1 attr="attr-value" class="card-text my-title">Text</h1>'
assert text.h2("Text", class_="my-title", attr="attr-value").markup ==\
'<h2 attr="attr-value" class="card-text my-title">Text</h2>'
assert text.h3("Text", class_="my-title", attr="attr-value").markup ==\
'<h3 attr="attr-value" class="card-text my-title">Text</h3>'
assert text.h4("Text", class_="my-title", attr="attr-value").markup ==\
'<h4 attr="attr-value" class="card-text my-title">Text</h4>'
assert text.h5("Text", class_="my-title", attr="attr-value").markup ==\
'<h5 attr="attr-value" class="card-text my-title">Text</h5>'
assert text.h6("Text", class_="my-title", attr="attr-value").markup ==\
'<h6 attr="attr-value" class="card-text my-title">Text</h6>'
assert text.p("Text", class_="my-title", attr="attr-value").markup ==\
'<p attr="attr-value" class="card-text my-title">Text</p>'
def test_header():
assert header.h1("Text").markup ==\
'<h1 class="card-header">Text</h1>'
assert header.h2("Text").markup ==\
'<h2 class="card-header">Text</h2>'
assert header.h3("Text").markup ==\
'<h3 class="card-header">Text</h3>'
assert header.h4("Text").markup ==\
'<h4 class="card-header">Text</h4>'
assert header.h5("Text").markup ==\
'<h5 class="card-header">Text</h5>'
assert header.h6("Text").markup ==\
'<h6 class="card-header">Text</h6>'
assert header.p("Text").markup ==\
'<p class="card-header">Text</p>'
assert header.h1("Text", class_="my-title", attr="attr-value").markup ==\
'<h1 attr="attr-value" class="card-header my-title">Text</h1>'
assert header.h2("Text", class_="my-title", attr="attr-value").markup ==\
'<h2 attr="attr-value" class="card-header my-title">Text</h2>'
assert header.h3("Text", class_="my-title", attr="attr-value").markup ==\
'<h3 attr="attr-value" class="card-header my-title">Text</h3>'
assert header.h4("Text", class_="my-title", attr="attr-value").markup ==\
'<h4 attr="attr-value" class="card-header my-title">Text</h4>'
assert header.h5("Text", class_="my-title", attr="attr-value").markup ==\
'<h5 attr="attr-value" class="card-header my-title">Text</h5>'
assert header.h6("Text", class_="my-title", attr="attr-value").markup ==\
'<h6 attr="attr-value" class="card-header my-title">Text</h6>'
assert header.p("Text", class_="my-title", attr="attr-value").markup ==\
'<p attr="attr-value" class="card-header my-title">Text</p>'
def test_footer():
assert footer.h1("Text").markup ==\
'<h1 class="card-footer">Text</h1>'
assert footer.h2("Text").markup ==\
'<h2 class="card-footer">Text</h2>'
assert footer.h3("Text").markup ==\
'<h3 class="card-footer">Text</h3>'
assert footer.h4("Text").markup ==\
'<h4 class="card-footer">Text</h4>'
assert footer.h5("Text").markup ==\
'<h5 class="card-footer">Text</h5>'
assert footer.h6("Text").markup ==\
'<h6 class="card-footer">Text</h6>'
assert footer.p("Text").markup ==\
'<p class="card-footer">Text</p>'
assert footer.h1("Text", class_="my-title", attr="attr-value").markup ==\
'<h1 attr="attr-value" class="card-footer my-title">Text</h1>'
assert footer.h2("Text", class_="my-title", attr="attr-value").markup ==\
'<h2 attr="attr-value" class="card-footer my-title">Text</h2>'
assert footer.h3("Text", class_="my-title", attr="attr-value").markup ==\
'<h3 attr="attr-value" class="card-footer my-title">Text</h3>'
assert footer.h4("Text", class_="my-title", attr="attr-value").markup ==\
'<h4 attr="attr-value" class="card-footer my-title">Text</h4>'
assert footer.h5("Text", class_="my-title", attr="attr-value").markup ==\
'<h5 attr="attr-value" class="card-footer my-title">Text</h5>'
assert footer.h6("Text", class_="my-title", attr="attr-value").markup ==\
'<h6 attr="attr-value" class="card-footer my-title">Text</h6>'
assert footer.p("Text", class_="my-title", attr="attr-value").markup ==\
'<p attr="attr-value" class="card-footer my-title">Text</p>'
def test_TextAlign():
    # TextAlign enum members must map to the Bootstrap 4 text alignment classes.
    assert TextAlign.LEFT == "text-left"
    assert TextAlign.CENTER == "text-center"
    assert TextAlign.RIGHT == "text-right"
def test_Image():
    # card-img-top / card-img-bottom markup, without and with extra attributes;
    # custom classes must be appended after the fixed card image class.
    assert Image.top(src="https://via.placeholder.com/150").markup ==\
        '<img src="https://via.placeholder.com/150" class="card-img-top">'
    assert Image.bottom(src="https://via.placeholder.com/150").markup ==\
        '<img src="https://via.placeholder.com/150" class="card-img-bottom">'
    assert Image.top(src="https://via.placeholder.com/150", class_="my-card-img", attr="attr-value").markup ==\
        '<img src="https://via.placeholder.com/150" attr="attr-value" class="card-img-top my-card-img">'
    assert Image.bottom(src="https://via.placeholder.com/150", class_="my-card-img", attr="attr-value").markup ==\
        '<img src="https://via.placeholder.com/150" attr="attr-value" class="card-img-bottom my-card-img">'
def test_card():
    # <div class="card"> wrapper: empty, with content, and with extra attributes.
    assert card().markup == '<div class="card"></div>'
    assert card("Content").markup == '<div class="card">\nContent\n</div>'
    assert card("Content", class_="my-card", attr="attr-value").markup ==\
        '<div attr="attr-value" class="card my-card">\nContent\n</div>'
def test_body():
    # <div class="card-body"> wrapper: empty, with content, and with extra attributes.
    assert body().markup == '<div class="card-body"></div>'
    assert body("Content").markup == '<div class="card-body">\nContent\n</div>'
    assert body("Content", class_="my-body", attr="value").markup ==\
        '<div attr="value" class="card-body my-body">\nContent\n</div>'
def test_footer_div():
    # <div class="card-footer"> wrapper: empty, with content, and with extra attributes.
    assert footer_div().markup == '<div class="card-footer"></div>'
    assert footer_div("Text").markup == '<div class="card-footer">\nText\n</div>'
    assert footer_div("Text", class_="my-footer", attr="value").markup ==\
        '<div attr="value" class="card-footer my-footer">\nText\n</div>'
def test_header_div():
    # <div class="card-header"> wrapper: empty, with content, and with extra attributes.
    assert header_div().markup == '<div class="card-header"></div>'
    assert header_div("Text").markup == '<div class="card-header">\nText\n</div>'
    assert header_div("Text", class_="my-header", attr="value").markup ==\
        '<div attr="value" class="card-header my-header">\nText\n</div>'
def test_link():
    # <a class="card-link"> element: empty, with content, and with extra attributes.
    assert link().markup == '<a class="card-link"></a>'
    assert link("Content").markup == '<a class="card-link">Content</a>'
    assert link("Content", class_="my-link", attr="value").markup ==\
        '<a attr="value" class="card-link my-link">Content</a>'
| none | 1 | 2.735601 | 3 | |
swizzle.py | anjiro/bearutils | 13 | 6621083 | <reponame>anjiro/bearutils
# coding: utf-8
from objc_util import *
from objc_util import parse_types
import ctypes
import inspect
import sys
def _str_to_bytes(s):
if sys.version_info[0] >= 3:
if isinstance(s,bytes):
return s
else:
return bytes(s,'ascii')
else:
return str(s)
def _bytes_to_str(b):
if sys.version_info[0] >= 3:
if isinstance(b,str):
return b
else:
return str(b,'ascii')
else:
return str(b)
@on_main_thread
def is_swizzled(cls, selector):
new_sel = 'original'+selector
orig_method=c.class_getInstanceMethod(cls.ptr, sel(selector))
new_method=c.class_getInstanceMethod(cls.ptr, sel(new_sel))
c.method_getImplementation.restype=c_void_p
c.method_getImplementation.argtypes=[c_void_p]
if orig_method and \
new_method and \
c.method_getImplementation(orig_method) != c.method_getImplementation(new_method):
return True
@on_main_thread
def unswizzle(cls, selector):
new_sel = 'original'+selector
method_exchangeImplementations=c.method_exchangeImplementations
method_exchangeImplementations.restype=None
method_exchangeImplementations.argtypes=[c_void_p,c_void_p]
orig_method=c.class_getInstanceMethod(cls.ptr, sel(selector))
new_method=c.class_getInstanceMethod(cls.ptr, sel(new_sel))
method_exchangeImplementations(orig_method, new_method)
c.method_getImplementation.restype=c_void_p
c.method_getImplementation.argtypes=[c_void_p]
imp=c.method_getImplementation(new_method)
types=c.method_getTypeEncoding(new_method)
c.class_replaceMethod.argtypes=[c_void_p,c_void_p,c_void_p,c_char_p]
c.class_replaceMethod( cls, sel(selector), imp, types)
@on_main_thread
def swizzle(cls, selector, new_fcn,type_encoding=None,debug=False):
'''swizzles ObjCClass cls's selector with implementation from python new_fcn. new_fcn needs to adjere to ther type encoding of the original, including the two "hidden" arguments _self, _sel.
if a class is already swizzled, this will override swizzled implemetation, and use new method. We could implement a forwarding system, but it becomes hard to unswizzle because there is no way to remove a selector once added.
A method can always get its predecessor by simply prepending original to its selector name.
If the referenced method does not exist, must supply type_encoding.
'''
if not type_encoding:
type_encoding=_str_to_bytes(str(cls.instanceMethodSignatureForSelector_(sel(selector))._typeString()))
else:
type_encoding=_str_to_bytes(type_encoding)
parsed_types = parse_types(_str_to_bytes(type_encoding))
restype = parsed_types[0]
argtypes = parsed_types[1]
# Check if the number of arguments derived from the selector matches the actual function:
try:
argspec=inspect.getargspec(new_fcn.__closure__[0].cell_contents)
except:
argspec = inspect.getargspec(new_fcn)
has_varargs=inspect.getargspec(new_fcn).varargs
if (len(argspec.args) != len(argtypes)) and not has_varargs:
raise ValueError('%s has %i arguments (expected %i)' % (new_fcn, len(argspec.args), len(argtypes)))
for i,arg in enumerate(argtypes):
if arg==ObjCBlock:
print('replace block with voidp')
argtypes[i]=ctypes.c_void_p
IMPTYPE = ctypes.CFUNCTYPE(restype, *argtypes)
imp = IMPTYPE(new_fcn)
retain_global(imp)
if debug:
print(restype)
print(argtypes)
print(selector)
print(cls)
#find rootmost parent
# add new to orig_.... N (N-3) (N-2) (N-1)
# then starting at end, swap up the chain
if not c.class_getInstanceMethod(cls.ptr, sel(selector)):
# just add the selector
new_sel = selector
didAdd=c.class_addMethod(cls.ptr, sel(new_sel), imp, (type_encoding))
return
new_sel = 'original'+selector
didAdd=c.class_addMethod(cls.ptr, sel(new_sel), imp, (type_encoding))
method_exchangeImplementations=c.method_exchangeImplementations
method_exchangeImplementations.restype=None
method_exchangeImplementations.argtypes=[c_void_p,c_void_p]
method_setImplementation=c.method_setImplementation
method_setImplementation.restype=None
method_setImplementation.argtypes=[c_void_p, c_void_p]
if didAdd:
orig_method=c.class_getInstanceMethod(cls.ptr, sel(selector))
new_method=c.class_getInstanceMethod(cls.ptr, sel(new_sel))
method_exchangeImplementations(orig_method, new_method)
else:
# setimp,
orig_method=c.class_getInstanceMethod(cls.ptr, sel(selector))
method_setImplementation(orig_method,imp)
if __name__=='__main__':
import console, editor
t=editor._get_editor_tab()
def saveData(_self,_sel):
'''swizzle savedata. called whenever tab is switched, etc. seems to be called whether or not there are changes, so be sure to check if hasChanges before doing anything. In this case, I always call _original_saveData after, but it would be possible to prevent saves, etc.'''
try:
obj=ObjCInstance(_self)
if obj.hasChanges():
console.hud_alert('saving '+str(obj.filePath()).split('/')[-1])
finally:
obj=ObjCInstance(_self)
original_method=getattr(obj,_bytes_to_str(b'original'+c.sel_getName(_sel)),None)
if original_method:
original_method()
cls=ObjCInstance(c.object_getClass(t.ptr))
swizzle(cls,'saveData',saveData)
| # coding: utf-8
from objc_util import *
from objc_util import parse_types
import ctypes
import inspect
import sys
def _str_to_bytes(s):
if sys.version_info[0] >= 3:
if isinstance(s,bytes):
return s
else:
return bytes(s,'ascii')
else:
return str(s)
def _bytes_to_str(b):
if sys.version_info[0] >= 3:
if isinstance(b,str):
return b
else:
return str(b,'ascii')
else:
return str(b)
@on_main_thread
def is_swizzled(cls, selector):
    """Return True if `selector` on ObjCClass `cls` was swizzled by swizzle().

    Detection relies on swizzle()'s convention of stashing the original
    implementation under the selector 'original<selector>'.  Returns None
    (falsy) when the method is not swizzled.
    """
    new_sel = 'original'+selector
    orig_method=c.class_getInstanceMethod(cls.ptr, sel(selector))
    new_method=c.class_getInstanceMethod(cls.ptr, sel(new_sel))
    c.method_getImplementation.restype=c_void_p
    c.method_getImplementation.argtypes=[c_void_p]
    # Swizzled iff both methods exist and currently point at different IMPs.
    if orig_method and \
       new_method and \
       c.method_getImplementation(orig_method) != c.method_getImplementation(new_method):
        return True
@on_main_thread
def unswizzle(cls, selector):
    """Undo swizzle(): restore `selector` on `cls` to its original IMP.

    Swaps the implementations of <selector> and original<selector> back,
    then re-installs the restored IMP under <selector> via
    class_replaceMethod.  The 'original<selector>' method itself cannot be
    removed (the Objective-C runtime has no method-removal API).
    """
    new_sel = 'original'+selector
    method_exchangeImplementations=c.method_exchangeImplementations
    method_exchangeImplementations.restype=None
    method_exchangeImplementations.argtypes=[c_void_p,c_void_p]
    orig_method=c.class_getInstanceMethod(cls.ptr, sel(selector))
    new_method=c.class_getInstanceMethod(cls.ptr, sel(new_sel))
    method_exchangeImplementations(orig_method, new_method)
    c.method_getImplementation.restype=c_void_p
    c.method_getImplementation.argtypes=[c_void_p]
    imp=c.method_getImplementation(new_method)
    types=c.method_getTypeEncoding(new_method)
    c.class_replaceMethod.argtypes=[c_void_p,c_void_p,c_void_p,c_char_p]
    # NOTE(review): every other runtime call in this module passes cls.ptr;
    # passing the ObjC wrapper object here relies on ctypes parameter
    # conversion -- confirm this is intended and not a latent bug.
    c.class_replaceMethod( cls, sel(selector), imp, types)
@on_main_thread
def swizzle(cls, selector, new_fcn,type_encoding=None,debug=False):
    '''Swizzle ObjCClass `cls`'s `selector` with the implementation from the
    Python callable `new_fcn`.  `new_fcn` must adhere to the type encoding of
    the original method, including the two "hidden" arguments _self, _sel.
    If a class is already swizzled, this overrides the swizzled implementation
    and uses the new method.  (We could implement a forwarding system, but it
    becomes hard to unswizzle because there is no way to remove a selector
    once added.)
    A method can always get its predecessor by simply prepending 'original'
    to its selector name.
    If the referenced method does not exist, `type_encoding` must be supplied.
    '''
    # Derive the ObjC type encoding from the existing method unless supplied.
    if not type_encoding:
        type_encoding=_str_to_bytes(str(cls.instanceMethodSignatureForSelector_(sel(selector))._typeString()))
    else:
        type_encoding=_str_to_bytes(type_encoding)
    parsed_types = parse_types(_str_to_bytes(type_encoding))
    restype = parsed_types[0]
    argtypes = parsed_types[1]
    # Check if the number of arguments derived from the selector matches the actual function:
    try:
        # If new_fcn is a closure (e.g. produced by a decorator), inspect the
        # wrapped function it captured instead.
        argspec=inspect.getargspec(new_fcn.__closure__[0].cell_contents)
    except:
        argspec = inspect.getargspec(new_fcn)
    # NOTE(review): inspect.getargspec is deprecated (removed in Python 3.11),
    # and varargs is re-derived from new_fcn itself rather than from the
    # unwrapped closure used above -- confirm both are intended.
    has_varargs=inspect.getargspec(new_fcn).varargs
    if (len(argspec.args) != len(argtypes)) and not has_varargs:
        raise ValueError('%s has %i arguments (expected %i)' % (new_fcn, len(argspec.args), len(argtypes)))
    # Blocks cannot be expressed directly in the CFUNCTYPE signature; pass
    # them as opaque pointers instead.
    for i,arg in enumerate(argtypes):
        if arg==ObjCBlock:
            print('replace block with voidp')
            argtypes[i]=ctypes.c_void_p
    # Wrap the Python callable as a C function pointer (IMP) and retain a
    # global reference so it is never garbage-collected while installed.
    IMPTYPE = ctypes.CFUNCTYPE(restype, *argtypes)
    imp = IMPTYPE(new_fcn)
    retain_global(imp)
    if debug:
        print(restype)
        print(argtypes)
        print(selector)
        print(cls)
    #find rootmost parent
    # add new to orig_.... N (N-3) (N-2) (N-1)
    # then starting at end, swap up the chain
    if not c.class_getInstanceMethod(cls.ptr, sel(selector)):
        # just add the selector: nothing to preserve, so install directly.
        new_sel = selector
        didAdd=c.class_addMethod(cls.ptr, sel(new_sel), imp, (type_encoding))
        return
    new_sel = 'original'+selector
    didAdd=c.class_addMethod(cls.ptr, sel(new_sel), imp, (type_encoding))
    method_exchangeImplementations=c.method_exchangeImplementations
    method_exchangeImplementations.restype=None
    method_exchangeImplementations.argtypes=[c_void_p,c_void_p]
    method_setImplementation=c.method_setImplementation
    method_setImplementation.restype=None
    method_setImplementation.argtypes=[c_void_p, c_void_p]
    if didAdd:
        # First swizzle: 'original<selector>' was just added holding the NEW
        # imp; exchanging makes <selector> run the new code while the original
        # implementation becomes reachable under 'original<selector>'.
        orig_method=c.class_getInstanceMethod(cls.ptr, sel(selector))
        new_method=c.class_getInstanceMethod(cls.ptr, sel(new_sel))
        method_exchangeImplementations(orig_method, new_method)
    else:
        # setimp, -- already swizzled earlier: overwrite the active IMP in place.
        orig_method=c.class_getInstanceMethod(cls.ptr, sel(selector))
        method_setImplementation(orig_method,imp)
if __name__=='__main__':
    # Demo (Pythonista only): swizzle the editor tab's saveData so a HUD
    # notification appears whenever a tab with unsaved changes is saved.
    import console, editor
    t=editor._get_editor_tab()
    def saveData(_self,_sel):
        '''swizzle savedata. called whenever tab is switched, etc. seems to be called whether or not there are changes, so be sure to check if hasChanges before doing anything. In this case, I always call _original_saveData after, but it would be possible to prevent saves, etc.'''
        try:
            obj=ObjCInstance(_self)
            if obj.hasChanges():
                console.hud_alert('saving '+str(obj.filePath()).split('/')[-1])
        finally:
            # Always chain to the preserved implementation ('originalsaveData')
            # so the actual save still happens, even if the HUD code raised.
            obj=ObjCInstance(_self)
            original_method=getattr(obj,_bytes_to_str(b'original'+c.sel_getName(_sel)),None)
            if original_method:
                original_method()
    # Swizzle on the tab's concrete ObjC class, not the wrapper instance.
    cls=ObjCInstance(c.object_getClass(t.ptr))
    swizzle(cls,'saveData',saveData)
src/__init__.py | Somsubhra/Simplify | 0 | 6621084 | __author__ = 's7a'
| __author__ = 's7a'
| none | 1 | 1.017905 | 1 | |
bazaar/templatetags/bazaar_management.py | meghabhoj/NEWBAZAAR | 0 | 6621085 | from __future__ import unicode_literals
from django import template
from ..listings.stores import stores_loader
register = template.Library()
@register.assignment_tag
def store_publishing_template(store_slug):
return stores_loader.get_store_strategy(store_slug).get_store_publishing_template()
@register.assignment_tag
def store_publishing_list_template(store_slug):
return stores_loader.get_store_strategy(store_slug).get_store_publishing_list_template()
@register.assignment_tag
def store_publishing_can_delete(store_slug):
return True if stores_loader.get_store_strategy(store_slug).get_publishing_delete_action() else False
@register.assignment_tag
def store_publishing_can_update(store_slug):
return True if stores_loader.get_store_strategy(store_slug).get_publishing_update_action() else False
| from __future__ import unicode_literals
from django import template
from ..listings.stores import stores_loader
register = template.Library()
@register.assignment_tag
def store_publishing_template(store_slug):
    # Resolve the template used to render a single publishing for this store.
    return stores_loader.get_store_strategy(store_slug).get_store_publishing_template()
@register.assignment_tag
def store_publishing_list_template(store_slug):
    # Resolve the template used to render the publishing list for this store.
    return stores_loader.get_store_strategy(store_slug).get_store_publishing_list_template()
@register.assignment_tag
def store_publishing_can_delete(store_slug):
    """Return True when the store's strategy defines a publishing delete action."""
    # bool() replaces the redundant ``True if ... else False`` ternary.
    return bool(stores_loader.get_store_strategy(store_slug).get_publishing_delete_action())
@register.assignment_tag
def store_publishing_can_update(store_slug):
    """Return True when the store's strategy defines a publishing update action."""
    # bool() replaces the redundant ``True if ... else False`` ternary.
    return bool(stores_loader.get_store_strategy(store_slug).get_publishing_update_action())
| none | 1 | 1.893168 | 2 | |
src/loader/impl/__init__.py | William9923/IF4072-SentimentClassification | 0 | 6621086 | from src.loader.impl.dataloader import DataLoader | from src.loader.impl.dataloader import DataLoader | none | 1 | 1.168834 | 1 | |
triangular_lattice/fractal_dim_from_box_counting.py | ssh0/growing-string | 0 | 6621087 | <gh_stars>0
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by <NAME>
# 2017-01-27
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import SpanSelector
from optimize import Optimize_powerlaw
import time
def load_data(path):
    """Load a box-counting result archive (.npz).

    Returns the tuple (beta, num_of_strings, N_L, frames, Ls, N): the
    first four values coerced to float/int scalars, followed by the raw
    `Ls` (cutting sizes) and `N` (box counts) arrays.
    """
    archive = np.load(path)
    scalars = (float(archive['beta']),
               int(archive['num_of_strings']),
               int(archive['N_L']),
               int(archive['frames']))
    return scalars + (archive['Ls'], archive['N'])
def _plot_data_for_validation(paths, raw=False):
    """Plot box-counting curves from several result files on one log-log axis.

    With raw=True the stored (Ls, N) points are plotted directly; otherwise
    each curve is first bin-averaged on a logarithmic grid via
    averaging_data() before plotting.
    """
    fig, ax = plt.subplots()
    for path in paths:
        beta, num_of_strings, n_bins, frames, Ls, N = load_data(path)
        if not raw:
            Ls, N = averaging_data(Ls, N, n_bins, scale='log')
        ax.loglog(Ls, N, '.',
                  label=r'$\beta = %2.2f$, $T = %d$' % (beta, frames))
    ax.set_title(r'Box count' if raw else r'Box count (averaged)')
    ax.set_ylabel(r'$N(\delta)$')
    ax.legend(loc='best')
    ax.set_aspect('equal')
    ax.set_xlabel(r'Cutting size $L$')
    plt.show()
def averaging_data(x, y, x_bin, scale='linear'):
    """Bin-average the points (x, y) into `x_bin` bins along x.

    Parameters
    ----------
    x, y : array-like
        Data points of equal length.
    x_bin : int
        Number of bins.
    scale : {'linear', 'log'}
        Whether the bin edges are equally spaced in x or in log(x).

    Returns
    -------
    (X, Y) : tuple of np.ndarray
        Per-bin averages of x and y; empty bins are dropped.

    Raises
    ------
    AttributeError
        If `scale` is neither 'linear' nor 'log'.

    Notes
    -----
    Bins are half-open [left, right), so the point exactly at x_max falls
    outside the last bin (behavior kept from the original implementation).
    """
    x = np.asarray(x)
    y = np.asarray(y)
    x_min, x_max = np.min(x), np.max(x)
    if scale == 'linear':
        x_width = (x_max - x_min) / float(x_bin)
        x_edges = [x_min + x_width * i for i in range(x_bin + 1)]
    elif scale == 'log':
        x_width_log = (np.log(x_max) - np.log(x_min)) / float(x_bin)
        # Bug fix: build x_bin + 1 edges, as in the linear branch, so all
        # x_bin bins exist and the top edge reaches x_max.  Previously
        # range(x_bin) produced one edge too few, silently dropping the
        # last bin's data.
        x_edges = [np.exp(np.log(x_min) + x_width_log * i)
                   for i in range(x_bin + 1)]
    else:
        raise AttributeError("option `scale` must be 'linear' or 'log'")
    X, Y = [], []
    for left, right in zip(x_edges[:-1], x_edges[1:]):
        index = np.where((x >= left) & (x < right))[0]
        if len(index) == 0:
            # Drop empty bins instead of emitting NaN averages.
            continue
        X.append(np.average(x[index]))
        Y.append(np.average(y[index]))
    return np.array(X), np.array(Y)
def get_fractal_dim(path):
    """Interactively estimate the fractal (box-counting) dimension.

    Plots N(delta) vs delta on log-log axes for the archive at `path`.
    The user drags a horizontal span to select the scaling regime; a
    power law is fitted there and D = -exponent is reported on the plot.

    Key bindings: 'a' forgets the current fit line, 'x' saves the figure
    as a PDF and closes the window.

    NOTE(review): this is Python 2 code (print statements,
    dict.has_key) and relies on module-level globals to carry state
    between SpanSelector callbacks.
    """
    beta, num_of_strings, N_r, frames, Ls, N = load_data(path)
    fig, ax = plt.subplots()
    # Ls, N = averaging_data(Ls, N, N_r, scale='log')
    ax.loglog(Ls, N, '.')
    ax.set_aspect('equal')
    ax.set_title(r'Box count' +
                 r'($\beta = {}$, $T = {}$)'.format(beta, frames))
    ax.set_ylabel(r'$N(\delta)$')
    ax.set_xlabel(r'Cutting size $\delta$')
    def onselect(vmin, vmax):
        # Globals persist the fitted line/text between span selections so a
        # new selection can remove the previous fit artifacts.
        global result, selected_index, ln, text, D
        if globals().has_key('ln') and ln:
            ln.remove()
            text.remove()
        selected_index = np.where((Ls >= vmin) & (Ls <= vmax))
        # Fit N ~ c * L^D on the selected range; initial guess D = -1.5.
        optimizer = Optimize_powerlaw(
            args=(Ls[selected_index], N[selected_index]),
            parameters=[1000., -1.5])
        result = optimizer.fitting()
        D = - result['D']
        print "beta = {}, D = {}".format(beta, D)
        optimizer.c = result['c']
        X = Ls[selected_index]
        Y = optimizer.fitted(X)
        # Draw the fit line and annotate D, rotated to follow the line slope.
        ln, = ax.loglog(X, Y, ls='-', marker='', color='k')
        text = ax.text((X[0] + X[-1]) / 2., (Y[0] + Y[-1]) / 2.,
                       r'$D = %2.2f$' % D,
                       ha='center', va='bottom',
                       rotation=np.arctan(result['D']) * (180 / np.pi))
    def press(event):
        global ln
        if event.key == 'a':
            # Forget the current fit so the next selection starts fresh.
            ln = False
        if event.key == 'x':
            # save image
            # fn = "./results/img/fractal_dim/2017-01-27/raw_frames=%d_beta=%.0f" % (frames, beta)
            fn = "./results/img/fractal_dim/2017-01-29/raw_frames=%d_beta=%.0f" % (frames, beta)
            fn += "_" + time.strftime("%y%m%d_%H%M%S")
            # fn += ".png"
            fn += ".pdf"
            plt.savefig(fn, bbox_inches='tight')
            print "[saved] " + fn
            plt.close()
    span = SpanSelector(ax, onselect, direction='horizontal')
    fig.canvas.mpl_connect('key_press_event', press)
    fig.tight_layout()
    plt.show()
def get_paths(fix=None, beta_num=0, frame_num=0):
    """Return paths of box-counting result files for selected conditions.

    fix: None     -> single file at (beta_num, frame_num)
         'beta'   -> all frame counts at one beta (10 paths)
         'frames' -> all beta values at one frame count (6 paths)
    Any other value returns the full 60-path list.
    """
    # Run timestamps of the 2017-01-29 data set, keyed by beta.
    timestamps = {0.: '170131_083442', 2.: '170131_052224',
                  4.: '170131_071317', 6.: '170131_063058',
                  8.: '170131_065856', 10.: '170131_065137'}
    betas = [0., 2., 4., 6., 8., 10.]
    frame_counts = range(200, 2001, 200)
    template = ("./results/data/box_counting/2017-01-29/"
                "beta=%.2f_frames=%d_%s.npz")
    # Ordering matters: beta-major, frames-minor, so that the index of a
    # file is beta_index * 10 + frame_index.
    result_data_paths = [template % (b, f, timestamps[b])
                         for b in betas for f in frame_counts]
    if fix == 'beta':  # all frame counts at a fixed beta
        return result_data_paths[beta_num * 10:(beta_num + 1) * 10]
    elif fix == 'frames':  # all beta values at a fixed frame count
        return [result_data_paths[i * 10 + frame_num] for i in range(6)]
    elif fix is None:
        return [result_data_paths[beta_num * 10 + frame_num]]
    return result_data_paths
if __name__ == '__main__':
    # Index <-> value lookup tables for the saved parameter grid.
    frames_list = [200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000]
    ##              0    1    2    3    4     5     6     7     8     9
    beta_list = [0, 2, 4, 6, 8, 10]
    ##            0  1  2  3  4  5
    # result_data_paths = get_paths(beta_num=0, frame_num=2)
    # frame_num=9 -> T = 2000 frames: one file per beta value.
    result_data_paths = get_paths(fix='frames', frame_num=9)
    # result_data_paths = get_paths(fix='beta', beta_num=2)
    # _plot_data_for_validation(result_data_paths, raw=True)
    # _plot_data_for_validation(result_data_paths)
    # Run the interactive fitting GUI once per selected data file.
    for path in result_data_paths:
        get_fractal_dim(path)
| #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by <NAME>
# 2017-01-27
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import SpanSelector
from optimize import Optimize_powerlaw
import time
def load_data(path):
    """Load a box-counting result archive (.npz) and unpack its fields.

    Returns (beta, num_of_strings, N_L, frames, Ls, N) with the scalar
    fields cast to plain Python numbers; Ls and N stay numpy arrays.
    """
    archive = np.load(path)
    return (float(archive['beta']),
            int(archive['num_of_strings']),
            int(archive['N_L']),
            int(archive['frames']),
            archive['Ls'],
            archive['N'])
def _plot_data_for_validation(paths, raw=False):
    """Overlay box-count curves from several result files on one log-log
    plot for visual checking: raw points (raw=True) or bin-averaged."""
    fig, ax = plt.subplots()
    if raw:  # Plot raw data
        for path in paths:
            beta, num_of_strings, N_L, frames, Ls, N = load_data(path)
            ax.loglog(Ls, N, '.',
                      label=r'$\beta = %2.2f$, $T = %d$' % (beta, frames))
            # NOTE(review): title/ylabel are reset on every iteration;
            # harmless but could be hoisted out of the loop.
            ax.set_title(r'Box count')
            ax.set_ylabel(r'$N(\delta)$')
    else:  # Plot averaged data
        for path in paths:
            beta, num_of_strings, N_r, frames, Ls, N = load_data(path)
            # Average into N_r logarithmically spaced bins before plotting.
            Ls, N = averaging_data(Ls, N, N_r, scale='log')
            ax.loglog(Ls, N, '.',
                      label=r'$\beta = %2.2f$, $T = %d$' % (beta, frames))
            ax.set_title(r'Box count (averaged)')
            ax.set_ylabel(r'$N(\delta)$')
    ax.legend(loc='best')
    ax.set_aspect('equal')
    ax.set_xlabel(r'Cutting size $L$')
    plt.show()
def averaging_data(x, y, x_bin, scale='linear'):
    """Bin x into x_bin intervals (linear or log spacing) and return the
    per-bin averages of x and y as two arrays.  Empty bins are skipped.

    Raises AttributeError if scale is not 'linear' or 'log'.
    """
    x = np.asarray(x)
    y = np.asarray(y)
    x_min, x_max = np.min(x), np.max(x)
    if scale == 'linear':
        x_edges = np.linspace(x_min, x_max, x_bin + 1)
    elif scale == 'log':
        # BUG FIX: the original built only x_bin edges here (range(x_bin)),
        # i.e. x_bin - 1 bins, silently dropping the top of the data range;
        # use x_bin + 1 edges exactly like the 'linear' branch.
        x_edges = np.logspace(np.log10(x_min), np.log10(x_max), x_bin + 1)
    else:
        raise AttributeError("option `scale` must be 'linear' or 'log'")
    X, Y = [], []
    n_bins = len(x_edges) - 1
    for k in range(n_bins):
        left, right = x_edges[k], x_edges[k + 1]
        if k == n_bins - 1:
            # BUG FIX: include the right edge in the last bin so the
            # x_max data point is not excluded (the original's own
            # comment questioned exactly this).
            index = np.where((x >= left) & (x <= right))[0]
        else:
            index = np.where((x >= left) & (x < right))[0]
        if len(index) == 0:
            continue
        X.append(np.average(x[index]))
        Y.append(np.average(y[index]))
    return np.array(X), np.array(Y)
def get_fractal_dim(path):
    """Interactively estimate the box-counting (fractal) dimension D.

    Plots N(delta) vs delta on log-log axes; the user drags a horizontal
    span to choose the power-law fitting range.  Key 'a' forgets the
    current fit line; key 'x' saves the figure to a time-stamped PDF and
    closes it.  NOTE(review): Python 2 only (print statements, has_key).
    """
    beta, num_of_strings, N_r, frames, Ls, N = load_data(path)
    fig, ax = plt.subplots()
    # Ls, N = averaging_data(Ls, N, N_r, scale='log')
    ax.loglog(Ls, N, '.')
    ax.set_aspect('equal')
    ax.set_title(r'Box count' +
                 r'($\beta = {}$, $T = {}$)'.format(beta, frames))
    ax.set_ylabel(r'$N(\delta)$')
    ax.set_xlabel(r'Cutting size $\delta$')

    def onselect(vmin, vmax):
        # Span-selector callback: globals keep the fit state alive
        # across repeated selections on the same figure.
        global result, selected_index, ln, text, D
        if globals().has_key('ln') and ln:
            # Remove the previous fit line and its label before redrawing.
            ln.remove()
            text.remove()
        selected_index = np.where((Ls >= vmin) & (Ls <= vmax))
        optimizer = Optimize_powerlaw(
            args=(Ls[selected_index], N[selected_index]),
            parameters=[1000., -1.5])
        result = optimizer.fitting()
        # The fitted exponent is negative; D is its magnitude.
        D = - result['D']
        print "beta = {}, D = {}".format(beta, D)
        optimizer.c = result['c']
        X = Ls[selected_index]
        Y = optimizer.fitted(X)
        ln, = ax.loglog(X, Y, ls='-', marker='', color='k')
        # Label the line with D, rotated to follow the fitted slope.
        text = ax.text((X[0] + X[-1]) / 2., (Y[0] + Y[-1]) / 2.,
                       r'$D = %2.2f$' % D,
                       ha='center', va='bottom',
                       rotation=np.arctan(result['D']) * (180 / np.pi))

    def press(event):
        # Keyboard handler: 'a' drops the fit line, 'x' saves & closes.
        global ln
        if event.key == 'a':
            ln = False
        if event.key == 'x':
            # save image
            # fn = "./results/img/fractal_dim/2017-01-27/raw_frames=%d_beta=%.0f" % (frames, beta)
            fn = "./results/img/fractal_dim/2017-01-29/raw_frames=%d_beta=%.0f" % (frames, beta)
            fn += "_" + time.strftime("%y%m%d_%H%M%S")
            # fn += ".png"
            fn += ".pdf"
            plt.savefig(fn, bbox_inches='tight')
            print "[saved] " + fn
            plt.close()

    span = SpanSelector(ax, onselect, direction='horizontal')
    fig.canvas.mpl_connect('key_press_event', press)
    fig.tight_layout()
    plt.show()
def get_paths(fix=None, beta_num=0, frame_num=0):
    """Return paths of box-counting result files for selected conditions.

    fix: None     -> single file at (beta_num, frame_num)
         'beta'   -> all frame counts at one beta (10 paths)
         'frames' -> all beta values at one frame count (6 paths)
    Any other value returns the full 60-path list.
    """
    # Run timestamps of the 2017-01-29 data set, keyed by beta.
    timestamps = {0.: '170131_083442', 2.: '170131_052224',
                  4.: '170131_071317', 6.: '170131_063058',
                  8.: '170131_065856', 10.: '170131_065137'}
    betas = [0., 2., 4., 6., 8., 10.]
    frame_counts = range(200, 2001, 200)
    template = ("./results/data/box_counting/2017-01-29/"
                "beta=%.2f_frames=%d_%s.npz")
    # Ordering matters: beta-major, frames-minor, so that the index of a
    # file is beta_index * 10 + frame_index.
    result_data_paths = [template % (b, f, timestamps[b])
                         for b in betas for f in frame_counts]
    if fix == 'beta':  # all frame counts at a fixed beta
        return result_data_paths[beta_num * 10:(beta_num + 1) * 10]
    elif fix == 'frames':  # all beta values at a fixed frame count
        return [result_data_paths[i * 10 + frame_num] for i in range(6)]
    elif fix is None:
        return [result_data_paths[beta_num * 10 + frame_num]]
    return result_data_paths
if __name__ == '__main__':
    # Index <-> value lookup tables for the saved parameter grid.
    frames_list = [200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000]
    ##              0    1    2    3    4     5     6     7     8     9
    beta_list = [0, 2, 4, 6, 8, 10]
    ##            0  1  2  3  4  5
    # result_data_paths = get_paths(beta_num=0, frame_num=2)
    # frame_num=9 -> T = 2000 frames: one file per beta value.
    result_data_paths = get_paths(fix='frames', frame_num=9)
    # result_data_paths = get_paths(fix='beta', beta_num=2)
    # _plot_data_for_validation(result_data_paths, raw=True)
    # _plot_data_for_validation(result_data_paths)
    # Run the interactive fitting GUI once per selected data file.
    for path in result_data_paths:
        get_fractal_dim(path)
get_fractal_dim(path) | en | 0.300253 | #!/usr/bin/env python # -*- coding:utf-8 -*- # # written by <NAME> # 2017-01-27 # Plot raw data # Plot averaged data # x_max のデータは除かれる? # Ls, N = averaging_data(Ls, N, N_r, scale='log') # save image # fn = "./results/img/fractal_dim/2017-01-27/raw_frames=%d_beta=%.0f" % (frames, beta) # fn += ".png" get specific condtion datas filter: 'beta' or 'frames' # ls -1 ./results/data/box_counting/2017-01-27/beta=0.00_frames=*.npz | sort -V # ls -1 ./results/data/box_counting/2017-01-27/beta=2.00_frames=*.npz | sort -V # ls -1 ./results/data/box_counting/2017-01-27/beta=4.00_frames=*.npz | sort -V # ls -1 ./results/data/box_counting/2017-01-27/beta=6.00_frames=*.npz | sort -V # ls -1 ./results/data/box_counting/2017-01-27/beta=8.00_frames=*.npz | sort -V # ls -1 ./results/data/box_counting/2017-01-27/beta=10.00_frames=*.npz | sort -V # "./results/data/box_counting/2017-01-27/beta=0.00_frames=200_170129_033959.npz", # "./results/data/box_counting/2017-01-27/beta=0.00_frames=400_170129_033959.npz", # "./results/data/box_counting/2017-01-27/beta=0.00_frames=600_170129_033959.npz", # "./results/data/box_counting/2017-01-27/beta=0.00_frames=800_170129_033959.npz", # "./results/data/box_counting/2017-01-27/beta=0.00_frames=1000_170129_033959.npz", # "./results/data/box_counting/2017-01-27/beta=0.00_frames=1200_170129_033959.npz", # "./results/data/box_counting/2017-01-27/beta=0.00_frames=1400_170129_033959.npz", # "./results/data/box_counting/2017-01-27/beta=0.00_frames=1600_170129_033959.npz", # "./results/data/box_counting/2017-01-27/beta=0.00_frames=1800_170129_033959.npz", # "./results/data/box_counting/2017-01-27/beta=0.00_frames=2000_170129_033959.npz", # "./results/data/box_counting/2017-01-27/beta=2.00_frames=200_170129_032930.npz", # "./results/data/box_counting/2017-01-27/beta=2.00_frames=400_170129_032930.npz", # "./results/data/box_counting/2017-01-27/beta=2.00_frames=600_170129_032930.npz", # 
"./results/data/box_counting/2017-01-27/beta=2.00_frames=800_170129_032930.npz", # "./results/data/box_counting/2017-01-27/beta=2.00_frames=1000_170129_032930.npz", # "./results/data/box_counting/2017-01-27/beta=2.00_frames=1200_170129_032930.npz", # "./results/data/box_counting/2017-01-27/beta=2.00_frames=1400_170129_032930.npz", # "./results/data/box_counting/2017-01-27/beta=2.00_frames=1600_170129_032930.npz", # "./results/data/box_counting/2017-01-27/beta=2.00_frames=1800_170129_032930.npz", # "./results/data/box_counting/2017-01-27/beta=2.00_frames=2000_170129_032930.npz", # "./results/data/box_counting/2017-01-27/beta=4.00_frames=200_170129_034118.npz", # "./results/data/box_counting/2017-01-27/beta=4.00_frames=400_170129_034118.npz", # "./results/data/box_counting/2017-01-27/beta=4.00_frames=600_170129_034118.npz", # "./results/data/box_counting/2017-01-27/beta=4.00_frames=800_170129_034118.npz", # "./results/data/box_counting/2017-01-27/beta=4.00_frames=1000_170129_034118.npz", # "./results/data/box_counting/2017-01-27/beta=4.00_frames=1200_170129_034118.npz", # "./results/data/box_counting/2017-01-27/beta=4.00_frames=1400_170129_034118.npz", # "./results/data/box_counting/2017-01-27/beta=4.00_frames=1600_170129_034118.npz", # "./results/data/box_counting/2017-01-27/beta=4.00_frames=1800_170129_034118.npz", # "./results/data/box_counting/2017-01-27/beta=4.00_frames=2000_170129_034118.npz", # "./results/data/box_counting/2017-01-27/beta=6.00_frames=200_170129_041933.npz", # "./results/data/box_counting/2017-01-27/beta=6.00_frames=400_170129_041933.npz", # "./results/data/box_counting/2017-01-27/beta=6.00_frames=600_170129_041933.npz", # "./results/data/box_counting/2017-01-27/beta=6.00_frames=800_170129_041933.npz", # "./results/data/box_counting/2017-01-27/beta=6.00_frames=1000_170129_041933.npz", # "./results/data/box_counting/2017-01-27/beta=6.00_frames=1200_170129_041933.npz", # 
"./results/data/box_counting/2017-01-27/beta=6.00_frames=1400_170129_041933.npz", # "./results/data/box_counting/2017-01-27/beta=6.00_frames=1600_170129_041933.npz", # "./results/data/box_counting/2017-01-27/beta=6.00_frames=1800_170129_041933.npz", # "./results/data/box_counting/2017-01-27/beta=6.00_frames=2000_170129_041933.npz", # "./results/data/box_counting/2017-01-27/beta=8.00_frames=200_170129_025417.npz", # "./results/data/box_counting/2017-01-27/beta=8.00_frames=400_170129_025417.npz", # "./results/data/box_counting/2017-01-27/beta=8.00_frames=600_170129_025417.npz", # "./results/data/box_counting/2017-01-27/beta=8.00_frames=800_170129_025417.npz", # "./results/data/box_counting/2017-01-27/beta=8.00_frames=1000_170129_025417.npz", # "./results/data/box_counting/2017-01-27/beta=8.00_frames=1200_170129_025417.npz", # "./results/data/box_counting/2017-01-27/beta=8.00_frames=1400_170129_025417.npz", # "./results/data/box_counting/2017-01-27/beta=8.00_frames=1600_170129_025417.npz", # "./results/data/box_counting/2017-01-27/beta=8.00_frames=1800_170129_025417.npz", # "./results/data/box_counting/2017-01-27/beta=8.00_frames=2000_170129_025417.npz", # "./results/data/box_counting/2017-01-27/beta=10.00_frames=200_170129_035850.npz", # "./results/data/box_counting/2017-01-27/beta=10.00_frames=400_170129_035850.npz", # "./results/data/box_counting/2017-01-27/beta=10.00_frames=600_170129_035850.npz", # "./results/data/box_counting/2017-01-27/beta=10.00_frames=800_170129_035850.npz", # "./results/data/box_counting/2017-01-27/beta=10.00_frames=1000_170129_035850.npz", # "./results/data/box_counting/2017-01-27/beta=10.00_frames=1200_170129_035850.npz", # "./results/data/box_counting/2017-01-27/beta=10.00_frames=1400_170129_035850.npz", # "./results/data/box_counting/2017-01-27/beta=10.00_frames=1600_170129_035850.npz", # "./results/data/box_counting/2017-01-27/beta=10.00_frames=1800_170129_035850.npz", # 
"./results/data/box_counting/2017-01-27/beta=10.00_frames=2000_170129_035850.npz", # fix beta (all frames) # fix frames (all beta) ## 0 1 2 3 4 5 6 7 8 9 ## 0 1 2 3 4 5 # result_data_paths = get_paths(beta_num=0, frame_num=2) # result_data_paths = get_paths(fix='beta', beta_num=2) # _plot_data_for_validation(result_data_paths, raw=True) # _plot_data_for_validation(result_data_paths) | 2.428102 | 2 |
backend_poa_admin/apps/prueba/views.py | lizethlizi/proyecto_backend_POA | 0 | 6621088 | from rest_framework.views import APIView
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
from rest_framework import status
from .models import objetivo
from .serializers import ObjetivoSerializer
class ListaObjetivos(APIView):
    """List endpoint returning at most the first 20 ``objetivo`` records."""

    def get(self, request):
        # Cap the queryset at 20 rows to keep the response payload small.
        queryset = objetivo.objects.all()[:20]
        serialized = ObjetivoSerializer(queryset, many=True)
        return Response(serialized.data)
class GuardarObjetivoGestion(APIView):
    """Create endpoint that persists a new ``objetivo`` from the posted data."""

    def post(self, request):
        # Extract only the fields the serializer expects from the request body.
        payload = {
            'Descripcion': request.data.get("Descripcion"),
            'Resultados': request.data.get("Resultados"),
            'Beneficiarios': request.data.get("Beneficiarios"),
        }
        serializer = ObjetivoSerializer(data=payload)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
# class ListaObjetivos(APIView):
# """LISTA TODOS LOS REGISTROS"""
# def get(self, request, format=None):
# snippets = ExamenGeneralOrina.objects.all()
# serializer = ExamenGeneralOrinaSerializer(snippets, many=True)
# return Response(serializer.data) | from rest_framework.views import APIView
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
from rest_framework import status
from .models import objetivo
from .serializers import ObjetivoSerializer
class ListaObjetivos(APIView):
def get(self, request):
Objetivo = objetivo.objects.all()[:20]
#medico=Medico.objects.all()[:20]
data = ObjetivoSerializer(Objetivo, many=True).data
return Response(data)
class GuardarObjetivoGestion(APIView):
# def post(self, request, id_medico):
def post(self, request):
# Gestion=request.data.get("Gestion")
# fecha = request.data.get("fecha")
Descripcion = request.data.get("Descripcion")
Resultados=request.data.get("Resultados")
Beneficiarios=request.data.get("Beneficiarios")
data = {'Descripcion': Descripcion,'Resultados':Resultados,'Beneficiarios':Beneficiarios} #medico como ide tiene q ir el mismo q con el que pusimos en el modelo
serializer = ObjetivoSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# class ListaObjetivos(APIView):
# """LISTA TODOS LOS REGISTROS"""
# def get(self, request, format=None):
# snippets = ExamenGeneralOrina.objects.all()
# serializer = ExamenGeneralOrinaSerializer(snippets, many=True)
# return Response(serializer.data) | es | 0.386899 | #medico=Medico.objects.all()[:20] # def post(self, request, id_medico): # Gestion=request.data.get("Gestion") # fecha = request.data.get("fecha") #medico como ide tiene q ir el mismo q con el que pusimos en el modelo # class ListaObjetivos(APIView): # """LISTA TODOS LOS REGISTROS""" # def get(self, request, format=None): # snippets = ExamenGeneralOrina.objects.all() # serializer = ExamenGeneralOrinaSerializer(snippets, many=True) # return Response(serializer.data) | 2.16465 | 2 |
src/code/custom_callbacks.py | TerboucheHacene/StyleGAN-pytorch-lightining | 0 | 6621089 | <reponame>TerboucheHacene/StyleGAN-pytorch-lightining<filename>src/code/custom_callbacks.py
import torch
import torch.nn.functional as F
import torch.utils.data as tud
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.utilities import rank_zero_only
from typing import List
from pytorch_lightning.utilities.cli import CALLBACK_REGISTRY
@CALLBACK_REGISTRY
class UpdateBatchSizeDataLoader(Callback):
    """Keeps the datamodule's batch size in sync with the model's current depth."""

    def __init__(self, batch_sizes: List[int]):
        # batch_sizes[d] is the batch size to use while training at depth d.
        super().__init__()
        self.batch_sizes = batch_sizes

    def on_train_epoch_start(self, trainer, pl_module):
        # Look up the batch size configured for the depth being trained now.
        depth = pl_module.current_depth
        trainer.datamodule.set_batch_size(self.batch_sizes[depth])
@CALLBACK_REGISTRY
class UpdateMixingDepth(Callback):
    """Drives progressive growing: advances the model's depth after a fixed
    number of epochs and linearly fades in the new layers via ``alpha``.
    """

    def __init__(
        self, epochs_for_each_depth: List[int], fade_for_each_depth: List[int]
    ) -> None:
        """
        Args:
            epochs_for_each_depth: number of training epochs to spend at each depth.
            fade_for_each_depth: percentage (0-100) of each depth's training
                during which the newly added layers are faded in (alpha ramps
                from 0 to 1).
        """
        super().__init__()
        self.epochs_for_each_depth = epochs_for_each_depth
        self.fade_for_each_depth = fade_for_each_depth
        # Bookkeeping for the depth currently being trained.
        self.n_epochs_current_depth = 0
        self.step_current_depth = 0
        self.last_epoch = 0

    def on_train_epoch_start(self, trainer, pl_module) -> None:
        current_depth = pl_module.current_depth
        # Once this depth has been trained for its allotted epochs, move on.
        if self.n_epochs_current_depth >= self.epochs_for_each_depth[current_depth]:
            self.n_epochs_current_depth = 0
            self.step_current_depth = 0
            pl_module.set_depth(current_depth + 1)

    def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, unused=0):
        current_depth = pl_module.current_depth
        current_epoch = pl_module.current_epoch
        # Detect epoch boundaries to count epochs spent at the current depth.
        if current_epoch > self.last_epoch:
            self.last_epoch = current_epoch
            self.n_epochs_current_depth += 1
        fade = self.fade_for_each_depth[current_depth]
        epochs = self.epochs_for_each_depth[current_depth]
        total_batches = len(trainer.train_dataloader)
        # Number of batches over which alpha ramps linearly from 0 to 1.
        fade_point = int((fade / 100) * epochs * total_batches)
        # Guard fade_point > 0: the original divided 0/0 when fade was 0.
        if fade_point > 0 and self.step_current_depth <= fade_point:
            alpha = self.step_current_depth / fade_point
        else:
            # Past the fade window (or no fade configured): layers fully active.
            alpha = 1.0
        self.step_current_depth += 1
        pl_module.set_alpha(alpha)
| import torch
import torch.nn.functional as F
import torch.utils.data as tud
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.utilities import rank_zero_only
from typing import List
from pytorch_lightning.utilities.cli import CALLBACK_REGISTRY
@CALLBACK_REGISTRY
class UpdateBatchSizeDataLoader(Callback):
def __init__(self, batch_sizes: List[int]):
super().__init__()
self.batch_sizes = batch_sizes
def on_train_epoch_start(self, trainer, pl_module):
current_depth = pl_module.current_depth
trainer.datamodule.set_batch_size(self.batch_sizes[current_depth])
@CALLBACK_REGISTRY
class UpdateMixingDepth(Callback):
def __init__(
self, epochs_for_each_depth: List[int], fade_for_each_depth: List[int]
) -> None:
super().__init__()
self.epochs_for_each_depth = epochs_for_each_depth
self.fade_for_each_depth = fade_for_each_depth
self.n_epochs_current_depth = 0
self.step_current_depth = 0
self.last_epoch = 0
def on_train_epoch_start(self, trainer, pl_module) -> None:
current_depth = pl_module.current_depth
current_epoch = pl_module.current_epoch
epochs = self.epochs_for_each_depth[current_depth]
if self.n_epochs_current_depth >= epochs:
# set next depth
self.n_epochs_current_depth = 0
self.step_current_depth = 0
current_depth += 1
pl_module.set_depth(current_depth)
def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, unused=0):
current_depth = pl_module.current_depth
current_epoch = pl_module.current_epoch
fade = self.fade_for_each_depth[current_depth]
epochs = self.epochs_for_each_depth[current_depth]
if current_epoch > self.last_epoch:
self.last_epoch = current_epoch
self.n_epochs_current_depth += 1
fade = self.fade_for_each_depth[current_depth]
epochs = self.epochs_for_each_depth[current_depth]
total_batches = len(trainer.train_dataloader)
fade_point = int((fade / 100) * epochs * total_batches)
if self.step_current_depth <= fade_point:
alpha = self.step_current_depth / fade_point
else:
alpha = 1.0
self.step_current_depth += 1
pl_module.set_alpha(alpha) | en | 0.629711 | # set next depth | 2.234457 | 2 |
srcs/lcn.py | paozer/patent_classification | 0 | 6621090 | <gh_stars>0
from data_model_handling import import_data, get_level_data, TransformerPipeline
import os
import pandas as pd
import numpy as np
import math
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import LinearSVC, SVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import f1_score
import tensorflow
from keras.models import Sequential
from keras.layers import Dense, Activation, Embedding, Flatten, GlobalMaxPool1D, Dropout, Conv1D
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
from keras.losses import binary_crossentropy
from keras.optimizers import Adam
from keras.models import load_model
from sklearn.metrics import f1_score, precision_score, recall_score
import pickle
os.environ['KMP_DUPLICATE_LIB_OK']='True'
class ParentNode:
    """A parent node in a Local-Classifier-per-Node (LCN) hierarchy.

    Each parent node stores the fitted feature/label transformers for its
    subtree, the names of its child classes, one binary classifier per child
    class (``child_models``), and the child ``ParentNode`` objects themselves
    (``child_nodes``).  Because children are nested recursively, the whole
    hierarchy is reachable from the seed node alone, and predictions of
    neighbouring levels can be merged by matching a child node's name against
    its parent's ``sub_classes`` list.

    Parameters
    ----------
    name : str
        Name of this node.
    level : int
        Depth of this node within the hierarchy.
    transformer_x : TransformerPipeline
        Feature transformer fitted on the training documents.
    count_vec_y : CountVectorizer
        Label vectorizer fitted on the training targets.
    sub_classes : list of str
        Names of all child/target classes of this node.
    """

    def __init__(self, name, level, transformer_x, count_vec_y, sub_classes):
        self.name = name
        self.level = level
        self.transformer_x = transformer_x
        self.count_vec_y = count_vec_y
        self.sub_classes = sub_classes
        self.child_models = []
        self.child_nodes = []

    def add_child_model(self, child_model):
        """Register a binary child classifier (a ``ChildModel``)."""
        self.child_models.append(child_model)

    def add_child_node(self, child_node):
        """Register a child ``ParentNode`` below this node."""
        self.child_nodes.append(child_node)

    def predict(self, df_test, parent_node=None, parent_pred=None):
        """Predict membership probabilities for every subclass of this node.

        Transforms the input documents, collects the positive-class
        probability of each binary child model into one column per subclass,
        and, when a parent prediction is supplied, scales every column by the
        parent's probability for this node — thereby propagating label
        predictions down the hierarchy.

        Parameters
        ----------
        df_test : pandas.DataFrame
            Input data with a ``'document'`` column.
        parent_node : ParentNode, optional
            Parent whose ``sub_classes`` list locates this node's column
            inside ``parent_pred``.
        parent_pred : numpy.ndarray, optional
            Predictions of the parent node on the same input data.

        Returns
        -------
        numpy.ndarray
            One probability column per subclass, optionally weighted by the
            parent-level prediction.
        """
        features = self.transformer_x.transform(df_test['document'])
        # One positive-class probability column per binary child model.
        columns = [child.model.predict_proba(features)[:, 1]
                   for child in self.child_models]
        if columns:
            test_pred = np.stack(columns, axis=1)
        else:
            test_pred = np.zeros((features.shape[0], 0))
        # Weight each column by this node's probability under the parent.
        if parent_node is not None:
            own_index = parent_node.sub_classes.index(self.name)
            test_pred = test_pred * parent_pred[:, own_index][:, np.newaxis]
        return test_pred
class ChildModel:
    """Lightweight container pairing one subclass with its binary classifier.

    Attributes
    ----------
    name : str
        Subclass label this model predicts membership for.
    level : int
        Hierarchy level of the subclass (parent node's level + 1).
    model : object
        Fitted binary classifier that performs the actual prediction.
    """

    def __init__(self, name, level, model):
        self.name = name
        self.level = level
        self.model = model
def get_classification_node(name, level, df_raw):
    """Build and train a ``ParentNode`` for one position in the hierarchy.

    Fetches the training slice for ``name`` at ``level``, fits the feature
    pipeline (token counts -> tf-idf -> truncated SVD) and the label
    vectorizer, then trains one balanced logistic-loss SGD classifier per
    target class and attaches it to the node.

    Parameters
    ----------
    name : str
        Name of the parent node to create.
    level : int
        Hierarchy level of the node; used to cut the training labels to the
        correct length.
    df_raw : pandas.DataFrame
        Raw training data without level adjustment or vectorization applied.

    Returns
    -------
    ParentNode
        The node carrying the fitted transformers and all trained child models.
    """
    # Training slice for this node, shuffled.
    df_train = get_level_data(df_raw, level, name).sample(frac=1)
    n_components = min(300, df_train.shape[0])

    # Fit the feature pipeline: token counts -> tf-idf -> truncated SVD.
    count_vec = CountVectorizer()
    tfidf = TfidfTransformer()
    svd = TruncatedSVD(n_components=n_components)
    features = svd.fit_transform(
        tfidf.fit_transform(count_vec.fit_transform(df_train['document']))
    )
    # Keep the fitted transformers so test data can be processed identically.
    transformer_x = TransformerPipeline([count_vec, tfidf, svd])

    # Vectorize the target labels; the token pattern keeps one-character tokens.
    count_vec_y = CountVectorizer(
        analyzer="word", tokenizer=None, preprocessor=None,
        stop_words=None, token_pattern=r"(?u)\b\w+\b",
    )
    y_train = count_vec_y.fit_transform(df_train['ipcs'])
    target_names = count_vec_y.get_feature_names()

    parent_node = ParentNode(name, level, transformer_x, count_vec_y, target_names)

    features = features.reshape(features.shape[0], n_components)
    # Train one binary ChildModel per target class.
    for column, target in enumerate(target_names):
        print("Target:", target)
        labels = y_train[:, column].copy().toarray().reshape(y_train.shape[0],)
        # predict_proba support is required by ParentNode.predict, hence
        # loss='log'; swap models only if the replacement also supports it.
        classifier = SGDClassifier(
            loss='log', class_weight='balanced',
            learning_rate='optimal', penalty='l2',
        )
        classifier.fit(features, labels)
        parent_node.add_child_model(ChildModel(target, level + 1, classifier))
    return parent_node
import os
import pandas as pd
import numpy as np
import math
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import LinearSVC, SVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import f1_score
import tensorflow
from keras.models import Sequential
from keras.layers import Dense, Activation, Embedding, Flatten, GlobalMaxPool1D, Dropout, Conv1D
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
from keras.losses import binary_crossentropy
from keras.optimizers import Adam
from keras.models import load_model
from sklearn.metrics import f1_score, precision_score, recall_score
import pickle
os.environ['KMP_DUPLICATE_LIB_OK']='True'
class ParentNode:
"""ParentNode
This class represents a parent node in the classification hierarchy of a Local Classifier per Node (LCN)
concept based on the definition of Silla and Freitas [1].
Besides carrying the name and level of the respective node, it serves as a container to save the data transformers
for X and y data that are fit on training data and ready to be applied on test data.
Furthermore, it carries the name of all sub classes, the respective binary child models and the respective child
nodes (which are a ParentNode again). This design which allows to iteratively build a hierarchy from top to bottom.
Once the hierarchy is built, this implementation design allows to access the hierarchy with all nodes only by
referencing the seed node. In addition, this design allows to merge predictions of different levels by matching
the child node's name with the list of sub classes of the parent node.
Parameters
----------
name : string
Name of the node
level : integer
Level of the node within the hierarchy
transformer_x : TransformerPipeline object
Transformer pipeline fit on training X data
count_vec_y : CountVectorizer object
Count vectorizer fit on training y data
sub_classes : list of strings
List with all names of target/ child classes
"""
def __init__(self, name, level, transformer_x, count_vec_y, sub_classes):
self.name = name
self.level = level
self.transformer_x = transformer_x
self.count_vec_y = count_vec_y
self.sub_classes = sub_classes
self.child_models = []
self.child_nodes = []
def add_child_model(self, child_model):
"""Add a child model classifier to the child_models list
Parameters
----------
child_model : ChildModel
An object of the class ChildModel
"""
self.child_models.append(child_model)
def add_child_node(self, child_node):
"""Add a child node to the child_nodes list
Parameters
----------
child_node : ParentNode
An object of the class ParentNode
"""
self.child_nodes.append(child_node)
def predict(self, df_test, parent_node = None, parent_pred = None):
"""Predict labels for all subclasses
After transforming the input data, this method iterates through the list of all child models (binary classifiers) and calls their predict-method which returns
a one-column numpy-array with a prediction regarding their belonging to the respective subclass. Subsequently, all predictions
are aggregated on the parent node level which results in a multi-column numpy array. Furthermore, if a parent_node and parent_pred
is provided, the resulting multi-column array is matched against the respective parent_pred column in order to transfer label
predictions between neighboring levels.
Parameters
----------
df_test : pandas DataFrame
Input data to predict from
parent_node : ParentNode, None
Parent node that carries a string list with all subclass names, which is used to get the respective subclass index in the parent_pred
array
parent_pred : numpy array
Prediction results on the same input data by parent node.
Returns
-------
test_pred : numpy array
Prediction result on input data by this node, that has been matched against the upper level prediction results if provided.
"""
# transform test data and create empty prediction matrix
X_test = self.transformer_x.transform(df_test['document'])
test_pred = np.zeros(shape=(X_test.shape[0], len(self.child_models)))
# iterate through all binary child_models and add their prediction to the prediction matrix
for index, cm in enumerate(self.child_models):
cm_test_pred = cm.model.predict_proba(X_test)
test_pred[:, index] = cm_test_pred[:, 1]
# if parent node extistent, incorporate parent node prediction that corresponds to this node based on a name matching
if parent_node is not None:
parent_column_index = parent_node.sub_classes.index(self.name)
parent_column = parent_pred[:, parent_column_index]
test_pred = test_pred * parent_column[:, np.newaxis]
return test_pred
class ChildModel:
"""ChildModel
This class simply serves as container for a binary classifier model, that is used to generate predictions regarding one subclass
of the respective ParentNode.
Parameters
----------
name : string
Name of the subclass the model represents
level : integer
Level of the parent node within the hierarchy
model : classifier object
Classifier object that is trained and executes the actual binary prediction
"""
def __init__(self, name, level, model):
self.name = name
self.level = level
self.model = model
def get_classification_node(name, level, df_raw):
"""Create a ParentNode object
This method creates a ParentNode object that carries all revelant information, transformer objects and all trained sub models.
Parameters
----------
name : string
Name of the resulting parent node
level : integer
Hierarchy level of the resulting parent node which is required to cut the training data labels to the correct length
df_raw : pandas DataFrame
Raw training data without any level adjustments of labels or vectorization applied
Returns
-------
parent_node: ParentNode
Resulting parent node with all trained child models
"""
# get training data
df_train = get_level_data(df_raw, level, name)
# shuffle training data
df_train = df_train.sample(frac=1)
input_dim = min(300, df_train.shape[0])
X_train = df_train['document']
# process data
count_vec = CountVectorizer()
tfidf = TfidfTransformer()
svd = TruncatedSVD(n_components = input_dim)
X_train_counts = count_vec.fit_transform(X_train)
X_train_tfidf = tfidf.fit_transform(X_train_counts)
X_train_svd = svd.fit_transform(X_train_tfidf)
# save transfomer oipeline for applying it on test data later
transformer_x = TransformerPipeline([count_vec, tfidf, svd])
# vectorize target values
count_vec_y = CountVectorizer(analyzer = "word", tokenizer = None, preprocessor = None, stop_words = None, token_pattern = r"(?u)\b\w+\b")
y_train = count_vec_y.fit_transform(df_train['ipcs'])
y_target_names = count_vec_y.get_feature_names()
# create ParentNode object with processed information
parent_node = ParentNode(name, level, transformer_x, count_vec_y, y_target_names)
# create a binary ChildModel object for every name in y_target_names
for index, target in enumerate(y_target_names):
print("Target:", target)
y_train_child = y_train[:,index].copy()
# define used model
# for usage of alternative model check the support of the predict_proba method --> if not supported upper code has to be adjusted accordingly
model = SGDClassifier(loss='log', class_weight='balanced', learning_rate = 'optimal', penalty='l2')
#model = SGDClassifier(loss='hinge', class_weight='balanced')
# reshape data
X_train_svd = X_train_svd.reshape(X_train_svd.shape[0], input_dim)
y_train_child = y_train_child.toarray().reshape(y_train_child.shape[0],)
# fit model and create ChildModel object
model.fit(X_train_svd, y_train_child)
child_model = ChildModel(target, (level + 1), model)
# add ChildModel object to parent node
parent_node.add_child_model(child_model)
return parent_node | en | 0.763877 | ParentNode This class represents a parent node in the classification hierarchy of a Local Classifier per Node (LCN) concept based on the definition of Silla and Freitas [1]. Besides carrying the name and level of the respective node, it serves as a container to save the data transformers for X and y data that are fit on training data and ready to be applied on test data. Furthermore, it carries the name of all sub classes, the respective binary child models and the respective child nodes (which are a ParentNode again). This design which allows to iteratively build a hierarchy from top to bottom. Once the hierarchy is built, this implementation design allows to access the hierarchy with all nodes only by referencing the seed node. In addition, this design allows to merge predictions of different levels by matching the child node's name with the list of sub classes of the parent node. Parameters ---------- name : string Name of the node level : integer Level of the node within the hierarchy transformer_x : TransformerPipeline object Transformer pipeline fit on training X data count_vec_y : CountVectorizer object Count vectorizer fit on training y data sub_classes : list of strings List with all names of target/ child classes Add a child model classifier to the child_models list Parameters ---------- child_model : ChildModel An object of the class ChildModel Add a child node to the child_nodes list Parameters ---------- child_node : ParentNode An object of the class ParentNode Predict labels for all subclasses After transforming the input data, this method iterates through the list of all child models (binary classifiers) and calls their predict-method which returns a one-column numpy-array with a prediction regarding their belonging to the respective subclass. Subsequently, all predictions are aggregated on the parent node level which results in a multi-column numpy array. 
Furthermore, if a parent_node and parent_pred is provided, the resulting multi-column array is matched against the respective parent_pred column in order to transfer label predictions between neighboring levels. Parameters ---------- df_test : pandas DataFrame Input data to predict from parent_node : ParentNode, None Parent node that carries a string list with all subclass names, which is used to get the respective subclass index in the parent_pred array parent_pred : numpy array Prediction results on the same input data by parent node. Returns ------- test_pred : numpy array Prediction result on input data by this node, that has been matched against the upper level prediction results if provided. # transform test data and create empty prediction matrix # iterate through all binary child_models and add their prediction to the prediction matrix # if parent node extistent, incorporate parent node prediction that corresponds to this node based on a name matching ChildModel This class simply serves as container for a binary classifier model, that is used to generate predictions regarding one subclass of the respective ParentNode. Parameters ---------- name : string Name of the subclass the model represents level : integer Level of the parent node within the hierarchy model : classifier object Classifier object that is trained and executes the actual binary prediction Create a ParentNode object This method creates a ParentNode object that carries all revelant information, transformer objects and all trained sub models. 
Parameters ---------- name : string Name of the resulting parent node level : integer Hierarchy level of the resulting parent node which is required to cut the training data labels to the correct length df_raw : pandas DataFrame Raw training data without any level adjustments of labels or vectorization applied Returns ------- parent_node: ParentNode Resulting parent node with all trained child models # get training data # shuffle training data # process data # save transfomer oipeline for applying it on test data later # vectorize target values # create ParentNode object with processed information # create a binary ChildModel object for every name in y_target_names # define used model # for usage of alternative model check the support of the predict_proba method --> if not supported upper code has to be adjusted accordingly #model = SGDClassifier(loss='hinge', class_weight='balanced') # reshape data # fit model and create ChildModel object # add ChildModel object to parent node | 2.051531 | 2 |
test/test_lincomb_bitwise.py | gxavier38/pysnark | 94 | 6621091 | <gh_stars>10-100
import pytest
from pysnark.runtime import PrivVal
from pysnark.boolean import LinCombBool
class TestLinCombBitwise():
    """Tests for bitwise operators on PySNARK private values (LinComb)."""

    def test_to_bits(self):
        # 5 = 0b0101; to_bits() yields the least-significant bit at index 0.
        bits = PrivVal(5).to_bits()
        assert bits[3].val() == 0
        assert bits[2].val() == 1
        assert bits[1].val() == 0
        assert bits[0].val() == 1

    def test_and(self):
        # AND works with a plain int on either side or two PrivVals: 3 & 7 == 3.
        assert (3 & PrivVal(7)).val() == 3
        assert (PrivVal(7) & 3).val() == 3
        assert (PrivVal(7) & PrivVal(3)).val() == 3
        # AND with zero annihilates, in both operand orders.
        assert (PrivVal(7) & PrivVal(0)).val() == 0
        assert (PrivVal(0) & PrivVal(7)).val() == 0

    def test_or(self):
        # OR: 3 | 7 == 7, regardless of operand order or int/PrivVal mixing.
        assert (3 | PrivVal(7)).val() == 7
        assert (PrivVal(7) | 3).val() == 7
        assert (PrivVal(7) | PrivVal(3)).val() == 7
        # OR with zero is the identity, in both operand orders.
        assert (PrivVal(7) | PrivVal(0)).val() == 7
        assert (PrivVal(0) | PrivVal(7)).val() == 7

    def test_xor(self):
        # XOR: 3 ^ 7 == 4, regardless of operand order or int/PrivVal mixing.
        assert (3 ^ PrivVal(7)).val() == 4
        assert (PrivVal(7) ^ 3).val() == 4
        assert (PrivVal(7) ^ PrivVal(3)).val() == 4
        # XOR with zero is the identity, in both operand orders.
        assert (PrivVal(7) ^ PrivVal(0)).val() == 7
        assert (PrivVal(0) ^ PrivVal(7)).val() == 7
| import pytest
from pysnark.runtime import PrivVal
from pysnark.boolean import LinCombBool
class TestLinCombBitwise():
def test_to_bits(self):
bits = PrivVal(5).to_bits()
assert bits[3].val() == 0
assert bits[2].val() == 1
assert bits[1].val() == 0
assert bits[0].val() == 1
def test_and(self):
assert (3 & PrivVal(7)).val() == 3
assert (PrivVal(7) & 3).val() == 3
assert (PrivVal(7) & PrivVal(3)).val() == 3
assert (PrivVal(7) & PrivVal(0)).val() == 0
assert (PrivVal(0) & PrivVal(7)).val() == 0
def test_or(self):
assert (3 | PrivVal(7)).val() == 7
assert (PrivVal(7) | 3).val() == 7
assert (PrivVal(7) | PrivVal(3)).val() == 7
assert (PrivVal(7) | PrivVal(0)).val() == 7
assert (PrivVal(0) | PrivVal(7)).val() == 7
def test_xor(self):
assert (3 ^ PrivVal(7)).val() == 4
assert (PrivVal(7) ^ 3).val() == 4
assert (PrivVal(7) ^ PrivVal(3)).val() == 4
assert (PrivVal(7) ^ PrivVal(0)).val() == 7
assert (PrivVal(0) ^ PrivVal(7)).val() == 7 | none | 1 | 2.562765 | 3 | |
test.py | LSaldyt/leaps_karel_standalone | 0 | 6621092 | from karel import *
# Smoke-check the karel package: print the names the star-import brought into
# this module, then the public attributes of each submodule so a developer can
# eyeball what the package exposes.
print(dir())
print(dir(generator))
print(dir(karel))
print(dir(karel_supervised))
print(dir(util))
| from karel import *
print(dir())
print(dir(generator))
print(dir(karel))
print(dir(karel_supervised))
print(dir(util))
| none | 1 | 1.427277 | 1 | |
operaciones_list.py | MrInternauta/Python-apuntes | 0 | 6621093 | <reponame>MrInternauta/Python-apuntes
# List basics: creation, concatenation, repetition, mutation, and converting
# between strings and lists.

# An empty list, then grown with append.
mi_lista = []
print(type(mi_lista))
mi_lista.append(1)

# Concatenating two lists with + produces a new list.
mi_lista2 = [2, 3, 4, 5]
mi_lista3 = mi_lista + mi_lista2
print(mi_lista3)

# Repetition: multiplying a list repeats its elements.
mi_lista4 = ['a']
mi_lista5 = mi_lista4 * 10
print(mi_lista5)

# Lists are mutable: replace an element in place.
lista = ['Juan', 'Pedro', 'Pepe']
lista[0] = 'Jose'
print(lista)

# Remove an element by index.
del lista[0]
print(lista)

# String -> list of characters.
casa = 'casa'
lista_casa = list(casa)
print(lista_casa)

# List of characters -> string; reuses lista_casa computed above instead of
# recomputing it (the original rebuilt casa and lista_casa redundantly).
str_casa = ''.join(lista_casa)
print(str_casa)
| mi_lista = []
print(type(mi_lista))
mi_lista.append(1)
mi_lista2 = [2,3,4,5]
mi_lista3 = mi_lista + mi_lista2
print(mi_lista3)
mi_lista4 = ['a']
mi_lista5 = mi_lista4*10
print(mi_lista5)
#Modificar lista
lista = ['Juan', 'Pedro', 'Pepe']
lista[0] = 'Jose'
print(lista)
#Eliminar elemento
del lista[0]
print(lista)
#De String a lista
casa = 'casa'
lista_casa = list(casa) #De string a lista
print(lista_casa)
#De Lista a String y viceversa
casa = 'casa'
lista_casa = list(casa) #De string a lista
str_casa = ''.join(lista_casa)
print(str_casa) | es | 0.311196 | #Modificar lista #Eliminar elemento #De String a lista #De string a lista #De Lista a String y viceversa #De string a lista | 3.789491 | 4 |
covews/data_access/preprocessing/custom_imputer.py | d909b/CovEWS | 16 | 6621094 | """
Copyright (C) 2020 <NAME>, <NAME> Ltd
Copyright (C) 2019 <NAME>, ETH Zurich
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
sklearn.impute.IterativeImputer:
New BSD License
Copyright (c) 2007-2020 The scikit-learn developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
a. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
b. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
c. Neither the name of the Scikit-learn Developers nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
"""
import scipy
import numpy as np
from scipy import stats
from sklearn.base import clone
from distutils.version import LooseVersion
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.linear_model import LogisticRegression
from sklearn.utils import check_array, check_random_state, _safe_indexing
from covews.data_access.meta_data.feature_types import FeatureTypeDiscrete
class MockEstimator(object):
def __init__(self, constant):
self.constant = constant
def predict(self, x):
y_pred = np.ones((len(x),)) * self.constant
return y_pred
class CustomImputer(IterativeImputer):
"""
Custom multiple Imputer based on sklearn.impute.IterativeImputer.
Differentiates between Continuous and Discrete features, whereas IterativeImputer is continuous-only.
"""
def __init__(self,
feature_types,
regression_estimator=None,
classification_estimator=None,
missing_values=np.nan,
sample_posterior=False,
max_iter=10,
tol=1e-3,
n_nearest_features=None,
initial_strategy="mean",
imputation_order='ascending',
skip_complete=False,
min_value=None,
max_value=None,
verbose=0,
random_state=None,
add_indicator=False):
super(CustomImputer, self).__init__(
estimator=regression_estimator,
missing_values=missing_values,
sample_posterior=sample_posterior,
max_iter=max_iter,
tol=tol,
n_nearest_features=n_nearest_features,
initial_strategy=initial_strategy,
imputation_order=imputation_order,
skip_complete=skip_complete,
min_value=min_value,
max_value=max_value,
verbose=verbose,
random_state=random_state,
add_indicator=add_indicator
)
self.classification_estimator = classification_estimator
self.feature_types = feature_types
def _impute_one_feature(self,
X_filled,
mask_missing_values,
feat_idx,
neighbor_feat_idx,
estimator=None,
fit_mode=True):
"""
SOURCE: sklearn.impute.IterativeImputer
Impute a single feature from the others provided.
This function predicts the missing values of one of the features using
the current estimates of all the other features. The ``estimator`` must
support ``return_std=True`` in its ``predict`` method for this function
to work.
Parameters
----------
X_filled : ndarray
Input data with the most recent imputations.
mask_missing_values : ndarray
Input data's missing indicator matrix.
feat_idx : int
Index of the feature currently being imputed.
neighbor_feat_idx : ndarray
Indices of the features to be used in imputing ``feat_idx``.
estimator : object
The estimator to use at this step of the round-robin imputation.
If ``sample_posterior`` is True, the estimator must support
``return_std`` in its ``predict`` method.
If None, it will be cloned from self._estimator.
fit_mode : boolean, default=True
Whether to fit and predict with the estimator or just predict.
Returns
-------
X_filled : ndarray
Input data with ``X_filled[missing_row_mask, feat_idx]`` updated.
estimator : estimator with sklearn API
The fitted estimator used to impute
``X_filled[missing_row_mask, feat_idx]``.
"""
if estimator is None and fit_mode is False:
raise ValueError("If fit_mode is False, then an already-fitted "
"estimator should be passed in.")
is_discrete = isinstance(self.feature_types[feat_idx], FeatureTypeDiscrete)
if estimator is None:
if is_discrete:
# Case: Classification
if self.classification_estimator is None:
estimator = LogisticRegression()
else:
estimator = clone(self.classification_estimator)
else: # Case: Regression
estimator = clone(self._estimator)
missing_row_mask = mask_missing_values[:, feat_idx]
if fit_mode:
X_train = _safe_indexing(X_filled[:, neighbor_feat_idx],
~missing_row_mask)
y_train = _safe_indexing(X_filled[:, feat_idx],
~missing_row_mask)
all_y_same = len(set(y_train)) == 1
if all_y_same:
estimator = MockEstimator(constant=y_train[0])
else:
estimator.fit(X_train, y_train)
# if no missing values, don't predict
if np.sum(missing_row_mask) == 0:
return X_filled, estimator
# get posterior samples if there is at least one missing value
X_test = _safe_indexing(X_filled[:, neighbor_feat_idx],
missing_row_mask)
if self.sample_posterior:
mus, sigmas = estimator.predict(X_test, return_std=True)
imputed_values = np.zeros(mus.shape, dtype=X_filled.dtype)
# two types of problems: (1) non-positive sigmas
# (2) mus outside legal range of min_value and max_value
# (results in inf sample)
positive_sigmas = sigmas > 0
imputed_values[~positive_sigmas] = mus[~positive_sigmas]
mus_too_low = mus < self._min_value
imputed_values[mus_too_low] = self._min_value
mus_too_high = mus > self._max_value
imputed_values[mus_too_high] = self._max_value
# the rest can be sampled without statistical issues
inrange_mask = positive_sigmas & ~mus_too_low & ~mus_too_high
mus = mus[inrange_mask]
sigmas = sigmas[inrange_mask]
a = (self._min_value - mus) / sigmas
b = (self._max_value - mus) / sigmas
if scipy.__version__ < LooseVersion('0.18'):
# bug with vector-valued `a` in old scipy
imputed_values[inrange_mask] = [
stats.truncnorm(a=a_, b=b_,
loc=loc_, scale=scale_).rvs(
random_state=self.random_state_)
for a_, b_, loc_, scale_
in zip(a, b, mus, sigmas)]
else:
truncated_normal = stats.truncnorm(a=a, b=b,
loc=mus, scale=sigmas)
imputed_values[inrange_mask] = truncated_normal.rvs(
random_state=self.random_state_)
else:
imputed_values = estimator.predict(X_test)
imputed_values = np.clip(imputed_values,
self._min_value,
self._max_value)
# update the feature
X_filled[missing_row_mask, feat_idx] = imputed_values
return X_filled, estimator | """
Copyright (C) 2020 <NAME>, <NAME> Ltd
Copyright (C) 2019 <NAME>, ETH Zurich
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
sklearn.impute.IterativeImputer:
New BSD License
Copyright (c) 2007-2020 The scikit-learn developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
a. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
b. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
c. Neither the name of the Scikit-learn Developers nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
"""
import scipy
import numpy as np
from scipy import stats
from sklearn.base import clone
from distutils.version import LooseVersion
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.linear_model import LogisticRegression
from sklearn.utils import check_array, check_random_state, _safe_indexing
from covews.data_access.meta_data.feature_types import FeatureTypeDiscrete
class MockEstimator(object):
def __init__(self, constant):
self.constant = constant
def predict(self, x):
y_pred = np.ones((len(x),)) * self.constant
return y_pred
class CustomImputer(IterativeImputer):
"""
Custom multiple Imputer based on sklearn.impute.IterativeImputer.
Differentiates between Continuous and Discrete features, whereas IterativeImputer is continuous-only.
"""
def __init__(self,
feature_types,
regression_estimator=None,
classification_estimator=None,
missing_values=np.nan,
sample_posterior=False,
max_iter=10,
tol=1e-3,
n_nearest_features=None,
initial_strategy="mean",
imputation_order='ascending',
skip_complete=False,
min_value=None,
max_value=None,
verbose=0,
random_state=None,
add_indicator=False):
super(CustomImputer, self).__init__(
estimator=regression_estimator,
missing_values=missing_values,
sample_posterior=sample_posterior,
max_iter=max_iter,
tol=tol,
n_nearest_features=n_nearest_features,
initial_strategy=initial_strategy,
imputation_order=imputation_order,
skip_complete=skip_complete,
min_value=min_value,
max_value=max_value,
verbose=verbose,
random_state=random_state,
add_indicator=add_indicator
)
self.classification_estimator = classification_estimator
self.feature_types = feature_types
def _impute_one_feature(self,
X_filled,
mask_missing_values,
feat_idx,
neighbor_feat_idx,
estimator=None,
fit_mode=True):
"""
SOURCE: sklearn.impute.IterativeImputer
Impute a single feature from the others provided.
This function predicts the missing values of one of the features using
the current estimates of all the other features. The ``estimator`` must
support ``return_std=True`` in its ``predict`` method for this function
to work.
Parameters
----------
X_filled : ndarray
Input data with the most recent imputations.
mask_missing_values : ndarray
Input data's missing indicator matrix.
feat_idx : int
Index of the feature currently being imputed.
neighbor_feat_idx : ndarray
Indices of the features to be used in imputing ``feat_idx``.
estimator : object
The estimator to use at this step of the round-robin imputation.
If ``sample_posterior`` is True, the estimator must support
``return_std`` in its ``predict`` method.
If None, it will be cloned from self._estimator.
fit_mode : boolean, default=True
Whether to fit and predict with the estimator or just predict.
Returns
-------
X_filled : ndarray
Input data with ``X_filled[missing_row_mask, feat_idx]`` updated.
estimator : estimator with sklearn API
The fitted estimator used to impute
``X_filled[missing_row_mask, feat_idx]``.
"""
if estimator is None and fit_mode is False:
raise ValueError("If fit_mode is False, then an already-fitted "
"estimator should be passed in.")
is_discrete = isinstance(self.feature_types[feat_idx], FeatureTypeDiscrete)
if estimator is None:
if is_discrete:
# Case: Classification
if self.classification_estimator is None:
estimator = LogisticRegression()
else:
estimator = clone(self.classification_estimator)
else: # Case: Regression
estimator = clone(self._estimator)
missing_row_mask = mask_missing_values[:, feat_idx]
if fit_mode:
X_train = _safe_indexing(X_filled[:, neighbor_feat_idx],
~missing_row_mask)
y_train = _safe_indexing(X_filled[:, feat_idx],
~missing_row_mask)
all_y_same = len(set(y_train)) == 1
if all_y_same:
estimator = MockEstimator(constant=y_train[0])
else:
estimator.fit(X_train, y_train)
# if no missing values, don't predict
if np.sum(missing_row_mask) == 0:
return X_filled, estimator
# get posterior samples if there is at least one missing value
X_test = _safe_indexing(X_filled[:, neighbor_feat_idx],
missing_row_mask)
if self.sample_posterior:
mus, sigmas = estimator.predict(X_test, return_std=True)
imputed_values = np.zeros(mus.shape, dtype=X_filled.dtype)
# two types of problems: (1) non-positive sigmas
# (2) mus outside legal range of min_value and max_value
# (results in inf sample)
positive_sigmas = sigmas > 0
imputed_values[~positive_sigmas] = mus[~positive_sigmas]
mus_too_low = mus < self._min_value
imputed_values[mus_too_low] = self._min_value
mus_too_high = mus > self._max_value
imputed_values[mus_too_high] = self._max_value
# the rest can be sampled without statistical issues
inrange_mask = positive_sigmas & ~mus_too_low & ~mus_too_high
mus = mus[inrange_mask]
sigmas = sigmas[inrange_mask]
a = (self._min_value - mus) / sigmas
b = (self._max_value - mus) / sigmas
if scipy.__version__ < LooseVersion('0.18'):
# bug with vector-valued `a` in old scipy
imputed_values[inrange_mask] = [
stats.truncnorm(a=a_, b=b_,
loc=loc_, scale=scale_).rvs(
random_state=self.random_state_)
for a_, b_, loc_, scale_
in zip(a, b, mus, sigmas)]
else:
truncated_normal = stats.truncnorm(a=a, b=b,
loc=mus, scale=sigmas)
imputed_values[inrange_mask] = truncated_normal.rvs(
random_state=self.random_state_)
else:
imputed_values = estimator.predict(X_test)
imputed_values = np.clip(imputed_values,
self._min_value,
self._max_value)
# update the feature
X_filled[missing_row_mask, feat_idx] = imputed_values
return X_filled, estimator | en | 0.734491 | Copyright (C) 2020 <NAME>, <NAME> Ltd Copyright (C) 2019 <NAME>, ETH Zurich Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. sklearn.impute.IterativeImputer: New BSD License Copyright (c) 2007-2020 The scikit-learn developers. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: a. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. b. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. c. Neither the name of the Scikit-learn Developers nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Custom multiple Imputer based on sklearn.impute.IterativeImputer. Differentiates between Continuous and Discrete features, whereas IterativeImputer is continuous-only. SOURCE: sklearn.impute.IterativeImputer Impute a single feature from the others provided. This function predicts the missing values of one of the features using the current estimates of all the other features. The ``estimator`` must support ``return_std=True`` in its ``predict`` method for this function to work. Parameters ---------- X_filled : ndarray Input data with the most recent imputations. mask_missing_values : ndarray Input data's missing indicator matrix. feat_idx : int Index of the feature currently being imputed. neighbor_feat_idx : ndarray Indices of the features to be used in imputing ``feat_idx``. estimator : object The estimator to use at this step of the round-robin imputation. If ``sample_posterior`` is True, the estimator must support ``return_std`` in its ``predict`` method. If None, it will be cloned from self._estimator. fit_mode : boolean, default=True Whether to fit and predict with the estimator or just predict. Returns ------- X_filled : ndarray Input data with ``X_filled[missing_row_mask, feat_idx]`` updated. 
estimator : estimator with sklearn API The fitted estimator used to impute ``X_filled[missing_row_mask, feat_idx]``. # Case: Classification # Case: Regression # if no missing values, don't predict # get posterior samples if there is at least one missing value # two types of problems: (1) non-positive sigmas # (2) mus outside legal range of min_value and max_value # (results in inf sample) # the rest can be sampled without statistical issues # bug with vector-valued `a` in old scipy # update the feature | 1.67527 | 2 |
crypto/coinapi/apps.py | aberrier/crypto-api-back | 0 | 6621095 | from django.apps import AppConfig
class CoinapiConfig(AppConfig):
name = 'coinapi'
| from django.apps import AppConfig
class CoinapiConfig(AppConfig):
name = 'coinapi'
| none | 1 | 1.233177 | 1 | |
backend/atlas/middleware/auth.py | getsentry/atlas | 18 | 6621096 | <reponame>getsentry/atlas
import logging
import sentry_sdk
from django.contrib.auth.models import AnonymousUser
from django.utils.functional import SimpleLazyObject
from atlas.models import User
from atlas.utils.auth import parse_token, security_hash
def get_user(header):
if not header.startswith("Token "):
return AnonymousUser()
token = header.split(" ", 1)[1]
payload = parse_token(token)
if not payload:
return AnonymousUser()
try:
user = User.objects.get(id=payload["uid"])
except (TypeError, KeyError, User.DoesNotExist):
logging.error("auth.invalid-uid", exc_info=True)
return AnonymousUser()
if security_hash(user) != payload["sh"]:
logging.error("auth.invalid-security-hash uid={}".format(payload["uid"]))
return AnonymousUser()
return user
class JWSTokenAuthenticationMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
header = request.META.get("HTTP_AUTHORIZATION")
if header:
request.user = SimpleLazyObject(lambda: get_user(header))
else:
request.user = AnonymousUser()
with sentry_sdk.configure_scope() as scope:
scope.user = (
{"id": str(request.user.id), "email": request.user.email}
if request.user.is_authenticated
else {}
)
return self.get_response(request)
| import logging
import sentry_sdk
from django.contrib.auth.models import AnonymousUser
from django.utils.functional import SimpleLazyObject
from atlas.models import User
from atlas.utils.auth import parse_token, security_hash
def get_user(header):
if not header.startswith("Token "):
return AnonymousUser()
token = header.split(" ", 1)[1]
payload = parse_token(token)
if not payload:
return AnonymousUser()
try:
user = User.objects.get(id=payload["uid"])
except (TypeError, KeyError, User.DoesNotExist):
logging.error("auth.invalid-uid", exc_info=True)
return AnonymousUser()
if security_hash(user) != payload["sh"]:
logging.error("auth.invalid-security-hash uid={}".format(payload["uid"]))
return AnonymousUser()
return user
class JWSTokenAuthenticationMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
header = request.META.get("HTTP_AUTHORIZATION")
if header:
request.user = SimpleLazyObject(lambda: get_user(header))
else:
request.user = AnonymousUser()
with sentry_sdk.configure_scope() as scope:
scope.user = (
{"id": str(request.user.id), "email": request.user.email}
if request.user.is_authenticated
else {}
)
return self.get_response(request) | none | 1 | 2.058265 | 2 | |
OpenCV/Tutorials/readImage.py | carlosfelgarcia/AITraining | 0 | 6621097 | <filename>OpenCV/Tutorials/readImage.py
'''
Created on Aug 3, 2018
@author: User
'''
import cv2
import numpy as np
import matplotlib.pyplot as ptl
img = cv2.imread('Utils/watch.jpg', cv2.IMREAD_GRAYSCALE)
# with CV2
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
#With matplolib
# ptl.imshow(img, cmap='gray', interpolation='bicubic')
# ptl.show()
#Save
cv2.imwrite('Utils/watchGray.jpg', img) | <filename>OpenCV/Tutorials/readImage.py
'''
Created on Aug 3, 2018
@author: User
'''
import cv2
import numpy as np
import matplotlib.pyplot as ptl
img = cv2.imread('Utils/watch.jpg', cv2.IMREAD_GRAYSCALE)
# with CV2
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
#With matplolib
# ptl.imshow(img, cmap='gray', interpolation='bicubic')
# ptl.show()
#Save
cv2.imwrite('Utils/watchGray.jpg', img) | en | 0.459573 | Created on Aug 3, 2018
@author: User # with CV2 #With matplolib # ptl.imshow(img, cmap='gray', interpolation='bicubic') # ptl.show() #Save | 3.372842 | 3 |
spider/DataBase.py | RaspPiTor/JAPPT | 0 | 6621098 | <reponame>RaspPiTor/JAPPT<filename>spider/DataBase.py
from contextlib import suppress
import json
import sys
import dbm
class DataBase(object):
'''A wrapper around the built in dbm library. This class uses json
to allow for non-string data to be stored in the database'''
def __init__(self, dbname, maxcache=10**8):
'''Create a database using given database name and use a cache with a
specified maximium size
'''
self.dbname=dbname
self.db=dbm.open(dbname, 'c')
self.maxcache=maxcache
self.cache={}
def __setitem__(self, key, value):
self.checksize()
self.cache[key]=value
self.commit()
def __getitem__(self, key):
self.checksize()
with suppress(KeyError):
return self.cache[key]
with suppress(KeyError):
res=self.db[json.dumps(key)].decode('utf-8')
res=json.loads(res)
self.cache[key]=res
return res
raise KeyError(key)
def __delitem__(self, key):
with suppress(KeyError):
del self.db[json.dumps(key)]
with suppress(KeyError):
del self.cache[key]
def __contains__(self, key):
self.checksize()
if key in self.cache:
return True
if key in self.db:
self.cache[key]=json.loads(self.db[json.dumps(key)].decode('utf-8'))
return True
return False
def __iter__(self):
for i in self.cache:
yield i
for i in self.db.keys():
i=json.loads(i.decode('utf-8'))
if i not in self.cache:
yield i
def keys(self):
return iter(self)
def update(self, E, **F):
self.checksize()
self.cache.update(E, **F)
def checksize(self):
if sys.getsizeof(self.cache)>self.maxcache:
print('Cache size exeeded')
self.commit()
def commit(self):
'''Save all changes to file and clear the cache'''
for key in self.cache:
self.db[json.dumps(key)]=json.dumps(self.cache[key])
self.db.close()
del self.cache
self.__init__(self.dbname, self.maxcache)
| from contextlib import suppress
import json
import sys
import dbm
class DataBase(object):
'''A wrapper around the built in dbm library. This class uses json
to allow for non-string data to be stored in the database'''
def __init__(self, dbname, maxcache=10**8):
'''Create a database using given database name and use a cache with a
specified maximium size
'''
self.dbname=dbname
self.db=dbm.open(dbname, 'c')
self.maxcache=maxcache
self.cache={}
def __setitem__(self, key, value):
self.checksize()
self.cache[key]=value
self.commit()
def __getitem__(self, key):
self.checksize()
with suppress(KeyError):
return self.cache[key]
with suppress(KeyError):
res=self.db[json.dumps(key)].decode('utf-8')
res=json.loads(res)
self.cache[key]=res
return res
raise KeyError(key)
def __delitem__(self, key):
with suppress(KeyError):
del self.db[json.dumps(key)]
with suppress(KeyError):
del self.cache[key]
def __contains__(self, key):
self.checksize()
if key in self.cache:
return True
if key in self.db:
self.cache[key]=json.loads(self.db[json.dumps(key)].decode('utf-8'))
return True
return False
def __iter__(self):
for i in self.cache:
yield i
for i in self.db.keys():
i=json.loads(i.decode('utf-8'))
if i not in self.cache:
yield i
def keys(self):
return iter(self)
def update(self, E, **F):
self.checksize()
self.cache.update(E, **F)
def checksize(self):
if sys.getsizeof(self.cache)>self.maxcache:
print('Cache size exeeded')
self.commit()
def commit(self):
'''Save all changes to file and clear the cache'''
for key in self.cache:
self.db[json.dumps(key)]=json.dumps(self.cache[key])
self.db.close()
del self.cache
self.__init__(self.dbname, self.maxcache) | en | 0.710763 | A wrapper around the built in dbm library. This class uses json to allow for non-string data to be stored in the database Create a database using given database name and use a cache with a specified maximium size Save all changes to file and clear the cache | 2.85281 | 3 |
tensorpipe/__init__.py | kartik4949/TensorPipe | 89 | 6621099 | <reponame>kartik4949/TensorPipe
import tensorpipe.augment
import tensorpipe.pipe
import tensorpipe.funnels
| import tensorpipe.augment
import tensorpipe.pipe
import tensorpipe.funnels | none | 1 | 1.005026 | 1 | |
CS2/8000_lunar_lander/lunar_lander_clean/ground.py | nealholt/python_programming_curricula | 7 | 6621100 | <reponame>nealholt/python_programming_curricula
import pygame, math
from constants import *
class Ground:
def __init__(self, surface):
self.surface = surface
self.ground_heights = [[0,0],[200,700],[400,200],[600,550],
[800,0],[1000,300],[1200,800],[1400,400],
[1600,600],[1650,600],#Landing pad start and end
[1800,500],[2000,500],[2200,100]]
def getVectorToPad(self, x, y):
'''Use this to figure out trajectory to the pad.'''
midpoint = self.ground_heights[landing_pad_index+1][0] - self.ground_heights[landing_pad_index][0]
return (x - midpoint, y - self.ground_heights[landing_pad_index][1])
def overLandingPad(self,x):
return x>self.ground_heights[landing_pad_index][0] and x<self.ground_heights[landing_pad_index+1][0]
def shipHitGround(self, ship):
points = ship.getCorners(ship.x, ship.y)
for p in points:
if p[1] >= self.getHeightAt(p[0]):
return True
return False
def safeLanding(self, player_ship):
print('Over pad: '+str(self.overLandingPad(player_ship.x)))
print('Collided: '+str(self.shipHitGround(player_ship)))
print('Good dx: '+str(abs(player_ship.dx)<1))
print('Good dy: '+str(player_ship.dy>0 and player_ship.dy<1.5))
print('Good angle: '+str(abs((player_ship.angle%math.pi)-math.pi/2)<math.pi/32))
print('angle: '+str(player_ship.angle))
print('angle difference to pi/2: '+str(abs((player_ship.angle%math.pi)-math.pi/2)))
print('dx: '+str(player_ship.dx))
print('dy: '+str(player_ship.dy))
if self.overLandingPad(player_ship.x) and self.shipHitGround(player_ship):
#Check that the player is facing upright, and
#has minimal dx and sufficiently small dy for a safe landing.
if abs(player_ship.dx)<1 and player_ship.dy>0 and player_ship.dy<1.5:
return abs((player_ship.angle%math.pi)-math.pi/2)<math.pi/32
else:
return False
def rateTheLanding(self, player_ship):
return int(100*(abs(player_ship.dx) + abs(player_ship.dy) + abs((player_ship.angle%math.pi)-math.pi/2)))
def getHeightAt(self, x):
#Get coordinates that x is between and use those coordinates
#as a line to calculate the height of the ground at that x value.
if x<=self.ground_heights[0][0] or x>=self.ground_heights[-1][0]:
return 0
for i in range(len(self.ground_heights)-1):
if x>=self.ground_heights[i][0] and x<self.ground_heights[i+1][0]:
start = self.ground_heights[i]
end = self.ground_heights[i+1]
slope = (end[1]-start[1]) / (end[0]-start[0])
#Point slope form:
return slope*(x-end[0])+end[1]
def draw(self, povx, povy):
x_adjust = self.surface.get_width()/2 - povx
y_adjust = self.surface.get_height()/2 - povy
#Draw the line segments
for i in range(len(self.ground_heights)-1):
coord1 = [self.ground_heights[i][0]+x_adjust, self.ground_heights[i][1]+y_adjust]
coord2 = [self.ground_heights[i+1][0]+x_adjust, self.ground_heights[i+1][1]+y_adjust]
color = white
thickness = 3
if i == landing_pad_index:
color = green
thickness = 5
pygame.draw.line(self.surface, color, coord1, coord2, thickness)
| import pygame, math
from constants import *
class Ground:
    """Polyline terrain for a lunar-lander style game.

    ``self.ground_heights`` is a list of [x, y] vertices sorted by x; the
    segment between indices ``landing_pad_index`` and ``landing_pad_index + 1``
    (from the constants module) is the landing pad.  NOTE(review): y appears
    to grow downward in this coordinate system -- a point has "hit the
    ground" when its y is >= the interpolated terrain height (see
    shipHitGround) -- confirm against the rendering code.
    """
    def __init__(self, surface):
        self.surface = surface
        self.ground_heights = [[0,0],[200,700],[400,200],[600,550],
                               [800,0],[1000,300],[1200,800],[1400,400],
                               [1600,600],[1650,600],#Landing pad start and end
                               [1800,500],[2000,500],[2200,100]]
    def getVectorToPad(self, x, y):
        '''Use this to figure out trajectory to the pad.'''
        # BUG FIX: this previously computed the pad's *width* (end x minus
        # start x) and called it "midpoint"; use the actual midpoint x.
        midpoint = (self.ground_heights[landing_pad_index][0]
                    + self.ground_heights[landing_pad_index+1][0]) / 2
        return (x - midpoint, y - self.ground_heights[landing_pad_index][1])
    def overLandingPad(self, x):
        """True when x lies strictly inside the landing pad segment."""
        return x>self.ground_heights[landing_pad_index][0] and x<self.ground_heights[landing_pad_index+1][0]
    def shipHitGround(self, ship):
        """True when any corner of the ship is at or below the terrain."""
        points = ship.getCorners(ship.x, ship.y)
        for p in points:
            if p[1] >= self.getHeightAt(p[0]):
                return True
        return False
    def safeLanding(self, player_ship):
        """Return True for a gentle, upright touchdown on the pad; falsy otherwise."""
        # Debug output, kept intentionally.
        print('Over pad: '+str(self.overLandingPad(player_ship.x)))
        print('Collided: '+str(self.shipHitGround(player_ship)))
        print('Good dx: '+str(abs(player_ship.dx)<1))
        print('Good dy: '+str(player_ship.dy>0 and player_ship.dy<1.5))
        print('Good angle: '+str(abs((player_ship.angle%math.pi)-math.pi/2)<math.pi/32))
        print('angle: '+str(player_ship.angle))
        print('angle difference to pi/2: '+str(abs((player_ship.angle%math.pi)-math.pi/2)))
        print('dx: '+str(player_ship.dx))
        print('dy: '+str(player_ship.dy))
        if self.overLandingPad(player_ship.x) and self.shipHitGround(player_ship):
            #Check that the player is facing upright, and
            #has minimal dx and sufficiently small dy for a safe landing.
            if abs(player_ship.dx)<1 and player_ship.dy>0 and player_ship.dy<1.5:
                return abs((player_ship.angle%math.pi)-math.pi/2)<math.pi/32
            # BUG FIX: previously fell through and returned None here; make
            # the "touched down too hard" case an explicit False.
            return False
        else:
            return False
    def rateTheLanding(self, player_ship):
        """Score the landing: 0 is perfect, larger is worse."""
        return int(100*(abs(player_ship.dx) + abs(player_ship.dy) + abs((player_ship.angle%math.pi)-math.pi/2)))
    def getHeightAt(self, x):
        """Linearly interpolate the terrain height at x (0 outside the terrain)."""
        #Get coordinates that x is between and use those coordinates
        #as a line to calculate the height of the ground at that x value.
        if x<=self.ground_heights[0][0] or x>=self.ground_heights[-1][0]:
            return 0
        for i in range(len(self.ground_heights)-1):
            if x>=self.ground_heights[i][0] and x<self.ground_heights[i+1][0]:
                start = self.ground_heights[i]
                end = self.ground_heights[i+1]
                slope = (end[1]-start[1]) / (end[0]-start[0])
                #Point slope form:
                return slope*(x-end[0])+end[1]
    def draw(self, povx, povy):
        """Draw the terrain shifted so (povx, povy) maps to the surface centre."""
        x_adjust = self.surface.get_width()/2 - povx
        y_adjust = self.surface.get_height()/2 - povy
        #Draw the line segments
        for i in range(len(self.ground_heights)-1):
            coord1 = [self.ground_heights[i][0]+x_adjust, self.ground_heights[i][1]+y_adjust]
            coord2 = [self.ground_heights[i+1][0]+x_adjust, self.ground_heights[i+1][1]+y_adjust]
            color = white
            thickness = 3
            # Highlight the landing pad segment.
            if i == landing_pad_index:
                color = green
                thickness = 5
            pygame.draw.line(self.surface, color, coord1, coord2, thickness)
src/plot/plot_net.py | Zimiao1025/Sesica | 0 | 6621101 | from random import sample
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
# Gold standard data of positive gene functional associations
# from https://www.inetbio.org/wormnet/downloadnetwork.php
# G = nx.read_edgelist("WormNet.v3.benchmark.txt")
def net_fig(txt_file, fig_path):
    """Draw an association network from a weighted edge list and save it.

    txt_file: whitespace-separated edge list ("nodeA nodeB weight" per line).
    fig_path: output image path.

    NOTE(review): the graph is downsampled (random node removal, low-degree
    pruning), so the figure is not deterministic unless the caller seeds the
    random module -- confirm whether that matters for reproducibility.
    """
    G = nx.read_edgelist(txt_file, data=(("weight", float),))
    # G = nx.read_edgelist("pos_homo_pairs.txt")
    # remove randomly selected nodes (to make example fast)
    num_to_remove = int(len(G) / 1.5)
    nodes = sample(list(G.nodes), num_to_remove)
    G.remove_nodes_from(nodes)
    # remove low-degree nodes
    low_degree = [n for n, d in G.degree() if d < 10]
    G.remove_nodes_from(low_degree)
    # print(list(G.edges(data=True)))
    # exit()
    # largest connected component
    components = nx.connected_components(G)
    largest_component = max(components, key=len)
    H = G.subgraph(largest_component)
    # compute centrality
    # (k=10 samples 10 pivot nodes, so centrality values are approximate)
    centrality = nx.betweenness_centrality(H, k=10, endpoints=True)
    # compute community structure
    lpc = nx.community.label_propagation_communities(H)
    print(lpc)
    # Map each node to the index of the community it belongs to.
    community_index = {n: i for i, com in enumerate(lpc) for n in com}
    print(community_index)
    # ### draw graph ####
    fig, ax = plt.subplots(figsize=(20, 15))
    pos = nx.spring_layout(H, k=0.15, seed=1025)
    # Node colour encodes community; node size encodes betweenness.
    node_color = [community_index[n] for n in H]
    node_size = [v * 20000 for v in centrality.values()]
    nx.draw_networkx(
        H,
        pos=pos,
        with_labels=False,
        node_color=node_color,
        node_size=node_size,
        edge_color="gainsboro",
        alpha=0.4,
    )
    # Title/legend
    font = {"color": "k", "fontweight": "bold", "fontsize": 32}
    ax.set_title("Association network", font)
    # Change font color for legend
    font_new = {"color": "r", "fontsize": 18}
    ax.text(
        0.80,
        0.10,
        "node color = community structure",
        horizontalalignment="center",
        transform=ax.transAxes,
        fontdict=font_new,
    )
    ax.text(
        0.80,
        0.06,
        "node size = betweeness centrality",
        horizontalalignment="center",
        transform=ax.transAxes,
        fontdict=font_new,
    )
    # Resize figure for label readibility
    ax.margins(0.1, 0.05)
    fig.tight_layout()
    plt.axis("off")
    # plt.show()
    plt.savefig(fig_path, bbox_inches='tight')
    plt.close()
def save_txt(graph_type, net_arr, prob_dict, txt_path):
    """Write predicted edges (mean probability > 0.5) as a weighted edge list.

    graph_type: 'hetero' writes "A_i B_j w" lines, anything else "A_i A_j w".
    net_arr: iterable of (node_index_a, node_index_b) pairs.
    prob_dict: mapping of classifier name -> per-edge probability list; the
        per-edge mean over all classifiers becomes the edge weight.
    txt_path: destination path for the whitespace-separated edge list.
    """
    # BUG FIX: np.float was removed in NumPy 1.24; the builtin float is the
    # documented replacement and behaves identically here.
    prob_arr = np.array(list(prob_dict.values()), dtype=float).transpose().mean(axis=1)
    # The second node only gets the 'B' namespace for heterogeneous graphs.
    second_prefix = ' B_' if graph_type == 'hetero' else ' A_'
    with open(txt_path, 'w') as f:
        for i in range(len(net_arr)):
            if prob_arr[i] > 0.5:
                f.write('A_' + str(int(net_arr[i][0])) + second_prefix +
                        str(int(net_arr[i][1])) + ' ' + str(prob_arr[i]) + '\n')
| from random import sample
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
# Gold standard data of positive gene functional associations
# from https://www.inetbio.org/wormnet/downloadnetwork.php
# G = nx.read_edgelist("WormNet.v3.benchmark.txt")
def net_fig(txt_file, fig_path):
G = nx.read_edgelist(txt_file, data=(("weight", float),))
# G = nx.read_edgelist("pos_homo_pairs.txt")
# remove randomly selected nodes (to make example fast)
num_to_remove = int(len(G) / 1.5)
nodes = sample(list(G.nodes), num_to_remove)
G.remove_nodes_from(nodes)
# remove low-degree nodes
low_degree = [n for n, d in G.degree() if d < 10]
G.remove_nodes_from(low_degree)
# print(list(G.edges(data=True)))
# exit()
# largest connected component
components = nx.connected_components(G)
largest_component = max(components, key=len)
H = G.subgraph(largest_component)
# compute centrality
centrality = nx.betweenness_centrality(H, k=10, endpoints=True)
# compute community structure
lpc = nx.community.label_propagation_communities(H)
print(lpc)
community_index = {n: i for i, com in enumerate(lpc) for n in com}
print(community_index)
# ### draw graph ####
fig, ax = plt.subplots(figsize=(20, 15))
pos = nx.spring_layout(H, k=0.15, seed=1025)
node_color = [community_index[n] for n in H]
node_size = [v * 20000 for v in centrality.values()]
nx.draw_networkx(
H,
pos=pos,
with_labels=False,
node_color=node_color,
node_size=node_size,
edge_color="gainsboro",
alpha=0.4,
)
# Title/legend
font = {"color": "k", "fontweight": "bold", "fontsize": 32}
ax.set_title("Association network", font)
# Change font color for legend
font_new = {"color": "r", "fontsize": 18}
ax.text(
0.80,
0.10,
"node color = community structure",
horizontalalignment="center",
transform=ax.transAxes,
fontdict=font_new,
)
ax.text(
0.80,
0.06,
"node size = betweeness centrality",
horizontalalignment="center",
transform=ax.transAxes,
fontdict=font_new,
)
# Resize figure for label readibility
ax.margins(0.1, 0.05)
fig.tight_layout()
plt.axis("off")
# plt.show()
plt.savefig(fig_path, bbox_inches='tight')
plt.close()
def save_txt(graph_type, net_arr, prob_dict, txt_path):
    """Write predicted edges (mean probability > 0.5) as a weighted edge list.

    graph_type: 'hetero' writes "A_i B_j w" lines, anything else "A_i A_j w".
    net_arr: iterable of (node_index_a, node_index_b) pairs.
    prob_dict: mapping of classifier name -> per-edge probability list; the
        per-edge mean over all classifiers becomes the edge weight.
    txt_path: destination path for the whitespace-separated edge list.
    """
    # BUG FIX: np.float was removed in NumPy 1.24; the builtin float is the
    # documented replacement and behaves identically here.
    prob_arr = np.array(list(prob_dict.values()), dtype=float).transpose().mean(axis=1)
    # The second node only gets the 'B' namespace for heterogeneous graphs.
    second_prefix = ' B_' if graph_type == 'hetero' else ' A_'
    with open(txt_path, 'w') as f:
        for i in range(len(net_arr)):
            if prob_arr[i] > 0.5:
                f.write('A_' + str(int(net_arr[i][0])) + second_prefix +
                        str(int(net_arr[i][1])) + ' ' + str(prob_arr[i]) + '\n')
| en | 0.491009 | # Gold standard data of positive gene functional associations # from https://www.inetbio.org/wormnet/downloadnetwork.php # G = nx.read_edgelist("WormNet.v3.benchmark.txt") # G = nx.read_edgelist("pos_homo_pairs.txt") # remove randomly selected nodes (to make example fast) # remove low-degree nodes # print(list(G.edges(data=True))) # exit() # largest connected component # compute centrality # compute community structure # ### draw graph #### # Title/legend # Change font color for legend # Resize figure for label readibility # plt.show() # print(prob_arr) # print(len(prob_arr)) # print(len(net_arr)) | 2.736976 | 3 |
docs/api/escape_latex.py | tzole1155/moai | 10 | 6621102 | <filename>docs/api/escape_latex.py
# URL percent-encoding table for characters that are unsafe in a query string.
__SYMBOL_MAP__ = {
    # '!': '%21',
    '#': '%23',
    '$': '%24',
    '%': '%25',
    '&': '%26',
    '\'': '%27',
    '(': '%28',
    ')': '%29',
    '*': '%2A',
    '+': '%2B',
    ',': '%2C',
    '/': '%2F',
    ':': '%3A',
    ';': '%3B',
    '=': '%3D',
    '?': '%3F',
    '@': '%40',
    '[': '%5B',
    ']': '%5D',
    ' ': '%20',
    '<': '%3C',
    '>': '%3E',
    '\\': '%5C',
    '{': '%7B',
    '}': '%7D',
}
# Pre-escaped LaTeX size commands (\large, \Large, ...).
__SIZE_MAP__ = {
    'large': '%5Clarge',
    'vlarge': '%5CLarge',
    'vvlarge': '%5CLARGE',
    'huge': '%5Chuge',
    'vhuge': '%5CHuge',
}
def escape_math(text):
    """Percent-encode *text* for embedding in a GitHub math-render URL.

    BUG FIX: '%' must be substituted before any other symbol.  The original
    loop followed the dict's insertion order, so substitutions made earlier
    in the loop (e.g. '#' -> '%23') had their '%' re-escaped when the '%'
    entry was reached, producing '%2523' instead of '%23'.
    """
    text = text.replace('%', '%25')
    for symbol, code in __SYMBOL_MAP__.items():
        if symbol != '%':
            text = text.replace(symbol, code)
    return text
#NOTE: https://gist.github.com/a-rodin/fef3f543412d6e1ec5b6cf55bf197d7b
if __name__ == "__main__":
    # Raw LaTeX for various loss functions; pick one below to render.
    wing_loss = r"\begin{equation} wing(x) = \left\{ \begin{array}{ll}w \ln (1 + |x|/\epsilon) & \text{if } |x| < w \\|x| - C & \text{otherwise}\end{array}\right.\end{equation}"
    adaptive_wing_loss = r"\begin{equation} \small AWing(y,\!\hat{y})\! =\! \begin{cases} \omega\! \ln(1\! +\! \displaystyle |\frac{y\!-\!\hat{y}\!}{\epsilon}|^{\alpha-y})\! &\! \text{if } |(y\!-\!\hat{y})|\! <\! \theta \\ A|y-\hat{y}\!| - C & \text{otherwise} \end{cases} \end{equation}"
    soft_wing_loss = r"\begin{equation}\mathrm{Wing}(x)=\left\{ \begin{array}{ll} \omega \ln({1+\frac{|x|}{\epsilon}})& \mathrm{if}\ |x| < \omega \\ |x| - C &\mathrm{otherwise} \end{array} \right. \end{equation}"
    berhu_loss = r"\begin{equation}\mathcal{B}(x) = \begin{cases} |x| & |x| \leq c, \\ \frac{x^2 + c^2}{2c} & |x| > c. \\ \end{cases} \end{equation}"
    berhu_threshold = r"c = \frac{1}{5} \max_i(|\tilde{y}_i - y_i|)"
    std_kl_loss = r"\begin{equation}\mathrm{StandardKL}(\mu,\sigma) = \frac{1}{2} \displaystyle\sum_{i} (1+\log(\sigma_{i}^{2}) -\mu_{i}^{2}-\sigma_{i}^{2})\end{equation}"
    std_kl_beta_loss = r"\begin{equation}\beta\mathrm{-StandardKL}(\mu,\sigma) = \beta \, \, \frac{1}{2} \displaystyle\sum_{i} (1+\log(\sigma_{i}^{2}) -\mu_{i}^{2}-\sigma_{i}^{2})\end{equation}"
    std_kl_capacity_loss = r"\begin{equation}\mathrm{CapacityStandardKL}(\mu,\sigma) = \beta \, \, \, |\,\frac{1}{2} \displaystyle\sum_{i} (1+\log(\sigma_{i}^{2}) -\mu_{i}^{2}-\sigma_{i}^{2})\,-\,C\,|\end{equation}"
    std_kl_robust_loss = r"\begin{equation}\mathrm{RobustStandardKL}(\mu,\sigma) = \sqrt{1 + \big(\frac{1}{2} \displaystyle\sum_{i} (1+\log(\sigma_{i}^{2}) -\mu_{i}^{2}-\sigma_{i}^{2})\big)^2} - 1\end{equation}"
    geodesic_loss = r"\begin{equation}\mathcal{d}(R_1,R_2) = \arccos\frac{trace(R_1R_2^T) - 1}{2}\end{equation}"
    output_string = escape_math(geodesic_loss)
    size = 'huge'
    url = "https://render.githubusercontent.com/render/math?math="
    print(url + __SIZE_MAP__[size] + output_string)
| <filename>docs/api/escape_latex.py
# URL percent-encoding table for characters that are unsafe in a query string.
__SYMBOL_MAP__ = {
    # '!': '%21',
    '#': '%23',
    '$': '%24',
    '%': '%25',
    '&': '%26',
    '\'': '%27',
    '(': '%28',
    ')': '%29',
    '*': '%2A',
    '+': '%2B',
    ',': '%2C',
    '/': '%2F',
    ':': '%3A',
    ';': '%3B',
    '=': '%3D',
    '?': '%3F',
    '@': '%40',
    '[': '%5B',
    ']': '%5D',
    ' ': '%20',
    '<': '%3C',
    '>': '%3E',
    '\\': '%5C',
    '{': '%7B',
    '}': '%7D',
}
# Pre-escaped LaTeX size commands (\large, \Large, ...).
__SIZE_MAP__ = {
    'large': '%5Clarge',
    'vlarge': '%5CLarge',
    'vvlarge': '%5CLARGE',
    'huge': '%5Chuge',
    'vhuge': '%5CHuge',
}
def escape_math(text):
    """Percent-encode *text* for embedding in a GitHub math-render URL.

    BUG FIX: '%' must be substituted before any other symbol.  The original
    loop followed the dict's insertion order, so substitutions made earlier
    in the loop (e.g. '#' -> '%23') had their '%' re-escaped when the '%'
    entry was reached, producing '%2523' instead of '%23'.
    """
    text = text.replace('%', '%25')
    for symbol, code in __SYMBOL_MAP__.items():
        if symbol != '%':
            text = text.replace(symbol, code)
    return text
#NOTE: https://gist.github.com/a-rodin/fef3f543412d6e1ec5b6cf55bf197d7b
if __name__ == "__main__":
    # Raw LaTeX for various loss functions; pick one below to render.
    wing_loss = r"\begin{equation} wing(x) = \left\{ \begin{array}{ll}w \ln (1 + |x|/\epsilon) & \text{if } |x| < w \\|x| - C & \text{otherwise}\end{array}\right.\end{equation}"
    adaptive_wing_loss = r"\begin{equation} \small AWing(y,\!\hat{y})\! =\! \begin{cases} \omega\! \ln(1\! +\! \displaystyle |\frac{y\!-\!\hat{y}\!}{\epsilon}|^{\alpha-y})\! &\! \text{if } |(y\!-\!\hat{y})|\! <\! \theta \\ A|y-\hat{y}\!| - C & \text{otherwise} \end{cases} \end{equation}"
    soft_wing_loss = r"\begin{equation}\mathrm{Wing}(x)=\left\{ \begin{array}{ll} \omega \ln({1+\frac{|x|}{\epsilon}})& \mathrm{if}\ |x| < \omega \\ |x| - C &\mathrm{otherwise} \end{array} \right. \end{equation}"
    berhu_loss = r"\begin{equation}\mathcal{B}(x) = \begin{cases} |x| & |x| \leq c, \\ \frac{x^2 + c^2}{2c} & |x| > c. \\ \end{cases} \end{equation}"
    berhu_threshold = r"c = \frac{1}{5} \max_i(|\tilde{y}_i - y_i|)"
    std_kl_loss = r"\begin{equation}\mathrm{StandardKL}(\mu,\sigma) = \frac{1}{2} \displaystyle\sum_{i} (1+\log(\sigma_{i}^{2}) -\mu_{i}^{2}-\sigma_{i}^{2})\end{equation}"
    std_kl_beta_loss = r"\begin{equation}\beta\mathrm{-StandardKL}(\mu,\sigma) = \beta \, \, \frac{1}{2} \displaystyle\sum_{i} (1+\log(\sigma_{i}^{2}) -\mu_{i}^{2}-\sigma_{i}^{2})\end{equation}"
    std_kl_capacity_loss = r"\begin{equation}\mathrm{CapacityStandardKL}(\mu,\sigma) = \beta \, \, \, |\,\frac{1}{2} \displaystyle\sum_{i} (1+\log(\sigma_{i}^{2}) -\mu_{i}^{2}-\sigma_{i}^{2})\,-\,C\,|\end{equation}"
    std_kl_robust_loss = r"\begin{equation}\mathrm{RobustStandardKL}(\mu,\sigma) = \sqrt{1 + \big(\frac{1}{2} \displaystyle\sum_{i} (1+\log(\sigma_{i}^{2}) -\mu_{i}^{2}-\sigma_{i}^{2})\big)^2} - 1\end{equation}"
    geodesic_loss = r"\begin{equation}\mathcal{d}(R_1,R_2) = \arccos\frac{trace(R_1R_2^T) - 1}{2}\end{equation}"
    output_string = escape_math(geodesic_loss)
    size = 'huge'
    url = "https://render.githubusercontent.com/render/math?math="
    print(url + __SIZE_MAP__[size] + output_string)
| en | 0.281094 | # '!': '%21', #NOTE: https://gist.github.com/a-rodin/fef3f543412d6e1ec5b6cf55bf197d7b | 2.118835 | 2 |
ares/util/ParameterBundles.py | astrojhgu/ares | 1 | 6621103 | <gh_stars>1-10
"""
ParameterBundles.py
Author: <NAME>
Affiliation: UCLA
Created on: Fri Jun 10 11:00:05 PDT 2016
Description:
"""
import numpy as np
from .ReadData import read_lit
from .ParameterFile import pop_id_num
from .ProblemTypes import ProblemType
from .PrintInfo import header, footer, separator, line
def _add_pop_tag(par, num):
    """Return ``par`` suffixed with the population ID tag ``{num}``.

    If ``par`` already carries an ID tag, the tag is replaced with ``num``.
    """
    stem, existing_id = pop_id_num(par)
    base = par if existing_id is None else stem
    return '%s{%i}' % (base, num)
# ---------------------------------------------------------------------------
# Preset parameter sets.  These are registered in the _Bundles lookup at the
# bottom of this section and retrieved via ParameterBundle('pop:...'),
# ('sed:...') or ('physics:...').
# ---------------------------------------------------------------------------

# Star formation tied to the collapsed fraction in halos above Tmin.
_pop_fcoll = {
    'pop_sfr_model': 'fcoll',
    'pop_Tmin': 1e4,
    'pop_Tmax': None,
}

# User-supplied SFRD: a double power law in redshift.
_pop_user_sfrd = {
    'pop_sfr_model': 'sfrd-func',
    'pop_sfrd': 'php[0]',
    'php_func[0]': 'dpl',
    'php_func_var[0]': 'redshift',
    'php_func_par0[0]': 1e-6,
    'php_func_par1[0]': 15.,
    'php_func_par2[0]': -5.,
    'php_func_par3[0]': -8.,
}

# Toy SED: photon yields specified directly rather than via a spectrum.
_sed_toy = {
    'pop_sed_model': False,
    'pop_Nion': 4e3,
    'pop_Nlw': 9690,
    'pop_fX': 1.0,
    'pop_fesc': 0.1,
}

_sed_xi = {
    'pop_sed_model': False,
    'pop_xi_LW': 40.,
    'pop_xi_UV': 969.,
    'pop_xi_XR': 0.1,
}

# Star formation efficiency as a double power law in halo mass.
_pop_sfe = {
    'pop_sfr_model': 'sfe-dpl',
    'pop_fstar': 'php',
    'php_func': 'dpl',
    'php_func_par0': 0.1,
    'php_func_par1': 3e11,
    'php_func_par2': 0.6,
    'php_func_par3': -0.6,
    # Redshift dependent parameters here
}

# Additive power-law-times-exponential extension to the SFE curve.
_pop_sfe_ext = {
    'php_faux': 'plexp',
    'php_faux_var': 'mass',
    'php_faux_meth': 'add',
    'php_faux_par0': 0.005,
    'php_faux_par1': 1e9,
    'php_faux_par2': 0.01,
    'php_faux_par3': 1e10,
}

# Mass-loading-factor star formation model.
_pop_mlf = {
    'pop_sfr_model': 'mlf',
    'pop_fstar': None,
    'pop_mlf': 'php',
    'pop_MAR': 'hmf',
    'php_func': 'dpl',
    'php_func_par0': 0.1,
    'php_func_par1': 1e12,
    'php_func_par2': 0.67,
    'php_func_par3': 0.5,
}

# Emits LW and LyC
_sed_uv = {
    'pop_lya_src': True,
    'pop_ion_src_cgm': True,
    'pop_ion_src_igm': False,
    'pop_heat_src_igm': False,
    'pop_fesc': 0.1,
    'pop_fesc_LW': 1.0,
    'pop_sed': 'pl',
    'pop_alpha': 1.0,
    'pop_Emin': 10.2,
    'pop_Emax': 24.6,
    'pop_EminNorm': 13.6,
    'pop_EmaxNorm': 24.6,
    'pop_yield': 4e3,
    'pop_yield_units': 'photons/baryon',
}

# Variants of the UV SED: LW-only and LyC-only emitters.
_sed_lw = _sed_uv.copy()
_sed_lw['pop_ion_src_cgm'] = False

_sed_lyc = _sed_uv.copy()
_sed_lyc['pop_lya_src'] = False

# Stellar pop + fesc
_pop_synth = {
    'pop_sed': 'eldridge2009',
    'pop_binaries': False,
    'pop_Z': 0.02,
    'pop_Emin': 1,
    'pop_Emax': 1e2,
    'pop_yield': 'from_sed',
}

# Emits X-rays
_sed_xr = {
    'pop_lya_src': False,
    'pop_ion_src_cgm': False,
    'pop_ion_src_igm': True,
    'pop_heat_src_cgm': False,
    'pop_heat_src_igm': True,
    'pop_sed': 'pl',
    'pop_alpha': -1.5,
    'pop_logN': -np.inf,
    'pop_Emin': 2e2,
    'pop_Emax': 3e4,
    'pop_EminNorm': 5e2,
    'pop_EmaxNorm': 8e3,
    'pop_Ex': 500.,
    'pop_yield': 2.6e39,
    'pop_yield_units': 'erg/s/SFR',
}

# Radiative-transfer settings for the X-ray background ...
_crte_xrb = {
    'pop_solve_rte': True,
    'pop_tau_Nz': 400,
    'pop_approx_tau': 'neutral',
}

# ... and for the Lyman-Werner background (solve over 10.2-13.6 eV only).
_crte_lwb = _crte_xrb.copy()
_crte_lwb['pop_solve_rte'] = (10.2, 13.6)
_crte_lwb['pop_approx_tau'] = True

# Some different spectral models
_uvsed_toy = dict(pop_yield=4000, pop_yield_units='photons/b',
    pop_Emin=10.2, pop_Emax=24.6, pop_EminNorm=13.6, pop_EmaxNorm=24.6)

_uvsed_bpass = dict(pop_sed='eldridge2009', pop_binaries=False, pop_Z=0.02,
    pop_Emin=10.2, pop_Emax=24.6, pop_EminNorm=13.6, pop_EmaxNorm=24.6)

_uvsed_s99 = _uvsed_bpass.copy()
_uvsed_s99['pop_sed'] = 'leitherer1999'

# X-ray SED variants: multi-color disk and pure power law.
_mcd = _sed_xr.copy()
_mcd['pop_sed'] = 'mcd'
_pl = _mcd.copy()
_pl['pop_sed'] = 'pl'

# Registry consulted by ParameterBundle when seeded from a preset name.
_Bundles = {
    'pop': {'fcoll': _pop_fcoll, 'sfe-dpl': _pop_sfe, 'sfe-func': _pop_sfe,
            'sfrd-func': _pop_user_sfrd, 'sfe-pl-ext': _pop_sfe_ext},
    'sed': {'uv': _sed_uv, 'lw': _sed_lw, 'lyc': _sed_lyc,
            'xray': _sed_xr, 'pl': _pl, 'mcd': _mcd, 'toy': _sed_toy,
            'bpass': _uvsed_bpass, 's99': _uvsed_s99, 'xi': _sed_xi},
    'physics': {'xrb': _crte_xrb, 'lwb': _crte_lwb},
}
class ParameterBundle(dict):
    """Dictionary of ARES parameters with arithmetic composition.

    A bundle is either seeded from a named preset ('pop:fcoll', 'sed:uv',
    'prob:<number>' or '<litsource>:<name>') or built directly from keyword
    arguments.  Bundles combine with ``+`` (disjoint union) and ``-`` (key
    removal), and setting ``num`` tags every key with a population ID
    suffix, e.g. 'pop_Tmin{0}'.
    """
    def __init__(self, bundle=None, id_num=None, bset=None, **kwargs):
        self.bundle = bundle
        self.kwargs = kwargs
        # Default to the module-level registry of presets.
        if bset is None:
            self.bset = _Bundles
        else:
            self.bset = bset
        # `bundle` should be a string naming a preset.
        if bundle is not None:
            self._initialize(bundle, **kwargs)
            if id_num is not None:
                self.num = id_num
        else:
            for key in kwargs:
                self[key] = kwargs[key]
    def _initialize(self, bundle, **kwargs):
        """Replace the current contents with the preset named by `bundle`."""
        # Clear out existing entries.  list() is required because we mutate
        # the dict while iterating (a dict view would raise RuntimeError).
        for key in list(self.keys()):
            del self[key]
        # Assume format: "modeltype:model", e.g., "pop:fcoll" or "sed:uv"
        pre, post = bundle.split(':')
        if pre in self.bset.keys():
            kw = self.bset[pre][post]
        elif pre == 'prob':
            kw = ProblemType(float(post))
        else:
            kw = read_lit(pre).__dict__[post]
        for key in kw.keys():
            self[key] = kw[key]
    def __getattr__(self, name):
        # Attribute access falls back to item access (bundle.pop_Tmin).
        # BUG FIX: raise AttributeError rather than KeyError for missing
        # names so hasattr() (used by the `num` getter) works correctly.
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)
    def __add__(self, other):
        """Merge two bundles into a new one; duplicate keys are an error."""
        tmp = self.copy()
        # Make sure to not overwrite anything here!
        for key in other:
            if key in tmp:
                raise KeyError('%s supplied more than once!' % key)
            tmp[key] = other[key]
        return ParameterBundle(**tmp)
    def __sub__(self, other):
        """Return a copy of self with all of `other`'s keys removed."""
        tmp1 = self.copy()
        for key in other:
            del tmp1[key]
        return ParameterBundle(**tmp1)
    @property
    def num(self):
        """Population ID number applied to this bundle's keys (None if untagged)."""
        if not hasattr(self, '_num'):
            self._num = None
        return self._num
    @num.setter
    def num(self, value):
        assert value % 1 == 0
        # BUG FIX: the setter stored the value in self._value, so the getter
        # (which reads self._num) never reflected it.
        self._num = value
        # list() because each key is popped and re-inserted with a '{value}'
        # population tag while we iterate.
        for key in list(self.keys()):
            self[_add_pop_tag(key, value)] = self.pop(key)
    @property
    def Npops(self):
        """ Number of distinct populations represented. """
        return len(self.pop_ids)
    @property
    def pop_ids(self):
        """ List of ID numbers -- one for each population."""
        pops = []
        for key in self:
            prefix, idnum = pop_id_num(key)
            if idnum not in pops:
                pops.append(idnum)
        return pops
    def link_sfrd_to(self, num):
        """Mark this population's SFRD as linked to population `num`."""
        if self.num is not None:
            self['pop_tunnel{%i}' % self.num] = num
        else:
            self['pop_tunnel'] = num
    @property
    def info(self):
        """ Print out info about this bundle. """
        header('Bundle Info')
        # BUG FIX: this used Py2 print statements, referenced an undefined
        # self.base attribute (now self.bundle), and `found` only reflected
        # the last key tested instead of any key.
        found = False
        for key in self.kwargs.keys():
            if key == self.bundle:
                found = True
                print(line('*%s*' % self.bundle))
            else:
                print(line(key))
        if not found:
            print(line('*%s*' % self.bundle))
        separator()
        print(line('Run \'reinitialize\' with one of the above as argument to change.'))
        footer()
    @property
    def orphans(self):
        """
        Return dictionary of parameters that aren't associated with a population.
        """
        tmp = {}
        for par in self:
            prefix, idnum = pop_id_num(par)
            if idnum is None:
                tmp[par] = self[par]
        return tmp
    def pars_by_pop(self, num, strip_id=False):
        """
        Return dictionary of parameters associated with population `num`.
        """
        tmp = {}
        for par in self:
            prefix, idnum = pop_id_num(par)
            if idnum == num:
                if strip_id:
                    tmp[prefix] = self[par]
                else:
                    tmp[par] = self[par]
        return tmp
# Shorthand alias plus a few pre-assembled multi-population simulation bundles.
_PB = ParameterBundle
# Two-population global-signal setup: UV source (pop 0) + X-ray source (pop 1).
_uv_pop = _PB('pop:fcoll', id_num=0) + _PB('sed:uv', id_num=0)
_xr_pop = _PB('pop:fcoll', id_num=1) + _PB('sed:xray', id_num=1)
# Four-parameter setup: separate LW (0), LyC (1) and X-ray (2) populations.
_gs_4par = _PB('pop:fcoll', id_num=0) + _PB('sed:lw', id_num=0) \
    + _PB('pop:fcoll', id_num=1) + _PB('sed:lyc', id_num=1) \
    + _PB('pop:fcoll', id_num=2) + _PB('sed:xray', id_num=2)
# Register the simulation presets so ParameterBundle('sim:...') works.
_tmp = {'gs_2pop': _uv_pop+_xr_pop, 'gs_4par': _gs_4par}
_Bundles['sim'] = _tmp
| """
ParameterBundles.py
Author: <NAME>
Affiliation: UCLA
Created on: Fri Jun 10 11:00:05 PDT 2016
Description:
"""
import numpy as np
from .ReadData import read_lit
from .ParameterFile import pop_id_num
from .ProblemTypes import ProblemType
from .PrintInfo import header, footer, separator, line
def _add_pop_tag(par, num):
"""
Add a population ID tag to each parameter.
"""
prefix, idnum = pop_id_num(par)
if idnum is not None:
return '%s{%i}' % (prefix, num)
else:
return '%s{%i}' % (par, num)
_pop_fcoll = \
{
'pop_sfr_model': 'fcoll',
'pop_Tmin': 1e4,
'pop_Tmax': None,
}
_pop_user_sfrd = \
{
'pop_sfr_model': 'sfrd-func',
'pop_sfrd': 'php[0]',
'php_func[0]': 'dpl',
'php_func_var[0]': 'redshift',
'php_func_par0[0]': 1e-6,
'php_func_par1[0]': 15.,
'php_func_par2[0]': -5.,
'php_func_par3[0]': -8.,
}
_sed_toy = \
{
'pop_sed_model': False,
'pop_Nion': 4e3,
'pop_Nlw': 9690,
'pop_fX': 1.0,
'pop_fesc': 0.1,
}
_sed_xi = \
{
'pop_sed_model': False,
'pop_xi_LW': 40.,
'pop_xi_UV': 969.,
'pop_xi_XR': 0.1,
}
_pop_sfe = \
{
'pop_sfr_model': 'sfe-dpl',
'pop_fstar': 'php',
'php_func': 'dpl',
'php_func_par0': 0.1,
'php_func_par1': 3e11,
'php_func_par2': 0.6,
'php_func_par3': -0.6,
# Redshift dependent parameters here
}
_pop_sfe_ext = \
{
'php_faux': 'plexp',
'php_faux_var': 'mass',
'php_faux_meth': 'add',
'php_faux_par0': 0.005,
'php_faux_par1': 1e9,
'php_faux_par2': 0.01,
'php_faux_par3': 1e10,
}
_pop_mlf = \
{
'pop_sfr_model': 'mlf',
'pop_fstar': None,
'pop_mlf': 'php',
'pop_MAR': 'hmf',
'php_func': 'dpl',
'php_func_par0': 0.1,
'php_func_par1': 1e12,
'php_func_par2': 0.67,
'php_func_par3': 0.5,
}
_sed_uv = \
{
# Emits LW and LyC
"pop_lya_src": True,
"pop_ion_src_cgm": True,
"pop_ion_src_igm": False,
"pop_heat_src_igm": False,
"pop_fesc": 0.1,
"pop_fesc_LW": 1.0,
'pop_sed': 'pl',
'pop_alpha': 1.0,
"pop_Emin": 10.2,
"pop_Emax": 24.6,
"pop_EminNorm": 13.6,
"pop_EmaxNorm": 24.6,
"pop_yield": 4e3,
"pop_yield_units": 'photons/baryon',
}
_sed_lw = _sed_uv.copy()
_sed_lw['pop_ion_src_cgm'] = False
_sed_lyc = _sed_uv.copy()
_sed_lyc['pop_lya_src'] = False
_pop_synth = \
{
# Stellar pop + fesc
'pop_sed': 'eldridge2009',
'pop_binaries': False,
'pop_Z': 0.02,
'pop_Emin': 1,
'pop_Emax': 1e2,
'pop_yield': 'from_sed',
}
_sed_xr = \
{
# Emits X-rays
"pop_lya_src": False,
"pop_ion_src_cgm": False,
"pop_ion_src_igm": True,
"pop_heat_src_cgm": False,
"pop_heat_src_igm": True,
"pop_sed": 'pl',
"pop_alpha": -1.5,
'pop_logN': -np.inf,
"pop_Emin": 2e2,
"pop_Emax": 3e4,
"pop_EminNorm": 5e2,
"pop_EmaxNorm": 8e3,
"pop_Ex": 500.,
"pop_yield": 2.6e39,
"pop_yield_units": 'erg/s/SFR',
}
_crte_xrb = \
{
"pop_solve_rte": True,
"pop_tau_Nz": 400,
"pop_approx_tau": 'neutral',
}
_crte_lwb = _crte_xrb.copy()
_crte_lwb['pop_solve_rte'] = (10.2, 13.6)
_crte_lwb['pop_approx_tau'] = True
# Some different spectral models
_uvsed_toy = dict(pop_yield=4000, pop_yield_units='photons/b',
pop_Emin=10.2, pop_Emax=24.6, pop_EminNorm=13.6, pop_EmaxNorm=24.6)
_uvsed_bpass = dict(pop_sed='eldridge2009', pop_binaries=False, pop_Z=0.02,
pop_Emin=10.2, pop_Emax=24.6, pop_EminNorm=13.6, pop_EmaxNorm=24.6)
_uvsed_s99 = _uvsed_bpass.copy()
_uvsed_s99['pop_sed'] = 'leitherer1999'
_mcd = _sed_xr.copy()
_mcd['pop_sed'] = 'mcd'
_pl = _mcd.copy()
_pl['pop_sed'] = 'pl'
_Bundles = \
{
'pop': {'fcoll': _pop_fcoll, 'sfe-dpl': _pop_sfe, 'sfe-func': _pop_sfe,
'sfrd-func': _pop_user_sfrd, 'sfe-pl-ext': _pop_sfe_ext},
'sed': {'uv': _sed_uv, 'lw': _sed_lw, 'lyc': _sed_lyc,
'xray':_sed_xr, 'pl': _pl, 'mcd': _mcd, 'toy': _sed_toy,
'bpass': _uvsed_bpass, 's99': _uvsed_s99, 'xi': _sed_xi},
'physics': {'xrb': _crte_xrb, 'lwb': _crte_lwb},
}
class ParameterBundle(dict):
def __init__(self, bundle=None, id_num=None, bset=None, **kwargs):
self.bundle = bundle
self.kwargs = kwargs
if bset is None:
self.bset = _Bundles
else:
self.bset = bset
# data should be a string
if bundle is not None:
self._initialize(bundle, **kwargs)
if id_num is not None:
self.num = id_num
else:
for key in kwargs:
self[key] = kwargs[key]
def _initialize(self, bundle, **kwargs):
# Clear out
tmp = self.keys()
for key in tmp:
del self[key]
# Assume format: "modeltype:model", e.g., "pop:fcoll" or "sed:uv"
pre, post = bundle.split(':')
if pre in self.bset.keys():
kw = self.bset[pre][post]
elif pre == 'prob':
kw = ProblemType(float(post))
else:
kw = read_lit(pre).__dict__[post]
pars = kw.keys()
for key in pars:
self[key] = kw[key]
def __getattr__(self, name):
if name not in self.keys():
pass
return self[name]
def __add__(self, other):
tmp = self.copy()
# Make sure to not overwrite anything here!
for key in other:
if key in tmp:
raise KeyError('%s supplied more than once!' % key)
tmp[key] = other[key]
return ParameterBundle(**tmp)
def __sub__(self, other):
tmp1 = self.copy()
for key in other:
del tmp1[key]
return ParameterBundle(**tmp1)
@property
def num(self):
if not hasattr(self, '_num'):
self._num = None
return self._num
@num.setter
def num(self, value):
assert value % 1 == 0
self._value = value
for key in self.keys():
self[_add_pop_tag(key, value)] = self.pop(key)
@property
def Npops(self):
""" Number of distinct populations represented. """
return len(self.pop_ids)
@property
def pop_ids(self):
""" List of ID numbers -- one for each population."""
pops = []
for key in self:
prefix, idnum = pop_id_num(key)
if idnum not in pops:
pops.append(idnum)
return pops
def link_sfrd_to(self, num):
if self.num is not None:
self['pop_tunnel{%i}' % self.num] = num
else:
self['pop_tunnel'] = num
@property
def info(self):
""" Print out info about this bundle. """
header('Bundle Info')
for key in self.kwargs.keys():
if key == self.bundle:
found = True
print line('*%s*' % self.base)
else:
found = False
print line(key)
if not found:
print line('*%s*' % self.base)
separator()
print line('Run \'reinitialize\' with one of the above as argument to change.')
footer()
@property
def orphans(self):
"""
Return dictionary of parameters that aren't associated with a population.
"""
tmp = {}
for par in self:
prefix, idnum = pop_id_num(par)
if idnum is None:
tmp[par] = self[par]
return tmp
def pars_by_pop(self, num, strip_id=False):
"""
Return dictionary of parameters associated with population `num`.
"""
tmp = {}
for par in self:
prefix, idnum = pop_id_num(par)
if idnum == num:
if strip_id:
tmp[prefix] = self[par]
else:
tmp[par] = self[par]
return tmp
_PB = ParameterBundle
_uv_pop = _PB('pop:fcoll', id_num=0) + _PB('sed:uv', id_num=0)
_xr_pop = _PB('pop:fcoll', id_num=1) + _PB('sed:xray', id_num=1)
_gs_4par = _PB('pop:fcoll', id_num=0) + _PB('sed:lw', id_num=0) \
+ _PB('pop:fcoll', id_num=1) + _PB('sed:lyc', id_num=1) \
+ _PB('pop:fcoll', id_num=2) + _PB('sed:xray', id_num=2)
_tmp = {'gs_2pop': _uv_pop+_xr_pop, 'gs_4par': _gs_4par}
_Bundles['sim'] = _tmp | en | 0.700436 | ParameterBundles.py Author: <NAME> Affiliation: UCLA Created on: Fri Jun 10 11:00:05 PDT 2016 Description: Add a population ID tag to each parameter. # Redshift dependent parameters here # Emits LW and LyC # Stellar pop + fesc # Emits X-rays # Some different spectral models # data should be a string # Clear out # Assume format: "modeltype:model", e.g., "pop:fcoll" or "sed:uv" # Make sure to not overwrite anything here! Number of distinct populations represented. List of ID numbers -- one for each population. Print out info about this bundle. Return dictionary of parameters that aren't associated with a population. Return dictionary of parameters associated with population `num`. | 2.233097 | 2 |
kotlang/kotc_main.py | jstasiak/kotlang | 1 | 6621104 | #!/usr/bin/env python3
import contextlib
import os
import subprocess
import sys
import time
from typing import cast, IO, Iterator, Optional
import click
from llvmlite import binding as llvm, ir
from kotlang.context import Context
class Emitter:
    """Lowers an llvmlite IR module to LLVM IR text, native assembly, or object code."""
    def __init__(self, optimization_level: Optional[int] = None) -> None:
        # All these initializations are required for code generation!
        llvm.initialize()
        llvm.initialize_native_target()
        llvm.initialize_native_asmprinter()
        llvm.initialize_native_asmparser()
        # Create a target machine representing the host
        target = llvm.Target.from_default_triple()
        target_machine = target.create_target_machine(codemodel='default')
        target_machine.set_asm_verbosity(True)
        self._target_machine = target_machine
        # Build the module-level optimization pipeline at the requested level.
        # NOTE(review): the default optimization_level=None is assigned
        # directly to pmb.opt_level; callers in this file always pass an int,
        # so confirm whether llvmlite accepts None before relying on it.
        pmb = llvm.PassManagerBuilder()
        pmb.opt_level = optimization_level
        self.mpm = llvm.create_module_pass_manager()
        pmb.populate(self.mpm)
    def _module_to_llvm_module(self, module: ir.Module) -> llvm.module.ModuleRef:
        """Parse the textual IR of `module` and run the optimization pipeline on it."""
        try:
            llvm_module = llvm.parse_assembly(str(module))
        except Exception:
            # Dump the offending IR so parse failures can be diagnosed.
            print('Assembly being parsed:', file=sys.stderr)
            print(module, file=sys.stderr)
            raise
        self.mpm.run(llvm_module)
        return llvm_module
    def module_to_ir(self, module: ir.Module) -> bytes:
        """Return the optimized module as LLVM IR text, encoded to bytes."""
        return str(self._module_to_llvm_module(module)).encode()
    def module_to_machine_code(self, module: ir.Module) -> bytes:
        """Return the optimized module as a native object file."""
        return cast(bytes, self._target_machine.emit_object(self._module_to_llvm_module(module)))
    def module_to_assembly(self, module: ir.Module) -> bytes:
        """Return the optimized module as native assembly text, encoded to bytes."""
        return cast(bytes, self._target_machine.emit_assembly(self._module_to_llvm_module(module)).encode())
# CLI entry point: compile SOURCE to IR ('ir'), assembly ('asm') or an object
# file ('obj'); object files are additionally linked into an executable with
# cc unless -c/--compile-only is given.  (Deliberately no docstring here:
# click would surface it as help text, changing CLI output.)
@click.command()
@click.argument('source', nargs=1, type=click.Path(exists=True))
@click.option('-c', '--compile-only', is_flag=True)
@click.option('-o', '--output')
@click.option('-v', '--verbose', count=True)
@click.option('-f', '--output-format', default='obj', type=click.Choice(['asm', 'ir', 'obj']))
@click.option('-O', '--optimization-level', default=0, type=int)
def main(
    source: str, compile_only: bool, output: str, verbose: int, output_format: str, optimization_level: int
) -> None:
    assert optimization_level in range(0, 3 + 1)
    # -vv and above prints per-phase timings to stderr.
    timer = timing if verbose >= 2 else dummy_timing
    context = Context(timer)
    base_name = os.path.splitext(source)[0]
    llvm_module = context.compile(source)
    with timer('Initializing LLVM'):
        emitter = Emitter(optimization_level)
    with timer('Generating output'):
        suffix = ''
        if output_format == 'ir':
            content = emitter.module_to_ir(llvm_module)
            suffix = '.ll'
        elif output_format == 'asm':
            content = emitter.module_to_assembly(llvm_module)
            suffix = '.s'
        else:
            content = emitter.module_to_machine_code(llvm_module)
            suffix = '.o'
    # When linking, the intermediate object keeps its derived name and the
    # final executable goes to -o (or the source's base name).
    if output_format == 'obj' and not compile_only:
        direct_output = base_name + suffix
        final_output = output or base_name
    else:
        direct_output = output or (base_name + suffix)
    with timer('Writing to storage'):
        with stdout_aware_binary_open(direct_output, 'w') as f2:
            f2.write(content)
    if output_format == 'obj' and not compile_only:
        with timer('Linking'):
            command_line = [
                'cc',
                direct_output,
                # TODO: bring back declaring what libraries should we link with
                # *(f'-l{library}' for library in ...),
                '-o',
                final_output,
                '-v',
            ]
            subprocess.check_call(command_line)
@contextlib.contextmanager
def stdout_aware_binary_open(filename: str, mode: str) -> Iterator[IO[bytes]]:
    """Open *filename* in binary *mode*, treating '-' as standard output.

    *mode* must not already specify text or binary; 'b' is appended here.
    Regular files are closed on exit; the stdout stream is left open.
    """
    assert 't' not in mode
    assert 'b' not in mode
    binary_mode = mode + 'b'
    use_stdout = filename == '-'
    if use_stdout:
        stream = os.fdopen(sys.stdout.fileno(), binary_mode)
    else:
        stream = open(filename, binary_mode)
    try:
        yield stream
    finally:
        if not use_stdout:
            stream.close()
@contextlib.contextmanager
def timing(description: str) -> Iterator[None]:
    """Context manager that prints how long its body took, in milliseconds.

    Output goes to stderr so it never mixes with compiler output on stdout.
    """
    t0 = time.time()
    yield
    t1 = time.time()
    dt = (t1 - t0) * 1000
    # ':.4f' = four decimal places.  The original ':4f' was a typo meaning
    # "minimum field width 4" and printed the default six decimals.
    print(f'[timer] {description} took {dt:.4f} ms', file=sys.stderr)
@contextlib.contextmanager
def dummy_timing(description: str) -> Iterator[None]:
    """No-op stand-in for `timing`, used when verbose output is disabled."""
    # Nothing to measure: just hand control to the body.
    yield
# Script entry point: defer to the click-decorated `main` command.
if __name__ == '__main__':
    main()
| #!/usr/bin/env python3
import contextlib
import os
import subprocess
import sys
import time
from typing import cast, IO, Iterator, Optional
import click
from llvmlite import binding as llvm, ir
from kotlang.context import Context
class Emitter:
    """Turns llvmlite IR modules into textual IR, assembly or object code."""

    def __init__(self, optimization_level: Optional[int] = None) -> None:
        """Initialize LLVM, a host target machine and a module pass manager.

        NOTE(review): `main()` always passes an int; if None is ever passed
        it flows straight into `pmb.opt_level` -- confirm llvmlite accepts
        None before relying on this default.
        """
        # All these initializations are required for code generation!
        llvm.initialize()
        llvm.initialize_native_target()
        llvm.initialize_native_asmprinter()
        llvm.initialize_native_asmparser()
        # Create a target machine representing the host
        target = llvm.Target.from_default_triple()
        target_machine = target.create_target_machine(codemodel='default')
        target_machine.set_asm_verbosity(True)
        self._target_machine = target_machine
        # Build the optimization pipeline once; self.mpm is reused per module.
        pmb = llvm.PassManagerBuilder()
        pmb.opt_level = optimization_level
        self.mpm = llvm.create_module_pass_manager()
        pmb.populate(self.mpm)

    def _module_to_llvm_module(self, module: ir.Module) -> llvm.module.ModuleRef:
        """Parse *module*'s textual IR and run the optimization passes.

        On a parse failure the offending IR is dumped to stderr before the
        exception is re-raised, to ease debugging codegen bugs.
        """
        try:
            llvm_module = llvm.parse_assembly(str(module))
        except Exception:
            print('Assembly being parsed:', file=sys.stderr)
            print(module, file=sys.stderr)
            raise
        self.mpm.run(llvm_module)
        return llvm_module

    def module_to_ir(self, module: ir.Module) -> bytes:
        """Return the optimized module as encoded textual LLVM IR."""
        return str(self._module_to_llvm_module(module)).encode()

    def module_to_machine_code(self, module: ir.Module) -> bytes:
        """Return the optimized module as a native object-file image."""
        return cast(bytes, self._target_machine.emit_object(self._module_to_llvm_module(module)))

    def module_to_assembly(self, module: ir.Module) -> bytes:
        """Return the optimized module as encoded native assembly text."""
        return cast(bytes, self._target_machine.emit_assembly(self._module_to_llvm_module(module)).encode())
@click.command()
@click.argument('source', nargs=1, type=click.Path(exists=True))
@click.option('-c', '--compile-only', is_flag=True)
@click.option('-o', '--output')
@click.option('-v', '--verbose', count=True)
@click.option('-f', '--output-format', default='obj', type=click.Choice(['asm', 'ir', 'obj']))
@click.option('-O', '--optimization-level', default=0, type=int)
def main(
    source: str, compile_only: bool, output: str, verbose: int, output_format: str, optimization_level: int
) -> None:
    """Compile SOURCE, emit IR/assembly/object code and optionally link it."""
    assert optimization_level in range(0, 3 + 1)
    # Timing instrumentation is only enabled at -vv and above.
    timer = timing if verbose >= 2 else dummy_timing
    context = Context(timer)
    base_name = os.path.splitext(source)[0]
    llvm_module = context.compile(source)
    with timer('Initializing LLVM'):
        emitter = Emitter(optimization_level)
    with timer('Generating output'):
        # Map each output format to its emitter method and file suffix;
        # anything else falls back to object code, matching click's default.
        emit, suffix = {
            'ir': (emitter.module_to_ir, '.ll'),
            'asm': (emitter.module_to_assembly, '.s'),
        }.get(output_format, (emitter.module_to_machine_code, '.o'))
        content = emit(llvm_module)
    link_requested = output_format == 'obj' and not compile_only
    if link_requested:
        # The object file is an intermediate; the linked binary gets the
        # user-chosen name (or the source's base name).
        direct_output = base_name + suffix
        final_output = output or base_name
    else:
        direct_output = output or (base_name + suffix)
    with timer('Writing to storage'):
        with stdout_aware_binary_open(direct_output, 'w') as sink:
            sink.write(content)
    if link_requested:
        with timer('Linking'):
            command_line = [
                'cc',
                direct_output,
                # TODO: bring back declaring what libraries should we link with
                # *(f'-l{library}' for library in ...),
                '-o',
                final_output,
                '-v',
            ]
            subprocess.check_call(command_line)
@contextlib.contextmanager
def stdout_aware_binary_open(filename: str, mode: str) -> Iterator[IO[bytes]]:
    """Open *filename* in binary mode; the special name '-' means stdout.

    The caller passes a mode string without 't' or 'b'; the binary flag is
    appended here.  A stream wrapping stdout is deliberately not closed, so
    the process's stdout file descriptor survives the context.
    """
    assert 't' not in mode
    assert 'b' not in mode
    binary_mode = mode + 'b'
    use_stdout = filename == '-'
    stream = os.fdopen(sys.stdout.fileno(), binary_mode) if use_stdout else open(filename, binary_mode)
    try:
        yield stream
    finally:
        if not use_stdout:
            stream.close()
@contextlib.contextmanager
def timing(description: str) -> Iterator[None]:
    """Context manager that prints how long its body took, in milliseconds.

    Output goes to stderr so it never mixes with compiler output on stdout.
    """
    t0 = time.time()
    yield
    t1 = time.time()
    dt = (t1 - t0) * 1000
    # ':.4f' = four decimal places.  The original ':4f' was a typo meaning
    # "minimum field width 4" and printed the default six decimals.
    print(f'[timer] {description} took {dt:.4f} ms', file=sys.stderr)
@contextlib.contextmanager
def dummy_timing(description: str) -> Iterator[None]:
    """No-op stand-in for `timing`, used when verbose output is disabled."""
    # Nothing to measure: just hand control to the body.
    yield
# Script entry point: defer to the click-decorated `main` command.
if __name__ == '__main__':
    main()
| en | 0.738287 | #!/usr/bin/env python3 # All these initializations are required for code generation! # Create a target machine representing the host # TODO: bring back declaring what libraries should we link with # *(f'-l{library}' for library in ...), | 2.10469 | 2 |
setup.py | ofey404/MPS060602 | 0 | 6621105 | """
Setup file for MPS060602.
Use setup.cfg to configure your project.
This file was generated with PyScaffold 4.1.5.
PyScaffold helps you to put up the scaffold of your new Python project.
Learn more under: https://pyscaffold.org/
"""
from setuptools import setup
from sys import platform
if __name__ == "__main__":
    # Fail fast on non-Windows hosts: the MPS-060602 card is Windows-only,
    # so building elsewhere can never work.  The check lives outside the
    # try-block so the misleading "update setuptools" hint below is not
    # printed for a plain platform mismatch.
    if platform != "win32":
        print(
            "\n\nMPS-060602 acquisition card only support windows,",
            "but platform is {}.".format(platform),
        )
        # The original used a bare `raise` with no active exception, which
        # itself raised RuntimeError('No active exception to re-raise').
        # Raise an explicit, self-describing error instead.
        raise RuntimeError("MPS-060602 requires Windows (win32), got {!r}".format(platform))
    try:
        setup(
            use_scm_version={"version_scheme": "no-guess-dev"},
            author="<NAME>",
            author_email="<EMAIL>",
            license="MIT",
            install_requires=["dataclasses"],
        )
    # Bare except is deliberate: setup() signals failure via SystemExit,
    # which `except Exception` would miss.  The hint is printed and the
    # original exception is re-raised so the build still fails.
    except:  # noqa
        print(
            "\n\nAn error occurred while building the project, "
            "please ensure you have the most updated version of setuptools, "
            "setuptools_scm and wheel with:\n"
            "    pip install -U setuptools setuptools_scm wheel\n\n"
        )
        raise
| """
Setup file for MPS060602.
Use setup.cfg to configure your project.
This file was generated with PyScaffold 4.1.5.
PyScaffold helps you to put up the scaffold of your new Python project.
Learn more under: https://pyscaffold.org/
"""
from setuptools import setup
from sys import platform
if __name__ == "__main__":
    # Fail fast on non-Windows hosts: the MPS-060602 card is Windows-only,
    # so building elsewhere can never work.  The check lives outside the
    # try-block so the misleading "update setuptools" hint below is not
    # printed for a plain platform mismatch.
    if platform != "win32":
        print(
            "\n\nMPS-060602 acquisition card only support windows,",
            "but platform is {}.".format(platform),
        )
        # The original used a bare `raise` with no active exception, which
        # itself raised RuntimeError('No active exception to re-raise').
        # Raise an explicit, self-describing error instead.
        raise RuntimeError("MPS-060602 requires Windows (win32), got {!r}".format(platform))
    try:
        setup(
            use_scm_version={"version_scheme": "no-guess-dev"},
            author="<NAME>",
            author_email="<EMAIL>",
            license="MIT",
            install_requires=["dataclasses"],
        )
    # Bare except is deliberate: setup() signals failure via SystemExit,
    # which `except Exception` would miss.  The hint is printed and the
    # original exception is re-raised so the build still fails.
    except:  # noqa
        print(
            "\n\nAn error occurred while building the project, "
            "please ensure you have the most updated version of setuptools, "
            "setuptools_scm and wheel with:\n"
            "    pip install -U setuptools setuptools_scm wheel\n\n"
        )
        raise
| en | 0.926968 | Setup file for MPS060602. Use setup.cfg to configure your project. This file was generated with PyScaffold 4.1.5. PyScaffold helps you to put up the scaffold of your new Python project. Learn more under: https://pyscaffold.org/ # noqa | 1.958157 | 2 |
src/main/nluas/language/ntuple_visualizer.py | icsi-berkeley/framework-code | 2 | 6621106 | <reponame>icsi-berkeley/framework-code<gh_stars>1-10
"""
@author: <<EMAIL>
A simple program to output n-tuples using Analyzer+Specializer. Not reliant on any packages other than Jython.
"""
from nluas.language.core_specializer import *
from nluas.ntuple_decoder import *
import traceback
import pprint
from six.moves import input
# Wire up the NLU pipeline: the decoder pretty-prints n-tuples, the analyzer
# talks to an analyzer service expected at localhost:8090, and the
# specializer turns SemSpecs into n-tuples.
decoder = NtupleDecoder()
analyzer = Analyzer("http://localhost:8090")
cs = CoreSpecializer(analyzer)
# Simple REPL: 'q' quits, 'd' enables specializer debug output, anything
# else is parsed and specialized.
while True:
    text = input("> ")
    if text == "q":
        quit()
    elif text == "d":
        cs.debug_mode = True
    else:
        try:
            full_parse = analyzer.full_parse(text)
            semspecs = full_parse['parse']
            costs = full_parse['costs']
            # Try candidate SemSpecs in order; stop at the first one that
            # specializes successfully.
            for i in range(len(semspecs)):
                try:
                    fs = semspecs[i]
                    cost = costs[i]
                    ntuple = cs.specialize(fs)
                    #decoder.pprint_ntuple(ntuple)
                    #print(ntuple)
                    print("\n")
                    print("SemSpec Cost: {}".format(str(cost)))
                    pprint.pprint(ntuple)
                    break
                except Exception as e:
                    # A failed candidate is reported but not fatal; move on
                    # to the next SemSpec.
                    traceback.print_exc()
                    print(e)
        except Exception as e:
            # Parse/transport errors (e.g. analyzer not running) are printed
            # and the prompt is shown again.
            traceback.print_exc()
            print(e)
| """
@author: <<EMAIL>
A simple program to output n-tuples using Analyzer+Specializer. Not reliant on any packages other than Jython.
"""
from nluas.language.core_specializer import *
from nluas.ntuple_decoder import *
import traceback
import pprint
from six.moves import input
# Wire up the decoder, the analyzer client (service on localhost:8090) and
# the core specializer that converts SemSpecs into n-tuples.
decoder = NtupleDecoder()
analyzer = Analyzer("http://localhost:8090")
cs = CoreSpecializer(analyzer)
# Interactive loop: 'q' exits, 'd' turns on debug mode, any other input is
# analyzed and specialized.
while True:
    command = input("> ")
    if command == "q":
        quit()
    if command == "d":
        cs.debug_mode = True
        continue
    try:
        analysis = analyzer.full_parse(command)
        semspecs = analysis['parse']
        costs = analysis['costs']
        # Walk candidate SemSpecs in order and stop at the first success.
        for index in range(len(semspecs)):
            try:
                semspec = semspecs[index]
                cost = costs[index]
                ntuple = cs.specialize(semspec)
                print("\n")
                print("SemSpec Cost: {}".format(str(cost)))
                pprint.pprint(ntuple)
                break
            except Exception as err:
                # Report the failed candidate and fall through to the next.
                traceback.print_exc()
                print(err)
    except Exception as err:
        # Analyzer/transport failures: report and keep the REPL alive.
        traceback.print_exc()
        print(err)
hw2_classification.py | mariamingallonMM/AI-ML-W6-classifier | 1 | 6621107 | """
This code implements a k-class Classifier per week 6 assignment of the machine learning module part of Columbia University Micromaster programme in AI.
Written using Python 3.X for running on Vocareum
Execute as follows:
$ python3 hw2_classification.py X_train.csv y_train.csv X_test.csv
"""
# builtin modules
from __future__ import division
import os
#import psutil
import requests
import sys
import math
from random import randrange
import functools
import operator
# 3rd party modules
import pandas as pd
import numpy as np
def separate_by_class(X_train, y_train, k_classes: int = 10):
    """Group training rows by their class label.

    Parameters
    ----------
    X_train : pd.DataFrame
        Feature columns of the training set.
    y_train : pd.DataFrame
        Single-column labels aligned row-for-row with X_train.
    k_classes : int, default 10
        Number of classes to pre-seed so unseen classes still get a key.

    Returns
    -------
    dict mapping each label to a list of numpy rows (features + label).
    """
    buckets = {label: [] for label in range(k_classes)}
    combined = pd.concat([X_train, y_train], axis=1)
    for row_idx in range(len(combined)):
        label = y_train.iloc[row_idx, 0]
        # Labels outside the pre-seeded range still get their own bucket.
        if label not in buckets:
            buckets[label] = []
        buckets[label].append(np.array(combined.iloc[row_idx]))
    return buckets
def summarize_dataframe(dataframe, class_value, n_features):
    """Per-column mean, sample standard deviation and count of *dataframe*.

    Parameters
    ----------
    dataframe : pd.DataFrame
        The class subset to summarise (features plus appended label column).
    class_value : int
        Label of the class being summarised; used as the mean of the label
        column when the subset is empty.
    n_features : int
        Number of feature columns (label column excluded).

    Returns
    -------
    pd.DataFrame with columns 'mean', 'std' and 'count'.  A completely empty
    input (a class unseen in training) yields all-zero statistics except for
    the label-column mean, which is set to `class_value`.
    """
    if dataframe.shape == (0, 0):
        stats = {
            'mean': np.append(np.zeros(n_features), [class_value]),
            'std': np.zeros(n_features + 1),
            'count': np.zeros(n_features + 1),
        }
    else:
        stats = {
            'mean': dataframe.mean(axis=0),
            # ddof=1: sample (not population) standard deviation.
            'std': dataframe.std(axis=0, ddof=1),
            'count': dataframe.count(axis=0),
        }
    return pd.DataFrame(stats)
def summarize_by_class(X_train, y_train):
    """Compute per-class mean/std/count statistics of the training set.

    Splits (X_train, y_train) into per-class subsets via `separate_by_class`
    (fixed at 10 classes, per the assignment) and summarises each subset
    with `summarize_dataframe`.

    Returns
    -------
    dict mapping each class label to its statistics DataFrame.
    """
    per_class = separate_by_class(X_train, y_train, 10)
    return {
        label: summarize_dataframe(pd.DataFrame(records), label, len(X_train.columns))
        for label, records in per_class.items()
    }
def calculate_probability(x, mean, stdev):
    """Gaussian probability density of *x* under N(mean, stdev**2).

    Parameters
    ----------
    x : float
        Point to evaluate.
    mean, stdev : float
        Distribution parameters (stdev is sigma, not sigma squared).

    Returns
    -------
    float: f(x) = (1 / (sqrt(2*pi) * sigma)) * exp(-((x-mean)^2 / (2*sigma^2))),
    or 0.0 for a degenerate (zero-variance) distribution.
    """
    # Guard against division by zero for zero-variance features (e.g. a
    # class unseen in training, whose summary is all zeros).  The original
    # test `(mean or stdev) == 0.0` only caught mean == 0 AND stdev == 0,
    # and crashed with ZeroDivisionError when stdev == 0 but mean != 0.
    if stdev == 0.0:
        return 0.0
    exponent = math.exp(-((x - mean) ** 2 / (2 * stdev ** 2)))
    return (1 / (math.sqrt(2 * math.pi) * stdev)) * exponent
def calculate_class_probabilities(summaries, row):
    """Score *row* against every class with Gaussian naive Bayes.

    For each class: score = P(class) * prod_i P(x_i | class), i.e. the class
    prior times the per-feature Gaussian likelihoods (Bayes' rule without
    the evidence denominator).  The raw scores are then min-max rescaled and
    renormalized to sum to 1.

    Parameters
    ----------
    summaries : dict
        Output of `summarize_by_class`: per-class DataFrames of
        mean/std/count, one summary row per feature plus one for the label.
    row : sequence
        Feature values of the sample being scored.

    Returns
    -------
    dict mapping class label -> score in [0, 1].
    """
    # Total training-set size, recovered from the per-class counts.  The
    # 'count' column is constant within a class, so element [0] suffices.
    total_rows = sum([summaries[label]['count'][0] for label in summaries])
    probabilities = dict()
    for class_value, class_summaries in summaries.items():
        # Class prior P(class) = class count / total count.
        probabilities[class_value] = summaries[class_value]['count'][0]/float(total_rows)
        # Multiply in per-feature likelihoods.  The last summary row is
        # skipped: it describes the label column that was appended to the
        # feature vectors in separate_by_class.
        for i in range(len(class_summaries)-1):
            mean, stdev, _ = class_summaries.iloc[i]
            probabilities[class_value] *= calculate_probability(row[i], mean, stdev)
    # Min-max rescale the raw scores to [0, 1].  NOTE(review): this forces
    # the weakest class to exactly 0 and discards absolute scale; it appears
    # intentional for the assignment's output format -- confirm before reuse.
    max_prob = probabilities[max(probabilities, key=probabilities.get)]
    min_prob = probabilities[min(probabilities, key=probabilities.get)]
    for class_value, probability in probabilities.items():
        if (max_prob - min_prob) > 0:
            probabilities[class_value] = (probability - min_prob) / (max_prob - min_prob)
        else:
            probabilities[class_value] = float(0.0)
    # Renormalize so the scores sum to 1 (skipped when everything is 0).
    sum_prob = sum(probabilities.values())
    for class_value, probability in probabilities.items():
        if sum_prob > 0:
            probabilities[class_value] = probability / sum_prob
    return probabilities
def predict(summaries, row):
    """Return the most likely class for *row* plus per-class probabilities.

    Parameters
    ----------
    summaries : dict
        Per-class statistics produced by `summarize_by_class`.
    row : sequence
        Feature values of the sample to classify.

    Returns
    -------
    (best_label, best_prob, probabilities) where `probabilities` is the full
    dict returned by `calculate_class_probabilities`.
    """
    probabilities = calculate_class_probabilities(summaries, row)
    best_label, best_prob = None, -1
    # NOTE: a stray debug `print(class_value, probability)` used to run here
    # for every class of every test row; removed to keep stdout clean.
    for class_value, probability in probabilities.items():
        if best_label is None or probability > best_prob:
            best_prob = probability
            best_label = class_value
    return best_label, best_prob, probabilities
def write_csv(filename, a, **kwargs):
    """Write array-like `a` as CSV with no trailing newline (Vocareum format).

    Keyword arguments
    -----------------
    header : bool, default False
        Whether to emit the column header row.
    path : str, optional
        Full output path; defaults to ./datasets/out/<filename>.
    """
    header = kwargs.get('header', False)
    filepath = kwargs.get('path', os.path.join(os.getcwd(), 'datasets', 'out', filename))
    df = pd.DataFrame(a)
    # Serialize in one pass and strip the final newline, instead of the old
    # two-step write that relied on `line_terminator=""` -- a to_csv keyword
    # renamed to `lineterminator` in pandas 1.5 and removed in pandas 2.0.
    text = df.to_csv(None, sep=',', index=False, header=header)
    # newline='' prevents '\n' -> os.linesep translation, matching to_csv.
    with open(filepath, 'w', newline='') as f:
        f.write(text.rstrip('\n'))
    #print("New Outputs file saved to: <<", filename, ">>", sep='', end='\n')
def pluginClassifier(X_train, y_train, X_test):
    """Gaussian naive Bayes plug-in classifier.

    Parameters
    ----------
    X_train, y_train : pd.DataFrame
        Training features and their single-column labels.
    X_test : pd.DataFrame
        Rows to classify.

    Returns
    -------
    list of dict
        One dict per test row, mapping each class label to the (rescaled)
        probability that the row belongs to it.  The per-row argmax labels
        are computed along the way but -- matching the original interface --
        only the probability dicts are returned.
    """
    # Per-class statistics (mean / std / count) from the training data.
    summaries = summarize_by_class(X_train, y_train)
    predictions = list()
    probabilities_output = list()
    # Score every test row against every class.
    for i in range(len(X_test)):
        row = X_test.iloc[i]  # feature values only; no label column
        out_prediction, _, out_probability = predict(summaries, row)
        predictions.append(out_prediction)
        probabilities_output.append(out_probability)
    # (Removed the no-op rebindings `prediction_outputs = (predictions)` and
    # `probabilities_output = (probabilities_output)` from the original.)
    return probabilities_output
def main():
    """CLI entry: read train/test CSVs from argv and write class probabilities.

    Usage: python3 hw2_classification.py X_train.csv y_train.csv X_test.csv
    """
    X_train = np.genfromtxt(sys.argv[1], delimiter=",")
    y_train = np.genfromtxt(sys.argv[2])
    X_test = np.genfromtxt(sys.argv[3], delimiter=",")
    # Wrap the raw arrays as DataFrames; labels are cast to integers.
    X_train = pd.DataFrame(data=X_train)
    y_train = pd.DataFrame(data=y_train).astype('int32')
    X_test = pd.DataFrame(data=X_test)
    final_outputs = pluginClassifier(X_train, y_train, X_test) # get final outputs
    # write the probability of predicting the class right to a csv
    # note it is important not to write the header into the output csv as Vocareum will throw error
    write_csv("probs_test.csv", final_outputs, header = False, path = os.path.join(os.getcwd(), "probs_test.csv"))
    #np.savetxt("probs_test.csv", final_outputs, fmt='%1.2f', delimiter="\n") # write output to file, note values for fmt and delimiter
# Script entry point.
if __name__ == '__main__':
    main()
| """
This code implements a k-class Classifier per week 6 assignment of the machine learning module part of Columbia University Micromaster programme in AI.
Written using Python 3.X for running on Vocareum
Execute as follows:
$ python3 hw2_classification.py X_train.csv y_train.csv X_test.csv
"""
# builtin modules
from __future__ import division
import os
#import psutil
import requests
import sys
import math
from random import randrange
import functools
import operator
# 3rd party modules
import pandas as pd
import numpy as np
def separate_by_class(X_train, y_train, k_classes: int = 10):
    """Group training rows by their class label.

    Parameters
    ----------
    X_train : pd.DataFrame
        Feature columns of the training set.
    y_train : pd.DataFrame
        Single-column labels aligned row-for-row with X_train.
    k_classes : int, default 10
        Number of classes to pre-seed so unseen classes still get a key.

    Returns
    -------
    dict mapping each label to a list of numpy rows (features + label).
    """
    buckets = {label: [] for label in range(k_classes)}
    combined = pd.concat([X_train, y_train], axis=1)
    for row_idx in range(len(combined)):
        label = y_train.iloc[row_idx, 0]
        # Labels outside the pre-seeded range still get their own bucket.
        if label not in buckets:
            buckets[label] = []
        buckets[label].append(np.array(combined.iloc[row_idx]))
    return buckets
def summarize_dataframe(dataframe, class_value, n_features):
    """Per-column mean, sample standard deviation and count of *dataframe*.

    Parameters
    ----------
    dataframe : pd.DataFrame
        The class subset to summarise (features plus appended label column).
    class_value : int
        Label of the class being summarised; used as the mean of the label
        column when the subset is empty.
    n_features : int
        Number of feature columns (label column excluded).

    Returns
    -------
    pd.DataFrame with columns 'mean', 'std' and 'count'.  A completely empty
    input (a class unseen in training) yields all-zero statistics except for
    the label-column mean, which is set to `class_value`.
    """
    if dataframe.shape == (0, 0):
        stats = {
            'mean': np.append(np.zeros(n_features), [class_value]),
            'std': np.zeros(n_features + 1),
            'count': np.zeros(n_features + 1),
        }
    else:
        stats = {
            'mean': dataframe.mean(axis=0),
            # ddof=1: sample (not population) standard deviation.
            'std': dataframe.std(axis=0, ddof=1),
            'count': dataframe.count(axis=0),
        }
    return pd.DataFrame(stats)
def summarize_by_class(X_train, y_train):
    """Compute per-class mean/std/count statistics of the training set.

    Splits (X_train, y_train) into per-class subsets via `separate_by_class`
    (fixed at 10 classes, per the assignment) and summarises each subset
    with `summarize_dataframe`.

    Returns
    -------
    dict mapping each class label to its statistics DataFrame.
    """
    per_class = separate_by_class(X_train, y_train, 10)
    return {
        label: summarize_dataframe(pd.DataFrame(records), label, len(X_train.columns))
        for label, records in per_class.items()
    }
def calculate_probability(x, mean, stdev):
    """Gaussian probability density of *x* under N(mean, stdev**2).

    Parameters
    ----------
    x : float
        Point to evaluate.
    mean, stdev : float
        Distribution parameters (stdev is sigma, not sigma squared).

    Returns
    -------
    float: f(x) = (1 / (sqrt(2*pi) * sigma)) * exp(-((x-mean)^2 / (2*sigma^2))),
    or 0.0 for a degenerate (zero-variance) distribution.
    """
    # Guard against division by zero for zero-variance features (e.g. a
    # class unseen in training, whose summary is all zeros).  The original
    # test `(mean or stdev) == 0.0` only caught mean == 0 AND stdev == 0,
    # and crashed with ZeroDivisionError when stdev == 0 but mean != 0.
    if stdev == 0.0:
        return 0.0
    exponent = math.exp(-((x - mean) ** 2 / (2 * stdev ** 2)))
    return (1 / (math.sqrt(2 * math.pi) * stdev)) * exponent
def calculate_class_probabilities(summaries, row):
    """Score *row* against every class with Gaussian naive Bayes.

    For each class: score = P(class) * prod_i P(x_i | class), i.e. the class
    prior times the per-feature Gaussian likelihoods (Bayes' rule without
    the evidence denominator).  The raw scores are then min-max rescaled and
    renormalized to sum to 1.

    Parameters
    ----------
    summaries : dict
        Output of `summarize_by_class`: per-class DataFrames of
        mean/std/count, one summary row per feature plus one for the label.
    row : sequence
        Feature values of the sample being scored.

    Returns
    -------
    dict mapping class label -> score in [0, 1].
    """
    # Total training-set size, recovered from the per-class counts.  The
    # 'count' column is constant within a class, so element [0] suffices.
    total_rows = sum([summaries[label]['count'][0] for label in summaries])
    probabilities = dict()
    for class_value, class_summaries in summaries.items():
        # Class prior P(class) = class count / total count.
        probabilities[class_value] = summaries[class_value]['count'][0]/float(total_rows)
        # Multiply in per-feature likelihoods.  The last summary row is
        # skipped: it describes the label column that was appended to the
        # feature vectors in separate_by_class.
        for i in range(len(class_summaries)-1):
            mean, stdev, _ = class_summaries.iloc[i]
            probabilities[class_value] *= calculate_probability(row[i], mean, stdev)
    # Min-max rescale the raw scores to [0, 1].  NOTE(review): this forces
    # the weakest class to exactly 0 and discards absolute scale; it appears
    # intentional for the assignment's output format -- confirm before reuse.
    max_prob = probabilities[max(probabilities, key=probabilities.get)]
    min_prob = probabilities[min(probabilities, key=probabilities.get)]
    for class_value, probability in probabilities.items():
        if (max_prob - min_prob) > 0:
            probabilities[class_value] = (probability - min_prob) / (max_prob - min_prob)
        else:
            probabilities[class_value] = float(0.0)
    # Renormalize so the scores sum to 1 (skipped when everything is 0).
    sum_prob = sum(probabilities.values())
    for class_value, probability in probabilities.items():
        if sum_prob > 0:
            probabilities[class_value] = probability / sum_prob
    return probabilities
def predict(summaries, row):
    """Return the most likely class for *row* plus per-class probabilities.

    Parameters
    ----------
    summaries : dict
        Per-class statistics produced by `summarize_by_class`.
    row : sequence
        Feature values of the sample to classify.

    Returns
    -------
    (best_label, best_prob, probabilities) where `probabilities` is the full
    dict returned by `calculate_class_probabilities`.
    """
    probabilities = calculate_class_probabilities(summaries, row)
    best_label, best_prob = None, -1
    # NOTE: a stray debug `print(class_value, probability)` used to run here
    # for every class of every test row; removed to keep stdout clean.
    for class_value, probability in probabilities.items():
        if best_label is None or probability > best_prob:
            best_prob = probability
            best_label = class_value
    return best_label, best_prob, probabilities
def write_csv(filename, a, **kwargs):
    """Write array-like `a` as CSV with no trailing newline (Vocareum format).

    Keyword arguments
    -----------------
    header : bool, default False
        Whether to emit the column header row.
    path : str, optional
        Full output path; defaults to ./datasets/out/<filename>.
    """
    header = kwargs.get('header', False)
    filepath = kwargs.get('path', os.path.join(os.getcwd(), 'datasets', 'out', filename))
    df = pd.DataFrame(a)
    # Serialize in one pass and strip the final newline, instead of the old
    # two-step write that relied on `line_terminator=""` -- a to_csv keyword
    # renamed to `lineterminator` in pandas 1.5 and removed in pandas 2.0.
    text = df.to_csv(None, sep=',', index=False, header=header)
    # newline='' prevents '\n' -> os.linesep translation, matching to_csv.
    with open(filepath, 'w', newline='') as f:
        f.write(text.rstrip('\n'))
    #print("New Outputs file saved to: <<", filename, ">>", sep='', end='\n')
def pluginClassifier(X_train, y_train, X_test):
    """Gaussian naive Bayes plug-in classifier.

    Parameters
    ----------
    X_train, y_train : pd.DataFrame
        Training features and their single-column labels.
    X_test : pd.DataFrame
        Rows to classify.

    Returns
    -------
    list of dict
        One dict per test row, mapping each class label to the (rescaled)
        probability that the row belongs to it.  The per-row argmax labels
        are computed along the way but -- matching the original interface --
        only the probability dicts are returned.
    """
    # Per-class statistics (mean / std / count) from the training data.
    summaries = summarize_by_class(X_train, y_train)
    predictions = list()
    probabilities_output = list()
    # Score every test row against every class.
    for i in range(len(X_test)):
        row = X_test.iloc[i]  # feature values only; no label column
        out_prediction, _, out_probability = predict(summaries, row)
        predictions.append(out_prediction)
        probabilities_output.append(out_probability)
    # (Removed the no-op rebindings `prediction_outputs = (predictions)` and
    # `probabilities_output = (probabilities_output)` from the original.)
    return probabilities_output
def main():
    """CLI entry: read train/test CSVs from argv and write class probabilities.

    Usage: python3 hw2_classification.py X_train.csv y_train.csv X_test.csv
    """
    X_train = np.genfromtxt(sys.argv[1], delimiter=",")
    y_train = np.genfromtxt(sys.argv[2])
    X_test = np.genfromtxt(sys.argv[3], delimiter=",")
    # Wrap the raw arrays as DataFrames; labels are cast to integers.
    X_train = pd.DataFrame(data=X_train)
    y_train = pd.DataFrame(data=y_train).astype('int32')
    X_test = pd.DataFrame(data=X_test)
    final_outputs = pluginClassifier(X_train, y_train, X_test) # get final outputs
    # write the probability of predicting the class right to a csv
    # note it is important not to write the header into the output csv as Vocareum will throw error
    write_csv("probs_test.csv", final_outputs, header = False, path = os.path.join(os.getcwd(), "probs_test.csv"))
    #np.savetxt("probs_test.csv", final_outputs, fmt='%1.2f', delimiter="\n") # write output to file, note values for fmt and delimiter
# Script entry point.
if __name__ == '__main__':
    main()
| en | 0.836801 | This code implements a k-class Classifier per week 6 assignment of the machine learning module part of Columbia University Micromaster programme in AI. Written using Python 3.X for running on Vocareum Execute as follows: $ python3 hw2_classification.py X_train.csv y_train.csv X_test.csv # builtin modules #import psutil # 3rd party modules Separates our training data by class, from the following inputs: X_train : training dataset features excluding the label (multiple columns) y_train : correspoding labels of the training dataset (single column) k_classes: number of k classes for the classifier, (10 classes fixed in assignment) It returns a dictionary where each key is the class value. Calculate the mean, standard deviation and count for each column in the dataframe from the following inputs: dataframe : dataset to summarise as a DataFrame class_value : the value (label from 0 to 9) of the class being summarised n_features : number of features (columns) in the training dataset (X_train + y_train) It returns a DataFrame of mean, std and count for each column/feature in the dataset. Note that it is prepared to handle an empty dataframe to deal with classes unseen in the training dataset. # dealing with emtpy dataframes as a result of including for classes unseen in dataset #ddof = 0 to have same behaviour as numpy.std, std takes the absolute value before squaring Calculate statistics (mean, stdv, count) for each class subset from the following inputs: X_train : training dataset features excluding the label (multiple columns) y_train : corresponding labels of the training dataset (single column) It first calls the function 'separate_by_class' to split the dataset by class. It then calls the function 'summarize_dataframe' to calculate the statistics for each row. It returns a dictionary object where each key is the class value and then a list of all the records as the value in the dictionary. 
# convert class subset lists to a dataframe before passing on to summarize_dataframe # obtain summary statistics per class subset, note we specify the number of features in the dataframe to be summarised Calculate the Gaussian probability distribution function for x from inputs: x: the variable we are calculating the probability for mean: the mean of the distribution stdev: the standard deviation of the distribution (sigma before squaring) It returns the Gaussian probability of a given value based on: f(x) = (1 / sqrt(2 * PI) * sigma) * exp(-((x-mean)^2 / (2 * sigma^2))) Calculate the probability of a value using the Gaussian Probability Density Function from inputs: summaries: prepared summaries of dataset row: a new row This function uses the statistics calculated from training data to calculate probabilities for the testing dataset (new data). Probabilities are calculated separately for each class. First, we calculate the probability that a new X vector from the testing dataset belongs to the first class. Then, we calculate the probabilities that it belongs to the second class, and so on for all the classes identified in the training dataset. The probability that a new X vector from the testing dataset belongs to a class is calculated as follows: P(class|data) = P(X|class) * P(class) Note we have simplified the Bayes theorem by removing the division as we do not strictly need a number between 0 and 1 to predict the class the new data belongs to as we will be simply taking the maximum result from the above equation for each class. It returns a dictionary where each key is the class label and the values are the probabibilities of that row belonging to each class on the dataset. # total number of training records calculated from the counts stored in the summary statistics # note that the count column has the same value for all rows, and hence picking up item [0] will suffice # probabilities are multiplied together as they accumulate. 
# normalize probabilities so that they sum 1 # divide by the sum of the probabilities to ensure sum of probabilities for all classes is equal to 1. Predict the most likely class from inputs: summaries: prepared summaries of dataset row: a row in the dataset for predicting its label (a row of X_test) This function uses the probabilities calculated from each class via the function 'calculate_class_probabilities' and returns the label with the highest probability. It returns the maximum likelihood estimate for a given row. # write the outputs csv file #print("New Outputs file saved to: <<", filename, ">>", sep='', end='\n') Implements a Bayes Naive Classifier from inputs: X_train : training dataset features excluding the label (multiple columns) y_train : corresponding labels of the training dataset (single column) X_test : testing dataset features excluding the label (multiple columns) This function consists of the following main steps: Step 1. Get a summary of the statistics of the training dataset (X_train, y_train) pairs combined Step 2. Declare empty lists for predictions and probabilities for each row in X_test of belonging to each of the classes present on the dataset Step 3. Get the maximum likelihood estimate for each row of belonging to each of the classes, from a given 'summaries' and It returns two lists: prediction_outputs: a list of the predicted labels for each row (class of highest probability) probabilities_output: a dictionary where each key is the class label and the values are the probabibilities of that row belonging to each class on the dataset. # Step 1. Get statistics summary on the training dataset # Step 2. create an empty list to store predictions # Step 3. 
Go through each row in the testing dataset to get the maximum likelihood estimate of the probability of a row to belong to each of the classes #note how row does not include the label value 'y' # get final outputs # write the probability of predicting the class right to a csv # note it is important not to write the header into the output csv as Vocareum will throw error #np.savetxt("probs_test.csv", final_outputs, fmt='%1.2f', delimiter="\n") # write output to file, note values for fmt and delimiter | 4.13598 | 4 |
software/archived/utils/cameraSetup.py | luzgool/ReachMaster | 2 | 6621108 | from ximea import xiapi
import numpy as np
class cameraDev():
    """Wrapper around a set of XIMEA cameras.

    Opens ``num_cams`` devices, applies the user-supplied settings from
    ``camSetDict`` and keeps the opened handles in ``self.cameraList``.
    """
    #Initialize all camera devices to user defined camera settings
    def __init__(self,num_cams,camSetDict,init_time):
        """Open and configure every camera device.

        :param num_cams: number of cameras to open (int)
        :param camSetDict: dict of XIMEA settings; expected keys: 'imgdf',
            'exp_per', 'gain_val', 'sensor_feat', 'gpi_selector', 'gpi_mode',
            'trigger_source', 'gpo_selector', 'gpo_mode'
        :param init_time: initialization time stamp kept for later use
        """
        self.cameraList = []
        self.init_time = init_time
        for i in range(num_cams):
            camera = xiapi.Camera(dev_id = i)
            #start communication before pushing any settings
            print('Opening camera %s ...' %(i))
            camera.open_device()
            camera.set_imgdataformat(camSetDict['imgdf'])
            camera.set_exposure(camSetDict['exp_per'])
            camera.set_gain(camSetDict['gain_val'])
            camera.set_sensor_feature_value(camSetDict['sensor_feat'])
            camera.set_gpi_selector(camSetDict['gpi_selector'])
            camera.set_gpi_mode(camSetDict['gpi_mode'])
            camera.set_trigger_source(camSetDict['trigger_source'])
            camera.set_gpo_selector(camSetDict['gpo_selector'])
            camera.set_gpo_mode(camSetDict['gpo_mode'])
            self.cameraList.append(camera)
        self.printCamSet()

    #Prints the settings of each camera
    def printCamSet(self):
        """Print the effective settings of every opened camera."""
        # FIX: removed the dead 'counter' variable (incremented but never
        # read) and replaced range(len(...)) indexing with enumerate.
        for i, currCamera in enumerate(self.cameraList):
            print ('Camera %d Settings: ' %i)
            print('Exposure was set to %i us' %currCamera.get_exposure())
            print('Gain was set to %f db' %currCamera.get_gain())
            print('Img Data Format set to %s' %currCamera.get_imgdataformat())
            print ("Available Bandwidth for camera %d: %s " % (i,currCamera.get_available_bandwidth()))
| from ximea import xiapi
import numpy as np
class cameraDev():
    """Wrapper around a set of XIMEA cameras.

    Opens ``num_cams`` devices, applies the user-supplied settings from
    ``camSetDict`` and keeps the opened handles in ``self.cameraList``.
    """
    #Initialize all camera devices to user defined camera settings
    def __init__(self,num_cams,camSetDict,init_time):
        """Open and configure every camera device.

        :param num_cams: number of cameras to open (int)
        :param camSetDict: dict of XIMEA settings; expected keys: 'imgdf',
            'exp_per', 'gain_val', 'sensor_feat', 'gpi_selector', 'gpi_mode',
            'trigger_source', 'gpo_selector', 'gpo_mode'
        :param init_time: initialization time stamp kept for later use
        """
        self.cameraList = []
        self.init_time = init_time
        for i in range(num_cams):
            camera = xiapi.Camera(dev_id = i)
            #start communication before pushing any settings
            print('Opening camera %s ...' %(i))
            camera.open_device()
            camera.set_imgdataformat(camSetDict['imgdf'])
            camera.set_exposure(camSetDict['exp_per'])
            camera.set_gain(camSetDict['gain_val'])
            camera.set_sensor_feature_value(camSetDict['sensor_feat'])
            camera.set_gpi_selector(camSetDict['gpi_selector'])
            camera.set_gpi_mode(camSetDict['gpi_mode'])
            camera.set_trigger_source(camSetDict['trigger_source'])
            camera.set_gpo_selector(camSetDict['gpo_selector'])
            camera.set_gpo_mode(camSetDict['gpo_mode'])
            self.cameraList.append(camera)
        self.printCamSet()

    #Prints the settings of each camera
    def printCamSet(self):
        """Print the effective settings of every opened camera."""
        # FIX: removed the dead 'counter' variable (incremented but never
        # read) and replaced range(len(...)) indexing with enumerate.
        for i, currCamera in enumerate(self.cameraList):
            print ('Camera %d Settings: ' %i)
            print('Exposure was set to %i us' %currCamera.get_exposure())
            print('Gain was set to %f db' %currCamera.get_gain())
            print('Img Data Format set to %s' %currCamera.get_imgdataformat())
            print ("Available Bandwidth for camera %d: %s " % (i,currCamera.get_available_bandwidth()))
auto_surprise/strategies/base.py | BeelGroup/Auto-Surprise | 23 | 6621109 | <gh_stars>10-100
from auto_surprise.constants import DEFAULT_MAX_EVALS, DEFAULT_HPO_ALGO
class StrategyBase:
    """Common base for Auto-Surprise search strategies.

    Records the shared search configuration; concrete strategies read these
    attributes when evaluating candidate recommender algorithms.
    """

    def __init__(
        self,
        algorithms,
        data,
        target_metric,
        baseline_loss,
        temporary_directory,
        time_limit=None,
        max_evals=DEFAULT_MAX_EVALS,
        hpo_algo=DEFAULT_HPO_ALGO,
        verbose=False,
        random_state=None,
    ):
        # Candidate algorithms and the dataset they are evaluated on.
        self.algorithms = algorithms
        self.data = data
        # Optimization target and the baseline loss to beat.
        self.target_metric = target_metric
        self.baseline_loss = baseline_loss
        # Search budget and environment configuration.
        self.time_limit = time_limit
        self.tmp_dir = temporary_directory
        self.hpo_algo = hpo_algo
        self.max_evals = max_evals
        # Miscellaneous runtime options.
        self.verbose = verbose
        self.random_state = random_state
| from auto_surprise.constants import DEFAULT_MAX_EVALS, DEFAULT_HPO_ALGO
class StrategyBase:
    """Common base for Auto-Surprise search strategies.

    Records the shared search configuration; concrete strategies read these
    attributes when evaluating candidate recommender algorithms.
    """
    def __init__(
        self,
        algorithms,
        data,
        target_metric,
        baseline_loss,
        temporary_directory,
        time_limit=None,
        max_evals=DEFAULT_MAX_EVALS,
        hpo_algo=DEFAULT_HPO_ALGO,
        verbose=False,
        random_state=None,
    ):
        # Candidate algorithms and the dataset they are evaluated on.
        self.algorithms = algorithms
        self.data = data
        # Optimization target and the baseline loss to beat.
        self.target_metric = target_metric
        self.baseline_loss = baseline_loss
        # Search budget and environment configuration.
        self.time_limit = time_limit
        self.tmp_dir = temporary_directory
        self.hpo_algo = hpo_algo
        self.max_evals = max_evals
        self.verbose = verbose
self.random_state = random_state | none | 1 | 1.967068 | 2 | |
src/filecrawl.py | pik-software/pst-extraction | 35 | 6621110 | #! /usr/bin/env python2.7
# -*- coding: utf-8 -*-
import argparse
import sys
import os
import json
import time
import mimetypes
import dateutil.parser
import dateutil.tz
import itertools
import collections
import datetime
import uuid
import traceback
sys.path.append("./utils")
from utils.file import slurpBase64, RollingFile
def timeNow():
    """Return the current local wall-clock time formatted as HH:MM:SS."""
    now = datetime.datetime.now()
    return now.strftime('%H:%M:%S')
def prn(msg):
    """Print *msg* prefixed with the current HH:MM:SS timestamp."""
    # FIX: parenthesized single-argument print is valid and produces the
    # same output under both Python 2 and Python 3 (the original Python-2
    # print statement is a syntax error under Python 3).
    print("[{}] {}".format(timeNow(), msg))
def skip(iterable, at_start=0, at_end=0):
    """Yield items of *iterable*, dropping the first *at_start* and the
    last *at_end* elements."""
    it = iter(iterable)
    # Discard the leading elements (deque with maxlen=0 consumes at C speed).
    collections.deque(itertools.islice(it, at_start), maxlen=0)
    # Keep a look-ahead window of at_end items; an element is emitted only
    # after at_end further elements have been seen, so the trailing at_end
    # elements are never yielded.
    window = collections.deque(itertools.islice(it, at_end))
    for item in it:
        window.append(item)
        yield window.popleft()
# Running count of files actually emitted by crawl_files().
count_total = 0
# File extensions that are never ingested (binary/container formats).
FILE_TYPES_BLACK_LIST=["mdb","msg","exe","zip","gz","dat"]
# Currently unused; presumably reserved for restricting ingestion to
# specific types -- TODO confirm before relying on it.
FILE_TYPES_WHITE_LIST=[]
def guess_mime(filename):
    """Return a best-effort MIME type for *filename*.

    Falls back to the generic "application/octet-stream" when the
    extension is unknown.
    """
    guessed, _encoding = mimetypes.guess_type(filename)
    if guessed:
        return guessed
    return "application/octet-stream"
def UTC_date(date_str):
    """Parse *date_str* and return it in UTC as 'YYYY-MM-DDTHH:MM:SS'.

    Naive datetimes (no tzinfo) are interpreted as local time before the
    conversion to UTC.
    """
    parsed = dateutil.parser.parse(date_str)
    if not parsed.tzinfo:
        parsed = parsed.replace(tzinfo=dateutil.tz.tzlocal())
    parsed = parsed.astimezone(dateutil.tz.tzutc())
    return parsed.strftime('%Y-%m-%dT%H:%M:%S')
def crawl_files(root_dir, meta):
    """Walk *root_dir* and yield one JSON string per ingestible file.

    Each record mimics an email document: the file content is embedded
    base64-encoded as a single attachment, the owning uid stands in for the
    sender and the file mtime becomes the record datetime.

    :param root_dir: root directory of the crawl
    :param meta: ingest metadata dict merged into every record.
        NOTE(review): this dict is mutated in place ("original_artifact" is
        reassigned on every file); callers must not rely on it being
        unchanged after the call.
    """
    global count_total
    _prefix_length = len(root_dir)
    for root, _, files in os.walk(root_dir):
        for filename in files:
            _, ext = os.path.splitext(filename)
            if ext.replace(".","").lower() in FILE_TYPES_BLACK_LIST:
                # FIX: parenthesized prints work (with identical output)
                # under both Python 2 and Python 3.
                print("Skipping file: %s"%str(filename))
            else:
                count_total+=1
                print("Processing file: %s"%str(filename))
                abs_path = os.path.abspath("{}/{}".format(root, filename))
                (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat(abs_path)
                if size > 10000000:
                    # skip files over ~10 MB to bound record size
                    print("Skipping large file: %s, size=%s"%(str(filename),str(size)))
                    continue
                guid = str(uuid.uuid1())
                # path relative to the crawl root (strip a leading '/' if present)
                rel_path = str(abs_path[(_prefix_length if not abs_path[_prefix_length]=='/' else _prefix_length+1):])
                print("-- abs_path: %s"%str(abs_path))
                print("-- rel_path: %s"%str(rel_path))
                meta["original_artifact"] = {"filename" : rel_path, "type" : "files"}
                row = {
                    "id" : guid,
                    "senders" : [str(uid)],
                    "senders_line" : str(uid),
                    "tos" : ["none"],
                    "tos_line" : "none",
                    "ccs":[],
                    "ccs_line": "",
                    "bccs":[],
                    "bccs_line": "",
                    "subject" : rel_path,
                    "body" : "",
                    "datetime" : UTC_date(time.ctime(mtime)),
                    "attachments" : [
                        {
                            "guid" : str(uuid.uuid1()),
                            "contents64" : slurpBase64(abs_path),
                            "filename" : filename,
                            "extension" :ext.replace(".","").lower(),
                            "content_type" : guess_mime(filename),
                            "filesize": size,
                            "created" : UTC_date(time.ctime(ctime)),
                            "modified": UTC_date(time.ctime(mtime)),
                        }
                    ]}
                row.update(meta)
                yield json.dumps(row)
if __name__ == "__main__":
    desc = '''
    examples:
        ./filecrawl.py {files_directory} output_path
    '''
    parser = argparse.ArgumentParser(
        description=" ... ",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=desc)
    parser.add_argument("-l", "--limit", type=int, default=10, help="number of MB to limit output file size too, default 10MB")
    parser.add_argument("file_root_path", help="root directory of files")
    parser.add_argument("out_dir", help="ouput directory")
    parser.add_argument("-i", "--ingest_id", required=True, help="ingest id, usually the name of the email account, or the ingest process")
    parser.add_argument("-c", "--case_id", required=True, help="case id used to track and search accross multiple cases")
    parser.add_argument("-a", "--alt_ref_id", required=True, help="an alternate id used to corelate to external datasource")
    parser.add_argument("-b", "--label", required=True, help="user defined label for the dateset")
    args = parser.parse_args()
    # Ingest metadata merged into every record emitted by crawl_files().
    meta = {}
    meta["ingest_id"] = args.ingest_id
    meta["case_id"] = args.case_id
    meta["alt_ref_id"] = args.alt_ref_id
    meta["label"] = args.label
    files_path = os.path.abspath(args.file_root_path)
    # RollingFile rolls over to a new "part" file once the size limit (MB) is hit.
    with RollingFile(args.out_dir, "part", args.limit) as outfile:
        for i, crawl_file in enumerate(crawl_files(files_path, meta)):
            try:
                outfile.write( crawl_file + "\n")
            except Exception as e:
                # best-effort: log the failing record and keep crawling
                traceback.print_exc()
                # NOTE(review): e.message is Python-2 only
                print "exception line: {} | {} ".format(i, e.message)
            if i % 1000 == 0:
                # progress heartbeat every 1000 records
                prn("completed line: {}".format(i))
    print "Total processed: {}".format(count_total)
| #! /usr/bin/env python2.7
# -*- coding: utf-8 -*-
import argparse
import sys
import os
import json
import time
import mimetypes
import dateutil.parser
import dateutil.tz
import itertools
import collections
import datetime
import uuid
import traceback
sys.path.append("./utils")
from utils.file import slurpBase64, RollingFile
def timeNow():
    """Return the current local wall-clock time formatted as HH:MM:SS."""
    now = datetime.datetime.now()
    return now.strftime('%H:%M:%S')
def prn(msg):
    """Print *msg* prefixed with the current HH:MM:SS timestamp."""
    # FIX: parenthesized single-argument print is valid and produces the
    # same output under both Python 2 and Python 3 (the original Python-2
    # print statement is a syntax error under Python 3).
    print("[{}] {}".format(timeNow(), msg))
def skip(iterable, at_start=0, at_end=0):
    """Yield items of *iterable*, dropping the first *at_start* and the
    last *at_end* elements."""
    it = iter(iterable)
    # Discard the leading elements (deque with maxlen=0 consumes at C speed).
    collections.deque(itertools.islice(it, at_start), maxlen=0)
    # Keep a look-ahead window of at_end items; an element is emitted only
    # after at_end further elements have been seen, so the trailing at_end
    # elements are never yielded.
    window = collections.deque(itertools.islice(it, at_end))
    for item in it:
        window.append(item)
        yield window.popleft()
# Running count of files actually emitted by crawl_files().
count_total = 0
# File extensions that are never ingested (binary/container formats).
FILE_TYPES_BLACK_LIST=["mdb","msg","exe","zip","gz","dat"]
# Currently unused; presumably reserved for restricting ingestion to
# specific types -- TODO confirm before relying on it.
FILE_TYPES_WHITE_LIST=[]
def guess_mime(filename):
    """Return a best-effort MIME type for *filename*.

    Falls back to the generic "application/octet-stream" when the
    extension is unknown.
    """
    guessed, _encoding = mimetypes.guess_type(filename)
    if guessed:
        return guessed
    return "application/octet-stream"
def UTC_date(date_str):
    """Parse *date_str* and return it in UTC as 'YYYY-MM-DDTHH:MM:SS'.

    Naive datetimes (no tzinfo) are interpreted as local time before the
    conversion to UTC.
    """
    parsed = dateutil.parser.parse(date_str)
    if not parsed.tzinfo:
        parsed = parsed.replace(tzinfo=dateutil.tz.tzlocal())
    parsed = parsed.astimezone(dateutil.tz.tzutc())
    return parsed.strftime('%Y-%m-%dT%H:%M:%S')
def crawl_files(root_dir, meta):
    """Walk *root_dir* and yield one JSON string per ingestible file.

    Each record mimics an email document: the file content is embedded
    base64-encoded as a single attachment, the owning uid stands in for the
    sender and the file mtime becomes the record datetime.

    :param root_dir: root directory of the crawl
    :param meta: ingest metadata dict merged into every record.
        NOTE(review): this dict is mutated in place ("original_artifact" is
        reassigned on every file); callers must not rely on it being
        unchanged after the call.
    """
    global count_total
    _prefix_length = len(root_dir)
    for root, _, files in os.walk(root_dir):
        for filename in files:
            _, ext = os.path.splitext(filename)
            if ext.replace(".","").lower() in FILE_TYPES_BLACK_LIST:
                # FIX: parenthesized prints work (with identical output)
                # under both Python 2 and Python 3.
                print("Skipping file: %s"%str(filename))
            else:
                count_total+=1
                print("Processing file: %s"%str(filename))
                abs_path = os.path.abspath("{}/{}".format(root, filename))
                (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat(abs_path)
                if size > 10000000:
                    # skip files over ~10 MB to bound record size
                    print("Skipping large file: %s, size=%s"%(str(filename),str(size)))
                    continue
                guid = str(uuid.uuid1())
                # path relative to the crawl root (strip a leading '/' if present)
                rel_path = str(abs_path[(_prefix_length if not abs_path[_prefix_length]=='/' else _prefix_length+1):])
                print("-- abs_path: %s"%str(abs_path))
                print("-- rel_path: %s"%str(rel_path))
                meta["original_artifact"] = {"filename" : rel_path, "type" : "files"}
                row = {
                    "id" : guid,
                    "senders" : [str(uid)],
                    "senders_line" : str(uid),
                    "tos" : ["none"],
                    "tos_line" : "none",
                    "ccs":[],
                    "ccs_line": "",
                    "bccs":[],
                    "bccs_line": "",
                    "subject" : rel_path,
                    "body" : "",
                    "datetime" : UTC_date(time.ctime(mtime)),
                    "attachments" : [
                        {
                            "guid" : str(uuid.uuid1()),
                            "contents64" : slurpBase64(abs_path),
                            "filename" : filename,
                            "extension" :ext.replace(".","").lower(),
                            "content_type" : guess_mime(filename),
                            "filesize": size,
                            "created" : UTC_date(time.ctime(ctime)),
                            "modified": UTC_date(time.ctime(mtime)),
                        }
                    ]}
                row.update(meta)
                yield json.dumps(row)
if __name__ == "__main__":
    desc = '''
    examples:
        ./filecrawl.py {files_directory} output_path
    '''
    parser = argparse.ArgumentParser(
        description=" ... ",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=desc)
    parser.add_argument("-l", "--limit", type=int, default=10, help="number of MB to limit output file size too, default 10MB")
    parser.add_argument("file_root_path", help="root directory of files")
    parser.add_argument("out_dir", help="ouput directory")
    parser.add_argument("-i", "--ingest_id", required=True, help="ingest id, usually the name of the email account, or the ingest process")
    parser.add_argument("-c", "--case_id", required=True, help="case id used to track and search accross multiple cases")
    parser.add_argument("-a", "--alt_ref_id", required=True, help="an alternate id used to corelate to external datasource")
    parser.add_argument("-b", "--label", required=True, help="user defined label for the dateset")
    args = parser.parse_args()
    # Ingest metadata merged into every record emitted by crawl_files().
    meta = {}
    meta["ingest_id"] = args.ingest_id
    meta["case_id"] = args.case_id
    meta["alt_ref_id"] = args.alt_ref_id
    meta["label"] = args.label
    files_path = os.path.abspath(args.file_root_path)
    # RollingFile rolls over to a new "part" file once the size limit (MB) is hit.
    with RollingFile(args.out_dir, "part", args.limit) as outfile:
        for i, crawl_file in enumerate(crawl_files(files_path, meta)):
            try:
                outfile.write( crawl_file + "\n")
            except Exception as e:
                # best-effort: log the failing record and keep crawling
                traceback.print_exc()
                # NOTE(review): e.message is Python-2 only
                print "exception line: {} | {} ".format(i, e.message)
            if i % 1000 == 0:
                # progress heartbeat every 1000 records
                prn("completed line: {}".format(i))
    print "Total processed: {}".format(count_total)
| en | 0.27532 | #! /usr/bin/env python2.7 # -*- coding: utf-8 -*- # filename, ext = os.path.splitext(file) examples: ./filecrawl.py {files_directory} output_path | 2.187081 | 2 |
TORO/libs/toro/model.py | IDA-TUBS/TORO | 2 | 6621111 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Toro
| Copyright (C) 2021 Institute of Computer and Network Engineering (IDA) at TU BS
| All rights reserved.
| See LICENSE file for copyright and license details.
:Authors:
- <NAME>
- <NAME>
- <NAME>
- <NAME>
Description
-----------
Parent module for software parts of a system. extTask and extEffectChain are derived from pyCPA,
with class Job added specifically for the BET/LET chain analysis. Class extEffectChain contains
functions for decomposing CECs and determining subchain deadlines.
"""
from enum import Enum, auto
import math
from collections import deque
from pycpa import model
class extTask(model.Task): # pragma: no cover
    """
    Derived task class from pyCPA.
    It includes BET and LET semantics.
    It provides a function to instantiate jobs.
    """
    def __init__(self, name, release_offset, bcet, wcet,
                 scheduling_parameter,
                 semantic, let=None, wcrt=None, bcrt=None, deadline=None):
        model.Task.__init__(self, name)
        self.release_offset = release_offset   # activation offset of the task
        self.bcet = bcet                       # best-case execution time
        self.wcet = wcet                       # worst-case execution time
        self.scheduling_parameter = scheduling_parameter
        self.semantic = semantic               # Semantic.BET or Semantic.LET
        self.let = let                         # logical execution time (LET tasks only)
        self.deadline = deadline
        self.wcrt = wcrt                       # worst-case response time
        self.bcrt = bcrt                       # best-case response time

    def set_scheduling_parameter(self, scheduling_parameter):
        """ Define a task's priority if not already defined at a task's initialisation

        :param scheduling_parameter: task priority (int)
        :rtype: None
        """
        self.scheduling_parameter = scheduling_parameter

    def set_scheduling_paramter(self, scheduling_parameter):
        """ Misspelled alias of :meth:`set_scheduling_parameter`; kept for
        backward compatibility with existing callers.

        :param scheduling_parameter: task priority (int)
        :rtype: None
        """
        self.set_scheduling_parameter(scheduling_parameter)

    def set_release_offset(self, release_offset):
        """ Define a task's release/activation offset, if not already defined at a task's initialisation

        :param release_offset: task release/activation offset (int)
        :rtype: None
        """
        self.release_offset = release_offset

    def set_system_level_task(self):
        """ Set flag showing a task is used as a LET interconnect task.

        :rtype: None
        """
        assert self.semantic == Semantic.LET, "Task " + self.name + " is not a LET task, therefore cannot be defined as LET interconnect task."
        self.sl_ic_task = True

    def instantiate_job(self, job_number, wcrt, bcrt):
        """ Instantiate a job with ID job_number.

        :param job_number: job index (1-based)
        :param wcrt: task WCRT
        :param bcrt: task BCRT
        :rtype: Job
        """
        job = Job(name=self.name,
                  task_name = self.name,
                  period=self.in_event_model.P,
                  offset=self.release_offset,
                  bcet = self.bcet,
                  wcet=self.wcet,
                  let=self.let,
                  job_number=job_number,
                  wcrt=wcrt,
                  bcrt=bcrt,
                  semantic=self.semantic,
                  deadline = self.deadline)
        if self.semantic == Semantic.BET:
            # for BET tasks the best-case response time must equal the BCET
            assert bcrt == self.bcet, ('Warning: check relation between bcrt = ' + str(bcrt)
                                       + ' and bcet = ' + str(self.bcet)
                                       + ' for task ' + self.name + '.')
        elif (self.semantic == Semantic.LET) and (hasattr(self, 'sl_ic_task')):
            job.set_ic_task()
        return job
class Job(object): # pragma: no cover
    """ Parameterized job model.

    Represents a single activation (instance) of a task together with its
    read interval [Rmin, Rmax] and data interval [Dmin, Dmax].
    """
    def __init__(self, name, task_name, period, offset=None, bcet=None, wcet=None, let=None, bcrt=None,
                 job_number=None, wcrt=None, semantic=None, deadline=None):
        self.task_name = task_name
        self.period = period
        self.offset = offset
        self.job_number = job_number
        self.name = (name + ",%d" % self.job_number)
        self.bcet = bcet
        self.wcet = wcet
        self.wcrt = wcrt
        self.bcrt = bcrt
        self.let = let
        self.ic_task = False  # True only for LET interconnect tasks
        if deadline is None:
            self.deadline = period # implicit deadline
        else:
            self.deadline = deadline # arbitrary deadline
        self.Rmin = None
        self.Rmax = None
        self.Dmin = None
        self.Dmax = None
        self.robustness_margin = None
        self.slack = None
        self.delta_let = None
        self.semantic = semantic
        self.set_RI_DI()

    def set_ic_task(self):
        """ Set flag showing a job is used as a LET interconnect task.

        :rtype: None
        """
        self.ic_task = True

    def set_RI_DI(self):
        """ Compute minimum and maximum read and data intervals of the job.
        [4] Equations (2.2a)-(2.2d) and (2.3a)-(2.3d).

        :rtype: None
        """
        # job belongs to BET task
        if self.semantic == Semantic.BET:
            # BUG FIX: the original condition used 'or' ('wcet != None or
            # wcet != 0'), which is a tautology and could never fire.
            # The intent is a set, non-zero WCET for BET tasks.
            assert self.wcet is not None and self.wcet != 0, 'Unset WCET values for task! '+ self.task_name
            assert self.let is None or self.let == 0, 'Contradictory task parameters!'
            self.Rmin = self.offset + (self.job_number - 1) * self.period
            self.Rmax = self.Rmin + self.wcrt - self.bcet
            self.Dmin = self.Rmin + self.bcrt
            self.Dmax = self.offset + self.job_number * self.period + self.wcrt
        elif self.semantic == Semantic.LET:
            # LET jobs read exactly at release and publish after their LET
            self.Rmin = self.offset + (self.job_number - 1) * self.period
            self.Rmax = self.Rmin
            self.Dmin = self.Rmin + self.let
            self.Dmax = self.offset + self.job_number * self.period + self.let
        else:
            # BUG FIX: the original concatenated a Semantic enum into a str,
            # raising TypeError instead of the intended NotImplementedError.
            raise NotImplementedError("Task semantic %s not supported yet" % self.semantic)

    def set_slack(self, theta):
        """ Update the job's maximum possible slack theta.
        Results are stored on a job level.

        :param theta: int
        :rtype: None
        """
        if theta >= 0:
            # keep the smallest non-negative slack observed so far
            if (self.slack is None) or ((self.slack is not None) and (theta < self.slack)):
                self.slack = theta
class extEffectChain(model.EffectChain):
    """ Cause-effect chain representation in TORO. Derived from pyCPA.

    Contains methods for decomposing chains and analysing those
    decomposed chains with regards to transition latencies and
    deadlines. Can also calculate subchain deadlines.
    """
    def __init__(self, name, tasks=None, e2e_deadline=None, subchain=False):
        model.EffectChain.__init__(self, name, tasks)
        self.e2e_deadline = e2e_deadline
        self.next_chains = list()
        if subchain is False:
            # full chain: accumulated latency over all subchains plus the
            # combined transition latencies, split by semantic.
            # NOTE: attribute name kept misspelled for compatibility.
            self.combined_latency = None
            self.combined_transistion_latencies = dict()
            self.combined_transistion_latencies['let'] = None
            self.combined_transistion_latencies['bet'] = None
        if subchain is True:
            # subchain: own latency, latency of the transition to the next
            # subchain and the deadline granted for that transition
            self.transition_latency = None
            self.latency = None
            self.transition_deadline = None

    def set_semantic(self, semantic):
        """ Set the chain semantic.

        :param semantic: Semantic
        :rtype: None
        """
        self.semantic = semantic

    def determine_semantic(self) -> bool:
        """ Determine and set the semantic of the chain from its tasks.

        Returns False (without setting a semantic) if tasks with different
        semantics are mixed within the chain.

        :rtype: bool
        """
        # FIX: removed leftover debug prints of every task and semantic.
        semantic_tmp = None
        prev = None
        for task in self.tasks:
            if semantic_tmp is None:
                semantic_tmp = task.semantic
            else:
                # compare semantic/activation pattern with previous task
                if task.semantic != prev.semantic:
                    return False
            prev = task
        self.set_semantic(semantic_tmp)
        return True

    def calculate_transition_latency(self, prevChain):
        """ Calculate the transition latency between the previous and current (self) subchain.
        (cf. [3] theorem 2 and [4] Equation (3.4))
        The value is stored in prevChain AND returned separately.

        :param prevChain: TORO extEffectChain
        :rtype: int
        """
        assert (prevChain is not None), "Chain has not been initialized yet"
        if self.semantic == 'event-triggered':
            t_lat = 0
        elif prevChain.semantic == Semantic.BET:
            t = prevChain.tasks[-1]
            t_lat = t.in_event_model.P + t.wcrt - t.bcrt
        elif prevChain.semantic == Semantic.LET:
            t = prevChain.tasks[-1]
            t_lat = t.in_event_model.P
        else:
            # BUG FIX: the original concatenated Semantic enum members into a
            # str, which raised TypeError instead of the intended error.
            raise NotImplementedError(
                "Calculation of transition latencies between %s and %s chains has not been implemented yet"
                % (prevChain.semantic, self.semantic))
        prevChain.transition_latency = t_lat
        return t_lat

    def calculate_preliminary_deadline(self, total_deadline, sum_latencies, known_transition_deadlines):
        """
        Calculate a preliminary subchain deadline (deadline_tilde).
        The more precise subchain deadlines are computed based on this preliminary one.
        (Cf. [4] Equation (3.7): BET transition deadlines are expected to be 0 at this point in time!)

        :param total_deadline: int
        :param sum_latencies: int
        :param known_transition_deadlines: dict
        """
        if total_deadline is None:
            # if no deadline has been defined, fall back to the sum of all
            # latencies and known transition deadlines
            total_deadline = sum_latencies + known_transition_deadlines['let'] + known_transition_deadlines['bet']
        deadline_tilde = (self.latency / sum_latencies) * (total_deadline - known_transition_deadlines['let'])
        self.e2e_deadline = deadline_tilde

    def update_deadline(self, total_deadline, sum_latencies, chain_transition_deadlines):
        """ Update a subchain's e2e deadline (Equation (3.9) [4]).

        :param total_deadline: int
        :param sum_latencies: int
        :param chain_transition_deadlines: float
        """
        if total_deadline is None:
            total_deadline = sum_latencies # TODO + transition latencies?
        assert self.latency is not None, "Error: updating subchain deadlines without subchain latency not possible."
        deadline = (self.latency / sum_latencies) * (total_deadline - chain_transition_deadlines)
        assert self.latency <= (deadline), "Error: subchain deadline (" + str(deadline) + ") was violated: latency = " + str(self.latency)
        # results are always rounded down!
        self.e2e_deadline = math.floor(deadline)

    def update_transition_deadline(self):
        """ Calculate a subchain's transition deadline (Equation (3.8) [4]).
        Only used for BET subchains.

        :rtype: int
        """
        if self.transition_latency is None:
            # last subchain of the chain: no transition follows
            return 0
        transition_deadline = self.e2e_deadline - self.latency + self.transition_latency
        # TODO some assertion here as well?
        self.transition_deadline = transition_deadline
        return transition_deadline

    def calc_actual_deadlines(self):
        """ Implementation of Algorithm 6 [4].
        Repeatedly updates preliminary deadlines and transition deadlines
        until a fixed point has been reached.

        :rtype: list
        """
        # FIX: removed the dead iteration counter 'cnt' (never read).
        # ring buffer for detecting convergence; two slots make the check
        # robust against result vectors that alternate between two values
        ring_buffer = deque(maxlen=2)
        while True:
            results = list()
            sum_transition_deadlines = 0
            # accumulate transition deadlines of all subchains belonging to the chain
            for subchain in self.decomposed_chains:
                if (subchain.semantic == Semantic.BET):
                    sum_transition_deadlines += subchain.update_transition_deadline()
                elif (subchain.semantic == Semantic.LET):
                    if subchain.transition_latency is not None:
                        # LET subchain transition latency is either = P (per
                        # definition) or 0 if the consecutive subchain is
                        # event-triggered
                        sum_transition_deadlines += subchain.transition_latency
                else:
                    raise NotImplementedError("Processing of chains that use a %s semantic has not been implemented yet" % subchain.semantic)
            # update subchain deadlines and build the result vector used for
            # convergence testing
            sum_deadlines = 0
            for subchain in self.decomposed_chains:
                subchain.update_deadline(self.e2e_deadline, self.combined_latency, sum_transition_deadlines)
                res = (subchain.e2e_deadline, subchain.transition_deadline)
                results.append(res)
                sum_deadlines += subchain.e2e_deadline
                if subchain.transition_deadline is not None:
                    sum_deadlines += subchain.transition_deadline
            # check for convergence: a repeated result vector means a fixed
            # point has been reached
            if results in ring_buffer:
                # Note: sum_deadlines already contains both "normal" and transition deadlines
                assert sum_deadlines <= self.e2e_deadline, "Chain end-to-end deadline has been violated: deadline = " + str(self.e2e_deadline) + " - latency = " + str(sum_deadlines)
                return results
            ring_buffer.append(results)
class Semantic(Enum): # pragma: no cover
    """ Enum of all supported task activation/execution semantics. """
    LET = auto()              # Logical Execution Time
    BET = auto()              # Bounded Execution Time
    EVENT_TRIGGERED = auto()  # activated by events rather than time
    SPORADIC = auto()         # sporadic activation
    MISC = auto()             # anything else
# -*- coding: utf-8 -*-
""" Toro
| Copyright (C) 2021 Institute of Computer and Network Engineering (IDA) at TU BS
| All rights reserved.
| See LICENSE file for copyright and license details.
:Authors:
- <NAME>
- <NAME>
- <NAME>
- <NAME>
Description
-----------
Parent module for software parts of a system. extTask and extEffectChain are derived from pyCPA,
with class Job added specifically for the BET/LET chain analysis. Class extEffectChain contains
functions for decomposing CECs and determining subchain deadlines.
"""
from enum import Enum, auto
import math
from collections import deque
from pycpa import model
class extTask(model.Task): # pragma: no cover
    """
    Derived task class from pyCPA.
    It includes BET and LET semantics.
    It provides a function to instantiate jobs.
    """
    def __init__(self, name, release_offset, bcet, wcet,
                 scheduling_parameter,
                 semantic, let=None, wcrt=None, bcrt=None, deadline=None):
        model.Task.__init__(self, name)
        self.release_offset = release_offset   # activation offset of the task
        self.bcet = bcet                       # best-case execution time
        self.wcet = wcet                       # worst-case execution time
        self.scheduling_parameter = scheduling_parameter
        self.semantic = semantic               # Semantic.BET or Semantic.LET
        self.let = let                         # logical execution time (LET tasks only)
        self.deadline = deadline
        self.wcrt = wcrt                       # worst-case response time
        self.bcrt = bcrt                       # best-case response time

    def set_scheduling_parameter(self, scheduling_parameter):
        """ Define a task's priority if not already defined at a task's initialisation

        :param scheduling_parameter: task priority (int)
        :rtype: None
        """
        self.scheduling_parameter = scheduling_parameter

    def set_scheduling_paramter(self, scheduling_parameter):
        """ Misspelled alias of :meth:`set_scheduling_parameter`; kept for
        backward compatibility with existing callers.

        :param scheduling_parameter: task priority (int)
        :rtype: None
        """
        self.set_scheduling_parameter(scheduling_parameter)

    def set_release_offset(self, release_offset):
        """ Define a task's release/activation offset, if not already defined at a task's initialisation

        :param release_offset: task release/activation offset (int)
        :rtype: None
        """
        self.release_offset = release_offset

    def set_system_level_task(self):
        """ Set flag showing a task is used as a LET interconnect task.

        :rtype: None
        """
        assert self.semantic == Semantic.LET, "Task " + self.name + " is not a LET task, therefore cannot be defined as LET interconnect task."
        self.sl_ic_task = True

    def instantiate_job(self, job_number, wcrt, bcrt):
        """ Instantiate a job with ID job_number.

        :param job_number: job index (1-based)
        :param wcrt: task WCRT
        :param bcrt: task BCRT
        :rtype: Job
        """
        job = Job(name=self.name,
                  task_name = self.name,
                  period=self.in_event_model.P,
                  offset=self.release_offset,
                  bcet = self.bcet,
                  wcet=self.wcet,
                  let=self.let,
                  job_number=job_number,
                  wcrt=wcrt,
                  bcrt=bcrt,
                  semantic=self.semantic,
                  deadline = self.deadline)
        if self.semantic == Semantic.BET:
            # for BET tasks the best-case response time must equal the BCET
            assert bcrt == self.bcet, ('Warning: check relation between bcrt = ' + str(bcrt)
                                       + ' and bcet = ' + str(self.bcet)
                                       + ' for task ' + self.name + '.')
        elif (self.semantic == Semantic.LET) and (hasattr(self, 'sl_ic_task')):
            job.set_ic_task()
        return job
class Job(object): # pragma: no cover
    """ Parameterized job model.

    Represents a single activation (instance) of a task together with its
    read interval [Rmin, Rmax] and data interval [Dmin, Dmax].
    """
    def __init__(self, name, task_name, period, offset=None, bcet=None, wcet=None, let=None, bcrt=None,
                 job_number=None, wcrt=None, semantic=None, deadline=None):
        self.task_name = task_name
        self.period = period
        self.offset = offset
        self.job_number = job_number
        self.name = (name + ",%d" % self.job_number)
        self.bcet = bcet
        self.wcet = wcet
        self.wcrt = wcrt
        self.bcrt = bcrt
        self.let = let
        self.ic_task = False  # True only for LET interconnect tasks
        if deadline is None:
            self.deadline = period # implicit deadline
        else:
            self.deadline = deadline # arbitrary deadline
        self.Rmin = None
        self.Rmax = None
        self.Dmin = None
        self.Dmax = None
        self.robustness_margin = None
        self.slack = None
        self.delta_let = None
        self.semantic = semantic
        self.set_RI_DI()

    def set_ic_task(self):
        """ Set flag showing a job is used as a LET interconnect task.

        :rtype: None
        """
        self.ic_task = True

    def set_RI_DI(self):
        """ Compute minimum and maximum read and data intervals of the job.
        [4] Equations (2.2a)-(2.2d) and (2.3a)-(2.3d).

        :rtype: None
        """
        # job belongs to BET task
        if self.semantic == Semantic.BET:
            # BUG FIX: the original condition used 'or' ('wcet != None or
            # wcet != 0'), which is a tautology and could never fire.
            # The intent is a set, non-zero WCET for BET tasks.
            assert self.wcet is not None and self.wcet != 0, 'Unset WCET values for task! '+ self.task_name
            assert self.let is None or self.let == 0, 'Contradictory task parameters!'
            self.Rmin = self.offset + (self.job_number - 1) * self.period
            self.Rmax = self.Rmin + self.wcrt - self.bcet
            self.Dmin = self.Rmin + self.bcrt
            self.Dmax = self.offset + self.job_number * self.period + self.wcrt
        elif self.semantic == Semantic.LET:
            # LET jobs read exactly at release and publish after their LET
            self.Rmin = self.offset + (self.job_number - 1) * self.period
            self.Rmax = self.Rmin
            self.Dmin = self.Rmin + self.let
            self.Dmax = self.offset + self.job_number * self.period + self.let
        else:
            # BUG FIX: the original concatenated a Semantic enum into a str,
            # raising TypeError instead of the intended NotImplementedError.
            raise NotImplementedError("Task semantic %s not supported yet" % self.semantic)

    def set_slack(self, theta):
        """ Update the job's maximum possible slack theta.
        Results are stored on a job level.

        :param theta: int
        :rtype: None
        """
        if theta >= 0:
            # keep the smallest non-negative slack observed so far
            if (self.slack is None) or ((self.slack is not None) and (theta < self.slack)):
                self.slack = theta
class extEffectChain(model.EffectChain):
""" Cause-effect chain representation in TORO. Derived from pyCPA
Contains methods for decomposing chains and analysing those
decomposed chains with regards to transition latencies and
deadlines. Can also calculate subchain deadlines.
"""
def __init__(self, name, tasks=None, e2e_deadline=None, subchain=False):
model.EffectChain.__init__(self, name, tasks)
self.e2e_deadline = e2e_deadline
self.next_chains = list()
if subchain is False:
self.combined_latency = None
self.combined_transistion_latencies = dict()
self.combined_transistion_latencies['let'] = None
self.combined_transistion_latencies['bet'] = None
if subchain is True:
# init subchains with additional parameters: latency, transition_latency, transition deadline, ...?
self.transition_latency = None
self.latency = None
self.transition_deadline = None
def set_semantic(self, semantic):
""" set chain semantic
:param semantic: Semantic
:rtype: None
"""
self.semantic = semantic
def determine_semantic(self) -> bool:
""" Determine and set semantic of chain. Must not work work if tasks with
different semantics are included.
:rtype: bool
"""
semantic_tmp = None
prev = None
for task in self.tasks:
print(task)
if semantic_tmp is None:
semantic_tmp = task.semantic
print(semantic_tmp)
else:
# compare semantic/activation pattern with previous task
if task.semantic != prev.semantic:
return False
prev = task
self.set_semantic(semantic_tmp)
return True
def calculate_transition_latency(self, prevChain):
""" Calculate the transition latency between the previous and current (self) subchain.
(cf. [3] theorem 2 and [4] Equation (3.4)
Function does save the value directly in prevChain AND returns the result seperatly.
:param prevChain: TORO extEffectChain
:rtype: int
"""
assert (prevChain is not None), "Chain has not been initialized yet"
if self.semantic == 'event-triggered':
t_lat = 0
elif prevChain.semantic == Semantic.BET:
t = prevChain.tasks[-1]
t_lat = t.in_event_model.P + t.wcrt - t.bcrt # TODO would imo still be valid after adding: - (t.wcrt - t.bcrt) --> = P
elif prevChain.semantic == Semantic.LET:
t = prevChain.tasks[-1]
t_lat = t.in_event_model.P
else:
raise NotImplementedError("Calculation of transistion latencies between " + prevChain.semantic + " and " + self.semantic + "chains has not been implemented yet")
prevChain.transition_latency = t_lat
return t_lat
def calculate_preliminary_deadline(self, total_deadline, sum_latencies, known_transition_deadlines):
"""
This function calculates a preliminary subchain deadline (deadline_tilde).
The more precise subchain deadlines are computed based on this preliminary one.
(Cf. [4] Equation (3.7): BET transition deadlines are expected to be 0 at this point of time!)
:param total_deadline: int
:param sum_latencies: int
:param known_transition_deadlines: dict
"""
if total_deadline is None:
# if no deadline has been defined, the maximum deadline has to be defined in a different way
total_deadline = sum_latencies + known_transition_deadlines['let'] + known_transition_deadlines['bet']
deadline_tilde = (self.latency / sum_latencies) * (total_deadline - known_transition_deadlines['let'])
self.e2e_deadline = deadline_tilde
def update_deadline(self, total_deadline, sum_latencies, chain_transition_deadlines):
""" Update a subchain's e2e deadline (Equation (3.9) [4])
:param total_deadline: int
:param sum_latencies: int
:param chain_transition_deadline: float
"""
if total_deadline is None:
total_deadline = sum_latencies # TODO + transition latencies?
assert self.latency is not None, "Error: updating subchain deadlines without subchain latency not possible."
deadline = (self.latency / sum_latencies) * (total_deadline - chain_transition_deadlines)
assert self.latency <= (deadline), "Error: subchain deadline (" + str(deadline) + ") was violated: latency = " + str(self.latency)
# results are always rounded of!
self.e2e_deadline = math.floor(deadline)
def update_transition_deadline(self):
    """Compute and store this subchain's transition deadline (Equation (3.8) [4]).

    Only used for BET subchains.

    :rtype: int
    """
    # No transition latency recorded -> contribute nothing.
    if self.transition_latency is None:
        return 0
    # TODO some assertion here as well?
    slack = self.e2e_deadline - self.latency
    self.transition_deadline = slack + self.transition_latency
    return self.transition_deadline
def calc_actual_deadlines(self):
    """Implementation of Algorithm 6 [4].

    Repeatedly updates the preliminary deadlines and transition deadlines
    until a fix point has been reached, then returns the converged
    (e2e_deadline, transition_deadline) tuples, one per subchain.

    :rtype: list
    """
    # counter (currently only incremented, never read -- kept for debugging)
    cnt = 0
    # ring buffer for checking for a basic form of convergence; maxlen=2 so
    # that an alternating two-value cycle is also detected as a fix point
    ring_buffer = deque(maxlen=2)
    while True:
        results = list()
        sum_transition_deadlines = 0
        # accumulate transition deadlines of all subchains belonging to a cause-effect chain
        for subchain in self.decomposed_chains:
            if (subchain.semantic == Semantic.BET):
                sum_transition_deadlines += subchain.update_transition_deadline()
            elif (subchain.semantic == Semantic.LET):
                if subchain.transition_latency is not None:
                    # LET subchain transition latencies is either = P (per def) or 0 if consecutive subchain is event-triggered
                    sum_transition_deadlines += subchain.transition_latency
                # NOTE(review): a LET subchain without a transition latency
                # (presumably the last subchain of the chain) contributes nothing here
            else:
                raise NotImplementedError("Procesing of chains that use a %s semantic has not been implemented yet" % subchain.semantic)
        # update subchain deadline and create result vector used for convergence testing
        sum_deadlines = 0
        for subchain in self.decomposed_chains:
            subchain.update_deadline(self.e2e_deadline, self.combined_latency, sum_transition_deadlines)
            res = (subchain.e2e_deadline, subchain.transition_deadline)
            results.append(res)
            sum_deadlines += subchain.e2e_deadline
            if subchain.transition_deadline is not None:
                sum_deadlines += subchain.transition_deadline
        # check for convergence
        if results in ring_buffer:
            # if the results vector can be found in the ring buffer, a fix point has been reached;
            # the ring buffer makes this robust against alternating values that might occur
            # Note: sum_deadlines already contains both "normal" and transition deadlines
            assert sum_deadlines <= self.e2e_deadline, "Chain end-to-end deadline has been violated: deadline = " + str(self.e2e_deadline) + " - latency = " + str(sum_deadlines)
            return results
        ring_buffer.append(results)
        cnt += 1
class Semantic(Enum):  # pragma: no cover
    """Enum for storing all task (and chain) semantics."""
    LET = auto()              # Logical Execution Time
    BET = auto()              # BET semantic (presumably Bounded Execution Time -- confirm against [4])
    EVENT_TRIGGERED = auto()
    SPORADIC = auto()
    MISC = auto()             # catch-all for semantics not covered above
# init subchains with additional parameters: latency, transition_latency, transition deadline, ...? set chain semantic :param semantic: Semantic :rtype: None Determine and set semantic of chain. Must not work work if tasks with different semantics are included. :rtype: bool # compare semantic/activation pattern with previous task Calculate the transition latency between the previous and current (self) subchain. (cf. [3] theorem 2 and [4] Equation (3.4) Function does save the value directly in prevChain AND returns the result seperatly. :param prevChain: TORO extEffectChain :rtype: int # TODO would imo still be valid after adding: - (t.wcrt - t.bcrt) --> = P This function calculates a preliminary subchain deadline (deadline_tilde). The more precise subchain deadlines are computed based on this preliminary one. (Cf. [4] Equation (3.7): BET transition deadlines are expected to be 0 at this point of time!) :param total_deadline: int :param sum_latencies: int :param known_transition_deadlines: dict # if no deadline has been defined, the maximum deadline has to be defined in a different way Update a subchain's e2e deadline (Equation (3.9) [4]) :param total_deadline: int :param sum_latencies: int :param chain_transition_deadline: float # TODO + transition latencies? # results are always rounded of! Calculate a subchain's transition deadline (Equation (3.8) [4]). Only used for BET subchains. :rtype: int # TODO some assertion here as well? Implementation of Algorithm 6 [4]. Updating preliminary deadlines and transition latencies repeatitly until a fix point has been reached. 
:rtype: list # counter # ring buffer for checking for a basic form of convergence # accumulate transition deadlines of all subchains belonging to a cause-effect chain # LET subchain transition latencies is either = P (per def) or 0 if consecutive subchain is event-triggered # update subchain deadline and create result vector used for convergence testing # check for convergence # if the results vector can be found in the ring buffer, a fix point has been reached # the ring buffer is used to be prone to alternating values that might occur # Note: sum_deadlines already contains both "normal" and transition deadlines # pragma: no cover enum for storing all task semantics | 2.25918 | 2 |
fitgrid/errors.py | turbach/fitgrid | 0 | 6621112 | class FitGridError(Exception):
pass
| class FitGridError(Exception):
pass
| none | 1 | 1.096742 | 1 | |
test/test_vocab.py | ulf1/nlptasks | 2 | 6621113 | from nlptasks.vocab import (
identify_vocab_mincount, texttoken_to_index)
def test1():
data = ["abc", "abc", "abc", "def", "def", "ghi"]
min_occurrences = 2
VOCAB = identify_vocab_mincount(data, min_occurrences)
assert "abc" in VOCAB
assert "def" in VOCAB
assert "ghi" not in VOCAB
assert VOCAB == ["abc", "def"]
def test2():
sequence = ["abc", "abc", "abc", "def", "def", "ghi"]
VOCAB = ["abc", "def"]
indicies = texttoken_to_index(sequence, VOCAB)
assert indicies == [0, 0, 0, 1, 1, 2]
def test3():
sequence = ["abc", "abc", "abc", "def", "def", "ghi"]
VOCAB = ["abc", "def", "[UNK]"]
indicies = texttoken_to_index(sequence, VOCAB)
assert indicies == [0, 0, 0, 1, 1, 2]
| from nlptasks.vocab import (
identify_vocab_mincount, texttoken_to_index)
def test1():
data = ["abc", "abc", "abc", "def", "def", "ghi"]
min_occurrences = 2
VOCAB = identify_vocab_mincount(data, min_occurrences)
assert "abc" in VOCAB
assert "def" in VOCAB
assert "ghi" not in VOCAB
assert VOCAB == ["abc", "def"]
def test2():
sequence = ["abc", "abc", "abc", "def", "def", "ghi"]
VOCAB = ["abc", "def"]
indicies = texttoken_to_index(sequence, VOCAB)
assert indicies == [0, 0, 0, 1, 1, 2]
def test3():
sequence = ["abc", "abc", "abc", "def", "def", "ghi"]
VOCAB = ["abc", "def", "[UNK]"]
indicies = texttoken_to_index(sequence, VOCAB)
assert indicies == [0, 0, 0, 1, 1, 2]
| none | 1 | 3.036286 | 3 | |
example.py | mavnt/d | 0 | 6621114 | <filename>example.py
import json
import time
from d import d
def main():
time_ = time.time()
user1 = {
"name": "John",
"surname": "Smith",
"age": 99,
"data": [1, 2, 3, 5],
}
user2 = {
"name": "John",
"surname": "Smith",
"age": 99,
"data": [1, 2, 3, 5],
}
print(json.dumps(d(user1, time_, another_value=1), indent=4))
if __name__ == "__main__":
main()
| <filename>example.py
import json
import time
from d import d
def main():
time_ = time.time()
user1 = {
"name": "John",
"surname": "Smith",
"age": 99,
"data": [1, 2, 3, 5],
}
user2 = {
"name": "John",
"surname": "Smith",
"age": 99,
"data": [1, 2, 3, 5],
}
print(json.dumps(d(user1, time_, another_value=1), indent=4))
if __name__ == "__main__":
main()
| none | 1 | 3.117307 | 3 | |
allennlp/tango/text_only.py | ksteimel/allennlp | 11,433 | 6621115 | <reponame>ksteimel/allennlp
"""
*AllenNLP Tango is an experimental API and parts of it might change or disappear
every time we release a new version.*
"""
import dataclasses
from typing import Set, Optional, Iterable, Any
from allennlp.tango.dataset import DatasetDict
from allennlp.tango.step import Step
@Step.register("text_only")
class TextOnlyDataset(Step):
"""
This step converts a dataset into another dataset that contains only the strings from the original dataset.
You can specify exactly which fields to keep from the original dataset (default is all of them).
You can specify a minimum length of string to keep, to filter out strings that are too short.
"""
DETERMINISTIC = True
def run( # type: ignore
self,
input: DatasetDict,
*,
fields_to_keep: Optional[Set[str]] = None,
min_length: Optional[int] = None,
) -> DatasetDict:
"""
Turns the `input` dataset into another dataset that contains only the strings from the
original dataset.
* `fields_to_keep` is an optional list of field names that you want to keep in the result.
If this is `None`, all fields are kept.
* `min_length` specifies the minimum length that a string must have to be part of the
result. If this is `None`, all strings are considered.
"""
def find_nested_strings(o: Any, prefix: str = "") -> Iterable[str]:
if isinstance(o, list) or isinstance(o, tuple):
for i, item in enumerate(o):
new_prefix = f"{prefix}.{i}"
yield from find_nested_strings(item, new_prefix)
elif isinstance(o, dict):
for name, item in o.items():
new_prefix = f"{prefix}.{name}"
yield from find_nested_strings(item, new_prefix)
elif isinstance(o, str):
if fields_to_keep is None or prefix in fields_to_keep:
if min_length is None or len(o) >= min_length:
yield o
return dataclasses.replace(
input,
splits={
split_name: [
{"text": text} for instance in split for text in find_nested_strings(instance)
]
for split_name, split in input.splits.items()
},
)
| """
*AllenNLP Tango is an experimental API and parts of it might change or disappear
every time we release a new version.*
"""
import dataclasses
from typing import Set, Optional, Iterable, Any
from allennlp.tango.dataset import DatasetDict
from allennlp.tango.step import Step
@Step.register("text_only")
class TextOnlyDataset(Step):
"""
This step converts a dataset into another dataset that contains only the strings from the original dataset.
You can specify exactly which fields to keep from the original dataset (default is all of them).
You can specify a minimum length of string to keep, to filter out strings that are too short.
"""
DETERMINISTIC = True
def run( # type: ignore
self,
input: DatasetDict,
*,
fields_to_keep: Optional[Set[str]] = None,
min_length: Optional[int] = None,
) -> DatasetDict:
"""
Turns the `input` dataset into another dataset that contains only the strings from the
original dataset.
* `fields_to_keep` is an optional list of field names that you want to keep in the result.
If this is `None`, all fields are kept.
* `min_length` specifies the minimum length that a string must have to be part of the
result. If this is `None`, all strings are considered.
"""
def find_nested_strings(o: Any, prefix: str = "") -> Iterable[str]:
if isinstance(o, list) or isinstance(o, tuple):
for i, item in enumerate(o):
new_prefix = f"{prefix}.{i}"
yield from find_nested_strings(item, new_prefix)
elif isinstance(o, dict):
for name, item in o.items():
new_prefix = f"{prefix}.{name}"
yield from find_nested_strings(item, new_prefix)
elif isinstance(o, str):
if fields_to_keep is None or prefix in fields_to_keep:
if min_length is None or len(o) >= min_length:
yield o
return dataclasses.replace(
input,
splits={
split_name: [
{"text": text} for instance in split for text in find_nested_strings(instance)
]
for split_name, split in input.splits.items()
},
) | en | 0.871338 | *AllenNLP Tango is an experimental API and parts of it might change or disappear every time we release a new version.* This step converts a dataset into another dataset that contains only the strings from the original dataset. You can specify exactly which fields to keep from the original dataset (default is all of them). You can specify a minimum length of string to keep, to filter out strings that are too short. # type: ignore Turns the `input` dataset into another dataset that contains only the strings from the original dataset. * `fields_to_keep` is an optional list of field names that you want to keep in the result. If this is `None`, all fields are kept. * `min_length` specifies the minimum length that a string must have to be part of the result. If this is `None`, all strings are considered. | 3.001869 | 3 |
subject_classification_spanish/__init__.py | news-scrapers/subject-classification-spanish | 3 | 6621116 | <gh_stars>1-10
from . import subject_classifier
| from . import subject_classifier | none | 1 | 1.035449 | 1 | |
Chapter06/dataclass_stocks.py | 4n3i5v74/Python-3-Object-Oriented-Programming-Third-Edition | 393 | 6621117 | from dataclasses import make_dataclass, dataclass
# using make_dataclass
Stock = make_dataclass("Stock", ["symbol", "current", "high", "low"])
stock = Stock("FB", 177.46, high=178.67, low=175.79)
# compared to regular object
class StockRegClass:
def __init__(self, name, current, high, low):
self.name = name
self.current = current
self.high = high
self.low = low
stock_reg_class = StockRegClass("FB", 177.46, high=178.67, low=175.79)
# using dataclass decorator
@dataclass
class StockDecorated:
name: str
current: float
high: float
low: float
stock_decorated = StockDecorated("FB", 177.46, high=178.67, low=175.79)
@dataclass
class StockDefaults:
name: str
current: float = 0.0
high: float = 0.0
low: float = 0.0
stock_defaults = StockDefaults("FB")
@dataclass(order=True)
class StockOrdered:
name: str
current: float = 0.0
high: float = 0.0
low: float = 0.0
stock_ordered1 = StockOrdered("FB", 177.46, high=178.67, low=175.79)
stock_ordered2 = StockOrdered("FB")
stock_ordered3 = StockOrdered("FB", 178.42, high=179.28, low=176.39)
| from dataclasses import make_dataclass, dataclass
# using make_dataclass
Stock = make_dataclass("Stock", ["symbol", "current", "high", "low"])
stock = Stock("FB", 177.46, high=178.67, low=175.79)
# compared to regular object
class StockRegClass:
def __init__(self, name, current, high, low):
self.name = name
self.current = current
self.high = high
self.low = low
stock_reg_class = StockRegClass("FB", 177.46, high=178.67, low=175.79)
# using dataclass decorator
@dataclass
class StockDecorated:
name: str
current: float
high: float
low: float
stock_decorated = StockDecorated("FB", 177.46, high=178.67, low=175.79)
@dataclass
class StockDefaults:
name: str
current: float = 0.0
high: float = 0.0
low: float = 0.0
stock_defaults = StockDefaults("FB")
@dataclass(order=True)
class StockOrdered:
name: str
current: float = 0.0
high: float = 0.0
low: float = 0.0
stock_ordered1 = StockOrdered("FB", 177.46, high=178.67, low=175.79)
stock_ordered2 = StockOrdered("FB")
stock_ordered3 = StockOrdered("FB", 178.42, high=179.28, low=176.39)
| en | 0.537498 | # using make_dataclass # compared to regular object # using dataclass decorator | 3.651716 | 4 |
tests/test_feed_download.py | martineian/act-scio2 | 2 | 6621118 | <filename>tests/test_feed_download.py
""" test feed download """
from act.scio.feeds import extract
def test_safe_download() -> None:
""" test for safe download """
assert extract.safe_filename("test%.[x y z]") == "test.x_y_z"
| <filename>tests/test_feed_download.py
""" test feed download """
from act.scio.feeds import extract
def test_safe_download() -> None:
""" test for safe download """
assert extract.safe_filename("test%.[x y z]") == "test.x_y_z"
| en | 0.823029 | test feed download test for safe download | 1.911661 | 2 |
app/trainaclass_views.py | aroranipun04/CloudCV-Old | 11 | 6621119 | __author__ = 'clint'
import time
import os
import json
import traceback
from django.views.generic import CreateView, DeleteView
from django.http import HttpResponse, HttpResponseRedirect
from django.views.decorators.csrf import csrf_exempt
from PIL import Image
from querystring_parser import parser
import redis
from app.models import Picture, Trainaclass
from celeryTasks.webTasks.trainTask import trainImages
from celeryTasks.webTasks.trainTask import customClassifyImages
from cloudcv17 import config
import app.conf as conf
redis_obj = redis.StrictRedis(host=config.REDIS_HOST, port=6379, db=0)
classify_channel_name = 'classify_queue'
# SET OF PATH CONSTANTS - SOME UNUSED
# File initially downloaded here
download_directory = conf.PIC_DIR
# Input image is saved here (symbolic links) - after resizing to 500 x 500
physical_job_root = conf.LOCAL_CLASSIFY_JOB_DIR
demo_log_file = physical_job_root + 'classify_demo.log'
rs = redis.StrictRedis(host=config.REDIS_HOST, port=6379)
def log_to_terminal(message, socketid):
redis_obj.publish('chat', json.dumps({'message': str(message), 'socketid': str(socketid)}))
def classify_wrapper_local(jobPath, socketid, result_path):
customClassifyImages.delay(jobPath, socketid, result_path)
def response_mimetype(request):
if "application/json" in request.META['HTTP_ACCEPT']:
return "application/json"
else:
return "text/plain"
class TrainaclassCreateView(CreateView):
model = Trainaclass
r = None
socketid = None
count_hits = 0
fields = "__all__"
def form_valid(self, form):
"""
This function created the view and validates the form.
It adds images to a new label. Makes directory and save them.
"""
redis_obj.lpush('trainaclass', str(self.request))
self.r = redis_obj
socketid = self.request.POST['socketid']
labelnames = self.request.POST['labelnames'].replace(' ', '_')
log_to_terminal("Label: " + str(self.request.POST['labelnames']), socketid)
self.socketid = socketid
try:
self.object = form.save()
# fcountfile = open(os.path.join(conf.LOG_DIR, 'log_count.txt'), 'a')
# fcountfile.write(str(self.request.META.get('REMOTE_ADDR')) + '\n')
# fcountfile.close()
self.count_hits += 1
old_save_dir = os.path.dirname(conf.PIC_DIR)
folder_name = str(socketid)
save_dir = os.path.join(conf.PIC_DIR, folder_name)
train_dir = os.path.join(save_dir, 'train')
test_dir = os.path.join(save_dir, 'test')
util_dir = os.path.join(save_dir, 'util')
# Make the new directory based on time
if not os.path.exists(save_dir):
os.makedirs(save_dir)
os.makedirs(train_dir)
os.makedirs(util_dir)
os.makedirs(test_dir)
rs.publish('chat', json.dumps({'message': 'save_dir ' + save_dir, 'socketid': str(socketid)}))
try:
all_files = self.request.FILES.getlist('file')
data = {'files': []}
if len(all_files) == 1:
log_to_terminal(str('Downloading Image for label: ' + labelnames), self.socketid)
else:
log_to_terminal(str('Downloading Images for label: ' + labelnames), self.socketid)
if labelnames.strip().lower() != 'test':
label_dir = os.path.join(train_dir, labelnames)
url = os.path.join(conf.PIC_URL, socketid, 'train', labelnames)
else:
label_dir = test_dir
url = os.path.join(conf.PIC_URL, socketid, 'test')
if not os.path.exists(label_dir):
os.makedirs(label_dir)
for file in all_files:
try:
a = Picture()
tick = time.time()
strtick = str(tick).replace('.', '_')
fileName, fileExtension = os.path.splitext(file.name)
file.name = fileName + strtick + fileExtension
a.file.save(file.name, file)
file.name = a.file.name
imgfile = Image.open(os.path.join(old_save_dir, file.name))
size = (500, 500)
imgfile.thumbnail(size, Image.ANTIALIAS)
imgfile.save(os.path.join(label_dir, file.name))
data['files'].append({
'label': labelnames,
'url': os.path.join(url, file.name),
'name': file.name,
'type': 'image/png',
'thumbnailUrl': os.path.join(url, file.name),
'size': 0,
})
except Exception as e:
log_to_terminal(str(traceback.format_exc()), self.socketid)
except Exception as e:
print e
log_to_terminal(str(traceback.format_exc()), self.socketid)
log_to_terminal(str(len(all_files)) + str(' images saved for ' + labelnames), self.socketid)
response = JSONResponse(data, {}, response_mimetype(self.request))
response['Content-Disposition'] = 'inline; filename=files.json'
return response
except Exception as e:
redis_obj.lpush('trainaclass_exception', str(traceback.format_exc()))
log_to_terminal(str(traceback.format_exc()), self.socketid)
def get_context_data(self, **kwargs):
context = super(TrainaclassCreateView, self).get_context_data(**kwargs)
context['pictures'] = Trainaclass.objects.all()
return context
class TrainaclassDeleteView(DeleteView):
model = Trainaclass
def delete(self, request, *args, **kwargs):
"""
This does not actually delete the file, only the database record. But
that is easy to implement.
"""
self.object = self.get_object()
self.object.delete()
if request.is_ajax():
response = JSONResponse(True, {}, response_mimetype(self.request))
response['Content-Disposition'] = 'inline; filename=files.json'
return response
else:
return HttpResponseRedirect('/upload/new')
class JSONResponse(HttpResponse):
"""JSON response class."""
def __init__(self, obj='', json_opts={}, mimetype="application/json", *args, **kwargs):
content = json.dumps(obj, **json_opts)
super(JSONResponse, self).__init__(content, mimetype, *args, **kwargs)
@csrf_exempt
def trainamodel(request):
data = {}
post_dict = parser.parse(request.POST.urlencode())
socketid = post_dict['socketid']
log_to_terminal('Beginning training a new model', post_dict['socketid'])
# old_save_dir = conf.PIC_DIR
folder_name = str(socketid)
save_dir = os.path.join(conf.PIC_DIR, folder_name)
# train_dir = os.path.join(save_dir, 'train')
# test_dir = os.path.join(save_dir, 'test')
# util_dir = os.path.join(save_dir, 'util')
trainImages.delay(os.path.join(save_dir, ''), socketid)
data['info'] = 'completed'
response = JSONResponse(data, {}, response_mimetype(request))
response['Content-Disposition'] = 'inline; filename=files.json'
return response
@csrf_exempt
def testmodel(request):
data = {}
try:
post_dict = parser.parse(request.POST.urlencode())
socketid = post_dict['socketid']
log_to_terminal('Classifying test images', post_dict['socketid'])
old_save_dir = conf.PIC_DIR
folder_name = str(socketid)
save_dir = os.path.join(conf.PIC_DIR, folder_name)
# train_dir = os.path.join(save_dir, 'train')
test_dir = os.path.join(save_dir, 'test')
util_dir = os.path.join(save_dir, 'util')
if not os.path.exists(os.path.join(old_save_dir, folder_name)):
raise Exception('No training images has been provided for this job.')
if len(os.listdir(os.path.join(test_dir))) == 0:
raise Exception('No test images provided')
if not os.path.isfile(os.path.join(util_dir, 'newCaffeModel.prototxt')):
# default_classify(test_dir, socketid, os.path.join(conf.PIC_URL, folder_name, 'test'))
raise Exception('No model has been trained for this job.')
classify_wrapper_local(save_dir, socketid, os.path.join(conf.PIC_URL, folder_name, 'test'))
data['info'] = 'completed'
data['prototxt'] = os.path.join(conf.PIC_URL, folder_name, 'util', 'newCaffeModel.prototxt')
data['caffemodel'] = os.path.join(conf.PIC_URL, folder_name, 'util', 'newCaffeModel.caffemodel')
response = JSONResponse(data, {}, response_mimetype(request))
response['Content-Disposition'] = 'inline; filename=files.json'
return response
except:
data['error'] = str(traceback.format_exc())
log_to_terminal(str(traceback.format_exc()), socketid)
response = JSONResponse(data, {}, response_mimetype(request))
response['Content-Disposition'] = 'inline; filename=files.json'
return response
| __author__ = 'clint'
import time
import os
import json
import traceback
from django.views.generic import CreateView, DeleteView
from django.http import HttpResponse, HttpResponseRedirect
from django.views.decorators.csrf import csrf_exempt
from PIL import Image
from querystring_parser import parser
import redis
from app.models import Picture, Trainaclass
from celeryTasks.webTasks.trainTask import trainImages
from celeryTasks.webTasks.trainTask import customClassifyImages
from cloudcv17 import config
import app.conf as conf
redis_obj = redis.StrictRedis(host=config.REDIS_HOST, port=6379, db=0)
classify_channel_name = 'classify_queue'
# SET OF PATH CONSTANTS - SOME UNUSED
# File initially downloaded here
download_directory = conf.PIC_DIR
# Input image is saved here (symbolic links) - after resizing to 500 x 500
physical_job_root = conf.LOCAL_CLASSIFY_JOB_DIR
demo_log_file = physical_job_root + 'classify_demo.log'
rs = redis.StrictRedis(host=config.REDIS_HOST, port=6379)
def log_to_terminal(message, socketid):
redis_obj.publish('chat', json.dumps({'message': str(message), 'socketid': str(socketid)}))
def classify_wrapper_local(jobPath, socketid, result_path):
customClassifyImages.delay(jobPath, socketid, result_path)
def response_mimetype(request):
if "application/json" in request.META['HTTP_ACCEPT']:
return "application/json"
else:
return "text/plain"
class TrainaclassCreateView(CreateView):
model = Trainaclass
r = None
socketid = None
count_hits = 0
fields = "__all__"
def form_valid(self, form):
"""
This function created the view and validates the form.
It adds images to a new label. Makes directory and save them.
"""
redis_obj.lpush('trainaclass', str(self.request))
self.r = redis_obj
socketid = self.request.POST['socketid']
labelnames = self.request.POST['labelnames'].replace(' ', '_')
log_to_terminal("Label: " + str(self.request.POST['labelnames']), socketid)
self.socketid = socketid
try:
self.object = form.save()
# fcountfile = open(os.path.join(conf.LOG_DIR, 'log_count.txt'), 'a')
# fcountfile.write(str(self.request.META.get('REMOTE_ADDR')) + '\n')
# fcountfile.close()
self.count_hits += 1
old_save_dir = os.path.dirname(conf.PIC_DIR)
folder_name = str(socketid)
save_dir = os.path.join(conf.PIC_DIR, folder_name)
train_dir = os.path.join(save_dir, 'train')
test_dir = os.path.join(save_dir, 'test')
util_dir = os.path.join(save_dir, 'util')
# Make the new directory based on time
if not os.path.exists(save_dir):
os.makedirs(save_dir)
os.makedirs(train_dir)
os.makedirs(util_dir)
os.makedirs(test_dir)
rs.publish('chat', json.dumps({'message': 'save_dir ' + save_dir, 'socketid': str(socketid)}))
try:
all_files = self.request.FILES.getlist('file')
data = {'files': []}
if len(all_files) == 1:
log_to_terminal(str('Downloading Image for label: ' + labelnames), self.socketid)
else:
log_to_terminal(str('Downloading Images for label: ' + labelnames), self.socketid)
if labelnames.strip().lower() != 'test':
label_dir = os.path.join(train_dir, labelnames)
url = os.path.join(conf.PIC_URL, socketid, 'train', labelnames)
else:
label_dir = test_dir
url = os.path.join(conf.PIC_URL, socketid, 'test')
if not os.path.exists(label_dir):
os.makedirs(label_dir)
for file in all_files:
try:
a = Picture()
tick = time.time()
strtick = str(tick).replace('.', '_')
fileName, fileExtension = os.path.splitext(file.name)
file.name = fileName + strtick + fileExtension
a.file.save(file.name, file)
file.name = a.file.name
imgfile = Image.open(os.path.join(old_save_dir, file.name))
size = (500, 500)
imgfile.thumbnail(size, Image.ANTIALIAS)
imgfile.save(os.path.join(label_dir, file.name))
data['files'].append({
'label': labelnames,
'url': os.path.join(url, file.name),
'name': file.name,
'type': 'image/png',
'thumbnailUrl': os.path.join(url, file.name),
'size': 0,
})
except Exception as e:
log_to_terminal(str(traceback.format_exc()), self.socketid)
except Exception as e:
print e
log_to_terminal(str(traceback.format_exc()), self.socketid)
log_to_terminal(str(len(all_files)) + str(' images saved for ' + labelnames), self.socketid)
response = JSONResponse(data, {}, response_mimetype(self.request))
response['Content-Disposition'] = 'inline; filename=files.json'
return response
except Exception as e:
redis_obj.lpush('trainaclass_exception', str(traceback.format_exc()))
log_to_terminal(str(traceback.format_exc()), self.socketid)
def get_context_data(self, **kwargs):
context = super(TrainaclassCreateView, self).get_context_data(**kwargs)
context['pictures'] = Trainaclass.objects.all()
return context
class TrainaclassDeleteView(DeleteView):
model = Trainaclass
def delete(self, request, *args, **kwargs):
"""
This does not actually delete the file, only the database record. But
that is easy to implement.
"""
self.object = self.get_object()
self.object.delete()
if request.is_ajax():
response = JSONResponse(True, {}, response_mimetype(self.request))
response['Content-Disposition'] = 'inline; filename=files.json'
return response
else:
return HttpResponseRedirect('/upload/new')
class JSONResponse(HttpResponse):
"""JSON response class."""
def __init__(self, obj='', json_opts={}, mimetype="application/json", *args, **kwargs):
content = json.dumps(obj, **json_opts)
super(JSONResponse, self).__init__(content, mimetype, *args, **kwargs)
@csrf_exempt
def trainamodel(request):
data = {}
post_dict = parser.parse(request.POST.urlencode())
socketid = post_dict['socketid']
log_to_terminal('Beginning training a new model', post_dict['socketid'])
# old_save_dir = conf.PIC_DIR
folder_name = str(socketid)
save_dir = os.path.join(conf.PIC_DIR, folder_name)
# train_dir = os.path.join(save_dir, 'train')
# test_dir = os.path.join(save_dir, 'test')
# util_dir = os.path.join(save_dir, 'util')
trainImages.delay(os.path.join(save_dir, ''), socketid)
data['info'] = 'completed'
response = JSONResponse(data, {}, response_mimetype(request))
response['Content-Disposition'] = 'inline; filename=files.json'
return response
@csrf_exempt
def testmodel(request):
data = {}
try:
post_dict = parser.parse(request.POST.urlencode())
socketid = post_dict['socketid']
log_to_terminal('Classifying test images', post_dict['socketid'])
old_save_dir = conf.PIC_DIR
folder_name = str(socketid)
save_dir = os.path.join(conf.PIC_DIR, folder_name)
# train_dir = os.path.join(save_dir, 'train')
test_dir = os.path.join(save_dir, 'test')
util_dir = os.path.join(save_dir, 'util')
if not os.path.exists(os.path.join(old_save_dir, folder_name)):
raise Exception('No training images has been provided for this job.')
if len(os.listdir(os.path.join(test_dir))) == 0:
raise Exception('No test images provided')
if not os.path.isfile(os.path.join(util_dir, 'newCaffeModel.prototxt')):
# default_classify(test_dir, socketid, os.path.join(conf.PIC_URL, folder_name, 'test'))
raise Exception('No model has been trained for this job.')
classify_wrapper_local(save_dir, socketid, os.path.join(conf.PIC_URL, folder_name, 'test'))
data['info'] = 'completed'
data['prototxt'] = os.path.join(conf.PIC_URL, folder_name, 'util', 'newCaffeModel.prototxt')
data['caffemodel'] = os.path.join(conf.PIC_URL, folder_name, 'util', 'newCaffeModel.caffemodel')
response = JSONResponse(data, {}, response_mimetype(request))
response['Content-Disposition'] = 'inline; filename=files.json'
return response
except:
data['error'] = str(traceback.format_exc())
log_to_terminal(str(traceback.format_exc()), socketid)
response = JSONResponse(data, {}, response_mimetype(request))
response['Content-Disposition'] = 'inline; filename=files.json'
return response
| en | 0.530375 | # SET OF PATH CONSTANTS - SOME UNUSED # File initially downloaded here # Input image is saved here (symbolic links) - after resizing to 500 x 500 This function created the view and validates the form. It adds images to a new label. Makes directory and save them. # fcountfile = open(os.path.join(conf.LOG_DIR, 'log_count.txt'), 'a') # fcountfile.write(str(self.request.META.get('REMOTE_ADDR')) + '\n') # fcountfile.close() # Make the new directory based on time This does not actually delete the file, only the database record. But that is easy to implement. JSON response class. # old_save_dir = conf.PIC_DIR # train_dir = os.path.join(save_dir, 'train') # test_dir = os.path.join(save_dir, 'test') # util_dir = os.path.join(save_dir, 'util') # train_dir = os.path.join(save_dir, 'train') # default_classify(test_dir, socketid, os.path.join(conf.PIC_URL, folder_name, 'test')) | 2.032206 | 2 |
utils.py | narumiruna/pytorch-wgangp | 3 | 6621120 | import os
from bokeh import plotting
class AverageMeter(object):
    """Keeps a running, count-weighted mean of observed values."""

    def __init__(self):
        # Accumulators; ``average`` stays ``None`` until the first update.
        self.sum = 0
        self.count = 0
        self.average = None

    def update(self, value, number=1):
        """Record ``value`` seen ``number`` times and refresh the mean."""
        weighted = value * number
        self.sum = self.sum + weighted
        self.count = self.count + number
        self.average = self.sum / self.count
class PlotHelper(object):
    """Accumulates generator/discriminator losses and re-renders a bokeh
    line plot to ``filename`` after every appended sample."""

    def __init__(self, filename):
        # Output HTML file written by bokeh on every call to plot().
        self.filename = filename
        self.loss_g_list = []
        self.loss_d_list = []
        self.indices = []

    def append(self, loss_g, loss_d, index):
        """Record one (loss_g, loss_d) pair at ``index`` and redraw the plot."""
        self.loss_g_list.append(loss_g)
        self.loss_d_list.append(loss_d)
        self.indices.append(index)
        # Re-render immediately so the saved file always reflects all data.
        self.plot()

    def plot(self):
        """Render both loss curves and save them to ``self.filename``."""
        figure = plotting.figure(sizing_mode='stretch_both')
        figure.line(
            self.indices,
            self.loss_g_list,
            line_color='green',
            alpha=0.5,
            line_width=5,
            legend='loss g')
        figure.line(
            self.indices,
            self.loss_d_list,
            line_color='blue',
            alpha=0.5,
            line_width=5,
            legend='loss d')
        # Ensure the target directory exists before bokeh writes the file.
        os.makedirs(os.path.dirname(self.filename), exist_ok=True)
        plotting.output_file(self.filename)
        plotting.save(figure)
| import os
from bokeh import plotting
class AverageMeter(object):
def __init__(self):
self.sum = 0
self.count = 0
self.average = None
def update(self, value, number=1):
self.sum += value * number
self.count += number
self.average = self.sum / self.count
class PlotHelper(object):
def __init__(self, filename):
self.filename = filename
self.loss_g_list = []
self.loss_d_list = []
self.indices = []
def append(self, loss_g, loss_d, index):
self.loss_g_list.append(loss_g)
self.loss_d_list.append(loss_d)
self.indices.append(index)
self.plot()
def plot(self):
figure = plotting.figure(sizing_mode='stretch_both')
figure.line(
self.indices,
self.loss_g_list,
line_color='green',
alpha=0.5,
line_width=5,
legend='loss g')
figure.line(
self.indices,
self.loss_d_list,
line_color='blue',
alpha=0.5,
line_width=5,
legend='loss d')
os.makedirs(os.path.dirname(self.filename), exist_ok=True)
plotting.output_file(self.filename)
plotting.save(figure)
| none | 1 | 2.958379 | 3 | |
Systems/Engine/Scene.py | RippeR37/PyPong | 1 | 6621121 | <reponame>RippeR37/PyPong
class Scene(object):
    """Base class for scenes managed on a scene stack."""

    def __init__(self, stackable=True, stack_usable=True):
        # stackable: whether this scene may be pushed onto the stack.
        # stack_usable: whether this scene may manipulate the stack.
        self._is_stackable = stackable
        self._is_stack_usable = stack_usable

    def is_stackable(self):
        """Return True if this scene may be pushed onto the scene stack."""
        return self._is_stackable

    def is_stack_usable(self):
        """Return True if this scene may manipulate the scene stack."""
        return self._is_stack_usable

    def update(self, dt):
        """Advance the scene by ``dt``; no-op hook for subclasses."""
        pass

    def render(self):
        """Draw the scene; no-op hook for subclasses."""
        pass

    def process_scene_stack(self, scene_stack, scene_index):
        """Inspect/modify the scene stack; no-op hook for subclasses."""
        pass
| class Scene(object):
def __init__(self, stackable=True, stack_usable=True):
self._is_stackable = stackable
self._is_stack_usable = stack_usable
def is_stackable(self):
return self._is_stackable
def is_stack_usable(self):
return self._is_stack_usable
def update(self, dt):
pass
def render(self):
pass
def process_scene_stack(self, scene_stack, scene_index):
pass | none | 1 | 2.498118 | 2 | |
jeff/utils.py | JeffJerseyCow/jeff | 8 | 6621122 | import re
import os
import json
import importlib
import subprocess
def checkDocker():
    """Checks docker is installed and user is a member of the docker group.

    Returns:
        True if docker is installed and user is a member of the docker group
        else False.
    """
    try:
        # 'docker version' talks to the daemon, so it fails unless the
        # current user has permission to use the docker socket.
        cmdArgs = ['docker', 'version']
        subprocess.run(cmdArgs, stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL,
                       stderr=subprocess.DEVNULL, check=True)
        return True
    except FileNotFoundError:
        # The docker binary itself is missing from PATH.
        print('Error: Install docker.io or docker ce')
        return False
    except subprocess.CalledProcessError:
        # Binary exists but the daemon refused us: typically a permissions
        # problem (user not in the docker group).
        print('Error: Add user %s to docker group' % os.getenv("USER"))
        return False
def checkDockerContainer(name):
    """Checks whether a docker container named ``name`` exists.

    Args:
        name: Exact container name to look for.

    Returns:
        True if ``docker ps -a`` lists a container with that name,
        else False.
    """
    cmdArgs = ['docker', 'ps', '-a']
    output = subprocess.run(cmdArgs, check=True, stdout=subprocess.PIPE).stdout
    output = output.decode().splitlines()
    for line in output:
        # The container name is the last whitespace-delimited column.
        containerName = re.search(r'([\-a-zA-Z0-9_]+)\s*$', line)
        # Guard against non-matching lines (e.g. blank lines): the original
        # code called .group() on None and raised AttributeError.  Compare
        # against group(1) so trailing whitespace never pollutes the match.
        if containerName is not None and name == containerName.group(1):
            return True
    return False
def checkJeffContainer(name, config):
    """Checks whether ``name`` is a container registered in the jeff config.

    Args:
        name: Container name to look for.
        config: jeff configuration dictionary loaded with utils.loadConfig.

    Returns:
        True if the container is registered in the config else False.
    """
    # Idiomatic membership test replaces the original manual scan loop.
    return name in config['containers']
def loadConfig():
    """Loads the jeff configuration file.

    The file is expected at ``<package dir>/config/jeffconfig.json``.

    Returns:
        The jeff configuration file as a dictionary else False.
    """
    jeffDirPath = os.path.dirname(__file__)
    jeffConfigPath = os.path.join(jeffDirPath, 'config', 'jeffconfig.json')
    if not os.path.isfile(jeffConfigPath):
        print('Error: Missing configuration file')
        return False
    with open(jeffConfigPath, 'r') as configFile:
        return json.loads(configFile.read())
def loadPlugins(config):
    """Loads jeff plugins specified in the config file.

    Each plugin is imported as ``jeff.plugins.<name>``.

    Args:
        config: jeff configuration dictionary loaded with utils.loadConfig.

    Returns:
        A dictionary containing each loaded module indexed by plugin name.
        Raises ImportError if a listed plugin module does not exist.
    """
    plugins = {}
    importPrefix = 'jeff.plugins'
    for name in config['plugins']:
        plugins[name] = importlib.import_module('%s.%s' % (importPrefix, name))
    return plugins
def listContainers(config):
    """Prints current jeff containers.

    Args:
        config: jeff configuration dictionary loaded with utils.loadConfig.

    Returns:
        True
    """
    names = config['containers']
    for containerName in names:
        print(containerName)
    return True
def updateConfig(config):
    """Updates the configuration file with the configuration dictionary config.

    Overwrites ``<package dir>/config/jeffconfig.json`` with the serialized
    dictionary; the file must already exist.

    Args:
        config: jeff configuration dictionary loaded with utils.loadConfig.

    Returns:
        True if configuration file was written to successfully else False.
    """
    dirPath = os.path.dirname(__file__)
    configPath = os.path.join(dirPath, 'config', 'jeffconfig.json')
    if not os.path.isfile(configPath):
        print('Error: Missing configuration file')
        return False
    with open(configPath, 'w') as configFile:
        configFile.write(json.dumps(config))
    return True
def removeContainer(name, config):
    """Removes the container specified with name.

    Args:
        name: Name of container to remove.
        config: jeff configuration dictionary loaded with utils.loadConfig.

    Returns:
        True if container was removed else False.
    """
    # Case 1: tracked by jeff but no longer known to docker -- drop the
    # stale config entry only.
    if checkJeffContainer(name, config) and not checkDockerContainer(name):
        config['containers'].remove(name)
        updateConfig(config)
        print('Deleted invalid jeff entry')
        return True
    # Case 2: tracked by jeff and present in docker -- force-remove the
    # container, then drop the config entry.
    elif checkJeffContainer(name, config) and checkDockerContainer(name):
        cmdArgs = ['docker', 'container', 'rm', '--force', '%s' % name]
        subprocess.run(cmdArgs, stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL,
                       stderr=subprocess.DEVNULL, check=True)
        config['containers'].remove(name)
        updateConfig(config)
        print('Deleted %s' % name)
        return True
    # Case 3: not tracked by jeff at all.
    print('Error: Container %s does not exist' % name)
    return False
| import re
import os
import json
import importlib
import subprocess
def checkDocker():
"""Checks docker is installed and user is a member of the docker group.
Returns:
True if docker is installed and user is a member of the docker group
else False.
"""
try:
cmdArgs = ['docker', 'version']
subprocess.run(cmdArgs, stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, check=True)
return True
except FileNotFoundError:
print('Error: Install docker.io or docker ce')
return False
except subprocess.CalledProcessError:
print('Error: Add user %s to docker group' % os.getenv("USER"))
return False
def checkDockerContainer(name):
cmdArgs = ['docker', 'ps', '-a']
output = subprocess.run(cmdArgs, check=True, stdout=subprocess.PIPE).stdout
output = output.decode().splitlines()
for line in output:
containerName = re.search(r'([\-a-zA-Z0-9_]+)\s*$', line)
if name == containerName.group(0):
return True
return False
def checkJeffContainer(name, config):
for containerName in config['containers']:
if name == containerName:
return True
return False
def loadConfig():
"""Loads the jeff confgiruation file.
Returns:
The jeff configuration file as a dictionary else False.
"""
jeffDirPath = os.path.dirname(__file__)
jeffConfigPath = os.path.join(jeffDirPath, 'config', 'jeffconfig.json')
if not os.path.isfile(jeffConfigPath):
print('Error: Missing configuration file')
return False
with open(jeffConfigPath, 'r') as configFile:
return json.loads(configFile.read())
def loadPlugins(config):
"""Loads jeff plugins specified in the config file.
Args:
config: jeff configuration dictionary loaded with utils.loadConfig.
Returns:
A dictionary containing the loaded module indexed by the plugin nameself
else False.
"""
plugins = {}
importPrefix = 'jeff.plugins'
for name in config['plugins']:
plugins[name] = importlib.import_module('%s.%s' % (importPrefix, name))
return plugins
def listContainers(config):
"""Prints current jeff containers.
Args:
config: jeff configuration dictionary loaded with utils.loadConfig.
Returns:
True
"""
for container in config['containers']:
print(container)
return True
def updateConfig(config):
"""Updates the configuration file with the configuration dictionary config.
Args:
config: jeff configuration dictionary loaded with utils.loadConfig.
Returns:
True if configuration file was written to successfully else False.
"""
dirPath = os.path.dirname(__file__)
configPath = os.path.join(dirPath, 'config', 'jeffconfig.json')
if not os.path.isfile(configPath):
print('Error: Missing configuration file')
return False
with open(configPath, 'w') as configFile:
configFile.write(json.dumps(config))
return True
def removeContainer(name, config):
"""Removes the container specified with name.
Args:
name: Name of container to remove.
config: jeff configuration dictionary loaded with utils.loadConfig.
Returns:
True if container was removed else False.
"""
if checkJeffContainer(name, config) and not checkDockerContainer(name):
config['containers'].remove(name)
updateConfig(config)
print('Deleted invalid jeff entry')
return True
elif checkJeffContainer(name, config) and checkDockerContainer(name):
cmdArgs = ['docker', 'container', 'rm', '--force', '%s' % name]
subprocess.run(cmdArgs, stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, check=True)
config['containers'].remove(name)
updateConfig(config)
print('Deleted %s' % name)
return True
print('Error: Container %s does not exist' % name)
return False
| en | 0.692148 | Checks docker is installed and user is a member of the docker group. Returns: True if docker is installed and user is a member of the docker group else False. Loads the jeff confgiruation file. Returns: The jeff configuration file as a dictionary else False. Loads jeff plugins specified in the config file. Args: config: jeff configuration dictionary loaded with utils.loadConfig. Returns: A dictionary containing the loaded module indexed by the plugin nameself else False. Prints current jeff containers. Args: config: jeff configuration dictionary loaded with utils.loadConfig. Returns: True Updates the configuration file with the configuration dictionary config. Args: config: jeff configuration dictionary loaded with utils.loadConfig. Returns: True if configuration file was written to successfully else False. Removes the container specified with name. Args: name: Name of container to remove. config: jeff configuration dictionary loaded with utils.loadConfig. Returns: True if container was removed else False. | 2.613583 | 3 |
local_file_courier_py/sendFiles.py | sairash/local_file_courier | 1 | 6621123 | <gh_stars>1-10
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
import os
import random
import socket
listPlace = 0
def get_folder_path():
    """Prompt with a directory chooser until the user picks a folder.

    Stores the chosen directory in the module-level ``folderPath`` StringVar.
    """
    # Loop instead of recursing: the original called itself on every
    # cancelled dialog, growing the call stack without bound.
    folder_selected = filedialog.askdirectory()
    while folder_selected == '':
        folder_selected = filedialog.askdirectory()
    folderPath.set(folder_selected)
def go_back():
    """Close this window and relaunch the main menu script."""
    gui.destroy()
    gui.quit()
    # Hand control back to the launcher in a fresh interpreter.
    os.system('python main.py')
def random_port():
    """Put a fresh random 4-digit port string into ``random_portText``."""
    digits = [str(random.randint(0, 9)) for _ in range(4)]
    random_portText.set(''.join(digits))
def start_key():
    """Tear down the GUI and launch the sender process with the chosen
    port and folder."""
    print("You pressed Start")
    gui.quit()
    gui.destroy()
    # SECURITY NOTE(review): the folder path is interpolated unquoted into a
    # shell command -- paths containing spaces or shell metacharacters break
    # this and allow command injection; prefer subprocess.run([...]).
    os.system(f'py sender_sockets.py --port {int(random_portText.get())} --file {folderPath.get()}')
def just_started():
    """Log a transfer attempt in the Listbox, then start the sender."""
    global listPlace
    listPlace += 1
    # listPlace starts past the two intro lines, hence the -2 offset.
    text_area.insert(listPlace, "Transported "+str(listPlace-2)+" Times..")
    text_area.select_clear(text_area.size() - 2)
    text_area.select_set(END)
    text_area.yview(END)
    start_key()
# --- Window setup -----------------------------------------------------------
gui = Tk()
gui.geometry("300x260")
gui.iconbitmap('logo.ico')
gui.title("Send Files")
# --- Shared state: chosen folder, random port, local IP ----------------------
folderPath = StringVar()
folderPath.set('Folder Path Here')
random_portText = StringVar()
random_portText.set(str(random.randint(0, 9)) +
                    str(random.randint(0, 9)) +
                    str(random.randint(0, 9)) +
                    str(random.randint(0, 9)))
default_ip_text = StringVar()
hostname = socket.gethostname()
local_ip = socket.gethostbyname(hostname)
default_ip_text.set(local_ip)
# --- Widgets ------------------------------------------------------------------
c = ttk.Button(gui, text="Back", command=go_back)
c.place(x=0, y=5)
a = Label(gui, text="File")
a.place(x=0, y=35)
btnFind = ttk.Button(gui, text="Select Folder", command=get_folder_path)
btnFind.place(x=50, y=35)
E = Entry(gui, textvariable=folderPath, state='disabled')
E.place(x=150, y=35)
portLbl = Label(gui, text="Port")
portLbl.place(x=0, y=65)
buttonRandom = ttk.Button(gui, text="Gen Port", command=random_port)
buttonRandom.place(x=50, y=65)
portEntry = Entry(gui, textvariable=random_portText, state='disabled')
portEntry.place(x=150, y=65)
keyLbl = Label(gui, text="IP")
keyLbl.place(x=0, y=95)
KeyEntry = Entry(gui, textvariable=default_ip_text,width=37, state='disabled')
KeyEntry.place(x=50, y=95)
startButton = ttk.Button(gui, text="Start", command=just_started)
startButton.place(x=100, y=125)
progress = ttk.Progressbar(gui, orient=HORIZONTAL, value=10, length=290, mode='determinate')
progress.place(x=0, y=160)
# --- Log area ----------------------------------------------------------------
log = Label(gui, text="Logs", font=("Times New Roman", 7))
log.place(x=0, y=185)
text_area = Listbox(gui, width=48, height=3, font=("Times New Roman", 10))
text_area.place(x=0, y=205)
text_area.insert(listPlace, "Press Start To Start Program")
listPlace += 1
text_area.insert(listPlace, "When u press the key from keys this program will send ")
listPlace += 1
text_area.insert(listPlace, 'to all other computer receiving with the same port')
text_area.select_set(END)
text_area.yview(END)
# Force the user to pick a source folder before the main loop starts.
get_folder_path()
gui.resizable(False, False)
gui.mainloop()
| from tkinter import *
from tkinter import ttk
from tkinter import filedialog
import os
import random
import socket
listPlace = 0
def get_folder_path():
folder_selected = filedialog.askdirectory()
if folder_selected == '':
get_folder_path()
else:
folderPath.set(folder_selected)
# print(folderPath.get().split('/')[-1])
def go_back():
gui.destroy()
gui.quit()
os.system('python main.py')
def random_port():
random_portText.set(str(random.randint(0, 9))+str(random.randint(0, 9)) +
str(random.randint(0, 9))+str(random.randint(0, 9)))
def start_key():
print("You pressed Start")
gui.quit()
gui.destroy()
os.system(f'py sender_sockets.py --port {int(random_portText.get())} --file {folderPath.get()}')
def just_started():
global listPlace
listPlace += 1
text_area.insert(listPlace, "Transported "+str(listPlace-2)+" Times..")
text_area.select_clear(text_area.size() - 2)
text_area.select_set(END)
text_area.yview(END)
start_key()
gui = Tk()
gui.geometry("300x260")
gui.iconbitmap('logo.ico')
gui.title("Send Files")
folderPath = StringVar()
folderPath.set('Folder Path Here')
random_portText = StringVar()
random_portText.set(str(random.randint(0, 9)) +
str(random.randint(0, 9)) +
str(random.randint(0, 9)) +
str(random.randint(0, 9)))
default_ip_text = StringVar()
hostname = socket.gethostname()
local_ip = socket.gethostbyname(hostname)
default_ip_text.set(local_ip)
c = ttk.Button(gui, text="Back", command=go_back)
c.place(x=0, y=5)
a = Label(gui, text="File")
a.place(x=0, y=35)
btnFind = ttk.Button(gui, text="Select Folder", command=get_folder_path)
btnFind.place(x=50, y=35)
E = Entry(gui, textvariable=folderPath, state='disabled')
E.place(x=150, y=35)
portLbl = Label(gui, text="Port")
portLbl.place(x=0, y=65)
buttonRandom = ttk.Button(gui, text="Gen Port", command=random_port)
buttonRandom.place(x=50, y=65)
portEntry = Entry(gui, textvariable=random_portText, state='disabled')
portEntry.place(x=150, y=65)
keyLbl = Label(gui, text="IP")
keyLbl.place(x=0, y=95)
KeyEntry = Entry(gui, textvariable=default_ip_text,width=37, state='disabled')
KeyEntry.place(x=50, y=95)
startButton = ttk.Button(gui, text="Start", command=just_started)
startButton.place(x=100, y=125)
progress = ttk.Progressbar(gui, orient=HORIZONTAL, value=10, length=290, mode='determinate')
progress.place(x=0, y=160)
log = Label(gui, text="Logs", font=("Times New Roman", 7))
log.place(x=0, y=185)
text_area = Listbox(gui, width=48, height=3, font=("Times New Roman", 10))
text_area.place(x=0, y=205)
text_area.insert(listPlace, "Press Start To Start Program")
listPlace += 1
text_area.insert(listPlace, "When u press the key from keys this program will send ")
listPlace += 1
text_area.insert(listPlace, 'to all other computer receiving with the same port')
text_area.select_set(END)
text_area.yview(END)
get_folder_path()
gui.resizable(False, False)
gui.mainloop() | ru | 0.120531 | # print(folderPath.get().split('/')[-1]) | 3.10647 | 3 |
backend/forum/base/apps.py | karolyi/forum-django | 7 | 6621124 | <reponame>karolyi/forum-django
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class BaseConfig(AppConfig):
    """Django application configuration for the forum base app."""
    name = 'forum.base'
    verbose_name = _('Forum: Base')
    label = 'forum_base'
| from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class BaseConfig(AppConfig):
name = 'forum.base'
verbose_name = _('Forum: Base')
label = 'forum_base' | none | 1 | 1.399315 | 1 | |
client/spot.py | ronpandolfi/Xi-cam | 19 | 6621125 | # -*- coding: utf-8 -*-
import os
from time import sleep
from StringIO import StringIO
from PIL import Image
import numpy as np
from client.newt import NewtClient
class SpotClient(NewtClient):
    """
    Client class to handle SPOT API calls
    """
    # Root of the SPOT data warehouse on the NERSC filesystem.
    BASE_DIR = '/global/project/projectdirs/als/spade/warehouse'
    # Base URL for all SPOT web API endpoints.
    SPOT_URL = 'https://portal-auth.nersc.gov/als'
    def __init__(self):
        super(SpotClient, self).__init__()
        # Set by login() on success; None while unauthenticated.
        self.spot_authentication = None
    def login(self, username, password):
        """
        Login to SPOT

        Parameters
        ----------
        username : str
        password : str

        Returns
        -------
        The NewtClient login response on success.

        Raises
        ------
        SPOTError
            If SPOT rejects the credentials.
        """
        credentials = {"username": username,
                       "password": password}
        response = self.post(self.SPOT_URL + '/auth', data=credentials)
        # SPOT signals success via the 'auth' flag of the JSON body.
        if response.json()['auth']:
            self.authentication = response
            # Also authenticate the underlying NEWT session.
            return super(SpotClient, self).login(username, password)
        else:
            self.authentication = None
            raise SPOTError('Bad Authentication: Unable to log in')
def search(self, query, **kwargs):
"""
Search a dataset on SPOT
Parameters
----------
query : str, search query
kwargs : {key: option}
Any of the following:
'sortterm'
attribute to sort results by
'sorttype'
ascending 'asc' or descending 'desc'
'end_station'
endstation of dataset
'limitnum'
maximum number of results to show
'skipnum'
number of results to skipd
'search'
search query
Returns
-------
json
response of search results
"""
self.check_login()
# sortterm probably allows more filters will need to check...
allowed_kwargs = {'sortterm': ['fs.stage_date', 'appmetadata.sdate'],
'sorttype': ['desc', 'asc'],
'end_station': ['bl832', 'bl733']}
generic_kwargs = ['limitnum', 'skipnum']
for key in kwargs:
if key not in allowed_kwargs.keys() and key not in generic_kwargs:
raise ValueError('%s keyword not in allowed keywords %s' %
(key, list(allowed_kwargs.keys() +
generic_kwargs)))
elif key in allowed_kwargs:
if kwargs[key] not in allowed_kwargs[key]:
raise ValueError('%s keyword value must be on of %s' %
(kwargs[key], list(allowed_kwargs[key])))
kwargs.update(search=query)
r = self.get(self.SPOT_URL + '/hdf/search', params=kwargs)
return self.check_response(r)
    def get_derived_datasets(self, dataset):
        """
        Get datasets that are derived from the given dataset. ie raw, sino,
        norm, etc...

        Parameters
        ----------
        dataset : str
            dataset name

        Returns
        -------
        json
            Response with derived datasets; each entry carries at least
            'stage', 'path' and 'phyloc' keys (used by the stage lookups).
        """
        self.check_login()
        params = {'dataset': dataset}
        r = self.post(self.SPOT_URL + '/hdf/dataset', params=params)
        return self.check_response(r)
def get_stage_path(self, dataset, stage):
"""
Get the database path for a specific stage of a dataset. Stages
are raw, norm, sino, gridrec, etc...
Parameters
----------
dataset : str
name of dataset
stage : str
stage name
Returns
-------
str
database path
"""
self.check_login()
derivatives = self.get_derived_datasets(dataset)
for i in range(len(derivatives)):
if derivatives[i]['stage'] == stage:
path = derivatives[i]['path']
return path
raise SPOTError('Stage %s in dataset %s does not exist' %
(stage, dataset))
def get_file_location(self, dataset, stage):
"""
Get the full location (system path) for a specific stage of a dataset.
stages are raw, norm, sino, gridrec, etc...
Parameters
----------
dataset : str
name of dataset
stage : str
stage name
Returns
-------
str
file path
"""
self.check_login()
derivatives = self.get_derived_datasets(dataset)
for i in range(len(derivatives)):
if derivatives[i]['stage'] == stage:
location = derivatives[i]['phyloc']
return location
raise SPOTError('Stage %s in dataset %s does not exist' %
(stage, dataset))
    def get_dataset_attributes(self, dataset, stage='raw', group='/'):
        """
        Get hdf5 attributes of a specified dataset. group can be a specified
        image within the hdf5 file, or default '/' for top level attributes

        Parameters
        ----------
        dataset : str
            name of dataset
        stage : str
            stage name
        group : str
            group name of hdf5

        Returns
        -------
        json
            json reponse of attributes
        """
        path = self.get_stage_path(dataset, stage)
        params = {'group': group}
        r = self.post(self.SPOT_URL + '/hdf/attributes' + path, params=params)
        return self.check_response(r)
    def list_dataset_images(self, dataset, stage):
        """
        List images inside a given dataset

        Parameters
        ----------
        dataset : str
            name of dataset
        stage : str
            stage name

        Returns
        -------
        json
            response of image list (hdf5 group paths)
        """
        path = self.get_stage_path(dataset, stage)
        r = self.post(self.SPOT_URL + '/hdf/listimages' + path)
        return self.check_response(r)
    def get_dataset_size(self, dataset, stage):
        """
        Get the file size of a specified dataset

        Parameters
        ----------
        dataset : str
            name of dataset
        stage : str
            stage name

        Returns
        -------
        float
            dataset size in bytes, or the sentinel ``1`` when the server
            response carries no content-length header
        """
        # HEAD request avoids downloading the payload just to size it.
        path = self.get_stage_path(dataset, stage)
        r = self.session.head(self.SPOT_URL + '/hdf/download' + path)
        head = r.headers
        # Sentinel 1 keeps downstream division (progress fraction) safe.
        if not 'content-length' in head: return 1
        size = float(head['content-length'])
        return size
    def get_raw_image(self, dataset, stage, image=None, index=None):
        """
        Download raw data from an image in a SPOT dataset

        Parameters
        ----------
        dataset : str
            name of dataset
        stage : str
            stage name
        image : str
            (optional), name of image in dataset
        index : int
            (optional) index of image in dataset (one of index or image must be given)

        Returns
        -------
        ndarray
            2D ndarray of image data
        """
        images = list(self.list_dataset_images(dataset, stage))
        if image is None and index is None:
            raise ValueError('One of image or index must be given')
        elif image is None and index is not None:
            group = images[index]
        else:
            # Reuse the directory part of the first listed image as the
            # hdf5 group prefix for the named image.  Note: when both image
            # and index are given, image takes precedence.
            group = os.path.split(images[0])[0] + '/' + image
        # Called for its side effect only: ensures the file is on disk
        # before the rawdata request (the response is discarded below).
        r = self.stage_tape_2_disk(dataset, stage)
        path = self.get_stage_path(dataset, stage)
        params = {'group': group}
        r = self.post(self.SPOT_URL + '/hdf/rawdata' + path, params=params)
        r = self.check_response(r)
        return np.array(r['data'])
    def get_image_download_URLS(self, dataset, stage, image=None, index=None):
        """
        Get download URL's for a specific image in a SPOT dataset

        Parameters
        ----------
        dataset : str
            name of dataset
        stage : str
            stage name
        image : str
            (optional), name of image in dataset
        index : int
            (optional) index of image in dataset (one of index or image must be given)

        Returns
        -------
        dict
            Dictionary with urls to images (keys include the misspelled
            'pnglocaion'/'tiflocaion' returned by the SPOT API)
        """
        images = list(self.list_dataset_images(dataset, stage))
        if image is None and index is None:
            raise ValueError('One of image or index must be given')
        elif image is None and index is not None:
            group = images[index]
        else:
            # Directory of the first listed image + the requested name.
            group = os.path.split(images[0])[0] + '/' + image
        # Side-effect call: make sure the dataset is staged to disk first.
        r = self.stage_tape_2_disk(dataset, stage)
        path = self.get_stage_path(dataset, stage)
        params = {'group': group}
        r = self.post(self.SPOT_URL + '/hdf/image' + path, params=params)
        r = self.check_response(r)
        return r
    def get_image_as(self, dataset, stage, ext='tif', image=None, index=None):
        """
        Download an image in the specified format and return an array of the image

        Parameters
        ----------
        dataset : str
            name of dataset
        stage : str
            stage name
        ext : str, optional
            extension for image type (tif or png)
        image : str
            (optional), name of image in dataset
        index : int
            (optional) index of image in dataset (one of index or image must be given)

        Returns
        -------
        ndarray
            2D ndarray of image data
        """
        r = self.get_image_download_URLS(dataset, stage, image=image, index=index)
        url = r['pnglocaion'] if ext == 'png' else r['tiflocaion'] # Careful when spot API fixes this spelling mistake
        r = self.get(url)
        # Decode the downloaded bytes with PIL and hand back a numpy view.
        img = Image.open(StringIO(r.content))
        return np.asarray(img)
def download_image(self, dataset, stage, save_path=None, ext='tif', image=None, index=None):
"""
Download and save a specific image in a dataset as png or tif image
Parameters
----------
dataset : str
name of dataset
stage : str
stage name
save_path : str, optional
Path to save the image
ext : str, optional
extension for image type (tif or png)
image : str
(optional), name of image in dataset
index : int
(optional) index of image in dataset (one of index or image must be given)
"""
if ext not in ('png', 'tif'):
raise ValueError('ext can only be png or tif')
if image is None and index is None:
raise ValueError('One of image or index must be given')
if save_path is None:
name = image.split('.')[0] if image is not None else dataset + '_{}'.format(index)
save_path = os.path.join(os.path.expanduser('~'), '{}.{}'.format(name,ext))
r = self.get_image_download_URLS(dataset, stage, image=image, index=index)
url = r['pnglocaion'] if ext == 'png' else r['tiflocaion'] # Careful when spot API fixes this spelling mistake
r = self.get(url)
with open(save_path, 'w') as f:
for chunk in r:
f.write(chunk)
    def stage_tape_2_disk(self, dataset, stage):
        """
        Stage a dataset from tape to disk if needed

        Parameters
        ----------
        dataset : str
            name of dataset
        stage : str
            stage name

        Returns
        -------
        The last staging response; its 'location' key is neither 'unknown'
        nor 'staging' once this returns.
        """
        path = self.get_stage_path(dataset, stage)
        r = self.check_response(self.post(self.SPOT_URL + '/hdf/stageifneeded' + path))
        # Wait for staging to finish
        # NOTE(review): polls every 3 s with no timeout -- blocks forever if
        # staging never completes.
        while r['location'] == 'unknown' or r['location'] == 'staging':
            sleep(3)
            r = self.check_response(self.post(self.SPOT_URL + '/hdf/stageifneeded' + path))
        return r
    def download_dataset(self, dataset, stage, save_path=None):
        """
        Download a specified dataset

        Parameters
        ----------
        dataset : str
            name of dataset
        stage : str
            stage name
        save_path : str
            Path and name to save file locally. If None name on SPOT is used and save in home directory
        """
        path = self.get_stage_path(dataset, stage)
        if save_path is None:
            save_path = os.path.join(os.path.expanduser('~'), path.split('/')[-1])
        # Ensure the file is on disk before requesting the download stream.
        r = self.stage_tape_2_disk(dataset, stage)
        r = self.get(self.SPOT_URL + '/hdf/download' + path, stream=True)
        with open(save_path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=64*1024):
                if chunk: # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()
        r.close()
        # NOTE(review): check_login is called with no arguments elsewhere in
        # this class; passing the (closed) response here looks like it was
        # meant to be check_response -- confirm against NewtClient.
        return self.check_login(r)
    def download_dataset_generator(self, dataset, stage, save_path=None, chunk_size=64*1024):
        """
        Download a dataset as a generator (yields the fraction downloaded)
        Useful to know the status of a download (for gui purposes)

        Parameters
        ----------
        dataset : str
            name of dataset
        stage : str
            stage name
        save_path : str
            path and name to save file locally. If None name on SPOT is used and save in home directory
        chunk_size : int
            Chuck size of data in bytes

        Yields
        ------
        float
            Fraction downloaded so far (0.0 - 1.0)
        """
        path = self.get_stage_path(dataset, stage)
        if save_path is None:
            save_path = os.path.join(os.path.expanduser('~'), path.split('/')[-1])
        # Ensure the file is on disk first (response discarded).
        r = self.stage_tape_2_disk(dataset, stage)
        # float() guards the division below against integer truncation.
        file_size = float(self.get_dataset_size(dataset, stage))
        r = self.get(self.SPOT_URL + '/hdf/download' + path, stream=True)
        with open(save_path, 'wb') as f:
            downloaded = 0.0
            for chunk in r.iter_content(chunk_size=chunk_size):
                if chunk: # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()
                    downloaded += len(chunk)/file_size
                    yield downloaded
        r.close()
        return
def transfer_2_nersc(self, dataset, stage, path, system):
# TODO need to find a way to make a generator out of this, here or from cp/rsync
"""
Transfer a dataset to NERSC
Parameters
----------
dataset : str
name of dataset
stage : str
stage name
path : str
absolute destination path on NERSC
"""
r = self.stage_tape_2_disk(dataset, stage)
if r['location'] != 'staging' or r['ocation'] != 'unknown':
r = self.rsync(r['location'], path, system)
return r
class SPOTError(Exception):
    """Raised when a SPOT API call fails or returns an error response."""
    pass
# Manual smoke test: fetches one image from a known dataset and displays it.
# NOTE(review): contains a hard-coded username with an empty password --
# credentials should not live in source.
if __name__ == '__main__':
    import time
    from StringIO import StringIO
    from PIL import Image
    from matplotlib.pyplot import imshow, show, figure
    s = SpotClient()
    s.login('lbluque', '')
    # t = time.time()
    # img = s.get_raw_image('20160630_054009_prefire_3_0amp_scan7', 'raw', index=0)
    # print 'Time: ', time.time() - t
    t = time.time()
    arr = s.get_image_as('20160630_054009_prefire_3_0amp_scan7', 'raw', ext='tif', index=0)
    print arr.shape
    imshow(arr)
    show()
    # for i in range(3):
    #     figure(i)
    #     imshow(arr[:, :, i])
    #     show()
    print 'Time: ', time.time() - t
| # -*- coding: utf-8 -*-
import os
from time import sleep
from StringIO import StringIO
from PIL import Image
import numpy as np
from client.newt import NewtClient
class SpotClient(NewtClient):
"""
Client class to handle SPOT API calls
"""
BASE_DIR = '/global/project/projectdirs/als/spade/warehouse'
SPOT_URL = 'https://portal-auth.nersc.gov/als'
def __init__(self):
super(SpotClient, self).__init__()
self.spot_authentication = None
def login(self, username, password):
"""
Login to SPOT
Parameters
----------
username : str
password : str
"""
credentials = {"username": username,
"password": password}
response = self.post(self.SPOT_URL + '/auth', data=credentials)
if response.json()['auth']:
self.authentication = response
return super(SpotClient, self).login(username, password)
else:
self.authentication = None
raise SPOTError('Bad Authentication: Unable to log in')
def search(self, query, **kwargs):
"""
Search a dataset on SPOT
Parameters
----------
query : str, search query
kwargs : {key: option}
Any of the following:
'sortterm'
attribute to sort results by
'sorttype'
ascending 'asc' or descending 'desc'
'end_station'
endstation of dataset
'limitnum'
maximum number of results to show
'skipnum'
number of results to skipd
'search'
search query
Returns
-------
json
response of search results
"""
self.check_login()
# sortterm probably allows more filters will need to check...
allowed_kwargs = {'sortterm': ['fs.stage_date', 'appmetadata.sdate'],
'sorttype': ['desc', 'asc'],
'end_station': ['bl832', 'bl733']}
generic_kwargs = ['limitnum', 'skipnum']
for key in kwargs:
if key not in allowed_kwargs.keys() and key not in generic_kwargs:
raise ValueError('%s keyword not in allowed keywords %s' %
(key, list(allowed_kwargs.keys() +
generic_kwargs)))
elif key in allowed_kwargs:
if kwargs[key] not in allowed_kwargs[key]:
raise ValueError('%s keyword value must be on of %s' %
(kwargs[key], list(allowed_kwargs[key])))
kwargs.update(search=query)
r = self.get(self.SPOT_URL + '/hdf/search', params=kwargs)
return self.check_response(r)
def get_derived_datasets(self, dataset):
"""
Get datasets that are derived from the given dataset. ie raw, sino,
norm, etc...
Parameters
----------
dataset : str
dataset name
Returns
-------
json
Response with derived datasets
"""
self.check_login()
params = {'dataset': dataset}
r = self.post(self.SPOT_URL + '/hdf/dataset', params=params)
return self.check_response(r)
def get_stage_path(self, dataset, stage):
"""
Get the database path for a specific stage of a dataset. Stages
are raw, norm, sino, gridrec, etc...
Parameters
----------
dataset : str
name of dataset
stage : str
stage name
Returns
-------
str
database path
"""
self.check_login()
derivatives = self.get_derived_datasets(dataset)
for i in range(len(derivatives)):
if derivatives[i]['stage'] == stage:
path = derivatives[i]['path']
return path
raise SPOTError('Stage %s in dataset %s does not exist' %
(stage, dataset))
def get_file_location(self, dataset, stage):
"""
Get the full location (system path) for a specific stage of a dataset.
stages are raw, norm, sino, gridrec, etc...
Parameters
----------
dataset : str
name of dataset
stage : str
stage name
Returns
-------
str
file path
"""
self.check_login()
derivatives = self.get_derived_datasets(dataset)
for i in range(len(derivatives)):
if derivatives[i]['stage'] == stage:
location = derivatives[i]['phyloc']
return location
raise SPOTError('Stage %s in dataset %s does not exist' %
(stage, dataset))
def get_dataset_attributes(self, dataset, stage='raw', group='/'):
"""
Get hdf5 attributes of a specified dataset. group can be a specified
image within the hdf5 file, or default '/' for top level attributes
Parameters
----------
dataset : str
name of dataset
stage : str
stage name
group : str
group name of hdf5
Returns
-------
json
json reponse of attributes
"""
path = self.get_stage_path(dataset, stage)
params = {'group': group}
r = self.post(self.SPOT_URL + '/hdf/attributes' + path, params=params)
return self.check_response(r)
def list_dataset_images(self, dataset, stage):
"""
List images inside a given dataset
Parameters
----------
dataset : str
name of dataset
stage : str
stage name
Returns
-------
json
response of image list
"""
path = self.get_stage_path(dataset, stage)
r = self.post(self.SPOT_URL + '/hdf/listimages' + path)
return self.check_response(r)
def get_dataset_size(self, dataset, stage):
"""
Get the file size of a specified dataset
Parameters
----------
dataset : str
name of dataset
stage : str
stage name
Returns
-------
int
dataset size in bytes
"""
path = self.get_stage_path(dataset, stage)
r = self.session.head(self.SPOT_URL + '/hdf/download' + path)
head = r.headers
if not 'content-length' in head: return 1
size = float(head['content-length'])
return size
def get_raw_image(self, dataset, stage, image=None, index=None):
"""
Download raw data from an image in a SPOT dataset
Parameters
----------
dataset : str
name of dataset
stage : str
stage name
image : str
(optional), name of image in dataset
index : int
(optional) index of image in dataset (one of index or image must be given)
Returns
-------
ndarray
2D ndarray of image data
"""
images = list(self.list_dataset_images(dataset, stage))
if image is None and index is None:
raise ValueError('One of image or index must be given')
elif image is None and index is not None:
group = images[index]
else:
group = os.path.split(images[0])[0] + '/' + image
r = self.stage_tape_2_disk(dataset, stage)
path = self.get_stage_path(dataset, stage)
params = {'group': group}
r = self.post(self.SPOT_URL + '/hdf/rawdata' + path, params=params)
r = self.check_response(r)
return np.array(r['data'])
def get_image_download_URLS(self, dataset, stage, image=None, index=None):
"""
Get download URL's for a specific image in a SPOT dataset
Parameters
----------
dataset : str
name of dataset
stage : str
stage name
image : str
(optional), name of image in dataset
index : int
(optional) index of image in dataset (one of index or image must be given)
Returns
-------
dict
Dictionary with urls to images
"""
images = list(self.list_dataset_images(dataset, stage))
if image is None and index is None:
raise ValueError('One of image or index must be given')
elif image is None and index is not None:
group = images[index]
else:
group = os.path.split(images[0])[0] + '/' + image
r = self.stage_tape_2_disk(dataset, stage)
path = self.get_stage_path(dataset, stage)
params = {'group': group}
r = self.post(self.SPOT_URL + '/hdf/image' + path, params=params)
r = self.check_response(r)
return r
def get_image_as(self, dataset, stage, ext='tif', image=None, index=None):
"""
Download an image in the specified format and return an array of the image
Parameters
----------
dataset : str
name of dataset
stage : str
stage name
ext : str, optional
extension for image type (tif or png)
image : str
(optional), name of image in dataset
index : int
(optional) index of image in dataset (one of index or image must be given)
Returns
-------
ndarray
2D ndarray of image data
"""
r = self.get_image_download_URLS(dataset, stage, image=image, index=index)
url = r['pnglocaion'] if ext == 'png' else r['tiflocaion'] # Careful when spot API fixes this spelling mistake
r = self.get(url)
img = Image.open(StringIO(r.content))
return np.asarray(img)
def download_image(self, dataset, stage, save_path=None, ext='tif', image=None, index=None):
"""
Download and save a specific image in a dataset as png or tif image
Parameters
----------
dataset : str
name of dataset
stage : str
stage name
save_path : str, optional
Path to save the image
ext : str, optional
extension for image type (tif or png)
image : str
(optional), name of image in dataset
index : int
(optional) index of image in dataset (one of index or image must be given)
"""
if ext not in ('png', 'tif'):
raise ValueError('ext can only be png or tif')
if image is None and index is None:
raise ValueError('One of image or index must be given')
if save_path is None:
name = image.split('.')[0] if image is not None else dataset + '_{}'.format(index)
save_path = os.path.join(os.path.expanduser('~'), '{}.{}'.format(name,ext))
r = self.get_image_download_URLS(dataset, stage, image=image, index=index)
url = r['pnglocaion'] if ext == 'png' else r['tiflocaion'] # Careful when spot API fixes this spelling mistake
r = self.get(url)
with open(save_path, 'w') as f:
for chunk in r:
f.write(chunk)
    def stage_tape_2_disk(self, dataset, stage):
        """
        Stage a dataset from tape to disk if needed.

        Blocks, polling the server every 3 seconds, until SPOT reports a
        concrete on-disk location.

        Parameters
        ----------
        dataset : str
            name of dataset
        stage : str
            stage name

        Returns
        -------
        json
            Final staging response; ``['location']`` holds the on-disk path.
        """
        path = self.get_stage_path(dataset, stage)
        r = self.check_response(self.post(self.SPOT_URL + '/hdf/stageifneeded' + path))
        # Wait for staging to finish
        # NOTE(review): there is no timeout or retry cap -- this loops
        # forever if the staging job stalls server-side.
        while r['location'] == 'unknown' or r['location'] == 'staging':
            sleep(3)  # `sleep` is presumably imported at module level -- confirm
            r = self.check_response(self.post(self.SPOT_URL + '/hdf/stageifneeded' + path))
        return r
def download_dataset(self, dataset, stage, save_path=None):
"""
Download a specified dataset
Parameters
----------
dataset : str
name of dataset
stage : str
stage name
save_path : str
Path and name to save file locally. If None name on SPOT is used and save in home directory
"""
path = self.get_stage_path(dataset, stage)
if save_path is None:
save_path = os.path.join(os.path.expanduser('~'), path.split('/')[-1])
r = self.stage_tape_2_disk(dataset, stage)
r = self.get(self.SPOT_URL + '/hdf/download' + path, stream=True)
with open(save_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=64*1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
r.close()
return self.check_login(r)
def download_dataset_generator(self, dataset, stage, save_path=None, chunk_size=64*1024):
"""
Download a dataset as a generator (yields the fraction downloaded)
Useful to know the status of a download (for gui purposes)
Parameters
----------
dataset : str
name of dataset
stage : str
stage name
save_path : str
path and name to save file locally. If None name on SPOT is used and save in home directory
chunk_size : int
Chuck size of data in bytes
Yields
------
float
Percent downloaded
"""
path = self.get_stage_path(dataset, stage)
if save_path is None:
save_path = os.path.join(os.path.expanduser('~'), path.split('/')[-1])
r = self.stage_tape_2_disk(dataset, stage)
file_size = float(self.get_dataset_size(dataset, stage))
r = self.get(self.SPOT_URL + '/hdf/download' + path, stream=True)
with open(save_path, 'wb') as f:
downloaded = 0.0
for chunk in r.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
downloaded += len(chunk)/file_size
yield downloaded
r.close()
return
def transfer_2_nersc(self, dataset, stage, path, system):
# TODO need to find a way to make a generator out of this, here or from cp/rsync
"""
Transfer a dataset to NERSC
Parameters
----------
dataset : str
name of dataset
stage : str
stage name
path : str
absolute destination path on NERSC
"""
r = self.stage_tape_2_disk(dataset, stage)
if r['location'] != 'staging' or r['ocation'] != 'unknown':
r = self.rsync(r['location'], path, system)
return r
class SPOTError(Exception):
    """Raised when SPOT gets angry"""
    # Domain-specific failure (e.g. a requested stage that does not exist);
    # lets callers catch SPOT errors separately from generic HTTP exceptions.
    pass
if __name__ == '__main__':
    # Ad-hoc smoke test against a live SPOT instance. Python 2 only:
    # note the StringIO import and the print statements below.
    import time
    from StringIO import StringIO
    from PIL import Image
    from matplotlib.pyplot import imshow, show, figure
    s = SpotClient()
    # NOTE(review): hard-coded username and empty password -- never commit
    # real credentials here.
    s.login('lbluque', '')
    # t = time.time()
    # img = s.get_raw_image('20160630_054009_prefire_3_0amp_scan7', 'raw', index=0)
    # print 'Time: ', time.time() - t
    t = time.time()
    # Fetch one raw image as a tif-decoded array, display it, and report
    # the elapsed wall-clock time.
    arr = s.get_image_as('20160630_054009_prefire_3_0amp_scan7', 'raw', ext='tif', index=0)
    print arr.shape
    imshow(arr)
    show()
    # for i in range(3):
    #     figure(i)
    #     imshow(arr[:, :, i])
    #     show()
    print 'Time: ', time.time() - t
| en | 0.578605 | # -*- coding: utf-8 -*- Client class to handle SPOT API calls Login to SPOT Parameters ---------- username : str password : str Search a dataset on SPOT Parameters ---------- query : str, search query kwargs : {key: option} Any of the following: 'sortterm' attribute to sort results by 'sorttype' ascending 'asc' or descending 'desc' 'end_station' endstation of dataset 'limitnum' maximum number of results to show 'skipnum' number of results to skipd 'search' search query Returns ------- json response of search results # sortterm probably allows more filters will need to check... Get datasets that are derived from the given dataset. ie raw, sino, norm, etc... Parameters ---------- dataset : str dataset name Returns ------- json Response with derived datasets Get the database path for a specific stage of a dataset. Stages are raw, norm, sino, gridrec, etc... Parameters ---------- dataset : str name of dataset stage : str stage name Returns ------- str database path Get the full location (system path) for a specific stage of a dataset. stages are raw, norm, sino, gridrec, etc... Parameters ---------- dataset : str name of dataset stage : str stage name Returns ------- str file path Get hdf5 attributes of a specified dataset. 
group can be a specified image within the hdf5 file, or default '/' for top level attributes Parameters ---------- dataset : str name of dataset stage : str stage name group : str group name of hdf5 Returns ------- json json reponse of attributes List images inside a given dataset Parameters ---------- dataset : str name of dataset stage : str stage name Returns ------- json response of image list Get the file size of a specified dataset Parameters ---------- dataset : str name of dataset stage : str stage name Returns ------- int dataset size in bytes Download raw data from an image in a SPOT dataset Parameters ---------- dataset : str name of dataset stage : str stage name image : str (optional), name of image in dataset index : int (optional) index of image in dataset (one of index or image must be given) Returns ------- ndarray 2D ndarray of image data Get download URL's for a specific image in a SPOT dataset Parameters ---------- dataset : str name of dataset stage : str stage name image : str (optional), name of image in dataset index : int (optional) index of image in dataset (one of index or image must be given) Returns ------- dict Dictionary with urls to images Download an image in the specified format and return an array of the image Parameters ---------- dataset : str name of dataset stage : str stage name ext : str, optional extension for image type (tif or png) image : str (optional), name of image in dataset index : int (optional) index of image in dataset (one of index or image must be given) Returns ------- ndarray 2D ndarray of image data # Careful when spot API fixes this spelling mistake Download and save a specific image in a dataset as png or tif image Parameters ---------- dataset : str name of dataset stage : str stage name save_path : str, optional Path to save the image ext : str, optional extension for image type (tif or png) image : str (optional), name of image in dataset index : int (optional) index of image in dataset (one of index or 
image must be given) # Careful when spot API fixes this spelling mistake Stage a dataset from tape to disk if needed Parameters ---------- dataset : str name of dataset stage : str stage name # Wait for staging to finish Download a specified dataset Parameters ---------- dataset : str name of dataset stage : str stage name save_path : str Path and name to save file locally. If None name on SPOT is used and save in home directory # filter out keep-alive new chunks Download a dataset as a generator (yields the fraction downloaded) Useful to know the status of a download (for gui purposes) Parameters ---------- dataset : str name of dataset stage : str stage name save_path : str path and name to save file locally. If None name on SPOT is used and save in home directory chunk_size : int Chuck size of data in bytes Yields ------ float Percent downloaded # filter out keep-alive new chunks # TODO need to find a way to make a generator out of this, here or from cp/rsync Transfer a dataset to NERSC Parameters ---------- dataset : str name of dataset stage : str stage name path : str absolute destination path on NERSC Raised when SPOT gets angry # t = time.time() # img = s.get_raw_image('20160630_054009_prefire_3_0amp_scan7', 'raw', index=0) # print 'Time: ', time.time() - t # for i in range(3): # figure(i) # imshow(arr[:, :, i]) # show() | 2.839045 | 3 |
lkmltools/linter/rule_factory.py | iserko/lookml-tools | 0 | 6621126 | """
a rule factory
Authors:
<NAME> (<EMAIL>)
"""
from lkmltools.linter.rules.filerules.one_view_per_file_rule import OneViewPerFileRule
from lkmltools.linter.rules.filerules.filename_viewname_match_rule import (
FilenameViewnameMatchRule,
)
from lkmltools.linter.rules.filerules.data_source_rule import DataSourceRule
from lkmltools.linter.rules.fieldrules.description_rule import DescriptionRule
from lkmltools.linter.rules.fieldrules.all_caps_rule import AllCapsRule
from lkmltools.linter.rules.fieldrules.count_name_rule import CountNameRule
from lkmltools.linter.rules.fieldrules.yesno_name_rule import YesNoNameRule
from lkmltools.linter.rules.fieldrules.drill_down_rule import DrillDownRule
from lkmltools.linter.rules.fieldrules.lexicon_rule import LexiconRule
import logging
class RuleFactory:
    """
    Singleton Factory where one can register Rules for instantiation

    Every ``RuleFactory()`` call returns a thin wrapper around one shared
    ``__HiddenFactory`` instance, so registrations are process-global.
    Typical use: ``RuleFactory().instantiate("AllCapsRule", config)``.
    """
    # Shared singleton; populated on the first construction below.
    instance = None
    def __init__(self):
        """instantiate the factory but as a singleton. The guard rails are here"""
        # where the magic happens, only one instance allowed:
        if not RuleFactory.instance:
            RuleFactory.instance = RuleFactory.__HiddenFactory()
    def __getattr__(self, name):
        """Delegate attribute lookup to the hidden singleton.

        This is how instantiate/register/is_registered/unregister are
        exposed on the wrapper.

        Returns:
            the attribute resolved on the shared __HiddenFactory instance
        """
        return getattr(self.instance, name)
    class __HiddenFactory:
        """actual factory where registry and instantiation happens"""
        def __init__(self):
            """instantiate the HiddenFactory"""
            # Registry mapping rule-class names (str) to rule classes,
            # seeded with the built-in file and field rules imported above.
            self.name_dict = {
                "OneViewPerFileRule": OneViewPerFileRule,
                "FilenameViewnameMatchRule": FilenameViewnameMatchRule,
                "DataSourceRule": DataSourceRule,
                "DescriptionRule": DescriptionRule,
                "AllCapsRule": AllCapsRule,
                "CountNameRule": CountNameRule,
                "DrillDownRule": DrillDownRule,
                "YesNoNameRule": YesNoNameRule,
                "LexiconRule": LexiconRule,
            }
        def instantiate(self, class_name, json_config=None):
            """instantiate instances of rule, given name of rule class
            Args:
                class_name (str): name of the class
                json_config (dict, optional): configuration passed to the rule
            Returns:
                instance (Rule): instance of a rule
            Raises:
                KeyError: if class_name has not been registered
            """
            return self.name_dict[class_name](json_config)
        def register(self, key, class_obj):
            """Registering class_obj with key
            Args:
                key (str): key such as class name, e.g. 'AllCapsRule'
                class_obj (class obj), e.g. AllCapsRule
            Returns:
                nothing. Side effect is to register the class
            """
            # FIXME do we want to warn/raise on overwriting?
            self.name_dict[key] = class_obj
            logging.debug("Registered %s : %s" % (key, class_obj))
        def is_registered(self, class_key):
            """is this class registered?
            Args:
                class_key (str): key used to register class
            Returns:
                determination (boolean) of whether this is already registered
            """
            return class_key in self.name_dict
        def unregister(self, key):
            """unregister an entry
            Arguments:
                key (str): key to unregister
            Returns:
                nothing. Side effect is that the object is unregistered
            Raises:
                Exception: if the key was never registered
            """
            if key in self.name_dict:
                del self.name_dict[key]
                logging.info("Unregistered %s", key)
            else:
                raise Exception("Key not found " + key)
| """
a rule factory
Authors:
<NAME> (<EMAIL>)
"""
from lkmltools.linter.rules.filerules.one_view_per_file_rule import OneViewPerFileRule
from lkmltools.linter.rules.filerules.filename_viewname_match_rule import (
FilenameViewnameMatchRule,
)
from lkmltools.linter.rules.filerules.data_source_rule import DataSourceRule
from lkmltools.linter.rules.fieldrules.description_rule import DescriptionRule
from lkmltools.linter.rules.fieldrules.all_caps_rule import AllCapsRule
from lkmltools.linter.rules.fieldrules.count_name_rule import CountNameRule
from lkmltools.linter.rules.fieldrules.yesno_name_rule import YesNoNameRule
from lkmltools.linter.rules.fieldrules.drill_down_rule import DrillDownRule
from lkmltools.linter.rules.fieldrules.lexicon_rule import LexiconRule
import logging
class RuleFactory:
"""
Singleton Factory where one can register Rules for instantiation
"""
instance = None
def __init__(self):
"""instantiate the factory but as a singleton. The guard rails are here"""
# where the magic happens, only one instance allowed:
if not RuleFactory.instance:
RuleFactory.instance = RuleFactory.__HiddenFactory()
def __getattr__(self, name):
"""getattr with instance name
Returns:
gettattr
"""
return getattr(self.instance, name)
class __HiddenFactory:
"""actual factory where registry and instantiation happens"""
def __init__(self):
"""instantiate the HiddenFactory"""
self.name_dict = {
"OneViewPerFileRule": OneViewPerFileRule,
"FilenameViewnameMatchRule": FilenameViewnameMatchRule,
"DataSourceRule": DataSourceRule,
"DescriptionRule": DescriptionRule,
"AllCapsRule": AllCapsRule,
"CountNameRule": CountNameRule,
"DrillDownRule": DrillDownRule,
"YesNoNameRule": YesNoNameRule,
"LexiconRule": LexiconRule,
}
def instantiate(self, class_name, json_config=None):
"""instantiate instances of rule, given name of rule class
Args:
class_name (str): name of the class
Returns:
instance (Rule): instance of a rule
"""
return self.name_dict[class_name](json_config)
def register(self, key, class_obj):
"""Registering class_obj with key
Args:
key (str): key such as class name, e.g. 'AllCapsRule'
class_obj (class obj), e.g. AllCapsRule
Returns:
nothing. Side effect is to register the class
"""
# FIXME do we want to warn/raise on overwriting?
self.name_dict[key] = class_obj
logging.debug("Registered %s : %s" % (key, class_obj))
def is_registered(self, class_key):
"""is this class registered?
Args:
class_key (str): key used to register class
Returns:
determination (boolean) of whether this is already register
"""
return class_key in self.name_dict
def unregister(self, key):
"""unregister an entry
Arguments:
key (str): key to unregister
Returns:
nothing. Side effect is that the object is unregistered
"""
if key in self.name_dict:
del self.name_dict[key]
logging.info("Unregistered %s", key)
else:
raise Exception("Key not found " + key)
| en | 0.758421 | a rule factory Authors: <NAME> (<EMAIL>) Singleton Factory where one can register Rules for instantiation instantiate the factory but as a singleton. The guard rails are here # where the magic happens, only one instance allowed: getattr with instance name Returns: gettattr actual factory where registry and instantiation happens instantiate the HiddenFactory instantiate instances of rule, given name of rule class Args: class_name (str): name of the class Returns: instance (Rule): instance of a rule Registering class_obj with key Args: key (str): key such as class name, e.g. 'AllCapsRule' class_obj (class obj), e.g. AllCapsRule Returns: nothing. Side effect is to register the class # FIXME do we want to warn/raise on overwriting? is this class registered? Args: class_key (str): key used to register class Returns: determination (boolean) of whether this is already register unregister an entry Arguments: key (str): key to unregister Returns: nothing. Side effect is that the object is unregistered | 2.346474 | 2 |
interview/leet/41_First_Missing_Positive.py | eroicaleo/LearningPython | 1 | 6621127 | <reponame>eroicaleo/LearningPython
#!/usr/bin/env python
# The thinking process is like the following:
# 1. remove O(n) space limit, then we can use a hash table to solve it
# 2. then the hash table is not necessary, can modify the original array
class Solution:
    def firstMissingPositive(self, nums):
        """
        Return the smallest positive integer missing from nums.

        Cyclic-placement approach: each value v with 1 <= v <= len(nums) is
        swapped into slot v-1, then the first slot whose value does not
        match its 1-based index is the answer. O(n) time, O(1) extra space.
        (Removed a leftover debug print from inside the swap loop.)

        :type nums: List[int]
        :rtype: int
        """
        length = len(nums)
        for tmp in nums:
            # Keep swapping tmp into its home slot until it is out of range
            # or that slot already holds the right value (the second check
            # also guards against infinite loops on duplicates).
            while 0 < tmp <= length and tmp != nums[tmp-1]:
                nums[tmp-1], tmp = tmp, nums[tmp-1]
        for i in range(1, length+1):
            if nums[i-1] != i:
                return i
        return length+1
# Ad-hoc check: earlier test inputs are overwritten, only the last runs.
sol = Solution()
nums = [3,4,-1,1]
nums = [1,2,0]
nums = [7,8,9,11,12]
print(sol.firstMissingPositive(nums))
| #!/usr/bin/env python
# The thinking process is like the following:
# 1. remove O(n) space limit, then we can use a hash table to solve it
# 2. then the hash table is not necessary, can modify the original array
class Solution:
def firstMissingPositive(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
length = len(nums)
for tmp in nums:
while 0 < tmp <= length and tmp != nums[tmp-1]:
print('tmp = %d, nums[tmp-1] = %d' % (tmp, nums[tmp-1]))
nums[tmp-1], tmp = tmp, nums[tmp-1]
for i in range(1, length+1):
if nums[i-1] != i:
return i
return length+1
sol = Solution()
nums = [3,4,-1,1]
nums = [1,2,0]
nums = [7,8,9,11,12]
print(sol.firstMissingPositive(nums)) | en | 0.612696 | #!/usr/bin/env python # The thinking process is like the following: # 1. remove O(n) space limit, then we can use a hash table to solve it # 2. then the hash table is not necessary, can modify the original array :type nums: List[int] :rtype: int | 3.549221 | 4 |
src/ais_toy/classical_search/graph_search.py | smastelini/ai-search-toy-examples | 0 | 6621128 | <filename>src/ais_toy/classical_search/graph_search.py
class GraphSearch:
    """Skeleton base for graph-search algorithms over a fixed graph."""
    def __init__(self, graph):
        # Graph to search; its concrete structure (adjacency list/matrix)
        # is defined by the caller -- confirm against concrete subclasses.
        self._graph = graph
    def _prepare(self):
        # Reset the open/closed node bookkeeping before a fresh search.
        self._opened = []
        self._closed = []
    def search(self, origin, dest):
        # Hook to be implemented by subclasses with the actual strategy.
        pass
class GraphSearch:
def __init__(self, graph):
self._graph = graph
def _prepare(self):
self._opened = []
self._closed = []
def search(self, origin, dest):
pass
| none | 1 | 2.181977 | 2 | |
python/setup.py | tannercrook/Canvas-to-Infinite-Campus-Grade-Sync | 1 | 6621129 | <gh_stars>1-10
import server as s
import functions as f
import accounts as a
import os
import json
def mainMenu():
    """
    Displays the main menu of the setup script.
    """
    os.system('cls' if os.name == 'nt' else 'clear')
    for entry in ('Main Menu',
                  '1 - Set Canvas Instance URL',
                  '2 - Set District Account ID',
                  '3 - Set Canvas Access Token',
                  '4 - Select Accounts to Sync',
                  '5 - Select Terms to Sync',
                  '6 - Set Ignored Courses (If specific courses should be skipped.)',
                  '7 - Set Courses to Sync (If only select courses are synced)',
                  '8 - Set Run Path',
                  '9 - Help',
                  '10 - Exit'):
        print(entry)
    choice = input('Select an option: ')
    # Normalize: drop surrounding whitespace and stray quote characters.
    choice = choice.strip().replace('"', "").replace("'", '')
    # Every branch of the original cleared the screen first, so do it once.
    os.system('cls' if os.name == 'nt' else 'clear')
    handlers = {'1': setURL, '2': setDistrictID, '3': setAccessToken,
                '4': setAccountsToSync, '8': setPath}
    if choice in handlers:
        handlers[choice]()
    elif choice in ('5', '6', '7', '9'):
        print('Do Something')
    elif choice == '10':
        print('Goodbye!')
        exit(0)
    else:
        print('Invalid choice. Please input a valid choice.')
        mainMenu()
def writeJSON(variables):
    """
    Writes the variables.json file.

    The mapping is serialized with a 4-space indent so the file remains
    human-editable.
    """
    serialized = json.dumps(variables, indent=4)
    with open('variables.json', 'w') as json_file:
        json_file.write(serialized)
def setURL():
    """
    Sets the Canvas URL variable.
    """
    print('Please enter the url for your Canvas instance. '
          'It should be complete, from the https to the .com. '
          'i.e https://yourschool.instructure.com'
          )
    # Strip whitespace and stray quote characters from the entered URL.
    url = input("Your url: ").strip().replace('"', "").replace("'", '')
    config = f.getVariables()
    config['canvasURL'] = url
    writeJSON(config)
    print(f'URL set to {url}')
    mainMenu()
def setDistrictID():
    """
    Sets the Canvas district account id variable. This is the base
    account id for your Canvas instance.
    """
    print('Please enter the district account id for the base account '
          'of your Canvas instance. It should be a number.'
          )
    userInput = input("District Account ID: ")
    userInput = userInput.strip()
    userInput = userInput.replace('"', "")
    userInput = userInput.replace("'", '')
    variables = f.getVariables()
    variables['districtAccountID'] = userInput
    writeJSON(variables)
    # Typo fix: the confirmation previously read "District account iID".
    print(f'District account ID set to {userInput}')
    mainMenu()
def setAccessToken():
    """
    Sets the Canvas access token.
    """
    print('Please enter the access token for Canvas. This needs to be '
          'for an admin account.'
          )
    # Copy-paste fix: the prompt previously asked for "District Account ID"
    # even though this function stores the access token.
    userInput = input("Canvas Access Token: ")
    userInput = userInput.strip()
    userInput = userInput.replace('"', "")
    userInput = userInput.replace("'", '')
    variables = f.getVariables()
    variables['accessToken'] = userInput
    writeJSON(variables)
    print(f'Canvas access token set to {userInput}')
    mainMenu()
def setAccountsToSync():
    """
    Sets the Canvas sub accounts from which you want to sync grades.

    Interactive sub-menu; it re-enters itself recursively after every
    action and persists the selection to variables.json on each pass.
    """
    variables = f.getVariables()
    accounts = a.getAccounts()
    # Start from the previously saved selection, if any.
    if 'accounts' in variables:
        sAccounts = variables['accounts']
    else:
        sAccounts = []
    # IDs of the accounts already selected for sync.
    sAccountIDs = []
    for account in sAccounts:
        sAccountIDs.append(account['id'])
    print('Accounts Menu')
    print('1 - See Currently Syncing Accounts')
    print('2 - Add Account')
    print('3 - Remove Account')
    print('4 - Main Menu')
    userInput = input("Option: ")
    # Normalize: strip whitespace and stray quote characters.
    userInput = userInput.strip()
    userInput = userInput.replace('"', "")
    userInput = userInput.replace("'", '')
    if userInput == '1':
        print('')
        print('Currently Syncing Accounts')
        if len(sAccounts) > 0:
            for account in sAccounts:
                print('Account ID - Account Name')
                print(f'{account["id"]} - {account["name"]}')
        else:
            print('There are not any accounts set up to sync.')
        print('')
        setAccountsToSync()
    elif userInput == '2':
        # List only accounts not already selected, collecting valid IDs.
        accountIDs = []
        print('')
        print('Account ID - Account Name')
        for account in accounts:
            if account['id'] in sAccountIDs:
                continue
            print(f'{account["id"]} - {account["name"]}')
            accountIDs.append(account['id'])
        userInput = input("Account to add: ")
        userInput = userInput.strip()
        userInput = userInput.replace('"', "")
        userInput = userInput.replace("'", '')
        # NOTE(review): these recursive calls are missing a `return`, so
        # after the nested menu unwinds this frame keeps running with the
        # invalid input -- confirm intended flow.
        if not userInput.isdigit():
            print('Invalid choice. Please input a valid choice.')
            setAccountsToSync()
        if int(userInput) not in accountIDs:
            print('Invalid choice. Please input a valid choice.')
            setAccountsToSync()
        for account in accounts:
            if account['id'] == int(userInput):
                sAccounts.append(account)
        print('')
    elif userInput == '3':
        print('')
        print('Account ID - Account Name')
        for account in sAccounts:
            print(f'{account["id"]} - {account["name"]}')
        userInput = input("Account to remove: ")
        userInput = userInput.strip()
        userInput = userInput.replace('"', "")
        userInput = userInput.replace("'", '')
        # NOTE(review): same missing-`return` pattern as in the add branch.
        if not userInput.isdigit():
            print('Invalid choice. Please input a valid choice.')
            setAccountsToSync()
        if int(userInput) not in sAccountIDs:
            print('Invalid choice. Please input a valid choice.')
            setAccountsToSync()
        for account in sAccounts:
            if account['id'] == int(userInput):
                sAccounts.remove(account)
        print('')
    elif userInput =='4':
        mainMenu()
    else:
        os.system('cls' if os.name == 'nt' else 'clear')
        print('Invalid choice. Please input a valid choice.')
        setAccountsToSync()
    # Persist whatever selection this frame ended up with, then re-enter
    # the menu (this function only exits via option 4 / program exit).
    variables['accounts'] = sAccounts
    # print('Please enter the district account id for the base account '
    #       'of your Canvas instance. It should be a number.'
    # )
    # userInput = input("District Account ID: ")
    # userInput = userInput.strip()
    # userInput = userInput.replace('"', "")
    # userInput = userInput.replace("'", '')
    # variables = f.getVariables()
    # variables['districtAccountID'] = userInput
    writeJSON(variables)
    setAccountsToSync()
    # print(f'District account iID set to {userInput}')
    # mainMenu()
def setPath():
    """
    Sets the run path of the script.
    """
    print('If you are running this script as a scheduled task, you '
          'need to put the full path to the script folder. i.e. '
          '/home/sync/Canvas-to-Infinite-Campus-Grade-Sync/python/'
          )
    # Strip whitespace and stray quote characters from the entered path.
    run_path = input("Path: ").strip().replace('"', "").replace("'", '')
    config = f.getVariables()
    config['path'] = run_path
    writeJSON(config)
    print(f'Path set to {run_path}')
    mainMenu()
# Entry point: drop straight into the interactive menu when the script runs.
mainMenu()
| import server as s
import functions as f
import accounts as a
import os
import json
def mainMenu():
"""
Displays the main menu of the setup script.
"""
os.system('cls' if os.name == 'nt' else 'clear')
print('Main Menu')
print('1 - Set Canvas Instance URL')
print('2 - Set District Account ID')
print('3 - Set Canvas Access Token')
print('4 - Select Accounts to Sync')
print('5 - Select Terms to Sync')
print('6 - Set Ignored Courses (If specific courses should be skipped.)')
print('7 - Set Courses to Sync (If only select courses are synced)')
print('8 - Set Run Path')
print('9 - Help')
print('10 - Exit')
userInput = input('Select an option: ')
userInput = userInput.strip() # Removes the trailing space from the path.
userInput = userInput.replace('"', "")
userInput = userInput.replace("'", '')
if userInput == '1':
os.system('cls' if os.name == 'nt' else 'clear')
setURL()
elif userInput == '2':
os.system('cls' if os.name == 'nt' else 'clear')
setDistrictID()
elif userInput == '3':
os.system('cls' if os.name == 'nt' else 'clear')
setAccessToken()
elif userInput == '4':
os.system('cls' if os.name == 'nt' else 'clear')
setAccountsToSync()
elif userInput == '5':
os.system('cls' if os.name == 'nt' else 'clear')
print('Do Something')
elif userInput == '6':
os.system('cls' if os.name == 'nt' else 'clear')
print('Do Something')
elif userInput == '7':
os.system('cls' if os.name == 'nt' else 'clear')
print('Do Something')
elif userInput == '8':
os.system('cls' if os.name == 'nt' else 'clear')
setPath()
elif userInput == '9':
os.system('cls' if os.name == 'nt' else 'clear')
print('Do Something')
elif userInput == '10':
os.system('cls' if os.name == 'nt' else 'clear')
print('Goodbye!')
exit(0)
else:
os.system('cls' if os.name == 'nt' else 'clear')
print('Invalid choice. Please input a valid choice.')
mainMenu()
def writeJSON(variables):
"""
Writes the variables.json file.
"""
with open('variables.json', 'w') as jsonFile:
json.dump(variables, jsonFile, indent=4)
def setURL():
"""
Sets the Canvas URL variable.
"""
print('Please enter the url for your Canvas instance. '
'It should be complete, from the https to the .com. '
'i.e https://yourschool.instructure.com'
)
userInput = input("Your url: ")
userInput = userInput.strip()
userInput = userInput.replace('"', "")
userInput = userInput.replace("'", '')
variables = f.getVariables()
variables['canvasURL'] = userInput
writeJSON(variables)
print(f'URL set to {userInput}')
mainMenu()
def setDistrictID():
"""
Sets the Canvas district account id variable. This is the base
account id for your Canvas instance.
"""
print('Please enter the district account id for the base account '
'of your Canvas instance. It should be a number.'
)
userInput = input("District Account ID: ")
userInput = userInput.strip()
userInput = userInput.replace('"', "")
userInput = userInput.replace("'", '')
variables = f.getVariables()
variables['districtAccountID'] = userInput
writeJSON(variables)
print(f'District account iID set to {userInput}')
mainMenu()
def setAccessToken():
"""
Sets the Canvas access token.
"""
print('Please enter the access token for Canvas. This needs to be '
'for an admin account.'
)
userInput = input("District Account ID: ")
userInput = userInput.strip()
userInput = userInput.replace('"', "")
userInput = userInput.replace("'", '')
variables = f.getVariables()
variables['accessToken'] = userInput
writeJSON(variables)
print(f'Canvas access token set to {userInput}')
mainMenu()
def setAccountsToSync():
"""
Sets the Canvas sub accounts from which you want to sync grades.
"""
variables = f.getVariables()
accounts = a.getAccounts()
if 'accounts' in variables:
sAccounts = variables['accounts']
else:
sAccounts = []
sAccountIDs = []
for account in sAccounts:
sAccountIDs.append(account['id'])
print('Accounts Menu')
print('1 - See Currently Syncing Accounts')
print('2 - Add Account')
print('3 - Remove Account')
print('4 - Main Menu')
userInput = input("Option: ")
userInput = userInput.strip()
userInput = userInput.replace('"', "")
userInput = userInput.replace("'", '')
if userInput == '1':
print('')
print('Currently Syncing Accounts')
if len(sAccounts) > 0:
for account in sAccounts:
print('Account ID - Account Name')
print(f'{account["id"]} - {account["name"]}')
else:
print('There are not any accounts set up to sync.')
print('')
setAccountsToSync()
elif userInput == '2':
accountIDs = []
print('')
print('Account ID - Account Name')
for account in accounts:
if account['id'] in sAccountIDs:
continue
print(f'{account["id"]} - {account["name"]}')
accountIDs.append(account['id'])
userInput = input("Account to add: ")
userInput = userInput.strip()
userInput = userInput.replace('"', "")
userInput = userInput.replace("'", '')
if not userInput.isdigit():
print('Invalid choice. Please input a valid choice.')
setAccountsToSync()
if int(userInput) not in accountIDs:
print('Invalid choice. Please input a valid choice.')
setAccountsToSync()
for account in accounts:
if account['id'] == int(userInput):
sAccounts.append(account)
print('')
elif userInput == '3':
print('')
print('Account ID - Account Name')
for account in sAccounts:
print(f'{account["id"]} - {account["name"]}')
userInput = input("Account to remove: ")
userInput = userInput.strip()
userInput = userInput.replace('"', "")
userInput = userInput.replace("'", '')
if not userInput.isdigit():
print('Invalid choice. Please input a valid choice.')
setAccountsToSync()
if int(userInput) not in sAccountIDs:
print('Invalid choice. Please input a valid choice.')
setAccountsToSync()
for account in sAccounts:
if account['id'] == int(userInput):
sAccounts.remove(account)
print('')
elif userInput =='4':
mainMenu()
else:
os.system('cls' if os.name == 'nt' else 'clear')
print('Invalid choice. Please input a valid choice.')
setAccountsToSync()
variables['accounts'] = sAccounts
# print('Please enter the district account id for the base account '
# 'of your Canvas instance. It should be a number.'
# )
# userInput = input("District Account ID: ")
# userInput = userInput.strip()
# userInput = userInput.replace('"', "")
# userInput = userInput.replace("'", '')
# variables = f.getVariables()
# variables['districtAccountID'] = userInput
writeJSON(variables)
setAccountsToSync()
# print(f'District account iID set to {userInput}')
# mainMenu()
def setPath():
"""
Sets the run path of the script.
"""
print('If you are running this script as a scheduled task, you '
'need to put the full path to the script folder. i.e. '
'/home/sync/Canvas-to-Infinite-Campus-Grade-Sync/python/'
)
userInput = input("Path: ")
userInput = userInput.strip()
userInput = userInput.replace('"', "")
userInput = userInput.replace("'", '')
variables = f.getVariables()
variables['path'] = userInput
writeJSON(variables)
print(f'Path set to {userInput}')
mainMenu()
mainMenu() | en | 0.653326 | Displays the main menu of the setup script. # Removes the trailing space from the path. Writes the variables.json file. Sets the Canvas URL variable. Sets the Canvas district account id variable. This is the base account id for your Canvas instance. Sets the Canvas access token. Sets the Canvas sub accounts from which you want to sync grades. # print('Please enter the district account id for the base account ' # 'of your Canvas instance. It should be a number.' # ) # userInput = input("District Account ID: ") # userInput = userInput.strip() # userInput = userInput.replace('"', "") # userInput = userInput.replace("'", '') # variables = f.getVariables() # variables['districtAccountID'] = userInput # print(f'District account iID set to {userInput}') # mainMenu() Sets the run path of the script. | 3.082471 | 3 |
prophy/tests/test_union.py | florczakraf/prophy | 14 | 6621130 | import prophy
import pytest
@pytest.fixture(scope='session')
def SimpleUnion():
    """Session-scoped fixture: union of three u32 arms with discriminators 0, 1, 2."""
    class SimpleUnion(prophy.with_metaclass(prophy.union_generator, prophy.union)):
        _descriptor = [("a", prophy.u32, 0),
                       ("b", prophy.u32, 1),
                       ("c", prophy.u32, 2)]
    return SimpleUnion
@pytest.fixture(scope='session')
def VariableLengthFieldsUnion():
    """Session-scoped fixture: union whose arms range from u8 to u64.

    The encoded size of the union is dictated by its largest arm (u64).
    """
    class VariableLengthFieldsUnion(prophy.with_metaclass(prophy.union_generator, prophy.union)):
        _descriptor = [("a", prophy.u8, 0),
                       ("b", prophy.u16, 1),
                       ("c", prophy.u32, 2),
                       ("d", prophy.u64, 3)]
    return VariableLengthFieldsUnion
def test_simple_union(SimpleUnion):
    """A fresh union discriminates field 0; decode switches the active arm."""
    x = SimpleUnion()
    assert 0 == x.discriminator
    assert 0 == x.a
    assert 'a: 0\n' == str(x)
    assert b'\x00\x00\x00\x00\x00\x00\x00\x00' == x.encode(">")
    x.decode(b'\x02\x00\x00\x00\x10\x00\x00\x00', "<")
    assert 2 == x.discriminator
    assert 16 == x.c
    assert 'c: 16\n' == str(x)
    assert b'\x00\x00\x00\x02\x00\x00\x00\x10' == x.encode(">")
def test_simple_union_discriminator_accepts_ints_or_field_name_and_clears(SimpleUnion):
    """Setting the discriminator (by index or by name) zeroes the newly selected arm."""
    x = SimpleUnion()
    x.a = 42
    x.discriminator = 1
    assert 0 == x.b
    assert 'b: 0\n' == str(x)
    assert b'\x00\x00\x00\x01\x00\x00\x00\x00' == x.encode(">")
    x.discriminator = "c"
    assert 0 == x.c
    assert 'c: 0\n' == str(x)
    assert b'\x00\x00\x00\x02\x00\x00\x00\x00' == x.encode(">")
def test_union_copy_from(SimpleUnion):
    """copy_from replicates discriminator and value; foreign objects are rejected."""
    source = SimpleUnion()
    source.discriminator = 'b'
    source.b = 3
    target = SimpleUnion()
    target.discriminator = 'c'
    target.c = 10
    # Copying replaces both the discriminator and the discriminated value.
    target.copy_from(source)
    assert 1 == target.discriminator
    assert 3 == target.b
    # Self-copy leaves the union unchanged.
    target.copy_from(target)
    assert target == target
    assert 1 == target.discriminator
    assert 3 == target.b
    expected_message = "Parameter to copy_from must be instance of same class."
    with pytest.raises(TypeError, match=expected_message):
        target.copy_from(object())
    with pytest.raises(TypeError, match=expected_message):
        target.copy_from(SimpleUnion)
def test_simple_union_discriminator_does_not_clear_fields_if_set_to_same_value(SimpleUnion):
    """Re-selecting the already-active arm (by index or by name) keeps its value."""
    union = SimpleUnion()
    union.a = 42
    for selector in (0, "a"):
        union.discriminator = selector
        assert 42 == union.a
def test_union_nonsequential_discriminators():
    """Discriminator values need not be consecutive; 3, 10 and 55 all work."""
    class U(prophy.with_metaclass(prophy.union_generator, prophy.union)):
        _descriptor = [("a", prophy.u32, 3),
                       ("b", prophy.u32, 10),
                       ("c", prophy.u32, 55)]
    x = U()
    # The first declared field is the default, whatever its discriminator value.
    assert 3 == x.discriminator
    x.discriminator = 3
    assert 3 == x.discriminator
    assert 0 == x.a
    x.discriminator = 10
    assert 10 == x.discriminator
    assert 0 == x.b
    x.discriminator = 55
    assert 55 == x.discriminator
    assert 0 == x.c
    x.discriminator = "a"
    assert 3 == x.discriminator
    assert 0 == x.a
    x.discriminator = "b"
    assert 10 == x.discriminator
    assert 0 == x.b
    x.discriminator = "c"
    assert 55 == x.discriminator
    assert 0 == x.c
def test_union_encode_according_to_largest_field(VariableLengthFieldsUnion):
    """Encoded size stays fixed: 4-byte discriminator + padding + 8-byte arm slot."""
    x = VariableLengthFieldsUnion()
    x.discriminator = "a"
    x.a = 0x12
    assert b"\x00\x00\x00\x00\x00\x00\x00\x00" b"\x12\x00\x00\x00\x00\x00\x00\x00" == x.encode(">")
    assert b"\x00\x00\x00\x00\x00\x00\x00\x00" b"\x12\x00\x00\x00\x00\x00\x00\x00" == x.encode("<")
    x.discriminator = "b"
    x.b = 0x1234
    assert b"\x00\x00\x00\x01\x00\x00\x00\x00" b"\x12\x34\x00\x00\x00\x00\x00\x00" == x.encode(">")
    assert b"\x01\x00\x00\x00\x00\x00\x00\x00" b"\x34\x12\x00\x00\x00\x00\x00\x00" == x.encode("<")
    x.discriminator = "c"
    x.c = 0x12345678
    assert b"\x00\x00\x00\x02\x00\x00\x00\x00" b"\x12\x34\x56\x78\x00\x00\x00\x00" == x.encode(">")
    assert b"\x02\x00\x00\x00\x00\x00\x00\x00" b"\x78\x56\x34\x12\x00\x00\x00\x00" == x.encode("<")
    x.discriminator = "d"
    x.d = 0x123456789ABCDEF1
    assert b"\x00\x00\x00\x03\x00\x00\x00\x00" b"\x12\x34\x56\x78\x9a\xbc\xde\xf1" == x.encode(">")
    assert b"\x03\x00\x00\x00\x00\x00\x00\x00" b"\xf1\xde\xbc\x9a\x78\x56\x34\x12" == x.encode("<")
def test_union_decode_according_to_largest_field(VariableLengthFieldsUnion):
    """decode always consumes the full fixed-size representation (16 bytes)."""
    x = VariableLengthFieldsUnion()
    assert 16 == x.decode(b"\x00\x00\x00\x00\x00\x00\x00\x00" b"\x12\x00\x00\x00\x00\x00\x00\x00", ">")
    assert 0 == x.discriminator
    assert 0x12 == x.a
    assert 16 == x.decode(b"\x00\x00\x00\x00\x00\x00\x00\x00" b"\x12\x00\x00\x00\x00\x00\x00\x00", "<")
    assert 0 == x.discriminator
    assert 0x12 == x.a
    assert 16 == x.decode(b"\x00\x00\x00\x01\x00\x00\x00\x00" b"\x12\x34\x00\x00\x00\x00\x00\x00", ">")
    assert 1 == x.discriminator
    assert 0x1234 == x.b
    assert 16 == x.decode(b"\x01\x00\x00\x00\x00\x00\x00\x00" b"\x34\x12\x00\x00\x00\x00\x00\x00", "<")
    assert 1 == x.discriminator
    assert 0x1234 == x.b
    assert 16 == x.decode(b"\x00\x00\x00\x02\x00\x00\x00\x00" b"\x12\x34\x56\x78\x00\x00\x00\x00", ">")
    assert 2 == x.discriminator
    assert 0x12345678 == x.c
    assert 16 == x.decode(b"\x02\x00\x00\x00\x00\x00\x00\x00" b"\x78\x56\x34\x12\x00\x00\x00\x00", "<")
    assert 2 == x.discriminator
    assert 0x12345678 == x.c
    assert 16 == x.decode(b"\x00\x00\x00\x03\x00\x00\x00\x00" b"\x12\x34\x56\x78\x9a\xbc\xde\xf1", ">")
    assert 3 == x.discriminator
    assert 0x123456789ABCDEF1 == x.d
    assert 16 == x.decode(b"\x03\x00\x00\x00\x00\x00\x00\x00" b"\xf1\xde\xbc\x9a\x78\x56\x34\x12", "<")
    assert 3 == x.discriminator
    assert 0x123456789ABCDEF1 == x.d
def test_union_with_struct():
    """A struct may be a union arm; accessing the non-discriminated arm raises."""
    class S(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
        _descriptor = [("a", prophy.u32),
                       ("b", prophy.u32)]
    class U(prophy.with_metaclass(prophy.union_generator, prophy.union)):
        _descriptor = [("a", prophy.u16, 0),
                       ("b", S, 1)]
    x = U()
    assert x.encode(">") == b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
    x.a = 0x15
    assert x.encode(">") == b"\x00\x00\x00\x00\x00\x15\x00\x00\x00\x00\x00\x00"
    assert x.encode("<") == b"\x00\x00\x00\x00\x15\x00\x00\x00\x00\x00\x00\x00"
    x.discriminator = "b"
    assert x.encode(">") == b"\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00"
    assert x.encode("<") == b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
    x.b.a = 0x15
    assert x.encode(">") == b"\x00\x00\x00\x01\x00\x00\x00\x15\x00\x00\x00\x00"
    assert x.encode("<") == b"\x01\x00\x00\x00\x15\x00\x00\x00\x00\x00\x00\x00"
    x.b.b = 0x20
    assert x.encode(">") == b"\x00\x00\x00\x01\x00\x00\x00\x15\x00\x00\x00\x20"
    assert x.encode("<") == b"\x01\x00\x00\x00\x15\x00\x00\x00\x20\x00\x00\x00"
    x.decode(b"\x00\x00\x00\x00\x25\x00\x00\x00\x00\x00\x00\x00", "<")
    assert x.discriminator == 0
    assert x.a == 0x25
    x.decode(b"\x00\x00\x00\x00\x00\x25\x00\x00\x00\x00\x00\x00", ">")
    assert x.discriminator == 0
    assert x.a == 0x25
    # Reading the struct arm while the scalar arm is discriminated must fail.
    with pytest.raises(prophy.ProphyError) as err:
        x.b
    assert str(err.value) == 'currently field 0 is discriminated'
    x.decode(b"\x01\x00\x00\x00\x25\x00\x00\x00\x35\x00\x00\x00", "<")
    assert x.discriminator == 1
    assert x.b.a == 0x25
    assert x.b.b == 0x35
    x.decode(b"\x00\x00\x00\x01\x00\x00\x00\x25\x00\x00\x00\x35", ">")
    assert x.discriminator == 1
    assert x.b.a == 0x25
    assert x.b.b == 0x35
    # Composite arms cannot be assigned wholesale, only mutated field by field.
    with pytest.raises(prophy.ProphyError) as err:
        x.b = 'anythig'
    assert str(err.value) == 'assignment to composite field not allowed'
def test_union_discriminator_exceptions(VariableLengthFieldsUnion):
    """Access/assignment of a non-discriminated arm or unknown discriminator raises."""
    x = VariableLengthFieldsUnion()
    with pytest.raises(Exception) as e:
        x.b
    assert "currently field 0 is discriminated" == str(e.value)
    x.discriminator = 1
    x.b = 42
    with pytest.raises(Exception) as e:
        x.a
    assert "currently field 1 is discriminated" == str(e.value)
    with pytest.raises(Exception) as e:
        x.a = 1
    assert "currently field 1 is discriminated" == str(e.value)
    with pytest.raises(Exception) as e:
        x.discriminator = "xxx"
    assert "unknown discriminator: 'xxx'" == str(e.value)
    with pytest.raises(Exception) as e:
        x.discriminator = 666
    assert "unknown discriminator: 666" == str(e.value)
    # Failed operations must not corrupt the previously established state.
    assert 1 == x.discriminator
    assert 42 == x.b
def test_union_decode_exceptions(VariableLengthFieldsUnion):
    """decode rejects unknown discriminators, trailing bytes and truncated input."""
    x = VariableLengthFieldsUnion()
    with pytest.raises(Exception) as e:
        x.decode(b"\x00\x00\x00\xff", ">")
    assert "unknown discriminator: 255" == str(e.value)
    with pytest.raises(Exception) as e:
        x.decode(b"\x00\x00\x00\x02\x00\x00\x00\x00" b"\x12\x34\x56\x78\x00\x00\x00\x00\x00", ">")
    assert "not all bytes of VariableLengthFieldsUnion read" == str(e.value)
    with pytest.raises(Exception) as e:
        x.decode(b"\x00\x00\x00\x02\x00\x00\x00\x00" b"\x12\x34\x56\x78\x00\x00\x00", ">")
    assert "not enough bytes" == str(e.value)
def test_struct_with_union():
    """A union embedded in a struct aligns and encodes like any other member."""
    class UVarLen(prophy.with_metaclass(prophy.union_generator, prophy.union)):
        _descriptor = [("a", prophy.u32, 0),
                       ("b", prophy.u8, 1),
                       ("c", prophy.u8, 2)]
    class StructWithU(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
        _descriptor = [("a", prophy.u8),
                       ("b", UVarLen),
                       ("c", prophy.u32)]
    x = StructWithU()
    x.a = 1
    x.b.discriminator = 2
    x.b.c = 3
    x.c = 4
    assert b"\x01\x00\x00\x00" b"\x00\x00\x00\x02" b"\x03\x00\x00\x00" b"\x00\x00\x00\x04" == x.encode(">")
    assert b"\x01\x00\x00\x00" b"\x02\x00\x00\x00" b"\x03\x00\x00\x00" b"\x04\x00\x00\x00" == x.encode("<")
    x.decode(b"\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x20", ">")
    assert 10 == x.a
    assert 0 == x.b.discriminator
    assert 1024 == x.b.a
    assert 32 == x.c
    assert """\
a: 10
b {
  a: 1024
}
c: 32
""" == str(x)
def test_array_with_union():
    """Bound arrays may contain unions; each element keeps its own discriminator."""
    class UVarLen(prophy.with_metaclass(prophy.union_generator, prophy.union)):
        _descriptor = [("a", prophy.u16, 0),
                       ("b", prophy.u8, 1),
                       ("c", prophy.u8, 2)]
    class StructWithU(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
        _descriptor = [("a_len", prophy.u8),
                       ("a", prophy.array(UVarLen, bound="a_len"))]
    x = StructWithU()
    y = x.a.add()
    y.discriminator = "a"
    y.a = 1
    y = x.a.add()
    y.discriminator = "b"
    y.b = 2
    y = x.a.add()
    y.discriminator = "c"
    y.c = 3
    assert x.encode(">") == (b"\x03\x00\x00\x00"
                             b"\x00\x00\x00\x00"
                             b"\x00\x01\x00\x00"
                             b"\x00\x00\x00\x01"
                             b"\x02\x00\x00\x00"
                             b"\x00\x00\x00\x02"
                             b"\x03\x00\x00\x00")
    assert x.encode("<") == (b"\x03\x00\x00\x00"
                             b"\x00\x00\x00\x00"
                             b"\x01\x00\x00\x00"
                             b"\x01\x00\x00\x00"
                             b"\x02\x00\x00\x00"
                             b"\x02\x00\x00\x00"
                             b"\x03\x00\x00\x00")
    # Decoding fewer elements than currently present shrinks the array.
    x.decode(b"\x02\x00\x00\x00"
             b"\x00\x00\x00\x01"
             b"\x01\x00\x00\x00"
             b"\x00\x00\x00\x02"
             b"\x02\x00\x00\x00", ">")
    assert """\
a {
  b: 1
}
a {
  c: 2
}
""" == str(x)
def test_union_with_plain_struct():
    """A packed struct arm is padded up to the union's fixed arm slot."""
    class S(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
        _descriptor = [("a", prophy.u8),
                       ("b", prophy.u8)]
    class U(prophy.with_metaclass(prophy.union_generator, prophy.union)):
        _descriptor = [("a", prophy.u8, 0),
                       ("b", S, 1)]
    x = U()
    x.discriminator = 1
    x.b.a = 2
    x.b.b = 3
    assert b"\x00\x00\x00\x01\x02\x03\x00\x00" == x.encode(">")
    x.decode(b"\x00\x00\x00\x01\x06\x07\x00\x00", ">")
    assert 1 == x.discriminator
    assert 6 == x.b.a
    assert 7 == x.b.b
    assert """\
b {
  a: 6
  b: 7
}
""" == str(x)
def test_union_with_struct_with_array_and_bytes():
    """Fixed-size bytes/arrays are allowed as union arms when wrapped in a struct."""
    class S(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
        _descriptor = [("a", prophy.u8)]
    class SBytesSized(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
        _descriptor = [("a", prophy.bytes(size=3))]
    class SArraySized(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
        _descriptor = [("a", prophy.array(S, size=3))]
    class U(prophy.with_metaclass(prophy.union_generator, prophy.union)):
        _descriptor = [("a", SBytesSized, 0),
                       ("b", SArraySized, 1)]
    x = U()
    x.discriminator = 0
    x.a.a = b"abc"
    assert b"\x00\x00\x00\x00" b"abc\x00" == x.encode(">")
    x.discriminator = 1
    x.b.a[0].a = 3
    x.b.a[1].a = 4
    x.b.a[2].a = 5
    assert b"\x00\x00\x00\x01" b"\x03\x04\x05\x00" == x.encode(">")
    x.decode(b"\x00\x00\x00\x01" b"\x07\x08\x09\x00", ">")
    assert 1 == x.discriminator
    assert 7 == x.b.a[0].a
    assert 8 == x.b.a[1].a
    assert 9 == x.b.a[2].a
    assert """\
b {
  a {
    a: 7
  }
  a {
    a: 8
  }
  a {
    a: 9
  }
}
""" == str(x)
def test_union_with_nested_struct_and_union():
    """Unions nest inside unions and structs; copy_from replicates nested state."""
    class SInner(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
        _descriptor = [("a", prophy.u8)]
    class S(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
        _descriptor = [("a", SInner)]
    class UInner(prophy.with_metaclass(prophy.union_generator, prophy.union)):
        _descriptor = [("a", prophy.u8, 0),
                       ("b", prophy.u16, 1)]
    class U(prophy.with_metaclass(prophy.union_generator, prophy.union)):
        _descriptor = [("a", UInner, 0),
                       ("b", S, 1)]
    x = U()
    x.discriminator = 0
    x.a.discriminator = 1
    x.a.b = 0xFFF
    assert b"\x00\x00\x00\x00" b"\x00\x00\x00\x01" b"\x0f\xff\x00\x00" == x.encode(">")
    x = U()
    x.discriminator = 1
    x.b.a.a = 0xF
    assert b"\x00\x00\x00\x01" b"\x0f\x00\x00\x00\x00\x00\x00\x00" == x.encode(">")
    x.decode(b"\x00\x00\x00\x00" b"\x00\x00\x00\x01" b"\x00\x08\x00\x00", ">")
    assert 8 == x.a.b
    assert """\
a {
  b: 8
}
""" == str(x)
    y = U()
    y.copy_from(x)
    assert 0 == y.discriminator
    assert 1 == y.a.discriminator
    assert 8 == y.a.b
def test_union_with_typedef_and_enum():
    """Typedef'd scalars and enums work as union arms; enums print by name."""
    TU16 = prophy.u16
    class E(prophy.with_metaclass(prophy.enum_generator, prophy.enum)):
        _enumerators = [("E_1", 1),
                        ("E_2", 2),
                        ("E_3", 3)]
    class U(prophy.with_metaclass(prophy.union_generator, prophy.union)):
        _descriptor = [("a", TU16, 0),
                       ("b", E, 1)]
    x = U()
    x.discriminator = "a"
    x.a = 17
    assert b"\x00\x00\x00\x00\x00\x11\x00\x00" == x.encode(">")
    x.discriminator = "b"
    x.b = "E_2"
    assert b"\x00\x00\x00\x01\x00\x00\x00\x02" == x.encode(">")
    x.decode(b"\x00\x00\x00\x01\x00\x00\x00\x01", ">")
    assert 1 == x.discriminator
    assert 1 == x.b
    assert """\
b: E_1
""" == str(x)
def test_union_exceptions_with_dynamic_arrays_and_bytes():
    """Dynamic (greedy or length-prefixed) arrays and bytes are rejected in unions."""
    with pytest.raises(Exception) as e:
        class U1(prophy.with_metaclass(prophy.union_generator, prophy.union)):
            _descriptor = [("a", prophy.array(prophy.u32), 0)]
    assert "dynamic types not allowed in union" == str(e.value)
    with pytest.raises(Exception) as e:
        class U2(prophy.with_metaclass(prophy.union_generator, prophy.union)):
            _descriptor = [("a_len", prophy.u8, 0),
                           ("a", prophy.array(prophy.u32, bound="a_len"), 1)]
    assert "dynamic types not allowed in union" == str(e.value)
    with pytest.raises(Exception) as e:
        class U3(prophy.with_metaclass(prophy.union_generator, prophy.union)):
            _descriptor = [("a", prophy.bytes(), 0)]
    assert "dynamic types not allowed in union" == str(e.value)
    with pytest.raises(Exception) as e:
        class U4(prophy.with_metaclass(prophy.union_generator, prophy.union)):
            _descriptor = [("a_len", prophy.u8, 0),
                           ("a", prophy.bytes(bound="a_len"), 1)]
    assert "dynamic types not allowed in union" == str(e.value)
def test_union_exceptions_with_nested_limited_greedy_dynamic_arrays_and_bytes():
    """A dynamic type hidden behind nested structs still poisons the union."""
    with pytest.raises(Exception) as e:
        class S2(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
            _descriptor = [("a", prophy.array(prophy.u32))]
        class S(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
            _descriptor = [("a", S2)]
        class U(prophy.with_metaclass(prophy.union_generator, prophy.union)):
            _descriptor = [("a", S, 0)]
    assert "dynamic types not allowed in union" == str(e.value)
def test_union_with_limited_array_and_bytes():
    """Bound(+size) arrays/bytes and static member arrays are rejected in unions."""
    with pytest.raises(Exception) as e:
        class U1(prophy.with_metaclass(prophy.union_generator, prophy.union)):
            _descriptor = [("a_len", prophy.u8, 0),
                           ("a", prophy.bytes(bound="a_len", size=3), 1)]
    assert "bound array/bytes not allowed in union" == str(e.value)
    with pytest.raises(Exception) as e:
        class U2(prophy.with_metaclass(prophy.union_generator, prophy.union)):
            _descriptor = [("a_len", prophy.u8, 0),
                           ("a", prophy.array(prophy.u32, bound="a_len", size=3), 1)]
    assert "bound array/bytes not allowed in union" == str(e.value)
    with pytest.raises(Exception) as e:
        class U3(prophy.with_metaclass(prophy.union_generator, prophy.union)):
            _descriptor = [("a", prophy.array(prophy.u8, size=3), 0)]
    assert "static array not implemented in union" == str(e.value)
def test_union_with_static_bytes():
    """Fixed-size bytes are a valid union arm and print with an escaped repr."""
    class U(prophy.with_metaclass(prophy.union_generator, prophy.union)):
        _descriptor = [("a", prophy.bytes(size=3), 0)]
    x = U()
    assert b"\x00\x00\x00\x00" b"\x00\x00\x00\x00" == x.encode(">")
    x.decode(b"\x00\x00\x00\x00" b"\x01\x02\x03\x00", "<")
    assert """\
a: '\\x01\\x02\\x03'
""" == str(x)
def test_union_with_optional_exception():
    """optional fields are not allowed inside a union."""
    with pytest.raises(Exception) as e:
        class U(prophy.with_metaclass(prophy.union_generator, prophy.union)):
            _descriptor = [("a", prophy.u32, 0),
                           ("b", prophy.optional(prophy.u32), 1),
                           ("c", prophy.u32, 2)]
    assert "union with optional field disallowed" == str(e.value)
| import prophy
import pytest
@pytest.fixture(scope='session')
def SimpleUnion():
    """Session-scoped fixture: union of three u32 arms with discriminators 0, 1, 2."""
    class SimpleUnion(prophy.with_metaclass(prophy.union_generator, prophy.union)):
        _descriptor = [("a", prophy.u32, 0),
                       ("b", prophy.u32, 1),
                       ("c", prophy.u32, 2)]
    return SimpleUnion
@pytest.fixture(scope='session')
def VariableLengthFieldsUnion():
    """Session-scoped fixture: union whose arms range from u8 to u64.

    The encoded size of the union is dictated by its largest arm (u64).
    """
    class VariableLengthFieldsUnion(prophy.with_metaclass(prophy.union_generator, prophy.union)):
        _descriptor = [("a", prophy.u8, 0),
                       ("b", prophy.u16, 1),
                       ("c", prophy.u32, 2),
                       ("d", prophy.u64, 3)]
    return VariableLengthFieldsUnion
def test_simple_union(SimpleUnion):
    """A fresh union discriminates field 0; decode switches the active arm."""
    x = SimpleUnion()
    assert 0 == x.discriminator
    assert 0 == x.a
    assert 'a: 0\n' == str(x)
    assert b'\x00\x00\x00\x00\x00\x00\x00\x00' == x.encode(">")
    x.decode(b'\x02\x00\x00\x00\x10\x00\x00\x00', "<")
    assert 2 == x.discriminator
    assert 16 == x.c
    assert 'c: 16\n' == str(x)
    assert b'\x00\x00\x00\x02\x00\x00\x00\x10' == x.encode(">")
def test_simple_union_discriminator_accepts_ints_or_field_name_and_clears(SimpleUnion):
    """Setting the discriminator (by index or by name) zeroes the newly selected arm."""
    x = SimpleUnion()
    x.a = 42
    x.discriminator = 1
    assert 0 == x.b
    assert 'b: 0\n' == str(x)
    assert b'\x00\x00\x00\x01\x00\x00\x00\x00' == x.encode(">")
    x.discriminator = "c"
    assert 0 == x.c
    assert 'c: 0\n' == str(x)
    assert b'\x00\x00\x00\x02\x00\x00\x00\x00' == x.encode(">")
def test_union_copy_from(SimpleUnion):
    """copy_from replicates discriminator and value; foreign objects are rejected."""
    source = SimpleUnion()
    source.discriminator = 'b'
    source.b = 3
    target = SimpleUnion()
    target.discriminator = 'c'
    target.c = 10
    # Copying replaces both the discriminator and the discriminated value.
    target.copy_from(source)
    assert 1 == target.discriminator
    assert 3 == target.b
    # Self-copy leaves the union unchanged.
    target.copy_from(target)
    assert target == target
    assert 1 == target.discriminator
    assert 3 == target.b
    expected_message = "Parameter to copy_from must be instance of same class."
    with pytest.raises(TypeError, match=expected_message):
        target.copy_from(object())
    with pytest.raises(TypeError, match=expected_message):
        target.copy_from(SimpleUnion)
def test_simple_union_discriminator_does_not_clear_fields_if_set_to_same_value(SimpleUnion):
    """Re-selecting the already-active arm (by index or by name) keeps its value."""
    union = SimpleUnion()
    union.a = 42
    for selector in (0, "a"):
        union.discriminator = selector
        assert 42 == union.a
def test_union_nonsequential_discriminators():
    """Discriminator values need not be consecutive; 3, 10 and 55 all work."""
    class U(prophy.with_metaclass(prophy.union_generator, prophy.union)):
        _descriptor = [("a", prophy.u32, 3),
                       ("b", prophy.u32, 10),
                       ("c", prophy.u32, 55)]
    x = U()
    # The first declared field is the default, whatever its discriminator value.
    assert 3 == x.discriminator
    x.discriminator = 3
    assert 3 == x.discriminator
    assert 0 == x.a
    x.discriminator = 10
    assert 10 == x.discriminator
    assert 0 == x.b
    x.discriminator = 55
    assert 55 == x.discriminator
    assert 0 == x.c
    x.discriminator = "a"
    assert 3 == x.discriminator
    assert 0 == x.a
    x.discriminator = "b"
    assert 10 == x.discriminator
    assert 0 == x.b
    x.discriminator = "c"
    assert 55 == x.discriminator
    assert 0 == x.c
def test_union_encode_according_to_largest_field(VariableLengthFieldsUnion):
    """Encoded size stays fixed: 4-byte discriminator + padding + 8-byte arm slot."""
    x = VariableLengthFieldsUnion()
    x.discriminator = "a"
    x.a = 0x12
    assert b"\x00\x00\x00\x00\x00\x00\x00\x00" b"\x12\x00\x00\x00\x00\x00\x00\x00" == x.encode(">")
    assert b"\x00\x00\x00\x00\x00\x00\x00\x00" b"\x12\x00\x00\x00\x00\x00\x00\x00" == x.encode("<")
    x.discriminator = "b"
    x.b = 0x1234
    assert b"\x00\x00\x00\x01\x00\x00\x00\x00" b"\x12\x34\x00\x00\x00\x00\x00\x00" == x.encode(">")
    assert b"\x01\x00\x00\x00\x00\x00\x00\x00" b"\x34\x12\x00\x00\x00\x00\x00\x00" == x.encode("<")
    x.discriminator = "c"
    x.c = 0x12345678
    assert b"\x00\x00\x00\x02\x00\x00\x00\x00" b"\x12\x34\x56\x78\x00\x00\x00\x00" == x.encode(">")
    assert b"\x02\x00\x00\x00\x00\x00\x00\x00" b"\x78\x56\x34\x12\x00\x00\x00\x00" == x.encode("<")
    x.discriminator = "d"
    x.d = 0x123456789ABCDEF1
    assert b"\x00\x00\x00\x03\x00\x00\x00\x00" b"\x12\x34\x56\x78\x9a\xbc\xde\xf1" == x.encode(">")
    assert b"\x03\x00\x00\x00\x00\x00\x00\x00" b"\xf1\xde\xbc\x9a\x78\x56\x34\x12" == x.encode("<")
def test_union_decode_according_to_largest_field(VariableLengthFieldsUnion):
    """decode always consumes the full fixed-size representation (16 bytes)."""
    x = VariableLengthFieldsUnion()
    assert 16 == x.decode(b"\x00\x00\x00\x00\x00\x00\x00\x00" b"\x12\x00\x00\x00\x00\x00\x00\x00", ">")
    assert 0 == x.discriminator
    assert 0x12 == x.a
    assert 16 == x.decode(b"\x00\x00\x00\x00\x00\x00\x00\x00" b"\x12\x00\x00\x00\x00\x00\x00\x00", "<")
    assert 0 == x.discriminator
    assert 0x12 == x.a
    assert 16 == x.decode(b"\x00\x00\x00\x01\x00\x00\x00\x00" b"\x12\x34\x00\x00\x00\x00\x00\x00", ">")
    assert 1 == x.discriminator
    assert 0x1234 == x.b
    assert 16 == x.decode(b"\x01\x00\x00\x00\x00\x00\x00\x00" b"\x34\x12\x00\x00\x00\x00\x00\x00", "<")
    assert 1 == x.discriminator
    assert 0x1234 == x.b
    assert 16 == x.decode(b"\x00\x00\x00\x02\x00\x00\x00\x00" b"\x12\x34\x56\x78\x00\x00\x00\x00", ">")
    assert 2 == x.discriminator
    assert 0x12345678 == x.c
    assert 16 == x.decode(b"\x02\x00\x00\x00\x00\x00\x00\x00" b"\x78\x56\x34\x12\x00\x00\x00\x00", "<")
    assert 2 == x.discriminator
    assert 0x12345678 == x.c
    assert 16 == x.decode(b"\x00\x00\x00\x03\x00\x00\x00\x00" b"\x12\x34\x56\x78\x9a\xbc\xde\xf1", ">")
    assert 3 == x.discriminator
    assert 0x123456789ABCDEF1 == x.d
    assert 16 == x.decode(b"\x03\x00\x00\x00\x00\x00\x00\x00" b"\xf1\xde\xbc\x9a\x78\x56\x34\x12", "<")
    assert 3 == x.discriminator
    assert 0x123456789ABCDEF1 == x.d
def test_union_with_struct():
    """A struct may be a union arm; accessing the non-discriminated arm raises."""
    class S(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
        _descriptor = [("a", prophy.u32),
                       ("b", prophy.u32)]
    class U(prophy.with_metaclass(prophy.union_generator, prophy.union)):
        _descriptor = [("a", prophy.u16, 0),
                       ("b", S, 1)]
    x = U()
    assert x.encode(">") == b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
    x.a = 0x15
    assert x.encode(">") == b"\x00\x00\x00\x00\x00\x15\x00\x00\x00\x00\x00\x00"
    assert x.encode("<") == b"\x00\x00\x00\x00\x15\x00\x00\x00\x00\x00\x00\x00"
    x.discriminator = "b"
    assert x.encode(">") == b"\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00"
    assert x.encode("<") == b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
    x.b.a = 0x15
    assert x.encode(">") == b"\x00\x00\x00\x01\x00\x00\x00\x15\x00\x00\x00\x00"
    assert x.encode("<") == b"\x01\x00\x00\x00\x15\x00\x00\x00\x00\x00\x00\x00"
    x.b.b = 0x20
    assert x.encode(">") == b"\x00\x00\x00\x01\x00\x00\x00\x15\x00\x00\x00\x20"
    assert x.encode("<") == b"\x01\x00\x00\x00\x15\x00\x00\x00\x20\x00\x00\x00"
    x.decode(b"\x00\x00\x00\x00\x25\x00\x00\x00\x00\x00\x00\x00", "<")
    assert x.discriminator == 0
    assert x.a == 0x25
    x.decode(b"\x00\x00\x00\x00\x00\x25\x00\x00\x00\x00\x00\x00", ">")
    assert x.discriminator == 0
    assert x.a == 0x25
    # Reading the struct arm while the scalar arm is discriminated must fail.
    with pytest.raises(prophy.ProphyError) as err:
        x.b
    assert str(err.value) == 'currently field 0 is discriminated'
    x.decode(b"\x01\x00\x00\x00\x25\x00\x00\x00\x35\x00\x00\x00", "<")
    assert x.discriminator == 1
    assert x.b.a == 0x25
    assert x.b.b == 0x35
    x.decode(b"\x00\x00\x00\x01\x00\x00\x00\x25\x00\x00\x00\x35", ">")
    assert x.discriminator == 1
    assert x.b.a == 0x25
    assert x.b.b == 0x35
    # Composite arms cannot be assigned wholesale, only mutated field by field.
    with pytest.raises(prophy.ProphyError) as err:
        x.b = 'anythig'
    assert str(err.value) == 'assignment to composite field not allowed'
def test_union_discriminator_exceptions(VariableLengthFieldsUnion):
    """Access/assignment of a non-discriminated arm or unknown discriminator raises."""
    x = VariableLengthFieldsUnion()
    with pytest.raises(Exception) as e:
        x.b
    assert "currently field 0 is discriminated" == str(e.value)
    x.discriminator = 1
    x.b = 42
    with pytest.raises(Exception) as e:
        x.a
    assert "currently field 1 is discriminated" == str(e.value)
    with pytest.raises(Exception) as e:
        x.a = 1
    assert "currently field 1 is discriminated" == str(e.value)
    with pytest.raises(Exception) as e:
        x.discriminator = "xxx"
    assert "unknown discriminator: 'xxx'" == str(e.value)
    with pytest.raises(Exception) as e:
        x.discriminator = 666
    assert "unknown discriminator: 666" == str(e.value)
    # Failed operations must not corrupt the previously established state.
    assert 1 == x.discriminator
    assert 42 == x.b
def test_union_decode_exceptions(VariableLengthFieldsUnion):
    """decode rejects unknown discriminators, trailing bytes and truncated input."""
    x = VariableLengthFieldsUnion()
    with pytest.raises(Exception) as e:
        x.decode(b"\x00\x00\x00\xff", ">")
    assert "unknown discriminator: 255" == str(e.value)
    with pytest.raises(Exception) as e:
        x.decode(b"\x00\x00\x00\x02\x00\x00\x00\x00" b"\x12\x34\x56\x78\x00\x00\x00\x00\x00", ">")
    assert "not all bytes of VariableLengthFieldsUnion read" == str(e.value)
    with pytest.raises(Exception) as e:
        x.decode(b"\x00\x00\x00\x02\x00\x00\x00\x00" b"\x12\x34\x56\x78\x00\x00\x00", ">")
    assert "not enough bytes" == str(e.value)
def test_struct_with_union():
    """A union embedded in a struct aligns and encodes like any other member."""
    class UVarLen(prophy.with_metaclass(prophy.union_generator, prophy.union)):
        _descriptor = [("a", prophy.u32, 0),
                       ("b", prophy.u8, 1),
                       ("c", prophy.u8, 2)]
    class StructWithU(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
        _descriptor = [("a", prophy.u8),
                       ("b", UVarLen),
                       ("c", prophy.u32)]
    x = StructWithU()
    x.a = 1
    x.b.discriminator = 2
    x.b.c = 3
    x.c = 4
    assert b"\x01\x00\x00\x00" b"\x00\x00\x00\x02" b"\x03\x00\x00\x00" b"\x00\x00\x00\x04" == x.encode(">")
    assert b"\x01\x00\x00\x00" b"\x02\x00\x00\x00" b"\x03\x00\x00\x00" b"\x04\x00\x00\x00" == x.encode("<")
    x.decode(b"\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x20", ">")
    assert 10 == x.a
    assert 0 == x.b.discriminator
    assert 1024 == x.b.a
    assert 32 == x.c
    assert """\
a: 10
b {
  a: 1024
}
c: 32
""" == str(x)
def test_array_with_union():
    """Bound arrays may contain unions; each element keeps its own discriminator."""
    class UVarLen(prophy.with_metaclass(prophy.union_generator, prophy.union)):
        _descriptor = [("a", prophy.u16, 0),
                       ("b", prophy.u8, 1),
                       ("c", prophy.u8, 2)]
    class StructWithU(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
        _descriptor = [("a_len", prophy.u8),
                       ("a", prophy.array(UVarLen, bound="a_len"))]
    x = StructWithU()
    y = x.a.add()
    y.discriminator = "a"
    y.a = 1
    y = x.a.add()
    y.discriminator = "b"
    y.b = 2
    y = x.a.add()
    y.discriminator = "c"
    y.c = 3
    assert x.encode(">") == (b"\x03\x00\x00\x00"
                             b"\x00\x00\x00\x00"
                             b"\x00\x01\x00\x00"
                             b"\x00\x00\x00\x01"
                             b"\x02\x00\x00\x00"
                             b"\x00\x00\x00\x02"
                             b"\x03\x00\x00\x00")
    assert x.encode("<") == (b"\x03\x00\x00\x00"
                             b"\x00\x00\x00\x00"
                             b"\x01\x00\x00\x00"
                             b"\x01\x00\x00\x00"
                             b"\x02\x00\x00\x00"
                             b"\x02\x00\x00\x00"
                             b"\x03\x00\x00\x00")
    # Decoding fewer elements than currently present shrinks the array.
    x.decode(b"\x02\x00\x00\x00"
             b"\x00\x00\x00\x01"
             b"\x01\x00\x00\x00"
             b"\x00\x00\x00\x02"
             b"\x02\x00\x00\x00", ">")
    assert """\
a {
  b: 1
}
a {
  c: 2
}
""" == str(x)
def test_union_with_plain_struct():
    """A packed struct arm is padded up to the union's fixed arm slot."""
    class S(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
        _descriptor = [("a", prophy.u8),
                       ("b", prophy.u8)]
    class U(prophy.with_metaclass(prophy.union_generator, prophy.union)):
        _descriptor = [("a", prophy.u8, 0),
                       ("b", S, 1)]
    x = U()
    x.discriminator = 1
    x.b.a = 2
    x.b.b = 3
    assert b"\x00\x00\x00\x01\x02\x03\x00\x00" == x.encode(">")
    x.decode(b"\x00\x00\x00\x01\x06\x07\x00\x00", ">")
    assert 1 == x.discriminator
    assert 6 == x.b.a
    assert 7 == x.b.b
    assert """\
b {
  a: 6
  b: 7
}
""" == str(x)
def test_union_with_struct_with_array_and_bytes():
    """Fixed-size bytes/arrays are allowed as union arms when wrapped in a struct."""
    class S(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
        _descriptor = [("a", prophy.u8)]
    class SBytesSized(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
        _descriptor = [("a", prophy.bytes(size=3))]
    class SArraySized(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
        _descriptor = [("a", prophy.array(S, size=3))]
    class U(prophy.with_metaclass(prophy.union_generator, prophy.union)):
        _descriptor = [("a", SBytesSized, 0),
                       ("b", SArraySized, 1)]
    x = U()
    x.discriminator = 0
    x.a.a = b"abc"
    assert b"\x00\x00\x00\x00" b"abc\x00" == x.encode(">")
    x.discriminator = 1
    x.b.a[0].a = 3
    x.b.a[1].a = 4
    x.b.a[2].a = 5
    assert b"\x00\x00\x00\x01" b"\x03\x04\x05\x00" == x.encode(">")
    x.decode(b"\x00\x00\x00\x01" b"\x07\x08\x09\x00", ">")
    assert 1 == x.discriminator
    assert 7 == x.b.a[0].a
    assert 8 == x.b.a[1].a
    assert 9 == x.b.a[2].a
    assert """\
b {
  a {
    a: 7
  }
  a {
    a: 8
  }
  a {
    a: 9
  }
}
""" == str(x)
def test_union_with_nested_struct_and_union():
class SInner(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
_descriptor = [("a", prophy.u8)]
class S(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
_descriptor = [("a", SInner)]
class UInner(prophy.with_metaclass(prophy.union_generator, prophy.union)):
_descriptor = [("a", prophy.u8, 0),
("b", prophy.u16, 1)]
class U(prophy.with_metaclass(prophy.union_generator, prophy.union)):
_descriptor = [("a", UInner, 0),
("b", S, 1)]
x = U()
x.discriminator = 0
x.a.discriminator = 1
x.a.b = 0xFFF
assert b"\x00\x00\x00\x00" b"\x00\x00\x00\x01" b"\x0f\xff\x00\x00" == x.encode(">")
x = U()
x.discriminator = 1
x.b.a.a = 0xF
assert b"\x00\x00\x00\x01" b"\x0f\x00\x00\x00\x00\x00\x00\x00" == x.encode(">")
x.decode(b"\x00\x00\x00\x00" b"\x00\x00\x00\x01" b"\x00\x08\x00\x00", ">")
assert 8 == x.a.b
assert """\
a {
b: 8
}
""" == str(x)
y = U()
y.copy_from(x)
assert 0 == y.discriminator
assert 1 == y.a.discriminator
assert 8 == y.a.b
def test_union_with_typedef_and_enum():
TU16 = prophy.u16
class E(prophy.with_metaclass(prophy.enum_generator, prophy.enum)):
_enumerators = [("E_1", 1),
("E_2", 2),
("E_3", 3)]
class U(prophy.with_metaclass(prophy.union_generator, prophy.union)):
_descriptor = [("a", TU16, 0),
("b", E, 1)]
x = U()
x.discriminator = "a"
x.a = 17
assert b"\x00\x00\x00\x00\x00\x11\x00\x00" == x.encode(">")
x.discriminator = "b"
x.b = "E_2"
assert b"\x00\x00\x00\x01\x00\x00\x00\x02" == x.encode(">")
x.decode(b"\x00\x00\x00\x01\x00\x00\x00\x01", ">")
assert 1 == x.discriminator
assert 1 == x.b
assert """\
b: E_1
""" == str(x)
def test_union_exceptions_with_dynamic_arrays_and_bytes():
with pytest.raises(Exception) as e:
class U1(prophy.with_metaclass(prophy.union_generator, prophy.union)):
_descriptor = [("a", prophy.array(prophy.u32), 0)]
assert "dynamic types not allowed in union" == str(e.value)
with pytest.raises(Exception) as e:
class U2(prophy.with_metaclass(prophy.union_generator, prophy.union)):
_descriptor = [("a_len", prophy.u8, 0),
("a", prophy.array(prophy.u32, bound="a_len"), 1)]
assert "dynamic types not allowed in union" == str(e.value)
with pytest.raises(Exception) as e:
class U3(prophy.with_metaclass(prophy.union_generator, prophy.union)):
_descriptor = [("a", prophy.bytes(), 0)]
assert "dynamic types not allowed in union" == str(e.value)
with pytest.raises(Exception) as e:
class U4(prophy.with_metaclass(prophy.union_generator, prophy.union)):
_descriptor = [("a_len", prophy.u8, 0),
("a", prophy.bytes(bound="a_len"), 1)]
assert "dynamic types not allowed in union" == str(e.value)
def test_union_exceptions_with_nested_limited_greedy_dynamic_arrays_and_bytes():
with pytest.raises(Exception) as e:
class S2(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
_descriptor = [("a", prophy.array(prophy.u32))]
class S(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
_descriptor = [("a", S2)]
class U(prophy.with_metaclass(prophy.union_generator, prophy.union)):
_descriptor = [("a", S, 0)]
assert "dynamic types not allowed in union" == str(e.value)
def test_union_with_limited_array_and_bytes():
with pytest.raises(Exception) as e:
class U1(prophy.with_metaclass(prophy.union_generator, prophy.union)):
_descriptor = [("a_len", prophy.u8, 0),
("a", prophy.bytes(bound="a_len", size=3), 1)]
assert "bound array/bytes not allowed in union" == str(e.value)
with pytest.raises(Exception) as e:
class U2(prophy.with_metaclass(prophy.union_generator, prophy.union)):
_descriptor = [("a_len", prophy.u8, 0),
("a", prophy.array(prophy.u32, bound="a_len", size=3), 1)]
assert "bound array/bytes not allowed in union" == str(e.value)
with pytest.raises(Exception) as e:
class U3(prophy.with_metaclass(prophy.union_generator, prophy.union)):
_descriptor = [("a", prophy.array(prophy.u8, size=3), 0)]
assert "static array not implemented in union" == str(e.value)
def test_union_with_static_bytes():
class U(prophy.with_metaclass(prophy.union_generator, prophy.union)):
_descriptor = [("a", prophy.bytes(size=3), 0)]
x = U()
assert b"\x00\x00\x00\x00" b"\x00\x00\x00\x00" == x.encode(">")
x.decode(b"\x00\x00\x00\x00" b"\x01\x02\x03\x00", "<")
assert """\
a: '\\x01\\x02\\x03'
""" == str(x)
def test_union_with_optional_exception():
with pytest.raises(Exception) as e:
class U(prophy.with_metaclass(prophy.union_generator, prophy.union)):
_descriptor = [("a", prophy.u32, 0),
("b", prophy.optional(prophy.u32), 1),
("c", prophy.u32, 2)]
assert "union with optional field disallowed" == str(e.value)
| gl | 0.373091 | \ a: 10 b { a: 1024 } c: 32 \ a { b: 1 } a { c: 2 } \ b { a: 6 b: 7 } \ b { a { a: 7 } a { a: 8 } a { a: 9 } } \ a { b: 8 } \ b: E_1 \ a: '\\x01\\x02\\x03' | 2.317574 | 2 |
AppLatencyExperiments/main.py | andres0sorio/AODS4AWork | 1 | 6621131 | from src.Experiment import Experiment
if __name__ == '__main__':
"""
"""
experiment = Experiment()
experiment.run()
| from src.Experiment import Experiment
if __name__ == '__main__':
"""
"""
experiment = Experiment()
experiment.run()
| none | 1 | 1.202916 | 1 | |
pass.py | itzjustalan/pass | 2 | 6621132 | <gh_stars>1-10
import random # for random obviously
import subprocess #for clipboard also apparently XD
#alan k john
caps = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
small = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r','s', 't', 'u', 'v', 'w', 'x', 'y', 'z']
specials = ['#','(',')','?','[',']','`','~',',',';',':','$','%','^','&','*','=','+','/','*','-','?','!','@']
numbs = ['1','2','3','4','5','6','7','8','9','0']
#numbs = list(range(0,10)) #does the same thing
splespce = caps + small + specials + numbs
#print(*splespce, sep='') #to print it continuously like a string
# initialising passw
passw = []
i=0
for b in range(1,17): #cuz we want 16 digits
# print(b)
a = random.choice(splespce) #pulls a random element from the list
# print(a)
passw.insert(i,a) #can also try .append to simple add to the end
i=i+1 # i did it this way because i didnt understand how python for loops worked yet
#print(*passw, sep='') #if you want to see the list
# list to string #to convert the list into a string
str = ""
for j in passw:
str += j # short hand for str=str+j
# to clipboard
data = str #not necessary you can just type str instead of data below
subprocess.run("clip", universal_newlines=True, input=data) #to push the string into the clip board so now try ctrl+v to paste it anywhere
| import random # for random obviously
import subprocess #for clipboard also apparently XD
#alan k john
caps = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
small = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r','s', 't', 'u', 'v', 'w', 'x', 'y', 'z']
specials = ['#','(',')','?','[',']','`','~',',',';',':','$','%','^','&','*','=','+','/','*','-','?','!','@']
numbs = ['1','2','3','4','5','6','7','8','9','0']
#numbs = list(range(0,10)) #does the same thing
splespce = caps + small + specials + numbs
#print(*splespce, sep='') #to print it continuously like a string
# initialising passw
passw = []
i=0
for b in range(1,17): #cuz we want 16 digits
# print(b)
a = random.choice(splespce) #pulls a random element from the list
# print(a)
passw.insert(i,a) #can also try .append to simple add to the end
i=i+1 # i did it this way because i didnt understand how python for loops worked yet
#print(*passw, sep='') #if you want to see the list
# list to string #to convert the list into a string
str = ""
for j in passw:
str += j # short hand for str=str+j
# to clipboard
data = str #not necessary you can just type str instead of data below
subprocess.run("clip", universal_newlines=True, input=data) #to push the string into the clip board so now try ctrl+v to paste it anywhere | en | 0.799199 | # for random obviously #for clipboard also apparently XD #alan k john #numbs = list(range(0,10)) #does the same thing #print(*splespce, sep='') #to print it continuously like a string # initialising passw #cuz we want 16 digits # print(b) #pulls a random element from the list # print(a) #can also try .append to simple add to the end # i did it this way because i didnt understand how python for loops worked yet #print(*passw, sep='') #if you want to see the list # list to string #to convert the list into a string # short hand for str=str+j # to clipboard #not necessary you can just type str instead of data below #to push the string into the clip board so now try ctrl+v to paste it anywhere | 3.117918 | 3 |
tests/test_LookupTable2.py | jwilso/sst-core | 77 | 6621133 | <filename>tests/test_LookupTable2.py
import sst
import inspect, os, sys
nitems = 10
params = dict({
"num_entities" : nitems
})
for i in range(nitems):
comp = sst.Component("Table Comp %d"%i, "coreTestElement.simpleLookupTableComponent")
comp.addParams(params)
comp.addParam("myid", i)
| <filename>tests/test_LookupTable2.py
import sst
import inspect, os, sys
nitems = 10
params = dict({
"num_entities" : nitems
})
for i in range(nitems):
comp = sst.Component("Table Comp %d"%i, "coreTestElement.simpleLookupTableComponent")
comp.addParams(params)
comp.addParam("myid", i)
| none | 1 | 2.22717 | 2 | |
coh-metrix_3/09_tangozyouhou.py | Lee-guccii/ExtensiveReading_YL_Estimation | 0 | 6621134 | <reponame>Lee-guccii/ExtensiveReading_YL_Estimation
import nltk
import numpy as np
import re
from scipy import stats
from scipy.stats import spearmanr
import spacy
from functools import lru_cache
import en_core_web_lg
nlp = en_core_web_lg.load()
#親やすさdicを作成する
###############
#textをnew_listに読み込む
with open("tango_sitasimiyasusa_list.txt", "r", encoding="utf-8") as f:
list = f.readlines()
new_list = []
for i in list:
word = i.split()
new_list.append(word)
#####################################
#使いたいパラメータの数字を取り出す→相関の確認
#単語名,親やすさ(100:親しみがない,700:親しみがある)
sitasimi_tango={}
count_level = 1
while count_level < 1945:
#値を取り出す
tango_list = new_list[count_level][0] #単語名
suuti_list = new_list[count_level][5] #数値
#文字列を数値に変換
y = round(float(suuti_list)*100)
sitasimi_tango[tango_list] = y
count_level+=1
with open('book/book33.txt', 'r') as f:
#改行("\n")を""に変換
#text_list = f.read().splitlines()
text = f.read()
#正規表現で"を削除
text = re.sub('"', '', text)
morph = nltk.word_tokenize(text)
pos = nltk.pos_tag(morph)
#[0]=元の文字,[1]=品詞タグ
kazu=0
hinsi=[]#品詞の名前
hinsi_kosuu=[]#品詞の個数.配列は品詞の名前と対応している.
list_bangou=0
kigou_reigai=["=","+","'"]#総単語数に数えない記号
kigou=0
#内容語の品詞
naiyougo_list=["NN","NNS", "NNP", "NNPS", "VB", "VBN", "VBP", "VBZ","JJ", "JJR", "JJS", "RB", "RBR", "RBS"]
#naiyougo_list=["NN", "VB", "JJ", "RB"] #名詞,動詞,形容詞,副詞
sent=""
wariai=[]
while kazu < len(pos):
#もし内容語なら
if pos[kazu][1] in naiyougo_list:
a = nlp(pos[kazu][0])
sent=(a.lemma_)
print(sent,pos[kazu][0].lower)
#親やすさdicに入っている単語なら
if pos[kazu][0].lower() in sitasimi_tango:
wariai.append(sitasimi_tango[pos[kazu][0].lower()])
print(pos[kazu][0].lower(),sitasimi_tango[pos[kazu][0].lower()])
kazu+=1
#結果
print(sum(wariai))
print(len(wariai))
hasseiritu = sum(wariai)/len(wariai)
print(hasseiritu)
#476.452 438.136
#418.619 429.575
| import nltk
import numpy as np
import re
from scipy import stats
from scipy.stats import spearmanr
import spacy
from functools import lru_cache
import en_core_web_lg
nlp = en_core_web_lg.load()
#親やすさdicを作成する
###############
#textをnew_listに読み込む
with open("tango_sitasimiyasusa_list.txt", "r", encoding="utf-8") as f:
list = f.readlines()
new_list = []
for i in list:
word = i.split()
new_list.append(word)
#####################################
#使いたいパラメータの数字を取り出す→相関の確認
#単語名,親やすさ(100:親しみがない,700:親しみがある)
sitasimi_tango={}
count_level = 1
while count_level < 1945:
#値を取り出す
tango_list = new_list[count_level][0] #単語名
suuti_list = new_list[count_level][5] #数値
#文字列を数値に変換
y = round(float(suuti_list)*100)
sitasimi_tango[tango_list] = y
count_level+=1
with open('book/book33.txt', 'r') as f:
#改行("\n")を""に変換
#text_list = f.read().splitlines()
text = f.read()
#正規表現で"を削除
text = re.sub('"', '', text)
morph = nltk.word_tokenize(text)
pos = nltk.pos_tag(morph)
#[0]=元の文字,[1]=品詞タグ
kazu=0
hinsi=[]#品詞の名前
hinsi_kosuu=[]#品詞の個数.配列は品詞の名前と対応している.
list_bangou=0
kigou_reigai=["=","+","'"]#総単語数に数えない記号
kigou=0
#内容語の品詞
naiyougo_list=["NN","NNS", "NNP", "NNPS", "VB", "VBN", "VBP", "VBZ","JJ", "JJR", "JJS", "RB", "RBR", "RBS"]
#naiyougo_list=["NN", "VB", "JJ", "RB"] #名詞,動詞,形容詞,副詞
sent=""
wariai=[]
while kazu < len(pos):
#もし内容語なら
if pos[kazu][1] in naiyougo_list:
a = nlp(pos[kazu][0])
sent=(a.lemma_)
print(sent,pos[kazu][0].lower)
#親やすさdicに入っている単語なら
if pos[kazu][0].lower() in sitasimi_tango:
wariai.append(sitasimi_tango[pos[kazu][0].lower()])
print(pos[kazu][0].lower(),sitasimi_tango[pos[kazu][0].lower()])
kazu+=1
#結果
print(sum(wariai))
print(len(wariai))
hasseiritu = sum(wariai)/len(wariai)
print(hasseiritu)
#476.452 438.136
#418.619 429.575 | ja | 0.945648 | #親やすさdicを作成する ############### #textをnew_listに読み込む ##################################### #使いたいパラメータの数字を取り出す→相関の確認 #単語名,親やすさ(100:親しみがない,700:親しみがある) #値を取り出す #単語名 #数値 #文字列を数値に変換 #改行("\n")を""に変換 #text_list = f.read().splitlines() #正規表現で"を削除 #[0]=元の文字,[1]=品詞タグ #品詞の名前 #品詞の個数.配列は品詞の名前と対応している. #総単語数に数えない記号 #内容語の品詞 #naiyougo_list=["NN", "VB", "JJ", "RB"] #名詞,動詞,形容詞,副詞 #もし内容語なら #親やすさdicに入っている単語なら #結果 #476.452 438.136 #418.619 429.575 | 2.818258 | 3 |
piston/post.py | ausbitbank/piston | 0 | 6621135 | <reponame>ausbitbank/piston
import warnings
from steem.post import Post as PostSteem
from steem.post import (
VotingInvalidOnArchivedPost
)
class Post(PostSteem):
def __init__(self, *args, **kwargs):
warnings.warn(
"[DeprecationWarning] Please replace 'import piston.post' by 'import steem.post'"
)
super(Post, self).__init__(*args, **kwargs)
| import warnings
from steem.post import Post as PostSteem
from steem.post import (
VotingInvalidOnArchivedPost
)
class Post(PostSteem):
def __init__(self, *args, **kwargs):
warnings.warn(
"[DeprecationWarning] Please replace 'import piston.post' by 'import steem.post'"
)
super(Post, self).__init__(*args, **kwargs) | none | 1 | 2.224481 | 2 | |
flow/transformations.py | li012589/NeuralWavelet | 28 | 6621136 | <filename>flow/transformations.py
import torch
from torch import nn
from .flow import Flow
class ScalingNshifting(Flow):
def __init__(self, scaling, shifting, name="ScalingNshifting"):
super(ScalingNshifting, self).__init__(None, name)
self.scaling = nn.Parameter(torch.tensor(scaling).float(), requires_grad=False)
self.shifting = nn.Parameter(torch.tensor(shifting).float(), requires_grad=False)
def inverse(self, y):
# to decimal
return (y + self.shifting) * (1 / self.scaling), y.new_zeros(y.shape[0])
def inverse_(self, y):
return (y + self.shifting) * (1 / self.scaling)
def forward(self, z):
# to int
return z * (self.scaling) - self.shifting, z.new_zeros(z.shape[0])
def forward_(self, z):
return z * (self.scaling) - self.shifting | <filename>flow/transformations.py
import torch
from torch import nn
from .flow import Flow
class ScalingNshifting(Flow):
def __init__(self, scaling, shifting, name="ScalingNshifting"):
super(ScalingNshifting, self).__init__(None, name)
self.scaling = nn.Parameter(torch.tensor(scaling).float(), requires_grad=False)
self.shifting = nn.Parameter(torch.tensor(shifting).float(), requires_grad=False)
def inverse(self, y):
# to decimal
return (y + self.shifting) * (1 / self.scaling), y.new_zeros(y.shape[0])
def inverse_(self, y):
return (y + self.shifting) * (1 / self.scaling)
def forward(self, z):
# to int
return z * (self.scaling) - self.shifting, z.new_zeros(z.shape[0])
def forward_(self, z):
return z * (self.scaling) - self.shifting | en | 0.842465 | # to decimal # to int | 2.757917 | 3 |
command/main.py | Canon11/dj-cref | 1 | 6621137 | <filename>command/main.py<gh_stars>1-10
import argparse
from .runner import (
View, FormView, CreateView, UpdateView, DetailView,
DeleteView, TemplateView, ListView, RedirectView
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-type', type=str, help='the type of django generic view.')
parser.add_argument(
'--initial', action='store_true', help='either import or not'
)
args = parser.parse_args()
if args.type:
run(args.type, args.initial)
else:
print('args.type is undefined.')
def run(t, exists_initial):
if t == 'View':
view = View()
elif t == 'FormView':
view = FormView()
elif t == 'CreateView':
view = CreateView()
elif t == 'UpdateView':
view = UpdateView()
elif t == 'DetailView':
view = DetailView()
elif t == 'DeleteView':
view = DeleteView()
elif t == 'TemplateView':
view = TemplateView()
elif t == 'ListView':
view = ListView()
elif t == 'RedirectView':
view = RedirectView()
else:
print('type is invalid.')
return
view.output(exists_initial)
if __name__ == '__main__':
main()
| <filename>command/main.py<gh_stars>1-10
import argparse
from .runner import (
View, FormView, CreateView, UpdateView, DetailView,
DeleteView, TemplateView, ListView, RedirectView
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-type', type=str, help='the type of django generic view.')
parser.add_argument(
'--initial', action='store_true', help='either import or not'
)
args = parser.parse_args()
if args.type:
run(args.type, args.initial)
else:
print('args.type is undefined.')
def run(t, exists_initial):
if t == 'View':
view = View()
elif t == 'FormView':
view = FormView()
elif t == 'CreateView':
view = CreateView()
elif t == 'UpdateView':
view = UpdateView()
elif t == 'DetailView':
view = DetailView()
elif t == 'DeleteView':
view = DeleteView()
elif t == 'TemplateView':
view = TemplateView()
elif t == 'ListView':
view = ListView()
elif t == 'RedirectView':
view = RedirectView()
else:
print('type is invalid.')
return
view.output(exists_initial)
if __name__ == '__main__':
main()
| none | 1 | 2.527437 | 3 | |
makodedit/file_manage.py | Eleven-junichi2/mamo | 1 | 6621138 | from abc import ABCMeta, abstractmethod
from kivy.uix.relativelayout import RelativeLayout
from kivy.properties import ObjectProperty
from kivy.uix.popup import Popup
class FileBrowserDialogLayout(RelativeLayout):
file_manage_user = ObjectProperty(None)
file_manager = ObjectProperty(None)
popup = ObjectProperty(None)
def __init__(self, file_manage_user, file_manager, popup,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.file_manage_user = file_manage_user
self.file_manager = file_manager
self.popup = popup
class FileSaveDialogLayout(FileBrowserDialogLayout):
pass
class FileLoadDialogLayout(FileBrowserDialogLayout):
pass
class FileBrowserDialog(Popup):
def __init__(self, file_manage_user, file_manager,
dialog_layout,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.size_hint = (0.9, 0.9)
self.title = " "
self.content = dialog_layout(file_manage_user, file_manager, self)
class FileSaveDialog(FileBrowserDialog):
def __init__(self, file_manage_user, file_manager,
dialog_layout=FileSaveDialogLayout,
*args, **kwargs):
super().__init__(file_manage_user, file_manager, dialog_layout,
*args, **kwargs)
class FileLoadDialog(FileBrowserDialog):
def __init__(self, file_manage_user, file_manager,
dialog_layout=FileLoadDialogLayout,
*args, **kwargs):
super().__init__(file_manage_user, file_manager, dialog_layout,
*args, **kwargs)
class FileManager(metaclass=ABCMeta):
"""
Example usage:
class ExampleFileManager(FileManager):
def load_file(self):
pass
def save_file(self):
pass
class ExampleFileManageUser(Widget):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.file_manager = ExampleFileManager(self)
"""
def __init__(self, file_manage_user, *args, **kwargs):
super().__init__(*args, **kwargs)
self.file_manage_user = file_manage_user
@abstractmethod
def load_file(self):
pass
@abstractmethod
def save_file(self):
pass
def save_dialog(self):
popup = FileSaveDialog(self.file_manage_user, self)
popup.open()
def load_dialog(self):
popup = FileLoadDialog(self.file_manage_user, self)
popup.open()
| from abc import ABCMeta, abstractmethod
from kivy.uix.relativelayout import RelativeLayout
from kivy.properties import ObjectProperty
from kivy.uix.popup import Popup
class FileBrowserDialogLayout(RelativeLayout):
file_manage_user = ObjectProperty(None)
file_manager = ObjectProperty(None)
popup = ObjectProperty(None)
def __init__(self, file_manage_user, file_manager, popup,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.file_manage_user = file_manage_user
self.file_manager = file_manager
self.popup = popup
class FileSaveDialogLayout(FileBrowserDialogLayout):
pass
class FileLoadDialogLayout(FileBrowserDialogLayout):
pass
class FileBrowserDialog(Popup):
def __init__(self, file_manage_user, file_manager,
dialog_layout,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.size_hint = (0.9, 0.9)
self.title = " "
self.content = dialog_layout(file_manage_user, file_manager, self)
class FileSaveDialog(FileBrowserDialog):
def __init__(self, file_manage_user, file_manager,
dialog_layout=FileSaveDialogLayout,
*args, **kwargs):
super().__init__(file_manage_user, file_manager, dialog_layout,
*args, **kwargs)
class FileLoadDialog(FileBrowserDialog):
def __init__(self, file_manage_user, file_manager,
dialog_layout=FileLoadDialogLayout,
*args, **kwargs):
super().__init__(file_manage_user, file_manager, dialog_layout,
*args, **kwargs)
class FileManager(metaclass=ABCMeta):
"""
Example usage:
class ExampleFileManager(FileManager):
def load_file(self):
pass
def save_file(self):
pass
class ExampleFileManageUser(Widget):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.file_manager = ExampleFileManager(self)
"""
def __init__(self, file_manage_user, *args, **kwargs):
super().__init__(*args, **kwargs)
self.file_manage_user = file_manage_user
@abstractmethod
def load_file(self):
pass
@abstractmethod
def save_file(self):
pass
def save_dialog(self):
popup = FileSaveDialog(self.file_manage_user, self)
popup.open()
def load_dialog(self):
popup = FileLoadDialog(self.file_manage_user, self)
popup.open()
| en | 0.365635 | Example usage: class ExampleFileManager(FileManager): def load_file(self): pass def save_file(self): pass class ExampleFileManageUser(Widget): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.file_manager = ExampleFileManager(self) | 2.434105 | 2 |
models/BayesianModels/Bayesian3Conv3FC.py | SuperBruceJia/EEG-BayesianCNN | 21 | 6621139 | import math
import torch
import torch.nn as nn
from layers.BBBConv import BBBConv2d
from layers.BBBLinear import BBBLinear
from layers.misc import FlattenLayer, ModuleWrapper
class BBB3Conv3FC(ModuleWrapper):
"""
Simple Neural Network having 3 Convolution
and 3 FC layers with Bayesian layers.
"""
def __init__(self, outputs, inputs):
super(BBB3Conv3FC, self).__init__()
self.num_classes = outputs
self.conv1 = BBBConv2d(inputs, 16, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv1')
self.bn1 = nn.BatchNorm2d(16)
self.activate1 = nn.PReLU()
self.dropout1 = nn.Dropout2d(p=0.25)
self.conv2 = BBBConv2d(16, 16, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv2')
self.bn2 = nn.BatchNorm2d(16)
self.activate2 = nn.PReLU()
self.dropout2 = nn.Dropout2d(p=0.25)
self.conv3 = BBBConv2d(16, 32, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv3')
self.bn3 = nn.BatchNorm2d(32)
self.activate3 = nn.PReLU()
self.dropout3 = nn.Dropout2d(p=0.25)
self.pool1 = nn.AvgPool2d(kernel_size=2, stride=2)
self.conv4 = BBBConv2d(33, 32, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv4')
self.bn4 = nn.BatchNorm2d(32)
self.activate4 = nn.PReLU()
self.dropout4 = nn.Dropout2d(p=0.25)
self.conv5 = BBBConv2d(32, 32, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv5')
self.bn5 = nn.BatchNorm2d(32)
self.activate5 = nn.PReLU()
self.dropout5 = nn.Dropout2d(p=0.25)
self.conv6 = BBBConv2d(32, 64, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv6')
self.bn6 = nn.BatchNorm2d(64)
self.activate6 = nn.PReLU()
self.dropout6 = nn.Dropout2d(p=0.25)
self.pool2 = nn.AvgPool2d(kernel_size=2, stride=2)
self.flatten = FlattenLayer(8 * 5 * 97)
self.fc1 = BBBLinear(8 * 5 * 97, 512, alpha_shape=(1, 1), bias=True, name='fc1')
self.fc1_bn = nn.BatchNorm1d(512)
self.fc1_activate = nn.PReLU()
self.fc1_dropout = nn.Dropout(p=0.50)
self.fc2 = BBBLinear(512, 256, alpha_shape=(1, 1), bias=True, name='fc2')
self.fc2_bn = nn.BatchNorm1d(256)
self.fc2_activate = nn.Softplus()
self.fc2_dropout = nn.Dropout(p=0.50)
#
# self.fc3 = BBBLinear(256, 128, alpha_shape=(1, 1), bias=True, name='fc3')
# self.fc3_bn = nn.BatchNorm1d(128)
# self.fc3_activate = nn.Softplus()
# self.fc3_dropout = nn.Dropout(p=0.50)
#
# self.fc4 = BBBLinear(128, 64, alpha_shape=(1, 1), bias=True, name='fc4')
# self.fc4_bn = nn.BatchNorm1d(64)
# self.fc4_activate = nn.Softplus()
# self.fc4_dropout = nn.Dropout(p=0.50)
self.fc5 = BBBLinear(256, outputs, alpha_shape=(1, 1), bias=True, name='fc5')
# x1 = self.dropout1(self.activate1(self.bn1(self.conv1(x))))
# x2 = self.dropout2(self.activate2(self.bn2(self.conv2(x1))))
# x3 = torch.cat((x1, x2), dim=1)
# x3 = self.pool1(x3)
#
# x4 = self.dropout4(self.activate4(self.bn4(self.conv4(x3))))
# x5 = self.dropout5(self.activate5(self.bn5(self.conv5(x4))))
# x6 = torch.cat((x4, x5), dim=1)
# x6 = self.pool2(x6)
#
# x7 = self.dropout7(self.activate7(self.bn7(self.conv7(x6))))
# x8 = self.dropout8(self.activate8(self.bn8(self.conv8(x7))))
# x9 = torch.cat((x7, x8), dim=1)
# x9 = self.pool3(x9)
#
# x10 = self.dropout10(self.activate10(self.bn10(self.conv10(x9))))
# x11 = self.dropout11(self.activate11(self.bn11(self.conv11(x10))))
# x12 = torch.cat((x10, x11), dim=1)
# x12 = self.pool4(x12)
#
# x12 = self.flatten(x12)
#
# x13 = self.fc1_dropout(self.fc1_activate(self.fc1_bn(self.fc1(x12))))
# x14 = self.fc2_dropout(self.fc2_activate(self.fc2_bn(self.fc2(x13))))
# x15 = self.fc3_dropout(self.fc3_activate(self.fc3_bn(self.fc3(x14))))
#
# x16 = self.fc4(x15)
# # ResNet Architecture
# x1 = self.dropout1(self.activate1(self.bn1(self.conv1(x))))
# x2 = self.bn2(self.conv2(x1))
# x2 = torch.cat((x2, x), dim=1)
# x2 = self.dropout2(self.activate2(x2))
#
# x3 = self.pool1(x2)
#
# x4 = self.dropout4(self.activate4(self.bn4(self.conv4(x3))))
# x5 = self.bn5(self.conv5(x4))
# x5 = torch.cat((x5, x3), dim=1)
# x5 = self.dropout5(self.activate5(x5))
#
# x6 = self.pool2(x5)
#
# x7 = self.dropout7(self.activate7(self.bn7(self.conv7(x6))))
# x8 = self.bn8(self.conv8(x7))
# x8 = torch.cat((x8, x6), dim=1)
# x8 = self.dropout8(self.activate8(x8))
#
# x9 = self.pool3(x8)
#
# x10 = self.dropout10(self.activate10(self.bn10(self.conv10(x9))))
# x11 = self.bn11(self.conv11(x10))
# x11 = torch.cat((x11, x9), dim=1)
# x11 = self.dropout11(self.activate11(x11))
#
# x12 = self.pool4(x11)
#
# x12 = self.flatten(x12)
#
# x13 = self.fc1_dropout(self.fc1_activate(self.fc1_bn(self.fc1(x12))))
# x14 = self.fc2_dropout(self.fc2_activate(self.fc2_bn(self.fc2(x13))))
# x15 = self.fc3_dropout(self.fc3_activate(self.fc3_bn(self.fc3(x14))))
#
# x16 = self.fc4(x15)
#
| import math
import torch
import torch.nn as nn
from layers.BBBConv import BBBConv2d
from layers.BBBLinear import BBBLinear
from layers.misc import FlattenLayer, ModuleWrapper
class BBB3Conv3FC(ModuleWrapper):
"""
Simple Neural Network having 3 Convolution
and 3 FC layers with Bayesian layers.
"""
def __init__(self, outputs, inputs):
super(BBB3Conv3FC, self).__init__()
self.num_classes = outputs
self.conv1 = BBBConv2d(inputs, 16, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv1')
self.bn1 = nn.BatchNorm2d(16)
self.activate1 = nn.PReLU()
self.dropout1 = nn.Dropout2d(p=0.25)
self.conv2 = BBBConv2d(16, 16, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv2')
self.bn2 = nn.BatchNorm2d(16)
self.activate2 = nn.PReLU()
self.dropout2 = nn.Dropout2d(p=0.25)
self.conv3 = BBBConv2d(16, 32, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv3')
self.bn3 = nn.BatchNorm2d(32)
self.activate3 = nn.PReLU()
self.dropout3 = nn.Dropout2d(p=0.25)
self.pool1 = nn.AvgPool2d(kernel_size=2, stride=2)
self.conv4 = BBBConv2d(33, 32, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv4')
self.bn4 = nn.BatchNorm2d(32)
self.activate4 = nn.PReLU()
self.dropout4 = nn.Dropout2d(p=0.25)
self.conv5 = BBBConv2d(32, 32, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv5')
self.bn5 = nn.BatchNorm2d(32)
self.activate5 = nn.PReLU()
self.dropout5 = nn.Dropout2d(p=0.25)
self.conv6 = BBBConv2d(32, 64, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv6')
self.bn6 = nn.BatchNorm2d(64)
self.activate6 = nn.PReLU()
self.dropout6 = nn.Dropout2d(p=0.25)
self.pool2 = nn.AvgPool2d(kernel_size=2, stride=2)
self.flatten = FlattenLayer(8 * 5 * 97)
self.fc1 = BBBLinear(8 * 5 * 97, 512, alpha_shape=(1, 1), bias=True, name='fc1')
self.fc1_bn = nn.BatchNorm1d(512)
self.fc1_activate = nn.PReLU()
self.fc1_dropout = nn.Dropout(p=0.50)
self.fc2 = BBBLinear(512, 256, alpha_shape=(1, 1), bias=True, name='fc2')
self.fc2_bn = nn.BatchNorm1d(256)
self.fc2_activate = nn.Softplus()
self.fc2_dropout = nn.Dropout(p=0.50)
#
# self.fc3 = BBBLinear(256, 128, alpha_shape=(1, 1), bias=True, name='fc3')
# self.fc3_bn = nn.BatchNorm1d(128)
# self.fc3_activate = nn.Softplus()
# self.fc3_dropout = nn.Dropout(p=0.50)
#
# self.fc4 = BBBLinear(128, 64, alpha_shape=(1, 1), bias=True, name='fc4')
# self.fc4_bn = nn.BatchNorm1d(64)
# self.fc4_activate = nn.Softplus()
# self.fc4_dropout = nn.Dropout(p=0.50)
self.fc5 = BBBLinear(256, outputs, alpha_shape=(1, 1), bias=True, name='fc5')
# x1 = self.dropout1(self.activate1(self.bn1(self.conv1(x))))
# x2 = self.dropout2(self.activate2(self.bn2(self.conv2(x1))))
# x3 = torch.cat((x1, x2), dim=1)
# x3 = self.pool1(x3)
#
# x4 = self.dropout4(self.activate4(self.bn4(self.conv4(x3))))
# x5 = self.dropout5(self.activate5(self.bn5(self.conv5(x4))))
# x6 = torch.cat((x4, x5), dim=1)
# x6 = self.pool2(x6)
#
# x7 = self.dropout7(self.activate7(self.bn7(self.conv7(x6))))
# x8 = self.dropout8(self.activate8(self.bn8(self.conv8(x7))))
# x9 = torch.cat((x7, x8), dim=1)
# x9 = self.pool3(x9)
#
# x10 = self.dropout10(self.activate10(self.bn10(self.conv10(x9))))
# x11 = self.dropout11(self.activate11(self.bn11(self.conv11(x10))))
# x12 = torch.cat((x10, x11), dim=1)
# x12 = self.pool4(x12)
#
# x12 = self.flatten(x12)
#
# x13 = self.fc1_dropout(self.fc1_activate(self.fc1_bn(self.fc1(x12))))
# x14 = self.fc2_dropout(self.fc2_activate(self.fc2_bn(self.fc2(x13))))
# x15 = self.fc3_dropout(self.fc3_activate(self.fc3_bn(self.fc3(x14))))
#
# x16 = self.fc4(x15)
# # ResNet Architecture
# x1 = self.dropout1(self.activate1(self.bn1(self.conv1(x))))
# x2 = self.bn2(self.conv2(x1))
# x2 = torch.cat((x2, x), dim=1)
# x2 = self.dropout2(self.activate2(x2))
#
# x3 = self.pool1(x2)
#
# x4 = self.dropout4(self.activate4(self.bn4(self.conv4(x3))))
# x5 = self.bn5(self.conv5(x4))
# x5 = torch.cat((x5, x3), dim=1)
# x5 = self.dropout5(self.activate5(x5))
#
# x6 = self.pool2(x5)
#
# x7 = self.dropout7(self.activate7(self.bn7(self.conv7(x6))))
# x8 = self.bn8(self.conv8(x7))
# x8 = torch.cat((x8, x6), dim=1)
# x8 = self.dropout8(self.activate8(x8))
#
# x9 = self.pool3(x8)
#
# x10 = self.dropout10(self.activate10(self.bn10(self.conv10(x9))))
# x11 = self.bn11(self.conv11(x10))
# x11 = torch.cat((x11, x9), dim=1)
# x11 = self.dropout11(self.activate11(x11))
#
# x12 = self.pool4(x11)
#
# x12 = self.flatten(x12)
#
# x13 = self.fc1_dropout(self.fc1_activate(self.fc1_bn(self.fc1(x12))))
# x14 = self.fc2_dropout(self.fc2_activate(self.fc2_bn(self.fc2(x13))))
# x15 = self.fc3_dropout(self.fc3_activate(self.fc3_bn(self.fc3(x14))))
#
# x16 = self.fc4(x15)
#
| en | 0.33881 | Simple Neural Network having 3 Convolution
and 3 FC layers with Bayesian layers. # # self.fc3 = BBBLinear(256, 128, alpha_shape=(1, 1), bias=True, name='fc3') # self.fc3_bn = nn.BatchNorm1d(128) # self.fc3_activate = nn.Softplus() # self.fc3_dropout = nn.Dropout(p=0.50) # # self.fc4 = BBBLinear(128, 64, alpha_shape=(1, 1), bias=True, name='fc4') # self.fc4_bn = nn.BatchNorm1d(64) # self.fc4_activate = nn.Softplus() # self.fc4_dropout = nn.Dropout(p=0.50) # x1 = self.dropout1(self.activate1(self.bn1(self.conv1(x)))) # x2 = self.dropout2(self.activate2(self.bn2(self.conv2(x1)))) # x3 = torch.cat((x1, x2), dim=1) # x3 = self.pool1(x3) # # x4 = self.dropout4(self.activate4(self.bn4(self.conv4(x3)))) # x5 = self.dropout5(self.activate5(self.bn5(self.conv5(x4)))) # x6 = torch.cat((x4, x5), dim=1) # x6 = self.pool2(x6) # # x7 = self.dropout7(self.activate7(self.bn7(self.conv7(x6)))) # x8 = self.dropout8(self.activate8(self.bn8(self.conv8(x7)))) # x9 = torch.cat((x7, x8), dim=1) # x9 = self.pool3(x9) # # x10 = self.dropout10(self.activate10(self.bn10(self.conv10(x9)))) # x11 = self.dropout11(self.activate11(self.bn11(self.conv11(x10)))) # x12 = torch.cat((x10, x11), dim=1) # x12 = self.pool4(x12) # # x12 = self.flatten(x12) # # x13 = self.fc1_dropout(self.fc1_activate(self.fc1_bn(self.fc1(x12)))) # x14 = self.fc2_dropout(self.fc2_activate(self.fc2_bn(self.fc2(x13)))) # x15 = self.fc3_dropout(self.fc3_activate(self.fc3_bn(self.fc3(x14)))) # # x16 = self.fc4(x15) # # ResNet Architecture # x1 = self.dropout1(self.activate1(self.bn1(self.conv1(x)))) # x2 = self.bn2(self.conv2(x1)) # x2 = torch.cat((x2, x), dim=1) # x2 = self.dropout2(self.activate2(x2)) # # x3 = self.pool1(x2) # # x4 = self.dropout4(self.activate4(self.bn4(self.conv4(x3)))) # x5 = self.bn5(self.conv5(x4)) # x5 = torch.cat((x5, x3), dim=1) # x5 = self.dropout5(self.activate5(x5)) # # x6 = self.pool2(x5) # # x7 = self.dropout7(self.activate7(self.bn7(self.conv7(x6)))) # x8 = self.bn8(self.conv8(x7)) # x8 = torch.cat((x8, x6), dim=1) # x8 = 
self.dropout8(self.activate8(x8)) # # x9 = self.pool3(x8) # # x10 = self.dropout10(self.activate10(self.bn10(self.conv10(x9)))) # x11 = self.bn11(self.conv11(x10)) # x11 = torch.cat((x11, x9), dim=1) # x11 = self.dropout11(self.activate11(x11)) # # x12 = self.pool4(x11) # # x12 = self.flatten(x12) # # x13 = self.fc1_dropout(self.fc1_activate(self.fc1_bn(self.fc1(x12)))) # x14 = self.fc2_dropout(self.fc2_activate(self.fc2_bn(self.fc2(x13)))) # x15 = self.fc3_dropout(self.fc3_activate(self.fc3_bn(self.fc3(x14)))) # # x16 = self.fc4(x15) # | 2.833926 | 3 |
main.py | janhenrikbern/VT_OPT | 2 | 6621140 | <gh_stars>1-10
import matplotlib.pyplot as plt
import numpy as np
import argparse
from track import load_track
from vehicles import PointCar
import viterbi
import path_finding
from scoring_functions import (
time_score,
distance_score,
centerline_score
)
import metrics
import optimal
# Command-line interface. "--plot" is a boolean flag: pass it to display
# matplotlib figures for the experiments.
# BUG FIX: the original declared "--plot" with default=False but no action,
# so it consumed a string value and ANY non-empty argument (even "false")
# was truthy; action="store_true" makes it a real flag.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--plot", action="store_true", default=False, help="plot trajectories"
)
a = parser.parse_args()
def get_time(path):
    """Simulate a PointCar driving along *path* and return its travel time.

    The car starts at the first waypoint, is oriented toward the second
    one, then steps through every remaining waypoint.
    """
    vehicle = PointCar(*path[0])
    # Face the second waypoint before starting to drive.
    vehicle.theta = vehicle.heading(path[1])
    for waypoint in path[1:]:
        vehicle.update(waypoint, is_coor=True)
    return vehicle.travel_time
def get_distance(path):
    """Return the summed segment length of the (x, y) waypoint sequence."""
    xs = [point[0] for point in path]
    ys = [point[1] for point in path]
    return metrics.summed_distance(xs, ys)
def print_stats(path):
    """Print "<distance>, <travel time>" for *path* on a single line."""
    distance = get_distance(path)
    duration = get_time(path)
    print(f"{distance}, {duration}")
def min_distance():
    """Compare distance-optimal trajectories from Viterbi and exhaustive search.

    Builds a trellis of candidate lateral states around the loop track,
    solves it with both solvers under a pure distance objective
    (alpha=1, beta=0), prints distance/time stats for two baselines
    (centerline and innermost contour) and both solutions, and optionally
    plots the two solutions when --plot is given.
    """
    IMG_PATH = "./tracks/loop.png"
    track = load_track(IMG_PATH)
    # Set to a valid point in trajectory
    starting_position = (150., 200.)
    car = PointCar(*starting_position)
    n_loops = 1
    trellis = path_finding.find_valid_trajectory(car, track, loops=n_loops, states=31)
    split_idx = (len(trellis) // n_loops) + 1 if n_loops > 1 else 0
    # Slice selecting one representative lap.  BUG FIX: with n_loops == 1 the
    # original sliced path[0:0] (split_idx is 0), yielding an empty list that
    # made print_stats crash inside get_time via path[0]; use the whole path
    # in the single-loop case instead.
    lap = slice(split_idx, 2 * split_idx) if n_loops > 1 else slice(None)
    # Baselines: middle lateral state (centerline) and innermost state.
    c_idx = len(trellis[0]) // 2
    in_idx = 0
    centerline = [t[c_idx] for t in trellis]
    print_stats(centerline)
    inner_contour = [t[in_idx] for t in trellis]
    print_stats(inner_contour)
    path_v = viterbi.additive_viterbi(trellis, alpha=1.0, beta=0.0)
    print_stats(path_v[lap])
    path_s = optimal.run(trellis, alpha=1.0, beta=0.0)
    print_stats(path_s[lap])
    xv = [i[0] for i in path_v[lap]]
    yv = [i[1] for i in path_v[lap]]
    xs = [i[0] for i in path_s[lap]]
    ys = [i[1] for i in path_s[lap]]
    if a.plot:
        fig, ax = plt.subplots()
        ax.imshow(track)
        plt.xlabel("Meters")
        plt.ylabel("Meters")
        ax.fill(xv, yv, facecolor='none', edgecolor='green', linestyle="-", label="Distance Objective")
        fig, ax = plt.subplots()
        ax.imshow(track)
        plt.xlabel("Meters")
        plt.ylabel("Meters")
        ax.fill(xs, ys, facecolor='none', edgecolor='blue', linestyle="-", label="Time Objective")
        plt.show()
def min_time():
    """Compare time-objective solutions from Viterbi and exhaustive search.

    Runs both solvers over a 3-lap trellis, prints stats for one lap of
    each solution, and optionally plots both trajectories when --plot is
    given.
    """
    IMG_PATH = "./tracks/loop.png"
    track = load_track(IMG_PATH)
    # Set to a valid point in trajectory
    starting_position = (150., 200.)
    car = PointCar(*starting_position)
    n_loops = 3
    trellis = path_finding.find_valid_trajectory(car, track, loops=n_loops, states=30)
    # Length of one lap; the middle lap [split_idx:2*split_idx] avoids
    # start-up and wrap-around artifacts.
    split_idx = (len(trellis) // n_loops) + 1 if n_loops > 1 else 0
    path_v = viterbi.additive_viterbi(trellis, alpha=1.0, beta=0.0)
    print_stats(path_v[split_idx:2*split_idx])
    path_s = optimal.run(trellis, alpha=0.5, beta=0.5)
    # Use the same single-lap window for both solutions so the printed
    # stats are comparable (the original printed path_s[split_idx:], i.e.
    # two laps, for the search solution).
    print_stats(path_s[split_idx:2*split_idx])
    xv = [i[0] for i in path_v[split_idx:2*split_idx]]
    yv = [i[1] for i in path_v[split_idx:2*split_idx]]
    # BUG FIX: the second figure was drawn from path_v, so both plots
    # showed the Viterbi path; plot the search solution (path_s) instead.
    xs = [i[0] for i in path_s[split_idx:2*split_idx]]
    ys = [i[1] for i in path_s[split_idx:2*split_idx]]
    if a.plot:
        fig1, ax1 = plt.subplots()
        ax1.imshow(track)
        plt.xlabel("Meters")
        plt.ylabel("Meters")
        ax1.fill(xv, yv, facecolor='none', edgecolor='green', linestyle="-")
        fig2, ax2 = plt.subplots()
        ax2.imshow(track)
        plt.xlabel("Meters")
        plt.ylabel("Meters")
        ax2.fill(xs, ys, facecolor='none', edgecolor='blue', linestyle="-")
        plt.show()
def state_tradeoff():
    """Print solution quality for both solvers as the number of lateral
    trellis states grows, to study the accuracy vs. state-count trade-off.

    For each state count, prints "<distance>, <time>" for the Viterbi
    solution and then for the exhaustive-search solution (distance
    objective: alpha=1, beta=0).
    """
    IMG_PATH = "./tracks/loop.png"
    track = load_track(IMG_PATH)
    # Set to a valid point in trajectory
    starting_position = (150., 200.)
    car = PointCar(*starting_position)
    for s in [1, 5, 10, 20, 40, 80]:
        n_loops = 3
        trellis = path_finding.find_valid_trajectory(car, track, loops=n_loops, states=s)
        # Length of one lap; the middle lap is sliced out below to avoid
        # start-up and wrap-around artifacts.
        split_idx = (len(trellis) // n_loops) + 1 if n_loops > 1 else 0
        path_v = viterbi.additive_viterbi(trellis, alpha=1.0, beta=0.0)[split_idx:2*split_idx]
        print_stats(path_v)
        path_s = optimal.run(trellis, alpha=1.0, beta=0.0)[split_idx:2*split_idx]
        print_stats(path_s)
if __name__ == "__main__":
    # Other experiments, kept for reference:
    # min_distance()
    # min_time()
    state_tradeoff()
    # Older experiment retained as commented-out reference code:
    # IMG_PATH = "./tracks/loop.png"
    # track = load_track(IMG_PATH)
    # # Set to a valid point in trajectory
    # starting_position = (150., 200.)
    # car = PointCar(*starting_position)
    # baseline_trellis = path_finding.find_valid_trajectory(car, track, states=1)
    # baseline = viterbi.additive_viterbi(baseline_trellis, starting_position, centerline_score)
    # n_loops = 1
    # trellis = path_finding.find_valid_trajectory(car, track, loops=n_loops, states=10)
    # split_idx = (len(trellis) // n_loops) + 1 if n_loops > 1 else 0
    # time = viterbi.additive_viterbi(trellis, starting_position, time_score)
    # distance = viterbi.additive_viterbi(trellis, starting_position, distance_score)
    # for path in (baseline, time[split_idx:], distance[split_idx:]):
    #     print(get_stats(path, starting_position))
    # x = np.array([x[0] for x in distance])
    # y = np.array([x[1] for x in distance])
    # print(metrics.summed_distance(x, y))
    # if a.plot:
    #     fig, ax = plt.subplots()
    #     ax.imshow(track)
    #     plt.xlabel("Meters")
    #     plt.ylabel("Meters")
    #     ax.fill(baseline[:,0], baseline[:,1], facecolor='none', edgecolor='black', linestyle="-.", label="Centerline")
    #     ax.fill(time[split_idx:,0], time[split_idx:,1], facecolor='none', edgecolor='red', linestyle="-", label="Time Objective")
    #     ax.fill(distance[:split_idx,0], distance[:split_idx,1], facecolor='none', edgecolor='blue', linestyle="-", label="Distance Objective")
    #     plt.legend(loc=4)
# plt.show() | import matplotlib.pyplot as plt
import numpy as np
import argparse
from track import load_track
from vehicles import PointCar
import viterbi
import path_finding
from scoring_functions import (
time_score,
distance_score,
centerline_score
)
import metrics
import optimal
parser = argparse.ArgumentParser()
parser.add_argument(
"--plot", default=False, help="plot trajectories"
)
a = parser.parse_args()
def get_time(path):
car = PointCar(*path[0])
car.theta = car.heading(path[1])
for coor in path[1:]:
car.update(coor, is_coor=True)
return car.travel_time
def get_distance(path):
x = [i[0] for i in path]
y = [i[1] for i in path]
return metrics.summed_distance(x, y)
def print_stats(path):
print(f"{get_distance(path)}, {get_time(path)}")
def min_distance():
IMG_PATH = "./tracks/loop.png"
track = load_track(IMG_PATH)
# Set to a valid point in trajectory
starting_position = (150., 200.)
car = PointCar(*starting_position)
n_loops = 1
trellis = path_finding.find_valid_trajectory(car, track, loops=n_loops, states=31)
split_idx = (len(trellis) // n_loops) + 1 if n_loops > 1 else 0
c_idx = len(trellis[0])//2
in_idx = 0
centerline = [t[c_idx] for t in trellis]
print_stats(centerline)
inner_contour = [t[in_idx] for t in trellis]
print_stats(inner_contour)
path_v = viterbi.additive_viterbi(trellis, alpha=1.0, beta=0.0)
print_stats(path_v[split_idx:2*split_idx])
path_s = optimal.run(trellis, alpha=1.0, beta=0.0)
print_stats(path_s[split_idx:2*split_idx])
xv = [i[0] for i in path_v[split_idx:2*split_idx]]
yv = [i[1] for i in path_v[split_idx:2*split_idx]]
xs = [i[0] for i in path_s[split_idx:2*split_idx]]
ys = [i[1] for i in path_s[split_idx:2*split_idx]]
if a.plot:
fig, ax = plt.subplots()
ax.imshow(track)
plt.xlabel("Meters")
plt.ylabel("Meters")
ax.fill(xv, yv, facecolor='none', edgecolor='green', linestyle="-", label="Distance Objective")
fig, ax = plt.subplots()
ax.imshow(track)
plt.xlabel("Meters")
plt.ylabel("Meters")
ax.fill(xs, ys, facecolor='none', edgecolor='blue', linestyle="-", label="Time Objective")
plt.show()
def min_time():
    """Compare time-objective solutions from Viterbi and exhaustive search.

    Runs both solvers over a 3-lap trellis, prints stats for one lap of
    each solution, and optionally plots both trajectories when --plot is
    given.
    """
    IMG_PATH = "./tracks/loop.png"
    track = load_track(IMG_PATH)
    # Set to a valid point in trajectory
    starting_position = (150., 200.)
    car = PointCar(*starting_position)
    n_loops = 3
    trellis = path_finding.find_valid_trajectory(car, track, loops=n_loops, states=30)
    # Length of one lap; the middle lap [split_idx:2*split_idx] avoids
    # start-up and wrap-around artifacts.
    split_idx = (len(trellis) // n_loops) + 1 if n_loops > 1 else 0
    path_v = viterbi.additive_viterbi(trellis, alpha=1.0, beta=0.0)
    print_stats(path_v[split_idx:2*split_idx])
    path_s = optimal.run(trellis, alpha=0.5, beta=0.5)
    # Use the same single-lap window for both solutions so the printed
    # stats are comparable (the original printed path_s[split_idx:], i.e.
    # two laps, for the search solution).
    print_stats(path_s[split_idx:2*split_idx])
    xv = [i[0] for i in path_v[split_idx:2*split_idx]]
    yv = [i[1] for i in path_v[split_idx:2*split_idx]]
    # BUG FIX: the second figure was drawn from path_v, so both plots
    # showed the Viterbi path; plot the search solution (path_s) instead.
    xs = [i[0] for i in path_s[split_idx:2*split_idx]]
    ys = [i[1] for i in path_s[split_idx:2*split_idx]]
    if a.plot:
        fig1, ax1 = plt.subplots()
        ax1.imshow(track)
        plt.xlabel("Meters")
        plt.ylabel("Meters")
        ax1.fill(xv, yv, facecolor='none', edgecolor='green', linestyle="-")
        fig2, ax2 = plt.subplots()
        ax2.imshow(track)
        plt.xlabel("Meters")
        plt.ylabel("Meters")
        ax2.fill(xs, ys, facecolor='none', edgecolor='blue', linestyle="-")
        plt.show()
def state_tradeoff():
IMG_PATH = "./tracks/loop.png"
track = load_track(IMG_PATH)
# Set to a valid point in trajectory
starting_position = (150., 200.)
car = PointCar(*starting_position)
for s in [1, 5, 10, 20, 40, 80]:
n_loops = 3
trellis = path_finding.find_valid_trajectory(car, track, loops=n_loops, states=s)
split_idx = (len(trellis) // n_loops) + 1 if n_loops > 1 else 0
path_v = viterbi.additive_viterbi(trellis, alpha=1.0, beta=0.0)[split_idx:2*split_idx]
print_stats(path_v)
path_s = optimal.run(trellis, alpha=1.0, beta=0.0)[split_idx:2*split_idx]
print_stats(path_s)
if __name__ == "__main__":
# min_distance()
# min_time()
state_tradeoff()
# IMG_PATH = "./tracks/loop.png"
# track = load_track(IMG_PATH)
# # Set to a valid point in trajectory
# starting_position = (150., 200.)
# car = PointCar(*starting_position)
# baseline_trellis = path_finding.find_valid_trajectory(car, track, states=1)
# baseline = viterbi.additive_viterbi(baseline_trellis, starting_position, centerline_score)
# n_loops = 1
# trellis = path_finding.find_valid_trajectory(car, track, loops=n_loops, states=10)
# split_idx = (len(trellis) // n_loops) + 1 if n_loops > 1 else 0
# time = viterbi.additive_viterbi(trellis, starting_position, time_score)
# distance = viterbi.additive_viterbi(trellis, starting_position, distance_score)
# for path in (baseline, time[split_idx:], distance[split_idx:]):
# print(get_stats(path, starting_position))
# x = np.array([x[0] for x in distance])
# y = np.array([x[1] for x in distance])
# print(metrics.summed_distance(x, y))
# if a.plot:
# fig, ax = plt.subplots()
# ax.imshow(track)
# plt.xlabel("Meters")
# plt.ylabel("Meters")
# ax.fill(baseline[:,0], baseline[:,1], facecolor='none', edgecolor='black', linestyle="-.", label="Centerline")
# ax.fill(time[split_idx:,0], time[split_idx:,1], facecolor='none', edgecolor='red', linestyle="-", label="Time Objective")
# ax.fill(distance[:split_idx,0], distance[:split_idx,1], facecolor='none', edgecolor='blue', linestyle="-", label="Distance Objective")
# plt.legend(loc=4)
# plt.show() | en | 0.345402 | # Set to a valid point in trajectory # Set to a valid point in trajectory # Set to a valid point in trajectory # min_distance() # min_time() # IMG_PATH = "./tracks/loop.png" # track = load_track(IMG_PATH) # # Set to a valid point in trajectory # starting_position = (150., 200.) # car = PointCar(*starting_position) # baseline_trellis = path_finding.find_valid_trajectory(car, track, states=1) # baseline = viterbi.additive_viterbi(baseline_trellis, starting_position, centerline_score) # n_loops = 1 # trellis = path_finding.find_valid_trajectory(car, track, loops=n_loops, states=10) # split_idx = (len(trellis) // n_loops) + 1 if n_loops > 1 else 0 # time = viterbi.additive_viterbi(trellis, starting_position, time_score) # distance = viterbi.additive_viterbi(trellis, starting_position, distance_score) # for path in (baseline, time[split_idx:], distance[split_idx:]): # print(get_stats(path, starting_position)) # x = np.array([x[0] for x in distance]) # y = np.array([x[1] for x in distance]) # print(metrics.summed_distance(x, y)) # if a.plot: # fig, ax = plt.subplots() # ax.imshow(track) # plt.xlabel("Meters") # plt.ylabel("Meters") # ax.fill(baseline[:,0], baseline[:,1], facecolor='none', edgecolor='black', linestyle="-.", label="Centerline") # ax.fill(time[split_idx:,0], time[split_idx:,1], facecolor='none', edgecolor='red', linestyle="-", label="Time Objective") # ax.fill(distance[:split_idx,0], distance[:split_idx,1], facecolor='none', edgecolor='blue', linestyle="-", label="Distance Objective") # plt.legend(loc=4) # plt.show() | 2.510123 | 3 |
xmas/lib/logger.py | pikesley/christmas-pixels | 0 | 6621141 | <reponame>pikesley/christmas-pixels<gh_stars>0
import logging
LOGGER = logging.getLogger('christmas-pixels')
LOGGER.setLevel(logging.INFO)
LOGGER.addHandler(logging.StreamHandler())
def disable():
"""Switch logging off because it messes up the test output."""
LOGGER.setLevel(logging.CRITICAL)
| import logging
LOGGER = logging.getLogger('christmas-pixels')
LOGGER.setLevel(logging.INFO)
LOGGER.addHandler(logging.StreamHandler())
def disable():
"""Switch logging off because it messes up the test output."""
LOGGER.setLevel(logging.CRITICAL) | en | 0.724761 | Switch logging off because it messes up the test output. | 2.561223 | 3 |
def dias_do_ano(data):
    """Return how many full days of the year have elapsed before the date.

    data: date string in "DD/MM/YYYY" format.

    Counts days from January 1st up to (but not including) the given day,
    so "01/01/YYYY" -> 0.  BUG FIX: the original parsed the year but never
    used it, so leap years were off by one from March onward; February is
    now given 29 days in leap years.
    """
    dia = int(data[:2])
    mes = int(data[3:5])
    ano = int(data[6:])
    meses = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    # Gregorian leap-year rule: divisible by 4, except centuries not
    # divisible by 400.
    if ano % 4 == 0 and (ano % 100 != 0 or ano % 400 == 0):
        meses[1] = 29
    # Days in all months before the current one, plus elapsed days this month.
    return sum(meses[:mes - 1]) + dia - 1
print(dias_do_ano("02/03/2018"))
def dias_do_ano(data):
    """Return how many full days of the year have elapsed before the date.

    data: date string in "DD/MM/YYYY" format.

    Counts days from January 1st up to (but not including) the given day,
    so "01/01/YYYY" -> 0.  BUG FIX: the original parsed the year but never
    used it, so leap years were off by one from March onward; February is
    now given 29 days in leap years.
    """
    dia = int(data[:2])
    mes = int(data[3:5])
    ano = int(data[6:])
    meses = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    # Gregorian leap-year rule: divisible by 4, except centuries not
    # divisible by 400.
    if ano % 4 == 0 and (ano % 100 != 0 or ano % 400 == 0):
        meses[1] = 29
    # Days in all months before the current one, plus elapsed days this month.
    return sum(meses[:mes - 1]) + dia - 1
print(dias_do_ano("02/03/2018"))
scraper/storage_spiders/zenocomvn.py | chongiadung/choinho | 0 | 6621143 | <reponame>chongiadung/choinho<filename>scraper/storage_spiders/zenocomvn.py<gh_stars>0
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
# Scrapy spider configuration for zeno.com.vn (consumed by the scraper
# framework's generator; see the auto-generated header above).
# XPath selectors used to extract product fields from a product page.
# Empty strings mean the field is not extracted for this site.
XPATH = {
    'name' : "//div[@class='t_ctsp21']/div[@class='t_ctsp1']/div/h1",
    'price' : "//b[@style='color: #b50000;']|//b[@style='color: #ff0000;font-size: 20px;']",
    'category' : "//div[@class='nav_product']/ul/li/a",
    'description' : "//div[@class='inf_product']/div[@class='tt_chitiet']",
    'images' : "//div[@id='gallery_01']/a/@data-zoom-image",
    'canonical' : "",
    'base_url' : "",
    'brand' : ""
}
name = 'zeno.com.vn'
allowed_domains = ['zeno.com.vn']
start_urls = ['http://zeno.com.vn']
tracking_url = ''
# Sitemap crawling is effectively disabled (empty URL list entries).
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
# Link-following rules: "-p<digits>.html" URLs are product pages
# (parse_item); "-c<digits>.html" URLs are category pages (parse).
rules = [
    Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+-p+\d+\.html']), 'parse_item'),
    Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+-c+\d+\.html']), 'parse'),
    #Rule(LinkExtractor(), 'parse_item_and_links'),
]
| # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='t_ctsp21']/div[@class='t_ctsp1']/div/h1",
'price' : "//b[@style='color: #b50000;']|//b[@style='color: #ff0000;font-size: 20px;']",
'category' : "//div[@class='nav_product']/ul/li/a",
'description' : "//div[@class='inf_product']/div[@class='tt_chitiet']",
'images' : "//div[@id='gallery_01']/a/@data-zoom-image",
'canonical' : "",
'base_url' : "",
'brand' : ""
}
name = 'zeno.com.vn'
allowed_domains = ['zeno.com.vn']
start_urls = ['http://zeno.com.vn']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+-p+\d+\.html']), 'parse_item'),
Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+-c+\d+\.html']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
] | en | 0.519191 | # Auto generated by generator.py. Delete this line if you make modification. #b50000;']|//b[@style='color: #ff0000;font-size: 20px;']", #Rule(LinkExtractor(), 'parse_item_and_links'), | 2.066622 | 2 |
scripts/05_modules/bodypaint/get_uv_seams_s22_114.py | PluginCafe/cinema4d_py_sdk_extended | 85 | 6621144 | <gh_stars>10-100
"""
Copyright: MAXON Computer GmbH
Author: <NAME>
Description:
- Copies the UV seams to the edge polygon selection.
Class/method highlighted:
- c4d.modules.bodypaint.GetUVSeams()
- PolygonObject.GetEdgeS()
- BaseSelect.CopyTo()
- c4d.modules.bodypaint.UpdateMeshUV()
"""
import c4d
def main():
# Checks if selected object is valid
if op is None:
raise ValueError("op is none, please select one object.")
# Checks if there is a texture tag selected
if doc.GetActiveTag() is None or not doc.GetActiveTag().CheckType(c4d.Tuvw) or doc.GetActiveTag().GetObject() != op:
raise RuntimeError("A UVW tag being part of the select op should be selected.")
# Enables UV Edge Mode if not already in any UV mode (needed for GetActiveUVSet to works)
docMode = doc.GetMode()
if docMode not in [c4d.Muvpoints, c4d.Muvedges, c4d.Muvpolygons]:
doc.SetMode(c4d.Muvpolygons)
# UVSet have to be defined to do that ensure the UV windows is opened at least one time
handle = c4d.modules.bodypaint.GetActiveUVSet(doc, c4d.GETACTIVEUVSET_ALL)
if handle is None:
# If fail it may be because the Texture view is not open
# Open A texture View
c4d.CallCommand(170103)
# In S22 you need to update the UV Mesh
if c4d.API_VERSION >= 22000:
c4d.modules.bodypaint.UpdateMeshUV(False)
# Retrieves active UVSet, The UV windows need to be opened at least one time
handle = c4d.modules.bodypaint.GetActiveUVSet(doc, c4d.GETACTIVEUVSET_ALL)
if handle is None:
raise RuntimeError("There is no Active UVSet")
uvSeams = c4d.modules.bodypaint.GetUVSeams(op)
if uvSeams is None:
raise RuntimeError("Failed to retrieves the uv seams.")
if uvSeams.GetCount() == 0:
raise RuntimeError("There is no seams for the selected object.")
# Retrieves the current Edge Selection stored in the Polygon Object
edgeSelect = op.GetEdgeS()
if edgeSelect is None:
raise RuntimeError("Failed to retrieves the edge selection.")
# Deselect all the currently selected edge
edgeSelect.DeselectAll()
# Copies the UV seams to the Polygon Edge selection.
uvSeams.CopyTo(edgeSelect)
# Resets the previous document mode
doc.SetMode(docMode)
# Pushes an update event to Cinema 4D
c4d.EventAdd()
if __name__ == "__main__":
main()
| """
Copyright: MAXON Computer GmbH
Author: <NAME>
Description:
- Copies the UV seams to the edge polygon selection.
Class/method highlighted:
- c4d.modules.bodypaint.GetUVSeams()
- PolygonObject.GetEdgeS()
- BaseSelect.CopyTo()
- c4d.modules.bodypaint.UpdateMeshUV()
"""
import c4d
def main():
# Checks if selected object is valid
if op is None:
raise ValueError("op is none, please select one object.")
# Checks if there is a texture tag selected
if doc.GetActiveTag() is None or not doc.GetActiveTag().CheckType(c4d.Tuvw) or doc.GetActiveTag().GetObject() != op:
raise RuntimeError("A UVW tag being part of the select op should be selected.")
# Enables UV Edge Mode if not already in any UV mode (needed for GetActiveUVSet to works)
docMode = doc.GetMode()
if docMode not in [c4d.Muvpoints, c4d.Muvedges, c4d.Muvpolygons]:
doc.SetMode(c4d.Muvpolygons)
# UVSet have to be defined to do that ensure the UV windows is opened at least one time
handle = c4d.modules.bodypaint.GetActiveUVSet(doc, c4d.GETACTIVEUVSET_ALL)
if handle is None:
# If fail it may be because the Texture view is not open
# Open A texture View
c4d.CallCommand(170103)
# In S22 you need to update the UV Mesh
if c4d.API_VERSION >= 22000:
c4d.modules.bodypaint.UpdateMeshUV(False)
# Retrieves active UVSet, The UV windows need to be opened at least one time
handle = c4d.modules.bodypaint.GetActiveUVSet(doc, c4d.GETACTIVEUVSET_ALL)
if handle is None:
raise RuntimeError("There is no Active UVSet")
uvSeams = c4d.modules.bodypaint.GetUVSeams(op)
if uvSeams is None:
raise RuntimeError("Failed to retrieves the uv seams.")
if uvSeams.GetCount() == 0:
raise RuntimeError("There is no seams for the selected object.")
# Retrieves the current Edge Selection stored in the Polygon Object
edgeSelect = op.GetEdgeS()
if edgeSelect is None:
raise RuntimeError("Failed to retrieves the edge selection.")
# Deselect all the currently selected edge
edgeSelect.DeselectAll()
# Copies the UV seams to the Polygon Edge selection.
uvSeams.CopyTo(edgeSelect)
# Resets the previous document mode
doc.SetMode(docMode)
# Pushes an update event to Cinema 4D
c4d.EventAdd()
if __name__ == "__main__":
main() | en | 0.697883 | Copyright: MAXON Computer GmbH Author: <NAME> Description: - Copies the UV seams to the edge polygon selection. Class/method highlighted: - c4d.modules.bodypaint.GetUVSeams() - PolygonObject.GetEdgeS() - BaseSelect.CopyTo() - c4d.modules.bodypaint.UpdateMeshUV() # Checks if selected object is valid # Checks if there is a texture tag selected # Enables UV Edge Mode if not already in any UV mode (needed for GetActiveUVSet to works) # UVSet have to be defined to do that ensure the UV windows is opened at least one time # If fail it may be because the Texture view is not open # Open A texture View # In S22 you need to update the UV Mesh # Retrieves active UVSet, The UV windows need to be opened at least one time # Retrieves the current Edge Selection stored in the Polygon Object # Deselect all the currently selected edge # Copies the UV seams to the Polygon Edge selection. # Resets the previous document mode # Pushes an update event to Cinema 4D | 2.270016 | 2 |
lth_srl.py | trondth/master | 0 | 6621145 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import OrderedDict
from masters_project_config import *
from subprocess import Popen, PIPE, STDOUT
import re
lth_dir = DATA_PREFIX + '/lth_srl'
class Lth_srl:
    """Thin wrapper around the LTH semantic role labeller's run script."""

    def run(self, conllfile, tagged=True):
        """
        @param conllfile conllfile
        @param tagged True if conllfile contains lemmas and pos-tags
        @return path to output file
        """
        # Shell out to the LTH run script: the CoNLL file is fed on stdin
        # and the labelled output is written to "<conllfile>.out".  cwd must
        # be the LTH install dir so the relative script path resolves.
        # NOTE(review): the `tagged` parameter is currently unused by the
        # command — confirm whether the script needs a flag for raw input.
        p_run = Popen("sh scripts/run.sh < {} > {}.out".format(conllfile, conllfile), shell=True, cwd=lth_dir)
        # Block until the external process finishes (stdout/stderr are not
        # captured, so the returned element is discarded).
        p_run.communicate()[0]
        return conllfile + '.out'
if __name__ == "__main__":
    # Run the labeller over the held-out test and train splits.
    #testconll = "devtest.conll"
    testconll = DATA_PREFIX + "/out/heldouttest.conll2008.conll"
    trainconll = DATA_PREFIX + "/out/heldouttrain.conll2008.conll"
    lthsrl = Lth_srl()
    outfile = lthsrl.run(testconll)
    outfile_t = lthsrl.run(trainconll)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import OrderedDict
from masters_project_config import *
from subprocess import Popen, PIPE, STDOUT
import re
lth_dir = DATA_PREFIX + '/lth_srl'
class Lth_srl:
def run(self, conllfile, tagged=True):
"""
@param conllfile conllfile
@param tagged True if conllfile contains lemmas and pos-tags
@return path to output file
"""
p_run = Popen("sh scripts/run.sh < {} > {}.out".format(conllfile, conllfile), shell=True, cwd=lth_dir)
p_run.communicate()[0]
return conllfile + '.out'
if __name__ == "__main__":
#testconll = "devtest.conll"
testconll = DATA_PREFIX + "/out/heldouttest.conll2008.conll"
trainconll = DATA_PREFIX + "/out/heldouttrain.conll2008.conll"
lthsrl = Lth_srl()
outfile = lthsrl.run(testconll)
outfile_t = lthsrl.run(trainconll)
| en | 0.264523 | #!/usr/bin/env python # -*- coding: utf-8 -*- @param conllfile conllfile @param tagged True if conllfile contains lemmas and pos-tags @return path to output file #testconll = "devtest.conll" | 2.258338 | 2 |
05_behavior_vis/main0b.py | cxrodgers/Rodgers2021 | 0 | 6621146 | <reponame>cxrodgers/Rodgers2021
## Plot the example frames (Fig 1B)
"""
1B, left
example_3contact_frame_with_edge_180221_KF132_242546.png
Image showing tracked whiskers in contact with concave shape
1B, right
example_3contact_frame_with_edge_180221_KF132_490102.png
Image showing tracked whiskers in contact with concave shape
"""
import os
import json
import imageio
import pandas
import numpy as np
import matplotlib.pyplot as plt
import whiskvid
import my
import my.plot
## Parameters
# Project-wide parameter file (JSON) one directory up.
with open('../parameters') as fi:
    params = json.load(fi)
## Load metadata about sessions
session_df, task2mouse, mouse2task = my.dataload.load_session_metadata(params)
## Example session and frames
# Hand-picked session and frame numbers for the two example stimuli.
session_name = '180221_KF132'
convex_frame = 490102
concave_frame = 242546
## Get handles
vs = whiskvid.django_db.VideoSession.from_name(session_name)
# Frame shape
frame_height = session_df.loc[session_name, 'frame_height']
frame_width = session_df.loc[session_name, 'frame_width']
## Load joints for plotting whiskers
joints = vs.data.joints.load_data()
## Load edges for plotting
edges = vs.data.all_edges.load_data()
# Choose example edges at the further distances
trial_matrix = vs.data.trial_matrix.load_data().dropna()
# Last reward-window frame per (rewside, servo_pos) stimulus, as ints.
# FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24; it was
# an alias for the builtin `int`, which is used here instead.
shape2rwin_frame = trial_matrix.groupby(['rewside', 'servo_pos'])[
    'rwin_frame'].last().astype(int)
## Truncation
# How many points to drop from each end of a traced edge before plotting.
TRUNCATE_LATERAL = 15
# Per-stimulus medial truncation; defaults to 1, with larger values for
# the closer concave ('left') stimuli.
shape2truncate_medial = shape2rwin_frame * 0 + 1
shape2truncate_medial.loc[('left', 1850)] = 10
shape2truncate_medial.loc[('left', 1760)] = 30
shape2truncate_medial.loc[('left', 1670)] = 35
## Extract edges
es = vs.data.edge_summary.load_data()
# Drop the flatter (or nothing) ones
es = es.loc[pandas.IndexSlice[:, :, [50, 150]], :].copy()
# Drop the rewside level which is not useful because we have stepper_pos
es.index = es.index.droplevel('rewside')
es.index = es.index.remove_unused_levels()
assert not es.index.duplicated().any()
# Normalize to a max of 1.0
norm_es = es.unstack('row').divide(
    es.unstack('row').max(axis=1), axis=0).stack('row')
# Binarize
# This fattens the edges a little
# FIX: `np.int` was removed in NumPy 1.24; the builtin `int` is the same
# dtype it aliased.
binary_norm_es = (norm_es > .0001).astype(int)
# Mean over stimuli
# FIX: the `level=` argument of mean() was removed in pandas 2.0; the
# equivalent groupby-on-index form is used instead.
esumm = binary_norm_es.groupby(level='row').mean()
## Plot each example frame
# The pixel size of the image is always the same
# But as DPI increases, the image is rendered smaller, while the lines stay
# the same widths
DPI = 500
for rewside in ['left', 'right']:
    # Get the appropriate frame
    # ('left' -> concave example frame, 'right' -> convex example frame)
    if rewside == 'left':
        frame_number = concave_frame
        other_rewside = 'right'
    else:
        frame_number = convex_frame
        other_rewside = 'left'
    # Get the frame
    frame = imageio.imread(os.path.join(
        params['example_frames_dir'],
        '{}_{}.png'.format(session_name, frame_number)))
    # Create a figure with a single axis filling it
    figsize = (frame_width / float(DPI), frame_height / float(DPI))
    f = plt.figure(frameon=False, figsize=figsize)
    ax = f.add_subplot(position=[0, 0, 1, 1])
    ax.set_frame_on(False)
    # Display image
    im = my.plot.imshow(frame, ax=ax, cmap=plt.cm.gray, interpolation='bilinear')
    im.set_clim((0, 255))
    # Plot the edge at this time
    TRUNCATE_MEDIAL = shape2truncate_medial.loc[rewside].loc[1850]
    edge = edges[frame_number][TRUNCATE_LATERAL:-TRUNCATE_MEDIAL]
    ax.plot(edge[:, 1], edge[:, 0], color='pink', lw=1)
    # Plot the example edges
    # (edge positions at the two further servo distances, drawn in cyan)
    example_edge_frames = shape2rwin_frame.loc[rewside].loc[[1670, 1760]]
    for servo_pos in example_edge_frames.index:
        example_edge_frame = example_edge_frames.loc[servo_pos]
        TRUNCATE_MEDIAL = shape2truncate_medial.loc[rewside].loc[servo_pos]
        edge = edges[example_edge_frame][TRUNCATE_LATERAL:-TRUNCATE_MEDIAL]
        ax.plot(edge[:, 1], edge[:, 0], color='cyan', lw=1)
    # Plot whiskers
    for whisker in ['C1', 'C2', 'C3']:
        color = {'C1': 'b', 'C2': 'g', 'C3': 'r'}[whisker]
        # Extract and plot joints
        # (a whisker may be untracked in this frame; skip it in that case)
        try:
            whisker_joints = joints.loc[frame_number].loc[whisker].unstack().T
        except KeyError:
            continue
        ax.plot(whisker_joints['c'], whisker_joints['r'], color=color, lw=1)
        # Plot a yellow dot on the end for contact (since these were selected for
        # having a contact, although we may be off by a frame)
        ax.plot(
            whisker_joints['c'][0:1], whisker_joints['r'][0:1],
            color='yellow', marker='o', ms=4)
    # Rotate into standard orientation
    ax.axis('image')
    ax.set_xlim((frame_width, 0))
    ax.set_ylim((0, frame_height))
    # Save
    f.savefig(
        'example_3contact_frame_with_edge_{}_{}.png'.format(
            session_name, frame_number), dpi=DPI)
plt.show()
"""
1B, left
example_3contact_frame_with_edge_180221_KF132_242546.png
Image showing tracked whiskers in contact with concave shape
1B, right
example_3contact_frame_with_edge_180221_KF132_490102.png
Image showing tracked whiskers in contact with concave shape
"""
import os
import json
import imageio
import pandas
import numpy as np
import matplotlib.pyplot as plt
import whiskvid
import my
import my.plot
## Parameters
with open('../parameters') as fi:
params = json.load(fi)
## Load metadata about sessions
session_df, task2mouse, mouse2task = my.dataload.load_session_metadata(params)
## Example session and frames
session_name = '180221_KF132'
convex_frame = 490102
concave_frame = 242546
## Get handles
vs = whiskvid.django_db.VideoSession.from_name(session_name)
# Frame shape
frame_height = session_df.loc[session_name, 'frame_height']
frame_width = session_df.loc[session_name, 'frame_width']
## Load joints for plotting whiskers
joints = vs.data.joints.load_data()
## Load edges for plotting
edges = vs.data.all_edges.load_data()
# Choose example edges at the further distances
trial_matrix = vs.data.trial_matrix.load_data().dropna()
# Last reward-window frame per (rewside, servo_pos) stimulus, as ints.
# FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24; it was
# an alias for the builtin `int`, which is used here instead.
shape2rwin_frame = trial_matrix.groupby(['rewside', 'servo_pos'])[
    'rwin_frame'].last().astype(int)
## Truncation
TRUNCATE_LATERAL = 15
shape2truncate_medial = shape2rwin_frame * 0 + 1
shape2truncate_medial.loc[('left', 1850)] = 10
shape2truncate_medial.loc[('left', 1760)] = 30
shape2truncate_medial.loc[('left', 1670)] = 35
## Extract edges
es = vs.data.edge_summary.load_data()
# Drop the flatter (or nothing) ones
es = es.loc[pandas.IndexSlice[:, :, [50, 150]], :].copy()
# Drop the rewside level which is not useful because we have stepper_pos
es.index = es.index.droplevel('rewside')
es.index = es.index.remove_unused_levels()
assert not es.index.duplicated().any()
# Normalize to a max of 1.0
norm_es = es.unstack('row').divide(
es.unstack('row').max(axis=1), axis=0).stack('row')
# Binarize
# This fattens the edges a little
# FIX: `np.int` was removed in NumPy 1.24; the builtin `int` is the same
# dtype it aliased.
binary_norm_es = (norm_es > .0001).astype(int)
# Mean over stimuli
# FIX: the `level=` argument of mean() was removed in pandas 2.0; the
# equivalent groupby-on-index form is used instead.
esumm = binary_norm_es.groupby(level='row').mean()
## Plot each example frame
# The pixel size of the image is always the same
# But as DPI increases, the image is rendered smaller, while the lines stay
# the same widths
DPI = 500
for rewside in ['left', 'right']:
# Get the appropriate frame
if rewside == 'left':
frame_number = concave_frame
other_rewside = 'right'
else:
frame_number = convex_frame
other_rewside = 'left'
# Get the frame
frame = imageio.imread(os.path.join(
params['example_frames_dir'],
'{}_{}.png'.format(session_name, frame_number)))
# Create a figure with a single axis filling it
figsize = (frame_width / float(DPI), frame_height / float(DPI))
f = plt.figure(frameon=False, figsize=figsize)
ax = f.add_subplot(position=[0, 0, 1, 1])
ax.set_frame_on(False)
# Display image
im = my.plot.imshow(frame, ax=ax, cmap=plt.cm.gray, interpolation='bilinear')
im.set_clim((0, 255))
# Plot the edge at this time
TRUNCATE_MEDIAL = shape2truncate_medial.loc[rewside].loc[1850]
edge = edges[frame_number][TRUNCATE_LATERAL:-TRUNCATE_MEDIAL]
ax.plot(edge[:, 1], edge[:, 0], color='pink', lw=1)
# Plot the example edges
example_edge_frames = shape2rwin_frame.loc[rewside].loc[[1670, 1760]]
for servo_pos in example_edge_frames.index:
example_edge_frame = example_edge_frames.loc[servo_pos]
TRUNCATE_MEDIAL = shape2truncate_medial.loc[rewside].loc[servo_pos]
edge = edges[example_edge_frame][TRUNCATE_LATERAL:-TRUNCATE_MEDIAL]
ax.plot(edge[:, 1], edge[:, 0], color='cyan', lw=1)
# Plot whiskers
for whisker in ['C1', 'C2', 'C3']:
color = {'C1': 'b', 'C2': 'g', 'C3': 'r'}[whisker]
# Extract and plot joints
try:
whisker_joints = joints.loc[frame_number].loc[whisker].unstack().T
except KeyError:
continue
ax.plot(whisker_joints['c'], whisker_joints['r'], color=color, lw=1)
# Plot a yellow dot on the end for contact (since these were selected for
# having a contact, although we may be off by a frame)
ax.plot(
whisker_joints['c'][0:1], whisker_joints['r'][0:1],
color='yellow', marker='o', ms=4)
# Rotate into standard orientation
ax.axis('image')
ax.set_xlim((frame_width, 0))
ax.set_ylim((0, frame_height))
# Save
f.savefig(
'example_3contact_frame_with_edge_{}_{}.png'.format(
session_name, frame_number), dpi=DPI)
plt.show() | en | 0.803137 | ## Plot the example frames (Fig 1B) 1B, left example_3contact_frame_with_edge_180221_KF132_242546.png Image showing tracked whiskers in contact with concave shape 1B, right example_3contact_frame_with_edge_180221_KF132_490102.png Image showing tracked whiskers in contact with concave shape ## Parameters ## Load metadata about sessions ## Example session and frames ## Get handles # Frame shape ## Load joints for plotting whiskers ## Load edges for plotting # Choose example edges at the further distances ## Truncation ## Extract edges # Drop the flatter (or nothing) ones # Drop the rewside level which is not useful because we have stepper_pos # Normalize to a max of 1.0 # Binarize # This fattens the edges a little # Mean over stimuli ## Plot each example frame # The pixel size of the image is always the same # But as DPI increases, the image is rendered smaller, while the lines stay # the same widths # Get the appropriate frame # Get the frame # Create a figure with a single axis filling it # Display image # Plot the edge at this time # Plot the example edges # Plot whiskers # Extract and plot joints # Plot a yellow dot on the end for contact (since these were selected for # having a contact, although we may be off by a frame) # Rotate into standard orientation # Save | 2.387332 | 2 |
tests/test_positionrank.py | sohyeonhwang/pytextrank | 0 | 6621147 | """Unit tests for PositionRank."""
from spacy.tokens import Doc
import sys ; sys.path.insert(0, "../pytextrank")
from pytextrank.base import BaseTextRank
from pytextrank.positionrank import PositionRank
def test_position_rank(doc: Doc):
    """PositionRank should favour keywords that appear early in the document."""
    # given: a PositionRank pipeline and a plain TextRank baseline
    ranker = PositionRank()
    baseline = BaseTextRank()
    # when: both algorithms process the same document
    ranked_phrases = ranker(doc)._.phrases
    baseline_phrases = baseline(doc)._.phrases
    # then: the two algorithms must assign different rank values
    assert {p.rank for p in ranked_phrases} != {p.rank for p in baseline_phrases}
    # The test article mentions Chelsea at the beginning and Shanghai
    # Shenhua only anecdotally later on.  Plain TextRank puts Shanghai
    # Shenhua (but not Chelsea) into its top-10 phrases; PositionRank
    # reverses that, which is the desired behaviour for a news article.
    top_ranked = [p.text for p in ranked_phrases[:10]]
    top_baseline = [p.text for p in baseline_phrases[:10]]
    assert "Chelsea" in top_ranked
    assert "Chelsea" not in top_baseline
    assert "Shanghai Shenhua" not in top_ranked
    assert "Shanghai Shenhua" in top_baseline
| """Unit tests for PositionRank."""
from spacy.tokens import Doc
import sys ; sys.path.insert(0, "../pytextrank")
from pytextrank.base import BaseTextRank
from pytextrank.positionrank import PositionRank
def test_position_rank (doc: Doc):
    """PositionRank ranks keywords that appear early in the document higher than TextRank does."""
    # given: a PositionRank pipeline and a plain TextRank pipeline for comparison
    position_rank = PositionRank()
    base_text_rank = BaseTextRank()
    # when: both pipelines process the same document
    processed_doc = position_rank(doc)
    phrases = processed_doc._.phrases
    comparison_doc = base_text_rank(doc)
    comparison_phrases = comparison_doc._.phrases
    # then: the two algorithms must produce different rank values
    assert set(p.rank for p in phrases) != set(p.rank for p in comparison_phrases)
    # The test article mentions Chelsea at the beginning and Shanghai Shenhua
    # only anecdotally later on.  Plain TextRank puts Shanghai Shenhua (but
    # not Chelsea) into its top-10 phrases; PositionRank reverses that, which
    # is the desired behaviour for a news article.
    assert "Chelsea" in [p.text for p in phrases[:10]]
    assert "Chelsea" not in [p.text for p in comparison_phrases[:10]]
    assert "Shanghai Shenhua" not in [p.text for p in phrases[:10]]
    assert "Shanghai Shenhua" in [p.text for p in comparison_phrases[:10]]
| en | 0.904647 | Unit tests for PositionRank. It ranks keywords that appear early in the document higher than TextRank. # given # when # then # the test article mentions Chelsea at the begginning of the article # while it mentions Shanghai Shenhua annecdotally later in the article # with normal TextRank, Shanghai Shenhua is part of top 10 phrases and Chelsea is not # with PositionRank, the situation is the opposite, which is desired for a piece of news. | 3.273934 | 3 |
modnet/__init__.py | modl-uclouvain/modnet | 0 | 6621148 | __version__ = "0.1.12.dev"
| __version__ = "0.1.12.dev"
| none | 1 | 1.069398 | 1 | |
nicett6/emulator/controller.py | pp81381/nicett6 | 0 | 6621149 | import asyncio
from contextlib import contextmanager, ExitStack
import logging
from nicett6.emulator.cover_emulator import TT6CoverEmulator
from nicett6.emulator.line_handler import LineHandler
from nicett6.utils import AsyncObserver
_LOGGER = logging.getLogger(__name__)
SEND_EOL = b"\r\n"
RCV_EOL = b"\r"
@contextmanager
def make_tt6controller(web_on, devices):
    """Build a TT6Controller with every device in *devices* registered.

    Yields the controller; all devices are deregistered again (in LIFO
    order, via ExitStack) when the context exits, even on error.
    """
    ctrl = TT6Controller(web_on)
    with ExitStack() as cleanup:
        for dev in devices:
            ctrl.register_device(dev)
            cleanup.callback(ctrl.deregister_device, dev.tt_addr)
        yield ctrl
class DuplicateDeviceError(Exception):
    """Raised when a device is registered under a TT address that is already in use."""
class WriterWrapper:
    """Pairs an asyncio stream writer with a health flag so that a broken
    client connection is logged instead of crashing the emulator."""

    def __init__(self, writer):
        self.ok = True  # becomes (and stays) False after a ConnectionResetError
        self.writer = writer

    async def write_msg(self, msg: str):
        """Send *msg* plus the EOL sequence, tolerating a dead connection."""
        if self.ok:
            payload = msg.encode("utf-8") + SEND_EOL
            try:
                self.writer.write(payload)
                await self.writer.drain()
            except ConnectionResetError:
                _LOGGER.warning("Caught ConnectionResetError. Connection marked bad.")
                self.ok = False
        if not self.ok:
            # Covers both a connection that just died and one already known bad.
            _LOGGER.warning(f"Message could not be written to defunkt client: {msg!r}")
async def read_line_bytes(reader):
    """Read one CR-terminated line of bytes from *reader*.

    Returns b"" at a clean end of stream (no pending data, or only a
    single stray b"\\n"); any other truncated data re-raises the
    IncompleteReadError.
    """
    try:
        return await reader.readuntil(RCV_EOL)
    except asyncio.IncompleteReadError as err:
        if err.partial and err.partial != b"\n":
            raise
        return b""
class TT6Controller(AsyncObserver):
    """Central hub of the TT6 emulator.

    Keeps a registry of emulated cover devices (keyed by TT address),
    serves the wire protocol over TCP, and - as an AsyncObserver of each
    registered device - optionally broadcasts device updates to every
    connected client when ``web_on`` is set.
    """
    def __init__(self, web_on):
        # web_on: when truthy, device updates are pushed to all clients.
        super().__init__()
        self.web_on = web_on
        self.devices = {}  # tt_addr -> TT6CoverEmulator
        self.writers = set()  # one WriterWrapper per open client connection
        self._server = None  # asyncio server while run_server is active
    def register_device(self, device: TT6CoverEmulator):
        """Add *device* to the registry and start observing it.

        Raises DuplicateDeviceError if its tt_addr is already registered.
        """
        if device.tt_addr in self.devices:
            raise DuplicateDeviceError()
        self.devices[device.tt_addr] = device
        device.attach(self)
        _LOGGER.info(f"registered device {device.tt_addr}")
    def deregister_device(self, tt_addr):
        """Stop observing and forget the device registered under *tt_addr*.

        Raises KeyError if no such device is registered.
        """
        device = self.devices[tt_addr]
        device.detach(self)
        del self.devices[tt_addr]
        _LOGGER.info(f"deregistered device {tt_addr}")
    def lookup_device(self, tt_addr):
        """Return the device registered under *tt_addr* (KeyError if unknown)."""
        return self.devices[tt_addr]
    async def run_server(self, port):
        """Serve the emulator protocol on *port* until cancelled or stopped."""
        async with await asyncio.start_server(
            self.handle_messages, port=port
        ) as self._server:
            for s in self._server.sockets:
                logging.info("Serving on {}".format(s.getsockname()))
            try:
                await self._server.serve_forever()
            except asyncio.CancelledError:
                # Normal shutdown path: serve_forever is cancelled when the
                # server is closed (stop_server) or the task is cancelled.
                logging.info("Server stopped")
    def stop_server(self):
        """Close the server if it is currently serving (safe to call anytime)."""
        if self._server is not None and self._server.is_serving():
            self._server.close()
    @contextmanager
    def wrap_writer(self, writer):
        """Track *writer* for broadcasts while the connection lives;
        untrack and close it on exit (even on error)."""
        _LOGGER.info("Connection opened")
        wrapped_writer = WriterWrapper(writer)
        self.writers.add(wrapped_writer)
        try:
            yield wrapped_writer
        finally:
            self.writers.remove(wrapped_writer)
            writer.close()
            _LOGGER.info("Connection closed")
    async def handle_messages(self, reader, writer):
        """Per-connection coroutine (asyncio.start_server callback).

        Reads CR-terminated lines and dispatches each one to its own
        LineHandler task, so slow command handling never blocks reading
        the next line.  Exits when the client closes the stream (empty
        read) and all in-flight handler tasks have completed.
        """
        with self.wrap_writer(writer) as wrapped_writer:
            line_handler = LineHandler(wrapped_writer, self)
            listener_task = asyncio.create_task(read_line_bytes(reader))
            pending = {listener_task}
            while pending:
                done, pending = await asyncio.wait(
                    pending, return_when=asyncio.FIRST_COMPLETED
                )
                for d in done:
                    if listener_task is not None and d is listener_task:
                        line_bytes = await d
                        if not line_bytes:
                            # EOF: stop listening; remaining handler tasks drain.
                            listener_task = None
                        else:
                            # Keep listening and handle this line concurrently.
                            listener_task = asyncio.create_task(read_line_bytes(reader))
                            line_handler_task = asyncio.create_task(
                                line_handler.handle_line(line_bytes)
                            )
                            pending.add(listener_task)
                            pending.add(line_handler_task)
                    else:
                        # A finished handler task; await it so exceptions surface.
                        await d
    async def write_all_wrapped_writers(self, msg):
        """Send *msg* to every currently connected client."""
        for wrapped_writer in self.writers:
            await wrapped_writer.write_msg(msg)
    async def update(self, device):
        """AsyncObserver callback: broadcast *device*'s message (as
        formatted by LineHandler.fmt_pos_msg) when web mode is on."""
        if self.web_on:
            await self.write_all_wrapped_writers(LineHandler.fmt_pos_msg(device))
| import asyncio
from contextlib import contextmanager, ExitStack
import logging
from nicett6.emulator.cover_emulator import TT6CoverEmulator
from nicett6.emulator.line_handler import LineHandler
from nicett6.utils import AsyncObserver
_LOGGER = logging.getLogger(__name__)
SEND_EOL = b"\r\n"
RCV_EOL = b"\r"
@contextmanager
def make_tt6controller(web_on, devices):
    """Context manager producing a TT6Controller pre-loaded with *devices*.

    Each device is registered on entry and scheduled for deregistration
    (LIFO, via ExitStack) on exit.
    """
    with ExitStack() as stack:
        controller = TT6Controller(web_on)
        for device in devices:
            controller.register_device(device)
            stack.callback(controller.deregister_device, device.tt_addr)
        yield controller
class DuplicateDeviceError(Exception):
    """Raised by TT6Controller.register_device when a device's TT address is already registered."""
    pass
class WriterWrapper:
    """Wraps an asyncio stream writer, remembering whether the client
    connection is still usable after a ConnectionResetError."""
    def __init__(self, writer):
        self.writer = writer
        self.ok = True  # flips to False permanently once the connection resets
    async def write_msg(self, msg: str):
        """Encode *msg*, append the EOL sequence and send it.

        A ConnectionResetError marks the wrapper bad; from then on (and for
        this failed attempt) the message is only logged, never sent.
        """
        if self.ok:
            try:
                self.writer.write(msg.encode("utf-8") + SEND_EOL)
                await self.writer.drain()
            except ConnectionResetError:
                self.ok = False
                _LOGGER.warning("Caught ConnectionResetError. Connection marked bad.")
        if not self.ok:
            _LOGGER.warning(f"Message could not be written to defunkt client: {msg!r}")
async def read_line_bytes(reader):
    """Read one CR-terminated line of bytes from *reader*.

    Returns b"" at a clean end of stream (no partial data, or only a
    single stray b"\\n"); any other truncated data re-raises the
    IncompleteReadError.
    """
    try:
        line_bytes = await reader.readuntil(RCV_EOL)
    except asyncio.IncompleteReadError as err:
        if len(err.partial) > 0 and err.partial != b"\n":
            raise
        line_bytes = b""
    return line_bytes
class TT6Controller(AsyncObserver):
    """Central hub of the TT6 emulator: a registry of emulated cover
    devices (keyed by TT address) plus a TCP server speaking the wire
    protocol.  As an AsyncObserver of each registered device it can
    broadcast device updates to all clients when ``web_on`` is set.
    """
    def __init__(self, web_on):
        # web_on: when truthy, device updates are pushed to all clients.
        super().__init__()
        self.web_on = web_on
        self.devices = {}  # tt_addr -> TT6CoverEmulator
        self.writers = set()  # one WriterWrapper per open client connection
        self._server = None  # asyncio server while run_server is active
    def register_device(self, device: TT6CoverEmulator):
        """Register *device* and start observing it.

        Raises DuplicateDeviceError if its tt_addr is already in use.
        """
        if device.tt_addr in self.devices:
            raise DuplicateDeviceError()
        self.devices[device.tt_addr] = device
        device.attach(self)
        _LOGGER.info(f"registered device {device.tt_addr}")
    def deregister_device(self, tt_addr):
        """Detach and forget the device registered under *tt_addr*.

        Raises KeyError if no such device is registered.
        """
        device = self.devices[tt_addr]
        device.detach(self)
        del self.devices[tt_addr]
        _LOGGER.info(f"deregistered device {tt_addr}")
    def lookup_device(self, tt_addr):
        """Return the device registered under *tt_addr* (KeyError if unknown)."""
        return self.devices[tt_addr]
    async def run_server(self, port):
        """Serve the emulator protocol on *port* until cancelled or stopped."""
        async with await asyncio.start_server(
            self.handle_messages, port=port
        ) as self._server:
            for s in self._server.sockets:
                logging.info("Serving on {}".format(s.getsockname()))
            try:
                await self._server.serve_forever()
            except asyncio.CancelledError:
                # Normal shutdown: serve_forever is cancelled when the server
                # is closed (stop_server) or the task is cancelled.
                logging.info("Server stopped")
    def stop_server(self):
        """Close the server if it is currently serving (safe to call anytime)."""
        if self._server is not None and self._server.is_serving():
            self._server.close()
    @contextmanager
    def wrap_writer(self, writer):
        """Track *writer* for broadcasts while the connection lives;
        untrack and close it on exit (even on error)."""
        _LOGGER.info("Connection opened")
        wrapped_writer = WriterWrapper(writer)
        self.writers.add(wrapped_writer)
        try:
            yield wrapped_writer
        finally:
            self.writers.remove(wrapped_writer)
            writer.close()
            _LOGGER.info("Connection closed")
    async def handle_messages(self, reader, writer):
        """Per-connection coroutine (asyncio.start_server callback).

        Reads CR-terminated lines and dispatches each one to its own
        LineHandler task, so slow command handling never blocks reading
        the next line.  Exits when the client closes the stream (empty
        read) and all in-flight handler tasks have completed.
        """
        with self.wrap_writer(writer) as wrapped_writer:
            line_handler = LineHandler(wrapped_writer, self)
            listener_task = asyncio.create_task(read_line_bytes(reader))
            pending = {listener_task}
            while pending:
                done, pending = await asyncio.wait(
                    pending, return_when=asyncio.FIRST_COMPLETED
                )
                for d in done:
                    if listener_task is not None and d is listener_task:
                        line_bytes = await d
                        if not line_bytes:
                            # EOF: stop listening; remaining handler tasks drain.
                            listener_task = None
                        else:
                            # Keep listening and handle this line concurrently.
                            listener_task = asyncio.create_task(read_line_bytes(reader))
                            line_handler_task = asyncio.create_task(
                                line_handler.handle_line(line_bytes)
                            )
                            pending.add(listener_task)
                            pending.add(line_handler_task)
                    else:
                        # A finished handler task; await it so exceptions surface.
                        await d
    async def write_all_wrapped_writers(self, msg):
        """Send *msg* to every currently connected client."""
        for wrapped_writer in self.writers:
            await wrapped_writer.write_msg(msg)
    async def update(self, device):
        """AsyncObserver callback: broadcast *device*'s message (as
        formatted by LineHandler.fmt_pos_msg) when web mode is on."""
        if self.web_on:
            await self.write_all_wrapped_writers(LineHandler.fmt_pos_msg(device))
| none | 1 | 2.498835 | 2 | |
AlgorithmTest/CODING_CHALLENGE/CC_1859.py | bluesky0960/AlgorithmTest | 0 | 6621150 | <reponame>bluesky0960/AlgorithmTest<gh_stars>0
# https://ktaivle-ai.moducoding.com/Question/1859/View/1#1
# "The Greatest Martial-Arts Tournament" (intermediate)
# Single-elimination bracket: higher score wins each bout, and the output
# lists each round's participants, printed from champion back to round 1.
import sys
from collections import deque
# First line: number of contestants; each following line is "<name> <score>".
n = int(sys.stdin.readline())
q = deque()
for _ in range(n):
    name, score = sys.stdin.readline().strip().split()
    q.append((name, int(score)))
if n==1:
    # A single contestant is the champion outright.
    print(q.popleft()[0])
    exit(0)
elif n%2!=0:
    # Odd field: pad with a zero-score dummy so everyone can be paired.
    # NOTE(review): later rounds are assumed to stay even after this one
    # bye - confirm the problem guarantees that (e.g. a power-of-two field).
    n += 1
    q.append(('tmp', 0))
cnt = 0  # contestants consumed in the current round
tonument = deque()  # one list of participant names per round
tmp = []  # names seen in the current round
while len(q)!=1:
    # Pop the next pair; the higher score advances (first wins ties).
    n1 = q.popleft()
    n2 = q.popleft()
    tmp.append(n1[0])
    if n2[0] != 'tmp':  # never list the bye dummy as a participant
        tmp.append(n2[0])
    q.append(n1) if n1[1] >= n2[1] else q.append(n2)
    cnt+=2
    if cnt == n:
        # Round complete: record its participants and halve the field.
        tonument.append(tmp)
        n //= 2
        cnt = 0
        tmp = []
tonument.append([q.popleft()[0]])  # the champion, as the final "round"
while tonument:  # print rounds from champion down to round 1
print(*tonument.pop()) | # https://ktaivle-ai.moducoding.com/Question/1859/View/1#1
# "The Greatest Martial-Arts Tournament" (intermediate)
# Single-elimination bracket: higher score wins each bout, and the output
# lists each round's participants, printed from champion back to round 1.
import sys
from collections import deque
# First line: number of contestants; each following line is "<name> <score>".
n = int(sys.stdin.readline())
q = deque()
for _ in range(n):
    name, score = sys.stdin.readline().strip().split()
    q.append((name, int(score)))
if n==1:
    # A single contestant is the champion outright.
    print(q.popleft()[0])
    exit(0)
elif n%2!=0:
    # Odd field: pad with a zero-score dummy so everyone can be paired.
    # NOTE(review): later rounds are assumed to stay even after this one
    # bye - confirm the problem guarantees that (e.g. a power-of-two field).
    n += 1
    q.append(('tmp', 0))
cnt = 0  # contestants consumed in the current round
tonument = deque()  # one list of participant names per round
tmp = []  # names seen in the current round
while len(q)!=1:
    # Pop the next pair; the higher score advances (first wins ties).
    n1 = q.popleft()
    n2 = q.popleft()
    tmp.append(n1[0])
    if n2[0] != 'tmp':  # never list the bye dummy as a participant
        tmp.append(n2[0])
    q.append(n1) if n1[1] >= n2[1] else q.append(n2)
    cnt+=2
    if cnt == n:
        # Round complete: record its participants and halve the field.
        tonument.append(tmp)
        n //= 2
        cnt = 0
        tmp = []
tonument.append([q.popleft()[0]])  # the champion, as the final "round"
while tonument:  # print rounds from champion down to round 1
print(*tonument.pop()) | en | 0.693021 | # https://ktaivle-ai.moducoding.com/Question/1859/View/1#1 # 천하제일무술대회(중급) | 3.157304 | 3 |