max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
data_structures/linked_list/problems/count_bobs/py/solution.py | lilsweetcaligula/Algorithms | 0 | 6619251 | <filename>data_structures/linked_list/problems/count_bobs/py/solution.py<gh_stars>0
import LinkedList
# Problem description: Given a string represented as a linked list of characters, count the occurrence
# of a substring "bob" in the original string.
# Solution time complexity: O(Kn), where K = len(src) = len('bob') = 3
# Comments: Recursive solution.
# Linked List Node inside the LinkedList module is declared as:
#
# class Node:
# def __init__(self, val, nxt=None):
# self.val = val
# self.nxt = nxt
#
def CountBobs(head: "LinkedList.Node") -> int:
    """Count occurrences of the substring "bob" in a linked list of characters.

    Overlapping matches are counted (e.g. "bobob" contains two), because after
    a full match the scan resumes two nodes ahead — at the trailing 'b', which
    may begin the next match.

    Runs in O(K*n) time, where K == len('bob') == 3.  Nodes are expected to
    expose ``val`` (a single character) and ``nxt`` (next node or None).
    """
    # Fewer than two remaining nodes can never hold a three-character match.
    if head is None or head.nxt is None:
        return 0
    node = head
    for c in 'bob':
        if node is None:
            # The list ended mid-pattern: no full "bob" starting at head.
            return 0
        if node.val != c:
            # Mismatch: restart the match attempt one node forward.
            return CountBobs(head.nxt)
        # Match so far; advance the scan pointer.
        node = node.nxt
    # Full match found.  head.nxt is the 'o' of this match and can never start
    # a new "bob", so resume two nodes ahead (allows overlapping matches).
    return 1 + CountBobs(head.nxt.nxt)
| <filename>data_structures/linked_list/problems/count_bobs/py/solution.py<gh_stars>0
import LinkedList
# Problem description: Given a string represented as a linked list of characters, count the occurrence
# of a substring "bob" in the original string.
# Solution time complexity: O(Kn), where K = len(src) = len('bob') = 3
# Comments: Recursive solution.
# Linked List Node inside the LinkedList module is declared as:
#
# class Node:
# def __init__(self, val, nxt=None):
# self.val = val
# self.nxt = nxt
#
def CountBobs(head: LinkedList.Node) -> int:
    """Count (possibly overlapping) occurrences of "bob" in a linked list
    of single-character nodes.

    Recursive; O(K*n) time where K == len('bob') == 3.  Nodes expose
    ``val`` (the character) and ``nxt`` (the next node, or None).
    """
    if head == None or head.nxt == None:
        # Fewer than two remaining nodes: "bob" cannot fit.
        return 0
    node = head
    for c in 'bob':
        if node == None:
            # The list ended here. Substring "bob" cannot
            # be present fully.
            return 0
        elif node.val != c:
            # Mismatch, move the head pointer one node forward
            # and recurse.
            return CountBobs(head.nxt)
        else:
            # Match so far, move the node pointer one node forward.
            node = node.nxt
    else:
        # If we got this far, we have found the "bob" substring.
        # Jump over a node starting from the head and recurse.
        # head.nxt holds the middle 'o' of this match, which can never
        # start a new "bob", so resuming at head.nxt.nxt still catches
        # overlapping matches such as "bobob".
        return 1 + CountBobs(head.nxt.nxt)
    return 0  # unreachable: every loop path returns above
| en | 0.859965 | # Problem description: Given a string represented as a linked list of characters, count the occurrence # of a substring "bob" in the original string. # Solution time complexity: O(Kn), where K = len(src) = len('bob') = 3 # Comments: Recursive solution. # Linked List Node inside the LinkedList module is declared as: # # class Node: # def __init__(self, val, nxt=None): # self.val = val # self.nxt = nxt # # The list ended here. Substring "bob" cannot # be present fully. # Mismatch, move the head pointer one node forward # and recurse. # Match so far, move the node pointer one node forward. # If we got this far, we have found the "bob" substring. # Jump over a node starting from the head and recurse. # We could jump to the next one but if we just had "bob" # match, head.nxt will have the value of "o", - and there's # no way we have "bob" with the first letter "o". | 4.026783 | 4 |
src/app/config.py | tmatsuo/appengine-blobstoremigrator-python | 12 | 6619252 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Manages configuration.
To configure these values, you need to adjust appengine_config.py.
E.g., to control the number of shards, you specify in appengine_config.py:
blobmigrator_NUM_SHARDS = 128
See appengine_config.py for all the possible configurations.
"""
from google.appengine.api import lib_config
CONFIG_NAMESPACE = "blobmigrator"
class _ConfigDefaults(object):
"""Default configs.
NUM_SHARDS
The number of shards that will map over the BlobInfo records.
ROOT_GCS_FOLDER
If set, all the migrated files will be placed in this folder within
the bucket.
DIRECT_MIGRATION_MAX_SIZE
Blobs smaller than this size will be directly copied within the
MapperPipelines. Blobs larger than this size will be copied using
a secondary MapperPipeline. Editing this value is not recommended.
MAPPING_DATASTORE_KIND_NAME
The name of the Datastore kind that will hold the mapping from old
blob key to new GCS filename and new blob key.
QUEUE_NAME
Specifies the queue to run the mapper jobs in.
"""
NUM_SHARDS = 16
ROOT_GCS_FOLDER = '_blobmigrator_root'
DIRECT_MIGRATION_MAX_SIZE = 2 * 1024 * 1024 * 1024
MAPPING_DATASTORE_KIND_NAME = '_blobmigrator_BlobKeyMapping'
QUEUE_NAME = 'default'
# Expose the public configuration key names for the admin UI index page.
# (Relies on _ConfigDefaults holding only configuration constants; the
# underscore filter drops dunder attributes.)
CONFIGURATION_KEYS_FOR_INDEX = [
    key for key in vars(_ConfigDefaults) if not key.startswith('_')
]

# Register the defaults so blobmigrator_* overrides in appengine_config.py
# take effect; values are then read as config.<NAME>.
config = lib_config.register(CONFIG_NAMESPACE, _ConfigDefaults.__dict__)
| # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Manages configuration.
To configure these values, you need to adjust appengine_config.py.
E.g., to control the number of shards, you specify in appengine_config.py:
blobmigrator_NUM_SHARDS = 128
See appengine_config.py for all the possible configurations.
"""
from google.appengine.api import lib_config
CONFIG_NAMESPACE = "blobmigrator"
class _ConfigDefaults(object):
    """Default configs, overridable via ``blobmigrator_<NAME>`` entries in
    appengine_config.py.

    NUM_SHARDS
      The number of shards that will map over the BlobInfo records.
    ROOT_GCS_FOLDER
      If set, all the migrated files will be placed in this folder within
      the bucket.
    DIRECT_MIGRATION_MAX_SIZE
      Blobs smaller than this size will be directly copied within the
      MapperPipelines. Blobs larger than this size will be copied using
      a secondary MapperPipeline. Editing this value is not recommended.
    MAPPING_DATASTORE_KIND_NAME
      The name of the Datastore kind that will hold the mapping from old
      blob key to new GCS filename and new blob key.
    QUEUE_NAME
      Specifies the queue to run the mapper jobs in.
    """
    NUM_SHARDS = 16
    ROOT_GCS_FOLDER = '_blobmigrator_root'
    DIRECT_MIGRATION_MAX_SIZE = 2 * 1024 * 1024 * 1024  # 2 GiB
    MAPPING_DATASTORE_KIND_NAME = '_blobmigrator_BlobKeyMapping'
    QUEUE_NAME = 'default'
# Public configuration key names, used to render the admin UI index page.
# Works because _ConfigDefaults holds only configuration constants (plus
# dunder attributes, which the underscore check filters out).
CONFIGURATION_KEYS_FOR_INDEX = [k for k in _ConfigDefaults.__dict__
                                if not k.startswith('_')]
# Register the defaults so blobmigrator_* overrides in appengine_config.py
# are picked up; configured values are then read as config.<NAME>.
config = lib_config.register(CONFIG_NAMESPACE, _ConfigDefaults.__dict__)
| en | 0.803404 | # Copyright 2015 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Manages configuration. To configure these values, you need to adjust appengine_config.py. E.g., to control the number of shards, you specify in appengine_config.py: blobmigrator_NUM_SHARDS = 128 See appengine_config.py for all the possible configurations. Default configs. NUM_SHARDS The number of shards that will map over the BlobInfo records. ROOT_GCS_FOLDER If set, all the migrated files will be placed in this folder within the bucket. DIRECT_MIGRATION_MAX_SIZE Blobs smaller than this size will be directly copied within the MapperPipelines. Blobs larger than this size will be copied using a secondary MapperPipeline. Editing this value is not recommended. MAPPING_DATASTORE_KIND_NAME The name of the Datastore kind that will hold the mapping from old blob key to new GCS filename and new blob key. QUEUE_NAME Specifies the queue to run the mapper jobs in. # This is a bit of a hack but does the trick for the UI. | 1.795875 | 2 |
select_data.py | sankar-mukherjee/Expressive-Speech-Synthesis | 1 | 6619253 | import os
import shutil
import librosa
# Select a subset of the Blizzard 2013 corpus: resample each wav to 16 kHz
# mono 16-bit, trim leading/trailing silence with sox, and write a matching
# metadata.csv, stopping once `data_length` seconds of audio are collected.
# NOTE(review): requires the `sox` binary on PATH and the `librosa` package.
source_path = '../database/blizzard2013/segmented/wavn'
dest_path = '../database/blizzard2013/segmented/small_wavn_lead_trail_silence_removed_16000'
data_length = 7200  # target total duration of selected audio, in seconds
############################
# Build an utterance-id -> transcript map from the pipe-delimited metadata
# file (first field is the id, last field is the transcript).
text_dict = {}
text_file = open('../database/blizzard2013/segmented/metadata.csv', 'r')
Lines = text_file.readlines()
for line in Lines:
    a = line.rstrip().split('|')
    text_dict[a[0]] = a[-1]
text_file.close()
#########################
# Recreate the destination directory from scratch (destructive!).
if os.path.isdir(dest_path):
    shutil.rmtree(dest_path)
os.mkdir(dest_path)
small_text_file = open(dest_path + '/metadata.csv', 'w')
dur = 0  # running total of selected audio, in seconds
file_list = os.listdir(source_path)
for f in file_list:
    if f.endswith('.wav'):
        # Resample to 16 kHz mono 16-bit and strip leading/trailing silence.
        os.system('sox ' + source_path + '/' + f
                  + ' -r 16000 -c 1 -b 16 ' + dest_path + '/' + f
                  + ' silence -l 1 0.1 0.4% -1 0.2 1%')
        # librosa.load returns (samples, sample_rate), so len(x)/y is the
        # clip duration in seconds regardless of librosa's default resampling.
        x, y = librosa.load(dest_path + '/' + f)
        dur = dur + len(x) / y
        file_name = f.replace('.wav', '')
        small_text_file.write(file_name + '|' + text_dict[file_name] + "\n")
        if dur > data_length:
            break
print(dur)
small_text_file.close()
| import os
import shutil
import librosa
# Select a subset of the Blizzard 2013 corpus: resample each wav to 16 kHz
# mono 16-bit, trim leading/trailing silence with sox, and write a matching
# metadata.csv, stopping once `data_length` seconds of audio are collected.
# NOTE(review): requires the `sox` binary on PATH and the `librosa` package.
source_path = '../database/blizzard2013/segmented/wavn'
dest_path = '../database/blizzard2013/segmented/small_wavn_lead_trail_silence_removed_16000'
data_length = 7200  # target total duration of selected audio, in seconds
############################
# Build an utterance-id -> transcript map from the pipe-delimited metadata
# file (first field is the id, last field is the transcript).
text_dict = {}
text_file = open('../database/blizzard2013/segmented/metadata.csv', 'r')
Lines = text_file.readlines()
for line in Lines:
    a = line.rstrip().split('|')
    text_dict[a[0]] = a[-1]
text_file.close()
#########################
# Recreate the destination directory from scratch (destructive!).
if os.path.isdir(dest_path):
    shutil.rmtree(dest_path)
os.mkdir(dest_path)
small_text_file = open(dest_path + '/metadata.csv', 'w')
dur = 0  # running total of selected audio, in seconds
file_list = os.listdir(source_path)
for f in file_list:
    if f.endswith('.wav'):
        # Resample to 16 kHz mono 16-bit and strip leading/trailing silence.
        os.system('sox ' + source_path + '/' + f
                  + ' -r 16000 -c 1 -b 16 ' + dest_path + '/' + f
                  + ' silence -l 1 0.1 0.4% -1 0.2 1%')
        # librosa.load returns (samples, sample_rate), so len(x)/y is the
        # clip duration in seconds regardless of librosa's default resampling.
        x, y = librosa.load(dest_path + '/' + f)
        dur = dur + len(x) / y
        file_name = f.replace('.wav', '')
        small_text_file.write(file_name + '|' + text_dict[file_name] + "\n")
        if dur > data_length:
            break
print(dur)
small_text_file.close()
| de | 0.859172 | # sec ############################ ######################### | 2.287539 | 2 |
api/utils/pagination.py | onecrayon/api.ashes.live | 11 | 6619254 | <filename>api/utils/pagination.py
import inspect
import urllib.parse
from api import db
from api.environment import settings
from api.schemas.pagination import PaginationOptions
def replace_offset(url: str, offset: int) -> str:
    """Return *url* with its ``offset`` query parameter set to *offset*.

    An offset of 0 removes the parameter entirely instead of emitting
    ``offset=0``; every other query parameter is preserved.
    """
    parts = urllib.parse.urlparse(url)
    params = urllib.parse.parse_qs(parts.query)
    if offset == 0 and "offset" in params:
        del params["offset"]
    else:
        params["offset"] = [offset]
    new_query = urllib.parse.urlencode(params, doseq=True)
    # Force HTTPS in production (Render's proxying misreports the scheme as
    # plain HTTP internally).
    new_scheme = "https" if settings.env == "production" else parts.scheme
    return parts._replace(scheme=new_scheme, query=new_query).geturl()
def paginated_results_for_query(
    query: db.Query,
    paging: PaginationOptions,
    url: str,
) -> dict:
    """Build a paginated response dict (count/next/previous/results)."""
    # Total row count plus the requested page of results.
    total = query.count()
    page = query.limit(paging.limit).offset(paging.offset).all()

    # Previous page: step back one page, clamped at zero.
    prev_link = None
    if paging.offset > 0:
        prev_link = replace_offset(url, max(paging.offset - paging.limit, 0))

    # Next page: only when more rows remain past this page.
    upcoming = paging.offset + paging.limit
    next_link = replace_offset(url, upcoming) if upcoming < total else None

    # Single-column queries of plain (non-model) values come back as
    # 1-tuples; unwrap those so callers receive bare values.
    descriptions = query.column_descriptions
    unwrap = False
    if len(descriptions) == 1:
        col_type = descriptions[0]["type"]
        unwrap = not (
            inspect.isclass(col_type) and issubclass(col_type, db.AlchemyBase)
        )
    results = [row[0] for row in page] if unwrap else page

    return {
        "count": total,
        "next": next_link,
        "previous": prev_link,
        "results": results,
    }
| <filename>api/utils/pagination.py
import inspect
import urllib.parse
from api import db
from api.environment import settings
from api.schemas.pagination import PaginationOptions
def replace_offset(url: str, offset: int) -> str:
    """Updates the offset in the given URL's query string.

    An ``offset`` of 0 removes the parameter instead of emitting
    ``offset=0``; all other query parameters are preserved.
    """
    url = urllib.parse.urlparse(url)
    query_params = urllib.parse.parse_qs(url.query)
    if offset == 0 and "offset" in query_params:
        del query_params["offset"]
    else:
        query_params["offset"] = [offset]
    encoded_query = urllib.parse.urlencode(query_params, doseq=True)
    # Force HTTPS (unfortunately Render performs some hijinks that result in
    # the scheme improperly being reported as HTTP internally)
    scheme = "https" if settings.env == "production" else url.scheme
    return urllib.parse.ParseResult(
        scheme, url.netloc, url.path, url.params, encoded_query, url.fragment
    ).geturl()
def paginated_results_for_query(
    query: db.Query,
    paging: PaginationOptions,
    url: str,
) -> dict:
    """Generic pagination results output.

    Returns a dict with ``count`` (total rows), ``next``/``previous``
    (page URLs or None), and ``results`` (the current page of rows).
    """
    # Fetch count and actual query data
    total_rows = query.count()
    rows = query.limit(paging.limit).offset(paging.offset).all()
    # Construct our next and previous links
    previous_url = None
    if paging.offset > 0:
        prev_offset = paging.offset - paging.limit
        if prev_offset < 0:
            # Clamp so the first page always starts at offset 0.
            prev_offset = 0
        previous_url = replace_offset(url, prev_offset)
    next_url = None
    next_offset = paging.offset + paging.limit
    if next_offset < total_rows:
        next_url = replace_offset(url, next_offset)
    # Construct our result rows and return.  Single-column queries of plain
    # (non-model) values come back from SQLAlchemy as 1-tuples; unwrap those
    # so the API returns bare values.
    if len(query.column_descriptions) == 1 and (
        not inspect.isclass(query.column_descriptions[0]["type"])
        or not issubclass(query.column_descriptions[0]["type"], db.AlchemyBase)
    ):
        row_list = [x[0] for x in rows]
    else:
        row_list = rows
    return {
        "count": total_rows,
        "next": next_url,
        "previous": previous_url,
        "results": row_list,
    }
| en | 0.815196 | Updates the offset in the given URL's query string # Force HTTPS (unfortunately Render performs some hijinks that result in the scheme improperly being reported # as HTTP internally) Generic pagination results output # Fetch count and actual query data # Construct our next and previous links # Construct our result rows and return | 2.751496 | 3 |
tests/test_nectarifere.py | XanAstia/Nectarifere | 1 | 6619255 | <gh_stars>1-10
from nectarifere.nectar import nectar
@nectar
def do(a):
if a != 5:
raise ValueError
do(5)
do(4) | from nectarifere.nectar import nectar
@nectar
def do(a):
if a != 5:
raise ValueError
do(5)
do(4) | none | 1 | 1.786047 | 2 | |
tob-api/api/utils.py | mehmetaydar/TheOrgBook | 1 | 6619256 | <gh_stars>1-10
"""
A collection of utility classes for TOB
"""
import os
from django.conf import settings
#
# Read settings from a custom settings file
# based on the path provided as an input parameter
# The choice of the custom settings file is driven by the value of the TOB_THEME env
# variable (i.e. ongov)
#
def fetch_custom_settings(*args):
_values = {}
if not hasattr(settings, 'CUSTOMIZATIONS'):
return _values
_dict = settings.CUSTOMIZATIONS
for arg in args:
if not _dict[arg]:
return _values
_dict = _dict[arg]
return _dict | """
A collection of utility classes for TOB
"""
import os
from django.conf import settings
#
# Read settings from a custom settings file
# based on the path provided as an input parameter
# The choice of the custom settings file is driven by the value of the TOB_THEME env
# variable (i.e. ongov)
#
def fetch_custom_settings(*args):
_values = {}
if not hasattr(settings, 'CUSTOMIZATIONS'):
return _values
_dict = settings.CUSTOMIZATIONS
for arg in args:
if not _dict[arg]:
return _values
_dict = _dict[arg]
return _dict | en | 0.792521 | A collection of utility classes for TOB # # Read settings from a custom settings file # based on the path provided as an input parameter # The choice of the custom settings file is driven by the value of the TOB_THEME env # variable (i.e. ongov) # | 2.599886 | 3 |
tests/ws_admin_functions.py | iRomi14/drmlib | 4 | 6619257 | """
Copyright 2018 Accelize
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import requests
class WSListFunction(object):
    """Thin client for the Accelize DRM web-service admin/metering API.

    Authenticates against the OAuth2 ``client_credentials`` endpoint and
    exposes one small wrapper method per REST endpoint.  Every wrapper
    returns a ``(parsed_response, http_status_code)`` tuple, where the body
    is JSON-decoded when possible and returned raw otherwise.
    """

    def __init__(self, url=None, login=None, password=None, token=None):
        # NOTE(review): the published snippet carried a redacted placeholder
        # as the default for ``password``; ``None`` restores valid syntax and
        # matches callers, which always pass credentials explicitly.
        self.url = url
        self.login = login
        self.password = password
        self.token = token

    def _get_user_token_raw(self):
        """POST the client credentials; return (decoded_body, status_code).

        Single quotes are normalized to double quotes before JSON decoding,
        mirroring the server's loosely formatted output.
        """
        r = requests.post(self.url + '/o/token/?grant_type=client_credentials',
                          auth=(self.login, self.password),
                          headers={'Content-Type': 'application/json'})
        json_acceptable_string = r.content.decode("latin1").replace("'", "\"")
        try:
            text = json.loads(json_acceptable_string)
        except ValueError:
            # Not JSON (e.g. an HTML error page): hand back the raw string.
            text = json_acceptable_string
        return text, r.status_code

    def _get_user_token(self):
        """Fetch and cache an access token, asserting the request succeeded."""
        text, status_code = self._get_user_token_raw()
        assert status_code == 200
        assert 'access_token' in text
        self.token = text['access_token']

    def _authentifed_call(self, method, url, data=None, headers=None):
        """Issue an authenticated JSON request; return (body, status_code)."""
        # Fresh dict per call: avoids the shared-mutable-default pitfall the
        # original ``headers={}`` signature had.
        headers = {} if headers is None else headers
        headers['Authorization'] = "Bearer " + str(self.token)
        headers['Content-Type'] = "application/json"
        r = requests.request(method, self.url + url, data=json.dumps(data),
                             headers=headers)
        try:
            text = json.loads(r.content)
        except ValueError:
            # Non-JSON payload (e.g. binary or HTML): return it untouched.
            text = r.content
        return text, r.status_code

    def _download_authentifed_call(self, method, url, data, headers=None):
        """Authenticated streaming request; return raw (content, status)."""
        headers = {} if headers is None else headers
        headers['Authorization'] = "Bearer " + str(self.token)
        r = requests.request(method, self.url + url, data=data,
                             headers=headers, stream=True)
        return r.content, r.status_code

    def get_authentification_token(self):
        """Return the cached OAuth token (None until _get_user_token ran)."""
        return self.token

    def application_create(self, data):
        # url(r'^auth/createapplication/', APIMetering.create_application),
        response, status = self._authentifed_call("POST", "/auth/createapplication/", data=data)
        return response, status

    def application_list(self, data):
        # url(r'^auth/listapplication/', APIMetering.list_application),
        response, status = self._authentifed_call("POST", "/auth/listapplication/", data=data)
        return response, status

    def application_delete(self, data):
        # url(r'^auth/deleteapplication/', APIMetering.delete_application),
        response, status = self._authentifed_call("POST", "/auth/deleteapplication/", data=data)
        return response, status

    def metering_information(self, data):
        # url(r'^auth/meteringinformation/', APIMetering.user_metering),
        response, status = self._authentifed_call("POST", "/auth/meteringinformation/", data=data)
        return response, status

    def floatingallinformation(self, data):
        # url(r'^auth/floatingallinformation/', APIMetering.user_metering),
        response, status = self._authentifed_call("POST", "/auth/floatingallinformation/", data=data)
        return response, status

    def vendorallinformation(self, data):
        # url(r'^auth/vendorallinformation/', APIMetering.user_metering),
        response, status = self._authentifed_call("POST", "/auth/vendorallinformation/", data=data)
        return response, status

    def vendorcheckinformation(self, data):
        # url(r'^auth/vendorcheckinformation/', APIMetering.user_metering),
        response, status = self._authentifed_call("POST", "/auth/vendorcheckinformation/", data=data)
        return response, status

    def nodelock_information(self, data):
        # url(r'^auth/nodelockassociated/', APIMetering.nodelockassociated),
        response, status = self._authentifed_call("POST", "/auth/nodelockassociated/", data=data)
        return response, status

    def metering_get_license_timeout(self):
        response, status = self._authentifed_call("GET", "/auth/getlicensetimeout/")
        return response, status

    def metering_lastinformation(self, data):
        # url(r'^auth/lastmeteringinformation/', APIMetering.last_metering),
        response, status = self._authentifed_call("POST", "/auth/lastmeteringinformation/", data=data)
        return response, status

    def metering_getlicense(self, data):
        # url(r'^auth/metering/genlicense/', APIMetering.get_license ),
        response, status = self._authentifed_call("POST", "/auth/metering/genlicense/", data=data)
        return response, status

    def metering_getlicense_random(self, data):
        # url(r'^auth/tests/genlicense/', APIMetering.get_license ),
        response, status = self._authentifed_call("POST", "/auth/tests/genlicense/", data=data)
        return response, status

    def nodelock_getlicense(self, data):
        response, status = self._authentifed_call("POST", "/auth/metering/genlicense/", data=data)
        return response, status

    def configuration_list(self):
        # url(r'^auth/getlastcspconfiguration/', APIMetering.get_last_configuration),
        response, status = self._authentifed_call("GET", "/auth/getlastcspconfiguration/")
        return response, status

    def configuration_create(self, data):
        # url(r'^auth/cspconfiguration/', APIMetering.configuration),
        response, status = self._authentifed_call("POST", "/auth/cspconfiguration/", data=data)
        return response, status

    def configuration_delete(self, data):
        # url(r'^auth/cspconfiguration/', APIMetering.configuration),
        response, status = self._authentifed_call("DELETE", "/auth/cspconfiguration/", data=data)
        return response, status

    def user_update_list(self):
        # url(r'^auth/updateuserlist/', APIMetering.update_user_list_from_accelstore),
        response, status = self._authentifed_call("GET", "/auth/updateuserlist/")
        return response, status

    def remove_test_session(self):
        # url(r'^auth/metering/rmsession/', APIMetering.remove_session),
        response, status = self._authentifed_call("GET", "/auth/metering/rmsession/")
        return response, status

    def clear_token(self):
        # url(r'^auth/admin/clear_token/', APIMetering.clear_token),
        response, status = self._authentifed_call("GET", "/auth/admin/clear_token/")
        return response, status

    def user_single_user(self, data):
        # url(r'^auth/userupdate/', APIMetering.user_update),
        response, status = self._authentifed_call("POST", "/auth/userupdate/", data=data)
        return response, status

    def user_single_user_card(self, data):
        # url(r'^auth/usercardupdate/', APIMetering.user_card_update),
        response, status = self._authentifed_call("POST", "/auth/usercardupdate/", data=data)
        return response, status

    def ip_create(self, data):
        # url(r'^auth/ip/create/', APIMetering.CreateIP),
        response, status = self._authentifed_call("POST", "/auth/ip/create/", data=data)
        return response, status

    def ip_delete(self, data):
        # url(r'^auth/ip/delete/', APIMetering.DeleteIP),
        response, status = self._authentifed_call("POST", "/auth/ip/delete/", data=data)
        return response, status

    def ip_get_hdk(self, data):
        # url(r'^auth/ip/hdk/', APIMetering.get_HDK),
        response, status = self._download_authentifed_call("POST", "/auth/ip/hdk/", data=data)
        return response, status

    def ip_create_get_hdk(self, data):
        # url(r'^auth/ip/get_create_hdk/', APIMetering.get_HDK),
        response, status = self._download_authentifed_call("POST", "/auth/ip/get_create_hdk/", data=data)
        return response, status

    def server_get_version(self):
        # url(r'^version/', APIMetering.get_version),
        response, status = self._authentifed_call("GET", "/version/")
        return response, status

    def metering_synips(self):
        # url(r'^auth/metering/syncips/', APIMetering.sync_IP_with_LGDN),
        response, status = self._authentifed_call("GET", "/auth/metering/syncips/")
        return response, status

    def remove_product_information(self, data):
        # url(r'^auth/metering/archiveduserproductinfo/', APIMetering.remove_product_information),
        response, status = self._authentifed_call("POST", "/auth/metering/archiveduserproductinfo/", data=data)
        return response, status

    def get_user_token(self, email):
        # url(r'^auth/admin/get_token/', APIMetering.get_token),
        response, status = self._authentifed_call("POST", "/auth/admin/get_token/", data={"email": email})
        return response, status

    def object_manager(self, api_object, method, data, urlsuffix=''):
        """Generic CRUD passthrough for /auth/objects/<api_object>/.

        Appends the tracking ``from=`` query parameter, using '?' or '&'
        depending on whether *urlsuffix* already carries a query string.
        """
        if urlsuffix == '' or urlsuffix.endswith('/'):
            urlsuffix = urlsuffix + "?from=drmportalpreview.accelize.com"
        else:
            urlsuffix = urlsuffix + "&from=drmportalpreview.accelize.com"
        response, status = self._authentifed_call(
            method, "/auth/objects/%s/%s" % (api_object, urlsuffix), data=data)
        return response, status
| """
Copyright 2018 Accelize
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import requests
class WSListFunction(object):
def __init__(self, url=None, login=None, password=<PASSWORD>, token=None):
self.url = url
self.login = login
self.password = password
self.token = token
def _get_user_token_raw(self):
r = requests.post(self.url + '/o/token/?grant_type=client_credentials',
auth=(self.login, self.password),
headers={'Content-Type': 'application/json'})
json_acceptable_string = r.content.decode("latin1").replace("'", "\"")
try:
text = json.loads(json_acceptable_string)
except:
text = json_acceptable_string
return text, r.status_code
def _get_user_token(self):
text, status_code = self._get_user_token_raw()
assert status_code == 200
assert 'access_token' in text
self.token = text['access_token']
def _authentifed_call(self, method, url, data=None, headers={}):
headers['Authorization'] = "Bearer " + str(self.token)
headers['Content-Type'] = "application/json"
r = requests.request(method, self.url + url, data=json.dumps(data), headers=headers)
# json_acceptable_string = r.content.replace("'", "\"")
try:
text = json.loads(r.content)
except:
text = r.content
return text, r.status_code
def _download_authentifed_call(self, method, url, data, headers={}):
headers['Authorization'] = "Bearer " + str(self.token)
r = requests.request(method, self.url + url, data=data, headers=headers, stream=True)
return r.content, r.status_code
def get_authentification_token(self):
return self.token
def application_create(self, data):
# url(r'^auth/createapplication/', APIMetering.create_application),
response, status = self._authentifed_call("POST", "/auth/createapplication/", data=data)
return response, status
def application_list(self, data):
# url(r'^auth/listapplication/', APIMetering.list_application),
response, status = self._authentifed_call("POST", "/auth/listapplication/", data=data)
return response, status
def application_delete(self, data):
# url(r'^auth/deleteapplication/', APIMetering.delete_application),
response, status = self._authentifed_call("POST", "/auth/deleteapplication/", data=data)
return response, status
def metering_information(self, data):
# url(r'^auth/meteringinformation/', APIMetering.user_metering),
response, status = self._authentifed_call("POST", "/auth/meteringinformation/", data=data)
return response, status
def floatingallinformation(self, data):
# url(r'^auth/meteringinformation/', APIMetering.user_metering),
response, status = self._authentifed_call("POST", "/auth/floatingallinformation/", data=data)
return response, status
def vendorallinformation(self, data):
# url(r'^auth/vendorallinformation/', APIMetering.user_metering),
response, status = self._authentifed_call("POST", "/auth/vendorallinformation/", data=data)
return response, status
def vendorcheckinformation(self, data):
# url(r'^auth/vendorcheckinformation/', APIMetering.user_metering),
response, status = self._authentifed_call("POST", "/auth/vendorcheckinformation/", data=data)
return response, status
def nodelock_information(self, data):
# url(r'^auth/meteringinformation/', APIMetering.nodelockassociated),
response, status = self._authentifed_call("POST", "/auth/nodelockassociated/", data=data)
return response, status
def metering_get_license_timeout(self):
response, status = self._authentifed_call("GET", "/auth/getlicensetimeout/")
return response, status
def metering_lastinformation(self, data):
# url(r'^auth/lastmeteringinformation/', APIMetering.last_metering),
response, status = self._authentifed_call("POST", "/auth/lastmeteringinformation/", data=data)
return response, status
def metering_getlicense(self, data):
# url(r'^auth/metering/genlicense/', APIMetering.get_license ),
response, status = self._authentifed_call("POST", "/auth/metering/genlicense/", data=data)
return response, status
def metering_getlicense_random(self, data):
# url(r'^auth/metering/genlicense/', APIMetering.get_license ),
response, status = self._authentifed_call("POST", "/auth/tests/genlicense/", data=data)
return response, status
def nodelock_getlicense(self, data):
response, status = self._authentifed_call("POST", "/auth/metering/genlicense/", data=data)
return response, status
def configuration_list(self):
# url(r'^auth/getlastcspconfiguration/', APIMetering.get_last_configuration),
response, status = self._authentifed_call("GET", "/auth/getlastcspconfiguration/")
return response, status
def configuration_create(self, data):
# url(r'^auth/createcspconfiguration/', APIMetering.configuration),
response, status = self._authentifed_call("POST", "/auth/cspconfiguration/", data=data)
return response, status
def configuration_delete(self, data):
# url(r'^auth/createcspconfiguration/', APIMetering.configuration),
response, status = self._authentifed_call("DELETE", "/auth/cspconfiguration/", data=data)
return response, status
def user_update_list(self):
# url(r'^auth/updateuserlist/', APIMetering.update_user_list_from_accelstore),
response, status = self._authentifed_call("GET", "/auth/updateuserlist/")
return response, status
def remove_test_session(self):
# url(r'^auth/updateuserlist/', APIMetering.update_user_list_from_accelstore),
response, status = self._authentifed_call("GET", "/auth/metering/rmsession/")
return response, status
def clear_token(self):
# url(r'^auth/admin/clear_token/', APIMetering.clear_token),
response, status = self._authentifed_call("GET", "/auth/admin/clear_token/")
return response, status
def user_single_user(self, data):
    """Update a single user record with `data`."""
    # url(r'^auth/userupdate/', APIMetering.user_update),
    resp, status = self._authentifed_call("POST", "/auth/userupdate/", data=data)
    return resp, status

def user_single_user_card(self, data):
    """Update a single user's card information with `data`."""
    # url(r'^auth/usercardupdate/', APIMetering.user_card_update),
    resp, status = self._authentifed_call("POST", "/auth/usercardupdate/", data=data)
    return resp, status
def ip_create(self, data):
    """Register a new IP entry."""
    # url(r'^auth/ip/create/', APIMetering.CreateIP),
    resp, status = self._authentifed_call("POST", "/auth/ip/create/", data=data)
    return resp, status

def ip_delete(self, data):
    """Delete an IP entry."""
    resp, status = self._authentifed_call("POST", "/auth/ip/delete/", data=data)
    return resp, status

def ip_get_hdk(self, data):
    """Fetch the HDK for an existing IP (uses the download variant of the call)."""
    # url(r'^auth/ip/hdk/', APIMetering.get_HDK),
    resp, status = self._download_authentifed_call("POST", "/auth/ip/hdk/", data=data)
    return resp, status

def ip_create_get_hdk(self, data):
    """Create the IP if needed, then fetch its HDK (download variant)."""
    resp, status = self._download_authentifed_call("POST", "/auth/ip/get_create_hdk/", data=data)
    return resp, status
def server_get_version(self):
    """GET the server version."""
    # url(r'^version/', APIMetering.get_version),
    resp, status = self._authentifed_call("GET", "/version/")
    return resp, status

def metering_synips(self):
    """Ask the server to synchronise its IPs with LGDN."""
    # url(r'^auth/metering/syncips/', APIMetering.sync_IP_with_LGDN),
    resp, status = self._authentifed_call("GET", "/auth/metering/syncips/")
    return resp, status

def remove_product_information(self, data):
    """Archive a user's product information."""
    # Route is /auth/metering/archiveduserproductinfo/ (the old comment
    # pointing at rmthissession was stale).
    resp, status = self._authentifed_call("POST", "/auth/metering/archiveduserproductinfo/", data=data)
    return resp, status

def get_user_token(self, email):
    """Admin endpoint: fetch the auth token of the user identified by `email`."""
    # url(r'^auth/admin/get_token/', ...),
    resp, status = self._authentifed_call("POST", "/auth/admin/get_token/", data={"email": email})
    return resp, status
def object_manager(self, api_object, method, data, urlsuffix=''):
    """Proxy a REST call to /auth/objects/<api_object>/<urlsuffix>.

    A tracking parameter ``from=drmportalpreview.accelize.com`` is appended
    to the suffix: with '&' when the suffix already carries a query string,
    with '?' otherwise.

    :param api_object: object collection name on the server.
    :param method: HTTP verb to use ("GET", "POST", ...).
    :param data: request payload forwarded to the authenticated call.
    :param urlsuffix: optional path/query suffix (e.g. '', '42', 'list/?page=2').
    :return: (response, status) pair from the authenticated call.
    """
    # Bug fix: the separator must depend on whether a query string already
    # exists, not on a trailing '/'.  The old trailing-slash test appended
    # '&from=...' to a bare suffix such as '42', producing an invalid URL.
    separator = "&" if "?" in urlsuffix else "?"
    urlsuffix = urlsuffix + separator + "from=drmportalpreview.accelize.com"
    response, status = self._authentifed_call(
        method, "/auth/objects/%s/%s" % (api_object, urlsuffix), data=data)
    return response, status
| en | 0.496799 | Copyright 2018 Accelize
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. # json_acceptable_string = r.content.replace("'", "\"") # url(r'^auth/createapplication/', APIMetering.create_application), # url(r'^auth/listapplication/', APIMetering.list_application), # url(r'^auth/deleteapplication/', APIMetering.delete_application), # url(r'^auth/meteringinformation/', APIMetering.user_metering), # url(r'^auth/meteringinformation/', APIMetering.user_metering), # url(r'^auth/vendorallinformation/', APIMetering.user_metering), # url(r'^auth/vendorcheckinformation/', APIMetering.user_metering), # url(r'^auth/meteringinformation/', APIMetering.nodelockassociated), # url(r'^auth/lastmeteringinformation/', APIMetering.last_metering), # url(r'^auth/metering/genlicense/', APIMetering.get_license ), # url(r'^auth/metering/genlicense/', APIMetering.get_license ), # url(r'^auth/getlastcspconfiguration/', APIMetering.get_last_configuration), # url(r'^auth/createcspconfiguration/', APIMetering.configuration), # url(r'^auth/createcspconfiguration/', APIMetering.configuration), # url(r'^auth/updateuserlist/', APIMetering.update_user_list_from_accelstore), # url(r'^auth/updateuserlist/', APIMetering.update_user_list_from_accelstore), # url(r'^auth/admin/clear_token/', APIMetering.clear_token), # url(r'^auth/userupdate/', APIMetering.user_update), # url(r'^auth/usercardupdate/', APIMetering.user_card_update), # url(r'^auth/ip/create/', APIMetering.CreateIP), # url(r'^auth/ip/create/', APIMetering.CreateIP), # url(r'^auth/ip/hdk/', APIMetering.get_HDK), # url(r'^auth/ip/hdk/', APIMetering.get_HDK), # url(r'^version/', APIMetering.get_version), # url(r'^auth/metering/syncips/', APIMetering.sync_IP_with_LGDN), # url(r'^auth/metering/rmthissession/', APIMetering.remove_product_information), # url(r'^auth/admin/get_token/', APIMetering.remove_product_information), | 2.448973 | 2 |
programs/models/programs.py | bycristhian/psp | 2 | 6619258 | <gh_stars>1-10
# Django
from django.db import models
# Models
from django.contrib.auth.models import User
from programs.models import ProgrammingLanguage
from projects.models import Module
# Utils
from projects.validators import validate_min_length_description
class Program(models.Model):
# One tracked program: author, language, parent module, and the planned
# vs. actual schedule dates.
name = models.CharField(max_length=100, help_text='Program name')
# Minimum description length is enforced by the shared project validator.
description = models.CharField(max_length=200, validators=[validate_min_length_description])
programmer = models.ForeignKey(User, on_delete=models.CASCADE, related_name='program_user')
language = models.ForeignKey(ProgrammingLanguage, on_delete=models.CASCADE, related_name='program_language')
module = models.ForeignKey(Module, on_delete=models.CASCADE, related_name='program_module')
# Defaults to 0 -- presumably filled in once the program is measured (confirm).
total_lines = models.IntegerField(default=0)
planning_date = models.DateField()
start_date = models.DateField()
# Nullable: looks like a program may not be finished yet -- TODO confirm.
finish_date = models.DateField(null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
class Pip(models.Model):
# NOTE(review): given the problems/proposal/comment fields this appears to
# model a PSP "Process Improvement Proposal" for a program -- confirm.
program = models.ForeignKey(Program, on_delete=models.CASCADE, related_name='list_pip_program')
name = models.CharField(max_length=100, help_text='Pip name')
date = models.DateTimeField()
# NOTE(review): max_length on TextField is only enforced in forms, not at
# the database level.
problems = models.TextField(max_length=350)
proposal = models.TextField(max_length=350)
comment = models.TextField(max_length=350)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
class Report(models.Model):
# A test report for a program: objective, setup conditions, and the
# expected vs. observed results.
program = models.ForeignKey(Program, on_delete=models.CASCADE, related_name='reports_view')
name = models.CharField(max_length=100, help_text='Report name')
date = models.DateTimeField()
# NOTE(review): "objetive" is a typo for "objective"; renaming would need a
# schema migration and would break existing callers, so it is flagged only.
objetive = models.TextField(max_length=350)
description = models.TextField(max_length=350)
conditions = models.TextField(max_length=350)
expect_results = models.TextField(max_length=350)
# Optional until the test has actually been executed.
current_results = models.TextField(max_length=350, null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
| # Django
from django.db import models
# Models
from django.contrib.auth.models import User
from programs.models import ProgrammingLanguage
from projects.models import Module
# Utils
from projects.validators import validate_min_length_description
class Program(models.Model):
name = models.CharField(max_length=100, help_text='Program name')
description = models.CharField(max_length=200, validators=[validate_min_length_description])
programmer = models.ForeignKey(User, on_delete=models.CASCADE, related_name='program_user')
language = models.ForeignKey(ProgrammingLanguage, on_delete=models.CASCADE, related_name='program_language')
module = models.ForeignKey(Module, on_delete=models.CASCADE, related_name='program_module')
total_lines = models.IntegerField(default=0)
planning_date = models.DateField()
start_date = models.DateField()
finish_date = models.DateField(null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
class Pip(models.Model):
program = models.ForeignKey(Program, on_delete=models.CASCADE, related_name='list_pip_program')
name = models.CharField(max_length=100, help_text='Pip name')
date = models.DateTimeField()
problems = models.TextField(max_length=350)
proposal = models.TextField(max_length=350)
comment = models.TextField(max_length=350)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
class Report(models.Model):
program = models.ForeignKey(Program, on_delete=models.CASCADE, related_name='reports_view')
name = models.CharField(max_length=100, help_text='Report name')
date = models.DateTimeField()
objetive = models.TextField(max_length=350)
description = models.TextField(max_length=350)
conditions = models.TextField(max_length=350)
expect_results = models.TextField(max_length=350)
current_results = models.TextField(max_length=350, null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name | en | 0.244878 | # Django # Models # Utils | 2.19896 | 2 |
database/tests/test_urls.py | erischon/p8_eri_schon | 0 | 6619259 | <reponame>erischon/p8_eri_schon
from django.test import SimpleTestCase
from django.urls import reverse, resolve
from database.views import etl, etl_extract, etl_transform, etl_load, etl_manage_nutriscore, etl_manage_delete
class DatabaseTestUrls(SimpleTestCase):
'''
URL-resolution tests for the `database` app: each named route must
reverse to a URL that resolves back to the expected view function.
SimpleTestCase is sufficient because no database access is involved.
'''
def test_etl_url_is_resolved(self):
url = reverse('etl')
self.assertEqual(resolve(url).func, etl)
def test_etl_extract_url_is_resolved(self):
url = reverse('etl_extract')
self.assertEqual(resolve(url).func, etl_extract)
def test_etl_transform_url_is_resolved(self):
url = reverse('etl_transform')
self.assertEqual(resolve(url).func, etl_transform)
def test_etl_load_url_is_resolved(self):
url = reverse('etl_load')
self.assertEqual(resolve(url).func, etl_load)
def test_etl_manage_nutriscore_url_is_resolved(self):
url = reverse('etl_manage_nutriscore')
self.assertEqual(resolve(url).func, etl_manage_nutriscore)
def test_etl_manage_delete_url_is_resolved(self):
url = reverse('etl_manage_delete')
self.assertEqual(resolve(url).func, etl_manage_delete)
| from django.test import SimpleTestCase
from django.urls import reverse, resolve
from database.views import etl, etl_extract, etl_transform, etl_load, etl_manage_nutriscore, etl_manage_delete
class DatabaseTestUrls(SimpleTestCase):
'''
I test all the urls in database app
'''
def test_etl_url_is_resolved(self):
url = reverse('etl')
self.assertEqual(resolve(url).func, etl)
def test_etl_extract_url_is_resolved(self):
url = reverse('etl_extract')
self.assertEqual(resolve(url).func, etl_extract)
def test_etl_transform_url_is_resolved(self):
url = reverse('etl_transform')
self.assertEqual(resolve(url).func, etl_transform)
def test_etl_load_url_is_resolved(self):
url = reverse('etl_load')
self.assertEqual(resolve(url).func, etl_load)
def test_etl_manage_nutriscore_url_is_resolved(self):
url = reverse('etl_manage_nutriscore')
self.assertEqual(resolve(url).func, etl_manage_nutriscore)
def test_etl_manage_delete_url_is_resolved(self):
url = reverse('etl_manage_delete')
self.assertEqual(resolve(url).func, etl_manage_delete) | en | 0.87187 | I test all the urls in database app | 2.482045 | 2 |
tests/app/api_v0/test_revisions.py | tigerzhong13/server | 0 | 6619260 | <reponame>tigerzhong13/server
# TODO: split E2E test to unit test
from typing import Dict, Iterator
import pytest
from sqlalchemy.orm import Session
from starlette.testclient import TestClient
import pol.server
from pol.models import Avatar, PublicUser
from pol.db.tables import ChiiRevHistory
from tests.conftest import MockUser
from pol.services.user_service import UserService
from pol.services.rev_service.person_rev import person_rev_type_filters
from pol.services.rev_service.character_rev import character_rev_type_filters
# Base route for the paginated person-revision listing.
person_revisions_api_prefix = "/v0/revisions/persons"
# E2E: needs a live database.  Every creator of person #9's revisions must
# exist as a user, so mock each creator before hitting the endpoint.
# NOTE(review): the person/character/subject/episode suites below share the
# same boilerplate -- consider pytest.mark.parametrize.
@pytest.mark.env("e2e", "database")
def test_person_revisions_basic(
client: TestClient,
db_session: Session,
mock_user: MockUser,
):
for r in db_session.query(ChiiRevHistory.rev_creator).where(
ChiiRevHistory.rev_mid == 9, person_rev_type_filters
):
mock_user(r.rev_creator)
response = client.get(person_revisions_api_prefix, params={"person_id": 9})
assert response.status_code == 200
assert response.headers["content-type"] == "application/json"
res = response.json()
# Pagination envelope: total/data/offset/limit.
assert res["total"]
assert res["data"]
assert res["offset"] == 0
assert "limit" in res
for item in res["data"]:
assert "nickname" in item["creator"]
@pytest.mark.env("e2e", "database")
def test_person_revisions_offset(
client: TestClient,
db_session: Session,
mock_user: MockUser,
):
for r in db_session.query(ChiiRevHistory.rev_creator).where(
ChiiRevHistory.rev_mid == 9, person_rev_type_filters
):
mock_user(r.rev_creator)
offset = 1
common_params = {"person_id": 9}
response1 = client.get(
# NOTE(review): literal 1 duplicates `offset` above -- keep them in sync.
person_revisions_api_prefix, params={"offset": 1, **common_params}
)
assert response1.status_code == 200
assert response1.headers["content-type"] == "application/json"
res = response1.json()
# offset=1 must shift the result window by exactly one item vs. offset=0.
assert (
res["data"][0]["id"]
== client.get(person_revisions_api_prefix, params=common_params).json()["data"][
1
]["id"]
)
assert res["offset"] == offset
@pytest.mark.env("e2e", "database")
def test_person_revisions_offset_limit(
client: TestClient,
db_session: Session,
mock_user: MockUser,
):
for r in db_session.query(ChiiRevHistory.rev_creator).where(
ChiiRevHistory.rev_mid == 9, person_rev_type_filters
):
mock_user(r.rev_creator)
# An offset far past the allowed window is rejected by request validation.
offset = 30000
response = client.get(
person_revisions_api_prefix, params={"offset": offset, "person_id": 9}
)
assert response.status_code == 422, response.text
# Base route for the paginated character-revision listing.
character_revisions_api_prefix = "/v0/revisions/characters"
# Mirrors the person-revision suite above, but for character #1.
@pytest.mark.env("e2e", "database")
def test_character_revisions_basic(
client: TestClient,
db_session: Session,
mock_user: MockUser,
):
for r in db_session.query(ChiiRevHistory.rev_creator).where(
ChiiRevHistory.rev_mid == 1, character_rev_type_filters
):
mock_user(r.rev_creator)
response = client.get(character_revisions_api_prefix, params={"character_id": 1})
assert response.status_code == 200, response.json()
assert response.headers["content-type"] == "application/json"
res = response.json()
assert res["total"]
assert res["offset"] == 0
assert "limit" in res
assert res["data"]
@pytest.mark.env("e2e", "database")
def test_character_revisions_offset(
client: TestClient,
db_session: Session,
mock_user: MockUser,
):
for r in db_session.query(ChiiRevHistory.rev_creator).where(
ChiiRevHistory.rev_mid == 1, character_rev_type_filters
):
mock_user(r.rev_creator)
offset = 1
common_params = {"character_id": 1}
response1 = client.get(
character_revisions_api_prefix, params={"offset": offset, **common_params}
)
assert response1.status_code == 200
assert response1.headers["content-type"] == "application/json"
res = response1.json()
# offset=1 must shift the window by exactly one item vs. offset=0.
assert (
res["data"][0]["id"]
== client.get(character_revisions_api_prefix, params=common_params).json()[
"data"
][1]["id"]
)
assert res["offset"] == offset
@pytest.mark.env("e2e", "database")
def test_character_revisions_page_limit(
client: TestClient,
db_session: Session,
mock_user: MockUser,
):
for r in db_session.query(ChiiRevHistory.rev_creator).where(
ChiiRevHistory.rev_mid == 1, character_rev_type_filters
):
mock_user(r.rev_creator)
# Out-of-range offset is rejected by request validation (422).
offset = 30000
response = client.get(
character_revisions_api_prefix, params={"character_id": 1, "offset": offset}
)
assert response.status_code == 422, response.text
# Base route for the paginated subject-revision listing.
subject_revisions_api_prefix = "/v0/revisions/subjects"
# Stand-in for UserService: fabricates deterministic public users so the
# endpoint can resolve revision creators without touching the user table.
class MockUserService:
async def get_users_by_id(self, ids: Iterator[int]) -> Dict[int, PublicUser]:
ids = list(ids)
for uid in ids:
assert uid > 0
return {
uid: PublicUser(
id=uid,
username=f"username {uid}",
nickname=f"nickname {uid}",
avatar=Avatar.from_db_record(""),
)
for uid in ids
}
async def get_by_uid(self, uid: int) -> PublicUser:
assert uid > 0
return PublicUser(
id=uid,
username=f"username {uid}",
nickname=f"nickname {uid}",
avatar=Avatar.from_db_record(""),
)
@pytest.mark.env("e2e", "database")
def test_subject_revisions_basic(client: TestClient):
# NOTE(review): this override is never removed, so it appears to leak into
# every later test in the session -- consider a fixture with cleanup.
pol.server.app.dependency_overrides[UserService.new] = MockUserService
response = client.get(subject_revisions_api_prefix, params={"subject_id": 26})
assert response.status_code == 200
assert response.headers["content-type"] == "application/json"
res = response.json()
assert "total" in res
assert "limit" in res
assert res["offset"] == 0
# The page is full when total exceeds limit, otherwise it holds everything.
if res["total"] <= res["limit"]:
assert res["total"] == len(res["data"])
else:
assert res["limit"] == len(res["data"])
for item in res["data"]:
if item["creator"]:
assert "nickname" in item["creator"]
@pytest.mark.env("e2e", "database")
def test_subject_revisions_offset(client: TestClient):
offset = 1
common_params = {"subject_id": 1}
response1 = client.get(
subject_revisions_api_prefix, params={"offset": offset, **common_params}
)
assert response1.status_code == 200
assert response1.headers["content-type"] == "application/json"
res = response1.json()
# offset=1 must shift the window by exactly one item vs. offset=0.
assert (
res["data"][0]["id"]
== client.get(subject_revisions_api_prefix, params=common_params).json()[
"data"
][1]["id"]
)
assert res["offset"] == offset
@pytest.mark.env("e2e", "database")
def test_subject_revisions_page_limit(
client: TestClient,
):
# Out-of-range offset is rejected by request validation (422).
offset = 30000
response = client.get(
subject_revisions_api_prefix, params={"subject_id": 1, "offset": offset}
)
assert response.status_code == 422, response.text
# Base route for the paginated episode-revision listing.
episode_revisions_api_prefix = "/v0/revisions/episodes"
# Mirrors the suites above for episode revisions; no user mocking here, so
# presumably the fixture data already contains the creators -- confirm.
@pytest.mark.env("e2e", "database")
def test_episode_revisions_basic(
client: TestClient,
):
response = client.get(episode_revisions_api_prefix, params={"episode_id": 522})
assert response.status_code == 200
assert response.headers["content-type"] == "application/json"
res = response.json()
assert "total" in res
assert "limit" in res
assert res["offset"] == 0
# The page is full when total exceeds limit, otherwise it holds everything.
if res["total"] <= res["limit"]:
assert res["total"] == len(res["data"])
else:
assert res["limit"] == len(res["data"])
for item in res["data"]:
assert "nickname" in item["creator"]
@pytest.mark.env("e2e", "database")
def test_episode_revisions_offset(
client: TestClient,
):
offset = 1
common_params = {"episode_id": 1045}
response1 = client.get(
episode_revisions_api_prefix, params={"offset": offset, **common_params}
)
assert response1.status_code == 200
assert response1.headers["content-type"] == "application/json"
res = response1.json()
# offset=1 must shift the window by exactly one item vs. offset=0.
assert (
res["data"][0]["id"]
== client.get(episode_revisions_api_prefix, params=common_params).json()[
"data"
][1]["id"]
)
assert res["offset"] == offset
@pytest.mark.env("e2e", "database")
def test_episode_revisions_page_limit(
client: TestClient,
):
# Out-of-range offset is rejected by request validation (422).
offset = 30000
response = client.get(
episode_revisions_api_prefix, params={"episode_id": 522, "offset": offset}
)
assert response.status_code == 422, response.text
| # TODO: split E2E test to unit test
from typing import Dict, Iterator
import pytest
from sqlalchemy.orm import Session
from starlette.testclient import TestClient
import pol.server
from pol.models import Avatar, PublicUser
from pol.db.tables import ChiiRevHistory
from tests.conftest import MockUser
from pol.services.user_service import UserService
from pol.services.rev_service.person_rev import person_rev_type_filters
from pol.services.rev_service.character_rev import character_rev_type_filters
person_revisions_api_prefix = "/v0/revisions/persons"
@pytest.mark.env("e2e", "database")
def test_person_revisions_basic(
client: TestClient,
db_session: Session,
mock_user: MockUser,
):
for r in db_session.query(ChiiRevHistory.rev_creator).where(
ChiiRevHistory.rev_mid == 9, person_rev_type_filters
):
mock_user(r.rev_creator)
response = client.get(person_revisions_api_prefix, params={"person_id": 9})
assert response.status_code == 200
assert response.headers["content-type"] == "application/json"
res = response.json()
assert res["total"]
assert res["data"]
assert res["offset"] == 0
assert "limit" in res
for item in res["data"]:
assert "nickname" in item["creator"]
@pytest.mark.env("e2e", "database")
def test_person_revisions_offset(
client: TestClient,
db_session: Session,
mock_user: MockUser,
):
for r in db_session.query(ChiiRevHistory.rev_creator).where(
ChiiRevHistory.rev_mid == 9, person_rev_type_filters
):
mock_user(r.rev_creator)
offset = 1
common_params = {"person_id": 9}
response1 = client.get(
person_revisions_api_prefix, params={"offset": 1, **common_params}
)
assert response1.status_code == 200
assert response1.headers["content-type"] == "application/json"
res = response1.json()
assert (
res["data"][0]["id"]
== client.get(person_revisions_api_prefix, params=common_params).json()["data"][
1
]["id"]
)
assert res["offset"] == offset
@pytest.mark.env("e2e", "database")
def test_person_revisions_offset_limit(
client: TestClient,
db_session: Session,
mock_user: MockUser,
):
for r in db_session.query(ChiiRevHistory.rev_creator).where(
ChiiRevHistory.rev_mid == 9, person_rev_type_filters
):
mock_user(r.rev_creator)
offset = 30000
response = client.get(
person_revisions_api_prefix, params={"offset": offset, "person_id": 9}
)
assert response.status_code == 422, response.text
character_revisions_api_prefix = "/v0/revisions/characters"
@pytest.mark.env("e2e", "database")
def test_character_revisions_basic(
client: TestClient,
db_session: Session,
mock_user: MockUser,
):
for r in db_session.query(ChiiRevHistory.rev_creator).where(
ChiiRevHistory.rev_mid == 1, character_rev_type_filters
):
mock_user(r.rev_creator)
response = client.get(character_revisions_api_prefix, params={"character_id": 1})
assert response.status_code == 200, response.json()
assert response.headers["content-type"] == "application/json"
res = response.json()
assert res["total"]
assert res["offset"] == 0
assert "limit" in res
assert res["data"]
@pytest.mark.env("e2e", "database")
def test_character_revisions_offset(
client: TestClient,
db_session: Session,
mock_user: MockUser,
):
for r in db_session.query(ChiiRevHistory.rev_creator).where(
ChiiRevHistory.rev_mid == 1, character_rev_type_filters
):
mock_user(r.rev_creator)
offset = 1
common_params = {"character_id": 1}
response1 = client.get(
character_revisions_api_prefix, params={"offset": offset, **common_params}
)
assert response1.status_code == 200
assert response1.headers["content-type"] == "application/json"
res = response1.json()
assert (
res["data"][0]["id"]
== client.get(character_revisions_api_prefix, params=common_params).json()[
"data"
][1]["id"]
)
assert res["offset"] == offset
@pytest.mark.env("e2e", "database")
def test_character_revisions_page_limit(
client: TestClient,
db_session: Session,
mock_user: MockUser,
):
for r in db_session.query(ChiiRevHistory.rev_creator).where(
ChiiRevHistory.rev_mid == 1, character_rev_type_filters
):
mock_user(r.rev_creator)
offset = 30000
response = client.get(
character_revisions_api_prefix, params={"character_id": 1, "offset": offset}
)
assert response.status_code == 422, response.text
subject_revisions_api_prefix = "/v0/revisions/subjects"
class MockUserService:
async def get_users_by_id(self, ids: Iterator[int]) -> Dict[int, PublicUser]:
ids = list(ids)
for uid in ids:
assert uid > 0
return {
uid: PublicUser(
id=uid,
username=f"username {uid}",
nickname=f"nickname {uid}",
avatar=Avatar.from_db_record(""),
)
for uid in ids
}
async def get_by_uid(self, uid: int) -> PublicUser:
assert uid > 0
return PublicUser(
id=uid,
username=f"username {uid}",
nickname=f"nickname {uid}",
avatar=Avatar.from_db_record(""),
)
@pytest.mark.env("e2e", "database")
def test_subject_revisions_basic(client: TestClient):
pol.server.app.dependency_overrides[UserService.new] = MockUserService
response = client.get(subject_revisions_api_prefix, params={"subject_id": 26})
assert response.status_code == 200
assert response.headers["content-type"] == "application/json"
res = response.json()
assert "total" in res
assert "limit" in res
assert res["offset"] == 0
if res["total"] <= res["limit"]:
assert res["total"] == len(res["data"])
else:
assert res["limit"] == len(res["data"])
for item in res["data"]:
if item["creator"]:
assert "nickname" in item["creator"]
@pytest.mark.env("e2e", "database")
def test_subject_revisions_offset(client: TestClient):
offset = 1
common_params = {"subject_id": 1}
response1 = client.get(
subject_revisions_api_prefix, params={"offset": offset, **common_params}
)
assert response1.status_code == 200
assert response1.headers["content-type"] == "application/json"
res = response1.json()
assert (
res["data"][0]["id"]
== client.get(subject_revisions_api_prefix, params=common_params).json()[
"data"
][1]["id"]
)
assert res["offset"] == offset
@pytest.mark.env("e2e", "database")
def test_subject_revisions_page_limit(
client: TestClient,
):
offset = 30000
response = client.get(
subject_revisions_api_prefix, params={"subject_id": 1, "offset": offset}
)
assert response.status_code == 422, response.text
episode_revisions_api_prefix = "/v0/revisions/episodes"
@pytest.mark.env("e2e", "database")
def test_episode_revisions_basic(
client: TestClient,
):
response = client.get(episode_revisions_api_prefix, params={"episode_id": 522})
assert response.status_code == 200
assert response.headers["content-type"] == "application/json"
res = response.json()
assert "total" in res
assert "limit" in res
assert res["offset"] == 0
if res["total"] <= res["limit"]:
assert res["total"] == len(res["data"])
else:
assert res["limit"] == len(res["data"])
for item in res["data"]:
assert "nickname" in item["creator"]
@pytest.mark.env("e2e", "database")
def test_episode_revisions_offset(
client: TestClient,
):
offset = 1
common_params = {"episode_id": 1045}
response1 = client.get(
episode_revisions_api_prefix, params={"offset": offset, **common_params}
)
assert response1.status_code == 200
assert response1.headers["content-type"] == "application/json"
res = response1.json()
assert (
res["data"][0]["id"]
== client.get(episode_revisions_api_prefix, params=common_params).json()[
"data"
][1]["id"]
)
assert res["offset"] == offset
@pytest.mark.env("e2e", "database")
def test_episode_revisions_page_limit(
client: TestClient,
):
offset = 30000
response = client.get(
episode_revisions_api_prefix, params={"episode_id": 522, "offset": offset}
)
assert response.status_code == 422, response.text | en | 0.519165 | # TODO: split E2E test to unit test | 2.230392 | 2 |
overestimation_analysis.py | geyang/ml-logger_examples | 1 | 6619261 | <filename>overestimation_analysis.py
import os
import matplotlib.pyplot as plt
import pandas as pd
from cmx import doc
from ml_logger import ML_Logger, memoize
from tqdm import tqdm
if __name__ == "__main__":
# Environments and the per-environment RFF bandwidth `b` used for the RFAC
# runs; the two lists are index-aligned.
envs = ['dmc:Acrobot-swingup-v1', 'dmc:Quadruped-run-v1', 'dmc:Quadruped-walk-v1', 'dmc:Humanoid-run-v1', 'dmc:Finger-turn_hard-v1', 'dmc:Walker-run-v1', 'dmc:Cheetah-run-v1', 'dmc:Hopper-hop-v1']
b_vals = [20, 10, 5, 5, 20, 5, 10, 15]
colors = ['#23aaff', '#ff7575', '#66c56c', '#f4b247']
with doc @ """# MUJOCO Comparisons""":
loader = ML_Logger(prefix="model-free/model-free/rff_post_iclr/dmc/drqv2/4_layer")
print(loader)
# loader.glob = memoize(loader.glob)
# loader.load_pkl = memoize(loader.load_pkl)
with doc:
# Plot the true vs. predicted Q curves (mean with a 16-84% band) for one
# run set; reads the module-level `loader` defined above.
def plot_line(path, color, label):
print(path)
mean, low, high, step, = loader.read_metrics("eval/avg_true_q/mean@mean", "eval/avg_true_q/mean@16%",
"eval/avg_true_q/mean@84%", x_key="frames@min", path=path)
plt.xlabel('Steps', fontsize=18)
plt.ylabel('Q value', fontsize=18)
plt.plot(step.to_list(), mean.to_list(), color=color, label=f'{label} (true)')
plt.fill_between(step, low, high, alpha=0.1, color=color)
mean, low, high, step, = loader.read_metrics("eval/avg_pred_q/mean@mean", "eval/avg_pred_q/mean@16%",
"eval/avg_pred_q/mean@84%", x_key="frames@min", path=path)
# Dashed line distinguishes the predicted-Q curve from the true-Q curve.
plt.plot(step.to_list(), mean.to_list(), color=color, label=f'{label} (pred)', linestyle='--')
plt.fill_between(step, low, high, alpha=0.1, color=color)
doc @ """Drqv2 (state) -RFF vs Drqv2"""
with doc:
for (b_val, env_name) in tqdm(zip(b_vals, envs), desc="(b_val, env)"):
# Appears to start a new figure row every 4 environments -- confirm
# against ML_Logger.every semantics.
if loader.every(4, 'figure', start_on=1):
r = doc.table().figure_row()
# NOTE(review): some runs were logged under variant env names
# ("...hard2", "...run2") -- presumably re-runs; verify the paths.
if env_name == 'dmc:Finger-turn_hard-v1':
menv = 'dmc:Finger-turn_hard2-v1'
else:
menv = env_name
plt.title(f"DDPG {env_name.split(':')[-1][:-3]}", fontsize=18)
plot_line(path=f"mlp/{menv.split(':')[-1][:-3]}/**/metrics.pkl", color='black', label='DDPG')
if env_name == 'dmc:Humanoid-run-v1':
tenv = 'dmc:Humanoid-run2-v1'
else:
tenv = env_name
plot_line(path=f"rff_mean_std_full/rff/iso/b-{b_val}/{tenv.split(':')[-1][:-3]}/**/metrics.pkl",
color=colors[0], label=f'RFAC')
plt.legend()
plt.tight_layout()
r.savefig(f'{os.path.basename(__file__)[:-3]}/{env_name}.png', dpi=300, zoom=0.3)
plt.close()
doc.flush()
| <filename>overestimation_analysis.py
import os
import matplotlib.pyplot as plt
import pandas as pd
from cmx import doc
from ml_logger import ML_Logger, memoize
from tqdm import tqdm
if __name__ == "__main__":
envs = ['dmc:Acrobot-swingup-v1', 'dmc:Quadruped-run-v1', 'dmc:Quadruped-walk-v1', 'dmc:Humanoid-run-v1', 'dmc:Finger-turn_hard-v1', 'dmc:Walker-run-v1', 'dmc:Cheetah-run-v1', 'dmc:Hopper-hop-v1']
b_vals = [20, 10, 5, 5, 20, 5, 10, 15]
colors = ['#23aaff', '#ff7575', '#66c56c', '#f4b247']
with doc @ """# MUJOCO Comparisons""":
loader = ML_Logger(prefix="model-free/model-free/rff_post_iclr/dmc/drqv2/4_layer")
print(loader)
# loader.glob = memoize(loader.glob)
# loader.load_pkl = memoize(loader.load_pkl)
with doc:
def plot_line(path, color, label):
print(path)
mean, low, high, step, = loader.read_metrics("eval/avg_true_q/mean@mean", "eval/avg_true_q/mean@16%",
"eval/avg_true_q/mean@84%", x_key="frames@min", path=path)
plt.xlabel('Steps', fontsize=18)
plt.ylabel('Q value', fontsize=18)
plt.plot(step.to_list(), mean.to_list(), color=color, label=f'{label} (true)')
plt.fill_between(step, low, high, alpha=0.1, color=color)
mean, low, high, step, = loader.read_metrics("eval/avg_pred_q/mean@mean", "eval/avg_pred_q/mean@16%",
"eval/avg_pred_q/mean@84%", x_key="frames@min", path=path)
plt.plot(step.to_list(), mean.to_list(), color=color, label=f'{label} (pred)', linestyle='--')
plt.fill_between(step, low, high, alpha=0.1, color=color)
doc @ """Drqv2 (state) -RFF vs Drqv2"""
with doc:
for (b_val, env_name) in tqdm(zip(b_vals, envs), desc="(b_val, env)"):
if loader.every(4, 'figure', start_on=1):
r = doc.table().figure_row()
if env_name == 'dmc:Finger-turn_hard-v1':
menv = 'dmc:Finger-turn_hard2-v1'
else:
menv = env_name
plt.title(f"DDPG {env_name.split(':')[-1][:-3]}", fontsize=18)
plot_line(path=f"mlp/{menv.split(':')[-1][:-3]}/**/metrics.pkl", color='black', label='DDPG')
if env_name == 'dmc:Humanoid-run-v1':
tenv = 'dmc:Humanoid-run2-v1'
else:
tenv = env_name
plot_line(path=f"rff_mean_std_full/rff/iso/b-{b_val}/{tenv.split(':')[-1][:-3]}/**/metrics.pkl",
color=colors[0], label=f'RFAC')
plt.legend()
plt.tight_layout()
r.savefig(f'{os.path.basename(__file__)[:-3]}/{env_name}.png', dpi=300, zoom=0.3)
plt.close()
doc.flush()
| en | 0.231235 | # MUJOCO Comparisons # loader.glob = memoize(loader.glob) # loader.load_pkl = memoize(loader.load_pkl) Drqv2 (state) -RFF vs Drqv2 | 2.163909 | 2 |
Rough_Work/for_loop.py | conor1982/Labs_Practice | 0 | 6619262 | <filename>Rough_Work/for_loop.py<gh_stars>0
# Bug fix: the original line read `ilename = "count.txt"` (the leading 'f'
# was lost), leaving `filename` undefined and making both functions below
# raise NameError.
filename = "count.txt"


def readNumber():
    """Return the run counter stored in `filename` (int; raises if missing)."""
    with open(filename) as f:
        number = int(f.read())
    return number


def writeNumber(number):
    """Persist `number` to `filename`, overwriting any previous value."""
    with open(filename, "wt") as f:
        # write takes a string so we need to convert
        f.write(str(number))
# main
# Read-increment-write of the persistent run counter.
# NOTE(review): the very first run crashes with FileNotFoundError because
# count.txt does not exist yet -- seed the file or handle the exception.
num = readNumber()
num += 1
print ("we have run this program {} times".format(num))
writeNumber(num)
| <filename>Rough_Work/for_loop.py<gh_stars>0
ilename = "count.txt"
def readNumber():
with open(filename) as f:
number = int(f.read())
return number
def writeNumber(number):
with open(filename, "wt") as f:
# write takes a string so we need to convert
f.write(str(number))
# main
num = readNumber()
num += 1
print ("we have run this program {} times".format(num))
writeNumber(num)
| en | 0.635566 | # write takes a string so we need to convert # main | 3.528465 | 4 |
src/ggrc/notifications/cron_jobs.py | MikalaiMikalalai/ggrc-core | 1 | 6619263 | <reponame>MikalaiMikalalai/ggrc-core<gh_stars>1-10
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""This module contains notification cron jobs."""
from ggrc.notifications import common as notif_common
from ggrc.notifications import fast_digest as notif_fast_digest
from ggrc.notifications import import_export as notif_import_export
NIGHTLY_CRON_JOBS = [
notif_common.generate_cycle_tasks_notifs,
notif_common.create_daily_digest_bg,
]
HALF_HOUR_CRON_JOBS = [
notif_fast_digest.send_notification,
]
IMPORT_EXPORT_JOBS = [
notif_import_export.check_import_export_jobs,
]
| # Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""This module contains notification cron jobs."""
from ggrc.notifications import common as notif_common
from ggrc.notifications import fast_digest as notif_fast_digest
from ggrc.notifications import import_export as notif_import_export
NIGHTLY_CRON_JOBS = [
notif_common.generate_cycle_tasks_notifs,
notif_common.create_daily_digest_bg,
]
HALF_HOUR_CRON_JOBS = [
notif_fast_digest.send_notification,
]
IMPORT_EXPORT_JOBS = [
notif_import_export.check_import_export_jobs,
] | en | 0.61435 | # Copyright (C) 2020 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> This module contains notification cron jobs. | 1.28583 | 1 |
day05/main.py | Isilindil/adventOfCode2020 | 0 | 6619264 | <filename>day05/main.py
#!/usr/bin/python
def seat(coord_code, coord_authorized) :
if len(coord_authorized) == 1 : return coord_authorized
code = coord_code[0]
code_tail = coord_code[1:]
if code == "F" :
return seat(code_tail, coord_authorized[:len(coord_authorized)//2])
elif code == "B" :
return seat(code_tail, coord_authorized[len(coord_authorized)//2:])
elif code == "L" :
return seat(code_tail, coord_authorized[:len(coord_authorized)//2])
elif code == "R" :
return seat(code_tail, coord_authorized[len(coord_authorized)//2:])
if __name__ == "__main__" :
with open("input") as a :
data = a.read().split("\n")
a.close()
row = range(128)
column = range(8)
max_seat_ID = 0
all_seat_ID = []
for coord in data :
row_coord = coord[:7]
col_coord = coord[7:]
row_val = int(seat(row_coord, row)[0])
col_val = int(seat(col_coord, column)[0])
max_seat_ID = max(max_seat_ID, row_val*8+col_val)
all_seat_ID.append(row_val*8+col_val)
print("highest seat : {}".format(max_seat_ID))
all_seat_ID.sort()
#print(all_seat_ID)
for s in range(len(all_seat_ID)-1) :
if all_seat_ID[s+1]-all_seat_ID[s] != 1 :
print("Your seat : {}".format(all_seat_ID[s+1]-1))
| <filename>day05/main.py
#!/usr/bin/python
def seat(coord_code, coord_authorized) :
if len(coord_authorized) == 1 : return coord_authorized
code = coord_code[0]
code_tail = coord_code[1:]
if code == "F" :
return seat(code_tail, coord_authorized[:len(coord_authorized)//2])
elif code == "B" :
return seat(code_tail, coord_authorized[len(coord_authorized)//2:])
elif code == "L" :
return seat(code_tail, coord_authorized[:len(coord_authorized)//2])
elif code == "R" :
return seat(code_tail, coord_authorized[len(coord_authorized)//2:])
if __name__ == "__main__" :
with open("input") as a :
data = a.read().split("\n")
a.close()
row = range(128)
column = range(8)
max_seat_ID = 0
all_seat_ID = []
for coord in data :
row_coord = coord[:7]
col_coord = coord[7:]
row_val = int(seat(row_coord, row)[0])
col_val = int(seat(col_coord, column)[0])
max_seat_ID = max(max_seat_ID, row_val*8+col_val)
all_seat_ID.append(row_val*8+col_val)
print("highest seat : {}".format(max_seat_ID))
all_seat_ID.sort()
#print(all_seat_ID)
for s in range(len(all_seat_ID)-1) :
if all_seat_ID[s+1]-all_seat_ID[s] != 1 :
print("Your seat : {}".format(all_seat_ID[s+1]-1))
| en | 0.180034 | #!/usr/bin/python #print(all_seat_ID) | 3.802416 | 4 |
datasets/__init__.py | tony2016uestc/RTD-Action | 64 | 6619265 | # Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
from .thumos14 import build as build_thumos14
def build_dataset(image_set, args):
if args.dataset_file == 'thumos14':
return build_thumos14(image_set, args)
raise ValueError(f'dataset {args.dataset_file} not supported')
| # Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
from .thumos14 import build as build_thumos14
def build_dataset(image_set, args):
if args.dataset_file == 'thumos14':
return build_thumos14(image_set, args)
raise ValueError(f'dataset {args.dataset_file} not supported')
| en | 0.627167 | # Modified from DETR (https://github.com/facebookresearch/detr) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved # ------------------------------------------------------------------------ | 2.063092 | 2 |
campuscats/file/admin.py | CaptainMorch/CampusCats | 1 | 6619266 | from django.contrib import admin
from .models import Photo
# Register your models here.
admin.site.register(Photo) | from django.contrib import admin
from .models import Photo
# Register your models here.
admin.site.register(Photo) | en | 0.968259 | # Register your models here. | 1.287778 | 1 |
s2e/qemu/scripts/qapi-visit.py | wyz7155/SymDrive | 0 | 6619267 | #
# QAPI visitor generator
#
# Copyright IBM, Corp. 2011
#
# Authors:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# This work is licensed under the terms of the GNU GPLv2.
# See the COPYING.LIB file in the top-level directory.
from ordereddict import OrderedDict
from qapi import *
import sys
import os
import getopt
import errno
def generate_visit_struct_body(field_prefix, members):
ret = ""
if len(field_prefix):
field_prefix = field_prefix + "."
for argname, argentry, optional, structured in parse_args(members):
if optional:
ret += mcgen('''
visit_start_optional(m, (obj && *obj) ? &(*obj)->%(c_prefix)shas_%(c_name)s : NULL, "%(name)s", errp);
if ((*obj)->%(prefix)shas_%(c_name)s) {
''',
c_prefix=c_var(field_prefix), prefix=field_prefix,
c_name=c_var(argname), name=argname)
push_indent()
if structured:
ret += mcgen('''
visit_start_struct(m, NULL, "", "%(name)s", 0, errp);
''',
name=argname)
ret += generate_visit_struct_body(field_prefix + argname, argentry)
ret += mcgen('''
visit_end_struct(m, errp);
''')
else:
ret += mcgen('''
visit_type_%(type)s(m, (obj && *obj) ? &(*obj)->%(c_prefix)s%(c_name)s : NULL, "%(name)s", errp);
''',
c_prefix=c_var(field_prefix), prefix=field_prefix,
type=type_name(argentry), c_name=c_var(argname),
name=argname)
if optional:
pop_indent()
ret += mcgen('''
}
visit_end_optional(m, errp);
''')
return ret
def generate_visit_struct(name, members):
ret = mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp)
{
if (error_is_set(errp)) {
return;
}
visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), errp);
if (obj && !*obj) {
goto end;
}
''',
name=name)
push_indent()
ret += generate_visit_struct_body("", members)
pop_indent()
ret += mcgen('''
end:
visit_end_struct(m, errp);
}
''')
return ret
def generate_visit_list(name, members):
return mcgen('''
void visit_type_%(name)sList(Visitor *m, %(name)sList ** obj, const char *name, Error **errp)
{
GenericList *i, **prev = (GenericList **)obj;
if (error_is_set(errp)) {
return;
}
visit_start_list(m, name, errp);
for (; (i = visit_next_list(m, prev, errp)) != NULL; prev = &i) {
%(name)sList *native_i = (%(name)sList *)i;
visit_type_%(name)s(m, &native_i->value, NULL, errp);
}
visit_end_list(m, errp);
}
''',
name=name)
def generate_visit_enum(name, members):
return mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s * obj, const char *name, Error **errp)
{
visit_type_enum(m, (int *)obj, %(name)s_lookup, "%(name)s", name, errp);
}
''',
name=name)
def generate_visit_union(name, members):
ret = generate_visit_enum('%sKind' % name, members.keys())
ret += mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp)
{
Error *err = NULL;
if (error_is_set(errp)) {
return;
}
visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), &err);
if (obj && !*obj) {
goto end;
}
visit_type_%(name)sKind(m, &(*obj)->kind, "type", &err);
if (err) {
error_propagate(errp, err);
goto end;
}
switch ((*obj)->kind) {
''',
name=name)
for key in members:
ret += mcgen('''
case %(abbrev)s_KIND_%(enum)s:
visit_type_%(c_type)s(m, &(*obj)->%(c_name)s, "data", errp);
break;
''',
abbrev = de_camel_case(name).upper(),
enum = c_fun(de_camel_case(key)).upper(),
c_type=members[key],
c_name=c_fun(key))
ret += mcgen('''
default:
abort();
}
end:
visit_end_struct(m, errp);
}
''')
return ret
def generate_declaration(name, members, genlist=True):
ret = mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp);
''',
name=name)
if genlist:
ret += mcgen('''
void visit_type_%(name)sList(Visitor *m, %(name)sList ** obj, const char *name, Error **errp);
''',
name=name)
return ret
def generate_decl_enum(name, members, genlist=True):
return mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s * obj, const char *name, Error **errp);
''',
name=name)
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "chp:o:",
["source", "header", "prefix=", "output-dir="])
except getopt.GetoptError, err:
print str(err)
sys.exit(1)
output_dir = ""
prefix = ""
c_file = 'qapi-visit.c'
h_file = 'qapi-visit.h'
do_c = False
do_h = False
for o, a in opts:
if o in ("-p", "--prefix"):
prefix = a
elif o in ("-o", "--output-dir"):
output_dir = a + "/"
elif o in ("-c", "--source"):
do_c = True
elif o in ("-h", "--header"):
do_h = True
if not do_c and not do_h:
do_c = True
do_h = True
c_file = output_dir + prefix + c_file
h_file = output_dir + prefix + h_file
try:
os.makedirs(output_dir)
except os.error, e:
if e.errno != errno.EEXIST:
raise
def maybe_open(really, name, opt):
if really:
return open(name, opt)
else:
import StringIO
return StringIO.StringIO()
fdef = maybe_open(do_c, c_file, 'w')
fdecl = maybe_open(do_h, h_file, 'w')
fdef.write(mcgen('''
/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* schema-defined QAPI visitor functions
*
* Copyright IBM, Corp. 2011
*
* Authors:
* <NAME> <<EMAIL>>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#include "%(header)s"
''',
header=basename(h_file)))
fdecl.write(mcgen('''
/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* schema-defined QAPI visitor function
*
* Copyright IBM, Corp. 2011
*
* Authors:
* <NAME> <<EMAIL>>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#ifndef %(guard)s
#define %(guard)s
#include "qapi/qapi-visit-core.h"
#include "%(prefix)sqapi-types.h"
''',
prefix=prefix, guard=guardname(h_file)))
exprs = parse_schema(sys.stdin)
for expr in exprs:
if expr.has_key('type'):
ret = generate_visit_struct(expr['type'], expr['data'])
ret += generate_visit_list(expr['type'], expr['data'])
fdef.write(ret)
ret = generate_declaration(expr['type'], expr['data'])
fdecl.write(ret)
elif expr.has_key('union'):
ret = generate_visit_union(expr['union'], expr['data'])
ret += generate_visit_list(expr['union'], expr['data'])
fdef.write(ret)
ret = generate_decl_enum('%sKind' % expr['union'], expr['data'].keys())
ret += generate_declaration(expr['union'], expr['data'])
fdecl.write(ret)
elif expr.has_key('enum'):
ret = generate_visit_enum(expr['enum'], expr['data'])
fdef.write(ret)
ret = generate_decl_enum(expr['enum'], expr['data'])
fdecl.write(ret)
fdecl.write('''
#endif
''')
fdecl.flush()
fdecl.close()
fdef.flush()
fdef.close()
| #
# QAPI visitor generator
#
# Copyright IBM, Corp. 2011
#
# Authors:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# This work is licensed under the terms of the GNU GPLv2.
# See the COPYING.LIB file in the top-level directory.
from ordereddict import OrderedDict
from qapi import *
import sys
import os
import getopt
import errno
def generate_visit_struct_body(field_prefix, members):
ret = ""
if len(field_prefix):
field_prefix = field_prefix + "."
for argname, argentry, optional, structured in parse_args(members):
if optional:
ret += mcgen('''
visit_start_optional(m, (obj && *obj) ? &(*obj)->%(c_prefix)shas_%(c_name)s : NULL, "%(name)s", errp);
if ((*obj)->%(prefix)shas_%(c_name)s) {
''',
c_prefix=c_var(field_prefix), prefix=field_prefix,
c_name=c_var(argname), name=argname)
push_indent()
if structured:
ret += mcgen('''
visit_start_struct(m, NULL, "", "%(name)s", 0, errp);
''',
name=argname)
ret += generate_visit_struct_body(field_prefix + argname, argentry)
ret += mcgen('''
visit_end_struct(m, errp);
''')
else:
ret += mcgen('''
visit_type_%(type)s(m, (obj && *obj) ? &(*obj)->%(c_prefix)s%(c_name)s : NULL, "%(name)s", errp);
''',
c_prefix=c_var(field_prefix), prefix=field_prefix,
type=type_name(argentry), c_name=c_var(argname),
name=argname)
if optional:
pop_indent()
ret += mcgen('''
}
visit_end_optional(m, errp);
''')
return ret
def generate_visit_struct(name, members):
ret = mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp)
{
if (error_is_set(errp)) {
return;
}
visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), errp);
if (obj && !*obj) {
goto end;
}
''',
name=name)
push_indent()
ret += generate_visit_struct_body("", members)
pop_indent()
ret += mcgen('''
end:
visit_end_struct(m, errp);
}
''')
return ret
def generate_visit_list(name, members):
return mcgen('''
void visit_type_%(name)sList(Visitor *m, %(name)sList ** obj, const char *name, Error **errp)
{
GenericList *i, **prev = (GenericList **)obj;
if (error_is_set(errp)) {
return;
}
visit_start_list(m, name, errp);
for (; (i = visit_next_list(m, prev, errp)) != NULL; prev = &i) {
%(name)sList *native_i = (%(name)sList *)i;
visit_type_%(name)s(m, &native_i->value, NULL, errp);
}
visit_end_list(m, errp);
}
''',
name=name)
def generate_visit_enum(name, members):
return mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s * obj, const char *name, Error **errp)
{
visit_type_enum(m, (int *)obj, %(name)s_lookup, "%(name)s", name, errp);
}
''',
name=name)
def generate_visit_union(name, members):
ret = generate_visit_enum('%sKind' % name, members.keys())
ret += mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp)
{
Error *err = NULL;
if (error_is_set(errp)) {
return;
}
visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), &err);
if (obj && !*obj) {
goto end;
}
visit_type_%(name)sKind(m, &(*obj)->kind, "type", &err);
if (err) {
error_propagate(errp, err);
goto end;
}
switch ((*obj)->kind) {
''',
name=name)
for key in members:
ret += mcgen('''
case %(abbrev)s_KIND_%(enum)s:
visit_type_%(c_type)s(m, &(*obj)->%(c_name)s, "data", errp);
break;
''',
abbrev = de_camel_case(name).upper(),
enum = c_fun(de_camel_case(key)).upper(),
c_type=members[key],
c_name=c_fun(key))
ret += mcgen('''
default:
abort();
}
end:
visit_end_struct(m, errp);
}
''')
return ret
def generate_declaration(name, members, genlist=True):
ret = mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp);
''',
name=name)
if genlist:
ret += mcgen('''
void visit_type_%(name)sList(Visitor *m, %(name)sList ** obj, const char *name, Error **errp);
''',
name=name)
return ret
def generate_decl_enum(name, members, genlist=True):
return mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s * obj, const char *name, Error **errp);
''',
name=name)
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "chp:o:",
["source", "header", "prefix=", "output-dir="])
except getopt.GetoptError, err:
print str(err)
sys.exit(1)
output_dir = ""
prefix = ""
c_file = 'qapi-visit.c'
h_file = 'qapi-visit.h'
do_c = False
do_h = False
for o, a in opts:
if o in ("-p", "--prefix"):
prefix = a
elif o in ("-o", "--output-dir"):
output_dir = a + "/"
elif o in ("-c", "--source"):
do_c = True
elif o in ("-h", "--header"):
do_h = True
if not do_c and not do_h:
do_c = True
do_h = True
c_file = output_dir + prefix + c_file
h_file = output_dir + prefix + h_file
try:
os.makedirs(output_dir)
except os.error, e:
if e.errno != errno.EEXIST:
raise
def maybe_open(really, name, opt):
if really:
return open(name, opt)
else:
import StringIO
return StringIO.StringIO()
fdef = maybe_open(do_c, c_file, 'w')
fdecl = maybe_open(do_h, h_file, 'w')
fdef.write(mcgen('''
/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* schema-defined QAPI visitor functions
*
* Copyright IBM, Corp. 2011
*
* Authors:
* <NAME> <<EMAIL>>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#include "%(header)s"
''',
header=basename(h_file)))
fdecl.write(mcgen('''
/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* schema-defined QAPI visitor function
*
* Copyright IBM, Corp. 2011
*
* Authors:
* <NAME> <<EMAIL>>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#ifndef %(guard)s
#define %(guard)s
#include "qapi/qapi-visit-core.h"
#include "%(prefix)sqapi-types.h"
''',
prefix=prefix, guard=guardname(h_file)))
exprs = parse_schema(sys.stdin)
for expr in exprs:
if expr.has_key('type'):
ret = generate_visit_struct(expr['type'], expr['data'])
ret += generate_visit_list(expr['type'], expr['data'])
fdef.write(ret)
ret = generate_declaration(expr['type'], expr['data'])
fdecl.write(ret)
elif expr.has_key('union'):
ret = generate_visit_union(expr['union'], expr['data'])
ret += generate_visit_list(expr['union'], expr['data'])
fdef.write(ret)
ret = generate_decl_enum('%sKind' % expr['union'], expr['data'].keys())
ret += generate_declaration(expr['union'], expr['data'])
fdecl.write(ret)
elif expr.has_key('enum'):
ret = generate_visit_enum(expr['enum'], expr['data'])
fdef.write(ret)
ret = generate_decl_enum(expr['enum'], expr['data'])
fdecl.write(ret)
fdecl.write('''
#endif
''')
fdecl.flush()
fdecl.close()
fdef.flush()
fdef.close()
| en | 0.382275 | # # QAPI visitor generator # # Copyright IBM, Corp. 2011 # # Authors: # <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # # This work is licensed under the terms of the GNU GPLv2. # See the COPYING.LIB file in the top-level directory. visit_start_optional(m, (obj && *obj) ? &(*obj)->%(c_prefix)shas_%(c_name)s : NULL, "%(name)s", errp); if ((*obj)->%(prefix)shas_%(c_name)s) { visit_start_struct(m, NULL, "", "%(name)s", 0, errp); visit_end_struct(m, errp); visit_type_%(type)s(m, (obj && *obj) ? &(*obj)->%(c_prefix)s%(c_name)s : NULL, "%(name)s", errp); } visit_end_optional(m, errp); void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp) { if (error_is_set(errp)) { return; } visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), errp); if (obj && !*obj) { goto end; } end: visit_end_struct(m, errp); } void visit_type_%(name)sList(Visitor *m, %(name)sList ** obj, const char *name, Error **errp) { GenericList *i, **prev = (GenericList **)obj; if (error_is_set(errp)) { return; } visit_start_list(m, name, errp); for (; (i = visit_next_list(m, prev, errp)) != NULL; prev = &i) { %(name)sList *native_i = (%(name)sList *)i; visit_type_%(name)s(m, &native_i->value, NULL, errp); } visit_end_list(m, errp); } void visit_type_%(name)s(Visitor *m, %(name)s * obj, const char *name, Error **errp) { visit_type_enum(m, (int *)obj, %(name)s_lookup, "%(name)s", name, errp); } void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp) { Error *err = NULL; if (error_is_set(errp)) { return; } visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), &err); if (obj && !*obj) { goto end; } visit_type_%(name)sKind(m, &(*obj)->kind, "type", &err); if (err) { error_propagate(errp, err); goto end; } switch ((*obj)->kind) { case %(abbrev)s_KIND_%(enum)s: visit_type_%(c_type)s(m, &(*obj)->%(c_name)s, "data", errp); break; default: abort(); } end: visit_end_struct(m, errp); } void 
visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp); void visit_type_%(name)sList(Visitor *m, %(name)sList ** obj, const char *name, Error **errp); void visit_type_%(name)s(Visitor *m, %(name)s * obj, const char *name, Error **errp); /* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */ /* * schema-defined QAPI visitor functions * * Copyright IBM, Corp. 2011 * * Authors: * <NAME> <<EMAIL>> * * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. * See the COPYING.LIB file in the top-level directory. * */ #include "%(header)s" /* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */ /* * schema-defined QAPI visitor function * * Copyright IBM, Corp. 2011 * * Authors: * <NAME> <<EMAIL>> * * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. * See the COPYING.LIB file in the top-level directory. * */ #ifndef %(guard)s #define %(guard)s #include "qapi/qapi-visit-core.h" #include "%(prefix)sqapi-types.h" #endif | 2.103683 | 2 |
scrape/spatula/cli.py | bhrutledge/openstates-people | 0 | 6619268 | <gh_stars>0
import click
import importlib
import pprint
from .core import Scraper
from .pages import HtmlListPage
def get_class(dotted_name):
mod_name, cls_name = dotted_name.rsplit(".", 1)
mod = importlib.import_module(mod_name)
return getattr(mod, cls_name)
def _display(obj):
if isinstance(obj, dict):
return pprint.pformat(obj)
elif hasattr(obj, "to_dict"):
return pprint.pformat(obj.to_dict())
else:
return repr(obj)
@click.group()
def cli():
pass
@cli.command()
@click.argument("class_name")
@click.argument("url", required=False)
def test(class_name, url):
Cls = get_class(class_name)
page = Cls(url)
s = Scraper()
s.fetch_page_data(page)
# TODO: better way to check this
if issubclass(Cls, HtmlListPage):
for i, item in enumerate(page.get_data()):
print(f"{i}:", _display(item))
else:
print(_display(page.get_data()))
@cli.command()
@click.argument("workflow_name")
@click.option("-o", "--output-dir", default=None)
def scrape(workflow_name, output_dir):
workflow = get_class(workflow_name)
workflow.execute(output_dir=output_dir)
if __name__ == "__main__":
cli()
| import click
import importlib
import pprint
from .core import Scraper
from .pages import HtmlListPage
def get_class(dotted_name):
mod_name, cls_name = dotted_name.rsplit(".", 1)
mod = importlib.import_module(mod_name)
return getattr(mod, cls_name)
def _display(obj):
if isinstance(obj, dict):
return pprint.pformat(obj)
elif hasattr(obj, "to_dict"):
return pprint.pformat(obj.to_dict())
else:
return repr(obj)
@click.group()
def cli():
pass
@cli.command()
@click.argument("class_name")
@click.argument("url", required=False)
def test(class_name, url):
Cls = get_class(class_name)
page = Cls(url)
s = Scraper()
s.fetch_page_data(page)
# TODO: better way to check this
if issubclass(Cls, HtmlListPage):
for i, item in enumerate(page.get_data()):
print(f"{i}:", _display(item))
else:
print(_display(page.get_data()))
@cli.command()
@click.argument("workflow_name")
@click.option("-o", "--output-dir", default=None)
def scrape(workflow_name, output_dir):
workflow = get_class(workflow_name)
workflow.execute(output_dir=output_dir)
if __name__ == "__main__":
cli() | en | 0.562999 | # TODO: better way to check this | 2.622262 | 3 |
AttendanceProject.py | francisojeah/facial_recognition | 0 | 6619269 | import cv2
import numpy as np
import face_recognition
import os
pathname = 'ImagesAttendance'
imageslist = []
imagename = []
mylist = os.listdir(pathname)
for x in mylist:
newImg = face_recognition.load_image_file(f'{pathname}/{x}')
imageslist.append(newImg)
imagename.append(os.path.splitext(x)[0])
#print(imagename)
def getEncodings(imageslist):
listEncode = []
for i in imageslist:
i = cv2.cvtColor(i, cv2.COLOR_BGR2RGB)
i = cv2.resize(i, (0,0), None, 0.25,0.25)
faloc = face_recogniton.face_locations(i)[0]
encode = face_recognition.face_encodings(i)[0]
# d = face_recognition.face_locations(i)[0]
# cv2.rectangle(i,(d[3],d[0]),(d[1],d[2]),(255,255,0),2)
# cv2.imshow('e',i)
# cv2.waitKey(0)
listEncode.append(encode)
return listEncode
encodeListKnown = getEncodings(imageslist)
print(len(encodeListKnown))
cam = cv2.VideoCapture(0)
while True:
success, img = cam.read()
imgshrunk = cv2.resize(img,(0,0),None,0.25,0.25)
imgshrunk = cv2.cvtColor(imgshrunk, cv2.COLOR_BGR2RGB)
faceinFrame = face_recognition.face_locations(imgshrunk)[0]
encodeinFrame = face_recognition.face_encodings(imgshrunk,faceinFrame)[0]
for encodeFace, faceLoc in zip(encodeinFrame, faceinFrame):
match = face_recognition.compare_faces(encodeListKnown, encodeFace)
facedis = face_recognition.face_distance(encodeListKnown, encodeFace)
matchIndex = np.argmin(facedis)
print(facedis)
if match[matchIndex]:
name = imagename[matchIndex].upper()
print(name)
y1,x2,y2,x1 = faceLoc
y1,x2,y2,x1 = y1*4, x2*4, y2*4, x1*4
cv2.rectangle(img, (x1,y1),(x2,y2),(0,255,0),2)
cv2.rectangle(img,(x1,y2-35),(x2,y2),(0,255,0), cv2.FILLED)
cv2.putText(img,name,(x1+6, y2-6),cv2.FONT_HERSHEY_COMPLEX,1,(255,255,255),2)
cv2.imshow('webcam', img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
| import cv2
import numpy as np
import face_recognition
import os
pathname = 'ImagesAttendance'
imageslist = []
imagename = []
mylist = os.listdir(pathname)
for x in mylist:
newImg = face_recognition.load_image_file(f'{pathname}/{x}')
imageslist.append(newImg)
imagename.append(os.path.splitext(x)[0])
#print(imagename)
def getEncodings(imageslist):
listEncode = []
for i in imageslist:
i = cv2.cvtColor(i, cv2.COLOR_BGR2RGB)
i = cv2.resize(i, (0,0), None, 0.25,0.25)
faloc = face_recogniton.face_locations(i)[0]
encode = face_recognition.face_encodings(i)[0]
# d = face_recognition.face_locations(i)[0]
# cv2.rectangle(i,(d[3],d[0]),(d[1],d[2]),(255,255,0),2)
# cv2.imshow('e',i)
# cv2.waitKey(0)
listEncode.append(encode)
return listEncode
encodeListKnown = getEncodings(imageslist)
print(len(encodeListKnown))
cam = cv2.VideoCapture(0)
while True:
success, img = cam.read()
imgshrunk = cv2.resize(img,(0,0),None,0.25,0.25)
imgshrunk = cv2.cvtColor(imgshrunk, cv2.COLOR_BGR2RGB)
faceinFrame = face_recognition.face_locations(imgshrunk)[0]
encodeinFrame = face_recognition.face_encodings(imgshrunk,faceinFrame)[0]
for encodeFace, faceLoc in zip(encodeinFrame, faceinFrame):
match = face_recognition.compare_faces(encodeListKnown, encodeFace)
facedis = face_recognition.face_distance(encodeListKnown, encodeFace)
matchIndex = np.argmin(facedis)
print(facedis)
if match[matchIndex]:
name = imagename[matchIndex].upper()
print(name)
y1,x2,y2,x1 = faceLoc
y1,x2,y2,x1 = y1*4, x2*4, y2*4, x1*4
cv2.rectangle(img, (x1,y1),(x2,y2),(0,255,0),2)
cv2.rectangle(img,(x1,y2-35),(x2,y2),(0,255,0), cv2.FILLED)
cv2.putText(img,name,(x1+6, y2-6),cv2.FONT_HERSHEY_COMPLEX,1,(255,255,255),2)
cv2.imshow('webcam', img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
| en | 0.307979 | #print(imagename) # d = face_recognition.face_locations(i)[0] # cv2.rectangle(i,(d[3],d[0]),(d[1],d[2]),(255,255,0),2) # cv2.imshow('e',i) # cv2.waitKey(0) | 2.906737 | 3 |
loxpy/LoxRuntimeError.py | largomst/LoxPy | 0 | 6619270 | <gh_stars>0
from . import ErrorState
from .Tokens import Token
class LoxRuntimeError(RuntimeError):
def __init__(self, token: Token, message: str):
super().__init__(message)
self.token = token
self.message = message
def runtimeError(error: LoxRuntimeError):
print("{}\n[line {}]".format(error.message, error.token.line))
ErrorState.hadRuntimeError = True
| from . import ErrorState
from .Tokens import Token
class LoxRuntimeError(RuntimeError):
def __init__(self, token: Token, message: str):
super().__init__(message)
self.token = token
self.message = message
def runtimeError(error: LoxRuntimeError):
print("{}\n[line {}]".format(error.message, error.token.line))
ErrorState.hadRuntimeError = True | none | 1 | 2.742482 | 3 | |
app/cachedmodel/models.py | Uniquode/uniquode2 | 0 | 6619271 | # -*- coding: utf-8 -*-
import json
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.base import ModelBase
from django.db.models.fields import related_descriptors
from django.db.models.signals import (
pre_delete,
post_delete,
post_save,
m2m_changed
)
from .manager import RowCacheManager
from .signals import removed_from_cache
from .utils.lazymodel import get_model_cache, model_cache_key
from .utils.modelutils import get_identifier_string, lookup_cache_master_key
DEFAULT_MANAGER_NAME = 'objects'
BASE_MANAGER_NAME = '_related'
class MetaCaching(ModelBase):
"""
Sets ``objects'' on any model that inherits from ModelWithCaching to be a RowCacheManager.
This is tightly coupled to Django internals, so it could (and did) break if you upgrade Django.
This was done partially as a proof-of-concept.
Django now stores managers in the model._meta (class Options) attribute and cannot be set
directly at the class level.
"""
# noinspection PyMethodParameters
def __new__(cls, name, bases, attrs, default_manager_name=None, base_manager_name=None, **kwargs):
new_class = ModelBase.__new__(cls, name, bases, attrs, **kwargs)
# noinspection PyProtectedMember
opts = new_class._meta
def add_manager(manager_name, attr_base):
if not hasattr(new_class, manager_name):
# Attach a new manager.
manager = RowCacheManager()
manager.name = manager_name
manager.model = opts.model
manager.contribute_to_class(new_class, manager_name)
opts.managers_map[manager_name] = manager
else:
manager = getattr(new_class, manager_name)
try:
# if required, insert RowCacheManager into manager bases first in MRO
# note that this cannot work if manager.__class__.__base__ == object
# see https://stackoverflow.com/questions/3193158/bases-doesnt-work-whats-next/3193260
if manager.__class__ != RowCacheManager and RowCacheManager not in manager.__class__.__bases__: # noqa
manager.__class__.__bases__ = (RowCacheManager,) + manager.__class__.__bases__
except TypeError:
pass
if manager not in opts.local_managers:
opts.local_managers.append(manager)
setattr(opts, attr_base, manager)
setattr(opts, f"{attr_base}_name", manager_name)
add_manager(default_manager_name or DEFAULT_MANAGER_NAME, 'default_manager')
add_manager(base_manager_name or BASE_MANAGER_NAME, 'base_manager')
return new_class
class CachedModel(models.Model, metaclass=MetaCaching):
class Meta:
default_manager_name = DEFAULT_MANAGER_NAME
base_manager_name = BASE_MANAGER_NAME
abstract = True
def create_manager(func, superclass, rel, *args):
manager_cls = func(superclass, rel, *args)
if issubclass(rel.model, CachedModel) and not issubclass(manager_cls, RowCacheManager):
manager_cls = type(manager_cls.__name__, (RowCacheManager,) + manager_cls.__bases__, {})
return manager_cls
# fixups for manager creators
original_create_forward_many_to_many_manager = related_descriptors.create_forward_many_to_many_manager
original_create_reverse_many_to_one_manager = related_descriptors.create_reverse_many_to_one_manager
def create_forward_many_to_many_manager(superclass, rel, reverse):
return create_manager(original_create_forward_many_to_many_manager, superclass, rel, reverse)
def create_reverse_many_to_one_manager(superclass, rel):
return create_manager(original_create_reverse_many_to_one_manager, superclass, rel)
# noinspection PyUnusedLocal
def remove_object_from_cache(sender, instance, **kwargs):
model = instance.__class__
instance_pk = instance.pk
if isinstance(instance, ContentType):
# The model cache key stuff has special handling to allow passing
# in a content type instead of the model. At this point though, we are
# actually working with the content type itself and not the model it
# represents. So we need to bypass that special handling code.
instance = get_identifier_string(instance, instance.pk)
cache, timeout = get_model_cache()
cache_key = model_cache_key(instance, instance_pk)
cache.delete(cache_key)
try:
# reset cache with new data from master DB
model.objects.get(id=instance_pk)
except model.DoesNotExist:
pass
# and remove lookup cache keys
master_key = lookup_cache_master_key(instance, instance_pk)
if master_key in cache:
list_lookup_cache_keys = json.loads(cache.get(master_key))
for key in list_lookup_cache_keys:
cache.delete(key)
cache.delete(master_key)
# Tell anyone else who may be interested that cache was cleaned of instance
removed_from_cache.send(sender=sender, instance=instance, **kwargs)
pre_delete.connect(remove_object_from_cache)
post_delete.connect(remove_object_from_cache)
post_save.connect(remove_object_from_cache)
m2m_changed.connect(remove_object_from_cache)
related_descriptors.create_forward_many_to_many_manager = create_forward_many_to_many_manager
related_descriptors.create_reverse_many_to_one_manager = create_reverse_many_to_one_manager
class CachedModelTypes(CachedModel):
    """
    List of cached models.

    This is not actively used by this unit but provides a useful table for
    tests: one row per concrete CachedModel subclass known to ContentType.
    """
    # Dotted identification of the concrete model this row describes.
    app_label = models.CharField(max_length=100)
    model = models.CharField(max_length=100)
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)

    @classmethod
    def populate(cls):
        """Create one row per CachedModel subclass registered in ContentType.

        :return: the number of rows newly created by this call.
        """
        from django.apps import apps
        created_counter = 0
        for content_type in ContentType.objects.all():
            try:
                model = apps.get_model(app_label=content_type.app_label, model_name=content_type.model, require_ready=True)
            except LookupError:
                # Stale content type whose model class no longer exists.
                continue
            if issubclass(model, CachedModel):
                _, created = cls.objects.get_or_create(
                    app_label=content_type.app_label,
                    model=content_type.model,
                    content_type=content_type)
                if created:
                    created_counter += 1
        return created_counter

    @classmethod
    def reset(cls):
        """Drop ALL ContentType rows and rebuild this table.

        NOTE(review): deleting every ContentType row is destructive;
        presumably this is only meant for test databases -- confirm.
        """
        ContentType.objects.all().delete()
        cls.populate()

    def __str__(self):
        return f'{self.app_label}.{self.model} (id={self.id})'

    class Meta:
        unique_together = ('app_label', 'model')
        ordering = ('app_label', 'model')
| # -*- coding: utf-8 -*-
import json
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.base import ModelBase
from django.db.models.fields import related_descriptors
from django.db.models.signals import (
pre_delete,
post_delete,
post_save,
m2m_changed
)
from .manager import RowCacheManager
from .signals import removed_from_cache
from .utils.lazymodel import get_model_cache, model_cache_key
from .utils.modelutils import get_identifier_string, lookup_cache_master_key
DEFAULT_MANAGER_NAME = 'objects'
BASE_MANAGER_NAME = '_related'
class MetaCaching(ModelBase):
    """
    Sets ``objects`` on any model built with this metaclass (i.e. CachedModel
    subclasses) to be a RowCacheManager.

    This is tightly coupled to Django internals, so it could (and did) break if you upgrade Django.
    This was done partially as a proof-of-concept.
    Django now stores managers in the model._meta (class Options) attribute and cannot be set
    directly at the class level.
    """
    # noinspection PyMethodParameters
    def __new__(cls, name, bases, attrs, default_manager_name=None, base_manager_name=None, **kwargs):
        # Let Django build the model class first, then retrofit its managers.
        new_class = ModelBase.__new__(cls, name, bases, attrs, **kwargs)
        # noinspection PyProtectedMember
        opts = new_class._meta

        def add_manager(manager_name, attr_base):
            # Ensure the manager called ``manager_name`` is (or inherits
            # from) RowCacheManager, then expose it on opts as ``attr_base``
            # and ``<attr_base>_name``.
            if not hasattr(new_class, manager_name):
                # Attach a new manager.
                manager = RowCacheManager()
                manager.name = manager_name
                manager.model = opts.model
                manager.contribute_to_class(new_class, manager_name)
                opts.managers_map[manager_name] = manager
            else:
                manager = getattr(new_class, manager_name)
                try:
                    # if required, insert RowCacheManager into manager bases first in MRO
                    # note that this cannot work if manager.__class__.__base__ == object
                    # see https://stackoverflow.com/questions/3193158/bases-doesnt-work-whats-next/3193260
                    if manager.__class__ != RowCacheManager and RowCacheManager not in manager.__class__.__bases__:  # noqa
                        manager.__class__.__bases__ = (RowCacheManager,) + manager.__class__.__bases__
                except TypeError:
                    # Re-basing is impossible for this class layout; keep the
                    # existing manager unchanged.
                    pass
                if manager not in opts.local_managers:
                    opts.local_managers.append(manager)
            setattr(opts, attr_base, manager)
            setattr(opts, f"{attr_base}_name", manager_name)

        add_manager(default_manager_name or DEFAULT_MANAGER_NAME, 'default_manager')
        add_manager(base_manager_name or BASE_MANAGER_NAME, 'base_manager')
        return new_class
class CachedModel(models.Model, metaclass=MetaCaching):
    """Abstract base class: subclasses get RowCacheManager-based managers
    installed by the MetaCaching metaclass."""
    class Meta:
        # Manager names must match what MetaCaching installs.
        default_manager_name = DEFAULT_MANAGER_NAME
        base_manager_name = BASE_MANAGER_NAME
        abstract = True
def create_manager(func, superclass, rel, *args):
    """Build a related-manager class via ``func`` and, when the related
    model is a CachedModel, re-base it onto RowCacheManager."""
    manager_class = func(superclass, rel, *args)
    needs_caching = issubclass(rel.model, CachedModel)
    already_cached = issubclass(manager_class, RowCacheManager)
    if needs_caching and not already_cached:
        new_bases = (RowCacheManager,) + manager_class.__bases__
        manager_class = type(manager_class.__name__, new_bases, {})
    return manager_class
# fixups for manager creators
original_create_forward_many_to_many_manager = related_descriptors.create_forward_many_to_many_manager
original_create_reverse_many_to_one_manager = related_descriptors.create_reverse_many_to_one_manager
def create_forward_many_to_many_manager(superclass, rel, reverse):
return create_manager(original_create_forward_many_to_many_manager, superclass, rel, reverse)
def create_reverse_many_to_one_manager(superclass, rel):
return create_manager(original_create_reverse_many_to_one_manager, superclass, rel)
# noinspection PyUnusedLocal
def remove_object_from_cache(sender, instance, **kwargs):
model = instance.__class__
instance_pk = instance.pk
if isinstance(instance, ContentType):
# The model cache key stuff has special handling to allow passing
# in a content type instead of the model. At this point though, we are
# actually working with the content type itself and not the model it
# represents. So we need to bypass that special handling code.
instance = get_identifier_string(instance, instance.pk)
cache, timeout = get_model_cache()
cache_key = model_cache_key(instance, instance_pk)
cache.delete(cache_key)
try:
# reset cache with new data from master DB
model.objects.get(id=instance_pk)
except model.DoesNotExist:
pass
# and remove lookup cache keys
master_key = lookup_cache_master_key(instance, instance_pk)
if master_key in cache:
list_lookup_cache_keys = json.loads(cache.get(master_key))
for key in list_lookup_cache_keys:
cache.delete(key)
cache.delete(master_key)
# Tell anyone else who may be interested that cache was cleaned of instance
removed_from_cache.send(sender=sender, instance=instance, **kwargs)
pre_delete.connect(remove_object_from_cache)
post_delete.connect(remove_object_from_cache)
post_save.connect(remove_object_from_cache)
m2m_changed.connect(remove_object_from_cache)
related_descriptors.create_forward_many_to_many_manager = create_forward_many_to_many_manager
related_descriptors.create_reverse_many_to_one_manager = create_reverse_many_to_one_manager
class CachedModelTypes(CachedModel):
"""
List of cached models
This is not actively used by this unit but provides a useful table for tests
"""
app_label = models.CharField(max_length=100)
model = models.CharField(max_length=100)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
@classmethod
def populate(cls):
from django.apps import apps
created_counter = 0
for content_type in ContentType.objects.all():
try:
model = apps.get_model(app_label=content_type.app_label, model_name=content_type.model, require_ready=True)
except LookupError:
continue
if issubclass(model, CachedModel):
_, created = cls.objects.get_or_create(
app_label=content_type.app_label,
model=content_type.model,
content_type=content_type)
if created:
created_counter += 1
return created_counter
@classmethod
def reset(cls):
ContentType.objects.all().delete()
cls.populate()
def __str__(self):
return f'{self.app_label}.{self.model} (id={self.id})'
class Meta:
unique_together = ('app_label', 'model')
ordering = ('app_label', 'model')
| en | 0.878403 | # -*- coding: utf-8 -*- Sets ``objects'' on any model that inherits from ModelWithCaching to be a RowCacheManager. This is tightly coupled to Django internals, so it could (and did) break if you upgrade Django. This was done partially as a proof-of-concept. Django now stores managers in the model._meta (class Options) attribute and cannot be set directly at the class level. # noinspection PyMethodParameters # noinspection PyProtectedMember # Attach a new manager. # if required, insert RowCacheManager into manager bases first in MRO # note that this cannot work if manager.__class__.__base__ == object # see https://stackoverflow.com/questions/3193158/bases-doesnt-work-whats-next/3193260 # noqa # fixups for manager creators # noinspection PyUnusedLocal # The model cache key stuff has special handling to allow passing # in a content type instead of the model. At this point though, we are # actually working with the content type itself and not the model it # represents. So we need to bypass that special handling code. # reset cache with new data from master DB # and remove lookup cache keys # Tell anyone else who may be interested that cache was cleaned of instance List of cached models This is not actively used by this unit but provides a useful table for tests | 2.023689 | 2 |
tools/BlenderProc/src/writer/WriterInterface.py | GeorgSchenzel/pose-detector | 0 | 6619272 | <reponame>GeorgSchenzel/pose-detector
import os
import csv
import json
import bpy
import h5py
import numpy as np
from src.main.Module import Module
from src.utility.BlenderUtility import load_image
from src.utility.MathUtility import MathUtility
from src.utility.Utility import Utility
class WriterInterface(Module):
    """
    Parent class for all other writer classes; provides the functionality to
    return object attributes, write them to file, and to load and run
    postprocessing modules on rendered outputs.

    **Configuration**:

    .. list-table::
        :widths: 25 100 10
        :header-rows: 1

        * - Parameter
          - Description
          - Type
        * - postprocessing_modules
          - A dict of lists of postprocessing modules. The key in the dict specifies the output to which the
            postprocessing modules should be applied. Every postprocessing module has to have a run function which
            takes in the raw data and returns the processed data.
          - dict
        * - destination_frame
          - Used to transform points to the blender coordinate frame. Default: ["X", "Y", "Z"]
          - list
        * - attributes_to_write
          - A list of attribute names that should be written to file.
          - list
        * - output_file_prefix
          - The prefix of the file that should be created.
          - string
        * - output_key
          - The key which should be used for storing the output in a merged file.
          - string
        * - write_alpha_channel
          - If true, the alpha channel will be written to file. Default: False.
          - bool
    """

    def __init__(self, config):
        Module.__init__(self, config)
        # Maps an output key to the list of postprocessing module instances
        # to apply to that output.
        self.postprocessing_modules_per_output = {}
        module_configs = config.get_raw_dict("postprocessing_modules", {})
        for output_key in module_configs:
            self.postprocessing_modules_per_output[output_key] = Utility.initialize_modules(module_configs[output_key])
        # Lazily assigns a stable integer id per item name (see "id" handling
        # in _get_attribute()).
        self.name_to_id = {}
        self.destination_frame = self.config.get_list("destination_frame", ["X", "Y", "Z"])

    def write_attributes_to_file(self, item_writer, items, default_file_prefix, default_output_key, default_attributes, version="1.0.0"):
        """ Writes the state of the given items to a file with the configured prefix.

        This method also registers the corresponding output.

        :param item_writer: The item writer object to use. Type: object.
        :param items: The list of items. Type: list.
        :param default_file_prefix: The default file name prefix to use. Type: string.
        :param default_output_key: The default output key to use. Type: string.
        :param default_attributes: The default attributes to write, if no attributes are specified in the config. Type: list.
        :param version: The version to use when registering the output. Type: string.
        """
        file_prefix = self.config.get_string("output_file_prefix", default_file_prefix)
        path_prefix = os.path.join(self._determine_output_dir(), file_prefix)
        item_writer.write_items_to_file(path_prefix, items, self.config.get_list("attributes_to_write", default_attributes))
        # Register with a ".npy" suffix so downstream merging knows the format.
        self._register_output(file_prefix, self.config.get_string("output_key", default_output_key), ".npy", version)

    def _get_attribute(self, item, attribute_name):
        """ Returns the value of the requested attribute for the given item.

        This method covers all general attributes that blender objects have.

        :param item: The item. Type: blender object.
        :param attribute_name: The attribute name. Type: string.
        :return: The attribute value.
        :raises Exception: if the attribute (or custom property) is unknown.
        """
        if attribute_name == "id":
            # Ids are handed out in order of first appearance of each name.
            if item.name not in self.name_to_id:
                self.name_to_id[item.name] = len(self.name_to_id.values())
            return self.name_to_id[item.name]
        elif attribute_name == "name":
            return item.name
        elif attribute_name == "location":
            return MathUtility.transform_point_to_blender_coord_frame(item.location, self.destination_frame)
        elif attribute_name == "rotation_euler":
            return MathUtility.transform_point_to_blender_coord_frame(item.rotation_euler, self.destination_frame)
        elif attribute_name == "rotation_forward_vec":
            # Calc forward vector from rotation matrix
            # NOTE(review): ``mathutils`` is used here but not imported at the
            # top of this file -- confirm the import exists in the full source.
            rot_mat = item.rotation_euler.to_matrix()
            forward = rot_mat @ mathutils.Vector([0, 0, -1])
            return MathUtility.transform_point_to_blender_coord_frame(forward, self.destination_frame)
        elif attribute_name == "rotation_up_vec":
            # Calc up vector from rotation matrix
            rot_mat = item.rotation_euler.to_matrix()
            up = rot_mat @ mathutils.Vector([0, 1, 0])
            return MathUtility.transform_point_to_blender_coord_frame(up, self.destination_frame)
        elif attribute_name == "cam2world_matrix":
            # Transform matrix_world to the given destination frame and
            # convert it to a plain nested list (JSON/npy friendly).
            cam2world_matrix = Utility.transform_matrix_to_blender_coord_frame(item.matrix_world, self.destination_frame)
            return [[x for x in c] for c in cam2world_matrix]
        elif attribute_name.startswith("customprop_"):
            custom_property_name = attribute_name[len("customprop_"):]
            # Make sure the requested custom property exist
            if custom_property_name in item:
                return item[custom_property_name]
            else:
                raise Exception("No such custom property: " + custom_property_name)
        else:
            raise Exception("No such attribute: " + attribute_name)

    def _apply_postprocessing(self, output_key, data, version):
        """
        Applies all postprocessing modules registered for this output type.

        :param output_key: The key of the output type. Type: string
        :param data: The numpy data.
        :param version: The version number of the original data.
        :return: Tuple of (processed data, possibly-renamed key, possibly-updated version).
        """
        if output_key in self.postprocessing_modules_per_output:
            # Modules are chained: each may rewrite data, key and version.
            for module in self.postprocessing_modules_per_output[output_key]:
                data, new_key, new_version = module.run(data, output_key, version)
        else:
            new_key = output_key
            new_version = version
        return data, new_key, new_version

    def _load_and_postprocess(self, file_path, key, version="1.0.0"):
        """
        Loads an image and post processes it.

        :param file_path: Image path. Type: string.
        :param key: The image's key with regards to the hdf5 file. Type: string.
        :param version: The version number of the original data. Type: string. Default: 1.0.0.
        :return: Tuple of (post-processed data, new key, new version).
        """
        data = self._load_file(Utility.resolve_path(file_path))
        data, new_key, new_version = self._apply_postprocessing(key, data, version)
        print("Key: " + key + " - shape: " + str(data.shape) + " - dtype: " + str(data.dtype) + " - path: " + file_path)
        return data, new_key, new_version

    def _load_file(self, file_path):
        """ Tries to read in the file with the given path into a numpy array.

        :param file_path: The file path. Type: string.
        :return: A numpy array containing the data of the file.
        :raises Exception: if the file does not exist.
        :raises NotImplementedError: for unsupported file extensions.
        """
        if not os.path.exists(file_path):
            raise Exception("File not found: " + file_path)
        file_ending = file_path[file_path.rfind(".") + 1:].lower()
        if file_ending in ["exr", "png", "jpg"]:
            #num_channels is 4 if transparent_background is true in config
            return load_image(file_path, num_channels = 3 + self.config.get_bool("write_alpha_channel", False))
        elif file_ending in ["npy", "npz"]:
            return self._load_npy(file_path)
        elif file_ending in ["csv"]:
            return self._load_csv(file_path)
        else:
            raise NotImplementedError("File with ending " + file_ending + " cannot be loaded.")

    def _load_npy(self, file_path):
        """ Load the npy/npz file at the given path.

        :param file_path: The path. Type: string.
        :return: The content of the file
        """
        return np.load(file_path)

    def _load_csv(self, file_path):
        """ Load the csv file at the given path.

        The rows are returned as one JSON string wrapped in a numpy bytes
        scalar so they can be stored in an hdf5 dataset.

        NOTE(review): ``np.string_`` is deprecated and removed in NumPy 2.0
        (alias of ``np.bytes_``) -- confirm the pinned NumPy version.

        :param file_path: The path. Type: string.
        :return: The content of the file
        """
        rows = []
        with open(file_path, mode='r') as csv_file:
            csv_reader = csv.DictReader(csv_file)
            for row in csv_reader:
                rows.append(row)
        return np.string_(json.dumps(rows))  # make the list of dicts as a string
| import os
import csv
import json
import bpy
import h5py
import numpy as np
from src.main.Module import Module
from src.utility.BlenderUtility import load_image
from src.utility.MathUtility import MathUtility
from src.utility.Utility import Utility
class WriterInterface(Module):
"""
Parent class for all other writers classes, it had the functionality to return objects attributes and write \
them to file and to load and process post processing modules
**Configuration**:
.. list-table::
:widths: 25 100 10
:header-rows: 1
* - Parameter
- Description
- Type
* - postprocessing_modules
- A dict of list of postprocessing modules. The key in the dict specifies the output to which the
postprocessing modules should be applied. Every postprocessing module has to have a run function which
takes in the raw data and returns the processed data.
- dict
* - destination_frame
- Used to transform point to blender coordinate frame. Default: ["X", "Y", "Z"]
- list
* - attributes_to_write
- A list of attribute names that should written to file. The next table lists all attributes that can be
used here.
- list
* - output_file_prefix
- The prefix of the file that should be created.
- string
* - output_key
- The key which should be used for storing the output in a merged file.
- string
* - write_alpha_channel
- If true, the alpha channel will be written to file. Default: False.
- bool
"""
def __init__(self, config):
Module.__init__(self, config)
self.postprocessing_modules_per_output = {}
module_configs = config.get_raw_dict("postprocessing_modules", {})
for output_key in module_configs:
self.postprocessing_modules_per_output[output_key] = Utility.initialize_modules(module_configs[output_key])
self.name_to_id = {}
self.destination_frame = self.config.get_list("destination_frame", ["X", "Y", "Z"])
def write_attributes_to_file(self, item_writer, items, default_file_prefix, default_output_key, default_attributes, version="1.0.0"):
""" Writes the state of the given items to a file with the configured prefix.
This method also registers the corresponding output.
:param item_writer: The item writer object to use. Type: object.
:param items: The list of items. Type: list.
:param default_file_prefix: The default file name prefix to use. Type: string.
:param default_output_key: The default output key to use. Type: string.
:param default_attributes: The default attributes to write, if no attributes are specified in the config. Type: list.
:param version: The version to use when registering the output. Type: string.
"""
file_prefix = self.config.get_string("output_file_prefix", default_file_prefix)
path_prefix = os.path.join(self._determine_output_dir(), file_prefix)
item_writer.write_items_to_file(path_prefix, items, self.config.get_list("attributes_to_write", default_attributes))
self._register_output(file_prefix, self.config.get_string("output_key", default_output_key), ".npy", version)
def _get_attribute(self, item, attribute_name):
""" Returns the value of the requested attribute for the given item.
This method covers all general attributes that blender objects have.
:param item: The item. Type: blender object.
:param attribute_name: The attribute name. Type: string.
:return: The attribute value.
"""
if attribute_name == "id":
if item.name not in self.name_to_id:
self.name_to_id[item.name] = len(self.name_to_id.values())
return self.name_to_id[item.name]
elif attribute_name == "name":
return item.name
elif attribute_name == "location":
return MathUtility.transform_point_to_blender_coord_frame(item.location, self.destination_frame)
elif attribute_name == "rotation_euler":
return MathUtility.transform_point_to_blender_coord_frame(item.rotation_euler, self.destination_frame)
elif attribute_name == "rotation_forward_vec":
# Calc forward vector from rotation matrix
rot_mat = item.rotation_euler.to_matrix()
forward = rot_mat @ mathutils.Vector([0, 0, -1])
return MathUtility.transform_point_to_blender_coord_frame(forward, self.destination_frame)
elif attribute_name == "rotation_up_vec":
# Calc up vector from rotation matrix
rot_mat = item.rotation_euler.to_matrix()
up = rot_mat @ mathutils.Vector([0, 1, 0])
return MathUtility.transform_point_to_blender_coord_frame(up, self.destination_frame)
elif attribute_name == "cam2world_matrix":
# Transform to matrix_world to given destination frame
cam2world_matrix = Utility.transform_matrix_to_blender_coord_frame(item.matrix_world, self.destination_frame)
return [[x for x in c] for c in cam2world_matrix]
elif attribute_name.startswith("customprop_"):
custom_property_name = attribute_name[len("customprop_"):]
# Make sure the requested custom property exist
if custom_property_name in item:
return item[custom_property_name]
else:
raise Exception("No such custom property: " + custom_property_name)
else:
raise Exception("No such attribute: " + attribute_name)
def _apply_postprocessing(self, output_key, data, version):
"""
Applies all postprocessing modules registered for this output type.
:param output_key: The key of the output type. Type: string
:param data: The numpy data.
:param version: The version number original data.
:return: The modified numpy data after doing the postprocessing
"""
if output_key in self.postprocessing_modules_per_output:
for module in self.postprocessing_modules_per_output[output_key]:
data, new_key, new_version = module.run(data, output_key, version)
else:
new_key = output_key
new_version = version
return data, new_key, new_version
def _load_and_postprocess(self, file_path, key, version = "1.0.0"):
"""
Loads an image and post process it.
:param file_path: Image path. Type: string.
:param key: The image's key with regards to the hdf5 file. Type: string.
:param version: The version number original data. Type: String. Default: 1.0.0.
:return: The post-processed image that was loaded using the file path.
"""
data = self._load_file(Utility.resolve_path(file_path))
data, new_key, new_version = self._apply_postprocessing(key, data, version)
print("Key: " + key + " - shape: " + str(data.shape) + " - dtype: " + str(data.dtype) + " - path: " + file_path)
return data, new_key, new_version
def _load_file(self, file_path):
""" Tries to read in the file with the given path into a numpy array.
:param file_path: The file path. Type: string.
:return: A numpy array containing the data of the file.
"""
if not os.path.exists(file_path):
raise Exception("File not found: " + file_path)
file_ending = file_path[file_path.rfind(".") + 1:].lower()
if file_ending in ["exr", "png", "jpg"]:
#num_channels is 4 if transparent_background is true in config
return load_image(file_path, num_channels = 3 + self.config.get_bool("write_alpha_channel", False))
elif file_ending in ["npy", "npz"]:
return self._load_npy(file_path)
elif file_ending in ["csv"]:
return self._load_csv(file_path)
else:
raise NotImplementedError("File with ending " + file_ending + " cannot be loaded.")
def _load_npy(self, file_path):
""" Load the npy/npz file at the given path.
:param file_path: The path. Type: string.
:return: The content of the file
"""
return np.load(file_path)
def _load_csv(self, file_path):
""" Load the csv file at the given path.
:param file_path: The path. Type: string.
:return: The content of the file
"""
rows = []
with open(file_path, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
rows.append(row)
return np.string_(json.dumps(rows)) # make the list of dicts as a string | en | 0.703022 | Parent class for all other writers classes, it had the functionality to return objects attributes and write \ them to file and to load and process post processing modules **Configuration**: .. list-table:: :widths: 25 100 10 :header-rows: 1 * - Parameter - Description - Type * - postprocessing_modules - A dict of list of postprocessing modules. The key in the dict specifies the output to which the postprocessing modules should be applied. Every postprocessing module has to have a run function which takes in the raw data and returns the processed data. - dict * - destination_frame - Used to transform point to blender coordinate frame. Default: ["X", "Y", "Z"] - list * - attributes_to_write - A list of attribute names that should written to file. The next table lists all attributes that can be used here. - list * - output_file_prefix - The prefix of the file that should be created. - string * - output_key - The key which should be used for storing the output in a merged file. - string * - write_alpha_channel - If true, the alpha channel will be written to file. Default: False. - bool Writes the state of the given items to a file with the configured prefix. This method also registers the corresponding output. :param item_writer: The item writer object to use. Type: object. :param items: The list of items. Type: list. :param default_file_prefix: The default file name prefix to use. Type: string. :param default_output_key: The default output key to use. Type: string. :param default_attributes: The default attributes to write, if no attributes are specified in the config. Type: list. :param version: The version to use when registering the output. Type: string. Returns the value of the requested attribute for the given item. This method covers all general attributes that blender objects have. :param item: The item. Type: blender object. :param attribute_name: The attribute name. 
Type: string. :return: The attribute value. # Calc forward vector from rotation matrix # Calc up vector from rotation matrix # Transform to matrix_world to given destination frame # Make sure the requested custom property exist Applies all postprocessing modules registered for this output type. :param output_key: The key of the output type. Type: string :param data: The numpy data. :param version: The version number original data. :return: The modified numpy data after doing the postprocessing Loads an image and post process it. :param file_path: Image path. Type: string. :param key: The image's key with regards to the hdf5 file. Type: string. :param version: The version number original data. Type: String. Default: 1.0.0. :return: The post-processed image that was loaded using the file path. Tries to read in the file with the given path into a numpy array. :param file_path: The file path. Type: string. :return: A numpy array containing the data of the file. #num_channels is 4 if transparent_background is true in config Load the npy/npz file at the given path. :param file_path: The path. Type: string. :return: The content of the file Load the csv file at the given path. :param file_path: The path. Type: string. :return: The content of the file # make the list of dicts as a string | 3.001691 | 3 |
project/completeness/KDE.py | rickdr/Data-analysis-DNN-testing | 0 | 6619273 | import math
import numpy as np
from scipy.stats import multivariate_normal
from scipy.spatial import distance
from scipy.stats import norm as univariate_normal
from numpy.linalg import norm as L2
DEFAULT_BATCH_SIZE = 20
class KernelDensityEstimator:
def __init__(self, kernel="multivariate_gaussian", bandwidth_estimator="silverman", univariate_bandwidth=None):
self.n = 0
self.const_scorw = 0
self.d = 0
self.muk = 0
self.invphi = (math.sqrt(5) - 1) / 2 # 1 / phi
self.invphi2 = (3 - math.sqrt(5)) / 2 # 1 / phi^2
self.min_dists = np.array([])
kernels = {"multivariate_gaussian": self.kernel_multivariate_gaussian,
"univariate_gaussian": self.kernel_univariate_gaussian}
bandwidth_estimators = {"silverman": self.est_bandwidth_silverman,
"scott": self.est_bandwidth_scott,
"identity": self.est_bandwidth_identity}
compatible_estimators = {"multivariate_gaussian": ["silverman", "scott", "identity"],
"univariate": []}
self.kernel = kernels[kernel]
# if multivariate gaussian kernel is chosen, choose an estimator
if kernel == "multivariate_gaussian":
self.bandwidth_estimator = bandwidth_estimators[bandwidth_estimator]
# if choosing univariate kernel without bandwidth clarified, print out a warning
elif kernel == "univariate_gaussian" and (not univariate_bandwidth):
print("Please define your \"univariate_bandwidth\" parameters since the bandwidth cannot \
automatically estimated using univariate kernel yet")
else:
self.univariate_bandwidth = univariate_bandwidth
# Kernel choice
self.kernel = kernels[kernel]
# Bandwidth for estimating density
self.bandwidth = None
# Store data
self.data = None
def kernel_multivariate_gaussian(self, x):
# Estimate density using multivariate gaussian kernel
# Retrieve data
data = self.data
# Get dim of data
d = data.shape[1]
# Estimate bandwidth
H = self.bandwidth_estimator()
self.bandwidth = H
# Calculate determinant of non zeros entry
diag_H = np.diagonal(H).copy()
diag_H[diag_H == 0] = 1
det_H = np.prod(diag_H)
# Multivariate normal density estimate of x
var = multivariate_normal(mean=np.zeros(d), cov=H, allow_singular=True)
density = np.expand_dims(var.pdf(x), 1)
return density
def kernel_univariate_gaussian(self, x):
# Estimate density using univariate gaussian kernel
# Retrieve data
data = self.data
# Get dim of data
d = data.shape[1]
# Estimate bandwidth
h = self.univariate_bandwidth
# Calculate density
density = univariate_normal.pdf(L2(x, axis=1)/h)/h
return density
def fit(self, X, y=None):
if len(X.shape) == 1:
self.data = X[:, np.newaxis]
else:
self.data = X
self.n = len(self.data)
self.muk = 1 / (2**self.d * np.sqrt(np.pi**self.d))\
self.const_score = (-self.n * self.d / 2 *
np.log(2 * np.pi) - self.n * np.log(self.n - 1))
self.d = self.data.shape[1]
return self
def set_samples(self, data, diff=False):
    """Pre-compute distances between the fitted data and ``data``.

    Stores the query points (flattened to 2-D) together with their squared
    Euclidean distances to ``self.data`` so later score_samples() calls can
    reuse them; with ``diff=True`` the pairwise difference vectors are
    cached as well.

    :param data: query points; the last axis is the feature dimension.
    :param diff: also cache ``self.data[i] - data[j]`` difference vectors.
    """
    # A 1-D input is treated as n scalar datapoints.
    if len(data.shape) == 1:
        data = data[:, np.newaxis]
    # Remember the leading shape so scores can be reshaped back later.
    self.newshape = data.shape[:-1]
    if len(data.shape) == 2:
        self.data_score_samples = data.copy()
    if not len(data.shape) == 2:
        # Collapse any extra leading dimensions into a single sample axis.
        self.data_score_samples = data.reshape(
            (np.prod(self.newshape), data.shape[-1]))
    # Squared Euclidean distance from every fitted point to every query point.
    self.data_dist = distance.cdist(self.data,
                                    self.data_score_samples,
                                    metric='sqeuclidean')
    if diff:
        # difference[i, j, :] = self.data[i] - data_score_samples[j]
        self.difference = \
            np.zeros((len(self.data),
                      len(self.data_score_samples),
                      self.d))
        for i, datam in enumerate(self.data_score_samples):
            self.difference[:, i, :] = self.data - datam
def score_samples(self, data=None):
    """Evaluate the estimated density at the given (or pre-set) points.

    :param data: query points, or None to score the points previously
        registered via set_samples().
    :return: densities, reshaped to the leading shape of the input.
    """
    if data is None:
        # The data is already set (via set_samples()); compute the scores
        # directly from the cached distances.
        scores = np.exp(self._logscore_samples())
        # The data needs to be converted back to the original input shape.
        return scores.reshape(self.newshape)
    # If the input is a 1-D array, each entry is assumed to be one scalar
    # datapoint (this would be wrong for a single multi-dimensional point).
    if len(data.shape) == 1:
        data = data[:, np.newaxis]
    if len(data.shape) == 2:
        return np.exp(self._logscore_samples(data))
    # Higher-rank input: the last axis is the feature dimension. Flatten
    # to 2-D for scoring, then restore the leading shape afterwards.
    newshape = data.shape[:-1]
    scores = np.exp(self._logscore_samples(
        data.reshape((np.prod(newshape), data.shape[-1]))))
    return scores.reshape(newshape)
def _logscore_samples(self, data=None):
    """Log-density of the fitted KDE at ``data`` (or the pre-set samples).

    NOTE(review): this assumes ``self.bandwidth`` is a *scalar* (isotropic
    Gaussian kernel), while kernel_multivariate_gaussian() stores a
    bandwidth *matrix* in the same attribute -- confirm which convention
    the caller establishes before mixing the two code paths.
    """
    if data is None:
        # Reuse the distances cached by set_samples().
        eucl_dist = self.data_dist[:self.n]
    else:
        eucl_dist = distance.cdist(
            self.data[:self.n], data, metric='sqeuclidean')
    sum_kernel = np.zeros(eucl_dist.shape[1])
    # Accumulate the unnormalized Gaussian kernel over the n fitted points.
    for dimension in eucl_dist:
        sum_kernel += np.exp(-dimension / (2 * self.bandwidth ** 2))
    # Log of the normalizing constant of the isotropic Gaussian KDE.
    const = -self.d/2*np.log(2*np.pi) - np.log(self.n) - \
        self.d*np.log(self.bandwidth)
    return const + np.log(sum_kernel)
def eval(self, X, y, batch_size=DEFAULT_BATCH_SIZE):
    """Print and return the MSE / cross-entropy diagnostics for (X, y)."""
    mse, ce = self.MSE_CE(X, y, batch_size=batch_size)
    print("Cross entropy", ce)
    print("Mean Square Error: ", mse)
    return mse, ce
def MSE_CE(self, X, y, batch_size=DEFAULT_BATCH_SIZE):
    """Mean-square error and mean cross-entropy of the predicted class
    probabilities against a uniform 1/num_classes baseline.

    :param X: evaluation inputs.
    :param y: labels; only used to count the distinct classes.
    :param batch_size: forwarded to predict_proba().
    :return: (MSE, CE) tuple.
    """
    num_classes = len(np.unique(y))
    n_samples = len(X)
    # A tiny epsilon keeps log() finite when a probability is exactly 0.
    proba = self.predict_proba(X, batch_size=batch_size) + 1e-15
    uniform = 1 / num_classes
    # Squared deviation of the mean probability from the uniform baseline.
    mse = (proba.mean() - uniform) ** 2
    log_proba = np.log(proba)
    ce = 1 / n_samples * np.sum(uniform * log_proba - (1 - uniform) * log_proba)
    return mse, ce
def est_mise(self, laplacian, pdf):
    """Asymptotic MISE estimate for the current bandwidth.

    Classic AMISE decomposition: a variance term shrinking with
    n * bandwidth plus a bias term driven by the integrated squared
    Laplacian of the density estimate.
    """
    bias_integral = np.trapz(laplacian ** 2, pdf)
    variance_term = 1 / (2 * np.sqrt(np.pi)) / (self.n * self.bandwidth)
    bias_term = bias_integral * self.bandwidth ** 4 / 4
    return variance_term + bias_term
def est_bandwidth_scott(self):
    """Scott's rule-of-thumb bandwidth: squared diagonal matrix of
    per-dimension standard deviations scaled by n**(-1/(d+4))."""
    samples = self.data
    n_samples, n_dims = samples.shape
    per_dim_std = np.std(samples, axis=0)
    scale = n_samples ** (-1 / (n_dims + 4))
    return (scale * np.diag(per_dim_std)) ** 2
def est_bandwidth_identity(self):
    """Trivial bandwidth estimator: identity matrix of the data dimension.

    Returns:
        (d, d) identity ndarray, where d = self.data.shape[1].
    """
    # Only the dimensionality is needed; the original also fetched the
    # sample count into an unused local, which has been removed.
    return np.identity(self.data.shape[1])
def est_bandwidth_silverman(self):
    """Silverman's rule-of-thumb diagonal bandwidth matrix.

    NOTE(review): unlike est_bandwidth_scott the result is NOT squared
    here — confirm whether both estimators are meant to return H on the
    same scale.
    """
    samples = self.data
    n_samples, n_dims = samples.shape
    per_dim_std = np.std(samples, axis=0)
    factor = ((4 / (n_dims + 2)) ** (1 / (n_dims + 4))
              * n_samples ** (-1 / (n_dims + 4)))
    return factor * np.diag(per_dim_std)
def gss(self, fun, l_bound, u_bound, tol=1e-5, max_n=100):
    """Golden-section search.
    Given a function f with a single local minimum in
    the interval [a,b], gss returns a subset interval
    [c,d] that contains the minimum with d-c <= tol.
    Example:
    >>> f = lambda x: (x-2)**2
    >>> a = 1
    >>> b = 5
    >>> tol = 1e-5
    >>> (c,d) = gss(f, a, b, tol)
    >>> print(c, d)
    1.9999959837979107 2.0000050911830893
    """
    # Normalise bounds so l_bound <= u_bound.
    (l_bound, u_bound) = (min(l_bound, u_bound), max(l_bound, u_bound))
    h = u_bound - l_bound
    if h <= tol:
        return (l_bound, u_bound)
    # Required steps to achieve tolerance
    n = int(math.ceil(math.log(tol / h) / math.log(self.invphi)))
    # Clamp to at least one and at most max_n iterations.
    n = max(1, min(n, max_n))
    # Two interior probe points at the golden-ratio positions.
    c = l_bound + self.invphi2 * h
    d = l_bound + self.invphi * h
    yc = fun(c)
    yd = fun(d)
    for k in range(n):
        if yc > yd:
            # Minimum lies right of c: drop [l_bound, c), reuse c as new d.
            u_bound = d
            d = c
            yd = yc
            h = self.invphi * h
            c = l_bound + self.invphi2 * h
            yc = fun(c)
        else:
            # Minimum lies left of d: drop (d, u_bound], reuse d as new c.
            l_bound = c
            c = d
            yc = yd
            h = self.invphi * h
            d = l_bound + self.invphi * h
            yd = fun(d)
    # Side effect: record the midpoint of the final bracket as the
    # selected bandwidth before returning the bracket itself.
    if yc < yd:
        self.bandwidth = (l_bound + d) / 2
        return (l_bound, d)
    else:
        self.bandwidth = (c + u_bound) / 2
        return (c, u_bound)
def gsection(self, f, a, b, tol):
    """Golden-section minimisation of f over [a, b] down to width tol.

    Side effect: stores the midpoint of the final bracket in
    self.bandwidth.  Returns the final bracketing interval as a tuple.

    Improvement: the original evaluated f(a) and f(b) up front (and kept
    refreshing an `fa` local) although those values were never used; the
    wasted objective evaluations have been removed.
    """
    # Two interior probe points at the golden-ratio positions.
    width = b - a
    c = a + self.invphi2 * width
    d = a + self.invphi * width
    fc = f(c)
    fd = f(d)
    while (b - a) > tol:
        if fc < fd:
            # Minimum is in [a, d]: shrink from the right, reuse c as new d.
            b = d
            d = c
            fd = fc
            width = self.invphi * width
            c = b - self.invphi * width
            fc = f(c)
        else:
            # Minimum is in [c, b]: shrink from the left, reuse d as new c.
            a = c
            c = d
            fc = fd
            width = self.invphi * width
            d = a + self.invphi * width
            fd = f(d)
    if fc < fd:
        self.bandwidth = (a + d) / 2
        return (a, d)
    else:
        self.bandwidth = (c + b) / 2
        return (c, b)
def score_leave_one_out(self, bandwidth=None):
    """Leave-one-out log-likelihood of the training data.

    Args:
        bandwidth: kernel bandwidth; falls back to self.bandwidth when
            None.  (The body always supported None, but the parameter
            previously had no default, forcing callers to pass it —
            adding ``=None`` is backward-compatible.)
    Returns:
        Scalar leave-one-out log-likelihood score.
    """
    if self.min_dists.size == 0:
        # Lazily cache the negated, halved pairwise squared distances;
        # negating in place avoids an invalid-value warning from exp().
        self.min_dists = distance.squareform(
            distance.pdist(self.data, metric='sqeuclidean')) / 2
        self.min_dists *= -1  # Do it this way to prevent invalid warning
    bandwidth = self.bandwidth if bandwidth is None else bandwidth
    # Sum the kernels at every point excluding its own contribution
    # (the "- 1" removes exp(0) = 1 on the diagonal), in log space.
    score = (np.sum(np.log(np.sum(np.exp(self.min_dists
                                         [:self.n, :self.n] /
                                         bandwidth ** 2),
                                  axis=0) - 1)) -
             self.n * self.d * np.log(bandwidth) + self.const_score)
    return score
def predict_proba(self, X, batch_size=10):
    """Estimated density for each row of X under the fitted KDE.

    X is processed in batches of at most *batch_size* rows to bound the
    memory used by the pairwise-difference tensor.

    Bug fixes vs. the original:
      * each batch now uses its own rows — the original rebuilt the
        differences from the full X on every batch, so the output length
        was num_batches * len(X) instead of len(X);
      * the difference tensor is laid out (n_data, n_batch, dim) before
        flattening, so every row handed to the kernel is a genuine
        per-pair difference vector (the original reshaped a
        (n_data, dim, n_batch) tensor directly, scrambling the rows);
      * the tqdm progress bar and debug prints were removed — tqdm is
        not imported in this module and raised a NameError.
    """
    kernel_func = self.kernel
    data = self.data
    n_data = data.shape[0]
    est_probs = np.empty(0)
    num_batches = int(np.ceil(X.shape[0] / batch_size))
    for X_batch in np.array_split(X, num_batches):
        n_batch = X_batch.shape[0]
        # Pairwise differences via broadcasting: (n_data, n_batch, dim).
        delta = X_batch[np.newaxis, :, :] - data[:, np.newaxis, :]
        delta = delta.reshape(n_data * n_batch, -1)
        kernel_vals = kernel_func(delta)
        # Average the kernel over the training points for each query row.
        batch_probs = kernel_vals.reshape(n_data, n_batch).sum(axis=0) / n_data
        est_probs = np.concatenate((est_probs, batch_probs))
    return est_probs
def laplacian(self, data: np.ndarray = None):
    """Shape-preserving dispatch wrapper around _laplacian."""
    if data is None:
        # Use the evaluation points registered via set_samples.
        return self._laplacian().reshape(self.newshape)
    if data.ndim == 1:
        # Treat a flat vector as a column of scalar datapoints.
        data = data[:, np.newaxis]
    if data.ndim == 2:
        return self._laplacian(data)
    # Higher-rank input: the last axis is the datapoint dimension.
    lead_shape = data.shape[:-1]
    flat = data.reshape((np.prod(lead_shape), data.shape[-1]))
    return self._laplacian(flat).reshape(lead_shape)
def _laplacian(self, data: np.ndarray = None):
    """Mean Laplacian of the Gaussian KDE at the query points.

    With data=None, reuses the distances cached by set_samples.
    """
    if data is None:
        sq_dists = self.data_dist[:self.n]
    else:
        sq_dists = distance.cdist(
            self.data[:self.n], data, metric='sqeuclidean')
    h = self.bandwidth
    # Gaussian normalisation constant (hoisted out of the loop).
    gauss_norm = (2 * np.pi) ** (self.d / 2) * h ** self.d
    acc = np.zeros(sq_dists.shape[1])
    for row in sq_dists:
        kernel_pdf = np.exp(-row / (2 * h ** 2)) / gauss_norm
        # Laplacian of a Gaussian kernel: pdf * (r^2/h^4 - d/h^2).
        acc += kernel_pdf * (row / h ** 4 - self.d / h ** 2)
    return acc / self.n
def random_sample(self, scaling_factor):
    # Draw one synthetic sample: pick a random training point and perturb
    # it with Gaussian noise whose covariance is the estimated bandwidth
    # matrix scaled by `scaling_factor`.
    # Get H
    H = self.bandwidth_estimator()*scaling_factor
    # Retrieve data
    # NOTE(review): `data` is never used below — dead local.
    data = self.data
    # Randomly pick a data point
    random_data = np.random.permutation(self.data)[0]
    # sample
    sample = np.random.multivariate_normal(mean=random_data, cov=H)
    # Print out predicted density for new sample
    print("Density new sample: ", self.predict_proba(
        np.expand_dims(sample, 0))[0])
    # Returns (chosen training point, perturbed sample).
    return random_data, sample
def predict(self, X, batch_size=DEFAULT_BATCH_SIZE):
    """Density prediction for X; accepts a single vector or a matrix."""
    # Promote a lone sample vector to a one-row matrix.
    if X.ndim == 1:
        X = np.expand_dims(X, 0)
    return self.predict_proba(X, batch_size=batch_size)
| import math
import numpy as np
from scipy.stats import multivariate_normal
from scipy.spatial import distance
from scipy.stats import norm as univariate_normal
from numpy.linalg import norm as L2
DEFAULT_BATCH_SIZE = 20
class KernelDensityEstimator:
def __init__(self, kernel="multivariate_gaussian", bandwidth_estimator="silverman", univariate_bandwidth=None):
    """Configure the estimator; no data is touched until fit().

    Args:
        kernel: "multivariate_gaussian" or "univariate_gaussian".
        bandwidth_estimator: "silverman", "scott" or "identity";
            only used by the multivariate kernel.
        univariate_bandwidth: explicit scalar bandwidth, required for
            the univariate kernel (only a warning is printed if missing).

    Fixes vs. the original: the `const_scorw` attribute was a typo —
    the rest of the class reads `const_score` (fit() and
    score_leave_one_out()), so calling score_leave_one_out() before
    fit() raised AttributeError.  A duplicated `self.kernel` assignment
    and the unused `compatible_estimators` table were also removed.
    """
    self.n = 0
    self.const_score = 0
    self.d = 0
    self.muk = 0
    self.invphi = (math.sqrt(5) - 1) / 2  # 1 / phi
    self.invphi2 = (3 - math.sqrt(5)) / 2  # 1 / phi^2
    self.min_dists = np.array([])
    kernels = {"multivariate_gaussian": self.kernel_multivariate_gaussian,
               "univariate_gaussian": self.kernel_univariate_gaussian}
    bandwidth_estimators = {"silverman": self.est_bandwidth_silverman,
                            "scott": self.est_bandwidth_scott,
                            "identity": self.est_bandwidth_identity}
    # Kernel choice (assigned once; the original set it twice).
    self.kernel = kernels[kernel]
    # if multivariate gaussian kernel is chosen, choose an estimator
    if kernel == "multivariate_gaussian":
        self.bandwidth_estimator = bandwidth_estimators[bandwidth_estimator]
    # if choosing univariate kernel without bandwidth clarified, print out a warning
    elif kernel == "univariate_gaussian" and (not univariate_bandwidth):
        print("Please define your \"univariate_bandwidth\" parameters since the bandwidth cannot \
automatically estimated using univariate kernel yet")
    else:
        self.univariate_bandwidth = univariate_bandwidth
    # Bandwidth for estimating density
    self.bandwidth = None
    # Store data
    self.data = None
def kernel_multivariate_gaussian(self, x):
    """Multivariate Gaussian kernel values for the difference vectors x.

    (Re-)estimates the bandwidth matrix H with the configured estimator,
    stores it in self.bandwidth, and evaluates a zero-mean N(0, H)
    density at each row of x.

    Returns:
        (n, 1) column vector of densities.

    Improvement: the original also computed a determinant of the
    non-zero diagonal entries of H but never used it; that dead code was
    removed (singularity is already handled by allow_singular=True).
    """
    d = self.data.shape[1]
    H = self.bandwidth_estimator()
    self.bandwidth = H
    var = multivariate_normal(mean=np.zeros(d), cov=H, allow_singular=True)
    return np.expand_dims(var.pdf(x), 1)
def kernel_univariate_gaussian(self, x):
    """Univariate Gaussian kernel applied to the L2 norm of each row of x.

    Uses the user-supplied scalar self.univariate_bandwidth h and
    returns pdf(||x|| / h) / h per row.

    Improvement: the original fetched self.data and its dimensionality
    into locals it never used; those dead locals were removed.
    """
    h = self.univariate_bandwidth
    return univariate_normal.pdf(L2(x, axis=1) / h) / h
def fit(self, X, y=None):
    """Store the training data and precompute score constants.

    Args:
        X: (n, d) data matrix; a 1-D array is treated as (n, 1).
        y: ignored, present for scikit-learn-style compatibility.
    Returns:
        self, to allow chaining.

    Fix: self.d must be assigned BEFORE it is used.  The original
    computed `muk` and `const_score` with the stale d from __init__
    (0 on the first fit), yielding wrong constants for d > 0; a stray
    line-continuation backslash between the two assignments was also
    removed.
    """
    if X.ndim == 1:
        self.data = X[:, np.newaxis]
    else:
        self.data = X
    self.n = len(self.data)
    self.d = self.data.shape[1]
    self.muk = 1 / (2 ** self.d * np.sqrt(np.pi ** self.d))
    self.const_score = (-self.n * self.d / 2 *
                        np.log(2 * np.pi) - self.n * np.log(self.n - 1))
    return self
def set_samples(self, data, diff=False):
    """Register evaluation points and cache their distances to the data.

    Stores a flattened 2-D copy of *data* together with the squared
    Euclidean distances to the training set; when diff=True the
    per-pair difference vectors are cached as well.
    """
    if data.ndim == 1:
        data = data[:, np.newaxis]
    # Leading shape, used later to restore the caller's array layout.
    self.newshape = data.shape[:-1]
    if data.ndim == 2:
        self.data_score_samples = data.copy()
    else:
        self.data_score_samples = data.reshape(
            (np.prod(self.newshape), data.shape[-1]))
    self.data_dist = distance.cdist(self.data,
                                    self.data_score_samples,
                                    metric='sqeuclidean')
    if diff:
        n_train = len(self.data)
        n_eval = len(self.data_score_samples)
        self.difference = np.zeros((n_train, n_eval, self.d))
        for col, sample in enumerate(self.data_score_samples):
            self.difference[:, col, :] = self.data - sample
def score_samples(self, data = None):
    """Density estimates with the caller's array layout preserved.

    With data=None, evaluates at the points registered via
    set_samples().  A 1-D input is treated as a sequence of scalar
    datapoints; for higher-rank input the last axis is taken as the
    datapoint dimension.
    """
    if data is None:
        # Distances were precomputed by set_samples.
        return np.exp(self._logscore_samples()).reshape(self.newshape)
    if data.ndim == 1:
        # NOTE: kept from the original contract — this may be wrong if
        # the caller meant a single multi-dimensional datapoint.
        data = data[:, np.newaxis]
    if data.ndim == 2:
        return np.exp(self._logscore_samples(data))
    lead_shape = data.shape[:-1]
    flat = data.reshape((np.prod(lead_shape), data.shape[-1]))
    return np.exp(self._logscore_samples(flat)).reshape(lead_shape)
def _logscore_samples(self, data = None):
if data is None:
eucl_dist = self.data_dist[:self.n]
else:
eucl_dist = distance.cdist(
self.data[:self.n], data, metric='sqeuclidean')
sum_kernel = np.zeros(eucl_dist.shape[1])
for dimension in eucl_dist:
sum_kernel += np.exp(-dimension / (2 * self.bandwidth ** 2))
const = -self.d/2*np.log(2*np.pi) - np.log(self.n) - \
self.d*np.log(self.bandwidth)
return const + np.log(sum_kernel)
def eval(self, X, y, batch_size=DEFAULT_BATCH_SIZE):
# Print out evaluation using MSE and CE
MSE, CE = self.MSE_CE(X, y, batch_size=batch_size)
print("Cross entropy", CE)
print("Mean Square Error: ", MSE)
return MSE, CE
def MSE_CE(self, X, y, batch_size=DEFAULT_BATCH_SIZE):
# Calculate mean square error and a binary cross entropy for a given H
# Retrieve number of classes
num_classes = len(np.unique(y))
# Retrieve number of instances in X
N = len(X)
# Predict proba
proba = self.predict_proba(
X, batch_size=batch_size) + 1e-15 # to fix log(0)
# Construct mean square error
MSE = (proba.mean() - 1/num_classes)**2
# Construct mean cross entropy
CE = 1/N*np.sum(1/num_classes*np.log(proba) -
(1-1/num_classes)*np.log(proba))
return MSE, CE
def est_mise(self, laplacian, pdf):
integral_laplacian = np.trapz(laplacian ** 2, pdf)
mise_est = 1 / (2 * np.sqrt(np.pi)) / (self.n * self.bandwidth) + \
integral_laplacian * self.bandwidth ** 4 / 4
return mise_est
def est_bandwidth_scott(self):
# Estimate bandwidth using scott's rule
# Retrieve data
data = self.data
# Get number of samples
n = data.shape[0]
# Get dim of data
d = data.shape[1]
# Compute standard along each i-th variable
std = np.std(data, axis=0)
# Construct the H diagonal bandwidth matrix with std along the diag
H = (n**(-1/(d+4))*np.diag(std))**2
return H
def est_bandwidth_identity(self):
# Generate an identity matrix of density for bandwidth
# Retrieve data
data = self.data
# Get number of samples
n = data.shape[0]
# Get dim of data
d = data.shape[1]
# Construct the H bandwidth matrix
H = np.identity(d)
return H
def est_bandwidth_silverman(self):
# Estimate bandwidth using silverman's rule of thumbs
# Retrieve data
data = self.data
# Get number of samples
n = data.shape[0]
# Get dim of data
d = data.shape[1]
# Compute standard along each i-th variable
std = np.std(data, axis=0)
# Construct the H diagonal bandwidth matrix with std along the diag
H = (4/(d+2))**(1/(d+4))*(n**(-1/(d+4)))*np.diag(std)
return H
def gss(self, fun, l_bound, u_bound, tol=1e-5, max_n=100):
"""Golden-section search.
Given a function f with a single local minimum in
the interval [a,b], gss returns a subset interval
[c,d] that contains the minimum with d-c <= tol.
Example:
>>> f = lambda x: (x-2)**2
>>> a = 1
>>> b = 5
>>> tol = 1e-5
>>> (c,d) = gss(f, a, b, tol)
>>> print(c, d)
1.9999959837979107 2.0000050911830893
"""
(l_bound, u_bound) = (min(l_bound, u_bound), max(l_bound, u_bound))
h = u_bound - l_bound
if h <= tol:
return (l_bound, u_bound)
# Required steps to achieve tolerance
n = int(math.ceil(math.log(tol / h) / math.log(self.invphi)))
n = max(1, min(n, max_n))
c = l_bound + self.invphi2 * h
d = l_bound + self.invphi * h
yc = fun(c)
yd = fun(d)
for k in range(n):
if yc > yd:
u_bound = d
d = c
yd = yc
h = self.invphi * h
c = l_bound + self.invphi2 * h
yc = fun(c)
else:
l_bound = c
c = d
yc = yd
h = self.invphi * h
d = l_bound + self.invphi * h
yd = fun(d)
if yc < yd:
self.bandwidth = (l_bound + d) / 2
return (l_bound, d)
else:
self.bandwidth = (c + u_bound) / 2
return (c, u_bound)
def gsection(self, f, a, b, tol):
# Evaluate function at upper and lower bound
fa = f(a)
fb = f(b)
# Compute two new points which correspond to golden ratio
width = b - a
c = a + self.invphi2*width
#c = b - self.invphi*width
#c = a + self.invphi**2*width
d = a + self.invphi*width
fc = f(c)
fd = f(d)
while (b - a) > tol:
if fc < fd:
b = d
d = c
fd = fc
width = self.invphi*width
c = b - self.invphi*width
#c = a + self.invphi2*width
fc = f(c)
else:
a = c
fa = fc
c = d
fc = fd
width = self.invphi*width
d = a + self.invphi*width
fd = f(d)
if fc < fd:
self.bandwidth = (a + d) / 2
return (a, d)
else:
self.bandwidth = (c + b) / 2
return (c, b)
def score_leave_one_out(self, bandwidth):
if self.min_dists.size == 0:
# print("score min_dists", self.min_dists)
self.min_dists = distance.squareform(
distance.pdist(self.data, metric='sqeuclidean')) / 2
self.min_dists *= -1 # Do it this way to prevent invalid warning
# Compute the one-leave-out score
bandwidth = self.bandwidth if bandwidth is None else bandwidth
score = (np.sum(np.log(np.sum(np.exp(self.min_dists
[:self.n, :self.n] /
bandwidth ** 2),
axis=0) - 1)) -
self.n * self.d * np.log(bandwidth) + self.const_score)
return score
def predict_proba(self, X, batch_size=10):
# Predict proba for an input matrix X
kernel_func = self.kernel
# Retrieve data
data = self.data
# number of samples in data
n_data = data.shape[0]
# number of samples in input set
n_X = X.shape[0]
# Init the estimated probabilities list
est_probs = np.empty(0)
num_batches = np.ceil(n_X/batch_size)
print("bs:", batch_size)
for X_ in tqdm(np.array_split(X, num_batches)):
print("...")
# Add third dimension for broardcasting
# shape (1, dim, n_X)
X_ = np.expand_dims(X, 0).transpose((0, 2, 1))
# shape(n_data, dim, 1)
data_ = np.expand_dims(data, 2)
# The difference of input set and data set pairwise (using broadcasting)
print(type(X_), type(data_))
print(X_.shape, data_.shape)
# shape (n_data, dim, n_X)
delta = X_ - data_
print("hier")
# Flatten the delta into matrix
delta = delta.reshape(n_data*n_X, -1) # shape (n_data*n_X, dim)
est_prob = kernel_func(delta) # (n_data*n_X, )
# Calculate mean sum of probability for each sample
est_prob = 1/n_data*est_prob.reshape(n_data, n_X).T.sum(axis=1)
est_probs = np.concatenate((est_probs, est_prob))
return est_probs
def laplacian(self, data: np.ndarray = None):
if data is None:
laplacian = self._laplacian()
return laplacian.reshape(self.newshape)
if len(data.shape) == 1:
data = data[:, np.newaxis]
if len(data.shape) == 2:
return self._laplacian(data)
newshape = data.shape[:-1]
laplacian = self._laplacian(data.reshape(
(np.prod(newshape), data.shape[-1])))
return laplacian.reshape(newshape)
def _laplacian(self, data: np.ndarray = None):
if data is None:
eucl_dist = self.data_dist[:self.n]
else:
eucl_dist = distance.cdist(
self.data[:self.n], data, metric='sqeuclidean')
laplacian = np.zeros(eucl_dist.shape[1])
for dimension in eucl_dist:
pdf = np.exp(-dimension / (2 * self.bandwidth ** 2)) / \
((2 * np.pi) ** (self.d / 2) * self.bandwidth ** self.d)
laplacian += pdf * (dimension / self.bandwidth ** 4 - self.d /
self.bandwidth ** 2)
return laplacian / self.n
def random_sample(self, scaling_factor):
# Get H
H = self.bandwidth_estimator()*scaling_factor
# Retrieve data
data = self.data
# Randomly pick a data point
random_data = np.random.permutation(self.data)[0]
# sample
sample = np.random.multivariate_normal(mean=random_data, cov=H)
# Print out predicted density for new sample
print("Density new sample: ", self.predict_proba(
np.expand_dims(sample, 0))[0])
return random_data, sample
def predict(self, X, batch_size=DEFAULT_BATCH_SIZE):
# Predict proba for a given X to belong to a dataset
# if x is a vector (has 1 axis)
if len(X.shape) == 1:
# expand one more axis to represent a matrix
X = np.expand_dims(X, 0)
proba = self.predict_proba(X, batch_size=batch_size)
return proba
| en | 0.710515 | # 1 / phi # 1 / phi^2 # if multivariate gaussian kernel is chosen, choose an estimator # if choosing univariate kernel without bandwidth clarified, print out a warning # Kernel choice # Bandwidth for estimating density # Store data # Estimate density using multivariate gaussian kernel # Retrieve data # Get dim of data # Estimate bandwidth # Calculate determinant of non zeros entry # Multivariate normal density estimate of x # Estimate density using univariate gaussian kernel # Retrieve data # Get dim of data # Estimate bandwidth # Calculate density # The data is already set. We can compute the scores directly using _logscore_samples # The data needs to be converted to the original input shape # If the input x is a 1D array, it is assumed that each entry corresponds to a # datapoint # This might result in an error if x is meant to be a single (multi-dimensional) # datapoint # It is assumed that the last dimension corresponds to the dimension of the data # (i.e., a single datapoint) # Data is transformed to a 2d-array which can be used by self.kde. 
Afterwards, # data is converted to input shape # Print out evaluation using MSE and CE # Calculate mean square error and a binary cross entropy for a given H # Retrieve number of classes # Retrieve number of instances in X # Predict proba # to fix log(0) # Construct mean square error # Construct mean cross entropy # Estimate bandwidth using scott's rule # Retrieve data # Get number of samples # Get dim of data # Compute standard along each i-th variable # Construct the H diagonal bandwidth matrix with std along the diag # Generate an identity matrix of density for bandwidth # Retrieve data # Get number of samples # Get dim of data # Construct the H bandwidth matrix # Estimate bandwidth using silverman's rule of thumbs # Retrieve data # Get number of samples # Get dim of data # Compute standard along each i-th variable # Construct the H diagonal bandwidth matrix with std along the diag Golden-section search. Given a function f with a single local minimum in the interval [a,b], gss returns a subset interval [c,d] that contains the minimum with d-c <= tol. 
Example: >>> f = lambda x: (x-2)**2 >>> a = 1 >>> b = 5 >>> tol = 1e-5 >>> (c,d) = gss(f, a, b, tol) >>> print(c, d) 1.9999959837979107 2.0000050911830893 # Required steps to achieve tolerance # Evaluate function at upper and lower bound # Compute two new points which correspond to golden ratio #c = b - self.invphi*width #c = a + self.invphi**2*width #c = a + self.invphi2*width # print("score min_dists", self.min_dists) # Do it this way to prevent invalid warning # Compute the one-leave-out score # Predict proba for an input matrix X # Retrieve data # number of samples in data # number of samples in input set # Init the estimated probabilities list # Add third dimension for broardcasting # shape (1, dim, n_X) # shape(n_data, dim, 1) # The difference of input set and data set pairwise (using broadcasting) # shape (n_data, dim, n_X) # Flatten the delta into matrix # shape (n_data*n_X, dim) # (n_data*n_X, ) # Calculate mean sum of probability for each sample # Get H # Retrieve data # Randomly pick a data point # sample # Print out predicted density for new sample # Predict proba for a given X to belong to a dataset # if x is a vector (has 1 axis) # expand one more axis to represent a matrix | 2.937346 | 3 |
sickbeard/sickbeard/clients/utorrent.py | Branlala/docker-sickbeardfr | 0 | 6619274 | # Authors: Mr_Orange <<EMAIL>>, EchelonFour
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import re
import sickbeard
from sickbeard.clients.generic import GenericClient
class uTorrentAPI(GenericClient):
    """Client for the uTorrent WebUI API (token-authenticated requests)."""

    def __init__(self, host=None, username=None, password=None):
        # `password` default restored to None (the source contained an
        # anonymisation placeholder that is not valid Python).
        super(uTorrentAPI, self).__init__('uTorrent', host, username, password)
        self.url = self.host + 'gui/'

    def _request(self, method='get', params=None, files=None):
        # Fixed: `params={}` was a shared mutable default; every call
        # mutated the same dict object.
        params = {} if params is None else params
        params.update({'token': self._get_auth()})
        return super(uTorrentAPI, self)._request(method=method, params=params, files=files)

    def _get_auth(self):
        """Fetch the CSRF token from token.html; returns None on failure."""
        try:
            self.response = self.session.get(self.url + 'token.html')
            self.auth = re.findall("<div.*?>(.*?)</", self.response.text)[0]
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit are no longer swallowed.
            return None
        return self.auth if not self.response.status_code == 404 else None

    def _add_torrent_uri(self, result):
        params = {'action': 'add-url', 's': result.url}
        return self._request(params=params)

    def _add_torrent_file(self, result):
        params = {'action': 'add-file'}
        files = {'torrent_file': ('tv.torrent', result.content)}
        return self._request(method='post', params=params, files=files)

    def _set_torrent_label(self, result):
        params = {'action': 'setprops',
                  'hash': result.hash,
                  's': 'label',
                  'v': sickbeard.TORRENT_LABEL
                  }
        return self._request(params=params)

    def _set_torrent_pause(self, result):
        if sickbeard.TORRENT_PAUSED:
            params = {'action': 'pause', 'hash': result.hash}
            return self._request(params=params)
        return True
api = uTorrentAPI() | # Authors: Mr_Orange <<EMAIL>>, EchelonFour
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import re
import sickbeard
from sickbeard.clients.generic import GenericClient
class uTorrentAPI(GenericClient):
def __init__(self, host=None, username=None, password=<PASSWORD>):
super(uTorrentAPI, self).__init__('uTorrent', host, username, password)
self.url = self.host + 'gui/'
def _request(self, method='get', params={}, files=None):
params.update({'token':self._get_auth()})
return super(uTorrentAPI, self)._request(method=method, params=params, files=files)
def _get_auth(self):
try:
self.response = self.session.get(self.url + 'token.html')
self.auth = re.findall("<div.*?>(.*?)</", self.response.text)[0]
except:
return None
return self.auth if not self.response.status_code == 404 else None
def _add_torrent_uri(self, result):
params={'action':'add-url', 's': result.url}
return self._request(params=params)
def _add_torrent_file(self, result):
params = {'action':'add-file'}
files={'torrent_file': ('tv.torrent', result.content)}
return self._request(method='post', params=params, files=files)
def _set_torrent_label(self, result):
params = {'action':'setprops',
'hash':result.hash,
's':'label',
'v':sickbeard.TORRENT_LABEL
}
return self._request(params=params)
def _set_torrent_pause(self, result):
if sickbeard.TORRENT_PAUSED:
params = {'action':'pause', 'hash':result.hash}
return self._request(params=params)
return True
api = uTorrentAPI() | en | 0.86345 | # Authors: Mr_Orange <<EMAIL>>, EchelonFour # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. | 2.356934 | 2 |
apilotus/settings.py | almir-sahinovic/apilotus_django | 0 | 6619275 | """
Django settings for apilotus project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
from sys import path
from datetime import date
from collections import OrderedDict
from decouple import config, Csv
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# PROJECT_DIR = os.path.dirname(__file__)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(BASE_DIR)
path.append(os.path.join(BASE_DIR, 'apps'))
INTERNAL_IPS = config('ALLOWED_HOSTS', cast=Csv())
SITE_ID = 1
# Application definition
DJANGO_APPS = [
'jet.dashboard',
'jet',
'modeltranslation',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
]
LOCAL_APPS = [
'lotus_auth',
'notifications',
'loci',
'lotus_dashboard',
'lotus_alert',
]
THIRD_PARTY_APPS = [
'rest_framework',
'ckeditor',
'rosetta',
'constance',
'constance.backends.database',
# 'debug_toolbar',
'localflavor',
'widget_tweaks',
'django_telegrambot',
'django_extensions',
]
INSTALLED_APPS = DJANGO_APPS + LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.gzip.GZipMiddleware',
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.http.ConditionalGetMiddleware',
]
ROOT_URLCONF = 'apilotus.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'constance.context_processors.config',
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'apilotus.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
if os.environ.get('DJANGO_DEVELOPMENT'):
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1']
SECRET_KEY = 'dummykey'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'public/static')]
else:
DEBUG = False
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
SECRET_KEY = config('SECRET_KEY')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': 'localhost',
}
}
STATIC_ROOT = os.path.join(BASE_DIR, 'public/static')
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# CKEDITOR_CONFIGS = {
# 'default': {
# 'toolbar': 'Custom',
# 'toolbar_Custom': [
# ['Bold'],
# ['Link'],
# ['BulletedList', 'NumberedList']
# ]
# }
# }
# MODELTRANSLATION_TRANSLATION_FILES = ('lotus_auth.translation',)
# MODELTRANSLATION_TRANSLATION_REGISTRY = 'matau.translation'
CONSTANCE_CONFIG = OrderedDict([
('Telephone', ('', '')),
('Fax', ('', '')),
('Email', ('', '')),
('Address', ('', '')),
])
CONSTANCE_CONFIG_FIELDSETS = {
'General Settings': ('Telephone', 'Fax', 'Email', 'Address'),
}
CONSTANCE_BACKEND = 'constance.backends.database.DatabaseBackend'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
CONSTANCE_DATABASE_CACHE_BACKEND = 'default'
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
'debug_toolbar.panels.profiling.ProfilingPanel',
]
def show_toolbar(request):
    """Debug-toolbar visibility callback.

    Show the toolbar for every non-AJAX request that carries a user
    object; hide it for AJAX requests or when no user is attached.
    """
    return not request.is_ajax() and bool(request.user)
# Decide toolbar visibility via show_toolbar() above instead of the
# default INTERNAL_IPS check.
DEBUG_TOOLBAR_CONFIG = {
    # 'INTERCEPT_REDIRECTS': False,
    'SHOW_TOOLBAR_CALLBACK': 'apilotus.settings.show_toolbar',
}
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
# Identity function: marks the language names below for later translation
# without importing Django's translation machinery at settings-load time.
gettext = lambda s: s
LANGUAGES = (
    ('en', gettext('English')),
    ('zh-cn', gettext('China')),
)
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
STATIC_URL = '/static/'
# NOTE(review): TEMPLATE_DIRS is the pre-1.8 setting; Django 1.11 reads
# TEMPLATES[0]['DIRS'] instead -- confirm this line is still needed.
TEMPLATE_DIRS = (os.path.join(BASE_DIR, 'templates'),)
LOCALE_PATHS = (
    os.path.join(BASE_DIR, 'locale'),
)
# settings for JET
# Colour themes selectable from the django-jet admin UI.
JET_THEMES = [
    {
        'theme': 'default',
        'color': '#47bac1',
        'title': 'Default'
    },
    {
        'theme': 'green',
        'color': '#44b78b',
        'title': 'Green'
    },
    {
        'theme': 'light-green',
        'color': '#2faa60',
        'title': 'Light Green'
    },
    {
        'theme': 'light-violet',
        'color': '#a464c4',
        'title': 'Light Violet'
    },
    {
        'theme': 'light-blue',
        'color': '#5EADDE',
        'title': 'Light Blue'
    },
    {
        'theme': 'light-gray',
        'color': '#222',
        'title': 'Light Gray'
    }
]
JET_SIDE_MENU_COMPACT = True
# JET_INDEX_DASHBOARD = 'apilotus.dashboard.CustomIndexDashboard'
# Application-level constant: minimum accepted user age.
MINIMUM_USER_AGE = 13
########## AUTH CONFIGURATION
AUTH_USER_MODEL = 'lotus_auth.LotusUser'  # custom user model
LOGIN_REDIRECT_URL = '/admin/dashboard/'
# This is a custom Admin site URL used to replace the easily guessable /admin
LOTUS_ADMIN_URL = os.environ.get('LOTUS_ADMIN_URL', 'apilotus-admin/')
# Password reset should be expired after 24 hours
PASSWORD_RESET_TIMEOUT_DAYS = 1
########## END AUTH CONFIGURATION
REST_FRAMEWORK = {
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
    'DEFAULT_PERMISSION_CLASSES': [
        # 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
    ]
}
# Optional bot-specific overrides. A missing or broken bot_settings module
# must not stop the project from booting, so any import error is printed
# and otherwise ignored (deliberate best-effort import).
try:
    from .bot_settings import *
except Exception as e:
    print(e)
"""
Django settings for apilotus project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
from sys import path
from datetime import date
from collections import OrderedDict
from decouple import config, Csv
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# PROJECT_DIR = os.path.dirname(__file__)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(BASE_DIR)
path.append(os.path.join(BASE_DIR, 'apps'))
# NOTE(review): INTERNAL_IPS (used by django-debug-toolbar) is filled from
# the ALLOWED_HOSTS env var -- confirm this reuse is intentional.
INTERNAL_IPS = config('ALLOWED_HOSTS', cast=Csv())
SITE_ID = 1  # django.contrib.sites identifier for this deployment
# Application definition
# Core Django apps plus admin-skin/translation packages.
# NOTE(review): 'jet*' and 'modeltranslation' are listed before
# django.contrib.admin -- app order likely matters here, confirm.
DJANGO_APPS = [
    'jet.dashboard',
    'jet',
    'modeltranslation',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
]
# First-party apps (live under the ./apps directory added to sys.path above).
LOCAL_APPS = [
    'lotus_auth',
    'notifications',
    'loci',
    'lotus_dashboard',
    'lotus_alert',
]
# Reusable third-party packages.
THIRD_PARTY_APPS = [
    'rest_framework',
    'ckeditor',
    'rosetta',
    'constance',
    'constance.backends.database',
    # 'debug_toolbar',
    'localflavor',
    'widget_tweaks',
    'django_telegrambot',
    'django_extensions',
]
INSTALLED_APPS = DJANGO_APPS + LOCAL_APPS + THIRD_PARTY_APPS
# Request/response middleware stack. WhiteNoise sits right after the
# security middleware so it can serve static files in production.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.middleware.gzip.GZipMiddleware',
    # 'debug_toolbar.middleware.DebugToolbarMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.http.ConditionalGetMiddleware',
]
ROOT_URLCONF = 'apilotus.urls'
# Standard Django template engine; constance's context processor exposes the
# editable config object to every template.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'constance.context_processors.config',
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.i18n',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'apilotus.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Two configurations: a local SQLite setup selected by exporting
# DJANGO_DEVELOPMENT, and a production Postgres setup driven by env vars
# (python-decouple) with HTTPS hardening enabled.
if os.environ.get('DJANGO_DEVELOPMENT'):
    DEBUG = True
    ALLOWED_HOSTS = ['127.0.0.1']
    SECRET_KEY = 'dummykey'  # development-only placeholder key
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        }
    }
    STATICFILES_DIRS = [os.path.join(BASE_DIR, 'public/static')]
else:
    DEBUG = False
    ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
    SECRET_KEY = config('SECRET_KEY')  # must be provided by the environment
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': config('DB_NAME'),
            'USER': config('DB_USER'),
            'PASSWORD': config('DB_PASSWORD'),
            'HOST': 'localhost',
        }
    }
    STATIC_ROOT = os.path.join(BASE_DIR, 'public/static')
    # Trust the proxy's X-Forwarded-Proto header when deciding if a request
    # is secure; only safe when the proxy always sets this header.
    SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
    SECURE_SSL_REDIRECT = True  # redirect plain HTTP to HTTPS
    SESSION_COOKIE_SECURE = True  # session cookie over HTTPS only
    CSRF_COOKIE_SECURE = True  # CSRF cookie over HTTPS only
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# CKEDITOR_CONFIGS = {
# 'default': {
# 'toolbar': 'Custom',
# 'toolbar_Custom': [
# ['Bold'],
# ['Link'],
# ['BulletedList', 'NumberedList']
# ]
# }
# }
# MODELTRANSLATION_TRANSLATION_FILES = ('lotus_auth.translation',)
# MODELTRANSLATION_TRANSLATION_REGISTRY = 'matau.translation'
# Editable site-wide values managed by django-constance; each key maps to
# (default value, help text) and is edited from the admin.
CONSTANCE_CONFIG = OrderedDict([
    ('Telephone', ('', '')),
    ('Fax', ('', '')),
    ('Email', ('', '')),
    ('Address', ('', '')),
])
# Grouping of the constance keys in the admin change form.
CONSTANCE_CONFIG_FIELDSETS = {
    'General Settings': ('Telephone', 'Fax', 'Email', 'Address'),
}
# Store constance values in the database (cached via the alias below).
CONSTANCE_BACKEND = 'constance.backends.database.DatabaseBackend'
# Single local memcached instance on the default port.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
    }
}
CONSTANCE_DATABASE_CACHE_BACKEND = 'default'  # cache alias constance uses
# Panels rendered by django-debug-toolbar.
# NOTE(review): 'debug_toolbar' itself appears commented out in
# THIRD_PARTY_APPS/MIDDLEWARE -- confirm these settings are currently active.
DEBUG_TOOLBAR_PANELS = [
    'debug_toolbar.panels.versions.VersionsPanel',
    'debug_toolbar.panels.timer.TimerPanel',
    'debug_toolbar.panels.settings.SettingsPanel',
    'debug_toolbar.panels.headers.HeadersPanel',
    'debug_toolbar.panels.request.RequestPanel',
    'debug_toolbar.panels.sql.SQLPanel',
    'debug_toolbar.panels.staticfiles.StaticFilesPanel',
    'debug_toolbar.panels.templates.TemplatesPanel',
    'debug_toolbar.panels.cache.CachePanel',
    'debug_toolbar.panels.signals.SignalsPanel',
    'debug_toolbar.panels.logging.LoggingPanel',
    'debug_toolbar.panels.redirects.RedirectsPanel',
    'debug_toolbar.panels.profiling.ProfilingPanel',
]
def show_toolbar(request):
    """Debug-toolbar visibility callback.

    Show the toolbar for every non-AJAX request that carries a user
    object; hide it for AJAX requests or when no user is attached.
    """
    return not request.is_ajax() and bool(request.user)
# Decide toolbar visibility via show_toolbar() above instead of the
# default INTERNAL_IPS check.
DEBUG_TOOLBAR_CONFIG = {
    # 'INTERCEPT_REDIRECTS': False,
    'SHOW_TOOLBAR_CALLBACK': 'apilotus.settings.show_toolbar',
}
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
# Identity function: marks the language names below for later translation
# without importing Django's translation machinery at settings-load time.
gettext = lambda s: s
LANGUAGES = (
    ('en', gettext('English')),
    ('zh-cn', gettext('China')),
)
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
STATIC_URL = '/static/'
# NOTE(review): TEMPLATE_DIRS is the pre-1.8 setting; Django 1.11 reads
# TEMPLATES[0]['DIRS'] instead -- confirm this line is still needed.
TEMPLATE_DIRS = (os.path.join(BASE_DIR, 'templates'),)
LOCALE_PATHS = (
    os.path.join(BASE_DIR, 'locale'),
)
# settings for JET
# Colour themes selectable from the django-jet admin UI.
JET_THEMES = [
    {
        'theme': 'default',
        'color': '#47bac1',
        'title': 'Default'
    },
    {
        'theme': 'green',
        'color': '#44b78b',
        'title': 'Green'
    },
    {
        'theme': 'light-green',
        'color': '#2faa60',
        'title': 'Light Green'
    },
    {
        'theme': 'light-violet',
        'color': '#a464c4',
        'title': 'Light Violet'
    },
    {
        'theme': 'light-blue',
        'color': '#5EADDE',
        'title': 'Light Blue'
    },
    {
        'theme': 'light-gray',
        'color': '#222',
        'title': 'Light Gray'
    }
]
JET_SIDE_MENU_COMPACT = True
# JET_INDEX_DASHBOARD = 'apilotus.dashboard.CustomIndexDashboard'
# Application-level constant: minimum accepted user age.
MINIMUM_USER_AGE = 13
########## AUTH CONFIGURATION
AUTH_USER_MODEL = 'lotus_auth.LotusUser'  # custom user model
LOGIN_REDIRECT_URL = '/admin/dashboard/'
# This is a custom Admin site URL used to replace the easily guessable /admin
LOTUS_ADMIN_URL = os.environ.get('LOTUS_ADMIN_URL', 'apilotus-admin/')
# Password reset should be expired after 24 hours
PASSWORD_RESET_TIMEOUT_DAYS = 1
########## END AUTH CONFIGURATION
REST_FRAMEWORK = {
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
    'DEFAULT_PERMISSION_CLASSES': [
        # 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
    ]
}
# Optional bot-specific overrides. A missing or broken bot_settings module
# must not stop the project from booting, so any import error is printed
# and otherwise ignored (deliberate best-effort import).
try:
    from .bot_settings import *
except Exception as e:
    print(e)
| en | 0.491407 | Django settings for apilotus project. Generated by 'django-admin startproject' using Django 1.11.7. For more information on this file, see https://docs.djangoproject.com/en/1.11/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.11/ref/settings/ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) # PROJECT_DIR = os.path.dirname(__file__) # Add our project to our pythonpath, this way we don't need to type our project # name in our dotted import paths: # Application definition # 'debug_toolbar', # 'debug_toolbar.middleware.DebugToolbarMiddleware', # Database # https://docs.djangoproject.com/en/1.11/ref/settings/#databases # Password validation # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators # CKEDITOR_CONFIGS = { # 'default': { # 'toolbar': 'Custom', # 'toolbar_Custom': [ # ['Bold'], # ['Link'], # ['BulletedList', 'NumberedList'] # ] # } # } # MODELTRANSLATION_TRANSLATION_FILES = ('lotus_auth.translation',) # MODELTRANSLATION_TRANSLATION_REGISTRY = 'matau.translation' # 'INTERCEPT_REDIRECTS': False, # Internationalization # https://docs.djangoproject.com/en/1.11/topics/i18n/ # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.11/howto/static-files/ # settings for JET # JET_INDEX_DASHBOARD = 'apilotus.dashboard.CustomIndexDashboard' ########## AUTH CONFIGURATION # This is a custom Admin site URL used to replace the easily guessable /admin # Password reset should be expired after 24 hours ########## END AUTH CONFIGURATION # Use Django's standard `django.contrib.auth` permissions, # or allow read-only access for unauthenticated users. # 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly' | 1.836931 | 2 |
2381.py | ShawonBarman/URI-Online-judge-Ad-Hoc-level-problem-solution-in-python | 1 | 6619276 | <filename>2381.py
n, k = map(int,input().split())
names = []
for i in range(n):
names.append(input())
names.sort()
print(names[k-1]) | <filename>2381.py
n, k = map(int,input().split())
names = []
for i in range(n):
names.append(input())
names.sort()
print(names[k-1]) | none | 1 | 3.131359 | 3 | |
squest_survey/models/sub_template.py | SyedMaseerulla/squest | 0 | 6619277 | <reponame>SyedMaseerulla/squest<filename>squest_survey/models/sub_template.py
# -*- coding: utf-8 -*-
from django.db import models
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from service_catalog.models import Service
class SubscriptionTemplate(models.Model):
services = models.ManyToManyField(Service, verbose_name=_("Service"), related_name='sub_templates')
description = models.TextField(_("Description"))
class Meta:
verbose_name = _("Subscription Template")
verbose_name_plural = _("Subscription Templates")
def __str__(self):
return f"{self.description}"
| # -*- coding: utf-8 -*-
from django.db import models
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from service_catalog.models import Service
class SubscriptionTemplate(models.Model):
services = models.ManyToManyField(Service, verbose_name=_("Service"), related_name='sub_templates')
description = models.TextField(_("Description"))
class Meta:
verbose_name = _("Subscription Template")
verbose_name_plural = _("Subscription Templates")
def __str__(self):
return f"{self.description}" | en | 0.769321 | # -*- coding: utf-8 -*- | 2.110852 | 2 |
tensorimage/classify/restore_model.py | hunglethanh9/tensorimage | 1 | 6619278 | <filename>tensorimage/classify/restore_model.py
import numpy as np
import tensorflow as tf
from tensorimage.config.info import workspace_dir
from tensorimage.train.weights_initializer import init_weights
class ModelRestorer:
def __init__(self, model_folder_name: str, model_name: str, architecture: str, sess):
self.model_folder_name = model_folder_name
self.model_name = model_name
self.architecture = architecture
self.sess = sess
def start(self):
if self.architecture == 'RosNet':
self.restore_rosnet_model()
elif self.architecture == 'AlexNet':
self.restore_alexnet_model()
def restore_rosnet_model(self):
saver = tf.train.import_meta_graph(workspace_dir + 'user/trained_models/' + self.model_folder_name + '/' + self.model_name + '.meta')
saver.restore(self.sess, tf.train.latest_checkpoint(workspace_dir + 'user/trained_models/' + self.model_folder_name + '/./'))
layers = ['conv1', 'conv2', 'fcl', 'out']
with tf.variable_scope('RosNet', reuse=tf.AUTO_REUSE):
with tf.name_scope('weights_restore'):
for layer in layers:
ly = self.sess.run('weights/'+layer+':0')
ly_list = np.ndarray.tolist(ly)
init_weights('weights', layer, ly.shape, initializer=tf.initializers.constant(ly_list))
with tf.name_scope('biases_restore'):
for layer in layers:
ly = self.sess.run('biases/'+layer+':0')
ly_list = np.ndarray.tolist(ly)
init_weights('biases', layer, ly.shape, initializer=tf.initializers.constant(ly_list))
def restore_alexnet_model(self):
saver = tf.train.import_meta_graph(workspace_dir + 'user/trained_models/' + self.model_folder_name + '/' + self.model_name + '.meta')
saver.restore(self.sess, tf.train.latest_checkpoint(workspace_dir + 'user/trained_models/' + self.model_folder_name + '/./'))
layers = ['conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'fcl', 'fcl2', 'out']
with tf.name_scope('weights_restore'):
for layer in layers:
ly = self.sess.run('weights/'+layer+':0')
ly_list = np.ndarray.tolist(ly)
init_weights('weights', layer, ly.shape, initializer=tf.initializers.constant(ly_list))
with tf.name_scope('biases_restore'):
for layer in layers:
ly = self.sess.run('biases/'+layer+':0')
ly_list = np.ndarray.tolist(ly)
init_weights('biases', layer, ly.shape, initializer=tf.initializers.constant(ly_list))
| <filename>tensorimage/classify/restore_model.py
import numpy as np
import tensorflow as tf
from tensorimage.config.info import workspace_dir
from tensorimage.train.weights_initializer import init_weights
class ModelRestorer:
def __init__(self, model_folder_name: str, model_name: str, architecture: str, sess):
self.model_folder_name = model_folder_name
self.model_name = model_name
self.architecture = architecture
self.sess = sess
def start(self):
if self.architecture == 'RosNet':
self.restore_rosnet_model()
elif self.architecture == 'AlexNet':
self.restore_alexnet_model()
def restore_rosnet_model(self):
saver = tf.train.import_meta_graph(workspace_dir + 'user/trained_models/' + self.model_folder_name + '/' + self.model_name + '.meta')
saver.restore(self.sess, tf.train.latest_checkpoint(workspace_dir + 'user/trained_models/' + self.model_folder_name + '/./'))
layers = ['conv1', 'conv2', 'fcl', 'out']
with tf.variable_scope('RosNet', reuse=tf.AUTO_REUSE):
with tf.name_scope('weights_restore'):
for layer in layers:
ly = self.sess.run('weights/'+layer+':0')
ly_list = np.ndarray.tolist(ly)
init_weights('weights', layer, ly.shape, initializer=tf.initializers.constant(ly_list))
with tf.name_scope('biases_restore'):
for layer in layers:
ly = self.sess.run('biases/'+layer+':0')
ly_list = np.ndarray.tolist(ly)
init_weights('biases', layer, ly.shape, initializer=tf.initializers.constant(ly_list))
def restore_alexnet_model(self):
saver = tf.train.import_meta_graph(workspace_dir + 'user/trained_models/' + self.model_folder_name + '/' + self.model_name + '.meta')
saver.restore(self.sess, tf.train.latest_checkpoint(workspace_dir + 'user/trained_models/' + self.model_folder_name + '/./'))
layers = ['conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'fcl', 'fcl2', 'out']
with tf.name_scope('weights_restore'):
for layer in layers:
ly = self.sess.run('weights/'+layer+':0')
ly_list = np.ndarray.tolist(ly)
init_weights('weights', layer, ly.shape, initializer=tf.initializers.constant(ly_list))
with tf.name_scope('biases_restore'):
for layer in layers:
ly = self.sess.run('biases/'+layer+':0')
ly_list = np.ndarray.tolist(ly)
init_weights('biases', layer, ly.shape, initializer=tf.initializers.constant(ly_list))
| none | 1 | 2.337702 | 2 | |
tvarit_backend/operation/serializers.py | theharshitgarg/tvarit_task2 | 0 | 6619279 |
from rest_framework import serializers
class AdditionSerializer(serializers.Serializer):
num1 = serializers.IntegerField(required=True, error_messages={'required': 'Number is mandatory'})
num2 = serializers.IntegerField(required=True, error_messages={'required': 'Number is mandatory'})
num3 = serializers.IntegerField(required=True, error_messages={'required': 'Number is mandatory'})
|
from rest_framework import serializers
class AdditionSerializer(serializers.Serializer):
num1 = serializers.IntegerField(required=True, error_messages={'required': 'Number is mandatory'})
num2 = serializers.IntegerField(required=True, error_messages={'required': 'Number is mandatory'})
num3 = serializers.IntegerField(required=True, error_messages={'required': 'Number is mandatory'})
| none | 1 | 2.316783 | 2 | |
tEvash/tevash_09.py | techartorg/Advent_of_Code_2020 | 3 | 6619280 | <filename>tEvash/tevash_09.py
from itertools import combinations
with open("inputs_9.txt") as f:
inputs = [int(x) for x in f.readlines()]
ex_inputs = [
35,
20,
15,
25,
47,
40,
62,
55,
65,
95,
102,
117,
150,
182,
127,
219,
299,
277,
309,
576
]
def part1(inputs, pre_len = 25):
index = pre_len
while index < len(inputs):
num = inputs[index]
combos = [sum(x) for x in combinations(inputs[index-pre_len:index], 2)]
# print(f"{num} : {combos}")
if num not in combos:
return num
index += 1
def part2(inputs):
inv_num = part1(inputs)
index = 0
while True:
summed = []
for num in inputs[index:]:
if sum(summed, num) == inv_num and len(summed) >= 2:
summed.append(num)
summed = sorted(summed)
print(summed[0] + summed[len(summed) -1])
return
elif sum(summed, num) > inv_num:
summed.clear()
break
summed.append(num)
index += 1
print(part1(inputs))
part2(inputs)
| <filename>tEvash/tevash_09.py
from itertools import combinations
with open("inputs_9.txt") as f:
inputs = [int(x) for x in f.readlines()]
ex_inputs = [
35,
20,
15,
25,
47,
40,
62,
55,
65,
95,
102,
117,
150,
182,
127,
219,
299,
277,
309,
576
]
def part1(inputs, pre_len = 25):
index = pre_len
while index < len(inputs):
num = inputs[index]
combos = [sum(x) for x in combinations(inputs[index-pre_len:index], 2)]
# print(f"{num} : {combos}")
if num not in combos:
return num
index += 1
def part2(inputs):
inv_num = part1(inputs)
index = 0
while True:
summed = []
for num in inputs[index:]:
if sum(summed, num) == inv_num and len(summed) >= 2:
summed.append(num)
summed = sorted(summed)
print(summed[0] + summed[len(summed) -1])
return
elif sum(summed, num) > inv_num:
summed.clear()
break
summed.append(num)
index += 1
print(part1(inputs))
part2(inputs)
| en | 0.187415 | # print(f"{num} : {combos}") | 3.255819 | 3 |
v0.1/membrane_solvers/FCD-2D/main.py | drizdar/Propmod | 3 | 6619281 | import classes as cl
import EvaporationPonds as ep
import formulas as f
import json
import math
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.dates import (MONTHLY, DateFormatter, rrulewrapper, RRuleLocator, drange)
import MembraneFlow as mf
import numpy as np
import pandas as pd
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
import Reporting as r
T = 298.15
P = 1.01325
RO = {
"recovery_rate": 0.4
}
RO_water = cl.flow({
"pc_wt": 0.0001,
"P": P,
"T": T,
"flow": 128e6 #L/d
})
seawater = cl.flow({
"pc_wt": 0.035,
"P": P,
"T": T,
"flow": RO_water.data["flow"] / RO["recovery_rate"] #MLD
})
seawater.CalcOsmoticProperties()
seawater.CalcMassRate()
RO_water.CalcOsmoticProperties()
RO_water.CalcMassRate()
RO_brine = cl.flow({
"P": P,
"T": T,
"mass_water": seawater.data["mass_water"] - RO_water.data["mass_water"],
"mass_NaCl": seawater.data["mass_NaCl"] - RO_water.data["mass_NaCl"],
"mass_total": seawater.data["mass_water"] - RO_water.data["mass_water"] + \
seawater.data["mass_NaCl"] - RO_water.data["mass_NaCl"]
})
RO_brine.CalcPcWt()
RO_brine.CalcOsmoticProperties()
RO_brine.CalcFlow()
D = 9000 #mm
A = 24.8e6 #m^2 play around with this until it looks good
d_start = 243 #start in September just as evaporation is increasing for Summer
d = d_start
n_years = 5
no_volume = {
"A": A,
"D": D,
"P": P,
"T": T,
"mass_water": 0,
"mass_NaCl": 0,
"mass_total": 0,
"level": 0,
"pc_wt": 0,
"mass_NaCl_solid": 450*A*2.17 #450mm bed existing
}
no_flow = {
"P": P,
"T": T,
"mass_water": 0,
"mass_NaCl": 0,
"mass_total": 0,
"pc_wt": 0,
"flow": 0
}
pond = [cl.pond(no_volume)]
concentrate = [cl.flow(no_flow)]
discharge = [RO_brine]
no_PRO = {
"J_w_avg": 0,
"J_s_avg": 0,
"Pd_avg": 0
}
PRO = [{
"J_w_avg": 0,
"J_s_avg": 0,
"Pd_avg": 0
}]
mix_ratio = 1.209163317980615
Qb = RO_brine.GetFlow("MLD")
# This is where the logic for deciding whether to start PRO begins
while d < (d_start+n_years*365):
pond = ep.IterateFlows(concentrate, d, D, discharge, pond)
pond_pc_wt = pond[-1].data.get("pc_wt")
pond_L = pond[-1].data.get("level")
if (pond_L > 1000 and pond_pc_wt > 0.15):
concentrate.append(cl.flow({
"P": 1.01325,
"T": 273.15 + 25,
"pc_wt": pond_pc_wt,
"flow": Qb*mix_ratio
}))
concentrate[-1].CalcOsmoticProperties()
concentrate[-1].CalcMassRate()
discharge.append(cl.combineFlows(discharge[0], concentrate[-1]))
else:
concentrate.append(cl.flow(no_flow))
PRO.append(no_PRO)
discharge.append(discharge[0])
print(d,
pond[-1].data["level"],
pond[-1].data["pc_wt"],
concentrate[-1].data["flow"],
discharge[-1].data["flow"])
d += 1
#Pond time-series
val_of_int = ["level", "pc_wt"]
y_labels = ['Depth of solution in pond - d (mm)','Concentration NaCl - (%wt)']
colours = ['g', 'r']
legends = ['Solution Depth', 'Concentration']
data = []
for i in range(0, len(val_of_int)):
item = val_of_int[i]
data.append([])
for p in pond:
data[i].append(p.data.get(item))
t = np.linspace(0, d-d_start, d-d_start+1)
axes = []
fig, ax = plt.subplots()
axes.append(ax)
axes[0].set_xlabel("Time - d (days)")
axes[0].set_ylabel(y_labels[0])
lines = axes[0].plot(t, data[0], colours[0])
for i in range(1, len(val_of_int)):
axes.append(axes[-1].twinx())
axes[-1].set_ylabel(y_labels[i])
lines += axes[-1].plot(t, data[i], colours[i])
plt.legend(lines, legends,loc=9) #or bbox_to_anchor=(0.2, 1.0)
plt.tight_layout()
plt.savefig('./../capstone/images/pond-level-conc.png')
plt.show()
val_of_int = ["level", "level_NaCl"]
y_labels = ['Depth of solution in pond - d (mm)','Thickness of solid NaCl in pond - d (mm)']
colours = ['g', 'b']
legends = ['Solution Depth', 'Salt Layer Thickness']
data = []
for i in range(0, len(val_of_int)):
item = val_of_int[i]
data.append([])
for p in pond:
data[i].append(p.data.get(item))
t = np.linspace(0, d-d_start, d-d_start+1)
axes = []
fig, ax = plt.subplots()
axes.append(ax)
axes[0].set_xlabel("Time - d (days)")
axes[0].set_ylabel(y_labels[0])
lines = axes[0].plot(t, data[0], colours[0])
for i in range(1, len(val_of_int)):
axes.append(axes[-1].twinx())
axes[-1].set_ylabel(y_labels[i])
lines += axes[-1].plot(t, data[i], colours[i])
plt.legend(lines, legends,loc=9) #or bbox_to_anchor=(0.2, 1.0)
plt.tight_layout()
plt.savefig('./../capstone/images/pond-salt-and-solution-level.png')
plt.show() | import classes as cl
import EvaporationPonds as ep
import formulas as f
import json
import math
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.dates import (MONTHLY, DateFormatter, rrulewrapper, RRuleLocator, drange)
import MembraneFlow as mf
import numpy as np
import pandas as pd
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
import Reporting as r
T = 298.15
P = 1.01325
RO = {
"recovery_rate": 0.4
}
RO_water = cl.flow({
"pc_wt": 0.0001,
"P": P,
"T": T,
"flow": 128e6 #L/d
})
seawater = cl.flow({
"pc_wt": 0.035,
"P": P,
"T": T,
"flow": RO_water.data["flow"] / RO["recovery_rate"] #MLD
})
seawater.CalcOsmoticProperties()
seawater.CalcMassRate()
RO_water.CalcOsmoticProperties()
RO_water.CalcMassRate()
RO_brine = cl.flow({
"P": P,
"T": T,
"mass_water": seawater.data["mass_water"] - RO_water.data["mass_water"],
"mass_NaCl": seawater.data["mass_NaCl"] - RO_water.data["mass_NaCl"],
"mass_total": seawater.data["mass_water"] - RO_water.data["mass_water"] + \
seawater.data["mass_NaCl"] - RO_water.data["mass_NaCl"]
})
RO_brine.CalcPcWt()
RO_brine.CalcOsmoticProperties()
RO_brine.CalcFlow()
D = 9000 #mm
A = 24.8e6 #m^2 play around with this until it looks good
d_start = 243 #start in September just as evaporation is increasing for Summer
d = d_start
n_years = 5
no_volume = {
"A": A,
"D": D,
"P": P,
"T": T,
"mass_water": 0,
"mass_NaCl": 0,
"mass_total": 0,
"level": 0,
"pc_wt": 0,
"mass_NaCl_solid": 450*A*2.17 #450mm bed existing
}
no_flow = {
"P": P,
"T": T,
"mass_water": 0,
"mass_NaCl": 0,
"mass_total": 0,
"pc_wt": 0,
"flow": 0
}
pond = [cl.pond(no_volume)]
concentrate = [cl.flow(no_flow)]
discharge = [RO_brine]
no_PRO = {
"J_w_avg": 0,
"J_s_avg": 0,
"Pd_avg": 0
}
PRO = [{
"J_w_avg": 0,
"J_s_avg": 0,
"Pd_avg": 0
}]
mix_ratio = 1.209163317980615
Qb = RO_brine.GetFlow("MLD")
# This is where the logic for deciding whether to start PRO begins
while d < (d_start+n_years*365):
pond = ep.IterateFlows(concentrate, d, D, discharge, pond)
pond_pc_wt = pond[-1].data.get("pc_wt")
pond_L = pond[-1].data.get("level")
if (pond_L > 1000 and pond_pc_wt > 0.15):
concentrate.append(cl.flow({
"P": 1.01325,
"T": 273.15 + 25,
"pc_wt": pond_pc_wt,
"flow": Qb*mix_ratio
}))
concentrate[-1].CalcOsmoticProperties()
concentrate[-1].CalcMassRate()
discharge.append(cl.combineFlows(discharge[0], concentrate[-1]))
else:
concentrate.append(cl.flow(no_flow))
PRO.append(no_PRO)
discharge.append(discharge[0])
print(d,
pond[-1].data["level"],
pond[-1].data["pc_wt"],
concentrate[-1].data["flow"],
discharge[-1].data["flow"])
d += 1
#Pond time-series
val_of_int = ["level", "pc_wt"]
y_labels = ['Depth of solution in pond - d (mm)','Concentration NaCl - (%wt)']
colours = ['g', 'r']
legends = ['Solution Depth', 'Concentration']
data = []
for i in range(0, len(val_of_int)):
item = val_of_int[i]
data.append([])
for p in pond:
data[i].append(p.data.get(item))
t = np.linspace(0, d-d_start, d-d_start+1)
axes = []
fig, ax = plt.subplots()
axes.append(ax)
axes[0].set_xlabel("Time - d (days)")
axes[0].set_ylabel(y_labels[0])
lines = axes[0].plot(t, data[0], colours[0])
for i in range(1, len(val_of_int)):
axes.append(axes[-1].twinx())
axes[-1].set_ylabel(y_labels[i])
lines += axes[-1].plot(t, data[i], colours[i])
plt.legend(lines, legends,loc=9) #or bbox_to_anchor=(0.2, 1.0)
plt.tight_layout()
plt.savefig('./../capstone/images/pond-level-conc.png')
plt.show()
val_of_int = ["level", "level_NaCl"]
y_labels = ['Depth of solution in pond - d (mm)','Thickness of solid NaCl in pond - d (mm)']
colours = ['g', 'b']
legends = ['Solution Depth', 'Salt Layer Thickness']
data = []
for i in range(0, len(val_of_int)):
item = val_of_int[i]
data.append([])
for p in pond:
data[i].append(p.data.get(item))
t = np.linspace(0, d-d_start, d-d_start+1)
axes = []
fig, ax = plt.subplots()
axes.append(ax)
axes[0].set_xlabel("Time - d (days)")
axes[0].set_ylabel(y_labels[0])
lines = axes[0].plot(t, data[0], colours[0])
for i in range(1, len(val_of_int)):
axes.append(axes[-1].twinx())
axes[-1].set_ylabel(y_labels[i])
lines += axes[-1].plot(t, data[i], colours[i])
plt.legend(lines, legends,loc=9) #or bbox_to_anchor=(0.2, 1.0)
plt.tight_layout()
plt.savefig('./../capstone/images/pond-salt-and-solution-level.png')
plt.show() | en | 0.89926 | #L/d #MLD #mm #m^2 play around with this until it looks good #start in September just as evaporation is increasing for Summer #450mm bed existing # This is where the logic for deciding whether to start PRO begins #Pond time-series #or bbox_to_anchor=(0.2, 1.0) #or bbox_to_anchor=(0.2, 1.0) | 2.413879 | 2 |
allink_core/core_apps/allink_teaser/models.py | allink/allink-core | 5 | 6619282 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from django.db import models
from cms.models.pluginmodel import CMSPlugin
from allink_core.core.models.fields import CMSPluginField
from allink_core.core.models import AllinkInternalLinkFieldsModel
from allink_core.core.utils import get_additional_templates
from allink_core.core.models.fields_model import AllinkTeaserFieldsModel, AllinkTeaserTranslatedFieldsModel
from allink_core.core.models.base_plugins import AllinkBaseSectionPlugin
class AllinkTeaserGridContainerPlugin(AllinkBaseSectionPlugin):
COLUMN_ORDERS = AllinkBaseSectionPlugin.COLUMN_ORDERS + (
('alternating', 'Alternating'),
)
class AllinkTeaserPlugin(AllinkInternalLinkFieldsModel, AllinkTeaserFieldsModel, AllinkTeaserTranslatedFieldsModel,
CMSPlugin):
template = models.CharField(
'Template',
max_length=50
)
softpage_enabled = models.BooleanField(
'Show detailed information in Softpage',
help_text='If checked, the detail view of an entry will be displayed in a "softpage".'
' Otherwise the page will be reloaded.',
default=False
)
cmsplugin_ptr = CMSPluginField()
def __str__(self):
return '{}'.format(self.link_object)
@classmethod
def get_templates(cls):
templates = ()
for x, y in get_additional_templates('TEASER'):
templates += ((x, y),)
return templates
| # -*- coding: utf-8 -*-
from django.db import models
from cms.models.pluginmodel import CMSPlugin
from allink_core.core.models.fields import CMSPluginField
from allink_core.core.models import AllinkInternalLinkFieldsModel
from allink_core.core.utils import get_additional_templates
from allink_core.core.models.fields_model import AllinkTeaserFieldsModel, AllinkTeaserTranslatedFieldsModel
from allink_core.core.models.base_plugins import AllinkBaseSectionPlugin
class AllinkTeaserGridContainerPlugin(AllinkBaseSectionPlugin):
COLUMN_ORDERS = AllinkBaseSectionPlugin.COLUMN_ORDERS + (
('alternating', 'Alternating'),
)
class AllinkTeaserPlugin(AllinkInternalLinkFieldsModel, AllinkTeaserFieldsModel, AllinkTeaserTranslatedFieldsModel,
CMSPlugin):
template = models.CharField(
'Template',
max_length=50
)
softpage_enabled = models.BooleanField(
'Show detailed information in Softpage',
help_text='If checked, the detail view of an entry will be displayed in a "softpage".'
' Otherwise the page will be reloaded.',
default=False
)
cmsplugin_ptr = CMSPluginField()
def __str__(self):
return '{}'.format(self.link_object)
@classmethod
def get_templates(cls):
templates = ()
for x, y in get_additional_templates('TEASER'):
templates += ((x, y),)
return templates | en | 0.769321 | # -*- coding: utf-8 -*- | 1.94383 | 2 |
card_voice.py | luanths/shadowverse-bot-lou | 0 | 6619283 | import io
import os
import tempfile
import aiohttp
import card_data
_soundmanifest = None
async def get_soundmanifest() -> dict:
global _soundmanifest
if _soundmanifest is not None:
return _soundmanifest
res_ver = os.environ["RES_VER"]
url = f"https://shadowverse.akamaized.net/dl/Manifest/{res_ver}/Eng/Windows/soundmanifest"
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
text = await response.text()
ret = {}
for line in text.splitlines():
fields = line.split(",")
if len(fields) < 2:
continue
[name, hexcode, *_] = fields
prefix = "v/vo_"
suffix = ".acb"
if not name.startswith(prefix) and name.endswith(suffix):
continue
card_id = name[len(prefix) : -len(suffix)]
if not card_id.isdigit():
continue
card_id = int(card_id)
ret[card_id] = hexcode
_soundmanifest = ret
return ret
async def svgdb_get(card_id: int) -> list:
url = f"https://svgdb.me/api/voices/{card_id}"
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
json = await response.json()
ret = []
for label, basenames in json.items():
translate = {
"plays": "play",
"evolves": "evolve",
"attacks": "attack",
"deaths": "death",
}
label = translate.get(label, label).capitalize()
for basename in basenames:
en = f"https://svgdb.me/assets/audio/en/{basename}"
jp = f"https://svgdb.me/assets/audio/jp/{basename}"
ret.append((label, en, jp))
return ret
async def svgdb_embed(card: dict) -> dict:
card_id = card["card_id"]
data = await svgdb_get(card_id)
title = card_data.effective_card_name(card)
description = (
"Provided by [svgdb.me](https://svgdb.me). "
"This bot is not affiliated with svgdb."
)
fields = [
dict(name=label, value=f"[en]({en}) [jp]({jp})") for label, en, jp in data
]
url = f"https://svgdb.me/cards/{card_id}"
return dict(title=title, description=description, fields=fields, url=url)
| import io
import os
import tempfile
import aiohttp
import card_data
_soundmanifest = None
async def get_soundmanifest() -> dict:
global _soundmanifest
if _soundmanifest is not None:
return _soundmanifest
res_ver = os.environ["RES_VER"]
url = f"https://shadowverse.akamaized.net/dl/Manifest/{res_ver}/Eng/Windows/soundmanifest"
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
text = await response.text()
ret = {}
for line in text.splitlines():
fields = line.split(",")
if len(fields) < 2:
continue
[name, hexcode, *_] = fields
prefix = "v/vo_"
suffix = ".acb"
if not name.startswith(prefix) and name.endswith(suffix):
continue
card_id = name[len(prefix) : -len(suffix)]
if not card_id.isdigit():
continue
card_id = int(card_id)
ret[card_id] = hexcode
_soundmanifest = ret
return ret
async def svgdb_get(card_id: int) -> list:
url = f"https://svgdb.me/api/voices/{card_id}"
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
json = await response.json()
ret = []
for label, basenames in json.items():
translate = {
"plays": "play",
"evolves": "evolve",
"attacks": "attack",
"deaths": "death",
}
label = translate.get(label, label).capitalize()
for basename in basenames:
en = f"https://svgdb.me/assets/audio/en/{basename}"
jp = f"https://svgdb.me/assets/audio/jp/{basename}"
ret.append((label, en, jp))
return ret
async def svgdb_embed(card: dict) -> dict:
card_id = card["card_id"]
data = await svgdb_get(card_id)
title = card_data.effective_card_name(card)
description = (
"Provided by [svgdb.me](https://svgdb.me). "
"This bot is not affiliated with svgdb."
)
fields = [
dict(name=label, value=f"[en]({en}) [jp]({jp})") for label, en, jp in data
]
url = f"https://svgdb.me/cards/{card_id}"
return dict(title=title, description=description, fields=fields, url=url)
| none | 1 | 2.614002 | 3 | |
Expunge_eMails_v1.6.py | cmetolive/Delete_Expunge_eMails | 1 | 6619284 | <gh_stars>1-10
"""
**************************************************************************
Script Name : Expunge_eMail_v1.6.py
Author : <NAME>.
Description : This bot deletes all email messages from Inbox on
: specified with date range.
Version History : Dates Version Description
: 22 May,2021 1.0 Initial Release.
: 01 June,2021 1.5 Final release.
: 27 June,2021 1.6 Added Command Line Support
**************************************************************************
"""
import Expunge_eMails_Utilities as xUtils
import Expunge_eMails_Constants as CONST
import datetime as SYSDT
import sys as SYSTEM
# Init local variables.
ip_email_address = ""
ip_start_date = ""
ip_end_date = ""
ip_pwd = ""
logfile = ""
if __name__ == '__main__':
#-------------------------------------------------------------------------------------------
# Integrate CLP options.
if len(SYSTEM.argv) > 1:
# Requested help message.
if SYSTEM.argv[1].lower() in CONST.HELP_SWITCH.lower():
xUtils.DisplayHelpText()
exit()
if len(SYSTEM.argv) >= 5:
# Create logfile.
logfile = xUtils.CreateLogFile() # ("\Expunge_eMail_Log_")
xUtils.WriteLogFile(CONST.STARTED + str(SYSDT.datetime.now().strftime("%d%m%Y_%H%M")) + "]\n")
# Validate email address.
if not xUtils.isValid_eMailAddress(SYSTEM.argv[1]):
print(CONST.INVALID_EMAIL_ADDR)
exit()
# Validate start date.
if not xUtils.isValid_Date(SYSTEM.argv[3]):
exit()
# Validate end date.
if not xUtils.isValid_Date(SYSTEM.argv[4], SYSTEM.argv[3], True):
print(CONST.INVALID_END_DATE_GR)
exit()
ip_email_address = SYSTEM.argv[1]
ip_pwd = SYSTEM.argv[2]
ip_start_date = SYSTEM.argv[3]
ip_end_date = SYSTEM.argv[4]
else:
# Create logfile.
logfile = xUtils.CreateLogFile() # ("\Expunge_eMail_Log_")
xUtils.WriteLogFile(CONST.STARTED + str(SYSDT.datetime.now().strftime("%d%m%Y_%H%M")) + "]\n")
print(CONST.WELCOME)
ip_email_address, ip_pwd, ip_start_date, ip_end_date = xUtils.GetInputs()
# -------------------------------------------------------------------------------------------
# Collected all required inputs from user. Call remove/expunge email module.
xUtils.Expunge_eMails(ip_email_address, ip_pwd, ip_start_date, ip_end_date)
# -------------------------------------------------------------------------------------------
# All done. Quit.
xUtils.WriteLogFile(CONST.FINISHED + str(SYSDT.datetime.now().strftime("%d%m%Y_%H%M")) + "]\n")
print("~~~ All Done. ~~~")
| """
**************************************************************************
Script Name : Expunge_eMail_v1.6.py
Author : <NAME>.
Description : This bot deletes all email messages from Inbox on
: specified with date range.
Version History : Dates Version Description
: 22 May,2021 1.0 Initial Release.
: 01 June,2021 1.5 Final release.
: 27 June,2021 1.6 Added Command Line Support
**************************************************************************
"""
import Expunge_eMails_Utilities as xUtils
import Expunge_eMails_Constants as CONST
import datetime as SYSDT
import sys as SYSTEM
# Init local variables.
ip_email_address = ""
ip_start_date = ""
ip_end_date = ""
ip_pwd = ""
logfile = ""
if __name__ == '__main__':
#-------------------------------------------------------------------------------------------
# Integrate CLP options.
if len(SYSTEM.argv) > 1:
# Requested help message.
if SYSTEM.argv[1].lower() in CONST.HELP_SWITCH.lower():
xUtils.DisplayHelpText()
exit()
if len(SYSTEM.argv) >= 5:
# Create logfile.
logfile = xUtils.CreateLogFile() # ("\Expunge_eMail_Log_")
xUtils.WriteLogFile(CONST.STARTED + str(SYSDT.datetime.now().strftime("%d%m%Y_%H%M")) + "]\n")
# Validate email address.
if not xUtils.isValid_eMailAddress(SYSTEM.argv[1]):
print(CONST.INVALID_EMAIL_ADDR)
exit()
# Validate start date.
if not xUtils.isValid_Date(SYSTEM.argv[3]):
exit()
# Validate end date.
if not xUtils.isValid_Date(SYSTEM.argv[4], SYSTEM.argv[3], True):
print(CONST.INVALID_END_DATE_GR)
exit()
ip_email_address = SYSTEM.argv[1]
ip_pwd = SYSTEM.argv[2]
ip_start_date = SYSTEM.argv[3]
ip_end_date = SYSTEM.argv[4]
else:
# Create logfile.
logfile = xUtils.CreateLogFile() # ("\Expunge_eMail_Log_")
xUtils.WriteLogFile(CONST.STARTED + str(SYSDT.datetime.now().strftime("%d%m%Y_%H%M")) + "]\n")
print(CONST.WELCOME)
ip_email_address, ip_pwd, ip_start_date, ip_end_date = xUtils.GetInputs()
# -------------------------------------------------------------------------------------------
# Collected all required inputs from user. Call remove/expunge email module.
xUtils.Expunge_eMails(ip_email_address, ip_pwd, ip_start_date, ip_end_date)
# -------------------------------------------------------------------------------------------
# All done. Quit.
xUtils.WriteLogFile(CONST.FINISHED + str(SYSDT.datetime.now().strftime("%d%m%Y_%H%M")) + "]\n")
print("~~~ All Done. ~~~") | en | 0.371751 | **************************************************************************
Script Name : Expunge_eMail_v1.6.py
Author : <NAME>.
Description : This bot deletes all email messages from Inbox on
: specified with date range.
Version History : Dates Version Description
: 22 May,2021 1.0 Initial Release.
: 01 June,2021 1.5 Final release.
: 27 June,2021 1.6 Added Command Line Support
************************************************************************** # Init local variables. #------------------------------------------------------------------------------------------- # Integrate CLP options. # Requested help message. # Create logfile. # ("\Expunge_eMail_Log_") # Validate email address. # Validate start date. # Validate end date. # Create logfile. # ("\Expunge_eMail_Log_") # ------------------------------------------------------------------------------------------- # Collected all required inputs from user. Call remove/expunge email module. # ------------------------------------------------------------------------------------------- # All done. Quit. | 2.417478 | 2 |
app/baseModel.py | nmluci/ZonaCipta-Backend | 0 | 6619285 | <filename>app/baseModel.py
from __future__ import annotations
from typing import Any, List, Dict
from datetime import datetime
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from dataclasses import dataclass
import dotenv
db = SQLAlchemy()
migrate = Migrate()
config = dotenv.load_dotenv()
@dataclass
class SuccessResponse:
data: List[Any] = None
count: int = None
perPage: int = None
page: int = None
totalPages: int = None
created: datetime = None
def toJson(cls):
if not cls.data:
return {
"status": "OK",
"date_created": datetime.utcnow()
}
res = {
"status": "OK",
"data": cls.data,
"count": len(cls.data),
"date_created": datetime.utcnow()
}
if cls.page:
res["per_page"] = cls.perPage
res["page"] = cls.page
# res["totalPages"] = cls.totalPages if cls.totalPages else (cls.count // cls.perPage)
return res
@dataclass
class FailedResponse:
errorCode: str = None
errorMessage: str = None
created: datetime = None
def toJson(cls):
res = {
"status": "ERROR",
}
if cls.errorCode:
res["error_code"] = cls.errorCode
elif cls.errorMessage:
res["error_message"] = cls.errorMessage
else:
res["error_message"] = "no specific error returned"
res["date_created"] = datetime.utcnow()
return res
| <filename>app/baseModel.py
from __future__ import annotations
from typing import Any, List, Dict
from datetime import datetime
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from dataclasses import dataclass
import dotenv
db = SQLAlchemy()
migrate = Migrate()
config = dotenv.load_dotenv()
@dataclass
class SuccessResponse:
data: List[Any] = None
count: int = None
perPage: int = None
page: int = None
totalPages: int = None
created: datetime = None
def toJson(cls):
if not cls.data:
return {
"status": "OK",
"date_created": datetime.utcnow()
}
res = {
"status": "OK",
"data": cls.data,
"count": len(cls.data),
"date_created": datetime.utcnow()
}
if cls.page:
res["per_page"] = cls.perPage
res["page"] = cls.page
# res["totalPages"] = cls.totalPages if cls.totalPages else (cls.count // cls.perPage)
return res
@dataclass
class FailedResponse:
errorCode: str = None
errorMessage: str = None
created: datetime = None
def toJson(cls):
res = {
"status": "ERROR",
}
if cls.errorCode:
res["error_code"] = cls.errorCode
elif cls.errorMessage:
res["error_message"] = cls.errorMessage
else:
res["error_message"] = "no specific error returned"
res["date_created"] = datetime.utcnow()
return res
| en | 0.164081 | # res["totalPages"] = cls.totalPages if cls.totalPages else (cls.count // cls.perPage) | 2.507559 | 3 |
dmb/modeling/stereo/models/__init__.py | yiranzhong/DenseMatchingBenchmark | 1 | 6619286 | <reponame>yiranzhong/DenseMatchingBenchmark<filename>dmb/modeling/stereo/models/__init__.py<gh_stars>1-10
from .general_stereo_model import GeneralizedStereoModel
_META_ARCHITECTURES = {"GeneralizedStereoModel": GeneralizedStereoModel}
def build_stereo_model(cfg):
meta_arch = _META_ARCHITECTURES[cfg.model.meta_architecture]
return meta_arch(cfg)
| from .general_stereo_model import GeneralizedStereoModel
_META_ARCHITECTURES = {"GeneralizedStereoModel": GeneralizedStereoModel}
def build_stereo_model(cfg):
meta_arch = _META_ARCHITECTURES[cfg.model.meta_architecture]
return meta_arch(cfg) | none | 1 | 1.675514 | 2 | |
pypi_librarian/pip_endpoints.py | jayvdb/pypi_librarian | 3 | 6619287 | <gh_stars>1-10
# coding=utf-8
"""
Shell to pip.
Not planning on supporting every metadata command & option that pip does.
Meta Data Relevant Commands:
download Download packages.
freeze Output installed packages in requirements format.
list List installed packages.
show Show information about installed packages.
search Search PyPI for packages.
"""
from typing import List, Optional
import subprocess
import sys
def _prologue() -> List[str]:
return [sys.executable, "-m", "pip"]
def download(project_name: str, dest: str) -> None:
"""
Many switches to specify which file to download
:param project_name:
:param dest:
:return:
"""
if not dest:
raise TypeError("destination folder required")
command = _prologue()
command.extend(["download", project_name, "--dest={0}".format(dest)])
results = subprocess.check_output(command)
return results
def freeze_current() -> None:
"""
-r, --requirement <file> Use the order in the given requirements file and its comments when generating output.
This option can be used multiple times.
-f, --find-links <url> URL for finding packages, which will be added to the output.
-l, --local If in a virtualenv that has global access, do not output globally-installed packages.
--user Only output packages installed in user-site.
--all Do not skip these packages in the output: setuptools, distribute, wheel, pip
--exclude-editable Exclude editable package from output.
:return:
"""
command = _prologue()
command.extend(["freeze"])
results = subprocess.check_output(command)
return results
def list_current() -> None:
"""
-o, --outdated List outdated packages
-u, --uptodate List uptodate packages
-e, --editable List editable projects.
-l, --local If in a virtualenv that has global access, do not list globally-installed packages.
--user Only output packages installed in user-site.
--pre Include pre-release and development versions. By default, pip only finds stable versions.
--format <list_format> Select the output format among: columns (default), freeze, or json
--not-required List packages that are not dependencies of installed packages.
--exclude-editable Exclude editable package from output.
--include-editable Include editable package from output.
tabular layout, columns (default), freeze, or json
:return:
"""
command = _prologue()
command.extend(["list", "--format=json"])
results = subprocess.check_output(command)
return results
def show_installed_package(project_name: str, list_files: bool = False) -> None:
"""
tabular layout, columns (default), freeze, or json
:return:
"""
command = _prologue()
command.extend(["show", project_name])
if list_files:
command.append("--files")
results = subprocess.check_output(command)
return results
def search(query: str, base_url: Optional[str] = None) -> None:
"""
output in form of
package (1.0) - short description
Does not offer any alternative output formats.
:return:
"""
command = _prologue()
command.extend(["search", query])
if base_url:
command.append("--index={0}".format(base_url))
results = subprocess.check_output(command)
return results
if __name__ == "__main__":
def run() -> None:
noarg_functions = [freeze_current, list_current]
for fun in noarg_functions:
print(fun())
project_functions = [search]
for fun in project_functions:
print(fun("jiggle-version"))
print(download("jiggle-version", "tmp/"))
run()
| # coding=utf-8
"""
Shell to pip.
Not planning on supporting every metadata command & option that pip does.
Meta Data Relevant Commands:
download Download packages.
freeze Output installed packages in requirements format.
list List installed packages.
show Show information about installed packages.
search Search PyPI for packages.
"""
from typing import List, Optional
import subprocess
import sys
def _prologue() -> List[str]:
return [sys.executable, "-m", "pip"]
def download(project_name: str, dest: str) -> None:
"""
Many switches to specify which file to download
:param project_name:
:param dest:
:return:
"""
if not dest:
raise TypeError("destination folder required")
command = _prologue()
command.extend(["download", project_name, "--dest={0}".format(dest)])
results = subprocess.check_output(command)
return results
def freeze_current() -> None:
"""
-r, --requirement <file> Use the order in the given requirements file and its comments when generating output.
This option can be used multiple times.
-f, --find-links <url> URL for finding packages, which will be added to the output.
-l, --local If in a virtualenv that has global access, do not output globally-installed packages.
--user Only output packages installed in user-site.
--all Do not skip these packages in the output: setuptools, distribute, wheel, pip
--exclude-editable Exclude editable package from output.
:return:
"""
command = _prologue()
command.extend(["freeze"])
results = subprocess.check_output(command)
return results
def list_current() -> None:
"""
-o, --outdated List outdated packages
-u, --uptodate List uptodate packages
-e, --editable List editable projects.
-l, --local If in a virtualenv that has global access, do not list globally-installed packages.
--user Only output packages installed in user-site.
--pre Include pre-release and development versions. By default, pip only finds stable versions.
--format <list_format> Select the output format among: columns (default), freeze, or json
--not-required List packages that are not dependencies of installed packages.
--exclude-editable Exclude editable package from output.
--include-editable Include editable package from output.
tabular layout, columns (default), freeze, or json
:return:
"""
command = _prologue()
command.extend(["list", "--format=json"])
results = subprocess.check_output(command)
return results
def show_installed_package(project_name: str, list_files: bool = False) -> None:
"""
tabular layout, columns (default), freeze, or json
:return:
"""
command = _prologue()
command.extend(["show", project_name])
if list_files:
command.append("--files")
results = subprocess.check_output(command)
return results
def search(query: str, base_url: Optional[str] = None) -> None:
"""
output in form of
package (1.0) - short description
Does not offer any alternative output formats.
:return:
"""
command = _prologue()
command.extend(["search", query])
if base_url:
command.append("--index={0}".format(base_url))
results = subprocess.check_output(command)
return results
if __name__ == "__main__":
def run() -> None:
noarg_functions = [freeze_current, list_current]
for fun in noarg_functions:
print(fun())
project_functions = [search]
for fun in project_functions:
print(fun("jiggle-version"))
print(download("jiggle-version", "tmp/"))
run() | en | 0.676486 | # coding=utf-8 Shell to pip. Not planning on supporting every metadata command & option that pip does. Meta Data Relevant Commands: download Download packages. freeze Output installed packages in requirements format. list List installed packages. show Show information about installed packages. search Search PyPI for packages. Many switches to specify which file to download :param project_name: :param dest: :return: -r, --requirement <file> Use the order in the given requirements file and its comments when generating output. This option can be used multiple times. -f, --find-links <url> URL for finding packages, which will be added to the output. -l, --local If in a virtualenv that has global access, do not output globally-installed packages. --user Only output packages installed in user-site. --all Do not skip these packages in the output: setuptools, distribute, wheel, pip --exclude-editable Exclude editable package from output. :return: -o, --outdated List outdated packages -u, --uptodate List uptodate packages -e, --editable List editable projects. -l, --local If in a virtualenv that has global access, do not list globally-installed packages. --user Only output packages installed in user-site. --pre Include pre-release and development versions. By default, pip only finds stable versions. --format <list_format> Select the output format among: columns (default), freeze, or json --not-required List packages that are not dependencies of installed packages. --exclude-editable Exclude editable package from output. --include-editable Include editable package from output. tabular layout, columns (default), freeze, or json :return: tabular layout, columns (default), freeze, or json :return: output in form of package (1.0) - short description Does not offer any alternative output formats. :return: | 2.308439 | 2 |
2020/022_bt2446_method_c/debug_bt2446_bt2407.py | toru-ver4/sample_code | 19 | 6619288 | <reponame>toru-ver4/sample_code
# -*- coding: utf-8 -*-
"""
debug
==============
"""
# import standard libraries
import os
import pathlib
# import third-party libraries
import numpy as np
import cv2
from colour import read_LUT, write_LUT, LUT3D
import matplotlib.pyplot as plt
# import my libraries
import transfer_functions as tf
import test_pattern_generator2 as tpg
import color_space as cs
import bt2446_method_c as bmc
import bt2047_gamut_mapping as bgm
# information
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2020 - <NAME>'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = '<NAME>'
__email__ = 'toru.ver.11 at-sign gmail.com'
__all__ = []
def img_file_read(filename):
"""
OpenCV の BGR 配列が怖いので並べ替えるwrapperを用意。
"""
img = cv2.imread(filename, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR)
if img is not None:
return img[:, :, ::-1]
else:
return img
def img_file_read_float(filename):
img_int = img_file_read(filename)
img_float = img_int / 0xFFFF
return img_float
def img_file_write(filename, img):
"""
OpenCV の BGR 配列が怖いので並べ替えるwrapperを用意。
"""
cv2.imwrite(filename, img[:, :, ::-1], [cv2.IMWRITE_PNG_COMPRESSION, 9])
def img_file_wirte_float_to_16bit(filename, img_float):
img_int = np.uint16(np.round(np.clip(img_float, 0.0, 1.0) * 0xFFFF))
img_file_write(filename, img_int)
def main_func():
img_path = "./img/high.png"
hdr_img_non_linear = bmc.read_img_and_to_float(img_path)
hdr_img_linear = tf.eotf(hdr_img_non_linear, tf.ST2084)
sdr_img_linear = bmc.bt2446_method_c_tonemapping(hdr_img_linear)
tpg.preview_image(sdr_img_linear ** (1/2.4))
sdr_709_liner = bgm.bt2407_gamut_mapping_for_rgb_linear(
rgb_linear=sdr_img_linear,
outer_color_space_name=cs.BT2020,
inner_color_space_name=cs.BT709)
tpg.preview_image(sdr_709_liner ** (1/2.4))
def get_youtube_tonemap_line():
x_pq = np.linspace(0, 1, 1024)
x_img = np.dstack([x_pq, x_pq, x_pq])
lut3d = read_LUT("./luts/HDR10_to_BT709_YouTube_Rev03.cube")
y = lut3d.apply(x_img)
plt.plot(x_pq, y[..., 1].flatten())
x = tf.eotf_to_luminance(x_pq, tf.ST2084)
y = tf.eotf_to_luminance(y[..., 1].flatten(), tf.GAMMA24)
out_data = np.dstack((x, y)).reshape((1024, 2))
print(out_data)
np.save("./youtube.npy", out_data)
def apply_bt2446_bt2407(
src_color_space_name=cs.BT2020, tfc=tf.ST2084,
alpha=0.15, sigma=0.5,
hdr_ref_luminance=203, hdr_peak_luminance=1000,
k1=0.8, k3=0.7, y_sdr_ip=60, bt2407_gamut_mapping=True):
img = img_file_read("./_debug_img/SMPTE ST2084_ITU-R BT.2020_D65_1920x1080_rev04_type1.tiff")
x_linear = tf.eotf(img / 0xFFFF, tf.ST2084)
sdr_img_linear = bmc.bt2446_method_c_tonemapping(
img=x_linear,
src_color_space_name=src_color_space_name,
tfc=tfc, alpha=alpha, sigma=sigma,
hdr_ref_luminance=hdr_ref_luminance,
hdr_peak_luminance=hdr_peak_luminance,
k1=k1, k3=k3, y_sdr_ip=y_sdr_ip)
if bt2407_gamut_mapping:
sdr_img_linear = bgm.bt2407_gamut_mapping_for_rgb_linear(
rgb_linear=sdr_img_linear,
outer_color_space_name=cs.BT2020,
inner_color_space_name=cs.BT709)
sdr_img_nonlinear = sdr_img_linear ** (1/2.4)
out_img = np.uint16(np.round(sdr_img_nonlinear * 0xFFFF))
img_file_write("./_debug_img/sdr.tiff", out_img)
def make_blog_result_image():
img_list = [
"./img/step_ramp_step_65.png", "./img/dark.png",
"./img/middle.png", "./img/high.png", "./img/umi.png"]
youtube_lut = read_LUT("./3DLUT/HDR10_to_BT709_YouTube_Rev03.cube")
luminance_lut = read_LUT(
"./3DLUT/LuminanceMap_for_ST2084_BT2020_D65_MapRange_100-4000nits_65x65x65.cube")
bt2446_1000_lut = read_LUT(
"./3DLUT/1000nits_v3__a_0.10_s_0.60_k1_0.69_k3_0.74_y_s_49.0_grid_65_gamma_2.4.cube")
bt2446_4000_lut = read_LUT(
"./3DLUT/4000nits_v3__a_0.10_s_0.60_k1_0.69_k3_0.74_y_s_41.0_grid_65_gamma_2.4.cube")
dst_dir = "./blog_img"
lut3d_list = [
None, youtube_lut, luminance_lut,
bt2446_1000_lut, bt2446_4000_lut]
for src_path in img_list:
path_info = pathlib.Path(src_path)
base_name = path_info.stem
ext = path_info.suffix
# make names
dst_name_original = os.path.join(dst_dir, base_name + ext)
dst_name_youtube = os.path.join(dst_dir, base_name + "_youtube" + ext)
dst_name_luminance_map = os.path.join(
dst_dir, base_name + "_luminance" + ext)
dst_name_bt2446_1000 = os.path.join(
dst_dir, base_name + "_bt2446_1000" + ext)
dst_name_bt2446_4000 = os.path.join(
dst_dir, base_name + "_bt2446_4000" + ext)
dst_name_list = [
dst_name_original, dst_name_youtube, dst_name_luminance_map,
dst_name_bt2446_1000, dst_name_bt2446_4000]
# original
src_img = img_file_read_float(src_path)
# apply roop
for lut, dst_name in zip(lut3d_list, dst_name_list):
print(f"converting {dst_name}")
if lut is not None:
dst_img = lut.apply(src_img)
else:
dst_img = src_img.copy()
img_file_wirte_float_to_16bit(dst_name, dst_img)
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# mbl.make_bt2020_to_bt709_luts()
# main_func()
# get_youtube_tonemap_line()
# apply_bt2446_bt2407(
# src_color_space_name=cs.BT2020, tfc=tf.ST2084,
# alpha=0.05, sigma=0.75,
# hdr_ref_luminance=203, hdr_peak_luminance=1000,
# k1=0.51, k3=0.75, y_sdr_ip=51.1, bt2407_gamut_mapping=True)
# lut3d = read_LUT("./3DLUT/_HDR10_to_BT709_YouTube_Rev03.cube")
# write_LUT(lut3d, "./3DLUT/HDR10_to_BT709_YouTube_Rev03.cube")
make_blog_result_image()
| # -*- coding: utf-8 -*-
"""
debug
==============
"""
# import standard libraries
import os
import pathlib
# import third-party libraries
import numpy as np
import cv2
from colour import read_LUT, write_LUT, LUT3D
import matplotlib.pyplot as plt
# import my libraries
import transfer_functions as tf
import test_pattern_generator2 as tpg
import color_space as cs
import bt2446_method_c as bmc
import bt2047_gamut_mapping as bgm
# information
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2020 - <NAME>'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = '<NAME>'
__email__ = 'toru.ver.11 at-sign gmail.com'
__all__ = []
def img_file_read(filename):
"""
OpenCV の BGR 配列が怖いので並べ替えるwrapperを用意。
"""
img = cv2.imread(filename, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR)
if img is not None:
return img[:, :, ::-1]
else:
return img
def img_file_read_float(filename):
img_int = img_file_read(filename)
img_float = img_int / 0xFFFF
return img_float
def img_file_write(filename, img):
"""
OpenCV の BGR 配列が怖いので並べ替えるwrapperを用意。
"""
cv2.imwrite(filename, img[:, :, ::-1], [cv2.IMWRITE_PNG_COMPRESSION, 9])
def img_file_wirte_float_to_16bit(filename, img_float):
img_int = np.uint16(np.round(np.clip(img_float, 0.0, 1.0) * 0xFFFF))
img_file_write(filename, img_int)
def main_func():
img_path = "./img/high.png"
hdr_img_non_linear = bmc.read_img_and_to_float(img_path)
hdr_img_linear = tf.eotf(hdr_img_non_linear, tf.ST2084)
sdr_img_linear = bmc.bt2446_method_c_tonemapping(hdr_img_linear)
tpg.preview_image(sdr_img_linear ** (1/2.4))
sdr_709_liner = bgm.bt2407_gamut_mapping_for_rgb_linear(
rgb_linear=sdr_img_linear,
outer_color_space_name=cs.BT2020,
inner_color_space_name=cs.BT709)
tpg.preview_image(sdr_709_liner ** (1/2.4))
def get_youtube_tonemap_line():
x_pq = np.linspace(0, 1, 1024)
x_img = np.dstack([x_pq, x_pq, x_pq])
lut3d = read_LUT("./luts/HDR10_to_BT709_YouTube_Rev03.cube")
y = lut3d.apply(x_img)
plt.plot(x_pq, y[..., 1].flatten())
x = tf.eotf_to_luminance(x_pq, tf.ST2084)
y = tf.eotf_to_luminance(y[..., 1].flatten(), tf.GAMMA24)
out_data = np.dstack((x, y)).reshape((1024, 2))
print(out_data)
np.save("./youtube.npy", out_data)
def apply_bt2446_bt2407(
src_color_space_name=cs.BT2020, tfc=tf.ST2084,
alpha=0.15, sigma=0.5,
hdr_ref_luminance=203, hdr_peak_luminance=1000,
k1=0.8, k3=0.7, y_sdr_ip=60, bt2407_gamut_mapping=True):
img = img_file_read("./_debug_img/SMPTE ST2084_ITU-R BT.2020_D65_1920x1080_rev04_type1.tiff")
x_linear = tf.eotf(img / 0xFFFF, tf.ST2084)
sdr_img_linear = bmc.bt2446_method_c_tonemapping(
img=x_linear,
src_color_space_name=src_color_space_name,
tfc=tfc, alpha=alpha, sigma=sigma,
hdr_ref_luminance=hdr_ref_luminance,
hdr_peak_luminance=hdr_peak_luminance,
k1=k1, k3=k3, y_sdr_ip=y_sdr_ip)
if bt2407_gamut_mapping:
sdr_img_linear = bgm.bt2407_gamut_mapping_for_rgb_linear(
rgb_linear=sdr_img_linear,
outer_color_space_name=cs.BT2020,
inner_color_space_name=cs.BT709)
sdr_img_nonlinear = sdr_img_linear ** (1/2.4)
out_img = np.uint16(np.round(sdr_img_nonlinear * 0xFFFF))
img_file_write("./_debug_img/sdr.tiff", out_img)
def make_blog_result_image():
img_list = [
"./img/step_ramp_step_65.png", "./img/dark.png",
"./img/middle.png", "./img/high.png", "./img/umi.png"]
youtube_lut = read_LUT("./3DLUT/HDR10_to_BT709_YouTube_Rev03.cube")
luminance_lut = read_LUT(
"./3DLUT/LuminanceMap_for_ST2084_BT2020_D65_MapRange_100-4000nits_65x65x65.cube")
bt2446_1000_lut = read_LUT(
"./3DLUT/1000nits_v3__a_0.10_s_0.60_k1_0.69_k3_0.74_y_s_49.0_grid_65_gamma_2.4.cube")
bt2446_4000_lut = read_LUT(
"./3DLUT/4000nits_v3__a_0.10_s_0.60_k1_0.69_k3_0.74_y_s_41.0_grid_65_gamma_2.4.cube")
dst_dir = "./blog_img"
lut3d_list = [
None, youtube_lut, luminance_lut,
bt2446_1000_lut, bt2446_4000_lut]
for src_path in img_list:
path_info = pathlib.Path(src_path)
base_name = path_info.stem
ext = path_info.suffix
# make names
dst_name_original = os.path.join(dst_dir, base_name + ext)
dst_name_youtube = os.path.join(dst_dir, base_name + "_youtube" + ext)
dst_name_luminance_map = os.path.join(
dst_dir, base_name + "_luminance" + ext)
dst_name_bt2446_1000 = os.path.join(
dst_dir, base_name + "_bt2446_1000" + ext)
dst_name_bt2446_4000 = os.path.join(
dst_dir, base_name + "_bt2446_4000" + ext)
dst_name_list = [
dst_name_original, dst_name_youtube, dst_name_luminance_map,
dst_name_bt2446_1000, dst_name_bt2446_4000]
# original
src_img = img_file_read_float(src_path)
# apply roop
for lut, dst_name in zip(lut3d_list, dst_name_list):
print(f"converting {dst_name}")
if lut is not None:
dst_img = lut.apply(src_img)
else:
dst_img = src_img.copy()
img_file_wirte_float_to_16bit(dst_name, dst_img)
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# mbl.make_bt2020_to_bt709_luts()
# main_func()
# get_youtube_tonemap_line()
# apply_bt2446_bt2407(
# src_color_space_name=cs.BT2020, tfc=tf.ST2084,
# alpha=0.05, sigma=0.75,
# hdr_ref_luminance=203, hdr_peak_luminance=1000,
# k1=0.51, k3=0.75, y_sdr_ip=51.1, bt2407_gamut_mapping=True)
# lut3d = read_LUT("./3DLUT/_HDR10_to_BT709_YouTube_Rev03.cube")
# write_LUT(lut3d, "./3DLUT/HDR10_to_BT709_YouTube_Rev03.cube")
make_blog_result_image() | en | 0.336482 | # -*- coding: utf-8 -*- debug ============== # import standard libraries # import third-party libraries # import my libraries # information OpenCV の BGR 配列が怖いので並べ替えるwrapperを用意。 OpenCV の BGR 配列が怖いので並べ替えるwrapperを用意。 # make names # original # apply roop # mbl.make_bt2020_to_bt709_luts() # main_func() # get_youtube_tonemap_line() # apply_bt2446_bt2407( # src_color_space_name=cs.BT2020, tfc=tf.ST2084, # alpha=0.05, sigma=0.75, # hdr_ref_luminance=203, hdr_peak_luminance=1000, # k1=0.51, k3=0.75, y_sdr_ip=51.1, bt2407_gamut_mapping=True) # lut3d = read_LUT("./3DLUT/_HDR10_to_BT709_YouTube_Rev03.cube") # write_LUT(lut3d, "./3DLUT/HDR10_to_BT709_YouTube_Rev03.cube") | 2.227661 | 2 |
cdlib_rest/cdlib_server.py | GiulioRossetti/cdlib_rest | 1 | 6619289 | import json
import os
import shutil
import uuid
from aiohttp import web
from aiohttp_swagger import *
from cdlib import algorithms, readwrite
from networkx.readwrite import json_graph
def __unpack_stats(stats):
    """Map a (min, max, mean, std) sequence onto a labelled dictionary."""
    labels = ("min", "max", "mean", "std")
    return {label: value for label, value in zip(labels, stats)}
def __check_token(request):
    """Validate the experiment token carried by *request*.

    Returns a (status_code, payload) pair: (200, None) when the token's
    experiment directory exists on disk, (500, error_dict) otherwise.
    """
    token = request.query['token']
    if os.path.exists(f"../data/db/{token}"):
        return 200, None
    return 500, dict(status='failure', description="token not valid")
async def __save_communities(communities, request):
    """Persist *communities* as JSON inside the experiment's directory.

    The file name encodes both the algorithm and its parameters so several
    runs of the same experiment can coexist.
    """
    token = request.query['token']
    destination = (f"../data/db/{token}/"
                   f"{communities.method_name}_{communities.method_parameters}")
    readwrite.write_community_json(communities, destination)
async def __save_network(request):
    """Write the JSON-encoded network carried by *request* to disk."""
    token = request.query['token']
    payload = request.query['network']
    with open(f"../data/db/{token}/network.json", "w") as handle:
        handle.write(payload)
async def __load_network(request):
    """Read the experiment's stored network back into a networkx graph.

    Returns the graph deserialized from ``network.json`` in node-link format.
    """
    token = request.query['token']
    # `with` closes the file handle; the previous json.load(open(...)) leaked it.
    with open(f"../data/db/{token}/network.json") as handle:
        data = json.load(handle)
    return json_graph.node_link_graph(data)
async def __load_communities(request) -> list:
    """Load every community object named in the request's query string."""
    token = request.query['token']
    names = json.loads(request.query['community_names'])
    return [readwrite.read_community_json(f"../data/db/{token}/{name}")
            for name in names]
def create_experiment(request):
    """
    ---
    description: This end-point allows to create a new experiment.
    tags:
    - Create Experiment
    produces:
    - text/plain
    responses:
        "200":
            description: successful operation. Return experiment "token"
    """
    # A fresh random token identifies the experiment's private workspace.
    token = str(uuid.uuid4())
    directory = f"../data/db/{token}"
    # exist_ok avoids the check-then-create race of the previous
    # `if not os.path.exists(...)` guard (harmless here, but racy in general).
    os.makedirs(directory, exist_ok=True)
    response_obj = dict(status="success", data={"token": token})
    return web.Response(text=json.dumps(response_obj), status=200)
def destroy_experiment(request):
    """
    ---
    description: This end-point allows to destroy an existing experiment.
    tags:
    - Destroy Experiment
    produces:
    - text/plain
    responses:
        "200":
            description: successful operation.
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    """
    token = request.query['token']
    directory = f"../data/db/{token}"
    # Unknown token: nothing to remove, report failure.
    if not os.path.exists(directory):
        payload = dict(status="failure", description="token not valid")
        return web.Response(text=json.dumps(payload), status=500)
    # Drop the whole experiment workspace (network + cached clusterings).
    shutil.rmtree(directory)
    return web.Response(text=json.dumps(dict(status="success")), status=200)
async def upload_network(request):
    """
    ---
    description: This end-point allows to upload a network dataset.
    tags:
    - Upload Network
    produces:
    - text/plain
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed.
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: network
      schema:
        type: string
      required: true
      description: JSON string representing a networkx.Graph object
    """
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    # Token is valid: persist the JSON-encoded graph in the workspace.
    await __save_network(request)
    return web.Response(text=json.dumps(dict(status="success")), status=200)
async def community_comparison(request):
    """
    ---
    description: This end-point allows to compare two clusterings applying several state of art scores.
    tags:
    - Community Comparisons
    produces:
    - text/plain
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed.
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: partition_names
      schema:
        type: string
      required: false
      description: Name of the partitions
    """
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)

    g = await __load_network(request)
    coms = await __load_communities(request)

    # The comparison is strictly pairwise: exactly two clusterings needed.
    # (Was `len(coms) < 2 or len(coms) > 2`, i.e. the long form of `!= 2`.)
    if len(coms) != 2:
        response_obj = dict(status='failure',
                            description='to perform the comparison exactly two clusterings are required')
        return web.Response(text=json.dumps(response_obj), status=500)

    com1, com2 = coms
    # Re-attach the experiment graph: it is not serialized with the clustering.
    com1.graph = g
    com2.graph = g

    try:
        # Overlap-safe scores are always computed.
        f1 = com1.f1(com2)
        data = dict(onmi=com1.overlapping_normalized_mutual_information(com2), omega=com1.omega(com2),
                    f1=dict(mean=f1[0], std=f1[1]), nf1=com1.nf1(com2))

        # Crisp-only scores are added when neither clustering overlaps.
        if not com1.overlap and not com2.overlap:
            crisp = dict(nmi=com1.normalized_mutual_information(com2),
                         ari=com1.adjusted_rand_index(com2), ami=com1.adjusted_mutual_information(com2),
                         vi=com1.variation_of_information(com2))
            data = {**data, **crisp}

        response_obj = dict(status='success', data=data)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def community_statistics(request):
    """
    ---
    description: This end-point allows to compute aggregate statistics for the computed partition.
    tags:
    - Fitness Scores
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: partition_names
      schema:
        type: string
      required: false
      description: Name of the partitions
    - in: query
      name: summary
      schema:
        type: string
      required: false
      description: Whether or not to return an aggregated view of community-wise statistics
    """
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)

    g = await __load_network(request)
    coms = await __load_communities(request)
    data = {}

    # `summary` is declared optional above; the old `request.query['summary']`
    # raised KeyError (-> 500) when it was omitted. Default to the aggregated
    # view, and read it once instead of on every loop iteration.
    summary = request.query.get('summary', 'True')

    try:
        for com in coms:
            # Re-attach the experiment graph (not serialized with the clustering).
            com.graph = g
            # Partition-level fitness scores (one scalar each).
            simple = dict(er_modularity=com.erdos_renyi_modularity(), modularity_density=com.modularity_density(),
                          gn_modularity=com.newman_girvan_modularity(), z_modularity=com.z_modularity(),
                          link_modularity=com.link_modularity(), surprise=com.surprise(),
                          significance=com.significance())

            if summary == "True":
                # Aggregated community-wise scores: min/max/mean/std per metric.
                composed = dict(size=__unpack_stats(com.size()), conductance=__unpack_stats(com.conductance()),
                                normalized_cut=__unpack_stats(com.normalized_cut()),
                                triangle_participation_ratio=__unpack_stats(com.triangle_participation_ratio()),
                                max_odf=__unpack_stats(com.max_odf()), avg_odf=__unpack_stats(com.avg_odf()),
                                flake_odf=__unpack_stats(com.flake_odf()),
                                edges_inside=__unpack_stats(com.edges_inside()),
                                fraction_over_median_degree=__unpack_stats(com.fraction_over_median_degree()),
                                expansion=__unpack_stats(com.expansion()), cut_ratio=__unpack_stats(com.cut_ratio()),
                                internal_edge_density=__unpack_stats(com.internal_edge_density()),
                                average_internal_degree=__unpack_stats(com.average_internal_degree()))
            else:
                # Raw per-community score lists.
                composed = dict(size=com.size(summary=False), conductance=com.conductance(summary=False),
                                normalized_cut=com.normalized_cut(summary=False),
                                triangle_participation_ratio=com.triangle_participation_ratio(summary=False),
                                max_odf=com.max_odf(summary=False), avg_odf=com.avg_odf(summary=False),
                                flake_odf=com.flake_odf(summary=False), edges_inside=com.edges_inside(summary=False),
                                fraction_over_median_degree=com.fraction_over_median_degree(summary=False),
                                expansion=com.expansion(summary=False), cut_ratio=com.cut_ratio(summary=False),
                                internal_edge_density=com.internal_edge_density(summary=False),
                                average_internal_degree=com.average_internal_degree(summary=False))

            data[f"{com.method_name}_{com.method_parameters}"] = {**simple, **composed}

        response_obj = dict(status='success', data=data)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def angel(request):
    """
    ---
    description: This end-point allows to compute the Angel Community Discovery algorithm to a network dataset.
    tags:
    - Angel
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: threshold
      schema:
        type: float
      required: true
      description: merging threshold
    - in: query
      name: min_com_size
      schema:
        type: integer
      required: true
      description: minimum community size
    """
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        threshold = float(request.query['threshold'])
        min_size = int(request.query['min_com_size'])
        communities = algorithms.angel(graph, threshold, min_size)
        payload = dict(status='success', data=json.loads(communities.to_json()))
        # Cache the clustering so later endpoints (stats/comparison) can reuse it.
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def demon(request):
    """
    ---
    description: This end-point allows to compute the Demon Community Discovery algorithm to a network dataset.
    tags:
    - Demon
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: epsilon
      schema:
        type: float
      required: true
      description: merging threshold
    - in: query
      name: min_com_size
      schema:
        type: integer
      required: true
      description: minimum community size
    """
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        epsilon = float(request.query['epsilon'])
        min_size = int(request.query['min_com_size'])
        communities = algorithms.demon(graph, epsilon, min_size)
        payload = dict(status='success', data=json.loads(communities.to_json()))
        # Cache the clustering for later statistics/comparison calls.
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def kclique(request):
    """
    ---
    description: This end-point allows to compute kclique Community Discovery algorithm to a network dataset.
    tags:
    - Kclique
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: k
      schema:
        type: integer
      required: true
      description: Size of smallest clique
    """
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        clique_size = int(request.query['k'])
        communities = algorithms.kclique(graph, clique_size)
        payload = dict(status='success', data=json.loads(communities.to_json()))
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def lfm(request):
    """
    ---
    description: This end-point allows to compute Lfm Community Discovery algorithm to a network dataset.
    tags:
    - LFM
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: alpha
      schema:
        type: float
      required: true
      description: controls the size of the communities. Large values of alpha yield very small communities, small values instead deliver large modules. If alpha is small enough, all nodes end up in the same cluster, the network itself. In most cases, for alpha < 0.5 there is only one community, for alpha > 2 one recovers the smallest communities. A natural choice is alpha = 1.
    """
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        alpha = float(request.query['alpha'])
        communities = algorithms.lfm(graph, alpha)
        payload = dict(status='success', data=json.loads(communities.to_json()))
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def ego_networks(request):
    """
    ---
    description: This end-point allows to compute the Ego-networks Community Discovery algorithm to a network dataset.
    tags:
    - Ego-networks
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: level
      schema:
        type: integer
      required: true
      description: extract communities with all neighbors of distance<=level from a node.
    """
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        level = int(request.query['level'])
        communities = algorithms.ego_networks(graph, level)
        payload = dict(status='success', data=json.loads(communities.to_json()))
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def overlapping_seed_set_expansion(request):
    """
    ---
    description: This end-point allows to compute the OSSE Community Discovery algorithm to a network dataset.
    tags:
    - Overlapping seed set expansion
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: seeds
      schema:
        type: list
      required: true
      description: Node list.
    - in: query
      name: ninf
      schema:
        type: boolean
      required: true
      description: Neighbourhood Inflation parameter.
    - in: query
      name: expansion
      schema:
        type: string
      required: true
      description: Seed expansion: ppr or vppr.
    - in: query
      name: stopping
      schema:
        type: sting
      required: true
      description: Stopping criteria: cond.
    - in: query
      name: nworkers
      schema:
        type: integer
      required: true
      description: Number of Workers.
    - in: query
      name: nruns
      schema:
        type: integer
      required: true
      description: Number of runs.
    - in: query
      name: alpha
      schema:
        type: float
      required: true
      description: alpha value for Personalized PageRank expansion.
    - in: query
      name: maxexpand
      schema:
        type: float
      required: true
      description: Maximum expansion allowed for approximate ppr.
    - in: query
      name: delta
      schema:
        type: float
      required: true
      description: Minimum distance parameter for near duplicate communities.
    """
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)

    g = await __load_network(request)
    try:
        seeds = json.loads(request.query['seeds'])
        # Only the literal string "False" disables neighbourhood inflation.
        ninf = str(request.query['ninf']) != "False"
        expansion = str(request.query['expansion'])
        nworkers = int(request.query['nworkers'])
        stopping = str(request.query['stopping'])
        nruns = int(request.query['nruns'])
        alpha = float(request.query['alpha'])
        delta = float(request.query['delta'])
        # The `maxexpand` query parameter is documented above but was never
        # read: the call always passed float('INF'). Honour it now, keeping
        # 'INF' as the backward-compatible default when it is omitted.
        maxexpand = float(request.query.get('maxexpand', 'INF'))
        communities = algorithms.overlapping_seed_set_expansion(g, seeds, ninf, expansion, stopping, nworkers, nruns,
                                                                alpha, maxexpand, delta)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def lais2(request):
    """
    ---
    description: This end-point allows to compute Lais2 Community Discovery algorithm to a network dataset.
    tags:
    - Lais2
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    """
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        # Lais2 is parameter-free: only the experiment graph is needed.
        communities = algorithms.lais2(graph)
        payload = dict(status='success', data=json.loads(communities.to_json()))
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def congo(request):
    """
    ---
    description: This end-point allows to compute Congo Community Discovery algorithm to a network dataset.
    tags:
    - Congo
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: number_communities
      schema:
        type: integer
      required: true
      description: the number of communities desired
    - in: query
      name: height
      schema:
        type: integer
      required: true
      description: The length of the longest shortest paths that CONGO considers
    """
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        n_communities = int(request.query['number_communities'])
        height = int(request.query['height'])
        communities = algorithms.congo(graph, n_communities, height)
        payload = dict(status='success', data=json.loads(communities.to_json()))
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def conga(request):
    """
    ---
    description: This end-point allows to compute Conga Community Discovery algorithm to a network dataset.
    tags:
    - Conga
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: number_communities
      schema:
        type: integer
      required: true
      description: the number of communities desired
    """
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        n_communities = int(request.query['number_communities'])
        communities = algorithms.conga(graph, n_communities)
        payload = dict(status='success', data=json.loads(communities.to_json()))
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def lemon(request):
    """
    ---
    description: This end-point allows to compute Lemon Community Discovery algorithm to a network dataset.
    tags:
    - Lemon
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: seeds
      schema:
        type: list
      required: true
      description: Node list
    - in: query
      name: min_com_size
      schema:
        type: integer
      required: true
      description: the minimum size of a single community in the network
    - in: query
      name: max_com_size
      schema:
        type: integer
      required: true
      description: the maximum size of a single community in the network
    - in: query
      name: expand_step
      schema:
        type: integer
      required: true
      description: the step of seed set increasement during expansion process
    - in: query
      name: subspace_dim
      schema:
        type: integer
      required: true
      description: dimension of the subspace; choosing a large dimension is undesirable because it would increase the computation cost of generating local spectra
    - in: query
      name: walk_steps
      schema:
        type: integer
      required: true
      description: the number of step for the random walk
    - in: query
      name: biased
      schema:
        type: boolean
      required: true
      description: set if the random walk starting from seed nodes
    """
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        seeds = json.loads(request.query['seeds'])
        min_size = int(request.query['min_com_size'])
        max_size = int(request.query['max_com_size'])
        expand_step = int(request.query['expand_step'])
        walk_steps = int(request.query['walk_steps'])
        # Only the literal string "False" disables the biased random walk.
        biased = str(request.query['biased']) != "False"
        subspace_dim = int(request.query['subspace_dim'])
        communities = algorithms.lemon(graph, seeds, min_size, max_size,
                                       expand_step, subspace_dim, walk_steps,
                                       biased)
        payload = dict(status='success', data=json.loads(communities.to_json()))
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def slpa(request):
    """
    ---
    description: This end-point allows to compute slpa Community Discovery algorithm to a network dataset.
    tags:
    - slpa
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: t
      schema:
        type: integer
      required: true
      description: maximum number of iterations
    - in: query
      name: r
      schema:
        type: float
      required: true
      description: threshold ∈ [0, 1] used in the post-processing stage; if the probability of seeing a particular label during the whole process is less than r, this label is deleted from a node’s memory.
    """
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        iterations = int(request.query['t'])
        threshold = float(request.query['r'])
        communities = algorithms.slpa(graph, iterations, threshold)
        payload = dict(status='success', data=json.loads(communities.to_json()))
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def multicom(request):
    """
    ---
    description: This end-point allows to compute multicom Community Discovery algorithm to a network dataset.
    tags:
    - multicom
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: seed_node
      schema:
        type: integer
      required: true
      description: Id of the seed node around which we want to detect communities
    """
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        seed_node = json.loads(request.query['seed_node'])
        communities = algorithms.multicom(graph, seed_node)
        payload = dict(status='success', data=json.loads(communities.to_json()))
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def big_clam(request):
    """
    ---
    description: This end-point allows to compute big_clam Community Discovery algorithm to a network dataset.
    tags:
    - Big_clam
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: number_communities
      schema:
        type: integer
      required: true
      description: number communities desired
    """
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        n_communities = int(request.query['number_communities'])
        communities = algorithms.big_clam(graph, n_communities)
        payload = dict(status='success', data=json.loads(communities.to_json()))
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def node_perception(request):
    """
    ---
    description: This end-point allows to compute the Node Perception Community Discovery algorithm to a network dataset.
    tags:
    - Node Perception
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: threshold
      schema:
        type: float
      required: true
      description: the tolerance required in order to merge communities.
    - in: query
      name: overlap_threshold
      schema:
        type: float
      required: true
      description: the overlap tolerance.
    - in: query
      name: min_comm_size
      schema:
        type: integer
      required: true
      description: minimum community size.
    """
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        merge_threshold = float(request.query['threshold'])
        overlap_threshold = float(request.query['overlap_threshold'])
        min_size = int(request.query['min_comm_size'])
        communities = algorithms.node_perception(graph, merge_threshold,
                                                 overlap_threshold, min_size)
        payload = dict(status='success', data=json.loads(communities.to_json()))
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
# crisp partition
async def girvan_newman(request):
    """
    ---
    description: This end-point allows to compute girvan_newman Community Discovery algorithm to a network dataset.
    tags:
    - girvan_newman
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: level
      schema:
        type: integer
      required: true
      description: the level where to cut the dendrogram
    """
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        cut_level = int(request.query['level'])
        communities = algorithms.girvan_newman(graph, cut_level)
        payload = dict(status='success', data=json.loads(communities.to_json()))
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def em(request):
    """
    ---
    description: This end-point allows to compute EM Community Discovery algorithm to a network dataset.
    tags:
    - EM
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: k
      schema:
        type: integer
      required: true
      description: the number of desired communities
    """
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        n_communities = int(request.query['k'])
        communities = algorithms.em(graph, n_communities)
        payload = dict(status='success', data=json.loads(communities.to_json()))
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def scan(request):
    """
    ---
    description: This end-point allows to compute Scan Community Discovery algorithm to a network dataset.
    tags:
    - Scan
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: epsilon
      schema:
        type: float
      required: true
      description: the minimum threshold to assigning cluster membership
    - in: query
      name: mu
      schema:
        type: integer
      required: true
      description: minimum number of neighbors with a structural similarity that exceeds the threshold epsilon
    """
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        epsilon = float(request.query['epsilon'])
        mu = int(request.query['mu'])
        communities = algorithms.scan(graph, epsilon, mu)
        payload = dict(status='success', data=json.loads(communities.to_json()))
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def gdmp2(request):
    """
    ---
    description: This end-point allows to compute gdmp2 Community Discovery algorithm to a network dataset.
    tags:
    - gdmp2
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: min_threshold
      schema:
        type: float
      required: true
      description: the minimum density threshold parameter to control the density of the output subgraphs
    """
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        density_threshold = float(request.query['min_threshold'])
        communities = algorithms.gdmp2(graph, density_threshold)
        payload = dict(status='success', data=json.loads(communities.to_json()))
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def spinglass(request):
    """
    ---
    description: This end-point allows to compute spinglass Community Discovery algorithm to a network dataset.
    tags:
    - spinglass
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    """
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        # Spinglass is parameter-free: only the experiment graph is needed.
        communities = algorithms.spinglass(graph)
        payload = dict(status='success', data=json.loads(communities.to_json()))
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def eigenvector(request):
    """
    ---
    description: This end-point allows to compute eigenvector Community Discovery algorithm to a network dataset.
    tags:
      - eigenvector
    produces:
      - application/json
    responses:
      "200":
        description: successful operation.
      "500":
        description: operation failed
    parameters:
      - in: query
        name: token
        schema:
          type: string
        required: true
        description: Experiment token
    """
    # Reject requests whose experiment token has no backing directory.
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        coms = algorithms.eigenvector(graph)
        payload = dict(status='success', data=json.loads(coms.to_json()))
        await __save_communities(coms, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def agdl(request):
    """
    ---
    description: This end-point allows to compute agdl Community Discovery algorithm to a network dataset.
    tags:
      - agdl
    produces:
      - application/json
    responses:
      "200":
        description: successful operation.
      "500":
        description: operation failed
    parameters:
      - in: query
        name: token
        schema:
          type: string
        required: true
        description: Experiment token
      - in: query
        name: number_communities
        schema:
          type: integer
        required: true
        description: number of communities
      - in: query
        name: number_neighbors
        schema:
          type: integer
        required: true
        description: Number of neighbors to use for KNN
      - in: query
        name: kc
        schema:
          type: integer
        required: true
        description: size of the neighbor set for each cluster
      - in: query
        name: a
        schema:
          type: float
        required: true
        description: range(-infinity;+infinty)
    """
    # Reject requests whose experiment token has no backing directory.
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        # Parse the algorithm's query-string parameters up front so a bad
        # value is reported through the generic failure path below.
        n_coms = int(request.query['number_communities'])
        n_neighbors = int(request.query['number_neighbors'])
        cluster_kc = int(request.query['kc'])
        alpha = float(request.query['a'])
        coms = algorithms.agdl(graph, n_coms, n_neighbors, cluster_kc, alpha)
        payload = dict(status='success', data=json.loads(coms.to_json()))
        await __save_communities(coms, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def louvain(request):
    """
    ---
    description: This end-point allows to compute louvain Community Discovery algorithm to a network dataset.
    tags:
      - louvain
    produces:
      - application/json
    responses:
      "200":
        description: successful operation.
      "500":
        description: operation failed
    parameters:
      - in: query
        name: token
        schema:
          type: string
        required: true
        description: Experiment token
      - in: query
        name: weight
        schema:
          type: string
        required: true
        description: optional the key in graph to use as weight
      - in: query
        name: resolution
        schema:
          type: string
        required: float
        description: Will change the size of the communities, default to 1.
      - in: query
        name: randomize
        schema:
          type: boolean
        required: true
        description: Will randomize the node evaluation order and the community evaluation order to get different partitions at each call, default False
    """
    # Reject requests whose experiment token has no backing directory.
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    g = await __load_network(request)
    try:
        weight = str(request.query['weight'])
        resolution = float(request.query['resolution'])
        # Bug fix: the previous exact comparison (== "False") treated the
        # common lowercase spelling "false" as True. Parse the flag
        # case-insensitively; any value other than "false" still maps to
        # True, so callers that already sent "False" are unaffected.
        randomize = request.query['randomize'].strip().lower() != "false"
        communities = algorithms.louvain(g, weight, resolution, randomize)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def leiden(request):
    """
    ---
    description: This end-point allows to compute leiden Community Discovery algorithm to a network dataset.
    tags:
      - leiden
    produces:
      - application/json
    responses:
      "200":
        description: successful operation.
      "500":
        description: operation failed
    parameters:
      - in: query
        name: token
        schema:
          type: string
        required: true
        description: Experiment token
      - in: query
        name: initial_membership
        schema:
          type: list
        required: true
        description: list of int Initial membership for the partition. If :obj:`None` then defaults to a singleton partition. Deafault None
      - in: query
        name: weights
        schema:
          type: list of double
        required: float
        description: or edge attribute Weights of edges. Can be either an iterable or an edge attribute. Deafault None
    """
    # Reject requests whose experiment token has no backing directory.
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        # Both parameters arrive as JSON-encoded query-string values.
        membership = json.loads(request.query['initial_membership'])
        edge_weights = json.loads(request.query['weights'])
        coms = algorithms.leiden(graph, membership, edge_weights)
        payload = dict(status='success', data=json.loads(coms.to_json()))
        await __save_communities(coms, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def rb_pots(request):
    """
    ---
    description: This end-point allows to compute rb_pots Community Discovery algorithm to a network dataset.
    tags:
      - rb_pots
    produces:
      - application/json
    responses:
      "200":
        description: successful operation.
      "500":
        description: operation failed
    parameters:
      - in: query
        name: token
        schema:
          type: string
        required: true
        description: Experiment token
      - in: query
        name: initial_membership
        schema:
          type: list
        required: true
        description: list of int Initial membership for the partition. If :obj:`None` then defaults to a singleton partition. Deafault None
      - in: query
        name: weights
        schema:
          type: list of double
        required: float
        description: or edge attribute Weights of edges. Can be either an iterable or an edge attribute. Deafault None
      - in: query
        name: resolution_parameter
        schema:
          type: double
        required: float
        description: double >0 A parameter value controlling the coarseness of the clustering. Higher resolutions lead to more communities, while lower resolutions lead to fewer communities. Default 1
    """
    # Reject requests whose experiment token has no backing directory.
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        membership = json.loads(request.query['initial_membership'])
        edge_weights = json.loads(request.query['weights'])
        resolution = float(request.query['resolution_parameter'])
        coms = algorithms.rb_pots(graph, membership, edge_weights, resolution)
        payload = dict(status='success', data=json.loads(coms.to_json()))
        await __save_communities(coms, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def rber_pots(request):
    """
    ---
    description: This end-point allows to compute rber_pots Community Discovery algorithm to a network dataset.
    tags:
      - rber_pots
    produces:
      - application/json
    responses:
      "200":
        description: successful operation.
      "500":
        description: operation failed
    parameters:
      - in: query
        name: token
        schema:
          type: string
        required: true
        description: Experiment token
      - in: query
        name: initial_membership
        schema:
          type: list
        required: true
        description: list of int Initial membership for the partition. If :obj:`None` then defaults to a singleton partition. Deafault None
      - in: query
        name: weights
        schema:
          type: list of double
        required: float
        description: or edge attribute Weights of edges. Can be either an iterable or an edge attribute. Deafault None
      - in: query
        name: node_sizes
        schema:
          type: list of integer
        required: float
        description: list of int, or vertex attribute Sizes of nodes are necessary to know the size of communities in aggregate graphs. Usually this is set to 1 for all nodes, but in specific cases this could be changed. Deafault None
      - in: query
        name: resolution_parameter
        schema:
          type: double
        required: float
        description: double >0 A parameter value controlling the coarseness of the clustering. Higher resolutions lead to more communities, while lower resolutions lead to fewer communities. Default 1
    """
    # Reject requests whose experiment token has no backing directory.
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        membership = json.loads(request.query['initial_membership'])
        edge_weights = json.loads(request.query['weights'])
        sizes = json.loads(request.query['node_sizes'])
        resolution = float(request.query['resolution_parameter'])
        coms = algorithms.rber_pots(graph, membership, edge_weights, sizes, resolution)
        payload = dict(status='success', data=json.loads(coms.to_json()))
        await __save_communities(coms, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def cpm(request):
    """
    ---
    description: This end-point allows to compute cpm Community Discovery algorithm to a network dataset.
    tags:
      - cpm
    produces:
      - application/json
    responses:
      "200":
        description: successful operation.
      "500":
        description: operation failed
    parameters:
      - in: query
        name: token
        schema:
          type: string
        required: true
        description: Experiment token
      - in: query
        name: initial_membership
        schema:
          type: list
        required: true
        description: list of int Initial membership for the partition. If :obj:`None` then defaults to a singleton partition. Deafault None
      - in: query
        name: weights
        schema:
          type: list of double
        required: float
        description: or edge attribute Weights of edges. Can be either an iterable or an edge attribute. Deafault None
      - in: query
        name: node_sizes
        schema:
          type: list of integer
        required: float
        description: list of int, or vertex attribute Sizes of nodes are necessary to know the size of communities in aggregate graphs. Usually this is set to 1 for all nodes, but in specific cases this could be changed. Deafault None
      - in: query
        name: resolution_parameter
        schema:
          type: double
        required: float
        description: double >0 A parameter value controlling the coarseness of the clustering. Higher resolutions lead to more communities, while lower resolutions lead to fewer communities. Default 1
    """
    # Reject requests whose experiment token has no backing directory.
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        membership = json.loads(request.query['initial_membership'])
        edge_weights = json.loads(request.query['weights'])
        sizes = json.loads(request.query['node_sizes'])
        resolution = float(request.query['resolution_parameter'])
        coms = algorithms.cpm(graph, membership, edge_weights, sizes, resolution)
        payload = dict(status='success', data=json.loads(coms.to_json()))
        await __save_communities(coms, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def significance_communities(request):
    """
    ---
    description: This end-point allows to compute significance_communities Community Discovery algorithm to a network dataset.
    tags:
      - significance_communities
    produces:
      - application/json
    responses:
      "200":
        description: successful operation.
      "500":
        description: operation failed
    parameters:
      - in: query
        name: token
        schema:
          type: string
        required: true
        description: Experiment token
      - in: query
        name: initial_membership
        schema:
          type: list
        required: true
        description: list of int Initial membership for the partition. If :obj:`None` then defaults to a singleton partition. Deafault None
      - in: query
        name: node_sizes
        schema:
          type: list of integer
        required: float
        description: list of int, or vertex attribute Sizes of nodes are necessary to know the size of communities in aggregate graphs. Usually this is set to 1 for all nodes, but in specific cases this could be changed. Deafault None
    """
    # Reject requests whose experiment token has no backing directory.
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        membership = json.loads(request.query['initial_membership'])
        sizes = json.loads(request.query['node_sizes'])
        coms = algorithms.significance_communities(graph, membership, sizes)
        payload = dict(status='success', data=json.loads(coms.to_json()))
        await __save_communities(coms, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def surprise_communities(request):
    """
    ---
    description: This end-point allows to compute surprise_communities Community Discovery algorithm to a network dataset.
    tags:
      - surprise_communities
    produces:
      - application/json
    responses:
      "200":
        description: successful operation.
      "500":
        description: operation failed
    parameters:
      - in: query
        name: token
        schema:
          type: string
        required: true
        description: Experiment token
      - in: query
        name: initial_membership
        schema:
          type: list
        required: true
        description: list of int Initial membership for the partition. If :obj:`None` then defaults to a singleton partition. Deafault None
      - in: query
        name: weights
        schema:
          type: list of double
        required: float
        description: or edge attribute Weights of edges. Can be either an iterable or an edge attribute. Deafault None
      - in: query
        name: node_sizes
        schema:
          type: list of integer
        required: float
        description: list of int, or vertex attribute Sizes of nodes are necessary to know the size of communities in aggregate graphs. Usually this is set to 1 for all nodes, but in specific cases this could be changed. Deafault None
    """
    # Reject requests whose experiment token has no backing directory.
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        membership = json.loads(request.query['initial_membership'])
        edge_weights = json.loads(request.query['weights'])
        sizes = json.loads(request.query['node_sizes'])
        coms = algorithms.surprise_communities(graph, membership, edge_weights, sizes)
        payload = dict(status='success', data=json.loads(coms.to_json()))
        await __save_communities(coms, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def greedy_modularity(request):
    """
    ---
    description: This end-point allows to compute greedy_modularity Community Discovery algorithm to a network dataset.
    tags:
      - greedy_modularity
    produces:
      - application/json
    responses:
      "200":
        description: successful operation.
      "500":
        description: operation failed
    parameters:
      - in: query
        name: token
        schema:
          type: string
        required: true
        description: Experiment token
      - in: query
        name: weights
        schema:
          type: list of double
        required: float
        description: or edge attribute Weights of edges. Can be either an iterable or an edge attribute. Deafault None
    """
    # Reject requests whose experiment token has no backing directory.
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        edge_weights = json.loads(request.query['weights'])
        coms = algorithms.greedy_modularity(graph, edge_weights)
        payload = dict(status='success', data=json.loads(coms.to_json()))
        await __save_communities(coms, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def infomap(request):
    """
    ---
    description: This end-point allows to compute infomap Community Discovery algorithm to a network dataset.
    tags:
      - infomap
    produces:
      - application/json
    responses:
      "200":
        description: successful operation.
      "500":
        description: operation failed
    parameters:
      - in: query
        name: token
        schema:
          type: string
        required: true
        description: Experiment token
    """
    # Reject requests whose experiment token has no backing directory.
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        coms = algorithms.infomap(graph)
        payload = dict(status='success', data=json.loads(coms.to_json()))
        await __save_communities(coms, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def walktrap(request):
    """
    ---
    description: This end-point allows to compute walktrap Community Discovery algorithm to a network dataset.
    tags:
      - walktrap
    produces:
      - application/json
    responses:
      "200":
        description: successful operation.
      "500":
        description: operation failed
    parameters:
      - in: query
        name: token
        schema:
          type: string
        required: true
        description: Experiment token
    """
    # Reject requests whose experiment token has no backing directory.
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        coms = algorithms.walktrap(graph)
        payload = dict(status='success', data=json.loads(coms.to_json()))
        await __save_communities(coms, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def label_propagation(request):
    """
    ---
    description: This end-point allows to compute label_propagation Community Discovery algorithm to a network dataset.
    tags:
      - label_propagation
    produces:
      - application/json
    responses:
      "200":
        description: successful operation.
      "500":
        description: operation failed
    parameters:
      - in: query
        name: token
        schema:
          type: string
        required: true
        description: Experiment token
    """
    # Reject requests whose experiment token has no backing directory.
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        coms = algorithms.label_propagation(graph)
        payload = dict(status='success', data=json.loads(coms.to_json()))
        await __save_communities(coms, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def async_fluid(request):
    """
    ---
    description: This end-point allows to compute async_fluid Community Discovery algorithm to a network dataset.
    tags:
      - async_fluid
    produces:
      - application/json
    responses:
      "200":
        description: successful operation.
      "500":
        description: operation failed
    parameters:
      - in: query
        name: token
        schema:
          type: string
        required: true
        description: Experiment token
      - in: query
        name: k
        schema:
          type: integer
        required: true
        description: Number of communities to search
    """
    # Reject requests whose experiment token has no backing directory.
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        n_communities = int(request.query['k'])
        coms = algorithms.async_fluid(graph, n_communities)
        payload = dict(status='success', data=json.loads(coms.to_json()))
        await __save_communities(coms, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def der(request):
    """
    ---
    description: This end-point allows to compute der Community Discovery algorithm to a network dataset.
    tags:
      - der
    produces:
      - application/json
    responses:
      "200":
        description: successful operation.
      "500":
        description: operation failed
    parameters:
      - in: query
        name: token
        schema:
          type: string
        required: true
        description: Experiment token
      - in: query
        name: walk_len
        schema:
          type: integer
        required: true
        description: length of the random walk, default 3
      - in: query
        name: threshold
        schema:
          type: float
        required: true
        description: threshold for stop criteria; if the likelihood_diff is less than threshold tha algorithm stops, default 0.00001
      - in: query
        name: iter_bound
        schema:
          type: integer
        required: true
        description: maximum number of iteration, default 50
    """
    # Reject requests whose experiment token has no backing directory.
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        walk_length = int(request.query['walk_len'])
        stop_threshold = float(request.query['threshold'])
        max_iterations = int(request.query['iter_bound'])
        coms = algorithms.der(graph, walk_length, stop_threshold, max_iterations)
        payload = dict(status='success', data=json.loads(coms.to_json()))
        await __save_communities(coms, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def frc_fgsn(request):
    """
    ---
    description: This end-point allows to compute frc_fgsn Community Discovery algorithm to a network dataset.
    tags:
      - frc_fgsn
    produces:
      - application/json
    responses:
      "200":
        description: successful operation.
      "500":
        description: operation failed
    parameters:
      - in: query
        name: token
        schema:
          type: string
        required: true
        description: Experiment token
      - in: query
        name: theta
        schema:
          type: float
        required: true
        description: community density coefficient
      - in: query
        name: eps
        schema:
          type: float
        required: true
        description: coupling coefficient of the community. Ranges in [0, 1], small values ensure that only strongly connected node granules are merged togheter.
      - in: query
        name: r
        schema:
          type: integer
        required: true
        description: radius of the granule
    """
    # Reject requests whose experiment token has no backing directory.
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    graph = await __load_network(request)
    try:
        density = float(request.query['theta'])
        coupling = float(request.query['eps'])
        radius = int(request.query['r'])
        coms = algorithms.frc_fgsn(graph, density, coupling, radius)
        payload = dict(status='success', data=json.loads(coms.to_json()))
        await __save_communities(coms, request)
        return web.Response(text=json.dumps(payload), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def make_app():
    """Build the aiohttp application.

    Registers the experiment-management, evaluation, and community-discovery
    routes, then mounts the swagger UI. The swagger spec is generated by
    aiohttp_swagger from the YAML block in each handler's docstring.
    """
    app = web.Application()
    # add the routes
    app.add_routes([
        # Experiment lifecycle and data upload.
        web.get('/api/create_experiment', create_experiment),
        web.post('/api/destroy_experiment', destroy_experiment),
        web.post('/api/upload/network', upload_network),
        # Partition evaluation endpoints.
        web.post('/api/evaluation/fitness_scores', community_statistics),
        web.post('/api/evaluation/community_comparison', community_comparison),
        # Community-discovery algorithms: one POST endpoint per algorithm.
        web.post('/api/cd/angel', angel),
        web.post('/api/cd/demon', demon),
        web.post('/api/cd/ego_networks', ego_networks),
        web.post('/api/cd/node_perception', node_perception),
        web.post('/api/cd/overlapping_seed_set_expansion', overlapping_seed_set_expansion),
        web.post('/api/cd/kclique', kclique),
        web.post('/api/cd/lfm', lfm),
        web.post('/api/cd/lais2', lais2),
        web.post('/api/cd/congo', congo),
        web.post('/api/cd/conga', conga),
        web.post('/api/cd/lemon', lemon),
        web.post('/api/cd/slpa', slpa),
        web.post('/api/cd/multicom', multicom),
        web.post('/api/cd/big_clam', big_clam),
        web.post('/api/cd/girvan_newman', girvan_newman),
        web.post('/api/cd/em', em),
        web.post('/api/cd/scan', scan),
        web.post('/api/cd/gdmp2', gdmp2),
        web.post('/api/cd/spinglass', spinglass),
        web.post('/api/cd/eigenvector', eigenvector),
        web.post('/api/cd/agdl', agdl),
        web.post('/api/cd/louvain', louvain),
        web.post('/api/cd/leiden', leiden),
        web.post('/api/cd/rb_pots', rb_pots),
        web.post('/api/cd/rber_pots', rber_pots),
        web.post('/api/cd/cpm', cpm),
        web.post('/api/cd/significance_communities', significance_communities),
        web.post('/api/cd/surprise_communities', surprise_communities),
        web.post('/api/cd/greedy_modularity', greedy_modularity),
        web.post('/api/cd/infomap', infomap),
        web.post('/api/cd/walktrap', walktrap),
        web.post('/api/cd/label_propagation', label_propagation),
        web.post('/api/cd/async_fluid', async_fluid),
        web.post('/api/cd/der', der),
        web.post('/api/cd/frc_fgsn', frc_fgsn)
    ])
    # Interactive API documentation served at /api/v1/doc.
    setup_swagger(app, swagger_url="/api/v1/doc", description="",
                  title="CDlib Server API",
                  api_version="0.1.4",
                  contact="<EMAIL>")
    return app
if __name__ == '__main__':
    # Serve the API on every interface, port 8081.
    web.run_app(make_app(), port=8081, host="0.0.0.0")
| import json
import os
import shutil
import uuid
from aiohttp import web
from aiohttp_swagger import *
from cdlib import algorithms, readwrite
from networkx.readwrite import json_graph
def __unpack_stats(stats):
    """Turn a (min, max, mean, std) sequence into a labelled dict."""
    minimum, maximum, mean, std = stats[0], stats[1], stats[2], stats[3]
    return {"min": minimum, "max": maximum, "mean": mean, "std": std}
def __check_token(request):
    """Validate the experiment token; return (200, None) or (500, error dict).

    A token is considered valid iff its experiment directory exists on disk.
    """
    token = request.query['token']
    if os.path.exists(f"../data/db/{token}"):
        return 200, None
    return 500, dict(status='failure', description="token not valid")
async def __save_communities(communities, request):
    """Persist a clustering inside the experiment directory.

    The file name combines the algorithm name and its parameters, so each
    distinct run is stored separately.
    """
    token = request.query['token']
    target = f"../data/db/{token}/{communities.method_name}_{communities.method_parameters}"
    readwrite.write_community_json(communities, target)
async def __save_network(request):
    """Store the raw JSON network string of the experiment on disk."""
    token = request.query['token']
    payload = request.query['network']
    with open(f"../data/db/{token}/network.json", "w") as handle:
        handle.write(payload)
async def __load_network(request):
    """Reload the experiment's network as a networkx graph.

    Reads the node-link JSON written by ``__save_network`` and rebuilds the
    graph with ``json_graph.node_link_graph``.
    """
    token = request.query['token']
    # Fix: the original `json.load(open(...))` never closed the file handle;
    # a context manager releases it deterministically.
    with open(f"../data/db/{token}/network.json") as handle:
        data = json.load(handle)
    network = json_graph.node_link_graph(data)
    return network
async def __load_communities(request) -> list:
    """Load every clustering named in the 'community_names' query parameter."""
    token = request.query['token']
    names = json.loads(request.query['community_names'])
    return [readwrite.read_community_json(f"../data/db/{token}/{name}")
            for name in names]
def create_experiment(request):
    """
    ---
    description: This end-point allows to create a new experiment.
    tags:
      - Create Experiment
    produces:
      - text/plain
    responses:
      "200":
        description: successful operation. Return experiment "token"
    """
    # A fresh UUID doubles as the token and the on-disk directory name.
    token = str(uuid.uuid4())
    path = f"../data/db/{token}"
    if not os.path.exists(path):
        os.makedirs(path)
    body = dict(status="success", data={"token": token})
    return web.Response(text=json.dumps(body), status=200)
def destroy_experiment(request):
    """
    ---
    description: This end-point allows to destroy an existing experiment.
    tags:
      - Destroy Experiment
    produces:
      - text/plain
    responses:
      "200":
        description: successful operation.
    parameters:
      - in: query
        name: token
        schema:
          type: string
        required: true
        description: Experiment token
    """
    token = request.query['token']
    path = f"../data/db/{token}"
    # Removing the directory discards the network and every stored clustering.
    if os.path.exists(path):
        shutil.rmtree(path)
        return web.Response(text=json.dumps(dict(status="success")), status=200)
    failure = dict(status="failure", description="token not valid")
    return web.Response(text=json.dumps(failure), status=500)
async def upload_network(request):
    """
    ---
    description: This end-point allows to upload a network dataset.
    tags:
      - Upload Network
    produces:
      - text/plain
    responses:
      "200":
        description: successful operation.
      "500":
        description: operation failed.
    parameters:
      - in: query
        name: token
        schema:
          type: string
        required: true
        description: Experiment token
      - in: query
        name: network
        schema:
          type: string
        required: true
        description: JSON string representing a networkx.Graph object
    """
    # Reject requests whose experiment token has no backing directory.
    status, err = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(err), status=500)
    await __save_network(request)
    return web.Response(text=json.dumps(dict(status="success")), status=200)
async def community_comparison(request):
    """
    ---
    description: This end-point allows to compare two clusterings applying several state of art scores.
    tags:
      - Community Comparisons
    produces:
      - text/plain
    responses:
      "200":
        description: successful operation.
      "500":
        description: operation failed.
    parameters:
      - in: query
        name: token
        schema:
          type: string
        required: true
        description: Experiment token
      - in: query
        name: partition_names
        schema:
          type: string
        required: false
        description: Name of the partitions
    """
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    g = await __load_network(request)
    coms = await __load_communities(request)
    # Idiom fix: "< 2 or > 2" is just "!= 2".
    if len(coms) != 2:
        response_obj = dict(status='failure',
                            description='to perform the comparison exactly two clusterings are required')
        return web.Response(text=json.dumps(response_obj), status=500)
    com1, com2 = coms
    # Re-attach the graph: clusterings are stored without it.
    com1.graph = g
    com2.graph = g
    try:
        f1 = com1.f1(com2)
        # Overlap-safe scores are always computed.
        data = dict(onmi=com1.overlapping_normalized_mutual_information(com2), omega=com1.omega(com2),
                    f1=dict(mean=f1[0], std=f1[1]), nf1=com1.nf1(com2))
        # Crisp-partition scores are only defined when neither clustering overlaps.
        if not com1.overlap and not com2.overlap:
            crisp = dict(nmi=com1.normalized_mutual_information(com2),
                         ari=com1.adjusted_rand_index(com2), ami=com1.adjusted_mutual_information(com2),
                         vi=com1.variation_of_information(com2))
            data = {**data, **crisp}
        response_obj = dict(status='success', data=data)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def community_statistics(request):
    """
    ---
    description: This end-point allows to compute aggregate statistics for the computed partition.
    tags:
      - Fitness Scores
    produces:
      - application/json
    responses:
      "200":
        description: successful operation.
      "500":
        description: operation failed
    parameters:
      - in: query
        name: token
        schema:
          type: string
        required: true
        description: Experiment token
      - in: query
        name: partition_names
        schema:
          type: string
        required: false
        description: Name of the partitions
      - in: query
        name: summary
        schema:
          type: string
        required: false
        description: Whether or not to return an aggregated view of community-wise statistics
    """
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    g = await __load_network(request)
    coms = await __load_communities(request)
    data = {}
    # Fix: 'summary' is documented as optional (required: false), but the
    # original `request.query['summary']` raised KeyError -> HTTP 500 when it
    # was omitted. Default to the aggregated view. Also hoisted out of the
    # loop: the value is loop-invariant.
    summary = request.query.get('summary', "True")
    try:
        for com in coms:
            # Re-attach the graph: clusterings are stored without it.
            com.graph = g
            # Partition-level fitness scores (single value each).
            simple = dict(er_modularity=com.erdos_renyi_modularity(), modularity_density=com.modularity_density(),
                          gn_modularity=com.newman_girvan_modularity(), z_modularity=com.z_modularity(),
                          link_modularity=com.link_modularity(), surprise=com.surprise(),
                          significance=com.significance())
            if summary == "True":
                # Community-wise scores aggregated to (min, max, mean, std).
                composed = dict(size=__unpack_stats(com.size()), conductance=__unpack_stats(com.conductance()),
                                normalized_cut=__unpack_stats(com.normalized_cut()),
                                triangle_participation_ratio=__unpack_stats(com.triangle_participation_ratio()),
                                max_odf=__unpack_stats(com.max_odf()), avg_odf=__unpack_stats(com.avg_odf()),
                                flake_odf=__unpack_stats(com.flake_odf()),
                                edges_inside=__unpack_stats(com.edges_inside()),
                                fraction_over_median_degree=__unpack_stats(com.fraction_over_median_degree()),
                                expansion=__unpack_stats(com.expansion()), cut_ratio=__unpack_stats(com.cut_ratio()),
                                internal_edge_density=__unpack_stats(com.internal_edge_density()),
                                average_internal_degree=__unpack_stats(com.average_internal_degree()))
            else:
                # One raw value per community.
                composed = dict(size=com.size(summary=False), conductance=com.conductance(summary=False),
                                normalized_cut=com.normalized_cut(summary=False),
                                triangle_participation_ratio=com.triangle_participation_ratio(summary=False),
                                max_odf=com.max_odf(summary=False), avg_odf=com.avg_odf(summary=False),
                                flake_odf=com.flake_odf(summary=False), edges_inside=com.edges_inside(summary=False),
                                fraction_over_median_degree=com.fraction_over_median_degree(summary=False),
                                expansion=com.expansion(summary=False), cut_ratio=com.cut_ratio(summary=False),
                                internal_edge_density=com.internal_edge_density(summary=False),
                                average_internal_degree=com.average_internal_degree(summary=False))
            data[f"{com.method_name}_{com.method_parameters}"] = {**simple, **composed}
        response_obj = dict(status='success', data=data)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def angel(request):
    """
    ---
    description: This end-point allows to compute the Angel Community Discovery algorithm to a network dataset.
    tags:
      - Angel
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: threshold
      schema:
        type: float
      required: true
      description: merging threshold
    - in: query
      name: min_com_size
      schema:
        type: integer
      required: true
      description: minimum community size
    """
    # Reject requests carrying an invalid experiment token up front.
    token_code, token_resp = __check_token(request)
    if token_code == 500:
        return web.Response(text=json.dumps(token_resp), status=500)
    net = await __load_network(request)
    try:
        merge_threshold = float(request.query['threshold'])
        smallest_community = int(request.query['min_com_size'])
        coms = algorithms.angel(net, merge_threshold, smallest_community)
        payload = json.loads(coms.to_json())
        await __save_communities(coms, request)
        return web.Response(text=json.dumps(dict(status='success', data=payload)), status=200)
    except Exception as exc:
        # Any parsing or algorithmic failure is reported as a 500 with its message.
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def demon(request):
    """
    ---
    description: This end-point allows to compute the Demon Community Discovery algorithm to a network dataset.
    tags:
      - Demon
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: epsilon
      schema:
        type: float
      required: true
      description: merging threshold
    - in: query
      name: min_com_size
      schema:
        type: integer
      required: true
      description: minimum community size
    """
    # Validate the experiment token before any expensive work.
    token_code, token_resp = __check_token(request)
    if token_code == 500:
        return web.Response(text=json.dumps(token_resp), status=500)
    net = await __load_network(request)
    try:
        epsilon = float(request.query['epsilon'])
        smallest_community = int(request.query['min_com_size'])
        coms = algorithms.demon(net, epsilon, smallest_community)
        payload = json.loads(coms.to_json())
        await __save_communities(coms, request)
        return web.Response(text=json.dumps(dict(status='success', data=payload)), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def kclique(request):
    """
    ---
    description: This end-point allows to compute kclique Community Discovery algorithm to a network dataset.
    tags:
      - Kclique
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: k
      schema:
        type: integer
      required: true
      description: Size of smallest clique
    """
    status, payload = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(payload), status=500)
    graph = await __load_network(request)
    try:
        clique_size = int(request.query['k'])
        result = algorithms.kclique(graph, clique_size)
        await __save_communities(result, request)
        # Serialize the clustering through its own JSON representation.
        body = dict(status='success', data=json.loads(result.to_json()))
        return web.Response(text=json.dumps(body), status=200)
    except Exception as err:
        return web.Response(text=json.dumps(dict(status='failure', description=str(err))), status=500)
async def lfm(request):
    """
    ---
    description: This end-point allows to compute Lfm Community Discovery algorithm to a network dataset.
    tags:
      - LFM
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: alpha
      schema:
        type: float
      required: true
      description: controls the size of the communities. Large values of alpha yield very small communities, small values instead deliver large modules. If alpha is small enough, all nodes end up in the same cluster, the network itself. In most cases, for alpha < 0.5 there is only one community, for alpha > 2 one recovers the smallest communities. A natural choice is alpha = 1.
    """
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    g = await __load_network(request)
    try:
        alpha = float(request.query['alpha'])  # resolution parameter; see docstring for its effect
        communities = algorithms.lfm(g, alpha)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def ego_networks(request):
    """
    ---
    description: This end-point allows to compute the Ego-networks Community Discovery algorithm to a network dataset.
    tags:
      - Ego-networks
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: level
      schema:
        type: integer
      required: true
      description: extract communities with all neighbors of distance<=level from a node.
    """
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    g = await __load_network(request)
    try:
        com_level = int(request.query['level'])  # neighborhood radius for each ego network
        communities = algorithms.ego_networks(g, com_level)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def overlapping_seed_set_expansion(request):
    """
    ---
    description: This end-point allows to compute the OSSE Community Discovery algorithm to a network dataset.
    tags:
      - Overlapping seed set expansion
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: seeds
      schema:
        type: list
      required: true
      description: Node list.
    - in: query
      name: ninf
      schema:
        type: boolean
      required: true
      description: Neighbourhood Inflation parameter.
    - in: query
      name: expansion
      schema:
        type: string
      required: true
      description: Seed expansion method (ppr or vppr).
    - in: query
      name: stopping
      schema:
        type: string
      required: true
      description: Stopping criteria (cond).
    - in: query
      name: nworkers
      schema:
        type: integer
      required: true
      description: Number of Workers.
    - in: query
      name: nruns
      schema:
        type: integer
      required: true
      description: Number of runs.
    - in: query
      name: alpha
      schema:
        type: float
      required: true
      description: alpha value for Personalized PageRank expansion.
    - in: query
      name: maxexpand
      schema:
        type: float
      required: false
      description: Maximum expansion allowed for approximate ppr (defaults to INF when omitted).
    - in: query
      name: delta
      schema:
        type: float
      required: true
      description: Minimum distance parameter for near duplicate communities.
    """
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    g = await __load_network(request)
    try:
        seeds = json.loads(request.query['seeds'])
        # Any value other than the literal string "False" enables neighbourhood inflation.
        ninf = str(request.query['ninf']) != "False"
        expansion = str(request.query['expansion'])
        nworkers = int(request.query['nworkers'])
        stopping = str(request.query['stopping'])
        nruns = int(request.query['nruns'])
        alpha = float(request.query['alpha'])
        # Bug fix: 'maxexpand' was documented as a query parameter but previously
        # ignored (hard-coded to INF). It is now honoured when supplied; omitting
        # it preserves the old behaviour (float('inf')).
        maxexpand = float(request.query.get('maxexpand', 'inf'))
        delta = float(request.query['delta'])
        communities = algorithms.overlapping_seed_set_expansion(g, seeds, ninf, expansion, stopping, nworkers, nruns,
                                                                alpha, maxexpand, delta)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def lais2(request):
    """
    ---
    description: This end-point allows to compute Lais2 Community Discovery algorithm to a network dataset.
    tags:
      - Lais2
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    """
    # Lais2 is parameter-free: only the token and the stored network are needed.
    token_code, token_resp = __check_token(request)
    if token_code == 500:
        return web.Response(text=json.dumps(token_resp), status=500)
    net = await __load_network(request)
    try:
        coms = algorithms.lais2(net)
        payload = json.loads(coms.to_json())
        await __save_communities(coms, request)
        return web.Response(text=json.dumps(dict(status='success', data=payload)), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def congo(request):
    """
    ---
    description: This end-point allows to compute Congo Community Discovery algorithm to a network dataset.
    tags:
      - Congo
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: number_communities
      schema:
        type: integer
      required: true
      description: the number of communities desired
    - in: query
      name: height
      schema:
        type: integer
      required: true
      description: The length of the longest shortest paths that CONGO considers
    """
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    g = await __load_network(request)
    try:
        number_communities = int(request.query['number_communities'])
        height = int(request.query['height'])  # shortest-path horizon used by CONGO
        communities = algorithms.congo(g, number_communities, height)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def conga(request):
    """
    ---
    description: This end-point allows to compute Conga Community Discovery algorithm to a network dataset.
    tags:
      - Conga
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: number_communities
      schema:
        type: integer
      required: true
      description: the number of communities desired
    """
    status, payload = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(payload), status=500)
    graph = await __load_network(request)
    try:
        target_count = int(request.query['number_communities'])
        result = algorithms.conga(graph, target_count)
        await __save_communities(result, request)
        body = dict(status='success', data=json.loads(result.to_json()))
        return web.Response(text=json.dumps(body), status=200)
    except Exception as err:
        return web.Response(text=json.dumps(dict(status='failure', description=str(err))), status=500)
async def lemon(request):
    """
    ---
    description: This end-point allows to compute Lemon Community Discovery algorithm to a network dataset.
    tags:
      - Lemon
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: seeds
      schema:
        type: list
      required: true
      description: Node list
    - in: query
      name: min_com_size
      schema:
        type: integer
      required: true
      description: the minimum size of a single community in the network
    - in: query
      name: max_com_size
      schema:
        type: integer
      required: true
      description: the maximum size of a single community in the network
    - in: query
      name: expand_step
      schema:
        type: integer
      required: true
      description: the step of seed set increasement during expansion process
    - in: query
      name: subspace_dim
      schema:
        type: integer
      required: true
      description: dimension of the subspace; choosing a large dimension is undesirable because it would increase the computation cost of generating local spectra
    - in: query
      name: walk_steps
      schema:
        type: integer
      required: true
      description: the number of step for the random walk
    - in: query
      name: biased
      schema:
        type: boolean
      required: true
      description: set if the random walk starting from seed nodes
    """
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    g = await __load_network(request)
    try:
        # Parse all query parameters together, in the order lemon() expects them.
        seeds = json.loads(request.query['seeds'])
        min_com_size = int(request.query['min_com_size'])
        max_com_size = int(request.query['max_com_size'])
        expand_step = int(request.query['expand_step'])
        subspace_dim = int(request.query['subspace_dim'])
        walk_steps = int(request.query['walk_steps'])
        # Any value other than the literal string "False" enables the biased walk.
        biased = str(request.query['biased']) != "False"
        communities = algorithms.lemon(g, seeds, min_com_size, max_com_size, expand_step, subspace_dim, walk_steps,
                                       biased)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def slpa(request):
    """
    ---
    description: This end-point allows to compute slpa Community Discovery algorithm to a network dataset.
    tags:
      - slpa
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: t
      schema:
        type: integer
      required: true
      description: maximum number of iterations
    - in: query
      name: r
      schema:
        type: float
      required: true
      description: threshold ∈ [0, 1]. It is used in the post-processing stage: if the probability of seeing a particular label during the whole process is less than r, this label is deleted from a node’s memory.
    """
    token_code, token_resp = __check_token(request)
    if token_code == 500:
        return web.Response(text=json.dumps(token_resp), status=500)
    net = await __load_network(request)
    try:
        max_iterations = int(request.query['t'])
        label_threshold = float(request.query['r'])
        coms = algorithms.slpa(net, max_iterations, label_threshold)
        payload = json.loads(coms.to_json())
        await __save_communities(coms, request)
        return web.Response(text=json.dumps(dict(status='success', data=payload)), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def multicom(request):
    """
    ---
    description: This end-point allows to compute multicom Community Discovery algorithm to a network dataset.
    tags:
      - multicom
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: seed_node
      schema:
        type: integer
      required: true
      description: Id of the seed node around which we want to detect communities
    """
    status, payload = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(payload), status=500)
    graph = await __load_network(request)
    try:
        # The seed node id is JSON-decoded so numeric ids arrive typed correctly.
        seed_node = json.loads(request.query['seed_node'])
        result = algorithms.multicom(graph, seed_node)
        await __save_communities(result, request)
        body = dict(status='success', data=json.loads(result.to_json()))
        return web.Response(text=json.dumps(body), status=200)
    except Exception as err:
        return web.Response(text=json.dumps(dict(status='failure', description=str(err))), status=500)
async def big_clam(request):
    """
    ---
    description: This end-point allows to compute big_clam Community Discovery algorithm to a network dataset.
    tags:
      - Big_clam
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: number_communities
      schema:
        type: integer
      required: true
      description: number communities desired
    """
    status, payload = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(payload), status=500)
    graph = await __load_network(request)
    try:
        target_count = int(request.query['number_communities'])
        result = algorithms.big_clam(graph, target_count)
        await __save_communities(result, request)
        body = dict(status='success', data=json.loads(result.to_json()))
        return web.Response(text=json.dumps(body), status=200)
    except Exception as err:
        return web.Response(text=json.dumps(dict(status='failure', description=str(err))), status=500)
async def node_perception(request):
    """
    ---
    description: This end-point allows to compute the Node Perception Community Discovery algorithm to a network dataset.
    tags:
      - Node Perception
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: threshold
      schema:
        type: float
      required: true
      description: the tolerance required in order to merge communities.
    - in: query
      name: overlap_threshold
      schema:
        type: float
      required: true
      description: the overlap tolerance.
    - in: query
      name: min_comm_size
      schema:
        type: integer
      required: true
      description: minimum community size.
    """
    token_code, token_resp = __check_token(request)
    if token_code == 500:
        return web.Response(text=json.dumps(token_resp), status=500)
    net = await __load_network(request)
    try:
        merge_tolerance = float(request.query['threshold'])
        overlap_tolerance = float(request.query['overlap_threshold'])
        smallest_community = int(request.query['min_comm_size'])
        coms = algorithms.node_perception(net, merge_tolerance, overlap_tolerance, smallest_community)
        payload = json.loads(coms.to_json())
        await __save_communities(coms, request)
        return web.Response(text=json.dumps(dict(status='success', data=payload)), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
# crisp partition
async def girvan_newman(request):
    """
    ---
    description: This end-point allows to compute girvan_newman Community Discovery algorithm to a network dataset.
    tags:
      - girvan_newman
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: level
      schema:
        type: integer
      required: true
      description: the level where to cut the dendrogram
    """
    token_code, token_resp = __check_token(request)
    if token_code == 500:
        return web.Response(text=json.dumps(token_resp), status=500)
    net = await __load_network(request)
    try:
        dendrogram_level = int(request.query['level'])
        coms = algorithms.girvan_newman(net, dendrogram_level)
        payload = json.loads(coms.to_json())
        await __save_communities(coms, request)
        return web.Response(text=json.dumps(dict(status='success', data=payload)), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def em(request):
    """
    ---
    description: This end-point allows to compute EM Community Discovery algorithm to a network dataset.
    tags:
      - EM
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: k
      schema:
        type: integer
      required: true
      description: the number of desired communities
    """
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    g = await __load_network(request)
    try:
        k = int(request.query['k'])  # target number of communities
        communities = algorithms.em(g, k)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def scan(request):
    """
    ---
    description: This end-point allows to compute Scan Community Discovery algorithm to a network dataset.
    tags:
      - Scan
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: epsilon
      schema:
        type: float
      required: true
      description: the minimum threshold to assigning cluster membership
    - in: query
      name: mu
      schema:
        type: integer
      required: true
      description: minimum number of neighbors with a structural similarity that exceeds the threshold epsilon
    """
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    g = await __load_network(request)
    try:
        epsilon = float(request.query['epsilon'])  # structural-similarity threshold
        mu = int(request.query['mu'])  # minimum similar neighbors for a core node
        communities = algorithms.scan(g, epsilon, mu)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def gdmp2(request):
    """
    ---
    description: This end-point allows to compute gdmp2 Community Discovery algorithm to a network dataset.
    tags:
      - gdmp2
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: min_threshold
      schema:
        type: float
      required: true
      description: the minimum density threshold parameter to control the density of the output subgraphs
    """
    status, payload = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(payload), status=500)
    graph = await __load_network(request)
    try:
        density_threshold = float(request.query['min_threshold'])
        result = algorithms.gdmp2(graph, density_threshold)
        await __save_communities(result, request)
        body = dict(status='success', data=json.loads(result.to_json()))
        return web.Response(text=json.dumps(body), status=200)
    except Exception as err:
        return web.Response(text=json.dumps(dict(status='failure', description=str(err))), status=500)
async def spinglass(request):
    """
    ---
    description: This end-point allows to compute spinglass Community Discovery algorithm to a network dataset.
    tags:
      - spinglass
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    """
    # Spinglass takes no extra query parameters beyond the experiment token.
    token_code, token_resp = __check_token(request)
    if token_code == 500:
        return web.Response(text=json.dumps(token_resp), status=500)
    net = await __load_network(request)
    try:
        coms = algorithms.spinglass(net)
        payload = json.loads(coms.to_json())
        await __save_communities(coms, request)
        return web.Response(text=json.dumps(dict(status='success', data=payload)), status=200)
    except Exception as exc:
        failure = dict(status='failure', description=str(exc))
        return web.Response(text=json.dumps(failure), status=500)
async def eigenvector(request):
    """
    ---
    description: This end-point allows to compute eigenvector Community Discovery algorithm to a network dataset.
    tags:
      - eigenvector
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    """
    status, payload = __check_token(request)
    if status == 500:
        return web.Response(text=json.dumps(payload), status=500)
    graph = await __load_network(request)
    try:
        result = algorithms.eigenvector(graph)
        await __save_communities(result, request)
        body = dict(status='success', data=json.loads(result.to_json()))
        return web.Response(text=json.dumps(body), status=200)
    except Exception as err:
        return web.Response(text=json.dumps(dict(status='failure', description=str(err))), status=500)
async def agdl(request):
    """
    ---
    description: This end-point allows to compute agdl Community Discovery algorithm to a network dataset.
    tags:
      - agdl
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: number_communities
      schema:
        type: integer
      required: true
      description: number of communities
    - in: query
      name: number_neighbors
      schema:
        type: integer
      required: true
      description: Number of neighbors to use for KNN
    - in: query
      name: kc
      schema:
        type: integer
      required: true
      description: size of the neighbor set for each cluster
    - in: query
      name: a
      schema:
        type: float
      required: true
      description: range(-infinity;+infinity)
    """
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    g = await __load_network(request)
    try:
        number_communities = int(request.query['number_communities'])
        number_neighbors = int(request.query['number_neighbors'])  # KNN neighborhood size
        kc = int(request.query['kc'])  # per-cluster neighbor-set size
        a = float(request.query['a'])
        communities = algorithms.agdl(g, number_communities, number_neighbors, kc, a)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def louvain(request):
    """
    ---
    description: This end-point allows to compute louvain Community Discovery algorithm to a network dataset.
    tags:
      - louvain
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: weight
      schema:
        type: string
      required: true
      description: optional the key in graph to use as weight
    - in: query
      name: resolution
      schema:
        type: float
      required: true
      description: Will change the size of the communities, default to 1.
    - in: query
      name: randomize
      schema:
        type: boolean
      required: true
      description: Will randomize the node evaluation order and the community evaluation order to get different partitions at each call, default False
    """
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    g = await __load_network(request)
    try:
        weight = str(request.query['weight'])  # edge attribute used as weight
        resolution = float(request.query['resolution'])  # community-size control, 1.0 is standard
        # Any value other than the literal string "False" enables randomization.
        randomize = str(request.query['randomize']) != "False"
        communities = algorithms.louvain(g, weight, resolution, randomize)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def leiden(request):
    """
    ---
    description: This end-point allows to compute leiden Community Discovery algorithm to a network dataset.
    tags:
      - leiden
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: initial_membership
      schema:
        type: list
      required: true
      description: list of int Initial membership for the partition. If :obj:`None` then defaults to a singleton partition. Default None
    - in: query
      name: weights
      schema:
        type: list of double
      required: true
      description: or edge attribute Weights of edges. Can be either an iterable or an edge attribute. Default None
    """
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    g = await __load_network(request)
    try:
        # Both parameters are JSON-encoded in the query string; 'null' decodes to None.
        initial_membership = json.loads(request.query['initial_membership'])
        weights = json.loads(request.query['weights'])
        communities = algorithms.leiden(g, initial_membership, weights)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def rb_pots(request):
    """
    ---
    description: This end-point allows to compute rb_pots Community Discovery algorithm to a network dataset.
    tags:
      - rb_pots
    produces:
      - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: initial_membership
      schema:
        type: list
      required: true
      description: list of int Initial membership for the partition. If :obj:`None` then defaults to a singleton partition. Default None
    - in: query
      name: weights
      schema:
        type: list of double
      required: true
      description: or edge attribute Weights of edges. Can be either an iterable or an edge attribute. Default None
    - in: query
      name: resolution_parameter
      schema:
        type: double
      required: true
      description: double >0 A parameter value controlling the coarseness of the clustering. Higher resolutions lead to more communities, while lower resolutions lead to fewer communities. Default 1
    """
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    g = await __load_network(request)
    try:
        # JSON-encoded query values; 'null' decodes to None for the defaults.
        initial_membership = json.loads(request.query['initial_membership'])
        weights = json.loads(request.query['weights'])
        resolution_parameter = float(request.query['resolution_parameter'])  # >0, coarseness control
        communities = algorithms.rb_pots(g, initial_membership, weights, resolution_parameter)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def rber_pots(request):
    """
    ---
    description: This end-point allows to compute the rber_pots Community Discovery algorithm on a network dataset.
    tags:
    - rber_pots
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: initial_membership
      schema:
        type: list
      required: true
      description: list of int. Initial membership for the partition. If None, defaults to a singleton partition. Default None
    - in: query
      name: weights
      schema:
        type: list of double
      required: true
      description: list of double, or edge attribute. Weights of edges. Can be either an iterable or an edge attribute. Default None
    - in: query
      name: node_sizes
      schema:
        type: list of integer
      required: true
      description: list of int, or vertex attribute. Sizes of nodes are necessary to know the size of communities in aggregate graphs. Usually this is set to 1 for all nodes, but in specific cases this could be changed. Default None
    - in: query
      name: resolution_parameter
      schema:
        type: double
      required: true
      description: double >0. A parameter value controlling the coarseness of the clustering. Higher resolutions lead to more communities, while lower resolutions lead to fewer communities. Default 1
    """
    # Reject the request early if the experiment token is invalid.
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    try:
        # Load the network inside the try block so that loading failures are
        # reported with the same JSON failure envelope as algorithm errors.
        g = await __load_network(request)
        initial_membership = json.loads(request.query['initial_membership'])
        weights = json.loads(request.query['weights'])
        node_sizes = json.loads(request.query['node_sizes'])
        resolution_parameter = float(request.query['resolution_parameter'])
        communities = algorithms.rber_pots(g, initial_membership, weights, node_sizes, resolution_parameter)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        # Persist the clustering so evaluation end-points can reuse it.
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        # Any failure (missing/bad parameters, algorithm error) is reported
        # as a JSON payload with a 500 status code.
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def cpm(request):
    """
    ---
    description: This end-point allows to compute the cpm Community Discovery algorithm on a network dataset.
    tags:
    - cpm
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: initial_membership
      schema:
        type: list
      required: true
      description: list of int. Initial membership for the partition. If None, defaults to a singleton partition. Default None
    - in: query
      name: weights
      schema:
        type: list of double
      required: true
      description: list of double, or edge attribute. Weights of edges. Can be either an iterable or an edge attribute. Default None
    - in: query
      name: node_sizes
      schema:
        type: list of integer
      required: true
      description: list of int, or vertex attribute. Sizes of nodes are necessary to know the size of communities in aggregate graphs. Usually this is set to 1 for all nodes, but in specific cases this could be changed. Default None
    - in: query
      name: resolution_parameter
      schema:
        type: double
      required: true
      description: double >0. A parameter value controlling the coarseness of the clustering. Higher resolutions lead to more communities, while lower resolutions lead to fewer communities. Default 1
    """
    # Reject the request early if the experiment token is invalid.
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    try:
        # Load the network inside the try block so that loading failures are
        # reported with the same JSON failure envelope as algorithm errors.
        g = await __load_network(request)
        initial_membership = json.loads(request.query['initial_membership'])
        weights = json.loads(request.query['weights'])
        node_sizes = json.loads(request.query['node_sizes'])
        resolution_parameter = float(request.query['resolution_parameter'])
        communities = algorithms.cpm(g, initial_membership, weights, node_sizes, resolution_parameter)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        # Persist the clustering so evaluation end-points can reuse it.
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        # Any failure (missing/bad parameters, algorithm error) is reported
        # as a JSON payload with a 500 status code.
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def significance_communities(request):
    """
    ---
    description: This end-point allows to compute the significance_communities Community Discovery algorithm on a network dataset.
    tags:
    - significance_communities
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: initial_membership
      schema:
        type: list
      required: true
      description: list of int. Initial membership for the partition. If None, defaults to a singleton partition. Default None
    - in: query
      name: node_sizes
      schema:
        type: list of integer
      required: true
      description: list of int, or vertex attribute. Sizes of nodes are necessary to know the size of communities in aggregate graphs. Usually this is set to 1 for all nodes, but in specific cases this could be changed. Default None
    """
    # Reject the request early if the experiment token is invalid.
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    try:
        # Load the network inside the try block so that loading failures are
        # reported with the same JSON failure envelope as algorithm errors.
        g = await __load_network(request)
        initial_membership = json.loads(request.query['initial_membership'])
        node_sizes = json.loads(request.query['node_sizes'])
        communities = algorithms.significance_communities(g, initial_membership, node_sizes)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        # Persist the clustering so evaluation end-points can reuse it.
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        # Any failure (missing/bad parameters, algorithm error) is reported
        # as a JSON payload with a 500 status code.
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def surprise_communities(request):
    """
    ---
    description: This end-point allows to compute the surprise_communities Community Discovery algorithm on a network dataset.
    tags:
    - surprise_communities
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: initial_membership
      schema:
        type: list
      required: true
      description: list of int. Initial membership for the partition. If None, defaults to a singleton partition. Default None
    - in: query
      name: weights
      schema:
        type: list of double
      required: true
      description: list of double, or edge attribute. Weights of edges. Can be either an iterable or an edge attribute. Default None
    - in: query
      name: node_sizes
      schema:
        type: list of integer
      required: true
      description: list of int, or vertex attribute. Sizes of nodes are necessary to know the size of communities in aggregate graphs. Usually this is set to 1 for all nodes, but in specific cases this could be changed. Default None
    """
    # Reject the request early if the experiment token is invalid.
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    try:
        # Load the network inside the try block so that loading failures are
        # reported with the same JSON failure envelope as algorithm errors.
        g = await __load_network(request)
        initial_membership = json.loads(request.query['initial_membership'])
        weights = json.loads(request.query['weights'])
        node_sizes = json.loads(request.query['node_sizes'])
        communities = algorithms.surprise_communities(g, initial_membership, weights, node_sizes)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        # Persist the clustering so evaluation end-points can reuse it.
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        # Any failure (missing/bad parameters, algorithm error) is reported
        # as a JSON payload with a 500 status code.
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def greedy_modularity(request):
    """
    ---
    description: This end-point allows to compute the greedy_modularity Community Discovery algorithm on a network dataset.
    tags:
    - greedy_modularity
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: weights
      schema:
        type: list of double
      required: true
      description: list of double, or edge attribute. Weights of edges. Can be either an iterable or an edge attribute. Default None
    """
    # Reject the request early if the experiment token is invalid.
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    try:
        # Load the network inside the try block so that loading failures are
        # reported with the same JSON failure envelope as algorithm errors.
        g = await __load_network(request)
        weights = json.loads(request.query['weights'])
        communities = algorithms.greedy_modularity(g, weights)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        # Persist the clustering so evaluation end-points can reuse it.
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        # Any failure (missing/bad parameters, algorithm error) is reported
        # as a JSON payload with a 500 status code.
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def infomap(request):
    """
    ---
    description: This end-point allows to compute the infomap Community Discovery algorithm on a network dataset.
    tags:
    - infomap
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    """
    # Reject the request early if the experiment token is invalid.
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    try:
        # Load the network inside the try block so that loading failures are
        # reported with the same JSON failure envelope as algorithm errors.
        g = await __load_network(request)
        communities = algorithms.infomap(g)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        # Persist the clustering so evaluation end-points can reuse it.
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        # Any failure is reported as a JSON payload with a 500 status code.
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def walktrap(request):
    """
    ---
    description: This end-point allows to compute the walktrap Community Discovery algorithm on a network dataset.
    tags:
    - walktrap
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    """
    # Reject the request early if the experiment token is invalid.
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    try:
        # Load the network inside the try block so that loading failures are
        # reported with the same JSON failure envelope as algorithm errors.
        g = await __load_network(request)
        communities = algorithms.walktrap(g)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        # Persist the clustering so evaluation end-points can reuse it.
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        # Any failure is reported as a JSON payload with a 500 status code.
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def label_propagation(request):
    """
    ---
    description: This end-point allows to compute the label_propagation Community Discovery algorithm on a network dataset.
    tags:
    - label_propagation
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    """
    # Reject the request early if the experiment token is invalid.
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    try:
        # Load the network inside the try block so that loading failures are
        # reported with the same JSON failure envelope as algorithm errors.
        g = await __load_network(request)
        communities = algorithms.label_propagation(g)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        # Persist the clustering so evaluation end-points can reuse it.
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        # Any failure is reported as a JSON payload with a 500 status code.
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def async_fluid(request):
    """
    ---
    description: This end-point allows to compute the async_fluid Community Discovery algorithm on a network dataset.
    tags:
    - async_fluid
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: k
      schema:
        type: integer
      required: true
      description: Number of communities to search
    """
    # Reject the request early if the experiment token is invalid.
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    try:
        # Load the network inside the try block so that loading failures are
        # reported with the same JSON failure envelope as algorithm errors.
        g = await __load_network(request)
        k = int(request.query['k'])
        communities = algorithms.async_fluid(g, k)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        # Persist the clustering so evaluation end-points can reuse it.
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        # Any failure (missing/bad parameters, algorithm error) is reported
        # as a JSON payload with a 500 status code.
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def der(request):
    """
    ---
    description: This end-point allows to compute the der Community Discovery algorithm on a network dataset.
    tags:
    - der
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: walk_len
      schema:
        type: integer
      required: true
      description: length of the random walk, default 3
    - in: query
      name: threshold
      schema:
        type: float
      required: true
      description: threshold for the stop criterion; if the likelihood_diff is less than threshold the algorithm stops, default 0.00001
    - in: query
      name: iter_bound
      schema:
        type: integer
      required: true
      description: maximum number of iterations, default 50
    """
    # Reject the request early if the experiment token is invalid.
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    try:
        # Load the network inside the try block so that loading failures are
        # reported with the same JSON failure envelope as algorithm errors.
        g = await __load_network(request)
        walk_len = int(request.query['walk_len'])
        threshold = float(request.query['threshold'])
        iter_bound = int(request.query['iter_bound'])
        communities = algorithms.der(g, walk_len, threshold, iter_bound)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        # Persist the clustering so evaluation end-points can reuse it.
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        # Any failure (missing/bad parameters, algorithm error) is reported
        # as a JSON payload with a 500 status code.
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def frc_fgsn(request):
    """
    ---
    description: This end-point allows to compute the frc_fgsn Community Discovery algorithm on a network dataset.
    tags:
    - frc_fgsn
    produces:
    - application/json
    responses:
        "200":
            description: successful operation.
        "500":
            description: operation failed
    parameters:
    - in: query
      name: token
      schema:
        type: string
      required: true
      description: Experiment token
    - in: query
      name: theta
      schema:
        type: float
      required: true
      description: community density coefficient
    - in: query
      name: eps
      schema:
        type: float
      required: true
      description: coupling coefficient of the community. Ranges in [0, 1], small values ensure that only strongly connected node granules are merged together.
    - in: query
      name: r
      schema:
        type: integer
      required: true
      description: radius of the granule
    """
    # Reject the request early if the experiment token is invalid.
    code, resp = __check_token(request)
    if code == 500:
        return web.Response(text=json.dumps(resp), status=500)
    try:
        # Load the network inside the try block so that loading failures are
        # reported with the same JSON failure envelope as algorithm errors.
        g = await __load_network(request)
        theta = float(request.query['theta'])
        eps = float(request.query['eps'])
        r = int(request.query['r'])
        communities = algorithms.frc_fgsn(g, theta, eps, r)
        resp = json.loads(communities.to_json())
        response_obj = dict(status='success', data=resp)
        # Persist the clustering so evaluation end-points can reuse it.
        await __save_communities(communities, request)
        return web.Response(text=json.dumps(response_obj), status=200)
    except Exception as e:
        # Any failure (missing/bad parameters, algorithm error) is reported
        # as a JSON payload with a 500 status code.
        response_obj = dict(status='failure', description=str(e))
        return web.Response(text=json.dumps(response_obj), status=500)
async def make_app():
    """Build the aiohttp application, registering all API routes and the swagger UI."""
    app = web.Application()

    # Experiment and network management end-points.
    app.add_routes([
        web.get('/api/create_experiment', create_experiment),
        web.post('/api/destroy_experiment', destroy_experiment),
        web.post('/api/upload/network', upload_network),
    ])

    # Evaluation end-points.
    app.add_routes([
        web.post('/api/evaluation/fitness_scores', community_statistics),
        web.post('/api/evaluation/community_comparison', community_comparison),
    ])

    # Community Discovery end-points: every handler is exposed under
    # /api/cd/<handler name> (each function name matches its URL suffix).
    cd_handlers = [
        angel, demon, ego_networks, node_perception,
        overlapping_seed_set_expansion, kclique, lfm, lais2, congo, conga,
        lemon, slpa, multicom, big_clam, girvan_newman, em, scan, gdmp2,
        spinglass, eigenvector, agdl, louvain, leiden, rb_pots, rber_pots,
        cpm, significance_communities, surprise_communities,
        greedy_modularity, infomap, walktrap, label_propagation,
        async_fluid, der, frc_fgsn,
    ]
    app.add_routes([web.post('/api/cd/%s' % handler.__name__, handler)
                    for handler in cd_handlers])

    # Expose the swagger documentation generated from the handler docstrings.
    setup_swagger(app, swagger_url="/api/v1/doc", description="",
                  title="CDlib Server API",
                  api_version="0.1.4",
                  contact="<EMAIL>")
    return app
# Entry point: start the development server when executed as a script.
if __name__ == '__main__':
    web.run_app(make_app(), port=8081, host="0.0.0.0")
| en | 0.693875 | --- description: This end-point allows to create a new experiment. tags: - Create Experiment produces: - text/plain responses: "200": description: successful operation. Return experiment "token" --- description: This end-point allows to destroy an existing experiment. tags: - Destroy Experiment produces: - text/plain responses: "200": description: successful operation. parameters: - in: query name: token schema: type: string required: true description: Experiment token --- description: This end-point allows to upload a network dataset. tags: - Upload Network produces: - text/plain responses: "200": description: successful operation. "500": description: operation failed. parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: network schema: type: string required: true description: JSON string representing a networkx.Graph object --- description: This end-point allows to compare two clusterings applying several state of art scores. tags: - Community Comparisons produces: - text/plain responses: "200": description: successful operation. "500": description: operation failed. parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: partition_names schema: type: string required: false description: Name of the partitions --- description: This end-point allows to compute aggregate statistics for the computed partition. tags: - Fitness Scores produces: - application/json responses: "200": description: successful operation. 
"500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: partition_names schema: type: string required: false description: Name of the partitions - in: query name: summary schema: type: string required: false description: Whether or not to return an aggregated view of community-wise statistics --- description: This end-point allows to compute the Angel Community Discovery algorithm to a network dataset. tags: - Angel produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: threshold schema: type: float required: true description: merging threshold - in: query name: min_com_size schema: type: integer required: true description: minimum community size --- description: This end-point allows to compute the Demon Community Discovery algorithm to a network dataset. tags: - Demon produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: epsilon schema: type: float required: true description: merging threshold - in: query name: min_com_size schema: type: integer required: true description: minimum community size --- description: This end-point allows to compute kclique Community Discovery algorithm to a network dataset. tags: - Kclique produces: - application/json responses: "200": description: successful operation. 
"500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: k schema: type: integer required: true description: Size of smallest clique --- description: This end-point allows to compute Lfm Community Discovery algorithm to a network dataset. tags: - LFM produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: alpha schema: type: float required: true description: controll the size of the communities: Large values of alpha yield very small communities, small values instead deliver large modules. If alpha is small enough, all nodes end up in the same cluster, the network itself. In most cases, for alpha < 0.5 there is only one community, for alpha > 2 one recovers the smallest communities. A natural choise is alpha =1. --- description: This end-point allows to compute the Ego-networks Community Discovery algorithm to a network dataset. tags: - Ego-networks produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: level schema: type: integer required: true description: extrac communities with all neighbors of distance<=level from a node. --- description: This end-point allows to compute the OSSE Community Discovery algorithm to a network dataset. tags: - Overlapping seed set expansion produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: seeds schema: type: list required: true description: Node list. 
- in: query name: ninf schema: type: boolean required: true description: Neighbourhood Inflation parameter. - in: query name: expansion schema: type: string required: true description: Seed expansion: ppr or vppr. - in: query name: stopping schema: type: sting required: true description: Stopping criteria: cond. - in: query name: nworkers schema: type: integer required: true description: Number of Workers. - in: query name: nruns schema: type: integer required: true description: Number of runs. - in: query name: alpha schema: type: float required: true description: alpha value for Personalized PageRank expansion. - in: query name: maxexpand schema: type: float required: true description: Maximum expansion allowed for approximate ppr. - in: query name: delta schema: type: float required: true description: Minimum distance parameter for near duplicate communities. --- description: This end-point allows to compute Lais2 Community Discovery algorithm to a network dataset. tags: - Lais2 produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token --- description: This end-point allows to compute Congo Community Discovery algorithm to a network dataset. tags: - Congo produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: number_communities schema: type: integer required: true description: the number of communities desired - in: query name: height schema: type: integer required: true description: The lengh of the longest shortest paths that CONGO considers --- description: This end-point allows to compute Conga Community Discovery algorithm to a network dataset. 
tags: - Conga produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: number_communities schema: type: integer required: true description: the number of communities desired --- description: This end-point allows to compute Lemon Community Discovery algorithm to a network dataset. tags: - Lemon produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: seeds schema: type: list required: true description: Node list - in: query name: min_com_size schema: type: integer required: true description: the minimum size of a single community in the network - in: query name: max_com_size schema: type: integer required: true description: the maximum size of a single community in the network - in: query name: expand_step schema: type: integer required: true description: the step of seed set increasement during expansion process - in: query name: subspace_dim schema: type: integer required: true description: dimension of the subspace; choosing a large dimension is undesirable because it would increase the computation cost of generating local spectra - in: query name: walk_steps schema: type: integer required: true description: the number of step for the random walk - in: query name: biased schema: type: boolean required: true description: set if the random walk starting from seed nodes --- description: This end-point allows to compute slpa Community Discovery algorithm to a network dataset. tags: - slpa produces: - application/json responses: "200": description: successful operation. 
"500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: t schema: type: integer required: true description: maximum number of iterations - in: query name: r schema: type: float required: true description: threshold ∈ [0, 1]. It is used in the post-processing stage: if the probability of seeing a particular label during the whole process is less than r, this label is deleted from a node’s memory. --- description: This end-point allows to compute multicom Community Discovery algorithm to a network dataset. tags: - multicom produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: seed_node schema: type: integer required: true description: Id of the seed node around which we want to detect communities --- description: This end-point allows to compute big_clam Community Discovery algorithm to a network dataset. tags: - Big_clam produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: number_communities schema: type: integer required: true description: number communities desired --- description: This end-point allows to compute the Node Perception Community Discovery algorithm to a network dataset. tags: - Node Perception produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: threshold schema: type: float required: true description: the tolerance required in order to merge communities. 
- in: query name: overlap_threshold schema: type: float required: true description: the overlap tolerance. - in: query name: min_comm_size schema: type: integer required: true description: minimum community size. # crisp partition --- description: This end-point allows to compute girvan_newman Community Discovery algorithm to a network dataset. tags: - girvan_newman produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: level schema: type: integer required: true description: the level where to cut the dendrogram --- description: This end-point allows to compute EM Community Discovery algorithm to a network dataset. tags: - EM produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: k schema: type: integer required: true description: tthe number of desired communities --- description: This end-point allows to compute Scan Community Discovery algorithm to a network dataset. tags: - Scan produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: epsilon schema: type: float required: true description: the minimum threshold to assigning cluster membership - in: query name: mu schema: type: integer required: true description: minimum number of neineighbors with a structural similarity that exceeds the threshold epsilon --- description: This end-point allows to compute gdmp2 Community Discovery algorithm to a network dataset. tags: - gdmp2 produces: - application/json responses: "200": description: successful operation. 
"500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: min_threshold schema: type: float required: true description: the minimum density threshold parameter to control the density of the output subgraphs --- description: This end-point allows to compute spinglass Community Discovery algorithm to a network dataset. tags: - spinglass produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token --- description: This end-point allows to compute eigenvector Community Discovery algorithm to a network dataset. tags: - eigenvector produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token --- description: This end-point allows to compute agdl Community Discovery algorithm to a network dataset. tags: - agdl produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: number_communities schema: type: integer required: true description: number of communities - in: query name: number_neighbors schema: type: integer required: true description: Number of neighbors to use for KNN - in: query name: kc schema: type: integer required: true description: size of the neighbor set for each cluster - in: query name: a schema: type: float required: true description: range(-infinity;+infinty) --- description: This end-point allows to compute louvain Community Discovery algorithm to a network dataset. tags: - louvain produces: - application/json responses: "200": description: successful operation. 
"500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: weight schema: type: string required: true description: optional the key in graph to use as weight - in: query name: resolution schema: type: string required: float description: Will change the size of the communities, default to 1. - in: query name: randomize schema: type: boolean required: true description: Will randomize the node evaluation order and the community evaluation order to get different partitions at each call, default False --- description: This end-point allows to compute leiden Community Discovery algorithm to a network dataset. tags: - leiden produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: initial_membership schema: type: list required: true description: list of int Initial membership for the partition. If :obj:`None` then defaults to a singleton partition. Deafault None - in: query name: weights schema: type: list of double required: float description: or edge attribute Weights of edges. Can be either an iterable or an edge attribute. Deafault None --- description: This end-point allows to compute rb_pots Community Discovery algorithm to a network dataset. tags: - rb_pots produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: initial_membership schema: type: list required: true description: list of int Initial membership for the partition. If :obj:`None` then defaults to a singleton partition. Deafault None - in: query name: weights schema: type: list of double required: float description: or edge attribute Weights of edges. 
Can be either an iterable or an edge attribute. Deafault None - in: query name: resolution_parameter schema: type: double required: float description: double >0 A parameter value controlling the coarseness of the clustering. Higher resolutions lead to more communities, while lower resolutions lead to fewer communities. Default 1 --- description: This end-point allows to compute rber_pots Community Discovery algorithm to a network dataset. tags: - rber_pots produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: initial_membership schema: type: list required: true description: list of int Initial membership for the partition. If :obj:`None` then defaults to a singleton partition. Deafault None - in: query name: weights schema: type: list of double required: float description: or edge attribute Weights of edges. Can be either an iterable or an edge attribute. Deafault None - in: query name: node_sizes schema: type: list of integer required: float description: list of int, or vertex attribute Sizes of nodes are necessary to know the size of communities in aggregate graphs. Usually this is set to 1 for all nodes, but in specific cases this could be changed. Deafault None - in: query name: resolution_parameter schema: type: double required: float description: double >0 A parameter value controlling the coarseness of the clustering. Higher resolutions lead to more communities, while lower resolutions lead to fewer communities. Default 1 --- description: This end-point allows to compute cpm Community Discovery algorithm to a network dataset. tags: - cpm produces: - application/json responses: "200": description: successful operation. 
"500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: initial_membership schema: type: list required: true description: list of int Initial membership for the partition. If :obj:`None` then defaults to a singleton partition. Deafault None - in: query name: weights schema: type: list of double required: float description: or edge attribute Weights of edges. Can be either an iterable or an edge attribute. Deafault None - in: query name: node_sizes schema: type: list of integer required: float description: list of int, or vertex attribute Sizes of nodes are necessary to know the size of communities in aggregate graphs. Usually this is set to 1 for all nodes, but in specific cases this could be changed. Deafault None - in: query name: resolution_parameter schema: type: double required: float description: double >0 A parameter value controlling the coarseness of the clustering. Higher resolutions lead to more communities, while lower resolutions lead to fewer communities. Default 1 --- description: This end-point allows to compute significance_communities Community Discovery algorithm to a network dataset. tags: - significance_communities produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: initial_membership schema: type: list required: true description: list of int Initial membership for the partition. If :obj:`None` then defaults to a singleton partition. Deafault None - in: query name: node_sizes schema: type: list of integer required: float description: list of int, or vertex attribute Sizes of nodes are necessary to know the size of communities in aggregate graphs. Usually this is set to 1 for all nodes, but in specific cases this could be changed. 
Deafault None --- description: This end-point allows to compute surprise_communities Community Discovery algorithm to a network dataset. tags: - surprise_communities produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: initial_membership schema: type: list required: true description: list of int Initial membership for the partition. If :obj:`None` then defaults to a singleton partition. Deafault None - in: query name: weights schema: type: list of double required: float description: or edge attribute Weights of edges. Can be either an iterable or an edge attribute. Deafault None - in: query name: node_sizes schema: type: list of integer required: float description: list of int, or vertex attribute Sizes of nodes are necessary to know the size of communities in aggregate graphs. Usually this is set to 1 for all nodes, but in specific cases this could be changed. Deafault None --- description: This end-point allows to compute greedy_modularity Community Discovery algorithm to a network dataset. tags: - greedy_modularity produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: weights schema: type: list of double required: float description: or edge attribute Weights of edges. Can be either an iterable or an edge attribute. Deafault None --- description: This end-point allows to compute infomap Community Discovery algorithm to a network dataset. tags: - infomap produces: - application/json responses: "200": description: successful operation. 
"500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token --- description: This end-point allows to compute walktrap Community Discovery algorithm to a network dataset. tags: - walktrap produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token --- description: This end-point allows to compute label_propagation Community Discovery algorithm to a network dataset. tags: - label_propagation produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token --- description: This end-point allows to compute async_fluid Community Discovery algorithm to a network dataset. tags: - async_fluid produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: k schema: type: integer required: true description: Number of communities to search --- description: This end-point allows to compute der Community Discovery algorithm to a network dataset. tags: - der produces: - application/json responses: "200": description: successful operation. 
"500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: walk_len schema: type: integer required: true description: length of the random walk, default 3 - in: query name: threshold schema: type: float required: true description: threshold for stop criteria; if the likelihood_diff is less than threshold tha algorithm stops, default 0.00001 - in: query name: iter_bound schema: type: integer required: true description: maximum number of iteration, default 50 --- description: This end-point allows to compute frc_fgsn Community Discovery algorithm to a network dataset. tags: - frc_fgsn produces: - application/json responses: "200": description: successful operation. "500": description: operation failed parameters: - in: query name: token schema: type: string required: true description: Experiment token - in: query name: theta schema: type: float required: true description: community density coefficient - in: query name: eps schema: type: float required: true description: coupling coefficient of the community. Ranges in [0, 1], small values ensure that only strongly connected node granules are merged togheter. - in: query name: r schema: type: integer required: true description: radius of the granule # add the routes | 2.479236 | 2 |
apps/lightroom.py | zachbarrow/talon_community | 125 | 6619290 | <reponame>zachbarrow/talon_community
# Talon voice commands for interacting with Adobe Lightroom
# <NAME> <EMAIL>

from talon.voice import Key, Context

ctx = Context("Lightroom", bundle="com.adobe.Lightroom6")

# Voice command -> keystroke map.
# NOTE: the original map repeated the "grid [view]", "crop", and "zoom"
# keys; duplicate dict keys are silently collapsed by Python (the later
# entry wins), so the duplicates are removed here.  [word] marks an
# optional spoken word, (a | b) accepts either alternative.
ctx.keymap(
    {
        "import": Key("shift-cmd-i"),
        "show in finder": Key("cmd-r"),
        # View modes
        "grid [view]": Key("g"),
        "loop [view]": Key("e"),
        "compare [view]": Key("c"),
        "develop [view]": Key("d"),
        "grid zoom in": Key("="),
        "grid zoom out": Key("-"),
        "(show | hide) toolbar": Key("t"),
        "crop": Key("r"),
        # Navigation
        "next photo": Key("cmd-right"),
        "(preev | previous) photo": Key("cmd-left"),
        "zoom": Key("z"),
        # Ratings and color labels
        "(rating 1 | 1 star)": Key("1"),
        "(rating 2 | 2 stars)": Key("2"),
        "(rating 3 | 3 stars)": Key("3"),
        "(rating 4 | 4 stars)": Key("4"),
        "(rating 5 | 5 stars)": Key("5"),
        "color red": Key("6"),
        "color yellow": Key("7"),
        "color green": Key("8"),
        "color blue": Key("9"),
        # Flags
        "reject": Key("x"),
        "flag it": Key("`"),
        "unflag it": Key("u"),
        # Editing
        "rotate (left | counter clockwise)": Key("cmd-["),
        "rotate (right | clockwise)": Key("cmd-]"),
        "keywords": Key("cmd-k"),
        "auto tone": Key("cmd-u"),
        "auto white balance": Key("shift-cmd-u"),
        "black and white": Key("v"),
    }
)
| # Talon voice commands for interacting with Adobe Lightroom
# <NAME> <EMAIL>
from talon.voice import Key, Context
ctx = Context("Lightroom", bundle="com.adobe.Lightroom6")
ctx.keymap(
{
"import": Key("shift-cmd-i"),
"show in finder": Key("cmd-r"),
"grid [view]": Key("g"),
"loop [view]": Key("e"),
"compare [view]": Key("c"),
"develop [view]": Key("d"),
"grid [view]": Key("g"),
"grid zoom in": Key("="),
"grid zoom out": Key("-"),
"(show | hide) toolbar": Key("t"),
"crop": Key("r"),
"crop": Key("r"),
"next photo": Key("cmd-right"),
"(preev | previous) photo": Key("cmd-left"),
"zoom": Key("z"),
"zoom": Key("z"),
"(rating 1 | 1 star)": Key("1"),
"(rating 2 | 2 stars)": Key("2"),
"(rating 3 | 3 stars)": Key("3"),
"(rating 4 | 4 stars)": Key("4"),
"(rating 5 | 5 stars)": Key("5"),
"color red": Key("6"),
"color yellow": Key("7"),
"color green": Key("8"),
"color blue": Key("9"),
"reject": Key("x"),
"flag it": Key("`"),
"unflag it": Key("u"),
"rotate (left | counter clockwise)": Key("cmd-["),
"rotate (right | clockwise)": Key("cmd-]"),
"keywords": Key("cmd-k"),
"auto tone": Key("cmd-u"),
"auto white balance": Key("shift-cmd-u"),
"black and white": Key("v"),
}
) | en | 0.599463 | # Talon voice commands for interacting with Adobe Lightroom # <NAME> <EMAIL> | 2.454185 | 2 |
.github/workflows/updatelib.py | bdeboe/iknow | 0 | 6619291 | """Common functions for autoupdate bot. Requires Python 3.6 or higher."""
import os
import random
import string
import sys
def _rand_string(length):
    """Return a random alphanumeric string with a given length."""
    picked = [random.choice(ALPHANUMERIC) for _ in range(length)]
    return ''.join(picked)
def get_vars():
    """Parse the dependencies file and return a dict of name:value pairs.

    Only the region between the '# START DEPENDENCY-AUTOUPDATE SECTION'
    and '# END DEPENDENCY-AUTOUPDATE SECTION' marker lines is parsed.
    The text before/after that region is cached in the module-level
    _lines_before/_lines_after lists so that a later set_vars() call can
    rewrite the file while preserving everything outside the managed
    section.

    Raises:
        ValueError: if a line in the managed section is not a NAME=VALUE
            pair, or if the parsed names differ from EXPECTED_VARNAMES.
    """
    vars = {} # name:value dict for variables
    # Reset the cached surrounding text from any previous parse.
    _lines_before.clear()
    _lines_after.clear()
    with open(DEPENDENCIES_FILENAME) as dependencies_file:
        start_reading = False  # becomes True once the START marker is seen
        stop_reading = False   # becomes True once the END marker is seen
        for line in dependencies_file:
            if start_reading:
                if line == '# END DEPENDENCY-AUTOUPDATE SECTION\n':
                    stop_reading = True
                if stop_reading:
                    # Everything from the END marker onward (inclusive).
                    _lines_after.append(line)
                else:
                    # Managed section: each line must be NAME=VALUE.
                    # NOTE(review): a value containing '=' would split into
                    # more than two parts and raise — confirm no pin does.
                    pair = line.rstrip().split('=')
                    if len(pair) != 2:
                        raise ValueError(f'Invalid name-value pair {pair!r}')
                    # Strip optional surrounding double quotes from the value.
                    pair[1] = pair[1].strip('"')
                    vars[pair[0]] = pair[1]
            else:
                # Everything up to and including the START marker.
                _lines_before.append(line)
                if line == '# START DEPENDENCY-AUTOUPDATE SECTION\n':
                    start_reading = True
    # The managed section must define exactly the expected variable names.
    if set(vars) != EXPECTED_VARNAMES:
        raise ValueError(f'Variables in {DEPENDENCIES_FILENAME} do not match the set {EXPECTED_VARNAMES}')
    return vars
def set_vars(vars):
    """Write a dict of name:value pairs back to the dependencies file.

    Rebuilds the file as: cached prefix lines, one NAME=VALUE line per
    entry (values containing spaces are double-quoted so the file stays
    shell-sourceable), then the cached suffix lines. Do not call this
    function before calling get_vars(), which populates the cached
    _lines_before/_lines_after lists.
    """
    assert _lines_before and _lines_after, 'Need to call get_vars()'
    with open(DEPENDENCIES_FILENAME, 'w') as dependencies_file:
        dependencies_file.write(''.join(_lines_before))
        for name, value in vars.items():
            # Quote values that contain spaces.
            if ' ' in value:
                value = f'"{value}"'
            dependencies_file.write(f'{name}={value}\n')
        dependencies_file.write(''.join(_lines_after))
def setenv(name, value):
    """Set the value of an environment variable for use in GitHub actions.

    Appends to the file named by $GITHUB_ENV. Single-line values use the
    plain NAME=value form; multi-line values use the heredoc-style
    NAME<<MARKER ... MARKER form with a random end marker that is
    guaranteed not to occur inside the value.
    """
    with open(os.environ['GITHUB_ENV'], 'a') as env_file:
        if '\n' not in value:
            env_file.write(f'{name}={value}\n')
        else:
            # Grow the marker until it cannot collide with the value text.
            marker_len = 8
            marker = _rand_string(marker_len)
            while marker in value:
                marker_len *= 2
                marker = _rand_string(marker_len)
            env_file.write(f'{name}<<{marker}\n{value}\n{marker}\n')
assert sys.version_info >= (3, 6), 'Python >=3.6 is required'

# Alphabet used for the random heredoc end markers in setenv().
ALPHANUMERIC = string.digits + string.ascii_letters
# Shell file holding the managed NAME=VALUE dependency pins.
DEPENDENCIES_FILENAME = os.path.join(os.environ['GITHUB_WORKSPACE'], '.github/workflows/dependencies.sh')
# Exact set of variable names the managed section must define; get_vars()
# raises ValueError if the parsed section differs from this set.
EXPECTED_VARNAMES = {
    'ICU_NAME',
    'ICU_URL_WIN',
    'ICU_URL_SRC',
    'PYVERSIONS_WIN',
    'PYVERSIONS_OSX',
    'PYENV_TOOL_VERSION',
    'BUILDCACHE_NAME',
    'BUILDCACHE_URL_WIN',
    'CYTHON_VERSION',
    'MANYLINUX2010_X86_64_TAG',
    'MANYLINUX2014_AARCH64_TAG',
    'MANYLINUX2014_PPC64LE_TAG'
}
# File text surrounding the managed section, populated by get_vars() and
# consumed by set_vars().
_lines_before = []
_lines_after = []
| """Common functions for autoupdate bot. Requires Python 3.6 or higher."""
import os
import random
import string
import sys
def _rand_string(length):
"""Return a random alphanumeric string with a given length."""
return ''.join(random.choice(ALPHANUMERIC) for _ in range(length))
def get_vars():
"""Return a dict of name:value pairs from the dependencies file."""
vars = {} # name:value dict for variables
_lines_before.clear()
_lines_after.clear()
with open(DEPENDENCIES_FILENAME) as dependencies_file:
start_reading = False
stop_reading = False
for line in dependencies_file:
if start_reading:
if line == '# END DEPENDENCY-AUTOUPDATE SECTION\n':
stop_reading = True
if stop_reading:
_lines_after.append(line)
else:
pair = line.rstrip().split('=')
if len(pair) != 2:
raise ValueError(f'Invalid name-value pair {pair!r}')
pair[1] = pair[1].strip('"')
vars[pair[0]] = pair[1]
else:
_lines_before.append(line)
if line == '# START DEPENDENCY-AUTOUPDATE SECTION\n':
start_reading = True
if set(vars) != EXPECTED_VARNAMES:
raise ValueError(f'Variables in {DEPENDENCIES_FILENAME} do not match the set {EXPECTED_VARNAMES}')
return vars
def set_vars(vars):
"""Set a dict of name:value pairs to the dependencies file. Do not call this
function before calling get_vars()."""
assert _lines_before and _lines_after, 'Need to call get_vars()'
with open(DEPENDENCIES_FILENAME, 'w') as dependencies_file:
dependencies_file.write(''.join(_lines_before))
for name, value in vars.items():
if ' ' in value:
value = f'"{value}"'
dependencies_file.write(f'{name}={value}\n')
dependencies_file.write(''.join(_lines_after))
def setenv(name, value):
"""Set the value of an environment variable for use in GitHub actions."""
with open(os.environ['GITHUB_ENV'], 'a') as env_file:
if '\n' in value:
length = 8
end_marker = _rand_string(length)
while end_marker in value:
length *= 2
end_marker = _rand_string(length)
env_file.write(f'{name}<<{end_marker}\n{value}\n{end_marker}\n')
else:
env_file.write(f'{name}={value}\n')
assert sys.version_info >= (3, 6), 'Python >=3.6 is required'
ALPHANUMERIC = string.digits + string.ascii_letters
DEPENDENCIES_FILENAME = os.path.join(os.environ['GITHUB_WORKSPACE'], '.github/workflows/dependencies.sh')
EXPECTED_VARNAMES = {
'ICU_NAME',
'ICU_URL_WIN',
'ICU_URL_SRC',
'PYVERSIONS_WIN',
'PYVERSIONS_OSX',
'PYENV_TOOL_VERSION',
'BUILDCACHE_NAME',
'BUILDCACHE_URL_WIN',
'CYTHON_VERSION',
'MANYLINUX2010_X86_64_TAG',
'MANYLINUX2014_AARCH64_TAG',
'MANYLINUX2014_PPC64LE_TAG'
}
_lines_before = []
_lines_after = []
| en | 0.592741 | Common functions for autoupdate bot. Requires Python 3.6 or higher. Return a random alphanumeric string with a given length. Return a dict of name:value pairs from the dependencies file. # name:value dict for variables Set a dict of name:value pairs to the dependencies file. Do not call this function before calling get_vars(). Set the value of an environment variable for use in GitHub actions. | 3.172872 | 3 |
bright/xsgen/run/runchar.py | bright-dev/bright | 3 | 6619292 | from __future__ import print_function
import os
import time
import subprocess
import numpy as np
import tables as tb
from pyne.material import Material
from pyne.utils import message, failure
class RunChar(object):
    """A controller to run char very generally.

    Orchestrates a neutron transport code wrapper (``n_code``): HDF5
    library initialization, burnup calculations, cross-section
    generation, and nuclide sensitivity (delta-mass) studies.
    """

    def __init__(self, n_code, env):
        """Args:
            * n_code: a neutron transport model
            * env: the environment to execute the model in.
        """
        self.n_code = n_code
        self.env = env

    #
    # Controller functions
    #

    def init_h5(self):
        """Inits the char library (HDF5 groups for each requested stage)."""
        # Make a new HDF5 file.
        if (self.env['options'].MAKE_INPUT):
            self.n_code.init_h5()

        # Burnup calculation groups.
        if self.env['options'].RUN_BURNUP:
            self.n_code.init_h5_burnup()

        # Make Cross-sections as a separate step from the burnup calculation
        if self.env['options'].RUN_XS_GEN:
            self.n_code.init_h5_xs_gen()
            #if self.env['xs_models_needed']:
            #    self.n_code.init_h5_flux_g()
            self.n_code.init_h5_flux_g()

        # Run initial nuclide sensitivity calculation
        if self.env['options'].RUN_DELTAM:
            self.n_code.init_h5_deltam()

    def burnup(self, idx):
        """Runs the burnup portion of char.

        idx : a list of perturbation indices that
              could be supplied to range() or slice().
        """
        # Make sure we only run with the right strides: step by the number
        # of time points so each n is the first index of a perturbation.
        ridx = idx[:2] + [self.n_code.ntimes]

        # run the burnup steps
        for n in range(*ridx):
            res, dep = self.n_code.run_burnup_pert(n)
            self.n_code.write_burnup(n, res, dep)

    def xs_gen(self, idx, nucs):
        """Runs the cross-section generation portion of char.

        idx : a list of perturbation indices that
              could be supplied to range() or slice().
        nucs : a set of nuclides to run (zzaaam-form).
        """
        # Partition the requested nuclides by whether serpent can model them.
        nucs_in_serpent = (nucs & set(self.env['core_transmute_in_serpent']))
        nucs_not_in_serpent = (nucs & set(self.env['core_transmute_not_in_serpent']))

        # Loop over the perturbation steps
        for n in range(*idx):
            # Grab the Material at this time.
            ms_n = Material()
            ms_n.from_hdf5(self.env['reactor'] + ".h5", "/Ti0", n, protocol=0)

            # Calc restricted mass streams
            ms_n_in_serpent = ms_n[self.env['core_transmute_in_serpent']]
            ms_n_not_in_serpent = ms_n[self.env['core_transmute_not_in_serpent']]

            # Read in some common parameters from the data file.
            # Energy grids and flux are stored high-to-low, hence the [::-1].
            with tb.openFile(self.env['reactor'] + ".h5", 'r') as rx_h5:
                E_g = np.array(rx_h5.root.energy[n][::-1])
                E_n = np.array(rx_h5.root.hi_res.energy.read()[::-1])
                phi_n = np.array(rx_h5.root.hi_res.phi_g[n][::-1])

            # Run and write the high resolution flux.
            # An all-negative flux is the sentinel for "not yet computed".
            if (phi_n < 0.0).all():
                res, det = self.n_code.run_flux_g_pert(n, ms_n_in_serpent)
                self.n_code.write_flux_g(n, res, det)
                with tb.openFile(self.env['reactor'] + ".h5", 'r') as rx_h5:
                    phi_n = np.array(rx_h5.root.hi_res.phi_g[n][::-1])

            #
            # Loop over all output nuclides...
            #
            # ...that are valid in serpent
            for nuc in nucs_in_serpent:
                res, det = self.n_code.run_xs_gen_pert(nuc, n, ms_n_in_serpent, E_n, E_g, phi_n)
                self.n_code.write_xs_gen(nuc, n, res, det)

            # ...that are NOT valid in serpent (fall back to model-based xs)
            for nuc in nucs_not_in_serpent:
                xsd = self.n_code.run_xs_mod_pert(nuc, n, E_n, E_g, phi_n)
                self.n_code.write_xs_mod(nuc, n, xsd)

    def deltam(self, idx, nucs, sidx):
        """Runs the nuclide sensitivity study.

        idx : a list of perturbation indices that
              could be supplied to range() or slice().
        nucs : a set of nuclides to run (zzaaaam-form).
        sidx : a list of sensitivity indices that
               could be supplied to range() or slice().
        """
        # Make sure we only run with the right strides
        ridx = idx[:2] + [self.n_code.ntimes]

        # Loop over all perturbations.
        for n in range(*ridx):
            # Loop over all nuclides
            for nuc_zz in nucs:
                # Calculate this nuclide's new values of IHM concentration.
                # NOTE(review): self.ihm_stream is never assigned in
                # __init__ — presumably set externally or by a subclass;
                # confirm before calling deltam().
                nuc_fracs = self.env['deltam'] * self.ihm_stream.comp[nuc_zz]

                # Skip nuclides that would be perturbed over 1 kgIHM
                if (1.0 < nuc_fracs).any():
                    continue

                # Loop over all nuclide sensitivities
                for s in range(*sidx):
                    res, dep = self.n_code.run_deltam_pert(nuc_zz, n, s, nuc_fracs)
                    self.n_code.write_deltam(nuc_zz, n, s, nuc_fracs, res, dep)
| from __future__ import print_function
import os
import time
import subprocess
import numpy as np
import tables as tb
from pyne.material import Material
from pyne.utils import message, failure
class RunChar(object):
"""A controller to run char very generally."""
def __init__(self, n_code, env):
"""Args:
* n_code: a neutron transport model
* env: the environment to execute the model in.
"""
self.n_code = n_code
self.env = env
#
# Controleer functions
#
def init_h5(self):
"""Inits the char library."""
# Make a new HDF5 file.
if (self.env['options'].MAKE_INPUT):
self.n_code.init_h5()
# the cross-section generation
if self.env['options'].RUN_BURNUP:
self.n_code.init_h5_burnup()
# Make Cross-sections as a separate step from the burnup calculation
if self.env['options'].RUN_XS_GEN:
self.n_code.init_h5_xs_gen()
#if self.env['xs_models_needed']:
# self.n_code.init_h5_flux_g()
self.n_code.init_h5_flux_g()
# Run initial nuclide sensitivity calculation
if self.env['options'].RUN_DELTAM:
self.n_code.init_h5_deltam()
def burnup(self, idx):
"""Runs the burnup portion of char.
idx : a list of perturbation indices that
could be supplied to range() or slice().
"""
# Make sure we only run with the right strides
ridx = idx[:2] + [self.n_code.ntimes]
# run the burnup steps
for n in range(*ridx):
res, dep = self.n_code.run_burnup_pert(n)
self.n_code.write_burnup(n, res, dep)
def xs_gen(self, idx, nucs):
"""Runs the cross-section generation portion of char.
idx : a list of perturbation indices that
could be supplied to range() or slice().
nucs : a set of nuclides to run (zzaaam-form).
"""
nucs_in_serpent = (nucs & set(self.env['core_transmute_in_serpent']))
nucs_not_in_serpent = (nucs & set(self.env['core_transmute_not_in_serpent']))
# Loop over the perturbation steps
for n in range(*idx):
# Grab the Material at this time.
ms_n = Material()
ms_n.from_hdf5(self.env['reactor'] + ".h5", "/Ti0", n, protocol=0)
# Calc restricted mass streams
ms_n_in_serpent = ms_n[self.env['core_transmute_in_serpent']]
ms_n_not_in_serpent = ms_n[self.env['core_transmute_not_in_serpent']]
# Read in some common parameters from the data file
with tb.openFile(self.env['reactor'] + ".h5", 'r') as rx_h5:
E_g = np.array(rx_h5.root.energy[n][::-1])
E_n = np.array(rx_h5.root.hi_res.energy.read()[::-1])
phi_n = np.array(rx_h5.root.hi_res.phi_g[n][::-1])
# Run and write the high resolution flux
if (phi_n < 0.0).all():
res, det = self.n_code.run_flux_g_pert(n, ms_n_in_serpent)
self.n_code.write_flux_g(n, res, det)
with tb.openFile(self.env['reactor'] + ".h5", 'r') as rx_h5:
phi_n = np.array(rx_h5.root.hi_res.phi_g[n][::-1])
#
# Loop over all output nuclides...
#
# ...that are valid in serpent
for nuc in nucs_in_serpent:
res, det = self.n_code.run_xs_gen_pert(nuc, n, ms_n_in_serpent, E_n, E_g, phi_n)
self.n_code.write_xs_gen(nuc, n, res, det)
# ...that are NOT valid in serpent
for nuc in nucs_not_in_serpent:
xsd = self.n_code.run_xs_mod_pert(nuc, n, E_n, E_g, phi_n)
self.n_code.write_xs_mod(nuc, n, xsd)
def deltam(self, idx, nucs, sidx):
"""Runs the nuclide sensitivity study.
idx : a list of perturbation indices that
could be supplied to range() or slice().
nucs : a set of nuclides to run (zzaaaam-form).
sidx : a list of sensitivity indices that
could be supplied to range() or slice().
"""
# Make sure we only run with the right strides
ridx = idx[:2] + [self.n_code.ntimes]
# Loop over all perturbations.
for n in range(*ridx):
# Loop over all nuclides
for nuc_zz in nucs:
# Calulate this nuclides new values of IHM concentration
nuc_fracs = self.env['deltam'] * self.ihm_stream.comp[nuc_zz]
# Skip nuclides that would be pertubed over 1 kgIHM
if (1.0 < nuc_fracs).any():
continue
# Loop over all nuclide sesnitivities
for s in range(*sidx):
res, dep = self.n_code.run_deltam_pert(nuc_zz, n, s, nuc_fracs)
self.n_code.write_deltam(nuc_zz, n, s, nuc_fracs, res, dep)
| en | 0.757887 | A controller to run char very generally. Args: * n_code: a neutron transport model * env: the environment to execute the model in. # # Controleer functions # Inits the char library. # Make a new HDF5 file. # the cross-section generation # Make Cross-sections as a separate step from the burnup calculation #if self.env['xs_models_needed']: # self.n_code.init_h5_flux_g() # Run initial nuclide sensitivity calculation Runs the burnup portion of char. idx : a list of perturbation indices that could be supplied to range() or slice(). # Make sure we only run with the right strides # run the burnup steps Runs the cross-section generation portion of char. idx : a list of perturbation indices that could be supplied to range() or slice(). nucs : a set of nuclides to run (zzaaam-form). # Loop over the perturbation steps # Grab the Material at this time. # Calc restricted mass streams # Read in some common parameters from the data file # Run and write the high resolution flux # # Loop over all output nuclides... # # ...that are valid in serpent # ...that are NOT valid in serpent Runs the nuclide sensitivity study. idx : a list of perturbation indices that could be supplied to range() or slice(). nucs : a set of nuclides to run (zzaaaam-form). sidx : a list of sensitivity indices that could be supplied to range() or slice(). # Make sure we only run with the right strides # Loop over all perturbations. # Loop over all nuclides # Calulate this nuclides new values of IHM concentration # Skip nuclides that would be pertubed over 1 kgIHM # Loop over all nuclide sesnitivities | 2.53978 | 3 |
active_learning_dd/utils/data_utils.py | gitter-lab/active-learning-drug-discovery | 0 | 6619293 | """
Contains data utils.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.SaltRemover import SaltRemover
from rdkit.Chem.FilterCatalog import *
import numpy as np
import pandas as pd
"""
Computes tanimoto dissimilarity array between two feature matrices.
Compares each row of X with each row of Y.
"""
def tanimoto_dissimilarity(X, Y, X_batch_size=50, Y_batch_size=50):
    """Tanimoto dissimilarity between each row of X and each row of Y.

    X and Y are fingerprint matrices assumed to hold 0/1 values; a 1-D
    input is treated as a single row. Rows are processed in batches to
    bound the size of the intermediate pairwise matrices. The result is
    a flat array ordered X-batch-major (for each batch of X rows, all Y
    batches in order, each pair block flattened row-major over X).
    """
    n_features = X.shape[-1]
    X = X.reshape(-1, n_features) if X.ndim == 1 else X
    Y = Y.reshape(-1, n_features) if Y.ndim == 1 else Y

    similarity_chunks = []
    n_x_batches = X.shape[0] // X_batch_size + 1
    n_y_batches = Y.shape[0] // Y_batch_size + 1
    for xb in range(n_x_batches):
        x_lo = xb * X_batch_size
        x_hi = min((xb + 1) * X_batch_size, X.shape[0])
        X_chunk = X[x_lo:x_hi, :]
        for yb in range(n_y_batches):
            y_lo = yb * Y_batch_size
            y_hi = min((yb + 1) * Y_batch_size, Y.shape[0])
            Y_chunk = Y[y_lo:y_hi, :]
            # adapted from: https://github.com/deepchem/deepchem/blob/2531eca8564c1dc68910d791b0bcd91fd586afb9/deepchem/trans/transformers.py#L752
            # With 0/1 fingerprints a dot product counts the shared on-bits:
            # |X & Y|.  The union is n_features - |~X & ~Y|.
            intersection = np.dot(X_chunk, Y_chunk.T).flatten()
            union = n_features - np.dot(1 - X_chunk, (1 - Y_chunk).T).flatten()
            similarity_chunks.append(intersection / union)
    return 1.0 - np.hstack(similarity_chunks)
"""
Computes tanimoto dissimilarity between two vectors.
"""
def feature_dist_func_dict():
return {"tanimoto_dissimilarity": tanimoto_dissimilarity}
"""
Returns indices of duplicated smiles from x_smiles in y_smiles.
"""
def get_duplicate_smiles_in1d(x_smiles, y_smiles, smiles_are_canonical=True):
    """Return indices into y_smiles of entries whose (canonical) SMILES
    also occur in x_smiles.

    When smiles_are_canonical is False, both arrays are first
    canonicalized with RDKit before comparison.
    """
    if smiles_are_canonical:
        x_canon_smiles, y_canon_smiles = x_smiles, y_smiles
    else:
        x_canon_smiles = np.array([Chem.MolToSmiles(Chem.MolFromSmiles(s)) for s in x_smiles])
        y_canon_smiles = np.array([Chem.MolToSmiles(Chem.MolFromSmiles(s)) for s in y_smiles])

    dup_mask = np.in1d(y_canon_smiles, x_canon_smiles)
    return list(np.arange(len(y_canon_smiles))[dup_mask])
"""
Computes avg cluster dissimilarity/distance of candidate clusters towards selected clusters.
clusters_ordered_ids is the ordering of clusters_avg_dissimilarity.
Assumes feature distance function returns array with distances between rows of X and Y.
"""
def get_avg_cluster_dissimilarity(clusters,
                                  features,
                                  selected_cluster_ids,
                                  candidate_cluster_ids,
                                  feature_dist_func=tanimoto_dissimilarity,
                                  candidate_cluster_batch_size=2056):
    """Average dissimilarity of each candidate cluster to the selected clusters.

    clusters : per-instance cluster id array (parallel to rows of features).
    features : instance feature matrix.
    Returns (clusters_ordered_ids, clusters_avg_dissimilarity), where the
    second array is ordered to match candidate_cluster_ids.

    NOTE(review): candidate_cluster_rep labels the flattened distance
    vector candidate-major (each candidate instance repeated once per
    selected instance), while the default tanimoto_dissimilarity flattens
    np.dot(X, Y.T) selected-major. If those layouts disagree, the
    per-group means are computed over mislabeled distances — confirm the
    intended pairing before relying on per-cluster values.
    """
    clusters_ordered_ids = candidate_cluster_ids #[:] no need to make a copy
    #clusters_avg_dissimilarity = np.zeros(shape=(len(candidate_cluster_ids),))
    # Boolean mask of instances belonging to any selected cluster.
    selected_cid_instances = np.in1d(clusters, selected_cluster_ids)

    cluster_dist_means_list = []
    # Process candidate clusters in batches to bound memory use.
    total_batches = candidate_cluster_ids.shape[0] // candidate_cluster_batch_size + 1
    for batch_i in range(total_batches):
        start_idx = batch_i*candidate_cluster_batch_size
        end_idx = min((batch_i+1)*candidate_cluster_batch_size, candidate_cluster_ids.shape[0])
        candidate_batch = candidate_cluster_ids[start_idx:end_idx]
        # Boolean mask of instances belonging to this batch of candidates.
        candidate_cid_instances = np.in1d(clusters, candidate_batch)

        # Group label for every (candidate instance, selected instance) pair.
        candidate_cluster_rep = np.repeat(clusters[candidate_cid_instances], len(clusters[selected_cid_instances]))
        candidate_cluster_dist = feature_dist_func(features[selected_cid_instances,:], features[candidate_cid_instances,:])
        dist_df = pd.DataFrame(data=np.hstack([candidate_cluster_dist.reshape(-1,1),
                                               candidate_cluster_rep.reshape(-1,1)]),
                               columns=['dist', 'candidate_group'])
        # Mean distance per candidate cluster, reordered to match candidate_batch.
        cluster_dist_means_list.append(dist_df.groupby('candidate_group').mean().loc[candidate_batch].values.flatten())
    clusters_avg_dissimilarity = np.hstack(cluster_dist_means_list)
    return clusters_ordered_ids, clusters_avg_dissimilarity
"""
----
curr_clusters_dissimilarity = np.zeros(shape=(len(candidate_cluster_ids),))
for selected_cid in selected_cluster_ids:
selected_cid_instances = np.where(clusters == selected_cid)[0]
candidate_cid_instances = np.in1d(clusters, candidate_cluster_ids)
candidate_cluster_rep = np.repeat(clusters[candidate_cid_instances], len(selected_cid_instances))
candidate_cluster_dist = feature_dist_func(features[selected_cid_instances,:], features[candidate_cid_instances,:])
dist_df = pd.DataFrame(data=np.hstack([candidate_cluster_dist.reshape(-1,1),
candidate_cluster_rep.reshape(-1,1)]),
columns=['dist', 'group'])
cluster_dist_means = dist_df.groupby('group').mean().values.flatten()
sorted_idx = np.argsort(candidate_cluster_ids)
rev_sorted_idx = np.zeros(len(candidate_cluster_ids), dtype=int)
rev_sorted_idx[sorted_idx] = np.arange(len(candidate_cluster_ids)) # adapted from: https://stackoverflow.com/a/10831155
curr_clusters_dissimilarity[:] = cluster_dist_means[rev_sorted_idx]
clusters_avg_dissimilarity += curr_clusters_dissimilarity
clusters_avg_dissimilarity /= len(selected_cluster_ids)
"""
"""
Computes avg cluster dissimilarity/distance of candidate clusters towards selected clusters.
Uses a disk-stored np.memmap matrix storing the instance dissimilarities.
"""
def get_avg_cluster_dissimilarity_from_file(clusters,
                                            memmap_filename,
                                            n_instances,
                                            selected_cluster_ids,
                                            candidate_cluster_ids,
                                            candidate_cluster_batch_size=2056,
                                            batched_clusters_method=True):
    """Average dissimilarity of candidate clusters to selected clusters,
    read from a disk-stored ``(n_instances, n_instances)`` float16 memmap
    of pairwise instance dissimilarities.

    Returns ``(clusters_ordered_ids, clusters_avg_dissimilarity)`` ordered
    like ``candidate_cluster_ids``.
    """
    # Read-only view; no data is loaded until sliced.
    dissimilarity_matrix = np.memmap(memmap_filename, shape=(n_instances, n_instances),
                                     dtype='float16', mode='r')
    clusters_ordered_ids = candidate_cluster_ids[:]
    clusters_avg_dissimilarity = np.zeros(shape=(len(candidate_cluster_ids),))
    cluster_dist_means_list = []
    selected_cid_instances = np.in1d(clusters, selected_cluster_ids)
    if batched_clusters_method:
        # Vectorized path: slice the memmap for a whole batch of candidate
        # clusters at once and group-average with pandas.
        total_batches = candidate_cluster_ids.shape[0] // candidate_cluster_batch_size + 1
        for batch_i in range(total_batches):
            start_idx = batch_i*candidate_cluster_batch_size
            end_idx = min((batch_i+1)*candidate_cluster_batch_size, candidate_cluster_ids.shape[0])
            candidate_batch = candidate_cluster_ids[start_idx:end_idx]
            candidate_cid_instances = np.in1d(clusters, candidate_batch)
            # Candidate-major labels: matches the row-major flatten of
            # ``dm_slice`` below (rows = candidate instances).
            candidate_cluster_rep = np.repeat(clusters[candidate_cid_instances], len(clusters[selected_cid_instances]))
            dm_slice = dissimilarity_matrix[candidate_cid_instances, :][:,selected_cid_instances]
            dm_slice = dm_slice.flatten().reshape(-1,1)
            dist_df = pd.DataFrame(data=np.hstack([dm_slice,
                                                   candidate_cluster_rep.reshape(-1,1)]),
                                   columns=['dist', 'candidate_group'])
            # Mean per candidate cluster, reordered to the batch's cluster order.
            cluster_dist_means_list.append(dist_df.groupby('candidate_group').mean().loc[candidate_batch].values.flatten())
    else:
        # Simple path: one memmap slice and mean per candidate cluster.
        for ccid in clusters_ordered_ids:
            ccid_instances_idx = np.where(clusters == ccid)[0]
            dm_slice = dissimilarity_matrix[ccid_instances_idx, :][:,selected_cid_instances]
            cluster_dist_means_list.append(np.mean(dm_slice))
    clusters_avg_dissimilarity = np.hstack(cluster_dist_means_list)
    # Drop the memmap handle promptly; results above are in-memory copies.
    del dissimilarity_matrix
    return clusters_ordered_ids, clusters_avg_dissimilarity
"""
Computes dissimilarity matrix for a given row of features.
"""
def get_dissimilarity_matrix(features,
                             feature_dist_func=tanimoto_dissimilarity):
    """Compute the full pairwise dissimilarity matrix of ``features``.

    Parameters:
        features: 2-D matrix, one instance per row.
        feature_dist_func: distance function following the module contract
            (returns a flat array of distances between rows of X and Y).

    Returns a ``(row_count, row_count)`` float matrix.
    """
    row_count = features.shape[0]
    dissimilarity_matrix = np.zeros(shape=(row_count, row_count))
    # One batched call per row instead of one call per (i, j) pair: the
    # module's distance functions already compare each row of X with each
    # row of Y, so a single-row X against all of ``features`` fills row i.
    for i in range(row_count):
        dissimilarity_matrix[i, :] = feature_dist_func(features[i:i+1, :], features)
    return dissimilarity_matrix
"""
Returns dissimilarity matrix slice from disk-stored np.memmap matrix. .
"""
def get_dissimilarity_matrix_from_file(instances_idx,
                                       memmap_filename,
                                       n_instances):
    """Slice the disk-stored ``(n_instances, n_instances)`` float16 memmap.

    Returns the square sub-matrix for ``instances_idx`` (rows and columns).
    """
    dissimilarity_matrix = np.memmap(memmap_filename, shape=(n_instances, n_instances),
                                     dtype='float16', mode='r')
    # Two-step fancy indexing copies the requested slice into memory.
    dm_slice = dissimilarity_matrix[instances_idx, :][:,instances_idx]
    # Drop the memmap handle promptly; ``dm_slice`` is an in-memory copy.
    del dissimilarity_matrix
return dm_slice | """
Contains data utils.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.SaltRemover import SaltRemover
from rdkit.Chem.FilterCatalog import *
import numpy as np
import pandas as pd
"""
Computes tanimoto dissimilarity array between two feature matrices.
Compares each row of X with each row of Y.
"""
def tanimoto_dissimilarity(X, Y, X_batch_size=50, Y_batch_size=50):
    """Return 1 - Tanimoto similarity for every row pair of X and Y.

    X and Y are binary (0/1) fingerprint matrices; 1-D inputs are treated as
    a single row. Pairs are processed in ``X_batch_size`` x ``Y_batch_size``
    batches to bound memory; the result is a flat array ordered block by
    block with the X batches outermost (row-major within each block).

    NOTE(review): a pair of all-zero rows yields a 0/0 division (nan with a
    runtime warning) -- confirm callers never pass empty fingerprints.
    """
    n_features = X.shape[-1]
    if X.ndim == 1:
        X = X.reshape(-1, n_features)
    if Y.ndim == 1:
        Y = Y.reshape(-1, n_features)
    tan_sim = []
    # Ceiling division: the previous ``n // size + 1`` scheduled a spurious
    # empty trailing batch whenever the row count was an exact multiple of
    # the batch size.
    X_total_batches = -(-X.shape[0] // X_batch_size)
    Y_total_batches = -(-Y.shape[0] // Y_batch_size)
    for X_batch_i in range(X_total_batches):
        X_start_idx = X_batch_i*X_batch_size
        X_end_idx = min((X_batch_i+1)*X_batch_size, X.shape[0])
        X_batch = X[X_start_idx:X_end_idx,:]
        for Y_batch_i in range(Y_total_batches):
            Y_start_idx = Y_batch_i*Y_batch_size
            Y_end_idx = min((Y_batch_i+1)*Y_batch_size, Y.shape[0])
            Y_batch = Y[Y_start_idx:Y_end_idx,:]
            # adapted from: https://github.com/deepchem/deepchem/blob/2531eca8564c1dc68910d791b0bcd91fd586afb9/deepchem/trans/transformers.py#L752
            # For binary vectors |X & Y| equals the dot product.
            numerator = np.dot(X_batch, Y_batch.T).flatten()
            # |X | Y| = n_features - |~X & ~Y|
            denominator = n_features - np.dot(1-X_batch, (1-Y_batch).T).flatten()
            tan_sim.append(numerator / denominator)
    if not tan_sim:
        # Empty X or Y: mirror the original's empty float64 result.
        return np.zeros(0)
    tan_sim = np.hstack(tan_sim)
    return 1.0 - tan_sim
"""
Computes tanimoto dissimilarity between two vectors.
"""
def feature_dist_func_dict():
    """Registry mapping distance-function names to their implementations."""
    return {"tanimoto_dissimilarity": tanimoto_dissimilarity}
"""
Returns indices of duplicated smiles from x_smiles in y_smiles.
"""
def get_duplicate_smiles_in1d(x_smiles, y_smiles, smiles_are_canonical=True):
    """Indices of entries in ``y_smiles`` that duplicate entries of ``x_smiles``."""
    if smiles_are_canonical:
        x_canon_smiles = x_smiles
        y_canon_smiles = y_smiles
    else:
        # Canonicalize via an RDKit round-trip before comparing.
        x_canon_smiles = np.array([Chem.MolToSmiles(Chem.MolFromSmiles(x)) for x in x_smiles])
        y_canon_smiles = np.array([Chem.MolToSmiles(Chem.MolFromSmiles(y)) for y in y_smiles])
    duplicate_mask = np.in1d(y_canon_smiles, x_canon_smiles)
    return list(np.flatnonzero(duplicate_mask))
"""
Computes avg cluster dissimilarity/distance of candidate clusters towards selected clusters.
clusters_ordered_ids is the ordering of clusters_avg_dissimilarity.
Assumes feature distance function returns array with distances between rows of X and Y.
"""
def get_avg_cluster_dissimilarity(clusters,
                                  features,
                                  selected_cluster_ids,
                                  candidate_cluster_ids,
                                  feature_dist_func=tanimoto_dissimilarity,
                                  candidate_cluster_batch_size=2056):
    """Average dissimilarity of each candidate cluster towards the selected set.

    Returns ``(clusters_ordered_ids, clusters_avg_dissimilarity)``; the second
    array is ordered like ``candidate_cluster_ids``.
    """
    clusters_ordered_ids = candidate_cluster_ids #[:] no need to make a copy
    #clusters_avg_dissimilarity = np.zeros(shape=(len(candidate_cluster_ids),))
    # Boolean mask over instances belonging to any selected cluster.
    selected_cid_instances = np.in1d(clusters, selected_cluster_ids)
    cluster_dist_means_list = []
    # NOTE(review): when the candidate count is an exact multiple of the batch
    # size, this schedules one extra empty batch (harmless: appends an empty array).
    total_batches = candidate_cluster_ids.shape[0] // candidate_cluster_batch_size + 1
    for batch_i in range(total_batches):
        start_idx = batch_i*candidate_cluster_batch_size
        end_idx = min((batch_i+1)*candidate_cluster_batch_size, candidate_cluster_ids.shape[0])
        candidate_batch = candidate_cluster_ids[start_idx:end_idx]
        candidate_cid_instances = np.in1d(clusters, candidate_batch)
        # One candidate-cluster label per (candidate instance, selected instance)
        # pair, candidate varying slowest.
        candidate_cluster_rep = np.repeat(clusters[candidate_cid_instances], len(clusters[selected_cid_instances]))
        candidate_cluster_dist = feature_dist_func(features[selected_cid_instances,:], features[candidate_cid_instances,:])
        # NOTE(review): ``candidate_cluster_rep`` is candidate-major, but
        # ``tanimoto_dissimilarity`` flattens with its FIRST argument (the
        # selected instances here) varying slowest; the memmap-based sibling
        # below slices candidate-major. Confirm the intended flattening order
        # of ``feature_dist_func`` -- the two paths look inconsistent.
        dist_df = pd.DataFrame(data=np.hstack([candidate_cluster_dist.reshape(-1,1),
                                               candidate_cluster_rep.reshape(-1,1)]),
                               columns=['dist', 'candidate_group'])
        # Mean distance per candidate cluster, reordered to match the batch order.
        cluster_dist_means_list.append(dist_df.groupby('candidate_group').mean().loc[candidate_batch].values.flatten())
    clusters_avg_dissimilarity = np.hstack(cluster_dist_means_list)
    return clusters_ordered_ids, clusters_avg_dissimilarity
"""
----
curr_clusters_dissimilarity = np.zeros(shape=(len(candidate_cluster_ids),))
for selected_cid in selected_cluster_ids:
selected_cid_instances = np.where(clusters == selected_cid)[0]
candidate_cid_instances = np.in1d(clusters, candidate_cluster_ids)
candidate_cluster_rep = np.repeat(clusters[candidate_cid_instances], len(selected_cid_instances))
candidate_cluster_dist = feature_dist_func(features[selected_cid_instances,:], features[candidate_cid_instances,:])
dist_df = pd.DataFrame(data=np.hstack([candidate_cluster_dist.reshape(-1,1),
candidate_cluster_rep.reshape(-1,1)]),
columns=['dist', 'group'])
cluster_dist_means = dist_df.groupby('group').mean().values.flatten()
sorted_idx = np.argsort(candidate_cluster_ids)
rev_sorted_idx = np.zeros(len(candidate_cluster_ids), dtype=int)
rev_sorted_idx[sorted_idx] = np.arange(len(candidate_cluster_ids)) # adapted from: https://stackoverflow.com/a/10831155
curr_clusters_dissimilarity[:] = cluster_dist_means[rev_sorted_idx]
clusters_avg_dissimilarity += curr_clusters_dissimilarity
clusters_avg_dissimilarity /= len(selected_cluster_ids)
"""
"""
Computes avg cluster dissimilarity/distance of candidate clusters towards selected clusters.
Uses a disk-stored np.memmap matrix storing the instance dissimilarities.
"""
def get_avg_cluster_dissimilarity_from_file(clusters,
                                            memmap_filename,
                                            n_instances,
                                            selected_cluster_ids,
                                            candidate_cluster_ids,
                                            candidate_cluster_batch_size=2056,
                                            batched_clusters_method=True):
    """Average dissimilarity of candidate clusters to selected clusters,
    read from a disk-stored ``(n_instances, n_instances)`` float16 memmap
    of pairwise instance dissimilarities.

    Returns ``(clusters_ordered_ids, clusters_avg_dissimilarity)`` ordered
    like ``candidate_cluster_ids``.
    """
    # Read-only view; no data is loaded until sliced.
    dissimilarity_matrix = np.memmap(memmap_filename, shape=(n_instances, n_instances),
                                     dtype='float16', mode='r')
    clusters_ordered_ids = candidate_cluster_ids[:]
    clusters_avg_dissimilarity = np.zeros(shape=(len(candidate_cluster_ids),))
    cluster_dist_means_list = []
    selected_cid_instances = np.in1d(clusters, selected_cluster_ids)
    if batched_clusters_method:
        # Vectorized path: slice the memmap for a whole batch of candidate
        # clusters at once and group-average with pandas.
        total_batches = candidate_cluster_ids.shape[0] // candidate_cluster_batch_size + 1
        for batch_i in range(total_batches):
            start_idx = batch_i*candidate_cluster_batch_size
            end_idx = min((batch_i+1)*candidate_cluster_batch_size, candidate_cluster_ids.shape[0])
            candidate_batch = candidate_cluster_ids[start_idx:end_idx]
            candidate_cid_instances = np.in1d(clusters, candidate_batch)
            # Candidate-major labels: matches the row-major flatten of
            # ``dm_slice`` below (rows = candidate instances).
            candidate_cluster_rep = np.repeat(clusters[candidate_cid_instances], len(clusters[selected_cid_instances]))
            dm_slice = dissimilarity_matrix[candidate_cid_instances, :][:,selected_cid_instances]
            dm_slice = dm_slice.flatten().reshape(-1,1)
            dist_df = pd.DataFrame(data=np.hstack([dm_slice,
                                                   candidate_cluster_rep.reshape(-1,1)]),
                                   columns=['dist', 'candidate_group'])
            # Mean per candidate cluster, reordered to the batch's cluster order.
            cluster_dist_means_list.append(dist_df.groupby('candidate_group').mean().loc[candidate_batch].values.flatten())
    else:
        # Simple path: one memmap slice and mean per candidate cluster.
        for ccid in clusters_ordered_ids:
            ccid_instances_idx = np.where(clusters == ccid)[0]
            dm_slice = dissimilarity_matrix[ccid_instances_idx, :][:,selected_cid_instances]
            cluster_dist_means_list.append(np.mean(dm_slice))
    clusters_avg_dissimilarity = np.hstack(cluster_dist_means_list)
    # Drop the memmap handle promptly; results above are in-memory copies.
    del dissimilarity_matrix
    return clusters_ordered_ids, clusters_avg_dissimilarity
"""
Computes dissimilarity matrix for a given row of features.
"""
def get_dissimilarity_matrix(features,
                             feature_dist_func=tanimoto_dissimilarity):
    """Dense pairwise dissimilarity matrix, shape (row_count, row_count)."""
    row_count = features.shape[0]
    dissimilarity_matrix = np.zeros(shape=(row_count, row_count))
    # O(n^2) single-pair distance calls; acceptable for small feature sets.
    for i in range(row_count):
        for j in range(row_count):
            dissimilarity_matrix[i,j] = feature_dist_func(features[i:i+1,:], features[j:j+1,:])
    return dissimilarity_matrix
"""
Returns dissimilarity matrix slice from disk-stored np.memmap matrix. .
"""
def get_dissimilarity_matrix_from_file(instances_idx,
                                       memmap_filename,
                                       n_instances):
    """Slice the disk-stored ``(n_instances, n_instances)`` float16 memmap.

    Returns the square sub-matrix for ``instances_idx`` (rows and columns).
    """
    dissimilarity_matrix = np.memmap(memmap_filename, shape=(n_instances, n_instances),
                                     dtype='float16', mode='r')
    # Two-step fancy indexing copies the requested slice into memory.
    dm_slice = dissimilarity_matrix[instances_idx, :][:,instances_idx]
    # Drop the memmap handle promptly; ``dm_slice`` is an in-memory copy.
    del dissimilarity_matrix
return dm_slice | en | 0.622213 | Contains data utils. Computes tanimoto dissimilarity array between two feature matrices. Compares each row of X with each row of Y. # adapted from: https://github.com/deepchem/deepchem/blob/2531eca8564c1dc68910d791b0bcd91fd586afb9/deepchem/trans/transformers.py#L752 # equivalent to np.bitwise_and(X_batch, Y_batch), axis=1) # np.sum(np.bitwise_or(X_rep, Y_rep), axis=1) Computes tanimoto dissimilarity between two vectors. Returns indices of duplicated smiles from x_smiles in y_smiles. Computes avg cluster dissimilarity/distance of candidate clusters towards selected clusters. clusters_ordered_ids is the ordering of clusters_avg_dissimilarity. Assumes feature distance function returns array with distances between rows of X and Y. #[:] no need to make a copy #clusters_avg_dissimilarity = np.zeros(shape=(len(candidate_cluster_ids),)) ---- curr_clusters_dissimilarity = np.zeros(shape=(len(candidate_cluster_ids),)) for selected_cid in selected_cluster_ids: selected_cid_instances = np.where(clusters == selected_cid)[0] candidate_cid_instances = np.in1d(clusters, candidate_cluster_ids) candidate_cluster_rep = np.repeat(clusters[candidate_cid_instances], len(selected_cid_instances)) candidate_cluster_dist = feature_dist_func(features[selected_cid_instances,:], features[candidate_cid_instances,:]) dist_df = pd.DataFrame(data=np.hstack([candidate_cluster_dist.reshape(-1,1), candidate_cluster_rep.reshape(-1,1)]), columns=['dist', 'group']) cluster_dist_means = dist_df.groupby('group').mean().values.flatten() sorted_idx = np.argsort(candidate_cluster_ids) rev_sorted_idx = np.zeros(len(candidate_cluster_ids), dtype=int) rev_sorted_idx[sorted_idx] = np.arange(len(candidate_cluster_ids)) # adapted from: https://stackoverflow.com/a/10831155 curr_clusters_dissimilarity[:] = cluster_dist_means[rev_sorted_idx] clusters_avg_dissimilarity += curr_clusters_dissimilarity clusters_avg_dissimilarity /= len(selected_cluster_ids) Computes avg cluster 
dissimilarity/distance of candidate clusters towards selected clusters. Uses a disk-stored np.memmap matrix storing the instance dissimilarities. Computes dissimilarity matrix for a given row of features. Returns dissimilarity matrix slice from disk-stored np.memmap matrix. . | 1.869685 | 2 |
snappy_wrappers/wrappers/epitoper_par/wrapper.py | PotatoThrone/snappy-pipeline | 5 | 6619294 | # isort:skip_file
import json
import os
import sys
import tempfile
import textwrap
from math import ceil
base_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
sys.path.insert(0, base_dir)
from snappy_wrappers.wrapper_parallel import (
ResourceUsage,
SgeResourceUsageConverter,
gib,
hours,
in_working_dir,
) # pylint: disable=wrong-import-position
# TODO: call on overlapping windows, on merge make unique
# Naming clash limbo...
# The ``snakemake`` job object injected into this wrapper script shadows the
# ``snakemake`` package: stash the job object, import the package names we
# need, then restore it.
snake_job = snakemake
del snakemake
from snakemake import shell  # noqa: C0411
from snakemake import snakemake as run_snakemake  # noqa: C0411
snakemake = snake_job
# Create Temp Work Directory ----------------------------------------------------------------------
tmpdir = tempfile.mkdtemp("snake_par")
# Perform Splitting -------------------------------------------------------------------------------
shell.executable("/bin/bash")
shell.prefix("set -ex;")
# Figure out the number of chunks
chunk_size = snakemake.config["step_config"]["epitope_prediction"]["epitoper"]["chunk_size"]
# Count non-header VCF records (zgrep drops "#" header lines) to size the split.
record_count = int(shell('zgrep -v "^#" {snakemake.input} | wc -l', read=True))
num_chunks = int(ceil(record_count / chunk_size))
# Split input into chunks of the configured size
# Each chunk ends up as job_in.<i>.d/input.vcf.gz, one directory per job.
with in_working_dir(tmpdir, print_chdir=True):
    shell(
        r"""
    # Hack: get back bin directory of base/root environment.
    export PATH=$PATH:$(dirname $(dirname $(which conda)))/bin
    mkdir -p splitting
    SUFFIX=.vcf \
    NUM_LINES={snakemake.config[step_config][epitope_prediction][epitoper][chunk_size]} \
    snappy-vcf_split \
        splitting/ \
        {snakemake.input}
    i=0
    for fname in $(ls splitting/*.vcf | sort); do
        mkdir -p job_in.$i.d
        mv $fname job_in.$i.d/input.vcf
        bgzip -f job_in.$i.d/input.vcf
        let "++i"
    done
    """
    )
# Generate Snakefile chunk-by-chunk ---------------------------------------------------------------
result_files = ["job_out.{jobno}.d/.done".format(jobno=jobno) for jobno in range(num_chunks)]
chunks = [
textwrap.dedent(
r"""
shell.executable("/bin/bash")
shell.prefix("set -ex;")
configfile: 'config.json'
localrules: all
rule all:
input: {all_results}
"""
)
.lstrip()
.format(all_results=", ".join(map(repr, result_files)))
]
def esc(s):
    """Escape literal braces in *s* for use inside str.format templates."""
    doubled = {"{": "{{", "}": "}}"}
    return "".join(doubled.get(ch, ch) for ch in s)
#: Extensions to generate
key_ext = {"txt_gz": "txt.gz", "txt_gz_md5": "txt.gz.md5"}
# Per-chunk job resources submitted to the cluster scheduler.
resources = ResourceUsage(cores=8, memory=gib(30.0), duration=hours(6.0))
# Emit one "chunk_<jobno>" rule per split, delegating to the epitoper wrapper.
for jobno in range(num_chunks):
    output = {
        key: "job_out.{jobno}.d/out/tmp_{jobno}.{ext}".format(jobno=jobno, ext=ext)
        for key, ext in key_ext.items()
    }
    chunks.append(
        textwrap.dedent(
            r"""
            rule chunk_{jobno}:
                input:
                    vcf='job_in.{jobno}.d/input.vcf.gz',
                output:
                    touch("job_out.{jobno}.d/.done"),
                    **{output}
                params:
                    args={args}
                wrapper: '{wrapper_prefix}/snappy_wrappers/wrappers/epitoper'
            cluster_config['chunk_{jobno}'] = {resources}
            """
        ).format(
            jobno=jobno,
            output=repr(output),
            args=repr(snakemake.params["args"]),
            wrapper_prefix="file://" + base_dir,
            resources=repr(SgeResourceUsageConverter(resources).to_res_dict()),
        )
    )
# Write the generated Snakefile (echoed to stderr for debugging).
with in_working_dir(tmpdir, print_chdir=True):
    with open("Snakefile", "wt") as snakefile:
        print("\n\n".join(chunks), file=sys.stderr)
        print("\n\n".join(chunks), file=snakefile)
# Write out config file
with in_working_dir(tmpdir, print_chdir=True):
    with open("config.json", "wt") as configfile:
        json.dump(snakemake.config, configfile)
    # Echo the config to stderr for debugging.
    with open("config.json", "rt") as configfile:
        print(configfile.read(), file=sys.stderr)
# Run the generated per-chunk Snakefile inside the temporary work directory.
with in_working_dir(tmpdir, print_chdir=True):
    # Launch execution on Snakefile
    # snakemake.config['step_config']['epitope_prediction']['epitoper']['use_drmaa']
    run_snakemake(
        "Snakefile",
        use_conda=True,
        cores=snakemake.config["step_config"]["epitope_prediction"]["epitoper"]["num_threads"],
        restart_times=snakemake.config["step_config"]["epitope_prediction"]["epitoper"][
            "restart_times"
        ],
        max_jobs_per_second=snakemake.config["step_config"]["epitope_prediction"]["epitoper"][
            "max_jobs_per_second"
        ],
        max_status_checks_per_second=snakemake.config["step_config"]["epitope_prediction"][
            "epitoper"
        ]["max_status_checks_per_second"],
    )
# Join results
# Concatenate the per-chunk outputs: keep the first chunk's 2-line header,
# append the body of every chunk, then bgzip and write an md5 checksum.
max_jobno = num_chunks - 1
with in_working_dir(tmpdir, print_chdir=True):
    shell(
        textwrap.dedent(
            r"""
        set -euo pipefail
        # Also pipe everything to log file
        if [[ -n "{snakemake.log}" ]]; then
            if [[ "$(set +e; tty; set -e)" != "" ]]; then
                rm -f "{snakemake.log}" && mkdir -p $(dirname {snakemake.log})
                exec &> >(tee -a "{snakemake.log}" >&2)
            else
                rm -f "{snakemake.log}" && mkdir -p $(dirname {snakemake.log})
                echo "No tty, logging disabled" >"{snakemake.log}"
            fi
        fi
        out_base=$(dirname {snakemake.output.txt_gz})/$(basename {snakemake.output.txt_gz} .txt.gz)
        mkdir -p $(dirname {snakemake.output.txt_gz})
        # take first header -------------------------------------------------------
        zcat job_out.0.d/out/tmp_0.txt.gz | head -n 2 >$out_base.txt || true
        # append body contents ----------------------------------------------------
        for jobno in {{0..{max_jobno}}}; do
            zcat job_out.$jobno.d/out/tmp_$jobno.txt.gz | tail -n +3 \
                >>$out_base.txt
        done
        # bgzip output ------------------------------------------------------------
        bgzip -f $out_base.txt
        pushd $(dirname $out_base) && md5sum $(basename $out_base.txt).gz >$out_base.txt.gz.md5 && popd
        """
        ).lstrip()
    )
| # isort:skip_file
import json
import os
import sys
import tempfile
import textwrap
from math import ceil
base_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
sys.path.insert(0, base_dir)
from snappy_wrappers.wrapper_parallel import (
ResourceUsage,
SgeResourceUsageConverter,
gib,
hours,
in_working_dir,
) # pylint: disable=wrong-import-position
# TODO: call on overlapping windows, on merge make unique
# Naming clash limbo...
snake_job = snakemake
del snakemake
from snakemake import shell # noqa: C0411
from snakemake import snakemake as run_snakemake # noqa: C0411
snakemake = snake_job
# Create Temp Work Directory ----------------------------------------------------------------------
tmpdir = tempfile.mkdtemp("snake_par")
# Perform Splitting -------------------------------------------------------------------------------
shell.executable("/bin/bash")
shell.prefix("set -ex;")
# Figure out the number of chunks
chunk_size = snakemake.config["step_config"]["epitope_prediction"]["epitoper"]["chunk_size"]
record_count = int(shell('zgrep -v "^#" {snakemake.input} | wc -l', read=True))
num_chunks = int(ceil(record_count / chunk_size))
# Split input into chunks of the configured size
with in_working_dir(tmpdir, print_chdir=True):
shell(
r"""
# Hack: get back bin directory of base/root environment.
export PATH=$PATH:$(dirname $(dirname $(which conda)))/bin
mkdir -p splitting
SUFFIX=.vcf \
NUM_LINES={snakemake.config[step_config][epitope_prediction][epitoper][chunk_size]} \
snappy-vcf_split \
splitting/ \
{snakemake.input}
i=0
for fname in $(ls splitting/*.vcf | sort); do
mkdir -p job_in.$i.d
mv $fname job_in.$i.d/input.vcf
bgzip -f job_in.$i.d/input.vcf
let "++i"
done
"""
)
# Generate Snakefile chunk-by-chunk ---------------------------------------------------------------
result_files = ["job_out.{jobno}.d/.done".format(jobno=jobno) for jobno in range(num_chunks)]
chunks = [
textwrap.dedent(
r"""
shell.executable("/bin/bash")
shell.prefix("set -ex;")
configfile: 'config.json'
localrules: all
rule all:
input: {all_results}
"""
)
.lstrip()
.format(all_results=", ".join(map(repr, result_files)))
]
def esc(s):
    """Escape literal braces in *s* for use inside str.format templates."""
    doubled = {"{": "{{", "}": "}}"}
    return "".join(doubled.get(ch, ch) for ch in s)
#: Extensions to generate
key_ext = {"txt_gz": "txt.gz", "txt_gz_md5": "txt.gz.md5"}
resources = ResourceUsage(cores=8, memory=gib(30.0), duration=hours(6.0))
for jobno in range(num_chunks):
output = {
key: "job_out.{jobno}.d/out/tmp_{jobno}.{ext}".format(jobno=jobno, ext=ext)
for key, ext in key_ext.items()
}
chunks.append(
textwrap.dedent(
r"""
rule chunk_{jobno}:
input:
vcf='job_in.{jobno}.d/input.vcf.gz',
output:
touch("job_out.{jobno}.d/.done"),
**{output}
params:
args={args}
wrapper: '{wrapper_prefix}/snappy_wrappers/wrappers/epitoper'
cluster_config['chunk_{jobno}'] = {resources}
"""
).format(
jobno=jobno,
output=repr(output),
args=repr(snakemake.params["args"]),
wrapper_prefix="file://" + base_dir,
resources=repr(SgeResourceUsageConverter(resources).to_res_dict()),
)
)
with in_working_dir(tmpdir, print_chdir=True):
with open("Snakefile", "wt") as snakefile:
print("\n\n".join(chunks), file=sys.stderr)
print("\n\n".join(chunks), file=snakefile)
# Write out config file
with in_working_dir(tmpdir, print_chdir=True):
with open("config.json", "wt") as configfile:
json.dump(snakemake.config, configfile)
with open("config.json", "rt") as configfile:
print(configfile.read(), file=sys.stderr)
with in_working_dir(tmpdir, print_chdir=True):
# Launch execution on Snakefile
# snakemake.config['step_config']['epitope_prediction']['epitoper']['use_drmaa']
run_snakemake(
"Snakefile",
use_conda=True,
cores=snakemake.config["step_config"]["epitope_prediction"]["epitoper"]["num_threads"],
restart_times=snakemake.config["step_config"]["epitope_prediction"]["epitoper"][
"restart_times"
],
max_jobs_per_second=snakemake.config["step_config"]["epitope_prediction"]["epitoper"][
"max_jobs_per_second"
],
max_status_checks_per_second=snakemake.config["step_config"]["epitope_prediction"][
"epitoper"
]["max_status_checks_per_second"],
)
# Join results
max_jobno = num_chunks - 1
with in_working_dir(tmpdir, print_chdir=True):
shell(
textwrap.dedent(
r"""
set -euo pipefail
# Also pipe everything to log file
if [[ -n "{snakemake.log}" ]]; then
if [[ "$(set +e; tty; set -e)" != "" ]]; then
rm -f "{snakemake.log}" && mkdir -p $(dirname {snakemake.log})
exec &> >(tee -a "{snakemake.log}" >&2)
else
rm -f "{snakemake.log}" && mkdir -p $(dirname {snakemake.log})
echo "No tty, logging disabled" >"{snakemake.log}"
fi
fi
out_base=$(dirname {snakemake.output.txt_gz})/$(basename {snakemake.output.txt_gz} .txt.gz)
mkdir -p $(dirname {snakemake.output.txt_gz})
# take first header -------------------------------------------------------
zcat job_out.0.d/out/tmp_0.txt.gz | head -n 2 >$out_base.txt || true
# append body contents ----------------------------------------------------
for jobno in {{0..{max_jobno}}}; do
zcat job_out.$jobno.d/out/tmp_$jobno.txt.gz | tail -n +3 \
>>$out_base.txt
done
# bgzip output ------------------------------------------------------------
bgzip -f $out_base.txt
pushd $(dirname $out_base) && md5sum $(basename $out_base.txt).gz >$out_base.txt.gz.md5 && popd
"""
).lstrip()
)
| en | 0.384674 | # isort:skip_file # pylint: disable=wrong-import-position # TODO: call on overlapping windows, on merge make unique # Naming clash limbo... # noqa: C0411 # noqa: C0411 # Create Temp Work Directory ---------------------------------------------------------------------- # Perform Splitting ------------------------------------------------------------------------------- # Figure out the number of chunks #" {snakemake.input} | wc -l', read=True)) # Split input into chunks of the configured size # Hack: get back bin directory of base/root environment. export PATH=$PATH:$(dirname $(dirname $(which conda)))/bin mkdir -p splitting SUFFIX=.vcf \ NUM_LINES={snakemake.config[step_config][epitope_prediction][epitoper][chunk_size]} \ snappy-vcf_split \ splitting/ \ {snakemake.input} i=0 for fname in $(ls splitting/*.vcf | sort); do mkdir -p job_in.$i.d mv $fname job_in.$i.d/input.vcf bgzip -f job_in.$i.d/input.vcf let "++i" done # Generate Snakefile chunk-by-chunk --------------------------------------------------------------- shell.executable("/bin/bash") shell.prefix("set -ex;") configfile: 'config.json' localrules: all rule all: input: {all_results} #: Extensions to generate rule chunk_{jobno}: input: vcf='job_in.{jobno}.d/input.vcf.gz', output: touch("job_out.{jobno}.d/.done"), **{output} params: args={args} wrapper: '{wrapper_prefix}/snappy_wrappers/wrappers/epitoper' cluster_config['chunk_{jobno}'] = {resources} # Write out config file # Launch execution on Snakefile # snakemake.config['step_config']['epitope_prediction']['epitoper']['use_drmaa'] # Join results set -euo pipefail # Also pipe everything to log file if [[ -n "{snakemake.log}" ]]; then if [[ "$(set +e; tty; set -e)" != "" ]]; then rm -f "{snakemake.log}" && mkdir -p $(dirname {snakemake.log}) exec &> >(tee -a "{snakemake.log}" >&2) else rm -f "{snakemake.log}" && mkdir -p $(dirname {snakemake.log}) echo "No tty, logging disabled" >"{snakemake.log}" fi fi out_base=$(dirname 
{snakemake.output.txt_gz})/$(basename {snakemake.output.txt_gz} .txt.gz) mkdir -p $(dirname {snakemake.output.txt_gz}) # take first header ------------------------------------------------------- zcat job_out.0.d/out/tmp_0.txt.gz | head -n 2 >$out_base.txt || true # append body contents ---------------------------------------------------- for jobno in {{0..{max_jobno}}}; do zcat job_out.$jobno.d/out/tmp_$jobno.txt.gz | tail -n +3 \ >>$out_base.txt done # bgzip output ------------------------------------------------------------ bgzip -f $out_base.txt pushd $(dirname $out_base) && md5sum $(basename $out_base.txt).gz >$out_base.txt.gz.md5 && popd | 2.037175 | 2 |
app.py | ap-t/yfinance-rest-api | 0 | 6619295 | <gh_stars>0
from flask import Flask
from flask_cors import CORS
from yfinancerestapi.home.routes import home
from yfinancerestapi.system.routes import system_api
from yfinancerestapi.finance.stocks.routes import stocks_api
from yfinancerestapi.finance.news.routes import news_api
def create_app():
    """Application factory: build the Flask app with CORS and all blueprints."""
    flask_app = Flask(__name__)
    CORS(flask_app)
    # Blueprint -> URL prefix, registered in declaration order.
    blueprints = (
        (home, '/'),
        (system_api, '/api/v1/system'),
        (stocks_api, '/api/v1/finance/stocks'),
        (news_api, '/api/v1/finance/news'),
    )
    for blueprint, prefix in blueprints:
        flask_app.register_blueprint(blueprint, url_prefix=prefix)
    return flask_app
# Module-level WSGI application object (e.g. for "gunicorn app:app").
app = create_app()
if __name__ == "__main__":
app.run() | from flask import Flask
from flask_cors import CORS
from yfinancerestapi.home.routes import home
from yfinancerestapi.system.routes import system_api
from yfinancerestapi.finance.stocks.routes import stocks_api
from yfinancerestapi.finance.news.routes import news_api
def create_app():
    """Application factory: build the Flask app with CORS and all blueprints."""
    app = Flask(__name__)
    CORS(app)
    # Register blueprints
    app.register_blueprint(home, url_prefix='/')
    app.register_blueprint(system_api, url_prefix='/api/v1/system')
    app.register_blueprint(stocks_api, url_prefix='/api/v1/finance/stocks')
    app.register_blueprint(news_api, url_prefix='/api/v1/finance/news')
    return app
# Module-level WSGI application object (e.g. for "gunicorn app:app").
app = create_app()
if __name__ == "__main__":
    # Development server entry point.
    app.run()
setup.py | datadealer/dd_auth | 0 | 6619296 | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.md')).read()
CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
requires = [
'Django==1.4.16',
'django-allauth==0.14.1',
'django_extensions',
'django-json-rpc',
'django-redis-sessions==0.4.0',
'gevent==1.0',
'gunicorn==18.0',
'hiredis',
'PasteScript',
'py-bcrypt',
'pymongo==2.6.3',
'pytz',
'redis==2.8.0',
'South',
]
setup(name='dd_auth',
version='0.1',
description='dd_auth',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Framework :: Django",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
"License :: DFSG approved",
"License :: OSI Approved :: Artistic License",
],
author='C<NAME>',
author_email='<EMAIL>',
license='Artistic Licence 2.0 http://www.perlfoundation.org/attachment/legal/artistic-2_0.txt',
url='https://datadealer.com',
keywords='',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=requires,
test_suite="dd_auth",
entry_points = """\
[paste.app_factory]
main = dd_auth.wsgi:app
""",
)
| import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.md')).read()
CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
requires = [
'Django==1.4.16',
'django-allauth==0.14.1',
'django_extensions',
'django-json-rpc',
'django-redis-sessions==0.4.0',
'gevent==1.0',
'gunicorn==18.0',
'hiredis',
'PasteScript',
'py-bcrypt',
'pymongo==2.6.3',
'pytz',
'redis==2.8.0',
'South',
]
setup(name='dd_auth',
version='0.1',
description='dd_auth',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Framework :: Django",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
"License :: DFSG approved",
"License :: OSI Approved :: Artistic License",
],
author='C<NAME>',
author_email='<EMAIL>',
license='Artistic Licence 2.0 http://www.perlfoundation.org/attachment/legal/artistic-2_0.txt',
url='https://datadealer.com',
keywords='',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=requires,
test_suite="dd_auth",
entry_points = """\
[paste.app_factory]
main = dd_auth.wsgi:app
""",
)
| ml | 0.048262 | \ [paste.app_factory] main = dd_auth.wsgi:app | 1.237336 | 1 |
swarmdjango/core/unitTests/serializersTests/test_photoGallerySerializer.py | YCP-Swarm-Robotics-Capstone-2020-2021/swarm-website-backend | 0 | 6619297 | from core.serializers import serializers
from core.models import PhotoGallery, User
from django.test import TestCase
import datetime
class PhotoGallerySerializerTest(TestCase):
    """Unit tests for ``serializers.PhotoGallerySerializer``."""

    def setUp(self):
        # Fixture: the user that owns the uploaded photo.
        user_fields = {
            'email': '<EMAIL>',
            'lastName': 'Testineer',
            'firstName': 'Test',
            'password': '<PASSWORD>',
            'username': 'test6',
            'id': '1',
        }
        self.userAttribs = user_fields
        self.user = User.objects.create(**user_fields)

        # Fixture: the gallery entry fed to the serializer under test.
        gallery_fields = {
            'fileName': 'Test/File/name',
            'caption': 'Test caption',
            'uploadedBy': self.user
        }
        self.photoGalleryAttribs = gallery_fields
        self.photoGallery = PhotoGallery.objects.create(**gallery_fields)
        self.serializer = serializers.PhotoGallerySerializer(instance=self.photoGallery)
        self.data = self.serializer.data

    def testContainsExpectedFields(self):
        # The serializer must expose exactly the fields the model was built from.
        expected_keys = set(self.photoGalleryAttribs)
        self.assertEqual(set(self.data.keys()), expected_keys)
from core.models import PhotoGallery, User
from django.test import TestCase
import datetime
# NOTE: byte-for-byte duplicate of the PhotoGallerySerializerTest defined
# earlier in this file (dataset-dump artifact); kept as-is.
class PhotoGallerySerializerTest(TestCase):
    """Checks that PhotoGallerySerializer exposes the expected model fields."""

    def setUp(self):
        # User fixture that owns the uploaded photo.
        self.userAttribs = {
            'email': '<EMAIL>',
            'lastName': 'Testineer',
            'firstName': 'Test',
            'password': '<PASSWORD>',
            'username': 'test6',
            'id': '1',
        }
        self.user = User.objects.create(**self.userAttribs)
        # Photo-gallery fixture serialized by the serializer under test.
        self.photoGalleryAttribs = {
            'fileName': 'Test/File/name',
            'caption': 'Test caption',
            'uploadedBy': self.user
        }
        self.photoGallery = PhotoGallery.objects.create(**self.photoGalleryAttribs)
        self.serializer = serializers.PhotoGallerySerializer(instance=self.photoGallery)
        self.data = self.serializer.data

    def testContainsExpectedFields(self):
        # Serialized keys must match the attributes the instance was created with.
        self.assertEqual(set(self.data.keys()), set(self.photoGalleryAttribs.keys()))
eds/openmtc-gevent/futile/src/futile/subprocess/daemon.py | piyush82/elastest-device-emulator-service | 0 | 6619298 | '''
Created on 02.02.2012
@author: kca
'''
from time import sleep
from abc import ABCMeta, abstractproperty, abstractmethod
from futile import Base
from futile.path import Path
from . import check_call, STDOUT
class DaemonController(Base):
    """Abstract base for objects that start and stop a daemon process.

    Subclasses implement ``_start()``, ``_stop()`` and ``is_running``; this
    base adds fixed settle delays after start/stop and context-manager
    support so a controller can wrap a ``with`` block.
    """
    __metaclass__ = ABCMeta

    def __init__(self, sleep = 5, stop_sleep = 3, *args, **kw):
        super(DaemonController, self).__init__(*args, **kw)
        # Seconds to wait after starting / stopping so the daemon can settle.
        self.__start_delay = int(sleep)
        self.__stop_delay = int(stop_sleep)

    @abstractproperty
    def is_running(self):
        raise NotImplementedError()

    def start(self):
        """Start the daemon, then pause to let it come up."""
        self._start()
        sleep(self.__start_delay)

    @abstractmethod
    def _start(self):
        raise NotImplementedError()

    def stop(self):
        """Stop the daemon, then pause to let it shut down."""
        self._stop()
        sleep(self.__stop_delay)

    @abstractmethod
    def _stop(self):
        raise NotImplementedError()

    # Context-manager protocol: start on enter, always stop on exit.
    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()
class DummyController(DaemonController):
    """No-op controller: start/stop do nothing and it never reports running.

    Useful as a null object wherever a DaemonController is required but no
    real daemon should be managed.  Delays default to 0 since there is
    nothing to wait for.
    """

    def __init__(self, sleep = 0, stop_sleep = 0, *args, **kw):
        # BUG FIX: the original called super(DummyController).__init__(...),
        # i.e. an *unbound* super proxy (missing the instance argument),
        # which raises TypeError the moment a DummyController is created.
        # The two-argument form binds the proxy to this instance.
        super(DummyController, self).__init__(sleep = sleep, stop_sleep = stop_sleep, *args, **kw)

    def _start(self):
        pass

    # Stopping is the same no-op as starting.
    _stop = _start

    @property
    def is_running(self):
        return False
import os
import errno
class CheckPIDFileController(DaemonController):
    """DaemonController that detects daemon liveness through a PID file.

    NOTE: this module uses Python 2 syntax (``except OSError, e``) and
    predates Python 3.
    """

    def __init__(self, pidfile, *args, **kw):
        super(CheckPIDFileController, self).__init__(*args, **kw)
        self.__pidfile = Path(pidfile)

    @property
    def pidfile(self):
        return self.__pidfile

    @property
    def is_running(self):
        # No pidfile -> the daemon was never started (or cleaned up on exit).
        if not self.pidfile.exists():
            return False
        if not self.pidfile.isfile():
            raise Exception("pidfile '%s' is not a file" % (self.pidfile, ))
        try:
            # A PID fits well within 16 characters; limits how much is read.
            pid = int(self.__pidfile.open().readline(16))
        except:
            self.logger.exception("Error reading pidfile %s" % (self.pidfile))
            raise
        try:
            # Signal 0 performs the existence/permission check without
            # actually delivering a signal to the process.
            os.kill(pid, 0)
            return True
        except OSError, e:
            # ESRCH: no such process -> stale pidfile, daemon not running.
            if e.errno == errno.ESRCH:
                return False
            raise
class StartStopDaemonController(CheckPIDFileController):
    """Drives a daemon through Debian's ``start-stop-daemon`` utility.

    Liveness detection comes from CheckPIDFileController; this class only
    builds and runs the start/stop command lines.
    """

    def __init__(self, executable, fork = False, workingdir = None, pidfile = None, makepidfile = False, daemonargs = None, ssd = "/sbin/start-stop-daemon", ldpath = None, outfile = "/dev/null", *args, **kw):
        # Default the pidfile to a path derived from the executable so two
        # controllers for different executables never collide.
        if not pidfile:
            pidfile = "/tmp/" + executable.replace("/", "_") + ".pid"
        super(StartStopDaemonController, self).__init__(pidfile = pidfile, *args, **kw)
        self.__executable = unicode(executable)
        self.__workingdir = workingdir and unicode(workingdir) or None
        # Normalize ldpath to a de-duplicated tuple (or leave it None).
        if ldpath is not None:
            if not isinstance(ldpath, (list, set, tuple, frozenset)):
                ldpath = [ ldpath ]
            ldpath = tuple(set(ldpath))
        self.__ldpath = ldpath
        self.__makepidfile = makepidfile
        self.__daemonargs = daemonargs
        self.__fork = fork
        self.__ssd = ssd
        self.__outfile = outfile

    def get_daemonargs(self):
        return self.__daemonargs

    def set_daemonargs(self, da):
        self.__daemonargs = da

    daemonargs = property(get_daemonargs, set_daemonargs)

    def __make_cmd(self, cmd, test):
        # Common start-stop-daemon arguments; '-o' ("oknodo") makes the exit
        # status 0 when nothing had to be done.
        cmd = [ self.__ssd, cmd, '-x', self.__executable, '-p', self.pidfile, '-o' ]
        if self.__workingdir:
            cmd += [ '-d', self.__workingdir ]
        if test:
            cmd.append('-t')  # dry-run mode
        env = None
        if self.__ldpath:
            env = dict(LD_LIBRARY_PATH = ':'.join(self.__ldpath))
        return cmd, env

    def __check_cmd(self, cmd, env):
        # Run the command, appending its output to self.__outfile (discarded
        # via /dev/null by default); check_call raises on non-zero exit.
        # NOTE(review): an empty-string outfile would be passed to check_call
        # unopened and then .close()'d — only None and a real path are safe.
        self.logger.debug("ssd env: " + str(env))
        outfile = self.__outfile
        if outfile:
            outfile = Path(outfile).open("a")
        try:
            check_call(cmd, stdout = outfile, stderr = STDOUT, close_fds = True, cwd = self.__workingdir, env = env)
        finally:
            if outfile is not None:
                outfile.close()

    def _start(self):
        cmd, env = self.__make_cmd("-S", False)
        if self.__makepidfile:
            cmd.append('-m')  # have start-stop-daemon write the pidfile
        if self.__fork:
            cmd.append('-b')  # background (fork) the daemon
        if self.__daemonargs:
            cmd += [ '--' ] + list(self.__daemonargs)
        self.__check_cmd(cmd, env)

    def _stop(self):
        cmd, env = self.__make_cmd("-K", False)
        self.__check_cmd(cmd, env)
| '''
Created on 02.02.2012
@author: kca
'''
from time import sleep
from abc import ABCMeta, abstractproperty, abstractmethod
from futile import Base
from futile.path import Path
from . import check_call, STDOUT
# NOTE: duplicate of the DaemonController defined earlier in this file
# (dataset-dump artifact); code kept identical.
class DaemonController(Base):
    """Abstract start/stop controller for a daemon process."""
    __metaclass__ = ABCMeta

    def __init__(self, sleep = 5, stop_sleep = 3, *args, **kw):
        super(DaemonController, self).__init__(*args, **kw)
        # Settle delays (seconds) applied after start/stop.
        self.__sleep = int(sleep)
        self.__stop_sleep = int(stop_sleep)

    @abstractproperty
    def is_running(self):
        raise NotImplementedError()

    def start(self):
        self._start()
        sleep(self.__sleep)

    @abstractmethod
    def _start(self):
        raise NotImplementedError()

    def stop(self):
        self._stop()
        sleep(self.__stop_sleep)

    @abstractmethod
    def _stop(self):
        raise NotImplementedError()

    # Context-manager protocol: start on enter, stop on exit.
    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()
class DummyController(DaemonController):
    """No-op controller: start/stop do nothing and it never reports running.

    (Duplicate copy of DummyController from earlier in this file.)
    """

    def __init__(self, sleep = 0, stop_sleep = 0, *args, **kw):
        # BUG FIX: the original called super(DummyController).__init__(...),
        # an *unbound* super proxy (missing the instance argument), which
        # raises TypeError at construction time.  Bind it with `self`.
        super(DummyController, self).__init__(sleep = sleep, stop_sleep = stop_sleep, *args, **kw)

    def _start(self):
        pass

    # Stopping is the same no-op as starting.
    _stop = _start

    @property
    def is_running(self):
        return False
import os
import errno
# NOTE: duplicate of the CheckPIDFileController defined earlier in this file
# (dataset-dump artifact); Python 2 syntax preserved.
class CheckPIDFileController(DaemonController):
    """DaemonController that detects daemon liveness through a PID file."""

    def __init__(self, pidfile, *args, **kw):
        super(CheckPIDFileController, self).__init__(*args, **kw)
        self.__pidfile = Path(pidfile)

    @property
    def pidfile(self):
        return self.__pidfile

    @property
    def is_running(self):
        # No pidfile -> not started (or cleaned up after exit).
        if not self.pidfile.exists():
            return False
        if not self.pidfile.isfile():
            raise Exception("pidfile '%s' is not a file" % (self.pidfile, ))
        try:
            pid = int(self.__pidfile.open().readline(16))
        except:
            self.logger.exception("Error reading pidfile %s" % (self.pidfile))
            raise
        try:
            # Signal 0: existence/permission check, delivers nothing.
            os.kill(pid, 0)
            return True
        except OSError, e:
            # ESRCH -> stale pidfile, daemon not running.
            if e.errno == errno.ESRCH:
                return False
            raise
# NOTE: duplicate of the StartStopDaemonController defined earlier in this
# file (dataset-dump artifact); code kept identical.
class StartStopDaemonController(CheckPIDFileController):
    """Drives a daemon through Debian's ``start-stop-daemon`` utility."""

    def __init__(self, executable, fork = False, workingdir = None, pidfile = None, makepidfile = False, daemonargs = None, ssd = "/sbin/start-stop-daemon", ldpath = None, outfile = "/dev/null", *args, **kw):
        # Default the pidfile to a path derived from the executable.
        if not pidfile:
            pidfile = "/tmp/" + executable.replace("/", "_") + ".pid"
        super(StartStopDaemonController, self).__init__(pidfile = pidfile, *args, **kw)
        self.__executable = unicode(executable)
        self.__workingdir = workingdir and unicode(workingdir) or None
        # Normalize ldpath to a de-duplicated tuple (or leave it None).
        if ldpath is not None:
            if not isinstance(ldpath, (list, set, tuple, frozenset)):
                ldpath = [ ldpath ]
            ldpath = tuple(set(ldpath))
        self.__ldpath = ldpath
        self.__makepidfile = makepidfile
        self.__daemonargs = daemonargs
        self.__fork = fork
        self.__ssd = ssd
        self.__outfile = outfile

    def get_daemonargs(self):
        return self.__daemonargs

    def set_daemonargs(self, da):
        self.__daemonargs = da

    daemonargs = property(get_daemonargs, set_daemonargs)

    def __make_cmd(self, cmd, test):
        # '-o' ("oknodo") -> exit 0 when nothing had to be done.
        cmd = [ self.__ssd, cmd, '-x', self.__executable, '-p', self.pidfile, '-o' ]
        if self.__workingdir:
            cmd += [ '-d', self.__workingdir ]
        if test:
            cmd.append('-t')  # dry-run mode
        env = None
        if self.__ldpath:
            env = dict(LD_LIBRARY_PATH = ':'.join(self.__ldpath))
        return cmd, env

    def __check_cmd(self, cmd, env):
        # Run the command, appending output to self.__outfile; raises on
        # non-zero exit.
        self.logger.debug("ssd env: " + str(env))
        outfile = self.__outfile
        if outfile:
            outfile = Path(outfile).open("a")
        try:
            check_call(cmd, stdout = outfile, stderr = STDOUT, close_fds = True, cwd = self.__workingdir, env = env)
        finally:
            if outfile is not None:
                outfile.close()

    def _start(self):
        cmd, env = self.__make_cmd("-S", False)
        if self.__makepidfile:
            cmd.append('-m')  # have start-stop-daemon write the pidfile
        if self.__fork:
            cmd.append('-b')  # background (fork) the daemon
        if self.__daemonargs:
            cmd += [ '--' ] + list(self.__daemonargs)
        self.__check_cmd(cmd, env)

    def _stop(self):
        cmd, env = self.__make_cmd("-K", False)
        self.__check_cmd(cmd, env)
| de | 0.234104 | Created on 02.02.2012 @author: kca | 2.411787 | 2 |
src/train.py | JMasr/caliope_bert | 0 | 6619299 | <filename>src/train.py<gh_stars>0
import time
import torch
import torch.nn as nn
from torch.utils import data
import torch.multiprocessing
import mlflow
import numpy as np
from tqdm import tqdm
from uuid import uuid4
from argparser import parse_arguments
from dataset import Dataset
from dataset import cpu_count
from model import DeepPunctuation, DeepPunctuationCRF
from config import *
import augmentation
# Avoid "too many open files" with many DataLoader workers.
torch.multiprocessing.set_sharing_strategy('file_system')  # https://github.com/pytorch/pytorch/issues/11201

args = parse_arguments()

# For reproducibility: fix every RNG and disable non-deterministic cuDNN paths.
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(args.seed)

# Tokenizer: 'berto'/'bertinho' are local checkpoints; any other name is
# resolved through the MODELS table (see config).
if 'berto' in args.pretrained_model:
    tokenizer = MODELS[args.pretrained_model][1].from_pretrained('../models/berto/')
elif 'bertinho' in args.pretrained_model:
    tokenizer = MODELS[args.pretrained_model][1].from_pretrained('../models/bertinho/')
else:
    tokenizer = MODELS[args.pretrained_model][1].from_pretrained(args.pretrained_model)

# The augmentation module reads its configuration from module globals.
augmentation.tokenizer = tokenizer
augmentation.sub_style = args.sub_style
augmentation.alpha_sub = args.alpha_sub
augmentation.alpha_del = args.alpha_del
token_style = MODELS[args.pretrained_model][3]
ar = args.augment_rate
sequence_len = args.sequence_length
aug_type = args.augment_type

# Datasets — one branch per supported corpus/language.  Augmentation is
# applied to the training split only.
print("+==================+")
print("| Loading data ... |")
print("+------------------+")
if args.language == 'en':
    train_set = Dataset(os.path.join(args.data_path, 'en/train2012'), data_tokenizer=tokenizer, token_style=token_style,
                        sequence_len=sequence_len, batch_size=args.batch_size, is_train=True,
                        augment_rate=ar, augment_type=aug_type)
    print("\ttrain-set loaded")
    val_set = Dataset(os.path.join(args.data_path, 'en/dev2012'), data_tokenizer=tokenizer, sequence_len=sequence_len,
                      batch_size=args.batch_size, token_style=token_style, is_train=False)
    print("\tvalidation-set loaded")
    test_set_ref = Dataset(os.path.join(args.data_path, 'en/test2011'), data_tokenizer=tokenizer, is_train=False,
                           sequence_len=sequence_len, batch_size=args.batch_size, token_style=token_style)
    test_set_asr = Dataset(os.path.join(args.data_path, 'en/test2011asr'), data_tokenizer=tokenizer, is_train=False,
                           sequence_len=sequence_len, batch_size=args.batch_size, token_style=token_style)
    # For English, the dev split is also evaluated at test time.
    test_set = [val_set, test_set_ref, test_set_asr]
    print("\ttest-set loaded")
elif args.language == 'gl':
    check_for_data_base('gl')
    data_path = os.path.join(args.data_path, 'gl/train')
    train_set = Dataset(data_path, data_tokenizer=tokenizer, sequence_len=sequence_len, batch_size=args.batch_size,
                        token_style=token_style, is_train=True, augment_rate=ar, augment_type=aug_type)
    print("\ttrain-set loaded")
    data_path = data_path.replace('gl/train', 'gl/dev')
    val_set = Dataset(data_path, data_tokenizer=tokenizer, sequence_len=sequence_len, batch_size=args.batch_size,
                      token_style=token_style, is_train=False)
    print("\tvalidation-set loaded")
    data_path = data_path.replace('gl/dev', 'gl/test')
    test_set_ref = Dataset(data_path, data_tokenizer=tokenizer, sequence_len=sequence_len, batch_size=args.batch_size,
                           token_style=token_style, is_train=False)
    print("\ttest-set loaded")
    test_set = [test_set_ref]
elif args.language == 'gl_big':
    check_for_data_base('gl_big')
    data_path = os.path.join(args.data_path, 'gl_big/train')
    train_set = Dataset(data_path, data_tokenizer=tokenizer, sequence_len=sequence_len, batch_size=args.batch_size,
                        token_style=token_style, is_train=True, augment_rate=ar, augment_type=aug_type)
    print("\ttrain-set loaded")
    data_path = data_path.replace('gl_big/train', 'gl_big/dev')
    val_set = Dataset(data_path, data_tokenizer=tokenizer, sequence_len=sequence_len, batch_size=args.batch_size,
                      token_style=token_style, is_train=False)
    print("\tvalidation-set loaded")
    data_path = data_path.replace('gl_big/dev', 'gl_big/test')
    test_set_ref = Dataset(data_path, data_tokenizer=tokenizer, sequence_len=sequence_len, batch_size=args.batch_size,
                           token_style=token_style, is_train=False)
    print("\ttest-set loaded")
    test_set = [test_set_ref]
elif args.language == 'es':
    train_set = Dataset(os.path.join(args.data_path, 'es/train'), data_tokenizer=tokenizer, token_style=token_style,
                        sequence_len=sequence_len, is_train=True, batch_size=args.batch_size,
                        augment_rate=ar, augment_type=aug_type)
    print("\ttrain-set loaded")
    val_set = Dataset(os.path.join(args.data_path, 'es/dev'), data_tokenizer=tokenizer, sequence_len=sequence_len,
                      batch_size=args.batch_size, token_style=token_style, is_train=False)
    print("\tdev-set loaded")
    test_set_ref = Dataset(os.path.join(args.data_path, 'es/test'), data_tokenizer=tokenizer, token_style=token_style,
                           sequence_len=sequence_len, batch_size=args.batch_size, is_train=False)
    test_set = [test_set_ref]
    print("\ttest-set loaded")
else:
    raise ValueError('Incorrect language argument for Dataset')

# Data Loaders (same parameters for every split).
print("+======================+")
print("| Loading the Database |")
print("+----------------------+")
data_loader_params = {
    'batch_size': args.batch_size,
    'shuffle': True,
    'num_workers': cpu_count()
}
train_loader = torch.utils.data.DataLoader(train_set, **data_loader_params)
val_loader = torch.utils.data.DataLoader(val_set, **data_loader_params)
test_loaders = [torch.utils.data.DataLoader(x, **data_loader_params) for x in test_set]

# Logs: a unique run directory holding weights and the text log.
uniq_id = str(uuid4()).split("-")[0]
if args.save_path:
    save_path = args.save_path + uniq_id
else:
    date = "_".join(time.asctime().split(" ")[:3])
    save_path = f"exp_{args.language}_{date}_{uniq_id}/"
os.makedirs(save_path, exist_ok=True)
model_save_path = os.path.join(save_path, 'weights.pt')
log_path = os.path.join(save_path, args.name + '_logs_.txt')

# Model: plain token classifier, or CRF-decoded variant with --use-crf.
device = torch.device('cpu') if args.cuda == -1 else torch.device('cuda:' + str(args.cuda))
print(F"+=============================+")
print(f"|Loading BERT model using {str(device).upper()}|")
print(F"+=============================+")
if args.use_crf:
    deep_punctuation = DeepPunctuationCRF(args.pretrained_model, freeze_bert=args.freeze_bert, lstm_dim=args.lstm_dim)
else:
    deep_punctuation = DeepPunctuation(args.pretrained_model, freeze_bert=args.freeze_bert, lstm_dim=args.lstm_dim)
deep_punctuation.to(device)

# Optional class weighting for the cross-entropy loss (12 classes).
if args.loss_w:
    t_weight = torch.tensor(train_set.tensor_weight, device=device)
else:
    t_weight = torch.tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], device=device)
criterion = nn.CrossEntropyLoss(weight=t_weight)
optimizer = torch.optim.Adam(deep_punctuation.parameters(), lr=args.lr, weight_decay=args.decay)
def validate(data_loader):
    """Run one evaluation pass over *data_loader*.

    Uses the module-level ``deep_punctuation`` model, ``criterion``,
    ``device`` and ``args``.

    :param data_loader: DataLoader yielding (x, y, attention_mask, y_mask)
    :return: (accuracy, loss, precision, recall, f1, confusion_matrix);
             precision/recall/f1 are numpy arrays with one entry per class
             plus a final aggregate entry over all punctuation classes.
    """
    num_iteration = 0
    deep_punctuation.eval()
    # Per-class counters; the extra trailing slot aggregates all punctuation
    # classes (index 0, "no punctuation", is excluded from the aggregate).
    tp = np.zeros(1 + len(punctuation_dict), dtype=int)
    fp = np.zeros(1 + len(punctuation_dict), dtype=int)
    fn = np.zeros(1 + len(punctuation_dict), dtype=int)
    cm = np.zeros((len(punctuation_dict), len(punctuation_dict)), dtype=int)
    # Global metrics
    correct = 0
    total = 0
    val_loss = 0
    with torch.no_grad():
        for x, y, att, y_mask in tqdm(data_loader, desc='eval'):
            x, y, att, y_mask = x.to(device), y.to(device), att.to(device), y_mask.to(device)
            y_mask = y_mask.view(-1)
            if args.use_crf:
                y_predict = deep_punctuation(x, att, y)
                loss = deep_punctuation.log_likelihood(x, att, y)
                y_predict = y_predict.view(-1)
                y = y.view(-1)
            else:
                y_predict = deep_punctuation(x, att)
                y = y.view(-1)
                y_predict = y_predict.view(-1, y_predict.shape[2])
                loss = criterion(y_predict, y)
                y_predict = torch.argmax(y_predict, dim=1).view(-1)
            val_loss += loss.item()
            num_iteration += 1
            y_mask = y_mask.view(-1)
            correct += torch.sum(y_mask * (y_predict == y).long()).item()
            total += torch.sum(y_mask).item()
            for i in range(y.shape[0]):
                if y_mask[i] == 0:
                    # Padding / sub-word continuation position: no label here.
                    continue
                cor = y[i]
                prd = y_predict[i]
                if cor == prd:
                    tp[cor] += 1
                else:
                    fn[cor] += 1
                    fp[prd] += 1
                cm[cor][prd] += 1
    # Aggregate over punctuation classes only (skip index 0 = no punctuation).
    tp[-1] = np.sum(tp[1:])
    fp[-1] = np.sum(fp[1:])
    fn[-1] = np.sum(fn[1:])
    # FIX: guard against an empty loader instead of crashing with
    # ZeroDivisionError.
    global_loss = val_loss / num_iteration if num_iteration else 0.0
    accuracy = correct / total if total else 0.0
    # FIX: divide per class with warnings silenced; nan_to_num maps 0/0 to 0.
    # The original `.any()` guard still emitted divide warnings whenever only
    # *some* classes had no samples.
    with np.errstate(divide='ignore', invalid='ignore'):
        precision = np.nan_to_num(tp / (tp + fp))
        recall = np.nan_to_num(tp / (tp + fn))
        f1 = np.nan_to_num(2 * precision * recall / (precision + recall))
    return accuracy, global_loss, precision, recall, f1, cm
def test(data_loader):
    """Evaluate the current model on *data_loader* (no loss computed).

    Uses the module-level ``deep_punctuation`` model, ``device`` and ``args``.

    :return: (precision, recall, f1, accuracy, confusion_matrix); the metric
             arrays carry one entry per class plus a final aggregate entry
             over all punctuation classes.
    """
    # FIX: the original printed "Strating Train Phase" — a typo *and* the
    # wrong phase name for an evaluation routine.
    print("Starting test phase ...")
    num_iteration = 0
    deep_punctuation.eval()
    # +1 trailing slot for the aggregate over all punctuation classes.
    tp = np.zeros(1 + len(punctuation_dict), dtype=int)
    fp = np.zeros(1 + len(punctuation_dict), dtype=int)
    fn = np.zeros(1 + len(punctuation_dict), dtype=int)
    cm = np.zeros((len(punctuation_dict), len(punctuation_dict)), dtype=int)
    correct = 0
    total = 0
    with torch.no_grad():
        for x, y, att, y_mask in tqdm(data_loader, desc='test'):
            x, y, att, y_mask = x.to(device), y.to(device), att.to(device), y_mask.to(device)
            y_mask = y_mask.view(-1)
            if args.use_crf:
                y_predict = deep_punctuation(x, att, y)
                y_predict = y_predict.view(-1)
                y = y.view(-1)
            else:
                y_predict = deep_punctuation(x, att)
                y = y.view(-1)
                y_predict = y_predict.view(-1, y_predict.shape[2])
                y_predict = torch.argmax(y_predict, dim=1).view(-1)
            num_iteration += 1
            y_mask = y_mask.view(-1)
            correct += torch.sum(y_mask * (y_predict == y).long()).item()
            total += torch.sum(y_mask).item()
            for i in range(y.shape[0]):
                if y_mask[i] == 0:
                    # Padding / sub-word continuation position: no label here.
                    continue
                cor = y[i]
                prd = y_predict[i]
                if cor == prd:
                    tp[cor] += 1
                else:
                    fn[cor] += 1
                    fp[prd] += 1
                cm[cor][prd] += 1
    # Aggregate over punctuation classes only (skip index 0 = no punctuation).
    tp[-1] = np.sum(tp[1:])
    fp[-1] = np.sum(fp[1:])
    fn[-1] = np.sum(fn[1:])
    # FIX: per-class safe division (0/0 -> 0) instead of the `.any()` guard,
    # and an empty-loader guard for the accuracy denominator.
    with np.errstate(divide='ignore', invalid='ignore'):
        precision = np.nan_to_num(tp / (tp + fp))
        recall = np.nan_to_num(tp / (tp + fn))
        f1 = np.nan_to_num(2 * precision * recall / (precision + recall))
    accuracy = correct / total if total else 0.0
    return precision, recall, f1, accuracy, cm
def train():
    """Train the punctuation model, tracking metrics in a log file and MLflow.

    Per epoch: one pass over ``train_loader`` (CRF or cross-entropy loss,
    optional gradient clipping), then one validation pass; the weights with
    the best validation accuracy are checkpointed.  Afterwards the best
    checkpoint is evaluated on every test loader.  Relies on module-level
    state (model, loaders, optimizer, criterion, paths) built at import time.
    """
    with open(log_path, 'a') as f:
        f.write(str(args) + '\n')
    exp_date = "_".join(time.asctime().split(" ")[:3])
    mlflow.set_tracking_uri('http://0.0.0.0:5000')
    experiment_name = f"exp_{args.language}_{exp_date}"
    mlflow.set_experiment(experiment_name)
    # BUG FIX: the lookup below used the global `date`, which is defined only
    # when --save-path is omitted (NameError otherwise) and could disagree
    # with the experiment name registered just above.  Reuse the same name.
    exp_id = mlflow.tracking.MlflowClient().get_experiment_by_name(experiment_name).experiment_id
    with mlflow.start_run(experiment_id=exp_id, run_name=uniq_id):
        # MLflow Tracking #0: hyper-parameters and dataset sizes.
        # BUG FIX: "seed" was logged as args.epoch in the original.
        model_parameters = {"model-name": args.pretrained_model, "seed": args.seed, "language": args.language,
                            "epochs": args.epoch, "learning-rate": args.lr, "sequence-length": args.sequence_length,
                            "batch-size": args.batch_size, "lstm-dim": args.lstm_dim,
                            "loss-weighted": t_weight, "crf": args.use_crf, "weight-decay": args.decay,
                            "gradient-clip": args.gradient_clip,
                            "augment-rate": args.augment_rate, "augment-type": args.augment_type,
                            "alpha-sub": args.alpha_sub, "alpha-del": args.alpha_del,
                            }
        db_characters = {"train-set": len(train_set),
                         "dev-set": len(val_set),
                         "test-set": len(test_set_ref)}
        mlflow.log_params(model_parameters)  # Log the model parameters
        mlflow.log_params(db_characters)     # Log the database characteristics

        best_val_acc = -1
        for epoch in range(args.epoch):
            # FIX: `batch_norm` was created once *outside* the epoch loop, so
            # the per-epoch "GradientNorm" metric was a running mean over all
            # epochs.  Reset it here so each epoch logs its own mean.
            batch_norm = []
            train_loss = 0.0
            train_iteration = 0
            correct = 0
            total = 0
            print("Start training ...")
            deep_punctuation.train()
            for x, y, att, y_mask in tqdm(train_loader, desc='train'):
                x, y, att, y_mask = x.to(device), y.to(device), att.to(device), y_mask.to(device)
                y_mask = y_mask.view(-1)
                if args.use_crf:
                    loss = deep_punctuation.log_likelihood(x, att, y)
                    # Token-level accuracy is not tracked on the CRF path.
                else:
                    y_predict = deep_punctuation(x, att)
                    y_predict = y_predict.view(-1, y_predict.shape[2])
                    y = y.view(-1)
                    loss = criterion(y_predict, y)
                    y_predict = torch.argmax(y_predict, dim=1).view(-1)
                    correct += torch.sum(y_mask * (y_predict == y).long()).item()
                optimizer.zero_grad()
                train_loss += loss.item()
                train_iteration += 1
                loss.backward()
                # Gradient clipping, very useful for training stability.
                if args.gradient_clip > 0:
                    torch.nn.utils.clip_grad_norm_(deep_punctuation.parameters(), max_norm=2.0, norm_type=2)
                # Track the gradient norm of one reference layer (index 2 of
                # the model's ordered_layers).
                for n, layer in enumerate(deep_punctuation.ordered_layers):
                    if n == 2:
                        norm_grad = layer.weight.grad.norm().cpu()
                        batch_norm.append(norm_grad.numpy())
                optimizer.step()
                y_mask = y_mask.view(-1)
                total += torch.sum(y_mask).item()
            train_acc = correct / total
            train_loss /= train_iteration
            log = 'epoch: {}, Train loss: {}, Train accuracy: {}'.format(epoch, train_loss, train_acc)
            # MLflow Tracking: per-epoch training metrics.
            train_metrics = {"train_loss": train_loss, "train_accuracy": train_acc,
                             "GradientNorm": np.mean(batch_norm)}
            mlflow.log_metrics(train_metrics, step=epoch + 1)
            # Mirror to the text log.
            with open(log_path, 'a') as f:
                f.write(log + '\n')
            print(log)

            val_acc, val_loss, val_precision, val_recall, val_f1, val_cm = validate(val_loader)
            log = 'epoch: {}, Val loss: {}, Val accuracy: {}\n'.format(epoch, val_loss, val_acc)
            log_val_metrics = f'Precision: {val_precision}\n' \
                              f'Recall: {val_recall}\n' \
                              f'F1 score: {val_f1}\n'
            with open(log_path, 'a') as f:
                f.write(log)
                f.write(log_val_metrics)
            print(log)
            print(log_val_metrics)
            # Keep the checkpoint with the best validation accuracy.
            if val_acc > best_val_acc:
                best_val_acc = val_acc
                torch.save(deep_punctuation.state_dict(), model_save_path)
            # MLflow Tracking: per-class validation metrics.
            # NOTE(review): class indices 3 and 8 are skipped — presumably
            # unused labels in punctuation_dict; confirm against config.
            val_metrics = {"eval_loss": val_loss, "val_accuracy": val_acc,
                           "P_Lower": val_precision[0], "P_Lower-Comma": val_precision[1],
                           "P_Lower-Period": val_precision[2], "P_All-Capital": val_precision[4],
                           "P_Frits-Capital": val_precision[5], "P_All-Capital-Comma": val_precision[6],
                           "P_All-Capital-Period": val_precision[7], "P_Frits-Capital-Comma": val_precision[9],
                           "P_Frits-Capital-Period": val_precision[10],
                           #
                           "R_Lower": val_recall[0], "R_Lower-Comma": val_recall[1], "R_Lower-Period": val_recall[2],
                           "R_All-Capital": val_recall[4], "R_Frits-Capital": val_recall[5],
                           "R_All-Capital-Comma": val_recall[6], "R_All-Capital-Period": val_recall[7],
                           "R_Frits-Capital-Comma": val_recall[9], "R_Frits-Capital-Period": val_recall[10],
                           #
                           "F1_Lower": val_f1[0], "F1_Lower-Comma": val_f1[1], "F1_Lower-Period": val_f1[2],
                           "F1_All-Capital": val_f1[4], "F1_Frits-Capital": val_f1[5],
                           "F1_All-Capital-Comma": val_f1[6], "F1_All-Capital-Period": val_f1[7],
                           "F1_Frits-Capital-Comma": val_f1[9], "F1_Frits-Capital-Period": val_f1[10],
                           }
            mlflow.log_metrics(val_metrics, step=epoch + 1)

        print('Best validation Acc:', best_val_acc)
        # Reload the best checkpoint before testing.
        deep_punctuation.load_state_dict(torch.load(model_save_path))
        for loader in test_loaders:
            precision, recall, f1, accuracy, cm = test(loader)
            log = 'Precision: ' + str(precision) + '\n' + 'Recall: ' + str(recall) + '\n' + 'F1 score: ' + str(f1) + \
                  '\n' + 'Accuracy:' + str(accuracy) + '\n' + 'Confusion Matrix' + str(cm) + '\n'
            print(log)
            # MLflow Tracking: test accuracy for this loader.
            test_metrics = {"test_acc": accuracy}
            mlflow.log_metrics(test_metrics)
            with open(log_path, 'a') as f:
                f.write(log)
            # Compact space-separated P/R/F1 line for classes 1..4.
            # NOTE(review): only 4 of the punctuation classes are written
            # here — possibly stale from an earlier 4-class label set.
            log_text = ''
            for i in range(1, 5):
                log_text += str(precision[i] * 100) + ' ' + str(recall[i] * 100) + ' ' + str(f1[i] * 100) + ' '
            with open(log_path, 'a') as f:
                f.write(log_text[:-1] + '\n\n')
# Script entry point: run the full training pipeline when executed directly.
if __name__ == '__main__':
    train()
| <filename>src/train.py<gh_stars>0
import time
import torch
import torch.nn as nn
from torch.utils import data
import torch.multiprocessing
import mlflow
import numpy as np
from tqdm import tqdm
from uuid import uuid4
from argparser import parse_arguments
from dataset import Dataset
from dataset import cpu_count
from model import DeepPunctuation, DeepPunctuationCRF
from config import *
import augmentation
# NOTE: byte-for-byte duplicate of the train.py configuration block earlier
# in this file (dataset-dump artifact); code kept identical.
torch.multiprocessing.set_sharing_strategy('file_system')  # https://github.com/pytorch/pytorch/issues/11201

args = parse_arguments()

# for reproducibility
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(args.seed)

# tokenizer: local checkpoints for 'berto'/'bertinho', hub name otherwise
if 'berto' in args.pretrained_model:
    tokenizer = MODELS[args.pretrained_model][1].from_pretrained('../models/berto/')
elif 'bertinho' in args.pretrained_model:
    tokenizer = MODELS[args.pretrained_model][1].from_pretrained('../models/bertinho/')
else:
    tokenizer = MODELS[args.pretrained_model][1].from_pretrained(args.pretrained_model)
# the augmentation module reads its configuration from module globals
augmentation.tokenizer = tokenizer
augmentation.sub_style = args.sub_style
augmentation.alpha_sub = args.alpha_sub
augmentation.alpha_del = args.alpha_del
token_style = MODELS[args.pretrained_model][3]
ar = args.augment_rate
sequence_len = args.sequence_length
aug_type = args.augment_type

# Datasets — one branch per supported corpus/language
print("+==================+")
print("| Loading data ... |")
print("+------------------+")
if args.language == 'en':
    train_set = Dataset(os.path.join(args.data_path, 'en/train2012'), data_tokenizer=tokenizer, token_style=token_style,
                        sequence_len=sequence_len, batch_size=args.batch_size, is_train=True,
                        augment_rate=ar, augment_type=aug_type)
    print("\ttrain-set loaded")
    val_set = Dataset(os.path.join(args.data_path, 'en/dev2012'), data_tokenizer=tokenizer, sequence_len=sequence_len,
                      batch_size=args.batch_size, token_style=token_style, is_train=False)
    print("\tvalidation-set loaded")
    test_set_ref = Dataset(os.path.join(args.data_path, 'en/test2011'), data_tokenizer=tokenizer, is_train=False,
                           sequence_len=sequence_len, batch_size=args.batch_size, token_style=token_style)
    test_set_asr = Dataset(os.path.join(args.data_path, 'en/test2011asr'), data_tokenizer=tokenizer, is_train=False,
                           sequence_len=sequence_len, batch_size=args.batch_size, token_style=token_style)
    test_set = [val_set, test_set_ref, test_set_asr]
    print("\ttest-set loaded")
elif args.language == 'gl':
    check_for_data_base('gl')
    data_path = os.path.join(args.data_path, 'gl/train')
    train_set = Dataset(data_path, data_tokenizer=tokenizer, sequence_len=sequence_len, batch_size=args.batch_size,
                        token_style=token_style, is_train=True, augment_rate=ar, augment_type=aug_type)
    print("\ttrain-set loaded")
    data_path = data_path.replace('gl/train', 'gl/dev')
    val_set = Dataset(data_path, data_tokenizer=tokenizer, sequence_len=sequence_len, batch_size=args.batch_size,
                      token_style=token_style, is_train=False)
    print("\tvalidation-set loaded")
    data_path = data_path.replace('gl/dev', 'gl/test')
    test_set_ref = Dataset(data_path, data_tokenizer=tokenizer, sequence_len=sequence_len, batch_size=args.batch_size,
                           token_style=token_style, is_train=False)
    print("\ttest-set loaded")
    test_set = [test_set_ref]
elif args.language == 'gl_big':
    check_for_data_base('gl_big')
    data_path = os.path.join(args.data_path, 'gl_big/train')
    train_set = Dataset(data_path, data_tokenizer=tokenizer, sequence_len=sequence_len, batch_size=args.batch_size,
                        token_style=token_style, is_train=True, augment_rate=ar, augment_type=aug_type)
    print("\ttrain-set loaded")
    data_path = data_path.replace('gl_big/train', 'gl_big/dev')
    val_set = Dataset(data_path, data_tokenizer=tokenizer, sequence_len=sequence_len, batch_size=args.batch_size,
                      token_style=token_style, is_train=False)
    print("\tvalidation-set loaded")
    data_path = data_path.replace('gl_big/dev', 'gl_big/test')
    test_set_ref = Dataset(data_path, data_tokenizer=tokenizer, sequence_len=sequence_len, batch_size=args.batch_size,
                           token_style=token_style, is_train=False)
    print("\ttest-set loaded")
    test_set = [test_set_ref]
elif args.language == 'es':
    train_set = Dataset(os.path.join(args.data_path, 'es/train'), data_tokenizer=tokenizer, token_style=token_style,
                        sequence_len=sequence_len, is_train=True, batch_size=args.batch_size,
                        augment_rate=ar, augment_type=aug_type)
    print("\ttrain-set loaded")
    val_set = Dataset(os.path.join(args.data_path, 'es/dev'), data_tokenizer=tokenizer, sequence_len=sequence_len,
                      batch_size=args.batch_size, token_style=token_style, is_train=False)
    print("\tdev-set loaded")
    test_set_ref = Dataset(os.path.join(args.data_path, 'es/test'), data_tokenizer=tokenizer, token_style=token_style,
                           sequence_len=sequence_len, batch_size=args.batch_size, is_train=False)
    test_set = [test_set_ref]
    print("\ttest-set loaded")
else:
    raise ValueError('Incorrect language argument for Dataset')

# Data Loaders
print("+======================+")
print("| Loading the Database |")
print("+----------------------+")
data_loader_params = {
    'batch_size': args.batch_size,
    'shuffle': True,
    'num_workers': cpu_count()
}
train_loader = torch.utils.data.DataLoader(train_set, **data_loader_params)
val_loader = torch.utils.data.DataLoader(val_set, **data_loader_params)
test_loaders = [torch.utils.data.DataLoader(x, **data_loader_params) for x in test_set]

# logs: unique run directory for weights and the text log
uniq_id = str(uuid4()).split("-")[0]
if args.save_path:
    save_path = args.save_path + uniq_id
else:
    date = "_".join(time.asctime().split(" ")[:3])
    save_path = f"exp_{args.language}_{date}_{uniq_id}/"
os.makedirs(save_path, exist_ok=True)
model_save_path = os.path.join(save_path, 'weights.pt')
log_path = os.path.join(save_path, args.name + '_logs_.txt')

# Model
device = torch.device('cpu') if args.cuda == -1 else torch.device('cuda:' + str(args.cuda))
print(F"+=============================+")
print(f"|Loading BERT model using {str(device).upper()}|")
print(F"+=============================+")
if args.use_crf:
    deep_punctuation = DeepPunctuationCRF(args.pretrained_model, freeze_bert=args.freeze_bert, lstm_dim=args.lstm_dim)
else:
    deep_punctuation = DeepPunctuation(args.pretrained_model, freeze_bert=args.freeze_bert, lstm_dim=args.lstm_dim)
deep_punctuation.to(device)
# optional class weighting for the cross-entropy loss (12 classes)
if args.loss_w:
    t_weight = torch.tensor(train_set.tensor_weight, device=device)
else:
    t_weight = torch.tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], device=device)
criterion = nn.CrossEntropyLoss(weight=t_weight)
optimizer = torch.optim.Adam(deep_punctuation.parameters(), lr=args.lr, weight_decay=args.decay)
def validate(data_loader):
    """Run one evaluation pass over *data_loader*.

    (Duplicate copy of ``validate`` from earlier in this file.)  Uses the
    module-level ``deep_punctuation`` model, ``criterion``, ``device`` and
    ``args``.

    :return: (accuracy, loss, precision, recall, f1, confusion_matrix);
             precision/recall/f1 carry one entry per class plus a final
             aggregate entry over all punctuation classes.
    """
    num_iteration = 0
    deep_punctuation.eval()
    # Per-class counters; the trailing slot aggregates punctuation classes.
    tp = np.zeros(1 + len(punctuation_dict), dtype=int)
    fp = np.zeros(1 + len(punctuation_dict), dtype=int)
    fn = np.zeros(1 + len(punctuation_dict), dtype=int)
    cm = np.zeros((len(punctuation_dict), len(punctuation_dict)), dtype=int)
    # Global metrics
    correct = 0
    total = 0
    val_loss = 0
    with torch.no_grad():
        for x, y, att, y_mask in tqdm(data_loader, desc='eval'):
            x, y, att, y_mask = x.to(device), y.to(device), att.to(device), y_mask.to(device)
            y_mask = y_mask.view(-1)
            if args.use_crf:
                y_predict = deep_punctuation(x, att, y)
                loss = deep_punctuation.log_likelihood(x, att, y)
                y_predict = y_predict.view(-1)
                y = y.view(-1)
            else:
                y_predict = deep_punctuation(x, att)
                y = y.view(-1)
                y_predict = y_predict.view(-1, y_predict.shape[2])
                loss = criterion(y_predict, y)
                y_predict = torch.argmax(y_predict, dim=1).view(-1)
            val_loss += loss.item()
            num_iteration += 1
            y_mask = y_mask.view(-1)
            correct += torch.sum(y_mask * (y_predict == y).long()).item()
            total += torch.sum(y_mask).item()
            for i in range(y.shape[0]):
                if y_mask[i] == 0:
                    # Padding / sub-word continuation position: no label here.
                    continue
                cor = y[i]
                prd = y_predict[i]
                if cor == prd:
                    tp[cor] += 1
                else:
                    fn[cor] += 1
                    fp[prd] += 1
                cm[cor][prd] += 1
    # Aggregate over punctuation classes only (skip index 0).
    tp[-1] = np.sum(tp[1:])
    fp[-1] = np.sum(fp[1:])
    fn[-1] = np.sum(fn[1:])
    # FIX: empty-loader guards instead of ZeroDivisionError.
    global_loss = val_loss / num_iteration if num_iteration else 0.0
    accuracy = correct / total if total else 0.0
    # FIX: per-class safe division (0/0 -> 0) without divide warnings.
    with np.errstate(divide='ignore', invalid='ignore'):
        precision = np.nan_to_num(tp / (tp + fp))
        recall = np.nan_to_num(tp / (tp + fn))
        f1 = np.nan_to_num(2 * precision * recall / (precision + recall))
    return accuracy, global_loss, precision, recall, f1, cm
def test(data_loader):
"""
:return: precision[numpy array], recall[numpy array], f1 score [numpy array], accuracy, confusion matrix
"""
print("Strating Train Phase")
num_iteration = 0
deep_punctuation.eval()
# +1 for overall result
tp = np.zeros(1+len(punctuation_dict), dtype=int)
fp = np.zeros(1+len(punctuation_dict), dtype=int)
fn = np.zeros(1+len(punctuation_dict), dtype=int)
cm = np.zeros((len(punctuation_dict), len(punctuation_dict)), dtype=int)
correct = 0
total = 0
with torch.no_grad():
for x, y, att, y_mask in tqdm(data_loader, desc='test'):
x, y, att, y_mask = x.to(device), y.to(device), att.to(device), y_mask.to(device)
y_mask = y_mask.view(-1)
if args.use_crf:
y_predict = deep_punctuation(x, att, y)
y_predict = y_predict.view(-1)
y = y.view(-1)
else:
y_predict = deep_punctuation(x, att)
y = y.view(-1)
y_predict = y_predict.view(-1, y_predict.shape[2])
y_predict = torch.argmax(y_predict, dim=1).view(-1)
num_iteration += 1
y_mask = y_mask.view(-1)
correct += torch.sum(y_mask * (y_predict == y).long()).item()
total += torch.sum(y_mask).item()
for i in range(y.shape[0]):
if y_mask[i] == 0:
# we can ignore this because we know there won't be any punctuation in this position
# since we created this position due to padding or sub-word tokenization
continue
cor = y[i]
prd = y_predict[i]
if cor == prd:
tp[cor] += 1
else:
fn[cor] += 1
fp[prd] += 1
cm[cor][prd] += 1
# ignore first index which is for no punctuation
tp[-1] = np.sum(tp[1:])
fp[-1] = np.sum(fp[1:])
fn[-1] = np.sum(fn[1:])
precision = tp/(tp+fp) if (tp + fp).any() else 0
recall = tp/(tp+fn) if (tp + fn).any() else 0
f1 = 2 * precision * recall / (precision + recall) if (precision + recall).any() else 0
return np.nan_to_num(precision), np.nan_to_num(recall), np.nan_to_num(f1), correct/total, cm
def train():
with open(log_path, 'a') as f:
f.write(str(args)+'\n')
exp_date = "_".join(time.asctime().split(" ")[:3])
mlflow.set_tracking_uri('http://0.0.0.0:5000')
mlflow.set_experiment(f"exp_{args.language}_{exp_date}")
exp_id = mlflow.tracking.MlflowClient().get_experiment_by_name(f"exp_{args.language}_{date}").experiment_id
with mlflow.start_run(experiment_id=exp_id, run_name=uniq_id):
# MLflow Tracking #0
model_parameters = {"model-name": args.pretrained_model, "seed": args.epoch, "language": args.language,
"epochs": args.epoch, "learning-rate": args.lr, "sequence-length": args.sequence_length,
"batch-size": args.batch_size, "lstm-dim": args.lstm_dim,
"loss-weighted": t_weight, "crf": args.use_crf, "weight-decay": args.decay,
"gradient-clip": args.gradient_clip,
"augment-rate": args.augment_rate, "augment-type": args.augment_type,
"alpha-sub": args.alpha_sub, "alpha-del": args.alpha_del,
}
db_characters = {"train-set": len(train_set),
"dev-set": len(val_set),
"test-set": len(test_set_ref)}
mlflow.log_params(model_parameters) # Log a model parameters
mlflow.log_params(db_characters) # Log a database characteristics
# MLflow Tracking - end #
batch_norm = []
best_val_acc = -1
for epoch in range(args.epoch):
train_loss = 0.0
train_iteration = 0
correct = 0
total = 0
print("Star Training ...")
deep_punctuation.train()
for x, y, att, y_mask in tqdm(train_loader, desc='train'):
x, y, att, y_mask = x.to(device), y.to(device), att.to(device), y_mask.to(device)
y_mask = y_mask.view(-1)
if args.use_crf:
loss = deep_punctuation.log_likelihood(x, att, y)
# y_predict = deep_punctuation(x, att, y)
# y_predict = y_predict.view(-1)
# y = y.view(-1)
else:
y_predict = deep_punctuation(x, att)
y_predict = y_predict.view(-1, y_predict.shape[2])
y = y.view(-1)
loss = criterion(y_predict, y)
y_predict = torch.argmax(y_predict, dim=1).view(-1)
correct += torch.sum(y_mask * (y_predict == y).long()).item()
optimizer.zero_grad()
train_loss += loss.item()
train_iteration += 1
loss.backward()
# Doing Gradient clipping, very useful!
if args.gradient_clip > 0:
torch.nn.utils.clip_grad_norm_(deep_punctuation.parameters(), max_norm=2.0, norm_type=2)
# Calculate gradient norms
for n, layer in enumerate(deep_punctuation.ordered_layers):
if n == 2:
norm_grad = layer.weight.grad.norm().cpu()
batch_norm.append(norm_grad.numpy())
optimizer.step()
y_mask = y_mask.view(-1)
total += torch.sum(y_mask).item()
train_acc = correct / total
train_loss /= train_iteration
log = 'epoch: {}, Train loss: {}, Train accuracy: {}'.format(epoch, train_loss, train_acc)
# MLflow Tracking#
train_metrics = {"train_loss": train_loss, "train_accuracy": train_acc, "GradientNorm": np.mean(batch_norm)}
mlflow.log_metrics(train_metrics, step=epoch + 1)
# Print in log
with open(log_path, 'a') as f:
f.write(log + '\n')
print(log)
val_acc, val_loss, val_precision, val_recall, val_f1, val_cm = validate(val_loader)
log = 'epoch: {}, Val loss: {}, Val accuracy: {}\n'.format(epoch, val_loss, val_acc)
log_val_metrics = f'Precision: {val_precision}\n' \
f'Recall: {val_recall}\n' \
f'F1 score: {val_f1}\n'
# Print log
with open(log_path, 'a') as f:
f.write(log)
f.write(log_val_metrics)
print(log)
print(log_val_metrics)
if val_acc > best_val_acc:
best_val_acc = val_acc
torch.save(deep_punctuation.state_dict(), model_save_path)
# MLflow Tracking #
val_metrics = {"eval_loss": val_loss, "val_accuracy": val_acc,
"P_Lower": val_precision[0], "P_Lower-Comma": val_precision[1],
"P_Lower-Period": val_precision[2], "P_All-Capital": val_precision[4],
"P_Frits-Capital": val_precision[5], "P_All-Capital-Comma": val_precision[6],
"P_All-Capital-Period": val_precision[7], "P_Frits-Capital-Comma": val_precision[9],
"P_Frits-Capital-Period": val_precision[10],
#
"R_Lower": val_recall[0], "R_Lower-Comma": val_recall[1], "R_Lower-Period": val_recall[2],
"R_All-Capital": val_recall[4], "R_Frits-Capital": val_recall[5],
"R_All-Capital-Comma": val_recall[6], "R_All-Capital-Period": val_recall[7],
"R_Frits-Capital-Comma": val_recall[9], "R_Frits-Capital-Period": val_recall[10],
#
"F1_Lower": val_f1[0], "F1_Lower-Comma": val_f1[1], "F1_Lower-Period": val_f1[2],
"F1_All-Capital": val_f1[4], "F1_Frits-Capital": val_f1[5],
"F1_All-Capital-Comma": val_f1[6], "F1_All-Capital-Period": val_f1[7],
"F1_Frits-Capital-Comma": val_f1[9], "F1_Frits-Capital-Period": val_f1[10],
}
mlflow.log_metrics(val_metrics, step=epoch + 1)
print('Best validation Acc:', best_val_acc)
deep_punctuation.load_state_dict(torch.load(model_save_path))
for loader in test_loaders:
precision, recall, f1, accuracy, cm = test(loader)
log = 'Precision: ' + str(precision) + '\n' + 'Recall: ' + str(recall) + '\n' + 'F1 score: ' + str(f1) + \
'\n' + 'Accuracy:' + str(accuracy) + '\n' + 'Confusion Matrix' + str(cm) + '\n'
print(log)
# MLflow Tracking#
test_metrics = {"test_acc": accuracy}
mlflow.log_metrics(test_metrics)
# Print in log
with open(log_path, 'a') as f:
f.write(log)
log_text = ''
for i in range(1, 5):
log_text += str(precision[i] * 100) + ' ' + str(recall[i] * 100) + ' ' + str(f1[i] * 100) + ' '
with open(log_path, 'a') as f:
f.write(log_text[:-1] + '\n\n')
if __name__ == '__main__':
train()
| en | 0.737508 | # https://github.com/pytorch/pytorch/issues/11201 # for reproducibility # tokenizer # Datasets # Data Loaders # logs # Model :return: validation accuracy, validation loss # Class Metrics # Global metrics # since we created this position due to padding or sub-word tokenization, so we can ignore it # ignore first index which is for no punctuation :return: precision[numpy array], recall[numpy array], f1 score [numpy array], accuracy, confusion matrix # +1 for overall result # we can ignore this because we know there won't be any punctuation in this position # since we created this position due to padding or sub-word tokenization # ignore first index which is for no punctuation # MLflow Tracking #0 # Log a model parameters # Log a database characteristics # MLflow Tracking - end # # y_predict = deep_punctuation(x, att, y) # y_predict = y_predict.view(-1) # y = y.view(-1) # Doing Gradient clipping, very useful! # Calculate gradient norms # MLflow Tracking# # Print in log # Print log # MLflow Tracking # # # # MLflow Tracking# # Print in log | 2.098657 | 2 |
pgweb/account/forms.py | ChristophBerg/pgweb | 1 | 6619300 | from django import forms
import re
from django.contrib.auth.models import User
from pgweb.core.models import UserProfile
from pgweb.contributors.models import Contributor
class SignupForm(forms.Form):
username = forms.CharField(max_length=30)
first_name = forms.CharField(max_length=30)
last_name = forms.CharField(max_length=30)
email = forms.EmailField()
email2 = forms.EmailField(label="Repeat email")
def clean_email2(self):
# If the primary email checker had an exception, the data will be gone
# from the cleaned_data structure
if not self.cleaned_data.has_key('email'):
return self.cleaned_data['email2']
email1 = self.cleaned_data['email']
email2 = self.cleaned_data['email2']
if email1 != email2:
raise forms.ValidationError("Email addresses don't match")
return email2
def clean_username(self):
username = self.cleaned_data['username'].lower()
if not re.match('^[a-z0-9_@\.-]+$', username):
raise forms.ValidationError("Invalid character in user name. Only a-z, 0-9, _, @, . and - allowed.")
try:
User.objects.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError("This username is already in use")
def clean_email(self):
email = self.cleaned_data['email']
try:
User.objects.get(email=email)
except User.DoesNotExist:
return email
raise forms.ValidationError("A user with this email address is already registered")
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
exclude = ('user',)
class UserForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(UserForm, self).__init__(*args, **kwargs)
self.fields['first_name'].required = True
self.fields['last_name'].required = True
class Meta:
model = User
fields = ('first_name', 'last_name', )
class ContributorForm(forms.ModelForm):
class Meta:
model = Contributor
exclude = ('ctype', 'lastname', 'firstname', 'email', 'user', )
| from django import forms
import re
from django.contrib.auth.models import User
from pgweb.core.models import UserProfile
from pgweb.contributors.models import Contributor
class SignupForm(forms.Form):
username = forms.CharField(max_length=30)
first_name = forms.CharField(max_length=30)
last_name = forms.CharField(max_length=30)
email = forms.EmailField()
email2 = forms.EmailField(label="Repeat email")
def clean_email2(self):
# If the primary email checker had an exception, the data will be gone
# from the cleaned_data structure
if not self.cleaned_data.has_key('email'):
return self.cleaned_data['email2']
email1 = self.cleaned_data['email']
email2 = self.cleaned_data['email2']
if email1 != email2:
raise forms.ValidationError("Email addresses don't match")
return email2
def clean_username(self):
username = self.cleaned_data['username'].lower()
if not re.match('^[a-z0-9_@\.-]+$', username):
raise forms.ValidationError("Invalid character in user name. Only a-z, 0-9, _, @, . and - allowed.")
try:
User.objects.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError("This username is already in use")
def clean_email(self):
email = self.cleaned_data['email']
try:
User.objects.get(email=email)
except User.DoesNotExist:
return email
raise forms.ValidationError("A user with this email address is already registered")
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
exclude = ('user',)
class UserForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(UserForm, self).__init__(*args, **kwargs)
self.fields['first_name'].required = True
self.fields['last_name'].required = True
class Meta:
model = User
fields = ('first_name', 'last_name', )
class ContributorForm(forms.ModelForm):
class Meta:
model = Contributor
exclude = ('ctype', 'lastname', 'firstname', 'email', 'user', )
| en | 0.852051 | # If the primary email checker had an exception, the data will be gone # from the cleaned_data structure | 2.563859 | 3 |
recipe_app/schema.py | PatrickCmd/Recipe-API-Django-GraphQL | 0 | 6619301 | import graphene
import graphql_jwt
import auth_user.schema
import api.schema
class Query(
auth_user.schema.Query,
api.schema.Query,
graphene.ObjectType,
):
pass
class Mutation(
auth_user.schema.Mutation,
api.schema.Mutation,
graphene.ObjectType,
):
token_auth = graphql_jwt.ObtainJSONWebToken.Field()
verify_token = graphql_jwt.Verify.Field()
refresh_token = graphql_jwt.Refresh.Field()
schema = graphene.Schema(query=Query, mutation=Mutation)
| import graphene
import graphql_jwt
import auth_user.schema
import api.schema
class Query(
auth_user.schema.Query,
api.schema.Query,
graphene.ObjectType,
):
pass
class Mutation(
auth_user.schema.Mutation,
api.schema.Mutation,
graphene.ObjectType,
):
token_auth = graphql_jwt.ObtainJSONWebToken.Field()
verify_token = graphql_jwt.Verify.Field()
refresh_token = graphql_jwt.Refresh.Field()
schema = graphene.Schema(query=Query, mutation=Mutation)
| none | 1 | 2.269355 | 2 | |
ml_project/tests/features/test_make_features.py | made-ml-in-prod-2021/mrtimmy89 | 0 | 6619302 | <filename>ml_project/tests/features/test_make_features.py<gh_stars>0
import pandas as pd
import numpy as np
from src.data.make_dataset import read_data
from src.features.make_features import extract_features, extract_target, label_features, full_transform
from src.entities.feature_parameters import FeatureParams
def test_extract_and_label_features(dataset_path: str):
df = read_data(dataset_path)
extracted_features = np.sort(extract_features(df))
assert (len(extracted_features) == 13)
expected_cat_features, _ = label_features(df)
assert (len(expected_cat_features) == 8)
def test_extract_target(dataset_path: str, feature_params: FeatureParams):
df = read_data(dataset_path)
extracted_target = extract_target(df)
expected_target = df[feature_params.target_col]
assert np.allclose(extracted_target, expected_target)
def test_full_transform(dataset_path: str) -> pd.DataFrame:
df = read_data(dataset_path)
df_transformed = full_transform(df)
assert df_transformed.shape[0] == df.shape[0]
# assert df_transformed.values.min() > 0
| <filename>ml_project/tests/features/test_make_features.py<gh_stars>0
import pandas as pd
import numpy as np
from src.data.make_dataset import read_data
from src.features.make_features import extract_features, extract_target, label_features, full_transform
from src.entities.feature_parameters import FeatureParams
def test_extract_and_label_features(dataset_path: str):
df = read_data(dataset_path)
extracted_features = np.sort(extract_features(df))
assert (len(extracted_features) == 13)
expected_cat_features, _ = label_features(df)
assert (len(expected_cat_features) == 8)
def test_extract_target(dataset_path: str, feature_params: FeatureParams):
df = read_data(dataset_path)
extracted_target = extract_target(df)
expected_target = df[feature_params.target_col]
assert np.allclose(extracted_target, expected_target)
def test_full_transform(dataset_path: str) -> pd.DataFrame:
df = read_data(dataset_path)
df_transformed = full_transform(df)
assert df_transformed.shape[0] == df.shape[0]
# assert df_transformed.values.min() > 0
| en | 0.220043 | # assert df_transformed.values.min() > 0 | 2.644039 | 3 |
admin/freeposte/admin/views/aliases.py | XYunknown/freeposte.io | 10 | 6619303 | <reponame>XYunknown/freeposte.io
from freeposte.admin import app, db, models, forms, utils
import os
import flask
import flask_login
import wtforms_components
@app.route('/alias/list/<domain_name>', methods=['GET'])
@flask_login.login_required
def alias_list(domain_name):
domain = utils.get_domain_admin(domain_name)
return flask.render_template('alias/list.html', domain=domain)
@app.route('/alias/create/<domain_name>', methods=['GET', 'POST'])
@flask_login.login_required
def alias_create(domain_name):
domain = utils.get_domain_admin(domain_name)
if domain.max_aliases and len(domain.aliases) >= domain.max_aliases:
flask.flash('Too many aliases for domain %s' % domain, 'error')
return flask.redirect(
flask.url_for('.alias_list', domain_name=domain.name))
form = forms.AliasForm()
if form.validate_on_submit():
if domain.has_email(form.localpart.data):
flask.flash('Email is already used', 'error')
else:
alias = models.Alias(domain=domain)
form.populate_obj(alias)
db.session.add(alias)
db.session.commit()
flask.flash('Alias %s created' % alias)
return flask.redirect(
flask.url_for('.alias_list', domain_name=domain.name))
return flask.render_template('alias/create.html',
domain=domain, form=form)
@app.route('/alias/edit/<alias>', methods=['GET', 'POST'])
@flask_login.login_required
def alias_edit(alias):
alias = utils.get_alias(alias)
form = forms.AliasForm(obj=alias)
wtforms_components.read_only(form.localpart)
if form.validate_on_submit():
form.populate_obj(alias)
db.session.commit()
flask.flash('Alias %s updated' % alias)
return flask.redirect(
flask.url_for('.alias_list', domain_name=alias.domain.name))
return flask.render_template('alias/edit.html',
form=form, alias=alias, domain=alias.domain)
@app.route('/alias/delete/<alias>', methods=['GET'])
@flask_login.login_required
def alias_delete(alias):
alias = utils.get_alias(alias)
db.session.delete(alias)
db.session.commit()
flask.flash('Alias %s deleted' % alias)
return flask.redirect(
flask.url_for('.alias_list', domain_name=alias.domain.name))
| from freeposte.admin import app, db, models, forms, utils
import os
import flask
import flask_login
import wtforms_components
@app.route('/alias/list/<domain_name>', methods=['GET'])
@flask_login.login_required
def alias_list(domain_name):
domain = utils.get_domain_admin(domain_name)
return flask.render_template('alias/list.html', domain=domain)
@app.route('/alias/create/<domain_name>', methods=['GET', 'POST'])
@flask_login.login_required
def alias_create(domain_name):
domain = utils.get_domain_admin(domain_name)
if domain.max_aliases and len(domain.aliases) >= domain.max_aliases:
flask.flash('Too many aliases for domain %s' % domain, 'error')
return flask.redirect(
flask.url_for('.alias_list', domain_name=domain.name))
form = forms.AliasForm()
if form.validate_on_submit():
if domain.has_email(form.localpart.data):
flask.flash('Email is already used', 'error')
else:
alias = models.Alias(domain=domain)
form.populate_obj(alias)
db.session.add(alias)
db.session.commit()
flask.flash('Alias %s created' % alias)
return flask.redirect(
flask.url_for('.alias_list', domain_name=domain.name))
return flask.render_template('alias/create.html',
domain=domain, form=form)
@app.route('/alias/edit/<alias>', methods=['GET', 'POST'])
@flask_login.login_required
def alias_edit(alias):
alias = utils.get_alias(alias)
form = forms.AliasForm(obj=alias)
wtforms_components.read_only(form.localpart)
if form.validate_on_submit():
form.populate_obj(alias)
db.session.commit()
flask.flash('Alias %s updated' % alias)
return flask.redirect(
flask.url_for('.alias_list', domain_name=alias.domain.name))
return flask.render_template('alias/edit.html',
form=form, alias=alias, domain=alias.domain)
@app.route('/alias/delete/<alias>', methods=['GET'])
@flask_login.login_required
def alias_delete(alias):
alias = utils.get_alias(alias)
db.session.delete(alias)
db.session.commit()
flask.flash('Alias %s deleted' % alias)
return flask.redirect(
flask.url_for('.alias_list', domain_name=alias.domain.name)) | none | 1 | 2.28836 | 2 | |
api/manage.py | countable-web/queue-management | 0 | 6619304 | """Manage the database and some other items required to run the API"""
from flask_script import Command, Manager, Option # class for handling a set of commands
from flask_migrate import Migrate, MigrateCommand, upgrade
from qsystem import db, application
from app import models
import logging
from datetime import datetime
migrate = Migrate(application, db)
manager = Manager(application)
class Bootstrap(Command):
def run(self):
print("Clearing out all models")
models.Period.query.delete()
models.PeriodState.query.delete()
models.ServiceReq.query.delete()
models.SRState.query.delete()
models.Citizen.query.delete()
models.CitizenState.query.delete()
models.CSR.query.delete()
models.CSRState.query.delete()
# models.OfficeService.query.delete() # This needs to be updated.
models.Office.query.delete()
models.SmartBoard.query.delete()
# models.RolePermission.query.delete() # No data in this table yet.
models.Role.query.delete()
# models.Permission.query.delete() # No data in this table yet.
models.Service.query.filter_by(actual_service_ind=1).delete()
models.Service.query.delete()
models.Channel.query.delete()
db.session.commit()
print("Starting to bootstrap data")
#-- Channels --------------------------------------------------------
print("--> Channels")
channel1 = models.Channel(
channel_name="In Person"
)
channel2 = models.Channel(
channel_name="Phone"
)
channel3 = models.Channel(
channel_name="Back Office"
)
channel4 = models.Channel(
channel_name="Email/Fax/Mail"
)
channel5 = models.Channel(
channel_name="CATs Assist"
)
channel6 = models.Channel(
channel_name="Mobile Assist"
)
db.session.add(channel1)
db.session.add(channel2)
db.session.add(channel3)
db.session.add(channel4)
db.session.add(channel5)
db.session.add(channel6)
db.session.commit()
#-- Roles -----------------------------------------------------------
print("--> Roles")
role_csr = models.Role(
role_code="CSR",
role_desc="Customer Service Representative"
)
role_ga = models.Role(
role_code="GA",
role_desc="Government Agent"
)
role3 = models.Role(
role_code="HELPDESK",
role_desc="Help Desk Functions"
)
role4 = models.Role(
role_code="SUPPORT",
role_desc="All Administrative Functions"
)
role5 = models.Role(
role_code="ANALYTICS",
role_desc="Analtyics Team to update Services per Office"
)
db.session.add(role_csr)
db.session.add(role_ga)
db.session.add(role3)
db.session.add(role4)
db.session.add(role5)
db.session.commit()
#-- Period State ----------------------------------------------------
print("--> Period States")
period_state1 = models.PeriodState(
ps_name="Waiting",
ps_desc="Waiting in line to see a CSR, after a ticket has been created for them. The time they are in this state is the Citizen Wait Time",
ps_number=1
)
period_state2 = models.PeriodState(
ps_name="Ticket Creation",
ps_desc="A receptionist is creating a service request / ticket for the citizen. This is the first state a citizen will be in. The time they are in this state is the CSR prep time.",
ps_number=2
)
period_state3 = models.PeriodState(
ps_name="Invited",
ps_desc="Has been called from the waiting area to be served. The time they are in this state is the time it takes them to walk from the waiting area, to the CSR, until the CSR starts to serve them.",
ps_number=4
)
period_state4 = models.PeriodState(
ps_name="Being Served",
ps_desc="Is being servbed by a CSR. The time they are in this state is the Service time.",
ps_number=7
)
period_state5 = models.PeriodState(
ps_name="On hold",
ps_desc="Has been placed on hold be a csr. The time they are in this state is the Hold time",
ps_number=11
)
db.session.add(period_state1)
db.session.add(period_state2)
db.session.add(period_state3)
db.session.add(period_state4)
db.session.add(period_state5)
db.session.commit()
#-- Smartboard values -----------------------------------------------
print("--> Smartboard")
smartboard_call_name = models.SmartBoard(sb_type="callbyname")
smartboard_call_ticket = models.SmartBoard(sb_type="callbyticket")
smartboard_no_call = models.SmartBoard(sb_type="nocallonsmartboard")
db.session.add(smartboard_call_name)
db.session.add(smartboard_call_ticket)
db.session.add(smartboard_no_call)
db.session.commit()
#-- Citizen state values --------------------------------------------
print("--> Citizen State")
cs1 = models.CitizenState(
cs_state_name="Active",
cs_state_desc="Citizen is active, a ticket is being or has been created for them"
)
cs2 = models.CitizenState(
cs_state_name="Received Services",
cs_state_desc="Citizen left after receiving services"
)
cs3 = models.CitizenState(
cs_state_name="Left before receiving services",
cs_state_desc="Citizen left, after ticket creation, before service was started for them"
)
db.session.add(cs1)
db.session.add(cs2)
db.session.add(cs3)
db.session.commit()
#-- CSR state values --------------------------------------------
print("--> CSR State")
csr_state_logout = models.CSRState(
csr_state_name="Logout",
csr_state_desc="Logged out"
)
csr_state2 = models.CSRState(
csr_state_name="Login",
csr_state_desc="Logged in"
)
csr_state3 = models.CSRState(
csr_state_name="Break",
csr_state_desc="Currently on break"
)
csr_state4 = models.CSRState(
csr_state_name="Serving",
csr_state_desc="Serving a citizen"
)
csr_state5 = models.CSRState(
csr_state_name="Back Office",
csr_state_desc="Currently doing back office work"
)
db.session.add(csr_state_logout)
db.session.add(csr_state2)
db.session.add(csr_state3)
db.session.add(csr_state4)
db.session.add(csr_state5)
db.session.commit()
#-- Service Request values ------------------------------------------
print("--> Service Request states")
sr_state1 = models.SRState(
sr_code="Pending",
sr_state_desc="Service Request is pending, citizen has not started receiving services yet."
)
sr_state2 = models.SRState(
sr_code="Active",
sr_state_desc="Service Request is active. A citizen has started being served."
)
sr_state3 = models.SRState(
sr_code="Complete",
sr_state_desc="The service has been received for this Service Request."
)
db.session.add(sr_state1)
db.session.add(sr_state2)
db.session.add(sr_state3)
db.session.commit()
#-- Service Category values -----------------------------------------
print("--> Categories and Services")
category_msp = models.Service(
service_code = "MSP",
service_name = "MSP",
service_desc = "Medical Services Plan",
prefix = "A",
display_dashboard_ind = 0,
actual_service_ind = 0
)
category_ptax = models.Service(
service_code = "PTAX",
service_name = "Property Tax",
service_desc = "Property Tax",
prefix = "A",
display_dashboard_ind = 0,
actual_service_ind = 0
)
category_back_office = models.Service(
service_code = "Back Office",
service_name = "Back Office",
service_desc = "Back Office",
prefix = "B",
display_dashboard_ind = 0,
actual_service_ind = 0
)
db.session.add(category_msp)
db.session.add(category_ptax)
db.session.add(category_back_office)
db.session.commit()
#-- Service values --------------------------------------------------
service_msp6 = models.Service(
service_code = "MSP - 006",
service_name = "Payment - MSP",
service_desc = "MSP- SC686, SC1089 -Pay direct payment, employer payment",
parent_id = category_msp.service_id,
prefix = "A",
display_dashboard_ind = 1,
actual_service_ind = 1
)
service_ptax4 = models.Service(
service_code = "PTAX - 004",
service_name = "Other - PTAX",
service_desc = "PTax/RPT - Providing information, forms, searches, tax clearance certificate, address changes, add new owner, extensions, forfeiture status, tax search, etc.",
parent_id = category_ptax.service_id,
prefix = "A",
display_dashboard_ind = 1,
actual_service_ind = 1
)
service_ptax1 = models.Service(
service_code = "PTAX - 001",
service_name = "Deferment Application",
service_desc = "PTax/RPT - Process application - new and renewal, post note, etc.",
parent_id = category_ptax.service_id,
prefix = "A",
display_dashboard_ind = 1,
actual_service_ind = 1
)
service_ptax2 = models.Service(
service_code = "PTAX - 002",
service_name = "Deferment Payment",
service_desc = "PTax/RPT - Full or Partial deferment account payment",
parent_id = category_ptax.service_id,
prefix = "A",
display_dashboard_ind = 1,
actual_service_ind = 1
)
service_msp1 = models.Service(
service_code = "MSP - 001",
service_name = "Account Enquiry/Update",
service_desc = "MSP-Address or family changes, personal information updates, general status enquiries, billing information from Biller Direct, immigration documents to HIBC, needs PHN, etc.",
parent_id = category_msp.service_id,
prefix = "A",
display_dashboard_ind = 1,
actual_service_ind = 1
)
service_msp2 = models.Service(
service_code = "MSP - 002",
service_name = "BCSC Non Photo",
service_desc = "MSP- SC2607 RAPID ordering , status enquiry, address update, also for the non photo form process when photo eligible, etc.",
parent_id = category_msp.service_id,
prefix = "A",
display_dashboard_ind = 1,
actual_service_ind = 1
)
service_bo1 = models.Service(
service_code = "Back Office - 001",
service_name = "Batching",
service_desc = "Batching",
parent_id = category_back_office.service_id,
prefix = "B",
display_dashboard_ind = 1,
actual_service_ind = 1
)
service_bo2 = models.Service(
service_code = "Back Office - 002",
service_name = "Cash Out",
service_desc = "Cash Out",
parent_id = category_back_office.service_id,
prefix = "B",
display_dashboard_ind = 1,
actual_service_ind = 1
)
db.session.add(service_bo1)
db.session.add(service_bo2)
db.session.add(service_msp1)
db.session.add(service_msp2)
db.session.add(service_msp6)
db.session.add(service_ptax1)
db.session.add(service_ptax2)
db.session.add(service_ptax4)
db.session.commit()
#-- Office values ---------------------------------------------------
print("--> Offices")
office_test = models.Office(
office_name="Test Office",
office_number=999,
sb_id=smartboard_call_ticket.sb_id
)
office_100 = models.Office(
office_name="100 Mile House",
office_number=1,
sb_id=smartboard_no_call.sb_id
)
office_victoria = models.Office(
office_name="Victoria",
office_number=61,
sb_id=smartboard_call_name.sb_id
)
db.session.add(office_test)
db.session.add(office_100)
db.session.add(office_victoria)
db.session.commit()
#-- CSR values ------------------------------------------------------
print("--> CSRs")
cfms_postman_operator = models.CSR(
username="cfms-postman-operator",
office_id=office_test.office_id,
role_id=role_csr.role_id,
qt_xn_csr_ind=1,
receptionist_ind=1,
deleted=None,
csr_state_id=csr_state_logout.csr_state_id
)
cfms_postman_non_operator = models.CSR(
username="cfms-postman-non-operator",
office_id=office_test.office_id,
role_id=role_csr.role_id,
qt_xn_csr_ind=0,
receptionist_ind=1,
deleted=None,
csr_state_id=csr_state_logout.csr_state_id
)
demo_ga = models.CSR(
username="admin",
office_id=office_test.office_id,
role_id=role_ga.role_id,
qt_xn_csr_ind=0,
receptionist_ind=1,
deleted=None,
csr_state_id=csr_state_logout.csr_state_id
)
demo_csr = models.CSR(
username="user",
office_id=office_test.office_id,
role_id=role_csr.role_id,
qt_xn_csr_ind=0,
receptionist_ind=1,
deleted=None,
csr_state_id=csr_state_logout.csr_state_id
)
db.session.add(cfms_postman_operator)
db.session.add(cfms_postman_non_operator)
db.session.add(demo_ga)
db.session.add(demo_csr)
db.session.commit()
#-- The Office / Services values ------------------------------------
print("--> Office Services")
office_test.services.append(category_back_office)
office_test.services.append(category_msp)
office_test.services.append(category_ptax)
office_test.services.append(service_bo1)
office_test.services.append(service_bo2)
office_test.services.append(service_msp1)
office_test.services.append(service_msp2)
office_test.services.append(service_msp6)
office_test.services.append(service_ptax1)
office_test.services.append(service_ptax2)
office_test.services.append(service_ptax4)
office_victoria.services.append(category_back_office)
office_victoria.services.append(category_msp)
office_victoria.services.append(service_bo1)
office_victoria.services.append(service_bo2)
office_victoria.services.append(service_msp1)
office_victoria.services.append(service_msp2)
office_victoria.services.append(service_msp6)
office_100.services.append(category_back_office)
office_100.services.append(category_ptax)
office_100.services.append(service_bo1)
office_100.services.append(service_bo2)
office_100.services.append(service_ptax1)
office_100.services.append(service_ptax2)
office_100.services.append(service_ptax4)
db.session.commit()
class FetchData(Command):
def run(self):
offices = db.session.query(models.Office).all()
for o in offices:
print(o.id, o.name)
class CreateUser(Command):
option_list = (
Option('--username', '-u', dest='username'),
Option('--password', '-p', dest='password'),
Option('--office_id', '-o', dest='office_id'),
)
def run(self, username, password, office_id):
if username is None or password is None or office_id is None:
exit("Error, username, password and office_id are all required")
user = models.User(username, password, office_id)
db.session.add(user)
db.session.commit()
class MigrateWrapper(Command):
def run(self):
upgrade()
manager.add_command('db', MigrateCommand)
manager.add_command('migrate', MigrateWrapper())
manager.add_command('bootstrap', Bootstrap())
manager.add_command('fetch', FetchData())
manager.add_command('create_user', CreateUser())
if __name__ == '__main__':
logging.log(logging.INFO, 'Running the Manager')
manager.run()
| """Manage the database and some other items required to run the API"""
from flask_script import Command, Manager, Option # class for handling a set of commands
from flask_migrate import Migrate, MigrateCommand, upgrade
from qsystem import db, application
from app import models
import logging
from datetime import datetime
migrate = Migrate(application, db)
manager = Manager(application)
class Bootstrap(Command):
def run(self):
print("Clearing out all models")
models.Period.query.delete()
models.PeriodState.query.delete()
models.ServiceReq.query.delete()
models.SRState.query.delete()
models.Citizen.query.delete()
models.CitizenState.query.delete()
models.CSR.query.delete()
models.CSRState.query.delete()
# models.OfficeService.query.delete() # This needs to be updated.
models.Office.query.delete()
models.SmartBoard.query.delete()
# models.RolePermission.query.delete() # No data in this table yet.
models.Role.query.delete()
# models.Permission.query.delete() # No data in this table yet.
models.Service.query.filter_by(actual_service_ind=1).delete()
models.Service.query.delete()
models.Channel.query.delete()
db.session.commit()
print("Starting to bootstrap data")
#-- Channels --------------------------------------------------------
print("--> Channels")
channel1 = models.Channel(
channel_name="In Person"
)
channel2 = models.Channel(
channel_name="Phone"
)
channel3 = models.Channel(
channel_name="Back Office"
)
channel4 = models.Channel(
channel_name="Email/Fax/Mail"
)
channel5 = models.Channel(
channel_name="CATs Assist"
)
channel6 = models.Channel(
channel_name="Mobile Assist"
)
db.session.add(channel1)
db.session.add(channel2)
db.session.add(channel3)
db.session.add(channel4)
db.session.add(channel5)
db.session.add(channel6)
db.session.commit()
#-- Roles -----------------------------------------------------------
print("--> Roles")
role_csr = models.Role(
role_code="CSR",
role_desc="Customer Service Representative"
)
role_ga = models.Role(
role_code="GA",
role_desc="Government Agent"
)
role3 = models.Role(
role_code="HELPDESK",
role_desc="Help Desk Functions"
)
role4 = models.Role(
role_code="SUPPORT",
role_desc="All Administrative Functions"
)
role5 = models.Role(
role_code="ANALYTICS",
role_desc="Analtyics Team to update Services per Office"
)
db.session.add(role_csr)
db.session.add(role_ga)
db.session.add(role3)
db.session.add(role4)
db.session.add(role5)
db.session.commit()
#-- Period State ----------------------------------------------------
print("--> Period States")
period_state1 = models.PeriodState(
ps_name="Waiting",
ps_desc="Waiting in line to see a CSR, after a ticket has been created for them. The time they are in this state is the Citizen Wait Time",
ps_number=1
)
period_state2 = models.PeriodState(
ps_name="Ticket Creation",
ps_desc="A receptionist is creating a service request / ticket for the citizen. This is the first state a citizen will be in. The time they are in this state is the CSR prep time.",
ps_number=2
)
period_state3 = models.PeriodState(
ps_name="Invited",
ps_desc="Has been called from the waiting area to be served. The time they are in this state is the time it takes them to walk from the waiting area, to the CSR, until the CSR starts to serve them.",
ps_number=4
)
period_state4 = models.PeriodState(
ps_name="Being Served",
ps_desc="Is being servbed by a CSR. The time they are in this state is the Service time.",
ps_number=7
)
period_state5 = models.PeriodState(
ps_name="On hold",
ps_desc="Has been placed on hold be a csr. The time they are in this state is the Hold time",
ps_number=11
)
db.session.add(period_state1)
db.session.add(period_state2)
db.session.add(period_state3)
db.session.add(period_state4)
db.session.add(period_state5)
db.session.commit()
#-- Smartboard values -----------------------------------------------
print("--> Smartboard")
smartboard_call_name = models.SmartBoard(sb_type="callbyname")
smartboard_call_ticket = models.SmartBoard(sb_type="callbyticket")
smartboard_no_call = models.SmartBoard(sb_type="nocallonsmartboard")
db.session.add(smartboard_call_name)
db.session.add(smartboard_call_ticket)
db.session.add(smartboard_no_call)
db.session.commit()
#-- Citizen state values --------------------------------------------
print("--> Citizen State")
cs1 = models.CitizenState(
cs_state_name="Active",
cs_state_desc="Citizen is active, a ticket is being or has been created for them"
)
cs2 = models.CitizenState(
cs_state_name="Received Services",
cs_state_desc="Citizen left after receiving services"
)
cs3 = models.CitizenState(
cs_state_name="Left before receiving services",
cs_state_desc="Citizen left, after ticket creation, before service was started for them"
)
db.session.add(cs1)
db.session.add(cs2)
db.session.add(cs3)
db.session.commit()
#-- CSR state values --------------------------------------------
print("--> CSR State")
csr_state_logout = models.CSRState(
csr_state_name="Logout",
csr_state_desc="Logged out"
)
csr_state2 = models.CSRState(
csr_state_name="Login",
csr_state_desc="Logged in"
)
csr_state3 = models.CSRState(
csr_state_name="Break",
csr_state_desc="Currently on break"
)
csr_state4 = models.CSRState(
csr_state_name="Serving",
csr_state_desc="Serving a citizen"
)
csr_state5 = models.CSRState(
csr_state_name="Back Office",
csr_state_desc="Currently doing back office work"
)
db.session.add(csr_state_logout)
db.session.add(csr_state2)
db.session.add(csr_state3)
db.session.add(csr_state4)
db.session.add(csr_state5)
db.session.commit()
#-- Service Request values ------------------------------------------
print("--> Service Request states")
sr_state1 = models.SRState(
sr_code="Pending",
sr_state_desc="Service Request is pending, citizen has not started receiving services yet."
)
sr_state2 = models.SRState(
sr_code="Active",
sr_state_desc="Service Request is active. A citizen has started being served."
)
sr_state3 = models.SRState(
sr_code="Complete",
sr_state_desc="The service has been received for this Service Request."
)
db.session.add(sr_state1)
db.session.add(sr_state2)
db.session.add(sr_state3)
db.session.commit()
#-- Service Category values -----------------------------------------
print("--> Categories and Services")
category_msp = models.Service(
service_code = "MSP",
service_name = "MSP",
service_desc = "Medical Services Plan",
prefix = "A",
display_dashboard_ind = 0,
actual_service_ind = 0
)
category_ptax = models.Service(
service_code = "PTAX",
service_name = "Property Tax",
service_desc = "Property Tax",
prefix = "A",
display_dashboard_ind = 0,
actual_service_ind = 0
)
category_back_office = models.Service(
service_code = "Back Office",
service_name = "Back Office",
service_desc = "Back Office",
prefix = "B",
display_dashboard_ind = 0,
actual_service_ind = 0
)
db.session.add(category_msp)
db.session.add(category_ptax)
db.session.add(category_back_office)
db.session.commit()
#-- Service values --------------------------------------------------
service_msp6 = models.Service(
service_code = "MSP - 006",
service_name = "Payment - MSP",
service_desc = "MSP- SC686, SC1089 -Pay direct payment, employer payment",
parent_id = category_msp.service_id,
prefix = "A",
display_dashboard_ind = 1,
actual_service_ind = 1
)
service_ptax4 = models.Service(
service_code = "PTAX - 004",
service_name = "Other - PTAX",
service_desc = "PTax/RPT - Providing information, forms, searches, tax clearance certificate, address changes, add new owner, extensions, forfeiture status, tax search, etc.",
parent_id = category_ptax.service_id,
prefix = "A",
display_dashboard_ind = 1,
actual_service_ind = 1
)
service_ptax1 = models.Service(
service_code = "PTAX - 001",
service_name = "Deferment Application",
service_desc = "PTax/RPT - Process application - new and renewal, post note, etc.",
parent_id = category_ptax.service_id,
prefix = "A",
display_dashboard_ind = 1,
actual_service_ind = 1
)
service_ptax2 = models.Service(
service_code = "PTAX - 002",
service_name = "Deferment Payment",
service_desc = "PTax/RPT - Full or Partial deferment account payment",
parent_id = category_ptax.service_id,
prefix = "A",
display_dashboard_ind = 1,
actual_service_ind = 1
)
service_msp1 = models.Service(
service_code = "MSP - 001",
service_name = "Account Enquiry/Update",
service_desc = "MSP-Address or family changes, personal information updates, general status enquiries, billing information from Biller Direct, immigration documents to HIBC, needs PHN, etc.",
parent_id = category_msp.service_id,
prefix = "A",
display_dashboard_ind = 1,
actual_service_ind = 1
)
service_msp2 = models.Service(
service_code = "MSP - 002",
service_name = "BCSC Non Photo",
service_desc = "MSP- SC2607 RAPID ordering , status enquiry, address update, also for the non photo form process when photo eligible, etc.",
parent_id = category_msp.service_id,
prefix = "A",
display_dashboard_ind = 1,
actual_service_ind = 1
)
service_bo1 = models.Service(
service_code = "Back Office - 001",
service_name = "Batching",
service_desc = "Batching",
parent_id = category_back_office.service_id,
prefix = "B",
display_dashboard_ind = 1,
actual_service_ind = 1
)
service_bo2 = models.Service(
service_code = "Back Office - 002",
service_name = "Cash Out",
service_desc = "Cash Out",
parent_id = category_back_office.service_id,
prefix = "B",
display_dashboard_ind = 1,
actual_service_ind = 1
)
db.session.add(service_bo1)
db.session.add(service_bo2)
db.session.add(service_msp1)
db.session.add(service_msp2)
db.session.add(service_msp6)
db.session.add(service_ptax1)
db.session.add(service_ptax2)
db.session.add(service_ptax4)
db.session.commit()
#-- Office values ---------------------------------------------------
print("--> Offices")
office_test = models.Office(
office_name="Test Office",
office_number=999,
sb_id=smartboard_call_ticket.sb_id
)
office_100 = models.Office(
office_name="100 Mile House",
office_number=1,
sb_id=smartboard_no_call.sb_id
)
office_victoria = models.Office(
office_name="Victoria",
office_number=61,
sb_id=smartboard_call_name.sb_id
)
db.session.add(office_test)
db.session.add(office_100)
db.session.add(office_victoria)
db.session.commit()
#-- CSR values ------------------------------------------------------
print("--> CSRs")
cfms_postman_operator = models.CSR(
username="cfms-postman-operator",
office_id=office_test.office_id,
role_id=role_csr.role_id,
qt_xn_csr_ind=1,
receptionist_ind=1,
deleted=None,
csr_state_id=csr_state_logout.csr_state_id
)
cfms_postman_non_operator = models.CSR(
username="cfms-postman-non-operator",
office_id=office_test.office_id,
role_id=role_csr.role_id,
qt_xn_csr_ind=0,
receptionist_ind=1,
deleted=None,
csr_state_id=csr_state_logout.csr_state_id
)
demo_ga = models.CSR(
username="admin",
office_id=office_test.office_id,
role_id=role_ga.role_id,
qt_xn_csr_ind=0,
receptionist_ind=1,
deleted=None,
csr_state_id=csr_state_logout.csr_state_id
)
demo_csr = models.CSR(
username="user",
office_id=office_test.office_id,
role_id=role_csr.role_id,
qt_xn_csr_ind=0,
receptionist_ind=1,
deleted=None,
csr_state_id=csr_state_logout.csr_state_id
)
db.session.add(cfms_postman_operator)
db.session.add(cfms_postman_non_operator)
db.session.add(demo_ga)
db.session.add(demo_csr)
db.session.commit()
#-- The Office / Services values ------------------------------------
print("--> Office Services")
office_test.services.append(category_back_office)
office_test.services.append(category_msp)
office_test.services.append(category_ptax)
office_test.services.append(service_bo1)
office_test.services.append(service_bo2)
office_test.services.append(service_msp1)
office_test.services.append(service_msp2)
office_test.services.append(service_msp6)
office_test.services.append(service_ptax1)
office_test.services.append(service_ptax2)
office_test.services.append(service_ptax4)
office_victoria.services.append(category_back_office)
office_victoria.services.append(category_msp)
office_victoria.services.append(service_bo1)
office_victoria.services.append(service_bo2)
office_victoria.services.append(service_msp1)
office_victoria.services.append(service_msp2)
office_victoria.services.append(service_msp6)
office_100.services.append(category_back_office)
office_100.services.append(category_ptax)
office_100.services.append(service_bo1)
office_100.services.append(service_bo2)
office_100.services.append(service_ptax1)
office_100.services.append(service_ptax2)
office_100.services.append(service_ptax4)
db.session.commit()
class FetchData(Command):
def run(self):
offices = db.session.query(models.Office).all()
for o in offices:
print(o.id, o.name)
class CreateUser(Command):
option_list = (
Option('--username', '-u', dest='username'),
Option('--password', '-p', dest='password'),
Option('--office_id', '-o', dest='office_id'),
)
def run(self, username, password, office_id):
if username is None or password is None or office_id is None:
exit("Error, username, password and office_id are all required")
user = models.User(username, password, office_id)
db.session.add(user)
db.session.commit()
class MigrateWrapper(Command):
def run(self):
upgrade()
manager.add_command('db', MigrateCommand)
manager.add_command('migrate', MigrateWrapper())
manager.add_command('bootstrap', Bootstrap())
manager.add_command('fetch', FetchData())
manager.add_command('create_user', CreateUser())
if __name__ == '__main__':
logging.log(logging.INFO, 'Running the Manager')
manager.run()
| en | 0.127007 | Manage the database and some other items required to run the API # class for handling a set of commands # models.OfficeService.query.delete() # This needs to be updated. # models.RolePermission.query.delete() # No data in this table yet. # models.Permission.query.delete() # No data in this table yet. #-- Channels -------------------------------------------------------- #-- Roles ----------------------------------------------------------- #-- Period State ---------------------------------------------------- #-- Smartboard values ----------------------------------------------- #-- Citizen state values -------------------------------------------- #-- CSR state values -------------------------------------------- #-- Service Request values ------------------------------------------ #-- Service Category values ----------------------------------------- #-- Service values -------------------------------------------------- #-- Office values --------------------------------------------------- #-- CSR values ------------------------------------------------------ #-- The Office / Services values ------------------------------------ | 2.719424 | 3 |
python/cleanCodecs.py | SoftwareAG/cumulocity-lora | 7 | 6619305 | import sys, requests;
url = sys.argv[1]
username = sys.argv[2]
password = sys.argv[3]
codec = sys.argv[4]
auth = (username, password)
moId = requests.get(url + "/identity/externalIds/Codec ID/" + codec, auth=auth).json()["managedObject"]["id"]
print("Found id " + moId + " for codec " + codec)
mos = requests.get(url + "/inventory/managedObjects?type=Device Codec&pageSize=1000&query=lora_codec_DeviceCodecRepresentation.id eq " + codec, auth=auth).json()["managedObjects"]
for mo in mos:
if mo["id"] != moId:
print("Deleting redundant managed object " + mo["id"])
print(requests.delete(url + "/inventory/managedObjects/" + mo["id"], auth=auth)) | import sys, requests;
url = sys.argv[1]
username = sys.argv[2]
password = sys.argv[3]
codec = sys.argv[4]
auth = (username, password)
moId = requests.get(url + "/identity/externalIds/Codec ID/" + codec, auth=auth).json()["managedObject"]["id"]
print("Found id " + moId + " for codec " + codec)
mos = requests.get(url + "/inventory/managedObjects?type=Device Codec&pageSize=1000&query=lora_codec_DeviceCodecRepresentation.id eq " + codec, auth=auth).json()["managedObjects"]
for mo in mos:
if mo["id"] != moId:
print("Deleting redundant managed object " + mo["id"])
print(requests.delete(url + "/inventory/managedObjects/" + mo["id"], auth=auth)) | none | 1 | 2.610402 | 3 | |
01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/02-Aluno/Edno/011.py | moacirsouza/nadas | 1 | 6619306 | <reponame>moacirsouza/nadas<filename>01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/02-Aluno/Edno/011.py<gh_stars>1-10
# (01-Gabarito/011.py)) Faça um programa que leia a largura e a altura de uma parede em metros,
# calcule a sua área e a quantidade de tinta necessária para pintá-la,
# sabendo que cada litro de tinta, pinta uma área de 2m².
largura = float(input("Qual a largura da parede, patrão?\t"))
altura = float(input("Qual a altura da parede, patrão?\t"))
total = largura * altura
litros_necessarios = total/2
print('')
print('Patrão, o sr vai precisa de ' + str(litros_necessarios) + ' litros de tinta.')
print('Agora é só escolher a cor! :D') | # (01-Gabarito/011.py)) Faça um programa que leia a largura e a altura de uma parede em metros,
# calcule a sua área e a quantidade de tinta necessária para pintá-la,
# sabendo que cada litro de tinta, pinta uma área de 2m².
largura = float(input("Qual a largura da parede, patrão?\t"))
altura = float(input("Qual a altura da parede, patrão?\t"))
total = largura * altura
litros_necessarios = total/2
print('')
print('Patrão, o sr vai precisa de ' + str(litros_necessarios) + ' litros de tinta.')
print('Agora é só escolher a cor! :D') | pt | 0.989865 | # (01-Gabarito/011.py)) Faça um programa que leia a largura e a altura de uma parede em metros, # calcule a sua área e a quantidade de tinta necessária para pintá-la, # sabendo que cada litro de tinta, pinta uma área de 2m². | 3.989944 | 4 |
setup.py | egbertbouman/tribler-debug-ui | 0 | 6619307 | <reponame>egbertbouman/tribler-debug-ui<gh_stars>0
import os
from distutils.util import convert_path
from setuptools import setup, find_packages
ns = {}
with open(convert_path('tribler_debug_ui/__init__.py')) as fp:
exec(fp.read(), ns)
def find_files(directory):
return [os.path.join('..', path, fn) for (path, _, fns) in os.walk(directory) for fn in fns]
setup(
name='tribler_debug_ui',
version=ns['__version__'],
packages=find_packages(),
package_data={'': find_files('tribler_debug_ui/app/dist')},
install_requires=["aiohttp"]
)
| import os
from distutils.util import convert_path
from setuptools import setup, find_packages
ns = {}
with open(convert_path('tribler_debug_ui/__init__.py')) as fp:
exec(fp.read(), ns)
def find_files(directory):
return [os.path.join('..', path, fn) for (path, _, fns) in os.walk(directory) for fn in fns]
setup(
name='tribler_debug_ui',
version=ns['__version__'],
packages=find_packages(),
package_data={'': find_files('tribler_debug_ui/app/dist')},
install_requires=["aiohttp"]
) | none | 1 | 1.732356 | 2 | |
src/pyfme/models/state/angular_velocity.py | gaofeng2020/PyFME | 199 | 6619308 | <filename>src/pyfme/models/state/angular_velocity.py<gh_stars>100-1000
"""
Python Flight Mechanics Engine (PyFME).
Copyright (c) AeroPython Development Team.
Distributed under the terms of the MIT License.
Angular Velocity
----------------
"""
from abc import abstractmethod
import numpy as np
class AngularVelocity:
"""Angular velocity
vel_ang : ndarray, shape(3)
(p [rad/s], q [rad/s], r [rad/s])
p
q
r
euler_ang_rates : ndarray, shape(3)
(theta_dot [rad/s], phi_dot [rad/s], psi_dot [rad/s])
theta
phi
psi
"""
def __init__(self):
# ANGULAR VELOCITY: (p, q, r)
self._vel_ang_body = np.zeros(3) # rad/s
# EULER ANGLE RATES (theta_dot, phi_dot, psi_dot)
self._euler_ang_rate = np.zeros(3) # rad/s
@abstractmethod
def update(self, coords, attitude):
raise NotImplementedError
@property
def vel_ang_body(self):
return self._vel_ang_body
@property
def p(self):
return self._vel_ang_body[0]
@property
def q(self):
return self._vel_ang_body[1]
@property
def r(self):
return self._vel_ang_body[2]
@property
def euler_ang_rate(self):
return self._euler_ang_rate
@property
def theta_dot(self):
return self._euler_ang_rate[0]
@property
def phi_dot(self):
return self._euler_ang_rate[1]
@property
def psi_dot(self):
return self._euler_ang_rate[2]
@property
def value(self):
"""Only for testing purposes"""
return np.hstack((self.vel_ang_body, self.euler_ang_rate))
class BodyAngularVelocity(AngularVelocity):
def __init__(self, p, q, r, attitude):
# TODO: docstring
super().__init__()
self.update(np.array([p, q, r]), attitude)
def update(self, coords, attitude):
self._vel_ang_body[:] = coords
# TODO: transform angular velocity in body axis to euler angles
# rates
self._euler_ang_rate = np.zeros(3) # rad/s
def __repr__(self):
return (f"P: {self.p:.2f} rad/s, "
f"Q: {self.q:.2f} rad/s, "
f"R: {self.r:.2f} rad/s")
class EulerAngularRates(AngularVelocity):
def __init__(self, theta_dot, phi_dot, psi_dot, attitude):
# TODO: docstring
super().__init__()
self.update(np.array([theta_dot, phi_dot, psi_dot]),
attitude)
def update(self, coords, attitude):
self._euler_ang_rate[:] = coords
# TODO: transform euler angles rates to angular velocity in body
# axis
self._vel_ang_body[:] = np.zeros(3) # rad/s
| <filename>src/pyfme/models/state/angular_velocity.py<gh_stars>100-1000
"""
Python Flight Mechanics Engine (PyFME).
Copyright (c) AeroPython Development Team.
Distributed under the terms of the MIT License.
Angular Velocity
----------------
"""
from abc import abstractmethod
import numpy as np
class AngularVelocity:
"""Angular velocity
vel_ang : ndarray, shape(3)
(p [rad/s], q [rad/s], r [rad/s])
p
q
r
euler_ang_rates : ndarray, shape(3)
(theta_dot [rad/s], phi_dot [rad/s], psi_dot [rad/s])
theta
phi
psi
"""
def __init__(self):
# ANGULAR VELOCITY: (p, q, r)
self._vel_ang_body = np.zeros(3) # rad/s
# EULER ANGLE RATES (theta_dot, phi_dot, psi_dot)
self._euler_ang_rate = np.zeros(3) # rad/s
@abstractmethod
def update(self, coords, attitude):
raise NotImplementedError
@property
def vel_ang_body(self):
return self._vel_ang_body
@property
def p(self):
return self._vel_ang_body[0]
@property
def q(self):
return self._vel_ang_body[1]
@property
def r(self):
return self._vel_ang_body[2]
@property
def euler_ang_rate(self):
return self._euler_ang_rate
@property
def theta_dot(self):
return self._euler_ang_rate[0]
@property
def phi_dot(self):
return self._euler_ang_rate[1]
@property
def psi_dot(self):
return self._euler_ang_rate[2]
@property
def value(self):
"""Only for testing purposes"""
return np.hstack((self.vel_ang_body, self.euler_ang_rate))
class BodyAngularVelocity(AngularVelocity):
def __init__(self, p, q, r, attitude):
# TODO: docstring
super().__init__()
self.update(np.array([p, q, r]), attitude)
def update(self, coords, attitude):
self._vel_ang_body[:] = coords
# TODO: transform angular velocity in body axis to euler angles
# rates
self._euler_ang_rate = np.zeros(3) # rad/s
def __repr__(self):
return (f"P: {self.p:.2f} rad/s, "
f"Q: {self.q:.2f} rad/s, "
f"R: {self.r:.2f} rad/s")
class EulerAngularRates(AngularVelocity):
def __init__(self, theta_dot, phi_dot, psi_dot, attitude):
# TODO: docstring
super().__init__()
self.update(np.array([theta_dot, phi_dot, psi_dot]),
attitude)
def update(self, coords, attitude):
self._euler_ang_rate[:] = coords
# TODO: transform euler angles rates to angular velocity in body
# axis
self._vel_ang_body[:] = np.zeros(3) # rad/s
| en | 0.491439 | Python Flight Mechanics Engine (PyFME). Copyright (c) AeroPython Development Team. Distributed under the terms of the MIT License. Angular Velocity ---------------- Angular velocity vel_ang : ndarray, shape(3) (p [rad/s], q [rad/s], r [rad/s]) p q r euler_ang_rates : ndarray, shape(3) (theta_dot [rad/s], phi_dot [rad/s], psi_dot [rad/s]) theta phi psi # ANGULAR VELOCITY: (p, q, r) # rad/s # EULER ANGLE RATES (theta_dot, phi_dot, psi_dot) # rad/s Only for testing purposes # TODO: docstring # TODO: transform angular velocity in body axis to euler angles # rates # rad/s # TODO: docstring # TODO: transform euler angles rates to angular velocity in body # axis # rad/s | 2.487376 | 2 |
random_geometry_points_service/endpoints/api.py | brauls/random-geometry-points-service | 0 | 6619309 | <filename>random_geometry_points_service/endpoints/api.py
"""API initialization module.
"""
from flask_restplus import Api
from .circles2d import API as circle_api
from .sphere import API as sphere_api
from .plane import API as plane_api
def init_api():
"""Create the api along with the available namespaces.
Returns:
Api: The initialized api object
"""
api = Api(
title="Random Point Generation API",
version="1.0.0",
description="Create an arbitrary number of random points on different geometries"
)
api.add_namespace(circle_api)
api.add_namespace(sphere_api)
api.add_namespace(plane_api)
return api
| <filename>random_geometry_points_service/endpoints/api.py
"""API initialization module.
"""
from flask_restplus import Api
from .circles2d import API as circle_api
from .sphere import API as sphere_api
from .plane import API as plane_api
def init_api():
"""Create the api along with the available namespaces.
Returns:
Api: The initialized api object
"""
api = Api(
title="Random Point Generation API",
version="1.0.0",
description="Create an arbitrary number of random points on different geometries"
)
api.add_namespace(circle_api)
api.add_namespace(sphere_api)
api.add_namespace(plane_api)
return api
| en | 0.500155 | API initialization module. Create the api along with the available namespaces. Returns: Api: The initialized api object | 2.871332 | 3 |
homepage/forms.py | Shawn9717/awards | 0 | 6619310 | <filename>homepage/forms.py
from django import forms
from .models import Projects,Review
class ProjectForm(forms.ModelForm):
"""
Form class to create an html form from the projects model
"""
class Meta:
model = Projects
fields = ['title','description','project_image','project_link']
class RateForm(forms.ModelForm):
"""
model form to create ratings
Args:
forms (model): [class to help in creating the model form]
"""
class Meta:
model = Review
fields = ['text','design','usability','content']
| <filename>homepage/forms.py
from django import forms
from .models import Projects,Review
class ProjectForm(forms.ModelForm):
"""
Form class to create an html form from the projects model
"""
class Meta:
model = Projects
fields = ['title','description','project_image','project_link']
class RateForm(forms.ModelForm):
"""
model form to create ratings
Args:
forms (model): [class to help in creating the model form]
"""
class Meta:
model = Review
fields = ['text','design','usability','content']
| en | 0.879085 | Form class to create an html form from the projects model model form to create ratings Args: forms (model): [class to help in creating the model form] | 2.564443 | 3 |
web/prowlbackend/users/views.py | stensjoberg/pton-prowl | 0 | 6619311 | from rest_framework import generics, permissions, views
from django.shortcuts import get_object_or_404
from rest_framework.response import Response
from .models import User
from .serializers import UserSerializer
from .permissions import IsProfileOwner
class UserListView(generics.ListAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
class UserDetailView(views.APIView):
#permission_classes = [IsProfileOwner]
def get(self, request, pk, format=None):
user = get_object_or_404(User, pk=pk)
serializer = UserSerializer(user, context={'request': request})
return Response(serializer.data)
class ValidateView(views.APIView):
permission_classes = [permissions.IsAuthenticated]
def get(self, request):
user = UserSerializer(request.user, context={'request': request})
return Response(user.data)
# minimal overhead view for checking if user is validated by request
| from rest_framework import generics, permissions, views
from django.shortcuts import get_object_or_404
from rest_framework.response import Response
from .models import User
from .serializers import UserSerializer
from .permissions import IsProfileOwner
class UserListView(generics.ListAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
class UserDetailView(views.APIView):
#permission_classes = [IsProfileOwner]
def get(self, request, pk, format=None):
user = get_object_or_404(User, pk=pk)
serializer = UserSerializer(user, context={'request': request})
return Response(serializer.data)
class ValidateView(views.APIView):
permission_classes = [permissions.IsAuthenticated]
def get(self, request):
user = UserSerializer(request.user, context={'request': request})
return Response(user.data)
# minimal overhead view for checking if user is validated by request
| en | 0.800727 | #permission_classes = [IsProfileOwner] # minimal overhead view for checking if user is validated by request | 2.0782 | 2 |
example/example/apps.py | lorne-luo/django-autocode | 0 | 6619312 | <reponame>lorne-luo/django-autocode
from django.apps import AppConfig
class myAppNameConfig(AppConfig):
name = 'example'
verbose_name = 'An example app'
| from django.apps import AppConfig
class myAppNameConfig(AppConfig):
name = 'example'
verbose_name = 'An example app' | none | 1 | 1.269426 | 1 | |
JaroEliCall/src/tests/test_server_bidirectional.py | jaroslaw-wieczorek/Project_IP_Telephony_Python_Voip | 0 | 6619313 | import asyncio
import sys
class EchoServerProtocol(asyncio.DatagramProtocol):
def connection_made(self, transport):
self.transport = transport
def datagram_received(self, voice_msg, addr):
print('Received voic - size: %d bytes from: %s' % (sys.getsizeof(voice_msg), addr))
self.transport.sendto(voice_msg, addr)
loop = asyncio.get_event_loop()
print("Starting UDP server")
# One protocol instance will be created to serve all client requests
listen = loop.create_datagram_endpoint(
EchoServerProtocol, local_addr=('127.0.0.1', 9999))
transport, protocol = loop.run_until_complete(listen)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
transport.close()
loop.close()
| import asyncio
import sys
class EchoServerProtocol(asyncio.DatagramProtocol):
def connection_made(self, transport):
self.transport = transport
def datagram_received(self, voice_msg, addr):
print('Received voic - size: %d bytes from: %s' % (sys.getsizeof(voice_msg), addr))
self.transport.sendto(voice_msg, addr)
loop = asyncio.get_event_loop()
print("Starting UDP server")
# One protocol instance will be created to serve all client requests
listen = loop.create_datagram_endpoint(
EchoServerProtocol, local_addr=('127.0.0.1', 9999))
transport, protocol = loop.run_until_complete(listen)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
transport.close()
loop.close()
| en | 0.89679 | # One protocol instance will be created to serve all client requests | 2.887182 | 3 |
codebase/third_party/spos_ofa/mobilenet_v3.py | qdmy/Adelaidet-Quantization | 0 | 6619314 | # Once for All: Train One Network and Specialize it for Efficient Deployment
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# International Conference on Learning Representations (ICLR), 2020.
import copy
import torch.nn as nn
from .ofa.utils.layers import set_layer_from_config, MBConvLayer, ConvLayer, IdentityLayer, LinearLayer, ResidualBlock
from .ofa.utils import MyNetwork, make_divisible, MyGlobalAvgPool2d
__all__ = ['MobileNetV3']
class MobileNetV3(MyNetwork):
def __init__(self, first_conv, blocks, final_expand_layer, feature_mix_layer, classifier):
super(MobileNetV3, self).__init__()
self.first_conv = first_conv
self.blocks = nn.ModuleList(blocks)
self.final_expand_layer = final_expand_layer
self.global_avg_pool = MyGlobalAvgPool2d(keep_dim=True)
self.feature_mix_layer = feature_mix_layer
self.classifier = classifier
def forward(self, x):
x = self.first_conv(x)
for block in self.blocks:
x = block(x)
x = self.final_expand_layer(x)
x = self.global_avg_pool(x) # global average pooling
x = self.feature_mix_layer(x)
feature = x.view(x.size(0), -1)
x = self.classifier(feature)
return x
def set_bn_param(self, bn_momentum=0.1, bn_eps=0.00001):
for m in self.modules():
if type(m) in [nn.BatchNorm1d, nn.BatchNorm2d]:
m.momentum = bn_momentum
m.eps = bn_eps
return
| # Once for All: Train One Network and Specialize it for Efficient Deployment
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# International Conference on Learning Representations (ICLR), 2020.
import copy
import torch.nn as nn
from .ofa.utils.layers import set_layer_from_config, MBConvLayer, ConvLayer, IdentityLayer, LinearLayer, ResidualBlock
from .ofa.utils import MyNetwork, make_divisible, MyGlobalAvgPool2d
__all__ = ['MobileNetV3']
class MobileNetV3(MyNetwork):
def __init__(self, first_conv, blocks, final_expand_layer, feature_mix_layer, classifier):
super(MobileNetV3, self).__init__()
self.first_conv = first_conv
self.blocks = nn.ModuleList(blocks)
self.final_expand_layer = final_expand_layer
self.global_avg_pool = MyGlobalAvgPool2d(keep_dim=True)
self.feature_mix_layer = feature_mix_layer
self.classifier = classifier
def forward(self, x):
x = self.first_conv(x)
for block in self.blocks:
x = block(x)
x = self.final_expand_layer(x)
x = self.global_avg_pool(x) # global average pooling
x = self.feature_mix_layer(x)
feature = x.view(x.size(0), -1)
x = self.classifier(feature)
return x
def set_bn_param(self, bn_momentum=0.1, bn_eps=0.00001):
for m in self.modules():
if type(m) in [nn.BatchNorm1d, nn.BatchNorm2d]:
m.momentum = bn_momentum
m.eps = bn_eps
return
| en | 0.737869 | # Once for All: Train One Network and Specialize it for Efficient Deployment # <NAME>, <NAME>, <NAME>, <NAME>, <NAME> # International Conference on Learning Representations (ICLR), 2020. # global average pooling | 2.416373 | 2 |
Python/Desafios/tarefas/exe 11 pintando.py | Kaioguilherme1/PythonCodigos | 2 | 6619315 | print('bom dia preencha o que se pede abaixo')
largura =float(input('digite a largura da parede >>>'))
altura =float(input('digite a atura da parede >>>'))
Area = largura * altura
tinta = Area / 2
print('uma parede de {0} m x {1} m tem a area igual a {2}m² e ira gasta {3} L de tinta'.format(largura, altura, Area, tinta)) | print('bom dia preencha o que se pede abaixo')
largura =float(input('digite a largura da parede >>>'))
altura =float(input('digite a atura da parede >>>'))
Area = largura * altura
tinta = Area / 2
print('uma parede de {0} m x {1} m tem a area igual a {2}m² e ira gasta {3} L de tinta'.format(largura, altura, Area, tinta)) | none | 1 | 3.85824 | 4 | |
Variado_GeekUniversity/guppe/escrever_em_arquivos.py | PauloFTeixeira/curso_python | 0 | 6619316 | <gh_stars>0
"""
Escrevendo em arquivos
# OBS: Ao abrir um arquivo para leitura, não podemos realizar a escrita nele. Apenas ler.
Da mesma forma, se abrirmor um arquivo para escrita, não podemos lê-lo, somente escrever nele.
# OBS: Ao abrir um arquivo para escrita, o arquivo é criado no sistema operacional.
Para escrevermos dados em um arquivo, após abrir o arquivo utilizamos a função write().
Esta função recebe uma string como parâmetro, caso contrário teremos um TypeError
Abrindo um arquivo para escrita com o modo 'w', se o arquivo não existir será criado,
caso ele já exista, o anterior será apagado e um novo será criado. Dessa forma, todo
o conteúdo no arquivo anterior é perdido.
# Exemplo de escrita - modo 'w' - write (escrita)
# Forma tradicional de escrita em arquivo (Não Pythônica)
arquivo = open('mais.txt', 'w')
arquivo.write('Um texto qualquer.\n')
arquivo.write('Mais um texto.')
arquivo.close()
# Forma Pythônica
with open('novo.txt', 'w') as arquivo:
arquivo.write('Novos dados.\n')
arquivo.write('Outros podemos colocar quantas linhas quisermos.\n')
arquivo.write('Mais Esta é a última linha.')
with open('geek.txt', 'w') as arquivo:
arquivo.write('Geek ' * 1000)
"""
with open('frutas.txt', 'w') as arquivo:
while True:
fruta = input('Informe uma fruta ou digite sair: ')
if fruta != 'sair':
arquivo.write(fruta)
arquivo.write('\n')
else:
break
| """
Escrevendo em arquivos
# OBS: Ao abrir um arquivo para leitura, não podemos realizar a escrita nele. Apenas ler.
Da mesma forma, se abrirmor um arquivo para escrita, não podemos lê-lo, somente escrever nele.
# OBS: Ao abrir um arquivo para escrita, o arquivo é criado no sistema operacional.
Para escrevermos dados em um arquivo, após abrir o arquivo utilizamos a função write().
Esta função recebe uma string como parâmetro, caso contrário teremos um TypeError
Abrindo um arquivo para escrita com o modo 'w', se o arquivo não existir será criado,
caso ele já exista, o anterior será apagado e um novo será criado. Dessa forma, todo
o conteúdo no arquivo anterior é perdido.
# Exemplo de escrita - modo 'w' - write (escrita)
# Forma tradicional de escrita em arquivo (Não Pythônica)
arquivo = open('mais.txt', 'w')
arquivo.write('Um texto qualquer.\n')
arquivo.write('Mais um texto.')
arquivo.close()
# Forma Pythônica
with open('novo.txt', 'w') as arquivo:
arquivo.write('Novos dados.\n')
arquivo.write('Outros podemos colocar quantas linhas quisermos.\n')
arquivo.write('Mais Esta é a última linha.')
with open('geek.txt', 'w') as arquivo:
arquivo.write('Geek ' * 1000)
"""
with open('frutas.txt', 'w') as arquivo:
while True:
fruta = input('Informe uma fruta ou digite sair: ')
if fruta != 'sair':
arquivo.write(fruta)
arquivo.write('\n')
else:
break | pt | 0.976795 | Escrevendo em arquivos # OBS: Ao abrir um arquivo para leitura, não podemos realizar a escrita nele. Apenas ler. Da mesma forma, se abrirmor um arquivo para escrita, não podemos lê-lo, somente escrever nele. # OBS: Ao abrir um arquivo para escrita, o arquivo é criado no sistema operacional. Para escrevermos dados em um arquivo, após abrir o arquivo utilizamos a função write(). Esta função recebe uma string como parâmetro, caso contrário teremos um TypeError Abrindo um arquivo para escrita com o modo 'w', se o arquivo não existir será criado, caso ele já exista, o anterior será apagado e um novo será criado. Dessa forma, todo o conteúdo no arquivo anterior é perdido. # Exemplo de escrita - modo 'w' - write (escrita) # Forma tradicional de escrita em arquivo (Não Pythônica) arquivo = open('mais.txt', 'w') arquivo.write('Um texto qualquer.\n') arquivo.write('Mais um texto.') arquivo.close() # Forma Pythônica with open('novo.txt', 'w') as arquivo: arquivo.write('Novos dados.\n') arquivo.write('Outros podemos colocar quantas linhas quisermos.\n') arquivo.write('Mais Esta é a última linha.') with open('geek.txt', 'w') as arquivo: arquivo.write('Geek ' * 1000) | 4.455719 | 4 |
strategy.py | UtkucanBykl/python-design-patterns | 0 | 6619317 | from abc import ABC, abstractmethod
class Crawle(ABC):
@abstractmethod
def get(self):
pass
class CrawleJSON(Crawle):
def get(self):
print({'type': 'json'})
class CrawleXML(Crawle):
def get(self):
print('<xml></xml>')
class Context:
def __init__(self):
self.crawle = None
@property
def crawler(self):
return self.crawle
@crawler.setter
def crawler(self, value):
self.crawle = value
def run(self):
self.crawle.get()
if __name__ == "__main__":
j_crawle = CrawleJSON()
x_crawle = CrawleXML()
context = Context()
context.crawle = j_crawle
context.run()
context.crawle = x_crawle
context.run()
| from abc import ABC, abstractmethod
class Crawle(ABC):
@abstractmethod
def get(self):
pass
class CrawleJSON(Crawle):
def get(self):
print({'type': 'json'})
class CrawleXML(Crawle):
def get(self):
print('<xml></xml>')
class Context:
def __init__(self):
self.crawle = None
@property
def crawler(self):
return self.crawle
@crawler.setter
def crawler(self, value):
self.crawle = value
def run(self):
self.crawle.get()
if __name__ == "__main__":
j_crawle = CrawleJSON()
x_crawle = CrawleXML()
context = Context()
context.crawle = j_crawle
context.run()
context.crawle = x_crawle
context.run()
| none | 1 | 3.556424 | 4 | |
functions/frame.py | prehren/annotation-to-outline | 0 | 6619318 | <reponame>prehren/annotation-to-outline
#
# Functions for program annotation-to-outline. Responsible for creating the annotation
# object table from the data extracted from annotated pdf.
#
import pandas as pd
import re
def dealWithBreaks(df):
# Deal with page breaks and breaks on a single page in highlighted and underlined text
for item in reversed(df.index):
if re.search('#\\.\\.\\.', df['Annotation'][item]): # if page break
textToAppend = df['Text'][item]
df.loc[item - 1, 'Text'] = df['Text'][item - 1] + textToAppend
df.loc[item - 1, 'Page'] = df['Page'][item - 1] + ", " + df['Page'][item]
df = df.drop(item)
elif re.search('#,,,', df['Annotation'][item]): # if break on a single page
textToAppend = df['Text'][item]
df.loc[item - 1, 'Text'] = df['Text'][item - 1] + " [...] " + textToAppend
df.loc[item - 1, 'Page'] = df['Page'][item - 1] + ", " + df['Page'][item]
df = df.drop(item)
return df
def extractInstructions(string):
# extract instruction section from text
# match instruction section of the annotation (without numbers)
instructions = re.match('#[^\\w\\(]{0,12}(\\w|\\([A-Z][0-9]{0,2}\\)\\w)', string)
if instructions: # if instruction pattern has been matched
instructions = instructions.group(0) # regex match object to string
instructions = instructions[1:-1] # extract actual instructions
else: # if no instruction pattern match was found
instructions = ''
return instructions
def extractType(string):
# extract type from text
# match instruction section of the annotation (with numbers)
temp = re.match('#[^\\w\\(]{0,12}(\\w|\\([A-Z][0-9]{0,2}\\)\\w)[0-9]{0,2}', string)
if temp: # if instruction pattern has been matched
temp = temp.group(0) # regex match object to string, cut "#"
pattern = re.compile(re.escape(re.match('#[^\\w\\(]{0,12}(\\w|\\([A-Z][0-9]{0,2}\\)\\w)', temp).group(0)[:-1]))
annotationType = re.sub(pattern, '', temp)
else: # if no instruction pattern match was found
annotationType = ''
return annotationType
def extractTitle(string):
# extract title from text
# match instruction section of the annotation (with numbers)
temp = re.match('#[^\\w\\(]{0,12}(\\w|\\([A-Z][0-9]{0,2}\\)\\w)[0-9]{0,2}', string)
if temp: # if instruction pattern has been matched
temp = temp.group(0) # regex match object to string
title = string[len(temp) + 1:] # cut away instruction pattern
# capitalize title
if title:
title = title[0].upper() + title[1:]
else: # if no instruction pattern match was found
title = ''
return title
def frameData(highlightText, highlightTextPos, underlineText, underlineTextPos,
textBoxText, textBoxTextPos, firstPage, numFirstPage):
# Generate dataframe from highlighted text and content of textboxes. The latter are matched
# to the former via comparison of vertical positioning
pages = [(item[0] - numFirstPage + 1 + firstPage) for item in highlightTextPos] # pages with highlighted text
lowerLimit = [item[1] for item in highlightTextPos] # lower vertical boundaries
upperLimit = [item[2] for item in highlightTextPos] # upper vertical boundaries
midpoint = [(x + y) / 2 for x, y in zip(lowerLimit, upperLimit)] # midpoint of vertical boundaries
# Generate dataframe with highlighted text
highlightDF = pd.DataFrame({'Page': pages, 'Text': highlightText,
'Lower': lowerLimit, 'Upper': upperLimit, 'Midpoint': midpoint})
highlightDF = highlightDF.sort_values(by=['Page', 'Midpoint']) # sort data in dataframe
highlightDF = highlightDF.reset_index(drop=True) # reindex dataframe
pages = [(item[0] + firstPage - numFirstPage + 1) for item in textBoxTextPos] # pages with textboxes
midpoint = [item[2] for item in textBoxTextPos] # midpoint of vertical boundaries
# Generate dataframe with content from textboxes
textBoxDF = pd.DataFrame({'Page': pages, 'Annotation': textBoxText, 'Midpoint': midpoint})
textBoxDF = textBoxDF.sort_values(by=['Page', 'Midpoint']) # sort data in dataframe
textBoxDF = textBoxDF.reset_index(drop=True) # reindex dataframe
L = highlightDF.shape[0] # number of elements in highlight-dataframe
textList = [''] * L # empty list for matching of highlighted text and textbox contents
for pNumber in highlightDF.Page.unique(): # go through all pages with highlights on them
# relevant part of both dataframes
tempHighlightDF = highlightDF.loc[highlightDF['Page'] == pNumber]
tempTextBoxDF = textBoxDF.loc[textBoxDF['Page'] == pNumber]
# compare vertical positioning
for k in tempHighlightDF.index:
for l in tempTextBoxDF.index:
# if midpoint of current textbox is located between vertical boundaries of highlighted text
if tempHighlightDF['Lower'][k] < tempTextBoxDF['Midpoint'][l] < tempHighlightDF['Upper'][k]:
textList[k] = tempTextBoxDF['Annotation'][l] # append contents of textbox
try:
textBoxDF = textBoxDF.drop(l) # drop row from textbox dataframe
tempTextBoxDF = tempTextBoxDF.drop(l) # drop row from textbox dataframe of current page
except KeyError:
print('KEY ERROR at page %d' % pNumber)
break
textListDF = pd.DataFrame({'Annotation': textList}) # generate dataframe from textList
df = pd.concat([highlightDF[['Page', 'Midpoint', 'Text']], textListDF], axis=1)
textBoxDF.loc[:, 'Text'] = ''
df = pd.concat([df, textBoxDF], sort=True)
df = df.sort_values(by=['Page', 'Midpoint']) # sort data in dataframe
df = df.reset_index(drop=True) # reindex dataframe
return df
| #
# Functions for program annotation-to-outline. Responsible for creating the annotation
# object table from the data extracted from annotated pdf.
#
import pandas as pd
import re
def dealWithBreaks(df):
# Deal with page breaks and breaks on a single page in highlighted and underlined text
for item in reversed(df.index):
if re.search('#\\.\\.\\.', df['Annotation'][item]): # if page break
textToAppend = df['Text'][item]
df.loc[item - 1, 'Text'] = df['Text'][item - 1] + textToAppend
df.loc[item - 1, 'Page'] = df['Page'][item - 1] + ", " + df['Page'][item]
df = df.drop(item)
elif re.search('#,,,', df['Annotation'][item]): # if break on a single page
textToAppend = df['Text'][item]
df.loc[item - 1, 'Text'] = df['Text'][item - 1] + " [...] " + textToAppend
df.loc[item - 1, 'Page'] = df['Page'][item - 1] + ", " + df['Page'][item]
df = df.drop(item)
return df
def extractInstructions(string):
# extract instruction section from text
# match instruction section of the annotation (without numbers)
instructions = re.match('#[^\\w\\(]{0,12}(\\w|\\([A-Z][0-9]{0,2}\\)\\w)', string)
if instructions: # if instruction pattern has been matched
instructions = instructions.group(0) # regex match object to string
instructions = instructions[1:-1] # extract actual instructions
else: # if no instruction pattern match was found
instructions = ''
return instructions
def extractType(string):
# extract type from text
# match instruction section of the annotation (with numbers)
temp = re.match('#[^\\w\\(]{0,12}(\\w|\\([A-Z][0-9]{0,2}\\)\\w)[0-9]{0,2}', string)
if temp: # if instruction pattern has been matched
temp = temp.group(0) # regex match object to string, cut "#"
pattern = re.compile(re.escape(re.match('#[^\\w\\(]{0,12}(\\w|\\([A-Z][0-9]{0,2}\\)\\w)', temp).group(0)[:-1]))
annotationType = re.sub(pattern, '', temp)
else: # if no instruction pattern match was found
annotationType = ''
return annotationType
def extractTitle(string):
# extract title from text
# match instruction section of the annotation (with numbers)
temp = re.match('#[^\\w\\(]{0,12}(\\w|\\([A-Z][0-9]{0,2}\\)\\w)[0-9]{0,2}', string)
if temp: # if instruction pattern has been matched
temp = temp.group(0) # regex match object to string
title = string[len(temp) + 1:] # cut away instruction pattern
# capitalize title
if title:
title = title[0].upper() + title[1:]
else: # if no instruction pattern match was found
title = ''
return title
def frameData(highlightText, highlightTextPos, underlineText, underlineTextPos,
textBoxText, textBoxTextPos, firstPage, numFirstPage):
# Generate dataframe from highlighted text and content of textboxes. The latter are matched
# to the former via comparison of vertical positioning
pages = [(item[0] - numFirstPage + 1 + firstPage) for item in highlightTextPos] # pages with highlighted text
lowerLimit = [item[1] for item in highlightTextPos] # lower vertical boundaries
upperLimit = [item[2] for item in highlightTextPos] # upper vertical boundaries
midpoint = [(x + y) / 2 for x, y in zip(lowerLimit, upperLimit)] # midpoint of vertical boundaries
# Generate dataframe with highlighted text
highlightDF = pd.DataFrame({'Page': pages, 'Text': highlightText,
'Lower': lowerLimit, 'Upper': upperLimit, 'Midpoint': midpoint})
highlightDF = highlightDF.sort_values(by=['Page', 'Midpoint']) # sort data in dataframe
highlightDF = highlightDF.reset_index(drop=True) # reindex dataframe
pages = [(item[0] + firstPage - numFirstPage + 1) for item in textBoxTextPos] # pages with textboxes
midpoint = [item[2] for item in textBoxTextPos] # midpoint of vertical boundaries
# Generate dataframe with content from textboxes
textBoxDF = pd.DataFrame({'Page': pages, 'Annotation': textBoxText, 'Midpoint': midpoint})
textBoxDF = textBoxDF.sort_values(by=['Page', 'Midpoint']) # sort data in dataframe
textBoxDF = textBoxDF.reset_index(drop=True) # reindex dataframe
L = highlightDF.shape[0] # number of elements in highlight-dataframe
textList = [''] * L # empty list for matching of highlighted text and textbox contents
for pNumber in highlightDF.Page.unique(): # go through all pages with highlights on them
# relevant part of both dataframes
tempHighlightDF = highlightDF.loc[highlightDF['Page'] == pNumber]
tempTextBoxDF = textBoxDF.loc[textBoxDF['Page'] == pNumber]
# compare vertical positioning
for k in tempHighlightDF.index:
for l in tempTextBoxDF.index:
# if midpoint of current textbox is located between vertical boundaries of highlighted text
if tempHighlightDF['Lower'][k] < tempTextBoxDF['Midpoint'][l] < tempHighlightDF['Upper'][k]:
textList[k] = tempTextBoxDF['Annotation'][l] # append contents of textbox
try:
textBoxDF = textBoxDF.drop(l) # drop row from textbox dataframe
tempTextBoxDF = tempTextBoxDF.drop(l) # drop row from textbox dataframe of current page
except KeyError:
print('KEY ERROR at page %d' % pNumber)
break
textListDF = pd.DataFrame({'Annotation': textList}) # generate dataframe from textList
df = pd.concat([highlightDF[['Page', 'Midpoint', 'Text']], textListDF], axis=1)
textBoxDF.loc[:, 'Text'] = ''
df = pd.concat([df, textBoxDF], sort=True)
df = df.sort_values(by=['Page', 'Midpoint']) # sort data in dataframe
df = df.reset_index(drop=True) # reindex dataframe
return df | en | 0.811708 | # # Functions for program annotation-to-outline. Responsible for creating the annotation # object table from the data extracted from annotated pdf. # # Deal with page breaks and breaks on a single page in highlighted and underlined text # if page break # if break on a single page # extract instruction section from text # match instruction section of the annotation (without numbers) # if instruction pattern has been matched # regex match object to string # extract actual instructions # if no instruction pattern match was found # extract type from text # match instruction section of the annotation (with numbers) # if instruction pattern has been matched # regex match object to string, cut "#" # if no instruction pattern match was found # extract title from text # match instruction section of the annotation (with numbers) # if instruction pattern has been matched # regex match object to string # cut away instruction pattern # capitalize title # if no instruction pattern match was found # Generate dataframe from highlighted text and content of textboxes. 
The latter are matched # to the former via comparison of vertical positioning # pages with highlighted text # lower vertical boundaries # upper vertical boundaries # midpoint of vertical boundaries # Generate dataframe with highlighted text # sort data in dataframe # reindex dataframe # pages with textboxes # midpoint of vertical boundaries # Generate dataframe with content from textboxes # sort data in dataframe # reindex dataframe # number of elements in highlight-dataframe # empty list for matching of highlighted text and textbox contents # go through all pages with highlights on them # relevant part of both dataframes # compare vertical positioning # if midpoint of current textbox is located between vertical boundaries of highlighted text # append contents of textbox # drop row from textbox dataframe # drop row from textbox dataframe of current page # generate dataframe from textList # sort data in dataframe # reindex dataframe | 3.000585 | 3 |
samples/my_sender.py | imandr/stompy | 0 | 6619319 | <filename>samples/my_sender.py
import time
import sys
from stompy import STOMPClient
client = STOMPClient()
client.connect(("127.0.0.1", 61613), 'admin', 'password')
txn = client.transaction()
txn.message('/queue/send', "Message 1")
txn.message('/queue/send', "Message 2")
txn.commit(receipt=True)
client.disconnect()
| <filename>samples/my_sender.py
import time
import sys
from stompy import STOMPClient
client = STOMPClient()
client.connect(("127.0.0.1", 61613), 'admin', 'password')
txn = client.transaction()
txn.message('/queue/send', "Message 1")
txn.message('/queue/send', "Message 2")
txn.commit(receipt=True)
client.disconnect()
| none | 1 | 1.880976 | 2 | |
lab1/nn/modules.py | yvr1037/hello-dian.ai | 0 | 6619320 | import numpy as np
from itertools import product
from numpy.core.fromnumeric import transpose
from numpy.random import gamma
import nn.tensor
from . import tensor
class Module(object):
    """Base class for all neural network modules.

    Subclasses override ``forward`` (and usually ``backward``); calling a
    module instance invokes ``forward``.  ``train``/``eval`` toggle the
    ``training`` flag recursively on all sub-modules stored as attributes.
    """

    def __init__(self) -> None:
        """If a module behaves different between training and testing,
        its init method should inherit from this one."""
        self.training = True

    def __call__(self, x: np.ndarray) -> np.ndarray:
        """Defines calling forward method at every call.
        Should not be overridden by subclasses.
        """
        return self.forward(x)

    def forward(self, x: np.ndarray) -> np.ndarray:
        """Defines the forward propagation of the module performed at every call.
        Should be overridden by all subclasses.
        """
        ...

    def backward(self, dy: np.ndarray) -> np.ndarray:
        """Defines the backward propagation of the module.

        By default the delta is passed through unchanged.
        """
        return dy

    def train(self):
        """Sets the mode of the module (and all sub-modules) to training.
        Should not be overridden by subclasses.
        """
        if 'training' in vars(self):
            self.training = True
        for attr in vars(self).values():
            # Recurse into sub-modules stored as attributes.  The original
            # code called the unbound ``Module.train()`` (missing ``self``),
            # which raised a TypeError as soon as a sub-module existed.
            if isinstance(attr, Module):
                attr.train()

    def eval(self):
        """Sets the mode of the module (and all sub-modules) to eval.
        Should not be overridden by subclasses.
        """
        if 'training' in vars(self):
            self.training = False
        for attr in vars(self).values():
            # Recurse into sub-modules (see ``train`` above).
            if isinstance(attr, Module):
                attr.eval()
class Linear(Module):

    def __init__(self, in_length: int, out_length: int):
        """Module which applies linear transformation to input.

        Args:
            in_length: L_in from expected input shape (N, L_in).
            out_length: L_out from output shape (N, L_out).
        """
        super().__init__()
        # Weight matrix with the bias folded in as an extra *last row*,
        # giving shape (L_in + 1, L_out).  ``forward`` appends a matching
        # column of ones to the input.
        self.w = tensor.from_array(np.random.randn(in_length + 1, out_length))
        # Augmented input cached by the last forward pass (used in backward).
        self.x = None

    def forward(self, x):
        """Forward propagation of linear module.

        Args:
            x: input of shape (N, L_in).

        Returns:
            out: output of shape (N, L_out).
        """
        self.original_x_shape = x.shape
        # Append a ones column so the last row of ``self.w`` acts as bias.
        ones = np.ones(x.shape[0])
        x = np.column_stack([x, ones])
        self.x = x  # cache for backward
        return np.dot(x, self.w)

    def backward(self, dy):
        """Backward propagation of linear module.

        Args:
            dy: output delta of shape (N, L_out).

        Returns:
            dx: input delta of shape (N, L_in).
        """
        # Gradient w.r.t. the augmented weights (bias row included).
        self.w.grad = np.dot(self.x.T, dy)
        # Gradient w.r.t. the input.  The bias row of ``w`` is dropped
        # because the appended ones column is not part of the caller's
        # input; the original code left this branch unimplemented and
        # returned None.
        dx = np.dot(dy, self.w[:-1].T)
        return dx
class BatchNorm1d(Module):

    def __init__(self, length: int, momentum: float=0.9):
        """Module which applies batch normalization to input.

        Args:
            length: L from expected input shape (N, L).
            momentum: default 0.9.
        """
        super(BatchNorm1d, self).__init__()

        self.L = int(length)
        self.running_mean = np.zeros(self.L)  # tracked mini-batch mean
        self.running_var = np.ones(self.L)    # tracked mini-batch variance
        self.eps = 1e-5                       # avoids division by zero
        # NOTE(review): here ``momentum`` weights the *current* batch in the
        # running-average update (running = (1 - m) * running + m * batch),
        # so the default 0.9 tracks the latest batch closely.  Confirm this
        # matches the convention expected by the rest of the framework.
        self.momentum = momentum
        # Learnable affine parameters.
        self.beta = nn.tensor.from_array(np.zeros(shape=(self.L,)))
        self.gamma = nn.tensor.from_array(np.ones((self.L)))
        # Caches filled by forward, consumed by backward.
        self.x_hat = None
        self.std = None

    def forward(self, x):
        """Forward propagation of batch norm module.

        Args:
            x: input of shape (N, L).

        Returns:
            out: output of shape (N, L).
        """
        if self.training:
            x_mean = np.mean(x, axis=0)
            x_var = np.var(x, axis=0)
            # Update the tracked statistics with the current mini-batch.
            self.running_mean = (1 - self.momentum) * self.running_mean \
                + self.momentum * x_mean
            self.running_var = (1 - self.momentum) * self.running_var \
                + self.momentum * x_var
        else:
            # In eval mode normalize with the tracked statistics.  The
            # original always used batch statistics, leaving the running
            # averages maintained but never used.
            x_mean = self.running_mean
            x_var = self.running_var
        self.std = np.sqrt(x_var + self.eps)
        self.x_hat = (x - x_mean) / self.std
        return self.gamma * self.x_hat + self.beta

    def backward(self, dy):
        """Backward propagation of batch norm module.

        Args:
            dy: output delta of shape (N, L).

        Returns:
            dx: input delta of shape (N, L).
        """
        # Gradients of the affine parameters.
        self.gamma.grad = np.sum(self.x_hat * dy, axis=0)
        self.beta.grad = np.sum(dy, axis=0)
        # Gradient w.r.t. the input.  The original code referenced the bare
        # name ``gamma`` (shadowed by numpy.random.gamma, a function) and
        # omitted the 1/(N*std) normalization factor.  Standard formula:
        #   dx = (dx_hat - mean(dx_hat) - x_hat * mean(dx_hat * x_hat)) / std
        dx_hat = dy * self.gamma
        dx = (dx_hat
              - np.mean(dx_hat, axis=0)
              - self.x_hat * np.mean(dx_hat * self.x_hat, axis=0)) / self.std
        return dx
class Conv2d(Module):

    def __init__(self, in_channels: int, channels: int, kernel_size: int=3,
                 stride: int=1, padding: int=0, bias: bool=False):
        """Module which applies 2D convolution to input.

        Args:
            in_channels: C_in from expected input shape (B, C_in, H_in, W_in).
            channels: C_out from output shape (B, C_out, H_out, W_out).
            kernel_size: default 3.
            stride: default 1.
            padding: default 0.
        """
        super().__init__()
        self.C_in = in_channels
        self.C_out = channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        # Filter bank stored as (C_in, C_out, k, k); forward transposes it
        # into a (C_in*k*k, C_out) matrix for the im2col product.
        self.W = nn.tensor.from_array(
            np.random.randn(self.C_in, self.C_out, self.kernel_size, self.kernel_size))
        # Original initialized the gradient with shape (k, k); it must
        # mirror the weight shape.
        self.W.grad = np.zeros_like(self.W)
        # NOTE(review): the ``bias`` flag is accepted but the bias term is
        # always created, as in the original — confirm intended behavior.
        self.b = nn.tensor.from_array(np.zeros((1, self.C_out)))
        self.b.grad = np.zeros((1, self.C_out))
        # Caches filled by forward, consumed by backward.
        self.x = None
        self.x_padded = None
        self.X_im2col = None
        self.W_im2col = None

    def _im2col(self, x):
        """Unfold ``x`` into a (B*out_h*out_w, C_in*k*k) patch matrix.

        Rows are receptive fields ordered batch-major then row-major over
        output locations; columns are channel-major (C_in, kh, kw) to match
        the flattened filter layout used in ``forward``.
        """
        B, C, H, W = x.shape
        k, s, p = self.kernel_size, self.stride, self.padding
        out_h = (H + 2 * p - k) // s + 1
        out_w = (W + 2 * p - k) // s + 1
        x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
        col = np.zeros((B, C, k, k, out_h, out_w))
        for i in range(k):
            for j in range(k):
                # Strided view of every output location's (i, j) tap.
                col[:, :, i, j, :, :] = x_padded[:, :, i:i + s * out_h:s, j:j + s * out_w:s]
        return col.transpose(0, 4, 5, 1, 2, 3).reshape(B * out_h * out_w, -1)

    def forward(self, x):
        """Forward propagation of convolution module.

        Args:
            x: input of shape (B, C_in, H_in, W_in).

        Returns:
            out: output of shape (B, C_out, H_out, W_out).
        """
        B, C_in, H_in, W_in = x.shape
        self.x = x
        k, s, p = self.kernel_size, self.stride, self.padding
        out_h = (H_in + 2 * p - k) // s + 1
        out_w = (W_in + 2 * p - k) // s + 1
        # Unfold input patches into rows: (B*out_h*out_w, C_in*k*k).
        self.X_im2col = self._im2col(x)
        # Flatten filters to (C_in*k*k, C_out).  W is stored as
        # (C_in, C_out, k, k), so C_out must be moved to the front before
        # reshaping; the original ``W.reshape(C_out, -1)`` scrambled the
        # layout whenever C_in != 1.
        self.W_im2col = self.W.transpose(1, 0, 2, 3).reshape(self.C_out, -1).T
        out = np.dot(self.X_im2col, self.W_im2col) + self.b
        self.out = out.reshape(B, out_h, out_w, self.C_out).transpose(0, 3, 1, 2)
        return self.out

    def backward(self, dy):
        """Backward propagation of convolution module.

        Args:
            dy: output delta of shape (B, C_out, H_out, W_out).

        Returns:
            dx: input delta of shape (B, C_in, H_in, W_in).
        """
        B, C_in, H_in, W_in = self.x.shape
        k, s, p = self.kernel_size, self.stride, self.padding
        out_h = (H_in + 2 * p - k) // s + 1
        out_w = (W_in + 2 * p - k) // s + 1
        # One row per output location, one column per output channel,
        # matching the im2col product of forward.
        dy_col = dy.transpose(0, 2, 3, 1).reshape(-1, self.C_out)
        # Parameter gradients.  (Original summed the bias gradient over the
        # wrong axis and referenced a nonexistent ``self.out_channels``.)
        grad_w = np.dot(self.X_im2col.T, dy_col)            # (C_in*k*k, C_out)
        self.W.grad = grad_w.T.reshape(self.C_out, self.C_in, k, k).transpose(1, 0, 2, 3)
        self.b.grad = np.sum(dy_col, axis=0, keepdims=True)
        # Input gradient: back through the matrix product, then fold the
        # patch rows back onto the padded image (col2im).
        dx_col = np.dot(dy_col, self.W_im2col.T)            # (B*oH*oW, C_in*k*k)
        dx_col = dx_col.reshape(B, out_h, out_w, C_in, k, k).transpose(0, 3, 4, 5, 1, 2)
        dx_padded = np.zeros((B, C_in, H_in + 2 * p, W_in + 2 * p))
        for i in range(k):
            for j in range(k):
                dx_padded[:, :, i:i + s * out_h:s, j:j + s * out_w:s] += dx_col[:, :, i, j, :, :]
        # Crop the padding back off.
        return dx_padded[:, :, p:p + H_in, p:p + W_in]
class Conv2d_im2col(Conv2d):

    def forward(self, x):
        """Unfold ``x`` into an im2col patch matrix via fancy indexing.

        As in the original, this override returns the *column matrix*, not
        the convolution output; ``Conv2d.forward`` multiplies it with the
        flattened filters.

        Args:
            x: input of shape (B, C_in, H, W).

        Returns:
            im_col: matrix of shape (B*out_h*out_w, C_in*k*k) whose rows
                are flattened receptive fields, columns ordered
                channel-major (C_in, kh, kw).
        """
        batch_size, C_in, height, width = x.shape
        k, s, p = self.kernel_size, self.stride, self.padding
        # Use the module's own padding and stride so the number of rows
        # agrees with the output size computed in Conv2d.forward.  The
        # original hard-coded "same" padding derived from the kernel and
        # computed the output sizes with ``/ stride - 1`` instead of
        # ``// stride + 1``.
        x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
        out_height = (height + 2 * p - k) // s + 1
        out_width = (width + 2 * p - k) // s + 1
        # Per-column in-patch offsets (i0, j0) and per-row output-location
        # offsets (i1, j1).  Note j1 tiles over out_height rows (the
        # original tiled by out_width, wrong for non-square outputs).
        i0 = np.tile(np.repeat(np.arange(k), k), C_in)
        i1 = s * np.repeat(np.arange(out_height), out_width)
        j0 = np.tile(np.arange(k), k * C_in)
        j1 = s * np.tile(np.arange(out_width), out_height)
        # i/j: (out_h*out_w, C_in*k*k) spatial coordinates of every element
        # gathered for every output location.
        i = i0.reshape(1, -1) + i1.reshape(-1, 1)
        j = j0.reshape(1, -1) + j1.reshape(-1, 1)
        # Channel index of each column (channel-major layout).
        k_idx = np.repeat(np.arange(C_in), k * k).reshape(1, -1)
        # Gather: result has shape (B, out_h*out_w, C_in*k*k).
        im_col = x_padded[:, k_idx, i, j]
        return im_col.reshape(-1, k * k * C_in)
class AvgPool(Module):

    def __init__(self, kernel_size: int=2,
                 stride: int=2, padding: int=0):
        """Module which applies average pooling to input.

        Args:
            kernel_size: default 2.
            stride: default 2.
            padding: default 0.
        """
        super().__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        # Caches filled by forward, consumed by backward.
        self.x = None
        self.x_padded = None

    def forward(self, x):
        """Forward propagation of average pooling module.

        Args:
            x: input of shape (B, C, H_in, W_in).

        Returns:
            out: output of shape (B, C, H_out, W_out).
        """
        B, C, H_in, W_in = x.shape
        k, s, p = self.kernel_size, self.stride, self.padding
        # Cache the input; the original left this commented out although
        # backward reads self.x.shape.
        self.x = x
        # np.pad takes (before, after) pairs per axis; the original passed
        # bare ints, and np.zeros was called with separate ints instead of
        # a shape tuple — both raise at runtime.
        self.x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
        out_h = (H_in + 2 * p - k) // s + 1
        out_w = (W_in + 2 * p - k) // s + 1
        self.out = np.zeros((B, C, out_h, out_w))
        for h in range(out_h):
            for w in range(out_w):
                # Mean of the k x k window over the spatial axes.  Padded
                # zeros are included in the average.
                window = self.x_padded[:, :, s * h:s * h + k, s * w:s * w + k]
                self.out[:, :, h, w] = np.mean(window, axis=(-2, -1))
        return self.out

    def backward(self, dy):
        """Backward propagation of average pooling module.

        Args:
            dy: output delta of shape (B, C, H_out, W_out).

        Returns:
            dx: input delta of shape (B, C, H_in, W_in).
        """
        B, C, H_out, W_out = dy.shape
        _, _, H_in, W_in = self.x.shape
        k, s, p = self.kernel_size, self.stride, self.padding
        grad = np.zeros_like(self.x_padded)
        for h in range(H_out):
            for w in range(W_out):
                # Each input element of the window receives an equal share
                # of the output delta.  Trailing axes are added so the
                # (B, C) slice broadcasts over the (B, C, k, k) window.
                grad[:, :, s * h:s * h + k, s * w:s * w + k] += \
                    dy[:, :, h, w][:, :, None, None] / k ** 2
        # Crop the padding back off.
        return grad[:, :, p:p + H_in, p:p + W_in]
class MaxPool(Module):

    def __init__(self, kernel_size: int=2,
                 stride: int=2, padding: int=0):
        """Module which applies max pooling to input.

        Args:
            kernel_size: default 2.
            stride: default 2.
            padding: default 0.
        """
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.x = None          # cached input, needed by backward()
        self.x_padded = None   # cached zero-padded input

    def forward(self, x):
        """Forward propagation of max pooling module.

        Args:
            x: input of shape (B, C, H_in, W_in).

        Returns:
            out: output of shape (B, C, H_out, W_out).
        """
        B, C, H_in, W_in = x.shape
        self.x = x
        k, s, p = self.kernel_size, self.stride, self.padding
        # fix: the padded input must be stored on the instance — backward()
        # reads self.x_padded, which the original never assigned.
        self.x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)),
                               mode='constant')
        out_h = (H_in + 2 * p - k) // s + 1
        out_w = (W_in + 2 * p - k) // s + 1
        self.out = np.zeros((B, C, out_h, out_w))
        for h in range(out_h):
            for w in range(out_w):
                window = self.x_padded[:, :, s * h:s * h + k, s * w:s * w + k]
                self.out[:, :, h, w] = np.max(window, axis=(-2, -1))
        return self.out

    def backward(self, dy):
        """Backward propagation of max pooling module.

        Args:
            dy: output delta of shape (B, C, H_out, W_out).

        Returns:
            dx: input delta of shape (B, C, H_in, W_in).
        """
        B, C, H_out, W_out = dy.shape
        B, C, H_in, W_in = self.x.shape
        k, s, p = self.kernel_size, self.stride, self.padding
        self.grad = np.zeros_like(self.x_padded)
        for h in range(H_out):
            for w in range(W_out):
                window = self.x_padded[:, :, s * h:s * h + k, s * w:s * w + k]
                w_max = np.max(window, axis=(-2, -1), keepdims=True)
                # Route the delta only to the element(s) that produced the
                # max (fix: the original compared the *gradient* buffer to
                # the max instead of the input window, and its (B, C) delta
                # did not broadcast over the (k, k) window).
                mask = (window == w_max)
                self.grad[:, :, s * h:s * h + k, s * w:s * w + k] += \
                    mask * dy[:, :, h, w][..., None, None]
        return self.grad[:, :, p:p + H_in, p:p + W_in]
class Dropout(Module):

    def __init__(self, p: float=0.5):
        """Inverted-dropout module.

        Args:
            p: keep probability — each unit survives with probability ``p``.
               NOTE(review): this implementation treats ``p`` as the *keep*
               rate (mask is ``rand < p``), not the usual drop rate; kept
               as-is to preserve behaviour.
        """
        self.p = p
        self.mask = None   # scaled keep mask from the last forward pass

    def forward(self, x):
        # Inverted dropout: kept units are scaled by 1/p at train time so
        # the expected activation is unchanged.
        self.mask = (np.random.rand(*x.shape) < self.p) / self.p
        return self.mask * x

    def backward(self, dy):
        # fix: the method was misspelled ``backard``, so the base class's
        # identity backward ran instead; the gradient must pass through the
        # same scaled mask that was applied in forward().
        return self.mask * dy
if __name__ == '__main__':
import pdb; pdb.set_trace() | import numpy as np
from itertools import product
from numpy.core.fromnumeric import transpose
from numpy.random import gamma
import nn.tensor
from . import tensor
class Module(object):
    """Base class for all neural network modules."""

    def __init__(self) -> None:
        """If a module behaves different between training and testing,
        its init method should inherit from this one."""
        self.training = True

    def __call__(self, x: np.ndarray) -> np.ndarray:
        """Defines calling forward method at every call.
        Should not be overridden by subclasses.
        """
        return self.forward(x)

    def forward(self, x: np.ndarray) -> np.ndarray:
        """Defines the forward propagation of the module performed at every call.
        Should be overridden by all subclasses.
        """
        ...

    def backward(self, dy: np.ndarray) -> np.ndarray:
        """Defines the backward propagation of the module.

        Default is the identity: the delta is passed through unchanged.
        """
        return dy

    def train(self):
        """Sets the mode of the module to training.
        Should not be overridden by subclasses.
        """
        if 'training' in vars(self):
            self.training = True
        for attr in vars(self).values():
            # fix: the mode change was sent to the Module *class*
            # (``Module.train()``, a TypeError); it must recurse into the
            # sub-module instance.
            if isinstance(attr, Module):
                attr.train()

    def eval(self):
        """Sets the mode of the module to eval.
        Should not be overridden by subclasses.
        """
        if 'training' in vars(self):
            self.training = False
        for attr in vars(self).values():
            # fix: recurse into the sub-module instance, not the class.
            if isinstance(attr, Module):
                attr.eval()
class Linear(Module):

    def __init__(self, in_length: int, out_length: int):
        """Module which applies linear transformation to input.

        Args:
            in_length: L_in from expected input shape (N, L_in).
            out_length: L_out from output shape (N, L_out).
        """
        # Weight matrix with one extra row acting as the bias term: the
        # input is augmented with a trailing ones column in forward().
        self.w = tensor.from_array(np.random.randn(in_length + 1, out_length))
        self.x = None  # cached augmented input, needed by backward()

    def forward(self, x):
        """Forward propagation of linear module.

        Args:
            x: input of shape (N, L_in).

        Returns:
            out: output of shape (N, L_out).
        """
        self.original_x_shape = x.shape
        # Append a ones column so the last row of self.w acts as a bias.
        ones = np.ones(x.shape[0])
        x = np.column_stack([x, ones])
        self.x = x
        return np.dot(x, self.w)

    def backward(self, dy):
        """Backward propagation of linear module.

        Args:
            dy: output delta of shape (N, L_out).

        Returns:
            dx: input delta of shape (N, L_in).
        """
        # Gradient w.r.t. the (augmented) weights.
        self.w.grad = np.dot(self.x.T, dy)
        # fix: the original computed no input delta (the dx lines were
        # commented out and the method returned None).  Propagate through
        # the weights, dropping the bias row, which corresponds to no
        # input feature.
        return np.dot(dy, self.w[:-1].T)
class BatchNorm1d(Module):

    def __init__(self, length: int, momentum: float=0.9):
        """Module which applies batch normalization to input.

        Args:
            length: L from expected input shape (N, L).
            momentum: weight of the history in the running-statistics
                exponential moving average, default 0.9.
        """
        super(BatchNorm1d, self).__init__()
        self.L = int(length)
        self.running_mean = np.zeros(self.L)   # tracked population mean
        self.running_var = np.ones(self.L)     # tracked population variance
        self.eps = 1e-5                        # numerical-stability constant
        self.momentum = momentum
        self.beta = nn.tensor.from_array(np.zeros(shape=(self.L,)))
        self.gamma = nn.tensor.from_array(np.ones((self.L)))
        self.x_hat = None   # normalised input, cached for backward()
        self.std = None     # sqrt(var + eps), cached for backward()

    def forward(self, x):
        """Forward propagation of batch norm module.

        Args:
            x: input of shape (N, L).

        Returns:
            out: output of shape (N, L).
        """
        if not getattr(self, 'training', True):
            # Inference: normalise with the tracked running statistics
            # instead of the batch statistics.
            x_hat = (x - self.running_mean) / np.sqrt(self.running_var + self.eps)
            return self.gamma * x_hat + self.beta
        x_mean = np.mean(x, axis=0)
        x_var = np.var(x, axis=0)
        self.std = np.sqrt(x_var + self.eps)   # cached for backward()
        self.x_hat = (x - x_mean) / self.std
        y = self.gamma * self.x_hat + self.beta
        # Exponential moving average of the batch statistics.  fix: the
        # original swapped the weights, giving the new batch weight 0.9
        # instead of the history.
        self.running_mean = self.momentum * self.running_mean \
            + (1 - self.momentum) * x_mean
        self.running_var = self.momentum * self.running_var \
            + (1 - self.momentum) * x_var
        return y

    def backward(self, dy):
        """Backward propagation of batch norm module.

        Args:
            dy: output delta of shape (N, L).

        Returns:
            dx: input delta of shape (N, L).
        """
        self.gamma.grad = np.sum(self.x_hat * dy, axis=0)
        self.beta.grad = np.sum(dy, axis=0)
        # fix: the original referenced the bare name ``gamma`` (the
        # numpy.random.gamma import) instead of self.gamma, and dropped the
        # 1/N and 1/std factors of the batch-norm input gradient.
        dx_hat = dy * self.gamma
        dx = (dx_hat
              - np.mean(dx_hat, axis=0)
              - self.x_hat * np.mean(dx_hat * self.x_hat, axis=0)) / self.std
        return dx
class Conv2d(Module):

    def __init__(self, in_channels: int, channels: int, kernel_size: int=3,
                 stride: int=1, padding: int=0, bias: bool=False):
        """Module which applies 2D convolution to input.

        Args:
            in_channels: C_in from expected input shape (B, C_in, H_in, W_in).
            channels: C_out from output shape (B, C_out, H_out, W_out).
            kernel_size: default 3.
            stride: default 1.
            padding: default 0.
        """
        self.C_in = in_channels
        self.C_out = channels
        self.kernel_size = kernel_size
        # Weight layout: (C_in, C_out, kH, kW).
        self.W = nn.tensor.from_array(np.random.randn(
            self.C_in, self.C_out, self.kernel_size, self.kernel_size))
        # fix: the gradient buffer must match the weight shape, not (k, k).
        self.W.grad = np.zeros(
            (self.C_in, self.C_out, self.kernel_size, self.kernel_size))
        self.W_im2col = None
        self.b = nn.tensor.from_array(np.zeros((1, self.C_out)))
        self.b.grad = np.zeros((1, self.C_out))
        self.stride = stride
        self.padding = padding
        self.x = None
        self.X_im2col = None

    def _out_size(self, size):
        """Spatial output extent for one input dimension."""
        return (size + 2 * self.padding - self.kernel_size) // self.stride + 1

    def _im2col(self, x):
        """Unfold x of shape (B, C, H, W) into rows of shape (B*oH*oW, C*k*k).

        Columns are ordered channel-major, then kernel row, then kernel
        column; rows are batch-major, then output position.
        """
        B, C, H, W = x.shape
        k, s, p = self.kernel_size, self.stride, self.padding
        oH, oW = self._out_size(H), self._out_size(W)
        x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)),
                          mode='constant')
        col = np.zeros((B, C, k, k, oH, oW))
        for a in range(k):
            for c in range(k):
                col[:, :, a, c, :, :] = x_padded[:, :, a:a + s * oH:s,
                                                 c:c + s * oW:s]
        return col.transpose(0, 4, 5, 1, 2, 3).reshape(B * oH * oW, -1)

    def forward(self, x):
        """Forward propagation of convolution module.

        Args:
            x: input of shape (B, C_in, H_in, W_in).

        Returns:
            out: output of shape (B, C_out, H_out, W_out).
        """
        B, C_in, H_in, W_in = x.shape
        self.x = x
        oH, oW = self._out_size(H_in), self._out_size(W_in)
        self.X_im2col = self._im2col(x)
        # (C_in, C_out, k, k) -> (C_in*k*k, C_out); the transpose makes the
        # row ordering match the im2col column ordering (fix: the original
        # reshape mixed the channel axes).
        self.W_im2col = self.W.transpose(1, 0, 2, 3).reshape(self.C_out, -1).T
        out = np.dot(self.X_im2col, self.W_im2col) + self.b
        self.out = out.reshape(B, oH, oW, -1).transpose(0, 3, 1, 2)
        return self.out

    def backward(self, dy):
        """Backward propagation of convolution module.

        Args:
            dy: output delta of shape (B, C_out, H_out, W_out).

        Returns:
            dx: input delta of shape (B, C_in, H_in, W_in).
        """
        B, C_in, H_in, W_in = self.x.shape
        k, s, p = self.kernel_size, self.stride, self.padding
        oH, oW = self._out_size(H_in), self._out_size(W_in)
        # (B, C_out, oH, oW) -> (B*oH*oW, C_out), matching X_im2col rows.
        # fix: the original read the nonexistent ``self.out_channels``.
        dy_col = dy.transpose(0, 2, 3, 1).reshape(-1, self.C_out)
        # Weight and bias gradients.  fix: the bias is summed over the batch
        # axis (axis=0), and the weight gradient is reshaped back to the
        # (C_in, C_out, k, k) layout.
        dW_col = np.dot(self.X_im2col.T, dy_col)
        self.W.grad = dW_col.T.reshape(
            self.C_out, C_in, k, k).transpose(1, 0, 2, 3)
        self.b.grad = np.sum(dy_col, axis=0, keepdims=True)
        # Input gradient: fold the column gradient back into image layout
        # (col2im), accumulating over overlapping windows.  fix: the
        # original had ``,transpose`` (a tuple typo), used an output-size
        # formula off by 2, and dropped the stride in the second slice.
        dx_col = np.dot(dy_col, self.W_im2col.T)
        dx_col = dx_col.reshape(B, oH, oW, C_in, k, k).transpose(0, 3, 4, 5, 1, 2)
        dx_padded = np.zeros((B, C_in, H_in + 2 * p, W_in + 2 * p))
        for a in range(k):
            for c in range(k):
                dx_padded[:, :, a:a + s * oH:s, c:c + s * oW:s] += \
                    dx_col[:, :, a, c, :, :]
        # Strip padding to recover the input shape.
        return dx_padded[:, :, p:p + H_in, p:p + W_in]
class Conv2d_im2col(Conv2d):

    def forward(self, x):
        """Unfold ``x`` into im2col layout (vectorised gather variant).

        Args:
            x: input of shape (B, C, H, W).

        Returns:
            im_col: rows of shape (B*oH*oW, C*k*k); columns are ordered
            channel-major, then kernel row, then kernel column.
        """
        B, C, H, W = x.shape
        k, s, p = self.kernel_size, self.stride, self.padding
        # fix: padding comes from the module configuration (the original
        # derived it from the kernel size) and the output extent uses
        # ``// stride + 1`` (the original used ``/ stride - 1``).
        oH = (H + 2 * p - k) // s + 1
        oW = (W + 2 * p - k) // s + 1
        x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)),
                          mode='constant')
        # Index arrays for a fancy-index gather: i/j hold, for every output
        # position (rows) and kernel element (columns), the padded-input
        # row/column to read; k_idx holds the channel of each column.
        i0 = np.tile(np.repeat(np.arange(k), k), C)
        i1 = s * np.repeat(np.arange(oH), oW)
        # fix: j1 tiles over oH (the original tiled over out_width twice).
        j0 = np.tile(np.arange(k), k * C)
        j1 = s * np.tile(np.arange(oW), oH)
        i = i0.reshape(1, -1) + i1.reshape(-1, 1)
        j = j0.reshape(1, -1) + j1.reshape(-1, 1)
        k_idx = np.repeat(np.arange(C), k * k).reshape(1, -1)
        # Gather: (B, oH*oW, C*k*k), then flatten batch-major so each row is
        # one receptive field of one sample.
        im_col = x_padded[:, k_idx, i, j]
        return im_col.reshape(-1, C * k * k)
class AvgPool(Module):

    def __init__(self, kernel_size: int=2,
                 stride: int=2, padding: int=0):
        """Module which applies average pooling to input.

        Args:
            kernel_size: default 2.
            stride: default 2.
            padding: default 0.
        """
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.x = None          # cached input, needed by backward()
        self.x_padded = None   # cached zero-padded input

    def forward(self, x):
        """Forward propagation of average pooling module.

        Args:
            x: input of shape (B, C, H_in, W_in).

        Returns:
            out: output of shape (B, C, H_out, W_out).
        """
        B, C, H_in, W_in = x.shape
        self.x = x  # fix: was commented out, but backward() reads self.x
        k, s, p = self.kernel_size, self.stride, self.padding
        # fix: pad widths must be (before, after) pairs for every axis; the
        # original passed bare ints for the spatial axes.
        self.x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)),
                               mode='constant')
        out_h = (H_in + 2 * p - k) // s + 1
        out_w = (W_in + 2 * p - k) // s + 1
        # fix: np.zeros takes the shape as a single tuple.
        self.out = np.zeros((B, C, out_h, out_w))
        for h in range(out_h):
            for w in range(out_w):
                # fix: the original slice was missing a comma after ':,:'
                # and therefore indexed the wrong axes.
                window = self.x_padded[:, :, s * h:s * h + k, s * w:s * w + k]
                self.out[:, :, h, w] = np.mean(window, axis=(-2, -1))
        return self.out

    def backward(self, dy):
        """Backward propagation of average pooling module.

        Args:
            dy: output delta of shape (B, C, H_out, W_out).

        Returns:
            dx: input delta of shape (B, C, H_in, W_in).
        """
        B, C, H_out, W_out = dy.shape
        B, C, H_in, W_in = self.x.shape
        k, s, p = self.kernel_size, self.stride, self.padding
        self.grad = np.zeros_like(self.x_padded)
        for h in range(H_out):
            for w in range(W_out):
                # Each cell of a window receives an equal 1/k^2 share of the
                # output delta; the trailing axes are added so the (B, C)
                # delta broadcasts over the (k, k) window (the original
                # broadcast was shape-incompatible).
                self.grad[:, :, s * h:s * h + k, s * w:s * w + k] += \
                    dy[:, :, h, w][..., None, None] / k ** 2
        # Strip the padding back off to recover the input shape.
        return self.grad[:, :, p:p + H_in, p:p + W_in]
class MaxPool(Module):

    def __init__(self, kernel_size: int=2,
                 stride: int=2, padding: int=0):
        """Module which applies max pooling to input.

        Args:
            kernel_size: default 2.
            stride: default 2.
            padding: default 0.
        """
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.x = None          # cached input, needed by backward()
        self.x_padded = None   # cached zero-padded input

    def forward(self, x):
        """Forward propagation of max pooling module.

        Args:
            x: input of shape (B, C, H_in, W_in).

        Returns:
            out: output of shape (B, C, H_out, W_out).
        """
        B, C, H_in, W_in = x.shape
        self.x = x
        k, s, p = self.kernel_size, self.stride, self.padding
        # fix: the padded input must be stored on the instance — backward()
        # reads self.x_padded, which the original never assigned.
        self.x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)),
                               mode='constant')
        out_h = (H_in + 2 * p - k) // s + 1
        out_w = (W_in + 2 * p - k) // s + 1
        self.out = np.zeros((B, C, out_h, out_w))
        for h in range(out_h):
            for w in range(out_w):
                window = self.x_padded[:, :, s * h:s * h + k, s * w:s * w + k]
                self.out[:, :, h, w] = np.max(window, axis=(-2, -1))
        return self.out

    def backward(self, dy):
        """Backward propagation of max pooling module.

        Args:
            dy: output delta of shape (B, C, H_out, W_out).

        Returns:
            dx: input delta of shape (B, C, H_in, W_in).
        """
        B, C, H_out, W_out = dy.shape
        B, C, H_in, W_in = self.x.shape
        k, s, p = self.kernel_size, self.stride, self.padding
        self.grad = np.zeros_like(self.x_padded)
        for h in range(H_out):
            for w in range(W_out):
                window = self.x_padded[:, :, s * h:s * h + k, s * w:s * w + k]
                w_max = np.max(window, axis=(-2, -1), keepdims=True)
                # Route the delta only to the element(s) that produced the
                # max (fix: the original compared the *gradient* buffer to
                # the max instead of the input window, and its (B, C) delta
                # did not broadcast over the (k, k) window).
                mask = (window == w_max)
                self.grad[:, :, s * h:s * h + k, s * w:s * w + k] += \
                    mask * dy[:, :, h, w][..., None, None]
        return self.grad[:, :, p:p + H_in, p:p + W_in]
class Dropout(Module):

    def __init__(self, p: float=0.5):
        """Inverted-dropout module.

        Args:
            p: keep probability — each unit survives with probability ``p``.
               NOTE(review): this implementation treats ``p`` as the *keep*
               rate (mask is ``rand < p``), not the usual drop rate; kept
               as-is to preserve behaviour.
        """
        self.p = p
        self.mask = None   # scaled keep mask from the last forward pass

    def forward(self, x):
        # Inverted dropout: kept units are scaled by 1/p at train time so
        # the expected activation is unchanged.
        self.mask = (np.random.rand(*x.shape) < self.p) / self.p
        return self.mask * x

    def backward(self, dy):
        # fix: the method was misspelled ``backard``, so the base class's
        # identity backward ran instead; the gradient must pass through the
        # same scaled mask that was applied in forward().
        return self.mask * dy
if __name__ == '__main__':
import pdb; pdb.set_trace() | en | 0.448426 | Base class for all neural network modules. If a module behaves different between training and testing, its init method should inherit from this one. Defines calling forward method at every call. Should not be overridden by subclasses. Defines the forward propagation of the module performed at every call. Should be overridden by all subclasses. Defines the backward propagation of the module. Sets the mode of the module to training. Should not be overridden by subclasses. # isintance():判断attr的类型是不是Module Sets the mode of the module to eval. Should not be overridden by subclasses. # isintance():判断attr的类型是不是Module Module which applies linear transformation to input. Args: in_length: L_in from expected input shape (N, L_in). out_length: L_out from output shape (N, L_out). # TODO Initialize the weight # of linear module. # self.b = np.zeros(out_length) # self.original_x_shape = None # self.dw = None # self.db = None # End of todo Forward propagation of linear module. Args: x: input of shape (N, L_in). Returns: out: output of shape (N, L_out). # TODO Implement forward propogation # of linear module. # print(x.shape) # print(x.shape) # print(self.w.shape) # End of todo Backward propagation of linear module. Args: dy: output delta of shape (N, L_out). Returns: dx: input delta of shape (N, L_in). # TODO Implement backward propogation # of linear module. # dx = np.dot(dy,self.w.T) # dx = dx.reshape(*self.original_x_shape) # return dx # End of todo Module which applies batch normalization to input. Args: length: L from expected input shape (N, L). momentum: default 0.9. # TODO Initialize the attributes # of 1d batchnorm module. # 追踪mini-batch 均值 # 追踪mini-batch 方差 # 排除计算错误和分母为0的情况 # 超参数,追踪样本整体均值和方差的动量 # self.gamma = gamma(0,1,self.L).reshape((self.L,)) # End of todo Forward propagation of batch norm module. Args: x: input of shape (N, L). Returns: out: output of shape (N, L). 
# TODO Implement forward propogation # of 1d batchnorm module. # 根据计算的mean和var批量归一化x # 根据当前mini-batch的样本进行追踪更新,计算滑动平均 # End of todo Backward propagation of batch norm module. Args: dy: output delta of shape (N, L). Returns: dx: input delta of shape (N, L). # TODO Implement backward propogation # of 1d batchnorm module. # 需要保存前向传播里面的部分参数值(中间变量): # self.x_hat: # self.gamma: # x-x_mean: # x_var+self.eps: Module which applies 2D convolution to input. Args: in_channels: C_in from expected input shape (B, C_in, H_in, W_in). channels: C_out from output shape (B, C_out, H_out, W_out). kernel_size: default 3. stride: default 1. padding: default 0. # TODO Initialize the attributes # of 2d convolution module. # self.n_filters = C_out # End of todo Forward propagation of convolution module. Args: x: input of shape (B, C_in, H_in, W_in). Returns: out: output of shape (B, C_out, H_out, W_out). # TODO Implement forward propogation # of 2d convolution module. # FC,FN,FH,FW = self.W.shape # FC:=C_in, 滤波器的通道数,由输入的x的通道数决定C_in # FN:=C_out, 滤波器W的个数,决定输出out的通道数 # FH:滤波器卷积核的尺寸大小,由自己设计或者采用默认的kernel_size都可以,通常是3 # FW:滤波器卷积核的尺寸大小,由自己设计或者采用默认的kernel_size都可以,通常是3 # self.x_padded = np.pad(x,((0,0),(0,0),(self.padding,self.padding),(self.padding,self.padding),'constant')) # self.W_im2col = self.W.reshape((self.n_filters),-1) # self.layer_input = x # 调用Conv2d_im2col对x进行拉伸 # x:(B,C_in,H_in,W_in) -> (B*H_out*W_out,C_in*FH*FW) # -> (B*H_out*W_out,C_in*kernel_size*kernel_size) # x_padded = np.pad(x,((0,0),(0,0),(self.padding,self.padding),(self.padding,self.padding)),'constant') # self.W也要进行拉伸,变成二维 # self.W:(C_in,C_out,self.kernel_size,self.kernel_size) -> (FH*FW*C_in,FN) # -> (FH*FW*C_in,C_out) # End of todo Backward propagation of convolution module. Args: dy: output delta of shape (B, C_out, H_out, W_out). Returns: dx: input delta of shape (B, C_in, H_in, W_in). # TODO Implement backward propogation # of 2d convolution module. 
# backward时转换col2im # End of todo # TODO Implement forward propogation of # 2d convolution module using im2col method. # i.shape:(out_weight*out_width,C_in*filter_height*filter_width),存放的是从x_padded中一个通道每次读取一个单元卷积核对应元素的第一轴的索引位置 # j.shape:(out_weight*out_width,C_in*filter_height*filter_width),存放的是从x_padded中一个通道每次读取一个单元卷积核对应元素的第二轴的索引位置 # k.reshape:(1,C_in*filter_height*filter_width),存放的是每次卷积通道的索引 # 对x_padded进行切片处理,[:,k,i,j]就是根据k,i,j索引提取出x_padded中需要卷积的元素 # imcol.shape:(batch_size,out_weight*out_width,C_in*filter_height*filter_width) # 用transpose对imcol维度变换之后让每一行的元素个数是C_in*filter_width*filter_height, # 也就是每一次卷积需要卷积的元素个数,对每个样本进行卷积,包括所有通道 # im_col = im_col.transpose(1,2,0).reshape(-1,filter_width*filter_height*C_in) B,iC,iH,iW = x.shape p,s,k = self.padding,self.stride,self.kernel_size oH,oW = (iH-k) // s + 1,(iW-k) // s + 1 col = np.zeros((B,iC,k,k,oH,oW)) for h in np.arange(k): for w in np.arange(k): col[:,:,h,w,:,:] = x[:,:,h:h+s*oH:s,w:w+s*oW:s] return col.transpose(0,4,5,1,2,3).reshape(B*oH*oW,-1) # End of todo Module which applies average pooling to input. Args: kernel_size: default 2. stride: default 2. padding: default 0. # TODO Initialize the attributes # of average pooling module. # End of todo Forward propagation of average pooling module. Args: x: input of shape (B, C, H_in, W_in). Returns: out: output of shape (B, C, H_out, W_out). # TODO Implement forward propogation # of average pooling module. # self.x = x # End of todo Backward propagation of average pooling module. Args: dy: output delta of shape (B, C, H_out, W_out). Returns: dx: input delta of shape (B, C, H_in, W_in). # TODO Implement backward propogation # of average pooling module. # End of todo Module which applies max pooling to input. Args: kernel_size: default 2. stride: default 2. padding: default 0. # TODO Initialize the attributes # of maximum pooling module. # End of todo Forward propagation of max pooling module. Args: x: input of shape (B, C, H_in, W_in). 
Returns: out: output of shape (B, C, H_out, W_out). # TODO Implement forward propogation # of maximum pooling module. # End of todo Backward propagation of max pooling module. Args: dy: output delta of shape (B, C, H_out, W_out). Returns: out: input delta of shape (B, C, H_in, W_in). # TODO Implement backward propogation # of maximum pooling module. # End of todo # TODO Initialize the attributes # of dropout module. # End of todo # TODO Implement forward propogation # of dropout module. # End of todo # TODO Implement backward propogation # of dropout module. # End of todo | 3.124347 | 3 |
thecut/backslash/tests/test_decorators.py | exemplarysoftware/thecut-backslash | 0 | 6619321 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from thecut.backslash.decorators import register, reject_unsupported_browsers
from thecut.backslash.options import ModelAdmin
from test_app.models import BackslashTestModel
from django.test.client import RequestFactory
from django.http import HttpResponse
try: # Python 3
from unittest import mock
except ImportError: # Python 2
import mock
class TestRegister(TestCase):
    """Tests for the ``register`` admin-class decorator."""

    def test_not_modeladmin_subclass(self):
        # A decorated class that is not a ModelAdmin subclass must be
        # rejected with ValueError at class-definition time.
        class Useless2(object):
            pass
        with self.assertRaises(ValueError):
            @register(Useless2)
            class Useless(object):
                pass

    def test_is_modeladmin_subclass(self):
        # A ModelAdmin subclass registers without raising.
        @register(BackslashTestModel)
        class Useless(ModelAdmin):
            pass

    def test_invalid_site(self):
        class Useless2(object):
            pass
        # NOTE(review): presumably ``site`` must be an admin-site instance;
        # confirm against the ``register`` implementation.
        with self.assertRaises(ValueError):
            @register(BackslashTestModel, site=Useless2)
            class Useless(ModelAdmin):
                pass
class TestRejectUnsupportedBrowsers(TestCase):
    """Tests for the ``reject_unsupported_browsers`` view decorator."""

    def setUp(self):
        # NOTE(review): ``self.wrapper`` is never used by the tests below —
        # each test builds its own decorated view instead.
        self.wrapper = reject_unsupported_browsers({
            'microsoft internet explorer': '10'},
            'backslash/unsupported_browser.html')

    def test_browser_is_not_supported(self):
        # The wrapped view must never run for a rejected browser, so make
        # it fail loudly if it is ever called.
        def always_fails_view(request):
            assert False
        view = reject_unsupported_browsers(
            browser_requirements={'microsoft internet explorer': '10'},
            template_name='backslash/unsupported_browser.html')(
                always_fails_view)
        request_factory = RequestFactory()
        # IE 6 user-agent string: below the required version 10.
        request = request_factory.get('/', HTTP_USER_AGENT='Mozilla/4.0 '
                                      '(compatible; MSIE 6.0; Windows NT 5.1) ')  # NOQA
        response = view(request)
        # The decorator answers 406 Not Acceptable instead of invoking the view.
        self.assertEqual(response.status_code, 406)

    def test_browser_is_supported(self):
        def always_succeeds_view(request):
            html = '<html><body>Hello</body></html>'
            return HttpResponse(html)
        view = reject_unsupported_browsers(
            browser_requirements={'microsoft internet explorer': '10'},
            template_name='backslash/unsupported_browser.html')(
                always_succeeds_view)
        request_factory = RequestFactory()
        # IE 10 user-agent string: meets the minimum version requirement.
        request = request_factory.get('/', HTTP_USER_AGENT='Mozilla/5.0 '
                                      '(compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)')  # NOQA
        response = view(request)
        # The wrapped view runs and its response passes through unchanged.
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'<html><body>Hello</body></html>')
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from thecut.backslash.decorators import register, reject_unsupported_browsers
from thecut.backslash.options import ModelAdmin
from test_app.models import BackslashTestModel
from django.test.client import RequestFactory
from django.http import HttpResponse
try: # Python 3
from unittest import mock
except ImportError: # Python 2
import mock
class TestRegister(TestCase):
    """Tests for the ``register`` admin-class decorator."""

    def test_not_modeladmin_subclass(self):
        # A decorated class that is not a ModelAdmin subclass must be
        # rejected with ValueError at class-definition time.
        class Useless2(object):
            pass
        with self.assertRaises(ValueError):
            @register(Useless2)
            class Useless(object):
                pass

    def test_is_modeladmin_subclass(self):
        # A ModelAdmin subclass registers without raising.
        @register(BackslashTestModel)
        class Useless(ModelAdmin):
            pass

    def test_invalid_site(self):
        class Useless2(object):
            pass
        # NOTE(review): presumably ``site`` must be an admin-site instance;
        # confirm against the ``register`` implementation.
        with self.assertRaises(ValueError):
            @register(BackslashTestModel, site=Useless2)
            class Useless(ModelAdmin):
                pass
class TestRejectUnsupportedBrowsers(TestCase):
    """Tests for the ``reject_unsupported_browsers`` view decorator."""

    def setUp(self):
        # NOTE(review): ``self.wrapper`` is never used by the tests below —
        # each test builds its own decorated view instead.
        self.wrapper = reject_unsupported_browsers({
            'microsoft internet explorer': '10'},
            'backslash/unsupported_browser.html')

    def test_browser_is_not_supported(self):
        # The wrapped view must never run for a rejected browser, so make
        # it fail loudly if it is ever called.
        def always_fails_view(request):
            assert False
        view = reject_unsupported_browsers(
            browser_requirements={'microsoft internet explorer': '10'},
            template_name='backslash/unsupported_browser.html')(
                always_fails_view)
        request_factory = RequestFactory()
        # IE 6 user-agent string: below the required version 10.
        request = request_factory.get('/', HTTP_USER_AGENT='Mozilla/4.0 '
                                      '(compatible; MSIE 6.0; Windows NT 5.1) ')  # NOQA
        response = view(request)
        # The decorator answers 406 Not Acceptable instead of invoking the view.
        self.assertEqual(response.status_code, 406)

    def test_browser_is_supported(self):
        def always_succeeds_view(request):
            html = '<html><body>Hello</body></html>'
            return HttpResponse(html)
        view = reject_unsupported_browsers(
            browser_requirements={'microsoft internet explorer': '10'},
            template_name='backslash/unsupported_browser.html')(
                always_succeeds_view)
        request_factory = RequestFactory()
        # IE 10 user-agent string: meets the minimum version requirement.
        request = request_factory.get('/', HTTP_USER_AGENT='Mozilla/5.0 '
                                      '(compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)')  # NOQA
        response = view(request)
        # The wrapped view runs and its response passes through unchanged.
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'<html><body>Hello</body></html>')
| en | 0.398957 | # -*- coding: utf-8 -*- # Python 3 # Python 2 # NOQA # NOQA | 2.28379 | 2 |
pyhammer/tasks/svn/svnimporttask.py | webbers/pyhammer | 2 | 6619322 | <reponame>webbers/pyhammer
# -*- coding: utf-8 -*-
from pyhammer.tasks.taskbase import TaskBase
from pyhammer.utils import execProg
class SvnImportTask(TaskBase):
    """Svn Commit Dir Step"""

    def __init__( self, dir, repo ):
        # dir:  local directory to import; repo: target SVN repository URL.
        super(SvnImportTask, self).__init__()
        self.dir = dir
        self.repo = repo

    def do( self ):
        """Run ``svn import`` of ``self.dir`` into ``self.repo``.

        Returns True when the svn command exits with status 0.
        """
        self.reporter.message( "Svn Import: %s => %s" % ( self.dir, self.repo ) )
        # NOTE(review): "Commited" is a typo, but it is the user-visible
        # commit message — fixing it would change committed history text.
        commitMessage = "Commited by Build"
        # NOTE(review): dir/repo are interpolated unquoted-escaped into a
        # shell string; confirm they never contain embedded double quotes.
        command = "svn import --non-interactive --trust-server-cert -m \"%s\" \"%s\" \"%s\" " % ( commitMessage, self.dir, self.repo )
        # execProg presumably runs the command with self.dir as the working
        # directory and returns the process exit code — confirm.
        return execProg( command, self.reporter, self.dir ) == 0
from pyhammer.tasks.taskbase import TaskBase
from pyhammer.utils import execProg
class SvnImportTask(TaskBase):
"""Svn Commit Dir Step"""
def __init__( self, dir, repo ):
super(SvnImportTask, self).__init__()
self.dir = dir
self.repo = repo
def do( self ):
self.reporter.message( "Svn Import: %s => %s" % ( self.dir, self.repo ) )
commitMessage = "Commited by Build"
command = "svn import --non-interactive --trust-server-cert -m \"%s\" \"%s\" \"%s\" " % ( commitMessage, self.dir, self.repo )
return execProg( command, self.reporter, self.dir ) == 0 | en | 0.680905 | # -*- coding: utf-8 -*- Svn Commit Dir Step | 2.13127 | 2 |
mandelbrot.py | jschmidtnj/CS115 | 0 | 6619323 | <filename>mandelbrot.py
# mandelbrot.py
# Lab 9
#
# Name:
# keep this import line...
from cs5png import PNGImage
# start your Lab 9 functions here:
| <filename>mandelbrot.py
# mandelbrot.py
# Lab 9
#
# Name:
# keep this import line...
from cs5png import PNGImage
# start your Lab 9 functions here:
| en | 0.602204 | # mandelbrot.py # Lab 9 # # Name: # keep this import line... # start your Lab 9 functions here: | 1.88613 | 2 |
src/pyams_zmi/zmi/viewlet/breadcrumb.py | Py-AMS/pyams-zmi | 0 | 6619324 | <reponame>Py-AMS/pyams-zmi<gh_stars>0
#
# Copyright (c) 2015-2021 <NAME> <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""PyAMS_zmi.zmi.viewlet.breadcrumb module
This module defines breadcrumbs adapter for management interface.
"""
__docformat__ = 'restructuredtext'
from zope.interface import Interface
from zope.location import ILocation
from pyams_i18n.interfaces import II18n
from pyams_site.interfaces import ISiteRoot
from pyams_skin.interfaces.viewlet import IBreadcrumbItem
from pyams_skin.viewlet.breadcrumb import BreadcrumbItem
from pyams_utils.adapter import adapter_config
from pyams_zmi.interfaces import IAdminLayer
from pyams_zmi.interfaces.configuration import IZMIConfiguration
@adapter_config(required=(ILocation, IAdminLayer, Interface),
                provides=IBreadcrumbItem)
class AdminLayerBreadcrumbItem(BreadcrumbItem):
    """Admin layer breadcrumb item adapter.

    Registered for any located object viewed through the admin layer;
    breadcrumb links target the 'admin' view of each context.
    """
    # view name appended to each breadcrumb link
    view_name = 'admin'
@adapter_config(required=(ISiteRoot, IAdminLayer, Interface),
                provides=IBreadcrumbItem)
class SiteRootBreadcrumbItem(AdminLayerBreadcrumbItem):
    """Site root breadcrumb item adapter."""
    @property
    def label(self):
        """Breadcrumb label: the localized ZMI 'home_name' if configured,
        falling back to the configured site name."""
        configuration = IZMIConfiguration(self.request.root)
        return II18n(configuration).query_attribute('home_name', request=self.request) or \
               configuration.site_name
    # 'persistent' keeps the site-root crumb always displayed
    css_class = 'breadcrumb-item persistent'
| #
# Copyright (c) 2015-2021 <NAME> <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""PyAMS_zmi.zmi.viewlet.breadcrumb module
This module defines breadcrumbs adapter for management interface.
"""
__docformat__ = 'restructuredtext'
from zope.interface import Interface
from zope.location import ILocation
from pyams_i18n.interfaces import II18n
from pyams_site.interfaces import ISiteRoot
from pyams_skin.interfaces.viewlet import IBreadcrumbItem
from pyams_skin.viewlet.breadcrumb import BreadcrumbItem
from pyams_utils.adapter import adapter_config
from pyams_zmi.interfaces import IAdminLayer
from pyams_zmi.interfaces.configuration import IZMIConfiguration
@adapter_config(required=(ILocation, IAdminLayer, Interface),
                provides=IBreadcrumbItem)
class AdminLayerBreadcrumbItem(BreadcrumbItem):
    """Admin layer breadcrumb item adapter.

    Registered for any located object viewed through the admin layer;
    breadcrumb links target the 'admin' view of each context.
    """
    # view name appended to each breadcrumb link
    view_name = 'admin'
@adapter_config(required=(ISiteRoot, IAdminLayer, Interface),
                provides=IBreadcrumbItem)
class SiteRootBreadcrumbItem(AdminLayerBreadcrumbItem):
    """Site root breadcrumb item adapter."""
    @property
    def label(self):
        """Breadcrumb label: the localized ZMI 'home_name' if configured,
        falling back to the configured site name."""
        configuration = IZMIConfiguration(self.request.root)
        return II18n(configuration).query_attribute('home_name', request=self.request) or \
               configuration.site_name
    # 'persistent' keeps the site-root crumb always displayed
    css_class = 'breadcrumb-item persistent'
star_tides/services/databases/mongo/models/guidance_model.py | STAR-TIDES/kb | 2 | 6619325 | ''' star_tides.services.databases.mongo.models.guidance_model
'''
from mongoengine import (
EmbeddedDocument,
StringField,
ObjectIdField,
EmbeddedDocumentField,
URLField
)
from star_tides.services.databases.mongo.models.location_model import (
LocationModel
)
class Guidance(EmbeddedDocument):
    """Embedded Mongo document holding one piece of guidance.

    Guidance may carry free text and/or an external link, optionally
    attributed to an author and project, and is always anchored to a
    location.
    """
    # ObjectId of the authoring user, if known
    author = ObjectIdField(required=False)
    # ObjectId of the related project, if any
    project_id = ObjectIdField(required=False)
    # free-form guidance text
    text_content = StringField(required=False)
    # external URL with further guidance
    link = URLField(required=False)
    # location this guidance applies to (mandatory)
    location = EmbeddedDocumentField(LocationModel, required=True)
| ''' star_tides.services.databases.mongo.models.guidance_model
'''
from mongoengine import (
EmbeddedDocument,
StringField,
ObjectIdField,
EmbeddedDocumentField,
URLField
)
from star_tides.services.databases.mongo.models.location_model import (
LocationModel
)
class Guidance(EmbeddedDocument):
    """Embedded Mongo document holding one piece of guidance.

    Guidance may carry free text and/or an external link, optionally
    attributed to an author and project, and is always anchored to a
    location.
    """
    # ObjectId of the authoring user, if known
    author = ObjectIdField(required=False)
    # ObjectId of the related project, if any
    project_id = ObjectIdField(required=False)
    # free-form guidance text
    text_content = StringField(required=False)
    # external URL with further guidance
    link = URLField(required=False)
    # location this guidance applies to (mandatory)
    location = EmbeddedDocumentField(LocationModel, required=True)
| ja | 0.086234 | star_tides.services.databases.mongo.models.guidance_model | 1.97488 | 2 |
Tools/scripts/pdeps.py | sireliah/polish-python | 1 | 6619326 | #! /usr/bin/env python3
# pdeps
#
# Find dependencies between a bunch of Python modules.
#
# Usage:
# pdeps file1.py file2.py ...
#
# Output:
# Four tables separated by lines like '--- Closure ---':
# 1) Direct dependencies, listing which module imports which other modules
# 2) The inverse of (1)
# 3) Indirect dependencies, albo the closure of the above
# 4) The inverse of (3)
#
# To do:
# - command line options to select output type
# - option to automatically scan the Python library dla referenced modules
# - option to limit output to particular modules
zaimportuj sys
zaimportuj re
zaimportuj os
# Main program
#
def main():
    """Build the dependency table from the file arguments and print the
    four report tables (uses, used-by, and the closure of each).
    Returns a process exit status (2 on usage error, 0 on success)."""
    args = sys.argv[1:]
    jeżeli nie args:
        print('usage: pdeps file.py file.py ...')
        zwróć 2
    #
    table = {}
    dla arg w args:
        process(arg, table)
    #
    print('--- Uses ---')
    printresults(table)
    #
    print('--- Used By ---')
    inv = inverse(table)
    printresults(inv)
    #
    print('--- Closure of Uses ---')
    reach = closure(table)
    printresults(reach)
    #
    print('--- Closure of Used By ---')
    invreach = inverse(reach)
    printresults(invreach)
    #
    zwróć 0
# Compiled regular expressions to search dla zaimportuj statements
#
# BUGFIX: these were defined as m_zaimportuj / m_z, but process() references
# m_import / m_from, which would raise a NameError on first use.  Name each
# pattern after the statement form it actually matches (both are tried on
# every line, so behavior is otherwise unchanged).
m_from = re.compile('^[ \t]*from[ \t]+([^ \t]+)[ \t]+')
m_import = re.compile('^[ \t]*import[ \t]+([^#]+)')
# Collect data z one file
#
def process(filename, table):
    """Scan one Python source file and record the names it imports w
    table[<module name>], trying both import-statement regexes on each
    logical line."""
    fp = open(filename, 'r')
    mod = os.path.basename(filename)
    jeżeli mod[-3:] == '.py':
        mod = mod[:-3]
    # 'list' shadows the builtin locally; it is this module's import list
    table[mod] = list = []
    dopóki 1:
        line = fp.readline()
        jeżeli nie line: przerwij
        # glue backslash-continued lines back into one logical line
        dopóki line[-1:] == '\\':
            nextline = fp.readline()
            jeżeli nie nextline: przerwij
            line = line[:-1] + nextline
        m_found = m_import.match(line) albo m_from.match(line)
        jeżeli m_found:
            # regs[1] bounds the captured module-name group
            (a, b), (a1, b1) = m_found.regs[:2]
        inaczej: kontynuuj
        words = line[a1:b1].split(',')
        # print '#', line, words
        dla word w words:
            word = word.strip()
            jeżeli word nie w list:
                list.append(word)
    fp.close()
# Compute closure (this jest w fact totally general)
#
def closure(table):
    """Zwróć the transitive closure of the mapping: for each module, every
    module reachable through repeated 'uses' edges (fixed-point iteration)."""
    modules = list(table.keys())
    #
    # Initialize reach przy a copy of table
    #
    reach = {}
    dla mod w modules:
        reach[mod] = table[mod][:]
    #
    # Iterate until no more change
    #
    change = 1
    dopóki change:
        change = 0
        dla mod w modules:
            dla mo w reach[mod]:
                jeżeli mo w modules:
                    dla m w reach[mo]:
                        jeżeli m nie w reach[mod]:
                            reach[mod].append(m)
                            change = 1
    #
    zwróć reach
# Invert a table (this jest again totally general).
# All keys of the original table are made keys of the inverse,
# so there may be empty lists w the inverse.
#
def inverse(table):
    """Zwróć the inverted mapping; every key of the original also becomes
    a key of the inverse, so some values may be empty lists."""
    inv = {}
    dla key w table.keys():
        jeżeli key nie w inv:
            inv[key] = []
        dla item w table[key]:
            store(inv, item, key)
    zwróć inv
# Store "item" w "dict" under "key".
# The dictionary maps keys to lists of items.
# If there jest no list dla the key yet, it jest created.
#
def store(dict, key, item):
    """Append item to the list stored under key, creating the list on
    first use.  (Parameter 'dict' shadows the builtin name.)"""
    jeżeli key w dict:
        dict[key].append(item)
    inaczej:
        dict[key] = [item]
# Tabulate results neatly
#
def printresults(table):
    """Pretty-print the mapping, one module per line, names left-padded to
    the longest key; '(*)' flags a module that depends on itself."""
    modules = sorted(table.keys())
    maxlen = 0
    dla mod w modules: maxlen = max(maxlen, len(mod))
    dla mod w modules:
        list = sorted(table[mod])
        print(mod.ljust(maxlen), ':', end=' ')
        jeżeli mod w list:
            print('(*)', end=' ')
        dla ref w list:
            print(ref, end=' ')
        print()
# Call main oraz honor exit status
jeżeli __name__ == '__main__':
    spróbuj:
        sys.exit(main())
    # exit quietly with status 1 on Ctrl-C instead of a traceback
    wyjąwszy KeyboardInterrupt:
        sys.exit(1)
| #! /usr/bin/env python3
# pdeps
#
# Find dependencies between a bunch of Python modules.
#
# Usage:
# pdeps file1.py file2.py ...
#
# Output:
# Four tables separated by lines like '--- Closure ---':
# 1) Direct dependencies, listing which module imports which other modules
# 2) The inverse of (1)
# 3) Indirect dependencies, albo the closure of the above
# 4) The inverse of (3)
#
# To do:
# - command line options to select output type
# - option to automatically scan the Python library dla referenced modules
# - option to limit output to particular modules
zaimportuj sys
zaimportuj re
zaimportuj os
# Main program
#
def main():
    """Build the dependency table from the file arguments and print the
    four report tables (uses, used-by, and the closure of each).
    Returns a process exit status (2 on usage error, 0 on success)."""
    args = sys.argv[1:]
    jeżeli nie args:
        print('usage: pdeps file.py file.py ...')
        zwróć 2
    #
    table = {}
    dla arg w args:
        process(arg, table)
    #
    print('--- Uses ---')
    printresults(table)
    #
    print('--- Used By ---')
    inv = inverse(table)
    printresults(inv)
    #
    print('--- Closure of Uses ---')
    reach = closure(table)
    printresults(reach)
    #
    print('--- Closure of Used By ---')
    invreach = inverse(reach)
    printresults(invreach)
    #
    zwróć 0
# Compiled regular expressions to search dla zaimportuj statements
#
# BUGFIX: these were defined as m_zaimportuj / m_z, but process() references
# m_import / m_from, which would raise a NameError on first use.  Name each
# pattern after the statement form it actually matches (both are tried on
# every line, so behavior is otherwise unchanged).
m_from = re.compile('^[ \t]*from[ \t]+([^ \t]+)[ \t]+')
m_import = re.compile('^[ \t]*import[ \t]+([^#]+)')
# Collect data z one file
#
def process(filename, table):
    """Scan one Python source file and record the names it imports w
    table[<module name>], trying both import-statement regexes on each
    logical line."""
    fp = open(filename, 'r')
    mod = os.path.basename(filename)
    jeżeli mod[-3:] == '.py':
        mod = mod[:-3]
    # 'list' shadows the builtin locally; it is this module's import list
    table[mod] = list = []
    dopóki 1:
        line = fp.readline()
        jeżeli nie line: przerwij
        # glue backslash-continued lines back into one logical line
        dopóki line[-1:] == '\\':
            nextline = fp.readline()
            jeżeli nie nextline: przerwij
            line = line[:-1] + nextline
        m_found = m_import.match(line) albo m_from.match(line)
        jeżeli m_found:
            # regs[1] bounds the captured module-name group
            (a, b), (a1, b1) = m_found.regs[:2]
        inaczej: kontynuuj
        words = line[a1:b1].split(',')
        # print '#', line, words
        dla word w words:
            word = word.strip()
            jeżeli word nie w list:
                list.append(word)
    fp.close()
# Compute closure (this jest w fact totally general)
#
def closure(table):
    """Zwróć the transitive closure of the mapping: for each module, every
    module reachable through repeated 'uses' edges (fixed-point iteration)."""
    modules = list(table.keys())
    #
    # Initialize reach przy a copy of table
    #
    reach = {}
    dla mod w modules:
        reach[mod] = table[mod][:]
    #
    # Iterate until no more change
    #
    change = 1
    dopóki change:
        change = 0
        dla mod w modules:
            dla mo w reach[mod]:
                jeżeli mo w modules:
                    dla m w reach[mo]:
                        jeżeli m nie w reach[mod]:
                            reach[mod].append(m)
                            change = 1
    #
    zwróć reach
# Invert a table (this jest again totally general).
# All keys of the original table are made keys of the inverse,
# so there may be empty lists w the inverse.
#
def inverse(table):
    """Zwróć the inverted mapping; every key of the original also becomes
    a key of the inverse, so some values may be empty lists."""
    inv = {}
    dla key w table.keys():
        jeżeli key nie w inv:
            inv[key] = []
        dla item w table[key]:
            store(inv, item, key)
    zwróć inv
# Store "item" w "dict" under "key".
# The dictionary maps keys to lists of items.
# If there jest no list dla the key yet, it jest created.
#
def store(dict, key, item):
    """Append item to the list stored under key, creating the list on
    first use.  (Parameter 'dict' shadows the builtin name.)"""
    jeżeli key w dict:
        dict[key].append(item)
    inaczej:
        dict[key] = [item]
# Tabulate results neatly
#
def printresults(table):
    """Pretty-print the mapping, one module per line, names left-padded to
    the longest key; '(*)' flags a module that depends on itself."""
    modules = sorted(table.keys())
    maxlen = 0
    dla mod w modules: maxlen = max(maxlen, len(mod))
    dla mod w modules:
        list = sorted(table[mod])
        print(mod.ljust(maxlen), ':', end=' ')
        jeżeli mod w list:
            print('(*)', end=' ')
        dla ref w list:
            print(ref, end=' ')
        print()
# Call main oraz honor exit status
jeżeli __name__ == '__main__':
    spróbuj:
        sys.exit(main())
    # exit quietly with status 1 on Ctrl-C instead of a traceback
    wyjąwszy KeyboardInterrupt:
        sys.exit(1)
| en | 0.569532 | #! /usr/bin/env python3 # pdeps # # Find dependencies between a bunch of Python modules. # # Usage: # pdeps file1.py file2.py ... # # Output: # Four tables separated by lines like '--- Closure ---': # 1) Direct dependencies, listing which module imports which other modules # 2) The inverse of (1) # 3) Indirect dependencies, albo the closure of the above # 4) The inverse of (3) # # To do: # - command line options to select output type # - option to automatically scan the Python library dla referenced modules # - option to limit output to particular modules # Main program # # # # # # # # Compiled regular expressions to search dla zaimportuj statements # #]+)') # Collect data z one file # # print '#', line, words # Compute closure (this jest w fact totally general) # # # Initialize reach przy a copy of table # # # Iterate until no more change # # # Invert a table (this jest again totally general). # All keys of the original table are made keys of the inverse, # so there may be empty lists w the inverse. # # Store "item" w "dict" under "key". # The dictionary maps keys to lists of items. # If there jest no list dla the key yet, it jest created. # # Tabulate results neatly # # Call main oraz honor exit status | 3.016706 | 3 |
gym_scarecrow/controllers/scarecrow_q.py | ajberlier/gym-scarecrow | 0 | 6619327 | <reponame>ajberlier/gym-scarecrow
import os
import sys
import math
import random
import numpy as np
import matplotlib.pyplot as plt
from gym_scarecrow.params import *
import gym
import gym_scarecrow
class ScarecrowQ:
    """Tabular Q-learning agent for the scarecrow gym environment.

    One instance is built per episode index ``i``; the epsilon-greedy
    exploration rate is decayed geometrically with that index.
    """
    def __init__(self, i):
        # exploration rate for episode i (EPSILON, EPSILON_DECAY from params)
        self.epsilon = EPSILON * EPSILON_DECAY ** i
        # total reward accumulated over the episode
        self.cum_reward = 0
    def train(self, env, obs, q_table):
        """Run one training episode, updating ``q_table`` in place.

        :param env: gym environment (provides action_space / step)
        :param obs: initial observation, used as a row index into q_table
        :param q_table: state-by-action table of Q-values
        :return: the updated q_table
        """
        # loop for a single episode
        done = False
        while not done:
            # exploration vs exploitation
            if random.uniform(0, 1) < self.epsilon:
                action = env.action_space.sample() # explore action space
            else:
                action = np.argmax(q_table[obs]) # exploit learned values
            print('Action: ' + str(action))
            # take action to determine next observed state
            next_obs, reward, done, info = env.step(action)
            # maintain previous Q-value for Bellman Equation
            prev_value = q_table[obs, action]
            # calculate the maximum Q-value for the next observed state
            next_max = np.max(q_table[next_obs])
            # update Q-table with Bellman Equation
            q_table[obs, action] = (1 - ALPHA) * prev_value + ALPHA * (reward + GAMMA * next_max)
            # update observation
            obs = next_obs
            # cumulative reward
            self.cum_reward += reward
        print('\nQ-learning Algorithm: Training Complete!.')
        print('\nTotal Reward: ' + str(self.cum_reward))
        print('\nEpsilon: ' + str(self.epsilon))
        return q_table
    def play(self, env, obs, q_file_path):
        """Run one greedy (no-exploration) episode using a Q-table loaded
        from ``q_file_path`` (a .npy file), rendering after each step."""
        # load q table
        q_table = np.load(q_file_path)
        # loop for a single episode
        done = False
        while not done:
            action = np.argmax(q_table[obs]) # exploit learned values
            obs, reward, done, info = env.step(action)
            env.render()
# def __init__(self):
#
# def q_input(self, obs, action_space):
# self.obs = obs
# self.action_space = action_space
#
# def train(self, episodes, alpha, gamma, epsilon):
# # reset environment
# state = self.observations
# # reset state-action table
# obs_n = len(self.observations)
# act_n = len(self.action_space)
# sa_table = np.zeros([obs_n, act_n])
# """Training the agent"""
# for i in range(1, episodes + 1):
# epochs, penalties, reward, = 0, 0, 0
# done = False
# while not done:
# if random.uniform(0, 1) < epsilon:
# action = random.sample(self.action_space, 1) # Explore action space
# else:
# action = np.argmax(sa_table[state]) # Exploit learned values
# next_state, reward, done, info = env.step(
# action) # TODO: how do we want to handle stepping through the sim?
# old_value = sa_table[state, action]
# next_max = np.max(sa_table[next_state])
# new_value = (1 - alpha) * old_value + alpha * (reward + gamma * next_max)
# sa_table[state, action] = new_value
# if reward == -10:
# penalties += 1
# state = next_state
# epochs += 1
# if i % 100 == 0:
# clear_output(wait=True)
# print(f"Episode: {i}")
# # save table
# np.save('sa_table', sa_table)
# print("Training finished.\n")
#
# def agent_eval(self, episodes):
# """Evaluate agent's performance after Q-learning"""
# sa_table = np.load('sa_table.npy')
# total_epochs, total_penalties, total_reward = 0, 0, 0
# for _ in range(episodes):
# state = self.observations
# epochs, penalties, reward = 0, 0, 0
# done = False
# while not done:
# action = np.argmax(sa_table[state])
# obs, reward, done, info = env.step(action)
# if reward == -10:
# penalties += 1
# epochs += 1
# total_reward += reward
# total_penalties += penalties
# total_epochs += epochs
# print(f"Results after {episodes} episodes:")
# print(f"- Average reward per episode: {total_reward / episodes}")
# print(f"- Average reward per move: {total_reward / total_epochs}")
# print(f"- Average timesteps per episode: {total_epochs / episodes}")
# print(f"- Average penalties per episode: {total_penalties / episodes}")
#
# # TODO:
# def Run(self):
# # TODO: add logic to choose most recent saved table for the chosen algorithm or another one
# # for now i am just going to overwrite all previous trained tables
# sa_table = np.load('sa_table.npy')
# state = self.observations
# action = np.argmax(sa_table[state])
# return action
#
# def get_output(self):
# return []
#
| import os
import sys
import math
import random
import numpy as np
import matplotlib.pyplot as plt
from gym_scarecrow.params import *
import gym
import gym_scarecrow
class ScarecrowQ:
    """Tabular Q-learning agent for the scarecrow gym environment.

    One instance is built per episode index ``i``; the epsilon-greedy
    exploration rate is decayed geometrically with that index.
    """
    def __init__(self, i):
        # exploration rate for episode i (EPSILON, EPSILON_DECAY from params)
        self.epsilon = EPSILON * EPSILON_DECAY ** i
        # total reward accumulated over the episode
        self.cum_reward = 0
    def train(self, env, obs, q_table):
        """Run one training episode, updating ``q_table`` in place.

        :param env: gym environment (provides action_space / step)
        :param obs: initial observation, used as a row index into q_table
        :param q_table: state-by-action table of Q-values
        :return: the updated q_table
        """
        # loop for a single episode
        done = False
        while not done:
            # exploration vs exploitation
            if random.uniform(0, 1) < self.epsilon:
                action = env.action_space.sample() # explore action space
            else:
                action = np.argmax(q_table[obs]) # exploit learned values
            print('Action: ' + str(action))
            # take action to determine next observed state
            next_obs, reward, done, info = env.step(action)
            # maintain previous Q-value for Bellman Equation
            prev_value = q_table[obs, action]
            # calculate the maximum Q-value for the next observed state
            next_max = np.max(q_table[next_obs])
            # update Q-table with Bellman Equation
            q_table[obs, action] = (1 - ALPHA) * prev_value + ALPHA * (reward + GAMMA * next_max)
            # update observation
            obs = next_obs
            # cumulative reward
            self.cum_reward += reward
        print('\nQ-learning Algorithm: Training Complete!.')
        print('\nTotal Reward: ' + str(self.cum_reward))
        print('\nEpsilon: ' + str(self.epsilon))
        return q_table
    def play(self, env, obs, q_file_path):
        """Run one greedy (no-exploration) episode using a Q-table loaded
        from ``q_file_path`` (a .npy file), rendering after each step."""
        # load q table
        q_table = np.load(q_file_path)
        # loop for a single episode
        done = False
        while not done:
            action = np.argmax(q_table[obs]) # exploit learned values
            obs, reward, done, info = env.step(action)
            env.render()
# def __init__(self):
#
# def q_input(self, obs, action_space):
# self.obs = obs
# self.action_space = action_space
#
# def train(self, episodes, alpha, gamma, epsilon):
# # reset environment
# state = self.observations
# # reset state-action table
# obs_n = len(self.observations)
# act_n = len(self.action_space)
# sa_table = np.zeros([obs_n, act_n])
# """Training the agent"""
# for i in range(1, episodes + 1):
# epochs, penalties, reward, = 0, 0, 0
# done = False
# while not done:
# if random.uniform(0, 1) < epsilon:
# action = random.sample(self.action_space, 1) # Explore action space
# else:
# action = np.argmax(sa_table[state]) # Exploit learned values
# next_state, reward, done, info = env.step(
# action) # TODO: how do we want to handle stepping through the sim?
# old_value = sa_table[state, action]
# next_max = np.max(sa_table[next_state])
# new_value = (1 - alpha) * old_value + alpha * (reward + gamma * next_max)
# sa_table[state, action] = new_value
# if reward == -10:
# penalties += 1
# state = next_state
# epochs += 1
# if i % 100 == 0:
# clear_output(wait=True)
# print(f"Episode: {i}")
# # save table
# np.save('sa_table', sa_table)
# print("Training finished.\n")
#
# def agent_eval(self, episodes):
# """Evaluate agent's performance after Q-learning"""
# sa_table = np.load('sa_table.npy')
# total_epochs, total_penalties, total_reward = 0, 0, 0
# for _ in range(episodes):
# state = self.observations
# epochs, penalties, reward = 0, 0, 0
# done = False
# while not done:
# action = np.argmax(sa_table[state])
# obs, reward, done, info = env.step(action)
# if reward == -10:
# penalties += 1
# epochs += 1
# total_reward += reward
# total_penalties += penalties
# total_epochs += epochs
# print(f"Results after {episodes} episodes:")
# print(f"- Average reward per episode: {total_reward / episodes}")
# print(f"- Average reward per move: {total_reward / total_epochs}")
# print(f"- Average timesteps per episode: {total_epochs / episodes}")
# print(f"- Average penalties per episode: {total_penalties / episodes}")
#
# # TODO:
# def Run(self):
# # TODO: add logic to choose most recent saved table for the chosen algorithm or another one
# # for now i am just going to overwrite all previous trained tables
# sa_table = np.load('sa_table.npy')
# state = self.observations
# action = np.argmax(sa_table[state])
# return action
#
# def get_output(self):
# return []
# | en | 0.61896 | # loop for a single episode # exploration vs exploitation # explore action space # exploit learned values # take action to determine next observed state # maintain previous Q-value for Bellman Equation # calculate the maximum Q-value for the next observed state # update Q-table with Bellman Equation # update observation # cumulative reward # load q table # loop for a single episode # exploit learned values # def __init__(self): # # def q_input(self, obs, action_space): # self.obs = obs # self.action_space = action_space # # def train(self, episodes, alpha, gamma, epsilon): # # reset environment # state = self.observations # # reset state-action table # obs_n = len(self.observations) # act_n = len(self.action_space) # sa_table = np.zeros([obs_n, act_n]) # """Training the agent""" # for i in range(1, episodes + 1): # epochs, penalties, reward, = 0, 0, 0 # done = False # while not done: # if random.uniform(0, 1) < epsilon: # action = random.sample(self.action_space, 1) # Explore action space # else: # action = np.argmax(sa_table[state]) # Exploit learned values # next_state, reward, done, info = env.step( # action) # TODO: how do we want to handle stepping through the sim? 
# old_value = sa_table[state, action] # next_max = np.max(sa_table[next_state]) # new_value = (1 - alpha) * old_value + alpha * (reward + gamma * next_max) # sa_table[state, action] = new_value # if reward == -10: # penalties += 1 # state = next_state # epochs += 1 # if i % 100 == 0: # clear_output(wait=True) # print(f"Episode: {i}") # # save table # np.save('sa_table', sa_table) # print("Training finished.\n") # # def agent_eval(self, episodes): # """Evaluate agent's performance after Q-learning""" # sa_table = np.load('sa_table.npy') # total_epochs, total_penalties, total_reward = 0, 0, 0 # for _ in range(episodes): # state = self.observations # epochs, penalties, reward = 0, 0, 0 # done = False # while not done: # action = np.argmax(sa_table[state]) # obs, reward, done, info = env.step(action) # if reward == -10: # penalties += 1 # epochs += 1 # total_reward += reward # total_penalties += penalties # total_epochs += epochs # print(f"Results after {episodes} episodes:") # print(f"- Average reward per episode: {total_reward / episodes}") # print(f"- Average reward per move: {total_reward / total_epochs}") # print(f"- Average timesteps per episode: {total_epochs / episodes}") # print(f"- Average penalties per episode: {total_penalties / episodes}") # # # TODO: # def Run(self): # # TODO: add logic to choose most recent saved table for the chosen algorithm or another one # # for now i am just going to overwrite all previous trained tables # sa_table = np.load('sa_table.npy') # state = self.observations # action = np.argmax(sa_table[state]) # return action # # def get_output(self): # return [] # | 3.124097 | 3 |
simplydomain/src/dynamic_modules/bing_search.py | SimplySecurity/SimplyDomain-Old | 17 | 6619328 | import time
from urllib.parse import urlparse
from bs4 import BeautifulSoup
from simplydomain.src import core_serialization
from simplydomain.src import module_helpers
from simplydomain.src import core_scrub
# use RequestsHelpers() class to make requests to target URL
class DynamicModule(module_helpers.RequestsHelpers):
    """
    Dynamic module class that will be loaded and called
    at runtime. This will allow modules to easily be independent of the
    core runtime.
    """
    def __init__(self, json_entry):
        """
        Init class structure. Each module takes a JSON entry object which
        can pass different values to the module with out changing up the API.
        adapted form Empire Project:
        https://github.com/EmpireProject/Empire/blob/master/lib/modules/python_template.py
        :param json_entry: JSON data object passed to the module.
        """
        module_helpers.RequestsHelpers.__init__(self)
        self.json_entry = json_entry
        self.info = {
            # mod name
            'Module': 'bing_search.py',
            # long name of the module to be used
            'Name': 'Bing Subdomain Search',
            # version of the module to be used
            'Version': '1.0',
            # description
            'Description': ['Uses Bing search engine',
                            'with unofficial search engine API support.'],
            # authors or sources to be quoted
            'Authors': ['@Killswitch-GUI', '@ecjx'],
            # list of resources or comments
            'comments': [
                'https://github.com/ejcx/subdomainer/blob/master/subdomainer.py'
            ]
        }
        self.options = {
            # NOTE(review): implicit string concatenation glues 'count' onto
            # the url template ('...&first=%scount') — this looks like a
            # missing comma / separate 'count' key; confirm intent.
            'url': 'http://www.bing.com/search?q=site%3A%s&first=%s'
            'count'
        }
    def dynamic_main(self, queue_dict):
        """
        Main entry point for process to call.
        core_serialization.SubDomain Attributes:
            name: long name of method
            module_name: name of the module that performed collection
            source: source of the subdomain or resource of collection
            module_version: version from meta
            source: source of the collection
            time: time the result obj was built
            subdomain: subdomain to use
            valid: is domain valid
        :return: NONE
        """
        foundsubdomains = []
        core_args = self.json_entry['args']
        task_output_queue = queue_dict['task_output_queue']
        cs = core_scrub.Scrub()
        start_count = int(self.json_entry['bing_search']['start_count'])
        end_count = int(self.json_entry['bing_search']['end_count'])
        while start_count <= end_count:
            domain = "http://www.bing.com/search?q=site%3A" + \
                str(core_args.DOMAIN) + "&first=" + str(start_count)
            # NOTE(review): HTTP status is unpacked but never checked
            data, status = self.request_content(domain)
            soup = BeautifulSoup(data, 'html.parser')
            for i in soup.find_all('a', href=True):
                possiblesubdomain = i['href']
                if "." + str(core_args.DOMAIN) in possiblesubdomain:
                    parsed = urlparse(possiblesubdomain)
                    if parsed.netloc not in foundsubdomains:
                        foundsubdomains.append(str(parsed.netloc))
                    if parsed.hostname not in foundsubdomains:
                        foundsubdomains.append(str(parsed.hostname))
            # NOTE(review): this loop runs inside the page loop over the full
            # accumulated list, so subdomains found on earlier pages are
            # re-emitted to the queue on every subsequent page — confirm intended.
            for sub in foundsubdomains:
                cs.subdomain = sub
                # check if domain name is valid
                valid = cs.validate_domain()
                # build the SubDomain Object to pass
                sub_obj = core_serialization.SubDomain(
                    self.info["Name"],
                    self.info["Module"],
                    self.options['url'],
                    domain,
                    time.time(),
                    sub,
                    valid
                )
                task_output_queue.put(sub_obj)
            # results inc at rate of 10 per page
            start_count += 10
            time.sleep(0.5)
| import time
from urllib.parse import urlparse
from bs4 import BeautifulSoup
from simplydomain.src import core_serialization
from simplydomain.src import module_helpers
from simplydomain.src import core_scrub
# use RequestsHelpers() class to make requests to target URL
class DynamicModule(module_helpers.RequestsHelpers):
    """
    Dynamic module class that will be loaded and called
    at runtime. This will allow modules to easily be independent of the
    core runtime.
    """
    def __init__(self, json_entry):
        """
        Init class structure. Each module takes a JSON entry object which
        can pass different values to the module with out changing up the API.
        adapted form Empire Project:
        https://github.com/EmpireProject/Empire/blob/master/lib/modules/python_template.py
        :param json_entry: JSON data object passed to the module.
        """
        module_helpers.RequestsHelpers.__init__(self)
        self.json_entry = json_entry
        self.info = {
            # mod name
            'Module': 'bing_search.py',
            # long name of the module to be used
            'Name': 'Bing Subdomain Search',
            # version of the module to be used
            'Version': '1.0',
            # description
            'Description': ['Uses Bing search engine',
                            'with unofficial search engine API support.'],
            # authors or sources to be quoted
            'Authors': ['@Killswitch-GUI', '@ecjx'],
            # list of resources or comments
            'comments': [
                'https://github.com/ejcx/subdomainer/blob/master/subdomainer.py'
            ]
        }
        self.options = {
            # NOTE(review): implicit string concatenation glues 'count' onto
            # the url template ('...&first=%scount') — this looks like a
            # missing comma / separate 'count' key; confirm intent.
            'url': 'http://www.bing.com/search?q=site%3A%s&first=%s'
            'count'
        }
    def dynamic_main(self, queue_dict):
        """
        Main entry point for process to call.
        core_serialization.SubDomain Attributes:
            name: long name of method
            module_name: name of the module that performed collection
            source: source of the subdomain or resource of collection
            module_version: version from meta
            source: source of the collection
            time: time the result obj was built
            subdomain: subdomain to use
            valid: is domain valid
        :return: NONE
        """
        foundsubdomains = []
        core_args = self.json_entry['args']
        task_output_queue = queue_dict['task_output_queue']
        cs = core_scrub.Scrub()
        start_count = int(self.json_entry['bing_search']['start_count'])
        end_count = int(self.json_entry['bing_search']['end_count'])
        while start_count <= end_count:
            domain = "http://www.bing.com/search?q=site%3A" + \
                str(core_args.DOMAIN) + "&first=" + str(start_count)
            # NOTE(review): HTTP status is unpacked but never checked
            data, status = self.request_content(domain)
            soup = BeautifulSoup(data, 'html.parser')
            for i in soup.find_all('a', href=True):
                possiblesubdomain = i['href']
                if "." + str(core_args.DOMAIN) in possiblesubdomain:
                    parsed = urlparse(possiblesubdomain)
                    if parsed.netloc not in foundsubdomains:
                        foundsubdomains.append(str(parsed.netloc))
                    if parsed.hostname not in foundsubdomains:
                        foundsubdomains.append(str(parsed.hostname))
            # NOTE(review): this loop runs inside the page loop over the full
            # accumulated list, so subdomains found on earlier pages are
            # re-emitted to the queue on every subsequent page — confirm intended.
            for sub in foundsubdomains:
                cs.subdomain = sub
                # check if domain name is valid
                valid = cs.validate_domain()
                # build the SubDomain Object to pass
                sub_obj = core_serialization.SubDomain(
                    self.info["Name"],
                    self.info["Module"],
                    self.options['url'],
                    domain,
                    time.time(),
                    sub,
                    valid
                )
                task_output_queue.put(sub_obj)
            # results inc at rate of 10 per page
            start_count += 10
            time.sleep(0.5)
| en | 0.73493 | # use RequestsHelpers() class to make requests to target URL Dynamic module class that will be loaded and called at runtime. This will allow modules to easily be independent of the core runtime. Init class structure. Each module takes a JSON entry object which can pass different values to the module with out changing up the API. adapted form Empire Project: https://github.com/EmpireProject/Empire/blob/master/lib/modules/python_template.py :param json_entry: JSON data object passed to the module. # mod name # long name of the module to be used # version of the module to be used # description # authors or sources to be quoted # list of resources or comments Main entry point for process to call. core_serialization.SubDomain Attributes: name: long name of method module_name: name of the module that performed collection source: source of the subdomain or resource of collection module_version: version from meta source: source of the collection time: time the result obj was built subdomain: subdomain to use valid: is domain valid :return: NONE # check if domain name is valid # build the SubDomain Object to pass # results inc at rate of 10 per page | 2.72999 | 3 |
server/opendp_apps/terms_of_access/tests/test_models.py | mikephelan/opendp-ux | 6 | 6619329 | import os
from django.test import TestCase
from opendp_apps.terms_of_access.models import TermsOfAccess, DifferentTermsOfAccessException
class TestTermsOfAccess(TestCase):
    """Checks that TermsOfAccess.save() validates the description against
    the on-disk HTML template (templates/0.html): matching content saves,
    differing content raises DifferentTermsOfAccessException."""
    def test_save_identical_terms(self):
        """Saving terms whose description matches the template succeeds."""
        toa_count = TermsOfAccess.objects.count()
        toa = TermsOfAccess(
            name='test',
            active=True,
            description='''<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Terms of Access</title>
</head>
<body>
You agree to a variety of things.
</body>
</html>''',
            version='0',
            notes=''
        )
        toa.save(template_path=os.path.dirname(os.path.realpath(__file__)) + f'/templates/0.html')
        self.assertEqual(TermsOfAccess.objects.all().count(), toa_count + 1)
    def test_save_different_formatting(self):
        """Whitespace/formatting-only differences from the template are
        tolerated and the terms still save."""
        toa_count = TermsOfAccess.objects.count()
        toa = TermsOfAccess(
            name='test',
            active=True,
            description='''<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title> Terms of Access </title>
</head><body>
You agree to a variety of things.
</body></html> ''',
            version='0',
            notes=''
        )
        toa.save(template_path=os.path.dirname(os.path.realpath(__file__)) + f'/templates/0.html')
        self.assertEqual(TermsOfAccess.objects.all().count(), toa_count + 1)
    def test_template_mismatch(self):
        """A description with different wording than the template raises
        DifferentTermsOfAccessException on save."""
        toa = TermsOfAccess(
            name='test',
            active=True,
            description='''<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Terms of Access</title>
</head>
<body>
You agree to a bunch of new things.
</body>
</html>''',
            version='0',
            notes=''
        )
        with self.assertRaises(DifferentTermsOfAccessException):
            toa.save(template_path=os.path.dirname(os.path.realpath(__file__)) + f'/templates/0.html')
| import os
from django.test import TestCase
from opendp_apps.terms_of_access.models import TermsOfAccess, DifferentTermsOfAccessException
class TestTermsOfAccess(TestCase):
    """Tests for TermsOfAccess.save() validation against the bundled 0.html template."""

    def test_save_identical_terms(self):
        """A description byte-identical to the on-disk template saves successfully."""
        toa_count = TermsOfAccess.objects.count()
        toa = TermsOfAccess(
            name='test',
            active=True,
            description='''<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Terms of Access</title>
</head>
<body>
You agree to a variety of things.
</body>
</html>''',
            version='0',
            notes=''
        )
        # save() compares the description against the template file at template_path.
        toa.save(template_path=os.path.dirname(os.path.realpath(__file__)) + f'/templates/0.html')
        self.assertEqual(TermsOfAccess.objects.all().count(), toa_count + 1)

    def test_save_different_formatting(self):
        """Whitespace/formatting differences from the template are tolerated by save()."""
        toa_count = TermsOfAccess.objects.count()
        toa = TermsOfAccess(
            name='test',
            active=True,
            description='''<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title> Terms of Access </title>
</head><body>
You agree to a variety of things.
</body></html> ''',
            version='0',
            notes=''
        )
        toa.save(template_path=os.path.dirname(os.path.realpath(__file__)) + f'/templates/0.html')
        self.assertEqual(TermsOfAccess.objects.all().count(), toa_count + 1)

    def test_template_mismatch(self):
        """A description whose wording differs from the template must raise."""
        toa = TermsOfAccess(
            name='test',
            active=True,
            description='''<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Terms of Access</title>
</head>
<body>
You agree to a bunch of new things.
</body>
</html>''',
            version='0',
            notes=''
        )
        with self.assertRaises(DifferentTermsOfAccessException):
            toa.save(template_path=os.path.dirname(os.path.realpath(__file__)) + f'/templates/0.html')
| en | 0.525644 | <!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8"> <title>Terms of Access</title> </head> <body> You agree to a variety of things. </body> </html> <!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8"> <title> Terms of Access </title> </head><body> You agree to a variety of things. </body></html> <!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8"> <title>Terms of Access</title> </head> <body> You agree to a bunch of new things. </body> </html> | 2.371211 | 2 |
helper-scripts/DFW/nsx-dfw-backup-n-restore.py | kgbhat/nsx-t | 32 | 6619330 | #!/usr/bin/env python
# Requires Python 3.x
"""
NSX-T SDK Sample Code
Copyright 2017-2020 VMware, Inc. All rights reserved
The BSD-2 license (the "License") set forth below applies to all
parts of the NSX-T SDK Sample Code project. You may not use this
file except in compliance with the License.
BSD-2 License
Redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following
conditions are met:
Redistributions of source code must retain the above
copyright notice, this list of conditions and the
following disclaimer.
Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other
materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
################################################################################
# Summary: Script to Back and restore NSX DFW policy, rules, groups, Services and context-profiles.
# Usage: python nsx-dfw-backup-n-restore.py [-h] --nsx-mgr-ip IP --operation OPERATION
# [--user USER] [--password PASSWORD]
# [--backupfileprefix BACKUPFILEPREFIX]
# Caveat: Prior to 3.1 Services Restore will fail with this script due to
# https://bugzilla.eng.vmware.com/show_bug.cgi?id=2616308
# If you do not have user configured service then you are good and
# can comment out the restore service function.
# ##############################################################################
import requests
from requests.auth import HTTPBasicAuth
import json
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import argparse
import re
################################################################################
### Define Arguments for the script.
################################################################################
# Build the CLI: the manager IP and the operation (backup|restore) are mandatory;
# credentials and the backup-file prefix have defaults.
parser = argparse.ArgumentParser(description='NSX DFW Policy Backup & Restore- DFW Policies, Groups, Services & Profiles ')
parser.add_argument('--nsx-mgr-ip', dest="ip",
                    help="NSX Manager IP", required=True)
parser.add_argument('--operation', dest="operation",
                    help="What operation - backup or restore", required=True)
parser.add_argument('--user', dest="user",
                    help="NSX Username, default: admin",
                    default="admin", required=False)
parser.add_argument('--password', dest="password",
                    help="NSX Password, default: <PASSWORD>",
                    default="<PASSWORD>", required=False)
# Prefix for the three generated JSON files: <prefix>-services-bkup.json,
# <prefix>-context-profiles-bkup.json, <prefix>-policy-n-group-bkup.json.
parser.add_argument('--backupfileprefix', dest="backupfileprefix",
                    help="Prefix backup file with- default nsx-dfw-<object-type>.json",
                    default="nsx", required=False)
# Parsed once at import time; rest_api_call() uses these as default arguments.
args = parser.parse_args()
################################################################################
### REST API function using python "requests" module
################################################################################
def rest_api_call(method, endpoint, data=None, ip=args.ip, user=args.user, password=args.password):
    """Issue an HTTPS REST request to the NSX Manager and return the parsed JSON body.

    :param method: HTTP verb, e.g. 'GET' or 'PATCH'.
    :param endpoint: URL path starting with '/', appended to https://<ip>.
    :param data: optional request body (JSON string).
    :param ip/user/password: default to the parsed CLI arguments; note these
        defaults are evaluated once at import time.
    :returns: decoded JSON response as a dict, or None if the body is empty.
    :raises requests.exceptions.HTTPError: on any 4xx/5xx status.
    """
    url = "https://%s%s" % (ip, endpoint)
    # Suppress the self-signed-certificate warning; verification is disabled below.
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
    headers = {'Content-Type': 'application/json'}
    res = requests.request(
        method=method,
        url=url,
        auth=HTTPBasicAuth(user, password),
        headers=headers,
        data=data,
        verify=False,
    )
    # Let HTTPError propagate directly; the original caught it only to
    # re-raise the same exception, which added nothing.
    res.raise_for_status()
    # Some calls (e.g. PATCH) return an empty body; return None explicitly
    # instead of leaving the result to an unbound/implicit fall-through.
    if res.content:
        return res.json()
    return None
################################################################################
### Backup NSX DFW DFW L4 Services
################################################################################
def backup_nsx_dfw_services(backupfileprefix):
    """Back up all NSX DFW L4 Service objects to <prefix>-services-bkup.json.

    :param backupfileprefix: prefix for the generated backup file name.
    """
    backupfile = backupfileprefix + '-services-bkup.json'
    # Ask the Policy API for every Service object in a single call.
    endpoint = "/policy/api/v1/infra?filter=Type-Service"
    res = rest_api_call(method='GET', endpoint=endpoint)
    # Persist the response dict as JSON; indent=4 keeps the file human-readable.
    with open(backupfile, 'w') as bkdata:
        json.dump(res, bkdata, indent=4)
    # (The original re-opened and read the file here without using the
    # contents — dead code, removed.)
    print("\n NSX DFW L4 services Backup saved as [%s]" % backupfile)
################################################################################
### Backup NSX DFW L7 Profiles
################################################################################
def backup_nsx_dfw_context_profiles(backupfileprefix):
    """Back up all NSX DFW L7 context-profiles to <prefix>-context-profiles-bkup.json.

    :param backupfileprefix: prefix for the generated backup file name.
    """
    backupfile = backupfileprefix + '-context-profiles-bkup.json'
    # Ask the Policy API for every ContextProfile object in a single call.
    endpoint = "/policy/api/v1/infra?filter=Type-ContextProfile"
    res = rest_api_call(method='GET', endpoint=endpoint)
    # Persist the response dict as JSON; indent=4 keeps the file human-readable.
    with open(backupfile, 'w') as bkdata:
        json.dump(res, bkdata, indent=4)
    # (The original re-opened and read the file here without using the
    # contents — dead code, removed.)
    print("\n NSX DFW L7 context-profiles Backup saved as [%s]" % backupfile)
################################################################################
### Backup NSX DFW Policy, Rules with GROUPS.
################################################################################
def backup_nsx_dfw_policy_n_group(backupfileprefix):
    """Back up DFW security policies, rules and groups to <prefix>-policy-n-group-bkup.json.

    Also reports a count of policies/rules/groups found in the backup by
    scanning the saved JSON for their type markers.

    :param backupfileprefix: prefix for the generated backup file name.
    """
    backupfile = backupfileprefix + '-policy-n-group-bkup.json'
    # One hierarchical API call fetches Domain, SecurityPolicy, Rule and Group.
    endpoint = "/policy/api/v1/infra?filter=Type-Domain|SecurityPolicy|Rule|Group"
    res = rest_api_call(method='GET', endpoint=endpoint)
    # Persist the response dict as JSON; indent=4 keeps the file human-readable.
    with open(backupfile, 'w') as bkdata:
        json.dump(res, bkdata, indent=4)
    # Count objects by scanning the saved file line-by-line for the markers:
    #   "ChildSecurityPolicy" -> policies, "rule_id" -> rules, "ChildGroup" -> groups.
    # Matching stays word-based (pattern contained in a whitespace-split token),
    # preserving the original counting semantics.
    pcount, rcount, gcount = 0, 0, 0
    with open(backupfile, "r") as f:
        for line in f:
            words = line.strip().lower().split()
            pcount += sum(1 for w in words if 'childsecuritypolicy' in w)
            rcount += sum(1 for w in words if 'rule_id' in w)
            gcount += sum(1 for w in words if 'childgroup' in w)
    print("\n NSX DFW Policy & Group Backup saved as [%s]" % backupfile)
    print("\n NSX DFW Backup has %s Policy, %s Rules, %s Group\n" % (pcount, rcount, gcount))
################################################################################
### Restore NSX DFW L4 Services
################################################################################
def restore_nsx_dfw_services(backupfileprefix):
    """Restore NSX DFW L4 Services from <prefix>-services-bkup.json via one PATCH.

    :param backupfileprefix: prefix of the backup file created by
        backup_nsx_dfw_services().
    """
    backupfile = backupfileprefix + '-services-bkup.json'
    # Read the JSON backup back into a python dict.
    with open(backupfile, 'r') as bkdata:
        backup_data = json.load(bkdata)
    # A single hierarchical PATCH on /infra restores the whole config.
    endpoint = "/policy/api/v1/infra"
    # The request body must be a JSON string.
    body = json.dumps(backup_data)
    try:
        rest_api_call(method='PATCH', endpoint=endpoint, data=body)
        print("\n SUCCESS - NSX DFW L4 Services")
    except Exception as ex:
        # NSX error bodies are JSON with an "error_message" field, but a
        # connection failure has no response and the body may not be JSON —
        # fall back to the exception text instead of crashing the handler.
        response = getattr(ex, 'response', None)
        try:
            err_msg = json.loads(response.content)["error_message"]
        except (AttributeError, TypeError, ValueError, KeyError):
            err_msg = str(ex)
        print("\n FAILURE - NSX DFW L4 Services with error: [%s]\n" % (err_msg))
################################################################################
### Restore NSX DFW L7 Context-Profile
################################################################################
def restore_nsx_dfw_context_profiles(backupfileprefix):
    """Restore NSX DFW L7 context-profiles from <prefix>-context-profiles-bkup.json.

    :param backupfileprefix: prefix of the backup file created by
        backup_nsx_dfw_context_profiles().
    """
    backupfile = backupfileprefix + '-context-profiles-bkup.json'
    # Read the JSON backup back into a python dict.
    with open(backupfile, 'r') as bkdata:
        backup_data = json.load(bkdata)
    # A single hierarchical PATCH on /infra restores the whole config.
    endpoint = "/policy/api/v1/infra"
    # The request body must be a JSON string.
    body = json.dumps(backup_data)
    try:
        rest_api_call(method='PATCH', endpoint=endpoint, data=body)
        print("\n SUCCESS - NSX DFW L7 Services Restore")
    except Exception as ex:
        # NSX error bodies are JSON with an "error_message" field, but a
        # connection failure has no response and the body may not be JSON —
        # fall back to the exception text instead of crashing the handler.
        response = getattr(ex, 'response', None)
        try:
            err_msg = json.loads(response.content)["error_message"]
        except (AttributeError, TypeError, ValueError, KeyError):
            err_msg = str(ex)
        print("\n FAILURE - NSX DFW L7 Services Restore with error: [%s]\n" % (err_msg))
################################################################################
### Restore NSX DFW Policy, Rules with GROUPS.
################################################################################
def restore_nsx_dfw_policy_n_group(backupfileprefix):
    """Restore DFW security policies, rules and groups from <prefix>-policy-n-group-bkup.json.

    Reports a count of policies/rules/groups found in the backup file, then
    restores everything with a single hierarchical PATCH on /infra.

    :param backupfileprefix: prefix of the backup file created by
        backup_nsx_dfw_policy_n_group().
    """
    backupfile = backupfileprefix + '-policy-n-group-bkup.json'
    # Read the JSON backup back into a python dict.
    with open(backupfile, 'r') as bkdata:
        backup_data = json.load(bkdata)
    endpoint = "/policy/api/v1/infra"
    # The request body must be a JSON string.
    body = json.dumps(backup_data)
    # Count objects by scanning the backup file line-by-line for the markers:
    #   "ChildSecurityPolicy" -> policies, "rule_id" -> rules, "ChildGroup" -> groups.
    # Matching stays word-based, preserving the original counting semantics.
    pcount, rcount, gcount = 0, 0, 0
    with open(backupfile, "r") as f:
        for line in f:
            words = line.strip().lower().split()
            pcount += sum(1 for w in words if 'childsecuritypolicy' in w)
            rcount += sum(1 for w in words if 'rule_id' in w)
            gcount += sum(1 for w in words if 'childgroup' in w)
    try:
        rest_api_call(method='PATCH', endpoint=endpoint, data=body)
        print("\n SUCCESS - NSX DFW Policy & Group Restore: %s Policy, %s Rules, %s Group\n" % (pcount, rcount, gcount))
    except Exception as ex:
        # NSX error bodies are JSON with an "error_message" field, but a
        # connection failure has no response and the body may not be JSON —
        # fall back to the exception text instead of crashing the handler.
        response = getattr(ex, 'response', None)
        try:
            err_msg = json.loads(response.content)["error_message"]
        except (AttributeError, TypeError, ValueError, KeyError):
            err_msg = str(ex)
        print("\n FAILURE - NSX DFW Policy & Group Restore with error: [%s]\n" % (err_msg))
################################################################################
### Run "backup" or "restore" DFW policy backup based on user input to "--operation"
################################################################################
if __name__ == "__main__":
    # Substring match on the CLI value: any operation containing "backup"
    # and/or "restore" triggers the corresponding phase.
    if "backup" in args.operation:
        backup_nsx_dfw_services(args.backupfileprefix)
        backup_nsx_dfw_context_profiles(args.backupfileprefix)
        backup_nsx_dfw_policy_n_group(args.backupfileprefix)
    if "restore" in args.operation:
        # Prior to 3.1: Policy API bug with Service Patch call https://bugzilla.eng.vmware.com/show_bug.cgi?id=2616308
        #If you do not have user configured service then you are good and can disable the function.
        restore_nsx_dfw_services(args.backupfileprefix)
        restore_nsx_dfw_context_profiles(args.backupfileprefix)
        restore_nsx_dfw_policy_n_group(args.backupfileprefix)
"""
Sample Script output:
Backup:
bhatg@bhatg-a02 DFW % python nsx-dfw-backup-n-restore.py --nsx-mgr-ip 10.110.57.244 --operation backup
NSX DFW L4 services Backup saved as [nsx-services-bkup.json]
NSX DFW L7 context-profiles Backup saved as [nsx-context-profiles-bkup.json]
NSX DFW Policy & Group Backup saved as [nsx-policy-n-group-bkup.json]
NSX DFW Backup has 6 Policy, 37 Rules, 3 Group
bhatg@bhatg-a02 DFW %
Restore:
bhatg@bhatg-a02 DFW % python nsx-dfw-backup-n-restore.py --nsx-mgr-ip 10.110.57.244 --operation restore
SUCCESS - NSX DFW L4 Services
SUCCESS - NSX DFW L7 Services Restore
SUCCESS - NSX DFW Policy & Group Restore: 6 Policy, 37 Rules, 3 Group
bhatg@bhatg-a02 DFW %
"""
| #!/usr/bin/env python
# Requires Python 3.x
"""
NSX-T SDK Sample Code
Copyright 2017-2020 VMware, Inc. All rights reserved
The BSD-2 license (the "License") set forth below applies to all
parts of the NSX-T SDK Sample Code project. You may not use this
file except in compliance with the License.
BSD-2 License
Redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following
conditions are met:
Redistributions of source code must retain the above
copyright notice, this list of conditions and the
following disclaimer.
Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other
materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
################################################################################
# Summary: Script to Back and restore NSX DFW policy, rules, groups, Services and context-profiles.
# Usage: python nsx-dfw-backup-n-restore.py [-h] --nsx-mgr-ip IP --operation OPERATION
# [--user USER] [--password PASSWORD]
# [--backupfileprefix BACKUPFILEPREFIX]
# Caveat: Prior to 3.1 Services Restore will fail with this script due to
# https://bugzilla.eng.vmware.com/show_bug.cgi?id=2616308
# If you do not have user configured service then you are good and
# can comment out the restore service function.
# ##############################################################################
import requests
from requests.auth import HTTPBasicAuth
import json
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import argparse
import re
################################################################################
### Define Arguments for the script.
################################################################################
# Build the CLI: the manager IP and the operation (backup|restore) are mandatory;
# credentials and the backup-file prefix have defaults.
parser = argparse.ArgumentParser(description='NSX DFW Policy Backup & Restore- DFW Policies, Groups, Services & Profiles ')
parser.add_argument('--nsx-mgr-ip', dest="ip",
                    help="NSX Manager IP", required=True)
parser.add_argument('--operation', dest="operation",
                    help="What operation - backup or restore", required=True)
parser.add_argument('--user', dest="user",
                    help="NSX Username, default: admin",
                    default="admin", required=False)
parser.add_argument('--password', dest="password",
                    help="NSX Password, default: <PASSWORD>",
                    default="<PASSWORD>", required=False)
parser.add_argument('--backupfileprefix', dest="backupfileprefix",
                    help="Prefix backup file with- default nsx-dfw-<object-type>.json",
                    default="nsx", required=False)
# Parsed once at import time; rest_api_call() uses these as default arguments.
args = parser.parse_args()
################################################################################
### REST API function using python "requests" module
################################################################################
def rest_api_call (method, endpoint, data=None, ip=args.ip, user=args.user, password=args.password):
    """Issue an HTTPS REST request to the NSX Manager and return the JSON body.

    NOTE(review): ip/user/password defaults are evaluated once at import time
    from the parsed CLI arguments. Returns None when the response body is empty.
    """
    url = "https://%s%s" % (ip, endpoint)
    # To remove ssl-warnings bug. even with cert verification is set as false
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
    headers = {'Content-Type': 'application/json'}
    res = requests.request(
        method=method,
        url=url,
        auth=HTTPBasicAuth(user, password),
        headers=headers,
        data=data,
        verify=False
    )
    try:
        # Raises requests.exceptions.HTTPError for 4xx/5xx responses.
        res.raise_for_status()
    except requests.exceptions.HTTPError as e:
        raise e
    if len(res.content) > 0:
        response = res.json()
        return response
################################################################################
### Backup NSX DFW DFW L4 Services
################################################################################
def backup_nsx_dfw_services(backupfileprefix):
    """Back up all NSX DFW L4 Service objects to <prefix>-services-bkup.json."""
    backupfile = (backupfileprefix+'-services-bkup.json')
    # Send API Request to NSX Manager to get DFW Policy inventory
    #endpoint = "/policy/api/v1/infra?base_path=/infra/domains/default&type_filter=SecurityPolicy;Group"
    endpoint = "/policy/api/v1/infra?filter=Type-Service"
    res = rest_api_call(method= 'GET', endpoint = endpoint)
    with open(backupfile, 'w') as bkdata:
        # Save the resonse dictionary in python to a json file.
        # Use option indent to save json in more readable format
        json.dump(res, bkdata, indent=4)
    # To Count number of Security Policy, Rules & Groups
    # Open DFW backup file
    # NOTE(review): the lines read below are never used (dead code).
    f = open(backupfile, "r")
    lines = f.readlines()
    f.close()
    print("\n NSX DFW L4 services Backup saved as [%s]" % backupfile)
################################################################################
### Backup NSX DFW L7 Profiles
################################################################################
def backup_nsx_dfw_context_profiles(backupfileprefix):
    """Back up all NSX DFW L7 context-profiles to <prefix>-context-profiles-bkup.json."""
    backupfile = (backupfileprefix+'-context-profiles-bkup.json')
    # Send API Request to NSX Manager to get DFW Policy inventory
    #endpoint = "/policy/api/v1/infra?base_path=/infra/domains/default&type_filter=SecurityPolicy;Group"
    endpoint = "/policy/api/v1/infra?filter=Type-ContextProfile"
    res = rest_api_call(method= 'GET', endpoint = endpoint)
    with open(backupfile, 'w') as bkdata:
        # Save the resonse dictionary in python to a json file.
        # Use option indent to save json in more readable format
        json.dump(res, bkdata, indent=4)
    # To Count number of Security Policy, Rules & Groups
    # Open DFW backup file
    # NOTE(review): the lines read below are never used (dead code).
    f = open(backupfile, "r")
    lines = f.readlines()
    f.close()
    print("\n NSX DFW L7 context-profiles Backup saved as [%s]" % backupfile)
################################################################################
### Backup NSX DFW Policy, Rules with GROUPS.
################################################################################
def backup_nsx_dfw_policy_n_group(backupfileprefix):
    """Back up DFW policies, rules and groups to <prefix>-policy-n-group-bkup.json and report counts."""
    backupfile = (backupfileprefix+'-policy-n-group-bkup.json')
    # Send API Request to NSX Manager to get DFW Policy inventory
    #endpoint = "/policy/api/v1/infra?base_path=/infra/domains/default&type_filter=SecurityPolicy;Group"
    endpoint = "/policy/api/v1/infra?filter=Type-Domain|SecurityPolicy|Rule|Group"
    res = rest_api_call(method= 'GET', endpoint = endpoint)
    with open(backupfile, 'w') as bkdata:
        # Save the resonse dictionary in python to a json file.
        # Use option indent to save json in more readable format
        json.dump(res, bkdata, indent=4)
    # To Count number of Security Policy, Rules & Groups
    # Open DFW backup file
    f = open(backupfile, "r")
    lines = f.readlines()
    f.close()
    # Count pattern "ChildSecurityPolicy" for Total Policy Count
    search_for_policy = 'ChildSecurityPolicy'
    # Count pattern "Rule_id for Total Policy Count
    search_for_rule = 'rule_id'
    # Count pattern "ChildGroup" for Total Policy Count
    search_for_group = 'ChildGroup'
    # Intialize counter variable
    pcount, rcount, gcount = 0, 0, 0
    # Word-based scan: a counter increments once per whitespace-split token
    # that contains the corresponding pattern.
    for line in lines:
        line = line.strip().lower().split()
        for words in line:
            if words.find(search_for_policy.lower()) != -1:
                pcount +=1
        for words in line:
            if words.find(search_for_rule.lower()) != -1:
                rcount +=1
        for words in line:
            if words.find(search_for_group.lower()) != -1:
                gcount +=1
    print("\n NSX DFW Policy & Group Backup saved as [%s]" % backupfile)
    print("\n NSX DFW Backup has %s Policy, %s Rules, %s Group\n" % (pcount, rcount, gcount))
################################################################################
### Restore NSX DFW L4 Services
################################################################################
def restore_nsx_dfw_services(backupfileprefix):
    """Restore NSX DFW L4 Services from <prefix>-services-bkup.json via one PATCH."""
    backupfile = (backupfileprefix+'-services-bkup.json')
    # 'Read' JSON encoded data from backup file and convert to python dict
    with open(backupfile, 'r') as bkdata:
        backup_data = json.load(bkdata)
    # NSX API to send entire L4 Services config in one PATCH call with backupfile as body
    endpoint = "/policy/api/v1/infra"
    # Convert body to JSON string, as module needs the body as string.
    body = json.dumps(backup_data)
    try:
        rest_api_call(method='PATCH', endpoint = endpoint, data=body)
        print("\n SUCCESS - NSX DFW L4 Services")
    except Exception as ex:
        # NOTE(review): assumes ex has a .response with a JSON body — a
        # connection error would crash here; confirm before relying on it.
        err_res_cont = json.loads(ex.response.content)
        # Grep error_message to identify issue
        err_msg = err_res_cont["error_message"]
        print("\n FAILURE - NSX DFW L4 Services with error: [%s]\n" %(err_msg))
################################################################################
### Restore NSX DFW L7 Context-Profile
################################################################################
def restore_nsx_dfw_context_profiles(backupfileprefix):
    """Restore NSX DFW L7 context-profiles from <prefix>-context-profiles-bkup.json."""
    backupfile = (backupfileprefix+'-context-profiles-bkup.json')
    # 'Read' JSON encoded data from backup file and convert to python dict
    with open(backupfile, 'r') as bkdata:
        backup_data = json.load(bkdata)
    # NSX API to send entire L4 Services config in one PATCH call with backupfile as body
    endpoint = "/policy/api/v1/infra"
    # Convert body to JSON string, as module needs the body as string.
    body = json.dumps(backup_data)
    try:
        rest_api_call(method='PATCH', endpoint = endpoint, data=body)
        print("\n SUCCESS - NSX DFW L7 Services Restore")
    except Exception as ex:
        # NOTE(review): assumes ex has a .response with a JSON body — a
        # connection error would crash here; confirm before relying on it.
        err_res_cont = json.loads(ex.response.content)
        # Grep error_message to identify issue
        err_msg = err_res_cont["error_message"]
        print("\n FAILURE - NSX DFW L7 Services Restore with error: [%s]\n" %(err_msg))
################################################################################
### Restore NSX DFW Policy, Rules with GROUPS.
################################################################################
def restore_nsx_dfw_policy_n_group(backupfileprefix):
    """Restore DFW policies, rules and groups from <prefix>-policy-n-group-bkup.json and report counts."""
    backupfile = (backupfileprefix+'-policy-n-group-bkup.json')
    # 'Read' JSON encoded data from backup file and convert to python dict
    with open(backupfile, 'r') as bkdata:
        backup_data = json.load(bkdata)
    # NSX API to send entire Policy And Group config in one PATCH call with backupfile as body
    endpoint = "/policy/api/v1/infra"
    # Convert body to JSON string, as module needs the body as string.
    body = json.dumps(backup_data)
    # To Count number of Security Policy, Rules & Groups
    # Open DFW backup file
    f = open(backupfile, "r")
    lines = f.readlines()
    f.close()
    # Count pattern "ChildSecurityPolicy" for Total Policy Count
    search_for_policy = 'ChildSecurityPolicy'
    # Count pattern "Rule_id for Total Policy Count
    search_for_rule = 'rule_id'
    # Count pattern "ChildGroup" for Total Policy Count
    search_for_group = 'ChildGroup'
    # Intialize counter variable
    pcount, rcount, gcount = 0, 0, 0
    # Word-based scan: a counter increments once per whitespace-split token
    # that contains the corresponding pattern.
    for line in lines:
        line = line.strip().lower().split()
        for words in line:
            if words.find(search_for_policy.lower()) != -1:
                pcount +=1
        for words in line:
            if words.find(search_for_rule.lower()) != -1:
                rcount +=1
        for words in line:
            if words.find(search_for_group.lower()) != -1:
                gcount +=1
    try:
        rest_api_call(method='PATCH', endpoint = endpoint, data=body)
        print("\n SUCCESS - NSX DFW Policy & Group Restore: %s Policy, %s Rules, %s Group\n" % (pcount, rcount, gcount))
    except Exception as ex:
        # NOTE(review): assumes ex has a .response with a JSON body — a
        # connection error would crash here; confirm before relying on it.
        err_res_cont = json.loads(ex.response.content)
        # Grep error_message to identify issue
        err_msg = err_res_cont["error_message"]
        print("\n FAILURE - NSX DFW Policy & Group Restore with error: [%s]\n" %(err_msg))
################################################################################
### Run "backup" or "restore" DFW policy backup based on user input to "--operation"
################################################################################
if __name__ == "__main__":
    # Substring match on the CLI value: any operation containing "backup"
    # and/or "restore" triggers the corresponding phase.
    if "backup" in args.operation:
        backup_nsx_dfw_services(args.backupfileprefix)
        backup_nsx_dfw_context_profiles(args.backupfileprefix)
        backup_nsx_dfw_policy_n_group(args.backupfileprefix)
    if "restore" in args.operation:
        # Prior to 3.1: Policy API bug with Service Patch call https://bugzilla.eng.vmware.com/show_bug.cgi?id=2616308
        #If you do not have user configured service then you are good and can disable the function.
        restore_nsx_dfw_services(args.backupfileprefix)
        restore_nsx_dfw_context_profiles(args.backupfileprefix)
        restore_nsx_dfw_policy_n_group(args.backupfileprefix)
"""
Sample Script output:
Backup:
bhatg@bhatg-a02 DFW % python nsx-dfw-backup-n-restore.py --nsx-mgr-ip 10.110.57.244 --operation backup
NSX DFW L4 services Backup saved as [nsx-services-bkup.json]
NSX DFW L7 context-profiles Backup saved as [nsx-context-profiles-bkup.json]
NSX DFW Policy & Group Backup saved as [nsx-policy-n-group-bkup.json]
NSX DFW Backup has 6 Policy, 37 Rules, 3 Group
bhatg@bhatg-a02 DFW %
Restore:
bhatg@bhatg-a02 DFW % python nsx-dfw-backup-n-restore.py --nsx-mgr-ip 10.110.57.244 --operation restore
SUCCESS - NSX DFW L4 Services
SUCCESS - NSX DFW L7 Services Restore
SUCCESS - NSX DFW Policy & Group Restore: 6 Policy, 37 Rules, 3 Group
bhatg@bhatg-a02 DFW %
"""
| en | 0.396831 | #!/usr/bin/env python # Requires Python 3.x NSX-T SDK Sample Code Copyright 2017-2020 VMware, Inc. All rights reserved The BSD-2 license (the "License") set forth below applies to all parts of the NSX-T SDK Sample Code project. You may not use this file except in compliance with the License. BSD-2 License Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ################################################################################ # Summary: Script to Back and restore NSX DFW policy, rules, groups, Services and context-profiles. 
# Usage: python nsx-dfw-backup-n-restore.py [-h] --nsx-mgr-ip IP --operation OPERATION # [--user USER] [--password PASSWORD] # [--backupfileprefix BACKUPFILEPREFIX] # Caveat: Prior to 3.1 Services Restore will fail with this script due to # https://bugzilla.eng.vmware.com/show_bug.cgi?id=2616308 # If you do not have user configured service then you are good and # can comment out the restore service function. # ############################################################################## ################################################################################ ### Define Arguments for the script. ################################################################################ ################################################################################ ### REST API function using python "requests" module ################################################################################ # To remove ssl-warnings bug. even with cert verification is set as false ################################################################################ ### Backup NSX DFW DFW L4 Services ################################################################################ # Send API Request to NSX Manager to get DFW Policy inventory #endpoint = "/policy/api/v1/infra?base_path=/infra/domains/default&type_filter=SecurityPolicy;Group" # Save the resonse dictionary in python to a json file. # Use option indent to save json in more readable format # To Count number of Security Policy, Rules & Groups # Open DFW backup file ################################################################################ ### Backup NSX DFW L7 Profiles ################################################################################ # Send API Request to NSX Manager to get DFW Policy inventory #endpoint = "/policy/api/v1/infra?base_path=/infra/domains/default&type_filter=SecurityPolicy;Group" # Save the resonse dictionary in python to a json file. 
# Use option indent to save json in more readable format # To Count number of Security Policy, Rules & Groups # Open DFW backup file ################################################################################ ### Backup NSX DFW Policy, Rules with GROUPS. ################################################################################ # Send API Request to NSX Manager to get DFW Policy inventory #endpoint = "/policy/api/v1/infra?base_path=/infra/domains/default&type_filter=SecurityPolicy;Group" # Save the resonse dictionary in python to a json file. # Use option indent to save json in more readable format # To Count number of Security Policy, Rules & Groups # Open DFW backup file # Count pattern "ChildSecurityPolicy" for Total Policy Count # Count pattern "Rule_id for Total Policy Count # Count pattern "ChildGroup" for Total Policy Count # Intialize counter variable ################################################################################ ### Restore NSX DFW L4 Services ################################################################################ # 'Read' JSON encoded data from backup file and convert to python dict # NSX API to send entire L4 Services config in one PATCH call with backupfile as body # Convert body to JSON string, as module needs the body as string. # Grep error_message to identify issue ################################################################################ ### Restore NSX DFW L7 Context-Profile ################################################################################ # 'Read' JSON encoded data from backup file and convert to python dict # NSX API to send entire L4 Services config in one PATCH call with backupfile as body # Convert body to JSON string, as module needs the body as string. # Grep error_message to identify issue ################################################################################ ### Restore NSX DFW Policy, Rules with GROUPS. 
################################################################################ # 'Read' JSON encoded data from backup file and convert to python dict # NSX API to send entire Policy And Group config in one PATCH call with backupfile as body # Convert body to JSON string, as module needs the body as string. # To Count number of Security Policy, Rules & Groups # Open DFW backup file # Count pattern "ChildSecurityPolicy" for Total Policy Count # Count pattern "Rule_id for Total Policy Count # Count pattern "ChildGroup" for Total Policy Count # Intialize counter variable # Grep error_message to identify issue ################################################################################ ### Run "backup" or "restore" DFW policy backup based on user input to "--operation" ################################################################################ # Prior to 3.1: Policy API bug with Service Patch call https://bugzilla.eng.vmware.com/show_bug.cgi?id=2616308 #If you do not have user configured service then you are good and can disable the function. Sample Script output: Backup: bhatg@bhatg-a02 DFW % python nsx-dfw-backup-n-restore.py --nsx-mgr-ip 10.110.57.244 --operation backup NSX DFW L4 services Backup saved as [nsx-services-bkup.json] NSX DFW L7 context-profiles Backup saved as [nsx-context-profiles-bkup.json] NSX DFW Policy & Group Backup saved as [nsx-policy-n-group-bkup.json] NSX DFW Backup has 6 Policy, 37 Rules, 3 Group bhatg@bhatg-a02 DFW % Restore: bhatg@bhatg-a02 DFW % python nsx-dfw-backup-n-restore.py --nsx-mgr-ip 10.110.57.244 --operation restore SUCCESS - NSX DFW L4 Services SUCCESS - NSX DFW L7 Services Restore SUCCESS - NSX DFW Policy & Group Restore: 6 Policy, 37 Rules, 3 Group bhatg@bhatg-a02 DFW % | 1.368415 | 1 |
tests/functional_tests/test_modal_endpoint.py | samukasmk/grupy-flask-jenkins | 2 | 6619331 | import pytest
from flask import url_for
@pytest.mark.usefixtures('live_server')
@pytest.mark.nondestructive
def test_render_modal_title(selenium):
    """Smoke test: the live 'modal' page must contain a #modal-title element."""
    modal_url = url_for('modal', _external=True)
    selenium.get(modal_url)
    # find_element_by_id raises NoSuchElementException when the element
    # is missing, which fails the test.
    selenium.find_element_by_id('modal-title')
| import pytest
from flask import url_for
@pytest.mark.usefixtures('live_server')
@pytest.mark.nondestructive
def test_render_modal_title(selenium):
    # Smoke test: the page served for the 'modal' route by the live server
    # must contain an element with id "modal-title"; find_element_by_id
    # raises (failing the test) if it is absent.
    selenium.get(url_for('modal', _external=True))
    selenium.find_element_by_id('modal-title')
| none | 1 | 2.164117 | 2 | |
api/views.py | danielchikara/pokemon_in_hom | 0 | 6619332 | from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework import permissions
from api.serializers import *
from django.views.decorators.csrf import csrf_exempt
from api.models import *
from django.utils.decorators import method_decorator
# Create your views here.
@method_decorator(csrf_exempt, name='dispatch')
class RegisterClientView(APIView):
    """Register a new client account.
    POST body is validated by ClientRegisterSerializer; replies with
    {"success": true} and HTTP 201, or {"success": false} and HTTP 400.
    """
    def post(self, request):
        serializer = ClientRegisterSerializer(data=request.data)
        # raise_exception=True turns invalid input into a DRF 400 response
        # before any of the code below runs.
        serializer.is_valid(raise_exception=True)
        user = serializer.validated_data['user']
        success = False
        code = 400
        # NOTE(review): the serializer is project code not visible here --
        # presumably it places the created user in validated_data['user'].
        # Confirm a falsy value is actually possible after is_valid()
        # succeeded; otherwise the 400 branch is dead.
        if user:
            success = True
            code = 201
        return Response({"success": success}, status=code)
# Inicio de sesion y creación de token
@method_decorator(csrf_exempt, name='dispatch')
class LoginView(APIView):
    """Log a user in and hand out (or reuse) their DRF auth token."""
    def post(self, request):
        serializer = LoginSerializer(data=request.data)
        # Invalid credentials raise here and become a DRF 400 response.
        serializer.is_valid(raise_exception=True)
        user = serializer.validated_data['user']
        if user:
            # get_or_create keeps a single long-lived token per user.
            token, created = Token.objects.get_or_create(user=user)
            serializer_user = UserClientSerializer(user)
            return Response({"token": token.key, "user": serializer_user.data}, status=200)
        # NOTE(review): when `user` is falsy this method implicitly returns
        # None, which Django reports as a server error -- confirm the
        # serializer guarantees a truthy user, or add an error response.
# eliminación de token
@method_decorator(csrf_exempt, name='dispatch')
class LogoutView(APIView):
    """Log the authenticated user out by deleting their auth token."""
    def post(self, request):
        # Deleting the token invalidates every client that holds it.
        request.user.auth_token.delete()
        return Response(status=204)
# Creación de pokemon recibe todos los parametros del modelo Pokemon
@method_decorator(csrf_exempt, name='dispatch')
class CreatePokemonView(generics.CreateAPIView):
    """POST endpoint creating a Pokemon from the model serializer's fields."""
    serializer_class = PokemonSerializer
# La lista de pokemon puede recibir un parametro de filtrado por elemento
@method_decorator(csrf_exempt, name='dispatch')
class ListPokemonView(generics.ListAPIView):
    """GET endpoint listing pokemons, optionally filtered by element.
    ?id_element=<id> restricts the result to that element; results are
    always ordered by primary key.
    """
    serializer_class = PokemonSerializer
    def get_queryset(self):
        element_id = self.request.query_params.get('id_element')
        queryset = Pokemon.objects.all()
        if element_id is not None:
            queryset = queryset.filter(id_element=element_id)
        return queryset.order_by('id')
# Actualización de pokemon recibe los mismo parametros de el create
@method_decorator(csrf_exempt, name='dispatch')
class UpdatePokemonView(generics.UpdateAPIView):
    """PUT/PATCH endpoint updating a Pokemon selected by primary key."""
    serializer_class = PokemonSerializer
    queryset = Pokemon.objects.all()
# Eliminación de pokemon recibe el id del pokemon a eliminar
@method_decorator(csrf_exempt, name='dispatch')
class DeletePokemonView(generics.DestroyAPIView):
    """DELETE endpoint removing a Pokemon selected by primary key."""
    serializer_class = PokemonSerializer
queryset = Pokemon.objects.all() | from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework import permissions
from api.serializers import *
from django.views.decorators.csrf import csrf_exempt
from api.models import *
from django.utils.decorators import method_decorator
# Create your views here.
@method_decorator(csrf_exempt, name='dispatch')
class RegisterClientView(APIView):
    """Register a new client account.
    POST body is validated by ClientRegisterSerializer; replies with
    {"success": true} and HTTP 201, or {"success": false} and HTTP 400.
    """
    def post(self, request):
        serializer = ClientRegisterSerializer(data=request.data)
        # raise_exception=True turns invalid input into a DRF 400 response
        # before any of the code below runs.
        serializer.is_valid(raise_exception=True)
        user = serializer.validated_data['user']
        success = False
        code = 400
        # NOTE(review): confirm a falsy `user` is actually possible after
        # is_valid() succeeded; otherwise the 400 branch is dead.
        if user:
            success = True
            code = 201
        return Response({"success": success}, status=code)
# Inicio de sesion y creación de token
@method_decorator(csrf_exempt, name='dispatch')
class LoginView(APIView):
    """Log a user in and hand out (or reuse) their DRF auth token."""
    def post(self, request):
        serializer = LoginSerializer(data=request.data)
        # Invalid credentials raise here and become a DRF 400 response.
        serializer.is_valid(raise_exception=True)
        user = serializer.validated_data['user']
        if user:
            # get_or_create keeps a single long-lived token per user.
            token, created = Token.objects.get_or_create(user=user)
            serializer_user = UserClientSerializer(user)
            return Response({"token": token.key, "user": serializer_user.data}, status=200)
        # NOTE(review): a falsy `user` makes this method return None
        # (server error) -- confirm the serializer guarantees a user.
# eliminación de token
@method_decorator(csrf_exempt, name='dispatch')
class LogoutView(APIView):
    """Log the authenticated user out by deleting their auth token."""
    def post(self, request):
        # Deleting the token invalidates every client that holds it.
        request.user.auth_token.delete()
        return Response(status=204)
# Creación de pokemon recibe todos los parametros del modelo Pokemon
@method_decorator(csrf_exempt, name='dispatch')
class CreatePokemonView(generics.CreateAPIView):
    """POST endpoint creating a Pokemon from the model serializer's fields."""
    serializer_class = PokemonSerializer
# La lista de pokemon puede recibir un parametro de filtrado por elemento
@method_decorator(csrf_exempt, name='dispatch')
class ListPokemonView(generics.ListAPIView):
    """GET endpoint listing pokemons, optionally filtered by element."""
    serializer_class = PokemonSerializer
    def get_queryset(self):
        # Optional ?id_element=<id> query parameter narrows the listing.
        search = self.request.query_params.get('id_element', None)
        if search is not None:
            return Pokemon.objects.filter(id_element=search).order_by('id')
        # No filter: return every pokemon, ordered by primary key.
        return Pokemon.objects.all().order_by('id')
# Actualización de pokemon recibe los mismo parametros de el create
@method_decorator(csrf_exempt, name='dispatch')
class UpdatePokemonView(generics.UpdateAPIView):
    """PUT/PATCH endpoint updating a Pokemon selected by primary key."""
    serializer_class = PokemonSerializer
    queryset = Pokemon.objects.all()
# Eliminación de pokemon recibe el id del pokemon a eliminar
@method_decorator(csrf_exempt, name='dispatch')
class DeletePokemonView(generics.DestroyAPIView):
    """DELETE endpoint removing a Pokemon selected by primary key."""
    serializer_class = PokemonSerializer
queryset = Pokemon.objects.all() | es | 0.934777 | # Create your views here. # Inicio de sesion y creación de token # eliminación de token # Creación de pokemon recibe todos los parametros del modelo Pokemon # La lista de pokemon puede recibir un parametro de filtrado por elemento # Actualización de pokemon recibe los mismo parametros de el create # Eliminación de pokemon recibe el id del pokemon a eliminar | 2.158277 | 2 |
custom_chrf.py | jlibovicky/char-nmt-two-step-decoder | 0 | 6619333 | from typing import List
from collections import Counter
import string
import numpy as np
def pairwise_chrf(sentences: List[str], order: int = 6, beta: float = 2.0):
    """Compute a pairwise chrF score matrix for a list of sentences.

    chrF compares character n-grams (orders 1..`order`) after removing all
    whitespace.  Entry [i, j] holds the F-beta score of sentence j "scored
    against" sentence i, where `beta` weights recall over precision.
    The diagonal is 1 by construction.

    :param sentences: sentences to compare with each other
    :param order: maximum character n-gram order (default 6)
    :param beta: recall weight in the F-score (default 2.0)
    :return: (len(sentences), len(sentences)) numpy array of scores
    """
    # 1. strip all whitespace; chrF operates on character n-grams only
    stripped = [
        s.translate(str.maketrans("", "", string.whitespace))
        for s in sentences]
    # 2. per sentence: one Counter of character n-grams per order 1..order
    sent_n_grams = [
        [Counter(sent[i:i + o] for i in range(len(sent) - o + 1))
         for o in range(1, order + 1)]
        for sent in stripped]
    n = len(sentences)
    # 3. fill the precision table; [i, j] is precision, [j, i] recall
    precisions = np.ones((n, n))
    for i, sent_a in enumerate(sent_n_grams):
        for j, sent_b in enumerate(sent_n_grams):
            if i >= j:
                continue
            avg_precision = 0.0
            avg_recall = 0.0
            effective_order = 0
            for ngrams_a, ngrams_b in zip(sent_a, sent_b):
                a_count = sum(ngrams_a.values())
                b_count = sum(ngrams_b.values())
                # Counter & Counter keeps per-n-gram minimum counts.
                common_count = sum((ngrams_a & ngrams_b).values())
                if a_count > 0 and b_count > 0:
                    avg_precision += common_count / a_count
                    avg_recall += common_count / b_count
                    effective_order += 1
            if effective_order == 0:
                avg_precision, avg_recall = 0.0, 0.0
            else:
                avg_precision /= effective_order
                avg_recall /= effective_order
            precisions[i, j] = avg_precision
            precisions[j, i] = avg_recall
    # 4. recall is transposed precision
    recalls = precisions.T
    # 5. F-beta score.  Fix: guard the division instead of dividing first
    # and masking afterwards -- the original np.where form still evaluated
    # 0/0 and emitted RuntimeWarnings.  Precision and recall share the same
    # common-count numerator, so they are zero together and a zero
    # denominator implies a zero score.
    beta_sq = beta ** 2
    numerator = (1 + beta_sq) * precisions * recalls
    denominator = beta_sq * precisions + recalls
    scores = np.divide(numerator, denominator,
                       out=np.zeros((n, n)), where=denominator > 0)
    return scores
| from typing import List
from collections import Counter
import string
import numpy as np
def pairwise_chrf(sentences: List[str], order: int = 6, beta: float = 2.0):
    """Return an NxN matrix of pairwise chrF scores over `sentences`.

    Whitespace is removed first; character n-grams of orders 1..`order`
    are compared, and `beta` weights recall over precision in the F-score.
    Entry [i, j] scores sentence j against sentence i; the diagonal is 1.
    """
    strip_table = str.maketrans("", "", string.whitespace)
    cleaned = [text.translate(strip_table) for text in sentences]

    def ngram_profile(text):
        # One Counter of character n-grams per order 1..order.
        return [Counter(text[start:start + size]
                        for start in range(len(text) - size + 1))
                for size in range(1, order + 1)]

    profiles = [ngram_profile(text) for text in cleaned]
    size = len(sentences)
    precisions = np.ones((size, size))
    for i in range(size):
        for j in range(i + 1, size):
            prec_sum, rec_sum, matched_orders = 0.0, 0.0, 0
            for grams_i, grams_j in zip(profiles[i], profiles[j]):
                total_i = sum(grams_i.values())
                total_j = sum(grams_j.values())
                overlap = sum((grams_i & grams_j).values())
                if total_i and total_j:
                    prec_sum += overlap / total_i
                    rec_sum += overlap / total_j
                    matched_orders += 1
            if matched_orders:
                precisions[i, j] = prec_sum / matched_orders
                precisions[j, i] = rec_sum / matched_orders
            else:
                precisions[i, j] = 0.0
                precisions[j, i] = 0.0
    # Recall is the transposed precision table.
    recalls = precisions.T
    beta_sq = beta ** 2
    scores = ((1 + beta_sq) * precisions * recalls
              / ((beta_sq * precisions) + recalls))
    # Zero out pairs with no overlap at all (masks the 0/0 entries).
    return np.where((precisions == 0) + (recalls == 0),
                    np.zeros((size, size)), scores)
| en | 0.709365 | # 1. represent each sentece as n-grams # 2. prepare precision table # 3. compute the precisions # 4. recall is transposed precision # 5. compute score # 6. masked outliers | 2.609244 | 3 |
inference/model.py | F2Wang/robotic-grasping | 2 | 6619334 | import torch.nn as nn
import torch.nn.functional as F
class ResidualBlock(nn.Module):
    """Two 3x3 conv + BatchNorm layers with an additive skip connection.

    The residual addition (x + x_in) still requires
    in_channels == out_channels and a padding that preserves the spatial
    size (true for the default kernel_size=3, padding=1).
    """
    def __init__(self, in_channels, out_channels, kernel_size=3):
        """
        :param in_channels: channels of the block input
        :param out_channels: channels produced by both convolutions
        :param kernel_size: conv kernel size (padding is fixed at 1, so
            only the default of 3 keeps the spatial size unchanged)
        """
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size, padding=1)
        # Fix: BatchNorm must track the conv *output* channel count; the
        # original used in_channels, which breaks whenever
        # in_channels != out_channels (identical behavior when they match,
        # as in every use below with 128/128).
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)
    def forward(self, x_in):
        # conv -> BN -> ReLU -> conv -> BN, then add the skip connection
        # (no activation after the addition).
        x = self.bn1(self.conv1(x_in))
        x = F.relu(x)
        x = self.bn2(self.conv2(x))
        return x + x_in
class GenerativeResnet(nn.Module):
    """Encoder / residual-bottleneck / decoder network with four
    single-channel output heads (pos, cos, sin, width maps).

    Attribute names are kept identical to the original so existing
    state_dict checkpoints still load.
    """
    def __init__(self, input_channels=1):
        super(GenerativeResnet, self).__init__()
        # Encoder: 9x9 stem, then two stride-2 downsampling convs.
        self.conv1 = nn.Conv2d(input_channels, 32, kernel_size=9, stride=1, padding=4)
        self.bn1 = nn.BatchNorm2d(32)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=1)
        self.bn2 = nn.BatchNorm2d(64)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        # Bottleneck: five residual blocks at 128 channels.
        self.res1 = ResidualBlock(128, 128)
        self.res2 = ResidualBlock(128, 128)
        self.res3 = ResidualBlock(128, 128)
        self.res4 = ResidualBlock(128, 128)
        self.res5 = ResidualBlock(128, 128)
        # Decoder: two stride-2 transposed convs plus a 9x9 transposed conv.
        self.conv4 = nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1, output_padding=1)
        self.bn4 = nn.BatchNorm2d(64)
        self.conv5 = nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=2, output_padding=1)
        self.bn5 = nn.BatchNorm2d(32)
        self.conv6 = nn.ConvTranspose2d(32, 32, kernel_size=9, stride=1, padding=4)
        # Four independent 2x2 conv heads, one per output map.
        self.pos_output = nn.Conv2d(32, 1, kernel_size=2)
        self.cos_output = nn.Conv2d(32, 1, kernel_size=2)
        self.sin_output = nn.Conv2d(32, 1, kernel_size=2)
        self.width_output = nn.Conv2d(32, 1, kernel_size=2)
        # Xavier-initialize every (transposed) convolution weight.
        for module in self.modules():
            if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.xavier_uniform_(module.weight, gain=1)

    def forward(self, x_in):
        """Return the (pos, cos, sin, width) prediction tuple."""
        feat = F.relu(self.bn1(self.conv1(x_in)))
        feat = F.relu(self.bn2(self.conv2(feat)))
        feat = F.relu(self.bn3(self.conv3(feat)))
        for block in (self.res1, self.res2, self.res3, self.res4, self.res5):
            feat = block(feat)
        feat = F.relu(self.bn4(self.conv4(feat)))
        feat = F.relu(self.bn5(self.conv5(feat)))
        feat = self.conv6(feat)
        return (self.pos_output(feat), self.cos_output(feat),
                self.sin_output(feat), self.width_output(feat))

    def compute_loss(self, xc, yc):
        """Smooth-L1 loss per head against yc = (pos, cos, sin, width) targets."""
        y_pos, y_cos, y_sin, y_width = yc
        pos_pred, cos_pred, sin_pred, width_pred = self(xc)
        losses = {
            'p_loss': F.smooth_l1_loss(pos_pred, y_pos),
            'cos_loss': F.smooth_l1_loss(cos_pred, y_cos),
            'sin_loss': F.smooth_l1_loss(sin_pred, y_sin),
            'width_loss': F.smooth_l1_loss(width_pred, y_width),
        }
        return {
            # Insertion order matches the original summation order.
            'loss': sum(losses.values()),
            'losses': losses,
            'pred': {
                'pos': pos_pred,
                'cos': cos_pred,
                'sin': sin_pred,
                'width': width_pred,
            },
        }

    def predict(self, xc):
        """Run a forward pass and return the four maps keyed by name."""
        pos_pred, cos_pred, sin_pred, width_pred = self(xc)
        return {'pos': pos_pred, 'cos': cos_pred,
                'sin': sin_pred, 'width': width_pred}
| import torch.nn as nn
import torch.nn.functional as F
class ResidualBlock(nn.Module):
    """Two 3x3 conv + BatchNorm layers with an additive skip connection.
    NOTE(review): both BatchNorm layers (and conv2's input) are sized with
    in_channels while conv1 outputs out_channels -- this only works when
    in_channels == out_channels (as used elsewhere with 128/128); confirm
    that constraint is intended.
    """
    def __init__(self, in_channels, out_channels, kernel_size=3):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size, padding=1)
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.conv2 = nn.Conv2d(in_channels, out_channels, kernel_size, padding=1)
        self.bn2 = nn.BatchNorm2d(in_channels)
    def forward(self, x_in):
        # conv -> BN -> ReLU -> conv -> BN, then add the input back
        # (no activation after the addition).
        x = self.bn1(self.conv1(x_in))
        x = F.relu(x)
        x = self.bn2(self.conv2(x))
        return x + x_in
class GenerativeResnet(nn.Module):
    """Encoder / residual-bottleneck / decoder network producing four
    single-channel output maps (pos, cos, sin, width).
    """
    def __init__(self, input_channels=1):
        super(GenerativeResnet, self).__init__()
        # Encoder: 9x9 stem, then two stride-2 downsampling convolutions.
        self.conv1 = nn.Conv2d(input_channels, 32, kernel_size=9, stride=1, padding=4)
        self.bn1 = nn.BatchNorm2d(32)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=1)
        self.bn2 = nn.BatchNorm2d(64)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        # Bottleneck: five residual blocks at 128 channels.
        self.res1 = ResidualBlock(128, 128)
        self.res2 = ResidualBlock(128, 128)
        self.res3 = ResidualBlock(128, 128)
        self.res4 = ResidualBlock(128, 128)
        self.res5 = ResidualBlock(128, 128)
        # Decoder: two stride-2 transposed convs plus a 9x9 transposed conv.
        self.conv4 = nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1, output_padding=1)
        self.bn4 = nn.BatchNorm2d(64)
        self.conv5 = nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=2, output_padding=1)
        self.bn5 = nn.BatchNorm2d(32)
        self.conv6 = nn.ConvTranspose2d(32, 32, kernel_size=9, stride=1, padding=4)
        # Four independent 2x2 conv heads, one per output map.
        self.pos_output = nn.Conv2d(32, 1, kernel_size=2)
        self.cos_output = nn.Conv2d(32, 1, kernel_size=2)
        self.sin_output = nn.Conv2d(32, 1, kernel_size=2)
        self.width_output = nn.Conv2d(32, 1, kernel_size=2)
        # Xavier-initialize every (transposed) convolution weight.
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.xavier_uniform_(m.weight, gain=1)
    def forward(self, x_in):
        # Encoder
        x = F.relu(self.bn1(self.conv1(x_in)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        # Residual bottleneck
        x = self.res1(x)
        x = self.res2(x)
        x = self.res3(x)
        x = self.res4(x)
        x = self.res5(x)
        # Decoder (no activation after the last transposed conv)
        x = F.relu(self.bn4(self.conv4(x)))
        x = F.relu(self.bn5(self.conv5(x)))
        x = self.conv6(x)
        # One head per output map
        pos_output = self.pos_output(x)
        cos_output = self.cos_output(x)
        sin_output = self.sin_output(x)
        width_output = self.width_output(x)
        return pos_output, cos_output, sin_output, width_output
    def compute_loss(self, xc, yc):
        # yc is expected to be the (pos, cos, sin, width) target tuple;
        # each head is trained with smooth L1 loss.
        y_pos, y_cos, y_sin, y_width = yc
        pos_pred, cos_pred, sin_pred, width_pred = self(xc)
        p_loss = F.smooth_l1_loss(pos_pred, y_pos)
        cos_loss = F.smooth_l1_loss(cos_pred, y_cos)
        sin_loss = F.smooth_l1_loss(sin_pred, y_sin)
        width_loss = F.smooth_l1_loss(width_pred, y_width)
        return {
            'loss': p_loss + cos_loss + sin_loss + width_loss,
            'losses': {
                'p_loss': p_loss,
                'cos_loss': cos_loss,
                'sin_loss': sin_loss,
                'width_loss': width_loss
            },
            'pred': {
                'pos': pos_pred,
                'cos': cos_pred,
                'sin': sin_pred,
                'width': width_pred
            }
        }
    def predict(self, xc):
        # Forward pass returning the four maps keyed by name.
        pos_pred, cos_pred, sin_pred, width_pred = self(xc)
        return {
            'pos': pos_pred,
            'cos': cos_pred,
            'sin': sin_pred,
            'width': width_pred
        }
| none | 1 | 2.530489 | 3 | |
wifiphisher/wifiphisher/extensions/roguehostapdinfo.py | burakbozdag/wifiphisher-docker | 3 | 6619335 | """
Extension that interacts with roguehostapd to print relevant information. For example,
information regarding automatic association attacks.
"""
from collections import defaultdict
import wifiphisher.common.constants as constants
class Roguehostapdinfo(object):
    """Reports which victims are associated to the rogue AP and which
    attack (Evil Twin / KARMA / Known Beacons) lured each of them.
    """
    def __init__(self, data):
        """Store the shared engine data and preload known-beacon ESSIDs.
        :param data: shared data from the main engine
        """
        self._data = data
        # This extension never injects frames; the empty mapping only
        # satisfies the extension interface.
        self._packets_to_send = defaultdict(list)
        # victim MAC -> ESSID it probed for while associated
        self._mac2ssid_dict = defaultdict()
        self._known_beacon_ssids = self._get_known_beacon_ssids()
    def get_packet(self, packet):
        """Ignore the sniffed packet; return the (empty) frame mapping."""
        return self._packets_to_send
    def _get_known_beacon_ssids(self):
        """Load the known-WLANs ESSID set when --known-beacons is enabled."""
        essids = set()
        if self._data.args.known_beacons:
            with open(constants.KNOWN_WLANS_FILE) as wlans_file:
                for raw_line in wlans_file:
                    # Lines starting with "!" are comments in that file.
                    if not raw_line.startswith("!"):
                        essids.add(raw_line.rstrip())
        return essids
    def send_output(self):
        """Return one status line per victim currently known to roguehostapd."""
        karma_pairs = self._data.roguehostapd.get_karma_data()
        if karma_pairs:
            mac_list, ssid_list = (list(column) for column in zip(*karma_pairs))
        else:
            mac_list, ssid_list = [], []
        # Forget victims that are no longer associated.
        for stale_mac in [mac for mac in self._mac2ssid_dict
                          if mac not in mac_list]:
            self._mac2ssid_dict.pop(stale_mac)
        # Remember newly associated victims (first probed ESSID wins).
        for mac, ssid in zip(mac_list, ssid_list):
            if mac not in self._mac2ssid_dict:
                self._mac2ssid_dict[mac] = ssid
        report = []
        for mac, ssid in list(self._mac2ssid_dict.items()):
            if ssid == self._data.target_ap_essid:
                attack = "Evil Twin"
            elif ssid not in self._known_beacon_ssids:
                attack = "KARMA"
            else:
                attack = "Known Beacons"
            report.append("Victim " + mac + " probed for WLAN with ESSID: '"
                          + ssid + "' (" + attack + ")")
        return report
    def send_channels(self):
        """Subscribe only to the target AP's channel."""
        return [self._data.target_ap_channel]
    def on_exit(self):
        """Nothing to clean up for this extension."""
        pass
| """
Extension that interacts with roguehostapd to print relevant information. For example,
information regarding automatic association attacks.
"""
from collections import defaultdict
import wifiphisher.common.constants as constants
class Roguehostapdinfo(object):
    """
    Handles for printing KARMA attack information
    """
    def __init__(self, data):
        """
        Setup the class with all the given arguments.
        :param self: A roguehostapdinfo object.
        :param data: Shared data from main engine
        :type self: roguehostapdinfo
        :type data: dictionary
        :return: None
        :rtype: None
        """
        self._data = data
        # This extension never injects frames; the empty mapping only
        # satisfies the extension interface.
        self._packets_to_send = defaultdict(list)
        # victim MAC -> ESSID it probed for while associated
        self._mac2ssid_dict = defaultdict()
        self._known_beacon_ssids = self._get_known_beacon_ssids()
    def get_packet(self, packet):
        """
        :param self: A roguehostapdinfo object
        :param packet: A scapy.layers.RadioTap object
        :type self: roguehostapdinfo
        :type packet: scapy.layers.RadioTap
        :return: empty list
        :rtype: list
        """
        return self._packets_to_send
    def _get_known_beacon_ssids(self):
        """
        :param self: A roguehostapdinfo object
        :type self: roguehostapdinfo
        :return: None
        :rtype: None
        """
        known_beacons_ssids = set()
        # locate the known WLANS file
        if self._data.args.known_beacons:
            area_file = constants.KNOWN_WLANS_FILE
            with open(area_file) as _file:
                for line in _file:
                    # "!"-prefixed lines are comments in the WLANs file.
                    if line.startswith("!"):
                        continue
                    essid = line.rstrip()
                    known_beacons_ssids.add(essid)
        return known_beacons_ssids
    def send_output(self):
        """
        Send the output the extension manager
        :param self: A roguehostapdinfo object.
        :type self: roguehostapdinfo
        :return: A list with the password checking information
        :rtype: list
        ..note: In each packet we ask roguehostapd whether there are victims
        associated to rogue AP
        """
        info = []
        ssid_mac_list = self._data.roguehostapd.get_karma_data()
        try:
            mac_list, ssid_list = list(zip(*ssid_mac_list))
        except ValueError:
            # incase ssid_mac_list is still empty
            mac_list = []
            ssid_list = []
        # remove the one not in the current associated list
        pop_macs = []
        for mac in self._mac2ssid_dict:
            if mac not in mac_list:
                pop_macs.append(mac)
        for key in pop_macs:
            self._mac2ssid_dict.pop(key)
        # add new associated victims to the dictionary
        for idx, mac in enumerate(mac_list):
            if mac not in self._mac2ssid_dict:
                self._mac2ssid_dict[mac] = ssid_list[idx]
        macssid_pairs = list(self._mac2ssid_dict.items())
        for mac, ssid in macssid_pairs:
            # Classify the probed ESSID: the twin of the target AP, an
            # arbitrary (KARMA) network, or a known-beacons entry.
            if ssid == self._data.target_ap_essid:
                outputstr = "Victim " + mac + " probed for WLAN with ESSID: '" + ssid + "' (Evil Twin)"
            elif ssid not in self._known_beacon_ssids:
                outputstr = "Victim " + mac + " probed for WLAN with ESSID: '" + ssid + "' (KARMA)"
            else:
                outputstr = "Victim " + mac + " probed for WLAN with ESSID: '" + ssid + "' (Known Beacons)"
            info.append(outputstr)
        return info
    def send_channels(self):
        """
        Send channels to subscribe
        :param self: A roguehostapdinfo object.
        :type self: roguehostapdinfo
        :return: empty list
        :rtype: list
        ..note: we don't need to send frames in this extension
        """
        return [self._data.target_ap_channel]
    def on_exit(self):
        """
        Free all the resources regarding to this module
        :param self: A roguehostapdinfo object.
        :type self: roguehostapdinfo
        :return: None
        :rtype: None
        """
| en | 0.674953 | Extension that interacts with roguehostapd to print relevant information. For example, information regarding automatic association attacks. Handles for printing KARMA attack information Setup the class with all the given arguments. :param self: A roguehostapdinfo object. :param data: Shared data from main engine :type self: roguehostapdinfo :type data: dictionary :return: None :rtype: None :param self: A roguehostapdinfo object :param packet: A scapy.layers.RadioTap object :type self: roguehostapdinfo :type packet: scapy.layers.RadioTap :return: empty list :rtype: list :param self: A roguehostapdinfo object :type self: roguehostapdinfo :return: None :rtype: None # locate the known WLANS file Send the output the extension manager :param self: A roguehostapdinfo object. :type self: roguehostapdinfo :return: A list with the password checking information :rtype: list ..note: In each packet we ask roguehostapd whether there are victims associated to rogue AP # incase ssid_mac_list is still empty # remove the one not in the current associated list # add new associated victims to the dictionary Send channels to subscribe :param self: A roguehostapdinfo object. :type self: roguehostapdinfo :return: empty list :rtype: list ..note: we don't need to send frames in this extension Free all the resources regarding to this module :param self: A roguehostapdinfo object. :type self: roguehostapdinfo :return: None :rtype: None | 3.033106 | 3 |
orderDealForTree.py | truechuan/sklearn | 2 | 6619336 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
from sklearn import tree
from itertools import islice
import csv
# Use a decision tree to predict whether an order closes (deal / no deal).
# NOTE(review): Python 2 syntax (print statements, 'rU' file mode); file
# handles are never closed -- consider `with open(...)`.
orderInfo = []
orderDeal = []
# Train the model
f = open('resource/learn/orderDeal.csv', 'rU')
# islice(..., 1, None) skips the CSV header row.
lines = islice(csv.reader(f), 1, None)
for row in lines:
    single = []
    single.append(int(row[5])) # guided-manufacturer flag
    single.append(int(row[6])) # guided manufacturer id
    single.append(int(row[7])) # order category
    single.append(int(row[8])) # data-cleaning result
    single.append(int(row[9])) # user level
    single.append(int(row[10])) # group-leader invitation result
    orderDeal.append(row[11]) # deal outcome (training label)
    orderInfo.append(single)
model = tree.DecisionTreeClassifier()
model.fit(orderInfo, orderDeal)
# Evaluate model accuracy on the test set (February data, per filename)
f = open('resource/test/orderDeal_2yue.csv', 'rU')
lines = islice(csv.reader(f), 1, None)
_success = 0
_error = 0
for row in lines:
    single = []
    single.append(int(row[5])) # guided-manufacturer flag
    single.append(int(row[6])) # guided manufacturer id
    single.append(int(row[7])) # order category
    single.append(int(row[8])) # data-cleaning result
    single.append(int(row[9])) # user level
    single.append(int(row[10])) # group-leader invitation result
    # Predict a single row: stack the features into a 1xN matrix.
    mResult = model.predict(np.vstack(single).T)
    mResult = mResult[0]
    if mResult == row[11]:
        _success += 1
        # print '成交预测正确订单id[%s],订单成交类型[%s],机器预测类型[%s]' % (row[0], row[10], mResult)
        # print '预测正确'
    else:
        _error += 1
        print '成交预测错误订单id[%s],订单成交类型[%s],机器预测类型[%s]' % (row[0], row[11], mResult)
        # print '预测错误'
print '预测成功数据:' + str(_success)
print '预测错误数据:' + str(_error)
#print model.predict([[1, 2, 3]]) | #!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
from sklearn import tree
from itertools import islice
import csv
# Use a decision tree to predict whether an order closes (deal / no deal).
# NOTE(review): Python 2 syntax (print statements, 'rU' file mode); file
# handles are never closed -- consider `with open(...)`.
orderInfo = []
orderDeal = []
# Train the model
f = open('resource/learn/orderDeal.csv', 'rU')
# islice(..., 1, None) skips the CSV header row.
lines = islice(csv.reader(f), 1, None)
for row in lines:
    single = []
    single.append(int(row[5])) # guided-manufacturer flag
    single.append(int(row[6])) # guided manufacturer id
    single.append(int(row[7])) # order category
    single.append(int(row[8])) # data-cleaning result
    single.append(int(row[9])) # user level
    single.append(int(row[10])) # group-leader invitation result
    orderDeal.append(row[11]) # deal outcome (training label)
    orderInfo.append(single)
model = tree.DecisionTreeClassifier()
model.fit(orderInfo, orderDeal)
# Evaluate model accuracy on the test set (February data, per filename)
f = open('resource/test/orderDeal_2yue.csv', 'rU')
lines = islice(csv.reader(f), 1, None)
_success = 0
_error = 0
for row in lines:
    single = []
    single.append(int(row[5])) # guided-manufacturer flag
    single.append(int(row[6])) # guided manufacturer id
    single.append(int(row[7])) # order category
    single.append(int(row[8])) # data-cleaning result
    single.append(int(row[9])) # user level
    single.append(int(row[10])) # group-leader invitation result
    # Predict a single row: stack the features into a 1xN matrix.
    mResult = model.predict(np.vstack(single).T)
    mResult = mResult[0]
    if mResult == row[11]:
        _success += 1
        # print '成交预测正确订单id[%s],订单成交类型[%s],机器预测类型[%s]' % (row[0], row[10], mResult)
        # print '预测正确'
    else:
        _error += 1
        print '成交预测错误订单id[%s],订单成交类型[%s],机器预测类型[%s]' % (row[0], row[11], mResult)
        # print '预测错误'
print '预测成功数据:' + str(_success)
print '预测错误数据:' + str(_error)
#print model.predict([[1, 2, 3]]) | zh | 0.764213 | #!/usr/bin/python # -*- coding: utf-8 -*- # 使用决策树预测订单成交 # 训练模型 # 是否指导厂商 # 指导厂商id # 订单分类 # 清洗结果 # 用户等级 # 团长邀约结果 # 成交结果 # 测试模型准确率 # 是否指导厂商 # 指导厂商id # 订单分类 # 清洗结果 # 用户等级 # 团长邀约结果 # print '成交预测正确订单id[%s],订单成交类型[%s],机器预测类型[%s]' % (row[0], row[10], mResult) # print '预测正确' # print '预测错误' #print model.predict([[1, 2, 3]]) | 2.819315 | 3 |
backend/settings.py | drezr/apex_dev_backend | 3 | 6619337 | import os
from pathlib import Path
# Project root: two levels up from this settings module.
BASE_DIR = Path(__file__).resolve().parent.parent
ROOT_URLCONF = 'backend.urls'
WSGI_APPLICATION = 'backend.wsgi.application'
# CORS is fully open; the API is consumed from other origins.
CORS_ORIGIN_ALLOW_ALL = True
# --- Internationalization ---
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# --- Static files ---
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
# --- Applications ---
INSTALLED_APPS = [
    'rest_framework',
    'rest_framework.authtoken',
    'corsheaders',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'apex',
]
# DRF: every endpoint requires an authenticated user with a token.
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.IsAuthenticated',
    ],
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.TokenAuthentication',
    ),
}
# NOTE(review): django-cors-headers recommends placing CorsMiddleware near
# the top of this list; here it is last -- verify CORS headers are applied.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'corsheaders.middleware.CorsMiddleware',
]
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
try:
    '''
    Use an external local_settings.py for production
    It will be used only if the file exists
    '''
    # Define the relative directory where your local_settings.py is
    LOCAL_SETTINGS_PATH = '../'
    import sys
    sys.path.append(os.path.abspath(LOCAL_SETTINGS_PATH))
    from local_settings import *
except ModuleNotFoundError:
    '''
    Define those variables in this file for development only
    Make sure to use an external setting file for production
    Variables below should be set in local_settings.py for production
    Copy/paste them in local_settings.py to override them
    '''
    # NOTE(review): dev-only fallback -- DEBUG=True and ALLOWED_HOSTS=['*']
    # must never be served in production.
    SECRET_KEY = 'django-<KEY>'
    ALLOWED_HOSTS=['*']
    DEBUG = True
    MEDIA_ROOT = '/your/media/path/'
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': BASE_DIR / 'db.sqlite3',
        }
    }
    EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
    EMAIL_HOST = 'YOUR EMAIL HOST'
    EMAIL_PORT = 'YOUR EMAIL PORT'
    EMAIL_HOST_USER = 'YOUR.EMAIL@HOST.USER'
    EMAIL_HOST_PASSWORD = '<PASSWORD>'
    EMAIL_USE_TLS = True
EMAIL_USE_SSL = False | import os
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
ROOT_URLCONF = 'backend.urls'
WSGI_APPLICATION = 'backend.wsgi.application'
CORS_ORIGIN_ALLOW_ALL = True
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
INSTALLED_APPS = [
'rest_framework',
'rest_framework.authtoken',
'corsheaders',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apex',
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated',
],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
try:
'''
Use an external local_settings.py for production
It will be used only if the file exists
'''
# Define the relative directory where your local_settings.py is
LOCAL_SETTINGS_PATH = '../'
import sys
sys.path.append(os.path.abspath(LOCAL_SETTINGS_PATH))
from local_settings import *
except ModuleNotFoundError:
'''
Define those variables in this file for developement only
Make sure to use an external setting file for production
Variables below should be set in local_settings.py for production
Copy/paste them in local_settings.py to override them
'''
SECRET_KEY = 'django-<KEY>'
ALLOWED_HOSTS=['*']
DEBUG = True
MEDIA_ROOT = '/your/media/path/'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'YOUR EMAIL HOST'
EMAIL_PORT = 'YOUR EMAIL PORT'
EMAIL_HOST_USER = 'YOUR.EMAIL@HOST.USER'
EMAIL_HOST_PASSWORD = '<PASSWORD>'
EMAIL_USE_TLS = True
EMAIL_USE_SSL = False | en | 0.839169 | Use an external local_settings.py for production It will be used only if the file exists # Define the relative directory where your local_settings.py is Define those variables in this file for developement only Make sure to use an external setting file for production Variables below should be set in local_settings.py for production Copy/paste them in local_settings.py to override them | 1.588517 | 2 |
core/migrations/0019_article_pinned.py | AndyRae/ostende | 1 | 6619338 | <filename>core/migrations/0019_article_pinned.py<gh_stars>1-10
# Generated by Django 2.2.1 on 2019-05-25 22:19
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a boolean "pinned" flag to Article (default False) so articles
    # can be marked for pinning.
    dependencies = [
        ("core", "0018_auto_20190522_2105"),
    ]
    operations = [
        migrations.AddField(
            model_name="article",
            name="pinned",
            field=models.BooleanField(default=False),
        ),
    ]
| <filename>core/migrations/0019_article_pinned.py<gh_stars>1-10
# Generated by Django 2.2.1 on 2019-05-25 22:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("core", "0018_auto_20190522_2105"),
]
operations = [
migrations.AddField(
model_name="article",
name="pinned",
field=models.BooleanField(default=False),
),
]
| en | 0.639036 | # Generated by Django 2.2.1 on 2019-05-25 22:19 | 1.464954 | 1 |
integration_tests/testsuite/deploy_app.py | enterstudio/runtimes-common | 0 | 6619339 | <gh_stars>0
#!/usr/bin/python
# Copyright 2017 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
from retrying import retry
import subprocess
import sys
def _cleanup(appdir):
try:
os.remove(os.path.join(appdir, 'Dockerfile'))
except:
pass
def deploy_app(image, appdir):
    """Deploy the app in *appdir* to App Engine using *image* as the base.

    Renders Dockerfile.in into Dockerfile (substituting ${STAGING_IMAGE}),
    runs ``gcloud app deploy`` from inside *appdir*, and returns the
    deployed app's hostname via _retrieve_url(). Exits the whole process
    if the deploy command fails. The generated Dockerfile is always
    removed and the working directory restored on the way out.
    """
    try:
        # change to app directory (and remember original directory)
        owd = os.getcwd()
        os.chdir(appdir)
        # substitute vars in Dockerfile (equivalent of envsubst)
        with open('Dockerfile.in', 'r') as fin:
            with open('Dockerfile', 'w') as fout:
                for line in fin:
                    fout.write(line.replace('${STAGING_IMAGE}', image))
                # The explicit close() calls are redundant under ``with``
                # but harmless.
                fout.close()
            fin.close()
        # TODO: once sdk driver is published, use it here
        deploy_command = ['gcloud', 'app', 'deploy',
                          '--stop-previous-version', '--verbosity=debug']
        deploy_proc = subprocess.Popen(deploy_command,
                                       stdout=subprocess.PIPE,
                                       stdin=subprocess.PIPE)
        # NOTE(review): stderr is not piped, so ``error`` is always None;
        # under Python 3 ``output`` would be bytes — confirm Python 2 only.
        output, error = deploy_proc.communicate()
        if deploy_proc.returncode != 0:
            sys.exit('Error encountered when deploying app. ' +
                     'Full log: \n\n' + (output or ''))
        return _retrieve_url()
    finally:
        _cleanup(appdir)
        os.chdir(owd)
@retry(wait_fixed=10000, stop_max_attempt_number=4)
def _retrieve_url():
    """Return the deployed app's default hostname as an ascii byte string.

    Queries ``gcloud app describe --format=json``. On failure the error is
    logged and an exception is raised so the @retry decorator actually
    retries (retrying only re-invokes on a raised exception; the previous
    ``return None`` defeated the retries and made the trailing ``raise``
    unreachable). After the final failed attempt the exception propagates
    to the caller.
    """
    try:
        # retrieve url of deployed app for test driver
        url_command = ['gcloud', 'app', 'describe', '--format=json']
        app_dict = json.loads(subprocess.check_output(url_command))
        hostname = app_dict.get('defaultHostname')
        # AttributeError below also covers a missing 'defaultHostname'
        # key (hostname is None).
        return hostname.encode('ascii', 'ignore')
    except (subprocess.CalledProcessError, ValueError, KeyError, AttributeError):
        logging.warn('Error encountered when retrieving app URL!')
        raise Exception('Unable to contact deployed application!')
| #!/usr/bin/python
# Copyright 2017 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
from retrying import retry
import subprocess
import sys
def _cleanup(appdir):
try:
os.remove(os.path.join(appdir, 'Dockerfile'))
except:
pass
def deploy_app(image, appdir):
try:
# change to app directory (and remember original directory)
owd = os.getcwd()
os.chdir(appdir)
# substitute vars in Dockerfile (equivalent of envsubst)
with open('Dockerfile.in', 'r') as fin:
with open('Dockerfile', 'w') as fout:
for line in fin:
fout.write(line.replace('${STAGING_IMAGE}', image))
fout.close()
fin.close()
# TODO: once sdk driver is published, use it here
deploy_command = ['gcloud', 'app', 'deploy',
'--stop-previous-version', '--verbosity=debug']
deploy_proc = subprocess.Popen(deploy_command,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
output, error = deploy_proc.communicate()
if deploy_proc.returncode != 0:
sys.exit('Error encountered when deploying app. ' +
'Full log: \n\n' + (output or ''))
return _retrieve_url()
finally:
_cleanup(appdir)
os.chdir(owd)
@retry(wait_fixed=10000, stop_max_attempt_number=4)
def _retrieve_url():
try:
# retrieve url of deployed app for test driver
url_command = ['gcloud', 'app', 'describe', '--format=json']
app_dict = json.loads(subprocess.check_output(url_command))
hostname = app_dict.get('defaultHostname')
return hostname.encode('ascii', 'ignore')
except (subprocess.CalledProcessError, ValueError, KeyError):
logging.warn('Error encountered when retrieving app URL!')
return None
raise Exception('Unable to contact deployed application!') | en | 0.840345 | #!/usr/bin/python # Copyright 2017 Google Inc. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # change to app directory (and remember original directory) # substitute vars in Dockerfile (equivalent of envsubst) # TODO: once sdk driver is published, use it here # retrieve url of deployed app for test driver | 2.048377 | 2 |
test_progect/2_1_7.py | Natalia1957/stepik-auto-test-course_ | 0 | 6619340 | <filename>test_progect/2_1_7.py
# Open the page http://suninjuly.github.io/get_attribute.html.
# Find the image element showing the treasure chest.
# Read that element's "valuex" attribute; its value is the x for the task.
# Compute the mathematical function of x (the function itself is unchanged).
# Type the answer into the text field.
# Tick the checkbox "I confirm that I am a robot".
# Select the radio button "Robots rule!".
# Press the "Submit" button.
from selenium import webdriver
import math
def calc(x):
    # log(|12 * sin(x)|), stringified so it can go straight into send_keys().
    return str(math.log(abs(12*math.sin(int(x)))))
link = "http://suninjuly.github.io/get_attribute.html"
browser = webdriver.Chrome()
browser.get(link)
# The hidden task input lives in the "valuex" attribute of the image.
x = int(browser.find_element_by_id("treasure").get_attribute("valuex"))
y = calc(x)
browser.find_element_by_id("answer").send_keys(str(y))
browser.find_element_by_id("robotCheckbox").click()
browser.find_element_by_id("robotsRule").click()
browser.find_element_by_class_name("btn-default").click()
# Открыть страницу http://suninjuly.github.io/get_attribute.html.
# Найти на ней элемент-картинку, который является изображением сундука с сокровищами.
# Взять у этого элемента значение атрибута valuex, которое является значением x для задачи.
# Посчитать математическую функцию от x (сама функция остаётся неизменной).
# Ввести ответ в текстовое поле.
# Отметить checkbox "Подтверждаю, что являюсь роботом".
# Выбрать radiobutton "Роботы рулят!".
# Нажать на кнопку "Отправить".
from selenium import webdriver
import math
def calc(x):
return str(math.log(abs(12*math.sin(int(x)))))
link = "http://suninjuly.github.io/get_attribute.html"
browser = webdriver.Chrome()
browser.get(link)
x = int(browser.find_element_by_id("treasure").get_attribute("valuex"))
y = calc(x)
browser.find_element_by_id("answer").send_keys(str(y))
browser.find_element_by_id("robotCheckbox").click()
browser.find_element_by_id("robotsRule").click()
browser.find_element_by_class_name("btn-default").click() | ru | 0.995345 | # Открыть страницу http://suninjuly.github.io/get_attribute.html. # Найти на ней элемент-картинку, который является изображением сундука с сокровищами. # Взять у этого элемента значение атрибута valuex, которое является значением x для задачи. # Посчитать математическую функцию от x (сама функция остаётся неизменной). # Ввести ответ в текстовое поле. # Отметить checkbox "Подтверждаю, что являюсь роботом". # Выбрать radiobutton "Роботы рулят!". # Нажать на кнопку "Отправить". | 2.862407 | 3 |
src/content/admin.py | zniper/test-blog | 0 | 6619341 | from django.contrib import admin
import models
# Register the blog models so they are editable in the Django admin.
admin.site.register(models.Entry)
admin.site.register(models.Category)
| from django.contrib import admin
import models
admin.site.register(models.Entry)
admin.site.register(models.Category)
| none | 1 | 1.277011 | 1 | |
codes/raulcr-p1318-Accepted-s1225965.py | raulcr98/coj-solutions | 1 | 6619342 | <gh_stars>1-10
# Read three space-separated integers and an order string that is a
# permutation of "ABC"; print the sorted values rearranged according to
# that order string. NOTE(review): raw_input() means this is Python 2.
a = [int(i) for i in raw_input().split(' ')]
ind = "ABC"
orden = raw_input()
a.sort()
# ind.find(letter) maps each requested letter to a rank (0 = smallest)
# in the sorted list.
print (str(a[ind.find(orden[0])] )+ " " + str(a[ind.find(orden[1])]) + " " + str(a[ind.find(orden[2])]))
| a = [int(i) for i in raw_input().split(' ')]
ind = "ABC"
orden = raw_input()
a.sort()
print (str(a[ind.find(orden[0])] )+ " " + str(a[ind.find(orden[1])]) + " " + str(a[ind.find(orden[2])])) | none | 1 | 3.521379 | 4 | |
zing/parser/peg.py | joshuamorton/zing | 1 | 6619343 | import re
class Node:
    """Interior AST node produced while parsing a PEG grammar.

    Holds a node-type tag, a back-reference to the owning parser, and an
    ordered list of child nodes. Supports len()/indexing/iteration over
    the children directly.
    """

    def __init__(self, t, parser):
        self.parser = parser
        self.type = t
        self._string = None
        self.children = []

    @property
    def string(self):
        # Delegates to __repr__, which renders the whole subtree
        # recursively.
        return repr(self)

    def __repr__(self):
        return "{}:{}".format(self.type, self.children)

    def __getitem__(self, index):
        return self.children[index]

    def __len__(self):
        return len(self.children)

    def __iter__(self):
        return iter(self.children)

    def handle(self, vals):
        # Dispatch to the method named after this node's type; the handler
        # receives (node, vals), matching the original calling convention.
        handler = getattr(self, self.type)
        return handler(self, vals)
class PEGNode(Node):
    # Per-node-type handlers dispatched via Node.handle(). These look like
    # work-in-progress visitors for walking a parsed PEG tree.
    def expression(self, node):
        # NOTE(review): Node defines __getitem__ but not __call__, so
        # ``node(i)`` will raise TypeError; ``node[i]`` looks intended —
        # confirm before relying on this path.
        for i in range(0, len(node), 2):
            self.handle(node(i))
    def seqence(self, node):
        # NOTE(review): misspelled — Node.handle() dispatches on the type
        # string "sequence", which would never reach this method. Confirm.
        print(node)
    def prefix(self, node):
        pass
    def suffix(self, node):
        pass
class Terminal(Node):
    """Leaf node holding the literal text matched by a terminal rule.

    Unlike Node, a Terminal has no children and its ``string`` property
    returns the raw matched text instead of a recursive repr. Purely
    cosmetic node types ("space", "eof") render as an empty repr so they
    disappear from printed trees.
    """

    def __init__(self, t, parser, string):
        super().__init__(t, parser)
        self.parser = parser
        self.type = t
        self._string = string
        self.children = None

    @property
    def string(self):
        return self._string

    def __repr__(self):
        if self.type in ("space", "eof"):
            return ""
        return 'Terminal:"{}"'.format(self.string)

    def __str__(self):
        return self._string
class PEGTerminal(Terminal):
    # Placeholder specialisation for terminals of the PEG meta-grammar.
    pass
class GrammarError(Exception):
    # Root of the parser's error hierarchy; the combinators in P catch
    # this to backtrack.
    pass
class TerminalError(GrammarError):
    # A terminal/regex failed to match at the current position.
    pass
class SlashError(GrammarError):
    # Every alternative of a '/'-separated choice failed.
    pass
class NotError(GrammarError):
    # NOTE(review): declared for '!' lookahead failures but never raised
    # in the visible code (P._not raises GrammarError directly).
    pass
class P:
    """
    Bootstrap recursive-descent parser for PEG grammar definitions.

    The grammar text is parsed eagerly in __init__: the resulting AST
    (a tree of Node/Terminal objects) is stored in ``syntax_tree`` and
    each rule's expression node is indexed by rule name in ``functions``.
    Combinator helpers (_some/_maybe/_paren/_slashed/_not) backtrack by
    catching GrammarError.
    """
    def __init__(self, grammar):
        self.functions = dict()
        self.grammar = grammar
        self.syntax_tree, _ = self._grammar(grammar)
        self.syntax_tree = self.syntax_tree[0]
    def parse(self, string):
        """Parse *string* against the previously loaded grammar."""
        return self._parse(string, self.syntax_tree)
    def _parse(self, string, node):
        """
        recursively parse nodes from the syntax
        """
        # NOTE(review): stub — only prints the node type; evaluation of
        # input against the grammar is not implemented yet.
        print(node.type)
    # for bootstrapping the PEG parse tree
    # these methods each are in the form
    # function(String a) -> Tuple[Substring, Node]
    # where Substring is some substring of a, from an index x (can be 0)
    # to the end
    # and Node is a Node object that essentially represents part of the AST of
    # the parser itself
    def _grammar(self, grammar):
        """
        all _x are of the form str -> ([Node], str)
        Grammar <- Spacing Definition+ EndOfFile
        """
        gram = Node("grammar", self)
        spacing, rest = self._spacing(grammar)
        children = spacing
        definitions, rest = self._some(self._definition)(rest)
        children += definitions
        eof, rest = self._EOF(rest)
        children += eof
        gram.children = children
        return [gram], rest  # rest will be empty here
    def _definition(self, rest):
        """
        Definition <- Identifier LEFTARROW Expression
        """
        defn = Node("definition", self)
        ident, rest = self._IDENTIFIER(rest)
        arrow, rest = self._LEFTARROW(rest)
        exp, rest = self._expression(rest)
        defn.children = ident + arrow + exp
        # children layout is [identifier, spacing, LEFTARROW, spacing,
        # expression] (each terminal matcher appends exactly one spacing
        # node), so index 0 is the rule name and index 4 its expression.
        self.functions[defn.children[0].string] = defn.children[4]
        return [defn], rest
    def _expression(self, rest):
        """
        Expression <- Sequence (SLASH Sequence)*
        """
        expr = Node("expression", self)
        seq, rest = self._sequence(rest)
        nodes, rest = self._maybe(self._some(self._paren(self._SLASH, self._sequence)))(rest)
        expr.children = seq + nodes
        return [expr], rest
    def _sequence(self, rest):
        """
        Sequence <- Prefix*
        """
        seq = Node("sequence", self)
        nodes, rest = self._maybe(self._some(self._prefix))(rest)
        seq.children = nodes
        return [seq], rest
    def _prefix(self, rest):
        """
        Prefix <- (AND / NOT)? Suffix
        """
        prefix = Node("prefix", self)
        nodes, rest = self._maybe(self._slashed(self._AND, self._NOT))(rest)
        suffix, rest = self._suffix(rest)
        prefix.children = nodes + suffix
        return [prefix], rest
    def _suffix(self, rest):
        """
        Suffix <- Primary (QUESTION / STAR / PLUS)?
        """
        suffix = Node("suffix", self)
        prim, rest = self._primary(rest)
        nodes, rest = self._maybe(self._slashed(self._QUESTION, self._STAR, self._PLUS))(rest)
        suffix.children = prim + nodes
        return [suffix], rest
    def _primary(self, rest):
        """
        Primary <- Identifier (!LEFTARROW) / (OPEN Expression CLOSE) / Literal / Class / DOT
        """
        prim = Node("primary", self)
        nodes, rest = self._slashed(self._paren(self._IDENTIFIER, self._not(self._LEFTARROW)), self._paren(self._OPEN, self._expression,self._CLOSE), self._literal, self._class, self._DOT)(rest)
        prim.children = nodes
        return [prim], rest
    def _IDENTIFIER(self, rest):
        """
        Identifier <- IdentStart IdentCont* Spacing
        IdentStart <- [a-zA-Z_]
        IdentCont <- IdentStart / [0-9]
        """
        return self._terminal(r'[a-zA-Z_][a-zA-Z0-9_]*', "identifier")(rest)
    def _literal(self, rest):
        """
        Literal <- ['] (!['] Char)* ['] Spacing / ["] (!["] Char)* ["] Spacing
        """
        try:
            if rest[0] == "'":
                return self._terminal(r"""\'([^']|\n|\r|\r\n)*?\'""", "literal")(rest)
            else:
                return self._terminal(r"""\"([^"]|\n|\r|\r\n)*?\"""", "literal")(rest)
        except:
            # NOTE(review): the bare except also hides IndexError on empty
            # input and rewrites TerminalError as plain GrammarError —
            # confirm this widening is intended.
            raise GrammarError
    def _class(self, rest):
        """
        Class <- '[' (!']' Range)* ']' Spacing
        """
        return self._terminal(r'\[(.(-.)?)*\]', "range")(rest)
    def _terminal(self, terminal, name):
        """
        terminal: the raw string to match
        name: the name of the node

        Returns a matcher closure that consumes the regex match plus any
        trailing spacing, yielding ([Terminal, spacing-Node], rest).
        """
        def inner(rest):
            try:
                pos = re.match(terminal, rest).end()
                node = [Terminal(name, self, rest[:pos])]
                rest = rest[pos:]
            except:
                # re.match returning None raises AttributeError on .end(),
                # which is converted into a TerminalError here.
                raise TerminalError("Expected a {} at '".format(name) + rest[:min(10, len(rest))] + "'")
            spacing, rest = self._spacing(rest)
            return node + spacing, rest
        return inner
    def _LEFTARROW(self, rest):
        """
        LEFTARROW <- '<-' Spacing
        """
        return self._terminal(r'<-', "LEFTARROW")(rest)
    def _SLASH(self, rest):
        """
        SLASH <- '/' Spacing
        """
        return self._terminal(r'/', "SLASH")(rest)
    def _AND(self, rest):
        """
        AND <- '&' Spacing
        """
        return self._terminal(r'&', "AND")(rest)
    def _NOT(self, rest):
        """
        NOT <- '!' Spacing
        """
        return self._terminal(r'!', "NOT")(rest)
    def _QUESTION(self, rest):
        """
        QUESTION <- '?' Spacing
        """
        return self._terminal(r'\?', "QUESTION")(rest)
    def _STAR(self, rest):
        """
        STAR <- '*' Spacing
        """
        return self._terminal(r'\*', "STAR")(rest)
    def _PLUS(self, rest):
        """
        PLUS <- '+' Spacing
        """
        return self._terminal(r'\+', "PLUS")(rest)
    def _OPEN(self, rest):
        """
        OPEN <- '(' Spacing
        """
        return self._terminal(r'\(', "OPEN")(rest)
    def _CLOSE(self, rest):
        """
        CLOSE <- ')' Spacing
        """
        return self._terminal(r'\)', "CLOSE")(rest)
    def _DOT(self, rest):
        """
        DOT <- '.' Spacing
        """
        return self._terminal(r'\.', "DOT")(rest)
    def _spacing(self, rest):
        """
        Spacing <- (Space / Comment)*
        """
        spacing = Node("spacing", self)
        nodes, rest = self._maybe(self._some(self._paren(self._slashed(self._SPACE, self._COMMENT))))(rest)
        spacing.children = nodes
        return [spacing], rest
    def _COMMENT(self, rest):
        # A '#' comment running to end-of-line (or end-of-input).
        try:
            pos = re.match(r"#.*?(\n|\r|\r\n|$)", rest).end()
            return [Terminal("comment", self, rest[:pos])], rest[pos:]
        except:
            raise TerminalError("Expected a comment at '" + rest[:min(10, len(rest))] + "'")
    def _SPACE(self, rest):
        # One or more spaces, tabs, or newlines of any flavour.
        try:
            pos = re.match(r"( |\t|\r\n|\n|\r)+", rest).end()
            return [Terminal("space", self, rest[:pos])], rest[pos:]
        except:
            raise TerminalError("Expected a space at '" + rest[:min(10, len(rest))] + "'")
    def _EOF(self, rest):
        # Succeeds only when the whole input has been consumed.
        if rest != "":
            raise TerminalError("Expected an end of file at '" + rest[:min(10, len(rest))] + "'")
        else:
            return [Terminal("eof", self, None)], None
    @staticmethod
    def _some(parser):
        """
        parses at least one of the passed in parser
        """
        def inner(rest):
            node, rest = parser(rest)
            nodes = node
            while True:
                try:
                    node, rest = parser(rest)
                    nodes += node
                except GrammarError:
                    break
            return nodes, rest
        return inner
    @staticmethod
    def _maybe(parser):
        """
        parses an optional item
        """
        def inner(rest):
            try:
                node, rest = parser(rest)
            except GrammarError:
                node, rest = [], rest
            return node, rest
        return inner
    @staticmethod
    def _paren(*parsers):
        """
        parses a parenthetical
        """
        def inner(rest):
            nodes = []
            for parser in parsers:
                node, rest = parser(rest)
                nodes += node
            return nodes, rest
        return inner
    @staticmethod
    def _slashed(*parsers):
        """
        parses slash seperated values
        """
        def inner(rest):
            for parser in parsers:
                try:
                    node, rest = parser(rest)
                    return node, rest
                except GrammarError:
                    pass
            raise SlashError
        return inner
    @staticmethod
    def _not(parser):
        """
        parses a not lookahead
        """
        def inner(rest):
            try:
                parser(rest)
            except GrammarError:
                # The inner parser failed, so the negative lookahead
                # succeeds without consuming input.
                return [], rest
            raise GrammarError
        return inner
| import re
class Node:
def __init__(self, t, parser):
self.parser = parser
self.type = t
self._string = None
self.children = []
@property
def string(self):
return self.__repr__() # "complex" recursive thing
def __repr__(self):
return self.type + ":" + str(self.children)
def __getitem__(self, i):
return self.children[i]
def __len__(self):
return len(self.children)
def __iter__(self):
return iter(self.children)
def handle(self, vals):
return self.__getattribute__(self.type)(self, vals)
class PEGNode(Node):
def expression(self, node):
for i in range(0, len(node), 2):
self.handle(node(i))
def seqence(self, node):
print(node)
def prefix(self, node):
pass
def suffix(self, node):
pass
class Terminal(Node):
def __init__(self, t, parser, string):
super().__init__(t, parser)
self.parser = parser
self.type = t
self._string = string
self.children = None
@property
def string(self):
return self._string
def __repr__(self):
if self.type == "space" or self.type == "eof":
return ""
return "Terminal:" + '"' + self.string + '"'
def __str__(self):
return self._string
class PEGTerminal(Terminal):
pass
class GrammarError(Exception):
pass
class TerminalError(GrammarError):
pass
class SlashError(GrammarError):
pass
class NotError(GrammarError):
pass
class P:
def __init__(self, grammar):
self.functions = dict()
self.grammar = grammar
self.syntax_tree, _ = self._grammar(grammar)
self.syntax_tree = self.syntax_tree[0]
def parse(self, string):
return self._parse(string, self.syntax_tree)
def _parse(self, string, node):
"""
recursively parse nodes from the syntax
"""
print(node.type)
# for bootstrapping the PEG parse tree
# these methods each are in the form
# function(String a) -> Tuple[Subtring, Node]
# where Substring is some substring of a, from an index x (can be 0)
# to the end
# and Node is a Node object that essentially represents part of the AST of
# the parser itself
def _grammar(self, grammar):
"""
all _x are of the form str -> ([Node], str)
Grammar <- Spacing Definition+ EndOfFile
"""
gram = Node("grammar", self)
spacing, rest = self._spacing(grammar)
children = spacing
definitions, rest = self._some(self._definition)(rest)
children += definitions
eof, rest = self._EOF(rest)
children += eof
gram.children = children
return [gram], rest # rest will be empty here
def _definition(self, rest):
"""
Definition <- Identifier LEFTARROW Expression
"""
defn = Node("definition", self)
ident, rest = self._IDENTIFIER(rest)
arrow, rest = self._LEFTARROW(rest)
exp, rest = self._expression(rest)
defn.children = ident + arrow + exp
self.functions[defn.children[0].string] = defn.children[4]
return [defn], rest
def _expression(self, rest):
"""
Expression <- Sequence (SLASH Sequence)*
"""
expr = Node("expression", self)
seq, rest = self._sequence(rest)
nodes, rest = self._maybe(self._some(self._paren(self._SLASH, self._sequence)))(rest)
expr.children = seq + nodes
return [expr], rest
def _sequence(self, rest):
"""
Sequence <- Prefix*
"""
seq = Node("sequence", self)
nodes, rest = self._maybe(self._some(self._prefix))(rest)
seq.children = nodes
return [seq], rest
def _prefix(self, rest):
"""
Prefix <- (AND / NOT)? Suffix
"""
prefix = Node("prefix", self)
nodes, rest = self._maybe(self._slashed(self._AND, self._NOT))(rest)
suffix, rest = self._suffix(rest)
prefix.children = nodes + suffix
return [prefix], rest
def _suffix(self, rest):
"""
Suffix <- Primary (QUESTION / STAR / PLUS)?
"""
suffix = Node("suffix", self)
prim, rest = self._primary(rest)
nodes, rest = self._maybe(self._slashed(self._QUESTION, self._STAR, self._PLUS))(rest)
suffix.children = prim + nodes
return [suffix], rest
def _primary(self, rest):
"""
Primary <- Identifier (!LEFTARROW) / (OPEN Expression CLOSE) / Literal / Class / DOT
"""
prim = Node("primary", self)
nodes, rest = self._slashed(self._paren(self._IDENTIFIER, self._not(self._LEFTARROW)), self._paren(self._OPEN, self._expression,self._CLOSE), self._literal, self._class, self._DOT)(rest)
prim.children = nodes
return [prim], rest
def _IDENTIFIER(self, rest):
"""
Identifier <- IdentStart IdentCont* Spacing
IdentStart <- [a-zA-Z_]
IdentCont <- IdentStart / [0-9]
"""
return self._terminal(r'[a-zA-Z_][a-zA-Z0-9_]*', "identifier")(rest)
def _literal(self, rest):
"""
Literal <- ['] (!['] Char)* ['] Spacing / ["] (!["] Char)* ["] Spacing
"""
try:
if rest[0] == "'":
return self._terminal(r"""\'([^']|\n|\r|\r\n)*?\'""", "literal")(rest)
else:
return self._terminal(r"""\"([^"]|\n|\r|\r\n)*?\"""", "literal")(rest)
except:
raise GrammarError
def _class(self, rest):
"""
Class <- '[' (!']' Range)* ']' Spacing
"""
return self._terminal(r'\[(.(-.)?)*\]', "range")(rest)
def _terminal(self, terminal, name):
"""
terminal: the raw string to match
name: the name of the node
"""
def inner(rest):
try:
pos = re.match(terminal, rest).end()
node = [Terminal(name, self, rest[:pos])]
rest = rest[pos:]
except:
raise TerminalError("Expected a {} at '".format(name) + rest[:min(10, len(rest))] + "'")
spacing, rest = self._spacing(rest)
return node + spacing, rest
return inner
def _LEFTARROW(self, rest):
"""
LEFTARROW <- '<-' Spacing
"""
return self._terminal(r'<-', "LEFTARROW")(rest)
def _SLASH(self, rest):
"""
SLASH <- '/' Spacing
"""
return self._terminal(r'/', "SLASH")(rest)
def _AND(self, rest):
"""
AND <- '&' Spacing
"""
return self._terminal(r'&', "AND")(rest)
def _NOT(self, rest):
"""
NOT <- '!' Spacing
"""
return self._terminal(r'!', "NOT")(rest)
def _QUESTION(self, rest):
"""
QUESTION <- '?' Spacing
"""
return self._terminal(r'\?', "QUESTION")(rest)
def _STAR(self, rest):
"""
STAR <- '*' Spacing
"""
return self._terminal(r'\*', "STAR")(rest)
def _PLUS(self, rest):
"""
PLUS <- '+' Spacing
"""
return self._terminal(r'\+', "PLUS")(rest)
def _OPEN(self, rest):
"""
OPEN <- '(' Spacing
"""
return self._terminal(r'\(', "OPEN")(rest)
def _CLOSE(self, rest):
"""
CLOSE <- ')' Spacing
"""
return self._terminal(r'\)', "CLOSE")(rest)
def _DOT(self, rest):
"""
DOT <- '.' Spacing
"""
return self._terminal(r'\.', "DOT")(rest)
def _spacing(self, rest):
"""
Spacing <- (Space / Comment)*
"""
spacing = Node("spacing", self)
nodes, rest = self._maybe(self._some(self._paren(self._slashed(self._SPACE, self._COMMENT))))(rest)
spacing.children = nodes
return [spacing], rest
def _COMMENT(self, rest):
try:
pos = re.match(r"#.*?(\n|\r|\r\n|$)", rest).end()
return [Terminal("comment", self, rest[:pos])], rest[pos:]
except:
raise TerminalError("Expected a comment at '" + rest[:min(10, len(rest))] + "'")
def _SPACE(self, rest):
try:
pos = re.match(r"( |\t|\r\n|\n|\r)+", rest).end()
return [Terminal("space", self, rest[:pos])], rest[pos:]
except:
raise TerminalError("Expected a space at '" + rest[:min(10, len(rest))] + "'")
def _EOF(self, rest):
if rest != "":
raise TerminalError("Expected an end of file at '" + rest[:min(10, len(rest))] + "'")
else:
return [Terminal("eof", self, None)], None
@staticmethod
def _some(parser):
"""
parses at least one of the passed in parser
"""
def inner(rest):
node, rest = parser(rest)
nodes = node
while True:
try:
node, rest = parser(rest)
nodes += node
except GrammarError:
break
return nodes, rest
return inner
@staticmethod
def _maybe(parser):
"""
parses an optional item
"""
def inner(rest):
try:
node, rest = parser(rest)
except GrammarError:
node, rest = [], rest
return node, rest
return inner
@staticmethod
def _paren(*parsers):
"""
parses a parenthetical
"""
def inner(rest):
nodes = []
for parser in parsers:
node, rest = parser(rest)
nodes += node
return nodes, rest
return inner
@staticmethod
def _slashed(*parsers):
"""
parses slash seperated values
"""
def inner(rest):
for parser in parsers:
try:
node, rest = parser(rest)
return node, rest
except GrammarError:
pass
raise SlashError
return inner
@staticmethod
def _not(parser):
"""
parses a not lookahead
"""
def inner(rest):
try:
parser(rest)
except GrammarError:
return [], rest
raise GrammarError
return inner
| en | 0.57994 | # "complex" recursive thing recursively parse nodes from the syntax # for bootstrapping the PEG parse tree # these methods each are in the form # function(String a) -> Tuple[Subtring, Node] # where Substring is some substring of a, from an index x (can be 0) # to the end # and Node is a Node object that essentially represents part of the AST of # the parser itself all _x are of the form str -> ([Node], str) Grammar <- Spacing Definition+ EndOfFile # rest will be empty here Definition <- Identifier LEFTARROW Expression Expression <- Sequence (SLASH Sequence)* Sequence <- Prefix* Prefix <- (AND / NOT)? Suffix Suffix <- Primary (QUESTION / STAR / PLUS)? Primary <- Identifier (!LEFTARROW) / (OPEN Expression CLOSE) / Literal / Class / DOT Identifier <- IdentStart IdentCont* Spacing IdentStart <- [a-zA-Z_] IdentCont <- IdentStart / [0-9] Literal <- ['] (!['] Char)* ['] Spacing / ["] (!["] Char)* ["] Spacing \'([^']|\n|\r|\r\n)*?\' \"([^"]|\n|\r|\r\n)*?\ Class <- '[' (!']' Range)* ']' Spacing terminal: the raw string to match name: the name of the node LEFTARROW <- '<-' Spacing SLASH <- '/' Spacing AND <- '&' Spacing NOT <- '!' Spacing QUESTION <- '?' Spacing STAR <- '*' Spacing PLUS <- '+' Spacing OPEN <- '(' Spacing CLOSE <- ')' Spacing DOT <- '.' Spacing Spacing <- (Space / Comment)* parses at least one of the passed in parser parses an optional item parses a parenthetical parses slash seperated values parses a not lookahead | 3.049348 | 3 |
aequilibrae/paths/__version__.py | jamiecook/AequilibraE | 0 | 6619344 | import os
a = open(os.path.join(os.path.dirname(__file__), 'parameters.pxi'), 'r')
for i in a.readlines():
if 'VERSION' in i:
version = i[10:]
if 'MINOR_VRSN' in i:
minor_version = i[13:]
if 'release_name' in i:
release_name = i[16:-1]
if 'binary' in i:
binary_version = i[17:-1]
release_version = str(version) + '.' + str(minor_version) | import os
a = open(os.path.join(os.path.dirname(__file__), 'parameters.pxi'), 'r')
for i in a.readlines():
if 'VERSION' in i:
version = i[10:]
if 'MINOR_VRSN' in i:
minor_version = i[13:]
if 'release_name' in i:
release_name = i[16:-1]
if 'binary' in i:
binary_version = i[17:-1]
release_version = str(version) + '.' + str(minor_version) | none | 1 | 2.144828 | 2 | |
example/example1.py | dylan-plummer/scHiCTools | 15 | 6619345 | <reponame>dylan-plummer/scHiCTools<filename>example/example1.py<gh_stars>10-100
import sys
import os
import matplotlib.pyplot as plt
# Make the in-repo scHiCTools package importable when running this example
# straight from the source tree.
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
print(sys.path)
from scHiCTools import scHiCs,scatter
# Load three single-cell Hi-C contact files at 50 kb resolution, smoothing
# each map with a size-3 convolution kernel and keeping the first 10 strata.
x = scHiCs(['../test/data/cell_01', '../test/data/cell_02',
            '../test/data/cell_03'],
           reference_genome='mm9',
           resolution=50000,
           max_distance=4000000,
           format='shortest_score',
           adjust_resolution=True,
           chromosomes='except Y',
           operations=['convolution'],
           kernel_shape=3,
           keep_n_strata=10,
           store_full_map=True
           )
x.plot_contacts()
# Embed the three cells into a low-dimensional space with MDS over
# inner-product similarity; also retrieve the pairwise distance matrix.
emb, _ = x.learn_embedding(similarity_method='innerproduct',
                           embedding_method='MDS',
                           aggregation='median',
                           print_time=False,
                           return_distance=True)
print(emb)
print(_)
plt.figure()
scatter(emb,label=['01','02','03'],point_size=5)
plt.show()
| import sys
import os
import matplotlib.pyplot as plt
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
print(sys.path)
from scHiCTools import scHiCs,scatter
x = scHiCs(['../test/data/cell_01', '../test/data/cell_02',
'../test/data/cell_03'],
reference_genome='mm9',
resolution=50000,
max_distance=4000000,
format='shortest_score',
adjust_resolution=True,
chromosomes='except Y',
operations=['convolution'],
kernel_shape=3,
keep_n_strata=10,
store_full_map=True
)
x.plot_contacts()
emb, _ = x.learn_embedding(similarity_method='innerproduct',
embedding_method='MDS',
aggregation='median',
print_time=False,
return_distance=True)
print(emb)
print(_)
plt.figure()
scatter(emb,label=['01','02','03'],point_size=5)
plt.show() | none | 1 | 2.211661 | 2 | |
demo/urls.py | pantyukhov/river-admin | 75 | 6619346 | from django.conf.urls import url
from django.contrib import admin
from django.urls import include
from demo.view import approve_issue, approve_shipping
urlpatterns = [
url(r'^approve_issue/(?P<issue_id>\d+)/(?P<next_state_id>\d+)/$', approve_issue, name='approve_issue'),
url(r'^approve_shipping/(?P<shipping_id>\d+)/(?P<next_state_id>\d+)/$', approve_shipping, name='approve_shipping'),
url(r'^admin/', admin.site.urls),
url(r'^', include("river_admin.urls")),
url(r'^api-auth/', include('rest_framework.urls')),
]
| from django.conf.urls import url
from django.contrib import admin
from django.urls import include
from demo.view import approve_issue, approve_shipping
urlpatterns = [
url(r'^approve_issue/(?P<issue_id>\d+)/(?P<next_state_id>\d+)/$', approve_issue, name='approve_issue'),
url(r'^approve_shipping/(?P<shipping_id>\d+)/(?P<next_state_id>\d+)/$', approve_shipping, name='approve_shipping'),
url(r'^admin/', admin.site.urls),
url(r'^', include("river_admin.urls")),
url(r'^api-auth/', include('rest_framework.urls')),
]
| none | 1 | 1.921377 | 2 | |
ck4py/utils/MetricsUtil.py | rahlk/vagrant | 0 | 6619347 | from __future__ import division
from __future__ import print_function
import os
import subprocess
from .FileUtils import XMLUtil
from .git_core import Git
from pdb import set_trace
import shutil
root = os.getcwd()
class JavaUtil:
def __init__(self, jar_path_json, fbp_path, save_path="metrics", file_name="metrics"):
self.jar_path = jar_path_json
self.fbp_path = fbp_path
self.file_name = file_name if ".xml" in file_name else file_name + ".xml"
self.save_path = os.path.abspath(save_path)
@staticmethod
def _run_ckjm(jar):
cmd = ["java", "-jar", os.path.join(root, "tools/ckjm_ext.jar"),
"-x",
"-s",
jar]
return subprocess.Popen(cmd, stdout=subprocess.PIPE
, stderr=open(os.devnull, "w"))
@staticmethod
def _run_findbugs(fbp_file):
cmd = [os.path.join(root, "tools/findbugs-3.0.1/bin/findbugs"),
"-textui", "-project", fbp_file, "-xml"]
return subprocess.Popen(cmd, stdout=subprocess.PIPE
, stderr=open(os.devnull, "w"))
def save_metrics(self):
for version, jarfiles in self.jar_path.iteritems():
metrics = []
fbp_file = os.path.join(self.fbp_path, version + ".fbp")
print("\t+ Version: {}".format(version))
print("\t+ -- Computing CK Metrics")
for jar in jarfiles:
metrics.append(self._run_ckjm(jar).communicate()[0])
print("\t+ -- Running FindBugs")
foundbugs = self._run_findbugs(fbp_file).communicate()[0]
print(foundbugs,
file=open(os.path.join(self.save_path, "bug-" + version + ".xml"), "w+"))
print("<metrics>", "\n".join(metrics), "</metrics>", sep="\n",
file=open(os.path.join(self.save_path, version + ".xml"), "w+"))
class JSUtil:
def __init__(self, git_url, clone_path=None, project=None):
self.project = project if project else git_url.split("/")[-1].split(".git")[0]
self.clone_path = clone_path if clone_path is not None else os.path.abspath(os.path.join("/dev/shm/", self.project))
self.git = Git(project=self.project, url=git_url, clone_path=self.clone_path)
def fetch_project(self, hash=None):
self.git.fetch_commit_hash(hash)
def _run_escomplex(self):
cmd = ["cr", "--ignoreerrors", "--format", "json", self.clone_path]
return subprocess.Popen(cmd, stdout=subprocess.PIPE
, stderr=open(os.devnull, "w"))
def isolate_changes(self, git_hash):
self.git.isolate_changes(git_hash)
def get_metrics(self):
metrics = self._run_escomplex().communicate()[0]
return metrics
def clean_up(self):
shutil.rmtree(self.clone_path)
def __test_util():
"""
Commented this out to test JSUtil + Java isn't working at the moment.
Run a test case
:return:
"""
m = JavaUtil(jar_file="data/ant-1.8.2/build/lib/ant.jar",
file_name="ant.xml")
m.save_metrics()
xml = XMLUtil(metrics_name="ant.xml")
xml.save_as_csv()
def __test_JSUtil():
m = JSUtil(git_url="https://github.com/facebook/react.git")
m.fetch_project("30e6c6c9c9f8e52776981a0e91ccfbabb95f7974")
metrics = m.get_metrics()
return metrics
if __name__ == "__main__":
metrics = __test_JSUtil()
set_trace()
| from __future__ import division
from __future__ import print_function
import os
import subprocess
from .FileUtils import XMLUtil
from .git_core import Git
from pdb import set_trace
import shutil
root = os.getcwd()
class JavaUtil:
def __init__(self, jar_path_json, fbp_path, save_path="metrics", file_name="metrics"):
self.jar_path = jar_path_json
self.fbp_path = fbp_path
self.file_name = file_name if ".xml" in file_name else file_name + ".xml"
self.save_path = os.path.abspath(save_path)
@staticmethod
def _run_ckjm(jar):
cmd = ["java", "-jar", os.path.join(root, "tools/ckjm_ext.jar"),
"-x",
"-s",
jar]
return subprocess.Popen(cmd, stdout=subprocess.PIPE
, stderr=open(os.devnull, "w"))
@staticmethod
def _run_findbugs(fbp_file):
cmd = [os.path.join(root, "tools/findbugs-3.0.1/bin/findbugs"),
"-textui", "-project", fbp_file, "-xml"]
return subprocess.Popen(cmd, stdout=subprocess.PIPE
, stderr=open(os.devnull, "w"))
def save_metrics(self):
for version, jarfiles in self.jar_path.iteritems():
metrics = []
fbp_file = os.path.join(self.fbp_path, version + ".fbp")
print("\t+ Version: {}".format(version))
print("\t+ -- Computing CK Metrics")
for jar in jarfiles:
metrics.append(self._run_ckjm(jar).communicate()[0])
print("\t+ -- Running FindBugs")
foundbugs = self._run_findbugs(fbp_file).communicate()[0]
print(foundbugs,
file=open(os.path.join(self.save_path, "bug-" + version + ".xml"), "w+"))
print("<metrics>", "\n".join(metrics), "</metrics>", sep="\n",
file=open(os.path.join(self.save_path, version + ".xml"), "w+"))
class JSUtil:
def __init__(self, git_url, clone_path=None, project=None):
self.project = project if project else git_url.split("/")[-1].split(".git")[0]
self.clone_path = clone_path if clone_path is not None else os.path.abspath(os.path.join("/dev/shm/", self.project))
self.git = Git(project=self.project, url=git_url, clone_path=self.clone_path)
def fetch_project(self, hash=None):
self.git.fetch_commit_hash(hash)
def _run_escomplex(self):
cmd = ["cr", "--ignoreerrors", "--format", "json", self.clone_path]
return subprocess.Popen(cmd, stdout=subprocess.PIPE
, stderr=open(os.devnull, "w"))
def isolate_changes(self, git_hash):
self.git.isolate_changes(git_hash)
def get_metrics(self):
metrics = self._run_escomplex().communicate()[0]
return metrics
def clean_up(self):
shutil.rmtree(self.clone_path)
def __test_util():
"""
Commented this out to test JSUtil + Java isn't working at the moment.
Run a test case
:return:
"""
m = JavaUtil(jar_file="data/ant-1.8.2/build/lib/ant.jar",
file_name="ant.xml")
m.save_metrics()
xml = XMLUtil(metrics_name="ant.xml")
xml.save_as_csv()
def __test_JSUtil():
m = JSUtil(git_url="https://github.com/facebook/react.git")
m.fetch_project("30e6c6c9c9f8e52776981a0e91ccfbabb95f7974")
metrics = m.get_metrics()
return metrics
if __name__ == "__main__":
metrics = __test_JSUtil()
set_trace()
| en | 0.928529 | Commented this out to test JSUtil + Java isn't working at the moment. Run a test case :return: | 2.134917 | 2 |
brian2/codegen/languages/cpp_lang.py | divyashivaram/brian2 | 1 | 6619348 | '''
TODO: use preferences to get arguments to Language
'''
import itertools
import numpy
from brian2.utils.stringtools import deindent, stripped_deindented_lines
from brian2.codegen.functions.base import Function
from brian2.utils.logger import get_logger
from brian2.parsing.rendering import CPPNodeRenderer
from brian2.core.preferences import brian_prefs, BrianPreference
from brian2.core.variables import ArrayVariable
from .base import Language
logger = get_logger(__name__)
__all__ = ['CPPLanguage',
'c_data_type',
]
def c_data_type(dtype):
'''
Gives the C language specifier for numpy data types. For example,
``numpy.int32`` maps to ``int32_t`` in C.
'''
# this handles the case where int is specified, it will be int32 or int64
# depending on platform
if dtype is int:
dtype = numpy.array([1]).dtype.type
if dtype is float:
dtype = numpy.array([1.]).dtype.type
if dtype == numpy.float32:
dtype = 'float'
elif dtype == numpy.float64:
dtype = 'double'
elif dtype == numpy.int32:
dtype = 'int32_t'
elif dtype == numpy.int64:
dtype = 'int64_t'
elif dtype == numpy.uint16:
dtype = 'uint16_t'
elif dtype == numpy.uint32:
dtype = 'uint32_t'
elif dtype == numpy.bool_ or dtype is bool:
dtype = 'bool'
else:
raise ValueError("dtype " + str(dtype) + " not known.")
return dtype
# Preferences
brian_prefs.register_preferences(
'codegen.languages.cpp',
'C++ codegen preferences',
restrict_keyword = BrianPreference(
default='__restrict__',
docs='''
The keyword used for the given compiler to declare pointers as restricted.
This keyword is different on different compilers, the default is for gcc.
''',
),
flush_denormals = BrianPreference(
default=False,
docs='''
Adds code to flush denormals to zero.
The code is gcc and architecture specific, so may not compile on all
platforms. The code, for reference is::
#define CSR_FLUSH_TO_ZERO (1 << 15)
unsigned csr = __builtin_ia32_stmxcsr();
csr |= CSR_FLUSH_TO_ZERO;
__builtin_ia32_ldmxcsr(csr);
Found at `<http://stackoverflow.com/questions/2487653/avoiding-denormal-values-in-c>`_.
''',
),
)
class CPPLanguage(Language):
'''
C++ language
C++ code templates should provide Jinja2 macros with the following names:
``main``
The main loop.
``support_code``
The support code (function definitions, etc.), compiled in a separate
file.
For user-defined functions, there are two keys to provide:
``support_code``
The function definition which will be added to the support code.
``hashdefine_code``
The ``#define`` code added to the main loop.
See `TimedArray` for an example of these keys.
'''
language_id = 'cpp'
def __init__(self, c_data_type=c_data_type):
self.restrict = brian_prefs['codegen.languages.cpp.restrict_keyword'] + ' '
self.flush_denormals = brian_prefs['codegen.languages.cpp.flush_denormals']
self.c_data_type = c_data_type
def translate_expression(self, expr):
return CPPNodeRenderer().render_expr(expr).strip()
def translate_statement(self, statement):
var, op, expr = statement.var, statement.op, statement.expr
if op == ':=':
decl = self.c_data_type(statement.dtype) + ' '
op = '='
if statement.constant:
decl = 'const ' + decl
else:
decl = ''
return decl + var + ' ' + op + ' ' + self.translate_expression(expr) + ';'
def translate_statement_sequence(self, statements, variables, namespace,
variable_indices, iterate_all):
# Note that C++ code does not care about the iterate_all argument -- it
# always has to loop over the elements
read, write = self.array_read_write(statements, variables)
lines = []
# read arrays
for varname in read:
index_var = variable_indices[varname]
var = variables[varname]
if varname not in write:
line = 'const '
else:
line = ''
line = line + self.c_data_type(var.dtype) + ' ' + varname + ' = '
line = line + '_ptr' + var.arrayname + '[' + index_var + '];'
lines.append(line)
# simply declare variables that will be written but not read
for varname in write:
if varname not in read:
var = variables[varname]
line = self.c_data_type(var.dtype) + ' ' + varname + ';'
lines.append(line)
# the actual code
lines.extend([self.translate_statement(stmt) for stmt in statements])
# write arrays
for varname in write:
index_var = variable_indices[varname]
var = variables[varname]
line = '_ptr' + var.arrayname + '[' + index_var + '] = ' + varname + ';'
lines.append(line)
code = '\n'.join(lines)
# set up the restricted pointers, these are used so that the compiler
# knows there is no aliasing in the pointers, for optimisation
lines = []
# It is possible that several different variable names refer to the
# same array. E.g. in gapjunction code, v_pre and v_post refer to the
# same array if a group is connected to itself
arraynames = set()
for varname, var in variables.iteritems():
if isinstance(var, ArrayVariable):
arrayname = var.arrayname
if not arrayname in arraynames:
line = self.c_data_type(var.dtype) + ' * ' + self.restrict + '_ptr' + arrayname + ' = ' + arrayname + ';'
lines.append(line)
arraynames.add(arrayname)
pointers = '\n'.join(lines)
# set up the functions
user_functions = []
support_code = ''
hash_defines = ''
for varname, variable in namespace.items():
if isinstance(variable, Function):
user_functions.append(varname)
speccode = variable.code(self, varname)
support_code += '\n' + deindent(speccode['support_code'])
hash_defines += deindent(speccode['hashdefine_code'])
# add the Python function with a leading '_python', if it
# exists. This allows the function to make use of the Python
# function via weave if necessary (e.g. in the case of randn)
if not variable.pyfunc is None:
pyfunc_name = '_python_' + varname
if pyfunc_name in namespace:
logger.warn(('Namespace already contains function %s, '
'not replacing it') % pyfunc_name)
else:
namespace[pyfunc_name] = variable.pyfunc
# delete the user-defined functions from the namespace
for func in user_functions:
del namespace[func]
# return
return (stripped_deindented_lines(code),
{'pointers_lines': stripped_deindented_lines(pointers),
'support_code_lines': stripped_deindented_lines(support_code),
'hashdefine_lines': stripped_deindented_lines(hash_defines),
'denormals_code_lines': stripped_deindented_lines(self.denormals_to_zero_code()),
})
def denormals_to_zero_code(self):
if self.flush_denormals:
return '''
#define CSR_FLUSH_TO_ZERO (1 << 15)
unsigned csr = __builtin_ia32_stmxcsr();
csr |= CSR_FLUSH_TO_ZERO;
__builtin_ia32_ldmxcsr(csr);
'''
else:
return ''
| '''
TODO: use preferences to get arguments to Language
'''
import itertools
import numpy
from brian2.utils.stringtools import deindent, stripped_deindented_lines
from brian2.codegen.functions.base import Function
from brian2.utils.logger import get_logger
from brian2.parsing.rendering import CPPNodeRenderer
from brian2.core.preferences import brian_prefs, BrianPreference
from brian2.core.variables import ArrayVariable
from .base import Language
logger = get_logger(__name__)
__all__ = ['CPPLanguage',
'c_data_type',
]
def c_data_type(dtype):
'''
Gives the C language specifier for numpy data types. For example,
``numpy.int32`` maps to ``int32_t`` in C.
'''
# this handles the case where int is specified, it will be int32 or int64
# depending on platform
if dtype is int:
dtype = numpy.array([1]).dtype.type
if dtype is float:
dtype = numpy.array([1.]).dtype.type
if dtype == numpy.float32:
dtype = 'float'
elif dtype == numpy.float64:
dtype = 'double'
elif dtype == numpy.int32:
dtype = 'int32_t'
elif dtype == numpy.int64:
dtype = 'int64_t'
elif dtype == numpy.uint16:
dtype = 'uint16_t'
elif dtype == numpy.uint32:
dtype = 'uint32_t'
elif dtype == numpy.bool_ or dtype is bool:
dtype = 'bool'
else:
raise ValueError("dtype " + str(dtype) + " not known.")
return dtype
# Preferences
brian_prefs.register_preferences(
'codegen.languages.cpp',
'C++ codegen preferences',
restrict_keyword = BrianPreference(
default='__restrict__',
docs='''
The keyword used for the given compiler to declare pointers as restricted.
This keyword is different on different compilers, the default is for gcc.
''',
),
flush_denormals = BrianPreference(
default=False,
docs='''
Adds code to flush denormals to zero.
The code is gcc and architecture specific, so may not compile on all
platforms. The code, for reference is::
#define CSR_FLUSH_TO_ZERO (1 << 15)
unsigned csr = __builtin_ia32_stmxcsr();
csr |= CSR_FLUSH_TO_ZERO;
__builtin_ia32_ldmxcsr(csr);
Found at `<http://stackoverflow.com/questions/2487653/avoiding-denormal-values-in-c>`_.
''',
),
)
class CPPLanguage(Language):
'''
C++ language
C++ code templates should provide Jinja2 macros with the following names:
``main``
The main loop.
``support_code``
The support code (function definitions, etc.), compiled in a separate
file.
For user-defined functions, there are two keys to provide:
``support_code``
The function definition which will be added to the support code.
``hashdefine_code``
The ``#define`` code added to the main loop.
See `TimedArray` for an example of these keys.
'''
language_id = 'cpp'
def __init__(self, c_data_type=c_data_type):
self.restrict = brian_prefs['codegen.languages.cpp.restrict_keyword'] + ' '
self.flush_denormals = brian_prefs['codegen.languages.cpp.flush_denormals']
self.c_data_type = c_data_type
def translate_expression(self, expr):
return CPPNodeRenderer().render_expr(expr).strip()
def translate_statement(self, statement):
var, op, expr = statement.var, statement.op, statement.expr
if op == ':=':
decl = self.c_data_type(statement.dtype) + ' '
op = '='
if statement.constant:
decl = 'const ' + decl
else:
decl = ''
return decl + var + ' ' + op + ' ' + self.translate_expression(expr) + ';'
def translate_statement_sequence(self, statements, variables, namespace,
variable_indices, iterate_all):
# Note that C++ code does not care about the iterate_all argument -- it
# always has to loop over the elements
read, write = self.array_read_write(statements, variables)
lines = []
# read arrays
for varname in read:
index_var = variable_indices[varname]
var = variables[varname]
if varname not in write:
line = 'const '
else:
line = ''
line = line + self.c_data_type(var.dtype) + ' ' + varname + ' = '
line = line + '_ptr' + var.arrayname + '[' + index_var + '];'
lines.append(line)
# simply declare variables that will be written but not read
for varname in write:
if varname not in read:
var = variables[varname]
line = self.c_data_type(var.dtype) + ' ' + varname + ';'
lines.append(line)
# the actual code
lines.extend([self.translate_statement(stmt) for stmt in statements])
# write arrays
for varname in write:
index_var = variable_indices[varname]
var = variables[varname]
line = '_ptr' + var.arrayname + '[' + index_var + '] = ' + varname + ';'
lines.append(line)
code = '\n'.join(lines)
# set up the restricted pointers, these are used so that the compiler
# knows there is no aliasing in the pointers, for optimisation
lines = []
# It is possible that several different variable names refer to the
# same array. E.g. in gapjunction code, v_pre and v_post refer to the
# same array if a group is connected to itself
arraynames = set()
for varname, var in variables.iteritems():
if isinstance(var, ArrayVariable):
arrayname = var.arrayname
if not arrayname in arraynames:
line = self.c_data_type(var.dtype) + ' * ' + self.restrict + '_ptr' + arrayname + ' = ' + arrayname + ';'
lines.append(line)
arraynames.add(arrayname)
pointers = '\n'.join(lines)
# set up the functions
user_functions = []
support_code = ''
hash_defines = ''
for varname, variable in namespace.items():
if isinstance(variable, Function):
user_functions.append(varname)
speccode = variable.code(self, varname)
support_code += '\n' + deindent(speccode['support_code'])
hash_defines += deindent(speccode['hashdefine_code'])
# add the Python function with a leading '_python', if it
# exists. This allows the function to make use of the Python
# function via weave if necessary (e.g. in the case of randn)
if not variable.pyfunc is None:
pyfunc_name = '_python_' + varname
if pyfunc_name in namespace:
logger.warn(('Namespace already contains function %s, '
'not replacing it') % pyfunc_name)
else:
namespace[pyfunc_name] = variable.pyfunc
# delete the user-defined functions from the namespace
for func in user_functions:
del namespace[func]
# return
return (stripped_deindented_lines(code),
{'pointers_lines': stripped_deindented_lines(pointers),
'support_code_lines': stripped_deindented_lines(support_code),
'hashdefine_lines': stripped_deindented_lines(hash_defines),
'denormals_code_lines': stripped_deindented_lines(self.denormals_to_zero_code()),
})
def denormals_to_zero_code(self):
if self.flush_denormals:
return '''
#define CSR_FLUSH_TO_ZERO (1 << 15)
unsigned csr = __builtin_ia32_stmxcsr();
csr |= CSR_FLUSH_TO_ZERO;
__builtin_ia32_ldmxcsr(csr);
'''
else:
return ''
| en | 0.73502 | TODO: use preferences to get arguments to Language Gives the C language specifier for numpy data types. For example,
``numpy.int32`` maps to ``int32_t`` in C. # this handles the case where int is specified, it will be int32 or int64 # depending on platform # Preferences The keyword used for the given compiler to declare pointers as restricted.
This keyword is different on different compilers, the default is for gcc. Adds code to flush denormals to zero.
The code is gcc and architecture specific, so may not compile on all
platforms. The code, for reference is::
#define CSR_FLUSH_TO_ZERO (1 << 15)
unsigned csr = __builtin_ia32_stmxcsr();
csr |= CSR_FLUSH_TO_ZERO;
__builtin_ia32_ldmxcsr(csr);
Found at `<http://stackoverflow.com/questions/2487653/avoiding-denormal-values-in-c>`_. C++ language
C++ code templates should provide Jinja2 macros with the following names:
``main``
The main loop.
``support_code``
The support code (function definitions, etc.), compiled in a separate
file.
For user-defined functions, there are two keys to provide:
``support_code``
The function definition which will be added to the support code.
``hashdefine_code``
The ``#define`` code added to the main loop.
See `TimedArray` for an example of these keys. # Note that C++ code does not care about the iterate_all argument -- it # always has to loop over the elements # read arrays # simply declare variables that will be written but not read # the actual code # write arrays # set up the restricted pointers, these are used so that the compiler # knows there is no aliasing in the pointers, for optimisation # It is possible that several different variable names refer to the # same array. E.g. in gapjunction code, v_pre and v_post refer to the # same array if a group is connected to itself # set up the functions # add the Python function with a leading '_python', if it # exists. This allows the function to make use of the Python # function via weave if necessary (e.g. in the case of randn) # delete the user-defined functions from the namespace # return #define CSR_FLUSH_TO_ZERO (1 << 15)
unsigned csr = __builtin_ia32_stmxcsr();
csr |= CSR_FLUSH_TO_ZERO;
__builtin_ia32_ldmxcsr(csr); | 2.877441 | 3 |
examples/tactics.py | vercity/czsc | 1 | 6619349 | # -*- coding: utf-8 -*-
"""
author: zengbin93
email: <EMAIL>
create_dt: 2022/2/10 21:12
"""
from czsc import signals, CZSC
from czsc.objects import Freq, Operate, Signal, Factor, Event
from collections import OrderedDict
def trader_strategy_a():
"""A股市场择时策略A"""
def get_signals(c: CZSC) -> OrderedDict:
s = OrderedDict({"symbol": c.symbol, "dt": c.bars_raw[-1].dt, "close": c.bars_raw[-1].close})
if c.freq in [Freq.F15]:
s.update(signals.bxt.get_s_d0_bi(c))
s.update(signals.other.get_s_zdt(c, di=1))
s.update(signals.other.get_s_op_time_span(c, op='开多', time_span=('13:00', '14:50')))
s.update(signals.other.get_s_op_time_span(c, op='平多', time_span=('09:35', '14:50')))
if c.freq in [Freq.F60, Freq.D, Freq.W]:
s.update(signals.ta.get_s_macd(c, di=1))
return s
long_states_pos = {
'hold_long_a': 1.0,
'hold_long_b': 1.0,
'hold_long_c': 1.0,
}
short_states_pos = None
long_events = [
Event(name="开多", operate=Operate.LO, factors=[
Factor(name="低吸", signals_all=[
Signal("开多时间范围_13:00_14:50_是_任意_任意_0"),
Signal("15分钟_倒1K_ZDT_非涨跌停_任意_任意_0"),
Signal("60分钟_倒1K_MACD多空_多头_任意_任意_0"),
Signal("15分钟_倒0笔_方向_向上_任意_任意_0"),
Signal("15分钟_倒0笔_长度_5根K线以下_任意_任意_0"),
]),
]),
Event(name="平多", operate=Operate.LE, factors=[
Factor(name="持有资金", signals_all=[
Signal("平多时间范围_09:35_14:50_是_任意_任意_0"),
Signal("15分钟_倒1K_ZDT_非涨跌停_任意_任意_0"),
], signals_not=[
Signal("15分钟_倒0笔_方向_向上_任意_任意_0"),
Signal("60分钟_倒1K_MACD多空_多头_任意_任意_0"),
]),
]),
]
short_events = None
tactic = {
"base_freq": '15分钟',
"freqs": ['60分钟', '日线'],
"get_signals": get_signals,
"signals_n": 0,
"long_states_pos": long_states_pos,
"long_events": long_events,
"long_min_interval": 3600*4,
"short_states_pos": short_states_pos,
"short_events": short_events,
"short_min_interval": 3600*4,
}
return tactic
| # -*- coding: utf-8 -*-
"""
author: zengbin93
email: <EMAIL>
create_dt: 2022/2/10 21:12
"""
from czsc import signals, CZSC
from czsc.objects import Freq, Operate, Signal, Factor, Event
from collections import OrderedDict
def trader_strategy_a():
"""A股市场择时策略A"""
def get_signals(c: CZSC) -> OrderedDict:
s = OrderedDict({"symbol": c.symbol, "dt": c.bars_raw[-1].dt, "close": c.bars_raw[-1].close})
if c.freq in [Freq.F15]:
s.update(signals.bxt.get_s_d0_bi(c))
s.update(signals.other.get_s_zdt(c, di=1))
s.update(signals.other.get_s_op_time_span(c, op='开多', time_span=('13:00', '14:50')))
s.update(signals.other.get_s_op_time_span(c, op='平多', time_span=('09:35', '14:50')))
if c.freq in [Freq.F60, Freq.D, Freq.W]:
s.update(signals.ta.get_s_macd(c, di=1))
return s
long_states_pos = {
'hold_long_a': 1.0,
'hold_long_b': 1.0,
'hold_long_c': 1.0,
}
short_states_pos = None
long_events = [
Event(name="开多", operate=Operate.LO, factors=[
Factor(name="低吸", signals_all=[
Signal("开多时间范围_13:00_14:50_是_任意_任意_0"),
Signal("15分钟_倒1K_ZDT_非涨跌停_任意_任意_0"),
Signal("60分钟_倒1K_MACD多空_多头_任意_任意_0"),
Signal("15分钟_倒0笔_方向_向上_任意_任意_0"),
Signal("15分钟_倒0笔_长度_5根K线以下_任意_任意_0"),
]),
]),
Event(name="平多", operate=Operate.LE, factors=[
Factor(name="持有资金", signals_all=[
Signal("平多时间范围_09:35_14:50_是_任意_任意_0"),
Signal("15分钟_倒1K_ZDT_非涨跌停_任意_任意_0"),
], signals_not=[
Signal("15分钟_倒0笔_方向_向上_任意_任意_0"),
Signal("60分钟_倒1K_MACD多空_多头_任意_任意_0"),
]),
]),
]
short_events = None
tactic = {
"base_freq": '15分钟',
"freqs": ['60分钟', '日线'],
"get_signals": get_signals,
"signals_n": 0,
"long_states_pos": long_states_pos,
"long_events": long_events,
"long_min_interval": 3600*4,
"short_states_pos": short_states_pos,
"short_events": short_events,
"short_min_interval": 3600*4,
}
return tactic
| en | 0.156734 | # -*- coding: utf-8 -*- author: zengbin93 email: <EMAIL> create_dt: 2022/2/10 21:12 A股市场择时策略A | 2.213671 | 2 |
app/waterQual/basinAll/dataPrep.py | fkwai/geolearn | 0 | 6619350 | <gh_stars>0
from hydroDL import kPath
from hydroDL.app import waterQuality
from hydroDL.data import gageII
import pandas as pd
import numpy as np
import os
import time
# all gages
fileSiteNo = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteNoLst-1979')
siteNoLstAll = pd.read_csv(fileSiteNo, header=None, dtype=str)[0].tolist()
tabSel = gageII.readData(
varLst=['CLASS'], siteNoLst=siteNoLstAll)
tabSel = gageII.updateCode(tabSel)
siteNoLst = tabSel[tabSel['CLASS'] == 1].index.tolist()
# wqData = waterQuality.DataModelWQ.new('basinRef', siteNoLst)
wqData = waterQuality.DataModelWQ('basinRef')
# indYr1 = waterQuality.indYr(wqData.info, yrLst=[1979, 2000])[0]
# wqData.saveSubset('Y8090', indYr1)
# indYr2 = waterQuality.indYr(wqData.info, yrLst=[2000, 2020])[0]
# wqData.saveSubset('Y0010', indYr2)
indYrO, indYrE = waterQuality.indYrOddEven(wqData.info)
wqData.saveSubset('Yodd', indYrO)
wqData.saveSubset('Yeven', indYrE)
| from hydroDL import kPath
from hydroDL.app import waterQuality
from hydroDL.data import gageII
import pandas as pd
import numpy as np
import os
import time
# all gages
fileSiteNo = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteNoLst-1979')
siteNoLstAll = pd.read_csv(fileSiteNo, header=None, dtype=str)[0].tolist()
tabSel = gageII.readData(
varLst=['CLASS'], siteNoLst=siteNoLstAll)
tabSel = gageII.updateCode(tabSel)
siteNoLst = tabSel[tabSel['CLASS'] == 1].index.tolist()
# wqData = waterQuality.DataModelWQ.new('basinRef', siteNoLst)
wqData = waterQuality.DataModelWQ('basinRef')
# indYr1 = waterQuality.indYr(wqData.info, yrLst=[1979, 2000])[0]
# wqData.saveSubset('Y8090', indYr1)
# indYr2 = waterQuality.indYr(wqData.info, yrLst=[2000, 2020])[0]
# wqData.saveSubset('Y0010', indYr2)
indYrO, indYrE = waterQuality.indYrOddEven(wqData.info)
wqData.saveSubset('Yodd', indYrO)
wqData.saveSubset('Yeven', indYrE) | en | 0.407099 | # all gages # wqData = waterQuality.DataModelWQ.new('basinRef', siteNoLst) # indYr1 = waterQuality.indYr(wqData.info, yrLst=[1979, 2000])[0] # wqData.saveSubset('Y8090', indYr1) # indYr2 = waterQuality.indYr(wqData.info, yrLst=[2000, 2020])[0] # wqData.saveSubset('Y0010', indYr2) | 2.211717 | 2 |