hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a49afb80789480d7cb57a77b968a1e32a26b82e3 | 806 | py | Python | app.py | iUwej/Remote-Logger-Server | 8adfd4b85e277ea7e4bd24c22462ff54f0ddedd8 | [
"Unlicense"
] | null | null | null | app.py | iUwej/Remote-Logger-Server | 8adfd4b85e277ea7e4bd24c22462ff54f0ddedd8 | [
"Unlicense"
] | null | null | null | app.py | iUwej/Remote-Logger-Server | 8adfd4b85e277ea7e4bd24c22462ff54f0ddedd8 | [
"Unlicense"
] | null | null | null |
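# A small Flask + Redis service: POST JSON to /logerror to append it to a
# Redis list, GET /logerror to read back every stored log entry.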
from flask import Flask
from flask import request
from flask import jsonify
from flask_redis import FlaskRedis

app = Flask(__name__)

# provide the redis configuration in the app configs to use this
redis_store = FlaskRedis(app)


@app.route('/')
def index():
    return 'Home to the remote logger'


@app.route('/logerror', methods=['POST', 'GET'])
def logerror():
    if request.method == 'POST':
        log = request.get_json(force=True)
        # print(log)
        redis_store.rpush("errors", str(log))
        return "Log saved", 201
    else:
        all_logs = redis_store.lrange("errors", 0, -1)
        all_logs_str = [item.decode('utf-8') for item in all_logs]
        return jsonify(all_logs_str)


@app.route('/clearerror')
def clearerror():
    redis_store.delete("errors")
    return 'Deleted', 200


if __name__ == '__main__':
    app.run()
| 17.148936 | 63 | 0.705955 | 118 | 806 | 4.618644 | 0.491525 | 0.066055 | 0.082569 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013255 | 0.157568 | 806 | 46 | 64 | 17.521739 | 0.789396 | 0.08933 | 0 | 0 | 0 | 0 | 0.142466 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.16 | 0.04 | 0.44 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a49de0a573a17e1a8abeb597091da40cf1ac2a4e | 2,529 | py | Python | DBProcessing/ProcessIphoneBackup.py | georgezywang/RealTime_Wechat_Analysis | baa9ba4a06d9d6b4ce13b951f1b3846ebd338ce8 | [
"MIT"
] | null | null | null | DBProcessing/ProcessIphoneBackup.py | georgezywang/RealTime_Wechat_Analysis | baa9ba4a06d9d6b4ce13b951f1b3846ebd338ce8 | [
"MIT"
] | null | null | null | DBProcessing/ProcessIphoneBackup.py | georgezywang/RealTime_Wechat_Analysis | baa9ba4a06d9d6b4ce13b951f1b3846ebd338ce8 | [
"MIT"
] | null | null | null | import sqlite3 as sqlite
import os
from Utils import *
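
# Module-level configuration: the iPhone backup directory, the wxid of the
# account being indexed, and a connection to the pre-parsed contact database.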
iphoneBackupDir = "IphoneBackup"
m_nsAliasName = "wxid_t798rxqmvz7s11"
PARSED_DB_PATH = "DataStore/Contact.db"
PARSED_DATA_CONNECTION = ConnectNonEncryptedDB(PARSED_DB_PATH)
userAlias, userChatEncryption, userDB = GetUpdateContactInfo(PARSED_DATA_CONNECTION, m_nsAliasName)
def GetUserIphoneDB(userChatEncryption):
    for i in range(4):
        DBName = "message_{}.sqlite".format(i + 1)
        CurrDBConnection = ConnectNonEncryptedDB(os.path.join(iphoneBackupDir, DBName))
        currentDBCursor = CurrDBConnection.cursor()
        currentDBCursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
        tableList = [table[0] for table in currentDBCursor.fetchall()]
        CurrDBConnection.close()
        if userChatEncryption in tableList:
            return i + 1
    return -1

def UpdateIphoneContactMap():
    parsedDataCursor = PARSED_DATA_CONNECTION.cursor()
    parsedDataCursor.execute("""CREATE TABLE IF NOT EXISTS IphoneParsedContact(
                                    m_nsUsrName TEXT PRIMARY KEY,
                                    m_nsRemark TEXT,
                                    m_nsAliasName TEXT,
                                    chat_md5ID TEXT,
                                    db_Stored INTEGER
                                );""")
    PARSED_DATA_CONNECTION.commit()
    parsedDataCursor.execute("""SELECT m_nsUsrName, m_nsRemark, m_nsAliasName, chat_md5ID
                                FROM ParsedContact;""")
    contactData = parsedDataCursor.fetchall()
    for contact in contactData:
        m_nsUsrName = contact[0]
        m_nsRemark = contact[1]
        m_nsAliasName = contact[2]
        chat_md5ID = contact[3]
        db_Stored = GetUserIphoneDB(chat_md5ID)
        parsedDataCursor.execute("""INSERT OR REPLACE INTO IphoneParsedContact(
                                        m_nsUsrName,
                                        m_nsRemark,
                                        m_nsAliasName,
                                        chat_md5ID,
                                        db_Stored)
                                    VALUES(?,?,?,?,?);""", (m_nsUsrName, m_nsRemark,
                                                            m_nsAliasName, chat_md5ID, db_Stored))
        # Fall back to the username when the remark is missing or blank
        contactRemark = m_nsRemark if m_nsRemark is not None and len(m_nsRemark.replace(" ", "")) > 1 else m_nsUsrName
        print("Contact {} Stored in DB {}".format(contactRemark, db_Stored))
    PARSED_DATA_CONNECTION.commit()

UpdateIphoneContactMap()
| 38.318182 | 123 | 0.594306 | 228 | 2,529 | 6.377193 | 0.368421 | 0.049519 | 0.068776 | 0.039202 | 0.093535 | 0.093535 | 0.093535 | 0.093535 | 0.066025 | 0.066025 | 0 | 0.013537 | 0.328193 | 2,529 | 65 | 124 | 38.907692 | 0.84226 | 0 | 0 | 0.04 | 0 | 0 | 0.373217 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.06 | 0 | 0.14 | 0.02 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a49f7d80f24b4797e2ac4f693214e1ea5cd6017e | 2,936 | py | Python | sohojoe_wrappers.py | Sohojoe/many_towers | 527b3c4b591a3d0919b76395ecfc22c4c0059b08 | [
"MIT"
] | null | null | null | sohojoe_wrappers.py | Sohojoe/many_towers | 527b3c4b591a3d0919b76395ecfc22c4c0059b08 | [
"MIT"
] | null | null | null | sohojoe_wrappers.py | Sohojoe/many_towers | 527b3c4b591a3d0919b76395ecfc22c4c0059b08 | [
"MIT"
] | null | null | null | import os
import gym
import numpy as np
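

# Helpers that walk nested gym wrappers (env.env / env._env) looking for a
# done_grading / is_grading hook on the underlying environment.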
def done_grading(env):
    if hasattr(env, 'done_grading'):
        return env.done_grading()
    if hasattr(env, 'env'):
        return done_grading(env.env)
    if hasattr(env, '_env'):
        return done_grading(env._env)

def is_grading(env):
    if hasattr(env, 'is_grading'):
        return env.is_grading()
    if hasattr(env, 'env'):
        return is_grading(env.env)
    if hasattr(env, '_env'):
        return is_grading(env._env)


class RenderObservations(gym.Wrapper):
    def __init__(self, env, display_vector_obs=True):
        gym.Wrapper.__init__(self, env)
        self.viewer = None
        self._empty = np.zeros((1, 1, 1))
        self._has_vector_obs = hasattr(self.observation_space, 'spaces')
        self._8bit = None
        self._display_vector_obs = display_vector_obs

    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        should_render = True
        if 'human_agent_display' in globals():
            global human_agent_display
            should_render = human_agent_display
        self._renderObs(ob, should_render)
        return ob, reward, done, info

    def _renderObs(self, obs, should_render):
        from gym.envs.classic_control import rendering
        if self.viewer is None:
            self.viewer = rendering.SimpleImageViewer()
        if not should_render:
            self.viewer.imshow(self._empty)
            return self.viewer.isopen
        if self._has_vector_obs:
            visual_obs = obs['visual'].copy()
            vector_obs = obs['vector'].copy()
        else:
            visual_obs = obs.copy()
        if self._has_vector_obs and self._display_vector_obs:
            w = 84
            # Displays time left and number of keys on visual observation
            key = vector_obs[0:-1]
            time_num = vector_obs[-1]
            key_num = np.argmax(key, axis=0)
            # max_bright = 1
            max_bright = 255
            visual_obs[0:10, :, :] = 0
            for i in range(key_num):
                start = int(i * 16.8) + 4
                end = start + 10
                visual_obs[1:5, start:end, 0:2] = max_bright
            visual_obs[6:10, 0:int(time_num * w), 1] = max_bright
        self._8bit = visual_obs
        # if type(visual_obs[0][0][0]) is np.float32 or type(visual_obs[0][0][0]) is np.float64:
        #     _8bit = (255.0 * visual_obs).astype(np.uint8)
        self._8bit = (visual_obs).astype(np.uint8)
        self.viewer.imshow(self._8bit)
        return self.viewer.isopen

    def render(self, mode='human', **kwargs):
        if self.viewer:
            self.viewer.imshow(self._8bit)
        return self._8bit

    def reset(self):
        return self.env.reset()

    def close(self):
        self.env.close()
        if self.viewer is not None:
            self.viewer.close()
            self.viewer = None
| 32.988764 | 96 | 0.583447 | 388 | 2,936 | 4.193299 | 0.255155 | 0.073755 | 0.044253 | 0.036878 | 0.244622 | 0.195452 | 0.157345 | 0.11555 | 0.090965 | 0 | 0 | 0.027107 | 0.308924 | 2,936 | 88 | 97 | 33.363636 | 0.774766 | 0.070504 | 0 | 0.138889 | 0 | 0 | 0.028645 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.055556 | 0.013889 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a4a374d1f47637ba07c0cd5f25d45e1f33628c90 | 1,466 | py | Python | jdcloud_sdk/services/asset/models/OperatingStatementVo.py | Tanc009/jdcloud-sdk-python | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | [
"Apache-2.0"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | jdcloud_sdk/services/asset/models/OperatingStatementVo.py | Tanc009/jdcloud-sdk-python | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | [
"Apache-2.0"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | jdcloud_sdk/services/asset/models/OperatingStatementVo.py | Tanc009/jdcloud-sdk-python | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | [
"Apache-2.0"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class OperatingStatementVo(object):
    def __init__(self, tradeType=None, tradeStatus=None, beginTime=None, endTime=None, pageIndex=None, pageSize=None):
        """
        :param tradeType: (Optional) Transaction type: 1 - top-up (11. online top-up, 12. chargeback top-up, 13. offline remittance, unclaimed, 14. offline remittance, manually claimed, 15. supplementary-order top-up, 16. refund top-up); 2 - spending; 3 - withdrawal
        :param tradeStatus: (Optional) Transaction status: 1 - success, 2 - failure, 31 - withdrawal fully succeeded, 32 - withdrawal fully failed, 33 - withdrawal partially succeeded, 34 - pending operations review, 35 - approved by operations, 36 - rejected by operations, 37 - processing, 38 - pre-allocated top-up order failed
        :param beginTime: (Optional) Start time
        :param endTime: (Optional) End time
        :param pageIndex: (Optional) Current page number
        :param pageSize: (Optional) Items per page
        """

        self.tradeType = tradeType
        self.tradeStatus = tradeStatus
        self.beginTime = beginTime
        self.endTime = endTime
        self.pageIndex = pageIndex
        self.pageSize = pageSize
| 38.578947 | 126 | 0.705321 | 202 | 1,466 | 5.09901 | 0.628713 | 0.058252 | 0.025243 | 0.031068 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036114 | 0.206685 | 1,466 | 37 | 127 | 39.621622 | 0.849527 | 0.678035 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a4a94de479ad444e62d5c5754fb01c753297dfe3 | 34,996 | py | Python | moya/tags/server.py | moyaproject/moya | 78b91d87b4519f91dfdd2b40dab44e72f201a843 | [
"MIT"
] | 129 | 2015-02-16T12:02:50.000Z | 2021-11-06T00:20:01.000Z | moya/tags/server.py | liaohandel/moya | 78b91d87b4519f91dfdd2b40dab44e72f201a843 | [
"MIT"
] | 5 | 2015-02-19T15:56:41.000Z | 2015-09-08T18:58:35.000Z | moya/tags/server.py | liaohandel/moya | 78b91d87b4519f91dfdd2b40dab44e72f201a843 | [
"MIT"
] | 14 | 2015-02-19T17:20:34.000Z | 2022-03-28T01:38:09.000Z | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from ..elements import Attribute
from ..elements.elementbase import LogicElement
from ..tags.context import ContextElementBase, DataSetter
from .. import logic
from ..urlmapper import URLMapper, MissingURLParameter, RouteError
from ..context.expressiontime import ExpressionDateTime
from ..render import render_object
from .. import http
from ..http import StatusCode, standard_response, RespondWith
from .. import errors
from ..template.errors import MissingTemplateError
from ..template.rendercontainer import RenderContainer
from .. import trace
from .. import __version__
from ..content import Content
from ..tags.content import ContentElementMixin
from ..tools import get_return
from .. import syntax
from ..timezone import Timezone
from ..context.tools import to_expression, set_dynamic
from ..sites import LocaleProxy
from ..compat import text_type, itervalues, py2bytes, iteritems
from .. import db
from ..response import MoyaResponse
from ..request import ReplaceRequest
from ..urltools import urlencode as moya_urlencode
from .. import tools
from .. import pilot
from .. import namespaces
from webob import Response
from fs.path import splitext
from fs.errors import NoSysPath
import pytz
import sys
import logging
log = logging.getLogger("moya.runtime")
startup_log = logging.getLogger("moya.startup")
class Mountpoint(LogicElement):
"""
A [i]mountpoint[/i] defines a collection of URL *routes* which map incoming requests on to moya code.
An app will typically have at least one mountpoint with [c]name="main"[/c] (the default) which is used when the app is mounted. Moya will check each enclosed <url> in turn until it finds a route which matches.
An app may contain multiple mountpoints, which can be [i]mounted[/i] separately.
"""
class Help:
synopsis = "define a collection of url routes"
example = """
<mountpoint name="main">
<!-- should contain <url> tags -->
</mountpoint>
"""
name = Attribute(
"Mountpoint name unique to the application", default="main", map_to="_name"
)
preserve_attributes = ["urlmapper", "middleware", "name"]
def post_build(self, context):
self.urlmapper = URLMapper(self.libid)
self.middleware = dict(request=URLMapper(), response=URLMapper())
self.name = self._name(context)
class URL(LogicElement):
"""
Add a URL route to a [tag]mountpoint[/tag].
"""
class Help:
synopsis = """add a url to a mountpoint"""
mountpoint = Attribute("Name of the parent mount point", required=False)
mount = Attribute("Mountpoint to mount on this url", required=False, default=None)
route = Attribute("URL route", required=True)
view = Attribute("View element", required=False, map_to="target", example="#post")
methods = Attribute(
"A list of comma separated HTTP methods",
type="commalist",
evaldefault=True,
required=False,
default="GET,POST",
example="GET,POST",
map_to="_methods",
)
handler = Attribute(
"A list of comma separated http status codes",
type="commalist",
evaldefault=False,
required=False,
default=[],
example="404",
map_to="_handlers",
)
name = Attribute("An optional name", required=False, default=None)
final = Attribute(
"Ignore further URLs if this route matches?", type="boolean", default=False
)
def lib_finalize(self, context):
if not self.check(context):
return
defaults = self.get_let_map(context)
params = self.get_parameters(context)
methods = params._methods
handlers = []
for h in params._handlers:
try:
handlers.append(StatusCode(h))
except KeyError:
raise errors.ElementError(
""""{}" is not a valid http status code""".format(h), element=self
)
target = params.target
url_target = self.document.lib.qualify_libname(self.libname)
try:
if target is None:
target = (url_target,)
else:
target = (
url_target,
self.document.qualify_element_ref(target, lib=self.lib),
)
except errors.ElementNotFoundError:
raise errors.ElementError(
"No view called '{}' in the project".format(target), element=self
)
if params.mountpoint is None:
mount_point = self.get_ancestor("mountpoint")
else:
_, mount_point = self.get_element(params.mountpoint)
if params.mount:
try:
_, element = self.archive.get_element(params.mount, lib=self.lib)
if not hasattr(element, "urlmapper"):
raise ValueError("element {} is not mountable".format(element))
mount_point.urlmapper.map(
params.route.rstrip("/") + "/*",
[url_target],
methods=methods,
handlers=handlers or None,
defaults=defaults,
)
mount_point.urlmapper.mount(
params.route, element.urlmapper, name=params.name, defaults=defaults
)
except Exception as e:
raise errors.ElementError(
text_type(e), element=self, diagnosis=getattr(e, "diagnosis", None)
)
else:
try:
mount_point.urlmapper.map(
params.route,
target,
methods=methods,
handlers=handlers or None,
name=params.name,
defaults=defaults,
final=params.final,
)
except ValueError as e:
raise errors.ElementError(text_type(e), element=self)
class Middleware(LogicElement):
"""Add middleware to a mountpoint"""
class Help:
synopsis = "add middleware to a mountpoint"
route = Attribute("Route", required=True)
methods = Attribute(
"A list of comma separated HTTP methods",
required=False,
type="commalist",
evaldefault=True,
default="*",
example="GET,POST",
map_to="_methods",
)
mountpoint = Attribute("Mount point", required=False)
stage = Attribute(
"Stage in request handling",
required=False,
default="request",
metavar="STAGE",
choices=["request", "response"],
)
macro = Attribute("Macro to call", required=False, default=None)
name = Attribute("An optional name", required=False, default=None)
def lib_finalize(self, context):
if not self.check(context):
return
params = self.get_parameters(context)
methods = params._methods
target = params.macro
url_target = self.document.lib.qualify_libname(self.libname)
if target is None:
target = (url_target,)
else:
target = (url_target, self.document.qualify_element_ref(target))
if params.mountpoint is None:
mount_point = self.get_ancestor("mountpoint")
else:
_, mount_point = self.get_element(params.mountpoint)
mapper = mount_point.middleware[params.stage]
_route = mapper.map(params.route, target, methods=methods, name=params.name)
class Mount(LogicElement):
"""Mount a library."""
class Help:
synopsis = "mount a library on a given URL"
app = Attribute("Application", required=True)
url = Attribute("Url", required=True)
mountpoint = Attribute("Mount point", required=False, default="main")
priority = Attribute(
"Priority (highest priority is checked first)",
type="integer",
required=False,
default=0,
)
def logic(self, context):
if self.archive.test_build:
return
self.archive.build_libs()
params = self.get_parameters(context)
app = self.archive.find_app(params.app)
server = self.get_ancestor("server")
url_params = self.get_let_map(context, check_missing=False)
url_params["app"] = app.name
mountpoint = app.lib.get_element_by_type_and_attribute(
"mountpoint", "name", params.mountpoint
)
app.mounts.append((params.mountpoint, params.url))
server.urlmapper.mount(
params.url,
mountpoint.urlmapper,
defaults=url_params,
name=app.name,
priority=params.priority,
)
for stage, urlmapper in server.middleware.items():
urlmapper.mount(
params.url,
mountpoint.middleware[stage],
defaults=url_params,
name=app.name,
priority=params.priority,
)
startup_log.debug(
"%s (%s) mounted on %s",
app,
params.mountpoint,
tools.normalize_url_path(params.url),
)
class GetURL(DataSetter):
"""Get a named URL."""
class Help:
synopsis = "get a named URL"
name = Attribute("URL name", required=True)
_from = Attribute("Application", type="application", default=None, evaldefault=True)
query = Attribute(
"Mapping expression to use as a query string",
metavar="EXPRESSION",
required=False,
default=None,
type="expression",
missing=False,
)
_with = Attribute(
"Extract URL values from this object",
type="expression",
required=False,
default=None,
)
base = Attribute("Base (protocol and domain) of the URL", default=None)
def get_value(self, context):
params = self.get_parameters(context)
query = params.query
app = self.get_app(context)
try:
if self.has_parameter("with"):
url_params = self.get_let_map(context)
url_params.update(params["with"])
else:
url_params = {
k: text_type(v) for k, v in iteritems(self.get_let_map(context))
}
for k, v in iteritems(url_params):
if not v:
self.throw(
"bad-value.parameter",
"URL parameter '{}' must not be blank or missing (it is {})".format(
k, to_expression(context, v)
),
)
url = context[".server"].get_url(app.name, params.name, url_params)
except MissingURLParameter as e:
self.throw("get-url.missing-parameter", text_type(e))
except RouteError as e:
self.throw("get-url.no-route", text_type(e))
if query and hasattr(query, "items"):
qs = moya_urlencode(query)
if qs:
url += "?" + qs
url = self.qualify(context, url)
return url
def qualify(self, context, url):
base = self.base(context)
if base is not None:
url = base.rstrip("/") + "/" + url.lstrip("/")
return url
class GetFqURL(GetURL):
"""Get a [i]fully qualified[/i] (including domain name and scheme) named URL."""
base = Attribute("Base (protocol and domain) of the URL", default=None)
class Help:
synopsis = "get a fully qualified URL"
def qualify(self, context, url):
base = self.base(context)
if base is None:
base = context[".sys.site.host"] or context[".request.host_url"]
url = base + url
return url
class Trace(DataSetter):
"""
Extract route information from a URL path.
Returns route matches in a list of dictionaries. Route matches have three keys;
[c]data[/c] is the url data (as returned in [c].url[/c]), [c]targets[/c] is a list of element references,
[c]name[/c] is the name of the matching URL.
If [c]app[/c] or [c]name[/c] is provided, this tag will return the first url route matching the given app / named url.
"""
class Help:
synopsis = "extract routing information from mounted URL paths"
example = """
<trace path=".request.path" dst="matches"/>
"""
server = Attribute(
"Server containing URL routes",
type="expression",
default=".server",
evaldefault=True,
)
path = Attribute(
"URL path to parse", type="expression", required=True, missing=False
)
method = Attribute("HTTP method", type="text", default="GET")
app = Attribute("Application name", required=False, default=None, type="text")
name = Attribute(
"Route name to find", required=False, type="commalist", default=None
)
def get_value(self, context):
server, path, method, app, name = self.get_parameters(
context, "server", "path", "method", "app", "name"
)
if "://" in path:
_, _, path = path.partition("://")
if not path.startswith("/"):
path = "/" + path
if app is None and name is None:
routes = []
for route_match in server.urlmapper.iter_routes(path, method):
if route_match is not None:
data, targets, name = route_match
routes.append({"data": data, "targets": targets, "name": name})
return routes
else:
for route_match in server.urlmapper.iter_routes(path, method):
data, targets, _name = route_match
if app is not None:
if data.get("app", None) != app:
continue
if name is not None:
if _name not in name:
continue
return {"data": data, "targets": targets, "name": _name}
else:
return None
def wrap_element_error(f):
    def deco(self, context):
        try:
            for node in f(self, context):
                yield node
        except (errors.ElementError, logic.LogicFlowException):
            raise
        except Exception as e:
            # import traceback; traceback.print_exc(e)
            raise errors.ElementError(
                text_type(e), self, diagnosis=getattr(e, "diagnosis", None)
            )

    return deco


class View(ContextElementBase, ContentElementMixin):
"""Define a view to handle a URL"""
class Help:
synopsis = "define a view to handle a URL"
content = Attribute("Content", type="elementref", required=False, default=None)
template = Attribute("Template", type="templates", required=False, default=None)
requires = Attribute(
"Permission expression", type="expression", required=False, default=None
)
withscope = Attribute(
"Use scope as template / content data?",
type="boolean",
required=False,
default=True,
)
def extend_context(self, context):
"""Hook to extend the context."""
@wrap_element_error
def run(self, context):
(content, templates, requires, withscope) = self.get_parameters(
context, "content", "template", "requires", "withscope"
)
if self.has_parameter("requires"):
if not requires:
raise logic.EndLogic(http.RespondForbidden())
self.extend_context(context)
yield logic.DeferNodeContents(self)
if "_return" in context:
scope = get_return(context.get("_return"))
else:
if withscope:
scope = context[".call"]
else:
scope = {}
if scope is not None and not isinstance(scope, Content):
app = self.get_app(context)
template = self.resolve_templates(app, templates)
# if content is None and self.younger_sibling.check_type(namespaces.default, 'content'):
# content = self.younger_sibling
if content is not None:
if not hasattr(scope, "items"):
self.throw(
"view.bad-return",
"View should return a dict or other mapping object (not {})".format(
to_expression(scope)
),
)
for defer in self.generate_content(context, content, app, td=scope):
yield defer
context.copy("_content", "_return")
elif template is not None:
render_container = RenderContainer.create(app, template=template)
render_container.update(scope)
context["_return"] = render_container
class AppUrlsProxy(object):
    def __moyacontext__(self, context):
        urls = context.get(".urls")
        app = context[".app"]
        return urls[app.name]


class Trace(object):
    def __init__(self, target, app=None, route_data=None, response=None):
        self.target = target
        self.app = app
        self.route_data = route_data
        if isinstance(response, http.RespondWith):
            self.response = text_type(response)
        else:
            self.response = None

    def __moyarepr__(self, context):
        return "<trace>"

    @property
    def target_html(self):
        return syntax.highlight("target", self.target, line_numbers=False)


class GetLocale(DataSetter):
"""Get an object containing locale information"""
class Help:
synopsis = "get locale information"
locale = Attribute("Locale name")
def logic(self, context):
_locale = self.locale(context)
try:
locale = LocaleProxy(_locale)
except:
self.throw(
"get-locale.unknown-locale",
'''Couldn't get locale information for "{}"'''.format(_locale),
)
self.set_context(context, self.dst(context), locale)
class SetLocale(LogicElement):
"""Switches the current locale"""
class Help:
synopsis = "switch the current locale"
locale = Attribute("Locale name")
def logic(self, context):
_locale = self.locale(context)
try:
locale = LocaleProxy(_locale)
except:
self.throw(
"change-locale.unknown-locale",
'''Couldn't get locale information for "{}"'''.format(_locale),
)
context[".locale"] = locale
class SetLanguage(LogicElement):
"""Set the current language"""
class Help:
synopsis = "set the current language"
language = Attribute("Language code")
def logic(self, context):
language = self.language(context)
if not isinstance(language, list):
language = [language]
context[".languages"] = language
class Server(LogicElement):
"""Defines a server"""
class Help:
synopsis = "define a server"
def post_build(self, context):
self.urlmapper = URLMapper()
self.middleware = {"request": URLMapper(), "response": URLMapper()}
self.fs = None
super(Server, self).post_build(context)
    def startup(self, archive, context, fs, breakpoint=False):
        self.fs = fs
        archive.build_libs()
        try:
            if breakpoint:
                logic.debug(archive, context, logic.DeferNodeContents(self))
            else:
                logic.run_logic(archive, context, logic.DeferNodeContents(self))
        except Exception as e:
            # import traceback
            # traceback.print_exc(e)
            raise
        archive.build_libs()

    def get_url(self, app_name, url_name, params=None):
        app_routes = self.urlmapper.get_routes(app_name)
        url = None
        # Could be multiple routes for this name
        # Try each one and return the url that doesn't fail
        for route in app_routes[:-1]:
            try:
                url = route.target.get_url(url_name, params, base_route=route)
            except RouteError:
                continue
            else:
                break
        else:
            # Last one, if this throws an exception, we want it to propagate
            route = app_routes[-1]
            url = route.target.get_url(url_name, params, base_route=route)
        return url

def trace(self, archive, url, method="GET"):
for route_match in self.urlmapper.iter_routes(url, method):
route_data = route_match.data
target = route_match.target
if target:
for element_ref in target:
app = archive.get_app(route_data.get("app", None))
yield (route_data, archive.get_element(element_ref, app))
    def process_response(self, context, response):
        cookies = context.root.get("cookiejar", {})
        for cookie in itervalues(cookies):
            cookie.set(response)
        for cookie_name in cookies.deleted_cookies:
            response.delete_cookie(cookie_name)
        try:
            if not response.date and "now" in context.root:
                response.date = context.root["now"]._dt
        except:
            # Don't want to discard the response here, so log exception
            log.exception("error setting response date")
        return response

    def render_response(self, archive, context, obj, status=StatusCode.ok):
        response = Response(
            charset=py2bytes("utf8"), status=int(getattr(obj, "http_status", status))
        )
        result = render_object(obj, archive, context, "html")
        response.text = text_type(result)
        return self.process_response(context, response)

    def _dispatch_result(self, archive, context, request, result, status=StatusCode.ok):
        if result is None:
            return None

        if isinstance(result, ReplaceRequest):
            return result

        if isinstance(result, RespondWith):
            return self.dispatch_handler(
                archive, context, request, status=result.status, headers=result.headers
            )

        if not isinstance(result, Response):
            status = int(getattr(result, "http_status", None) or status)
            response = MoyaResponse(charset=py2bytes("utf8"), status=status)
            html = render_object(result, archive, context, "html")
            response.text = html
        else:
            response = result
        return self.process_response(context, response)

    def handle_error(self, archive, context, request, error, exc_info):
        context.safe_delete("._callstack")
        context.safe_delete(".call")
        return self.dispatch_handler(
            archive,
            context,
            request,
            status=StatusCode.internal_error,
            error=error,
            exc_info=exc_info,
        )

    def _dispatch_mapper(
        self, archive, context, mapper, url, method="GET", status=None, breakpoint=False
    ):
        """Loop to call targets for a url/method/status combination"""
        dispatch_trace = context.root.get("_urltrace", [])
        if breakpoint:
            call = archive.debug_call
        else:
            call = archive.call
        root = context.root
        for route_data, target, name in mapper.iter_routes(url, method, status):
            root.update(urlname=name, headers={})
            if target:
                for element_ref in target:
                    app, element = archive.get_element(element_ref)
                    if element:
                        app = app or archive.get_app(route_data.get("app", None))
                        context.root.update(url=route_data)
                        result = call(element_ref, context, app, url=route_data)
                        dispatch_trace.append(
                            Trace(element_ref, app, route_data, result)
                        )
                        if result is not None:
                            yield result
                    else:
                        dispatch_trace.append(Trace(element_ref))
            else:
                dispatch_trace.append(Trace(element_ref))

    @classmethod
    def set_site(cls, archive, context, request):
        """Set site data for a request"""
        domain = request.host
        if ":" in domain:
            domain = domain.split(":", 1)[0]

        site_instance = archive.sites.match(domain, context=context)
        if site_instance is None:
            log.error(
                'no site matching domain "{domain}", consider adding [site:{domain}] to settings'.format(
                    domain=domain
                )
            )
            return None

        context.root["sys"]["site"] = site_instance
        try:
            context.root["sys"]["base"] = archive.project_fs.getsyspath("/")
        except NoSysPath:
            context.root["sys"]["base"] = None
        context.root["site"] = site_instance._data
        return site_instance

    @classmethod
    def _get_tz(self, context, default_timezone="UTC", user_timezone=False):
        """lazy insertion of .tz"""
        if context is None:
            context = pilot.context
        tz = None
        if user_timezone:
            tz = context.get(".user.timezone", None)
        if not tz:
            tz = context.get(".sys.site.timezone", None)
        if not tz:
            tz = default_timezone
        if not tz:
            return None
        try:
            return Timezone(tz)
        except pytz.UnknownTimeZoneError:
            log.error("invalid value for timezone '%s', defaulting to UTC", tz)
            return Timezone("UTC")

    def run_middleware(self, stage, archive, context, request, url, method):
        middleware = self.middleware[stage]
        try:
            for result in self._dispatch_mapper(
                archive, context, middleware, url, method
            ):
                response = self._dispatch_result(archive, context, request, result)
                if response:
                    return response
        except Exception as e:
            return self.handle_error(archive, context, request, e, sys.exc_info())

    def _populate_context(self, archive, context, request):
        """Add standard values to context."""
        populate_context = {
            "permissions": {},
            "libs": archive.libs,
            "apps": archive.apps,
            "debug": archive.debug,
            "develop": archive.develop,
            "sys": {},
            "server": self,
            "urls": self.urlmapper,
            "now": ExpressionDateTime.moya_utcnow(),
            "appurls": AppUrlsProxy(),
            "moya": {"version": __version__},
            "enum": archive.enum,
            "accept_language": list(request.accept_language),
            "media_url": archive.media_url,
            "filters": archive.filters,
            "secret": archive.secret,
        }
        context.root.update(populate_context)
        set_dynamic(context)

    def dispatch(self, archive, context, request, breakpoint=False):
        """Dispatch a request to the server and return a response object."""
        url = request.path_info
        method = request.method

        self._populate_context(archive, context, request)

        site = self.set_site(archive, context, request)
        if site is None:
            # No site match, return a 404
            return self.dispatch_handler(
                archive, context, request, StatusCode.not_found
            )

        root = context.root
        if site.head_as_get and method == "HEAD":
            # Treat HEAD requests as GET requests
            request = request.copy()
            request.method = "GET"
            root["request"] = request
            method = "GET"

        root["locale"] = site.locale
        context.set_lazy(
            ".tz",
            self._get_tz,
            None,
            user_timezone=site.user_timezone,
            default_timezone=site.timezone,
        )

        # Request middleware
        response = self.run_middleware(
            "request", archive, context, request, url, method
        )
        if response is not None:
            return response

        def response_middleware(response):
            context.safe_delete("._callstack", ".call")
            context.root["response"] = response
            new_response = self.run_middleware(
                "response", archive, context, request, url, method
            )
            return new_response or response

        # Run main views
        root["urltrace"] = root["_urltrace"] = []
        context.safe_delete("._callstack", ".call")
        response = None
        try:
            for result in self._dispatch_mapper(
                archive, context, self.urlmapper, url, method, breakpoint=breakpoint
            ):
                response = self._dispatch_result(archive, context, request, result)
                if response:
                    response = response_middleware(response)
                    db.commit_sessions(context)
                    return response
                else:
                    db.commit_sessions(context)
        except Exception as e:
            db.rollback_sessions(context, close=False)
            return self.handle_error(archive, context, request, e, sys.exc_info())
        finally:
            for thread in context.get("._threads", []):
                thread.wait()
            context.safe_delete("._threads")
            db.close_sessions(context)

        root["_urltrace"] = []

        # Append slash and redirect if url doesn't end in a slash
        if not url.endswith("/") and site.append_slash:
            # Check in advance if the url ending with / actually maps to anything
            if method in ("HEAD", "GET") and self.urlmapper.has_route(
                url + "/", method, None
            ):
                _, ext = splitext(url)
                # Don't redirect when the filename has an extension
                if not ext:
                    response = MoyaResponse(
                        status=StatusCode.temporary_redirect, location=url + "/"
                    )
                    return response

        if request.method in ["GET", "POST", "HEAD"]:
            status_code = StatusCode.not_found
        else:
            status_code = StatusCode.method_not_allowed

        # No response returned, handle 404
        return self.dispatch_handler(archive, context, request, status=status_code)

    def dispatch_handler(
        self,
        archive,
        context,
        request,
        status=404,
        headers=None,
        error=None,
        exc_info=None,
    ):
        """Respond to a status code"""
        context.safe_delete(
            "._callstack",
            ".call",
            ".td",
            "._td",
            ".contentstack",
            ".content",
            ".headers",
        )
        if headers is not None:
            context.root["headers"] = headers

        moya_trace = None
        error2 = None
        moya_trace2 = None

        if error is not None:
            moya_trace = getattr(error, "moya_trace", None)
            if moya_trace is None:
                try:
                    moya_trace = trace.build(
                        context, None, None, error, exc_info, request
                    )
                except Exception as e:
                    # import traceback; traceback.print_exc(e)
                    raise

        try:
            url = request.path_info
            method = request.method
            for result in self._dispatch_mapper(
                archive, context, self.urlmapper, url, method, status
            ):
                if not isinstance(result, RespondWith):
                    return self._dispatch_result(
                        archive, context, request, result, status=status
                    )
        except Exception as e:
            log.exception("error in dispatch_handler")
            # from traceback import print_exc
            # print_exc()
            if status != StatusCode.internal_error:
                return self.handle_error(archive, context, request, e, sys.exc_info())
            error2 = e
            moya_trace2 = getattr(error2, "moya_trace", None)
            if moya_trace2 is None:
                moya_trace2 = trace.build(
                    context, None, None, error2, sys.exc_info(), request
                )

        if error is not None:
            log.error("unhandled exception ({})".format(text_type(error).lstrip()))
            try:
                context[".console"].obj(context, moya_trace)
            except:
                pass

        context.reset()
        context.safe_delete(
            "._callstack",
            ".call",
            ".td",
            "._td",
            ".contentstack",
            ".content",
            "_funccalls",
            "._for",
            "_for_stack",
        )

        # pilot.context = context

        # No handlers have been defined for this status code
        # We'll look for a template named <status code>.html and render that
        template_filename = "{}.html".format(int(status))
        try:
            response = MoyaResponse(charset=py2bytes("utf8"), status=status)
            rc = RenderContainer.create(None, template=template_filename)
            rc["request"] = request
            rc["status"] = status
            rc["error"] = error
            rc["trace"] = moya_trace
            rc["error2"] = error
            rc["trace2"] = moya_trace2
            rc["moya_error"] = (
                getattr(moya_trace.exception, "type", None) if moya_trace else None
            )
            if status == 500:
                archive.fire(context, "sys.unhandled-exception", data=rc)
            response.text = render_object(rc, archive, context, "html")
            return response
        except MissingTemplateError:
            pass
        except Exception as e:
            # import traceback
            # traceback.print_exc(e)
            # print(e)
            log.error("unable to render %s (%s)", template_filename, text_type(e))

        # Render a very basic response
        response = Response(charset=py2bytes("utf8"), status=status)
        url = request.path_info
        try:
            response.text = standard_response(
                status, url, error, moya_trace, debug=archive.debug
            )
        except Exception as e:
            log.exception("error generating standard response")
        return response
| 33.81256 | 213 | 0.566808 | 3,678 | 34,996 | 5.283578 | 0.126427 | 0.023054 | 0.022693 | 0.01235 | 0.297792 | 0.235784 | 0.195647 | 0.161118 | 0.133639 | 0.100242 | 0 | 0.001718 | 0.334695 | 34,996 | 1,034 | 214 | 33.845261 | 0.832925 | 0.068465 | 0 | 0.330909 | 0 | 0 | 0.098984 | 0.003128 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042424 | false | 0.002424 | 0.046061 | 0.002424 | 0.213333 | 0.001212 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ef69a8e8cd014e8cddf55bfd7f3ad8fb1bda81e | 5,132 | py | Python | src/media/data/nouns.py | cjcodeproj/medialibrary | 466ba475561f7701fe41ebe196aaf789a0aa7237 | [
"MIT"
] | null | null | null | src/media/data/nouns.py | cjcodeproj/medialibrary | 466ba475561f7701fe41ebe196aaf789a0aa7237 | [
"MIT"
] | 29 | 2021-09-06T00:46:30.000Z | 2022-03-23T16:47:04.000Z | src/media/data/nouns.py | cjcodeproj/medialibrary | 466ba475561f7701fe41ebe196aaf789a0aa7237 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
'''
Objects for representation of proper nouns used in keywords.
'''
# pylint: disable=too-few-public-methods
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
from media.xml.namespaces import Namespaces
class AbstractNoun():
    '''
    Root class for all nouns
    '''
    def __init__(self):
        self.value = ''
        self.sort_value = ''
        self.tagname = ''

    def __str__(self):
        return self.value

    def __hash__(self):
        return hash(self.value)

    def __lt__(self, other):
        return self.sort_value < other.sort_value

    def __gt__(self, other):
        return self.sort_value > other.sort_value

    def __eq__(self, other):
        return self.sort_value == other.sort_value


class Noun(AbstractNoun):
    '''
    Simplest class to represent proper nouns
    for Thing, Event, Group, Entity

    value represents the value that is displayed
    sort_value represents the value for sorting
    '''
    def __init__(self, in_element):
        super().__init__()
        self.value = in_element.text
        self.sort_value = self.value.casefold()
        self.tagname = Namespaces.ns_strip(in_element.tag)


class Place(AbstractNoun):
    '''
    ProperNoun class for a location

    Has attributes for every possible aspect
    of a location, which is probably going to be
    a problem.
    '''
    def __init__(self, in_place):
        super().__init__()
        self.generic = ''
        self.name = ''
        self.city = ''
        self.county = ''
        self.state = ''
        self.country = ''
        self.planet = ''
        if in_place is not None:
            self.tagname = Namespaces.ns_strip(in_place.tag)
            self._process(in_place)

    def _process(self, in_element):
        first_tag = True
        major = ''
        minor = ''
        for child in in_element:
            if first_tag:
                major = self._build_major_value(child)
                first_tag = False
            else:
                minor = self._build_minor_value(child, minor)
        if minor:
            minor = '(' + minor + ')'
            self.value = major + ' ' + minor
        else:
            self.value = major
        self.sort_value = self.value.casefold()

    def _build_major_value(self, in_element):
        tagname = Namespaces.ns_strip(in_element.tag)
        if tagname == 'generic':
            self.generic = in_element.text
        if tagname == 'name':
            self.name = in_element.text
        elif tagname == 'ci':
            self.city = in_element.text
        elif tagname == 'co':
            self.county = in_element.text
        elif tagname in ['st', 'pr']:
            self.state = in_element.text
        elif tagname == 'cn':
            self.country = in_element.text
        elif tagname == 'planet':
            self.planet = in_element.text
        major = in_element.text
        return major

    def _build_minor_value(self, in_element, minor):
        tagname = Namespaces.ns_strip(in_element.tag)
        if tagname == 'ci':
            self.city = in_element.text
        elif tagname == 'co':
            self.county = in_element.text
        elif tagname in ['st', 'pr']:
            self.state = in_element.text
        elif tagname == 'cn':
            self.country = in_element.text
        elif tagname == 'planet':
            self.planet = in_element.text
        if minor:
            minor += ', ' + in_element.text
        else:
            minor = in_element.text
        return minor


class Name(AbstractNoun):
    '''
    Proper noun for the name of a real person.

    A real person's name will include
    the common components like a given name,
    a family name, and maybe a middle name.

    This class is more heavily used since it
    is the standard name class for crew members
    or any other data types that use a name.
    '''
    def __init__(self, in_element):
        super().__init__()
        self.given = ''
        self.family = ''
        self.middle = ''
        self.sort = ''
        if in_element is not None:
            self.tagname = Namespaces.ns_strip(in_element.tag)
            self._process(in_element)

    def _process(self, in_element):
        for child in in_element:
            tagname = Namespaces.ns_strip(child.tag)
            if tagname == 'gn':
                self.given = child.text
            if tagname == 'fn':
                self.family = child.text
            if tagname == 'mn':
                self.middle = child.text
        self._build_value()
        # self._build_sort()

    def _build_value(self):
        raw = ''
        if self.given:
            raw += self.given + ' '
        if self.family:
            raw += self.family
        if self.middle:
            raw += ' ' + self.middle
        self.value = raw
        self.sort_value = self.family.casefold() + '_' \
            + self.given.casefold() + '_' + self.middle.casefold()

    def __str__(self):
        '''
        The formal string value should be returned
        '''
        return f"{self.given} {self.family}"
| 28.511111 | 66 | 0.567225 | 607 | 5,132 | 4.581549 | 0.228995 | 0.097087 | 0.074793 | 0.055016 | 0.347717 | 0.314635 | 0.277958 | 0.277958 | 0.236246 | 0.163251 | 0 | 0 | 0.33067 | 5,132 | 179 | 67 | 28.670391 | 0.809607 | 0.16855 | 0 | 0.336134 | 0 | 0 | 0.020369 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12605 | false | 0 | 0.008403 | 0.042017 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ef75298706c9e95500d4b99ead2e2f3a0f95ab6 | 1,325 | py | Python | get_together/views/utils.py | alysivji/GetTogether | 403d9945fff019701de41d081ad4452e771e1ce1 | [
"BSD-2-Clause"
] | 446 | 2018-01-21T09:22:41.000Z | 2022-03-25T17:46:12.000Z | get_together/views/utils.py | alysivji/GetTogether | 403d9945fff019701de41d081ad4452e771e1ce1 | [
"BSD-2-Clause"
] | 272 | 2018-01-03T16:55:39.000Z | 2022-03-11T23:12:30.000Z | get_together/views/utils.py | alysivji/GetTogether | 403d9945fff019701de41d081ad4452e771e1ce1 | [
"BSD-2-Clause"
] | 100 | 2018-01-27T02:04:15.000Z | 2021-09-09T09:02:21.000Z | import math
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from events.location import get_client_ip, get_geoip
from events.models import Team
KM_PER_DEGREE_LAT = 110.574
KM_PER_DEGREE_LNG = 111.320 # At the equator
DEFAULT_NEAR_DISTANCE = 100  # kilometres
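
# A degree of longitude shrinks with latitude, so the longitude window below
# is widened by 1/cos(latitude).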
def get_nearby_teams(request, near_distance=DEFAULT_NEAR_DISTANCE):
    g = get_geoip(request)
    if g.latlng is None or g.latlng[0] is None or g.latlng[1] is None:
        print("Could not identify latlng from geoip")
        return Team.objects.none()

    try:
        minlat = g.latlng[0] - (near_distance / KM_PER_DEGREE_LAT)
        maxlat = g.latlng[0] + (near_distance / KM_PER_DEGREE_LAT)
        minlng = g.latlng[1] - (
            near_distance / (KM_PER_DEGREE_LNG * math.cos(math.radians(g.latlng[0])))
        )
        maxlng = g.latlng[1] + (
            near_distance / (KM_PER_DEGREE_LNG * math.cos(math.radians(g.latlng[0])))
        )

        near_teams = Team.public_objects.filter(
            city__latitude__gte=minlat,
            city__latitude__lte=maxlat,
            city__longitude__gte=minlng,
            city__longitude__lte=maxlng,
        )
        return near_teams
    except Exception as e:
        print("Error looking for local teams: ", e)
        return Team.objects.none()
| 33.974359 | 85 | 0.667925 | 186 | 1,325 | 4.467742 | 0.403226 | 0.075812 | 0.079422 | 0.081829 | 0.262335 | 0.226233 | 0.226233 | 0.226233 | 0.226233 | 0.144404 | 0 | 0.022931 | 0.243019 | 1,325 | 38 | 86 | 34.868421 | 0.805583 | 0.019623 | 0 | 0.125 | 0 | 0 | 0.051698 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.15625 | 0 | 0.28125 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f103e29578195afcbfb92ba556cee4316213c6ba | 1,833 | py | Python | tests/python/unittest/test_infer_type.py | mozga-intel/incubator-mxnet | 7dcfedca704f39b4b9b7497dabf3fea47ad40df4 | [
"BSL-1.0",
"Apache-2.0"
] | 13 | 2017-08-11T05:19:48.000Z | 2020-05-12T02:09:27.000Z | tests/python/unittest/test_infer_type.py | mozga-intel/incubator-mxnet | 7dcfedca704f39b4b9b7497dabf3fea47ad40df4 | [
"BSL-1.0",
"Apache-2.0"
] | 4 | 2021-03-30T11:59:59.000Z | 2022-03-12T00:40:23.000Z | tests/python/unittest/test_infer_type.py | mozga-intel/incubator-mxnet | 7dcfedca704f39b4b9b7497dabf3fea47ad40df4 | [
"BSL-1.0",
"Apache-2.0"
] | 13 | 2016-11-10T06:38:46.000Z | 2021-03-18T21:26:11.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
import mxnet as mx
import numpy as np
from common import models, with_seed
from mxnet import autograd
from mxnet.test_utils import assert_almost_equal
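

# Both tests verify that float64 dtypes propagate through multi-output
# operators (split, gelqf) and their gradients.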
@with_seed()
def test_infer_multiout_op():
    data = mx.nd.arange(16, dtype=np.float64).reshape((4, 4))
    data.attach_grad()
    with autograd.record():
        y = mx.nd.split(data, axis=0, num_outputs=2)
    y[0].backward()
    assert data.grad.dtype == np.float64
    mx.nd.waitall()

@with_seed()
def test_infer_multiout_op2():
    def test_func(a):
        q, l = mx.nd.linalg.gelqf(a)
        return mx.nd.sum(l)

    data32 = mx.nd.random.normal(shape=(2, 3), ctx=mx.cpu(), dtype=np.float32)
    data32.attach_grad()
    with autograd.record():
        test32 = test_func(data32)
    test32.backward()

    data64 = mx.nd.Cast(data32, dtype=np.float64)
    data64.attach_grad()
    with autograd.record():
        test64 = test_func(data64)
    test64.backward()

    assert_almost_equal(data64.grad.asnumpy(), data32.grad.asnumpy(), atol=1e-5, rtol=1e-5)
| 33.327273 | 91 | 0.713039 | 279 | 1,833 | 4.609319 | 0.487455 | 0.021773 | 0.032659 | 0.051322 | 0.108865 | 0.043546 | 0 | 0 | 0 | 0 | 0 | 0.034783 | 0.184397 | 1,833 | 54 | 92 | 33.944444 | 0.825418 | 0.420076 | 0 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.1 | false | 0 | 0.166667 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f1040870685ccb486372b534a48008c1e473c417 | 871 | py | Python | Examples/ReinforcementLearning/deeprl/env/env_factory.py | burhandodhy/CNTK | fcdeef63d0192c7b4b7428b14c1f9750d6c1de2e | [
"MIT"
] | 17,702 | 2016-01-25T14:03:01.000Z | 2019-05-06T09:23:41.000Z | Examples/ReinforcementLearning/deeprl/env/env_factory.py | burhandodhy/CNTK | fcdeef63d0192c7b4b7428b14c1f9750d6c1de2e | [
"MIT"
] | 3,489 | 2016-01-25T13:32:09.000Z | 2019-05-03T11:29:15.000Z | Examples/ReinforcementLearning/deeprl/env/env_factory.py | burhandodhy/CNTK | fcdeef63d0192c7b4b7428b14c1f9750d6c1de2e | [
"MIT"
] | 5,180 | 2016-01-25T14:02:12.000Z | 2019-05-06T04:24:28.000Z | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
from gym import envs
from . import maze2d, puddleworld
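

# Registers the custom Maze2D / PuddleWorld environments with gym under fixed
# ids so they can later be constructed by name.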
def register_env(env_id):
    if env_id == 'Maze2D-v0':
        envs.register(
            id=env_id,
            entry_point='env:maze2d.Maze2D',
            kwargs={},
            max_episode_steps=200,
            reward_threshold=-110.0)
    elif env_id == 'PuddleWorld-v0':
        envs.register(
            id=env_id,
            entry_point='env:puddleworld.PuddleWorld',
            kwargs={},
            max_episode_steps=200,
            reward_threshold=-100.0)
    else:
        raise ValueError('Cannot find environment "{0}"\n'.format(env_id))
    return True
| 29.033333 | 80 | 0.552239 | 97 | 871 | 4.804124 | 0.56701 | 0.064378 | 0.060086 | 0.06867 | 0.313305 | 0.313305 | 0.313305 | 0.145923 | 0.145923 | 0 | 0 | 0.032813 | 0.265212 | 871 | 29 | 81 | 30.034483 | 0.695313 | 0.259472 | 0 | 0.4 | 0 | 0 | 0.153125 | 0.042188 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.1 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f104bbe2ef41441b68ca62399f90007c7e48f1ad | 2,599 | py | Python | labdrivers/version.py | pbnjeff89/labdrivers | 1091b9f746a5a011d94cd63abf5010fc8cde1556 | [
"MIT"
] | null | null | null | labdrivers/version.py | pbnjeff89/labdrivers | 1091b9f746a5a011d94cd63abf5010fc8cde1556 | [
"MIT"
] | null | null | null | labdrivers/version.py | pbnjeff89/labdrivers | 1091b9f746a5a011d94cd63abf5010fc8cde1556 | [
"MIT"
] | null | null | null | from os.path import join as pjoin
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
_version_major = 0
_version_minor = 9
_version_micro = 8 # use '' for first of series, number for 1 and above
_version_extra = 'dev'
# _version_extra = '' # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor]
if _version_micro:
    _ver.append(_version_micro)
if _version_extra:
    _ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
CLASSIFIERS = ["Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering"]
# Description should be a one-liner:
description = "labdrivers: python drivers for lab instruments"
# Long description will go up on the pypi page
long_description = """
labdrivers
==========

labdrivers is a collection of drivers for common research lab instruments.

It contains a suite of instrument-specific drivers which can be used to
interface measurement hardware with Python code, along with a set of
Jupyter notebooks demonstrating example use cases.

To get started using these components in your own software, please go to the
repository README_.

.. _README: https://github.com/masonlab/labdrivers/blob/master/README.md

License
=======

``labdrivers`` is licensed under the terms of the MIT license. See the file
"LICENSE" for information on the history of this software, terms & conditions
for usage, and a DISCLAIMER OF ALL WARRANTIES.

All trademarks referenced herein are property of their respective holders.

Copyright (c) 2016--, Henry Hinnefeld.
"""
NAME = "labdrivers"
MAINTAINER = "Jeff Damasco"
MAINTAINER_EMAIL = "jeffdamasco@gmail.com"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://github.com/masonlab/labdrivers"
DOWNLOAD_URL = ""
LICENSE = "MIT"
AUTHOR = "Henry Hinnefeld"
AUTHOR_EMAIL = "henry.hinnefeld@gmail.com"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
PACKAGES = ['labdrivers',
'labdrivers.keithley',
'labdrivers.lakeshore',
'labdrivers.srs',
'labdrivers.quantumdesign',
'labdrivers.oxford',
'labdrivers.ni']
PACKAGE_DATA = {'labdrivers': [pjoin('data', '*')]}
REQUIRES = ["pyvisa", "PyDAQmx"]
| 32.4875 | 77 | 0.709504 | 319 | 2,599 | 5.626959 | 0.570533 | 0.026741 | 0.017827 | 0.030084 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004286 | 0.191997 | 2,599 | 79 | 78 | 32.898734 | 0.850476 | 0.116199 | 0 | 0 | 0 | 0 | 0.591525 | 0.040192 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.016393 | 0 | 0.016393 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f10572b5580f12fbec993a136bc3e170dbb62b5c | 10,347 | py | Python | experiments/gbnf/experiment/boosted_experiment.py | robert-giaquinto/survae_flows | 4d7dc638f77c48ad3c8393b967c33ac9dbad60fe | [
"MIT"
] | 2 | 2021-03-06T19:37:39.000Z | 2022-01-09T11:19:45.000Z | experiments/gbnf/experiment/boosted_experiment.py | robert-giaquinto/survae_flows | 4d7dc638f77c48ad3c8393b967c33ac9dbad60fe | [
"MIT"
] | null | null | null | experiments/gbnf/experiment/boosted_experiment.py | robert-giaquinto/survae_flows | 4d7dc638f77c48ad3c8393b967c33ac9dbad60fe | [
"MIT"
] | null | null | null | import torch
import torchvision.utils as vutils
import math
import numpy as np
from survae.distributions import DataParallelDistribution
from survae.utils import elbo_bpd
from .utils import get_args_table, clean_dict
# Path
import os
import time
from survae.data.path import get_survae_path
# Experiment
from .base import BaseExperiment
from .flow_experiment import FlowExperiment
from experiments.gbnf.optim import get_optim
# Logging frameworks
from torch.utils.tensorboard import SummaryWriter
import wandb
class BoostedFlowExperiment(FlowExperiment):
def __init__(self, args,
data_id, model_id, optim_id,
train_loader, eval_loader,
model, optimizer, scheduler_iter, scheduler_epoch):
# Init parent
super(BoostedFlowExperiment, self).__init__(args=args,
data_id=data_id, model_id=model_id, optim_id=optim_id,
train_loader=train_loader,
eval_loader=eval_loader,
model=model,
optimizer=optimizer,
scheduler_iter=scheduler_iter,
scheduler_epoch=scheduler_epoch)
self.num_components = args.boosted_components
self.epochs_per_component = self.args.epochs
self.component_epoch = 0
if args.pretrained_model is not None:
self.args.epochs = self.args.epochs * (self.num_components - 1)
else:
self.args.epochs = self.args.epochs * self.num_components
def run(self):
if self.args.resume:
self.resume()
while self.model.component < self.num_components:
self.init_component()
for epoch in range(self.component_epoch, self.epochs_per_component):
# Train
train_dict = self.train_fn(epoch)
self.log_train_metrics(train_dict)
# Eval
if (epoch+1) % self.eval_every == 0:
eval_dict = self.eval_fn(epoch)
self.log_eval_metrics(eval_dict)
self.eval_epochs.append(epoch)
converged, improved = self.stop_early(eval_dict, epoch)
self.sample_fn(components="c")
else:
eval_dict = None
converged = False
improved = False
# Log
self.save_metrics()
self.log_fn(self.current_epoch, train_dict, eval_dict)
# Checkpoint
self.current_epoch += 1
self.component_epoch += 1
if (self.check_every > 0 and (epoch+1) % self.check_every == 0) or improved:
self.checkpoint_save()
# Early stopping
if converged:
break
# initialize training for next component
if self.check_every == 0:
self.resume() # reload if using early stopping
print(f"--- Boosting component {self.model.component + 1}/{self.num_components} complete ---")
self.model.update_rho(self.train_loader)
self.model.increment_component()
self.component_epoch = 0
self.optimizer, self.scheduler_iter, self.scheduler_epoch = get_optim(self.args, self.model)
self.checkpoint_save()
# Sampling
self.sample_fn(components="1:c")
def eval_fn(self, epoch):
if self.args.super_resolution or self.args.conditional:
return self._cond_eval_fn(epoch)
else:
return self._eval_fn(epoch)
def _cond_eval_fn(self, epoch):
self.model.eval()
with torch.no_grad():
loss_sum = 0.0
approx_loss_sum = 0.0
loss_count = 0
for (x, context) in self.eval_loader:
batch_size = len(x)
context = context.to(self.args.device)
x = x.to(self.args.device)
#loss = -1.0 * self.model.log_prob(x, context).sum() / (math.log(2) * x.shape.numel())
#loss_sum += loss.detach().cpu().item() * batch_size
approx_loss = -1.0 * self.model.approximate_mixture_log_prob(x, context).sum() / (math.log(2) * x.shape.numel())
approx_loss_sum += approx_loss.detach().cpu().item() * batch_size
loss_count += batch_size
#print('Evaluating. Epoch: {}/{}, Datapoint: {}/{}, Bits/dim: {:.3f}, aprx={:.3f}'.format(
# self.current_epoch+1, self.args.epochs, loss_count, len(self.eval_loader.dataset), loss_sum/loss_count, approx_loss_sum/loss_count), end='\r')
print('Evaluating. Epoch: {}/{}, Datapoint: {}/{}, Bits/dim: {:.3f}'.format(
self.current_epoch+1, self.args.epochs, loss_count, len(self.eval_loader.dataset), approx_loss_sum/loss_count), end='\r')
print('')
#return {'bpd': loss_sum/loss_count, 'bpd_aprx': approx_loss_sum/loss_count}
return {'bpd': approx_loss_sum/loss_count}
def _eval_fn(self, epoch):
self.model.eval()
with torch.no_grad():
loss_sum = 0.0
approx_loss_sum = 0.0
loss_count = 0
for x in self.eval_loader:
batch_size = len(x)
x = x.to(self.args.device)
#loss = -1.0 * self.model.log_prob(x).sum() / (math.log(2) * x.shape.numel())
#loss_sum += loss.detach().cpu().item() * batch_size
approx_loss = -1.0 * self.model.approximate_mixture_log_prob(x).sum() / (math.log(2) * x.shape.numel())
approx_loss_sum += approx_loss.detach().cpu().item() * batch_size
loss_count += batch_size
#print('Evaluating. Epoch: {}/{}, Datapoint: {}/{}, Bits/dim: {:.3f}, aprx={:.3f}'.format(
# self.current_epoch+1, self.args.epochs, loss_count, len(self.eval_loader.dataset), loss_sum/loss_count, approx_loss_sum/loss_count), end='\r')
print('Evaluating. Epoch: {}/{}, Datapoint: {}/{}, Bits/dim: {:.3f}'.format(
self.current_epoch+1, self.args.epochs, loss_count, len(self.eval_loader.dataset), approx_loss_sum/loss_count), end='\r')
print('')
#return {'bpd': loss_sum/loss_count, 'bpd_aprx': approx_loss_sum/loss_count}
return {'bpd': approx_loss_sum/loss_count}
def sample_fn(self, components="1:c", temperature=None, sample_new_batch=False):
if self.args.samples < 1:
return
self.model.eval()
get_new_batch = self.sample_batch is None or sample_new_batch
if get_new_batch:
self.sample_batch = next(iter(self.eval_loader))
if self.args.super_resolution or self.args.conditional:
imgs = self.sample_batch[0][:self.args.samples]
context = self.sample_batch[1][:self.args.samples]
self._cond_sample_fn(context, components, temperature=temperature, save_context=get_new_batch)
else:
imgs = self.sample_batch[:self.args.samples]
self._sample_fn(components, temperature=temperature)
if get_new_batch:
# save real samples
path_true_samples = '{}/samples/true_te{}_s{}.png'.format(self.log_path, self.current_epoch, self.args.seed)
self.save_images(imgs, path_true_samples)
def _cond_sample_fn(self, context, components, temperature=None, save_context=True):
if self.args.super_resolution and save_context:
path_context = '{}/samples/context_te{}_s{}.png'.format(self.log_path, self.current_epoch, self.args.seed)
self.save_images(context, path_context)
if components == "1:c":
# save samples from each component
for c in range(self.num_components):
path_samples = '{}/samples/sample_te{}_c{}_s{}.png'.format(self.log_path, self.current_epoch, c, self.args.seed)
samples = self.model.sample(context.to(self.args.device), component=c, temperature=temperature)
self.save_images(samples, path_samples)
else:
path_samples = '{}/samples/sample_c{}_ce{}_te{}_s{}.png'.format(
self.log_path, self.model.component, self.component_epoch, self.current_epoch, self.args.seed)
samples = self.model.sample(context.to(self.args.device), component=self.model.component, temperature=temperature)
self.save_images(samples, path_samples)
def _sample_fn(self, components, temperature=None):
if components == "1:c":
for c in range(self.num_components):
path_samples = '{}/samples/sample_te{}_c{}_s{}.png'.format(self.log_path, self.current_epoch, c, self.args.seed)
samples = self.model.sample(self.args.samples, component=c, temperature=temperature)
self.save_images(samples, path_samples)
else:
path_samples = '{}/samples/sample_component{}_componentepoch{}_totalepochs{}_seed{}.png'.format(
self.log_path, self.model.component, self.component_epoch, self.current_epoch, self.args.seed)
samples = self.model.sample(self.args.samples, component=self.model.component, temperature=temperature)
self.save_images(samples, path_samples)
def init_component(self):
self.best_loss = np.inf
self.best_loss_epoch = 0
for c in range(self.num_components):
if c != self.model.component:
self.optimizer.param_groups[c]['lr'] = 0.0
for n, param in self.model.named_parameters():
param.requires_grad = True if n.startswith(f"flows.{self.model.component}") else False
def update_learning_rates(self):
for c in range(self.num_components):
            self.optimizer.param_groups[c]['lr'] = self.args.lr if c == self.model.component else 0.0
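# Minimal sketch (not part of the original class) of the freezing trick used by
# init_component/update_learning_rates above: each boosting component owns one
# optimizer param group, and every group except the active one gets a zero
# learning rate so that only the current component trains.
def _demo_freeze_all_but(active_component, optimizer, lr):
    for c, group in enumerate(optimizer.param_groups):
        group['lr'] = lr if c == active_component else 0.0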
| 45.183406 | 163 | 0.586837 | 1,234 | 10,347 | 4.689627 | 0.138574 | 0.048384 | 0.026611 | 0.033178 | 0.527562 | 0.488509 | 0.470192 | 0.460515 | 0.449974 | 0.418524 | 0 | 0.007769 | 0.303373 | 10,347 | 228 | 164 | 45.381579 | 0.795089 | 0.106311 | 0 | 0.354037 | 0 | 0 | 0.05379 | 0.033619 | 0 | 0 | 0 | 0 | 0 | 1 | 0.062112 | false | 0 | 0.093168 | 0 | 0.192547 | 0.031056 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f105b721afb9f845c4e320b2b60eb5e5d9422cbd | 6,838 | py | Python | code4step2/data_registration.py | yukeyi/MCDS-Capstone | f7ce48fc5d3f5f96c1f29556585ed2338683c7d2 | [
"MIT"
] | null | null | null | code4step2/data_registration.py | yukeyi/MCDS-Capstone | f7ce48fc5d3f5f96c1f29556585ed2338683c7d2 | [
"MIT"
] | null | null | null | code4step2/data_registration.py | yukeyi/MCDS-Capstone | f7ce48fc5d3f5f96c1f29556585ed2338683c7d2 | [
"MIT"
] | null | null | null | import os
import numpy as np
import pandas as pd
import xarray as xr
import pickle as pkl
from datetime import datetime
from scipy import ndimage as ndi
import SimpleITK as sitk
import skimage as skim
from skimage import feature, morphology
import glob
from tqdm import tqdm  # used by add_liver_mask below
class RegHearts:
    '''Class that registers one subject's heart CT (and its mask) onto another's via Elastix'''
def __init__(self, fixed_subj, moving_subj, tslice=0, verbose=False):
self.verbose = verbose
self.fixed_subj = fixed_subj
self.moving_subj = moving_subj
self.tslice = tslice
self.load_niftis()
def load_niftis(self):
fixed_ct_name = os.path.join(self.fixed_subj, f'CT_tslice_{self.tslice}.nii')
fixed_mask_name = os.path.join(self.fixed_subj, f'mask_tslice_{self.tslice}.nii')
moving_ct_name = os.path.join(self.moving_subj, f'CT_tslice_{self.tslice}.nii')
moving_mask_name = os.path.join(self.moving_subj, f'mask_tslice_{self.tslice}.nii')
self.fixed_ct = self.get_sitk_image(fixed_ct_name)
self.fixed_mask = self.get_sitk_image(fixed_mask_name)
self.moving_ct = self.get_sitk_image(moving_ct_name)
self.moving_mask = self.get_sitk_image(moving_mask_name)
def get_sitk_image(self, nifti_name):
reader = sitk.ImageFileReader()
reader.SetImageIO("NiftiImageIO")
reader.SetFileName(nifti_name)
img = reader.Execute()
size = img.GetSize()
dims = img.GetSpacing()
orig = img.GetOrigin()
if self.verbose:
print(f"Image info for {nifti_name}:")
print("Image size:", size[0], size[1], size[2])
print("Image dims:", dims[0], dims[1], dims[2])
print("Image orig:", orig[0], orig[1], orig[2])
caster = sitk.CastImageFilter()
caster.SetOutputPixelType(sitk.sitkFloat32)
return caster.Execute(img)
def gen_param_map(self):
self.p_map_vector = sitk.VectorOfParameterMap()
paff = sitk.GetDefaultParameterMap("affine")
pbsp = sitk.GetDefaultParameterMap("bspline")
paff['AutomaticTransformInitialization'] = ['true']
paff['AutomaticTransformInitializationMethod'] = ['GeometricalCenter']
paff['NumberOfSamplesForExactGradient'] = ['100000']
pbsp['NumberOfSamplesForExactGradient'] = ['100000']
# paff['MaximumNumberOfSamplingAttempts'] = ['2']
# pbsp['MaximumNumberOfSamplingAttempts'] = ['2']
paff['NumberOfSpatialSamples'] = ['5000']
pbsp['NumberOfSpatialSamples'] = ['5000']
paff['NumberOfHistogramBins'] = ['32', '32', '64', '128']
pbsp['NumberOfHistogramBins'] = ['32', '32', '64', '128']
paff['MaximumNumberOfIterations'] = ['1024']
pbsp['MaximumNumberOfIterations'] = ['1024']
# paff['NumberOfResolutions'] = ['4']
# pbsp['NumberOfResolutions'] = ['4']
paff['GridSpacingSchedule'] = ['6', '4', '2', '1.000000']
pbsp['GridSpacingSchedule'] = ['6', '4', '2', '1.000000']
# pbsp['FinalGridSpacingInPhysicalUnits'] = ['40', '40', '40']
pbsp['FinalGridSpacingInPhysicalUnits'] = ['32', '32', '32']
# pbsp['Metric0Weight'] = ['0.01']
# pbsp['Metric1Weight'] = ['0.1']
# paff['FixedImagePyramid'] = ['FixedShrinkingImagePyramid']
# pbsp['FixedImagePyramid'] = ['FixedShrinkingImagePyramid']
# attempting to use multiple fixed images at once
# paff['Registration'] = ['MultiMetricMultiResolutionRegistration']
# paff['FixedImagePyramid'] = ['FixedSmoothingImagePyramid', 'FixedSmoothingImagePyramid']
# paff['ImageSampler'] = ['RandomCoordinate', 'RandomCoordinate']
# paff['Metric'] = ['AdvancedMattesMutualInformation', 'AdvancedMattesMutualInformation']
# pbsp['Metric'] = ['AdvancedMattesMutualInformation', 'TransformBendingEnergyPenalty',
# 'AdvancedMattesMutualInformation', 'TransformBendingEnergyPenalty']
# pbsp['FixedImagePyramid'] = ['FixedSmoothingImagePyramid', 'FixedSmoothingImagePyramid']
# pbsp['ImageSampler'] = ['RandomCoordinate', 'RandomCoordinate']
# 'RandomCoordinate', 'RandomCoordinate']
self.p_map_vector.append(paff)
self.p_map_vector.append(pbsp)
if self.verbose:
sitk.PrintParameterMap(self.p_map_vector)
def register_imgs(self):
self.elastixImageFilter = sitk.ElastixImageFilter()
self.elastixImageFilter.SetFixedImage(self.fixed_ct)
self.elastixImageFilter.SetMovingImage(self.moving_ct)
self.elastixImageFilter.SetParameterMap(self.p_map_vector)
self.elastixImageFilter.Execute()
self.moving_ct_result = self.elastixImageFilter.GetResultImage()
self.moving_ct_result.CopyInformation(self.fixed_ct)
def gen_mask(self, smooth=False):
transformixImageFilter = sitk.TransformixImageFilter()
transformixImageFilter.SetTransformParameterMap(
self.elastixImageFilter.GetTransformParameterMap())
transformixImageFilter.SetMovingImage(self.moving_mask)
transformixImageFilter.Execute()
self.moving_mask_result = transformixImageFilter.GetResultImage()
if smooth:
tmp_img = sitk.GetArrayFromImage(self.moving_mask_result)
tmp_img = np.where((tmp_img > 0), 1, 0)
self.moving_mask_result = sitk.GetImageFromArray(tmp_img)
self.moving_mask_result.CopyInformation(self.fixed_ct)
self.moving_mask_result = sitk.Cast(self.moving_mask_result, sitk.sitkFloat32)
def recenter_img_z(self, sitk_img, offset=False):
spacing = sitk_img.GetSpacing()[2]
layers = sitk_img.GetSize()[2]
orig = sitk_img.GetOrigin()
if not offset:
sitk_img.SetOrigin([orig[0], orig[1], spacing*(-layers/2)])
else:
sitk_img.SetOrigin([orig[0], orig[1], spacing*(-layers/1.5)])
def add_liver_mask(ds, moving_name='19', extra_name='extra1'):
    '''Generate a mask from the liver registration method, and place it into the given "extra" slot.
    Assumes an xarray dataset from the MREDataset class and that the MRELiverMask
    class (defined elsewhere in this project, not in this module) is importable.'''
for sub in tqdm(ds.subject):
mask_maker = MRELiverMask(str(sub.values), moving_name, verbose=False, center=True,
fixed_seq='T1Pre', moving_seq='T1_inphase')
mask_maker.gen_param_map()
mask_maker.register_imgs()
mask_maker.gen_mask(smooth=True)
mask = sitk.GetArrayFromImage(mask_maker.moving_mask_result)
mask = np.where(mask >= 1, 1, 0)
ds['image'].loc[dict(sequence=extra_name, subject=sub)] = mask
new_sequence = [a.replace(extra_name, 'liverMsk') for a in ds.sequence.values]
ds = ds.assign_coords(sequence=new_sequence)
return ds
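# Usage sketch (hypothetical paths): register one subject's heart CT onto
# another's and write out the propagated mask. Assumes a SimpleITK build with
# Elastix support and that each directory holds CT_tslice_0.nii / mask_tslice_0.nii.
if __name__ == '__main__':
    reg = RegHearts('subjects/fixed', 'subjects/moving', tslice=0, verbose=True)
    reg.gen_param_map()
    reg.register_imgs()
    reg.gen_mask(smooth=True)
    sitk.WriteImage(reg.moving_mask_result, 'moving_mask_registered.nii')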
| 45.586667 | 100 | 0.665107 | 722 | 6,838 | 6.121884 | 0.279778 | 0.033937 | 0.025339 | 0.027149 | 0.159276 | 0.080995 | 0.078281 | 0.043439 | 0.017647 | 0 | 0 | 0.022803 | 0.211173 | 6,838 | 149 | 101 | 45.892617 | 0.796626 | 0.200351 | 0 | 0.018868 | 0 | 0 | 0.122539 | 0.075621 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075472 | false | 0 | 0.103774 | 0 | 0.207547 | 0.037736 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f10a2eb48e0f895a84465c9f5a4dec731cfd9d4c | 1,584 | py | Python | minicCompiler.py | CorentinGoet/miniC-Compiler | 8631b1ce47e9de1c3a3255d7c0a941242ad48292 | [
"MIT"
] | null | null | null | minicCompiler.py | CorentinGoet/miniC-Compiler | 8631b1ce47e9de1c3a3255d7c0a941242ad48292 | [
"MIT"
] | null | null | null | minicCompiler.py | CorentinGoet/miniC-Compiler | 8631b1ce47e9de1c3a3255d7c0a941242ad48292 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
@author Corentin Goetghebeur (github.com/CorentinGoet).
"""
from lexer_pkg.lexer import Lexer
from parser_pkg.parser import Parser
from CLI.CLIinterface import CLI
import sys
import os
from CLI.actions import Actions
from pretty_printer_pkg.pretty_printer import PrettyPrinter
def main():
"""
Main function.
"""
cli = CLI()
action, file, output = cli.process_args(sys.argv)
lexer = Lexer()
parser = Parser()
if action == Actions.HELP:
cli.display_usage()
sys.exit(0)
src = open(file, "r").read()
if output is None:
output = "pretty.minic"
out = open(output, "w")
# Lexing
try:
lexer.tokenize(src)
except Exception as e:
print(f"Error during lexing: {e}")
sys.exit(1)
# Parsing
try:
parser.parse(lexer.lexems)
except Exception as e:
print(f"Error during parsing: {e}")
sys.exit(1)
# Action
try:
if action == Actions.PRETTY_PRINT:
visitor = PrettyPrinter()
        elif action == Actions.COMPILE:
            print("not implemented yet")
            # Exit here: falling through would reach the visitor stage with
            # `visitor` unbound and crash with a NameError.
            sys.exit(0)
except Exception as e:
print(f"Error during instantiation of the visitor: {e}")
sys.exit(1)
# Visitor
try:
visitor.visit(parser.ast)
    except Exception as e:
        print(f"Error during visitor: {e}")
        print(parser.ast)
        sys.exit(1)
out.write(visitor.clean_source)
out.close()
print(visitor.clean_source)
print(f"Successfully wrote the file {output}")
if __name__ == '__main__':
main()
| 21.405405 | 64 | 0.614268 | 199 | 1,584 | 4.798995 | 0.40201 | 0.031414 | 0.071204 | 0.075393 | 0.146597 | 0.146597 | 0.146597 | 0.146597 | 0 | 0 | 0 | 0.00434 | 0.272727 | 1,584 | 73 | 65 | 21.69863 | 0.824653 | 0.07702 | 0 | 0.229167 | 0 | 0 | 0.137282 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020833 | false | 0 | 0.145833 | 0 | 0.166667 | 0.1875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f110bbe65f1ea9ba273384dc8c0ede07db3a4131 | 1,025 | py | Python | QTM_F/1D/pH2/cor.py | binggu56/qmd | e2628710de15f8a8b9a1280fcf92f9e87559414c | [
"MIT"
] | null | null | null | QTM_F/1D/pH2/cor.py | binggu56/qmd | e2628710de15f8a8b9a1280fcf92f9e87559414c | [
"MIT"
] | null | null | null | QTM_F/1D/pH2/cor.py | binggu56/qmd | e2628710de15f8a8b9a1280fcf92f9e87559414c | [
"MIT"
] | null | null | null | ##!/usr/bin/python
import numpy as np
import pylab as plt
import matplotlib as mpl
import seaborn as sns
sns.set_context("poster",font_scale=1.5)
sns.set_style({'font.family':'Times New Roman'})
mpl.rcParams['lines.linewidth'] = 2
data = np.genfromtxt(fname='cor.dat')
ncols = data.shape[1]
#for x in range(1,ncols):
#plt.plot(data[:,0],data[:,1],linewidth=2,label='$\Re(C_{xx})$')
plt.plot(data[:,0],data[:,2],linewidth=2,label='$\Im(C_{11})$')
plt.plot(data[:,0],data[:,4],linewidth=2,label='$\Im(C_{22})$')
plt.plot(data[:,0],data[:,6],linewidth=2,label='$\Im(C_{33})$')
plt.plot(data[:,0],data[:,8],linewidth=2,label='$\Im(C_{44})$')
plt.plot(data[:,0],data[:,10],linewidth=2,label='$\Im(C_{12})$')
#plt.plot(data[:,0],data[:,3],linewidth=2,label='$\Re(C_{yy})$')
#plt.plot(data[:,0],data[:,4],linewidth=2,label='$\Im(C_{yy})$')
#plt.figure(1)
#plt.plot(x,y1,'-')
#plt.plot(x,y2,'g-')
plt.xlim(0,40)
plt.legend(loc=3)
plt.xlabel('Time [a.u.]')
#plt.ylabel('Positions')
plt.savefig('cor.pdf')
plt.show()
| 25.625 | 64 | 0.633171 | 186 | 1,025 | 3.430108 | 0.403226 | 0.109718 | 0.137931 | 0.15047 | 0.429467 | 0.109718 | 0.109718 | 0.109718 | 0.109718 | 0.109718 | 0 | 0.049215 | 0.068293 | 1,025 | 39 | 65 | 26.282051 | 0.618848 | 0.29561 | 0 | 0 | 0 | 0 | 0.192686 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.210526 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f111dbbb6122c9440b75b9f703c03c87600c2765 | 721 | py | Python | packages/pyright-internal/src/tests/samples/tuples10.py | sasano8/pyright | e804f324ee5dbd25fd37a258791b3fd944addecd | [
"MIT"
] | 4,391 | 2019-05-07T01:18:57.000Z | 2022-03-31T20:45:44.000Z | packages/pyright-internal/src/tests/samples/tuples10.py | sasano8/pyright | e804f324ee5dbd25fd37a258791b3fd944addecd | [
"MIT"
] | 2,740 | 2019-05-07T03:29:30.000Z | 2022-03-31T12:57:46.000Z | packages/pyright-internal/src/tests/samples/tuples10.py | sasano8/pyright | e804f324ee5dbd25fd37a258791b3fd944addecd | [
"MIT"
] | 455 | 2019-05-07T12:55:14.000Z | 2022-03-31T17:09:15.000Z | # This sample tests that inferred types for tuples strip
# literals under the appropriate circumstances.
from typing import List, Literal, Tuple
a1 = (1, 2)
t1: Literal["tuple[Literal[1], Literal[2]]"] = reveal_type(a1)
a2 = list((1, 2))
t2: Literal["list[int]"] = reveal_type(a2)
a3: List[Literal[1]] = list((1,))
t3: Literal["list[Literal[1]]"] = reveal_type(a3)
def func1(v1: Tuple[Literal[1], ...], v2: Tuple[Literal[1]]):
a4 = set(v1)
t4: Literal["set[Literal[1]]"] = reveal_type(a4)
a5 = set(v2)
t5: Literal["set[Literal[1]]"] = reveal_type(a5)
a6 = (1, "hi")
t6: Literal["tuple[Literal[1], Literal['hi']]"] = reveal_type(a6)
v4 = set(a6)
t7: Literal["set[int | str]"] = reveal_type(v4)
| 23.258065 | 65 | 0.638003 | 114 | 721 | 3.973684 | 0.403509 | 0.14128 | 0.11479 | 0.119205 | 0.242826 | 0.12362 | 0 | 0 | 0 | 0 | 0 | 0.069079 | 0.156727 | 721 | 30 | 66 | 24.033333 | 0.675987 | 0.138696 | 0 | 0 | 0 | 0 | 0.213592 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f113582e7cd0fe94674bc2e6c6d22332d46a63a0 | 5,027 | py | Python | chess/states/intro.py | cuiqui/chrisis | d824bde0bc4a3b9def86550f5bae0db6398f971e | [
"MIT"
] | null | null | null | chess/states/intro.py | cuiqui/chrisis | d824bde0bc4a3b9def86550f5bae0db6398f971e | [
"MIT"
] | null | null | null | chess/states/intro.py | cuiqui/chrisis | d824bde0bc4a3b9def86550f5bae0db6398f971e | [
"MIT"
] | null | null | null | import logging
from pathlib import Path
from dataclasses import dataclass
from typing import Union
import pygame as pg
import chess.settings as s
from chess.states.state import State
from chess.panels.intro.title import Title
from chess.panels.intro.menu import Menu
from chess.panels.console import Console
from chess.utils.coords import Coords
from chess.utils.typewriter import Typewriter, TypewriterConfig, LogType
vec = pg.math.Vector2
logger = logging.getLogger(Path(__file__).stem)
@dataclass
class Intro(State):
next = 'GAME'
greet = {
1: [False, ('[DEBUG] Loading protocol...', LogType.DEBUG)],
3: [False, (
'[WARNING] Cannot load "assets/fonts/stolen.ttf". Proceeding with default.',
LogType.WARNING
)],
        3.2: [False, ('[DEBUG] Displaying <State: Intro> interface.', LogType.DEBUG)],
3.3: [False, ('[DEBUG] Invoke <func: self.say_hi>', LogType.DEBUG)],
4: [False, ('[INFO] Hello person, I\'m beep boop.',)],
4.4: [False, ('[DEBUG] Waiting for input...', LogType.DEBUG)]
}
title: Union[None, 'Title'] = None
menu: Union[None, 'Menu'] = None
console: Union[None, 'Console'] = None
info_console: Union[None, 'Console'] = None
def __post_init__(self):
self.debug_draws = [
self.draw_grid,
self.draw_mouse_pos
]
self.new()
def new(self, config=None):
self.title = Title(
sprite_group=self.sprites,
pos=Coords(x=s.GRIDWIDTH//2, y=1),
size=Coords(x=16, y=4)
)
self.menu = Menu(
sprite_group=self.sprites,
pos=Coords(x=s.GRIDWIDTH//2, y=6),
size=Coords(x=22, y=15)
)
self.console = Console(
sprite_group=self.sprites,
pos=Coords(x=6, y=7),
size=Coords(x=6, y=6),
color=s.WHITE,
parent_color=s.DARKGREY,
margin=6,
frame_offset=s.TILESIZE,
tp_config=TypewriterConfig(
padding=5,
size=22,
color=s.DARKGREEN,
surface_color=s.DARKGREY,
pos='midtop'
),
config=TypewriterConfig(
surface_color=s.BLACK,
size=12,
padding=5
)
)
self.info_console = Console(
sprite_group=self.sprites,
pos=Coords(x=s.GRIDWIDTH//2+4, y=7),
size=Coords(x=6, y=6),
title='INFO',
color=s.WHITE,
parent_color=s.DARKGREY,
margin=6,
frame_offset=s.TILESIZE,
tp_config=TypewriterConfig(
padding=5,
size=22,
color=s.DARKGREEN,
surface_color=s.DARKGREY,
pos='midtop'
),
config=TypewriterConfig(
surface_color=s.BLACK,
color=s.WHITE,
size=12,
padding=5
)
)
self.menu.set_console(self.console)
self.menu.set_info_console(self.info_console)
def update(self, screen, current_time, dt):
self.current_time = current_time / 1000
if self.debug:
for func in self.debug_draws:
func(screen)
else:
screen.fill(s.BLACK)
self.sprites.draw(screen)
self.sprites.update()
self.say_hi()
def say_hi(self):
for k, v in self.greet.items():
if not v[0] and k < self.current_time:
v[0] = True
self.console.log(*v[1])
def events(self, events: list):
action = None
for event in events:
if event.type == pg.KEYDOWN and event.key == pg.K_d:
self.toggle_debug()
elif event.type == pg.MOUSEBUTTONUP:
action = self.menu.click(event.pos)
self.persist = self.menu.config
if action == 'PLAY':
if self.check():
self.next = 'GAME'
self.done = True
elif action == 'QUIT':
self.quit = True
def check(self):
if len(self.persist['player']) != 2:
self.console.log('[ERROR] You need another player!', LogType.ERROR)
return False
return True
@staticmethod
def draw_grid(screen):
for x in range(0, s.WIDTH, s.TILESIZE):
pg.draw.line(screen, s.LIGHTGREY, (x, 0), (x, s.HEIGHT))
for y in range(0, s.HEIGHT, s.TILESIZE):
pg.draw.line(screen, s.LIGHTGREY, (0, y), (s.WIDTH, y))
def draw_mouse_pos(self, screen):
mpos = pg.mouse.get_pos()
coords = Coords(x=s.TILESIZE*3, y=s.TILESIZE)
follow = pg.Surface((coords.x, coords.y))
rect = follow.get_rect(topleft=(0, 0))
tp = Typewriter(follow, TypewriterConfig(size=12, pos='center'))
tp.type(str(mpos))
screen.blit(follow, rect)
| 31.616352 | 88 | 0.539089 | 608 | 5,027 | 4.378289 | 0.279605 | 0.024793 | 0.022539 | 0.033058 | 0.279865 | 0.246056 | 0.246056 | 0.246056 | 0.207739 | 0.185199 | 0 | 0.018385 | 0.339964 | 5,027 | 158 | 89 | 31.816456 | 0.783906 | 0 | 0 | 0.258741 | 0 | 0.013986 | 0.065248 | 0.005172 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055944 | false | 0 | 0.083916 | 0 | 0.202797 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f114f18da4ab04c0defa026734c3b47686aa23f5 | 624 | py | Python | examples/custom_full_model_prediction.py | vickyvava/ImageAI | fc23bc1374d5a29f816c0895b37cb769b1766eac | [
"MIT"
] | 6 | 2019-09-03T01:45:20.000Z | 2021-09-08T09:07:49.000Z | examples/custom_full_model_prediction.py | vickyvava/ImageAI | fc23bc1374d5a29f816c0895b37cb769b1766eac | [
"MIT"
] | 3 | 2020-08-09T11:49:24.000Z | 2020-10-20T00:25:07.000Z | examples/custom_full_model_prediction.py | vickyvava/ImageAI | fc23bc1374d5a29f816c0895b37cb769b1766eac | [
"MIT"
] | 1 | 2019-12-30T18:56:05.000Z | 2019-12-30T18:56:05.000Z | from imageai.Prediction.Custom import CustomImagePrediction
import os
execution_path = os.getcwd()
predictor = CustomImagePrediction()
predictor.setModelPath(model_path=os.path.join(execution_path, "idenprof_full_resnet_ex-001_acc-0.119792.h5")) # Download the model via this link https://github.com/OlafenwaMoses/ImageAI/releases/tag/models-v3
predictor.setJsonPath(model_json=os.path.join(execution_path, "idenprof.json"))
predictor.loadFullModel(num_objects=10)
results, probabilities = predictor.predictImage(image_input=os.path.join(execution_path, "1.jpg"), result_count=5)
print(results)
print(probabilities)
| 32.842105 | 209 | 0.820513 | 83 | 624 | 6.012048 | 0.638554 | 0.104208 | 0.06012 | 0.114228 | 0.170341 | 0.124249 | 0 | 0 | 0 | 0 | 0 | 0.027444 | 0.065705 | 624 | 18 | 210 | 34.666667 | 0.828473 | 0.153846 | 0 | 0 | 0 | 0 | 0.116635 | 0.082218 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f116e25e44c1bee36f55a82c2b9058f44ab798b0 | 5,391 | py | Python | prometheus_ecs_discoverer/marshalling.py | lejmr/prometheus-ecs-discoverer | d4968c6e3f8588a9f64157462a82420d099ac583 | [
"Apache-2.0"
] | null | null | null | prometheus_ecs_discoverer/marshalling.py | lejmr/prometheus-ecs-discoverer | d4968c6e3f8588a9f64157462a82420d099ac583 | [
"Apache-2.0"
] | null | null | null | prometheus_ecs_discoverer/marshalling.py | lejmr/prometheus-ecs-discoverer | d4968c6e3f8588a9f64157462a82420d099ac583 | [
"Apache-2.0"
] | null | null | null | import json
import os
import re
from typing import Dict, List, Optional, Type
from loguru import logger
from prometheus_ecs_discoverer import s
from prometheus_ecs_discoverer.discovery import Target
# Copyright 2018, 2019 Signal Media Ltd. Licensed under the Apache License 2.0
# Modifications Copyright 2020 Tim Schwenke. Licensed under the Apache License 2.0
"""
Contains functions that work on `Target` objects and are responsible for
turning these into JSON files that can be consued by Prometheus file service
discover.
"""
def extract_path_interval_pairs(
    metrics_path: Optional[str] = None,
) -> Dict[str, Optional[str]]:
"""Extracts path intervals from given metrics path.
Transforms a string like this `30s:/mymetrics1,/mymetrics2` into:
```
{
"/mymetrics1": "30s",
"/mymetrics2": None
}
```
"""
if not metrics_path:
return {s.FALLBACK_METRICS_ENDPOINT: None}
path_interval = {}
for entry in metrics_path.split(","):
if ":" in entry:
pi = entry.split(":")
if re.search("(15s|30s|1m|5m)", pi[0]):
path_interval[pi[1]] = pi[0]
else:
path_interval[pi[1]] = None
else:
path_interval[entry] = None
logger.bind(inp=metrics_path, outp=path_interval).debug(
"Extracted path interval pairs."
) if s.DEBUG else None
return path_interval
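# Worked example for the parser above (illustrative):
#   extract_path_interval_pairs('30s:/metrics,/probe')
#   -> {'/metrics': '30s', '/probe': None}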
def get_filename(
    interval: Optional[str] = None,
filename_15s: str = s.FILENAME_15S,
filename_30s: str = s.FILENAME_30S,
filename_1m: str = s.FILENAME_1M,
filename_5m: str = s.FILENAME_5M,
filename_generic: str = s.FILENAME_GENERIC,
) -> str:
"""Gets the filename for given interval.
Exists to allow custom file names.
Returns:
str: File name to use.
"""
if interval == "15s":
return filename_15s
elif interval == "30s":
return filename_30s
elif interval == "1m":
return filename_1m
elif interval == "5m":
return filename_5m
else:
return filename_generic
def marshall_targets(
targets: List[Type[Target]],
filename_15s: str = s.FILENAME_15S,
filename_30s: str = s.FILENAME_30S,
filename_1m: str = s.FILENAME_1M,
filename_5m: str = s.FILENAME_5M,
filename_generic: str = s.FILENAME_GENERIC,
labelname_cluster: str = s.LABELNAME_CLUSTER,
labelname_taskversion: str = s.LABELNAME_TASKVERSION,
labelname_taskid: str = s.LABELNAME_TASKID,
labelname_containerid: str = s.LABELNAME_CONTAINERID,
labelname_instanceid: str = s.LABELNAME_INSTANCEID,
) -> Dict[str, List[Dict]]:
"""Marshalls given targets into JSON compatible structure.
```
{
"tasks.json": [
{
"targets": [
"ip:port"
],
"labels": {
"instance": "ip:port",
"job": "job",
"and": "more"
},
},
...
],
"15s-tasks.json": [
...
],
"30s-tasks.json": [
...
],
"1m-tasks.json": [
...
],
"5m-tasks.json": [
...
]
}
```
"""
result = {
s.FILENAME_GENERIC: [],
s.FILENAME_15S: [],
s.FILENAME_30S: [],
s.FILENAME_1M: [],
s.FILENAME_5M: [],
}
for target in targets:
path_interval_pairs = extract_path_interval_pairs(target.metrics_path)
for path, interval in path_interval_pairs.items():
labels = {}
if target.custom_labels:
labels.update(target.custom_labels)
labels["instance"] = target.p_instance
labels["job"] = target.task_name
labels["metrics_path"] = path
if target.cluster_name:
labels[labelname_cluster] = target.cluster_name
if target.task_version:
labels[labelname_taskversion] = target.task_version
if target.task_id:
labels[labelname_taskid] = target.task_id
if target.container_id:
labels[labelname_containerid] = target.container_id
if target.instance_id:
labels[labelname_instanceid] = target.instance_id
job = {"targets": [f"{target.ip}:{target.port}"], "labels": labels}
result[get_filename(interval)].append(job)
logger.bind(**result).info("Marshalled targets")
return result
def write_targets_to_file(targets: List[Type[Target]], output_directory: str) -> None:
"""Writes targets to files.
Args:
targets: List of target objects.
output_directory: Path to directory where files should be written to.
Raises:
OSError: If the given directory is not valid.
"""
if not os.path.isdir(output_directory):
raise OSError(f"Directory '{output_directory}' not found.")
for file_name, content in marshall_targets(targets).items():
file_path = f"{output_directory}/{file_name}"
tmp_file_path = f"{file_path}.tmp"
with open(tmp_file_path, "w") as file:
file.write(json.dumps(content, indent=4))
os.rename(tmp_file_path, file_path)
logger.bind(file=file_path).debug("Written file.") if s.DEBUG else None
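# End-to-end sketch: build one Target and emit the per-interval JSON files that
# Prometheus file_sd consumes. The keyword-constructor form of Target is an
# assumption; the field names follow the attribute usage in marshall_targets above.
if __name__ == '__main__':
    os.makedirs('/tmp/targets', exist_ok=True)
    target = Target(
        ip='10.0.0.12', port=8080, p_instance='10.0.0.12:8080',
        task_name='my-service', metrics_path='30s:/metrics',
        cluster_name='demo', task_version='3', task_id='abc123',
        container_id='def456', instance_id='i-0123', custom_labels={},
    )
    write_targets_to_file([target], '/tmp/targets')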
| 27.932642 | 86 | 0.59488 | 623 | 5,391 | 4.969502 | 0.263242 | 0.01938 | 0.03876 | 0.017442 | 0.116925 | 0.106589 | 0.106589 | 0.086563 | 0.086563 | 0.086563 | 0 | 0.02194 | 0.298275 | 5,391 | 192 | 87 | 28.078125 | 0.796458 | 0.213689 | 0 | 0.134021 | 0 | 0 | 0.061526 | 0.014278 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041237 | false | 0 | 0.072165 | 0 | 0.195876 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f117d7f32926745dd2bc177a8ad4c629308d255f | 1,242 | py | Python | tests/mlflow/test_mlflow_logging.py | MuttData/soam | 65612a02552668c6721dc20e675654883391c3e9 | [
"Apache-2.0"
] | 1 | 2021-09-17T01:14:57.000Z | 2021-09-17T01:14:57.000Z | tests/mlflow/test_mlflow_logging.py | MuttData/soam | 65612a02552668c6721dc20e675654883391c3e9 | [
"Apache-2.0"
] | null | null | null | tests/mlflow/test_mlflow_logging.py | MuttData/soam | 65612a02552668c6721dc20e675654883391c3e9 | [
"Apache-2.0"
] | 1 | 2021-08-09T14:22:50.000Z | 2021-08-09T14:22:50.000Z | from unittest.mock import patch
import mlflow
from soam.core import SoamFlow
from soam.workflow import Slicer
from tests.helpers import sample_data_df # pylint: disable=unused-import
def test_simple_flow(sample_data_df, tmpdir): # pylint: disable=redefined-outer-name
tmp_path = "file://" + str(tmpdir) + "/mlruns"
with patch("soam.core.runner.TRACKING_URI", tmp_path), patch(
"soam.core.runner.TRACKING_IS_ACTIVE", True
), patch("soam.core.step.TRACKING_IS_ACTIVE", True):
df = sample_data_df
df['metric'] = 1
dimensions = ["y"]
ds_col = 'ds'
metrics = ['metric']
slice_task = Slicer(ds_col=ds_col, dimensions=dimensions, metrics=metrics)
with SoamFlow(name="flow") as flow:
_ = slice_task(sample_data_df)
flow.run()
log_df = mlflow.search_runs(['0'])
assert len(log_df) == 2
assert log_df['tags.mlflow.runName'].tolist() == ['Slicer', 'flow_run']
slicer_logs = log_df[log_df['tags.mlflow.runName'] == 'Slicer'].iloc[0]
assert slicer_logs['params.dimensions'] == str(dimensions)
assert slicer_logs['params.metrics'] == str(metrics)
assert slicer_logs['params.ds_col'] == str(ds_col)
| 40.064516 | 85 | 0.6562 | 166 | 1,242 | 4.692771 | 0.385542 | 0.032092 | 0.061617 | 0.084724 | 0.125802 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004065 | 0.207729 | 1,242 | 30 | 86 | 41.4 | 0.787602 | 0.05314 | 0 | 0 | 0 | 0 | 0.198636 | 0.082694 | 0 | 0 | 0 | 0 | 0.192308 | 1 | 0.038462 | false | 0 | 0.192308 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f11901c892a2e1651656900a41006977b1b933d2 | 1,609 | py | Python | emote_recognizer.py | realPanamo/EmoteRecognizer | 9467b7f673266b258fe2cfd76f49c3dd83b2839c | [
"MIT"
] | 2 | 2019-06-23T17:59:52.000Z | 2019-06-25T06:33:15.000Z | emote_recognizer.py | juliarn/EmoteRecognizer | 9467b7f673266b258fe2cfd76f49c3dd83b2839c | [
"MIT"
] | null | null | null | emote_recognizer.py | juliarn/EmoteRecognizer | 9467b7f673266b258fe2cfd76f49c3dd83b2839c | [
"MIT"
] | null | null | null | import enum
import cv2
import numpy
import requests
import config
from model.model import KerasModel
from model.training_data import TrainingData
class EmoteType(enum.Enum):
PEEPO = 0
KAPPA = 1
class EmoteRecognizer:
def __init__(self):
training_data = TrainingData(config.peepo_data_dir, config.kappa_data_dir, config.image_size)
keras_model = KerasModel(config.model_filepath, config.image_size, config.batch_size, training_data)
keras_model.create_weights()
self.model = keras_model.model
self.image_size = config.image_size
def predict(self, image_array):
"""
Predicts an image
:param image_array: the image in the correct size turned into an array
:return: the predicted emote type of the image
"""
        # The model expects a batch of images, but we have only one image,
        # so we add a leading batch dimension.
        image_array = numpy.expand_dims(image_array, 0)
        # predict() returns a float score; round it so it maps onto a valid
        # enum value instead of raising ValueError for in-between floats.
        prediction = self.model.predict(image_array)[0][0]
        return EmoteType(int(round(float(prediction))))
def parse_image(self, url):
"""
Downloads an image, resizes it and turns it into an array
:param url: the url of the image which should be downloaded
:return: the array of the image, ready to be predicted
"""
response = requests.get(url)
image = numpy.asarray(bytearray(response.content), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
return cv2.resize(image, (self.image_size, self.image_size), interpolation=cv2.INTER_CUBIC)
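# Usage sketch (hypothetical URL): download an emote, resize it, and classify it.
# Requires the training data / model weights referenced in config to exist.
if __name__ == '__main__':
    recognizer = EmoteRecognizer()
    emote = recognizer.parse_image('https://example.com/emote.png')
    print(recognizer.predict(emote))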
| 29.254545 | 108 | 0.683033 | 216 | 1,609 | 4.939815 | 0.402778 | 0.050609 | 0.042174 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009009 | 0.241144 | 1,609 | 54 | 109 | 29.796296 | 0.864865 | 0.257303 | 0 | 0 | 0 | 0 | 0.004484 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.269231 | 0 | 0.615385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f119173712c2b862677623c0eea51d0e26339373 | 633 | py | Python | Python/Supermarket-Queue.py | kbgoda/Codewars-Challenges | b163df4f0bb5ccf5b6482d26b7c1d1a4ec4e9683 | [
"MIT"
] | null | null | null | Python/Supermarket-Queue.py | kbgoda/Codewars-Challenges | b163df4f0bb5ccf5b6482d26b7c1d1a4ec4e9683 | [
"MIT"
] | null | null | null | Python/Supermarket-Queue.py | kbgoda/Codewars-Challenges | b163df4f0bb5ccf5b6482d26b7c1d1a4ec4e9683 | [
"MIT"
] | null | null | null | # Author: Karan Goda
# https://www.codewars.com/kata/57b06f90e298a7b53d000a86
def queue_time(customers, n):
# e.g. customers = [12, 13]
    # `customers is []` always compared False (identity check against a fresh
    # list), so the guard never fired; test for emptiness instead.
    if not customers or n <= 0:
        return 0
    # E.g. for 3 tills (n = 3), queues starts as [0, 0, 0]
    queues = [0] * n
for customer in customers:
        shortest = queues.index(min(queues))
        queues[shortest] += customer
return max(queues)
# Test cases
print(queue_time([1, 2, 3], 1)) # Ans is 3
print(queue_time([1, 2, 3], 2)) # Ans is 3
print(queue_time([1, 2, 3, 5], 3)) # Ans is 6
| 31.65 | 61 | 0.578199 | 100 | 633 | 3.62 | 0.46 | 0.099448 | 0.116022 | 0.124309 | 0.174033 | 0.174033 | 0.127072 | 0.127072 | 0.127072 | 0 | 0 | 0.099783 | 0.271722 | 633 | 19 | 62 | 33.315789 | 0.685466 | 0.298578 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.230769 | 0.230769 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f11a59c34afba587fb8aa830114d8088e1e90046 | 777 | py | Python | setup.py | mitchelllisle/monstermash | 724907514a9727a2970b3ddffe4d6fb2a490da48 | [
"MIT"
] | null | null | null | setup.py | mitchelllisle/monstermash | 724907514a9727a2970b3ddffe4d6fb2a490da48 | [
"MIT"
] | null | null | null | setup.py | mitchelllisle/monstermash | 724907514a9727a2970b3ddffe4d6fb2a490da48 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
with open('requirements/requirements.txt') as f:
requirements = f.read().splitlines()
with open('requirements/requirements-test.txt') as f:
test_requirements = f.read().splitlines()
setup(
name='monstermash',
author='Mitchell Lisle',
author_email='m.lisle90@gmail.com',
description="A Python Encryption Helper Library",
install_requires=requirements,
packages=find_packages(),
setup_requires=[],
test_suite='tests',
tests_require=test_requirements,
entry_points={
'console_scripts': [
'monstermash=monstermash.__main__:main',
],
},
url='https://github.com/mitchelllisle/monstermash',
version='0.1.0',
zip_safe=False,
)
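# Usage note: after `pip install .`, the console_scripts entry above puts a
# `monstermash` executable on PATH that dispatches to monstermash.__main__:main.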
| 25.064516 | 56 | 0.666667 | 84 | 777 | 5.97619 | 0.607143 | 0.047809 | 0.079681 | 0.12749 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008078 | 0.203346 | 777 | 30 | 57 | 25.9 | 0.802908 | 0 | 0 | 0 | 0 | 0 | 0.317889 | 0.1287 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.041667 | 0 | 0.041667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f11a62a225b14fde953a537552d646f6d24b4f0a | 2,117 | py | Python | flatpickr/_base.py | maqnius/django-flatpickr | 92d5bbf9d4c0c01f904053b39f587053d072b45d | [
"MIT"
] | 40 | 2019-03-07T08:48:58.000Z | 2021-12-25T21:26:14.000Z | flatpickr/_base.py | maqnius/django-flatpickr | 92d5bbf9d4c0c01f904053b39f587053d072b45d | [
"MIT"
] | 6 | 2019-08-06T11:08:25.000Z | 2021-11-16T10:05:52.000Z | flatpickr/_base.py | maqnius/django-flatpickr | 92d5bbf9d4c0c01f904053b39f587053d072b45d | [
"MIT"
] | 8 | 2020-01-02T15:14:38.000Z | 2022-01-24T13:10:26.000Z | # -*- coding: utf-8 -*-
"""Contains Base Date-Picker input class for widgets of this package."""
from django.forms.widgets import DateTimeBaseInput
from flatpickr._settings import WidgetSettings
from flatpickr._media import WidgetMedia
from flatpickr._config import WidgetConfig
class BasePickerInput(DateTimeBaseInput):
"""Base Date-Picker input class for widgets of this package."""
Media = WidgetMedia
picker_type = 'DATE'
datetime_format = '%Y-%m-%d'
format_key = 'DATE_INPUT_FORMATS'
option_overrides = {
'dateFormat': 'Y-m-d',
}
def __init__(self, attrs=None, options=None):
"""Initialize the Date-picker widget."""
self.config = WidgetConfig(self.picker_type)
self.config._calculate_options(options, self.option_overrides)
self.template_name = WidgetSettings.TEMPLATE_NAME or self.template_name
_attrs = WidgetSettings.ATTRS.copy()
_attrs.update(attrs or {})
super().__init__(_attrs, self.datetime_format)
def get_context(self, name, value, attrs):
"""Return widget context dictionary."""
context = super().get_context(
name, value, attrs)
context['widget']['attrs']['fp_config'] = self.config.to_json()
return context
def start_of(self, event_id):
"""
Set Date-Picker as the start-date of a date-range.
Args:
- event_id (string): User-defined unique id for linking two fields
"""
WidgetConfig.events[str(event_id)] = self
return self
def end_of(self, event_id, import_options=True):
"""
Set Date-Picker as the end-date of a date-range.
Args:
- event_id (string): User-defined unique id for linking two fields
"""
event_id = str(event_id)
if event_id in WidgetConfig.events:
linked_picker = WidgetConfig.events[event_id]
self.config.linked_to = linked_picker.config.id
else:
raise KeyError(
'start-date not specified for event_id "%s"' % event_id)
return self
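# Linking sketch (commented out so importing this module stays side-effect free):
# how a form could pair two pickers into a date range via start_of/end_of. In
# practice a concrete subclass of BasePickerInput is used; the base class
# appears here purely for illustration.
#
#   from django import forms
#
#   class EventForm(forms.Form):
#       start = forms.DateField(widget=BasePickerInput().start_of('event'))
#       end = forms.DateField(widget=BasePickerInput().end_of('event'))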
| 34.145161 | 79 | 0.64478 | 257 | 2,117 | 5.120623 | 0.350195 | 0.058511 | 0.021277 | 0.028875 | 0.206687 | 0.179331 | 0.179331 | 0.179331 | 0.179331 | 0.179331 | 0 | 0.000633 | 0.253188 | 2,117 | 61 | 80 | 34.704918 | 0.831752 | 0.222957 | 0 | 0.055556 | 0 | 0 | 0.069211 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.138889 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f11f0d6ac32bd29c53535c13dbecdf64bee130f1 | 1,968 | py | Python | app/usr/lib/chewup/chewup/ui/indicator.py | samwhelp/util-chewup | aedcfe4a765218e11936dc4e5c259157635d7f41 | [
"MIT"
] | null | null | null | app/usr/lib/chewup/chewup/ui/indicator.py | samwhelp/util-chewup | aedcfe4a765218e11936dc4e5c259157635d7f41 | [
"MIT"
] | null | null | null | app/usr/lib/chewup/chewup/ui/indicator.py | samwhelp/util-chewup | aedcfe4a765218e11936dc4e5c259157635d7f41 | [
"MIT"
] | null | null | null |
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
gi.require_version('AppIndicator3', '0.1')
from gi.repository import AppIndicator3 as AppIndicator
class Indicator:
app = None
view = None
indicator = None
menu = None
icon_name_on_win_activate = 'empty'
icon_name_on_win_deactivate = 'folder'
icon_name_btn_app_quit = 'application-exit'
def prep (self, *args, **kwds):
self.app = kwds['app']
def init (self):
self.init_menu()
self.view = self.indicator
def init_menu (self):
## Menu
self.menu = menu = Gtk.Menu()
## Activate
item = Gtk.MenuItem.new_with_label('Activate (<Super>+a)')
item.connect('activate', self.on_activate_win)
menu.append(item)
## Fullscreen
item = Gtk.MenuItem.new_with_label('Fullscreen (F11)')
item.connect('activate', self.on_fullscreen_win)
menu.append(item)
## About
item = Gtk.MenuItem.new_with_label('About')
item.connect('activate', self.on_show_about)
menu.append(item)
## Quit
img = Gtk.Image.new_from_icon_name(self.icon_name_btn_app_quit, 16)
item = Gtk.ImageMenuItem.new_with_label('Quit')
item.connect('activate', self.on_quit_app)
item.set_image(img)
menu.append(item)
menu.show_all()
## Indicator
self.indicator = indicator = AppIndicator.Indicator.new(
self.app.name,
self.icon_name_on_win_activate,
AppIndicator.IndicatorCategory.APPLICATION_STATUS
)
indicator.set_menu(menu)
indicator.set_status(AppIndicator.IndicatorStatus.ACTIVE)
def on_show_about (self, menu_item):
self.app.go_show_about()
def on_quit_app (self, menu_item):
self.app.go_quit()
def on_activate_win (self, menu_item):
self.app.win.go_activate()
def on_fullscreen_win (self, menu_item):
self.app.win.go_fullscreen()
def go_switch_icon_on_win_activate (self):
self.indicator.set_icon(self.icon_name_on_win_activate)
def go_switch_icon_on_win_deactivate (self):
self.indicator.set_icon(self.icon_name_on_win_deactivate)
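# Wiring sketch (illustrative): the owning application is expected to do roughly
#   indicator = Indicator()
#   indicator.prep(app=app)
#   indicator.init()
# after which indicator.view exposes the AppIndicator instance for the tray.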
| 23.710843 | 69 | 0.745427 | 293 | 1,968 | 4.716724 | 0.211604 | 0.04631 | 0.036179 | 0.047033 | 0.377713 | 0.240232 | 0.098408 | 0.098408 | 0.059334 | 0.059334 | 0 | 0.005862 | 0.13313 | 1,968 | 82 | 70 | 24 | 0.80422 | 0.022866 | 0 | 0.074074 | 0 | 0 | 0.067575 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.055556 | 0 | 0.37037 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f11f62efb3e0225c391fb17a49be38c58be39674 | 4,779 | py | Python | mutation_lib_prep/cosmic_integrator.py | vrushali-broad/ctat-mutations | ba451dc36039f47e9c61b3ee76211070f6dc53a5 | [
"BSD-3-Clause"
] | null | null | null | mutation_lib_prep/cosmic_integrator.py | vrushali-broad/ctat-mutations | ba451dc36039f47e9c61b3ee76211070f6dc53a5 | [
"BSD-3-Clause"
] | null | null | null | mutation_lib_prep/cosmic_integrator.py | vrushali-broad/ctat-mutations | ba451dc36039f47e9c61b3ee76211070f6dc53a5 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
from __future__ import (absolute_import, division,
print_function, unicode_literals)
#import inspect
import os,sys
import csv
import argparse
import subprocess
import gzip
import glob
import logging
##
## This script decorates the Cosmic coding variants with cancer census annotations.
FORMAT = "%(asctime)-15s: %(message)s"
logger = logging.getLogger()
logging.basicConfig(stream=sys.stderr, format=FORMAT, level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument("--CosmicCodingMuts", required = True ,help="CosmicCodingMut VCF file")
parser.add_argument("--CosmicMutantExport", required = True ,help="CosmicMutantExport TSV file")
parser.add_argument("--output_vcf", required=True, help="output vcf file")
args=parser.parse_args()
csv.field_size_limit(sys.maxsize)
##Add lines to header
add_header_lines = [
    '##INFO=<ID=COSMIC_ID,Number=1,Type=String,Description="COSMIC mutation id (unique).">\n',
    '##INFO=<ID=TISSUE,Number=1,Type=String,Description="The primary tissue/cancer and subtype from which the sample originated.">\n',
    '##INFO=<ID=TUMOR,Number=1,Type=String,Description="The histological classification of the sample.">\n',
    '##INFO=<ID=FATHMM,Number=1,Type=String,Description="FATHMM (Functional Analysis through Hidden Markov Models). \'Pathogenic\'=Cancer or damaging, \'Neutral\'=Passenger or Tolerated.">\n',
    '##INFO=<ID=SOMATIC,Number=1,Type=String,Description="Information on whether the sample was reported to be Confirmed Somatic. \'Confirmed somatic\'=if the mutation has been confirmed to be somatic in the experiment by sequencing both the tumour and a matched normal from the same patient, \'Previously Observed\'=when the mutation has been reported as somatic previously but not in current paper, \'variant of unknown origin\'=when the mutation is known to be somatic but the tumour was sequenced without a matched normal">\n',
    '##INFO=<ID=PUBMED_COSMIC,Number=.,Type=String,Description="The PUBMED ID for the paper that the sample was noted in COSMIC.">\n'
]
####################################
# parsing the cancer gene census: CosmicMutantExport
#GENE,STRAND,CDS,AA,CNT
#COSMIC_ID,TISSUE,TUMOR,FATHMM,SOMATIC,PUBMED_COSMIC,GENE,STRAND,GENE,STRAND,CDS,AA,CNT
mutant_dict_necessary_info={}
logger.info("Capturing info from: {}".format(args.CosmicMutantExport))
with gzip.open(args.CosmicMutantExport,"rt") as mt:
mutant_reader=csv.DictReader(mt, delimiter=str("\t"), quoting=csv.QUOTE_NONE)
for row in mutant_reader:
info_items=["COSMIC_ID="+row.get("GENOMIC_MUTATION_ID",""),
"TISSUE="+row.get("Primary site",""),
"TUMOR="+row.get("Primary histology","")+" -- "+row.get("Histology subtype 1",""),
"FATHMM="+row.get("FATHMM prediction",""),
"SOMATIC="+row.get("Mutation somatic status",""),
"PUBMED_COSMIC="+row.get("Pubmed_PMID",""),
"GENE="+row.get("Gene name",""),
"STRAND="+row.get("Mutation strand",""),
"CDS="+row.get("Mutation CDS",""),
"AA="+row.get("Mutation AA","")]
info=";".join(info_items)
mutant_dict_necessary_info[row["GENOMIC_MUTATION_ID"]]=info
logger.info("Now annotating {}".format(args.CosmicCodingMuts))
coding_muts_gzip_fh = gzip.open(args.CosmicCodingMuts,"rt")
cosmic_vcf=os.path.join(args.output_vcf)
logger.info("writing summary file: {}".format(cosmic_vcf))
ofh = open(cosmic_vcf, 'wt')
annotated_set = set()
not_annotated = set()
with gzip.open(args.CosmicCodingMuts,"rt") as fh:
for line in fh:
if line.startswith("##"):
ofh.write(line)
continue
if line.startswith("#CHROM"):
ofh.write("".join(add_header_lines))
ofh.write(line)
continue
line = line.rstrip()
vals = line.split("\t")
vals[0] = "chr" + vals[0]
cosmic_id = vals[2]
if cosmic_id in mutant_dict_necessary_info:
        vals[7] += ";" + mutant_dict_necessary_info[cosmic_id]
annotated_set.add(cosmic_id)
else:
not_annotated.add(cosmic_id)
ofh.write("\t".join(vals) + "\n")
ofh.close()
logger.info("-number of variants with annotations added: {}".format(len(annotated_set)))
logger.info("-number of variants w/o added annotations: {}".format(len(not_annotated)))
logger.info("bgzip compressing {}".format(cosmic_vcf))
subprocess.check_call("bgzip -f {}".format(cosmic_vcf), shell=True)
logger.info("indexing {}".format(cosmic_vcf))
subprocess.check_call(["bcftools", "index", "{}.gz".format(cosmic_vcf)])
logger.info("Done prepping cosmic vcf: {}".format(cosmic_vcf))
sys.exit(0)
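# Invocation sketch (input file names are the standard COSMIC download names):
#   python cosmic_integrator.py \
#       --CosmicCodingMuts CosmicCodingMuts.vcf.gz \
#       --CosmicMutantExport CosmicMutantExport.tsv.gz \
#       --output_vcf cosmic.ctat.vcf
# bgzip and bcftools must be on PATH for the final compression/indexing steps.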
| 38.853659 | 517 | 0.676292 | 619 | 4,779 | 5.105008 | 0.355412 | 0.020886 | 0.039873 | 0.029114 | 0.068354 | 0.021519 | 0 | 0 | 0 | 0 | 0 | 0.002524 | 0.170956 | 4,779 | 122 | 518 | 39.172131 | 0.795053 | 0.064658 | 0 | 0.050633 | 0 | 0.075949 | 0.37924 | 0.067843 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.012658 | 0.101266 | 0 | 0.101266 | 0.012658 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f121b237306aa5803c642431f163fbdc7a638007 | 1,296 | py | Python | src/adversarial_q_learning/network/dqn.py | shuvoxcd01/neural_tic_tac_toe | a988230ff3dd0d882ebc0fb19630c9ff22fef629 | [
"Apache-2.0"
] | null | null | null | src/adversarial_q_learning/network/dqn.py | shuvoxcd01/neural_tic_tac_toe | a988230ff3dd0d882ebc0fb19630c9ff22fef629 | [
"Apache-2.0"
] | null | null | null | src/adversarial_q_learning/network/dqn.py | shuvoxcd01/neural_tic_tac_toe | a988230ff3dd0d882ebc0fb19630c9ff22fef629 | [
"Apache-2.0"
] | null | null | null | import os
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, InputLayer, Flatten
from tensorflow.keras.models import clone_model
class DQN:
@staticmethod
def get_q_network(input_shape, num_actions):
model = Sequential()
model.add(InputLayer(input_shape=input_shape))
model.add(Flatten())
model.add(Dense(units=100, activation='relu'))
model.add(Dense(units=250, activation='relu'))
model.add(Dense(units=100, activation='relu'))
model.add(Dense(units=50, activation='relu'))
model.add(Dense(units=num_actions))
return model
@staticmethod
def get_weights(model: Sequential):
weights = {}
for weight in model.trainable_weights:
weights[weight.name] = weight
return weights
@staticmethod
def clone(model):
cloned_model = clone_model(model=model)
cloned_model.set_weights(model.get_weights())
return cloned_model
@staticmethod
def save_model(model, saved_model_dir, saved_model_name):
if not os.path.exists(saved_model_dir):
os.makedirs(saved_model_dir)
path_to_saved_model = os.path.join(saved_model_dir, saved_model_name)
model.save(path_to_saved_model)
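# Usage sketch (shapes illustrative): build a Q-network for a 3x3 board with 9
# actions, clone a frozen target copy, and checkpoint the online network.
if __name__ == '__main__':
    q_net = DQN.get_q_network(input_shape=(3, 3), num_actions=9)
    target_net = DQN.clone(q_net)
    DQN.save_model(q_net, saved_model_dir='checkpoints', saved_model_name='q_net')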
| 28.173913 | 77 | 0.681327 | 163 | 1,296 | 5.202454 | 0.306748 | 0.09434 | 0.076651 | 0.106132 | 0.242925 | 0.242925 | 0.125 | 0.125 | 0.125 | 0.125 | 0 | 0.010924 | 0.222994 | 1,296 | 45 | 78 | 28.8 | 0.831182 | 0 | 0 | 0.181818 | 0 | 0 | 0.012346 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121212 | false | 0 | 0.121212 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f122a05ae2a117a955547f6e8fd27731b4497e6e | 6,160 | py | Python | conmato/member.py | ngocbh/codeforces-management-tools | 4064cf3cf4bd9ffabdab15e4243e3fbe80a824ad | [
"MIT"
] | 6 | 2020-03-24T16:57:31.000Z | 2020-09-19T13:34:14.000Z | conmato/member.py | ngocjr7/codeforces-standings-crawler | 1bb8bf468299ea2c944a238627ee1516625cb91e | [
"MIT"
] | 1 | 2021-02-04T04:39:55.000Z | 2021-02-04T04:39:55.000Z | conmato/member.py | ngocjr7/codeforces-standings-crawler | 1bb8bf468299ea2c944a238627ee1516625cb91e | [
"MIT"
] | 1 | 2020-04-26T11:25:55.000Z | 2020-04-26T11:25:55.000Z | from __future__ import absolute_import
import re
import requests
import time
import random
from conmato.utils import *
def remove_participants(session, member, group_id=GROUP_ID):
url = MEMBERS_URL.format(group_id)
payload = {
'_tta': member['_tta'],
'action': 'removeMember',
'csrf_token': member['csrf_token'],
'memberGroupRoleId': member['groupRoleId']
}
response = session.post(url, data=payload)
if response.status_code != 200:
logger.warning('confirm_joining: an error occurred while confirming')
def remove_all_participants(session, user_format='.*', group_id=GROUP_ID):
members = get_all_members(session, group_id)
for member in members:
if member['pending'] or member['role'] == 'manager':
continue
if re.search(user_format, member['username']):
remove_participants(session, member, group_id)
se = random.uniform(float(TIMESLEEP)/2, TIMESLEEP)
time.sleep(se)
def confirm_joining(session, member, action, group_id=GROUP_ID):
"""
action = ['accept', 'reject']
"""
url = MEMBERS_URL.format(group_id)
payload = {
'_tta': member['_tta'],
'action': 'confirmJoining',
'confirmed': action,
'csrf_token': member['csrf_token'],
'groupRoleId': member['groupRoleId']
}
response = session.post(url, data=payload)
if response.status_code != 200:
logger.warning('confirm_joining: an error occurred while confirming')
def confirm_all_participants(session, action, user_format=USER_FORMAT, group_id=GROUP_ID):
"""
    if action == 'accept' -> accept all users that match user_format
    if action == 'reject' -> reject all users that do not match user_format
"""
members = get_pending_participants(session, group_id)
if action != 'accept' and action != 'reject':
logger.warning('confirm_all_participants: cannot recognize action')
return
for member in members:
if re.search(user_format, member['username']) and action == 'accept':
confirm_joining(session, member, action, group_id)
elif not re.search(user_format, member['username']) and action == 'reject':
confirm_joining(session, member, action, group_id)
se = random.uniform(float(TIMESLEEP)/2, TIMESLEEP)
time.sleep(se)
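# Editor's sketch of a typical call (kept as a comment; `session` would be an
# already-authenticated requests.Session and the regex is illustrative only):
#   confirm_all_participants(session, action='accept', user_format=r'^cs101_')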
def get_pending_participants(session, group_id=GROUP_ID):
logger.info("Getting pending members of group: {}".format(group_id))
url = MEMBERS_URL.format(group_id)
response = session.get(url)
doc = pq(response.text)
table = doc('table').not_('.rtable').not_('.table-form')
members = []
for tr in pq(table.children())[1:]:
if pq(tr).children().eq(5).children().eq(0).is_('form'):
member = {
'username': pq(tr).children().eq(0)('a').eq(0).text(),
'groupRoleId': pq(tr).children().eq(5).children().eq(0)('input').eq(2).attr('value'),
'csrf_token': pq(tr).children().eq(5).children().eq(0)('input').eq(0).attr('value'),
'_tta': 961
}
members.append(member)
return members
def get_all_members(session, group_id=GROUP_ID):
logger.info("Getting all members in group: {}".format(group_id))
url = MEMBERS_URL.format(group_id)
response = session.get(url)
doc = pq(response.text)
table = doc('table').not_('.rtable').not_('.table-form')
members = []
for tr in pq(table.children())[1:]:
member = {
'username': pq(tr).children().eq(0)('a').eq(0).text()
}
if member['username'] == '':
continue
member['csrf_token'] = pq(tr).children().eq(
0)('form')('input').eq(0).attr('value')
member['groupRoleId'] = pq(tr).children().eq(
0)('form')('input').eq(2).attr('value')
member['_tta'] = 961
if pq(tr).children().eq(5).children().eq(0).is_('form'):
member['pending'] = True
else:
member['pending'] = False
if pq(tr).children().eq(1).text().lower() == 'creator':
member['role'] = 'manager'
else:
member['role'] = 'spectator'
for option in pq(tr).children().eq(1)('select')('option'):
if pq(option).attr['selected'] == 'selected':
member['role'] = pq(option).val().lower()
members.append(member)
return members
def is_manager(group_id=GROUP_ID, username='', password=''):
"""
check if user is manager of codeforces group
Return:
True, False
"""
if username == '' or password == '':
logger.warning(
"isManager:Please provide username and password before using.")
return False
tmp_ss = requests.Session()
url = MEMBERS_URL.format(group_id)
response = tmp_ss.get(url)
doc = pq(response.text)
members = {}
for e in doc('table').eq(1).children():
username_tmp = pq(e)('td').eq(0).text()
mtype_tmp = pq(e)('td').eq(1).text()
members[username_tmp.lower()] = mtype_tmp.lower()
payload = {
"handleOrEmail": username,
"password": password,
"csrf_token": "",
"bfaa": '1ef059a32710a29f84fbde5b5500d49c',
"action": 'enter',
"ftaa": 'uf8qxh8b5vphq6wna4',
"_tta": 569
}
response = tmp_ss.get(LOGIN_URL)
doc = pq(response.text)
payload['csrf_token'] = doc('input').attr('value')
response = tmp_ss.post(
LOGIN_URL,
data=payload,
headers=dict(referer=LOGIN_URL)
)
doc = pq(response.text)
username_again = doc('div').filter(
'.lang-chooser').children().eq(1).children().eq(0).text()
if username_again is None or username.lower() != username_again.lower():
logger.warning('isManager:Login failed, wrong username or password')
return False
if username.lower() in members and members[username.lower()] == 'manager':
return True
    logger.warning(
        'isManager: username is not a member or manager of the Codeforces group')
return False
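# Editor's sketch (not in the original module): the credentials below are
# placeholders, and GROUP_ID is assumed to be defined in conmato.utils.
if __name__ == '__main__':
    if is_manager(group_id=GROUP_ID, username='alice', password='not-a-real-password'):
        print('alice can manage this Codeforces group')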
| 33.478261 | 101 | 0.601948 | 739 | 6,160 | 4.878214 | 0.188092 | 0.050485 | 0.033287 | 0.038835 | 0.570874 | 0.490985 | 0.386685 | 0.335922 | 0.275728 | 0.275728 | 0 | 0.014801 | 0.243182 | 6,160 | 183 | 102 | 33.661202 | 0.758473 | 0.037338 | 0 | 0.4 | 0 | 0 | 0.175103 | 0.009747 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0.035714 | 0.042857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f123c50479e273b94aab8c70c02be92191fe755f | 6,620 | py | Python | canary/argument_pipeline/component_prediction.py | Open-Argumentation/Canary | 1a3128a5357f0428b7cb19d66b52e83dbe75fff0 | [
"MIT"
] | 3 | 2020-12-16T19:26:39.000Z | 2022-03-16T16:41:31.000Z | canary/argument_pipeline/component_prediction.py | Open-Argumentation/Canary | 1a3128a5357f0428b7cb19d66b52e83dbe75fff0 | [
"MIT"
] | 4 | 2021-05-25T13:28:40.000Z | 2022-01-15T12:44:54.000Z | canary/argument_pipeline/component_prediction.py | Open-Argumentation/Canary | 1a3128a5357f0428b7cb19d66b52e83dbe75fff0 | [
"MIT"
] | 2 | 2020-12-10T13:40:36.000Z | 2020-12-16T19:34:03.000Z | import pandas
from imblearn.over_sampling import RandomOverSampler
from nltk.tree import Tree
from scipy.sparse import hstack
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import make_union, make_pipeline
from sklearn.preprocessing import MaxAbsScaler
from sklearn.svm import SVC
from ..argument_pipeline.base import Model
from ..corpora import load_essay_corpus
from ..nlp import Lemmatiser, PosDistribution
from ..nlp._utils import spacy_download
from ..nlp.transformers import DiscourseMatcher, EmbeddingTransformer
from ..utils import logger
_nlp = spacy_download(disable=['ner', 'textcat', 'tagger', 'lemmatizer', 'tokenizer',
'attribute_ruler',
'tok2vec', ])
__all__ = [
"ArgumentComponent",
"ArgumentComponentFeatures"
]
class ArgumentComponent(Model):
"""Detects argumentative components from natural language e.g. premises and claims"""
def __init__(self, model_id: str = None):
if model_id is None:
model_id = "argument_component"
super().__init__(
model_id=model_id,
)
@staticmethod
def default_train():
"""The default training method. ArgumentComponent defaults to using the essay corpus with undersampling."""
from sklearn.model_selection import train_test_split
ros = RandomOverSampler(random_state=0, sampling_strategy='not majority')
x, y = load_essay_corpus(purpose="component_prediction")
x, y = ros.fit_resample(pandas.DataFrame(x), pandas.DataFrame(y))
train_data, test_data, train_targets, test_targets = \
train_test_split(x, y,
train_size=0.7,
shuffle=True,
random_state=0,
stratify=y
)
logger.debug("Resample")
return list(train_data.to_dict("index").values()), list(test_data.to_dict("index").values()), train_targets[
0].tolist(), test_targets[0].tolist()
@classmethod
def train(cls, pipeline_model=None, train_data=None, test_data=None, train_targets=None, test_targets=None,
save_on_finish=True, *args, **kwargs):
        # If no pipeline model is given, fall back to this default pipeline
if pipeline_model is None:
pipeline_model = make_pipeline(
ArgumentComponentFeatures(),
MaxAbsScaler(),
SVC(random_state=0, class_weight='balanced', probability=True, cache_size=1000)
)
return super().train(
pipeline_model=pipeline_model,
train_data=train_data,
test_data=test_data,
train_targets=train_targets,
test_targets=test_targets,
save_on_finish=save_on_finish
)
class ArgumentComponentFeatures(TransformerMixin, BaseEstimator):
"""Transformer Mixin that extracts features for the ArgumentComponent model"""
features: list = [
TfidfVectorizer(ngram_range=(1, 1), tokenizer=Lemmatiser(), lowercase=False),
TfidfVectorizer(ngram_range=(2, 2), tokenizer=Lemmatiser(), lowercase=False, max_features=2000),
DiscourseMatcher('forward'),
DiscourseMatcher('thesis'),
DiscourseMatcher('rebuttal'),
DiscourseMatcher('backward'),
DiscourseMatcher('obligation'),
DiscourseMatcher('recommendation'),
DiscourseMatcher('possible'),
DiscourseMatcher('intention'),
DiscourseMatcher('option'),
DiscourseMatcher('first_person'),
EmbeddingTransformer()
]
def __init__(self):
self.__dict_feats = DictVectorizer()
self.__features = make_union(*ArgumentComponentFeatures.features)
@staticmethod
def _prepare_dictionary_features(data):
pos_dist = PosDistribution()
cover_sentences = pandas.DataFrame(data).cover_sentence.tolist()
cover_sentences = list(_nlp.pipe(cover_sentences))
def get_features(feats):
features = []
for i, d in enumerate(feats):
cover_sen_parse_tree = Tree.fromstring(list(cover_sentences[i].sents)[0]._.parse_string)
items = {
'tree_height': cover_sen_parse_tree.height(),
'len_paragraph': d.get('len_paragraph'),
"len_component": d.get('len_component'),
"len_cover_sen": d.get('len_cover_sen'),
'is_in_intro': d.get('is_in_intro'),
'is_in_conclusion': d.get('is_in_conclusion'),
"n_following_components": d.get("n_following_components"),
"n_preceding_components": d.get("n_preceding_components"),
"component_position": d.get("component_position"),
'n_preceding_comp_tokens': d.get('n_preceding_comp_tokens'),
'n_following_comp_tokens': d.get('n_following_comp_tokens'),
'first_in_paragraph': d.get('first_in_paragraph'),
'last_in_paragraph': d.get('last_in_paragraph')
}
items.update(pos_dist(d['cover_sentence']).items())
features.append(items)
return features
return get_features(data)
def fit(self, x: list, y: list = None):
"""Fits self to data provided.
Parameters
----------
x: list
The data on which the transformer is fitted.
y: list
Ignored. Providing will have no effect. Provided for compatibility reasons.
Returns
-------
Self
"""
logger.debug("Fitting")
self.__dict_feats.fit(x)
self.__features.fit(pandas.DataFrame(x).cover_sentence.tolist())
return self
def transform(self, x: list):
"""Transforms data provided.
Parameters
----------
x: list
A list of datapoints which are to be transformed using the mixin
Returns
-------
scipy.sparse.hstack
The features of the inputted list
See Also
---------
scipy.sparse.hstack
"""
features = self.__features.transform(pandas.DataFrame(x).cover_sentence.tolist())
dict_features = self.__dict_feats.transform(self._prepare_dictionary_features(x))
return hstack([features, dict_features])
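# --- Editor's usage sketch (not part of the original module) ---
# ArgumentComponentFeatures follows the scikit-learn transformer contract, so
# it slots into a pipeline like any other transformer. The calls below are
# illustrative; real feature dicts come from
# load_essay_corpus(purpose="component_prediction"):
#
#   feats = ArgumentComponentFeatures()
#   feats.fit(train_data)                  # train_data: list of feature dicts
#   X_train = feats.transform(train_data)  # scipy sparse matrix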
| 36.373626 | 116 | 0.623112 | 688 | 6,620 | 5.731105 | 0.318314 | 0.012173 | 0.005072 | 0.014202 | 0.061882 | 0.017753 | 0 | 0 | 0 | 0 | 0 | 0.004391 | 0.277492 | 6,620 | 181 | 117 | 36.574586 | 0.819987 | 0.115408 | 0 | 0.016807 | 0 | 0 | 0.12615 | 0.03627 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067227 | false | 0 | 0.142857 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f125c669b4d659e35b8a378e25dd4d527ec4dbd4 | 27,310 | py | Python | flask/lib/python3.8/site-packages/to/trainer.py | Otybrian/blogpost | 518599019e11cd7ee11e01470c4d51dfb4583274 | [
"MIT"
] | null | null | null | flask/lib/python3.8/site-packages/to/trainer.py | Otybrian/blogpost | 518599019e11cd7ee11e01470c4d51dfb4583274 | [
"MIT"
] | null | null | null | flask/lib/python3.8/site-packages/to/trainer.py | Otybrian/blogpost | 518599019e11cd7ee11e01470c4d51dfb4583274 | [
"MIT"
] | null | null | null | import re
import os
import traceback
import importlib.util
from prompt_toolkit import prompt
from prompt_toolkit.history import FileHistory
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.validation import ValidationError
from colored import fg, bg, attr
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader
from .utils.cli import *
from .utils.helpers import *
from .utils.batch_logger import *
from .utils.options import *
from .net import *
from .data.dataset import *
class Trainer(object):
#----------------------------------------------------------------------------------------------------------
# Initialization
#----------------------------------------------------------------------------------------------------------
def __init__(self):
super(Trainer, self).__init__()
self.epoch_ran = 0
self.logger = Logger(self)
self.name = sys.argv[0].replace('.py', '')
self.commands = ['list', 'help', 'use', 'load', 'run', 'test', 'validate', 'set', 'exit']
# Configurations
self.cfg_folder = 'configurations'
self.default_cfg = 'default'
self.current_cfg = self.default_cfg
self.current_cfg_path = None
# Models
self.models_folder = 'models'
self.Model = NeuralNetwork
self.cuda_enabled = False
# Events
self.event_handlers = {}
# Data
self.DataLoader = None
self.DataSet = DataSet
# Submission
self.submissions_folder = 'submissions'
if len(sys.argv) == 2:
self.load_cfg('{}/{}.py'.format(self.cfg_folder, sys.argv[1].replace('.py', '')))
else:
self.load_cfg('{}/{}.py'.format(self.cfg_folder, self.current_cfg))
self.reset()
def reset(self):
self.__init_model()
self.__init_optim()
self.__init_loss_fn()
return self
def __init_folder(self):
self.cfg_folder = get(self.cfg, TrainerOptions.CFG_FOLDER.value, default='configurations')
self.models_folder = get(self.cfg, TrainerOptions.MODELS_FOLDER.value, default='models')
self.submissions_folder = get(self.cfg, TrainerOptions.SUBMISSIONS_FOLDER.value, default='submissions')
def __init_model(self):
self.model = self.Model(self.cfg)
init_model_parameters(self.model)
if torch.cuda.is_available():
self.cuda_enabled = True
self.model = self.model.cuda()
def __init_optim(self):
Optimizer = get(self.cfg, TrainerOptions.OPTIMIZER.value, default=optim.Adam)
optim_args = get(self.cfg, TrainerOptions.OPTIMIZER_ARGS.value, default={'lr': 0.01})
self.optimizer = Optimizer(self.model.parameters(), **optim_args)
Scheduler = get(self.cfg, TrainerOptions.SCHEDULER.value, default=None)
sched_args = get(self.cfg, TrainerOptions.SCHEDULER_ARGS.value, default=[])
sched_kwargs = get(self.cfg, TrainerOptions.SCHEDULER_KWARGS.value, default={})
self.scheduler = None
if Scheduler is not None:
self.scheduler = Scheduler(self.optimizer, *sched_args, **sched_kwargs)
def __init_loss_fn(self):
Fn = get(self.cfg, TrainerOptions.LOSS_FN.value, default=nn.CrossEntropyLoss)
self.loss_fn = Fn()
#----------------------------------------------------------------------------------------------------------
# Folder
#----------------------------------------------------------------------------------------------------------
def set_models_folder(self, models_folder):
self.models_folder = models_folder
return self
def set_submissions_folder(self, submissions_folder):
self.submissions_folder = submissions_folder
return self
def set_configurations_folder(self, cfg_folder):
self.cfg_folder = cfg_folder
self.load('{}/{}.py'.format(self.cfg_folder, self.current_cfg))
self.reset()
return self
#----------------------------------------------------------------------------------------------------------
# Configuration
#----------------------------------------------------------------------------------------------------------
def has_cfg(self, cfg):
if not cfg.endswith('.py'):
cfg += '.py'
if '/' not in cfg:
cfg = os.path.join(csd(), self.cfg_folder, cfg)
return os.path.isfile(cfg)
def load_cfg(self, cfg_file):
if not cfg_file.startswith(self.cfg_folder):
cfg_file = os.path.join(self.cfg_folder, cfg_file)
if not cfg_file.endswith('.py'):
cfg_file += '.py'
path = os.path.join(csd(), cfg_file)
try:
p('Loading configuration file at "{}"'.format(path))
spec = importlib.util.spec_from_file_location('configuration', path)
self.cfg = importlib.util.module_from_spec(spec)
spec.loader.exec_module(self.cfg)
self.current_cfg = filename(path).replace('.py', '')
self.current_cfg_path = path
self.__init_folder()
except IOError as e:
raise Exception('Configuration file not found at "{}".'.format(path))
return self
#----------------------------------------------------------------------------------------------------------
# Events
#----------------------------------------------------------------------------------------------------------
def bind(self, event, handler):
if isinstance(event, TrainerEvents):
self.event_handlers[event.value] = handler
else:
raise Exception('Event "{}" should be a TrainerEvents.'.format(event))
return self
#----------------------------------------------------------------------------------------------------------
# DataSet and DataLoader
#----------------------------------------------------------------------------------------------------------
def set_dataloader(self, DataLoader):
self.DataLoader = DataLoader
return self
def set_dataset(self, DataSet):
self.DataSet = DataSet
return self
def __get_dataloader(self, data_type, debug=True):
if self.DataLoader is not None:
return self.DataLoader(self.cfg, data_type)
else:
dataset = self.DataSet(self.cfg, data_type, debug)
if has(self.event_handlers, TrainerEvents.CUSTOMIZE_DATALOADER.value):
return get(self.event_handlers, TrainerEvents.CUSTOMIZE_DATALOADER.value)(self.cfg, data_type, dataset)
else:
should_shuffle = data_type != TEST
batch_size = get(self.cfg, TrainerOptions.BATCH_SIZE.value, default=64)
return DataLoader(dataset, batch_size=batch_size, shuffle=should_shuffle)
#----------------------------------------------------------------------------------------------------------
# Model
#----------------------------------------------------------------------------------------------------------
def set_lr(self, new_lr):
for param_group in self.optimizer.param_groups:
param_group['lr'] = new_lr
self.cfg.learning_rate = new_lr
if has(self.cfg, TrainerOptions.OPTIMIZER_ARGS.value, 'lr'):
get(self.cfg, TrainerOptions.OPTIMIZER_ARGS.value)['lr'] = new_lr
return self
def get_lr(self):
lr = [g['lr'] for g in self.optimizer.param_groups]
return lr
def set_model(self, Model):
self.Model = Model
self.reset()
return self
def save_model(self, percentage=None, loss=None):
mkdirp(os.path.join(csd(), self.models_folder, self.name))
path = '{}/{}/{} - {:03d}'.format(self.models_folder, self.name, self.current_cfg, self.epoch_ran)
if percentage is not None:
path += ' - {:.2f}%'.format(percentage)
if loss is not None:
path += ' - {:.6f}'.format(loss)
path += '.model'
path = os.path.join(csd(), path)
p('Saving neural network "{}" using configuration "{}" to disk at "{}"'.format( \
self.name, self.current_cfg, path))
torch.save(self.model.state_dict(), path)
return self
def load_model(self, epoch=None):
pattern = None
if epoch == 0:
p('Resetting model to primitive state.')
return self.reset()
epoch, path, files, versions = self.get_versions(epoch)
if path is None and epoch is not None: # Can't find the exact epoch, loading the highest.
epoch, path, files, versions = self.get_versions()
if epoch > 0 and path is not None:
p('Loading neural network "{}" using configuration "{}" and epoch "{}" at "{}"'.format( \
self.name, self.current_cfg, epoch, path))
try:
if torch.cuda.is_available():
self.model.load_state_dict(torch.load(path))
else:
self.model.load_state_dict(torch.load(path, lambda storage, loc: storage))
self.epoch_ran = epoch
except Exception as e:
p('Failed to load model at path "{}"'.format(path))
traceback.print_exc()
else:
p('No saved model for neural network "{}" using configuration "{}".'.format(self.name, self.current_cfg))
return self
def has_version(self, epoch):
version, path, files, versions = self.get_versions(epoch)
return version > 0 and version == epoch
def get_versions(self, epoch=None):
folder = csd()
if epoch is not None:
pattern = '{}/{}/{} - {:03d}*.model'.format(self.models_folder, self.name, self.current_cfg, epoch)
else:
pattern = '{}/{}/{}*.model'.format(self.models_folder, self.name, self.current_cfg)
files = find_pattern(os.path.join(folder, pattern), relative_to=folder)
if len(files) > 0:
versions = [int(re.findall(' \d{3} |$', filename(f))[0]) for f in files]
epoch = max(versions)
i = versions.index(epoch)
path = files[i]
return epoch, path, files, versions
return (0, None, files, [])
#----------------------------------------------------------------------------------------------------------
# CLI
#----------------------------------------------------------------------------------------------------------
def cli(self):
print()
print('----------------------------------------------------------')
print('| |')
print('| Welcome to Flare Neural Network Trainer. |')
print('| |')
print('----------------------------------------------------------')
print()
if get(self.cfg, TrainerOptions.AUTO_RELOAD_SAVED_MODEL.value, default=False):
self.load_model()
mkdirp('.flare')
touch('.flare/history')
should_exit = False
while not should_exit:
c = prompt(
'> ',
history=FileHistory('.flare/history'),
auto_suggest=AutoSuggestFromHistory(),
completer=CommandCompleter(self),
validator=CommandValidator(self)
)
try:
should_exit = self.process_command(c)
except Exception as e:
traceback.print_exc()
return self
def process_command(self, c):
parts = list(filter(None, c.split(' ')))
command = parts[0]
if command == 'list':
self.list()
elif command == 'help':
self.help()
elif command == 'use':
self.load_cfg('{}/{}.py'.format(self.cfg_folder, parts[1].replace('.py', '')))
elif command == 'load':
if len(parts) == 2:
self.load_model(int(parts[1]))
else:
self.load_model()
elif command == 'run':
if len(parts) == 1:
self.run()
else:
self.run(int(parts[1]))
elif command == 'set':
parts = list(filter(None, c.split(' ', 2)))
self.set(parts[1], parts[2])
elif command == 'test' or command == 'validate':
fn = self.test if command == 'test' else self.validate
if len(parts) == 1:
fn()
else:
locs = list(map(int, parts[1].split(':')))
                if len(locs) == 1:
                    # load_model() always returns self (truthy), so guard with
                    # has_version() to make the skip branch actually reachable
                    if self.has_version(locs[0]):
                        self.load_model(locs[0])
                        fn()
                    else:
                        p('Skipping test because epoch {} cannot be loaded correctly.'.format(locs[0]))
                else:
                    for i in range(*locs):
                        if self.has_version(i):
                            self.load_model(i)
                            fn()
                        else:
                            p('Skipping test because epoch {} cannot be loaded correctly.'.format(i))
elif command == 'exit':
return True
return False
#----------------------------------------------------------------------------------------------------------
# Commands
#----------------------------------------------------------------------------------------------------------
def list(self):
color = fg(45)
parameter = fg(119)
reset = attr('reset')
def colorize(o):
return '{}{}{}'.format(color, o, reset)
configs = [
('Module', self.name, color),
('Epoch', self.epoch_ran, color),
('Configuration', self.current_cfg, color),
('Configuration Path', self.current_cfg_path, color),
('Configuration Folder', self.cfg_folder, color),
('Models Folder', self.models_folder, color),
('Submissions Folder', self.submissions_folder, color),
None,
]
for k in list(filter(lambda x: not x.startswith('__'), dir(self.cfg))):
v = getattr(self.cfg, k)
configs.append((k, v, parameter))
max_key_len = max([len(o[0]) if o else 0 for o in configs])
for o in configs:
if o is None:
print()
else:
w('{}{} : {}'.format(o[0], ' ' * (max_key_len - len(o[0])), o[2]))
w(re.sub('^ ', ' ' * (max_key_len + 6), ff(o[1], prefix=' '), flags=re.M))
print(reset)
return self
def help(self):
command = fg(45)
parameter = fg(119)
sample = fg(105)
reset = attr('reset')
indent = ' '
print(
indent + """
{0}python {1}<PYTHON>{3} {1}[CONFIG]{3} {1}[EPOCH]{3}{3}
            You can specify the configuration file path and epoch count to load at script
launch where {1}<PYTHON>{3} is the location of your python file, {1}[CONFIG]{3} is the
location of your configuration file and {1}[EPOCH]{3} is the epoch count you wish to
load.
e.g: {2}python nn.py default 2{3}
{0}list:{3}
Usage: {0}list{3}
List current module, epoch count and configuration file path.
e.g: {2}list{3}
{0}help:{3}
Usage: {0}help{3}
Print help message.
e.g: {2}help{3}
{0}use:{3}
Usage: {0}use{3} {1}<PATH>{3}
Switch to configuration file located at {1}<PATH>{3}.
e.g: {2}use default{3}
{0}load:{3}
Usage: {0}load{3} {1}<EPOCH>{3}
Load previously trained model at epoch {1}<EPOCH>{3}.
e.g: {2}load 10{3}
{0}run:{3}
Usage: {0}run{3} {1}[COUNT]{3}
Run training, optionally {1}[COUNT]{3} times
e.g: {2}run{3} OR {2}run 10{3}
{0}set:{3}
Usage: {0}set{3} {1}<ATTR> <VALUE>{3}
            Set the value in configuration dynamically; this does NOT overwrite the
configuration file.
e.g: {2}set learn_rate 0.01{3}
{0}test:{3}
Usage: {0}test{3} {1}[EPOCH]{3}
Test using the model trained, optionally using at epoch {1}[EPOCH]{3}.
{1}[EPOCH]{3} can be a range input to range() or an integer.
e.g: {2}test 10{3} OR {2}test 1:10:2{3}
{0}validate:{3}
Usage: {0}validate{3} {1}[EPOCH]{3}
Validate using the model trained, optionally using at epoch {1}[EPOCH]{3}.
{1}[EPOCH]{3} can be a range input to range() or an integer.
e.g: {2}validate 10{3} OR {2}validate 1:10:2{3}
""".format(command, parameter, sample, reset).replace('\t\t\t', indent).strip()
)
return self
def set(self, key, val):
p('Setting configuration key "{}" to "{}"'.format(key, val))
if key == 'learning_rate':
self.set_lr(num(val))
else:
cmd = 'self.cfg.{} = {}'.format(key, val)
try:
exec(cmd)
self.reset()
except Exception as e:
p('Failed to set configuration key "{}" to "{}"'.format(key, val))
return self
#----------------------------------------------------------------------------------------------------------
# Neural Network
#----------------------------------------------------------------------------------------------------------
def __generate(self, x, y, extras, y_hat):
result = None
if has(self.event_handlers, TrainerEvents.GENERATE.value):
result = get(self.event_handlers, TrainerEvents.GENERATE.value)(x, y, extras, y_hat)
else:
            labels_axis = get(self.cfg, TrainerOptions.GENERATE_AXIS.value, default=1)
            result = y_hat.data.max(labels_axis, keepdim=True)[1].cpu().numpy().flatten()
return result
def __post_test(self, results):
if has(self.event_handlers, TrainerEvents.POST_TEST.value):
return get(self.event_handlers, TrainerEvents.POST_TEST.value)(results)
return results
def _match(self, mode, x, y, extras, y_hat):
match_results = None
if has(self.event_handlers, TrainerEvents.MATCH_RESULTS.value):
match_results = get(self.event_handlers, TrainerEvents.MATCH_RESULTS.value)(mode, x, y, extras, y_hat)
else:
match_results = self.__default_match(y_hat, y) # Compute losses
return match_results
def __default_match(self, y_hat, y):
predictions = y_hat.data.max(1, keepdim=True)[1]
expectations = y.long()
if torch.cuda.is_available():
return predictions.eq(expectations.cuda())
else:
return predictions.cpu().eq(expectations)
def __compute_loss(self, mode, x, y, extras, y_hat, logger):
loss = None
if has(self.event_handlers, TrainerEvents.COMPUTE_LOSS.value):
loss = get(self.event_handlers, TrainerEvents.COMPUTE_LOSS.value)(mode, x, y, extras, y_hat)
else:
loss = self.loss_fn(y_hat, to_variable(y).long().squeeze()) # Compute losses
extra_log_msg = {}
if has(self.event_handlers, TrainerEvents.EXTRA_LOG_MSG.value):
result = get(self.event_handlers, TrainerEvents.EXTRA_LOG_MSG.value)(mode, x, y, extras, y_hat)
if result is not None:
extra_log_msg = result
logger.log_loss(loss.data.cpu().numpy(), **extra_log_msg)
return loss
def __propagate_loss(self, mode, x, y, extras, y_hat, logger):
loss = self.__compute_loss(mode, x, y, extras, y_hat, logger)
loss.backward()
self.optimizer.step()
return loss
def __get_validation_results(self, batch_count=-1):
dataloader = self.__get_dataloader(DEV, False)
mode = Mode.VALIDATE
validate_logger = Logger(self)
validate_logger.start(mode)
validate_logger.start_epoch()
for i, batch in enumerate(dataloader):
x, y, extras, y_hat = self.__process_batch(batch, validate_logger, mode)
self.__print_batch(mode, x, y, extras, y_hat, validate_logger)
if batch_count > 0 and i + 1 == batch_count:
break
percentage, (_, _, loss) = validate_logger.get_percentage(), validate_logger.get_loss()
return percentage, loss
def __lr_changed(self, old_lr, new_lr):
eps = 1e-6
for i in range(len(old_lr)):
old, new = old_lr[i], new_lr[i]
if old - new > eps:
return True
return False
def __tune_lr(self):
if self.scheduler is None:
return
        percentage, loss = 0.0, 0.0
use_train_data = get(self.cfg, TrainerOptions.SCHEDULE_ON_TRAIN_DATA.value, default=False)
if use_train_data:
percentage, (_, _, loss) = self.logger.get_percentage(), self.logger.get_loss()
else:
batch_count = get(self.cfg, TrainerOptions.SCHEDULE_BATCH_COUNT.value, default=-1)
percentage, loss = self.__get_validation_results(batch_count)
old_lr = self.get_lr()
use_percentage = get(self.cfg, TrainerOptions.SCHEDULE_ON_ACCURACY.value, default=False)
value = percentage if use_percentage else loss
args, kwargs = filter_args(self.scheduler.step, [value], {})
self.scheduler.step(*args, **kwargs)
new_lr = self.get_lr()
verbose = get(self.cfg, TrainerOptions.SCHEDULE_VERBOSE.value, default=False)
if verbose:
data_type = 'percentage {:.2f} %' if use_percentage else 'loss {:.8f}'
data_source = 'training' if use_train_data else 'validation'
template = 'Tuning learning rate using {} from {} data.'.format(data_type, data_source)
p(template.format(value), debug=False)
if self.__lr_changed(old_lr, new_lr):
p('Learning rate is now at: {}'.format(new_lr))
def __process_batch(self, batch, logger, mode=Mode.TRAIN):
logger.increment()
x, y, extras = batch[0], batch[1], batch[2:]
self.optimizer.zero_grad()
if has(self.event_handlers, TrainerEvents.PRE_PROCESS.value):
x, y, extras = get(self.event_handlers, TrainerEvents.PRE_PROCESS.value)(mode, x, y, extras)
if mode is Mode.TRAIN:
self.model.train()
else:
self.model.eval()
y_hat = None
if has(self.event_handlers, TrainerEvents.MODEL_EXTRA_ARGS.value):
args, kwargs = get(self.event_handlers, TrainerEvents.MODEL_EXTRA_ARGS.value)(mode, x, y, extras)
y_hat = forward(self.model, [to_variable(x)] + args, kwargs)
else:
y_hat = self.model(to_variable(x))
if has(self.event_handlers, TrainerEvents.POST_PROCESS.value):
y_hat = get(self.event_handlers, TrainerEvents.POST_PROCESS.value)(mode, x, y, extras, y_hat)
if mode is Mode.TRAIN:
self.__propagate_loss(mode, x, y, extras, y_hat, logger)
elif mode is Mode.VALIDATE:
self.__compute_loss(mode, x, y, extras, y_hat, logger)
return x, y, extras, y_hat
def __print_batch(self, mode, x, y, extras, y_hat, logger):
logger.log_batch(mode, x, y, extras, y_hat)
logger.print_batch(logger is self.logger)
def __save_path(self, save_as):
folder = os.path.join(csd(), self.submissions_folder, self.name)
file = None
if save_as == SaveAs.CSV:
file = '{} - {:03d}.csv'.format(self.current_cfg, self.epoch_ran)
elif save_as == SaveAs.NPY:
file = '{} - {:03d}.npy'.format(self.current_cfg, self.epoch_ran)
return folder, file
def __save_results(self, results, save_as):
folder, file = self.__save_path(save_as)
if folder is None or file is None:
return
path = os.path.join(folder, file)
mkdirp(folder)
if save_as == SaveAs.CSV:
field_names = get(self.cfg, TrainerOptions.CSV_FIELD_NAMES.value, default=['id', 'label'])
write_to_csv(results, path, field_names)
elif save_as == SaveAs.NPY:
np.save(path, np.array(results, dtype='object'))
p('Submission file saved to "{}".'.format(path))
def run(self, epochs=1):
        has_scheduler = self.scheduler is not None
schedule_on_batch = get(self.cfg, TrainerOptions.SCHEDULE_ON_BATCH.value, default=False)
schedule_first = get(self.cfg, TrainerOptions.SCHEDULE_FIRST.value, default=True)
dev_mode = get(self.cfg, TrainerOptions.DEV_MODE.value, default=False)
train_type = DEV if dev_mode else TRAIN
dataloader = self.__get_dataloader(train_type)
self.logger.start()
for epoch in range(epochs):
self.logger.start_epoch()
if has_scheduler and not schedule_on_batch and schedule_first:
self.__tune_lr()
for batch in dataloader:
if has_scheduler and schedule_on_batch and schedule_first:
self.__tune_lr()
x, y, extras, y_hat = self.__process_batch(batch, self.logger)
if has_scheduler and schedule_on_batch and not schedule_first:
self.__tune_lr()
self.__print_batch(Mode.TRAIN, x, y, extras, y_hat, self.logger)
if has_scheduler and not schedule_on_batch and not schedule_first:
self.__tune_lr()
self.epoch_ran += 1
percentage, (_, loss, _) = self.logger.get_percentage(), self.logger.get_loss()
if abs(percentage) < 1e-6:
percentage = None
self.save_model(percentage, loss)
return self
def validate(self):
return self.test(Mode.VALIDATE)
def test(self, mode=Mode.TEST):
data_type = TEST if mode == Mode.TEST else DEV
dataloader = self.__get_dataloader(data_type)
self.logger.start(mode)
self.logger.start_epoch()
results = []
for batch in dataloader:
x, y, extras, y_hat = self.__process_batch(batch, self.logger, mode)
if mode == Mode.TEST:
result = self.__generate(x, y, extras, y_hat)
for i in range(len(result)):
results.append(result[i])
self.__print_batch(mode, x, y, extras, y_hat, self.logger)
if mode is Mode.TEST:
results = self.__post_test(results)
save_as = get(self.cfg, TrainerOptions.SAVE_AS.value, default=SaveAs.CSV)
self.__save_results(results, save_as)
else:
self.logger.print_summary()
return self
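# --- Editor's usage sketch (not part of the original module) ---
# A typical entry point, assuming a configurations/default.py exists:
#
#   if __name__ == '__main__':
#       Trainer().cli()    # interactive loop: use / load / run / test / validate
#
# The same object can also be driven non-interactively, since most methods
# return self:
#
#   Trainer().load_model().run(epochs=5).test()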
| 38.464789 | 119 | 0.528781 | 3,096 | 27,310 | 4.49677 | 0.112726 | 0.023632 | 0.01494 | 0.014869 | 0.323301 | 0.248958 | 0.21053 | 0.125916 | 0.08641 | 0.070177 | 0 | 0.010282 | 0.287733 | 27,310 | 709 | 120 | 38.519041 | 0.705429 | 0.078067 | 0 | 0.188433 | 0 | 0.007463 | 0.149425 | 0.004614 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08209 | false | 0 | 0.037313 | 0.003731 | 0.20709 | 0.033582 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f1266a803627ec1031a1ee5077266bcc7b6391cf | 1,420 | py | Python | lambdata/test_lambdata.py | leibo411/lambdata-leibo411 | 59b322e4e3e4d27970dea21efdecaa7d65029c7f | [
"MIT"
] | null | null | null | lambdata/test_lambdata.py | leibo411/lambdata-leibo411 | 59b322e4e3e4d27970dea21efdecaa7d65029c7f | [
"MIT"
] | null | null | null | lambdata/test_lambdata.py | leibo411/lambdata-leibo411 | 59b322e4e3e4d27970dea21efdecaa7d65029c7f | [
"MIT"
] | null | null | null | """Basic unit test for lambdata"""
import unittest
import random
from example_module import favorite_animals, colors, add, increment, becca, rand_num
class ExampleTests(unittest.TestCase):
"""Making sure examples work as expected"""
def test_add(self):
"""Testing that add works as expected"""
num1 = 0
num2 = 1
self.assertEqual(add(num1,num2), 1)
self.assertEqual(add(num2, num2), 2)
def test_increment(self):
"""Testing the increment function"""
x0 = 0
y0 = increment(x0)
self.assertEqual(y0,1)
x1 = 100
y1 = increment(x1)
self.assertEqual(y1,101)
x2 = -1
y2 = increment(x2)
self.assertEqual(y2,0)
def test_colors(self):
"""Testing the colors function"""
self.assertIn("Teal", colors)
self.assertNotIn("yellow", colors)
def test_favorite_animals(self):
"""Testing the favorite animals function"""
length_fa = len(favorite_animals)
self.assertEqual(length_fa, 4)
def test_becca(self):
"""Testing the becca function"""
self.assertIn('Becca is crying', becca)
def test_rand_num(self):
"""Testing the rand_num funciton"""
y4 = random.randint(0, 100)
y5 = rand_num(y4)
self.assertGreater(y5,1000)
if __name__ == "__main__":
unittest.main()
| 23.278689 | 84 | 0.600704 | 170 | 1,420 | 4.876471 | 0.4 | 0.050663 | 0.084439 | 0.048251 | 0.055489 | 0 | 0 | 0 | 0 | 0 | 0 | 0.044379 | 0.285915 | 1,420 | 60 | 85 | 23.666667 | 0.773176 | 0.179577 | 0 | 0 | 0 | 0 | 0.029464 | 0 | 0 | 0 | 0 | 0 | 0.30303 | 1 | 0.181818 | false | 0 | 0.090909 | 0 | 0.30303 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f1298764a70d48a5cc02427e03a11f21ba24e293 | 6,989 | py | Python | capt/function/push.py | tmanfree/capt | a6c1c12bb2677aef718f550c5fa7ffd4b71dedd4 | [
"MIT"
] | null | null | null | capt/function/push.py | tmanfree/capt | a6c1c12bb2677aef718f550c5fa7ffd4b71dedd4 | [
"MIT"
] | null | null | null | capt/function/push.py | tmanfree/capt | a6c1c12bb2677aef718f550c5fa7ffd4b71dedd4 | [
"MIT"
] | null | null | null |
# system imports
import sys
import os
import time
# local imports
from function.find import Find
from connector.switch import Switch
class Push:
def __init__(self):
self.find = Find()
def template(self, args, config, logger):
dev_id_list = []
address_list = []
try:
file = open(os.path.join(args.file_name), "r")
for ip in file:
dev_id = self.find.dev_id(args, config, ip, logger)
time.sleep(1)
dev_id_list.append({"targetDeviceID": "{}".format(dev_id)})
address_list.append({"address": "{}".format(ip.strip())})
file.close()
except FileNotFoundError:
print("##### ERROR iplist files not found #####")
except Exception as err:
print("##### ERROR with processing:{} #####".format(err))
# require 'yes' input to proceed
# logger.info('Activate BAS on switch INTERFACE {} using VLAN: {}'.format(found_int['name'], args.vlan))
# response = input("Confirm action of changing VLAN ('yes'):")
# if not response == 'yes':
# logger.info('Did not proceed with change.')
# sys.exit(1)
# invoke API call to change VLAN
sw_api_call = Switch(config, logger) # create API switch call object
# push API_CALL_conf_if_bas template out. Update this to use a shared template, the same as change vlan?
job_id = sw_api_call.conf_template(dev_id_list, args.template_name)
timeout = time.time() + 30 # 30 second timeout starting now
time.sleep(1) # without the sleep the job_complete can balk, not finding the job_id yet
while not sw_api_call.job_complete(job_id):
time.sleep(5)
if time.time() > timeout:
logger.critical("Template push failed. Prime job not completed")
sys.exit(1)
        # TODO: should this sync only run when the Prime job is successful?
        logger.info("Synchronizing ...")
        self.force_sync_multiple(address_list, sw_api_call)  # trigger a config sync on each device
if not sw_api_call.job_successful(job_id):
logger.critical("Template push failed. Prime job not successful")
sys.exit(1)
logger.info("Synchronizing ...")
# logger.info("Synchronized!")
logger.info('Template push complete.')
return args
def bas(self, args, config, logger):
# find and display (update this call to work)
dev_id, found_int, dev_ip = self.find.int(args, config, args.interface, logger)
# require 'yes' input to proceed
logger.info('Activate BAS on switch INTERFACE {} using VLAN: {}'.format(found_int['name'], args.vlan))
response = input("Confirm action of changing VLAN ('yes'):")
if not response == 'yes':
logger.info('Did not proceed with change.')
sys.exit(1)
# invoke API call to change VLAN
# sw_api_call = Switch(config.username, config.password, config.cpi_ipv4_address, logger) # create API switch call object
sw_api_call = Switch(config, logger) # create API switch call object
# push API_CALL_conf_if_bas template out. Update this to use a shared template, the same as change vlan?
job_id = sw_api_call.conf_if_bas(dev_id, found_int['name'], args.description, args.vlan)
timeout = time.time() + 30 # 30 second timeout starting now
time.sleep(1) # without the sleep the job_complete can balk, not finding the job_id yet
while not sw_api_call.job_complete(job_id):
time.sleep(5)
if time.time() > timeout:
logger.critical("Change VLAN failed. Prime job not completed")
sys.exit(1)
if not sw_api_call.job_successful(job_id):
logger.critical("Change VLAN failed. Prime job not successful")
sys.exit(1)
logger.info('Change VLAN complete.')
        # TODO: add a verification flag to sync and display afterwards, instead of doing it by default?
logger.info("Synchronizing ...")
self.force_sync(dev_id,dev_ip, sw_api_call, 20, logger) # 20 minute timeout
logger.info("Synchronized!")
dev_id, found_int, dev_ip = self.find.int(args, config, args.interface, logger)
return args
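    # Editor's note: a hedged example of driving this method from capt's CLI
    # glue, assuming `args` carries interface/description/vlan attributes:
    #
    #   Push().bas(args, config, logger)   # move args.interface onto args.vlan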
def force_sync_multiple(self, address_list, sw_api_call):
        # no error handling; this just triggers a config backup sync on each device
sw_api_call.sync_multiple(address_list) # force a sync!
    # Copies of synchronized and force_sync from upgrade_code.py (that version uses a constant to hold these values)
def force_sync(self, sw_id,sw_ip, sw_api_call, timeout, logger):
old_sync_time = sw_api_call.sync_time(sw_id)
sw_api_call.sync(sw_ip) # force a sync!
end_time = time.time() + 60 * timeout
logger.info("Timeout set to {} minutes.".format(timeout))
time.sleep(20) # don't test for sync status too soon (CPI delay and all that)
while not self.synchronized(sw_id, sw_api_call, logger):
time.sleep(10)
if time.time() > end_time:
logger.critical("Timed out. Sync failed.")
sys.exit(1)
new_sync_time = sw_api_call.sync_time(sw_id)
if old_sync_time == new_sync_time: # KEEP CODE! needed for corner case where force sync fails (code 03.03.03)
logger.critical("Before and after sync time is the same. Sync failed.")
sys.exit(1)
# def force_sync_multiple(self, sw_id,sw_ip, sw_api_call, timeout, logger):
# old_sync_time = sw_api_call.sync_time(sw_id)
# sw_api_call.sync(sw_ip) # force a sync!
# end_time = time.time() + 60 * timeout
# logger.info("Timeout set to {} minutes.".format(timeout))
# time.sleep(20) # don't test for sync status too soon (CPI delay and all that)
# while not self.synchronized(sw_id, sw_api_call, logger):
# time.sleep(10)
# if time.time() > end_time:
# logger.critical("{} Timed out. Sync failed.".format(sw_ip))
# return
#
# new_sync_time = sw_api_call.sync_time(sw_id)
# if old_sync_time == new_sync_time: # KEEP CODE! needed for corner case where force sync fails (code 03.03.03)
# logger.critical("{} Before and after sync time is the same. Sync failed.".format(sw_ip))
# return
def synchronized(self, sw_id, sw_api_call, logger):
if sw_api_call.sync_status(sw_id) == "COMPLETED":
logger.info("Synchronization Complete!")
return True
elif sw_api_call.sync_status(sw_id) == "SYNCHRONIZING":
return False
else:
#sw.sync_state = sw_api_call.sync_status(sw_id)
logger.warning("Unexpected sync state:")
return False | 41.35503 | 129 | 0.606954 | 939 | 6,989 | 4.336528 | 0.192758 | 0.053291 | 0.059676 | 0.031925 | 0.680501 | 0.654715 | 0.634578 | 0.617633 | 0.590128 | 0.590128 | 0 | 0.010222 | 0.272142 | 6,989 | 169 | 130 | 41.35503 | 0.79025 | 0.342681 | 0 | 0.333333 | 0 | 0 | 0.152743 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.055556 | 0 | 0.188889 | 0.022222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f12a3ccfb07fc32ea4a8769b9c53d6c5dadcdff4 | 5,200 | py | Python | python/py_basic_ide/pyBASIC/parser.py | josephlewis42/personal_codebase | aa0fff9a908ab90bc78d24aa69d1b91163c35314 | [
"Unlicense"
] | 3 | 2015-11-24T17:06:58.000Z | 2018-05-01T14:03:57.000Z | python/py_basic_ide/pyBASIC/parser.py | josephlewis42/personal_codebase | aa0fff9a908ab90bc78d24aa69d1b91163c35314 | [
"Unlicense"
] | null | null | null | python/py_basic_ide/pyBASIC/parser.py | josephlewis42/personal_codebase | aa0fff9a908ab90bc78d24aa69d1b91163c35314 | [
"Unlicense"
] | null | null | null | #!/usr/bin/python
SHOW_ERRORS = True
import sys
def error_fx(text):
'''The default error handling, print the text to the console.
replace with your own function if you want, have it print to your
wx application or whatever.'''
sys.stderr.write(text)
def show_error(text):
'''
Send an error if SHOW_ERRORS = True
'''
if SHOW_ERRORS:
error_fx(text)
def split_text(text, seperator=" "):
return get_word(text, seperator)
def get_word(text, seperator=" "):
'''
Returns the beginning and end of text seperated around seperator.
If seperator is not found, the tail will be a blank string.
'''
try:
head = text[0:text.index(seperator)]
tail = text[text.index(seperator) + len(seperator) : len(text)]
except ValueError:
return text, ""
return head.strip(), tail.strip()
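# Editor's examples of the split behaviour (verified against the code above):
#   get_word("PRINT 'HI'")            -> ("PRINT", "'HI'")
#   get_word("PRINT")                 -> ("PRINT", "")
#   get_word("IF x THEN y", "THEN")   -> ("IF x", "y")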
def remove_between(text, char="\""):
'''
    Returns the string between the next two occurrences of char in the
    input string, as the head, thorax, and abdomen.
    Example:
        remove_between("TEST \"Hello Jane!\" said Dick.")
        ('TEST', 'Hello Jane!', 'said Dick.')
'''
head, tail = get_word(text, char)
thorax, abdomen = get_word(tail,char)
return head.strip(), thorax.strip(), abdomen.strip()
def has_another(text, substring):
'''
Tests if the text has another substring, if it does returns true,
if else it returns false.
'''
    try:
        text.index(substring)   # str.index raises ValueError when not found
        return True
    except ValueError:
        return False
def tokenize(line, linenumber):
'''
Tokenize so the runner can work and check for errors in the syntax.
'''
word_list = [] #Is returned with each token in a proper area.
#Get the keyword
first_word, rest_line = split_text(line)
first_word = first_word.upper()
#Add the first word to the list for identification in runner.
word_list.append(first_word)
#Check for first keyword
acceptable_words_list = ["PRINT", "CLS", "IF", "GOTO", \
"LABEL", "INPUT", "LET", "REM", \
"END", "STOP", "", "CLEAR", "LBL"]
if first_word not in acceptable_words_list:
show_error("Token error line %d, %s is not a valid token."
%(linenumber, first_word))
    # Tokenize the rest of the line based on the first keyword.
"""
    If statement:
["IF", "EXPRESSION", "THEN STATMENT", "ELSE STATMENT"]
Example
IF y=='' THEN PRINT 'Hello'
Is formatted as.
["IF", "%(y)s == ''", "PRINT 'Hello'", "PRINT 'Goodbye'"]
The else is optional.
"""
if first_word in ["IF"]:
#Check for syntax errors
if not has_another(rest_line, "THEN"):
show_error("IF error line %d, no THEN statment."%(linenumber))
expression, tail = get_word(rest_line, "THEN")
word_list.append(expression)
if not has_another(rest_line, "ELSE"):
#if no else
word_list.append( tokenize(tail, linenumber) )
word_list.append( tokenize("REM Nothing", linenumber) )
else:
#If there is an else still.
then, rest = get_word(tail, "ELSE")
word_list.append( tokenize(then, linenumber) )
word_list.append( tokenize(rest, linenumber) )
#Let
if first_word in ["LET"]:
if not has_another(rest_line, "="):
show_error("LET error line %d, no assignment operator after variable." %(linenumber))
else:
head, tail = get_word(rest_line, "=")
word_list.append(head)
word_list.append(tail)
#Input
if first_word in ["INPUT"]:
a,b,c = remove_between(rest_line, "\"")
if a != "":
show_error("INPUT error line %d, too many tokens before String." %(linenumber))
if has_another(c, " "):
show_error("INPUT error line %d, extra tokens found after variable." %(linenumber))
if c == "":
show_error("INPUT error line %d, no assignment variable." %(linenumber))
word_list.append(b) #User Display Text
word_list.append(c) #Variable
#Rem
if first_word in ["REM"]:
word_list.append(rest_line)
#End
if first_word in ["END"]:
if rest_line != "":
show_error("END error line %d, too many tokens after END." %(linenumber))
#Stop
if first_word in ["STOP"]:
if rest_line != "":
show_error("STOP error line %d, too many tokens after STOP." %(linenumber))
#gosub
    # GOTO statement
if first_word in ["GOTO"]:
if has_another(rest_line, " "):
show_error("GOTO error line %d, too many tokens after GOTO" %(linenumber))
else:
word_list.append(rest_line)
    # PRINT statement
if first_word in ["PRINT"]:
word_list.append(rest_line)
    # CLEAR statement
if first_word in ["CLS", "CLEAR"]:
if rest_line != "":
show_error("CLEAR/CLS error line %d, too many tokens after CLEAR/CLS." %(linenumber))
    # LABEL statement
if first_word in ["LABEL", "LBL"]:
if has_another(rest_line, " "):
show_error("LABEL/LBL error line %d, too many tokens after LABEL/LBL." %(linenumber))
else:
word_list.append(rest_line)
#Return the list of tokenized words
return word_list
def tokenize_document(text):
'''
Create a token list of a document with newline characters.
'''
tokens = []
tokenlines = text.split("\n")
index = 1
for line in tokenlines:
t = tokenize(line, index)
if t != [""]:
tokens.append(t)
index += 1
return tokens
def tokenize_from_file(path):
'''
Create a basic token list from a document.
'''
text = ""
a = file(path)
for line in a:
text += line
return tokenize_document(text)
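# Editor's usage sketch: tokenizing a two-line program held in memory.
if __name__ == "__main__":
    program = "LET X = 5\nPRINT X"
    for token_list in tokenize_document(program):
        print(token_list)
    # expected output: ['LET', 'X', '5'] then ['PRINT', 'X']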
| 26.804124 | 88 | 0.674808 | 768 | 5,200 | 4.449219 | 0.213542 | 0.044776 | 0.05736 | 0.038045 | 0.248756 | 0.127305 | 0.093649 | 0 | 0 | 0 | 0 | 0.000713 | 0.190385 | 5,200 | 193 | 89 | 26.943005 | 0.810926 | 0.230769 | 0 | 0.141509 | 0 | 0 | 0.181447 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084906 | false | 0 | 0.009434 | 0.009434 | 0.179245 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f12dc2176e2beefeeb42b25ea4471be881f3f01d | 1,316 | py | Python | Arays/6_Equilibrium index of an array_approach_2.py | sounak95/100_days_of_code | 50fbf088ce6ab2137aa216a30e3b3f828b278a22 | [
"Apache-2.0"
] | null | null | null | Arays/6_Equilibrium index of an array_approach_2.py | sounak95/100_days_of_code | 50fbf088ce6ab2137aa216a30e3b3f828b278a22 | [
"Apache-2.0"
] | null | null | null | Arays/6_Equilibrium index of an array_approach_2.py | sounak95/100_days_of_code | 50fbf088ce6ab2137aa216a30e3b3f828b278a22 | [
"Apache-2.0"
] | null | null | null | """
Description - Equilibrium index of an array is an index such that the sum of elements at lower indexes is equal to the sum of elements at higher indexes. We are given an Array of integers, We have to find out the first index i from left such that -
A[0] + A[1] + ... A[i-1] = A[i+1] + A[i+2] ... A[n-1]
Input
[-7, 1, 5, 2, -4, 3, 0]
Output
3
A[0] + A[1] + A[2] = A[4] + A[5] + A[6]
Tricky Solution : The idea is to get total sum of array first. Then Iterate through the array and keep updating the left sum which is initialized as zero. In the loop, we can get right sum by subtracting the elements one by one. Then check whether Leftsum and Rightsum are equal.
Pseudo Code
// n : size of array
int eqindex(arr, n)
{
sum = 0
leftsum = 0
for (i=0 to n-1)
sum += arr[i]
for (i=0 to n-1)
{
// now sum will be righsum for index i
sum -= a[i]
if (sum == leftsum )
return i
leftsum += a[i]
}
}
Time Complexity : O(n)
Auxiliary Space : O(1)
input:
-7 1 5 2 -4 3 0
output:
3
"""
arr = list(map(int, input().split()))
n = len(arr)
flag = True
total = 0                      # sum of the whole array (avoids shadowing built-in sum)
for item in arr:
    total += item
left_sum = 0
for i in range(n):
    total -= arr[i]            # total is now the right-side sum for index i
    if total == left_sum:
        flag = False
        print(i)
        break
    left_sum += arr[i]
if flag:
    print(-1)
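# Editor's worked example: for input "-7 1 5 2 -4 3 0" the loop first sees
# total == left_sum at i == 3 (-7 + 1 + 5 == -4 + 3 + 0 == -1), so it prints 3.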
| 23.087719 | 279 | 0.604103 | 250 | 1,316 | 3.168 | 0.384 | 0.012626 | 0.011364 | 0.040404 | 0.137626 | 0.082071 | 0.04798 | 0.04798 | 0.04798 | 0.04798 | 0 | 0.039874 | 0.275836 | 1,316 | 56 | 280 | 23.5 | 0.791186 | 0.794833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f12e84e71dc2614e3a6f1d2f7d671fe27072ff71 | 474 | py | Python | py_framework/wsgi.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | py_framework/wsgi.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | py_framework/wsgi.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | from wsgiref.simple_server import make_server
def application(environ, start_response):
    # Render every key/value pair of the WSGI environ, one per line.
    response_body = [
        '{key}: {value}'.format(key=key, value=value) for key, value in sorted(environ.items())
    ]
    response_body = '\n'.join(response_body)
    status = '200 OK'
    response_headers = [
        ('Content-type', 'text/plain'),
    ]
    # WSGI requires the app to call start_response before returning the body.
    start_response(status, response_headers)
    return [response_body.encode('utf-8')]
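# Editor's note: with the server below running, the endpoint can be exercised
# with any HTTP client, e.g.:
#   curl http://localhost:8000/        # dumps the sorted WSGI environ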
server = make_server('localhost', 8000, app=application)
server.serve_forever() | 26.333333 | 95 | 0.670886 | 58 | 474 | 5.310345 | 0.637931 | 0.155844 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020672 | 0.183544 | 474 | 18 | 96 | 26.333333 | 0.775194 | 0 | 0 | 0 | 0 | 0 | 0.115789 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.076923 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f130b64b0ff7b024705421268b8468e5fc3ddf42 | 3,345 | py | Python | pomma/determine_symbols_and_max_repeats.py | NickleDave/pomma | e41dc4b354edb0c3a52685365fd79653e1930d43 | [
"BSD-3-Clause"
] | 1 | 2019-02-06T16:51:46.000Z | 2019-02-06T16:51:46.000Z | pomma/determine_symbols_and_max_repeats.py | NickleDave/pomma | e41dc4b354edb0c3a52685365fd79653e1930d43 | [
"BSD-3-Clause"
] | null | null | null | pomma/determine_symbols_and_max_repeats.py | NickleDave/pomma | e41dc4b354edb0c3a52685365fd79653e1930d43 | [
"BSD-3-Clause"
] | null | null | null | from itertools import groupby, chain
def determine_symbols_and_max_repeats(sequences):
"""determines unique set of symbols used in sequences, and maximum number
of repeats of those symbols (consecutive repeats, not just repeats in the
sense of occurrences). Any symbol with a maximum number of repeats > 1 is
considered a symbol that repeats. These repeating symbols will be fit with
states that adapt.
To make fitting easier, maps symbols to a set of consecutive integers
from 0 to n where n is the number of symbols,
then applies that mapping to sequences.
Parameters
----------
sequences : list
of lists. Representations of sequences of symbols.
Lists can be of ints or of str (single characters).
If str, will be converted to int.
Returns
-------
symbols_and_max_repeats: dict
with following key, value pairs:
symbols : set
            the unique set of symbols used in sequences (ints or single-character strs)
symbols_int_map: dict
mapping from symbols to integers 0,1,2,...,n
where n is the number of symbols
seqs_mapped : list
            List of lists of int. Result of "converting" sequences
            to ints by applying symbols_int_map to them.
max_repeats : dict
where each key is a symbol and the corresponding value is
the maximum number of consecutive repeats of that symbol
found in any of the sequences
repeat_symbols : list
of int, symbols with repeat strings with max_repeats > 1
"""
if type(sequences) != list:
raise TypeError('sequences should be a list, not {}'.format(type(sequences)))
if not all([type(seq) == list for seq in sequences]):
raise TypeError('sequences should be a list of lists')
# chain.from_iterable concatenates sequences
seqs_concat = list(chain.from_iterable(sequences))
# map unique set of symbols to consecutive integers starting from 0
symbols = set(seqs_concat)
symbols_int_map = dict(zip(symbols,
range(len(symbols))))
# apply mapping to sequences
seqs_mapped = []
for seq in sequences:
seq_mapped = [symbols_int_map[symbol]
for symbol in seq]
seqs_mapped.append(seq_mapped)
# find maximum number of consecutive repeats for each symbol
repeat_counts = []
for seq_mapped in seqs_mapped:
counts_this_seq = [(k, sum(1 for i in g)) for k, g in groupby(seq_mapped)]
repeat_counts.extend(counts_this_seq)
max_repeats = {}
for symbol, symbol_int in symbols_int_map.items():
all_counts_this_symbol = [tuple_count
for tuple_symbol, tuple_count in repeat_counts
if tuple_symbol == symbol_int]
max_repeat = max(all_counts_this_symbol)
max_repeats[symbol] = max_repeat
repeat_symbols = [symbol
for symbol, max_repeat in max_repeats.items()
if max_repeat > 1]
symbols_and_max_repeats = {
'symbols': symbols,
'seqs_mapped': seqs_mapped,
'max_repeats': max_repeats,
'repeat_symbols': repeat_symbols
}
return symbols_and_max_repeats
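# Editor's usage sketch (toy sequences; 'a' repeats up to three times):
if __name__ == '__main__':
    info = determine_symbols_and_max_repeats([['a', 'a', 'a', 'b'],
                                              ['b', 'a', 'b']])
    print(info['repeat_symbols'])    # symbols whose longest consecutive run > 1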
| 41.8125 | 85 | 0.637668 | 439 | 3,345 | 4.697039 | 0.266515 | 0.053346 | 0.031523 | 0.038797 | 0.125121 | 0.093113 | 0.093113 | 0.026188 | 0 | 0 | 0 | 0.003861 | 0.303139 | 3,345 | 79 | 86 | 42.341772 | 0.880738 | 0.479223 | 0 | 0 | 0 | 0 | 0.069825 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.027778 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f130c55a426adac68bf09f355daa9ca3125bc0da | 292 | py | Python | week2/scripts/tb_publisher.py | ajaykrishna1878/Robotics-Automation-QSTP-2021 | f5b8626db20a60f9dd923bab5a0bec118d0abc67 | [
"MIT"
] | null | null | null | week2/scripts/tb_publisher.py | ajaykrishna1878/Robotics-Automation-QSTP-2021 | f5b8626db20a60f9dd923bab5a0bec118d0abc67 | [
"MIT"
] | null | null | null | week2/scripts/tb_publisher.py | ajaykrishna1878/Robotics-Automation-QSTP-2021 | f5b8626db20a60f9dd923bab5a0bec118d0abc67 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import rospy
from std_msgs.msg import Float32
rospy.init_node('radius_publisher')
pub = rospy.Publisher('/radius', Float32, queue_size=1)
rate = rospy.Rate(1)
if __name__ == '__main__':
while not rospy.is_shutdown():
pub.publish(0.5)
rate.sleep() | 22.461538 | 55 | 0.695205 | 43 | 292 | 4.418605 | 0.72093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036885 | 0.164384 | 292 | 13 | 56 | 22.461538 | 0.741803 | 0.071918 | 0 | 0 | 0 | 0 | 0.114391 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f1313cae1d8ecddeb5f75f139601242ca6ec08e2 | 3,330 | py | Python | exps/supp-synthetic/synth_utils.py | Viktour19/overlap-code | f5c6e63146a00f65710c38b9181bb9d12de6454f | [
"MIT"
] | 2 | 2020-07-09T03:15:58.000Z | 2022-03-09T11:57:17.000Z | exps/supp-synthetic/synth_utils.py | Viktour19/overlap-code | f5c6e63146a00f65710c38b9181bb9d12de6454f | [
"MIT"
] | null | null | null | exps/supp-synthetic/synth_utils.py | Viktour19/overlap-code | f5c6e63146a00f65710c38b9181bb9d12de6454f | [
"MIT"
] | 1 | 2021-05-18T11:55:04.000Z | 2021-05-18T11:55:04.000Z | import pandas as pd
import numpy as np
identity_func = lambda a, b: b
def compliance(D, R, inv_trans=lambda x,y : y):
ops = {'<=': (lambda x,y : x <= y),
'>': (lambda x,y : x > y),
'>=': (lambda x,y : x >= y),
'<': (lambda x,y : x < y),
'==': (lambda x,y : x == y),
'': (lambda x,y : x==True),
'not': (lambda x,y : x==False)}
Ws = []
for r in R:
W = []
for c in r:
try:
v = float(c[2])
except:
v = c[2]
W.append(ops[c[1]](inv_trans(c[0], D[c[0]].values), v))
W = np.array(W)
Ws.append(W)
return Ws
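
# --- Editor's sketch (not part of the original module): how compliance() is
# meant to be called. Each rule is a list of (column, operator, value) clauses;
# the return value holds one boolean array per rule, one row per clause.
if __name__ == '__main__':
    _D = pd.DataFrame({'age': [30, 70], 'smoker': [True, False]})
    _R = [[('age', '<=', '65'), ('smoker', '', None)]]
    _Ws = compliance(_D, _R)
    # _Ws[0][0] -> age <= 65 per row; _Ws[0][1] -> smoker flag per row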
def calc_coverage(X, RS_s):
# This predicts whether or not X trips ANY of the CONSIDERED rules
x_by_all_rules = RS_s.predict_rules(X)
# This lays out the set of singletons* x CONSIDERED rules
# *this is 2 x dimension for binary variables
clauses_by_all_rules = RS_s.M.z
# This lays out the FINAL rules with 1,0, after rounding
rules_used_idx = RS_s.M.w == 1
x_by_used_rules = x_by_all_rules[:, rules_used_idx]
prop_covered_by_used_rule = x_by_used_rules.mean(axis=0)
return prop_covered_by_used_rule
def eval_confusion_matrix(RS_s, x, check_fn):
# Predicted reference samples in support
pred_ref = RS_s.predict(RS_s.refSamples)
# Actual reference samples in support
true_ref = check_fn(RS_s.refSamples)
# Check the confusion matrix
ct_ref = pd.crosstab(
pred_ref, true_ref,
rownames=['Predicted'], colnames=['Actual'])
    # Predicted vs. actual for the data samples
    pred_dat = RS_s.predict(x)
    true_dat = check_fn(x)
    ct_dat = pd.crosstab(pred_dat, true_dat,
                         rownames=['Predicted'], colnames=['Actual'])
    return ct_ref, ct_dat
def eval_false_inclusion_rate(RS_s, check_fn):
# Predicted reference samples in support
pred_ref = RS_s.predict(RS_s.refSamples)
# Actual reference samples in support
true_ref = check_fn(RS_s.refSamples)
# Of the reference samples that should be excluded, how many get through?
false_inclusion_rate = pred_ref[true_ref == 0].mean()
return false_inclusion_rate
def print_synth_rules(X, RS_s, CNF=True):
rules_support = RS_s.rules(transform=identity_func, fmt='%.1f')
prop_covered = calc_coverage(X, RS_s)
# Outer logic takes into account the negation of the CNF
outer_logic = ['NOT', 'AND NOT'] if CNF else [' ', 'AND']
inner_logic = [' ', 'AND'] if CNF else ['EITHER', 'OR']
print("Total coverage of X: {:.3f}".format(RS_s.predict(X).mean()))
print("Total volume: {:.3f}".format(RS_s.predict(RS_s.refSamples).mean()))
print("-----------")
for i in range(len(rules_support)):
this_rule = rules_support[i]
print("{:<7} Rule: {} \t \t \t Covers {:.3f} of X".format(
outer_logic[0] if i == 0 else outer_logic[1],
i,
prop_covered[i]))
print("{")
for j in range(len(this_rule)):
this_clause = this_rule[j]
print("\t {} {} {}".format(
inner_logic[0] if j == 0 else inner_logic[1],
'NOT' if this_clause[1] == 'not' else ' ',
this_clause[0]))
print("}")
| 33.979592 | 85 | 0.58979 | 503 | 3,330 | 3.697813 | 0.270378 | 0.030645 | 0.034409 | 0.033871 | 0.305376 | 0.225806 | 0.17043 | 0.17043 | 0.17043 | 0.17043 | 0 | 0.009942 | 0.275075 | 3,330 | 97 | 86 | 34.329897 | 0.760563 | 0.168769 | 0 | 0.115942 | 0 | 0 | 0.074047 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072464 | false | 0 | 0.028986 | 0 | 0.173913 | 0.115942 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f1378b6a473af8bf2230b8b3abb2ec910392d01c | 4,995 | py | Python | Data-Lake/etl.py | naderAsadi/Udacity-Data-Engineering-Projects | d12c42b3260379a470abd244f98a1fd5b32718f7 | [
"MIT"
] | 4 | 2020-10-03T18:14:20.000Z | 2021-11-01T08:15:32.000Z | Data-Lake/etl.py | naderAsadi/Udacity-Data-Engineering-Projects | d12c42b3260379a470abd244f98a1fd5b32718f7 | [
"MIT"
] | null | null | null | Data-Lake/etl.py | naderAsadi/Udacity-Data-Engineering-Projects | d12c42b3260379a470abd244f98a1fd5b32718f7 | [
"MIT"
] | null | null | null | import configparser
from datetime import datetime
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col, monotonically_increasing_id
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format, dayofweek
from pyspark.sql.types import *
config = configparser.ConfigParser()
config.read('dl.cfg')
# NOTE: configparser returns a section, not a value, for a top-level key, so
# the credentials are read here assuming dl.cfg has an [AWS] section (an
# editor's assumption; the config file itself is not shown in this slice).
os.environ['AWS_ACCESS_KEY_ID'] = config['AWS']['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY'] = config['AWS']['AWS_SECRET_ACCESS_KEY']
def create_spark_session():
"""Create or retrieve a Spark session
"""
return SparkSession.builder.config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0")\
.getOrCreate()
def process_song_data(spark, input_data, output_data):
"""[summary]
Args:
spark ([type]): [description]
input_data ([type]): [description]
output_data ([type]): [description]
"""
song_data = input_data + 'song_data/*/*/*/*.json'
song_schema = StructType([
StructField("artist_id", StringType()),
StructField("artist_latitude", DoubleType()),
StructField("artist_location", StringType()),
StructField("artist_longitude", DoubleType()),
StructField("artist_name", StringType()),
StructField("duration", DoubleType()),
StructField("num_songs", IntegerType()),
StructField("title", StringType()),
StructField("year", IntegerType())
])
df = spark.read.json(song_data, schema=song_schema)
# song table
song_table = df.select('title', 'artist_id', 'year', 'duration').dropDuplicates()\
.withColumn('song_id', monotonically_increasing_id())
song_table.write.parquet(output_data + 'songs/', mode='overwrite', partitionBy=['year', 'artist_id'])
# artist table
artist_table = df.select("artist_id","artist_name","artist_location","artist_latitude","artist_longitude").dropDuplicates()
artist_table.write.parquet(output_data + 'artists/', mode='overwrite')
def process_log_data(spark, input_data, output_data):
"""[summary]
Args:
spark ([type]): [description]
input_data ([type]): [description]
output_data ([type]): [description]
"""
log_data = input_data + 'log-data/'
df = spark.read.json(log_data).drop_duplicates()
df = df.filter(df.page == 'NextSong')
# user table
users_fields = ["userId", "firstName", "lastName", "gender", "level"]
users_table = df.selectExpr(users_fields).drop_duplicates()
users_table.write.parquet(output_data + 'users/', mode='overwrite')
# time table
get_timestamp = udf(lambda x: datetime.utcfromtimestamp(int(x) / 1000), TimestampType())
df = df.withColumn('start_time', get_timestamp('ts'))
time_table = df.withColumn("hour",hour("start_time"))\
.withColumn("day",dayofmonth("start_time"))\
.withColumn("week",weekofyear("start_time"))\
.withColumn("month",month("start_time"))\
.withColumn("year",year("start_time"))\
.withColumn("weekday",dayofweek("start_time"))\
.select("ts","start_time","hour", "day", "week", "month", "year", "weekday").drop_duplicates()
time_table.write.parquet(output_data + 'time_table/', mode='overwrite', partitionBy=['year', 'month'])
# songplays table
# read in song data to use for songplays table
song_df = spark.read\
.format("parquet")\
.option("basePath", os.path.join(output_data, "songs/"))\
.load(os.path.join(output_data, "songs/*/*/"))
# extract columns from joined song and log datasets to create songplays table
songplays_table = df.join(song_df, df.song == song_df.title, how='inner')\
.select(monotonically_increasing_id().alias("songplay_id"), col("start_time"),
col("userId").alias("user_id"), "level", "song_id", "artist_id",
col("sessionId").alias("session_id"), "location", col("userAgent").alias("user_agent"))
songplays_table = songplays_table.join(time_table, songplays_table.start_time == time_table.start_time, how="inner")\
.select("songplay_id", songplays_table.start_time, "user_id", "level", "song_id", "artist_id",
"session_id", "location", "user_agent", "year", "month")
# write songplays table to parquet files partitioned by year and month
songplays_table.drop_duplicates().write.parquet(os.path.join(output_data, "songplays/"), mode="overwrite", partitionBy=["year","month"])
def main():
spark = create_spark_session()
input_data = "s3://udacity-spark-project/"
output_data = "s3://udacity-spark-project/output/"
process_song_data(spark, input_data, output_data)
process_log_data(spark, input_data, output_data)
if __name__ == "__main__":
main()
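
# --- Editor's note (not part of the original script): for a local smoke test
# the same pipeline can be pointed at local folders instead of S3. The paths
# below are hypothetical; only the s3:// paths above come from the original.
#
#     spark = create_spark_session()
#     process_song_data(spark, 'file:///tmp/sparkify/', 'file:///tmp/sparkify/output/')
#     process_log_data(spark, 'file:///tmp/sparkify/', 'file:///tmp/sparkify/output/')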
| 40.609756 | 140 | 0.646847 | 567 | 4,995 | 5.460317 | 0.255732 | 0.04522 | 0.030685 | 0.023256 | 0.235142 | 0.153747 | 0.117571 | 0.101421 | 0.069767 | 0.069767 | 0 | 0.002256 | 0.201401 | 4,995 | 122 | 141 | 40.942623 | 0.773878 | 0.110711 | 0 | 0 | 0 | 0 | 0.214987 | 0.036326 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.1 | 0 | 0.171429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f13af7308893eb6dd4208f6a9361b6902679648a | 433 | py | Python | pyxel_book/sample02.py | scnsh/pyxelGames | 058a4c0ad0b421cffc5746bfed2a6eddb3d85949 | [
"MIT"
] | null | null | null | pyxel_book/sample02.py | scnsh/pyxelGames | 058a4c0ad0b421cffc5746bfed2a6eddb3d85949 | [
"MIT"
] | null | null | null | pyxel_book/sample02.py | scnsh/pyxelGames | 058a4c0ad0b421cffc5746bfed2a6eddb3d85949 | [
"MIT"
] | null | null | null | import pyxel
WIDTH = 128
HEIGHT = 128
IMG_NO = 0
class App:
my_x = 0
my_y = 0
def __init__(self):
pyxel.init(WIDTH, HEIGHT)
pyxel.load("mychara.pyxres")
pyxel.run(self.update, self.draw)
    def update(self):
        # follow the mouse cursor every frame
        self.my_x = pyxel.mouse_x
        self.my_y = pyxel.mouse_y

    def draw(self):
        pyxel.cls(7)  # clear the screen with palette color 7 (white)
        # draw a 16x16 sprite from image bank IMG_NO at the mouse position,
        # using palette color 0 as the transparency key
        pyxel.blt(self.my_x, self.my_y, IMG_NO, 0, 0, 16, 16, 0)
App()
| 16.037037 | 64 | 0.575058 | 71 | 433 | 3.309859 | 0.380282 | 0.102128 | 0.051064 | 0.068085 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.056106 | 0.300231 | 433 | 26 | 65 | 16.653846 | 0.719472 | 0 | 0 | 0 | 0 | 0 | 0.032333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.055556 | 0 | 0.388889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f13b704fd8d0df2530e8a9f2950ab903d8a850f5 | 44,784 | py | Python | Heavylifting.py | Dnshbbu/RulesView-Backend | 09d47e600f9a3e815b5c60241817ce4d98e7ba7b | [
"MIT"
] | null | null | null | Heavylifting.py | Dnshbbu/RulesView-Backend | 09d47e600f9a3e815b5c60241817ce4d98e7ba7b | [
"MIT"
] | null | null | null | Heavylifting.py | Dnshbbu/RulesView-Backend | 09d47e600f9a3e815b5c60241817ce4d98e7ba7b | [
"MIT"
] | null | null | null | from flask import Flask, jsonify, abort, make_response
from flask_restful import Api, Resource, reqparse, fields, marshal, abort
from py2neo import *
import json
from flask_cors import CORS
from ipaddress import *
import werkzeug
import os
from netaddr import *
import re
import random
import time
import CSVSplit_generalised_v3
import logging
import RawRuleslist
import configparser
import sqlite3
#config object to pull the password from conf file
config = configparser.ConfigParser()
config.read('conf/creds.ini')
# UPLOAD_FOLDER = 'uploads/'
UPLOAD_FOLDER = config.get('uploads', 'UPLOAD_FOLDER')
db_location=config.get('sqliteDB', 'database_folder')
# Gets or creates a logger
logger = logging.getLogger(__name__)
# set log level
logger.setLevel(logging.INFO)
dirLogFolder = config.get('logs', 'LOGS_FOLDER')
# Create target Directory if don't exist
if not os.path.exists(dirLogFolder):
os.mkdir(dirLogFolder)
print("[*] Directory \'"+dirLogFolder+"\' Created ")
else:
print("[*] Directory \'"+dirLogFolder+"\' already exists")
# define file handler and set formatter
LOG_FILE = config.get('logs', 'LOGS_FOLDER')+'\\sample.log'
file_handler = logging.FileHandler(LOG_FILE)
formatter = logging.Formatter(
'%(asctime)s | %(levelname)s | %(name)s | %(funcName)s | :%(lineno)s | %(message)s', datefmt='%y-%m-%d %H:%M:%S')
file_handler.setFormatter(formatter)
# %(filename)s:%(lineno)s - %(funcName)20s()
# add file handler to logger
logger.addHandler(file_handler)
def updateriskconfig(onecolumn,twocolumns,threecolumns):
config.set("riskconfigAny","onecolumn",onecolumn)
config.set("riskconfigAny","twocolumns",twocolumns)
config.set("riskconfigAny","threecolumns",threecolumns)
with open('conf/creds.ini', 'w') as configfile:
config.write(configfile)
return('updated')
def retrieveriskconfig():
onecolumn= config.get('riskconfigAny', 'onecolumn')
twocolumns= config.get('riskconfigAny', 'twocolumns')
threecolumns= config.get('riskconfigAny', 'threecolumns')
insecureproto= config.get('riskconfigAny', 'insecureriskvalue')
itoeriskvalue= config.get('riskconfigAny', 'itoeriskvalue')
etoiriskvalue= config.get('riskconfigAny', 'etoiriskvalue')
return(onecolumn,twocolumns,threecolumns,insecureproto,itoeriskvalue,etoiriskvalue)
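
# --- Editor's sketch (not part of the original module): the [riskconfigAny]
# section of conf/creds.ini that the two functions above read and write.
# The key names come from the code; the numeric values are hypothetical.
#
#     [riskconfigAny]
#     onecolumn = 5
#     twocolumns = 10
#     threecolumns = 15
#     insecureriskvalue = 5
#     itoeriskvalue = 3
#     etoiriskvalue = 3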
def segregateIandE(db_name):
table_name="netobj"
allrows = RawRuleslist.ReadSqlitenetobj(db_name,table_name)
for x in allrows:
idvalue = x['Name']
ipvalue=x['IPv4']
mask=x['Mask']
riskvalue='yes'
if x['Mask']!='NA':
print('NA is not there')
cip = ipvalue+"/"+mask
#ipnetwork=IPNetwork[cip]
ipnetwork=IPNetwork(cip)
ip=ipnetwork
if "-" in ipvalue:
print("- is there")
ipranges = ipvalue.split('-')
iprange =IPRange(ipranges[0].strip(),ipranges[1].strip())
ip=iprange
else:
print('NA is there')
#ip=IPAddress[ipvalue]
ip = IPAddress(ipvalue)
#print(ip_address(var_121).is_private)
if ip.is_private():
column_name="Internal"
riskvalue="\'yes\'"
id_column="Name"
idvalue = "\'"+idvalue+"\'"
RawRuleslist.UpdateTable(db_name, table_name, column_name,riskvalue, id_column, idvalue)
else:
column_name="External"
riskvalue="\'yes\'"
id_column="Name"
idvalue = "\'"+idvalue+"\'"
RawRuleslist.UpdateTable(db_name, table_name, column_name, riskvalue,id_column, idvalue)
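
# --- Editor's sketch (not part of the original module): the internal/external
# split above hinges on netaddr's is_private(); a standalone illustration:
#
#     from netaddr import IPAddress, IPNetwork
#     IPAddress('10.1.2.3').is_private()        # True  -> Internal
#     IPAddress('8.8.8.8').is_private()         # False -> External
#     IPNetwork('192.168.0.0/24').is_private()  # True  -> Internal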
def segregateIntExtConn(db_name,table_name):
try:
allrows = RawRuleslist.ReadSqlite(db_name,table_name)
sqlite_file = db_location+"\\\\"+db_name+'.db'
value="\'yes\'"
# Connecting to the database file
conn = sqlite3.connect(sqlite_file)
conn.row_factory = lambda cursor, row: row[0]
c = conn.cursor()
tablename2 = "netobj"
colname2="External"
#query to get rows which has External=yes
c.execute("SELECT Name from {tn} where {cn}={val}".format(tn=tablename2, cn=colname2,val=value))
queryresult2 =c.fetchall()
logger.info("queryresults")
for x in allrows:
idvalue=x['No']
if x['Action']=="Accept":
individualsource1 = str(x['Source']).split(';')
for xy in individualsource1:
if xy in queryresult2:
individualdestination1 = str(x['Destination']).split(';')
for xz in individualdestination1:
if xz in queryresult2:
column_name="ExttoExt"
riskvalue="\'yes\'"
id_column="No"
# idvalue = x['Name']
# idvalue = "\'"+idvalue+"\'"
table_name=table_name
RawRuleslist.UpdateTable(db_name, table_name, column_name, riskvalue,id_column, idvalue)
break
else:
column_name="ExttoInt"
riskvalue="\'yes\'"
id_column="No"
# idvalue = "\'"+idvalue+"\'"
table_name=table_name
RawRuleslist.UpdateTable(db_name, table_name, column_name, riskvalue,id_column, idvalue)
else:
individualdestination1 = str(x['Destination']).split(';')
for xz in individualdestination1:
if xz in queryresult2:
column_name="InttoExt"
riskvalue="\'yes\'"
id_column="No"
# idvalue = x['Name']
# idvalue = "\'"+idvalue+"\'"
table_name=table_name
RawRuleslist.UpdateTable(db_name, table_name, column_name, riskvalue,id_column, idvalue)
else:
column_name="InttoInt"
riskvalue="\'yes\'"
id_column="No"
# idvalue = "\'"+idvalue+"\'"
table_name=table_name
RawRuleslist.UpdateTable(db_name, table_name, column_name, riskvalue,id_column, idvalue)
except Exception as e:
logger.exception("%s", e)
def riskcalculator(db_name,table_name):
try:
segregateIandE(db_name)
segregateIntExtConn(db_name,table_name)
riskcalculator_parked(db_name,table_name)
return {
'data': '',
'message': 'Risk updated!',
'status': 'success'
}
except Exception as e:
logger.exception("%s", e)
# class HeavyLifting():
def riskcalculator_parked(db_name,table_name):
try:
allrows = RawRuleslist.ReadSqlite(db_name,table_name)
# if source, destination or service has any fields
onecolumn= config.get('riskconfigAny', 'onecolumn')
twocolumns= config.get('riskconfigAny', 'twocolumns')
threecolumns= config.get('riskconfigAny', 'threecolumns')
insecureriskvalue= config.get('riskconfigAny', 'insecureriskvalue')
itoeriskvalue= config.get('riskconfigAny', 'itoeriskvalue')
etoiriskvalue= config.get('riskconfigAny', 'etoiriskvalue')
id_column = "No"
sqlite_file = db_location+"\\\\"+db_name+'.db'
value="\'yes\'"
# Connecting to the database file
conn = sqlite3.connect(sqlite_file)
conn.row_factory = lambda cursor, row: row[0]
c = conn.cursor()
tablename = "services"
colname="Insecure"
#query to get rows which has insecure=yes
c.execute("SELECT Name from {tn} where {cn}={val}".format(tn=tablename, cn=colname,val=value))
queryresult1 =c.fetchall()
colname="ItoE"
#query to get rows which has InttoExt=yes
c.execute("SELECT Name from {tn} where {cn}={val}".format(tn=tablename, cn=colname,val=value))
queryresult2 =c.fetchall()
colname="EtoI"
#query to get rows which has ExttoInt=yes
c.execute("SELECT Name from {tn} where {cn}={val}".format(tn=tablename, cn=colname,val=value))
queryresult3 =c.fetchall()
for x in allrows:
riskvalue = 0
riskreason = ""
idvalue=x['No']
id_column = "No"
#Any in columns- Risk assignment
if x['Action']=="Accept":
if (x['Source']=="Any" and x['Destination']=="Any" and x['Service']=="Any"):
riskvalue=riskvalue+int(threecolumns)
riskreason = riskreason+"1,-,"+"All three columns have Any "+","+str(threecolumns)+";"
elif ((x['Source']=="Any" and x['Destination']=="Any") or (x['Destination']=="Any" and x['Service']=="Any") or ( x['Service']=="Any" and x['Source']=="Any")):
riskvalue=riskvalue+int(twocolumns)
riskreason = riskreason+"1,-,"+"Two columns have Any "+","+str(twocolumns)+";"
elif (x['Source']=="Any" or x['Destination']=="Any" or x['Service']=="Any"):
riskvalue=riskvalue+int(onecolumn)
riskreason = riskreason+"1,-,"+"One column has Any"+","+str(onecolumn)+";"
if x['Action']=="Accept":
individualservice = str(x['Service']).split(';')
'''Insecure protocols- Risk assignment'''
for xy in individualservice:
if xy in queryresult1:
riskvalue=riskvalue+int(insecureriskvalue)
riskreason = riskreason+"2"+","+xy+","+"Insecure proto"+","+str(insecureriskvalue)+";"
column_name = "Risk"
logger.info(riskreason)
logger.info(riskvalue)
RawRuleslist.UpdateTable(db_name, table_name, column_name,riskvalue, id_column, idvalue)
column_name = "RiskReason"
riskreason="\'"+riskreason+"\'"
RawRuleslist.UpdateTable(db_name, table_name, column_name,riskreason, id_column, idvalue)
colname="InttoExt"
queryresult11 = RawRuleslist.ReadSqlitewSelected(db_name,table_name,colname)
for x in queryresult11:
riskvalue = x['Risk']
riskreason = x['RiskReason']
idvalue=x['No']
id_column = "No"
#riskreason = riskreason.replace("'", "")
if x['Action']=="Accept":
individualservice = str(x['Service']).split(';')
'''Internal to External connections- Risk assignment'''
for xy in individualservice:
if xy not in queryresult2:
riskvalue=riskvalue+int(itoeriskvalue)
riskreason = riskreason+"3"+","+xy+","+"Int to Ext conn - non approved"+","+str(itoeriskvalue)+";"
column_name = "Risk"
RawRuleslist.UpdateTable(db_name, table_name, column_name,riskvalue, id_column, idvalue)
column_name = "RiskReason"
riskreason="\'"+riskreason+"\'"
RawRuleslist.UpdateTable(db_name, table_name, column_name,riskreason, id_column, idvalue)
colname="ExttoInt"
queryresult12 = RawRuleslist.ReadSqlitewSelected(db_name,table_name,colname)
for x in queryresult12:
id_column = "No"
idvalue=x['No']
riskvalue = x['Risk']
riskreason = x['RiskReason']
#riskreason = riskreason.replace("'", "")
if x['Action']=="Accept":
individualservice = str(x['Service']).split(';')
logger.info(individualservice)
'''External to Internal connections- Risk assignment'''
for xy in individualservice:
if xy not in queryresult3:
riskvalue=riskvalue+int(etoiriskvalue)
riskreason = riskreason+"4"+","+xy+","+"Ext to Int conn - non approved"+","+str(etoiriskvalue)+";"
column_name = "Risk"
RawRuleslist.UpdateTable(db_name, table_name, column_name,riskvalue, id_column, idvalue)
column_name = "RiskReason"
riskreason="\'"+riskreason+"\'"
RawRuleslist.UpdateTable(db_name, table_name, column_name,riskreason, id_column, idvalue)
return {
'data': '',
'message': 'Risk updated!',
'status': 'success'
}
except Exception as e:
logger.exception("%s", e)
return {
'data': '',
'message': 'Some error occured',
'status': 'error'
}
def getselectrules(statement):
user=config.get('neo4j', 'user')
password=config.get('neo4j', 'passwd')
graph2 = Graph(host=config.get('neo4j', 'host'),auth=(user,password))
output = graph2.run(statement).data()
print(output)
output1 = []
if (output==[]):
finalgrouping =[]
GrpNodes =[]
logger.error("Error: Neo4j didnt return any output for the query")
message = "Error: Neo4j didnt return any output for the query"
status = 'error'
print(message)
else:
output1.append(output)
finalgrouping, GrpNodes = FinalGroupingv2(output1)
message = "Query completed successfully"
status = 'success'
print(message)
return(output1, finalgrouping, GrpNodes, message, status)
def uploadwithcustquery(statement):
user=config.get('neo4j', 'user')
password=config.get('neo4j', 'passwd')
graph2 = Graph(host=config.get('neo4j', 'host'),auth=(user,password))
output = graph2.run(statement).stats()
return(output)
def getfwrulesneo4j( statement):
user=config.get('neo4j', 'user')
password=config.get('neo4j', 'passwd')
graph2 = Graph(host=config.get('neo4j', 'host'),auth=(user,password))
output = graph2.run(statement).data()
RawRuleslist.InsertTable(output)
    return (output)
def defaultrules( statement):
user=config.get('neo4j', 'user')
password=config.get('neo4j', 'passwd')
graph2 = Graph(host=config.get('neo4j', 'host'),auth=(user,password))
output = graph2.run(statement).data()
output1 = []
output1.append(output)
finalgrouping, GrpNodes = FinalGroupingv2(output1)
return(output1, finalgrouping, GrpNodes)
def custquery( statement):
user=config.get('neo4j', 'user')
password=config.get('neo4j', 'passwd')
graph = Graph(host=config.get('neo4j', 'host'),auth=(user,password))
output = graph.run(statement).data()
id_Node = []
NoDup_id_Node = []
Created_rels = []
for rel in output:
mi = re.compile("\([_]*(?P<grouping>[0-9]*)\)")
mi_rels = re.compile("\)(?P<grouping>.*?)\(")
mi_rels_only_name = re.compile("\).*?\[\:(?P<grouping>\w+)\s\{")
tomatch = rel['r']
m = mi.findall(str(tomatch))
mi_rels_data = mi_rels.findall(str(tomatch))
mi_rels_data_only_name = mi_rels_only_name.findall(str(tomatch))
print("================mi_nodes_data==============")
print(m)
print("================mi_rels_data==============")
# print(mi_rels_data)
for one in m:
id_rels = {}
intone = int(one)
ab = graph.nodes.get(intone)
id_rels['id'] = one
id_rels['id_prop'] = ab
id_Node.append(id_rels)
# print (ab)
for x in id_Node:
if x not in NoDup_id_Node:
NoDup_id_Node.append(x)
xx = 0
yy = 1
for i in mi_rels_data:
if ">" in i:
create_rel = {}
# print("forward")
print(i)
for id in NoDup_id_Node:
if m[xx] == id['id']:
create_rel['s'] = id['id_prop']
# for id in NoDup_id_Node:
if m[yy] == id['id']:
create_rel['d'] = id['id_prop']
create_rel['r'] = mi_rels_data_only_name[xx]
print("source: "+m[xx]+" destination: "+m[yy])
print(create_rel)
Created_rels.append(create_rel)
print(
"======================Created_rels===========================")
print(Created_rels)
print(
"======================Created_rels===========================")
# break
else:
# print("backward")
create_rel = {}
print(i)
for id in NoDup_id_Node:
if m[yy] == id['id']:
create_rel['s'] = id['id_prop'] # Source
# for id in NoDup_id_Node:
if m[xx] == id['id']:
create_rel['d'] = id['id_prop'] # Destination
# create_rel['r']="(_"+m[xx]+")"+i+"(_"+m[yy]+")"
# print(i['name'])
create_rel['r'] = mi_rels_data_only_name[xx]
# create_rel['r']="(_"+m[xx]+")"+i+"(_"+m[yy]+")"
print("source: "+m[yy]+" destination: "+m[xx])
print(create_rel)
Created_rels.append(create_rel)
print(
"======================Created_rels===========================")
print(Created_rels)
print(
"======================Created_rels===========================")
# break
xx += 1
yy += 1
print("=!@#====== Create rels ==========!@#=")
print(Created_rels)
output1 = []
output1.append(Created_rels)
finalgrouping = FinalGrouping(output1)
# print("################################################################3")
# print(finalgrouping)
return(output1, finalgrouping)
def allRels(statement):
user=config.get('neo4j', 'user')
password=config.get('neo4j', 'passwd')
graph = Graph(host=config.get('neo4j', 'host'),auth=(user,password))
output = graph.run(statement).data()
return (output)
def allGroups(statement):
user=config.get('neo4j', 'user')
password=config.get('neo4j', 'passwd')
graph = Graph(host=config.get('neo4j', 'host'),auth=(user,password))
output = graph.run(statement).data()
return (output)
def CreateGroup( statement):
user=config.get('neo4j', 'user')
password=config.get('neo4j', 'passwd')
graph = Graph(host=config.get('neo4j', 'host'),auth=(user,password))
output = graph.run(statement).data()
return output
def check( statement, checkip):
user=config.get('neo4j', 'user')
password=config.get('neo4j', 'passwd')
graph = Graph(host=config.get('neo4j', 'host'),auth=(user,password))
output = graph.run(statement).data()
Node = []
NameAndIP = []
NoDupNode = []
MatchNetwork = []
MatchNodes = []
print("Printing Output")
print(output)
for item in output:
nodes_source = {}
name_ip = {}
nodes_source['Name'] = item['n']['Name']
nodes_source['IPAddress'] = item['n']['IPAddress']
nodes_source['Mask'] = item['n']['Mask']
nodes_source['Comments'] = item['n']['Comments']
Node.append(nodes_source)
# & (item['n']['IPAddress']!="10.15.208.0") & (item['n']['IPAddress']!="10.18.112.0")
if (item['n']['Mask'] != "NA"):
cip = item['n']['IPAddress']+"/"+item['n']['Mask']
name_ip['Name'] = item['n']['Name']
name_ip['IPAddress'] = item['n']['IPAddress']
name_ip['Comments'] = item['n']['Comments']
name_ip['Network'] = cip
NameAndIP.append(name_ip)
# & (item['n']['IPAddress']!="10.15.208.0") & (item['n']['IPAddress']!="10.18.112.0")
if (item['n']['Mask'] == "NA"):
name_ip['Name'] = item['n']['Name']
name_ip['Network'] = item['n']['IPAddress']
name_ip['Comments'] = item['n']['Comments']
NameAndIP.append(name_ip)
# tocheck_ip="194.127.24.66"
# tocheck_ip="10.197.167.96"
tocheck_ip = checkip
# print("Printing tocheck_ip")
# print(ip_network(tocheck_ip,strict=False))
# print("Printing NameAndIP")
# print(NameAndIP)
# print(checkip)
MatchRel = []
for y in NameAndIP:
if ("-" in y['Network']):
# if m.group('IP_start')=="0.0.0.0" and m.group('IP_end')=="255.255.255.255":
if y['Network'] == "0.0.0.0 - 255.255.255.255":
MatchNetwork.append(y)
else:
ip_range_to_match = y['Network']
m = re.search(
"^(?P<IP_start>.\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s+\-\s+(?P<IP_end>.\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})$", ip_range_to_match)
iprange = IPRange(m.group('IP_start'), m.group('IP_end'))
if tocheck_ip in iprange:
MatchNetwork.append(y)
else:
# if (tocheck_ip == y['Network']):
# MatchNetwork.append(y)
# if not for strict=false, typeerror will be raised as "with hostbits set"!
ab = ip_network(y['Network'], strict=False)
if (IPv4Address(tocheck_ip) in IPv4Network(ab)):
# print(y) #print(y['Name'])
MatchNetwork.append(y)
print("Printing MatchNetwork")
print(MatchNetwork)
for z in MatchNetwork:
        graph_z = graph  # reuse the connection configured at the top of this function
# statement="MATCH (s:Hosts {Name:'"+z['Name']+"'})-[r]-(d:Hosts) RETURN s,d,r"
statement1 = "MATCH (s:Hosts)-[r]->(d:Hosts) WHERE s.Name='" + \
z['Name']+"' RETURN s,d,r"
statement2 = "MATCH (s:Hosts)-[r]->(d:Hosts) WHERE d.Name='" + \
z['Name']+"' RETURN s,d,r"
# MATCH p=(s:Hosts)-[r:"+searchterm+"]->(d:Hosts) RETURN s as source,d as target,r as service LIMIT 5 #to search
print(statement1)
output1 = graph_z.run(statement1).data()
print(len(output1))
print(output1)
print(statement2)
output2 = graph_z.run(statement2).data()
print(len(output2))
print(output2)
print("Printing matched nodes relationships")
# MatchNodes.append
if output1 != []:
MatchRel.append(output1)
if output2 != []:
MatchRel.append(output2)
# out={}
# out={"MatchNetwork":MatchNetwork}
print("=====================================================================================")
print(MatchRel)
finalgrouping = FinalGrouping(MatchRel)
print("finalgroupingtest1")
print(finalgrouping)
return(MatchRel, finalgrouping)
def Convert_to_IP_Network( output):
Node = []
NoDupNode = []
for item1 in output:
for item in item1:
nodes_source = {}
nodes_target = {}
name_ip_host = {}
name_ip_net = {}
# Assign the name of the node to ID
nodes_source['Name'] = item['s']['Name']
nodes_source['IPAddress'] = item['s']['IPAddress']
nodes_source['Mask'] = item['s']['Mask']
nodes_source['Comments'] = item['s']['Comments']
if (item['s']['Mask'] == "NA"):
nodes_source['Network'] = item['s']['IPAddress']
# & (item['n']['IPAddress']!="10.15.208.0") & (item['n']['IPAddress']!="10.18.112.0")
if (item['s']['Mask'] != "NA"):
cip = item['s']['IPAddress']+"/"+item['s']['Mask']
nodes_source['Network'] = cip
# ab=ip_network(nodes_source['Network'],strict=False)
# nodes_source['Network']=ab
nodes_target['Name'] = item['d']['Name']
nodes_target['IPAddress'] = item['d']['IPAddress']
nodes_target['Mask'] = item['d']['Mask']
nodes_target['Comments'] = item['d']['Comments']
if (item['d']['Mask'] == "NA"):
nodes_target['Network'] = item['d']['IPAddress']
# & (item['n']['IPAddress']!="10.15.208.0") & (item['n']['IPAddress']!="10.18.112.0")
if (item['d']['Mask'] != "NA"):
cip = item['d']['IPAddress']+"/"+item['d']['Mask']
nodes_target['Network'] = cip
cd = ip_network(nodes_target['Network'], strict=False)
nodes_target['Network'] = cd
Node.append(nodes_source)
Node.append(nodes_target)
for x in Node:
if x not in NoDupNode:
NoDupNode.append(x)
return NoDupNode
def FinalGrouping( finalarray):
print("Printing final array")
NameAndIP = Convert_to_IP_Network(finalarray)
print(NameAndIP)
ParentChild = []
NoDupParentChild = []
    user = config.get('neo4j', 'user')
    password = config.get('neo4j', 'passwd')
    graph = Graph(host=config.get('neo4j', 'host'), auth=(user, password))
statement = "MERGE (d:Groups) RETURN d"
# fetch the source, target and relationship details
Grouping = graph.run(statement).data()
number_of_colors = len(Grouping)
Groups = []
for rot in range(number_of_colors):
grp = {}
grp['Name'] = Grouping[rot]['d']['Name']
grp['IPAddress'] = Grouping[rot]['d']['IPAddress']
grp['color'] = Grouping[rot]['d']['color']
Groups.append(grp)
print(Groups)
for y in NameAndIP:
for Group in Groups:
ab = ip_network(y['Network'], strict=False)
print(IPv4Network(ab))
print(IPv4Network(Group['IPAddress']))
c = ip_network(IPv4Network(ab), strict=False)
d = ip_network(IPv4Network(Group['IPAddress']), strict=False)
# if IPv4Network(ab) in IPv4Network(Group['d']['IPAddress']):
if c.subnet_of(d):
par_child = {}
print(IPv4Network(ab))
print(IPv4Network(Group['IPAddress']))
# par_child="sdsadsa"
par_child['ChildName'] = y['Name']
par_child['ParentName'] = Group['Name']
par_child['Parent_IP'] = Group['IPAddress']
par_child['color'] = Group['color']
ParentChild.append(par_child)
for x in ParentChild:
if x not in NoDupParentChild:
NoDupParentChild.append(x)
print("==============Printing NoDupParentChild===============")
return (NoDupParentChild)
def FinalGroupingv2( finalarray):
try:
print("================Printing final array==================")
NameAndIP = Convert_to_IP_Network(finalarray)
# print(NameAndIP)
ParentChild = []
NoDupParentChild = []
user=config.get('neo4j', 'user')
password=config.get('neo4j', 'passwd')
graph = Graph(host=config.get('neo4j', 'host'),auth=(user,password))
# graph = Graph(password="myneo2")
statement = "MERGE (d:Groups) RETURN d"
# fetch the source, target and relationship details
Grouping = graph.run(statement).data()
number_of_colors = len(Grouping)
Groups = []
AllIPNetwork = []
for rot in range(number_of_colors):
grp = {}
grp['Name'] = Grouping[rot]['d']['Name']
grp['IPAddress'] = Grouping[rot]['d']['IPAddress']
grp['color'] = Grouping[rot]['d']['color']
Groups.append(grp)
# if not for strict=false, typeerror will be raised as "with hostbits set"!
ab = ip_network(Grouping[rot]['d']['IPAddress'], strict=False)
AllIPNetwork.append(ab)
arrangedones = ArrangeNodesv2(Grouping)
#arrangedones = self.ArrangeNodes(AllIPNetwork)
print("===================== Printing the arranged ones =============")
print(arrangedones)
print("===================== Printed the arranged ones =============")
GrpParChd = []
# this is to access the pair {depth0:[xx]}
for evry in arrangedones:
print(evry) # key values
# this is to access the array in values of key/value pairs
for evry2 in arrangedones[evry]:
print(evry2)
x = len(evry2)-1
print(x) # no of elements in values array
while (x >= 0):
grpparchild = {}
if (x == 0):
y = x
print(x, evry2[x], y, evry2[y])
print(evry2[x], " is subnet of ", evry2[y])
grpparchild[evry2[x]] = evry2[y]
GrpParChd.append(grpparchild)
else:
y = x-1
print(x, evry2[x], y, evry2[y])
while (y >= 0):
if (evry2[x].subnet_of(evry2[y])):
print(evry2[x], " is subnet of ", evry2[y])
grpparchild[evry2[x]] = evry2[y]
GrpParChd.append(grpparchild)
break
y -= 1
x -= 1
print("========== Parent Child pair in Groups=======")
print(GrpParChd)
GrpNodes = []
for Group in Groups:
d = ip_network(Group['IPAddress'], strict=False)
grp_item = {}
# print("========== Printing only keys in Groups=======")
# print(k)
for evrypair in GrpParChd:
for k, v in evrypair.items(): # for k,v in list(a.items():
if(d == k):
print("&&&&&&&&&&&& Comparing &&&&&&&&&&&&")
print(d, k, v)
for grpk in Group.keys():
grp_item[grpk] = Group[grpk]
grp_item['id'] = Group['Name']
grp_item['isgrp'] = "true"
print(
"!!!!!!!!!!!!!!!! Key-value pairs so far !!!!!!!!!!!!!!!!11")
print(grp_item)
for Grouppar in Groups:
print(
"*****************All values from groups********************")
print(Grouppar)
d_par = ip_network(
Grouppar['IPAddress'], strict=False)
if(d_par == v):
print(
"*****************Entered into matched parent group********************")
print(d_par, v)
grp_item['parent'] = Grouppar['Name']
GrpNodes.append(grp_item)
print(
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^Nodes format for groups^^^^^^^^^^^^^^^^")
logger.info("GrpNodes:")
logger.info(GrpNodes)
print(GrpNodes)
print("========== Parent Child pair in Groups (reverse)=======")
#GrpParChdreverse = GrpParChd.reverse()
print(list(reversed(GrpParChd)))
print("========== Print Groups=======")
print(Groups)
print("========== All IP Network=======")
print(AllIPNetwork)
ip_list_sorted = sorted(AllIPNetwork)
SortedReversedAllIPNetwork = list(reversed(ip_list_sorted))
print("================Printing SortedReversedAllIPNetwork===============")
print(SortedReversedAllIPNetwork)
for y in NameAndIP:
ab = ip_network(y['Network'], strict=False)
c = ip_network(IPv4Network(ab), strict=False)
for matchsortedIpnetwork in SortedReversedAllIPNetwork:
e = ip_network(IPv4Network(
matchsortedIpnetwork), strict=False)
if c.subnet_of(e):
# print(c,e)
i = 0
while(i < len(Groups)):
#d = ip_network(IPv4Network(Groups[i]['IPAddress']), strict=False)
d = ip_network(
Groups[i]['IPAddress'], strict=False)
#print(d, " ; ",e)
if (d == e):
print(d, e, i)
print(Groups[i]['Name'], Groups[i]['color'])
par_child = {}
par_child['ChildName'] = y['Name']
par_child['ParentName'] = Groups[i]['Name']
par_child['Parent_IP'] = Groups[i]['IPAddress']
par_child['color'] = Groups[i]['color']
ParentChild.append(par_child)
i += 1
# node_any = {}
# node_any['Name'] = "Any"
# node_any['color'] = "#ffff80"
# ParentChild.append(node_any)
for x in ParentChild:
if x not in NoDupParentChild:
NoDupParentChild.append(x)
print("==============Printing NoDupParentChild===============")
print(NoDupParentChild)
return (NoDupParentChild, GrpNodes)
print("========== End of new attempt=======")
except Exception as e:
logger.exception("%s", e)
def groupheirarchy( statement):
try:
user=config.get('neo4j', 'user')
password=config.get('neo4j', 'passwd')
graph = Graph(host=config.get('neo4j', 'host'),auth=(user,password))
Grouping = graph.run(statement).data()
number_of_colors = len(Grouping)
Groups = []
AllIPNetwork = []
for rot in range(number_of_colors):
grp = {}
grp['Name'] = Grouping[rot]['d']['Name']
grp['IPAddress'] = Grouping[rot]['d']['IPAddress']
grp['color'] = Grouping[rot]['d']['color']
Groups.append(grp)
# if not for strict=false, typeerror will be raised as "with hostbits set"!
ab = ip_network(Grouping[rot]['d']['IPAddress'], strict=False)
AllIPNetwork.append(ab)
arrangedones = ArrangeNodesv2(Grouping)
GrpParChd = []
# this is to access the pair {depth0:[xx]}
for evry in arrangedones:
print(evry) # key values
# this is to access the array in values of key/value pairs
for evry2 in arrangedones[evry]:
print(evry2)
x = len(evry2)-1
print(x) # no of elements in values array
while (x >= 0):
grpparchild = {}
if (x == 0):
y = x
print(x, evry2[x], y, evry2[y])
print(evry2[x], " is subnet of ", evry2[y])
grpparchild[evry2[x]] = evry2[y]
GrpParChd.append(grpparchild)
else:
y = x-1
print(x, evry2[x], y, evry2[y])
while (y >= 0):
if (evry2[x].subnet_of(evry2[y])):
print(evry2[x], " is subnet of ", evry2[y])
grpparchild[evry2[x]] = evry2[y]
GrpParChd.append(grpparchild)
break
y -= 1
x -= 1
print("========== Parent Child pair in Groups=======")
print(GrpParChd)
GrpNodes = []
for Group in Groups:
d = ip_network(Group['IPAddress'], strict=False)
grp_item = {}
# print("========== Printing only keys in Groups=======")
# print(k)
for evrypair in GrpParChd:
for k, v in evrypair.items(): # for k,v in list(a.items():
if(d == k):
print("&&&&&&&&&&&& Comparing &&&&&&&&&&&&")
print(d, k, v)
for grpk in Group.keys():
grp_item[grpk] = Group[grpk]
grp_item['id'] = Group['Name']
grp_item['label'] = Group['Name'] +" ("+ Group['IPAddress']+")"
grp_item['isgrp'] = "true"
print(
"!!!!!!!!!!!!!!!! Key-value pairs so far !!!!!!!!!!!!!!!!11")
print(grp_item)
for Grouppar in Groups:
print(
"*****************All values from groups********************")
print(Grouppar)
d_par = ip_network(
Grouppar['IPAddress'], strict=False)
if(d_par == v):
print(
"*****************Entered into matched parent group********************")
print(d_par, v)
# grp_item['parent'] = Grouppar['Name']
GrpNodes.append(grp_item)
print(
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^Nodes format for groups^^^^^^^^^^^^^^^^")
print(GrpNodes)
Node = []
NoDupNode = []
for eve in GrpNodes:
print("+++++++++++++++++++ Printing every nodes in groups+++++++++++")
print(eve)
nodes_datawrapper_grp = {}
nodes_datawrapper_grp['data'] = eve
Node.append(nodes_datawrapper_grp)
for x in Node:
if x not in NoDupNode:
NoDupNode.append(x)
print("========== Parent Child pair in Groups (reverse)=======")
Link = []
for evrypair in GrpParChd:
for k, v in evrypair.items(): # for k,v in list(a.items():
if (k!=v):
links_datawrapper = {}
grp_link = {}
for Group in Groups:
d = ip_network(Group['IPAddress'], strict=False)
if (d==k):
grp_link['target'] = Group['Name']
if (d==v):
grp_link['source'] = Group['Name']
# Group_Link.append(grp_link)
links_datawrapper['data'] = grp_link
Link.append(links_datawrapper)
print("========================== Built the LInk ==================")
print(Link)
print("========== Print Groups=======")
print(Groups)
print("========== All IP Network=======")
print(AllIPNetwork)
ip_list_sorted = sorted(AllIPNetwork)
SortedReversedAllIPNetwork = list(reversed(ip_list_sorted))
print("================Printing SortedReversedAllIPNetwork===============")
print(SortedReversedAllIPNetwork)
print("==============Printing NoDupParentChild===============")
print (NoDupNode, Link)
return (NoDupNode, Link)
print("========== End of new attempt=======")
except Exception as e:
logger.exception("%s", e)
def ArrangeNodesv2( newlist):
    # note: with strict=False, ip_network('192.168.10.0/22') is normalised to
    # 192.168.8.0/22 because the host bits are masked off #CHECKTHIS
ip_list = newlist
print("===========================printing the incoming groups array===========================")
print(ip_list)
AllIPNetwork = []
for rotate in ip_list:
# if not for strict=false, typeerror will be raised as "with hostbits set"!
ab = ip_network(rotate['d']['IPAddress'], strict=False)
AllIPNetwork.append(ab)
ip_list_sorted = sorted(AllIPNetwork)
print("===========================printing the SORTED incoming groups array===========================")
print(ip_list_sorted)
x = 0
y = 0
z = 0
a = dict()
depth_z = []
firstentry = 1
firstfirstentry = 1
index = x
while y < len(ip_list_sorted):
ipx = ip_network((ip_list_sorted[x]), strict=False) #this uses ipaddress module
ipy = ip_network((ip_list_sorted[y]), strict=False)
ipindex = ip_network((ip_list_sorted[index]), strict=False)
print("ipindex: ", ipindex, " ipx: ", ipx, " ipy: ", ipy)
if ipy.subnet_of(ipx):
print("it is a subnet")
depth_z.append(ip_list_sorted[y])
if y == len(ip_list_sorted)-1:
a["depth_"+str(z)] = []
a["depth_"+str(z)].append(depth_z)
z += 1
# x=y
# print(x)
if not ipy.subnet_of(ipx):
if ipy.subnet_of(ipindex):
print("it is not a subnet")
x = y
print(x)
if not ipy.subnet_of(ipindex):
a["depth_"+str(z)] = []
a["depth_"+str(z)].append(depth_z)
z += 1
depth_z = []
print(
"it is not a subnet of ipx and ipindex, so adding a new entry to the depth_z")
depth_z.append(ip_list_sorted[y])
index = y
x = y
if y == len(ip_list_sorted)-1:
a["depth_"+str(z)] = []
a["depth_"+str(z)].append(depth_z)
z += 1
y += 1
print("===================== a ====")
print(a)
return (a)
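
# --- Editor's sketch (not part of the original module): the depth grouping
# above relies on sorting ipaddress networks and on subnet_of (Python 3.7+).
if __name__ == '__main__':
    _nets = sorted([ip_network('10.1.0.0/16'), ip_network('10.0.0.0/8'),
                    ip_network('192.168.0.0/24')])
    # sorted order: 10.0.0.0/8, 10.1.0.0/16, 192.168.0.0/24
    assert _nets[1].subnet_of(_nets[0])      # 10.1.0.0/16 nests under 10.0.0.0/8
    assert not _nets[2].subnet_of(_nets[0])  # 192.168.0.0/24 opens a new branch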
def GetAllNodes( statement):
user=config.get('neo4j', 'user')
password=config.get('neo4j', 'passwd')
graph = Graph(host=config.get('neo4j', 'host'),auth=(user,password))
# graph = Graph(password="myneo2")
output = graph.run(statement).data()
Node = []
NameAndIP = []
NoDupNode = []
AllIPNetwork = []
MatchNodes = []
print("========================================================================================================")
print(output)
for item in output:
nodes_source = {}
name_ip = {}
nodes_source['Name'] = item['n']['Name']
nodes_source['IPAddress'] = item['n']['IPAddress']
nodes_source['Mask'] = item['n']['Mask']
nodes_source['Comments'] = item['n']['Comments']
Node.append(nodes_source)
# & (item['n']['IPAddress']!="10.15.208.0") & (item['n']['IPAddress']!="10.18.112.0")
if (item['n']['Mask'] != "NA"):
cip = item['n']['IPAddress']+"/"+item['n']['Mask']
name_ip['Name'] = item['n']['Name']
name_ip['Network'] = cip
NameAndIP.append(name_ip)
for y in NameAndIP:
# if not for strict=false, typeerror will be raised as "with hostbits set"!
ab = ip_network(y['Network'], strict=False)
AllIPNetwork.append(ab)
AllIPNetwork.sort()
out = {}
out = {"NameAndIP": AllIPNetwork}
# newhelo ="helo"
# return(AllIPNetwork)
return(out)
def GetRelationshipFromNeo4jv3( statement):
user=config.get('neo4j', 'user')
password=config.get('neo4j', 'passwd')
graph = Graph(host=config.get('neo4j', 'host'),auth=(user,password))
# graph = Graph(password="myneo2")
# fetch the source, target and relationship details
print(statement)
output = graph.run(statement).data()
output1 = []
output1.append(output)
finalgrouping, GrpNodes = FinalGroupingv2(output1)
# print("################################################################3")
# print(finalgrouping)
return(output1, finalgrouping, GrpNodes)
| 40.345946 | 174 | 0.49741 | 4,469 | 44,784 | 4.882524 | 0.104274 | 0.022686 | 0.025023 | 0.015811 | 0.634693 | 0.59583 | 0.572869 | 0.539459 | 0.521265 | 0.498671 | 0 | 0.0155 | 0.335901 | 44,784 | 1,109 | 175 | 40.382326 | 0.718167 | 0.093404 | 0 | 0.600454 | 0 | 0.00227 | 0.161986 | 0.039203 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024972 | false | 0.031782 | 0.019296 | 0 | 0.057889 | 0.148695 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f141c618f7f5e93e7eb5c8ee22854c6063df694c | 4,044 | py | Python | grabNdays.py | brianvan555/Stock | f0115ef0773153cd8df361556e398935cdd1bdde | [
"BSD-2-Clause"
] | null | null | null | grabNdays.py | brianvan555/Stock | f0115ef0773153cd8df361556e398935cdd1bdde | [
"BSD-2-Clause"
] | null | null | null | grabNdays.py | brianvan555/Stock | f0115ef0773153cd8df361556e398935cdd1bdde | [
"BSD-2-Clause"
] | null | null | null | # %%
import datetime
import time
import pandas as pd
from io import StringIO
from function_grab import grab_price
import numpy as np
import warnings
import random
data = {}
n_days = 120
date = datetime.datetime.now()
fail_count = 0
allow_continuous_fail_count = 15  # the longest market closure in the past five years was 12 consecutive days
while len(data) < n_days:
print('parsing', date)
    # scrape the day's data with grab_price
try:
        # fetch the data
data[date.date()] = grab_price(date)
print('success!')
fail_count = 0
except:
        # market holidays have nothing to scrape
print('fail! check the date is holiday')
fail_count += 1
if fail_count == allow_continuous_fail_count:
raise
break
    # step back one day
date -= datetime.timedelta(days=1)
time.sleep(random.randint(5, 10))
updown = pd.DataFrame({k: d['漲跌(+/-)'] for k, d in data.items()})
# print(updown)
# %%
trade_n = pd.DataFrame({k: d['成交股數'] for k, d in data.items()})
i = 0
while i <= trade_n.shape[0]-1:
trade_n.iloc[i] = trade_n.iloc[i].str.replace(',', '')
trade_n.iloc[i] = round(trade_n.iloc[i].astype(float)/1000)
i += 1
# print(trade_n)
PEratio = pd.DataFrame({k: d['本益比'] for k, d in data.items()})
i = 0
while i <= PEratio.shape[0]-1:
PEratio.iloc[i] = PEratio.iloc[i].str.replace(',', '')
i += 1
updown.to_excel('grab120days_updown.xlsx')
trade_n.to_excel('grab120days_traden.xlsx')
PEratio.to_excel('grab120days_PE.xlsx')
# print(trade_n)
# print(PEratio)
close = pd.DataFrame({k: d['收盤價'] for k, d in data.items()}) # type=string
# print(close)
# print(close.shape)
# %%
# MA5
i = 0
MA5 = []
close = close.replace('--', np.NaN)
while i <= close.shape[0]-1:
close.iloc[i] = close.iloc[i].str.replace(',', '')
mean = np.nanmean(close.iloc[i, 0:5].astype(float))
# print(mean)
MA5.append([close.index[i], mean])
i = i+1
# first collect (ticker, mean) pairs in a list, load the list into a DataFrame, set the ticker column as the index, then merge on matching indexes; unmatched entries become NaN
MA5 = pd.DataFrame(MA5, columns=['證券代號', 'MA5']).set_index('證券代號')
close = pd.merge(close, MA5, how='outer', left_index=True, right_index=True)
# the dtype must be converted to float (the same type as the computed means), otherwise the later merge stores NaN because of the dtype mismatch
close = close.astype(float)  # convert every column to float
# MA20
i = 0
MA20 = []
# print(close.shape) #df.shape=(n_rows,n_columns)
# missing closing prices are stored as np.nan so that np.nanmean below skips them in the average
# close = close.replace('--', np.NaN)  -- replace() only substitutes on an exact full-string match
while i <= close.shape[0]-1:
    # in a DataFrame, iloc addresses rows/columns by position while loc addresses them by label
    # print(type(close.iloc[i, 1]))
    # the thousands separators can only be stripped by running str.replace on a whole column of strings
    # close.iloc[i] = close.iloc[i].str.replace(',', '')
    # print(close.iloc[i, 0:3])
    # astype only returns a converted copy of the data; nothing changes unless the result is assigned back
mean = np.nanmean(close.iloc[i, 0:20])
# print(type(close.iloc[i, 1]))
"""for j in range(3):
close.iloc[i, j] = close.iloc[i, j].replace(',', '')
sum = sum + float(close.iloc[i, j])
print(sum)
mean = sum/3"""
# print(mean)
MA20.append([close.index[i], mean])
i = i+1
# first collect (ticker, mean) pairs in a list, load the list into a DataFrame, set the ticker column as the index, then merge on matching indexes; unmatched entries become NaN
MA20 = pd.DataFrame(MA20, columns=['證券代號', 'MA20']).set_index('證券代號')
close = pd.merge(close, MA20, how='outer', left_index=True, right_index=True)
print(close)
close = close.astype(float)
# MA60
i = 0
MA60 = []
while i <= close.shape[0]-1:
mean = np.nanmean(close.iloc[i, 0:60])
MA60.append([close.index[i], mean])
i = i+1
MA60 = pd.DataFrame(MA60, columns=['證券代號', 'MA60']).set_index('證券代號')
close = pd.merge(close, MA60, how='outer', left_index=True, right_index=True)
print(close)
close = close.astype(float)
# %%
# MA120
i = 0
MA120 = []
while i <= close.shape[0]-1:
mean = np.nanmean(close.iloc[i, 0:120])
MA120.append([close.index[i], mean])
i = i+1
MA120 = pd.DataFrame(MA120, columns=['證券代號', 'MA120']).set_index('證券代號')
close = pd.merge(close, MA120, how='outer', left_index=True, right_index=True)
print(close)
close = close.astype(float)
# print(close)
close.to_excel('stock120.xlsx')
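
# --- Editor's sketch (not part of the original script): the four MA blocks
# above repeat one pattern. A helper in this style (the name add_ma is
# hypothetical) would compute any window the same way:
#
#     def add_ma(close, window):
#         ma = [[close.index[i], np.nanmean(close.iloc[i, 0:window])]
#               for i in range(close.shape[0])]
#         ma = pd.DataFrame(ma, columns=['證券代號', 'MA' + str(window)]).set_index('證券代號')
#         return pd.merge(close, ma, how='outer',
#                         left_index=True, right_index=True).astype(float)
#
#     # close = add_ma(close, 5); close = add_ma(close, 20); ...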
| 29.955556 | 79 | 0.629327 | 575 | 4,044 | 4.347826 | 0.229565 | 0.04 | 0.056 | 0.022 | 0.3884 | 0.3708 | 0.3276 | 0.262 | 0.2048 | 0.2048 | 0 | 0.039531 | 0.199308 | 4,044 | 134 | 80 | 30.179104 | 0.732551 | 0.203759 | 0 | 0.294118 | 0 | 0 | 0.074254 | 0.015961 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.094118 | 0 | 0.094118 | 0.070588 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f14245be4031d7387f3b61db91e9fdbcd0da8f5b | 1,811 | py | Python | CONTENT/Resources/guides/__UNSORTED/297_serialize_and_deserialize_binary_tree/serialize_and_desialize_binary_tree.py | impastasyndrome/DS-ALGO-OFFICIAL | c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a | [
"Apache-2.0"
] | 13 | 2021-03-11T00:25:22.000Z | 2022-03-19T00:19:23.000Z | CONTENT/Resources/guides/__UNSORTED/297_serialize_and_deserialize_binary_tree/serialize_and_desialize_binary_tree.py | impastasyndrome/DS-ALGO-OFFICIAL | c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a | [
"Apache-2.0"
] | 162 | 2021-03-09T01:52:11.000Z | 2022-03-12T01:09:07.000Z | CONTENT/Resources/guides/__UNSORTED/297_serialize_and_deserialize_binary_tree/serialize_and_desialize_binary_tree.py | impastasyndrome/DS-ALGO-OFFICIAL | c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a | [
"Apache-2.0"
] | 12 | 2021-04-26T19:43:01.000Z | 2022-01-31T08:36:29.000Z | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
import json
class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
if not root:
return "null"
return (
"["
+ str(root.val)
+ ","
+ self.serialize(root.left)
+ ","
+ self.serialize(root.right)
+ "]"
)
def getSection(self, data):
brackets = 1
idx = 1
while brackets != 0:
if data[idx] == "[":
brackets += 1
elif data[idx] == "]":
brackets -= 1
idx += 1
return data[:idx], idx
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
if data == "null":
return None
first_comma = data.index(",")
val = int(data[1:first_comma])
data = data[first_comma + 1 :]
if data[0] == "[":
leftsection, last = self.getSection(data)
left = self.deserialize(leftsection)
else:
last = 4
left = None
data = data[last + 1 :]
if data[0] == "[":
rightsection, _ = self.getSection(data)
right = self.deserialize(rightsection)
else:
right = None
node = TreeNode(val)
node.left = left
node.right = right
return node
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
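
# --- Editor's sketch (not part of the original solution): a minimal TreeNode
# (matching the commented-out definition at the top) plus a round-trip check.
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    codec = Codec()
    encoded = codec.serialize(root)             # '[1,[2,null,null],[3,null,null]]'
    decoded = codec.deserialize(encoded)
    assert codec.serialize(decoded) == encoded  # the round trip is stable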
| 24.146667 | 60 | 0.47101 | 184 | 1,811 | 4.592391 | 0.331522 | 0.028402 | 0.040237 | 0.030769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011321 | 0.414688 | 1,811 | 74 | 61 | 24.472973 | 0.785849 | 0.227499 | 0 | 0.130435 | 0 | 0 | 0.012938 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065217 | false | 0 | 0.021739 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f14272e90c89cf6a167f3389d2119044ece31836 | 1,832 | py | Python | lhc/collections/interval_map.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | lhc/collections/interval_map.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | lhc/collections/interval_map.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | from collections import defaultdict
from lhc.interval import IntervalBinner
class IntervalMap(object):
def __init__(self, key_value_pairs=None):
self.len = 0
self.binner = IntervalBinner()
self.bins = defaultdict(list)
self.values = defaultdict(list)
if key_value_pairs is not None:
for key, value in key_value_pairs:
self[key] = value
def __len__(self):
return self.len
def __iter__(self):
for bin in self.bins.values():
for item in bin:
yield item
def __contains__(self, item):
bins = self.binner.get_overlapping_bins(item)
for fr, to in bins:
for bin in range(fr, to + 1):
for set_interval in self.bins[bin]:
if set_interval == item:
return True
return False
def __setitem__(self, key, value):
self.len += 1
bin = self.binner.get_bin(key)
self.bins[bin].append(key)
self.values[bin].append(value)
def __getitem__(self, item):
bins = self.binner.get_overlapping_bins(item)
for fr, to in bins:
for bin in range(fr, to + 1):
for i, set_interval in enumerate(self.bins[bin]):
if set_interval.overlaps(item):
yield self.values[bin][i]
def iterkeys(self):
for bin in self.bins.values():
for item in bin:
yield item
def itervalues(self):
for bin in self.values.values():
for value in bin:
yield value
    def iteritems(self):
        # self.bins and self.values share bin keys; pair each stored interval
        # with its value rather than zipping the (key, list) tuples themselves
        for bin in self.bins:
            for key, value in zip(self.bins[bin], self.values[bin]):
                yield key, value
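
# --- Editor's sketch (not part of the original module): intended usage.
# This module only imports IntervalBinner, so the Interval(start, stop)
# constructor below is an assumption about the lhc.interval API.
#
#     from lhc.interval import Interval
#     m = IntervalMap()
#     m[Interval(0, 10)] = 'a'
#     m[Interval(5, 15)] = 'b'
#     list(m[Interval(7, 8)])  # values of every stored interval overlapping 7..8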
| 30.032787 | 84 | 0.556223 | 231 | 1,832 | 4.242424 | 0.238095 | 0.065306 | 0.040816 | 0.036735 | 0.326531 | 0.310204 | 0.261224 | 0.261224 | 0.261224 | 0.261224 | 0 | 0.003376 | 0.353166 | 1,832 | 60 | 85 | 30.533333 | 0.823629 | 0 | 0 | 0.244898 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.183673 | false | 0 | 0.040816 | 0.020408 | 0.306122 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f1432fbe7e4b709efdff7049c6c10eeddba93d8f | 11,788 | py | Python | bin/consolidate-preempted-logs.py | phyletica/ecoevolity-experiments | bb16e34c4c7495feaa68653df98d5fbead93cf44 | [
"CC-BY-4.0"
] | null | null | null | bin/consolidate-preempted-logs.py | phyletica/ecoevolity-experiments | bb16e34c4c7495feaa68653df98d5fbead93cf44 | [
"CC-BY-4.0"
] | null | null | null | bin/consolidate-preempted-logs.py | phyletica/ecoevolity-experiments | bb16e34c4c7495feaa68653df98d5fbead93cf44 | [
"CC-BY-4.0"
] | null | null | null | #! /usr/bin/env python
import sys
import os
import re
import glob
import argparse
import project_util
batch_number_pattern = re.compile(r'batch(?P<batch_number>\d+)')
sim_number_pattern = re.compile(r'-sim-(?P<sim_number>\d+)-')
run_number_pattern = re.compile(r'-run-(?P<run_number>\d+)\.log')
def line_count(path):
count = 0
with open(path) as stream:
for line in stream:
count += 1
return count
def get_run_number(log_path):
run_number_matches = run_number_pattern.findall(log_path)
assert len(run_number_matches) == 1
run_number_str = run_number_matches[0]
return int(run_number_str)
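
# --- Editor's sketch (not part of the original script): what the helpers and
# patterns above extract from a typical state-log file name.
if __name__ == '__main__':
    _example = 'simcoevolity-sim-42-config-state-run-3.log'
    assert get_run_number(_example) == 3
    assert sim_number_pattern.findall(_example) == ['42']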
def consolidate_preempted_logs(
target_run_number = 1,
number_of_samples = 1501,
batch_dir_name = None):
number_of_lines = number_of_samples + 1
val_sim_dirs = glob.glob(os.path.join(project_util.VAL_DIR, '0*'))
for val_sim_dir in sorted(val_sim_dirs):
sim_name = os.path.basename(val_sim_dir)
batch_dirs = glob.glob(os.path.join(val_sim_dir, "batch*"))
for batch_dir in sorted(batch_dirs):
if batch_dir_name and (os.path.basename(batch_dir) != batch_dir_name):
sys.stderr.write("Skipping {0}\n".format(batch_dir))
continue
batch_number_matches = batch_number_pattern.findall(batch_dir)
assert len(batch_number_matches) == 1
batch_number_str = batch_number_matches[0]
batch_number = int(batch_number_str)
sh_paths = glob.glob(os.path.join(batch_dir,
"*simcoevolity-sim-*-config-run-{0}-qsub.sh".format(
target_run_number)))
if not sh_paths:
sys.stderr.write("WARNING: No qsub files found for\n"
" Simulation: {0}\n"
" Batch: {1}\n"
" Target run: {2}\n Skipping!!\n".format(
sim_name,
batch_number,
target_run_number))
continue
for sh_path in sorted(sh_paths):
posterior_path = sh_path.replace(
"-run-{0}-qsub.sh".format(target_run_number),
"-state-run-{0}.log".format(target_run_number))
if not os.path.exists(posterior_path):
sys.stderr.write("WARNING: Missing log: {0}\n".format(posterior_path))
sys.stdout.write("{0}\n".format(sh_path))
continue
sim_number_matches = sim_number_pattern.findall(posterior_path)
assert len(sim_number_matches) == 1
sim_number_str = sim_number_matches[0]
sim_number = int(sim_number_str)
posterior_file = os.path.basename(posterior_path)
prefix = posterior_file.split("-sim-")[0]
gp = os.path.join(batch_dir,
"{0}-sim-{1}-config-state-run-{2}.log*".format(
prefix,
sim_number_str,
target_run_number))
target_state_log_paths = glob.glob(gp)
assert (len(target_state_log_paths) == 1), (
"Multiple matches to {0!r}".format(gp))
target_state_log_path = target_state_log_paths[0]
gp = os.path.join(batch_dir,
"{0}-sim-{1}-config-operator-run-{2}.log*".format(
prefix,
sim_number_str,
target_run_number))
target_op_log_paths = glob.glob(gp)
assert (len(target_op_log_paths) == 1), (
"Multiple matches to {0!r}".format(gp))
target_op_log_path = target_op_log_paths[0]
state_log_path_pattern = os.path.join(batch_dir,
"{0}-sim-{1}-config-state-run-*.log*".format(
prefix,
sim_number_str))
state_log_paths = glob.glob(state_log_path_pattern)
op_log_path_pattern = os.path.join(batch_dir,
"{0}-sim-{1}-config-operator-run-*.log*".format(
prefix,
sim_number_str))
op_log_paths = glob.glob(op_log_path_pattern)
assert (len(state_log_paths) == len(op_log_paths)), (
"{0} matches for {1!r} and {2} for {3!r}".format(
len(state_log_paths),
state_log_path_pattern,
len(op_log_paths),
op_log_path_pattern))
assert (target_state_log_path in state_log_paths), (
"Target {0!r} not in matches".format(
target_state_log_path))
assert (target_op_log_path in op_log_paths), (
"Target {0!r} not in matches".format(
target_op_log_path))
run_numbers = sorted(get_run_number(p) for p in state_log_paths)
assert (run_numbers == sorted(get_run_number(p) for p in op_log_paths))
extra_run_numbers = [rn for rn in run_numbers if rn > target_run_number]
if len(extra_run_numbers) < 1:
if line_count(target_state_log_path) != number_of_lines:
sys.stderr.write(
"WARNING: Target log is incomplete, but there are no extra runs\n"
" Simulation: {0}\n"
" Batch: {1}\n"
" Rep: {2}\n"
" Target run: {3}\n Skipping!!\n".format(
sim_name,
batch_number,
sim_number,
target_run_number))
sys.stdout.write("{0}\n".format(sh_path))
continue
else:
if line_count(target_state_log_path) >= number_of_lines:
sys.stderr.write(
"WARNING: Target log is complete, but there are extra runs\n"
" Simulation: {0}\n"
" Batch: {1}\n"
" Rep: {2}\n"
" Target run: {3}\n Skipping!!\n".format(
sim_name,
batch_number,
sim_number,
target_run_number))
sys.stdout.write("{0}\n".format(sh_path))
continue
completed_run_number = extra_run_numbers.pop(-1)
completed_state_log_pattern = os.path.join(batch_dir,
"{0}-sim-{1}-config-state-run-{2}.log*".format(
prefix,
sim_number_str,
completed_run_number))
completed_state_log_paths = glob.glob(completed_state_log_pattern)
assert (len(completed_state_log_paths) == 1), (
"Multiple matches to complete state log {0!r}".format(
completed_state_log_pattern))
completed_state_log_path = completed_state_log_paths[0]
completed_op_log_pattern = os.path.join(batch_dir,
"{0}-sim-{1}-config-operator-run-{2}.log*".format(
prefix,
sim_number_str,
completed_run_number))
completed_op_log_paths = glob.glob(completed_op_log_pattern)
assert (len(completed_op_log_paths) == 1), (
"Multiple matches to complete op log {0!r}".format(
completed_state_log_pattern))
completed_op_log_path = completed_op_log_paths[0]
if line_count(completed_state_log_path) != number_of_lines:
sys.stderr.write(
"WARNING: could not find completed log for\n"
" Simulation: {0}\n"
" Batch: {1}\n"
" Rep: {2}\n"
" Target run: {3}\n Skipping!!\n".format(
sim_name,
batch_number,
sim_number,
target_run_number))
sys.stdout.write("{0}\n".format(sh_path))
continue
os.rename(completed_state_log_path, target_state_log_path)
os.rename(completed_op_log_path, target_op_log_path)
for n in extra_run_numbers:
sp = os.path.join(batch_dir,
"{0}-sim-{1}-config-state-run-{2}.log*".format(
prefix,
sim_number_str,
n))
state_purge_paths = glob.glob(sp)
assert (len(state_purge_paths) == 1), (
"Multiple matches to incomplete state log {0!r}".format(
sp))
state_purge_path = state_purge_paths[0]
op = os.path.join(batch_dir,
"{0}-sim-{1}-config-operator-run-{2}.log*".format(
prefix,
sim_number_str,
n))
op_purge_paths = glob.glob(op)
assert (len(op_purge_paths) == 1), (
"Multiple matches to incomplete op log {0!r}".format(
op))
op_purge_path = op_purge_paths[0]
os.remove(state_purge_path)
os.remove(op_purge_path)
def main_cli(argv = sys.argv):
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--run-number',
action = 'store',
type = int,
default = 1,
help = 'Target run number for consolidation.')
parser.add_argument('-n', '--number-of-samples',
action = 'store',
type = int,
default = 1501,
help = ('Number of MCMC samples that should be found in the '
'completed log file of each analysis.'))
parser.add_argument('-b', '--batch-dir',
action = 'store',
type = str,
default = None,
help = ('Batch directory name.'))
if argv == sys.argv:
args = parser.parse_args()
else:
args = parser.parse_args(argv)
consolidate_preempted_logs(
target_run_number = args.run_number,
number_of_samples = args.number_of_samples,
batch_dir_name = args.batch_dir)
if __name__ == "__main__":
main_cli()
| 49.529412 | 98 | 0.462335 | 1,220 | 11,788 | 4.160656 | 0.112295 | 0.045705 | 0.038416 | 0.026596 | 0.514972 | 0.439322 | 0.391056 | 0.349291 | 0.312254 | 0.287431 | 0 | 0.014264 | 0.446895 | 11,788 | 237 | 99 | 49.738397 | 0.764264 | 0.001781 | 0 | 0.384615 | 0 | 0 | 0.136325 | 0.036206 | 0 | 0 | 0 | 0 | 0.058824 | 1 | 0.0181 | false | 0 | 0.027149 | 0 | 0.054299 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f144647a79fb401204bd68db25162da1d4a3b39a | 303 | py | Python | web/util/dateutil.py | weerapatbook/studentmonitor | 82d3f5f3ce123b447ba4e4930765319734eab223 | [
"Apache-2.0"
] | null | null | null | web/util/dateutil.py | weerapatbook/studentmonitor | 82d3f5f3ce123b447ba4e4930765319734eab223 | [
"Apache-2.0"
] | 4 | 2020-02-12T00:58:14.000Z | 2021-06-10T21:43:33.000Z | web/util/dateutil.py | weerapatbook/studentmonitor | 82d3f5f3ce123b447ba4e4930765319734eab223 | [
"Apache-2.0"
] | null | null | null | import datetime
class DateUtil(object):
    @classmethod
    def convertDateToString(cls, date):
        value = ''
        try:
            value = date.strftime('%d/%m/%Y')
        except Exception as ex:
            # Fall back to returning the input unchanged when it is not a date
            print(ex)
            value = date
        return value
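# Usage sketch (added illustration, not part of the original API surface):
#   DateUtil.convertDateToString(datetime.date(2020, 1, 31))  # -> '31/01/2020'
#   DateUtil.convertDateToString("not a date")                # falls back to the input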
| 21.642857 | 45 | 0.528053 | 31 | 303 | 5.16129 | 0.741935 | 0.1125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.369637 | 303 | 13 | 46 | 23.307692 | 0.837696 | 0 | 0 | 0 | 0 | 0 | 0.02649 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.333333 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f14589148cdcd887c0aef6b10b5a56f95ad59841 | 1,389 | py | Python | my_UpNDown/UpNDown_env.py | yifjiang/UCB-review | e5c96c0cf1977012edb7d6ea02ac5362e766980e | [
"MIT"
] | 6 | 2018-05-01T14:25:37.000Z | 2021-07-19T15:36:57.000Z | my_UpNDown/UpNDown_env.py | yifjiang/UCB-review | e5c96c0cf1977012edb7d6ea02ac5362e766980e | [
"MIT"
] | null | null | null | my_UpNDown/UpNDown_env.py | yifjiang/UCB-review | e5c96c0cf1977012edb7d6ea02ac5362e766980e | [
"MIT"
] | null | null | null | from gym.envs.atari.atari_env import AtariEnv
from gym import spaces
import numpy as np
class my_UpNDownEnv(AtariEnv):
def __init__(self):
super(my_UpNDownEnv, self).\
__init__(game = 'up_n_down', obs_type = 'image', frameskip = 1, repeat_action_probability = 0.25)
self.observation_space = spaces.Box(low = 0, high = 255, shape = (840, 160, 3))
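		# Note: each call to _step below advances the emulator 4 frames and
		# stacks them vertically, hence 4 frames x 210 rows = 840 rows,
		# 160 columns, 3 color channels.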
def _step(self, action):
ob_list = np.zeros((840, 160, 3))
reward_sum = 0
for i in range(4):
observation, reward, done, info = \
super(my_UpNDownEnv, self)._step(action)
#print(reward)
#print(info)
# print(observation.shape)
# print(ob_list.shape)
ob_list[i * 210:(i+1)*210, :, :] = observation
# if i == 0:
# ob_list = observation
# else:
# ob_list = np.append(ob_list, observation, axis = 0)
reward_sum += reward
if done:
if i != 3:
for j in range(i+1, 4):
ob_list[j * 210:(j+1)*210, :, :]= observation
# ob_list = np.append(ob_list, observation, axis = 0)
reward_sum += reward
break
return ob_list, reward_sum/4, done, info
# return observation, reward_sum/4, done, info
| 38.583333 | 109 | 0.521238 | 170 | 1,389 | 4.052941 | 0.376471 | 0.087083 | 0.034833 | 0.060958 | 0.20029 | 0.148041 | 0.148041 | 0.148041 | 0.148041 | 0.148041 | 0 | 0.052332 | 0.367171 | 1,389 | 35 | 110 | 39.685714 | 0.731513 | 0.196544 | 0 | 0.086957 | 0 | 0 | 0.012658 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.130435 | 0 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f14691bf0dfdd285e473f195a1a0b4f5ae79472f | 3,813 | py | Python | src/anima_site.py | TheNetAdmin/AniMaid | 54e6b593f49561d7bfd08e117675f69ad059132a | [
"MIT"
] | null | null | null | src/anima_site.py | TheNetAdmin/AniMaid | 54e6b593f49561d7bfd08e117675f69ad059132a | [
"MIT"
] | 4 | 2021-04-01T00:36:06.000Z | 2021-12-11T02:08:49.000Z | src/anima_site.py | TheNetAdmin/AniMaid | 54e6b593f49561d7bfd08e117675f69ad059132a | [
"MIT"
] | null | null | null | import logging
import re
import requests
from dateutil.parser import parse as parse_time
from json import JSONDecodeError
class site:
def __init__(self):
pass
def parse_team(self, url: str) -> dict:
pass
class bangumi_moe_site(site):
def __init__(self):
self.logger = logging.getLogger("animaid.bangumi_moe_site")
def parse_team(self, url: str) -> dict:
if url.startswith("https") and "torrent" not in url:
raise Exception(
f'This is not a torrent url, as "torrent" is not part of the url. Click the anima title and use new page\'s url (should have "torrent" in it).'
)
torrent_id = url.split("/")[-1]
search_url = f"https://bangumi.moe/api/v2/torrent/{torrent_id}"
response = requests.get(url=search_url).json()
if "team" not in response.keys() or "_id" not in response["team"]:
raise Exception(
f"This record does not have a valid team info, "
f"try another anima record from the same team."
)
team_info = response["team"]
team_name = team_info["name"]
team_id = team_info["_id"]
print(f"The following team info is found:")
print(f" team name: {team_name}")
print(f" team id: {team_id}")
filename = response["content"][0][0]
print(f" filename: {filename}")
        auto_alias_matches = re.findall(r"\[[\w\s-]+\]", filename)
        auto_alias = auto_alias_matches[0] if auto_alias_matches else None
if auto_alias:
team_alias = auto_alias.replace("[", "").replace("]", "").replace(" ", "_")
print(f" team alias:{team_alias}")
else:
print(f"Please give this team a unique alias in English,")
team_alias = input(f"Input the team alias:")
team_alias = team_alias.strip()
team = {
"_id": team_alias,
"name": team_name,
"alias": team_alias,
"source": [
{
"site": "bangumi_moe",
"team_id": team_id,
"last_update": parse_time("2000").isoformat(),
}
],
}
return team
def _search(self, url, ignore_properties=["introduction"]):
try:
res = requests.get(url=url).json()
except JSONDecodeError as e:
self.logger.error(f"Anima site request is invalid, url: {url}")
raise Exception(f"Anima site request is invalid, url: {url}")
try:
res["torrents"] = sorted(
res["torrents"],
key=lambda x: parse_time(x["publish_time"]),
reverse=True,
)
except KeyError as e:
self.logger.error(f"Invalid response {res}")
raise e
for t in res["torrents"]:
for i in ignore_properties:
del t[i]
if len(res) == 0:
raise Exception(
f"No data responded, something is wrong with the request to bangumi.moe, url: {url}",
extra={"info": {"url": url}},
)
return res
def search_by_team(self, team, page, ignore_properties=["introduction"]):
url = f'https://bangumi.moe/api/v2/torrent/team/{team["team_id"]}?p={page+1}&LIMIT=500'
return self._search(url, ignore_properties)
def searcy_by_tag(self, tag, page, ignore_properties=["introduction"]):
url = f"https://bangumi.moe/api/v2/torrent/search?query=`{tag}`&p={page+1}&LIMIT=500"
return self._search(url, ignore_properties)
def search_by_torrent(self, torrent_id):
url = f"https://bangumi.moe/api/v2/torrent/{torrent_id}"
res = requests.get(url=url).json()
if len(res) == 0:
return None
return res
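# Minimal usage sketch (assumes network access to bangumi.moe; the torrent id
# below is a placeholder, not a real record):
#
#   site = bangumi_moe_site()
#   team = site.parse_team("https://bangumi.moe/torrent/<torrent-id>")
#   first_page = site.search_by_team(team["source"][0], page=0)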
| 35.635514 | 159 | 0.552583 | 475 | 3,813 | 4.296842 | 0.284211 | 0.039686 | 0.034297 | 0.031357 | 0.250367 | 0.250367 | 0.208721 | 0.183244 | 0.151886 | 0.151886 | 0 | 0.008481 | 0.319696 | 3,813 | 106 | 160 | 35.971698 | 0.778335 | 0 | 0 | 0.208791 | 0 | 0.032967 | 0.275636 | 0.006294 | 0 | 0 | 0 | 0 | 0 | 1 | 0.087912 | false | 0.021978 | 0.054945 | 0 | 0.230769 | 0.065934 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f147c644059fdd73863ea4fdd99a9a3c68be91b0 | 3,227 | py | Python | datareduction/TestLinearRegression.py | andresmasegosa/PRML-CoreSets | fb768debb15e3ff6f5b65b7224915a41c1493f3d | [
"MIT"
] | null | null | null | datareduction/TestLinearRegression.py | andresmasegosa/PRML-CoreSets | fb768debb15e3ff6f5b65b7224915a41c1493f3d | [
"MIT"
] | null | null | null | datareduction/TestLinearRegression.py | andresmasegosa/PRML-CoreSets | fb768debb15e3ff6f5b65b7224915a41c1493f3d | [
"MIT"
] | null | null | null | import inferpy as inf
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
from datareduction.variational_linear_regressor_DR import VariationalLinearRegressor_DR
from prml.rv import VariationalGaussianMixture
from prml.features import PolynomialFeatures
from prml.linear import (
VariationalLinearRegressor,
VariationalLogisticRegressor
)
np.random.seed(1234)
N=10000
K=50
D=10
# def create_toy_data(func, sample_size, std, domain=[0, 1]):
# x = np.linspace(domain[0], domain[1], sample_size)
# np.random.shuffle(x)
# t = func(x) + np.random.normal(scale=std, size=x.shape)
# return x, t
#
# def cubic(x):
# return x * (x - 5) * (x + 5)
#
# x_train, y_train = create_toy_data(cubic, N, 10., [-5, 5])
# x = np.linspace(-5, 5, 100)
# y = cubic(x)
X_train=np.ones((N,D+1))
X_train[0:int(N/2),:] = inf.models.Normal(0,1,dim = D+1).sample(int(N/2))
X_train[int(N/2):N,:] = inf.models.Normal(10,1,dim = D+1).sample(int(N/2))
w = np.random.rand(D+1)
y_train = X_train@w.T
X=np.ones((N,D+1))
X[0:int(N/2),:] = inf.models.Normal(0,1,dim = D+1).sample(int(N/2))
X[int(N/2):N,:] = inf.models.Normal(10,1,dim = D+1).sample(int(N/2))
y = X@w.T
#feature = PolynomialFeatures(degree=D)
#X_train = feature.transform(x_train)
#X = feature.transform(x)
vlr = VariationalLinearRegressor(beta=0.01)
vlr.fit(X_train, y_train)
y_mean, y_std = vlr.predict(X, return_std=True)
# plt.scatter(x_train, y_train, s=100, facecolor="none", edgecolor="b")
# plt.plot(x, y, c="g", label="$\sin(2\pi x)$")
# plt.plot(x, y_mean, c="r", label="prediction")
# plt.fill_between(x, y_mean - y_std, y_mean + y_std, alpha=0.2, color="pink")
# plt.legend()
# plt.show()
normal = inf.models.Normal(y_mean,y_std)
l = normal.log_prob(y)
print(np.sum(l))
y_repeated = np.repeat(np.expand_dims(y_train,axis=1),X_train.shape[1],axis=1)
XY_train = np.multiply(X_train,y_repeated)
# np.multiply(np.expand_dims(X_train,axis=2),np.expand_dims(X_train,axis=1))[1] == np.matmul(np.expand_dims(X_train[1],axis=1), np.expand_dims(X_train[1],axis=1).T)
XX_train = np.multiply(np.expand_dims(X_train,axis=2),np.expand_dims(X_train,axis=1))
XX_train = XX_train.reshape((XX_train.shape[0],-1))
XJoin_train = np.concatenate((XY_train,XX_train),axis=1)
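# Data-reduction note: for Bayesian linear regression the posterior depends on
# the data only through the sufficient statistics sum_n y_n x_n (the XY block)
# and sum_n x_n x_n^T (the flattened XX block). Running k-means on the
# per-sample statistics and re-weighting each centroid by its cluster size
# approximates those sums with only K representative points.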
kmeans = KMeans(n_clusters=K, random_state=0).fit(XJoin_train)
weights = np.asarray([sum(kmeans.labels_==x) for x in range(0, K)])
clusters_centers = np.multiply(kmeans.cluster_centers_,np.repeat(weights.reshape(K,1),kmeans.cluster_centers_.shape[1],axis=1))
clusters_sum = np.sum(clusters_centers,axis=0)
X_dr = {'XY': clusters_sum[0:(D+1)],'XX': clusters_sum[(D+1):(D+1)+(D+1)*(D+1)].reshape((D+1,D+1))}
vlr_dr = VariationalLinearRegressor_DR(beta=0.01)
vlr_dr.fit(X_dr)
y_mean_dr, y_std_dr = vlr_dr.predict(X, return_std=True)
# plt.scatter(x_train, y_train, s=100, facecolor="none", edgecolor="b")
# plt.plot(x, y, c="g", label="$\sin(2\pi x)$")
# plt.plot(x, y_mean, c="r", label="prediction")
# plt.fill_between(x, y_mean - y_std, y_mean + y_std, alpha=0.2, color="pink")
# plt.legend()
# plt.show()
normal_dr = inf.models.Normal(y_mean_dr,y_std_dr)
l_dr = normal_dr.log_prob(y)
print(np.sum(l_dr))
| 31.330097 | 164 | 0.704059 | 606 | 3,227 | 3.580858 | 0.20297 | 0.04977 | 0.018433 | 0.024885 | 0.384793 | 0.368664 | 0.343779 | 0.326267 | 0.304147 | 0.304147 | 0 | 0.035294 | 0.104431 | 3,227 | 102 | 165 | 31.637255 | 0.715571 | 0.356678 | 0 | 0 | 0 | 0 | 0.001954 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.191489 | 0 | 0.191489 | 0.042553 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f14b7fd1ac391cc7123c9a3508a739a92e9a94f9 | 1,277 | py | Python | fast/day02/day02.py | JerryDot/advent-2021-py | c3eb44b5989c276de0b5e721d3b8dfa1698796f1 | [
"MIT"
] | null | null | null | fast/day02/day02.py | JerryDot/advent-2021-py | c3eb44b5989c276de0b5e721d3b8dfa1698796f1 | [
"MIT"
] | null | null | null | fast/day02/day02.py | JerryDot/advent-2021-py | c3eb44b5989c276de0b5e721d3b8dfa1698796f1 | [
"MIT"
] | null | null | null | from typing import Iterable, List, Tuple
"""
----------> (1,0)
|
|
|
|
v
(0,1)
"""
def parse_input() -> List[Tuple[int, int]]:
with open('day02.txt', 'rb') as f:
INPUT = map(lambda x: x.strip(), map(lambda x: x.decode("utf-8"), f.readlines()))
moves = []
for entry in INPUT:
direction, size = entry.split()[0], int(entry.split()[1])
if direction == "forward":
moves.append((size, 0))
elif direction == "backward":
moves.append((-size, 0))
elif direction == "down":
moves.append((0, size))
elif direction == "up":
moves.append((0, -size))
else:
raise Exception("This should not occur")
return moves
def part_one(p_input: List[Tuple[int, int]]) -> int:
position = [0, 0]
for move in p_input:
position[0] += move[0]
position[1] += move[1]
return position[0] * position[1]
def part_two(p_input: List[Tuple[int, int]]) -> int:
position = [0, 0]
aim = 0
for move in p_input:
position[0] += move[0]
position[1] += move[0] * aim
aim += move[1]
return position[0] * position[1]
if __name__ == "__main__":
print(part_one(parse_input()))
print(part_two(parse_input()))
| 22.803571 | 89 | 0.539546 | 172 | 1,277 | 3.895349 | 0.348837 | 0.080597 | 0.059701 | 0.076119 | 0.425373 | 0.395522 | 0.308955 | 0.228358 | 0.228358 | 0.228358 | 0 | 0.033806 | 0.281911 | 1,277 | 55 | 90 | 23.218182 | 0.696838 | 0 | 0 | 0.228571 | 0 | 0 | 0.053485 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.028571 | 0 | 0.2 | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f14c0cd89a3a51b3ae10b34f33f09e21ce1188ac | 1,300 | py | Python | a.py | sarthak7838/Music-Recommender-Web-App | cda0d1a73915495b05ef9b094fad509f34afc2b6 | [
"MIT"
] | null | null | null | a.py | sarthak7838/Music-Recommender-Web-App | cda0d1a73915495b05ef9b094fad509f34afc2b6 | [
"MIT"
] | null | null | null | a.py | sarthak7838/Music-Recommender-Web-App | cda0d1a73915495b05ef9b094fad509f34afc2b6 | [
"MIT"
] | 1 | 2021-12-03T20:51:50.000Z | 2021-12-03T20:51:50.000Z | import requests as req
from bs4 import BeautifulSoup
import os
import os.path
import shutil
try:
r1=req.get("https://gaana.com/playlist/gaana-dj-best-of-badshah")
c1=r1.content
	print(r1)
soup1=BeautifulSoup(c1,"html.parser")
F1=soup1.find_all("a",{"class":"sng_c "})
#print(F1)
namelist=[]
#count=0
for i in F1:
print (i.text)
x=i.text
l=x.split()
name=""
url="https://www.youtube.com/results?search_query="
for j in l:
url=url+j+"+"
name=name+j+"_"
#print(":",url)
url=url[:-1]
name=name[:-1]
namelist.append(name)
#print(url,name)
r2=req.get(url)
c2=r2.content
soup2=BeautifulSoup(c2,"html.parser")
#F2=soup2.find_all("a",{"class":"yt-simple-endpoint"})
#print(F2[0]['href'])
#print(F2)
#F2 = soup2.find_all('a',href=True)
#print(link[40]['href'])
F2 =soup2.findAll(attrs={'class':'yt-uix-tile-link'})
link="https://www.youtube.com"+F2[0]['href']
command="youtube-dl --extract-audio --audio-format mp3 "+link+" -o "+name+".mp3"
os.system(command)
#count=count+1
except req.exceptions.RequestException as e:
print (e)
'''
for n in namelist:
if os.path.isfile(n):
n=n+".mp3"
src = n
dst = 'data/romance/{}'.format(n)
shutil.move(src, dst)
filelist=os.listdir('.')
for f in filelist:
os.remove(f)
'''
| 19.402985 | 82 | 0.636923 | 211 | 1,300 | 3.895735 | 0.445498 | 0.025547 | 0.029197 | 0.03163 | 0.036496 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031732 | 0.151538 | 1,300 | 66 | 83 | 19.69697 | 0.713509 | 0.151538 | 0 | 0 | 0 | 0 | 0.26 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.151515 | 0 | 0.151515 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f150530daa19fd258ee7528230b71a750194f14d | 1,041 | py | Python | data/transcoder_evaluation_gfg/python/NUMBER_SUBSTRINGS_DIVISIBLE_4_STRING_INTEGERS.py | mxl1n/CodeGen | e5101dd5c5e9c3720c70c80f78b18f13e118335a | [
"MIT"
] | 241 | 2021-07-20T08:35:20.000Z | 2022-03-31T02:39:08.000Z | data/transcoder_evaluation_gfg/python/NUMBER_SUBSTRINGS_DIVISIBLE_4_STRING_INTEGERS.py | mxl1n/CodeGen | e5101dd5c5e9c3720c70c80f78b18f13e118335a | [
"MIT"
] | 49 | 2021-07-22T23:18:42.000Z | 2022-03-24T09:15:26.000Z | data/transcoder_evaluation_gfg/python/NUMBER_SUBSTRINGS_DIVISIBLE_4_STRING_INTEGERS.py | mxl1n/CodeGen | e5101dd5c5e9c3720c70c80f78b18f13e118335a | [
"MIT"
] | 71 | 2021-07-21T05:17:52.000Z | 2022-03-29T23:49:28.000Z | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
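# Idea: a digit string is divisible by 4 iff the number formed by its last two
# digits is divisible by 4 (single-character substrings count when the digit is
# '0', '4', or '8'). For every position i where the two-digit number
# s[i]s[i+1] is divisible by 4, there are i + 1 substrings ending at i + 1,
# which explains the "count + i + 1" update below.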
def f_gold ( s ) :
n = len ( s )
count = 0 ;
for i in range ( 0 , n , 1 ) :
if ( s [ i ] == '4' or s [ i ] == '8' or s [ i ] == '0' ) :
count += 1
for i in range ( 0 , n - 1 , 1 ) :
h = ( ord ( s [ i ] ) - ord ( '0' ) ) * 10 + ( ord ( s [ i + 1 ] ) - ord ( '0' ) )
if ( h % 4 == 0 ) :
count = count + i + 1
return count
#TOFILL
if __name__ == '__main__':
param = [
('Qaq',),
('9400761825850',),
('0011001111',),
('lasWqrLRq',),
('5662',),
('110',),
(' tOYKf',),
('6536991235305',),
('11111',),
('uZftT iDHcYiCt',)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param))) | 26.692308 | 90 | 0.480307 | 137 | 1,041 | 3.525547 | 0.481752 | 0.020704 | 0.020704 | 0.045549 | 0.057971 | 0.057971 | 0.057971 | 0 | 0 | 0 | 0 | 0.104348 | 0.337176 | 1,041 | 39 | 91 | 26.692308 | 0.595652 | 0.177714 | 0 | 0 | 0 | 0 | 0.128386 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0 | 0 | 0.068966 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f156bf8245bfd78557645c9f4c7188cd9870a78f | 846 | py | Python | IPython/frontend/html/notebook/fabfile.py | 3kwa/ipython | a5922fd39ed4b2067d64b285125278a850bb129f | [
"BSD-3-Clause-Clear"
] | 1 | 2022-03-13T23:06:43.000Z | 2022-03-13T23:06:43.000Z | IPython/frontend/html/notebook/fabfile.py | 3kwa/ipython | a5922fd39ed4b2067d64b285125278a850bb129f | [
"BSD-3-Clause-Clear"
] | null | null | null | IPython/frontend/html/notebook/fabfile.py | 3kwa/ipython | a5922fd39ed4b2067d64b285125278a850bb129f | [
"BSD-3-Clause-Clear"
] | null | null | null | """ fabfile to prepare the notebook """
from fabric.api import local,lcd
from fabric.utils import abort
import os
static_dir = 'static'
components_dir = os.path.join(static_dir,'components')
def test_component(name):
if not os.path.exists(os.path.join(components_dir,name)):
components()
def components():
"""install components with bower"""
with lcd(static_dir):
local('bower install')
def css(minify=True):
"""generate the css from less files"""
test_component('bootstrap')
test_component('less.js')
if minify not in ['True', 'False', True, False]:
abort('minify must be Boolean')
minify = (minify in ['True',True])
    min_flag = '-x' if minify else ''
with lcd(static_dir):
local('lessc {min_flag} less/style.less css/style.min.css'.format(min_flag=min_flag))
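# Assumed invocation (task names as defined in this fabfile):
#   fab components        # install front-end components with bower
#   fab css               # compile and minify the stylesheets
#   fab css:minify=False  # compile without minification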
| 27.290323 | 93 | 0.674941 | 121 | 846 | 4.61157 | 0.413223 | 0.064516 | 0.035842 | 0.057348 | 0.075269 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.187943 | 846 | 30 | 94 | 28.2 | 0.812227 | 0.111111 | 0 | 0.1 | 0 | 0 | 0.180082 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.15 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f15902568327420d51680a127a7bed4e1c5d6fac | 2,028 | py | Python | qchem/tests/test_molecular_hamiltonian.py | ryanlevy/pennylane | fb03b09d17267ebd0b9050432f9eeb84b5dff200 | [
"Apache-2.0"
] | 3 | 2021-02-22T18:30:55.000Z | 2021-02-23T10:54:58.000Z | qchem/tests/test_molecular_hamiltonian.py | ryanlevy/pennylane | fb03b09d17267ebd0b9050432f9eeb84b5dff200 | [
"Apache-2.0"
] | null | null | null | qchem/tests/test_molecular_hamiltonian.py | ryanlevy/pennylane | fb03b09d17267ebd0b9050432f9eeb84b5dff200 | [
"Apache-2.0"
] | 1 | 2021-03-27T09:03:15.000Z | 2021-03-27T09:03:15.000Z | import os
import pytest
from pennylane import qchem
from pennylane.vqe import Hamiltonian
import numpy as np
symbols = ["C", "C", "N", "H", "H", "H", "H", "H"]
coordinates = np.array(
[
0.68219113,
-0.85415621,
-1.04123909,
-1.34926445,
0.23621577,
0.61794044,
1.29068294,
0.25133357,
1.40784596,
0.83525895,
-2.88939124,
-1.16974047,
1.26989596,
0.19275206,
-2.69852891,
-2.57758643,
-1.05824663,
1.61949529,
-2.17129532,
2.04090421,
0.11338357,
2.06547065,
2.00877887,
1.20186581,
]
)
@pytest.mark.parametrize(
(
"charge",
"mult",
"package",
"nact_els",
"nact_orbs",
"mapping",
),
[
(0, 1, "psi4", 2, 2, "jordan_WIGNER"),
(1, 2, "pyscf", 3, 4, "BRAVYI_kitaev"),
(-1, 2, "pyscf", 1, 2, "jordan_WIGNER"),
(2, 1, "psi4", 2, 2, "BRAVYI_kitaev"),
],
)
def test_building_hamiltonian(
charge,
mult,
package,
nact_els,
nact_orbs,
mapping,
psi4_support,
requires_babel,
tmpdir,
):
r"""Test that the generated Hamiltonian `built_hamiltonian` is an instance of the PennyLane
Hamiltonian class and the correctness of the total number of qubits required to run the
quantum simulation. The latter is tested for different values of the molecule's charge and
for active spaces with different size"""
if package == "psi4" and not psi4_support:
pytest.skip("Skipped, no Psi4 support")
built_hamiltonian, qubits = qchem.molecular_hamiltonian(
symbols,
coordinates,
charge=charge,
mult=mult,
package=package,
active_electrons=nact_els,
active_orbitals=nact_orbs,
mapping=mapping,
outpath=tmpdir.strpath,
)
assert isinstance(built_hamiltonian, Hamiltonian)
assert qubits == 2 * nact_orbs
| 22.043478 | 95 | 0.569527 | 233 | 2,028 | 4.862661 | 0.476395 | 0.007061 | 0.007944 | 0.007061 | 0.068844 | 0.068844 | 0.068844 | 0.068844 | 0 | 0 | 0 | 0.172688 | 0.317554 | 2,028 | 91 | 96 | 22.285714 | 0.645954 | 0.149901 | 0 | 0 | 0 | 0 | 0.086166 | 0 | 0 | 0 | 0 | 0 | 0.025974 | 1 | 0.012987 | false | 0 | 0.064935 | 0 | 0.077922 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f1595406821dd1cc1d957a18aeecca6864b3bc11 | 1,106 | py | Python | lims_dashboard/utils.py | alneberg/lims_dashboard | 7a833a7d8e9548fb5012dad814e6c9b807454fe7 | [
"MIT"
] | null | null | null | lims_dashboard/utils.py | alneberg/lims_dashboard | 7a833a7d8e9548fb5012dad814e6c9b807454fe7 | [
"MIT"
] | null | null | null | lims_dashboard/utils.py | alneberg/lims_dashboard | 7a833a7d8e9548fb5012dad814e6c9b807454fe7 | [
"MIT"
] | null | null | null |
import os
import subprocess
import traceback
import logging
logger = logging.getLogger("lims_dashboard")
def run_script(app, name, options):
cwd = os.getcwd()
os.chdir('{0}/uploads'.format(app.root_path))
conf_obj = app.config['my_scripts'][name]
command = [':']
if conf_obj['type'] == 'python':
try:
python_exec = conf_obj['python_exec']
except KeyError: # No python exec specified in script conf
python_exec = app.config['python_exec']
command = [python_exec, os.path.join(app.config['SCRIPT_FOLDER'],app.config['my_scripts'][name]['script'])]
command.extend(options.split())
logger.info("About to run command: {}".format(" ".join(command)))
try:
handle = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = handle.communicate()
returncode = handle.returncode
except Exception:
returncode = -1
out = "Running the command: {}".format(" ".join(command))
err = traceback.format_exc()
os.chdir(cwd)
return returncode, out, err
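# Usage sketch (assumes a Flask-style `app` configured as read above, with
# SCRIPT_FOLDER, python_exec and a my_scripts entry named "my_script"):
#   rc, out, err = run_script(app, "my_script", "--flag value")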
| 30.722222 | 115 | 0.644665 | 134 | 1,106 | 5.208955 | 0.455224 | 0.08596 | 0.031519 | 0.051576 | 0.063037 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002304 | 0.21519 | 1,106 | 35 | 116 | 31.6 | 0.801843 | 0.035262 | 0 | 0.071429 | 0 | 0 | 0.137218 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.142857 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f15a182c95aa4785e3fe9ddf7f1191498aa88c8a | 5,152 | py | Python | chat/indico_chat/models/chatrooms.py | aristofanischionis/indico-plugins | abd67d8f5173aa0e2d80613cf8bef415e332d773 | [
"MIT"
] | null | null | null | chat/indico_chat/models/chatrooms.py | aristofanischionis/indico-plugins | abd67d8f5173aa0e2d80613cf8bef415e332d773 | [
"MIT"
] | null | null | null | chat/indico_chat/models/chatrooms.py | aristofanischionis/indico-plugins | abd67d8f5173aa0e2d80613cf8bef415e332d773 | [
"MIT"
] | null | null | null | # This file is part of the Indico plugins.
# Copyright (C) 2002 - 2019 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
from __future__ import unicode_literals
from indico.core.db.sqlalchemy import UTCDateTime, db
from indico.util.date_time import now_utc
from indico.util.string import return_ascii
from indico_chat.xmpp import delete_room
class Chatroom(db.Model):
__tablename__ = 'chatrooms'
__table_args__ = (db.UniqueConstraint('jid_node', 'custom_server'),
{'schema': 'plugin_chat'})
#: Chatroom ID
id = db.Column(
db.Integer,
primary_key=True
)
#: Node of the chatroom's JID (the part before `@domain`)
jid_node = db.Column(
db.String,
nullable=False
)
#: Name of the chatroom
name = db.Column(
db.String,
nullable=False
)
#: Description of the chatroom
description = db.Column(
db.Text,
nullable=False,
default=''
)
#: Password to join the room
password = db.Column(
db.String,
nullable=False,
default=''
)
#: Custom Jabber MUC server hostname
custom_server = db.Column(
db.String,
nullable=False,
default=''
)
#: ID of the creator
created_by_id = db.Column(
db.Integer,
db.ForeignKey('users.users.id'),
index=True,
nullable=False
)
#: Creation timestamp of the chatroom
created_dt = db.Column(
UTCDateTime,
nullable=False,
default=now_utc
)
#: Modification timestamp of the chatroom
modified_dt = db.Column(
UTCDateTime
)
#: The user who created the chatroom
created_by_user = db.relationship(
'User',
lazy=True,
backref=db.backref(
'chatrooms',
lazy='dynamic'
)
)
@property
def locator(self):
return {'chatroom_id': self.id}
@property
def server(self):
"""The server name of the chatroom.
Usually the default one unless a custom one is set.
"""
from indico_chat.plugin import ChatPlugin
return self.custom_server or ChatPlugin.settings.get('muc_server')
@property
def jid(self):
return '{}@{}'.format(self.jid_node, self.server)
@return_ascii
def __repr__(self):
server = self.server
if self.custom_server:
server = '!' + server
return '<Chatroom({}, {}, {}, {})>'.format(self.id, self.name, self.jid_node, server)
class ChatroomEventAssociation(db.Model):
__tablename__ = 'chatroom_events'
__table_args__ = {'schema': 'plugin_chat'}
#: ID of the event
event_id = db.Column(
db.Integer,
db.ForeignKey('events.events.id'),
primary_key=True,
index=True,
autoincrement=False
)
#: ID of the chatroom
chatroom_id = db.Column(
db.Integer,
db.ForeignKey('plugin_chat.chatrooms.id'),
primary_key=True,
index=True
)
#: If the chatroom should be hidden on the event page
hidden = db.Column(
db.Boolean,
nullable=False,
default=False
)
#: If the password should be visible on the event page
show_password = db.Column(
db.Boolean,
nullable=False,
default=False
)
#: The associated :class:Chatroom
chatroom = db.relationship(
'Chatroom',
lazy=False,
backref=db.backref('events', cascade='all, delete-orphan')
)
#: The associated event
event = db.relationship(
'Event',
lazy=True,
backref=db.backref(
'chatroom_associations',
lazy='dynamic'
)
)
@property
def locator(self):
return dict(self.chatroom.locator, confId=self.event_id)
@return_ascii
def __repr__(self):
return '<ChatroomEventAssociation({}, {})>'.format(self.event_id, self.chatroom)
@classmethod
def find_for_event(cls, event, include_hidden=False, **kwargs):
"""Returns a Query that retrieves the chatrooms for an event
:param event: an indico event (with a numeric ID)
:param include_hidden: if hidden chatrooms should be included, too
:param kwargs: extra kwargs to pass to ``find()``
"""
query = cls.find(event_id=event.id, **kwargs)
if not include_hidden:
query = query.filter(~cls.hidden)
return query
def delete(self, reason=''):
"""Deletes the event chatroom and if necessary the chatroom, too.
:param reason: reason for the deletion
:return: True if the associated chatroom was also
deleted, otherwise False
"""
db.session.delete(self)
db.session.flush()
if not self.chatroom.events:
db.session.delete(self.chatroom)
db.session.flush()
delete_room(self.chatroom, reason)
return True
return False
| 26.694301 | 93 | 0.60132 | 598 | 5,152 | 5.053512 | 0.279264 | 0.034414 | 0.0364 | 0.015884 | 0.180675 | 0.143944 | 0.108206 | 0.027796 | 0 | 0 | 0 | 0.002218 | 0.299884 | 5,152 | 192 | 94 | 26.833333 | 0.835597 | 0.240101 | 0 | 0.343284 | 0 | 0 | 0.080369 | 0.019499 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059701 | false | 0.014925 | 0.044776 | 0.029851 | 0.335821 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f163e4eec92c5b9acccead12e1775a3ab901332a | 532 | py | Python | src/Blog/assets.py | sadmanbd/wagtailblog | adeccb29826200cb1bedc658a0f1c57c2f705d0e | [
"MIT"
] | 1 | 2020-04-20T05:38:01.000Z | 2020-04-20T05:38:01.000Z | src/Blog/assets.py | sadmanbd/wagtailblog | adeccb29826200cb1bedc658a0f1c57c2f705d0e | [
"MIT"
] | 8 | 2020-02-11T21:41:52.000Z | 2022-01-13T00:33:02.000Z | src/Blog/assets.py | sadmanbd/wagtailblog | adeccb29826200cb1bedc658a0f1c57c2f705d0e | [
"MIT"
] | null | null | null | from django.conf import settings
from django_assets import Bundle, register
from webassets.filter import get_filter
libsass = get_filter("libsass", style="compressed")
css_libs = Bundle(
settings.BASE_DIR + "/assets/styles/css/libs/normalize.css",
filters="cssutils",
output="css/libs.css"
)
css_custom = Bundle(
settings.BASE_DIR + "/assets/styles/sass/base.sass",
filters=libsass,
output="css/style.css",
depends="/**/*.sass",
)
register("css_libs", css_libs)
register("css_custom", css_custom)
| 21.28 | 64 | 0.716165 | 70 | 532 | 5.285714 | 0.371429 | 0.094595 | 0.086486 | 0.113514 | 0.178378 | 0.178378 | 0 | 0 | 0 | 0 | 0 | 0 | 0.140977 | 532 | 24 | 65 | 22.166667 | 0.809628 | 0 | 0 | 0 | 0 | 0 | 0.270677 | 0.12406 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.176471 | 0 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f165f215e0f29ecdf542caf1a3d19feab64687a9 | 2,864 | py | Python | lib/tools/job_launcher.py | anonconda/RTDmaker | ac2a070ba26933194aa13041e1fdf92cbc39f201 | [
"MIT"
] | 1 | 2021-04-13T18:30:14.000Z | 2021-04-13T18:30:14.000Z | lib/tools/job_launcher.py | anonconda/RTDmaker | ac2a070ba26933194aa13041e1fdf92cbc39f201 | [
"MIT"
] | null | null | null | lib/tools/job_launcher.py | anonconda/RTDmaker | ac2a070ba26933194aa13041e1fdf92cbc39f201 | [
"MIT"
] | null | null | null | import os
import sys
import time
import multiprocessing
from subprocess import Popen
from multiprocessing.pool import ThreadPool
def work(command, logfile, job_id, tot):
line_info = f'\n{time.asctime()} Starting Job {job_id} (out of {tot})'
line_command = f'\n{time.asctime()} Job {job_id} command: {command}\n'
with open(logfile, "a+") as fh:
fh.write(line_info)
fh.write(line_command)
try:
process = Popen(command, shell=True)
process.wait()
# If the output of the process needs further processing/parsing, it can be done here
# Source: https://stackoverflow.com/questions/26774781/
# python-multiple-subprocess-with-a-pool-queue-recover-output-as-soon-as-one-finis
except Exception as e:
line_error = f'\n{time.asctime()} Error while executing Job {job_id}:\n'
print(line_error)
with open(logfile, "a+") as fh:
fh.write(line_error)
fh.write(str(e) + "\n")
line_end = f'\n{time.asctime()} Job {job_id} completed!\n'
print(line_end)
with open(logfile, "a+") as fh:
fh.write(line_end)
def launch_jobs(commands_list, logfile=None, n_jobs=None, core_proportion=(1, 3), max_cores=8, log_dir=None):
if not log_dir:
log_dir = os.getcwd()
# Create a log file to track the completed jobs
if not logfile:
logfile = os.path.join(log_dir, f"{time.asctime().replace(' ', '_')}_jobs_logfile.txt")
# Attach ID to the jobs to track their execution
indexed_commands = [(i + 1, command) for i, command in enumerate(commands_list)]
tot = len(indexed_commands)
if n_jobs:
if n_jobs > multiprocessing.cpu_count():
sys.exit(f"The system does not posses that many cores. It must be {multiprocessing.cpu_count()} or less.")
else:
n_cores = n_jobs
else:
# Use a predetermined proportion of available total cores (ex: one third (1/3) of the available cores)
n_cores = int((multiprocessing.cpu_count() / core_proportion[1]) * core_proportion[0])
# Limit the maximum number of cores to use at a time
if n_cores > max_cores:
n_cores = max_cores
line_start = f'{time.asctime()} Launching {tot} jobs, using {n_cores} cores\n'
print(line_start)
with open(logfile, "a+") as fh:
fh.write(line_start)
# Launch "n" number of jobs at a time; whenever a job is finish, launch a new one
# The number of jobs is determined by the number of available/selected cores to use
tp = ThreadPool(n_cores)
for (job_id, command) in indexed_commands:
tp.apply_async(work, (command, logfile, job_id, tot, ))
tp.close()
tp.join()
line_end = f'\n{time.asctime()} All Jobs completed! ({tot})\n'
print(line_end)
with open(logfile, "a+") as fh:
fh.write(line_end)
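# Minimal usage sketch (assumes a POSIX shell; plain `echo` commands stand in
# for real jobs):
#
#   if __name__ == "__main__":
#       commands = ["echo job-{}".format(i) for i in range(4)]
#       launch_jobs(commands, n_jobs=2)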
| 34.506024 | 118 | 0.651885 | 432 | 2,864 | 4.19213 | 0.331019 | 0.019326 | 0.036444 | 0.035892 | 0.170072 | 0.170072 | 0.126449 | 0.103258 | 0.103258 | 0.051905 | 0 | 0.007273 | 0.231844 | 2,864 | 82 | 119 | 34.926829 | 0.815909 | 0.217877 | 0 | 0.203704 | 0 | 0 | 0.212108 | 0.034081 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.111111 | 0 | 0.148148 | 0.074074 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f1698183cab47657356ca5d40e4c3268c220a1ca | 2,388 | py | Python | malware.py | c0zzy/semi-supervised-ann | 5b458774388d79f64b19ca696afb0d42ed403731 | [
"MIT"
] | null | null | null | malware.py | c0zzy/semi-supervised-ann | 5b458774388d79f64b19ca696afb0d42ed403731 | [
"MIT"
] | null | null | null | malware.py | c0zzy/semi-supervised-ann | 5b458774388d79f64b19ca696afb0d42ed403731 | [
"MIT"
] | null | null | null | import copy
from alg.semi_supervised import SemiSupervised
from lib.ops import evaluate
from lib.utils import load_avast_weeks_pca, parse_arguments
args = parse_arguments()
dataset_avast_pca = {
'name': 'avast_pca',
'input_size': 128,
'num_classes': 5,
'mlp_arch': [96, 64, 32],
'visualise_funcs': ['losses']
}
dataset_avast_pca_binary = copy.deepcopy(dataset_avast_pca)
dataset_avast_pca_binary['num_classes'] = 2
ds = dataset_avast_pca_binary if args.binary else dataset_avast_pca
if args.m: # can't load matplotlib in metacentrum
ds['visualise_funcs'] = []
def train(run_nmb, x, y, x_un=None, y_un=None, x_test=None, y_test=None):
semi_sup = SemiSupervised(
dataset_params=ds,
num_epoch=100,
ssl_method=args.method,
options=args.options,
hyper_par=args.hyper_par,
save_results=False,
imbalanced=False,
should_evaluate=True
)
semi_sup.set_train_data(x, y)
if x_test is not None and y_test is not None:
semi_sup.set_test_data(x_test, y_test)
if x_un is not None:
semi_sup.set_unsupervised_data(x_un)
else:
semi_sup.split_sup_unsup(args.ratio)
semi_sup.prepare_train_test_data()
model = semi_sup.train(run_nmb)
return model
def train_eval(run_nmb):
x, y, x_test, y_test = load_avast_weeks_pca(args.train_weeks, 10000 + 5000, 5000, args.binary)
print('y_test: ', y_test[:10])
model = train(run_nmb, x, y, x_test=x_test, y_test=y_test)
test_acc = evaluate(model, ds['num_classes'], x_test, y_test, hot=False)
return test_acc
def main():
with open(args.out_path + 'results.out', 'w') as f:
print('weeks:', args.train_weeks, file=f)
print('ratio:', args.ratio, file=f)
print('ssl:', args.method, file=f)
print(args.hyper_par, file=f)
print(args.options, file=f)
runs = args.runs
accuracies = []
for i in range(runs):
acc = train_eval(i)
print('Final test acc:', acc)
with open(args.out_path + 'results.out', 'a') as f:
print(i, acc, file=f)
accuracies.append(acc)
with open(args.out_path + 'results.out', 'a') as f:
print('min', min(accuracies), file=f)
print('max', max(accuracies), file=f)
print('avg', sum(accuracies) / runs, file=f)
if __name__ == '__main__':
main()
| 27.767442 | 98 | 0.652429 | 366 | 2,388 | 3.986339 | 0.300546 | 0.030843 | 0.061686 | 0.027416 | 0.132968 | 0.132968 | 0.07608 | 0.056203 | 0.056203 | 0.056203 | 0 | 0.015625 | 0.222781 | 2,388 | 85 | 99 | 28.094118 | 0.770474 | 0.015075 | 0 | 0.030769 | 0 | 0 | 0.081702 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046154 | false | 0 | 0.061538 | 0 | 0.138462 | 0.169231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f16aaf2fc0ac983255c89745ef68d2c5983e2b39 | 593 | py | Python | phishfry/remediation_result.py | krayzpipes/phishfry | 6da69548ae6970e2bc773d112509a7c94ce0e265 | [
"Apache-2.0"
] | null | null | null | phishfry/remediation_result.py | krayzpipes/phishfry | 6da69548ae6970e2bc773d112509a7c94ce0e265 | [
"Apache-2.0"
] | 3 | 2019-12-31T00:12:12.000Z | 2020-01-03T13:43:30.000Z | phishfry/remediation_result.py | krayzpipes/phishfry | 6da69548ae6970e2bc773d112509a7c94ce0e265 | [
"Apache-2.0"
] | 2 | 2019-02-12T21:01:20.000Z | 2019-04-22T17:41:46.000Z | import logging
log = logging.getLogger(__name__)
class RemediationResult(object):
def __init__(self, address, message_id, mailbox_type, action, success=True, message=None):
self.address = address
self.message_id = message_id
self.mailbox_type = mailbox_type
self.success = success
self.message = message
self.owner = None
self.members = []
self.forwards = []
self.action = action
def result(self, message, success=False):
log.info(message)
self.success = success
self.message = message
| 28.238095 | 94 | 0.640809 | 66 | 593 | 5.545455 | 0.393939 | 0.120219 | 0.098361 | 0.120219 | 0.196721 | 0.196721 | 0 | 0 | 0 | 0 | 0 | 0 | 0.269815 | 593 | 20 | 95 | 29.65 | 0.845266 | 0 | 0 | 0.235294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.058824 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f16b40ca452b4c54cacfb36dfaca7d833512d61a | 10,452 | py | Python | psyIP.py | chocosobo/psypy | ebaa9e59a70117acf3182f86d7ab7fc3c40338c3 | [
"MIT"
] | 6 | 2015-07-24T05:08:52.000Z | 2017-12-08T04:39:22.000Z | psyIP.py | chocosobo/psypy | ebaa9e59a70117acf3182f86d7ab7fc3c40338c3 | [
"MIT"
] | 1 | 2017-08-23T21:40:17.000Z | 2017-09-27T00:32:55.000Z | psyIP.py | chocosobo/psypy | ebaa9e59a70117acf3182f86d7ab7fc3c40338c3 | [
"MIT"
] | 7 | 2019-09-14T18:16:05.000Z | 2021-11-26T15:34:26.000Z | import math as m
# All functions expect base SI units for any arguments given
# DBT - Dry bulb temperature - Degrees Rankine, R
# DPT - Dew point temperature - Degress Rankine, R
# H - Specific enthalpy - British thermal unit per pound mass,
# Btu/lbm
# P - Atmospheric pressure - Pounds force per square inch, psi
# Pw - Water vapor partial pressure - Pounds force per square inch, psi
# RH - Relative humidity - Decimal (i.e. not a percentage)
# V - Specific volume - Cubic feet per pound mass, ft^3/lbm
# W - Humidity ratio - pounds mass per pound mass, lbm/lbm
# WBT - Wet bulb temperature - Degrees Rankine, R
# Minimum dry bulb temperature
Min_DBT=491.67
# Maximum dry bulb temperature
Max_DBT=851.67
# Convergence tolerance
TOL=0.0000005
def __DBT_H_RH_P(H, RH, P):
[DBTa, DBTb]=[Min_DBT, Max_DBT]
DBT=(DBTa+DBTb)/2
while DBTb-DBTa>TOL:
ya=__W_DBT_RH_P(DBTa, RH, P)-__W_DBT_H(DBTa, H)
y=__W_DBT_RH_P(DBT, RH, P)-__W_DBT_H(DBT, H)
if __is_positive(y)==__is_positive(ya):
DBTa=DBT
else:
DBTb=DBT
DBT=(DBTa+DBTb)/2
return DBT
def __DBT_H_V_P(H, V, P):
[DBTa, DBTb]=[Min_DBT, Max_DBT]
DBT=(DBTa+DBTb)/2
while DBTb-DBTa>TOL:
ya=__W_DBT_V_P(DBTa, V, P)-__W_DBT_H(DBTa, H)
y=__W_DBT_V_P(DBT, V, P)-__W_DBT_H(DBT, H)
if __is_positive(y)==__is_positive(ya):
DBTa=DBT
else:
DBTb=DBT
DBT=(DBTa+DBTb)/2
return DBT
def __DBT_H_W(H, W):
[DBTa, DBTb]=[Min_DBT, Max_DBT]
DBT=(DBTa+DBTb)/2
while DBTb-DBTa>TOL:
ya=W-__W_DBT_H(DBTa, H)
y=W-__W_DBT_H(DBT, H)
if __is_positive(y)==__is_positive(ya):
DBTa=DBT
else:
DBTb=DBT
DBT=(DBTa+DBTb)/2
return DBT
def __DBT_H_WBT_P(H, WBT, P):
[DBTa, DBTb]=[Min_DBT, Max_DBT]
DBT=(DBTa+DBTb)/2
while DBTb-DBTa>TOL:
ya=__W_DBT_WBT_P(DBTa, WBT, P)-__W_DBT_H(DBTa, H)
y=__W_DBT_WBT_P(DBT, WBT, P)-__W_DBT_H(DBT, H)
if __is_positive(y)==__is_positive(ya):
DBTa=DBT
else:
DBTb=DBT
DBT=(DBTa+DBTb)/2
return DBT
def __DBT_RH_V_P(RH, V, P):
[DBTa, DBTb]=[Min_DBT, Max_DBT]
DBT=(DBTa+DBTb)/2
while DBTb-DBTa>TOL:
ya=__W_DBT_RH_P(DBTa, RH, P)-__W_DBT_V_P(DBTa, V, P)
y=__W_DBT_RH_P(DBT, RH, P)-__W_DBT_V_P(DBT, V, P)
if __is_positive(y)==__is_positive(ya):
DBTa=DBT
else:
DBTb=DBT
DBT=(DBTa+DBTb)/2
return DBT
def __DBT_RH_W_P(RH, W, P):
[DBTa, DBTb]=[Min_DBT, Max_DBT]
DBT=(DBTa+DBTb)/2
while DBTb-DBTa>TOL:
ya=__W_DBT_RH_P(DBTa, RH, P)-W
y=__W_DBT_RH_P(DBT, RH, P)-W
if __is_positive(y)==__is_positive(ya):
DBTa=DBT
else:
DBTb=DBT
DBT=(DBTa+DBTb)/2
return DBT
def __DBT_RH_WBT_P(RH, WBT, P):
[DBTa, DBTb]=[Min_DBT, Max_DBT]
DBT=(DBTa+DBTb)/2
while DBTb-DBTa>TOL:
ya=__W_DBT_WBT_P(DBTa, WBT, P)-__W_DBT_RH_P(DBTa, RH, P)
y=__W_DBT_WBT_P(DBT, WBT, P)-__W_DBT_RH_P(DBT, RH, P)
if __is_positive(y)==__is_positive(ya):
DBTa=DBT
else:
DBTb=DBT
DBT=(DBTa+DBTb)/2
return DBT
def __DBT_V_W_P(V, W, P):
[DBTa, DBTb]=[Min_DBT, Max_DBT]
DBT=(DBTa+DBTb)/2
while DBTb-DBTa>TOL:
ya=W-__W_DBT_V_P(DBTa, V, P)
y=W-__W_DBT_V_P(DBT, V, P)
if __is_positive(y)==__is_positive(ya):
DBTa=DBT
else:
DBTb=DBT
DBT=(DBTa+DBTb)/2
return DBT
def __DBT_V_WBT_P(V, WBT, P):
[DBTa, DBTb]=[Min_DBT, Max_DBT]
DBT=(DBTa+DBTb)/2
while DBTb-DBTa>TOL:
ya=__W_DBT_WBT_P(DBTa, WBT, P)-__W_DBT_V_P(DBTa, V, P)
y=__W_DBT_WBT_P(DBT, WBT, P)-__W_DBT_V_P(DBT, V, P)
if __is_positive(y)==__is_positive(ya):
DBTa=DBT
else:
DBTb=DBT
DBT=(DBTa+DBTb)/2
return DBT
def __DBT_W_WBT_P(W, WBT, P):
[DBTa, DBTb]=[Min_DBT, Max_DBT]
DBT=(DBTa+DBTb)/2
while DBTb-DBTa>TOL:
ya=__W_DBT_WBT_P(DBTa, WBT, P)-W
y=__W_DBT_WBT_P(DBT, WBT, P)-W
if __is_positive(y)==__is_positive(ya):
DBTa=DBT
else:
DBTb=DBT
DBT=(DBTa+DBTb)/2
return DBT
# ASHRAE 2009 Chapter 1 Equation 39
def __DPT_Pw(Pw):
C14=100.45
C15=33.193
C16=2.319
C17=0.17074
C18=1.2063
a=m.log(Pw)
return (C14+C15*a+C16*a**2+C17*a**3+C18*Pw**0.1984)+459.67
# ASHRAE 2009 Chapter 1 Equation 32
def __H_DBT_W(DBT, W):
if __valid_DBT(DBT):
DBT=DBT-459.67
return 0.240*DBT+W*(1061+0.444*DBT)
def __is_positive(x):
if x>0:
return True
else:
return False
# ASHRAE 2009 Chapter 1 Equation 22
def __Pw_W_P(W, P):
return W*P/(W+0.621945)
# ASHRAE 2009 Chapter 1 Equation 6
def __Pws(DBT):
if __valid_DBT(DBT):
C8=-1.0440397*10**4
C9=-1.1294650*10**1
C10=-2.7022355*10**-2
C11=1.2890360*10**-5
C12=-2.4780681*10**-9
C13=6.5459673
return m.exp(C8/DBT+C9+C10*DBT+C11*DBT**2+C12*DBT**3+C13*m.log(DBT))
def state(prop1, prop1val, prop2, prop2val,P):
if prop1==prop2:
print("Properties must be independent.")
return
prop=["DBT","WBT","RH","W","V","H"]
if prop1 not in prop or prop2 not in prop:
print("Valid property must be given.")
return
prop1i=prop.index(prop1)
prop2i=prop.index(prop2)
if prop1i<prop2i:
cd1=prop1
cd1val=prop1val
cd2=prop2
cd2val=prop2val
else:
cd1=prop2
cd1val=prop2val
cd2=prop1
cd2val=prop1val
if cd1=="DBT":
DBT=cd1val
if cd2=="WBT":
WBT=cd2val
W=__W_DBT_WBT_P(DBT, WBT, P)
H=__H_DBT_W(DBT, W)
RH=__RH_DBT_W_P(DBT, W, P)
V=__V_DBT_W_P(DBT, W, P)
elif cd2=="RH":
RH=cd2val
W=__W_DBT_RH_P(DBT, RH, P)
H=__H_DBT_W(DBT, W)
V=__V_DBT_W_P(DBT, W, P)
WBT=__WBT_DBT_W_P(DBT, W, P)
elif cd2=="W":
W=cd2val
H=__H_DBT_W(DBT, W)
RH=__RH_DBT_W_P(DBT, W, P)
V=__V_DBT_W_P(DBT, W, P)
WBT=__WBT_DBT_W_P(DBT, W, P)
elif cd2=="V":
V=cd2val
W=__W_DBT_V_P(DBT, V, P)
H=__H_DBT_W(DBT, W)
RH=__RH_DBT_W_P(DBT, W, P)
WBT=__WBT_DBT_W_P(DBT, W, P)
elif cd2=="H":
H=cd2val
W=__W_DBT_H(DBT, H)
RH=__RH_DBT_W_P(DBT, W, P)
V=__V_DBT_W_P(DBT, W, P)
WBT=__WBT_DBT_W_P(DBT, W, P)
elif cd1=="WBT":
WBT=cd1val
if cd2=="RH":
RH=cd2val
DBT=__DBT_RH_WBT_P(RH, WBT, P)
W=__W_DBT_RH_P(DBT, RH, P)
H=__H_DBT_W(DBT, W)
V=__V_DBT_W_P(DBT, W, P)
elif cd2=="W":
W=cd2val
DBT=__DBT_W_WBT_P(W, WBT, P)
H=__H_DBT_W(DBT, W)
RH=__RH_DBT_W_P(DBT, W, P)
V=__V_DBT_W_P(DBT, W, P)
elif cd2=="V":
V=cd2val
DBT=__DBT_V_WBT_P(V, WBT, P)
W=__W_DBT_V_P(DBT, V, P)
H=__H_DBT_W(DBT, W)
RH=__RH_DBT_W_P(DBT, W, P)
elif cd2=="H":
H=cd2val
DBT=__DBT_H_WBT_P(H, WBT, P)
W=__W_DBT_H(DBT, H)
RH=__RH_DBT_W_P(DBT, W, P)
V=__V_DBT_W_P(DBT, W, P)
elif cd1=="RH":
RH=cd1val
if cd2=="W":
W=cd2val
DBT=__DBT_RH_W_P(RH, W, P)
H=__H_DBT_W(DBT, W)
V=__V_DBT_W_P(DBT, W, P)
WBT=__WBT_DBT_W_P(DBT, W, P)
elif cd2=="V":
V=cd2val
DBT=__DBT_RH_V_P(RH, V, P)
W=__W_DBT_RH_P(DBT, RH, P)
H=__H_DBT_W(DBT, W)
WBT=__WBT_DBT_W_P(DBT, W, P)
elif cd2=="H":
H=cd2val
DBT=__DBT_H_RH_P(H, RH, P)
W=__W_DBT_RH_P(DBT, RH, P)
V=__V_DBT_W_P(DBT, W, P)
WBT=__WBT_DBT_W_P(DBT, W, P)
elif cd1=="W":
W=cd1val
if cd2=="V":
V=cd2val
DBT=__DBT_V_W_P(V, W, P)
H=__H_DBT_W(DBT, W)
RH=__RH_DBT_W_P(DBT, W, P)
WBT=__WBT_DBT_W_P(DBT, W, P)
elif cd2=="H":
H=cd2val
DBT=__DBT_H_W(H, W)
RH=__RH_DBT_W_P(DBT, W, P)
V=__V_DBT_W_P(DBT, W, P)
WBT=__WBT_DBT_W_P(DBT, W, P)
elif cd1=="V":
V=cd1val
H=cd2val
DBT=__DBT_H_V_P(H, V, P)
W=__W_DBT_V_P(DBT, V, P)
RH=__RH_DBT_W_P(DBT, W, P)
WBT=__WBT_DBT_W_P(DBT, W, P)
return [DBT, H, RH, V, W, WBT]
# ASHRAE 2009 Chapter 1 Equation 22 and Equation 24
def __RH_DBT_W_P(DBT, W, P):
if __valid_DBT(DBT):
return W*P/((0.621945+W)*__Pws(DBT))
# ASHRAE 2009 Chapter 1 Equation 28
def __V_DBT_W_P(DBT, W, P):
if __valid_DBT(DBT):
return 0.370486*DBT*(1+1.607858*W)/P
# ASHRAE 2009 Chapter 1 Equation 32
def __W_DBT_H(DBT, H):
if __valid_DBT(DBT):
DBT=DBT-459.67
return (H-0.240*DBT)/(1061+0.444*DBT)
# ASHRAE 2009 Chapter 1 Equation 22 and Equation 24
def __W_DBT_RH_P(DBT, RH, P):
if __valid_DBT(DBT):
Pw=RH*__Pws(DBT)
return 0.621945*Pw/(P-Pw)
# ASHRAE 2009 Chapter 1 Equation 28
def __W_DBT_V_P(DBT, V, P):
if __valid_DBT(DBT):
return (P*V-0.370486*DBT)/(1.607858*0.370486*DBT)
# ASHRAE 2009 Chapter 1 Equation 35
def __W_DBT_WBT_P(DBT, WBT, P):
if __valid_DBT(DBT):
DBT=DBT-459.67
WBT=WBT-459.67
return ((1093-0.556*WBT)*__W_DBT_RH_P(WBT+459.67,1,P)-0.240*(DBT-WBT))/\
(1093+0.444*DBT-WBT)
# ASHRAE 2009 Chapter 1 Equation 35
def __WBT_DBT_W_P(DBT, W, P):
if __valid_DBT(DBT):
WBTa=__DPT_Pw(__Pw_W_P(W, P))
WBTb=DBT
WBT=(WBTa+WBTb)/2
while WBTb-WBTa>TOL:
Ws=__W_DBT_WBT_P(DBT, WBT, P)
if W>Ws:
WBTa=WBT
else:
WBTb=WBT
WBT=(WBTa+WBTb)/2
return WBT
def __valid_DBT(DBT):
if Min_DBT<=DBT<=Max_DBT:
return True
else:
return False
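# Minimal usage sketch (illustrative values, not reference data): complete the
# state of moist air at 75 F dry bulb (534.67 R absolute), 50% relative
# humidity, and standard sea-level pressure of 14.696 psia:
#
#   DBT, H, RH, V, W, WBT = state("DBT", 534.67, "RH", 0.50, 14.696)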
| 28.557377 | 80 | 0.540088 | 1,809 | 10,452 | 2.74848 | 0.096186 | 0.07321 | 0.066372 | 0.053097 | 0.707965 | 0.671158 | 0.648632 | 0.559936 | 0.540426 | 0.509453 | 0 | 0.065667 | 0.329793 | 10,452 | 365 | 81 | 28.635616 | 0.644111 | 0.116437 | 0 | 0.592357 | 0 | 0 | 0.01075 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076433 | false | 0 | 0.003185 | 0.003185 | 0.16879 | 0.006369 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f16b53621b6f62b6b1717b1dbba16b70894bb004 | 391 | py | Python | newday2/App/hello.py | greatfirsty/hellopython | f12aacf36b8f208d6c5622ffd6b4c1927f37b45a | [
"Apache-2.0"
] | 1 | 2019-05-04T01:25:43.000Z | 2019-05-04T01:25:43.000Z | newday2/App/hello.py | greatfirsty/hellopython | f12aacf36b8f208d6c5622ffd6b4c1927f37b45a | [
"Apache-2.0"
] | null | null | null | newday2/App/hello.py | greatfirsty/hellopython | f12aacf36b8f208d6c5622ffd6b4c1927f37b45a | [
"Apache-2.0"
] | null | null | null | import time
def total_time(fun):
    def f():
        before_time = time.time()
        result = fun()  # keep the wrapped function's return value
        t_time = time.time() - before_time
        print(t_time)
        return result
    return f
@total_time
def add():
time.sleep(1)
return 3
# Apply the decorator to a function to measure its running time
@total_time
def sub():
    print('slept badly')
    time.sleep(2)
    print('so annoying')
return 10
if __name__=='__main__':
sub()
| 15.64 | 39 | 0.598465 | 56 | 391 | 3.875 | 0.446429 | 0.147465 | 0.110599 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017544 | 0.2711 | 391 | 24 | 40 | 16.291667 | 0.74386 | 0.033248 | 0 | 0.095238 | 0 | 0 | 0.034574 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.190476 | false | 0 | 0.047619 | 0 | 0.380952 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f16cd046c1d5bc8c482c76596f8d465b56eb49c5 | 667 | py | Python | too_simple_server/__main__.py | outcatcher/Simple-Mock-Webserver | 315bccca9dea9e821bb0a02878187058cd1fe38f | [
"MIT"
] | null | null | null | too_simple_server/__main__.py | outcatcher/Simple-Mock-Webserver | 315bccca9dea9e821bb0a02878187058cd1fe38f | [
"MIT"
] | 3 | 2019-09-04T13:22:46.000Z | 2019-09-13T09:35:51.000Z | too_simple_server/__main__.py | opentelekomcloud-infra/Simple-Mock-Webserver | 315bccca9dea9e821bb0a02878187058cd1fe38f | [
"MIT"
] | null | null | null | """Run server as module"""
from argparse import ArgumentParser
from too_simple_server.configuration import DEFAULT_CFG_PATH
from too_simple_server.run import main
AGP = ArgumentParser(description="Mock server with simple DB interactions")
AGP.add_argument("--debug", action="store_true", default=None)
AGP.add_argument("--config", help=f"Configuration file to be used, '{DEFAULT_CFG_PATH}' by default",
default=DEFAULT_CFG_PATH)
AGP.add_argument("--no-wsgi", action="store_true", default=False)
AGP.add_argument("action", default="start", choices=["start", "stop"])
ARGS = AGP.parse_args()
main(ARGS.action, ARGS.debug, ARGS.config, ARGS.no_wsgi)
| 41.6875 | 100 | 0.758621 | 95 | 667 | 5.136842 | 0.463158 | 0.04918 | 0.114754 | 0.077869 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.106447 | 667 | 15 | 101 | 44.466667 | 0.818792 | 0.029985 | 0 | 0 | 0 | 0 | 0.25741 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.272727 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f1729eaefd848cb322cd91bf8c15356b94b4a070 | 3,038 | py | Python | SIM_utils/SIM.py | facebookresearch/Project_FARSI | 12b40e4f16ba7418a0f3b997ad124cdb51f4e7f4 | [
"MIT"
] | 14 | 2021-06-01T16:45:19.000Z | 2022-03-08T20:07:00.000Z | SIM_utils/SIM.py | facebookresearch/Project_FARSI | 12b40e4f16ba7418a0f3b997ad124cdb51f4e7f4 | [
"MIT"
] | null | null | null | SIM_utils/SIM.py | facebookresearch/Project_FARSI | 12b40e4f16ba7418a0f3b997ad124cdb51f4e7f4 | [
"MIT"
] | 3 | 2021-08-05T16:37:47.000Z | 2022-01-06T00:25:49.000Z | #Copyright (c) Facebook, Inc. and its affiliates.
#This source code is licensed under the MIT license found in the
#LICENSE file in the root directory of this source tree.

from SIM_utils.components.perf_sim import *
from SIM_utils.components.pow_sim import *
#from OSSIM_utils.components.pow_knob_sim import *
from design_utils.design import *
from settings import config


# This module is our top-level simulator containing all simulators
# (the performance and power simulators).
class OSASimulator:
    def __init__(self, dp, database, pk_dp=""):
        self.time_elapsed = 0  # time elapsed from the beginning of the simulation
        self.dp = dp  # design point to simulate
        self.perf_sim = PerformanceSimulator(self.dp)  # performance simulator instance
        self.pow_sim = PowerSimulator(self.dp)  # power simulator instance
        self.database = database
        if config.simulation_method == "power_knobs":
            self.pk_dp = pk_dp
            #self.knob_change_sim = PowerKnobSimulator(self.dp, self.pk_dp, self.database)
        self.completion_time = -1  # time passed for the simulation to complete
        self.program_status = "idle"
        self.cur_tick_time = self.next_tick_time = 0  # current tick time

    # ------------------------------
    # Functionality:
    #   whether the simulation should terminate
    # ------------------------------
    def terminate(self, program_status):
        if config.termination_mode == "workload_completion":
            return program_status == "done"
        elif config.termination_mode == "time_budget_reahced":  # [sic] mode string kept as spelled in the source
            return self.time_elapsed >= config.time_budge  # [sic] attribute name kept as defined in settings
        else:
            return False

    # ------------------------------
    # Functionality:
    #   ticking the simulation. Note that the tick time varies depending on
    #   what is (dynamically) happening in the system
    # ------------------------------
    def tick(self):
        self.cur_tick_time = self.next_tick_time

    # ------------------------------
    # Functionality:
    #   progress the simulation forward by clock_time
    # ------------------------------
    def step(self, clock_time):
        self.next_tick_time, self.program_status = self.perf_sim.simulate(clock_time)

    # ------------------------------
    # Functionality:
    #   run the simulation to completion
    # ------------------------------
    def simulate(self):
        while not self.terminate(self.program_status):
            self.tick()
            self.step(self.cur_tick_time)

        if config.use_cacti:
            self.dp.correct_power_area_with_cacti(self.database)

        # collect all the stats upon completion of simulation
        self.dp.collect_dp_stats(self.database)
        if config.simulation_method == "power_knobs":
            self.knob_change_sim.launch()

        self.completion_time = self.next_tick_time
        self.dp.set_serial_design_time(self.perf_sim.serial_latency)
        self.dp.set_par_speedup(self.perf_sim.serial_latency/self.completion_time)
        return self.dp | 40.506667 | 114 | 0.629032 | 360 | 3,038 | 5.097222 | 0.35 | 0.032698 | 0.023978 | 0.034877 | 0.140599 | 0.140599 | 0.083924 | 0.083924 | 0 | 0 | 0 | 0.001272 | 0.223502 | 3,038 | 75 | 115 | 40.506667 | 0.7766 | 0.38183 | 0 | 0.05 | 0 | 0 | 0.036777 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.1 | 0 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f1743c6ed7aff9eadd1ebcb064f885173f6e5cef | 5,992 | py | Python | netbox_graphql/tests/ipam/tests_vlans.py | ninech/django-netbox-graphql | 8383570bdf3a8ce8d9d912c5b8f7b053b31c7363 | [
"MIT"
] | 17 | 2017-08-17T02:38:09.000Z | 2022-01-05T15:36:20.000Z | netbox_graphql/tests/ipam/tests_vlans.py | ninech/django-netbox-graphql | 8383570bdf3a8ce8d9d912c5b8f7b053b31c7363 | [
"MIT"
] | 2 | 2017-09-13T14:53:56.000Z | 2018-02-08T14:06:54.000Z | netbox_graphql/tests/ipam/tests_vlans.py | ninech/django-netbox-graphql | 8383570bdf3a8ce8d9d912c5b8f7b053b31c7363 | [
"MIT"
] | 2 | 2020-03-04T11:51:10.000Z | 2021-03-11T19:24:37.000Z | from string import Template

from graphene.test import Client
from django.test import TestCase

from ipam.models import VLAN
from netbox_graphql.schema import schema
from netbox_graphql.tests.utils import obj_to_global_id
from netbox_graphql.tests.factories.ipam_factories import VLANFactory, RoleFactory
from netbox_graphql.tests.factories.tenant_factories import TenantFactory


class CreateTestCase(TestCase):
    @classmethod
    def setUpTestData(cls):
        cls.tenant = TenantFactory()
        cls.role = RoleFactory()
        cls.query = Template('''
        mutation{
          newVlan(input: { tenant: "$tenantId", role: "$roleId", vid: 2, name: "New Vlan"}) {
            vlan{
              name
              vid
              tenant{
                name
              }
              role{
                name
              }
            }
          }
        }
        ''').substitute(tenantId=obj_to_global_id(cls.tenant),
                        roleId=obj_to_global_id(cls.role))

    def test_creating_returns_no_error(self):
        result = schema.execute(self.query)
        assert not result.errors

    def test_creating_returns_data(self):
        expected = {'newVlan':
                    {'vlan': {'name': 'New Vlan',
                              'vid': 2,
                              'tenant': {'name': self.tenant.name},
                              'role': {'name': self.role.name}
                              }}}
        result = schema.execute(self.query)
        self.assertEquals(result.data, expected)

    def test_creating_creates_it(self):
        oldCount = VLAN.objects.all().count()
        schema.execute(self.query)
        self.assertEquals(VLAN.objects.all().count(), oldCount + 1)


class QueryMultipleTestCase(TestCase):
    @classmethod
    def setUpTestData(cls):
        cls.first = VLANFactory()
        cls.second = VLANFactory()
        cls.query = '''
        {
          vlans {
            edges {
              node {
                id
              }
            }
          }
        }
        '''

    def test_querying_all_returns_no_error(self):
        result = schema.execute(self.query)
        assert not result.errors

    def test_querying_all_returns_two_results(self):
        result = schema.execute(self.query)
        self.assertEquals(len(result.data['vlans']['edges']), 2)


class QuerySingleTestCase(TestCase):
    @classmethod
    def setUpTestData(cls):
        cls.first = VLANFactory()
        cls.second = VLANFactory()
        cls.query = Template('''
        {
          vlans(id: "$id") {
            edges {
              node {
                name
                vid
                tenant {
                  name
                }
                role {
                  name
                }
              }
            }
          }
        }
        ''').substitute(id=obj_to_global_id(cls.second))

    def test_querying_single_returns_no_error(self):
        result = schema.execute(self.query)
        assert not result.errors

    def test_querying_single_returns_result(self):
        result = schema.execute(self.query)
        self.assertEquals(len(result.data['vlans']['edges']), 1)

    def test_querying_single_returns_expected_result(self):
        result = schema.execute(self.query)
        expected = {'vlans':
                    {'edges': [
                        {'node': {'name': self.second.name,
                                  'vid': self.second.vid,
                                  'tenant': {'name': self.second.tenant.name},
                                  'role': {'name': self.second.role.name}}}
                    ]}}
        self.assertEquals(result.data, expected)


class UpdateTestCase(TestCase):
    @classmethod
    def setUpTestData(cls):
        cls.first = VLANFactory()
        cls.tenant = TenantFactory()
        cls.query = Template('''
        mutation{
          updateVlan(input: { id: "$id", vid: 10, name: "New Name", tenant: "$tenantId"}) {
            vlan{
              name
              vid
              tenant {
                name
              }
            }
          }
        }
        ''').substitute(id=obj_to_global_id(cls.first),
                        tenantId=obj_to_global_id(cls.tenant))

    def test_updating_returns_no_error(self):
        result = schema.execute(self.query)
        assert not result.errors

    def test_updating_doesnt_change_count(self):
        oldCount = VLAN.objects.all().count()
        schema.execute(self.query)
        self.assertEquals(VLAN.objects.all().count(), oldCount)

    def test_updating_returns_updated_data(self):
        expected = {'updateVlan':
                    {'vlan': {'name': 'New Name',
                              'vid': 10,
                              'tenant': {'name': self.tenant.name}}}}
        result = schema.execute(self.query)
        self.assertEquals(result.data, expected)

    def test_updating_alters_data(self):
        schema.execute(self.query)
        vlan = VLAN.objects.get(id=self.first.id)
        self.assertEquals(vlan.name, 'New Name')
        self.assertEquals(vlan.vid, 10)
        self.assertEquals(vlan.tenant.name, self.tenant.name)


class DeleteTestCase(TestCase):
    @classmethod
    def setUpTestData(cls):
        cls.first = VLANFactory()
        cls.query = Template('''
        mutation{
          deleteVlan(input: { id:"$id"}) {
            vlan{
              id
            }
          }
        }
        ''').substitute(id=obj_to_global_id(cls.first))

    def test_deleting_returns_no_error(self):
        result = schema.execute(self.query)
        assert not result.errors

    def test_deleting_removes_a_type(self):
        oldCount = VLAN.objects.all().count()
        schema.execute(self.query)
        self.assertEquals(VLAN.objects.all().count(), oldCount - 1)
| 30.571429 | 99 | 0.524866 | 570 | 5,992 | 5.370175 | 0.159649 | 0.032016 | 0.077752 | 0.100621 | 0.614832 | 0.508657 | 0.495263 | 0.442992 | 0.40967 | 0.370467 | 0 | 0.003165 | 0.367323 | 5,992 | 195 | 100 | 30.728205 | 0.804273 | 0 | 0 | 0.451807 | 0 | 0.012048 | 0.245995 | 0 | 0 | 0 | 0 | 0 | 0.096386 | 1 | 0.114458 | false | 0 | 0.048193 | 0 | 0.192771 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f175d2c9f715e287b74b3835941c2fefef41cc0e | 36,264 | py | Python | ncdiff/src/yang/ncdiff/gnmi.py | tahigash/yang | f74d4549421a4ca3b9bc9ee46194d9c3810fd32a | [
"Apache-2.0"
] | null | null | null | ncdiff/src/yang/ncdiff/gnmi.py | tahigash/yang | f74d4549421a4ca3b9bc9ee46194d9c3810fd32a | [
"Apache-2.0"
] | null | null | null | ncdiff/src/yang/ncdiff/gnmi.py | tahigash/yang | f74d4549421a4ca3b9bc9ee46194d9c3810fd32a | [
"Apache-2.0"
] | null | null | null | import re
import json
import logging

from lxml import etree
from copy import deepcopy
from xmljson import Parker
from ncclient import xml_
from xml.etree import ElementTree
from collections import OrderedDict, defaultdict

# ConfigError is raised throughout this module; import it alongside ModelError
# (assumed to live in .errors with the other ncdiff exceptions)
from .errors import ModelError, ConfigError
from .composer import Tag, Composer
from .calculator import BaseCalculator
from .proto.gnmi.gnmi_pb2 import PathElem, Path, SetRequest, TypedValue, Update

# create a logger for this module
logger = logging.getLogger(__name__)

nc_url = xml_.BASE_NS_1_0
config_tag = '{' + nc_url + '}config'

ns_spec = {
    'legacy': {
        'path': Tag.JSON_PREFIX,
        'val_name': Tag.JSON_NAME,
        'val_val': Tag.JSON_PREFIX,
    },
    'rfc7951': {
        'path': Tag.JSON_NAME,
        'val_name': Tag.JSON_NAME,
        'val_val': Tag.JSON_NAME,
    },
    'openconfig': {
        'path': Tag.JSON_NAME,
        'val_name': Tag.JSON_NAME,
        'val_val': Tag.JSON_NAME,
    },
    '': {
        'path': Tag.JSON_NAME,
        'val_name': Tag.JSON_NAME,
        'val_val': Tag.JSON_NAME,
    },
}
def _tostring(value):
    '''_tostring

    Convert value to XML compatible string.
    '''

    if value is True:
        return 'true'
    elif value is False:
        return 'false'
    elif value is None:
        return None
    else:
        return str(value)


def _fromstring(value):
    '''_fromstring

    Convert XML string value to None, boolean, int or float.
    '''

    if not value:
        return None
    std_value = value.strip().lower()
    if std_value == 'true':
        return 'true'
    elif std_value == 'false':
        return 'false'
    # try:
    #     return int(std_value)
    # except ValueError:
    #     pass
    # try:
    #     return float(std_value)
    # except ValueError:
    #     pass
    return value
class gNMIParser(object):
    '''gNMIParser

    A parser to convert a gNMI GetResponse to an lxml Element object. The gNMI
    specification can be found at
    https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md

    Attributes
    ----------
    ele : `Element`
        An lxml Element object which is the root of the config tree.

    config_nodes : `list`
        A list of config nodes. Each config node is an Element node in the
        config tree, corresponding to one 'update' in the gNMI GetResponse.

    xpaths : `list`
        A list of strings. Each string is an xpath of an Element node in the
        config tree, corresponding to one 'update' in the gNMI GetResponse.
    '''

    def __init__(self, device, gnmi_get_reply):
        self.device = device
        self.reply = gnmi_get_reply
        self._config_nodes = None
        self._ele = None
        self._convert_tag = defaultdict(dict)
        self._prefix_to_name = {i[1]: i[0] for i in self.device.namespaces
                                if i[1] is not None}
        self._prefix_to_url = {i[1]: i[2] for i in self.device.namespaces
                               if i[1] is not None}

    @property
    def ele(self):
        if self._ele is None:
            self._ele = self.config_nodes.ele
        return self._ele

    @property
    def config_nodes(self):
        if self._config_nodes is None:
            self._config_nodes = self.get_config_nodes()
        return self._config_nodes

    @property
    def xpaths(self):
        xpaths = []
        if len(self.config_nodes) > 0 and len(self.config_nodes[0]) > 0:
            if len(self.config_nodes[0]) > 1:
                xpaths.append(self.device.get_xpath(self.config_nodes[0][0],
                                                    type=Tag.LXML_XPATH,
                                                    instance=False))
            else:
                xpaths.append(self.device.get_xpath(self.config_nodes[0][0],
                                                    type=Tag.LXML_XPATH,
                                                    instance=True))
        return xpaths

    def parse_value(self, origin, value, tag):

        def convert_xml_to_lxml(xml_element, lxml_parent=None, default_ns=''):
            ns_name, tag = self.convert_tag(default_ns, xml_element.tag,
                                            src=ns_spec[origin]['val_name'],
                                            dst=Tag.LXML_ETREE)
            val_name_ns_tuple = self.convert_ns(ns_name,
                                                src=ns_spec[origin]['val_name'][0])
            nsmap = {None: val_name_ns_tuple[Tag.NAMESPACE]}
            val_name_ns = val_name_ns_tuple[ns_spec[origin]['val_val'][0]]
            if xml_element.text is not None:
                ns_val, text = self.convert_tag(val_name_ns, xml_element.text,
                                                src=ns_spec[origin]['val_val'],
                                                dst=Tag.JSON_PREFIX)
                if ns_val != val_name_ns:
                    v_v_ns = self.convert_ns(ns_val,
                                             src=ns_spec[origin]['val_val'][0])
                    v_v_prefix = v_v_ns[Tag.PREFIX]
                    v_v_url = v_v_ns[Tag.NAMESPACE]
                    nsmap[v_v_prefix] = v_v_url
            if lxml_parent is None:
                lxml_element = etree.Element(tag, nsmap=nsmap)
            else:
                lxml_element = etree.SubElement(lxml_parent, tag, nsmap=nsmap)
            if xml_element.text is not None:
                lxml_element.text = text
            for xml_child in xml_element:
                convert_xml_to_lxml(xml_child,
                                    lxml_parent=lxml_element,
                                    default_ns=ns_name)
            return lxml_element

        n, t = self.convert_tag('', tag,
                                src=Tag.LXML_ETREE,
                                dst=ns_spec[origin]['val_name'])
        json_val_str = '{{"{}": {}}}'.format(t, value.json_ietf_val.decode())
        json_data = json.loads(json_val_str, object_pairs_hook=OrderedDict)
        pk = Parker(xml_tostring=_tostring, element=ElementTree.Element)
        return [convert_xml_to_lxml(i) for i in pk.etree(json_data)]

    @staticmethod
    def parse_tag(tag):
        ret = re.search('^{(.+)}(.+)$', tag)
        if ret:
            return ret.group(1), ret.group(2)
        else:
            raise ModelError("tag '{}' does not have URL info" \
                             .format(tag))

    def convert_tag(self, default_ns, tag, src=Tag.LXML_ETREE, dst=Tag.YTOOL):
        if src == Tag.JSON_NAME and dst == Tag.LXML_ETREE:
            if default_ns not in self._convert_tag or \
               tag not in self._convert_tag[default_ns]:
                self._convert_tag[default_ns][tag] = \
                    self.device.convert_tag(default_ns, tag, src=src, dst=dst)
            return self._convert_tag[default_ns][tag]
        else:
            return self.device.convert_tag(default_ns, tag, src=src, dst=dst)

    def convert_ns(self, ns, src=Tag.NAME):
        entries = [i for i in self.device.namespaces if i[src] == ns]
        c = len(entries)
        if c == 0:
            raise ConfigError("{} '{}' does not exist in device attribute " \
                              "'namespaces'" \
                              .format(Tag.STR[src], ns))
        if c > 1:
            raise ModelError("device has more than one {} '{}': {}" \
                             .format(Tag.STR[src], ns, entries))
        return entries[0]
    def get_config_nodes(self):
        '''get_config_nodes

        High-level api: get_config_nodes returns a list of config nodes. Each
        config node is an Element node in the config tree, corresponding to
        one 'update' in the gNMI GetResponse.

        Returns
        -------
        Config
            A Config object.
        '''

        from .config import Config

        config = Config(self.device, config=None)
        for notification in self.reply.notification:
            updates = []
            for update in notification.update:
                config += self.build_config_node(Config(self.device, config=None),
                                                 notification.prefix,
                                                 update.path, update.val)
        return config

    def get_schema_node(self, parent_schema_node, tag, origin=''):

        def is_parent(node1, node2):
            ancestors = {id(a): a for a in node2.iterancestors()}
            ids_1 = set([id(a) for a in node1.iterancestors()])
            ids_2 = set([id(a) for a in node2.iterancestors()])
            if not ids_1 < ids_2:
                return False
            for i in ids_2 - ids_1:
                if ancestors[i] is not node1 and \
                   ancestors[i].attrib['type'] != 'choice' and \
                   ancestors[i].attrib['type'] != 'case':
                    return False
            return True

        def get_root(tag):
            if origin == 'openconfig' or origin == '':
                models = [m for m in self.device.models_loaded
                          if m[:10] == 'openconfig']
            else:
                models = self.device.models_loaded
            roots = {}
            for m in models:
                root = get_child(tag, parent=self.device.models[m].tree)
                if root is not None:
                    roots[m] = root
            if len(roots) == 1:
                return list(roots.values())[0]
            elif len(roots) > 1:
                if origin == 'openconfig' or origin == '':
                    tag = self.parse_tag(tag)[1]
                raise ModelError("more than one models have root with tag " \
                                 "'{}': {}" \
                                 .format(tag, ', '.join(roots.keys())))
            else:
                return None

        def get_child(tag, parent):
            if origin == 'openconfig' or origin == '':
                children = [i for i in parent.iterdescendants() \
                            if self.parse_tag(i.tag)[1] == tag and \
                            i.get('type') != 'choice' and \
                            i.get('type') != 'case' and \
                            is_parent(parent, i)]
            else:
                children = [i for i in parent.iterdescendants() \
                            if i.tag == tag and \
                            i.get('type') != 'choice' and \
                            i.get('type') != 'case' and \
                            is_parent(parent, i)]
            if len(children) == 1:
                return children[0]
            elif len(children) > 1:
                if parent.getparent() is None:
                    raise ModelError("model {} has more than one root with " \
                                     "tag '{}'" \
                                     .format(parent.tag, tag))
                else:
                    raise ModelError("node {} has more than one child with " \
                                     "tag '{}'" \
                                     .format(self.device.get_xpath(parent),
                                             tag))
            else:
                return None

        # search roots
        if parent_schema_node is None:
            child = get_root(tag)
            if child is None:
                raise ConfigError("root '{}' cannot be found in loaded models" \
                                  .format(tag))
            else:
                return child

        # search from a parent
        child = get_child(tag, parent_schema_node)
        if child is None:
            raise ConfigError("node {} does not have child with tag '{}'" \
                              .format(self.device.get_xpath(parent_schema_node),
                                      tag))
        else:
            return child
    def build_config_node_per_elem(self, origin, parent_config_node, path_elem,
                                   value=None):

        def cleanup_and_append(origin, parent_config_node, child_schema_node,
                               value):
            for n in parent_config_node.findall(child_schema_node.tag):
                parent_config_node.remove(n)
            for n in self.parse_value(origin, value, child_schema_node.tag):
                parent_config_node.append(n)
            return None

        if parent_config_node.tag == config_tag:
            parent_schema_node = None
            parent_ns = ''
        else:
            parent_schema_node = self.device.get_schema_node(parent_config_node)
            parent_url, parent_tag_name = self.parse_tag(parent_config_node.tag)
            parent_ns_tuple = self.convert_ns(parent_url, src=Tag.LXML_ETREE[0])
            parent_ns = parent_ns_tuple[ns_spec[origin]['path'][0]]
        if origin == 'openconfig' or origin == '':
            child_schema_node = self.get_schema_node(parent_schema_node,
                                                     path_elem.name,
                                                     origin=origin)
        else:
            child_ns, child_tag = self.convert_tag(parent_ns, path_elem.name,
                                                   src=ns_spec[origin]['path'],
                                                   dst=Tag.LXML_ETREE)
            child_schema_node = self.get_schema_node(parent_schema_node,
                                                     child_tag,
                                                     origin=origin)
        type = child_schema_node.get('type')
        if type == 'leaf' or type == 'leaf-list':
            if value is None:
                raise ConfigError("node {} does not have value" \
                                  .format(self.device.get_xpath(child_schema_node)))
            else:
                return cleanup_and_append(origin, parent_config_node,
                                          child_schema_node, value)
        elif type == 'container':
            if value is None:
                match = parent_config_node.find(child_schema_node.tag)
                if match is not None:
                    return match
                else:
                    return self.subelement(origin,
                                           parent_config_node,
                                           child_schema_node.tag)
            else:
                return cleanup_and_append(origin, parent_config_node,
                                          child_schema_node, value)
        elif type == 'list':
            if value is None:
                instance = self.find_instance(origin,
                                              parent_config_node,
                                              child_schema_node,
                                              path_elem.key)
                if instance is not None:
                    return instance
                else:
                    return self.subelement(origin,
                                           parent_config_node,
                                           child_schema_node.tag,
                                           key=path_elem.key)
            else:
                return cleanup_and_append(origin, parent_config_node,
                                          child_schema_node, value)
        else:
            raise ModelError("type of node {} is unknown: '{}'" \
                             .format(self.device.get_xpath(parent_schema_node),
                                     type))

    def build_config_node(self, config, prefix, path, value):
        from .config import Config

        config_node = config.ele
        absolute_path = list(prefix.elem) + list(path.elem)
        for index, elem in enumerate(absolute_path):
            if index == len(path.elem) - 1:
                config_saved = Config(self.device, config=deepcopy(config.ele))
                config_node = self.build_config_node_per_elem(path.origin,
                                                              config_node,
                                                              elem,
                                                              value=value)
                return config_saved + config
            else:
                config_node = self.build_config_node_per_elem(path.origin,
                                                              config_node,
                                                              elem)

    def find_instance(self, origin, parent_config_node, child_schema_node, key):

        def find_key(config_node, key_tag, key_text):
            match = config_node.find(key_tag)
            if match is None:
                return False
            if match.text != key_text:
                return False
            return True

        def find_keys(config_node, key_tuple):
            for key_tag, nsmap, key_text in key_tuple:
                if not find_key(config_node, key_tag, key_text):
                    return False
            return True

        keys = child_schema_node.get('key').split()
        if len(keys) != len(key):
            raise ConfigError("node {} has {} keys in Path object, but the " \
                              "schema node requires {} keys: {}" \
                              .format(self.device.get_xpath(child_schema_node),
                                      len(key), len(keys), ', '.join(keys)))
        key_tuple = self.parse_key(origin, child_schema_node.tag, key)
        for key_tag, nsmap, text in key_tuple:
            url, tag_name = self.parse_tag(key_tag)
            if tag_name not in keys:
                raise ConfigError("node {} does not have key {}" \
                                  .format(self.device.get_xpath(child_schema_node),
                                          key_tag))
        for child in parent_config_node.findall(child_schema_node.tag):
            if find_keys(child, key_tuple):
                return child
        return None

    def get_prefix(self, text):
        if text is None:
            return '', None
        m = re.search('^(.*):(.*)$', text)
        if m:
            if m.group(1) in self._prefix_to_name:
                return m.group(1), m.group(2)
            else:
                return '', text
        else:
            return '', text

    def parse_key(self, origin, tag, key):
        url, tag_name = self.parse_tag(tag)
        text_ns_tuple = self.convert_ns(url, src=Tag.NAMESPACE)
        default_ns = text_ns_tuple[ns_spec[origin]['path'][0]]
        ret = []
        for k, v in key.items():
            tag_ns, key_tag = self.convert_tag(default_ns, k,
                                               src=ns_spec[origin]['path'],
                                               dst=Tag.LXML_ETREE)
            text_ns, text = self.convert_tag(tag_ns, v,
                                             src=ns_spec[origin]['path'],
                                             dst=Tag.XPATH)
            text_ns_tuple = self.convert_ns(tag_ns,
                                            src=ns_spec[origin]['path'][0])
            nsmap = {None: text_ns_tuple[Tag.NAMESPACE]}
            if text_ns != tag_ns:
                text_ns_tuple = self.convert_ns(text_ns,
                                                src=ns_spec[origin]['path'][0])
                nsmap[text_ns_tuple[Tag.PREFIX]] = text_ns_tuple[Tag.NAMESPACE]
            ret.append((key_tag, nsmap, text))
        return ret

    def subelement(self, origin, parent, tag, key={}):
        url, tag_name = self.parse_tag(tag)
        e = etree.SubElement(parent, tag, nsmap={None: url})
        default_ns_tuple = self.convert_ns(url, src=Tag.NAMESPACE)
        default_ns = default_ns_tuple[ns_spec[origin]['path'][0]]
        if key:
            for key_tag, nsmap, text in self.parse_key(origin, tag, key):
                e_child = etree.SubElement(e, key_tag, nsmap=nsmap)
                e_child.text = text
        return e
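# --- Editor's note (not part of the original module) ---
# Typical parser flow, assuming a ModelDevice-like `device` and a gNMI
# GetResponse `reply` from the surrounding ncdiff API:
#     parser = gNMIParser(device, reply)
#     root = parser.ele      # lxml root of the reconstructed config tree
#     paths = parser.xpaths  # xpaths corresponding to the received updates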
class gNMIComposer(Composer):
    '''gNMIComposer

    A composer to convert an lxml Element object to gNMI JSON format. gNMI
    adopts RFC 7951 when encoding data. One gNMIComposer instance abstracts
    one config node in the config tree.
    '''

    def __init__(self, *args, **kwargs):
        super(gNMIComposer, self).__init__(*args, **kwargs)
        self._url_to_prefix = {i[2]: i[1] for i in self.device.namespaces
                               if i[1] is not None}

    def get_json(self, instance=True, origin='openconfig'):
        '''get_json

        High-level api: get_json returns json_val of the config node.

        Parameters
        ----------
        instance : `bool`
            True if only one instance of list or leaf-list is required. False
            if all instances of list or leaf-list are needed.

        Returns
        -------
        str
            A string in JSON format.
        '''

        def get_json_instance(node):
            pk = Parker(xml_fromstring=_fromstring, dict_type=OrderedDict)
            default_ns = {}
            for item in node.iter():
                parents = [p for p in node.iter() if item in p]
                if parents and id(parents[0]) in default_ns:
                    ns, tag = self.device.convert_tag(default_ns[id(parents[0])],
                                                      item.tag,
                                                      dst=ns_spec[origin]['val_name'])
                else:
                    ns, tag = self.device.convert_tag('',
                                                      item.tag,
                                                      dst=ns_spec[origin]['val_name'])
                default_ns[id(item)] = ns
                item.tag = tag
                if item.text:
                    text = self.device.convert_tag(self._url_to_prefix[ns],
                                                   item.text,
                                                   src=Tag.JSON_PREFIX,
                                                   dst=ns_spec[origin]['val_val'])[1]
                    item.text = text
            return pk.data(node)

        def convert_node(node):
            # lxml.etree does not allow tag names like oc-if:enable,
            # so the node is converted to xml.etree.ElementTree
            string = etree.tostring(node, encoding='unicode',
                                    pretty_print=False)
            return ElementTree.fromstring(string)

        if instance:
            return json.dumps(get_json_instance(convert_node(self.node)))
        else:
            nodes = [n for n in
                     self.node.getparent().iterchildren(tag=self.node.tag)]
            if len(nodes) > 1:
                return json.dumps([get_json_instance(convert_node(n))
                                   for n in nodes])
            else:
                return json.dumps(get_json_instance(convert_node(nodes[0])))

    def get_path(self, instance=True, origin='openconfig'):
        '''get_path

        High-level api: get_path returns the gNMI Path object of the config
        node. Note that a gNMI Path can specify a list instance but cannot
        specify a leaf-list instance.

        Parameters
        ----------
        instance : `bool`
            True if the gNMI Path object refers to only one instance of a list.
            False if the gNMI Path object refers to all instances of a list.

        Returns
        -------
        Path
            An object of gNMI Path class.
        '''

        def get_name(node, default_ns):
            if origin == 'openconfig' or origin == '':
                return gNMIParser.parse_tag(node.tag)
            else:
                return self.device.convert_tag(default_ns,
                                               node.tag,
                                               src=Tag.LXML_ETREE,
                                               dst=ns_spec[origin]['path'])

        def get_keys(node, default_ns):
            keys = Composer(self.device, node).keys
            ret = {}
            for key in keys:
                if origin == 'openconfig' or origin == '':
                    key_ns, key_val = gNMIParser.parse_tag(key)
                else:
                    key_ns, key_val = self.device.convert_tag(default_ns,
                                                              key,
                                                              src=Tag.LXML_ETREE,
                                                              dst=ns_spec[origin]['path'])
                ns_tuple = self.convert_ns(key_ns, src=Tag.NAMESPACE)
                val_ns, val_val = self.device.convert_tag(ns_tuple[Tag.PREFIX],
                                                          node.find(key).text,
                                                          src=Tag.XPATH,
                                                          dst=ns_spec[origin]['path'])
                ret[key_val] = val_val
            return ret

        def get_pathelem(node, default_ns):
            ns, name = get_name(node, default_ns)
            schema_node = self.device.get_schema_node(node)
            if schema_node.get('type') == 'list' and \
               (node != self.node or instance):
                return ns, PathElem(name=name, key=get_keys(node, ns))
            else:
                return ns, PathElem(name=name)

        nodes = list(reversed(list(self.node.iterancestors())))[1:] + \
            [self.node]
        path_elems = []
        default_ns = ''
        for node in nodes:
            default_ns, path_elem = get_pathelem(node, default_ns)
            path_elems.append(path_elem)
        return Path(elem=path_elems, origin=origin)

    def convert_ns(self, ns, src=Tag.NAME):
        entries = [i for i in self.device.namespaces if i[src] == ns]
        c = len(entries)
        if c == 0:
            raise ConfigError("{} '{}' does not exist in device attribute " \
                              "'namespaces'" \
                              .format(Tag.STR[src], ns))
        if c > 1:
            raise ModelError("device has more than one {} '{}': {}" \
                             .format(Tag.STR[src], ns, entries))
        return entries[0]
class gNMICalculator(BaseCalculator):
    '''gNMICalculator

    A gNMI calculator to do subtraction and addition. A subtraction computes
    the delta between two Config instances in the form of a gNMI SetRequest.
    An addition applies one gNMI SetRequest to a Config instance (TBD).

    Attributes
    ----------
    sub : `SetRequest`
        A gNMI SetRequest which can achieve a transition from one config,
        i.e., self.etree2, to another config, i.e., self.etree1.
    '''

    @property
    def sub(self):
        deletes, replaces, updates = self.node_sub(self.etree1, self.etree2)
        return SetRequest(prefix=None,
                          delete=deletes,
                          replace=replaces,
                          update=updates)

    def node_sub(self, node_self, node_other):
        '''node_sub

        High-level api: Compute the delta of two config nodes. This method is
        recursive, assuming the two config nodes are different.

        Parameters
        ----------
        node_self : `Element`
            A config node in the destination config that is being processed.
            node_self cannot be a leaf node.

        node_other : `Element`
            A config node in the source config that is being processed.

        Returns
        -------
        tuple
            There are three elements in the tuple: a list of gNMI Path
            instances that need to be deleted, a list of gNMI Update instances
            for replacement purposes, and a list of gNMI Update instances for
            merging purposes.
        '''

        paths_delete = []
        updates_replace = []
        updates_update = []
        done_list = []

        # if a leaf-list node, delete the leaf-list totally
        # if a list node, by default delete the list instance
        # if a list node and delete_whole=True, delete the list totally
        def generate_delete(node, instance=True):
            paths_delete.append(gNMIComposer(self.device, node) \
                                .get_path(instance=instance))

        # if a leaf-list node, replace the leaf-list totally
        # if a list node, replace the list totally
        def generate_replace(node, instance=True):
            n = gNMIComposer(self.device, node)
            json_value = n.get_json(instance=instance).encode()
            value = TypedValue(json_val=json_value)
            path = n.get_path(instance=instance)
            updates_replace.append(Update(path=path, val=value))

        # if a leaf-list node, update the leaf-list totally
        # if a list node, by default update the list instance
        # if a list node and update_whole=True, update the list totally
        def generate_update(node, instance=True):
            n = gNMIComposer(self.device, node)
            json_value = n.get_json(instance=instance).encode()
            value = TypedValue(json_val=json_value)
            path = n.get_path(instance=instance)
            updates_update.append(Update(path=path, val=value))

        # the leaf-list value sequence under node_self is different from the
        # one under node_other
        def leaf_list_seq_is_different(tag):
            if [i.text for i in node_self.iterchildren(tag=tag)] == \
               [i.text for i in node_other.iterchildren(tag=tag)]:
                return False
            else:
                return True

        # the leaf-list value set under node_self is different from the one
        # under node_other
        def leaf_list_set_is_different(tag):
            s_list = [i.text for i in node_self.iterchildren(tag=tag)]
            o_list = [i.text for i in node_other.iterchildren(tag=tag)]
            if set(s_list) == set(o_list):
                return False
            else:
                return True

        # the leaf-list or list under node_self is empty
        def list_is_empty(tag):
            if [i for i in node_self.iterchildren(tag=tag)]:
                return False
            else:
                return True

        # the sequence of list instances under node_self is different from the
        # one under node_other
        def list_seq_is_different(tag):
            s_list = [i for i in node_self.iterchildren(tag=tag)]
            o_list = [i for i in node_other.iterchildren(tag=tag)]
            if [self.device.get_xpath(n) for n in s_list] == \
               [self.device.get_xpath(n) for n in o_list]:
                return False
            else:
                return True

        # all list instances under node_self have peers under node_other, and
        # the sequence of list instances under node_self that have peers under
        # node_other is same as the sequence of list instances under node_other
        def list_seq_is_inclusive(tag):
            s_list = [i for i in node_self.iterchildren(tag=tag)]
            o_list = [i for i in node_other.iterchildren(tag=tag)]
            s_seq = [self.device.get_xpath(n) for n in s_list]
            o_seq = [self.device.get_xpath(n) for n in o_list]
            if set(s_seq) <= set(o_seq) and \
               [i for i in s_seq if i in o_seq] == o_seq:
                return True
            else:
                return False

        in_s_not_in_o, in_o_not_in_s, in_s_and_in_o = \
            self._group_kids(node_self, node_other)
        for child_s in in_s_not_in_o:
            schema_node = self.device.get_schema_node(child_s)
            if schema_node.get('type') == 'leaf':
                generate_update(child_s)
            elif schema_node.get('type') == 'leaf-list':
                if child_s.tag not in done_list:
                    generate_replace(child_s, instance=False)
                    done_list.append(child_s.tag)
            elif schema_node.get('type') == 'container':
                generate_update(child_s)
            elif schema_node.get('type') == 'list':
                if schema_node.get('ordered-by') == 'user':
                    if child_s.tag not in done_list:
                        generate_replace(child_s, instance=False)
                        done_list.append(child_s.tag)
                else:
                    generate_update(child_s, instance=True)
        for child_o in in_o_not_in_s:
            schema_node = self.device.get_schema_node(child_o)
            if schema_node.get('type') == 'leaf':
                generate_delete(child_o)
            elif schema_node.get('type') == 'leaf-list':
                if child_o.tag not in done_list:
                    child_s = node_self.find(child_o.tag)
                    if child_s is None:
                        generate_delete(child_o, instance=False)
                    else:
                        generate_replace(child_s, instance=False)
                    done_list.append(child_o.tag)
            elif schema_node.get('type') == 'container':
                generate_delete(child_o)
            elif schema_node.get('type') == 'list':
                if schema_node.get('ordered-by') == 'user':
                    if list_seq_is_inclusive(child_o.tag):
                        generate_delete(child_o, instance=True)
                    else:
                        if child_o.tag not in done_list:
                            generate_replace(child_o, instance=False)
                            done_list.append(child_o.tag)
                else:
                    if list_is_empty(child_o.tag):
                        if child_o.tag not in done_list:
                            generate_delete(child_o, instance=False)
                            done_list.append(child_o.tag)
                    else:
                        generate_delete(child_o, instance=True)
        for child_s, child_o in in_s_and_in_o:
            schema_node = self.device.get_schema_node(child_s)
            if schema_node.get('type') == 'leaf':
                if child_s.text != child_o.text:
                    generate_update(child_s)
            elif schema_node.get('type') == 'leaf-list':
                if child_s.tag not in done_list:
                    if schema_node.get('ordered-by') == 'user':
                        if leaf_list_seq_is_different(child_s.tag):
                            generate_replace(child_s, instance=False)
                    else:
                        if leaf_list_set_is_different(child_s.tag):
                            generate_replace(child_s, instance=False)
                    done_list.append(child_s.tag)
            elif schema_node.get('type') == 'container':
                if BaseCalculator(self.device, child_s, child_o).ne:
                    d, r, u = self.node_sub(child_s, child_o)
                    paths_delete += d
                    updates_replace += r
                    updates_update += u
            elif schema_node.get('type') == 'list':
                if schema_node.get('ordered-by') == 'user':
                    if list_seq_is_different(child_s.tag):
                        if child_s.tag not in done_list:
                            generate_replace(child_s, instance=False)
                            done_list.append(child_s.tag)
                    else:
                        if BaseCalculator(self.device, child_s, child_o).ne:
                            d, r, u = self.node_sub(child_s, child_o)
                            paths_delete += d
                            updates_replace += r
                            updates_update += u
                else:
                    if BaseCalculator(self.device, child_s, child_o).ne:
                        d, r, u = self.node_sub(child_s, child_o)
                        paths_delete += d
                        updates_replace += r
                        updates_update += u
        return (paths_delete, updates_replace, updates_update)
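# --- Editor's usage sketch (not part of the original module) ---
# How the three classes compose, assuming `device`, `reply1`, and `reply2`
# come from the surrounding ncdiff API, and that BaseCalculator takes two
# lxml config roots (as the recursive calls above suggest):
#     config1 = gNMIParser(device, reply1).ele
#     config2 = gNMIParser(device, reply2).ele
#     set_request = gNMICalculator(device, config1, config2).sub
# The resulting SetRequest transitions a device from config2 to config1.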
| 41.875289 | 90 | 0.506866 | 4,144 | 36,264 | 4.230936 | 0.079151 | 0.033651 | 0.013688 | 0.013574 | 0.530029 | 0.445788 | 0.386357 | 0.352706 | 0.296127 | 0.251697 | 0 | 0.003807 | 0.405995 | 36,264 | 865 | 91 | 41.923699 | 0.81013 | 0.116259 | 0 | 0.415241 | 0 | 0 | 0.039971 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068429 | false | 0 | 0.023328 | 0 | 0.209953 | 0.001555 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f17945798753686fc982268f9cb799a7cdccba0f | 1,816 | py | Python | extensions/prefix_handler.py | Elfein7Night/ElfBot | 5967b2b87569badab3ad60d2ecaa139d86bbb093 | [
"MIT"
] | null | null | null | extensions/prefix_handler.py | Elfein7Night/ElfBot | 5967b2b87569badab3ad60d2ecaa139d86bbb093 | [
"MIT"
] | null | null | null | extensions/prefix_handler.py | Elfein7Night/ElfBot | 5967b2b87569badab3ad60d2ecaa139d86bbb093 | [
"MIT"
] | null | null | null | import logging

from discord import Message
from discord.ext.commands import Cog, Context, command, has_permissions, Bot

from utils.utils import log_event, db, get_dict
from extensions.extension_templates import DatabaseHandler

DEFAULT_PREFIX = '?'
PREFIXES_DB_KEY = 'prefixes_for_servers'


class PrefixDBHandler(DatabaseHandler):
    # On first joining a server, store the default prefix for it
    @Cog.listener()
    async def on_guild_join(self, guild: Context.guild):
        self.set_value_for_server(guild_id=guild.id, value=DEFAULT_PREFIX)
        log_event(f'Joined the server: {guild.name} - {guild.id}')

    @command(brief="Change the bot's prefix for this server")
    @has_permissions(administrator=True)
    async def pf(self, ctx: Context, prefix):
        self.set_value_for_server(guild_id=ctx.guild.id, value=prefix)
        message = f"set '{prefix}' as the prefix for the server '{ctx.guild}'"
        log_event(message)
        await ctx.send(f'{ctx.author.mention} {message}')


############################
#      STATIC METHODS      #
############################


def get_prefix_for_guild(guild_id: int):
    prefixes_raw_dict = db.get(PREFIXES_DB_KEY)
    if prefixes_raw_dict is not None:
        try:
            return get_dict(prefixes_raw_dict)[str(guild_id)]
        except KeyError:
            log_event(f"Failed trying to fetch prefix for server id {guild_id}", logging.CRITICAL)
            return DEFAULT_PREFIX
    log_event(f"Error fetching prefixes DB", logging.CRITICAL)
    return DEFAULT_PREFIX


# bot is passed by default by the API but is not needed for this function.
def get_prefix(_: Bot, message: Message):
    return get_prefix_for_guild(message.guild.id)


# expected entry point for the outside call to 'load_extension()'
def setup(_bot):
    _bot.add_cog(PrefixDBHandler(_bot, PREFIXES_DB_KEY))
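# --- Editor's wiring sketch (assumed entry point, not part of the original file) ---
# The dynamic-prefix callable above is passed at Bot construction time:
#     bot = Bot(command_prefix=get_prefix)
#     bot.load_extension('extensions.prefix_handler')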
| 34.923077 | 98 | 0.701542 | 250 | 1,816 | 4.88 | 0.372 | 0.051639 | 0.031967 | 0.02459 | 0.127049 | 0.045902 | 0.045902 | 0 | 0 | 0 | 0 | 0 | 0.180066 | 1,816 | 51 | 99 | 35.607843 | 0.819342 | 0.101872 | 0 | 0.060606 | 0 | 0 | 0.172721 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.151515 | 0.030303 | 0.393939 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f17b300d0d9a14ab776efbe26b4bf7268c4f862a | 4,116 | py | Python | snakeskin/config.py | healthverity/snakeskin-fabric | 31ba7fa5a71445eba76f89723c998d603704e0f9 | [
"Apache-2.0"
] | 5 | 2019-08-08T17:16:02.000Z | 2021-05-15T07:28:31.000Z | snakeskin/config.py | healthverity/snakeskin-fabric | 31ba7fa5a71445eba76f89723c998d603704e0f9 | [
"Apache-2.0"
] | 4 | 2019-08-20T15:07:12.000Z | 2020-07-31T17:50:51.000Z | snakeskin/config.py | healthverity/snakeskin-fabric | 31ba7fa5a71445eba76f89723c998d603704e0f9 | [
"Apache-2.0"
] | 2 | 2019-08-20T15:22:48.000Z | 2019-12-17T19:38:55.000Z | """
    Blockchain configuration
"""

import os
import json
from dataclasses import dataclass, field, replace
from typing import List, Mapping, Optional

import yaml
import dacite

from .models import Peer, Channel, User, Orderer, ChaincodeSpec
from .models.gateway import Gateway
from .constants import ChaincodeLanguage


@dataclass()
class GatewayConfig:
    """ A gateway config object """
    channel: str
    requestor: str
    endorsing_peers: List[str] = field(default_factory=list)
    orderers: List[str] = field(default_factory=list)
    chaincode: Optional[str] = None


@dataclass()
class BlockchainConfig:
    """ A gateway for accessing the blockchain """

    @classmethod
    def from_file(cls, file_path: str):
        """ Loads gateway config from a static file """
        ext = os.path.splitext(file_path)[1]
        with open(file_path) as inf:
            if ext == '.json':
                return cls.from_dict(json.load(inf))
            if ext in {'.yaml', '.yml'}:
                return cls.from_dict(yaml.load(inf, Loader=yaml.SafeLoader))
        raise ValueError(
            f'Unrecognized file extension for file {file_path}'
        )

    @classmethod
    def from_dict(cls, value: dict):
        """ Creates a gateway config from a dictionary """
        return dacite.from_dict(cls, value, config=dacite.Config(
            type_hooks={
                ChaincodeLanguage: ChaincodeLanguage
            }
        ))

    peers: Mapping[str, Peer] = field(default_factory=dict)
    orderers: Mapping[str, Orderer] = field(default_factory=dict)
    users: Mapping[str, User] = field(default_factory=dict)
    chaincodes: Mapping[str, ChaincodeSpec] = field(default_factory=dict)
    gateways: Mapping[str, GatewayConfig] = field(default_factory=dict)

    def __post_init__(self):
        # Set names to be the mapping key for all entities that weren't
        # provided names
        self.peers = {
            name: replace(peer, name=peer.name or name)
            for name, peer in self.peers.items()
        }
        self.orderers = {
            name: replace(orderer, name=orderer.name or name)
            for name, orderer in self.orderers.items()
        }
        self.users = {
            name: replace(user, name=user.name or name)
            for name, user in self.users.items()
        }
        self.chaincodes = {
            name: replace(chaincode, name=chaincode.name or name)
            for name, chaincode in self.chaincodes.items()
        }

    def get_gateway(self, name: str):
        """ Gets a gateway using the config name """
        if name not in self.gateways:
            raise KeyError(f'No gateway defined with name "{name}"')
        config = self.gateways[name]
        return Gateway(
            endorsing_peers=[
                self.get_peer(peer) for peer in config.endorsing_peers
            ],
            chaincode=self.get_chaincode(config.chaincode) if config.chaincode else None,
            requestor=self.get_user(config.requestor),
            orderers=[
                self.get_orderer(orderer) for orderer in config.orderers
            ],
            channel=Channel(name=config.channel)
        )

    def get_peer(self, name: str):
        """ Gets a peer using the config name """
        if not name in self.peers:
            raise KeyError(f'No peer defined with name "{name}"')
        return self.peers[name]

    def get_orderer(self, name: str):
        """ Gets an orderer using the config name """
        if not name in self.orderers:
            raise KeyError(f'No orderer defined with name "{name}"')
        return self.orderers[name]

    def get_user(self, name: str):
        """ Gets a user using the config name """
        if not name in self.users:
            raise KeyError(f'No user defined with name "{name}"')
        return self.users[name]

    def get_chaincode(self, name: str):
        """ Gets a chaincode spec using the config name """
        if not name in self.chaincodes:
            raise KeyError(f'No chaincode defined with name "{name}"')
        return self.chaincodes[name]
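# --- Editor's usage sketch (file and gateway names assumed) ---
#     config = BlockchainConfig.from_file('network.yaml')
#     gateway = config.get_gateway('my-gateway')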
| 34.3 | 89 | 0.617104 | 502 | 4,116 | 4.992032 | 0.207171 | 0.021548 | 0.053073 | 0.04589 | 0.189944 | 0.122905 | 0.052674 | 0.052674 | 0.052674 | 0 | 0 | 0.00034 | 0.2845 | 4,116 | 119 | 90 | 34.588235 | 0.850594 | 0.107629 | 0 | 0.067416 | 0 | 0 | 0.067388 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089888 | false | 0 | 0.101124 | 0 | 0.41573 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f17d0d49e5fc9b163d91e5df674590e6e4cad918 | 1,570 | py | Python | quantifier.py | daibiaoxuwu/cnn-text-classification-tf | 595008c44acef7fa4dd45d1d943f0b8314e6b635 | [
"Apache-2.0"
] | null | null | null | quantifier.py | daibiaoxuwu/cnn-text-classification-tf | 595008c44acef7fa4dd45d1d943f0b8314e6b635 | [
"Apache-2.0"
] | null | null | null | quantifier.py | daibiaoxuwu/cnn-text-classification-tf | 595008c44acef7fa4dd45d1d943f0b8314e6b635 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import nltk

from papersmith.editor.issue import Issue


def check(content):
    cno = ['many', 'few', 'a few', 'a number of', 'the number of', 'numbers of', 'a quantity of', 'quantities of', 'a good many', 'a great many', 'a large number of', 'a great number of', 'scores of', 'dozens of']
    uno = ['much', 'little', 'a little', 'huge amounts of', 'a great amount of', 'a large amount of', 'a great deal of', 'a large deal of', 'a plenty of', 'a good supply of', 'a piece of', 'a bit of', 'an item of', 'an article of', 'a bottle of', 'a cup of', 'a drop of', 'a glass of']
    uncountable_nouns = eval(open('papersmith/editor/grammar/uncountable_nouns.txt').read())
    issues = []
    w = ''
    for i in range(len(content)):
        # accumulate letters (and apostrophes) into the current word
        if (ord(content[i]) > 64 and ord(content[i]) < 91) or (ord(content[i]) > 96 and ord(content[i]) < 123) or content[i] == "'":
            w += content[i]
        if len(w) == 1 and w == '\'':
            w = ''
            continue
        if len(w) == 0:
            continue
        if w == 'many' or w == 'few':
            pos = i
            sentence = ''
            # scan ahead to the end of the sentence (the inner loop advances i
            # manually), then POS-tag it and look for uncountable nouns
            for j in range(1000):
                if i >= len(content) or content[i] == '.' or content[i] == '!' or content[i] == '?' or content[i] == ',' or content[i] == ':' or content[i] == ';':
                    t = nltk.word_tokenize(sentence)
                    l = nltk.pos_tag(t)
                    for j in l:
                        if j[1] == 'NN':
                            if j[0] in uncountable_nouns:
                                if w == 'many':
                                    issues.append(Issue(2, 1, [pos-4], [pos], 'much', 0))
                                else:
                                    issues.append(Issue(2, 1, [pos-3], [pos], 'little', 0))
                    break
                sentence += content[i]
                i += 1
            w = ''
    return issues
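# --- Editor's usage sketch (example sentence assumed; requires the nltk data
# and the uncountable_nouns.txt word list referenced above) ---
#     issues = check('There are many furniture in the room.')
#     # -> expected: one Issue suggesting 'much' for the uncountable noun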
| 35.681818 | 264 | 0.564331 | 256 | 1,570 | 3.441406 | 0.339844 | 0.051078 | 0.079455 | 0.068104 | 0.118048 | 0.118048 | 0.068104 | 0.068104 | 0.068104 | 0.068104 | 0 | 0.022296 | 0.228662 | 1,570 | 43 | 265 | 36.511628 | 0.705202 | 0.013376 | 0 | 0.142857 | 0 | 0 | 0.276665 | 0.030381 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0.057143 | 0 | 0.114286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f17d6fabf9fa920276d161547a20c9812814326f | 4,038 | py | Python | doc/ansible/__init__.py | jansenm/ansible-jenkins-roles | 73330c53d197b5161d2b46dd27aa609e03902485 | [
"BSD-3-Clause"
] | 10 | 2015-12-30T17:49:19.000Z | 2019-12-16T15:58:11.000Z | doc/ansible/__init__.py | jansenm/ansible-jenkins-roles | 73330c53d197b5161d2b46dd27aa609e03902485 | [
"BSD-3-Clause"
] | null | null | null | doc/ansible/__init__.py | jansenm/ansible-jenkins-roles | 73330c53d197b5161d2b46dd27aa609e03902485 | [
"BSD-3-Clause"
] | 8 | 2016-10-21T04:13:14.000Z | 2020-11-10T19:48:06.000Z | import docutils
import docutils.nodes
import docutils.parsers.rst
import docutils.parsers.rst.directives

import sphinx.addnodes
import sphinx.application
import sphinx.directives
import sphinx.domains
import sphinx.environment
import sphinx.locale
import sphinx.roles
import sphinx.util.compat
import sphinx.util.docfields
import sphinx.util.nodes


class AnsibleRoleRole(sphinx.roles.XRefRole):

    def process_link(self, env, refnode, has_explicit_title, title, target):
        return title, target


class AnsibleRoleDirective(sphinx.directives.ObjectDescription):
    # :BUG: Something is wrong (just a test)

    required_arguments = 1

    doc_field_types = [
        sphinx.util.docfields.GroupedField(
            'default',
            label=sphinx.locale.l_('Defaults'),
            names=('default', 'default')
        ),
        sphinx.util.docfields.Field(
            'dependency',
            label=sphinx.locale.l_('Dependencies'),
            names=('dependency', 'depend'),
            rolename='role',
            bodyrolename='role'
        ),
        sphinx.util.docfields.TypedField(
            'parameter',
            label=sphinx.locale.l_('Parameters'),
            names=('param', 'parameter', 'arg', 'argument'),
            typerolename='role',
            typenames=('type',)
        ),
        sphinx.util.docfields.Field(
            'become',
            label=sphinx.locale.l_('Uses become'),
            names=('become',)  # was ('become'): a bare string, not a tuple
        )
    ]

    option_spec = {
        'noindex': docutils.parsers.rst.directives.flag
    }

    has_content = True

    def handle_signature(self, sig: str, signode: sphinx.addnodes.desc_signature):
        (ns, _, rolename) = sig.rpartition('/')
        signode += sphinx.addnodes.desc_annotation('role', 'Role ')
        signode += sphinx.addnodes.desc_addname(ns, "{ns} ".format(ns=ns))
        signode += sphinx.addnodes.desc_name(rolename, rolename)
        return 'role-' + sig

    def add_target_and_index(self, name, sig, signode):
        targetname = name
        signode['ids'].append(targetname)
        self.env.domaindata['ansible']['roles'][name] = (self.env.docname, name)
        self.state.document.note_explicit_target(signode)


class AnsibleDomain(sphinx.domains.Domain):
    """Ansible domain"""
    name = "ansible"
    label = "Ansible"

    object_types = {
        'role': sphinx.domains.ObjType(sphinx.locale.l_('role'), 'role')
    }

    directives = {
        'role': AnsibleRoleDirective
    }

    roles = {
        'role': AnsibleRoleRole()
    }

    initial_data = {
        "roles": {}
    }

    def clear_doc(self, doc):
        # iterate over a copy: entries may be deleted during the loop
        for name in list(self.data['roles']):
            if doc == self.data['roles'][name][1]:
                del self.data['roles'][name]

    def get_objects(self):
        for docname, name in self.data['roles'].values():
            yield name, name, 'role', docname, 'role-' + name, 1

    def resolve_xref(self, env, fromdocname, builder,
                     type, target, node, contnode):
        print(target)
        if (type == "role"):
            for (docname, name) in self.data['roles'].values():
                if name == target:
                    print("Yes")
                    return sphinx.util.nodes.make_refnode(
                        builder,
                        fromdocname,
                        docname,
                        name,
                        contnode
                    )
        else:
            # print("here")
            # print(node)
            # print(contnode)
            # print(type)
            # print(target)
            # print(builder)
            # print(fromdocname)
            # print(env)
            return

    def resolve_any_xref(self, env, fromdocname, builder,
                         type, target, node, contnode):
        print("resolve_xref")
        print(type)
        print(target)


def setup(app: sphinx.application.Sphinx):
    """Initialize the sphinx extension for ansible.
    """
    app.add_domain(AnsibleDomain)
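# --- Editor's usage sketch (reStructuredText, role names assumed) ---
# With the domain registered, documents can declare and cross-reference roles:
#     .. ansible:role:: myns/myrole
#
#        :param str version: which version to deploy
#        :depend: myns/base
#
# and link to it elsewhere with :ansible:role:`myns/myrole`.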
| 28.041667 | 82 | 0.567112 | 398 | 4,038 | 5.668342 | 0.326633 | 0.053191 | 0.04211 | 0.031915 | 0.089096 | 0.080674 | 0.080674 | 0.080674 | 0.049645 | 0.049645 | 0 | 0.001078 | 0.311045 | 4,038 | 143 | 83 | 28.237762 | 0.809849 | 0.053739 | 0 | 0.085714 | 0 | 0 | 0.074698 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.07619 | false | 0 | 0.133333 | 0.009524 | 0.371429 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f17fd326827766f921f6b7614a4cdc197d55723b | 2,068 | py | Python | odps/pai/nodes/transform_nodes.py | ZZHGit/aliyun-odps-python-sdk | e1c39378863ec7a1947487acab38125ac77f178e | [
"Apache-2.0"
] | null | null | null | odps/pai/nodes/transform_nodes.py | ZZHGit/aliyun-odps-python-sdk | e1c39378863ec7a1947487acab38125ac77f178e | [
"Apache-2.0"
] | null | null | null | odps/pai/nodes/transform_nodes.py | ZZHGit/aliyun-odps-python-sdk | e1c39378863ec7a1947487acab38125ac77f178e | [
"Apache-2.0"
] | 1 | 2019-09-18T05:35:29.000Z | 2019-09-18T05:35:29.000Z | # encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

from ..core.dag import BaseDagNode, DagEndpointType
from ..nodes.exporters import get_input_table_name, get_input_partitions, get_output_table_name, \
    get_output_table_partitions


class SplitNode(BaseDagNode):
    def __init__(self, percentage):
        super(SplitNode, self).__init__("split")
        self.marshal({
            "parameters": {
                "fraction": percentage,
            },
            "inputs": [(1, "input", DagEndpointType.DATA)],
            "outputs": [(1, "output1", DagEndpointType.DATA), (2, "output2", DagEndpointType.DATA)]
        })
        self.add_exporter("inputTableName", lambda context: get_input_table_name(context, self, "input"))
        self.add_exporter("inputTablePartitions", lambda context: get_input_partitions(context, self, "input"))
        self.add_exporter("output1TableName", lambda context: get_output_table_name(context, self, "output1"))
        self.add_exporter("output1TablePartition", lambda context: get_output_table_partitions(context, self, "output1"))
        self.add_exporter("output2TableName", lambda context: get_output_table_name(context, self, "output2"))
        self.add_exporter("output2TablePartition", lambda context: get_output_table_partitions(context, self, "output2"))
| 50.439024 | 121 | 0.729207 | 256 | 2,068 | 5.726563 | 0.453125 | 0.040928 | 0.057299 | 0.060027 | 0.195089 | 0.195089 | 0.122783 | 0.122783 | 0 | 0 | 0 | 0.010545 | 0.174565 | 2,068 | 40 | 122 | 51.7 | 0.848272 | 0.37234 | 0 | 0 | 0 | 0 | 0.157031 | 0.032813 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.105263 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f17fd5450289443b6d91376201627bb8e37f188a | 12,302 | py | Python | kk/api/views.py | icw82/django-kk | b84c41ceb9f6de4fc94ef4b353583bfec70d6fed | [
"MIT"
] | null | null | null | kk/api/views.py | icw82/django-kk | b84c41ceb9f6de4fc94ef4b353583bfec70d6fed | [
"MIT"
] | null | null | null | kk/api/views.py | icw82/django-kk | b84c41ceb9f6de4fc94ef4b353583bfec70d6fed | [
"MIT"
] | null | null | null | import re
import datetime
import time
import urllib
import json
from inspect import ismethod
from django.views import generic
from django.utils import timezone
from django.utils.decorators import classonlymethod
from django.db.models import (
Model, Manager, AutoField, CharField
)
from django.db.models.query import QuerySet
from django.db.models.fields.files import ImageFieldFile
from django.core.exceptions import FieldDoesNotExist
from django.http import (HttpResponse,
HttpResponseNotFound, HttpResponseForbidden)
from .. import utils
DEFAULT_PAGE_SIZE = 200
DEFAULT_PAGE_SIZE_MAX = 200
class Stream:
user = None
query = None
params = None # from path
count = None
skip = None
keys = [
('count', int, DEFAULT_PAGE_SIZE),
('skip', int, 0)
]
def __init__(self, HttpRequest, params):
self.user = HttpRequest.user
self.query = HttpRequest.GET
self.params = params
self.client_params = {}
for key, type, default in self.keys:
value = HttpRequest.GET.get(key, default)
self.client_params[key] = type(value)
def convertToJSON(method):
def wrapper(self, HttpRequest, **kwargs):
stream = method(self, HttpRequest, **kwargs)
if isinstance(stream.data, HttpResponse):
return stream.data
if stream.data == None:
return HttpResponseNotFound()
response = HttpResponse(
json.dumps(
stream.data,
ensure_ascii = False,
sort_keys = True
),
content_type = 'application/json'
)
if hasattr(stream, 'total'):
response['Total'] = stream.total
if hasattr(stream, 'skip'):
response['Skip'] = stream.skip
if hasattr(stream, 'count'):
response['Count'] = stream.count
# print(stream)
return response
return wrapper
class Base(generic.View):
model = None
key = 'id'
# data = None
page_size = DEFAULT_PAGE_SIZE
page_size_max = DEFAULT_PAGE_SIZE_MAX
@classonlymethod
def as_api(self, **kwargs):
self.dynamic_filters = {};
if len(kwargs) > 0:
if 'filters' in kwargs:
# Представление получает новые фильтры из urls.py (.as_api())
filters = kwargs['filters']
for key in filters:
# print('> > >', key)
value = filters[key]
if type(value) == tuple and len(value) == 2:
self.dynamic_filters.update({
key: value
})
return self.as_view()
available_filters = []
dynamic_filters = {}
def getFilters(self, stream):
# print('- - - - - - - - -')
# print('- - - - - - - - -')
# print(self.model)
# print('- - - - - - - - -')
# print('- - - - - - - - -')
filters = {}
try:
field = self.model._meta.get_field('status')
filters.update({
'status': True
})
except FieldDoesNotExist:
pass
#stream.user.is_staff or stream.user.is_superuser
try:
field = self.model._meta.get_field('pub_date')
filters.update({
'pub_date__lt': timezone.now()
})
except FieldDoesNotExist:
pass
if len(self.dynamic_filters) > 0:
# print('... dynamic_filters:', self.dynamic_filters)
for key in self.dynamic_filters:
param_key = self.dynamic_filters[key][0]
param_class = self.dynamic_filters[key][1]
# print('param_key', param_key)
# print('param_class', param_class)
if param_key in stream.params:
value = param_class(stream.params[param_key])
filters[key] = value
# for filter in self.available_filters:
# if (stream.query.__contains__(filter[0])):
# self.filters[filter[1]] = stream.query.__getitem__(filter[0])
return filters
orders = []
available_orders = []
# Данные
def getResource(self, HttpRequest, params):
stream = Stream(HttpRequest, params)
input_key, field_key = self.getKeys(self.key)
try:
query = {
field_key: stream.params[input_key]
}
stream.response = self.model.objects.get(**query)
except self.model.DoesNotExist:
stream.response = HttpResponseNotFound()
return stream
def getCollection(self, HttpRequest, params):
stream = Stream(HttpRequest, params)
stream.response = self.model.objects
# Проверка и вся хуйня
# Фильтры
filters = self.getFilters(stream)
stream.response = stream.response.filter(**filters)
# print('... filters', filters)
# print('..query..', stream.response.query)
# Сортировка
if (len(self.orders) > 0):
stream.response = stream.response.order_by(*self.orders)
# Пагинация
stream.total = stream.response.count()
stream.skip = stream.client_params['skip']
stream.count = stream.client_params['count']
if (stream.skip > stream.total):
stream.skip = stream.total
elif stream.skip < 0:
stream.skip = 0
if stream.count < 0:
stream.count = 0
elif stream.count > DEFAULT_PAGE_SIZE_MAX:
stream.count = DEFAULT_PAGE_SIZE_MAX
stream.response = stream.response[
stream.skip : stream.skip + stream.count
]
return stream
def getKeys(self, string):
# line = re.search(r'\s-[d]*$', string)
# line = re.sub(r'\s-[d]*', '', string)
# print('..', line)
if ' as ' in string:
orig, view = string.split(' as ')
else:
orig = view = string
return orig, view
# NOTE: Префикс используется?
def export__resource(self, resource, schema = None, prefix = None):
data = {};
if type(schema) != tuple:
schema = self.scheme
def get_attr_by_path(resource, path):
key = path.pop(0)
# print('..', key, resource)
if ismethod(resource):
resource = resource()
if (resource and hasattr(resource, key)):
value = getattr(resource, key)
else:
value = None
# print(resource)
# print(key)
if len(path) == 0:
if isinstance(value, datetime.date):
return(value.isoformat())
if isinstance(value, datetime.time):
return(value.isoformat())
if isinstance(value, datetime.datetime):
return(value.isoformat())
if ismethod(value):
return value()
return value
else:
return get_attr_by_path(value, path)
for item in schema:
if type(item) == str:
orig, view = self.getKeys(item)
if prefix:
orig = prefix + orig
path = orig.split('.')
data[view] = get_attr_by_path(resource, path)
elif type(item) == tuple:
orig, view = self.getKeys(item[0])
keys = item[1]
model = getattr(resource, orig)
if isinstance(model, Manager):
resources = model.all()
if type(keys) == tuple:
data[view] = self.export(
resources,
schema = keys,
)
elif type(keys) == str:
data[view] = [ i[keys] for i in self.export(
resources,
schema = (keys, ),
)]
else:
print('*** Manager: Некорректный тип:', type(keys))
elif isinstance(model, Model):
if type(keys) == str:
keys = (keys, )
if type(keys) == tuple:
data[view] = self.export(
model,
schema = keys,
)
else:
print('*** Model: Некорректный тип:', type(keys))
else:
print('*** Не Менеджер! ***', manager)
else:
print('Что-то непонятное')
return data
# Конвертирование данных для выдачи
def export(self, response, schema = None):
if isinstance(response, QuerySet):
if not self.scheme:
return None
data = []
# FIXME: ЗАПРОСЫ второго уровня НЕ ФИЛЬТРУЮТСЯ
# filters = {}
#
# for filter_key in self.__instance['filters']:
# filter = {filter_key: self.__instance['filters'][filter_key]}
# try:
# field = response.model._meta.get_field(filter_key)
# filters.update(filter)
# except FieldDoesNotExist:
# pass
#
## print('....', filters)
#
# response = response.filter(**filters)
for resource in response:
item = self.export__resource(resource, schema)
                if item is not None:
data.append(item)
elif isinstance(response, Model):
if not self.scheme:
return None
            data = self.export__resource(response, schema)
else:
return None
return data
def get_exported_resource(self, HttpRequest, params):
stream = self.getResource(HttpRequest, params)
stream.data = self.export(stream.response)
return stream
def get_exported_collection(self, HttpRequest, params):
stream = self.getCollection(HttpRequest, params)
stream.data = self.export(stream.response)
return stream
class ResourceMixin:
# def __init__(self):
# self.test = True
    '''Resource header'''
def head(self, HttpRequest, **kwargs):
pass
    '''Resource'''
@convertToJSON
def get(self, HttpRequest, **kwargs):
stream = self.get_exported_resource(HttpRequest, kwargs)
return stream
    '''Modify a resource'''
def post(self, HttpRequest, *args, **kwargs):
pass
    '''Delete a resource'''
def delete(self, HttpRequest, **kwargs):
pass
class CollectionMixin:
    '''Collection header'''
def head(self, HttpRequest, **kwargs):
pass
    '''Collection'''
@convertToJSON
def get(self, HttpRequest, **kwargs):
stream = self.get_exported_collection(HttpRequest, kwargs)
return stream
    '''New resource in the collection'''
def post(self, HttpRequest, *args, **kwargs):
pass
from django.urls import path
class Scheme:
pass
class API():
model = None
name = None
scheme = []
# filters = {}
# order = []
def __init__(self, Model):
self.model = Model
self.name = Model.KK.name_plural
self.scheme = Model.KK.scheme
class Mixin(Base):
model = self.model
scheme = self.scheme
class ResourceView(Mixin, ResourceMixin): pass
        class CollectionView(Mixin, CollectionMixin): pass
        self.Mixin = Mixin
        self.ResourceView = ResourceView
        self.CollectionView = CollectionView
def getUrlPatterns(self):
patterns = []
patterns.append(
path(
'{}/<int:id>/'.format(self.name),
self.ResourceView.as_api()
)
)
patterns.append(
path(
'{}/'.format(self.name),
self.CollectionView.as_api()
)
)
return patterns
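
# Usage sketch (hypothetical Book model; assumes Model.KK exposes name_plural
# and scheme, and that Base provides as_api()):
#   api = API(Book)
#   urlpatterns = api.getUrlPatterns()  # e.g. 'books/<int:id>/' and 'books/'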
| 27.037363 | 78 | 0.520728 | 1,173 | 12,302 | 5.356351 | 0.179881 | 0.033424 | 0.016712 | 0.011459 | 0.172847 | 0.138469 | 0.120325 | 0.049976 | 0.039472 | 0.039472 | 0 | 0.003121 | 0.374898 | 12,302 | 454 | 79 | 27.096916 | 0.813914 | 0.139327 | 0 | 0.236111 | 0 | 0 | 0.021966 | 0 | 0 | 0 | 0 | 0.002203 | 0 | 1 | 0.076389 | false | 0.034722 | 0.055556 | 0 | 0.298611 | 0.013889 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f182e3c76127620cdb609d77542bf57fc763d905 | 3,616 | py | Python | Scripts/generateOTTagsModule.py | justvanrossum/fontgoggles | 0054f17d3c82938f62468c0cdb7d90b68c76d396 | [
"Apache-2.0"
] | 308 | 2020-02-17T10:08:17.000Z | 2022-03-27T19:47:49.000Z | Scripts/generateOTTagsModule.py | justvanrossum/fontgoggles | 0054f17d3c82938f62468c0cdb7d90b68c76d396 | [
"Apache-2.0"
] | 147 | 2020-02-17T10:03:56.000Z | 2022-03-28T17:37:09.000Z | Scripts/generateOTTagsModule.py | justvanrossum/fontgoggles | 0054f17d3c82938f62468c0cdb7d90b68c76d396 | [
"Apache-2.0"
] | 41 | 2020-02-25T16:15:51.000Z | 2022-03-12T00:02:11.000Z | import os
import re
def parse(data):
start = data.find("<tbody>")
end = data.find("</tbody>")
data = data[start+7:end]
for chunk in re.findall(r"<tr>.+?</tr>", data, re.DOTALL):
fields = re.findall(r"<td>(.+?)</td>", chunk, re.DOTALL)
parsedFields = []
for field in fields:
m = re.search(r'href="(.+?)"', field)
if m is not None and m.group(1) != "#foot":
parsedFields.append(m.group(1))
tagParts = field.split("'")
if len(tagParts) >= 2:
                parsedFields.append(tagParts[1].replace("\xa0", " "))
else:
parsedFields.append(field)
if parsedFields:
yield parsedFields
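
# Each yielded row is one table row's parsed cells, e.g. (hypothetical):
#   ['featurelist#aalt', 'aalt', 'Access All Alternates']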
def formatFeatures(data, baseURL):
print("features = {")
print(" # tag, friendly name, documentation URL")
for link, tag, friendlyName in data:
if tag == 'cv01':
tags = [f"cv{i:02d}" for i in range(1, 100)]
else:
tags = [tag]
for tag in tags:
print(f" {tag!r}: ({friendlyName!r}, {baseURL+link!r}),")
print("}")
def formatScripts(data):
print("scripts = {")
print(" # tag, friendly name")
duplicates = {}
for i, (friendlyName, tag) in enumerate(data):
if tag in duplicates:
duplicates[tag] = duplicates[tag] + ", " + friendlyName
data[i] = (None, None) # skip
else:
duplicates[tag] = friendlyName
for _, tag in data:
if tag is None:
continue
friendlyName = duplicates[tag]
print(f" {tag!r}: {friendlyName!r},")
print("}")
def formatLanguages(data):
print("languages = {")
print(" # tag, friendly name, ISO 639 IDs (if applicable)")
for friendlyName, *fields in data:
tag = fields[0]
if len(tag) < 4:
tag += (4 - len(tag)) * " "
assert len(tag) == 4, tag
if len(fields) > 1:
assert len(fields) == 2
isoCodes = [isoCode.strip() for isoCode in fields[1].split(",")]
else:
isoCodes = []
t = (friendlyName,) + tuple(isoCodes)
print(f" {tag!r}: {t},")
print("}")
# https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist
# https://docs.microsoft.com/en-us/typography/opentype/spec/scripttags
# https://docs.microsoft.com/en-us/typography/opentype/spec/languagetags
if __name__ == "__main__":
import sys
import time
baseURL = "https://docs.microsoft.com/en-us/typography/opentype/spec/"
if len(sys.argv) > 1:
with open(sys.argv[1]) as f:
html = f.read()
pages = [html]
else:
import urllib.request
pages = []
print(f"# Generated by {os.path.basename(__file__)}")
print("# Scraped from:")
for page in ["featurelist", "scripttags", "languagetags"]:
url = baseURL + page
print(f"# {url}")
with urllib.request.urlopen(url) as fp:
html = fp.read().decode("utf-8", errors="replace")
pages.append(html)
print()
print()
print("__all__ = ['features', 'scripts', 'languages']")
print()
for html in pages:
print()
parsed = list(parse(html))
if "<title>Registered features" in html:
formatFeatures(parsed, baseURL)
elif "<title>Script tags" in html:
formatScripts(parsed)
elif "<title>Language system tags" in html:
formatLanguages(parsed)
else:
assert 0, "huh."
| 32 | 76 | 0.535675 | 412 | 3,616 | 4.660194 | 0.315534 | 0.015625 | 0.0375 | 0.04375 | 0.121875 | 0.121875 | 0.097917 | 0.097917 | 0.097917 | 0 | 0 | 0.011665 | 0.3125 | 3,616 | 112 | 77 | 32.285714 | 0.76066 | 0.059181 | 0 | 0.134021 | 0 | 0 | 0.188402 | 0.008243 | 0 | 0 | 0 | 0 | 0.030928 | 1 | 0.041237 | false | 0 | 0.051546 | 0 | 0.092784 | 0.206186 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f183e8d9598657f18e0da5ccceacb5083a04c145 | 4,000 | py | Python | tests/test_request_logging_middleware.py | quoth/fastapi-cloud-logging | 680f5ab6d90a6d06d1037845a23f355cc6d3b0fe | [
"MIT"
] | null | null | null | tests/test_request_logging_middleware.py | quoth/fastapi-cloud-logging | 680f5ab6d90a6d06d1037845a23f355cc6d3b0fe | [
"MIT"
] | null | null | null | tests/test_request_logging_middleware.py | quoth/fastapi-cloud-logging | 680f5ab6d90a6d06d1037845a23f355cc6d3b0fe | [
"MIT"
] | null | null | null | import pytest
from fastapi import Request
from pytest_mock import MockerFixture
from starlette.datastructures import Headers
from fastapi_cloud_logging.request_logging_middleware import (
_FASTAPI_REQUEST_CONTEXT,
RequestLoggingMiddleware,
)
@pytest.fixture
def middleware(mocker: MockerFixture) -> RequestLoggingMiddleware:
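    # app and dispatch are irrelevant to these tests, so plain mocks suffice.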
return RequestLoggingMiddleware(app=mocker.Mock(), dispatch=mocker.Mock())
def test__set_request_context(middleware: RequestLoggingMiddleware):
request = Request(
{
"type": "http",
"method": "GET",
"root_path": "https://example.com/",
"path": "",
"headers": Headers({}).raw,
"client": ("127.0.0.1", 80),
}
)
middleware.set_request_context(request=request)
request_context = _FASTAPI_REQUEST_CONTEXT.get()
assert request_context is not None
assert request_context.protocol == "https"
assert request_context.request_method == "GET"
assert request_context.remote_ip == "127.0.0.1"
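
# The parametrized cases below also verify that X-Forwarded-For, when present,
# takes precedence over the raw socket client address for remote_ip.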
@pytest.mark.parametrize(
(
"example_request, http_method, protocol, ip_address, content_length,"
"url, user_agent, referer, trace_content"
),
[
(
Request(
{
"type": "http",
"method": "GET",
"root_path": "https://example.com/",
"path": "",
"headers": Headers({"X-Forwarded-For": "192.168.0.1"}).raw,
"client": ("127.0.0.1", 80),
}
),
"GET",
"https",
"192.168.0.1",
None,
"https://example.com/",
None,
None,
None,
),
(
Request(
{
"type": "http",
"method": "POST",
"root_path": "https://example.com/",
"path": "",
"headers": Headers(
{"User-Agent": "curl 7.79.1", "X-Forwarded-For": "192.168.0.1"}
).raw,
"client": ("127.0.0.1", 80),
}
),
"POST",
"https",
"192.168.0.1",
None,
"https://example.com/",
"curl 7.79.1",
None,
None,
),
(
Request(
{
"type": "http",
"method": "POST",
"root_path": "https://example.com/",
"path": "",
"headers": Headers(
{
"User-Agent": "curl 7.79.1",
"X-Forwarded-For": "192.168.0.1",
"X-Cloud-Trace-Context": "105445aa7843bc8bf206b12000100000/1;o=1",
}
).raw,
"client": ("127.0.0.1", 80),
}
),
"POST",
"https",
"192.168.0.1",
None,
"https://example.com/",
"curl 7.79.1",
None,
"105445aa7843bc8bf206b12000100000/1;o=1",
),
],
)
def test__parse_request(
middleware: RequestLoggingMiddleware,
example_request: Request,
http_method,
protocol,
ip_address,
content_length,
url,
user_agent,
referer,
trace_content,
):
request_context = middleware._parse_request(example_request)
assert request_context.request_method == http_method
assert request_context.protocol == protocol
assert request_context.content_length == content_length
assert request_context.request_url == url
assert request_context.remote_ip == ip_address
assert request_context.user_agent == user_agent
assert request_context.referer == referer
assert request_context.cloud_trace_content == trace_content
| 30.075188 | 94 | 0.48975 | 357 | 4,000 | 5.29972 | 0.187675 | 0.133192 | 0.12685 | 0.02537 | 0.450846 | 0.386364 | 0.386364 | 0.377378 | 0.377378 | 0.360465 | 0 | 0.063544 | 0.38625 | 4,000 | 132 | 95 | 30.30303 | 0.707128 | 0 | 0 | 0.448 | 0 | 0 | 0.19275 | 0.02425 | 0 | 0 | 0 | 0 | 0.096 | 1 | 0.024 | false | 0 | 0.04 | 0.008 | 0.072 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f186dbe8c5a6f32bd57f511982e06c3f8a0977c9 | 1,055 | py | Python | api/tests/snapshots/snap_test_api.py | City-of-Helsinki/notification-service-api | 1c4c6d900de5f2ce9ce9becb6774d10892084ede | [
"MIT"
] | null | null | null | api/tests/snapshots/snap_test_api.py | City-of-Helsinki/notification-service-api | 1c4c6d900de5f2ce9ce9becb6774d10892084ede | [
"MIT"
] | 17 | 2020-07-07T12:08:34.000Z | 2021-06-10T20:26:20.000Z | api/tests/snapshots/snap_test_api.py | City-of-Helsinki/notification-service-api | 1c4c6d900de5f2ce9ce9becb6774d10892084ede | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots["test_send_sms 1"] = {
"errors": [],
"messages": {"+358461231231": {"converted": "+358461231231", "status": "CREATED"}},
"warnings": [],
}
snapshots["test_webhook_delivery_log 1"] = {
"errors": [],
"messages": {"+358461231231": {"converted": "+358461231231", "status": "CREATED"}},
"warnings": [],
}
snapshots["test_webhook_delivery_log 2"] = {
"errors": [],
"messages": {
"+358461231231": {
"billingref": "Palvelutarjotin",
"destination": "+358461231231",
"sender": "hel.fi",
"smscount": "1",
"status": "DELIVERED",
"statustime": "2020-07-21T09:18:00Z",
}
},
"warnings": [],
}
snapshots["test_get_delivery_log 1"] = {
"errors": [],
"messages": {"+358461231231": {"converted": "+358461231231", "status": "CREATED"}},
"warnings": [],
}
| 25.731707 | 87 | 0.563033 | 88 | 1,055 | 6.568182 | 0.522727 | 0.089965 | 0.179931 | 0.140138 | 0.484429 | 0.484429 | 0.484429 | 0.484429 | 0.484429 | 0.484429 | 0 | 0.144608 | 0.22654 | 1,055 | 40 | 88 | 26.375 | 0.563725 | 0.058768 | 0 | 0.34375 | 0 | 0 | 0.456566 | 0.071717 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f18777bf37a356b77a08bfd81442ba10cd669ef2 | 2,552 | py | Python | metrics.py | alexjercan/normals-estimation | 6f18f9248b7c01842f0d4ba57cde5227a893f506 | [
"MIT"
] | null | null | null | metrics.py | alexjercan/normals-estimation | 6f18f9248b7c01842f0d4ba57cde5227a893f506 | [
"MIT"
] | null | null | null | metrics.py | alexjercan/normals-estimation | 6f18f9248b7c01842f0d4ba57cde5227a893f506 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Developed by Alex Jercan <jercan_alex27@yahoo.com>
#
# References:
# - https://github.com/XinJCheng/CSPN/blob/b3e487bdcdcd8a63333656e69b3268698e543181/cspn_pytorch/utils.py#L19
# - https://web.eecs.umich.edu/~fouhey/2016/evalSN/evalSN.html
#
from math import radians
import torch
import torch.nn.functional as F
class MetricFunction:
def __init__(self, batch_size) -> None:
self.batch_size = batch_size
self.total_size = 0
self.error_sum = {}
self.error_avg = {}
def evaluate(self, predictions, targets):
normal_p = predictions
normal_gt = targets
error_val = evaluate_error_normal(normal_p, normal_gt)
self.total_size += self.batch_size
self.error_avg = avg_error(self.error_sum, error_val, self.total_size, self.batch_size)
return self.error_avg
def show(self):
error = self.error_avg
format_str = ('======NORMALS=======\nMSE=%.4f\tRMSE=%.4f\tMAE=%.4f\tMME=%.4f\nTANGLE11.25=%.4f\tTANGLE22.5=%.4f\tTANGLE30.0=%.4f')
return format_str % (error['N_MSE'], error['N_RMSE'], error['N_MAE'], error['N_MME'], \
error['N_TANGLE11.25'], error['N_TANGLE22.5'], error['N_TANGLE30.0'])
def evaluate_error_normal(pred_normal, gt_normal):
error = {}
eps = 1e-7
pred_normal = F.normalize(pred_normal, p=2, dim=1)
gt_normal = F.normalize(gt_normal, p=2, dim=1)
dot_product = torch.mul(pred_normal, gt_normal).sum(dim=1)
angular_error = torch.acos(torch.clamp(dot_product, -1+eps, 1-eps))
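    # Per-pixel angular error in radians; clamping keeps acos away from NaN at +/-1.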
error['N_MSE'] = torch.mean(torch.mul(angular_error, angular_error))
error['N_RMSE'] = torch.sqrt(error['N_MSE'])
error['N_MAE'] = torch.mean(angular_error)
error['N_MME'] = torch.median(angular_error)
error['N_TANGLE11.25'] = torch.mean((angular_error <= radians(11.25)).float())
error['N_TANGLE22.5'] = torch.mean((angular_error <= radians(22.5)).float())
error['N_TANGLE30.0'] = torch.mean((angular_error <= radians(30.0)).float())
return error
# Accumulate a batch-size-weighted running average of each error metric.
def avg_error(error_sum, error_val, total_size, batch_size):
error_avg = {}
for item, value in error_val.items():
error_sum[item] = error_sum.get(item, 0) + value * batch_size
error_avg[item] = error_sum[item] / float(total_size)
return error_avg
def print_single_error(epoch, loss, error):
format_str = ('%s\nEpoch: %d, loss=%s\n%s\n')
print (format_str % ('eval_avg_error', epoch, loss, error)) | 35.444444 | 138 | 0.653605 | 367 | 2,552 | 4.310627 | 0.318801 | 0.05689 | 0.03287 | 0.053097 | 0.120101 | 0.03287 | 0 | 0 | 0 | 0 | 0 | 0.046027 | 0.191223 | 2,552 | 72 | 139 | 35.444444 | 0.720446 | 0.104232 | 0 | 0 | 0 | 0.022222 | 0.121265 | 0.049649 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.066667 | 0 | 0.311111 | 0.044444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f188a72c45fc5d13f8ee8845ece06a14fed39304 | 2,088 | py | Python | pyfarm/models/statistics/task_count.py | guidow/pyfarm-master | d41c8f1eb5bfefb8400d400bcecadf197bcfb80a | [
"Apache-2.0"
] | null | null | null | pyfarm/models/statistics/task_count.py | guidow/pyfarm-master | d41c8f1eb5bfefb8400d400bcecadf197bcfb80a | [
"Apache-2.0"
] | null | null | null | pyfarm/models/statistics/task_count.py | guidow/pyfarm-master | d41c8f1eb5bfefb8400d400bcecadf197bcfb80a | [
"Apache-2.0"
] | null | null | null | # No shebang line, this module is meant to be imported
#
# Copyright 2015 Ambient Entertainment GmbH & Co. KG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TaskCount Model
====================
Model describing the number of tasks in a given queue in a given state at a
point in time
"""
from datetime import datetime
from pyfarm.master.application import db
from pyfarm.master.config import config
from pyfarm.models.core.types import id_column
class TaskCount(db.Model):
__bind_key__ = 'statistics'
__tablename__ = config.get("table_statistics_task_count")
id = id_column(db.Integer)
counted_time = db.Column(
db.DateTime,
nullable=False,
default=datetime.utcnow,
doc="The point in time at which these counts were done")
# No foreign key reference, because this table is stored in a separate db
# Code reading it will have to check for referential integrity manually.
job_queue_id = db.Column(
db.Integer,
nullable=True,
doc="ID of the jobqueue these stats refer to")
total_queued = db.Column(
db.Integer,
nullable=False,
doc="Number of queued tasks at `counted_time`")
total_running = db.Column(
db.Integer,
nullable=False,
doc="Number of running tasks at `counted_time`")
total_done = db.Column(
db.Integer,
nullable=False,
doc="Number of done tasks at `counted_time`")
total_failed = db.Column(
db.Integer,
nullable=False,
doc="Number of failed tasks at `counted_time`")
| 29.408451 | 77 | 0.690134 | 295 | 2,088 | 4.8 | 0.474576 | 0.039548 | 0.063559 | 0.060028 | 0.182203 | 0.115819 | 0.115819 | 0.115819 | 0.115819 | 0 | 0 | 0.004957 | 0.227011 | 2,088 | 70 | 78 | 29.828571 | 0.872367 | 0.429119 | 0 | 0.30303 | 0 | 0 | 0.243151 | 0.023116 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.121212 | 0 | 0.424242 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f189b87dcc3283704fc6b747b9d51de15714a10a | 2,817 | py | Python | malware prediction.py | Highcourtdurai/AI-prediction | bea4c19c0c9bf3432be6cf61f9a4992d3070746c | [
"Apache-2.0"
] | null | null | null | malware prediction.py | Highcourtdurai/AI-prediction | bea4c19c0c9bf3432be6cf61f9a4992d3070746c | [
"Apache-2.0"
] | null | null | null | malware prediction.py | Highcourtdurai/AI-prediction | bea4c19c0c9bf3432be6cf61f9a4992d3070746c | [
"Apache-2.0"
] | null | null | null | import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import seaborn as sns
#from sklearn.svm import SVC
#from sklearn.model_selection import KFold
from sklearn import preprocessing
import matplotlib.pyplot as plt
data=pd.read_csv('android.csv')
print(data.shape)
data = data.sample(frac=1).reset_index(drop=True)
print(data.head())
sns.countplot(x='malware',data=data)
#Over sampling
target_count = data.malware.value_counts()
print('Class 0:', target_count[0])
print('Class 1:', target_count[1])
count_class_0, count_class_1 = data.malware.value_counts()
df_class_0 = data[data['malware'] == 0]
df_class_1 = data[data['malware'] == 1]
df_class_1_over = df_class_1.sample(count_class_0, replace=True)
df_test_over = pd.concat([df_class_0, df_class_1_over], axis=0)
print(df_test_over.shape)
sns.countplot(x='malware',data=df_test_over)
X=df_test_over.iloc[:,df_test_over.columns !='malware']
Y=df_test_over.iloc[:,df_test_over.columns =="malware"]
print(X.head())
print(Y.head())
from sklearn.utils import shuffle
X, Y=shuffle(X, Y)
print(X.head())
X=X.drop(columns='name')
print(X.head())
print(Y.head())
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
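# chi2 scoring is only valid for non-negative features (booleans or frequencies).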
bestfeatures = SelectKBest(score_func=chi2, k=10)
fit = bestfeatures.fit(X,Y)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(X.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score']
print(featureScores.nlargest(10, 'Score'))
from sklearn.ensemble import ExtraTreesClassifier
model = ExtraTreesClassifier()
model.fit(X,Y)
print(model.feature_importances_) #use inbuilt class feature_importances of tree based classifiers
#plot graph of feature importances for better visualization
feat_importances = pd.Series(model.feature_importances_, index=X.columns)
feat_importances.nlargest(10).plot(kind='barh')
plt.show()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,Y, test_size = 0.2, random_state=0)
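# 80/20 train/test split; random_state=0 keeps the split reproducible.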
print(X_train.shape)
print(X_train.head())
print(y_train.head())
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier()
tree.fit(X_train, y_train)
y_pred = tree.predict(X_test)
print(y_pred)
model2=metrics.accuracy_score(y_test,y_pred)
print(model2)
cnf_matrix = confusion_matrix(y_test,y_pred)
labels = [0,1]
sns.heatmap(cnf_matrix, annot=True, cmap="YlGnBu", fmt=".3f", xticklabels=labels, yticklabels=labels)
plt.show()
| 23.475 | 102 | 0.746539 | 417 | 2,817 | 4.848921 | 0.280576 | 0.059842 | 0.034619 | 0.017804 | 0.176558 | 0.068249 | 0.068249 | 0.068249 | 0.037587 | 0 | 0 | 0.013952 | 0.134895 | 2,817 | 119 | 103 | 23.672269 | 0.815757 | 0.071707 | 0 | 0.15942 | 0 | 0 | 0.043024 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.275362 | 0 | 0.275362 | 0.231884 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f18ade7811c36ebc2634566008604890a3cd1f7b | 3,147 | py | Python | _unittests/ut_ipythonhelper/test_ipythonhelper.py | janjagusch/pyquickhelper | d42e1579ea20f5add9a9cd2b6d2d0a3533aee40b | [
"MIT"
] | 18 | 2015-11-10T08:09:23.000Z | 2022-02-16T11:46:45.000Z | _unittests/ut_ipythonhelper/test_ipythonhelper.py | janjagusch/pyquickhelper | d42e1579ea20f5add9a9cd2b6d2d0a3533aee40b | [
"MIT"
] | 321 | 2015-06-14T21:34:28.000Z | 2021-11-28T17:10:03.000Z | _unittests/ut_ipythonhelper/test_ipythonhelper.py | janjagusch/pyquickhelper | d42e1579ea20f5add9a9cd2b6d2d0a3533aee40b | [
"MIT"
] | 10 | 2015-06-20T01:35:00.000Z | 2022-01-19T15:54:32.000Z | """
@brief test log(time=2s)
"""
import sys
import os
import unittest
from pyquickhelper.pycode import ExtTestCase
from pyquickhelper.ipythonhelper import AutoCompletion, AutoCompletionFile, MagicCommandParser, MagicClassWithHelpers, open_html_form
class TestAutoCompletion(ExtTestCase):
def test_completion(self):
root = AutoCompletion()
cl = root._add("name", "TestAutoCompletion")
cl._add("method", "test_completion")
cl._add("method2", "test_completion")
cl = root._add("name2", "TestAutoCompletion2")
cl._add("method3", "test_completion")
s = (str # unicode#
(root))
self.assertIn(" | |- method2", s)
ls = len(root)
self.assertEqual(ls, 6)
def test_completion_file(self):
fold = os.path.abspath(os.path.split(__file__)[0])
fold = os.path.join(fold, "..", "..", "src")
this = AutoCompletionFile(fold)
ls = len(this)
self.assertGreater(ls, 30)
def test_html_form(self):
params = {"parA": "valueA", "parB": "valueB"}
title = 'unit_test_title'
key_save = 'jjj'
raw = open_html_form(params, title, key_save, raw=True)
self.assertGreater(len(raw), 1)
def test_eval(self):
params = {"x": 3, "y": 4}
cl = MagicCommandParser(prog="test_command")
res = cl.eval("x+y", params)
self.assertEqual(res, 7)
def test_parse(self):
parser = MagicCommandParser(prog="test_command",
description='display the first lines of a text file')
typstr = str # unicode#
parser.add_argument('f', type=typstr, help='filename')
parser.add_argument(
'-n', '--n',
type=typstr, default=10,
help='number of lines to display')
parser.add_argument(
'-e',
'--encoding',
default="utf8",
help='file encoding')
params = {"x": 3, "y": 4}
res = parser.parse_cmd('this.py -n x+y', context=params)
self.assertNotEmpty(res)
r = parser.format_help()
self.assertIn("usage: test_command", r)
self.assertEqual(res.n, 7)
def test_class_magic(self):
cl = MagicClassWithHelpers()
self.assertEmpty(cl.Context)
def call_MagicCommandParser():
return MagicCommandParser(prog="parser_unittest")
pa = cl.get_parser(call_MagicCommandParser, name="parser_unittest")
typstr = str # unicode#
pa.add_argument('f', type=typstr, help='filename')
pa.add_argument('-n', '--n', type=typstr, default=10,
help='number of lines to display')
pa.add_argument('-e', '--encoding', default="utf8",
help='file encoding')
self.assertNotEmpty(pa)
cl.add_context({"x": 3, "y": 4})
self.assertEqual(cl.Context, {"x": 3, "y": 4})
res = cl.get_args('this.py -n x+y', pa)
if res.n != 7:
raise Exception("res.n == {0}\nres={1}".format(res.n, res))
if __name__ == "__main__":
unittest.main()
| 34.582418 | 133 | 0.57674 | 362 | 3,147 | 4.861878 | 0.328729 | 0.023864 | 0.006818 | 0.009091 | 0.195455 | 0.157955 | 0.157955 | 0.119318 | 0.119318 | 0.065909 | 0 | 0.013239 | 0.279949 | 3,147 | 90 | 134 | 34.966667 | 0.76346 | 0.017159 | 0 | 0.133333 | 0 | 0 | 0.158442 | 0 | 0 | 0 | 0 | 0 | 0.146667 | 1 | 0.093333 | false | 0 | 0.066667 | 0.013333 | 0.186667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74af94c08770ecd85e6abea27c4f5fa08a581c3a | 1,479 | py | Python | dcm2lungwin.py | hbuck1996/CT-Window-adjustment | fc5387d42cb4f4299a074aad7aff43cf396578d3 | [
"MIT"
] | 1 | 2021-11-05T08:37:09.000Z | 2021-11-05T08:37:09.000Z | dcm2lungwin.py | hbuck1996/CT-Window-adjustment | fc5387d42cb4f4299a074aad7aff43cf396578d3 | [
"MIT"
] | null | null | null | dcm2lungwin.py | hbuck1996/CT-Window-adjustment | fc5387d42cb4f4299a074aad7aff43cf396578d3 | [
"MIT"
] | null | null | null | # -*- coding=utf-8 -*-
import pydicom
import os
import numpy
from os.path import splitext
import PIL.Image as Image
def getfile(file):
dcm = pydicom.dcmread(file)
img2 = dcm.pixel_array * dcm.RescaleSlope + dcm.RescaleIntercept
return img2
def get_window_size(window_type):
    if window_type == 'lung':  # lung window
        center = -600
        width = 1200
    elif window_type == 'Mediastinal':  # mediastinal window
        center = 40
        width = 400
    else:
        raise ValueError('unsupported window_type: %s' % window_type)
    return center, width
# Adjust the CT image's window width and window center (level).
def setDicomWinWidthWinCenter(img_data, window_type):
img_temp = img_data
    rows = len(img_temp)
    cols = len(img_temp[0])
    center, width = get_window_size(window_type)
    img_temp.flags.writeable = True
    min_val = (2 * center - width) / 2.0 + 0.5
    max_val = (2 * center + width) / 2.0 + 0.5
    dFactor = 255.0 / (max_val - min_val)
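    # Linear map from [center - width/2, center + width/2] onto [0, 255].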
    for i in numpy.arange(rows):
        for j in numpy.arange(cols):
            img_temp[i, j] = int((img_temp[i, j] - min_val) * dFactor)
    min_index = img_temp < 0
    img_temp[min_index] = 0
    max_index = img_temp > 255
    img_temp[max_index] = 255
return img_temp
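
# Note: the per-pixel loop above could be vectorized in one line, e.g. (sketch):
#   img_temp = numpy.clip((img_temp - min_val) * dFactor, 0, 255)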
pathin = 'dcmin/'
pathout = 'dcmout/'
for root, dirs, files in os.walk(pathin):
for i in range(len(files)):
filename = files[i]
im = getfile(pathin + filename)
im1 = setDicomWinWidthWinCenter(im, 'lung')
dcm_img = Image.fromarray(im1)
dcm_img = dcm_img.convert('L')
        output = splitext(files[i])[0] + ".png"
dcm_img.save(pathout + output)
| 26.890909 | 68 | 0.634212 | 213 | 1,479 | 4.258216 | 0.389671 | 0.084895 | 0.028666 | 0.041896 | 0.085998 | 0.035281 | 0.035281 | 0 | 0 | 0 | 0 | 0.03664 | 0.243408 | 1,479 | 54 | 69 | 27.388889 | 0.773905 | 0.024341 | 0 | 0 | 0 | 0 | 0.025748 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.111111 | 0 | 0.244444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74b08192a0962af59339ae5fc75dfd53de367b84 | 1,600 | py | Python | utils/ansi_text.py | hkkwok/MachOTool | 469c0fd06199356fcc6d68809c7ba15a12eac1fd | [
"Apache-2.0"
] | 12 | 2016-01-08T22:35:14.000Z | 2019-07-29T11:50:41.000Z | utils/ansi_text.py | uvbs/MachOTool | 469c0fd06199356fcc6d68809c7ba15a12eac1fd | [
"Apache-2.0"
] | 2 | 2015-12-10T21:28:04.000Z | 2019-10-15T10:05:19.000Z | utils/ansi_text.py | uvbs/MachOTool | 469c0fd06199356fcc6d68809c7ba15a12eac1fd | [
"Apache-2.0"
] | 6 | 2016-10-10T05:29:41.000Z | 2019-10-15T09:59:17.000Z | class AnsiText(object):
ENABLE_COLOR = True
COLORS = {'black': 30,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'white': 37}
BOLD = 1
UNDERLINE = 4
def __init__(self, text, **kwargs):
self.text = text
self.color = None
self.bold = False
self.underline = False
if 'color' in kwargs:
color = kwargs['color']
if color not in self.COLORS:
raise IndexError('unknown color %s' % color)
self.color = color
if 'bold' in kwargs:
value = kwargs['bold']
if not isinstance(value, bool):
raise TypeError('bold must be a bool')
self.bold = value
if 'underline' in kwargs:
value = kwargs['underline']
if not isinstance(value, bool):
raise TypeError('underline must be a bool')
self.underline = value
def __repr__(self):
esc = '\x1b['
output = str(self.text)
if not self.ENABLE_COLOR:
return output
ansi_codes = list()
if self.bold:
ansi_codes.append(self.BOLD)
if self.color is not None:
assert self.color in self.COLORS
ansi_codes.append(self.COLORS[self.color])
if self.underline:
ansi_codes.append(self.UNDERLINE)
output = esc + ';'.join([str(x) for x in ansi_codes]) + 'm' + output + esc + '0m'
return output
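
# Usage sketch: print(AnsiText('warning', color='yellow', bold=True))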
| 30.769231 | 89 | 0.5025 | 180 | 1,600 | 4.383333 | 0.361111 | 0.057034 | 0.057034 | 0.072243 | 0.134347 | 0.096324 | 0.096324 | 0 | 0 | 0 | 0 | 0.020555 | 0.391875 | 1,600 | 51 | 90 | 31.372549 | 0.790339 | 0 | 0 | 0.085106 | 0 | 0 | 0.089375 | 0 | 0 | 0 | 0 | 0 | 0.021277 | 1 | 0.042553 | false | 0 | 0 | 0 | 0.191489 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74b378062d50de8d6fa4de92edd93991772320d6 | 330 | py | Python | Python/Learn Python The Hard Way/ex32.py | Vayne-Lover/Effective | 05f0a08bec8eb112fdb4e7a489d0e33bc81522ff | [
"MIT"
] | null | null | null | Python/Learn Python The Hard Way/ex32.py | Vayne-Lover/Effective | 05f0a08bec8eb112fdb4e7a489d0e33bc81522ff | [
"MIT"
] | null | null | null | Python/Learn Python The Hard Way/ex32.py | Vayne-Lover/Effective | 05f0a08bec8eb112fdb4e7a489d0e33bc81522ff | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
the_count = [1, 2, 3, 4, 5]
fruits = ['apples', 'oranges', 'pears', 'apricots']
change = [1, 'pennies', 2, 'dimes', 3, 'quarters']
for i in the_count:
print(i)
for j in fruits:
print(j)
for k in change:
print(k)
element = []
for i in range(6):
element.append(i)
for e in element:
print(e)
| 13.2 | 51 | 0.587879 | 56 | 330 | 3.428571 | 0.535714 | 0.083333 | 0.0625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03861 | 0.215152 | 330 | 24 | 52 | 13.75 | 0.702703 | 0.063636 | 0 | 0 | 0 | 0 | 0.149837 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.285714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74b3f7e3888622737e8eff19f6ae59cf2977b401 | 678 | py | Python | agr0_sh0p/sendemail/sg_verify.py | Mikhail-Kushnerev/agr0-sh0p | b0bea7aef7166ae04691a6b575658a1fb16543ca | [
"BSD-3-Clause"
] | null | null | null | agr0_sh0p/sendemail/sg_verify.py | Mikhail-Kushnerev/agr0-sh0p | b0bea7aef7166ae04691a6b575658a1fb16543ca | [
"BSD-3-Clause"
] | null | null | null | agr0_sh0p/sendemail/sg_verify.py | Mikhail-Kushnerev/agr0-sh0p | b0bea7aef7166ae04691a6b575658a1fb16543ca | [
"BSD-3-Clause"
] | null | null | null | # using SendGrid's Python Library
# https://github.com/sendgrid/sendgrid-python
import os
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
APIKEY = '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'
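# NOTE: hardcoding credentials is risky; SendGrid's own examples read the key
# from the environment instead, e.g. os.environ.get('SENDGRID_API_KEY').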
message = Mail(
from_email='muxa2k11@gmail.com',
to_emails='mikushnerev@stud.etu.ru',
subject='Sending with Twilio SendGrid is Fun',
html_content='<strong>and easy to do anywhere, even with Python</strong>'
)
try:
sg = SendGridAPIClient(APIKEY)
response = sg.send(message)
print(response.status_code)
print(response.body)
print(response.headers)
except Exception as e:
    print(e)
| 28.25 | 80 | 0.756637 | 85 | 678 | 5.976471 | 0.647059 | 0.076772 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02931 | 0.144543 | 678 | 23 | 81 | 29.478261 | 0.846552 | 0.110619 | 0 | 0 | 0 | 0 | 0.338333 | 0.153333 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74b48488924a2302ea18239cc158782bc978ef6c | 2,324 | py | Python | setup.py | rhys-newbury/swift | 60154b0441c468e3c15225c6820158d52ade98bf | [
"MIT"
] | 25 | 2020-10-09T05:20:21.000Z | 2022-02-27T19:28:14.000Z | setup.py | rhys-newbury/swift | 60154b0441c468e3c15225c6820158d52ade98bf | [
"MIT"
] | 19 | 2020-10-28T22:56:55.000Z | 2022-03-23T11:26:41.000Z | setup.py | rhys-newbury/swift | 60154b0441c468e3c15225c6820158d52ade98bf | [
"MIT"
] | 8 | 2021-03-02T16:13:46.000Z | 2021-12-22T10:50:35.000Z | from setuptools import setup, find_packages, Extension
from os import path
import os
# fmt: off
import pip
pip.main(['install', 'numpy>=1.18.0'])
import numpy
# fmt: on
here = path.abspath(path.dirname(__file__))
req = ["numpy>=1.18.0", "spatialgeometry>=0.2.0", "websockets"]
# Get the long description from the README file
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
def package_files(directory):
paths = []
for (pathhere, _, filenames) in os.walk(directory):
for filename in filenames:
paths.append(os.path.join("..", pathhere, filename))
return paths
extra_folders = [
"swift/out",
"swift/core",
]
extra_files = []
for extra_folder in extra_folders:
extra_files += package_files(extra_folder)
phys = Extension(
"phys",
sources=["./swift/core/phys.c"],
include_dirs=["./swift/core/", numpy.get_include()],
)
setup(
name="swift-sim",
version="0.10.0",
description="A Python/Javascript Visualiser",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jhavl/swift",
author="Jesse Haviland",
license="MIT",
classifiers=[
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
"Development Status :: 3 - Alpha",
# Indicate who your project is intended for
"Intended Audience :: Developers",
# Pick your license as you wish (should match "license" above)
"License :: OSI Approved :: MIT License",
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
python_requires=">=3.6",
keywords="python robotics robotics-toolbox kinematics dynamics"
" motion-planning trajectory-generation jacobian hessian"
" control simulation robot-manipulator mobile-robot",
packages=find_packages(exclude=["tests", "examples"]),
package_data={"swift": extra_files},
# include_package_data=True,
ext_modules=[phys],
install_requires=req,
)
| 29.794872 | 77 | 0.654905 | 285 | 2,324 | 5.235088 | 0.533333 | 0.050268 | 0.067024 | 0.069705 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017486 | 0.212565 | 2,324 | 77 | 78 | 30.181818 | 0.797814 | 0.160499 | 0 | 0 | 0 | 0 | 0.345023 | 0.022176 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018182 | false | 0 | 0.090909 | 0 | 0.127273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74ba731ff054eb15ac2f02bf27cba0343a1c4050 | 1,510 | py | Python | src/opera/parser/tosca/__init__.py | sstanovnik/xopera-opera | 06031d37268913c6ba6dbc30ec6b4acb3a17dc5a | [
"Apache-2.0"
] | null | null | null | src/opera/parser/tosca/__init__.py | sstanovnik/xopera-opera | 06031d37268913c6ba6dbc30ec6b4acb3a17dc5a | [
"Apache-2.0"
] | null | null | null | src/opera/parser/tosca/__init__.py | sstanovnik/xopera-opera | 06031d37268913c6ba6dbc30ec6b4acb3a17dc5a | [
"Apache-2.0"
] | null | null | null | from pathlib import PurePath
import importlib
from opera import stdlib
from opera.error import ParseError
from opera.parser import yaml
SUPPORTED_VERSIONS = dict(
tosca_simple_yaml_1_3="v_1_3",
)
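# Maps TOSCA definition version strings to parser submodule names (see _get_parser).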
def load(base_path, template_name):
with (base_path / template_name).open() as input_fd:
input_yaml = yaml.load(input_fd, str(template_name))
if not isinstance(input_yaml.value, dict):
raise ParseError(
"Top level structure should be a map.", yaml_node.loc,
)
tosca_version = _get_tosca_version(input_yaml)
parser = _get_parser(tosca_version)
stdlib_yaml = stdlib.load(tosca_version)
service = parser.parse(stdlib_yaml, base_path, PurePath("STDLIB"))
service.merge(parser.parse(input_yaml, base_path, PurePath()))
service.visit("resolve_path", base_path)
service.visit("resolve_reference", service)
return service
def _get_parser(tosca_version):
    return importlib.import_module(".{}".format(tosca_version), __name__).Parser
def _get_tosca_version(input_yaml):
for k, v in input_yaml.value.items():
if k.value == "tosca_definitions_version":
try:
return SUPPORTED_VERSIONS[v.value]
except (TypeError, KeyError):
raise ParseError(
"Invalid TOSCA version. Available: {}.".format(
", ".join(SUPPORTED_VERSIONS.keys()),
), v.loc,
)
raise ParseError("Missing TOSCA version", input_yaml.loc)
| 29.607843 | 70 | 0.662914 | 187 | 1,510 | 5.064171 | 0.374332 | 0.101373 | 0.053854 | 0.066526 | 0.050686 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005226 | 0.239735 | 1,510 | 50 | 71 | 30.2 | 0.819686 | 0 | 0 | 0.054054 | 0 | 0 | 0.110596 | 0.016556 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.162162 | 0.027027 | 0.324324 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74bab9faf0636dab82e5b1e9553de1e5cede8ce6 | 18,100 | py | Python | project/upload/views.py | beijbom/coralnet | c3f4a44eeb60cb41a079329a0068dc8b34096e89 | [
"BSD-2-Clause"
] | 31 | 2019-12-08T14:22:52.000Z | 2021-12-27T04:58:12.000Z | project/upload/views.py | beijbom/coralnet | c3f4a44eeb60cb41a079329a0068dc8b34096e89 | [
"BSD-2-Clause"
] | 193 | 2019-12-07T23:27:43.000Z | 2022-03-05T08:05:46.000Z | project/upload/views.py | beijbom/coralnet | c3f4a44eeb60cb41a079329a0068dc8b34096e89 | [
"BSD-2-Clause"
] | null | null | null | import json
from datetime import timedelta
from django.conf import settings
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.utils.timezone import now
from accounts.utils import get_imported_user
from annotations.model_utils import AnnotationAreaUtils
from annotations.models import Annotation
from images.forms import MetadataForm
from images.model_utils import PointGen
from images.models import Source, Metadata, Image, Point
from images.utils import metadata_obj_to_dict, get_aux_labels, \
metadata_field_names_to_labels
from lib.decorators import source_permission_required, source_labelset_required
from lib.exceptions import FileProcessError
from lib.forms import get_one_form_error
from lib.utils import filesize_display
from visualization.forms import ImageSpecifyByIdForm
from .forms import (
CPCImportForm, CSVImportForm, ImageUploadForm, ImageUploadFrontendForm)
from .utils import (
annotations_cpcs_to_dict, annotations_csv_to_dict,
annotations_preview, find_dupe_image, metadata_csv_to_dict,
metadata_preview, upload_image_process)
import vision_backend.tasks as backend_tasks
@source_permission_required('source_id', perm=Source.PermTypes.EDIT.code)
def upload_portal(request, source_id):
"""
Page which points to the pages for the three different upload types.
"""
if request.method == 'POST':
if request.POST.get('images'):
return HttpResponseRedirect(
reverse('upload_images', args=[source_id]))
if request.POST.get('metadata'):
return HttpResponseRedirect(
reverse('upload_metadata', args=[source_id]))
if request.POST.get('annotations_cpc'):
return HttpResponseRedirect(
reverse('upload_annotations_cpc', args=[source_id]))
if request.POST.get('annotations_csv'):
return HttpResponseRedirect(
reverse('upload_annotations_csv', args=[source_id]))
source = get_object_or_404(Source, id=source_id)
return render(request, 'upload/upload_portal.html', {
'source': source,
})
@source_permission_required('source_id', perm=Source.PermTypes.EDIT.code)
def upload_images(request, source_id):
"""
Upload images to a source.
This view is for the non-Ajax frontend.
"""
source = get_object_or_404(Source, id=source_id)
images_form = ImageUploadFrontendForm()
proceed_to_manage_metadata_form = ImageSpecifyByIdForm(source=source)
auto_generate_points_message = (
"We will generate points for the images you upload.\n"
"Your Source's point generation settings: {pointgen}\n"
"Your Source's annotation area settings: {annoarea}").format(
pointgen=PointGen.db_to_readable_format(
source.default_point_generation_method),
annoarea=AnnotationAreaUtils.db_format_to_display(
source.image_annotation_area),
)
return render(request, 'upload/upload_images.html', {
'source': source,
'images_form': images_form,
'proceed_to_manage_metadata_form': proceed_to_manage_metadata_form,
'auto_generate_points_message': auto_generate_points_message,
'image_upload_max_file_size': filesize_display(
settings.IMAGE_UPLOAD_MAX_FILE_SIZE),
})
@source_permission_required(
'source_id', perm=Source.PermTypes.EDIT.code, ajax=True)
def upload_images_preview_ajax(request, source_id):
"""
Preview the images that are about to be uploaded.
Check to see if there's any problems with the filenames or file sizes.
"""
if request.method != 'POST':
return JsonResponse(dict(
error="Not a POST request",
))
source = get_object_or_404(Source, id=source_id)
file_info_list = json.loads(request.POST.get('file_info'))
statuses = []
for file_info in file_info_list:
dupe_image = find_dupe_image(source, file_info['filename'])
if dupe_image:
statuses.append(dict(
error="Image with this name already exists",
url=reverse('image_detail', args=[dupe_image.id]),
))
elif file_info['size'] > settings.IMAGE_UPLOAD_MAX_FILE_SIZE:
statuses.append(dict(
error="Exceeds size limit of {limit}".format(
limit=filesize_display(
settings.IMAGE_UPLOAD_MAX_FILE_SIZE))
))
else:
statuses.append(dict(
ok=True,
))
return JsonResponse(dict(
statuses=statuses,
))
@source_permission_required(
'source_id', perm=Source.PermTypes.EDIT.code, ajax=True)
def upload_images_ajax(request, source_id):
"""
After the "Start upload" button is clicked, this view is entered once
for each image file. This view saves the image to the database
and media storage.
"""
if request.method != 'POST':
return JsonResponse(dict(
error="Not a POST request",
))
source = get_object_or_404(Source, id=source_id)
# Retrieve image related fields
image_form = ImageUploadForm(request.POST, request.FILES)
# Check for validity of the file (filetype and non-corruptness) and
# the options forms.
if not image_form.is_valid():
# Examples of errors: filetype is not an image,
# file is corrupt, file is empty, etc.
return JsonResponse(dict(
error=get_one_form_error(image_form),
))
img = upload_image_process(
image_file=image_form.cleaned_data['file'],
image_name=image_form.cleaned_data['name'],
source=source,
current_user=request.user,
)
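    # The short delay likely serves the same purpose as the reset_features
    # delay documented further below: let the DB transaction conclude first.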
backend_tasks.submit_features.apply_async(
args=[img.id],
eta=(now() + timedelta(minutes=1)),
)
return JsonResponse(dict(
success=True,
link=reverse('image_detail', args=[img.id]),
image_id=img.id,
))
@source_permission_required('source_id', perm=Source.PermTypes.EDIT.code)
def upload_metadata(request, source_id):
"""
Set image metadata by uploading a CSV file containing the metadata.
This view is for the non-Ajax frontend.
"""
source = get_object_or_404(Source, id=source_id)
csv_import_form = CSVImportForm()
return render(request, 'upload/upload_metadata.html', {
'source': source,
'csv_import_form': csv_import_form,
'field_labels': metadata_field_names_to_labels(source).values(),
'aux_field_labels': get_aux_labels(source),
})
@source_permission_required(
'source_id', perm=Source.PermTypes.EDIT.code, ajax=True)
def upload_metadata_preview_ajax(request, source_id):
"""
Set image metadata by uploading a CSV file containing the metadata.
This view takes the CSV file, processes it, saves the processed metadata
to the session, and returns a preview table of the metadata to be saved.
"""
if request.method != 'POST':
return JsonResponse(dict(
error="Not a POST request",
))
source = get_object_or_404(Source, id=source_id)
csv_import_form = CSVImportForm(request.POST, request.FILES)
if not csv_import_form.is_valid():
return JsonResponse(dict(
error=csv_import_form.errors['csv_file'][0],
))
try:
# Dict of (metadata ids -> dicts of (column name -> value))
csv_metadata = metadata_csv_to_dict(
csv_import_form.get_csv_stream(), source)
except FileProcessError as error:
return JsonResponse(dict(
error=str(error),
))
preview_table, preview_details = \
metadata_preview(csv_metadata, source)
request.session['csv_metadata'] = csv_metadata
return JsonResponse(dict(
success=True,
previewTable=preview_table,
previewDetails=preview_details,
))
@source_permission_required(
'source_id', perm=Source.PermTypes.EDIT.code, ajax=True)
def upload_metadata_ajax(request, source_id):
"""
Set image metadata by uploading a CSV file containing the metadata.
This view gets the metadata that was previously saved to the session
by the upload-preview view. Then it saves the metadata to the database.
"""
if request.method != 'POST':
return JsonResponse(dict(
error="Not a POST request",
))
source = get_object_or_404(Source, id=source_id)
csv_metadata = request.session.pop('csv_metadata', None)
if not csv_metadata:
return JsonResponse(dict(
error=(
"We couldn't find the expected data in your session."
" Please try loading this page again. If the problem persists,"
" let us know on the forum."
),
))
for metadata_id, csv_metadata_for_image in csv_metadata.items():
metadata = Metadata.objects.get(pk=metadata_id, image__source=source)
new_metadata_dict = metadata_obj_to_dict(metadata)
new_metadata_dict.update(csv_metadata_for_image)
metadata_form = MetadataForm(
new_metadata_dict, instance=metadata, source=source)
# We already validated previously, so this SHOULD be valid.
if not metadata_form.is_valid():
raise ValueError("Metadata became invalid for some reason.")
metadata_form.save()
return JsonResponse(dict(
success=True,
))
@source_permission_required('source_id', perm=Source.PermTypes.EDIT.code)
@source_labelset_required('source_id', message=(
"You must create a labelset before uploading annotations."))
def upload_annotations_csv(request, source_id):
source = get_object_or_404(Source, id=source_id)
csv_import_form = CSVImportForm()
return render(request, 'upload/upload_annotations_csv.html', {
'source': source,
'csv_import_form': csv_import_form,
})
@source_permission_required(
'source_id', perm=Source.PermTypes.EDIT.code, ajax=True)
@source_labelset_required('source_id', message=(
"You must create a labelset before uploading annotations."))
def upload_annotations_csv_preview_ajax(request, source_id):
"""
Add points/annotations to images by uploading a CSV file.
This view takes the CSV file, processes it, saves the processed data
to the session, and returns a preview table of the data to be saved.
"""
if request.method != 'POST':
return JsonResponse(dict(
error="Not a POST request",
))
source = get_object_or_404(Source, id=source_id)
csv_import_form = CSVImportForm(request.POST, request.FILES)
if not csv_import_form.is_valid():
return JsonResponse(dict(
error=csv_import_form.errors['csv_file'][0],
))
try:
csv_annotations = annotations_csv_to_dict(
csv_import_form.get_csv_stream(), source)
except FileProcessError as error:
return JsonResponse(dict(
error=str(error),
))
preview_table, preview_details = \
annotations_preview(csv_annotations, source)
request.session['uploaded_annotations'] = csv_annotations
return JsonResponse(dict(
success=True,
previewTable=preview_table,
previewDetails=preview_details,
))
@source_permission_required('source_id', perm=Source.PermTypes.EDIT.code)
@source_labelset_required('source_id', message=(
"You must create a labelset before uploading annotations."))
def upload_annotations_cpc(request, source_id):
source = get_object_or_404(Source, id=source_id)
cpc_import_form = CPCImportForm(source)
return render(request, 'upload/upload_annotations_cpc.html', {
'source': source,
'cpc_import_form': cpc_import_form,
})
@source_permission_required(
'source_id', perm=Source.PermTypes.EDIT.code, ajax=True)
@source_labelset_required('source_id', message=(
"You must create a labelset before uploading annotations."))
def upload_annotations_cpc_preview_ajax(request, source_id):
"""
Add points/annotations to images by uploading Coral Point Count files.
This view takes multiple .cpc files, processes them, saves the processed
data to the session, and returns a preview table of the data to be saved.
"""
if request.method != 'POST':
return JsonResponse(dict(
error="Not a POST request",
))
source = get_object_or_404(Source, id=source_id)
cpc_import_form = CPCImportForm(source, request.POST, request.FILES)
if not cpc_import_form.is_valid():
return JsonResponse(dict(
error=cpc_import_form.errors['cpc_files'][0],
))
try:
cpc_info = annotations_cpcs_to_dict(
cpc_import_form.get_cpc_names_and_streams(), source,
cpc_import_form.cleaned_data['plus_notes'])
except FileProcessError as error:
return JsonResponse(dict(
error=str(error),
))
preview_table, preview_details = \
annotations_preview(cpc_info['annotations'], source)
request.session['uploaded_annotations'] = cpc_info['annotations']
request.session['cpc_info'] = cpc_info
return JsonResponse(dict(
success=True,
previewTable=preview_table,
previewDetails=preview_details,
))
@source_permission_required(
'source_id', perm=Source.PermTypes.EDIT.code, ajax=True)
@source_labelset_required('source_id', message=(
"You must create a labelset before uploading annotations."))
def upload_annotations_ajax(request, source_id):
"""
This view gets the annotation data that was previously saved to the
session by upload-preview-csv or upload-preview-cpc.
Then it saves the data to the database,
while deleting all previous points/annotations for the images involved.
"""
if request.method != 'POST':
return JsonResponse(dict(
error="Not a POST request",
))
source = get_object_or_404(Source, id=source_id)
uploaded_annotations = request.session.pop('uploaded_annotations', None)
if not uploaded_annotations:
return JsonResponse(dict(
error=(
"We couldn't find the expected data in your session."
" Please try loading this page again. If the problem persists,"
" let us know on the forum."
),
))
cpc_info = request.session.pop('cpc_info', None)
for image_id, annotations_for_image in uploaded_annotations.items():
img = Image.objects.get(pk=image_id, source=source)
# Delete previous annotations and points for this image.
# Calling delete() on these querysets is more efficient
# than calling delete() on each of the individual objects.
Annotation.objects.filter(image=img).delete()
Point.objects.filter(image=img).delete()
# Create new points and annotations.
new_points = []
new_annotations = []
for num, point_dict in enumerate(annotations_for_image, 1):
# Create a Point.
point = Point(
row=point_dict['row'], column=point_dict['column'],
point_number=num, image=img)
new_points.append(point)
# Save to DB with an efficient bulk operation.
Point.objects.bulk_create(new_points)
for num, point_dict in enumerate(annotations_for_image, 1):
# Create an Annotation if a label is specified.
if 'label' in point_dict:
label_obj = source.labelset.get_global_by_code(
point_dict['label'])
# TODO: Django 1.10 can set database IDs on newly created
# objects, so re-fetching the points may not be needed:
# https://docs.djangoproject.com/en/dev/releases/1.10/#database-backends
new_annotations.append(Annotation(
point=Point.objects.get(point_number=num, image=img),
image=img, source=source,
label=label_obj, user=get_imported_user()))
# Do NOT bulk-create the annotations so that the versioning signals
# (for annotation history) do not get bypassed. Create them one by one.
for annotation in new_annotations:
annotation.save()
# Update relevant image/metadata fields.
img.point_generation_method = PointGen.args_to_db_format(
point_generation_type=PointGen.Types.IMPORTED,
imported_number_of_points=len(new_points)
)
if cpc_info:
# We uploaded annotations as CPC. Save contents for future CPC
# exports.
# Note: Since cpc_info went through session serialization,
# dicts with integer keys have had their keys stringified.
img.cpc_content = cpc_info['cpc_contents'][str(img.pk)]
img.cpc_filename = cpc_info['cpc_filenames'][str(img.pk)]
else:
# We uploaded CSV. Any CPC we had saved previously no longer has
# the correct point positions, so we'll just discard the CPC.
img.cpc_content = ''
img.cpc_filename = ''
img.save()
img.metadata.annotation_area = AnnotationAreaUtils.IMPORTED_STR
img.metadata.save()
# Submit job with 1 hour delay to allow the view and thus DB transaction
# to conclude before jobs are submitted.
# Details: https://github.com/beijbom/coralnet-system/issues/31.
backend_tasks.reset_features.apply_async(
args=[img.id], eta=now() + timedelta(hours=1))
if cpc_info:
# We uploaded annotations as CPC. Save some info for future CPC
# exports.
source.cpce_code_filepath = cpc_info['code_filepath']
source.cpce_image_dir = cpc_info['image_dir']
source.save()
return JsonResponse(dict(
success=True,
))
| 35.559921 | 88 | 0.673149 | 2,231 | 18,100 | 5.241147 | 0.161811 | 0.038998 | 0.043274 | 0.036945 | 0.499786 | 0.46515 | 0.430343 | 0.427948 | 0.40366 | 0.383477 | 0 | 0.003998 | 0.23989 | 18,100 | 508 | 89 | 35.629921 | 0.845908 | 0.172376 | 0 | 0.510324 | 0 | 0 | 0.126374 | 0.018596 | 0 | 0 | 0 | 0.001969 | 0 | 1 | 0.035398 | false | 0 | 0.135693 | 0 | 0.265487 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74bb9cc1b76d05a1ca8fad8227db7488f5dc77ca | 3,846 | py | Python | medgen/annotate/variant.py | text2gene/medgen | e2327bdf8f2c9e35da177fbbd0d217790c0ae361 | [
"Apache-2.0"
] | null | null | null | medgen/annotate/variant.py | text2gene/medgen | e2327bdf8f2c9e35da177fbbd0d217790c0ae361 | [
"Apache-2.0"
] | null | null | null | medgen/annotate/variant.py | text2gene/medgen | e2327bdf8f2c9e35da177fbbd0d217790c0ae361 | [
"Apache-2.0"
] | null | null | null | """ Variant-level annotation functions requiring ClinvarDB and Metapub (NCBI/eutils). """
import requests, json, urllib

from metapub.text_mining import is_pmcid, is_ncbi_bookID
from metapub.pubmedcentral import get_pmid_for_otherid

from ..db.clinvar import ClinVarDB
from ..log import log

##########################################################################################
#
# Functions
#
##########################################################################################


def _clinvar_variant_accession(hgvs_text):
    """
    See ClinVar FAQ http://www.ncbi.nlm.nih.gov/clinvar/docs/faq/#accs

    :param hgvs_text: c.DNA
    :return: RCVAccession "Reference ClinVar Accession"
    """
    try:
        return ClinVarDB().accession_for_hgvs_text(str(hgvs_text))
    except Exception as err:
        log.debug("no clinvar accession for variant hgvs_text %s " % hgvs_text)


def _clinvar_variant_allele_id(hgvs_text):
    """
    Get the unique AlleleID

    :param hgvs_text: c.DNA
    :return: AlleleID
    """
    try:
        return ClinVarDB().allele_id_for_hgvs_text(hgvs_text)
    except Exception as err:
        log.debug('no clinvar AlleleID for variant hgvs_text %s ' % hgvs_text)


def _clinvar_variant_variation_id(hgvs_text):
    """
    Get the unique VariationID

    :param hgvs_text: c.DNA
    :return: VariationID
    """
    try:
        return ClinVarDB().variation_id_for_hgvs_text(hgvs_text)
    except Exception as err:
        log.debug('no clinvar VariationID for variant hgvs_text %s ' % hgvs_text)


def _clinvar_variant2pubmed(hgvs_text):
    """
    Get PMID for clinvar variants using the AlleleID key.

    Keep GeneReviews book references (NBKxxxx) without argument.

    ONE EXPENSIVE LOOKUP HERE:
        If the citation_source is PubMedCentral, first convert responses to PMID.

    :param hgvs_text: c.DNA
    :return: set(PMIDs and possibly also NBK ids)
    """
    pubmeds = []
    citations = ClinVarDB().var_citations(hgvs_text)
    if citations:
        for cite in citations:
            some_id = cite['citation_id']
            if is_ncbi_bookID(some_id):
                # Todo: convert? drop??
                pubmeds.append(some_id)
            elif is_pmcid(some_id):
                try:
                    pmid = get_pmid_for_otherid(some_id)
                    if pmid is not None:
                        log.debug('found PubMedCentral PMCID %s, converted to PMID %s ', some_id, str(pmid))
                        pubmeds.append(pmid)
                    else:
                        log.debug('PMID not found for PMCID %s; discarding.', some_id)
                except Exception as err:
                    log.debug('error converting PMCID %s: %r', some_id, err)
            elif cite['citation_source'] == 'PubMed':
                pubmeds.append(some_id)
    #return set([int(entry) for entry in pubmeds])
    return set(pubmeds)


def clinvar2pmid_with_accessions(hgvs_list):
    ret = []
    citations = ClinVarDB().var_citations(hgvs_list)
    if citations:
        for cite in citations:
            article_id = cite['citation_id']
            if is_ncbi_bookID(article_id):
                pmid = article_id
            else:
                pmid = article_id if cite['citation_source'] == 'PubMed' else get_pmid_for_otherid(article_id)
            if pmid:
                ret.append({"hgvs_text": cite['HGVS'], "pmid": pmid, "accession": cite['RCVaccession']})
    return ret


##########################################################################################
#
# API
#
##########################################################################################

ClinvarAccession = _clinvar_variant_accession
ClinvarAlleleID = _clinvar_variant_allele_id
ClinvarPubmeds = _clinvar_variant2pubmed
ClinvarVariationID = _clinvar_variant_variation_id
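
# Usage sketch (illustrative HGVS string; requires a configured ClinvarDB backend):
#   ClinvarPubmeds('NM_000249.3:c.350C>T')  ->  set of PMIDs (and possibly NBK ids)
#   ClinvarAccession('NM_000249.3:c.350C>T')  ->  RCV accession string, or None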
| 34.035398 | 110 | 0.583983 | 428 | 3,846 | 5.016355 | 0.287383 | 0.081975 | 0.018631 | 0.026083 | 0.296227 | 0.264555 | 0.161155 | 0.161155 | 0.133209 | 0.115976 | 0 | 0.00103 | 0.24285 | 3,846 | 112 | 111 | 34.339286 | 0.736264 | 0.191368 | 0 | 0.275862 | 0 | 0 | 0.137158 | 0 | 0 | 0 | 0 | 0.008929 | 0 | 1 | 0.086207 | false | 0 | 0.086207 | 0 | 0.258621 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74bbb875400925fd99ab218e4498bdd041de47bc | 6,049 | py | Python | microdf/custom_taxes.py | MaxGhenis/taxcalc-helpers | 85d2739d1c96882424cb51ef1806c9e51f88c085 | [
"MIT"
] | 6 | 2019-06-26T14:37:49.000Z | 2020-08-10T22:26:34.000Z | microdf/custom_taxes.py | MaxGhenis/taxcalc-helpers | 85d2739d1c96882424cb51ef1806c9e51f88c085 | [
"MIT"
] | 94 | 2019-06-22T14:57:58.000Z | 2020-09-08T16:35:38.000Z | microdf/custom_taxes.py | MaxGhenis/taxcalc-helpers | 85d2739d1c96882424cb51ef1806c9e51f88c085 | [
"MIT"
] | 6 | 2020-09-08T18:29:36.000Z | 2021-04-01T18:31:42.000Z | """
Functions and data for estimating taxes outside the income tax system.
Examples include value added tax, financial transaction tax, and carbon tax.
"""
import microdf as mdf
import numpy as np
import pandas as pd

# Source:
# https://www.taxpolicycenter.org/briefing-book/who-would-bear-burden-vat
VAT_INCIDENCE = pd.Series(
    index=[-1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99, 99.9],
    data=[3.9, 3.9, 3.6, 3.6, 3.6, 3.6, 3.6, 3.4, 3.4, 3.2, 2.8, 2.5, 2.5],
)
VAT_INCIDENCE /= 100

# Source: Table 5 in
# https://www.treasury.gov/resource-center/tax-policy/tax-analysis/Documents/WP-115.pdf
CARBON_TAX_INCIDENCE = pd.Series(
    index=[-1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99, 99.9],
    data=[0.8, 1.2, 1.4, 1.5, 1.6, 1.7, 1.8, 1.8, 1.8, 1.8, 1.6, 1.4, 0.7],
)
CARBON_TAX_INCIDENCE /= 100

# Source: Figure 1 in
# https://www.taxpolicycenter.org/sites/default/files/alfresco/publication-pdfs/2000587-financial-transaction-taxes.pdf
FTT_INCIDENCE = pd.Series(
    index=[-1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99, 99.9],
    data=[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.2, 0.2, 0.2, 0.3, 0.4, 0.8, 1.0],
)
FTT_INCIDENCE /= 100


def add_custom_tax(
    df,
    segment_income,
    w,
    base_income,
    incidence,
    name,
    total=None,
    ratio=None,
    verbose=True,
):
    """Add a custom tax based on incidence analysis driven by percentiles.

    :param df: DataFrame.
    :param segment_income: Income measure used to segment tax units into
        quantiles.
    :param w: Weight used to segment into quantiles (either s006 or XTOT_m).
    :param base_income: Income measure by which incidence is multiplied to
        estimate liability.
    :param incidence: pandas Series indexed on the floor of an income
        percentile, with values for the tax rate.
    :param name: Name of the column to add.
    :param total: Total amount the tax should generate. If not provided,
        liabilities are calculated only based on the incidence schedule.
        (Default value = None)
    :param ratio: Ratio to adjust the tax by, compared to the original tax.
        This acts as a multiplier for the incidence argument.
        (Default value = None)
    :param verbose: Whether to print the tax adjustment factor if needed.
        Defaults to True.
    :returns: Nothing. Adds the column name to df representing the tax
        liability. df is also sorted by segment_income.
    """
    if ratio is not None:
        incidence = incidence * ratio
        assert total is None, "ratio and total cannot both be provided."
    df.sort_values(segment_income, inplace=True)
    income_percentile = 100 * df[w].cumsum() / df[w].sum()
    tu_incidence = incidence.iloc[
        pd.cut(
            income_percentile,
            # Add a right endpoint. Should be 100 but sometimes a decimal
            # gets added.
            bins=incidence.index.tolist() + [101],
            labels=False,
        )
    ].values
    df[name] = np.maximum(0, tu_incidence * df[base_income])
    if total is not None:
        initial_total = mdf.weighted_sum(df, name, "s006")
        if verbose:
            print(
                "Multiplying tax by "
                + str(round(total / initial_total, 2))
                + "."
            )
        df[name] *= total / initial_total
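
# Minimal usage sketch (toy values; column names follow the defaults used below):
#   df = pd.DataFrame({
#       "tpc_eci": [20e3, 50e3, 120e3],
#       "aftertax_income": [18e3, 42e3, 90e3],
#       "XTOT_m": [1.0, 2.0, 1.5],
#       "s006": [100.0, 120.0, 80.0],
#   })
#   add_custom_tax(df, "tpc_eci", "XTOT_m", "aftertax_income", VAT_INCIDENCE, "vat")
#   df["vat"] then holds each unit's estimated VAT liability.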
def add_vat(
    df,
    segment_income="tpc_eci",
    w="XTOT_m",
    base_income="aftertax_income",
    incidence=VAT_INCIDENCE,
    name="vat",
    **kwargs
):
    """Add value added tax based on incidence estimate from Tax Policy Center.

    :param df: DataFrame with columns for tpc_eci, XTOT_m, and aftertax_income.
    :param segment_income: (Default value = "tpc_eci")
    :param w: (Default value = "XTOT_m")
    :param base_income: (Default value = "aftertax_income")
    :param incidence: (Default value = VAT_INCIDENCE)
    :param name: (Default value = "vat")
    :param **kwargs: Other arguments passed to add_custom_tax().
    :returns: Nothing. Adds vat to df.
        df is also sorted by tpc_eci.
    """
    add_custom_tax(
        df, segment_income, w, base_income, incidence, name, **kwargs
    )


def add_carbon_tax(
    df,
    segment_income="tpc_eci",
    w="XTOT_m",
    base_income="aftertax_income",
    incidence=CARBON_TAX_INCIDENCE,
    name="carbon_tax",
    **kwargs
):
    """Add carbon tax based on incidence estimate from the US Treasury
    Department.

    :param df: DataFrame with columns for tpc_eci, XTOT_m, and aftertax_income.
    :param segment_income: (Default value = "tpc_eci")
    :param w: (Default value = "XTOT_m")
    :param base_income: (Default value = "aftertax_income")
    :param incidence: (Default value = CARBON_TAX_INCIDENCE)
    :param name: (Default value = "carbon_tax")
    :param **kwargs: Other arguments passed to add_custom_tax().
    :returns: Nothing. Adds carbon_tax to df.
        df is also sorted by tpc_eci.
    """
    add_custom_tax(
        df, segment_income, w, base_income, incidence, name, **kwargs
    )


def add_ftt(
    df,
    segment_income="tpc_eci",
    w="XTOT_m",
    base_income="aftertax_income",
    incidence=FTT_INCIDENCE,
    name="ftt",
    **kwargs
):
    """Add financial transaction tax based on incidence estimate from Tax
    Policy Center.

    :param df: DataFrame with columns for tpc_eci, XTOT_m, and aftertax_income.
    :param segment_income: (Default value = "tpc_eci")
    :param w: (Default value = "XTOT_m")
    :param base_income: (Default value = "aftertax_income")
    :param incidence: (Default value = FTT_INCIDENCE)
    :param name: (Default value = "ftt")
    :param **kwargs: Other arguments passed to add_custom_tax().
    :returns: Nothing. Adds ftt to df.
        df is also sorted by tpc_eci.
    """
    add_custom_tax(
        df, segment_income, w, base_income, incidence, name, **kwargs
    )
| 33.41989 | 119 | 0.65515 | 886 | 6,049 | 4.351016 | 0.226862 | 0.052918 | 0.031128 | 0.02179 | 0.473411 | 0.43917 | 0.431128 | 0.428794 | 0.428794 | 0.42594 | 0 | 0.042073 | 0.237725 | 6,049 | 180 | 120 | 33.605556 | 0.793971 | 0.551661 | 0 | 0.333333 | 0 | 0 | 0.066966 | 0 | 0 | 0 | 0 | 0 | 0.011494 | 1 | 0.045977 | false | 0 | 0.034483 | 0 | 0.08046 | 0.011494 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74bd2bcc47cfe5b8be22decac84de616312a386c | 9,067 | py | Python | system_c/scripts/combine_two_systems.py | hkayesh/depend_clean | 1d4bfdaf9a4d323582ab36e3ec0f9b4f2faae851 | [
"Apache-2.0"
] | null | null | null | system_c/scripts/combine_two_systems.py | hkayesh/depend_clean | 1d4bfdaf9a4d323582ab36e3ec0f9b4f2faae851 | [
"Apache-2.0"
] | null | null | null | system_c/scripts/combine_two_systems.py | hkayesh/depend_clean | 1d4bfdaf9a4d323582ab36e3ec0f9b4f2faae851 | [
"Apache-2.0"
] | null | null | null | from utilities import Utilities
# from comment_level_evaluation import CommentLevelEvaluation
import operator


class CombineSystems:

    def __init__(self):
        self.utilities = Utilities()
        self.storage_path = 'comment-level-datasets-2/'
        # self.storage_path = 'r-combine-outputs/'
        # self.random_states = [111, 122, 133, 144, 155]
        self.categories = ['environment', 'waiting time', 'staff attitude professionalism', 'care quality', 'other']

    def is_valid_asp_from_from_system_a(self, aspect, confidence_value, thresholds):
        is_valid = False
        # thresholds = {'environment': 0.6,
        #               'waiting time': 0.5,
        #               'staff attitude and professionalism': 0.5,
        #               'care quality': 0.4,
        #               'other': 0.7,
        #               }
        aspects = thresholds.keys()
        if aspect in aspects and float(confidence_value) >= thresholds[aspect]:
            is_valid = True
        return is_valid

    def is_valid_asp_from_from_system_b(self, aspect, confidence_value, thresholds):
        is_valid = False
        # thresholds = {'environment': 0.1,
        #               'waiting time': 0.8,
        #               'staff attitude and professionalism': 0.1,
        #               'care quality': 0.1,
        #               'other': 0.1
        #               }
        aspects = thresholds.keys()
        if aspect in aspects and float(confidence_value) >= thresholds[aspect]:
            is_valid = True
        return is_valid

    def apply_dictionaries(self, comment):
        food_lexicon = ['food', 'canteen', 'canten', 'coffee', 'cofee', 'coffe', 'coffee', 'tea', 'drink', 'drinks']
        parking_lexicon = ['car park', 'car-park', 'carpark', 'parking', 'bicycle']
        aspects = []
        all_words = self.utilities.get_lemma(comment)
        lemmatized_words = all_words.values()
        for word in food_lexicon:
            if word in lemmatized_words:
                aspects.append('food')
                break
        for word in parking_lexicon:
            if word in lemmatized_words:
                aspects.append('parking')
                break
        return aspects

    def combine_by_dynamic_threshold(self, file_a_path, file_b_path, output_file_path, thresholds_a, thresholds_b, evaluation=False):
        file_a = self.utilities.read_from_csv(file_a_path)
        file_b = self.utilities.read_from_csv(file_b_path)
        output = []
        for row_a, row_b in zip(file_a, file_b):
            comment = row_a[0]
            aspects = []

            # remove comment from the first column
            del row_a[0]
            del row_b[0]

            for a, b in zip(row_a, row_b):
                # stop at the first pair of empty cells
                if not a and not b:
                    break

                # union with threshold
                if a:
                    asp_threshold = a.rsplit(' ', 1)[0]
                    sentiment = a.rsplit(' ', 1)[1]
                    aspect_a = asp_threshold.rsplit(' ', 1)[0]
                    asp_snt = aspect_a + " " + sentiment
                    if not any(aspect_a in asp for asp in aspects):
                        confidence_value_a = asp_threshold.rsplit(' ', 1)[1]
                        is_valid = self.is_valid_asp_from_from_system_a(aspect_a, confidence_value_a, thresholds_a)
                        if is_valid:
                            aspects.append(asp_snt)
                if b:
                    aspect_b = b.rsplit(' ', 1)[0]
                    if aspect_b in self.categories and not any(aspect_b in asp for asp in aspects):
                        confidence_value_b = b.rsplit(' ', 1)[1]
                        is_valid = self.is_valid_asp_from_from_system_b(aspect_b, confidence_value_b, thresholds_b)
                        if is_valid:
                            aspects.append(aspect_b)

            # Apply food and parking dictionaries
            # TURN OFF THIS SNIPPET BEFORE EVALUATION
            if evaluation is False:
                asps_from_dictionaries = self.apply_dictionaries(comment)
                if len(asps_from_dictionaries) > 0:
                    # if only environment, then replace with food/parking
                    if len(aspects) == 1 and aspects[0] == 'environment':
                        aspects = asps_from_dictionaries
                    else:
                        aspects = aspects + asps_from_dictionaries

            if len(aspects) < 1:
                # aspects = ['other']
                aspects = ['other negative']
            output.append([comment] + aspects)
        self.utilities.save_list_as_csv(output, output_file_path)

    def combine_by_static_threshold(self, file_a_path, file_b_path, threshold_a, threshold_b, output_file_path):
        file_a = self.utilities.read_from_csv(file_a_path)
        file_b = self.utilities.read_from_csv(file_b_path)
        output = []
        for row_a, row_b in zip(file_a, file_b):
            comment = row_a[0]
            aspects = []

            # remove comment from the first column
            del row_a[0]
            del row_b[0]

            for a, b in zip(row_a, row_b):
                # stop at the first pair of empty cells
                if not a and not b:
                    break

                # union with threshold
                if a and a.rsplit(' ', 1)[0] not in aspects and float(a.rsplit(' ', 1)[1]) >= threshold_a:
                    aspects.append(a.rsplit(' ', 1)[0])
                if b and b.rsplit(' ', 1)[0] in self.categories and b.rsplit(' ', 1)[0] not in aspects and float(b.rsplit(' ', 1)[1]) >= threshold_b:
                    aspects.append(b.rsplit(' ', 1)[0])

            # Apply food and parking dictionaries
            # asps_from_dictionaries = self.apply_dictionaries(comment)
            # if len(asps_from_dictionaries) > 0:
            #     aspects = aspects + asps_from_dictionaries

            if len(aspects) < 1:
                aspects = ['other']
            output.append([comment] + aspects)
        self.utilities.save_list_as_csv(output, output_file_path)

    def extract_top_comments(self, data_file, output_file_path):
        rows = self.utilities.read_from_csv(data_file)
        envs = {}
        wts = {}
        saaps = {}
        cqs = {}
        ots = {}
        for row in rows:
            comment = row[0]
            # drop the comment cell, keeping only the aspect cells of this row
            del row[0]
            for item in row:
                # if there is sentiment remove it
                if any(snt_cat in item for snt_cat in self.utilities.sentiment_classes):
                    item = item.rsplit(' ', 1)[0]
                if item and item.rsplit(' ', 1)[0] == 'environment':
                    envs[comment] = float(item.rsplit(' ', 1)[1])
                if item and item.rsplit(' ', 1)[0] == 'waiting time':
                    wts[comment] = float(item.rsplit(' ', 1)[1])
                if item and item.rsplit(' ', 1)[0] == 'staff attitude and professionalism':
                    saaps[comment] = float(item.rsplit(' ', 1)[1])
                if item and item.rsplit(' ', 1)[0] == 'care quality':
                    cqs[comment] = float(item.rsplit(' ', 1)[1])
                if item and item.rsplit(' ', 1)[0] == 'other':
                    ots[comment] = float(item.rsplit(' ', 1)[1])

        # sort comments by the descending order of confidence values
        sorted_envs = [comment_data[0] for comment_data in sorted(envs.items(), key=operator.itemgetter(1), reverse=True)]
        sorted_wts = [comment_data[0] for comment_data in sorted(wts.items(), key=operator.itemgetter(1), reverse=True)]
        sorted_saaps = [comment_data[0] for comment_data in sorted(saaps.items(), key=operator.itemgetter(1), reverse=True)]
        sorted_cqs = [comment_data[0] for comment_data in sorted(cqs.items(), key=operator.itemgetter(1), reverse=True)]
        sorted_ots = [comment_data[0] for comment_data in sorted(ots.items(), key=operator.itemgetter(1), reverse=True)]

        # prepare output to save
        output = [['Environment', 'Waiting time', 'Staff attitude and professionalism', 'Care quality', 'Other']]
        top = 5
        for i in range(0, top):
            comments = []
            try:
                comments.append(sorted_envs[i])
            except IndexError:
                comments.append(None)
            try:
                comments.append(sorted_wts[i])
            except IndexError:
                comments.append(None)
            try:
                comments.append(sorted_saaps[i])
            except IndexError:
                comments.append(None)
            try:
                comments.append(sorted_cqs[i])
            except IndexError:
                comments.append(None)
            try:
                comments.append(sorted_ots[i])
            except IndexError:
                comments.append(None)
            output.append(comments)
        self.utilities.save_list_as_csv(output, output_file_path)
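
# Usage sketch (hypothetical CSV paths and thresholds for the two upstream systems):
#   cs = CombineSystems()
#   cs.combine_by_static_threshold('system_a_out.csv', 'system_b_out.csv', 0.5, 0.5, 'combined.csv')
#   cs.extract_top_comments('combined.csv', 'top_comments.csv')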
| 38.419492 | 149 | 0.545715 | 1,062 | 9,067 | 4.46516 | 0.149718 | 0.035428 | 0.023619 | 0.015183 | 0.614509 | 0.548503 | 0.536061 | 0.51202 | 0.381274 | 0.381274 | 0 | 0.01872 | 0.351936 | 9,067 | 235 | 150 | 38.582979 | 0.788291 | 0.122532 | 0 | 0.412162 | 0 | 0 | 0.050366 | 0.003156 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047297 | false | 0 | 0.013514 | 0 | 0.087838 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74bdf4ce11cc1cf0cef84ffadb758b63eccf4b5b | 603 | py | Python | python/ops/lekcija 24/program01.py | jasarsoft/examples | d6fddfcb8c50c31fbfe170a3edd2b6c07890f13e | [
"MIT"
] | null | null | null | python/ops/lekcija 24/program01.py | jasarsoft/examples | d6fddfcb8c50c31fbfe170a3edd2b6c07890f13e | [
"MIT"
] | null | null | null | python/ops/lekcija 24/program01.py | jasarsoft/examples | d6fddfcb8c50c31fbfe170a3edd2b6c07890f13e | [
"MIT"
] | null | null | null | import json
student = {
    "ime": "Milan",
    "prezime": "Tair",
    "indeks": 2008213514,
    "ispiti": [
        {
            "predmet": "Programiranje 1",
            "datum": "2008-01-05",
            "ocjena": 10
        },
        {
            "predmet": "Informatika",
            "datum": "2008-01-05",
            "ocjena": 10
        }
    ]
}

# str() gives the Python repr (single quotes) - not valid JSON
studentString = str(student)
print(studentString)

# json.dumps() serializes the dict to a proper JSON string
studentString = json.dumps(student)
print(studentString)

# write the JSON string to a file
datoteka = open("student.json", "w")
datoteka.write(studentString)
datoteka.close()
| 20.1 | 44 | 0.480929 | 48 | 603 | 6.041667 | 0.604167 | 0.062069 | 0.075862 | 0.089655 | 0.144828 | 0.144828 | 0 | 0 | 0 | 0 | 0 | 0.081794 | 0.371476 | 603 | 29 | 45 | 20.793103 | 0.683377 | 0 | 0 | 0.24 | 0 | 0 | 0.208955 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.04 | 0 | 0.04 | 0.08 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74be7f88c0d0fd7163d55362678d0731ca6c0782 | 1,066 | py | Python | ___Python/Carsten/p11_Excel/m01_Excel_einlesen.py | uvenil/PythonKurs201806 | 85afa9c9515f5dd8bec0c546f077d8cc39568fe8 | [
"Apache-2.0"
] | null | null | null | ___Python/Carsten/p11_Excel/m01_Excel_einlesen.py | uvenil/PythonKurs201806 | 85afa9c9515f5dd8bec0c546f077d8cc39568fe8 | [
"Apache-2.0"
] | null | null | null | ___Python/Carsten/p11_Excel/m01_Excel_einlesen.py | uvenil/PythonKurs201806 | 85afa9c9515f5dd8bec0c546f077d8cc39568fe8 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import xlrd
import xlsxwriter

from p01_kennenlernen import meinebibliothek

df = pd.read_excel(r"O:\___Python\personen.xlsx")  # read the Excel file into Python; dates come in as Timestamp
print(df)
print()

df1 = pd.to_datetime(df["Geburtsdatum"])  # convert Timestamp to datetime
print(df1)
print()

alter = []
for geburtstag in df1:  # reuse the age calculation built earlier
    alter.append(meinebibliothek.alter(geburtstag))

durchschnittsalter = sum(alter) / len(alter)  # compute the average age
print("Durchschnittsalter ", durchschnittsalter)
print()

df["Alter"] = alter  # add the computed age to the table read from Excel
print(df)

writer = pd.ExcelWriter(r"O:\___Python\personen_bearbeitet.xlsx", engine="xlsxwriter")  # create an Excel writer using XlsxWriter
df.to_excel(writer, sheet_name='Sheet1')  # convert the DataFrame to an XlsxWriter Excel object
writer.save()  # close the pandas Excel writer and export the Excel document
| 34.387097 | 135 | 0.766417 | 132 | 1,066 | 6.098485 | 0.537879 | 0.026087 | 0.037267 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006579 | 0.144465 | 1,066 | 30 | 136 | 35.533333 | 0.876096 | 0.401501 | 0 | 0.238095 | 0 | 0 | 0.191987 | 0.105175 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.190476 | 0 | 0.190476 | 0.333333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74bf6f3b5de8a5799156e1034c8878c020d100c2 | 7,769 | py | Python | vmz_interface/extractor/build_lmdb.py | fksato/vmz_interface | 985e7129f4bf266a6226dbc2b7e108dafc8b917a | [
"Apache-2.0"
] | null | null | null | vmz_interface/extractor/build_lmdb.py | fksato/vmz_interface | 985e7129f4bf266a6226dbc2b7e108dafc8b917a | [
"Apache-2.0"
] | null | null | null | vmz_interface/extractor/build_lmdb.py | fksato/vmz_interface | 985e7129f4bf266a6226dbc2b7e108dafc8b917a | [
"Apache-2.0"
] | null | null | null | import os
import csv
import numpy as np
import pandas as pd
import pickle as pk

from glob import glob
from math import ceil

from vmz_interface.data.db_video_create import create_video_db


class VideoDBBuilder:

    def __init__(self, stimulus_id, lmdb_path, temporal_depth, fpv=75, video_strt_offset=15,
                 clips_overlap=0, batch_size=4, gpu_count=2, max_num_records=6e4, min_records_factor=1,
                 allow_mkdir=False, *args, **kwargs):
        if not os.path.isdir(lmdb_path):
            if allow_mkdir:
                os.mkdir(lmdb_path)
            else:
                raise Exception(f'please make sure {lmdb_path} is a valid directory')

        self._stim_id = stimulus_id
        self._lmdb_path = lmdb_path
        self.num_frames_per_clips = temporal_depth
        self.BATCH_SIZE = batch_size
        self.GPU_CNT = gpu_count
        self.gpus = []
        self.MAX_RECORDS = max_num_records  # 60K max number of records per lmdb (arbitrarily chosen)
        self.MIN_RECORDS_MULT = min_records_factor  # used to make sure last file is not too large (arbitrarily chosen)
        self.fpv = fpv
        self.video_start_offset = video_strt_offset
        self.clips_overlap = clips_overlap
        self.list_lmdb_meta = []
        self.units = 1
        self.video_lmdb_paths = None
        self.uneven_db = True
        self.gpu_batch_combo = None

        self.clips_dir = f'{stimulus_id}_{self.num_frames_per_clips}_{self.clips_overlap}'
        self.clips_lmdb_data_path = f'{self._lmdb_path}/{self.clips_dir}'
        if not os.path.isdir(self.clips_lmdb_data_path):
            if allow_mkdir:
                os.mkdir(self.clips_lmdb_data_path)
            else:
                raise Exception(f'please make sure {self.clips_lmdb_data_path} is a valid directory')

    def make_from_paths(self, stimuli_paths):
        self.video_paths = stimuli_paths
        self.vid_cnt = len(self.video_paths)
        lmdb_metas = glob(f'{self.clips_lmdb_data_path}/lmdb_meta_*.csv')
        # make existence check:
        if len(lmdb_metas) > 0:
            vid_list = set(self.video_paths)
            created_metas = set()
            for i in range(len(lmdb_metas)):
                with open(f'{self.clips_lmdb_data_path}/lmdb_meta_{i}.csv') as f:
                    df = pd.read_csv(f)
                    created_metas.update(set(df['org_video'].unique()))
            if created_metas == vid_list:
                self.video_lmdb_paths = glob(f'{self.clips_lmdb_data_path}/lmdb_*_db')
            else:
                raise Exception(f'Stimulus id {self._stim_id} does not match the videos in the LMDB')
        else:
            if not self.write_lmdb_meta():
                raise Exception('writing stimulus lmdb metas failed')
            else:
                self._create_video_dbs()

    def write_lmdb_meta(self):
        num_clips, start_frms = self._start_frames()
        db_starts, db_strides = self._records_per_meta(num_clips)
        file_strides = [int(i/num_clips) for i in db_strides]
        file_starts = [int(i/num_clips) for i in db_starts]
        sub_paths = [self.video_paths[offset:offset+stride] for offset, stride in zip(file_starts, file_strides)]
        write_data = [[[data[i],
                        0,  # labels is None? hacs_action_dict[os.path.basename(os.path.dirname(data[i]))]
                        start_frms[clip_idx],
                        num_clips*i + clip_idx + db_starts[idx]]
                       for i in range(len(data)) for clip_idx in range(num_clips)]
                      for idx, data in enumerate(sub_paths)]
        self.uneven_db = False
        if len(file_strides) > 0:
            self.uneven_db = file_strides[-1] == file_strides[0]
        # self.units = num_clips
        return self._write_lmdb_meta(write_data)

    def _write_lmdb_meta(self, write_data):
        for group, group_paths in enumerate(write_data):
            with open(f'{self.clips_lmdb_data_path}/lmdb_meta_{group}.csv', 'w') as f:
                writer = csv.writer(f)
                writer.writerow(['org_video', 'label', 'start_frm', 'video_id'])
                writer.writerows(group_paths)
            self.list_lmdb_meta.append(f'{self.clips_lmdb_data_path}/lmdb_meta_{group}.csv')
        return True

    def _start_frames(self):
        '''
        calculate how many examples given CLIPs type:
            FULL: number of clips per video == 1
            CLIPs_ONE: each clip strides by 1, overlaping 15 frames between adjacent CLIPs
            CLIPs_TEN: overlaping 10 frames between adjacent CLIPs

        num_clips = ceil( (total_frames_per_video - temporal_depth - offset) / clips_stride ) + 1

        given num_clips per video, calculate frame starts for videos:
            start_frm[0] = (total_frames_per_video - temporal_depth) - stride * (num_clips - 1)
            start_frm[i] = start_frm[i-1] + 6
        '''
        video_width = (self.fpv - self.video_start_offset)  # 60
        clips_stride = (self.num_frames_per_clips - self.clips_overlap)
        num_CLIPS = ceil((video_width - self.num_frames_per_clips)/clips_stride) + 1
        initial_frame = (self.fpv - self.num_frames_per_clips) - (num_CLIPS - 1) * clips_stride
        start_frms = [initial_frame + i*clips_stride for i in range(num_CLIPS)]
        assert all(start_frms[i] > 0 for i in range(len(start_frms)))
        assert any(start_frms[i] <= self.video_start_offset for i in range(len(start_frms)))
        return num_CLIPS, start_frms
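
    # Worked example (defaults fpv=75, video_strt_offset=15; assuming temporal_depth=32, clips_overlap=0):
    #   video_width = 60, clips_stride = 32
    #   num_CLIPS = ceil((60 - 32) / 32) + 1 = 2
    #   initial_frame = (75 - 32) - 1 * 32 = 11  ->  start_frms = [11, 43]
    # Both assertions hold: every start is positive and the first start (11) lies within the 15-frame offset.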
    def _records_per_meta(self, num_clips):
        """
        Caffe2 video model does not pad batched data.
        This utility function distributes batched data into an even number of record files,
        a multiple of NUM_GPU and BATCH_SIZE.
        The remainder will be added to a final meta file with a minimum of
        total video remainder * MIN_RECORDS_MULT records.

        Returns a list of where in the video_paths list each lmdb should begin creating a DB
        and a list of how many videos in the list it should consume.
        """
        total_num_records = num_clips * self.vid_cnt
        div_criteria = num_clips * self.BATCH_SIZE * self.GPU_CNT  # extract_features requires number of records to divide evenly

        # start with 1 file:
        num_files = 1
        # files_rem = int(total_num_records % num_files)
        records_per_file = int(total_num_records/num_files)

        if records_per_file > self.MAX_RECORDS:
            # files_rem = int(total_num_records % self.MAX_RECORDS)
            num_files = int(total_num_records / self.MAX_RECORDS)
            records_per_file = int(total_num_records/num_files)

        rem_per_file = int(records_per_file % div_criteria)
        records_per_file = records_per_file - rem_per_file

        file_starts = [int(records_per_file*i) for i in range(0, num_files)]
        file_strides = [int(records_per_file) for i in range(num_files)]

        rem_total = total_num_records - num_files * records_per_file
        temp_rem = rem_total
        if rem_total > div_criteria * self.MIN_RECORDS_MULT:
            temp_rem = int(rem_total % div_criteria)
            extra_file = rem_total - temp_rem
            file_starts.append(int(extra_file + file_starts[-1]))
            file_strides.append(int(extra_file))
            num_files += 1

        if temp_rem > 0:
            file_starts.append(int(file_starts[-1] + file_strides[-1]))
            file_strides.append(int(temp_rem))
            self._get_gpu_batch_combo(temp_rem)

        assert all(file_starts[i] % div_criteria == 0 for i in range(1, num_files))
        assert total_num_records - file_starts[-1] == temp_rem
        assert sum(file_strides) == total_num_records
        return file_starts, file_strides
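
    # Worked example (assuming num_clips=2, 100 videos, BATCH_SIZE=4, GPU_CNT=2):
    #   total_num_records = 200, div_criteria = 2 * 4 * 2 = 16
    #   one file of 192 records (200 trimmed down to a multiple of 16)
    #   the remainder 8 is not > 16 * MIN_RECORDS_MULT, so it becomes its own tail file:
    #   file_starts = [0, 192], file_strides = [192, 8], gpu_batch_combo = [2, 1]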
    def _get_gpu_batch_combo(self, file_remainder):
        gpu_check = int(file_remainder % self.GPU_CNT)
        batch_check = int(file_remainder % self.BATCH_SIZE)
        if gpu_check == 0:  # records divide evenly across GPUs regardless of batch size
            self.gpu_batch_combo = [self.GPU_CNT, 1]
        elif batch_check == 0:  # fall back to a single GPU with full batches
            self.gpu_batch_combo = [1, self.BATCH_SIZE]
        else:
            self.gpu_batch_combo = [1, 1]

    def _create_video_dbs(self):
        """
        create_video_db(
            args.list_file,
            args.output_file,
            args.use_list,
            args.use_video_id,
            args.use_start_frame,
            args.num_epochs
        )
        """
        use_list = 1
        use_video_id = 1
        use_start_frame = 1
        list_lmdb_output = [f'{self.clips_lmdb_data_path}/lmdb_{i}_db' for i in range(len(self.list_lmdb_meta))]
        for i in range(len(self.list_lmdb_meta)):
            create_video_db(list_file=self.list_lmdb_meta[i], output_file=list_lmdb_output[i], use_list=use_list, use_video_id=use_video_id, use_start_frame=use_start_frame)
        self.video_lmdb_paths = list_lmdb_output
| 35.313636 | 164 | 0.740378 | 1,282 | 7,769 | 4.153666 | 0.170047 | 0.02554 | 0.013521 | 0.031925 | 0.281315 | 0.174648 | 0.150423 | 0.107793 | 0.049202 | 0.022911 | 0 | 0.008913 | 0.16244 | 7,769 | 219 | 165 | 35.474886 | 0.809436 | 0.197838 | 0 | 0.072464 | 0 | 0 | 0.098614 | 0.062037 | 0 | 0 | 0 | 0 | 0.036232 | 1 | 0.057971 | false | 0 | 0.057971 | 0 | 0.152174 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74c04ac95cf162be1b2cc7dbd708d0513446fe97 | 13,038 | py | Python | tests/test_pyoidc_facade.py | Titotix/Flask-pyoidc | 87b8059617304ec634d7b6d3f5f649c40d9cdb4c | [
"Apache-2.0"
] | 64 | 2017-01-31T09:08:15.000Z | 2021-12-21T21:05:45.000Z | tests/test_pyoidc_facade.py | Titotix/Flask-pyoidc | 87b8059617304ec634d7b6d3f5f649c40d9cdb4c | [
"Apache-2.0"
] | 99 | 2017-02-08T22:38:54.000Z | 2022-03-31T22:03:27.000Z | tests/test_pyoidc_facade.py | Titotix/Flask-pyoidc | 87b8059617304ec634d7b6d3f5f649c40d9cdb4c | [
"Apache-2.0"
] | 33 | 2017-02-09T18:19:51.000Z | 2021-12-24T17:48:52.000Z | import time
import base64

import pytest
import responses
from oic.oic import AuthorizationResponse, AccessTokenResponse, TokenErrorResponse, OpenIDSchema, \
    AuthorizationErrorResponse
from urllib.parse import parse_qsl, urlparse

from flask_pyoidc.provider_configuration import ProviderConfiguration, ClientMetadata, ProviderMetadata, \
    ClientRegistrationInfo
from flask_pyoidc.pyoidc_facade import PyoidcFacade, _ClientAuthentication
from .util import signed_id_token

REDIRECT_URI = 'https://rp.example.com/redirect_uri'


class TestPyoidcFacade(object):
    PROVIDER_BASEURL = 'https://op.example.com'
    PROVIDER_METADATA = ProviderMetadata(PROVIDER_BASEURL,
                                         PROVIDER_BASEURL + '/auth',
                                         PROVIDER_BASEURL + '/jwks')
    CLIENT_METADATA = ClientMetadata('client1', 'secret1')

    def test_registered_client_metadata_is_forwarded_to_pyoidc(self):
        config = ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA, client_metadata=self.CLIENT_METADATA)
        facade = PyoidcFacade(config, REDIRECT_URI)
        assert facade._client.registration_response

    def test_no_registered_client_metadata_is_handled(self):
        config = ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                       client_registration_info=ClientRegistrationInfo())
        facade = PyoidcFacade(config, REDIRECT_URI)
        assert not facade._client.registration_response

    def test_is_registered(self):
        unregistered = ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                             client_registration_info=ClientRegistrationInfo())
        registered = ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                           client_metadata=self.CLIENT_METADATA)
        assert PyoidcFacade(unregistered, REDIRECT_URI).is_registered() is False
        assert PyoidcFacade(registered, REDIRECT_URI).is_registered() is True

    @responses.activate
    def test_register(self):
        registration_endpoint = self.PROVIDER_BASEURL + '/register'
        responses.add(responses.POST, registration_endpoint, json=self.CLIENT_METADATA.to_dict())
        provider_metadata = self.PROVIDER_METADATA.copy(registration_endpoint=registration_endpoint)
        unregistered = ProviderConfiguration(provider_metadata=provider_metadata,
                                             client_registration_info=ClientRegistrationInfo())

        facade = PyoidcFacade(unregistered, REDIRECT_URI)
        facade.register()
        assert facade.is_registered() is True

    def test_authentication_request(self):
        extra_user_auth_params = {'foo': 'bar', 'abc': 'xyz'}
        config = ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                       client_metadata=self.CLIENT_METADATA,
                                       auth_request_params=extra_user_auth_params)

        state = 'test_state'
        nonce = 'test_nonce'
        facade = PyoidcFacade(config, REDIRECT_URI)
        extra_lib_auth_params = {'foo': 'baz', 'qwe': 'rty'}
        auth_request = facade.authentication_request(state, nonce, extra_lib_auth_params)

        expected_auth_params = {
            'scope': 'openid',
            'response_type': 'code',
            'client_id': self.CLIENT_METADATA['client_id'],
            'redirect_uri': REDIRECT_URI,
            'state': state,
            'nonce': nonce
        }
        expected_auth_params.update(extra_user_auth_params)
        expected_auth_params.update(extra_lib_auth_params)

        assert auth_request.to_dict() == expected_auth_params

    def test_parse_authentication_response(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        auth_code = 'auth_code-1234'
        state = 'state-1234'
        auth_response = AuthorizationResponse(**{'state': state, 'code': auth_code})
        parsed_auth_response = facade.parse_authentication_response(auth_response.to_dict())
        assert isinstance(parsed_auth_response, AuthorizationResponse)
        assert parsed_auth_response.to_dict() == auth_response.to_dict()

    def test_parse_authentication_response_handles_error_response(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        error_response = AuthorizationErrorResponse(**{'error': 'invalid_request', 'state': 'state-1234'})
        parsed_auth_response = facade.parse_authentication_response(error_response)
        assert isinstance(parsed_auth_response, AuthorizationErrorResponse)
        assert parsed_auth_response.to_dict() == error_response.to_dict()

    @responses.activate
    def test_parse_authentication_response_preserves_id_token_jwt(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        state = 'state-1234'
        now = int(time.time())
        id_token, id_token_signing_key = signed_id_token({
            'iss': self.PROVIDER_METADATA['issuer'],
            'sub': 'test_sub',
            'aud': 'client1',
            'exp': now + 1,
            'iat': now
        })
        responses.add(responses.GET,
                      self.PROVIDER_METADATA['jwks_uri'],
                      json={'keys': [id_token_signing_key.serialize()]})
        auth_response = AuthorizationResponse(**{'state': state, 'id_token': id_token})
        parsed_auth_response = facade.parse_authentication_response(auth_response)
        assert isinstance(parsed_auth_response, AuthorizationResponse)
        assert parsed_auth_response['state'] == state
        assert parsed_auth_response['id_token_jwt'] == id_token

    @pytest.mark.parametrize('request_func,expected_token_request', [
        (
            lambda facade: facade.exchange_authorization_code('auth-code'),
            {
                'grant_type': 'authorization_code',
                'code': 'auth-code',
                'redirect_uri': REDIRECT_URI
            }
        ),
        (
            lambda facade: facade.refresh_token('refresh-token'),
            {
                'grant_type': 'refresh_token',
                'refresh_token': 'refresh-token',
                'redirect_uri': REDIRECT_URI
            }
        )
    ])
    @responses.activate
    def test_token_request(self, request_func, expected_token_request):
        token_endpoint = self.PROVIDER_BASEURL + '/token'
        now = int(time.time())
        id_token_claims = {
            'iss': self.PROVIDER_METADATA['issuer'],
            'sub': 'test_user',
            'aud': [self.CLIENT_METADATA['client_id']],
            'exp': now + 1,
            'iat': now,
            'nonce': 'test_nonce'
        }
        id_token_jwt, id_token_signing_key = signed_id_token(id_token_claims)

        token_response = AccessTokenResponse(access_token='test_access_token',
                                             token_type='Bearer',
                                             id_token=id_token_jwt)
        responses.add(responses.POST, token_endpoint, json=token_response.to_dict())

        provider_metadata = self.PROVIDER_METADATA.copy(token_endpoint=token_endpoint)
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=provider_metadata,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        responses.add(responses.GET,
                      self.PROVIDER_METADATA['jwks_uri'],
                      json={'keys': [id_token_signing_key.serialize()]})

        token_response = request_func(facade)
        assert isinstance(token_response, AccessTokenResponse)
        expected_token_response = token_response.to_dict()
        expected_token_response['id_token'] = id_token_claims
        expected_token_response['id_token_jwt'] = id_token_jwt
        assert token_response.to_dict() == expected_token_response

        token_request = dict(parse_qsl(responses.calls[0].request.body))
        assert token_request == expected_token_request

    @responses.activate
    def test_token_request_handles_error_response(self):
        token_endpoint = self.PROVIDER_BASEURL + '/token'
        token_response = TokenErrorResponse(error='invalid_request', error_description='test error description')
        responses.add(responses.POST, token_endpoint, json=token_response.to_dict(), status=400)
        provider_metadata = self.PROVIDER_METADATA.copy(token_endpoint=token_endpoint)
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=provider_metadata,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        assert facade.exchange_authorization_code('1234') == token_response

    def test_token_request_handles_missing_provider_token_endpoint(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        assert facade.exchange_authorization_code('1234') is None

    @pytest.mark.parametrize('userinfo_http_method', [
        'GET',
        'POST'
    ])
    @responses.activate
    def test_configurable_userinfo_endpoint_method_is_used(self, userinfo_http_method):
        userinfo_endpoint = self.PROVIDER_BASEURL + '/userinfo'
        userinfo_response = OpenIDSchema(sub='user1')
        responses.add(userinfo_http_method, userinfo_endpoint, json=userinfo_response.to_dict())
        provider_metadata = self.PROVIDER_METADATA.copy(userinfo_endpoint=userinfo_endpoint)
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=provider_metadata,
                                                    client_metadata=self.CLIENT_METADATA,
                                                    userinfo_http_method=userinfo_http_method),
                              REDIRECT_URI)
        assert facade.userinfo_request('test_token') == userinfo_response

    def test_no_userinfo_request_is_made_if_no_userinfo_http_method_is_configured(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA,
                                                    userinfo_http_method=None),
                              REDIRECT_URI)
        assert facade.userinfo_request('test_token') is None

    def test_no_userinfo_request_is_made_if_no_userinfo_endpoint_is_configured(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        assert facade.userinfo_request('test_token') is None

    def test_no_userinfo_request_is_made_if_no_access_token(self):
        provider_metadata = self.PROVIDER_METADATA.copy(userinfo_endpoint=self.PROVIDER_BASEURL + '/userinfo')
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=provider_metadata,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        assert facade.userinfo_request(None) is None


class TestClientAuthentication(object):
    CLIENT_ID = 'client1'
    CLIENT_SECRET = 'secret1'

    @property
    def basic_auth(self):
        credentials = '{}:{}'.format(self.CLIENT_ID, self.CLIENT_SECRET)
        return 'Basic {}'.format(base64.urlsafe_b64encode(credentials.encode('utf-8')).decode('utf-8'))
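
    # For the fixed test credentials this evaluates to
    # 'Basic Y2xpZW50MTpzZWNyZXQx' (urlsafe base64 of 'client1:secret1').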
    @pytest.fixture(autouse=True)
    def setup(self):
        self.client_auth = _ClientAuthentication(self.CLIENT_ID, self.CLIENT_SECRET)

    def test_client_secret_basic(self):
        request = {}
        headers = self.client_auth('client_secret_basic', request)
        assert headers == {'Authorization': self.basic_auth}
        assert request == {}

    def test_client_secret_post(self):
        request = {}
        headers = self.client_auth('client_secret_post', request)
        assert headers is None
        assert request == {'client_id': self.CLIENT_ID, 'client_secret': self.CLIENT_SECRET}

    def test_defaults_to_client_secret_basic(self):
        assert self.client_auth('invalid_client_auth_method', {}) == self.client_auth('client_secret_basic', {})
| 49.763359 | 118 | 0.649409 | 1,262 | 13,038 | 6.335182 | 0.12916 | 0.094059 | 0.052533 | 0.056035 | 0.596623 | 0.51257 | 0.430394 | 0.397498 | 0.353846 | 0.329581 | 0 | 0.004615 | 0.268753 | 13,038 | 261 | 119 | 49.954023 | 0.833963 | 0 | 0 | 0.321429 | 0 | 0 | 0.071253 | 0.004679 | 0 | 0 | 0 | 0 | 0.120536 | 1 | 0.089286 | false | 0 | 0.040179 | 0 | 0.165179 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74c236bbb346149e0b60b81086ce4a68adb25d83 | 4,260 | py | Python | INIT.py | db260179/mullion | 3ed599f624ab308f5a6ffe4c51aa372252430aa1 | [
"WTFPL"
] | null | null | null | INIT.py | db260179/mullion | 3ed599f624ab308f5a6ffe4c51aa372252430aa1 | [
"WTFPL"
] | null | null | null | INIT.py | db260179/mullion | 3ed599f624ab308f5a6ffe4c51aa372252430aa1 | [
"WTFPL"
] | null | null | null | # Thanks to zecoxao and flatz <3
import struct
from binascii import unhexlify as uhx
from binascii import hexlify as hx
from Crypto.Cipher import AES
from Crypto.Hash import SHA, HMAC, CMAC
import os
import sys

EID1KEYS = [
    '88228B0F92C4C36AF097F1FE948D27CE',
    '5794BC8C2131B1E3E7EC61EF14C32EB5',
]

INITKEYS = [
    '48FF6BFA9C172C6E14AE444419CAF676'
]

ZEROS128 = ['00000000000000000000000000000000']


def aes_decrypt_cbc(key, iv, input):
    return AES.new(key, AES.MODE_CBC, iv).decrypt(input)


def aes_decrypt_ecb(key, input):
    return AES.new(key, AES.MODE_ECB).decrypt(input)


def aes_encrypt_cbc(key, iv, input):
    return AES.new(key, AES.MODE_CBC, iv).encrypt(input)


def main(argc, argv):
    with open(sys.argv[1], 'rb') as f:
        data = f.read()

    # the eight data sections that follow the EID1 blob
    data1 = data[0x2A0:0x2B0]
    data2 = data[0x2B0:0x2C0]
    data3 = data[0x2C0:0x2D0]
    data4 = data[0x2D0:0x2E0]
    data5 = data[0x2E0:0x300]
    data6 = data[0x300:0x320]
    data7 = data[0x320:0x340]
    data8 = data[0x340:0x360]

    eid1 = data[0x10:0x290]
    hash = data[0x290:0x2A0]

    # verify the EID1 CMAC against the stored hash
    cmac1 = CMAC.new(uhx(EID1KEYS[0]), ciphermod=AES)
    cmac1.update(eid1)
    print(hx(hash))
    print(cmac1.hexdigest())

    sexy = aes_decrypt_cbc(uhx(EID1KEYS[0]), uhx(ZEROS128[0]), eid1)
    keyseed = sexy[:0x10]

    # derive the per-section key chain by repeatedly encrypting the keyseed
    # (pck1..pck12 in the original; only the first four keys are used below)
    pcks = []
    prev = keyseed
    for _ in range(12):
        prev = aes_encrypt_cbc(uhx(INITKEYS[0]), uhx(ZEROS128[0]), prev)
        pcks.append(prev)
    pck1, pck2, pck3, pck4 = pcks[:4]

    data1_stage1 = aes_decrypt_ecb(pck1, data1)
    data2_stage1 = aes_decrypt_ecb(pck2, data2)
    data3_stage1 = aes_decrypt_ecb(pck3, data3)
    data4_stage1 = aes_decrypt_ecb(pck4, data4)

    # each 0x20-byte section is a 0x10-byte body followed by its CMAC
    hash1 = data5[0x10:]
    body1 = data5[:0x10]
    cmac1 = CMAC.new(pck1, ciphermod=AES)
    cmac1.update(body1)
    print(hx(hash1))
    print(cmac1.hexdigest())

    hash2 = data6[0x10:]
    body2 = data6[:0x10]
    cmac1 = CMAC.new(pck1, ciphermod=AES)
    cmac1.update(body2)
    print(hx(hash2))
    print(cmac1.hexdigest())

    hash3 = data7[0x10:]
    body3 = data7[:0x10]
    cmac1 = CMAC.new(pck1, ciphermod=AES)
    cmac1.update(body3)
    print(hx(hash3))
    print(cmac1.hexdigest())

    hash4 = data8[0x10:]
    body4 = data8[:0x10]
    cmac1 = CMAC.new(pck1, ciphermod=AES)
    cmac1.update(body4)
    print(hx(hash4))
    print(cmac1.hexdigest())

    data5_stage1 = aes_decrypt_ecb(pck1, body1)
    data6_stage1 = aes_decrypt_ecb(pck1, body2)
    data7_stage1 = aes_decrypt_ecb(pck1, body3)
    data8_stage1 = aes_decrypt_ecb(pck1, body4)

    with open(sys.argv[1] + '.eid1.dec.bin', 'wb') as g:
        g.write(sexy)
    with open(sys.argv[1] + '.init.dec.bin', 'wb') as g:
        g.write(data1_stage1+data2_stage1+data3_stage1+data4_stage1+data5_stage1+data6_stage1+data7_stage1+data8_stage1)


if __name__ == '__main__':
main(len(sys.argv), sys.argv) | 38.035714 | 129 | 0.559155 | 502 | 4,260 | 4.595618 | 0.219124 | 0.05635 | 0.073255 | 0.073255 | 0.398786 | 0.328132 | 0.328132 | 0.301691 | 0.301691 | 0.034677 | 0 | 0.135986 | 0.321596 | 4,260 | 112 | 130 | 38.035714 | 0.662284 | 0.007042 | 0 | 0.1 | 0 | 0 | 0.040797 | 0.031083 | 0 | 0 | 0.032783 | 0 | 0 | 1 | 0.044444 | false | 0 | 0.077778 | 0.033333 | 0.155556 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74c3c58b7c52752273d5091bfab02b92f9e98a85 | 10,549 | py | Python | dgi/code2graph/class_graph_builder.py | konveyor/tackle-data-gravity-insights | 97a3eb6a04a2bca7f7e3422581a8fad055d90c04 | [
"Apache-2.0"
] | 3 | 2022-03-28T20:54:34.000Z | 2022-03-31T15:14:39.000Z | dgi/code2graph/class_graph_builder.py | rofrano/tackle-data-gravity-insights | f734f023dc46ca8e038b5ba8029e5c1177a1d34f | [
"Apache-2.0"
] | 9 | 2022-03-01T13:29:50.000Z | 2022-03-31T13:04:36.000Z | dgi/code2graph/class_graph_builder.py | rofrano/tackle-data-gravity-insights | f734f023dc46ca8e038b5ba8029e5c1177a1d34f | [
"Apache-2.0"
] | 3 | 2022-03-28T14:41:45.000Z | 2022-03-30T19:17:31.000Z | ################################################################################
# Copyright IBM Corporation 2021, 2022
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################

import os
import errno
import logging
import pandas as pd

from typing import Dict
from pathlib import Path
from tqdm import tqdm
from neomodel.exceptions import DoesNotExist

# Import our packages
from dgi.code2graph.process_facts import ConsumeFacts
from dgi.models import ClassNode
from dgi.code2graph.abstract_graph_builder import AbstractGraphBuilder

# Author information
__author__ = "Rahul Krishna"
__license__ = "Apache 2.0"
__version__ = "1.0"
__maintainer__ = "Rahul Krishna"
__email__ = "rkrsn@ibm.com"
__status__ = "Research Prototype"


class ClassGraphBuilder(AbstractGraphBuilder):
    def __init__(self, opt):
        super().__init__(opt)

    @staticmethod
    def _clear_all_nodes():
        """Delete all nodes"""
        for node in ClassNode.nodes.all():
            node.delete()

    def _process_entrypoints(self):
        """Annotate nodes with their entrypoint data"""
        facts_dir = Path(self.opt.GRAPH_FACTS_DIR)

        # ----------------
        # Process Servlets
        # ----------------

        # Make sure all Servlet data files are available
        for servlet_fact in (self.opt.JEE.SERVLET.GenericServlet,
                             self.opt.JEE.SERVLET.WebServlet,
                             self.opt.JEE.SERVLET.ServletFilter):
            if not facts_dir.joinpath(servlet_fact).exists():
                raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), servlet_fact)

        for key, fact_file in self.opt.JEE.SERVLET:
            if not fact_file or not isinstance(fact_file, str):
                continue
            fact_file = facts_dir.joinpath(fact_file)
            with open(fact_file, 'r') as facts:
                classes = facts.readlines()
            for class_name in classes:
                class_name = class_name.rstrip()
                try:
                    graph_node = ClassNode.nodes.get(node_class=class_name)
                except DoesNotExist:
                    continue
                graph_node.node_is_entrypoint = True
                graph_node.node_is_servlet = True
                graph_node.servlet_type = key
                graph_node.save()

        # --------------
        # Process Beans
        # --------------

        # Make sure all Beans data files are available
        for bean_fact in (self.opt.JEE.BEANS.EJBTransactionBean,
                          self.opt.JEE.BEANS.SessionBean,
                          self.opt.JEE.BEANS.SingletonBean,
                          self.opt.JEE.BEANS.StatefulBean,
                          self.opt.JEE.BEANS.StatelessBean):
            if not facts_dir.joinpath(bean_fact).exists():
                raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), bean_fact)

        for key, fact_file in self.opt.JEE.BEANS:
            if not fact_file or not isinstance(fact_file, str):
                continue
            fact_file = facts_dir.joinpath(fact_file)
            with open(fact_file, 'r') as facts:
                classes = facts.readlines()
            for class_name in classes:
                class_name = class_name.rstrip()
                try:
                    graph_node = ClassNode.nodes.get(node_class=class_name)
                except DoesNotExist:
                    continue
                graph_node.node_is_entrypoint = True
                graph_node.node_is_bean = True
                graph_node.bean_type = key
                graph_node.save()

    def _create_prev_and_next_nodes(self, prev_meth: Dict, next_meth: Dict):
        prev_class_name = prev_meth["class"]
        prev_class_short_name = prev_class_name.split('.')[-1]
        try:
            prev_graph_node = ClassNode.nodes.get(
                node_short_name=prev_class_short_name)
        except DoesNotExist:
            # Method information
            prev_graph_node = ClassNode(
                node_class=prev_class_name,
                node_short_name=prev_class_short_name).save()

        next_class_name = next_meth["class"]
        next_class_short_name = next_class_name.split('.')[-1]
        try:
            next_graph_node = ClassNode.nodes.get(
                node_short_name=next_class_short_name)
        except DoesNotExist:
            # Method information
            next_graph_node = ClassNode(
                node_class=next_class_name,
                node_short_name=next_class_short_name).save()

        return prev_graph_node, next_graph_node

    def _populate_heap_edges(self, heap_flows: pd.DataFrame) -> None:
        """Populate heap carried dependencies

        Args:
            heap_flows (pd.DataFrame): Heap flows as a pandas dataframe
        """
        logging.info("Populating heap carried dependencies edges")
        rel_id = 0
        for _, row in tqdm(heap_flows.iterrows(), total=heap_flows.shape[0]):
            prev_meth = row.prev
            next_meth = row.next
            prev_graph_node, next_graph_node = self._create_prev_and_next_nodes(
                prev_meth, next_meth)
            if prev_graph_node != next_graph_node:
                rel = prev_graph_node.heap_flows.relationship(next_graph_node)
                rel_id += 1
                if rel and (rel.pmethod, rel.nmethod, rel.context, rel.heap_object) == (
                        prev_meth['name'], next_meth["name"], row.context, row.heap_obj):
                    rel.weight += 1
                    rel.rel_id = rel_id
                    rel.save()
                else:
                    relationship_property = {
                        "weight": 1,
                        "rel_id": rel_id,
                        "pmethod": prev_meth['name'],
                        "nmethod": next_meth['name'],
                        "context": row.context,
                        "heap_object": row.heap_obj
                    }
                    prev_graph_node.heap_flows.connect(
                        next_graph_node, relationship_property)
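
    # Expected row shape, inferred from the attribute accesses above (illustrative values):
    #   row.prev = {"class": "com.example.Producer", "name": "put"}
    #   row.next = {"class": "com.example.Consumer", "name": "take"}
    #   row.context = "<calling context>", row.heap_obj = "<heap object id>"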
    def _populate_dataflow_edges(self, data_flows: pd.DataFrame) -> None:
        """Populate data flow dependencies

        Args:
            data_flows (pd.DataFrame): Data flows as a pandas dataframe
        """
        logging.info("Populating dataflow edges")
        rel_id = 0
        for _, row in tqdm(data_flows.iterrows(), total=data_flows.shape[0]):
            prev_meth = row.prev
            next_meth = row.next
            prev_graph_node, next_graph_node = self._create_prev_and_next_nodes(
                prev_meth, next_meth)
            if prev_graph_node != next_graph_node:
                rel = prev_graph_node.data_flows.relationship(next_graph_node)
                rel_id += 1
                if rel and (rel.pmethod, rel.nmethod, rel.context) == (
                        prev_meth['name'], next_meth["name"], row.context):
                    rel.rel_id = rel_id
                    rel.weight += 1
                    rel.save()
                else:
                    next_graph_node.data_flows.connect(
                        prev_graph_node, {
                            "weight": 1,
                            "rel_id": rel_id,
                            "pmethod": prev_meth['name'],
                            "nmethod": next_meth['name'],
                            "context": row.context
                        })

    def _populate_callreturn_edges(self, call_ret_flows: pd.DataFrame) -> None:
        """Populate call-return dependencies

        Args:
            call_ret_flows (pd.DataFrame): Call-return flows as a pandas dataframe
        """
        logging.info("Populating call-return dependencies edges")
        rel_id = 0
        for _, row in tqdm(call_ret_flows.iterrows(),
                           total=call_ret_flows.shape[0]):
            prev_meth = row.prev
            next_meth = row.next
            prev_graph_node, next_graph_node = self._create_prev_and_next_nodes(
                prev_meth, next_meth)
            if prev_graph_node.node_class != next_graph_node.node_class:
                rel = prev_graph_node.call_ret_flows.relationship(
                    next_graph_node)
                rel_id += 1
                if rel and (rel.pmethod, rel.nmethod, rel.pcontext, rel.ncontext) == (
                        prev_meth["name"], next_meth["name"], row.prev_context, row.next_context):
                    rel.rel_id = rel_id
                    rel.weight += 1
                    rel.save()
                else:
                    next_graph_node.call_ret_flows.connect(
                        prev_graph_node, {
                            "weight": 1,
                            "rel_id": rel_id,
                            "pmethod": prev_meth['name'],
                            "nmethod": next_meth['name'],
                            "pcontext": row.prev_context,
                            "ncontext": row.next_context
                        })
| 40.263359 | 98 | 0.567921 | 1,168 | 10,549 | 4.864726 | 0.17637 | 0.063358 | 0.031679 | 0.029039 | 0.665611 | 0.583069 | 0.575678 | 0.541359 | 0.495952 | 0.424498 | 0 | 0.004943 | 0.328752 | 10,549 | 261 | 99 | 40.417625 | 0.797486 | 0.115177 | 0 | 0.472527 | 0 | 0 | 0.039695 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.06044 | 0 | 0.10989 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74c5c6595e58e5ebc6c9bd0923ccb47fb28a9fb2 | 3,025 | py | Python | tcfcli/cmds/local/common/options.py | dorasang/tcfcli | a24f14aa5a0281822de580479471bd3e83a8778b | [
"Apache-2.0"
] | 14 | 2019-03-01T09:47:36.000Z | 2019-11-28T01:58:54.000Z | tcfcli/cmds/local/common/options.py | dorasang/tcfcli | a24f14aa5a0281822de580479471bd3e83a8778b | [
"Apache-2.0"
] | 8 | 2019-03-12T10:22:53.000Z | 2020-08-20T08:15:51.000Z | tcfcli/cmds/local/common/options.py | dorasang/tcfcli | a24f14aa5a0281822de580479471bd3e83a8778b | [
"Apache-2.0"
] | 7 | 2019-03-01T09:47:52.000Z | 2020-06-13T12:14:48.000Z | import click
import os

_DEFAULT_TEMPLATE_FILE = 'template.[yaml|yml]'


def get_template_abspath(ctx, param, template_name):
    if template_name == _DEFAULT_TEMPLATE_FILE:
        template_name = 'template.yaml'
        tmp = 'template.yml'
        if os.path.exists(tmp):
            template_name = tmp
    return os.path.abspath(template_name)


def template_click_option():
    """
    Click Option for template option
    """
    return click.option('--template', '-t',
                        default=_DEFAULT_TEMPLATE_FILE,
                        type=click.Path(exists=True),
                        envvar="TCF_TEMPLATE_FILE",
                        callback=get_template_abspath,
                        show_default=True)


def invoke_common_options(f):
    invoke_options = [
        template_click_option(),
        click.option('--env-vars', '-n',
                     help='JSON file that contains function environment variables.',
                     type=click.Path(exists=True)),
        click.option('--debug-port', '-d',
                     help='The port exposed for debugging. If specified, the local container will start in debug mode.',
                     envvar="TCF_DEBUG_PORT"),
        click.option('--debugger-path',
                     help='The debugger path on the host. If specified, the debugger will be mounted into the function container.'),
        click.option('--debug-args',
                     help='Additional args to be passed to the debugger.',
                     envvar="DEBUGGER_ARGS"),
        click.option('--docker-volume-basedir', '-v',
                     help='The base directory where the TCF template is located.',
                     envvar="TCF_DOCKER_VOLUME_BASEDIR"),
        click.option('--docker-network',
                     help='Specifies the name or id of an existing docker network which containers should connect to, '
                          'along with the default bridge network.',
                     envvar="TCF_DOCKER_NETWORK"),
        click.option('--log-file', '-l',
                     help='Path of the log file to send runtime logs to'),
        click.option('--skip-pull-image',
                     is_flag=True,
                     help='Specify whether the CLI should skip pulling or updating docker images',
                     envvar="TCF_SKIP_PULL_IMAGE"),
        click.option('--region'),
    ]

    for option in reversed(invoke_options):
        option(f)

    return f


def service_common_options(port):
    def construct_options(f):
        service_options = [
            click.option('--host',
                         default="127.0.0.1",
                         help="Local hostname or IP address to bind to (default: '127.0.0.1')"),
            click.option("--port", "-p",
                         default=port,
                         help="Local port number to listen on (default: '{}')".format(str(port)))
        ]

        for option in reversed(service_options):
            option(f)
        return f
return construct_options | 32.880435 | 125 | 0.551736 | 326 | 3,025 | 4.97546 | 0.361963 | 0.101726 | 0.035142 | 0.033292 | 0.107275 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006021 | 0.341157 | 3,025 | 92 | 126 | 32.880435 | 0.807827 | 0.010579 | 0 | 0.063492 | 0 | 0 | 0.324043 | 0.016118 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079365 | false | 0.015873 | 0.031746 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74c8da5877fedaee54944d0a1f1f838582ece639 | 2,049 | py | Python | apis/news/query_helper.py | lannguyen0910/SAB | 12c787cf592cb26c2a91829038ad1c2f9bac1f16 | [
"MIT"
] | 3 | 2021-11-03T14:36:53.000Z | 2021-11-18T17:21:28.000Z | apis/news/query_helper.py | lannguyen0910/SAB | 12c787cf592cb26c2a91829038ad1c2f9bac1f16 | [
"MIT"
] | null | null | null | apis/news/query_helper.py | lannguyen0910/SAB | 12c787cf592cb26c2a91829038ad1c2f9bac1f16 | [
"MIT"
] | null | null | null | ERROR_TEXT = "Sources cannot be set if country or category is set."


class QueryHelper(object):
    def __init__(self, query=None, category=None, country=None,
                 sources=None, language=None, slack_channel=None):
        """Constructs the query helper object.

        Args:
            query: string, The query to use. Advanced search is available:
                Surround phrases with quotes (") for an exact match.
                Prepend words or phrases that must appear with a + symbol. Eg: +bitcoin
                Prepend words that must not appear with a - symbol. Eg: -bitcoin
                Alternatively you can use the AND / OR / NOT keywords,
                and optionally group these with parentheses.
                Eg: crypto AND (ethereum OR litecoin) NOT bitcoin.
            category: string, One of business, entertainment, general, health, science,
                sports, technology. Cannot be set if sources is set.
            country: string, The 2-letter ISO 3166-1 code (lowercase) for the country.
                Cannot be set if sources is set.
            sources: list, String sources valid for the API. Obtainable from
                https://newsapi.org/sources or by calling the sources endpoint.
                Cannot be set if category or country is set.
            language: string, The 2-letter ISO-639-1 code of the language
                you want to get headlines for (e.g. "en").
            slack_channel: string, the #channel name where these results will be
                published.

        Raises:
            ValueError if sources is set with country or category.
        """
        if sources is not None and (country is not None or category is not None):
            raise ValueError(ERROR_TEXT)

        self.query = query
        self.category = category
        self.country = country
        self.sources = sources
        self.language = language
        self.slack_channel = slack_channel
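

# Usage sketch (illustrative, not part of the original module): a helper for
# crypto headlines, plus a demonstration of the documented ValueError when
# `sources` is combined with `country` or `category`.
if __name__ == '__main__':
    crypto = QueryHelper(query='crypto AND (ethereum OR litecoin) NOT bitcoin',
                         language='en', slack_channel='#crypto-news')
    try:
        QueryHelper(sources=['bbc-news'], country='us')
    except ValueError as err:
        print(err)  # -> Sources cannot be set if country or category is set.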
| 49.97561 | 87 | 0.617862 | 260 | 2,049 | 4.830769 | 0.403846 | 0.019904 | 0.022293 | 0.031051 | 0.111465 | 0.08121 | 0.039809 | 0 | 0 | 0 | 0 | 0.007948 | 0.324549 | 2,049 | 40 | 88 | 51.225 | 0.899566 | 0.632992 | 0 | 0 | 0 | 0 | 0.094474 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74c973a8e5ee64795c61fcb8c64b1477242ab749 | 1,666 | py | Python | final_project/accounts/forms.py | W7SP/project_defense | 550152bd82998333444ace099c47feffffb6c3ab | [
"MIT"
] | null | null | null | final_project/accounts/forms.py | W7SP/project_defense | 550152bd82998333444ace099c47feffffb6c3ab | [
"MIT"
] | null | null | null | final_project/accounts/forms.py | W7SP/project_defense | 550152bd82998333444ace099c47feffffb6c3ab | [
"MIT"
] | null | null | null | from django.contrib.auth import forms as auth_forms, get_user_model
from django.core.validators import MinLengthValidator
from final_project.accounts.helpers import BootstrapFormMixin
from final_project.accounts.models import Profile
from django import forms
from final_project.main.validators import validate_only_letters

UserModel = get_user_model()


class UserRegistrationForm(BootstrapFormMixin, auth_forms.UserCreationForm):
    first_name = forms.CharField(
        max_length=Profile.FIRST_NAME_MAX_LENGTH,
        validators=(
            MinLengthValidator(Profile.FIRST_NAME_MIN_LENGTH),
            validate_only_letters,
        )
    )

    last_name = forms.CharField(
        max_length=Profile.LAST_NAME_MAX_LENGTH,
    )

    picture = forms.URLField()

    date_of_birth = forms.DateField()

    gender = forms.ChoiceField(
        choices=Profile.GENDERS,
    )

    account_balance = forms.IntegerField()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._init_bootstrap_form_controls()

    class Meta:
        model = UserModel
        fields = ('email',)

    def save(self, commit=True):
        # Create the auth user first, then build the linked Profile from the
        # extra form fields.
        user = super().save(commit=commit)
        profile = Profile(
            first_name=self.cleaned_data['first_name'],
            last_name=self.cleaned_data['last_name'],
            picture=self.cleaned_data['picture'],
            date_of_birth=self.cleaned_data['date_of_birth'],
            gender=self.cleaned_data['gender'],
            account_balance=self.cleaned_data['account_balance'],
            user=user,
        )
        if commit:
            profile.save()
        return user
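

# Usage sketch (illustrative, not part of the original module): driving the
# form from a Django view; `request` is a hypothetical HttpRequest.
#
#   form = UserRegistrationForm(request.POST)
#   if form.is_valid():
#       user = form.save()  # creates the user and its linked Profile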
| 29.75 | 76 | 0.67587 | 185 | 1,666 | 5.778378 | 0.362162 | 0.06174 | 0.084191 | 0.044902 | 0.063611 | 0.063611 | 0 | 0 | 0 | 0 | 0 | 0 | 0.233493 | 1,666 | 55 | 77 | 30.290909 | 0.837118 | 0 | 0 | 0 | 0 | 0 | 0.039016 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.136364 | 0 | 0.386364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74c9ebcbbf685433e3347e934c9eb9cfdc882fb1 | 1,931 | py | Python | mobile-analytics/visualisations/funnel_plots.py | trangqdo/mobile-analytics | 097bb4596bb11ea186048ba5bc925e84c7fd37cc | [
"Apache-2.0"
] | null | null | null | mobile-analytics/visualisations/funnel_plots.py | trangqdo/mobile-analytics | 097bb4596bb11ea186048ba5bc925e84c7fd37cc | [
"Apache-2.0"
] | null | null | null | mobile-analytics/visualisations/funnel_plots.py | trangqdo/mobile-analytics | 097bb4596bb11ea186048ba5bc925e84c7fd37cc | [
"Apache-2.0"
] | null | null | null | from plotly import graph_objs as go

from stats.funnel import create_funnel_df, group_funnel_dfs


def plot_stacked_funnel(events, steps, col=None, from_date=None, to_date=None, step_interval=0):
    """
    Function used for producing a funnel plot

    :param events: (DataFrame)
        events dataframe
    :param steps: (list)
        list containing funnel steps as strings
    :param col: (str)
        column to be used for grouping the funnel dataframes
    :param from_date: (datetime-like, optional)
        start of the date range passed through to create_funnel_df
        (only used when `col` is not provided)
    :param to_date: (datetime-like, optional)
        end of the date range passed through to create_funnel_df
        (only used when `col` is not provided)
    :param step_interval: (int)
        step interval passed through to create_funnel_df
        (only used when `col` is not provided)
    :return: (go.Figure) funnel plot
    """
    # create list to append each trace to
    # this will be passed to "go.Figure" at the end
    data = []

    # if col is provided, create a funnel_df for each entry in the "col"
    if col:
        # generate dict of funnel dataframes
        dict_ = group_funnel_dfs(events, steps, col)
        title = 'Funnel plot per {}'.format(col)
    else:
        funnel_df = create_funnel_df(events, steps, from_date=from_date, to_date=to_date, step_interval=step_interval)
        dict_ = {'Total': funnel_df}
        title = 'Funnel plot'

    for t in dict_.keys():
        trace = go.Funnel(
            name=t,
            y=dict_[t].step.values,
            x=dict_[t].val.values,
            textinfo="value+percent previous"
        )
        data.append(trace)

    layout = go.Layout(margin={"l": 180, "r": 0, "t": 30, "b": 0, "pad": 0},
                       funnelmode="stack",
                       showlegend=True,
                       hovermode='closest',
                       # use the title computed above (avoids "per None" when
                       # col is not set)
                       title=title,
                       legend=dict(orientation="v",
                                   bgcolor='#E2E2E2',
                                   xanchor='left',
                                   font=dict(
                                       size=12)
                                   )
                       )

    return go.Figure(data, layout)
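

# Usage sketch (illustrative; the exact columns `events` must carry are
# defined upstream by stats.funnel.create_funnel_df, so the call below is
# only indicative):
#
#   fig = plot_stacked_funnel(events,
#                             steps=['signup', 'activate', 'purchase'],
#                             col='platform')
#   fig.show()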
| 33.293103 | 118 | 0.520974 | 221 | 1,931 | 4.425339 | 0.457014 | 0.0409 | 0.046012 | 0.03681 | 0.055215 | 0.055215 | 0 | 0 | 0 | 0 | 0 | 0.011775 | 0.384257 | 1,931 | 57 | 119 | 33.877193 | 0.810765 | 0.252719 | 0 | 0 | 0 | 0 | 0.075215 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.0625 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74cb877c97af60c6e4b5a8e17a82bc5b28994b1e | 1,016 | py | Python | segregation/tests/test_local_relative_centralization.py | noahbouchier/segregation | 88bd9608251b8bc42eae9265adb7941279b9868c | [
"BSD-3-Clause"
] | 92 | 2019-02-17T02:36:29.000Z | 2022-01-22T04:29:10.000Z | segregation/tests/test_local_relative_centralization.py | noahbouchier/segregation | 88bd9608251b8bc42eae9265adb7941279b9868c | [
"BSD-3-Clause"
] | 128 | 2019-02-22T03:52:40.000Z | 2022-02-28T18:39:01.000Z | segregation/tests/test_local_relative_centralization.py | noahbouchier/segregation | 88bd9608251b8bc42eae9265adb7941279b9868c | [
"BSD-3-Clause"
] | 29 | 2019-02-17T02:36:50.000Z | 2022-03-17T04:15:49.000Z | import unittest

import geopandas as gpd
import numpy as np
from libpysal.examples import load_example

from segregation.local import LocalRelativeCentralization


class Local_Relative_Centralization_Tester(unittest.TestCase):
    def test_Local_Relative_Centralization(self):
        s_map = gpd.read_file(load_example("Sacramento1").get_path("sacramentot2.shp"))
        df = s_map[["geometry", "BLACK", "TOT_POP"]]
        index = LocalRelativeCentralization(df, "BLACK", "TOT_POP")
        np.testing.assert_almost_equal(
            index.statistics[0:10],
            np.array(
                [
                    0.03443055,
                    -0.29063264,
                    -0.19110976,
                    0.24978919,
                    0.01252249,
                    0.61152941,
                    0.78917647,
                    0.53129412,
                    0.04436346,
                    -0.20216325,
                ]
            ),
        )


if __name__ == "__main__":
    unittest.main()
| 29.028571 | 87 | 0.538386 | 95 | 1,016 | 5.505263 | 0.621053 | 0.042065 | 0.10325 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148903 | 0.372047 | 1,016 | 34 | 88 | 29.882353 | 0.670846 | 0 | 0 | 0 | 0 | 0 | 0.065945 | 0 | 0 | 0 | 0 | 0 | 0.034483 | 1 | 0.034483 | false | 0 | 0.172414 | 0 | 0.241379 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74cf3cdf2e8551d93b756d1f90473ad3386552cc | 819 | py | Python | python3-ardubus/setup.py | rambo/arDuBUS | d68ec05d75b3f463254039b31c31afec7e752f83 | [
"MIT"
] | 3 | 2016-01-31T21:48:37.000Z | 2021-01-17T00:39:22.000Z | python3-ardubus/setup.py | rambo/arDuBUS | d68ec05d75b3f463254039b31c31afec7e752f83 | [
"MIT"
] | null | null | null | python3-ardubus/setup.py | rambo/arDuBUS | d68ec05d75b3f463254039b31c31afec7e752f83 | [
"MIT"
] | null | null | null | """Packaging script for ardubus"""
import os
import subprocess

import setuptools

GIT_VERSION = 'UNKNOWN'
try:
    GIT_VERSION = subprocess.check_output(['git', 'rev-parse', '--verify', '--short', 'HEAD']).decode('ascii').strip()
except subprocess.CalledProcessError:
    pass

setuptools.setup(
    name='ardubus',
    version=os.getenv('PACKAGE_VERSION', '0.1.0+git.%s' % GIT_VERSION),
    author='Eero "rambo" af Heurlin',
    author_email='eero.afheurlin@iki.fi',
    packages=setuptools.find_packages(),
    license='MIT',
    long_description=open('README.md', 'rt', encoding='utf-8').read(),
    long_description_content_type='text/markdown',
    description='ArDuBUS for python3',
    install_requires=open('requirements.txt', 'rt', encoding='utf-8').readlines(),
    url='https://github.com/rambo/ardubus',
)
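
# Usage sketch (illustrative, not part of the original script): the version
# can be pinned explicitly instead of deriving it from the git short hash:
#
#   PACKAGE_VERSION=1.0.0 python setup.py sdist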
| 31.5 | 118 | 0.695971 | 101 | 819 | 5.524752 | 0.683168 | 0.053763 | 0.046595 | 0.050179 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008392 | 0.126984 | 819 | 25 | 119 | 32.76 | 0.772028 | 0.034188 | 0 | 0 | 0 | 0 | 0.289172 | 0.026752 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.047619 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |